Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c |  1 -
-rw-r--r--  arch/powerpc/mm/lmb.c           | 20 ++++++++++++++++++++
-rw-r--r--  arch/powerpc/mm/mem.c           | 15 ++-------------
-rw-r--r--  arch/powerpc/mm/numa.c          |  4 ++--
-rw-r--r--  arch/powerpc/mm/stab.c          |  4 ----
5 files changed, 24 insertions(+), 20 deletions(-)
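
[Review note] Two themes run through this diff, as read from the hunks below: the LMB hunks move allocation-failure handling into the allocator itself (lmb_alloc_base() now panics, and a new __lmb_alloc_base() returns 0 for callers that implement their own fallback), letting call sites drop their BUG_ON/panic checks; the clear_user_page()/copy_user_page() hunks in mem.c separately replace duplicated icache-coherency logic with a call to flush_dcache_page().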
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 149351a84b9..95b4cd6b65e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -430,7 +430,6 @@ void __init htab_initialize(void)
* the absolute address space.
*/
table = lmb_alloc(htab_size_bytes, htab_size_bytes);
- BUG_ON(table == 0);
DBG("Hash table allocated at %lx, size: %lx\n", table,
htab_size_bytes);
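
[Review note] This BUG_ON() becomes redundant because failure is now fatal inside the allocator. A minimal sketch of lmb_alloc() as assumed in this tree (the hunk header in lmb.c below shows its signature, but not its body): it forwards to lmb_alloc_base(), which panics on failure after this patch.

	/* Assumed shape of lmb_alloc(), not part of this diff: it bottoms
	 * out in lmb_alloc_base(), which now panics instead of returning 0,
	 * so BUG_ON(table == 0) at the call site no longer adds anything. */
	unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
	{
		return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
	}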
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
index 9584608fd76..874cd103ce6 100644
--- a/arch/powerpc/mm/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -31,6 +31,8 @@
#define DBG(fmt...)
#endif
+#define LMB_ALLOC_ANYWHERE 0
+
struct lmb lmb;
void lmb_dump_all(void)
@@ -197,6 +199,8 @@ long __init lmb_reserve(unsigned long base, unsigned long size)
{
struct lmb_region *_rgn = &(lmb.reserved);
+ BUG_ON(0 == size);
+
return lmb_add_region(_rgn, base, size);
}
@@ -224,9 +228,25 @@ unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
unsigned long max_addr)
{
+ unsigned long alloc;
+
+ alloc = __lmb_alloc_base(size, align, max_addr);
+
+	if (alloc == 0)
+ panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
+ size, max_addr);
+
+ return alloc;
+}
+
+unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
+ unsigned long max_addr)
+{
long i, j;
unsigned long base = 0;
+ BUG_ON(0 == size);
+
#ifdef CONFIG_PPC32
/* On 32-bit, make sure we allocate lowmem */
if (max_addr == LMB_ALLOC_ANYWHERE)
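
[Review note] lmb.c now exposes two entry points: lmb_alloc_base(), which panics rather than return failure, and __lmb_alloc_base(), which returns 0 so the caller can fall back. A minimal usage sketch, assuming a hypothetical early-boot caller and an arbitrary 256MB limit:

	/* Hypothetical caller, for illustration only. */
	static void __init example_early_alloc(void)
	{
		unsigned long addr;

		/* Never returns failure: panics inside the allocator. */
		addr = lmb_alloc_base(PAGE_SIZE, PAGE_SIZE, 0x10000000);

		/* Returns 0 on failure; the caller owns the fallback. */
		addr = __lmb_alloc_base(PAGE_SIZE, PAGE_SIZE, 0x10000000);
		if (addr == 0)
			addr = __lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
						lmb_end_of_DRAM());
	}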
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 15aac0d78df..6809cdba6e9 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -249,7 +249,6 @@ void __init do_init_bootmem(void)
bootmap_pages = bootmem_bootmap_pages(total_pages);
start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
- BUG_ON(!start);
boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
@@ -435,17 +434,12 @@ void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
clear_page(page);
- if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
- return;
/*
* We shouldn't have to do this, but some versions of glibc
* require it (ld.so assumes zero-filled pages are icache clean)
* - Anton
*/
-
- /* avoid an atomic op if possible */
- if (test_bit(PG_arch_1, &pg->flags))
- clear_bit(PG_arch_1, &pg->flags);
+ flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);
@@ -469,12 +463,7 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
return;
#endif
- if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
- return;
-
- /* avoid an atomic op if possible */
- if (test_bit(PG_arch_1, &pg->flags))
- clear_bit(PG_arch_1, &pg->flags);
+ flush_dcache_page(pg);
}
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
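
[Review note] Both hunks replace an open-coded copy of the same test-and-clear sequence with flush_dcache_page(). For reference, a sketch of powerpc's flush_dcache_page() in this era of the tree, assumed from the code being removed above (it is the identical sequence):

	void flush_dcache_page(struct page *page)
	{
		if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
			return;
		/* avoid an atomic op if possible */
		if (test_bit(PG_arch_1, &page->flags))
			clear_bit(PG_arch_1, &page->flags);
	}

Clearing PG_arch_1 marks the page as not known icache-clean, deferring the actual flush until the page is mapped for execution.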
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 2863a912bcd..da5280f8cf4 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -570,11 +570,11 @@ static void __init *careful_allocation(int nid, unsigned long size,
unsigned long end_pfn)
{
int new_nid;
- unsigned long ret = lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+ unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
/* retry over all memory */
if (!ret)
- ret = lmb_alloc_base(size, align, lmb_end_of_DRAM());
+ ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
if (!ret)
panic("numa.c: cannot allocate %lu bytes on node %d",
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 82e4951826b..91d25fb27f8 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -247,10 +247,6 @@ void stabs_alloc(void)
newstab = lmb_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
1<<SID_SHIFT);
- if (! newstab)
- panic("Unable to allocate segment table for CPU %d.\n",
- cpu);
-
newstab = (unsigned long)__va(newstab);
memset((void *)newstab, 0, HW_PAGE_SIZE);
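
[Review note] The panic removed here is now redundant: lmb_alloc_base() panics internally on failure, with the requested size and limit in its message, so stabs_alloc() can use the returned address unchecked.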