Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/dma-default.c  37
-rw-r--r--  arch/mips/mm/init.c         43
2 files changed, 44 insertions, 36 deletions
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 33519ce4954..ae76795685c 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -40,16 +40,38 @@ static inline int cpu_is_noncoherent_r10000(struct device *dev)
current_cpu_type() == CPU_R12000);
}
+static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
+{
+ /* ignore region specifiers */
+ gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+#ifdef CONFIG_ZONE_DMA
+ if (dev == NULL)
+ gfp |= __GFP_DMA;
+ else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
+ gfp |= __GFP_DMA;
+ else
+#endif
+#ifdef CONFIG_ZONE_DMA32
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+ gfp |= __GFP_DMA32;
+ else
+#endif
+ ;
+
+ /* Don't invoke OOM killer */
+ gfp |= __GFP_NORETRY;
+
+ return gfp;
+}
+
void *dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t * dma_handle, gfp_t gfp)
{
void *ret;
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+ gfp = massage_gfp_flags(dev, gfp);
- if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
- gfp |= GFP_DMA;
ret = (void *) __get_free_pages(gfp, get_order(size));
if (ret != NULL) {
@@ -67,11 +89,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
{
void *ret;
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+ gfp = massage_gfp_flags(dev, gfp);
- if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
- gfp |= GFP_DMA;
ret = (void *) __get_free_pages(gfp, get_order(size));
if (ret) {
@@ -343,7 +362,7 @@ int dma_supported(struct device *dev, u64 mask)
* so we can't guarantee allocations that must be
* within a tighter range than GFP_DMA..
*/
- if (mask < 0x00ffffff)
+ if (mask < DMA_BIT_MASK(24))
return 0;
return 1;
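
Note (not part of the patch): the new massage_gfp_flags() above strips any caller-supplied zone modifiers and re-derives one from the device's coherent_dma_mask, then adds __GFP_NORETRY. A minimal user-space sketch of just the zone-selection part follows, assuming both CONFIG_ZONE_DMA and CONFIG_ZONE_DMA32 are enabled; DMA_BIT_MASK(), the __GFP_* values and struct fake_device are stand-ins for the kernel definitions, used only to show how the mask picks a zone.

/*
 * Illustration only: stand-in macros mirroring the selection chain in
 * massage_gfp_flags(); not the real <linux/gfp.h> or <linux/dma-mapping.h>.
 */
#include <stdio.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

#define __GFP_DMA	0x01u		/* stand-in zone modifier values */
#define __GFP_DMA32	0x04u

struct fake_device {			/* stands in for struct device */
	unsigned long long coherent_dma_mask;
};

static unsigned int pick_zone(const struct fake_device *dev)
{
	/* No device, or a mask below the 24-bit limit: only ZONE_DMA is safe. */
	if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(24))
		return __GFP_DMA;
	/* A mask below 32 bits: ZONE_DMA32 covers it. */
	if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		return __GFP_DMA32;
	return 0;			/* any zone will do */
}

int main(void)
{
	struct fake_device d20 = { .coherent_dma_mask = DMA_BIT_MASK(20) };
	struct fake_device d30 = { .coherent_dma_mask = DMA_BIT_MASK(30) };
	struct fake_device d64 = { .coherent_dma_mask = DMA_BIT_MASK(64) };

	printf("20-bit mask: %#x\n", pick_zone(&d20));	/* 0x1 (__GFP_DMA)   */
	printf("30-bit mask: %#x\n", pick_zone(&d30));	/* 0x4 (__GFP_DMA32) */
	printf("64-bit mask: %#x\n", pick_zone(&d64));	/* 0x0 (no modifier) */
	printf("no device:   %#x\n", pick_zone(NULL));	/* 0x1 (__GFP_DMA)   */
	return 0;
}

The comparisons are strict, matching the patch: a device whose mask is exactly DMA_BIT_MASK(24) or DMA_BIT_MASK(32) falls through to the next-wider case.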
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index ec3b9e9f30f..480dec04f55 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -347,11 +347,8 @@ static int __init page_is_ram(unsigned long pagenr)
void __init paging_init(void)
{
- unsigned long zones_size[MAX_NR_ZONES] = { 0, };
-#ifndef CONFIG_FLATMEM
- unsigned long zholes_size[MAX_NR_ZONES] = { 0, };
- unsigned long i, j, pfn;
-#endif
+ unsigned long max_zone_pfns[MAX_NR_ZONES];
+ unsigned long lastpfn;
pagetable_init();
@@ -361,35 +358,27 @@ void __init paging_init(void)
kmap_coherent_init();
#ifdef CONFIG_ZONE_DMA
- if (min_low_pfn < MAX_DMA_PFN && MAX_DMA_PFN <= max_low_pfn) {
- zones_size[ZONE_DMA] = MAX_DMA_PFN - min_low_pfn;
- zones_size[ZONE_NORMAL] = max_low_pfn - MAX_DMA_PFN;
- } else if (max_low_pfn < MAX_DMA_PFN)
- zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
- else
+ max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
- zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
-
+#ifdef CONFIG_ZONE_DMA32
+ max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+ lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
- zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn;
+ max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
+ lastpfn = highend_pfn;
- if (cpu_has_dc_aliases && zones_size[ZONE_HIGHMEM]) {
+ if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
printk(KERN_WARNING "This processor doesn't support highmem."
- " %ldk highmem ignored\n", zones_size[ZONE_HIGHMEM]);
- zones_size[ZONE_HIGHMEM] = 0;
+ " %ldk highmem ignored\n",
+ (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
+ max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
+ lastpfn = max_low_pfn;
}
#endif
-#ifdef CONFIG_FLATMEM
- free_area_init(zones_size);
-#else
- pfn = min_low_pfn;
- for (i = 0; i < MAX_NR_ZONES; i++)
- for (j = 0; j < zones_size[i]; j++, pfn++)
- if (!page_is_ram(pfn))
- zholes_size[i]++;
- free_area_init_node(0, NODE_DATA(0), zones_size, 0, zholes_size);
-#endif
+ free_area_init_nodes(max_zone_pfns);
}
static struct kcore_list kcore_mem, kcore_vmalloc;
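
Note (not part of the patch): the paging_init() rewrite replaces the hand-built zones_size[]/zholes_size[] arrays with a max_zone_pfns[] array of per-zone upper PFN bounds and lets free_area_init_nodes() derive zone spans and holes from the registered memory map. A user-space sketch of how such bounds translate into zone spans follows; the zone layout and PFN values are made up, and the real core-mm code additionally subtracts holes reported by the architecture.

/*
 * Illustration only: how per-zone spans fall out of a max_zone_pfns[]
 * array like the one paging_init() now hands to free_area_init_nodes().
 */
#include <stdio.h>

enum { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

static const char *const zone_names[MAX_NR_ZONES] = {
	"DMA", "DMA32", "Normal", "HighMem"
};

int main(void)
{
	/* Hypothetical layout: 16MB of ZONE_DMA, 4GB of ZONE_DMA32,
	 * the rest ZONE_NORMAL, no highmem (a 64-bit-style setup). */
	unsigned long min_low_pfn = 0x0;
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {
		[ZONE_DMA]     = 0x1000,	/* like MAX_DMA_PFN   (16MB / 4KB) */
		[ZONE_DMA32]   = 0x100000,	/* like MAX_DMA32_PFN (4GB  / 4KB) */
		[ZONE_NORMAL]  = 0x180000,	/* like max_low_pfn                */
		[ZONE_HIGHMEM] = 0x180000,	/* == max_low_pfn: empty zone      */
	};
	unsigned long start = min_low_pfn;
	int i;

	/* Each zone runs from the end of the previous one up to its own
	 * bound; equal bounds yield an empty zone.  The kernel computes
	 * the same spans and then subtracts non-RAM holes per zone. */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		unsigned long end = max_zone_pfns[i];
		if (end < start)
			end = start;
		printf("%-8s pfn [%#lx, %#lx)  %lu pages\n",
		       zone_names[i], start, end, end - start);
		start = end;
	}
	return 0;
}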