Diffstat (limited to 'arch/ppc')
-rw-r--r--   arch/ppc/8xx_io/commproc.c     |  22
-rw-r--r--   arch/ppc/lib/rheap.c           |  95
-rw-r--r--   arch/ppc/syslib/cpm2_common.c  |  23
3 files changed, 71 insertions, 69 deletions
diff --git a/arch/ppc/8xx_io/commproc.c b/arch/ppc/8xx_io/commproc.c
index 7a8722beac1..e2c6210f234 100644
--- a/arch/ppc/8xx_io/commproc.c
+++ b/arch/ppc/8xx_io/commproc.c
@@ -402,7 +402,7 @@ void m8xx_cpm_dpinit(void)
 	 * with the processor and the microcode patches applied / activated.
 	 * But the following should be at least safe.
 	 */
-	rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
+	rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
 }
 
 /*
@@ -410,9 +410,9 @@ void m8xx_cpm_dpinit(void)
  * This function returns an offset into the DPRAM area.
  * Use cpm_dpram_addr() to get the virtual address of the area.
  */
-uint cpm_dpalloc(uint size, uint align)
+unsigned long cpm_dpalloc(uint size, uint align)
 {
-	void *start;
+	unsigned long start;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cpm_dpmem_lock, flags);
@@ -420,34 +420,34 @@ uint cpm_dpalloc(uint size, uint align)
 	start = rh_alloc(&cpm_dpmem_info, size, "commproc");
 	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
 
-	return (uint)start;
+	return start;
 }
 EXPORT_SYMBOL(cpm_dpalloc);
 
-int cpm_dpfree(uint offset)
+int cpm_dpfree(unsigned long offset)
 {
 	int ret;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cpm_dpmem_lock, flags);
-	ret = rh_free(&cpm_dpmem_info, (void *)offset);
+	ret = rh_free(&cpm_dpmem_info, offset);
 	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
 
 	return ret;
 }
 EXPORT_SYMBOL(cpm_dpfree);
 
-uint cpm_dpalloc_fixed(uint offset, uint size, uint align)
+unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align)
 {
-	void *start;
+	unsigned long start;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cpm_dpmem_lock, flags);
 	cpm_dpmem_info.alignment = align;
-	start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc");
+	start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc");
 	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
 
-	return (uint)start;
+	return start;
 }
 EXPORT_SYMBOL(cpm_dpalloc_fixed);
 
@@ -457,7 +457,7 @@ void cpm_dpdump(void)
 }
 EXPORT_SYMBOL(cpm_dpdump);
 
-void *cpm_dpram_addr(uint offset)
+void *cpm_dpram_addr(unsigned long offset)
 {
 	return ((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem + offset;
 }
diff --git a/arch/ppc/lib/rheap.c b/arch/ppc/lib/rheap.c
index d40700795a9..9dc2f3458de 100644
--- a/arch/ppc/lib/rheap.c
+++ b/arch/ppc/lib/rheap.c
@@ -132,7 +132,7 @@ static rh_block_t *get_slot(rh_info_t * info)
 	info->empty_slots--;
 
 	/* Initialize */
-	blk->start = NULL;
+	blk->start = 0;
 	blk->size = 0;
 	blk->owner = NULL;
 
@@ -157,7 +157,7 @@ static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
 
 	/* We assume that they are aligned properly */
 	size = blkn->size;
-	s = (unsigned long)blkn->start;
+	s = blkn->start;
 	e = s + size;
 
 	/* Find the blocks immediately before and after the given one
@@ -169,7 +169,7 @@ static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
 	list_for_each(l, &info->free_list) {
 		blk = list_entry(l, rh_block_t, list);
 
-		bs = (unsigned long)blk->start;
+		bs = blk->start;
 		be = bs + blk->size;
 
 		if (next == NULL && s >= bs)
@@ -187,10 +187,10 @@ static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
 	}
 
 	/* Now check if they are really adjacent */
-	if (before != NULL && s != (unsigned long)before->start + before->size)
+	if (before && s != (before->start + before->size))
 		before = NULL;
 
-	if (after != NULL && e != (unsigned long)after->start)
+	if (after && e != after->start)
 		after = NULL;
 
 	/* No coalescing; list insert and return */
@@ -215,7 +215,7 @@ static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
 
 	/* Grow the after block backwards */
 	if (before == NULL && after != NULL) {
-		after->start = (int8_t *)after->start - size;
+		after->start -= size;
 		after->size += size;
 		return;
 	}
@@ -320,14 +320,14 @@ void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
 }
 
 /* Attach a free memory region, coalesces regions if adjuscent */
-int rh_attach_region(rh_info_t * info, void *start, int size)
+int rh_attach_region(rh_info_t * info, unsigned long start, int size)
 {
 	rh_block_t *blk;
 	unsigned long s, e, m;
 	int r;
 
 	/* The region must be aligned */
-	s = (unsigned long)start;
+	s = start;
 	e = s + size;
 	m = info->alignment - 1;
 
@@ -337,9 +337,12 @@ int rh_attach_region(rh_info_t * info, void *start, int size)
 	/* Round end down */
 	e = e & ~m;
 
+	if (IS_ERR_VALUE(e) || (e < s))
+		return -ERANGE;
+
 	/* Take final values */
-	start = (void *)s;
-	size = (int)(e - s);
+	start = s;
+	size = e - s;
 
 	/* Grow the blocks, if needed */
 	r = assure_empty(info, 1);
@@ -357,7 +360,7 @@ int rh_attach_region(rh_info_t * info, void *start, int size)
 }
 
 /* Detatch given address range, splits free block if needed. */
-void *rh_detach_region(rh_info_t * info, void *start, int size)
+unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size)
 {
 	struct list_head *l;
 	rh_block_t *blk, *newblk;
@@ -365,10 +368,10 @@ void *rh_detach_region(rh_info_t * info, void *start, int size)
 
 	/* Validate size */
 	if (size <= 0)
-		return ERR_PTR(-EINVAL);
+		return (unsigned long) -EINVAL;
 
 	/* The region must be aligned */
-	s = (unsigned long)start;
+	s = start;
 	e = s + size;
 	m = info->alignment - 1;
 
@@ -379,34 +382,34 @@ void *rh_detach_region(rh_info_t * info, void *start, int size)
 	e = e & ~m;
 
 	if (assure_empty(info, 1) < 0)
-		return ERR_PTR(-ENOMEM);
+		return (unsigned long) -ENOMEM;
 
 	blk = NULL;
 	list_for_each(l, &info->free_list) {
 		blk = list_entry(l, rh_block_t, list);
 		/* The range must lie entirely inside one free block */
-		bs = (unsigned long)blk->start;
-		be = (unsigned long)blk->start + blk->size;
+		bs = blk->start;
+		be = blk->start + blk->size;
 		if (s >= bs && e <= be)
 			break;
 		blk = NULL;
 	}
 
 	if (blk == NULL)
-		return ERR_PTR(-ENOMEM);
+		return (unsigned long) -ENOMEM;
 
 	/* Perfect fit */
 	if (bs == s && be == e) {
 		/* Delete from free list, release slot */
 		list_del(&blk->list);
 		release_slot(info, blk);
-		return (void *)s;
+		return s;
 	}
 
 	/* blk still in free list, with updated start and/or size */
 	if (bs == s || be == e) {
 		if (bs == s)
-			blk->start = (int8_t *)blk->start + size;
+			blk->start += size;
 		blk->size -= size;
 
 	} else {
@@ -415,31 +418,31 @@ void *rh_detach_region(rh_info_t * info, void *start, int size)
 
 		/* the back free fragment */
 		newblk = get_slot(info);
-		newblk->start = (void *)e;
+		newblk->start = e;
 		newblk->size = be - e;
 
 		list_add(&newblk->list, &blk->list);
 	}
 
-	return (void *)s;
+	return s;
 }
 
-void *rh_alloc(rh_info_t * info, int size, const char *owner)
+unsigned long rh_alloc(rh_info_t * info, int size, const char *owner)
 {
 	struct list_head *l;
 	rh_block_t *blk;
 	rh_block_t *newblk;
-	void *start;
+	unsigned long start;
 
 	/* Validate size */
 	if (size <= 0)
-		return ERR_PTR(-EINVAL);
+		return (unsigned long) -EINVAL;
 
 	/* Align to configured alignment */
 	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
 
 	if (assure_empty(info, 1) < 0)
-		return ERR_PTR(-ENOMEM);
+		return (unsigned long) -ENOMEM;
 
 	blk = NULL;
 	list_for_each(l, &info->free_list) {
@@ -450,7 +453,7 @@ void *rh_alloc(rh_info_t * info, int size, const char *owner)
 	}
 
 	if (blk == NULL)
-		return ERR_PTR(-ENOMEM);
+		return (unsigned long) -ENOMEM;
 
 	/* Just fits */
 	if (blk->size == size) {
@@ -470,7 +473,7 @@ void *rh_alloc(rh_info_t * info, int size, const char *owner)
 	newblk->owner = owner;
 
 	/* blk still in free list, with updated start, size */
-	blk->start = (int8_t *)blk->start + size;
+	blk->start += size;
 	blk->size -= size;
 
 	start = newblk->start;
@@ -481,18 +484,18 @@ void *rh_alloc(rh_info_t * info, int size, const char *owner)
 }
 
 /* allocate at precisely the given address */
-void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
+unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner)
 {
 	struct list_head *l;
 	rh_block_t *blk, *newblk1, *newblk2;
-	unsigned long s, e, m, bs, be;
+	unsigned long s, e, m, bs=0, be=0;
 
 	/* Validate size */
 	if (size <= 0)
-		return ERR_PTR(-EINVAL);
+		return (unsigned long) -EINVAL;
 
 	/* The region must be aligned */
-	s = (unsigned long)start;
+	s = start;
 	e = s + size;
 	m = info->alignment - 1;
 
@@ -503,20 +506,20 @@ void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
 	e = e & ~m;
 
 	if (assure_empty(info, 2) < 0)
-		return ERR_PTR(-ENOMEM);
+		return (unsigned long) -ENOMEM;
 
 	blk = NULL;
 	list_for_each(l, &info->free_list) {
 		blk = list_entry(l, rh_block_t, list);
 		/* The range must lie entirely inside one free block */
-		bs = (unsigned long)blk->start;
-		be = (unsigned long)blk->start + blk->size;
+		bs = blk->start;
+		be = blk->start + blk->size;
 		if (s >= bs && e <= be)
 			break;
 	}
 
 	if (blk == NULL)
-		return ERR_PTR(-ENOMEM);
+		return (unsigned long) -ENOMEM;
 
 	/* Perfect fit */
 	if (bs == s && be == e) {
@@ -534,7 +537,7 @@ void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
 	/* blk still in free list, with updated start and/or size */
 	if (bs == s || be == e) {
 		if (bs == s)
-			blk->start = (int8_t *)blk->start + size;
+			blk->start += size;
 		blk->size -= size;
 
 	} else {
@@ -543,14 +546,14 @@ void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
 
 		/* The back free fragment */
 		newblk2 = get_slot(info);
-		newblk2->start = (void *)e;
+		newblk2->start = e;
 		newblk2->size = be - e;
 
 		list_add(&newblk2->list, &blk->list);
 	}
 
 	newblk1 = get_slot(info);
-	newblk1->start = (void *)s;
+	newblk1->start = s;
 	newblk1->size = e - s;
 	newblk1->owner = owner;
 
@@ -560,7 +563,7 @@ void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
 	return start;
 }
 
-int rh_free(rh_info_t * info, void *start)
+int rh_free(rh_info_t * info, unsigned long start)
 {
 	rh_block_t *blk, *blk2;
 	struct list_head *l;
@@ -625,7 +628,7 @@ int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
 	return nr;
 }
 
-int rh_set_owner(rh_info_t * info, void *start, const char *owner)
+int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner)
 {
 	rh_block_t *blk, *blk2;
 	struct list_head *l;
@@ -667,8 +670,8 @@ void rh_dump(rh_info_t * info)
 		nr = maxnr;
 	for (i = 0; i < nr; i++)
 		printk(KERN_INFO
-		       " 0x%p-0x%p (%u)\n",
-		       st[i].start, (int8_t *) st[i].start + st[i].size,
+		       " 0x%lx-0x%lx (%u)\n",
+		       st[i].start, st[i].start + st[i].size,
 		       st[i].size);
 	printk(KERN_INFO "\n");
 
@@ -678,8 +681,8 @@ void rh_dump(rh_info_t * info)
 		nr = maxnr;
 	for (i = 0; i < nr; i++)
 		printk(KERN_INFO
-		       " 0x%p-0x%p (%u) %s\n",
-		       st[i].start, (int8_t *) st[i].start + st[i].size,
+		       " 0x%lx-0x%lx (%u) %s\n",
+		       st[i].start, st[i].start + st[i].size,
 		       st[i].size, st[i].owner != NULL ? st[i].owner : "");
 	printk(KERN_INFO "\n");
 }
@@ -687,6 +690,6 @@ void rh_dump(rh_info_t * info)
 void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
 {
 	printk(KERN_INFO
-	       "blk @0x%p: 0x%p-0x%p (%u)\n",
-	       blk, blk->start, (int8_t *) blk->start + blk->size, blk->size);
+	       "blk @0x%p: 0x%lx-0x%lx (%u)\n",
+	       blk, blk->start, blk->start + blk->size, blk->size);
 }
diff --git a/arch/ppc/syslib/cpm2_common.c b/arch/ppc/syslib/cpm2_common.c
index cbac44b1620..6cd859d7721 100644
--- a/arch/ppc/syslib/cpm2_common.c
+++ b/arch/ppc/syslib/cpm2_common.c
@@ -136,15 +136,14 @@ static void cpm2_dpinit(void)
 	 * varies with the processor and the microcode patches activated.
 	 * But the following should be at least safe.
 	 */
-	rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE,
-			CPM_DATAONLY_SIZE);
+	rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
 }
 
 /* This function returns an index into the DPRAM area.
  */
-uint cpm_dpalloc(uint size, uint align)
+unsigned long cpm_dpalloc(uint size, uint align)
 {
-	void *start;
+	unsigned long start;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cpm_dpmem_lock, flags);
@@ -152,17 +151,17 @@
 	start = rh_alloc(&cpm_dpmem_info, size, "commproc");
 	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
 
-	return (uint)start;
+	return start;
 }
 EXPORT_SYMBOL(cpm_dpalloc);
 
-int cpm_dpfree(uint offset)
+int cpm_dpfree(unsigned long offset)
 {
 	int ret;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cpm_dpmem_lock, flags);
-	ret = rh_free(&cpm_dpmem_info, (void *)offset);
+	ret = rh_free(&cpm_dpmem_info, offset);
 	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
 
 	return ret;
@@ -170,17 +169,17 @@ int cpm_dpfree(uint offset)
 EXPORT_SYMBOL(cpm_dpfree);
 
 /* not sure if this is ever needed */
-uint cpm_dpalloc_fixed(uint offset, uint size, uint align)
+unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align)
 {
-	void *start;
+	unsigned long start;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cpm_dpmem_lock, flags);
 	cpm_dpmem_info.alignment = align;
-	start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc");
+	start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc");
 	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
 
-	return (uint)start;
+	return start;
 }
 EXPORT_SYMBOL(cpm_dpalloc_fixed);
 
@@ -190,7 +189,7 @@ void cpm_dpdump(void)
 }
 EXPORT_SYMBOL(cpm_dpdump);
 
-void *cpm_dpram_addr(uint offset)
+void *cpm_dpram_addr(unsigned long offset)
 {
 	return (void *)&cpm2_immr->im_dprambase[offset];
 }