author     Harvey Harrison <harvey.harrison@gmail.com>  2008-03-29 08:21:07 +1100
committer  Paul Mackerras <paulus@samba.org>  2008-04-01 20:43:09 +1100
commit     e48b1b452ff630288c930fd8e0c2d808bc15f7ad (patch)
tree       7ba5f83964a1d965d8b8b6187b39083e1a99dfd2 /arch/powerpc/platforms/cell
parent     a78bfbfcfaca64e6198f164c43a60afc8a50e2c6 (diff)
[POWERPC] Replace remaining __FUNCTION__ occurrences
__FUNCTION__ is gcc-specific, use __func__

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
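For reference, __func__ is a predefined identifier in standard C (since C99), while __FUNCTION__ is a GCC extension kept only for backwards compatibility; both expand to the name of the enclosing function, so the substitution in this patch is purely mechanical. A minimal stand-alone sketch of the difference (not taken from the kernel tree; the function name and node argument are invented for illustration):

    /* Illustrative only: shows that the standard __func__ spelling
     * produces the same output the gcc-specific __FUNCTION__ did.
     */
    #include <stdio.h>

    static void setup_iommu(int nid)
    {
            /* preferred, standard C99 spelling */
            printf("%s: iommu[%d]: initialising\n", __func__, nid);
    }

    int main(void)
    {
            setup_iommu(0);  /* prints "setup_iommu: iommu[0]: initialising" */
            return 0;
    }
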
Diffstat (limited to 'arch/powerpc/platforms/cell')
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c          |  8
-rw-r--r--  arch/powerpc/platforms/cell/pervasive.c      |  2
-rw-r--r--  arch/powerpc/platforms/cell/ras.c            | 10
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c       |  8
-rw-r--r--  arch/powerpc/platforms/cell/spu_callbacks.c  |  2
-rw-r--r--  arch/powerpc/platforms/cell/spu_manage.c     |  8
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c     |  4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c      | 10
8 files changed, 26 insertions, 26 deletions
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 49fe641d434..45646b2b4af 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -316,7 +316,7 @@ static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;
pr_debug("%s: iommu[%d]: segments: %lu\n",
- __FUNCTION__, iommu->nid, segments);
+ __func__, iommu->nid, segments);
/* set up the segment table */
stab_size = segments * sizeof(unsigned long);
@@ -343,7 +343,7 @@ static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
(1 << 12) / sizeof(unsigned long));
ptab_size = segments * pages_per_segment * sizeof(unsigned long);
- pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
+ pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__,
iommu->nid, ptab_size, get_order(ptab_size));
page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
BUG_ON(!page);
@@ -355,7 +355,7 @@ static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12;
pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
- __FUNCTION__, iommu->nid, iommu->stab, ptab,
+ __func__, iommu->nid, iommu->stab, ptab,
n_pte_pages);
/* initialise the STEs */
@@ -394,7 +394,7 @@ static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
panic("%s: missing IOC register mappings for node %d\n",
- __FUNCTION__, iommu->nid);
+ __func__, iommu->nid);
iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index 0304589c0a8..8a3631ce912 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -65,7 +65,7 @@ static void cbe_power_save(void)
break;
default:
printk(KERN_WARNING "%s: unknown configuration\n",
- __FUNCTION__);
+ __func__);
break;
}
mtspr(SPRN_TSC_CELL, thread_switch_control);
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index e43024c0392..655704ad03c 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -132,7 +132,7 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
(unsigned int)(addr >> 32),
(unsigned int)(addr & 0xffffffff))) {
printk(KERN_ERR "%s: error enabling PTCAL on node %d!\n",
- __FUNCTION__, nid);
+ __func__, nid);
goto out_free_pages;
}
@@ -162,7 +162,7 @@ static int __init cbe_ptcal_enable(void)
if (!size)
return -ENODEV;
- pr_debug("%s: enabling PTCAL, size = 0x%x\n", __FUNCTION__, *size);
+ pr_debug("%s: enabling PTCAL, size = 0x%x\n", __func__, *size);
order = get_order(*size);
of_node_put(np);
@@ -180,7 +180,7 @@ static int __init cbe_ptcal_enable(void)
const u32 *nid = of_get_property(np, "node-id", NULL);
if (!nid) {
printk(KERN_ERR "%s: node %s is missing node-id?\n",
- __FUNCTION__, np->full_name);
+ __func__, np->full_name);
continue;
}
cbe_ptcal_enable_on_node(*nid, order);
@@ -195,13 +195,13 @@ static int cbe_ptcal_disable(void)
struct ptcal_area *area, *tmp;
int ret = 0;
- pr_debug("%s: disabling PTCAL\n", __FUNCTION__);
+ pr_debug("%s: disabling PTCAL\n", __func__);
list_for_each_entry_safe(area, tmp, &ptcal_list, list) {
/* disable ptcal on this node */
if (rtas_call(ptcal_stop_tok, 1, 1, NULL, area->nid)) {
printk(KERN_ERR "%s: error disabling PTCAL "
- "on node %d!\n", __FUNCTION__,
+ "on node %d!\n", __func__,
area->nid);
ret = -EIO;
continue;
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 712001f6b7d..6bab44b7716 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -165,7 +165,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
struct spu_slb slb;
int psize;
- pr_debug("%s\n", __FUNCTION__);
+ pr_debug("%s\n", __func__);
slb.esid = (ea & ESID_MASK) | SLB_ESID_V;
@@ -215,7 +215,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
- pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);
+ pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);
/* Handle kernel space hash faults immediately.
User hash faults need to be deferred to process context. */
@@ -351,7 +351,7 @@ spu_irq_class_1(int irq, void *data)
__spu_trap_data_seg(spu, dar);
spin_unlock(&spu->register_lock);
- pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
+ pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
dar, dsisr);
if (stat & CLASS1_STORAGE_FAULT_INTR)
@@ -726,7 +726,7 @@ static int __init init_spu_base(void)
if (ret < 0) {
printk(KERN_WARNING "%s: Error initializing spus\n",
- __FUNCTION__);
+ __func__);
goto out_unregister_sysdev_class;
}
diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c
index dceb8b6a938..19f6bfdbb93 100644
--- a/arch/powerpc/platforms/cell/spu_callbacks.c
+++ b/arch/powerpc/platforms/cell/spu_callbacks.c
@@ -54,7 +54,7 @@ long spu_sys_callback(struct spu_syscall_block *s)
long (*syscall)(u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6);
if (s->nr_ret >= ARRAY_SIZE(spu_syscall_table)) {
- pr_debug("%s: invalid syscall #%ld", __FUNCTION__, s->nr_ret);
+ pr_debug("%s: invalid syscall #%ld", __func__, s->nr_ret);
return -ENOSYS;
}
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index d351bdebf5f..4c506c1463c 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -92,7 +92,7 @@ static int __init spu_map_interrupts_old(struct spu *spu,
tmp = of_get_property(np->parent->parent, "node-id", NULL);
if (!tmp) {
- printk(KERN_WARNING "%s: can't find node-id\n", __FUNCTION__);
+ printk(KERN_WARNING "%s: can't find node-id\n", __func__);
nid = spu->node;
} else
nid = tmp[0];
@@ -296,7 +296,7 @@ static int __init of_enumerate_spus(int (*fn)(void *data))
ret = fn(node);
if (ret) {
printk(KERN_WARNING "%s: Error initializing %s\n",
- __FUNCTION__, node->name);
+ __func__, node->name);
break;
}
n++;
@@ -327,7 +327,7 @@ static int __init of_create_spu(struct spu *spu, void *data)
if (!legacy_map) {
legacy_map = 1;
printk(KERN_WARNING "%s: Legacy device tree found, "
- "trying to map old style\n", __FUNCTION__);
+ "trying to map old style\n", __func__);
}
ret = spu_map_device_old(spu);
if (ret) {
@@ -342,7 +342,7 @@ static int __init of_create_spu(struct spu *spu, void *data)
if (!legacy_irq) {
legacy_irq = 1;
printk(KERN_WARNING "%s: Legacy device tree found, "
- "trying old style irq\n", __FUNCTION__);
+ "trying old style irq\n", __func__);
}
ret = spu_map_interrupts_old(spu, spe);
if (ret) {
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index f7a7e8635fb..f14d3a0a861 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1556,7 +1556,7 @@ void spufs_mfc_callback(struct spu *spu)
wake_up_all(&ctx->mfc_wq);
- pr_debug("%s %s\n", __FUNCTION__, spu->name);
+ pr_debug("%s %s\n", __func__, spu->name);
if (ctx->mfc_fasync) {
u32 free_elements, tagstatus;
unsigned int mask;
@@ -1790,7 +1790,7 @@ static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
if (tagstatus & ctx->tagwait)
mask |= POLLIN | POLLRDNORM;
- pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
+ pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
free_elements, tagstatus, ctx->tagwait);
return mask;
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index cac69e11677..5986d08115a 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -98,7 +98,7 @@ static int spu_setup_isolated(struct spu_context *ctx)
!= MFC_CNTL_PURGE_DMA_COMPLETE) {
if (time_after(jiffies, timeout)) {
printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
- __FUNCTION__);
+ __func__);
ret = -EIO;
goto out;
}
@@ -124,7 +124,7 @@ static int spu_setup_isolated(struct spu_context *ctx)
status_loading) {
if (time_after(jiffies, timeout)) {
printk(KERN_ERR "%s: timeout waiting for loader\n",
- __FUNCTION__);
+ __func__);
ret = -EIO;
goto out_drop_priv;
}
@@ -134,7 +134,7 @@ static int spu_setup_isolated(struct spu_context *ctx)
if (!(status & SPU_STATUS_RUNNING)) {
/* If isolated LOAD has failed: run SPU, we will get a stop-and
* signal later. */
- pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
+ pr_debug("%s: isolated LOAD failed\n", __func__);
ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
ret = -EACCES;
goto out_drop_priv;
@@ -142,7 +142,7 @@ static int spu_setup_isolated(struct spu_context *ctx)
if (!(status & SPU_STATUS_ISOLATED_STATE)) {
/* This isn't allowed by the CBEA, but check anyway */
- pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
+ pr_debug("%s: SPU fell out of isolated mode?\n", __func__);
ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
ret = -EINVAL;
goto out_drop_priv;
@@ -282,7 +282,7 @@ static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
break;
default:
printk(KERN_WARNING "%s: unexpected return code %ld\n",
- __FUNCTION__, *spu_ret);
+ __func__, *spu_ret);
ret = 0;
}
return ret;