From 0887309589824fb1c3744c69a330c99c369124a0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 23 Apr 2007 21:08:06 +0200 Subject: [POWERPC] spufs: use cancel_rearming_delayed_workqueue when stopping spu contexts The scheduler workqueue may rearm itself and deadlock when we try to stop it. Put a flag in place to skip the work if we're tearing down the context. Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/sched.c | 25 ++++++++++++++++++++++--- arch/powerpc/platforms/cell/spufs/spufs.h | 2 +- 2 files changed, 23 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index c9561582ce2..003e330fc76 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -71,14 +71,25 @@ static inline int node_allowed(int node) void spu_start_tick(struct spu_context *ctx) { - if (ctx->policy == SCHED_RR) + if (ctx->policy == SCHED_RR) { + /* + * Make sure the exiting bit is cleared. + */ + clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags); queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE); + } } void spu_stop_tick(struct spu_context *ctx) { - if (ctx->policy == SCHED_RR) + if (ctx->policy == SCHED_RR) { + /* + * While the work can be rearming normally setting this flag + * makes sure it does not rearm itself anymore. + */ + set_bit(SPU_SCHED_EXITING, &ctx->sched_flags); cancel_delayed_work(&ctx->sched_work); + } } void spu_sched_tick(struct work_struct *work) @@ -88,6 +99,14 @@ void spu_sched_tick(struct work_struct *work) struct spu *spu; int rearm = 1; + /* + * If this context is being stopped avoid rescheduling from the + * scheduler tick because we would block on the state_mutex. + * The caller will yield the spu later on anyway. + */ + if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags)) + return; + mutex_lock(&ctx->state_mutex); spu = ctx->spu; if (spu) { @@ -377,7 +396,7 @@ static struct spu *find_victim(struct spu_context *ctx) * @ctx: spu context to schedule * @flags: flags (currently ignored) * - * Tries to find a free spu to run @ctx. If no free spu is availble + * Tries to find a free spu to run @ctx. If no free spu is available * add the context to the runqueue so it gets woken up once an spu * is available. */ diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 5c4e47d69d7..f418378abdf 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -41,7 +41,7 @@ struct spu_gang; /* ctx->sched_flags */ enum { - SPU_SCHED_WAKE = 0, /* currently unused */ + SPU_SCHED_EXITING = 0, }; struct spu_context { -- cgit v1.2.3 From 43c2bbd932b66403688f3d812065d82f8fb8f4b3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 23 Apr 2007 21:08:07 +0200 Subject: [POWERPC] spufs: clear mapping pointers after last close Make sure the pointers to various mappings are cleared once the last user has stopped using them. This avoids accessing freed memory when tearing down the gang directory as well as optimizing away pte invalidations if no one uses these.
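In short: each mappable file gets a per-inode open count, and the context's mapping pointer is only set by the first opener and cleared by the last closer. A minimal sketch of the pattern repeated for every mappable file in the hunks below (identifiers are the ones this patch introduces; this is an out-of-context illustration, not a literal hunk):

        spin_lock(&ctx->mapping_lock);
        if (!i->i_openers++)                    /* first open: publish */
                ctx->local_store = inode->i_mapping;
        spin_unlock(&ctx->mapping_lock);
        ...
        spin_lock(&ctx->mapping_lock);
        if (!--i->i_openers)                    /* last close: clear */
                ctx->local_store = NULL;
        spin_unlock(&ctx->mapping_lock);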
Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/context.c | 1 + arch/powerpc/platforms/cell/spufs/file.c | 147 ++++++++++++++++++++++++++-- arch/powerpc/platforms/cell/spufs/inode.c | 1 + arch/powerpc/platforms/cell/spufs/spufs.h | 12 ++- 4 files changed, 148 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index 04ad2e364e9..b3954aba424 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c @@ -41,6 +41,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang) goto out_free; } spin_lock_init(&ctx->mmio_lock); + spin_lock_init(&ctx->mapping_lock); kref_init(&ctx->kref); mutex_init(&ctx->state_mutex); init_MUTEX(&ctx->run_sema); diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 505266a568d..deb340e6e0a 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -44,8 +44,26 @@ spufs_mem_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; + + spin_lock(&ctx->mapping_lock); file->private_data = ctx; - ctx->local_store = inode->i_mapping; + if (!i->i_openers++) + ctx->local_store = inode->i_mapping; + spin_unlock(&ctx->mapping_lock); + smp_wmb(); + return 0; +} + +static int +spufs_mem_release(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + spin_lock(&ctx->mapping_lock); + if (!--i->i_openers) + ctx->local_store = NULL; + spin_unlock(&ctx->mapping_lock); smp_wmb(); return 0; } @@ -149,6 +167,7 @@ spufs_mem_mmap(struct file *file, struct vm_area_struct *vma) static const struct file_operations spufs_mem_fops = { .open = spufs_mem_open, + .release = spufs_mem_release, .read = spufs_mem_read, .write = spufs_mem_write, .llseek = generic_file_llseek, @@ -238,16 +257,35 @@ static int spufs_cntl_open(struct inode *inode, struct file *file) struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; + spin_lock(&ctx->mapping_lock); file->private_data = ctx; - ctx->cntl = inode->i_mapping; + if (!i->i_openers++) + ctx->cntl = inode->i_mapping; + spin_unlock(&ctx->mapping_lock); smp_wmb(); return simple_attr_open(inode, file, spufs_cntl_get, spufs_cntl_set, "0x%08lx"); } +static int +spufs_cntl_release(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + simple_attr_close(inode, file); + + spin_lock(&ctx->mapping_lock); + if (!--i->i_openers) + ctx->cntl = NULL; + spin_unlock(&ctx->mapping_lock); + smp_wmb(); + return 0; +} + static const struct file_operations spufs_cntl_fops = { .open = spufs_cntl_open, - .release = simple_attr_close, + .release = spufs_cntl_release, .read = simple_attr_read, .write = simple_attr_write, .mmap = spufs_cntl_mmap, @@ -723,12 +761,30 @@ static int spufs_signal1_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; + + spin_lock(&ctx->mapping_lock); file->private_data = ctx; - ctx->signal1 = inode->i_mapping; + if (!i->i_openers++) + ctx->signal1 = inode->i_mapping; + spin_unlock(&ctx->mapping_lock); smp_wmb(); return nonseekable_open(inode, file); } +static int +spufs_signal1_release(struct inode *inode, struct file *file) +{ + struct 
spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + spin_lock(&ctx->mapping_lock); + if (!--i->i_openers) + ctx->signal1 = NULL; + spin_unlock(&ctx->mapping_lock); + smp_wmb(); + return 0; +} + static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf, size_t len, loff_t *pos) { @@ -821,6 +877,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma) static const struct file_operations spufs_signal1_fops = { .open = spufs_signal1_open, + .release = spufs_signal1_release, .read = spufs_signal1_read, .write = spufs_signal1_write, .mmap = spufs_signal1_mmap, @@ -830,12 +887,30 @@ static int spufs_signal2_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; + + spin_lock(&ctx->mapping_lock); file->private_data = ctx; - ctx->signal2 = inode->i_mapping; + if (!i->i_openers++) + ctx->signal2 = inode->i_mapping; + spin_unlock(&ctx->mapping_lock); smp_wmb(); return nonseekable_open(inode, file); } +static int +spufs_signal2_release(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + spin_lock(&ctx->mapping_lock); + if (!--i->i_openers) + ctx->signal2 = NULL; + spin_unlock(&ctx->mapping_lock); + smp_wmb(); + return 0; +} + static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf, size_t len, loff_t *pos) { @@ -932,6 +1007,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma) static const struct file_operations spufs_signal2_fops = { .open = spufs_signal2_open, + .release = spufs_signal2_release, .read = spufs_signal2_read, .write = spufs_signal2_write, .mmap = spufs_signal2_mmap, @@ -1031,13 +1107,32 @@ static int spufs_mss_open(struct inode *inode, struct file *file) struct spu_context *ctx = i->i_ctx; file->private_data = i->i_ctx; - ctx->mss = inode->i_mapping; + + spin_lock(&ctx->mapping_lock); + if (!i->i_openers++) + ctx->mss = inode->i_mapping; + spin_unlock(&ctx->mapping_lock); smp_wmb(); return nonseekable_open(inode, file); } +static int +spufs_mss_release(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + spin_lock(&ctx->mapping_lock); + if (!--i->i_openers) + ctx->mss = NULL; + spin_unlock(&ctx->mapping_lock); + smp_wmb(); + return 0; +} + static const struct file_operations spufs_mss_fops = { .open = spufs_mss_open, + .release = spufs_mss_release, .mmap = spufs_mss_mmap, }; @@ -1072,14 +1167,32 @@ static int spufs_psmap_open(struct inode *inode, struct file *file) struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; + spin_lock(&ctx->mapping_lock); file->private_data = i->i_ctx; - ctx->psmap = inode->i_mapping; + if (!i->i_openers++) + ctx->psmap = inode->i_mapping; + spin_unlock(&ctx->mapping_lock); smp_wmb(); return nonseekable_open(inode, file); } +static int +spufs_psmap_release(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + spin_lock(&ctx->mapping_lock); + if (!--i->i_openers) + ctx->psmap = NULL; + spin_unlock(&ctx->mapping_lock); + smp_wmb(); + return 0; +} + static const struct file_operations spufs_psmap_fops = { .open = spufs_psmap_open, + .release = spufs_psmap_release, .mmap = spufs_psmap_mmap, }; @@ -1126,12 +1239,29 @@ static int spufs_mfc_open(struct inode *inode, struct file *file) if 
(atomic_read(&inode->i_count) != 1) return -EBUSY; + spin_lock(&ctx->mapping_lock); file->private_data = ctx; - ctx->mfc = inode->i_mapping; + if (!i->i_openers++) + ctx->mfc = inode->i_mapping; + spin_unlock(&ctx->mapping_lock); smp_wmb(); return nonseekable_open(inode, file); } +static int +spufs_mfc_release(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + spin_lock(&ctx->mapping_lock); + if (!--i->i_openers) + ctx->mfc = NULL; + spin_unlock(&ctx->mapping_lock); + smp_wmb(); + return 0; +} + /* interrupt-level mfc callback function. */ void spufs_mfc_callback(struct spu *spu) { @@ -1399,6 +1529,7 @@ static int spufs_mfc_fasync(int fd, struct file *file, int on) static const struct file_operations spufs_mfc_fops = { .open = spufs_mfc_open, + .release = spufs_mfc_release, .read = spufs_mfc_read, .write = spufs_mfc_write, .poll = spufs_mfc_poll, diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index e3f4ee97c91..423596a6b99 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -54,6 +54,7 @@ spufs_alloc_inode(struct super_block *sb) ei->i_gang = NULL; ei->i_ctx = NULL; + ei->i_openers = 0; return &ei->vfs_inode; } diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index f418378abdf..0fb366d9d25 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -50,11 +50,12 @@ struct spu_context { spinlock_t mmio_lock; /* protects mmio access */ struct address_space *local_store; /* local store mapping. */ struct address_space *mfc; /* 'mfc' area mappings. */ - struct address_space *cntl; /* 'control' area mappings. */ - struct address_space *signal1; /* 'signal1' area mappings. */ - struct address_space *signal2; /* 'signal2' area mappings. */ - struct address_space *mss; /* 'mss' area mappings. */ - struct address_space *psmap; /* 'psmap' area mappings. */ + struct address_space *cntl; /* 'control' area mappings. */ + struct address_space *signal1; /* 'signal1' area mappings. */ + struct address_space *signal2; /* 'signal2' area mappings. */ + struct address_space *mss; /* 'mss' area mappings. */ + struct address_space *psmap; /* 'psmap' area mappings. */ + spinlock_t mapping_lock; u64 object_id; /* user space pointer for oprofile */ enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state; @@ -149,6 +150,7 @@ struct spufs_inode_info { struct spu_context *i_ctx; struct spu_gang *i_gang; struct inode vfs_inode; + int i_openers; }; #define SPUFS_I(inode) \ container_of(inode, struct spufs_inode_info, vfs_inode) -- cgit v1.2.3 From b3e76cc3244ac139fc75750c5af9edbb9f191a10 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 23 Apr 2007 21:08:08 +0200 Subject: [POWERPC] spu sched: ensure preempted threads are put back on the runqueue To not lose a spu thread we need to make sure it always gets put back on the runqueue. 
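The wait loop the hunk below breaks out of is the one spufs_run_spu() sits in via the stop_wq wait helper from spufs.h; roughly (simplified sketch, names as in run.c, not a hunk from this patch):

        /* consumer side in spufs_run_spu(), simplified: */
        ret = spu_wait(ctx->stop_wq, spu_stopped(ctx, &status));
        /*
         * A context deactivated by the scheduler tick produces no SPU
         * stop event of its own, so unless the tick also does an
         * explicit wake_up(&ctx->stop_wq), the thread sleeps here
         * instead of re-entering spu_activate() and going back on the
         * runqueue.
         */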
Signed-off-by: Christoph Hellwig Acked-by: Jeremy Kerr Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/sched.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 003e330fc76..5149dff65c9 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -97,7 +97,7 @@ void spu_sched_tick(struct work_struct *work) struct spu_context *ctx = container_of(work, struct spu_context, sched_work.work); struct spu *spu; - int rearm = 1; + int preempted = 0; /* * If this context is being stopped avoid rescheduling from the @@ -113,12 +113,19 @@ void spu_sched_tick(struct work_struct *work) int best = sched_find_first_bit(spu_prio->bitmap); if (best <= ctx->prio) { spu_deactivate(ctx); - rearm = 0; + preempted = 1; } } mutex_unlock(&ctx->state_mutex); - if (rearm) + if (preempted) { + /* + * We need to break out of the wait loop in spu_run manually + * to ensure this context gets put on the runqueue again + * ASAP. + */ + wake_up(&ctx->stop_wq); + } else spu_start_tick(ctx); } -- cgit v1.2.3 From e097b513285e616215b23af234d127298bb8d89a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 23 Apr 2007 21:08:09 +0200 Subject: [POWERPC] spu sched: ensure preempted threads are put back on the runqueue, part2 To not lose a spu thread we need to make sure it always gets put back on the runqueue. In find_victim as well as in the scheduler tick, as done in the previous patch. Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/sched.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 5149dff65c9..405a0555d75 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -391,6 +391,12 @@ static struct spu *find_victim(struct spu_context *ctx) } spu_unbind_context(spu, victim); mutex_unlock(&victim->state_mutex); + /* + * We need to break out of the wait loop in spu_run + * manually to ensure this context gets put on the + * runqueue again ASAP. + */ + wake_up(&victim->stop_wq); return spu; } } -- cgit v1.2.3 From 390c53430498c9973e015432806edd53b2efe6c6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 23 Apr 2007 21:08:10 +0200 Subject: [POWERPC] spufs: add memory barriers after set_bit set_bit does not guarantee ordering on powerpc, so using it for communication between threads requires explicit mb() calls. Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/sched.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 405a0555d75..1582d764523 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -76,6 +76,7 @@ void spu_start_tick(struct spu_context *ctx) * Make sure the exiting bit is cleared. */ clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags); + mb(); queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE); } } @@ -88,6 +89,7 @@ void spu_stop_tick(struct spu_context *ctx) * While the work can be rearming normally setting this flag * makes sure it does not rearm itself anymore.
*/ set_bit(SPU_SCHED_EXITING, &ctx->sched_flags); + mb(); cancel_delayed_work(&ctx->sched_work); } } @@ -239,6 +241,7 @@ static void spu_add_to_rq(struct spu_context *ctx) spin_lock(&spu_prio->runq_lock); list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]); set_bit(ctx->prio, spu_prio->bitmap); + mb(); spin_unlock(&spu_prio->runq_lock); } -- cgit v1.2.3 From a475c2f43520cb095452201da57395000cfeb94c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 23 Apr 2007 21:08:11 +0200 Subject: [POWERPC] spufs: remove woken threads from the runqueue early A single context should only be woken once, and we should not have more wakeups for a given priority than the number of contexts on that runqueue position. Also add some asserts to trap future problems in this area more easily. Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/context.c | 2 ++ arch/powerpc/platforms/cell/spufs/sched.c | 44 +++++++++++------------------ 2 files changed, 19 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index b3954aba424..065147fb1cc 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c @@ -52,6 +52,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang) ctx->state = SPU_STATE_SAVED; ctx->ops = &spu_backing_ops; ctx->owner = get_task_mm(current); + INIT_LIST_HEAD(&ctx->rq); if (gang) spu_gang_add_ctx(gang, ctx); ctx->rt_priority = current->rt_priority; @@ -76,6 +77,7 @@ void destroy_spu_context(struct kref *kref) spu_fini_csa(&ctx->csa); if (ctx->gang) spu_gang_remove_ctx(ctx->gang, ctx); + BUG_ON(!list_empty(&ctx->rq)); kfree(ctx); } diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 1582d764523..876828cc95a 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -245,6 +245,14 @@ static void spu_add_to_rq(struct spu_context *ctx) spin_unlock(&spu_prio->runq_lock); } +static void __spu_del_from_rq(struct spu_context *ctx, int prio) +{ + if (!list_empty(&ctx->rq)) + list_del_init(&ctx->rq); + if (list_empty(&spu_prio->runq[prio])) + clear_bit(ctx->prio, spu_prio->bitmap); +} + /** * spu_del_from_rq - remove a context from the runqueue * @ctx: context to remove @@ -252,33 +260,10 @@ static void spu_add_to_rq(struct spu_context *ctx) static void spu_del_from_rq(struct spu_context *ctx) { spin_lock(&spu_prio->runq_lock); - list_del_init(&ctx->rq); - if (list_empty(&spu_prio->runq[ctx->prio])) - clear_bit(ctx->prio, spu_prio->bitmap); + __spu_del_from_rq(ctx, ctx->prio); spin_unlock(&spu_prio->runq_lock); } -/** - * spu_grab_context - remove one context from the runqueue - * @prio: priority of the context to be removed - * - * This function removes one context from the runqueue for priority @prio. - * If there is more than one context with the given priority the first - * task on the runqueue will be taken. - * - * Returns the spu_context it just removed. - * - * Must be called with spu_prio->runq_lock held. 
- */ -static struct spu_context *spu_grab_context(int prio) -{ - struct list_head *rq = &spu_prio->runq[prio]; - - if (list_empty(rq)) - return NULL; - return list_entry(rq->next, struct spu_context, rq); -} - static void spu_prio_wait(struct spu_context *ctx) { DEFINE_WAIT(wait); @@ -309,9 +294,14 @@ static void spu_reschedule(struct spu *spu) spin_lock(&spu_prio->runq_lock); best = sched_find_first_bit(spu_prio->bitmap); if (best < MAX_PRIO) { - struct spu_context *ctx = spu_grab_context(best); - if (ctx) - wake_up(&ctx->stop_wq); + struct list_head *rq = &spu_prio->runq[best]; + struct spu_context *ctx; + + BUG_ON(list_empty(rq)); + + ctx = list_entry(rq->next, struct spu_context, rq); + __spu_del_from_rq(ctx, best); + wake_up(&ctx->stop_wq); } spin_unlock(&spu_prio->runq_lock); } -- cgit v1.2.3 From 7ec18ab923a2e377ecb05c74a2d38f457f79950f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 23 Apr 2007 21:08:12 +0200 Subject: [POWERPC] spufs: streamline locking for isolated spu setup For quite a while now spu state has been protected by a simple mutex instead of the old rw_semaphore, and this means we can simplify the locking around spu_setup_isolated a lot. Instead of doing an spu_release before entering spu_setup_isolated and then calling the complicated spu_acquire_exclusive we can now simply enter the function locked and in a guaranteed runnable state, so that the only bit of spu_acquire_exclusive that's left is the call to spu_unmap_mappings. Similarly there's no more need to unlock and reacquire the state_mutex when spu_setup_isolated is done, but we can always return with the lock held and only drop it in spu_run_init in the failure case. Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/context.c | 40 ----------------------------- arch/powerpc/platforms/cell/spufs/run.c | 30 ++++++++++++---------- arch/powerpc/platforms/cell/spufs/spufs.h | 1 - 3 files changed, 16 insertions(+), 55 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index 065147fb1cc..ce17a284718 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c @@ -121,46 +121,6 @@ void spu_unmap_mappings(struct spu_context *ctx) unmap_mapping_range(ctx->psmap, 0, 0x20000, 1); } -/** - * spu_acquire_exclusive - lock spu contex and protect against userspace access - * @ctx: spu contex to lock - * - * Note: - * Returns 0 and with the context locked on success - * Returns negative error and with the context _unlocked_ on failure. - */ -int spu_acquire_exclusive(struct spu_context *ctx) -{ - int ret = -EINVAL; - - spu_acquire(ctx); - /* - * Context is about to be freed, so we can't acquire it anymore. - */ - if (!ctx->owner) - goto out_unlock; - - if (ctx->state == SPU_STATE_SAVED) { - ret = spu_activate(ctx, 0); - if (ret) - goto out_unlock; - } else { - /* - * We need to exclude userspace access to the context. - * - * To protect against memory access we invalidate all ptes - * and make sure the pagefault handlers block on the mutex.
- */ - spu_unmap_mappings(ctx); - } - - return 0; - - out_unlock: - spu_release(ctx); - return ret; -} - /** * spu_acquire_runnable - lock spu contex and make sure it is in runnable state * @ctx: spu contex to lock diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c index f95a611ca36..7df5202c9a9 100644 --- a/arch/powerpc/platforms/cell/spufs/run.c +++ b/arch/powerpc/platforms/cell/spufs/run.c @@ -63,13 +63,18 @@ static int spu_setup_isolated(struct spu_context *ctx) const u32 status_loading = SPU_STATUS_RUNNING | SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS; + ret = -ENODEV; if (!isolated_loader) - return -ENODEV; - - ret = spu_acquire_exclusive(ctx); - if (ret) goto out; + /* + * We need to exclude userspace access to the context. + * + * To protect against memory access we invalidate all ptes + * and make sure the pagefault handlers block on the mutex. + */ + spu_unmap_mappings(ctx); + mfc_cntl = &ctx->spu->priv2->mfc_control_RW; /* purge the MFC DMA queue to ensure no spurious accesses before we @@ -82,7 +87,7 @@ static int spu_setup_isolated(struct spu_context *ctx) printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n", __FUNCTION__); ret = -EIO; - goto out_unlock; + goto out; } cond_resched(); } @@ -119,12 +124,15 @@ static int spu_setup_isolated(struct spu_context *ctx) pr_debug("%s: isolated LOAD failed\n", __FUNCTION__); ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE); ret = -EACCES; + goto out_drop_priv; + } - } else if (!(status & SPU_STATUS_ISOLATED_STATE)) { + if (!(status & SPU_STATUS_ISOLATED_STATE)) { /* This isn't allowed by the CBEA, but check anyway */ pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__); ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP); ret = -EINVAL; + goto out_drop_priv; } out_drop_priv: @@ -132,8 +140,6 @@ out_drop_priv: sr1 |= MFC_STATE1_PROBLEM_STATE_MASK; spu_mfc_sr1_set(ctx->spu, sr1); -out_unlock: - spu_release(ctx); out: return ret; } @@ -149,13 +155,9 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc) if (ctx->flags & SPU_CREATE_ISOLATE) { if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) { - /* Need to release ctx, because spu_setup_isolated will - * acquire it exclusively. - */ - spu_release(ctx); ret = spu_setup_isolated(ctx); - if (!ret) - ret = spu_acquire_runnable(ctx, 0); + if (ret) + spu_release(ctx); } /* if userspace has set the runcntrl register (eg, to issue an diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 0fb366d9d25..cae2ad435b0 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -192,7 +192,6 @@ void spu_unmap_mappings(struct spu_context *ctx); void spu_forget(struct spu_context *ctx); int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags); void spu_acquire_saved(struct spu_context *ctx); -int spu_acquire_exclusive(struct spu_context *ctx); int spu_activate(struct spu_context *ctx, unsigned long flags); void spu_deactivate(struct spu_context *ctx); -- cgit v1.2.3 From 4e0f4ed0df71013290cd2a01f7b84264f7b99678 Mon Sep 17 00:00:00 2001 From: Luke Browning Date: Mon, 23 Apr 2007 21:08:13 +0200 Subject: [POWERPC] spu sched: make addition to stop_wq and runqueue atomic vs wakeup Addition to stop_wq needs to happen before adding to the runqueue and under the same lock so that we don't have a race window for a lost wakeup in the spu scheduler.
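Sketched out, the window being closed looks like this (illustration only, not a hunk):

        /* old, racy ordering: */
        spu_add_to_rq(ctx);             /* context visible on runqueue  */
                                        /* <-- spu_reschedule() on another
                                         *     cpu does wake_up(&ctx->stop_wq)
                                         *     here: no sleeper yet, the
                                         *     wakeup is lost            */
        spu_prio_wait(ctx);             /* only now do we sleep on stop_wq */

        /* new ordering: spu_prio_wait() first does
         * prepare_to_wait_exclusive(&ctx->stop_wq, ...) and only then,
         * still under spu_prio->runq_lock, adds the context to the
         * runqueue -- so any waker that can find the context on the
         * runqueue is guaranteed to find it on the wait queue too. */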
Signed-off-by: Luke Browning Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/sched.c | 38 +++++++++++++------------------ 1 file changed, 16 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 876828cc95a..91030b8abdc 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -236,44 +236,40 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx) * spu_add_to_rq - add a context to the runqueue * @ctx: context to add */ -static void spu_add_to_rq(struct spu_context *ctx) +static void __spu_add_to_rq(struct spu_context *ctx) { - spin_lock(&spu_prio->runq_lock); - list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]); - set_bit(ctx->prio, spu_prio->bitmap); - mb(); - spin_unlock(&spu_prio->runq_lock); + int prio = ctx->prio; + + list_add_tail(&ctx->rq, &spu_prio->runq[prio]); + set_bit(prio, spu_prio->bitmap); } -static void __spu_del_from_rq(struct spu_context *ctx, int prio) +static void __spu_del_from_rq(struct spu_context *ctx) { + int prio = ctx->prio; + if (!list_empty(&ctx->rq)) list_del_init(&ctx->rq); if (list_empty(&spu_prio->runq[prio])) - clear_bit(ctx->prio, spu_prio->bitmap); -} - -/** - * spu_del_from_rq - remove a context from the runqueue - * @ctx: context to remove - */ -static void spu_del_from_rq(struct spu_context *ctx) -{ - spin_lock(&spu_prio->runq_lock); - __spu_del_from_rq(ctx, ctx->prio); - spin_unlock(&spu_prio->runq_lock); + clear_bit(prio, spu_prio->bitmap); } static void spu_prio_wait(struct spu_context *ctx) { DEFINE_WAIT(wait); + spin_lock(&spu_prio->runq_lock); prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE); if (!signal_pending(current)) { + __spu_add_to_rq(ctx); + spin_unlock(&spu_prio->runq_lock); mutex_unlock(&ctx->state_mutex); schedule(); mutex_lock(&ctx->state_mutex); + spin_lock(&spu_prio->runq_lock); + __spu_del_from_rq(ctx); } + spin_unlock(&spu_prio->runq_lock); __set_current_state(TASK_RUNNING); remove_wait_queue(&ctx->stop_wq, &wait); } @@ -300,7 +296,7 @@ static void spu_reschedule(struct spu *spu) BUG_ON(list_empty(rq)); ctx = list_entry(rq->next, struct spu_context, rq); - __spu_del_from_rq(ctx, best); + __spu_del_from_rq(ctx); wake_up(&ctx->stop_wq); } spin_unlock(&spu_prio->runq_lock); } @@ -427,9 +423,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags) return 0; } - spu_add_to_rq(ctx); spu_prio_wait(ctx); - spu_del_from_rq(ctx); } while (!signal_pending(current)); return -ERESTARTSYS; -- cgit v1.2.3 From 62c05d583ec016c40011462d5f03b072bfbd3dc7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 23 Apr 2007 21:08:14 +0200 Subject: [POWERPC] spu_base: move spu_init_channels out of spu_mutex There is no reason to execute spu_init_channels under spu_mutex; after the spu has been taken off the freelist it's ours.
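That is, the mutex only needs to cover taking the spu off the shared list; once unlinked, no other allocator can reach it (sketch of the shape the hunk below gives spu_alloc_node()):

        mutex_lock(&spu_mutex);
        /* ... pop one spu off spu_list[node] ... */
        list_del_init(&spu->list);      /* unlinked: exclusively ours now */
        mutex_unlock(&spu_mutex);

        if (spu)
                spu_init_channels(spu); /* safe outside the lock */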
Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spu_base.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index eba7a2641dc..6242f3c19f6 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c @@ -431,10 +431,11 @@ struct spu *spu_alloc_node(int node) spu = list_entry(spu_list[node].next, struct spu, list); list_del_init(&spu->list); pr_debug("Got SPU %d %d\n", spu->number, spu->node); - spu_init_channels(spu); } mutex_unlock(&spu_mutex); + if (spu) + spu_init_channels(spu); return spu; } EXPORT_SYMBOL_GPL(spu_alloc_node); -- cgit v1.2.3 From 57dace2391ba10135e38457904121e7ef34d0c83 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 23 Apr 2007 21:08:15 +0200 Subject: [POWERPC] spufs: make spu page faults not block scheduling Until now, we have always entered the spu page fault handler with a mutex for the spu context held. This has multiple bad side-effects: - it becomes impossible to suspend the context during page faults - if an spu program attempts to access its own mmio areas through DMA, we get an immediate livelock when the nopage function tries to acquire the same mutex This patch makes the page fault logic operate on a struct spu_context instead of a struct spu, and moves it from spu_base.c to a new file fault.c inside of spufs. We now also need to copy the dar and dsisr contents of the last fault into the saved context to have it accessible in case we schedule out the context before activating the page fault handler. Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spu_base.c | 103 ------------- arch/powerpc/platforms/cell/spufs/Makefile | 2 +- arch/powerpc/platforms/cell/spufs/backing_ops.c | 6 + arch/powerpc/platforms/cell/spufs/fault.c | 193 ++++++++++++++++++++++++ arch/powerpc/platforms/cell/spufs/hw_ops.c | 9 ++ arch/powerpc/platforms/cell/spufs/run.c | 28 +--- arch/powerpc/platforms/cell/spufs/spufs.h | 4 + arch/powerpc/platforms/cell/spufs/switch.c | 8 +- 8 files changed, 223 insertions(+), 130 deletions(-) create mode 100644 arch/powerpc/platforms/cell/spufs/fault.c (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index 6242f3c19f6..31fa55f3341 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c @@ -290,7 +290,6 @@ spu_irq_class_1(int irq, void *data) return stat ? 
IRQ_HANDLED : IRQ_NONE; } -EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom); static irqreturn_t spu_irq_class_2(int irq, void *data) @@ -462,108 +461,6 @@ void spu_free(struct spu *spu) } EXPORT_SYMBOL_GPL(spu_free); -static int spu_handle_mm_fault(struct spu *spu) -{ - struct mm_struct *mm = spu->mm; - struct vm_area_struct *vma; - u64 ea, dsisr, is_write; - int ret; - - ea = spu->dar; - dsisr = spu->dsisr; -#if 0 - if (!IS_VALID_EA(ea)) { - return -EFAULT; - } -#endif /* XXX */ - if (mm == NULL) { - return -EFAULT; - } - if (mm->pgd == NULL) { - return -EFAULT; - } - - down_read(&mm->mmap_sem); - vma = find_vma(mm, ea); - if (!vma) - goto bad_area; - if (vma->vm_start <= ea) - goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; -#if 0 - if (expand_stack(vma, ea)) - goto bad_area; -#endif /* XXX */ -good_area: - is_write = dsisr & MFC_DSISR_ACCESS_PUT; - if (is_write) { - if (!(vma->vm_flags & VM_WRITE)) - goto bad_area; - } else { - if (dsisr & MFC_DSISR_ACCESS_DENIED) - goto bad_area; - if (!(vma->vm_flags & (VM_READ | VM_EXEC))) - goto bad_area; - } - ret = 0; - switch (handle_mm_fault(mm, vma, ea, is_write)) { - case VM_FAULT_MINOR: - current->min_flt++; - break; - case VM_FAULT_MAJOR: - current->maj_flt++; - break; - case VM_FAULT_SIGBUS: - ret = -EFAULT; - goto bad_area; - case VM_FAULT_OOM: - ret = -ENOMEM; - goto bad_area; - default: - BUG(); - } - up_read(&mm->mmap_sem); - return ret; - -bad_area: - up_read(&mm->mmap_sem); - return -EFAULT; -} - -int spu_irq_class_1_bottom(struct spu *spu) -{ - u64 ea, dsisr, access, error = 0UL; - int ret = 0; - - ea = spu->dar; - dsisr = spu->dsisr; - if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) { - u64 flags; - - access = (_PAGE_PRESENT | _PAGE_USER); - access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? 
_PAGE_RW : 0UL; - local_irq_save(flags); - if (hash_page(ea, access, 0x300) != 0) - error |= CLASS1_ENABLE_STORAGE_FAULT_INTR; - local_irq_restore(flags); - } - if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) { - if ((ret = spu_handle_mm_fault(spu)) != 0) - error |= CLASS1_ENABLE_STORAGE_FAULT_INTR; - else - error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR; - } - spu->dar = 0UL; - spu->dsisr = 0UL; - if (!error) { - spu_restart_dma(spu); - } else { - spu->dma_callback(spu, SPE_EVENT_SPE_DATA_STORAGE); - } - return ret; -} - struct sysdev_class spu_sysdev_class = { set_kset_name("spu") }; diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile index 472217d19fa..2cd89c11af5 100644 --- a/arch/powerpc/platforms/cell/spufs/Makefile +++ b/arch/powerpc/platforms/cell/spufs/Makefile @@ -1,4 +1,4 @@ -obj-y += switch.o +obj-y += switch.o fault.o obj-$(CONFIG_SPU_FS) += spufs.o spufs-y += inode.o file.o context.o syscalls.o coredump.o diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c index 1898f0d3a8b..3322528fa6e 100644 --- a/arch/powerpc/platforms/cell/spufs/backing_ops.c +++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c @@ -350,6 +350,11 @@ static int spu_backing_send_mfc_command(struct spu_context *ctx, return ret; } +static void spu_backing_restart_dma(struct spu_context *ctx) +{ + /* nothing to do here */ +} + struct spu_context_ops spu_backing_ops = { .mbox_read = spu_backing_mbox_read, .mbox_stat_read = spu_backing_mbox_stat_read, @@ -376,4 +381,5 @@ struct spu_context_ops spu_backing_ops = { .read_mfc_tagstatus = spu_backing_read_mfc_tagstatus, .get_mfc_free_elements = spu_backing_get_mfc_free_elements, .send_mfc_command = spu_backing_send_mfc_command, + .restart_dma = spu_backing_restart_dma, }; diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c new file mode 100644 index 00000000000..182dc914cbc --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/fault.c @@ -0,0 +1,193 @@ +/* + * Low-level SPU handling + * + * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 + * + * Author: Arnd Bergmann + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#include +#include +#include + +#include +#include + +#include "spufs.h" + +/* + * This ought to be kept in sync with the powerpc specific do_page_fault + * function. Currently, there are a few corner cases that we haven't had + * to handle fortunately. 
+ */ +static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea, unsigned long dsisr) +{ + struct vm_area_struct *vma; + unsigned long is_write; + int ret; + +#if 0 + if (!IS_VALID_EA(ea)) { + return -EFAULT; + } +#endif /* XXX */ + if (mm == NULL) { + return -EFAULT; + } + if (mm->pgd == NULL) { + return -EFAULT; + } + + down_read(&mm->mmap_sem); + vma = find_vma(mm, ea); + if (!vma) + goto bad_area; + if (vma->vm_start <= ea) + goto good_area; + if (!(vma->vm_flags & VM_GROWSDOWN)) + goto bad_area; + if (expand_stack(vma, ea)) + goto bad_area; +good_area: + is_write = dsisr & MFC_DSISR_ACCESS_PUT; + if (is_write) { + if (!(vma->vm_flags & VM_WRITE)) + goto bad_area; + } else { + if (dsisr & MFC_DSISR_ACCESS_DENIED) + goto bad_area; + if (!(vma->vm_flags & (VM_READ | VM_EXEC))) + goto bad_area; + } + ret = 0; + switch (handle_mm_fault(mm, vma, ea, is_write)) { + case VM_FAULT_MINOR: + current->min_flt++; + break; + case VM_FAULT_MAJOR: + current->maj_flt++; + break; + case VM_FAULT_SIGBUS: + ret = -EFAULT; + goto bad_area; + case VM_FAULT_OOM: + ret = -ENOMEM; + goto bad_area; + default: + BUG(); + } + up_read(&mm->mmap_sem); + return ret; + +bad_area: + up_read(&mm->mmap_sem); + return -EFAULT; +} + +static void spufs_handle_dma_error(struct spu_context *ctx, int type) +{ + if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) { + ctx->event_return |= type; + wake_up_all(&ctx->stop_wq); + } else { + switch (type) { + case SPE_EVENT_DMA_ALIGNMENT: + case SPE_EVENT_SPE_DATA_STORAGE: + case SPE_EVENT_INVALID_DMA: + force_sig(SIGBUS, /* info, */ current); + break; + case SPE_EVENT_SPE_ERROR: + force_sig(SIGILL, /* info */ current); + break; + } + } +} + +void spufs_dma_callback(struct spu *spu, int type) +{ + spufs_handle_dma_error(spu->ctx, type); +} +EXPORT_SYMBOL_GPL(spufs_dma_callback); + +/* + * bottom half handler for page faults, we can't do this from + * interrupt context, since we might need to sleep. + * we also need to give up the mutex so we can get scheduled + * out while waiting for the backing store. + * + * TODO: try calling hash_page from the interrupt handler first + * in order to speed up the easy case. + */ +int spufs_handle_class1(struct spu_context *ctx) +{ + u64 ea, dsisr, access; + unsigned long flags; + int ret; + + /* + * dar and dsisr get passed from the registers + * to the spu_context, to this function, but not + * back to the spu if it gets scheduled again. + * + * if we don't handle the fault for a saved context + * in time, we can still expect to get the same fault + * the immediately after the context restore. + */ + if (ctx->state == SPU_STATE_RUNNABLE) { + ea = ctx->spu->dar; + dsisr = ctx->spu->dsisr; + ctx->spu->dar= ctx->spu->dsisr = 0; + } else { + ea = ctx->csa.priv1.mfc_dar_RW; + dsisr = ctx->csa.priv1.mfc_dsisr_RW; + ctx->csa.priv1.mfc_dar_RW = 0; + ctx->csa.priv1.mfc_dsisr_RW = 0; + } + + if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))) + return 0; + + pr_debug("ctx %p: ea %016lx, dsisr %016lx state %d\n", ctx, ea, + dsisr, ctx->state); + + /* we must not hold the lock when entering spu_handle_mm_fault */ + spu_release(ctx); + + access = (_PAGE_PRESENT | _PAGE_USER); + access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? 
_PAGE_RW : 0UL; + local_irq_save(flags); + ret = hash_page(ea, access, 0x300); + local_irq_restore(flags); + + /* hashing failed, so try the actual fault handler */ + if (ret) + ret = spu_handle_mm_fault(current->mm, ea, dsisr); + + spu_acquire(ctx); + /* + * If we handled the fault successfully and are in runnable + * state, restart the DMA. + * In case of unhandled error report the problem to user space. + */ + if (!ret) { + if (ctx->spu) + ctx->ops->restart_dma(ctx); + } else + spufs_handle_dma_error(ctx, SPE_EVENT_SPE_DATA_STORAGE); + + return ret; +} +EXPORT_SYMBOL_GPL(spufs_handle_class1); diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c index ae42e03b8c8..428875c5e4e 100644 --- a/arch/powerpc/platforms/cell/spufs/hw_ops.c +++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c @@ -296,6 +296,14 @@ static int spu_hw_send_mfc_command(struct spu_context *ctx, } } +static void spu_hw_restart_dma(struct spu_context *ctx) +{ + struct spu_priv2 __iomem *priv2 = ctx->spu->priv2; + + if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &ctx->spu->flags)) + out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND); +} + struct spu_context_ops spu_hw_ops = { .mbox_read = spu_hw_mbox_read, .mbox_stat_read = spu_hw_mbox_stat_read, @@ -320,4 +328,5 @@ struct spu_context_ops spu_hw_ops = { .read_mfc_tagstatus = spu_hw_read_mfc_tagstatus, .get_mfc_free_elements = spu_hw_get_mfc_free_elements, .send_mfc_command = spu_hw_send_mfc_command, + .restart_dma = spu_hw_restart_dma, }; diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c index 7df5202c9a9..1a8195bf75d 100644 --- a/arch/powerpc/platforms/cell/spufs/run.c +++ b/arch/powerpc/platforms/cell/spufs/run.c @@ -18,27 +18,6 @@ void spufs_stop_callback(struct spu *spu) wake_up_all(&ctx->stop_wq); } -void spufs_dma_callback(struct spu *spu, int type) -{ - struct spu_context *ctx = spu->ctx; - - if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) { - ctx->event_return |= type; - wake_up_all(&ctx->stop_wq); - } else { - switch (type) { - case SPE_EVENT_DMA_ALIGNMENT: - case SPE_EVENT_SPE_DATA_STORAGE: - case SPE_EVENT_INVALID_DMA: - force_sig(SIGBUS, /* info, */ current); - break; - case SPE_EVENT_SPE_ERROR: - force_sig(SIGILL, /* info */ current); - break; - } - } -} - static inline int spu_stopped(struct spu_context *ctx, u32 * stat) { struct spu *spu; @@ -294,11 +273,8 @@ int spu_process_callback(struct spu_context *ctx) static inline int spu_process_events(struct spu_context *ctx) { struct spu *spu = ctx->spu; - u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED; int ret = 0; - if (spu->dsisr & pte_fault) - ret = spu_irq_class_1_bottom(spu); if (spu->class_0_pending) ret = spu_irq_class_0_bottom(spu); if (!ret && signal_pending(current)) @@ -332,6 +308,10 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx, break; status &= ~SPU_STATUS_STOPPED_BY_STOP; } + ret = spufs_handle_class1(ctx); + if (ret) + break; + if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) { ret = spu_reacquire_runnable(ctx, npc, &status); if (ret) { diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index cae2ad435b0..9993c9b3cff 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -141,6 +141,7 @@ struct spu_context_ops { struct spu_dma_info * info); void (*proxydma_info_read) (struct spu_context * ctx, struct spu_proxydma_info * info); + void (*restart_dma)(struct spu_context *ctx); 
}; extern struct spu_context_ops spu_hw_ops; @@ -172,6 +173,9 @@ int put_spu_gang(struct spu_gang *gang); void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx); void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx); +/* fault handling */ +int spufs_handle_class1(struct spu_context *ctx); + /* context management */ static inline void spu_acquire(struct spu_context *ctx) { diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c index fd91c73de34..8347c4a3f89 100644 --- a/arch/powerpc/platforms/cell/spufs/switch.c +++ b/arch/powerpc/platforms/cell/spufs/switch.c @@ -2084,6 +2084,10 @@ int spu_save(struct spu_state *prev, struct spu *spu) int rc; acquire_spu_lock(spu); /* Step 1. */ + prev->dar = spu->dar; + prev->dsisr = spu->dsisr; + spu->dar = 0; + spu->dsisr = 0; rc = __do_spu_save(prev, spu); /* Steps 2-53. */ release_spu_lock(spu); if (rc != 0 && rc != 2 && rc != 6) { @@ -2109,9 +2113,9 @@ int spu_restore(struct spu_state *new, struct spu *spu) acquire_spu_lock(spu); harvest(NULL, spu); - spu->dar = 0; - spu->dsisr = 0; spu->slb_replace = 0; + new->dar = 0; + new->dsisr = 0; spu->class_0_pending = 0; rc = __do_spu_restore(new, spu); release_spu_lock(spu); -- cgit v1.2.3 From c8a1e9393a86f862ab9c8bc0db9b8a1822226f84 Mon Sep 17 00:00:00 2001 From: Jeremy Kerr Date: Mon, 23 Apr 2007 21:08:16 +0200 Subject: [POWERPC] spufs: provide siginfo for SPE faults This change populates a siginfo struct for SPE application exceptions (ie, invalid DMAs and illegal instructions). Tested on an IBM Cell Blade. Signed-off-by: Jeremy Kerr Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/fault.c | 32 ++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c index 182dc914cbc..0f75c07e29d 100644 --- a/arch/powerpc/platforms/cell/spufs/fault.c +++ b/arch/powerpc/platforms/cell/spufs/fault.c @@ -97,28 +97,46 @@ bad_area: return -EFAULT; } -static void spufs_handle_dma_error(struct spu_context *ctx, int type) +static void spufs_handle_dma_error(struct spu_context *ctx, + unsigned long ea, int type) { if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) { ctx->event_return |= type; wake_up_all(&ctx->stop_wq); } else { + siginfo_t info; + memset(&info, 0, sizeof(info)); + switch (type) { - case SPE_EVENT_DMA_ALIGNMENT: - case SPE_EVENT_SPE_DATA_STORAGE: case SPE_EVENT_INVALID_DMA: - force_sig(SIGBUS, /* info, */ current); + info.si_signo = SIGBUS; + info.si_code = BUS_OBJERR; + break; + case SPE_EVENT_SPE_DATA_STORAGE: + info.si_signo = SIGBUS; + info.si_addr = (void __user *)ea; + info.si_code = BUS_ADRERR; + break; + case SPE_EVENT_DMA_ALIGNMENT: + info.si_signo = SIGBUS; + /* DAR isn't set for an alignment fault :( */ + info.si_code = BUS_ADRALN; break; case SPE_EVENT_SPE_ERROR: - force_sig(SIGILL, /* info */ current); + info.si_signo = SIGILL; + info.si_addr = (void __user *)(unsigned long) + ctx->ops->npc_read(ctx) - 4; + info.si_code = ILL_ILLOPC; break; } + if (info.si_signo) + force_sig_info(info.si_signo, &info, current); } } void spufs_dma_callback(struct spu *spu, int type) { - spufs_handle_dma_error(spu->ctx, type); + spufs_handle_dma_error(spu->ctx, spu->dar, type); } EXPORT_SYMBOL_GPL(spufs_dma_callback); @@ -186,7 +204,7 @@ int spufs_handle_class1(struct spu_context *ctx) if (ctx->spu) ctx->ops->restart_dma(ctx); } else - spufs_handle_dma_error(ctx, 
SPE_EVENT_SPE_DATA_STORAGE); + spufs_handle_dma_error(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE); return ret; } -- cgit v1.2.3 From e45d48a34d4d1862d28d22c2533b8c6bb83b8c1f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 23 Apr 2007 21:08:17 +0200 Subject: [POWERPC] spufs: turn run_sema into run_mutex There is no reason for run_sema to be a struct semaphore. Change it to a mutex and rename it accordingly. Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/context.c | 2 +- arch/powerpc/platforms/cell/spufs/run.c | 4 ++-- arch/powerpc/platforms/cell/spufs/spufs.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index ce17a284718..a87d9ca3dba 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c @@ -44,7 +44,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang) spin_lock_init(&ctx->mapping_lock); kref_init(&ctx->kref); mutex_init(&ctx->state_mutex); - init_MUTEX(&ctx->run_sema); + mutex_init(&ctx->run_mutex); init_waitqueue_head(&ctx->ibox_wq); init_waitqueue_head(&ctx->wbox_wq); init_waitqueue_head(&ctx->stop_wq); diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c index 1a8195bf75d..7cf5b298fa1 100644 --- a/arch/powerpc/platforms/cell/spufs/run.c +++ b/arch/powerpc/platforms/cell/spufs/run.c @@ -288,7 +288,7 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx, int ret; u32 status; - if (down_interruptible(&ctx->run_sema)) + if (mutex_lock_interruptible(&ctx->run_mutex)) return -ERESTARTSYS; ctx->ops->master_start(ctx); @@ -345,6 +345,6 @@ out2: out: *event = ctx->event_return; - up(&ctx->run_sema); + mutex_unlock(&ctx->run_mutex); return ret; } diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 9993c9b3cff..dd5fc6494ec 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -60,7 +60,7 @@ struct spu_context { enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state; struct mutex state_mutex; - struct semaphore run_sema; + struct mutex run_mutex; struct mm_struct *owner; -- cgit v1.2.3 From 577f8f1021f9ee6ef2a98a142652759ec122d27f Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Mon, 23 Apr 2007 21:08:18 +0200 Subject: [POWERPC] spufs: check spu_acquire_runnable() return value This patch checks the return value of spu_acquire_runnable() in spufs_mfc_write().
Signed-off-by: Akinobu Mita Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/file.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index deb340e6e0a..525d6b00cba 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -1443,7 +1443,10 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer, if (ret) goto out; - spu_acquire_runnable(ctx, 0); + ret = spu_acquire_runnable(ctx, 0); + if (ret) + goto out; + if (file->f_flags & O_NONBLOCK) { ret = ctx->ops->send_mfc_command(ctx, &cmd); } else { -- cgit v1.2.3 From c99c1994a2bb9493b4ac372b2b6ee2606d291171 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Mon, 23 Apr 2007 21:08:19 +0200 Subject: [POWERPC] spufs: fix missing error handling in module_init() spufs module_init forgot to call a few cleanup functions on the error path. This patch also includes cosmetic changes in spu_sched_init() (indentation fix and error code return). [modified by hch to apply ontop of the latest schedule changes] Cc: Arnd Bergmann Signed-off-by: Akinobu Mita Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/inode.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 423596a6b99..5d09c2e8f39 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -661,25 +661,29 @@ static int __init spufs_init(void) if (!spufs_inode_cache) goto out; - if (spu_sched_init() != 0) { - kmem_cache_destroy(spufs_inode_cache); - goto out; - } - ret = register_filesystem(&spufs_type); + ret = spu_sched_init(); if (ret) goto out_cache; + ret = register_filesystem(&spufs_type); + if (ret) goto out_sched; ret = register_spu_syscalls(&spufs_calls); if (ret) goto out_fs; ret = register_arch_coredump_calls(&spufs_coredump_calls); if (ret) - goto out_fs; + goto out_syscalls; spufs_init_isolated_loader(); return 0; + +out_syscalls: + unregister_spu_syscalls(&spufs_calls); out_fs: unregister_filesystem(&spufs_type); +out_sched: + spu_sched_exit(); out_cache: kmem_cache_destroy(spufs_inode_cache); out: -- cgit v1.2.3 From db1384b40d12eda6910513ff429ad90453ca49e1 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Mon, 23 Apr 2007 21:08:20 +0200 Subject: [POWERPC] spufs: fix memory leak on spufs reloading When SPU isolation mode is enabled, isolated_loader is allocated by spufs_init_isolated_loader() on module_init(), but nothing ever frees it. This patch introduces spufs_exit_isolated_loader(), the opposite of spufs_init_isolated_loader(), which is called on module_exit().
Cc: Arnd Bergmann Signed-off-by: Akinobu Mita Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/inode.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 5d09c2e8f39..6b52dfabaee 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -561,6 +561,11 @@ spufs_parse_options(char *options, struct inode *root) return 1; } +static void spufs_exit_isolated_loader(void) +{ + kfree(isolated_loader); +} + static void spufs_init_isolated_loader(void) { @@ -694,6 +699,7 @@ module_init(spufs_init); static void __exit spufs_exit(void) { spu_sched_exit(); + spufs_exit_isolated_loader(); unregister_arch_coredump_calls(&spufs_coredump_calls); unregister_spu_syscalls(&spufs_calls); unregister_filesystem(&spufs_type); -- cgit v1.2.3 From 8a7d86bdb22678b17928eef0c8fa356d8b21cc76 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 23 Apr 2007 21:08:21 +0200 Subject: [POWERPC] spufs: avoid spurious memory barriers We now have proper locking around assignments of the mapping pointers, and the spin_unlock implies enough of a barrier to get rid of the explicit one. Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/file.c | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 525d6b00cba..d010b2464a9 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -50,7 +50,6 @@ spufs_mem_open(struct inode *inode, struct file *file) if (!i->i_openers++) ctx->local_store = inode->i_mapping; spin_unlock(&ctx->mapping_lock); - smp_wmb(); return 0; } @@ -64,7 +63,6 @@ spufs_mem_release(struct inode *inode, struct file *file) if (!--i->i_openers) ctx->local_store = NULL; spin_unlock(&ctx->mapping_lock); - smp_wmb(); return 0; } @@ -262,7 +260,6 @@ static int spufs_cntl_open(struct inode *inode, struct file *file) if (!i->i_openers++) ctx->cntl = inode->i_mapping; spin_unlock(&ctx->mapping_lock); - smp_wmb(); return simple_attr_open(inode, file, spufs_cntl_get, spufs_cntl_set, "0x%08lx"); } @@ -279,7 +276,6 @@ spufs_cntl_release(struct inode *inode, struct file *file) if (!--i->i_openers) ctx->cntl = NULL; spin_unlock(&ctx->mapping_lock); - smp_wmb(); return 0; } @@ -767,7 +763,6 @@ static int spufs_signal1_open(struct inode *inode, struct file *file) if (!i->i_openers++) ctx->signal1 = inode->i_mapping; spin_unlock(&ctx->mapping_lock); - smp_wmb(); return nonseekable_open(inode, file); } @@ -781,7 +776,6 @@ spufs_signal1_release(struct inode *inode, struct file *file) if (!--i->i_openers) ctx->signal1 = NULL; spin_unlock(&ctx->mapping_lock); - smp_wmb(); return 0; } @@ -893,7 +887,6 @@ static int spufs_signal2_open(struct inode *inode, struct file *file) if (!i->i_openers++) ctx->signal2 = inode->i_mapping; spin_unlock(&ctx->mapping_lock); - smp_wmb(); return nonseekable_open(inode, file); } @@ -907,7 +900,6 @@ spufs_signal2_release(struct inode *inode, struct file *file) if (!--i->i_openers) ctx->signal2 = NULL; spin_unlock(&ctx->mapping_lock); - smp_wmb(); return 0; } @@ -1112,7 +1104,6 @@ static int spufs_mss_open(struct inode *inode, struct file *file) if (!i->i_openers++) ctx->mss = inode->i_mapping; spin_unlock(&ctx->mapping_lock); - smp_wmb(); return nonseekable_open(inode, file); } @@
-1126,7 +1117,6 @@ spufs_mss_release(struct inode *inode, struct file *file) if (!--i->i_openers) ctx->mss = NULL; spin_unlock(&ctx->mapping_lock); - smp_wmb(); return 0; } @@ -1172,7 +1162,6 @@ static int spufs_psmap_open(struct inode *inode, struct file *file) if (!i->i_openers++) ctx->psmap = inode->i_mapping; spin_unlock(&ctx->mapping_lock); - smp_wmb(); return nonseekable_open(inode, file); } @@ -1186,7 +1175,6 @@ spufs_psmap_release(struct inode *inode, struct file *file) if (!--i->i_openers) ctx->psmap = NULL; spin_unlock(&ctx->mapping_lock); - smp_wmb(); return 0; } @@ -1244,7 +1232,6 @@ static int spufs_mfc_open(struct inode *inode, struct file *file) if (!i->i_openers++) ctx->mfc = inode->i_mapping; spin_unlock(&ctx->mapping_lock); - smp_wmb(); return nonseekable_open(inode, file); } @@ -1258,7 +1245,6 @@ spufs_mfc_release(struct inode *inode, struct file *file) if (!--i->i_openers) ctx->mfc = NULL; spin_unlock(&ctx->mapping_lock); - smp_wmb(); return 0; } -- cgit v1.2.3 From 9e2fe2ce4e957a79d3dc5d813e0cfb10d79b79b3 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Mon, 23 Apr 2007 21:08:22 +0200 Subject: [POWERPC] spufs: use memcpy_fromio() to copy from local store GCC may generate an inline copy loop to handle the memcpy() function instead of using the kernel-defined memcpy(). But this inlined version of memcpy() causes an alignment interrupt when copying from local store. This patch uses memcpy_fromio() and memcpy_toio() to copy local store to prevent memcpy() from being inlined. Signed-off-by: Akinobu Mita Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/run.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c index 7cf5b298fa1..b5e7bdee1bc 100644 --- a/arch/powerpc/platforms/cell/spufs/run.c +++ b/arch/powerpc/platforms/cell/spufs/run.c @@ -234,17 +234,17 @@ int spu_process_callback(struct spu_context *ctx) { struct spu_syscall_block s; u32 ls_pointer, npc; - char *ls; + void __iomem *ls; long spu_ret; int ret; /* get syscall block from local store */ - npc = ctx->ops->npc_read(ctx); - ls = ctx->ops->get_ls(ctx); - ls_pointer = *(u32*)(ls + npc); + npc = ctx->ops->npc_read(ctx) & ~3; + ls = (void __iomem *)ctx->ops->get_ls(ctx); + ls_pointer = in_be32(ls + npc); if (ls_pointer > (LS_SIZE - sizeof(s))) return -EFAULT; - memcpy(&s, ls + ls_pointer, sizeof (s)); + memcpy_fromio(&s, ls + ls_pointer, sizeof(s)); /* do actual syscall without pinning the spu */ ret = 0; @@ -264,7 +264,7 @@ int spu_process_callback(struct spu_context *ctx) } /* write result, jump over indirect pointer */ - memcpy(ls + ls_pointer, &spu_ret, sizeof (spu_ret)); + memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret)); ctx->ops->npc_write(ctx, npc); ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE); return ret; -- cgit v1.2.3 From f11f5ee70f48899506514e5e0d10ee2c8ddd359a Mon Sep 17 00:00:00 2001 From: Jeremy Kerr Date: Mon, 23 Apr 2007 21:08:23 +0200 Subject: [POWERPC] spufs: add mode= mount option Add a 'mode=' option to spufs mount arguments. This allows more control over access to the top-level spufs directory. Tested on Cell.
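A hypothetical invocation (any octal mode that match_octal() accepts works the same way; uid/gid are the pre-existing options):

        mount -t spufs -o uid=0,gid=100,mode=0770 none /spu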
Signed-off-by: Jeremy Kerr
Signed-off-by: Arnd Bergmann
---
 arch/powerpc/platforms/cell/spufs/inode.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)
(limited to 'arch')

diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 6b52dfabaee..8f6cd876330 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -521,13 +521,14 @@ out:
 
 /* File system initialization */
 enum {
-	Opt_uid, Opt_gid, Opt_err,
+	Opt_uid, Opt_gid, Opt_mode, Opt_err,
 };
 
 static match_table_t spufs_tokens = {
-	{ Opt_uid,  "uid=%d" },
-	{ Opt_gid,  "gid=%d" },
-	{ Opt_err,   NULL },
+	{ Opt_uid,  "uid=%d" },
+	{ Opt_gid,  "gid=%d" },
+	{ Opt_mode, "mode=%o" },
+	{ Opt_err,   NULL },
 };
 
 static int
@@ -554,6 +555,11 @@ spufs_parse_options(char *options, struct inode *root)
 				return 0;
 			root->i_gid = option;
 			break;
+		case Opt_mode:
+			if (match_octal(&args[0], &option))
+				return 0;
+			root->i_mode = option | S_IFDIR;
+			break;
 		default:
 			return 0;
 		}
-- cgit v1.2.3

From d3764397d07b1e03943edfdcc3fb77af7bdac02b Mon Sep 17 00:00:00 2001
From: Jeremy Kerr
Date: Mon, 23 Apr 2007 21:08:24 +0200
Subject: [POWERPC] spufs: Minor cleanup of spu_wait

Change the loop in spu_wait to be a little more straightforward.

Signed-off-by: Jeremy Kerr
Signed-off-by: Arnd Bergmann
---
 arch/powerpc/platforms/cell/spufs/spufs.h | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)
(limited to 'arch')

diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index dd5fc6494ec..0a947fd7de5 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -223,14 +223,13 @@ extern char *isolated_loader;
 		prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE);	\
 		if (condition)						\
 			break;						\
-		if (!signal_pending(current)) {				\
-			spu_release(ctx);				\
-			schedule();					\
-			spu_acquire(ctx);				\
-			continue;					\
+		if (signal_pending(current)) {				\
+			__ret = -ERESTARTSYS;				\
+			break;						\
 		}							\
-		__ret = -ERESTARTSYS;					\
-		break;							\
+		spu_release(ctx);					\
+		schedule();						\
+		spu_acquire(ctx);					\
 	}								\
 	finish_wait(&(wq), &__wait);					\
 	__ret;								\
-- cgit v1.2.3

From 6cf2179202cf706471777ad6ee5d0377d5990ab7 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Mon, 23 Apr 2007 21:08:25 +0200
Subject: [POWERPC] spufs: fix memory leak on coredump

The dynamically allocated read/write buffer in spufs_arch_write_note()
is never freed. Convert it to get_zeroed_page() at the same time.
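The fix follows the kernel's usual single-exit cleanup idiom; below is
a minimal sketch of that idiom with illustrative names
(example_dump_write is a stand-in, not a function from the patch):

	static void example_write_note(struct file *file)
	{
		char *buf = (void *)get_zeroed_page(GFP_KERNEL);

		if (!buf)
			return;

		if (!example_dump_write(file, buf, PAGE_SIZE))
			goto out;	/* error path still frees the page */
		/* ... further writes using buf ... */
	out:
		free_page((unsigned long)buf);
	}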
Cc: Akinobu Mita
Signed-off-by: Arnd Bergmann
---
 arch/powerpc/platforms/cell/spufs/coredump.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)
(limited to 'arch')

diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 725e1956115..5d9ad5a0307 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -169,12 +169,12 @@ static void spufs_arch_write_note(struct spufs_ctx_info *ctx_info, int i,
 	struct spu_context *ctx;
 	loff_t pos = 0;
 	int sz, dfd, rc, total = 0;
-	const int bufsz = 4096;
+	const int bufsz = PAGE_SIZE;
 	char *name;
 	char fullname[80], *buf;
 	struct elf_note en;
 
-	buf = kmalloc(bufsz, GFP_KERNEL);
+	buf = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!buf)
 		return;
 
@@ -187,9 +187,8 @@ static void spufs_arch_write_note(struct spufs_ctx_info *ctx_info, int i,
 	sz = spufs_coredump_read[i].size;
 
 	ctx = ctx_info->ctx;
-	if (!ctx) {
-		return;
-	}
+	if (!ctx)
+		goto out;
 
 	sprintf(fullname, "SPU/%d/%s", dfd, name);
 	en.n_namesz = strlen(fullname) + 1;
@@ -197,23 +196,25 @@ static void spufs_arch_write_note(struct spufs_ctx_info *ctx_info, int i,
 	en.n_type = NT_SPU;
 
 	if (!spufs_dump_write(file, &en, sizeof(en)))
-		return;
+		goto out;
 	if (!spufs_dump_write(file, fullname, en.n_namesz))
-		return;
+		goto out;
 	if (!spufs_dump_seek(file, roundup((unsigned long)file->f_pos, 4)))
-		return;
+		goto out;
 
 	do {
 		rc = do_coredump_read(i, ctx, buf, bufsz, &pos);
 		if (rc > 0) {
 			if (!spufs_dump_write(file, buf, rc))
-				return;
+				goto out;
 			total += rc;
 		}
 	} while (rc == bufsz && total < sz);
 
 	spufs_dump_seek(file, roundup((unsigned long)file->f_pos
 						- total + sz, 4));
+out:
+	free_page((unsigned long)buf);
 }
 
 static void spufs_arch_write_notes(struct file *file)
-- cgit v1.2.3

From fe8a29db5bce1b5bd1ceb85fd153fac52cdab7b2 Mon Sep 17 00:00:00 2001
From: Akinobu Mita
Date: Mon, 23 Apr 2007 21:08:26 +0200
Subject: [POWERPC] spufs: enable SPU coredump for kernel-builtin spufs

spu_coredump_calls.owner is NULL in the case of a built-in spufs, so
the checks in here break. Check for the availability of the
spu_coredump_calls variable instead.
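Sketched with illustrative names, the resulting pattern tests the
registration pointer itself; this stays correct for modules too, since
try_module_get(NULL) succeeds for built-in code:

	struct example_calls {
		int (*op)(void);
		struct module *owner;	/* NULL when built in */
	};

	static struct example_calls *example_calls;	/* NULL until registered */
	static DEFINE_MUTEX(example_mutex);

	int example_invoke(void)
	{
		int ret = -ENOSYS;

		mutex_lock(&example_mutex);
		/* pointer check covers both modular and built-in callees */
		if (example_calls && try_module_get(example_calls->owner)) {
			ret = example_calls->op();
			module_put(example_calls->owner);
		}
		mutex_unlock(&example_mutex);
		return ret;
	}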
Signed-off-by: Arnd Bergmann
---
 arch/powerpc/platforms/cell/spu_coredump.c | 34 ++++++++++++++----------------
 1 file changed, 16 insertions(+), 18 deletions(-)
(limited to 'arch')

diff --git a/arch/powerpc/platforms/cell/spu_coredump.c b/arch/powerpc/platforms/cell/spu_coredump.c
index 6915b418ee7..4fd37ff1e21 100644
--- a/arch/powerpc/platforms/cell/spu_coredump.c
+++ b/arch/powerpc/platforms/cell/spu_coredump.c
@@ -26,19 +26,18 @@
 #include 
 
-static struct spu_coredump_calls spu_coredump_calls;
+static struct spu_coredump_calls *spu_coredump_calls;
 static DEFINE_MUTEX(spu_coredump_mutex);
 
 int arch_notes_size(void)
 {
 	long ret;
-	struct module *owner = spu_coredump_calls.owner;
 
 	ret = -ENOSYS;
 	mutex_lock(&spu_coredump_mutex);
-	if (owner && try_module_get(owner)) {
-		ret = spu_coredump_calls.arch_notes_size();
-		module_put(owner);
+	if (spu_coredump_calls && try_module_get(spu_coredump_calls->owner)) {
+		ret = spu_coredump_calls->arch_notes_size();
+		module_put(spu_coredump_calls->owner);
 	}
 	mutex_unlock(&spu_coredump_mutex);
 	return ret;
@@ -46,36 +45,35 @@ int arch_notes_size(void)
 
 void arch_write_notes(struct file *file)
 {
-	struct module *owner = spu_coredump_calls.owner;
-
 	mutex_lock(&spu_coredump_mutex);
-	if (owner && try_module_get(owner)) {
-		spu_coredump_calls.arch_write_notes(file);
-		module_put(owner);
+	if (spu_coredump_calls && try_module_get(spu_coredump_calls->owner)) {
+		spu_coredump_calls->arch_write_notes(file);
+		module_put(spu_coredump_calls->owner);
 	}
 	mutex_unlock(&spu_coredump_mutex);
 }
 
 int register_arch_coredump_calls(struct spu_coredump_calls *calls)
 {
-	if (spu_coredump_calls.owner)
-		return -EBUSY;
+	int ret = 0;
 
 	mutex_lock(&spu_coredump_mutex);
-	spu_coredump_calls.arch_notes_size = calls->arch_notes_size;
-	spu_coredump_calls.arch_write_notes = calls->arch_write_notes;
-	spu_coredump_calls.owner = calls->owner;
+	if (spu_coredump_calls)
+		ret = -EBUSY;
+	else
+		spu_coredump_calls = calls;
 	mutex_unlock(&spu_coredump_mutex);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(register_arch_coredump_calls);
 
 void unregister_arch_coredump_calls(struct spu_coredump_calls *calls)
 {
-	BUG_ON(spu_coredump_calls.owner != calls->owner);
+	BUG_ON(spu_coredump_calls != calls);
 
 	mutex_lock(&spu_coredump_mutex);
-	spu_coredump_calls.owner = NULL;
+	spu_coredump_calls = NULL;
 	mutex_unlock(&spu_coredump_mutex);
 }
 EXPORT_SYMBOL_GPL(unregister_arch_coredump_calls);
-- cgit v1.2.3

From aa45e2569ffe963dfbbbfddfdccd12afe69b2d65 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 23 Apr 2007 21:08:27 +0200
Subject: [POWERPC] spufs: various run.c cleanups

- remove the spu_acquire_runnable from spu_run_init.
  I need to open-code it in spufs_run_spu in the next patch.
- remove various inline attributes; we don't really want to inline
  long functions with multiple callsites.
- clean up return values and runcntl_write calls in spu_run_init.
- use normal kernel coding style in spu_reacquire_runnable.

Signed-off-by: Christoph Hellwig
Signed-off-by: Arnd Bergmann
---
 arch/powerpc/platforms/cell/spufs/run.c | 51 ++++++++++++++++++++-------------
 1 file changed, 31 insertions(+), 20 deletions(-)
(limited to 'arch')

diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index b5e7bdee1bc..57626600b1a 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -123,20 +123,15 @@ out:
 	return ret;
 }
 
-static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
+static int spu_run_init(struct spu_context *ctx, u32 * npc)
 {
-	int ret;
-	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
-
-	ret = spu_acquire_runnable(ctx, 0);
-	if (ret)
-		return ret;
-
 	if (ctx->flags & SPU_CREATE_ISOLATE) {
+		unsigned long runcntl;
+
 		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
-			ret = spu_setup_isolated(ctx);
+			int ret = spu_setup_isolated(ctx);
 			if (ret)
-				spu_release(ctx);
+				return ret;
 		}
 
 		/* if userspace has set the runcntrl register (eg, to issue an
@@ -145,16 +140,17 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
 		if (runcntl == 0)
 			runcntl = SPU_RUNCNTL_RUNNABLE;
+		ctx->ops->runcntl_write(ctx, runcntl);
 	} else {
 		spu_start_tick(ctx);
 		ctx->ops->npc_write(ctx, *npc);
+		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
 	}
 
-	ctx->ops->runcntl_write(ctx, runcntl);
-	return ret;
+	return 0;
 }
 
-static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
+static int spu_run_fini(struct spu_context *ctx, u32 * npc,
 			       u32 * status)
 {
 	int ret = 0;
@@ -170,19 +166,27 @@ static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
 	return ret;
 }
 
-static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
+static int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
 				 u32 *status)
 {
 	int ret;
 
-	if ((ret = spu_run_fini(ctx, npc, status)) != 0)
+	ret = spu_run_fini(ctx, npc, status);
+	if (ret)
 		return ret;
-	if (*status & (SPU_STATUS_STOPPED_BY_STOP |
-		       SPU_STATUS_STOPPED_BY_HALT)) {
+
+	if (*status & (SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_STOPPED_BY_HALT))
 		return *status;
-	}
-	if ((ret = spu_run_init(ctx, npc)) != 0)
+
+	ret = spu_acquire_runnable(ctx, 0);
+	if (ret)
 		return ret;
+
+	ret = spu_run_init(ctx, npc);
+	if (ret) {
+		spu_release(ctx);
+		return ret;
+	}
 	return 0;
 }
 
@@ -293,9 +297,16 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 
 	ctx->ops->master_start(ctx);
 	ctx->event_return = 0;
-	ret = spu_run_init(ctx, npc);
+
+	ret = spu_acquire_runnable(ctx, 0);
 	if (ret)
+		return ret;
+
+	ret = spu_run_init(ctx, npc);
+	if (ret) {
+		spu_release(ctx);
 		goto out;
+	}
 
 	do {
 		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
-- cgit v1.2.3

From befdc746ee027d686a06be29cb1391f9d2c45cf6 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 23 Apr 2007 21:08:28 +0200
Subject: [POWERPC] spu_base: remove cleanup_spu_base

spu_base.c is always built into the kernel image, so there is no need
for a cleanup function. And some of the things it does are in the way
for my following patches, so I'd rather get rid of it ASAP.
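With the module_exit() path gone, init failures have to unwind locally;
here is a minimal sketch of the label-based unwind idiom (the step
names are illustrative, not from the patch):

	static int __init example_init(void)
	{
		int ret;

		ret = register_step_one();
		if (ret)
			goto out;

		ret = register_step_two();
		if (ret)
			goto out_undo_step_one;	/* undo in reverse order */

		return 0;

	 out_undo_step_one:
		unregister_step_one();
	 out:
		return ret;
	}
	module_init(example_init);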
Signed-off-by: Christoph Hellwig
Signed-off-by: Arnd Bergmann
---
 arch/powerpc/platforms/cell/spu_base.c | 47 ++++++++--------------------
 1 file changed, 10 insertions(+), 37 deletions(-)
(limited to 'arch')

diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 31fa55f3341..2e8aa9433b3 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -534,12 +534,6 @@ static int spu_create_sysdev(struct spu *spu)
 	return 0;
 }
 
-static void spu_destroy_sysdev(struct spu *spu)
-{
-	sysfs_remove_device_from_node(&spu->sysdev, spu->node);
-	sysdev_unregister(&spu->sysdev);
-}
-
 static int __init create_spu(void *data)
 {
 	struct spu *spu;
@@ -591,43 +585,17 @@ out:
 	return ret;
 }
 
-static void destroy_spu(struct spu *spu)
-{
-	list_del_init(&spu->list);
-	list_del_init(&spu->full_list);
-
-	spu_destroy_sysdev(spu);
-	spu_free_irqs(spu);
-	spu_destroy_spu(spu);
-	kfree(spu);
-}
-
-static void cleanup_spu_base(void)
-{
-	struct spu *spu, *tmp;
-	int node;
-
-	mutex_lock(&spu_mutex);
-	for (node = 0; node < MAX_NUMNODES; node++) {
-		list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
-			destroy_spu(spu);
-	}
-	mutex_unlock(&spu_mutex);
-	sysdev_class_unregister(&spu_sysdev_class);
-}
-module_exit(cleanup_spu_base);
-
 static int __init init_spu_base(void)
 {
-	int i, ret;
+	int i, ret = 0;
 
 	if (!spu_management_ops)
-		return 0;
+		goto out;
 
 	/* create sysdev class for spus */
 	ret = sysdev_class_register(&spu_sysdev_class);
 	if (ret)
-		return ret;
+		goto out;
 
 	for (i = 0; i < MAX_NUMNODES; i++)
 		INIT_LIST_HEAD(&spu_list[i]);
@@ -637,12 +605,17 @@ static int __init init_spu_base(void)
 	if (ret) {
 		printk(KERN_WARNING "%s: Error initializing spus\n",
 			__FUNCTION__);
-		cleanup_spu_base();
-		return ret;
+		goto out_unregister_sysdev_class;
 	}
 
 	xmon_register_spus(&spu_full_list);
 
+	return 0;
+
+ out_unregister_sysdev_class:
+	sysdev_class_unregister(&spu_sysdev_class);
+ out:
+	return ret;
 }
 module_init(init_spu_base);
-- cgit v1.2.3

From ccf17e9d008dfebbf90dfa4ee1a56e81c784c73e Mon Sep 17 00:00:00 2001
From: Jeremy Kerr
Date: Mon, 23 Apr 2007 21:08:29 +0200
Subject: [POWERPC] spu_base: fix initialisation on systems with no SPEs

This change fixes the case where spu_base and spufs are initialised on
a system with no SPEs: unconditionally create the spu_lists so
spu_alloc doesn't explode, and check for spu_management ops before
starting spufs.
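A condensed sketch of the resulting ordering (structure abbreviated,
not the full function): the list heads are initialised before the
management-ops check, so SPE-less systems see empty lists instead of
uninitialised memory:

	static int __init example_init_spu_base(void)
	{
		int i;

		/* always initialise, even when there are no SPEs */
		for (i = 0; i < MAX_NUMNODES; i++)
			INIT_LIST_HEAD(&spu_list[i]);

		if (!spu_management_ops)
			return 0;	/* no SPEs: nothing more to do */

		/* ... sysdev registration and SPU enumeration follow ... */
		return 0;
	}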
Signed-off-by: Jeremy Kerr
Signed-off-by: Arnd Bergmann
---
 arch/powerpc/platforms/cell/spu_base.c    | 8 +++++---
 arch/powerpc/platforms/cell/spufs/inode.c | 5 +++++
 2 files changed, 10 insertions(+), 3 deletions(-)
(limited to 'arch')

diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 2e8aa9433b3..8086eb1ed60 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -36,6 +36,8 @@
 #include 
 
 const struct spu_management_ops *spu_management_ops;
+EXPORT_SYMBOL_GPL(spu_management_ops);
+
 const struct spu_priv1_ops *spu_priv1_ops;
 
 static struct list_head spu_list[MAX_NUMNODES];
@@ -589,6 +591,9 @@ static int __init init_spu_base(void)
 {
 	int i, ret = 0;
 
+	for (i = 0; i < MAX_NUMNODES; i++)
+		INIT_LIST_HEAD(&spu_list[i]);
+
 	if (!spu_management_ops)
 		goto out;
 
@@ -597,9 +602,6 @@ static int __init init_spu_base(void)
 	if (ret)
 		goto out;
 
-	for (i = 0; i < MAX_NUMNODES; i++)
-		INIT_LIST_HEAD(&spu_list[i]);
-
 	ret = spu_enumerate_spus(create_spu);
 	if (ret) {
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 8f6cd876330..13e4f70ec8c 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -36,6 +36,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include "spufs.h"
@@ -665,6 +666,10 @@ static int __init spufs_init(void)
 {
 	int ret;
 
+	ret = -ENODEV;
+	if (!spu_management_ops)
+		goto out;
+
 	ret = -ENOMEM;
 	spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
 			sizeof(struct spufs_inode_info), 0,
-- cgit v1.2.3
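The spufs_init() hunk above follows the common errno-staging style; a
minimal sketch of the idiom with illustrative names
(example_required_ops and example_alloc_caches are stand-ins):

	static int __init example_init(void)
	{
		int ret;

		ret = -ENODEV;		/* staged before the check it matches */
		if (!example_required_ops)
			goto out;

		ret = -ENOMEM;
		if (!example_alloc_caches())
			goto out;

		return 0;
	out:
		return ret;
	}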