author     Mark Nutter <mnutter@us.ibm.com>       2006-10-24 18:31:16 +0200
committer  Paul Mackerras <paulus@samba.org>      2006-10-25 14:20:21 +1000
commit     5737edd1ddbde5ab7f63bb3cb36015edbdb7c295 (patch)
tree       10162eb7c27fa0a3d1614c2d46fdcf2c87410d3a /arch
parent     cc21a66d7f727ab97b27af9cf763bc0b51510ffa (diff)
[POWERPC] spufs: add support for nonschedulable contexts
This adds two new flags to spu_create:

  SPU_CREATE_NOSCHED: create a context that is never moved away
  from an SPE once it has started running. This flag can only be
  used by tasks with the CAP_SYS_NICE capability.

  SPU_CREATE_ISOLATE: create a nonschedulable context that enters
  isolation mode upon first run. This requires the
  SPU_CREATE_NOSCHED flag.
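
For illustration only (not part of the patch), a userspace caller might
exercise the new flags roughly as in the sketch below. It assumes spufs is
mounted at /spu, that SYS_spu_create is exposed through <sys/syscall.h>,
and that the flag values match asm/spu.h (the #define fallbacks here are
assumptions); creating such a context requires CAP_SYS_NICE.

/* Hypothetical userspace sketch: create a nonschedulable, isolated
 * SPU context.  Not part of this patch.
 */
#include <sys/syscall.h>
#include <stdio.h>
#include <unistd.h>

#ifndef SPU_CREATE_NOSCHED
#define SPU_CREATE_NOSCHED	0x0004	/* assumed value; see asm/spu.h */
#endif
#ifndef SPU_CREATE_ISOLATE
#define SPU_CREATE_ISOLATE	0x0008	/* assumed value; see asm/spu.h */
#endif

int main(void)
{
	/* SPU_CREATE_ISOLATE is only valid together with SPU_CREATE_NOSCHED. */
	int flags = SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE;
	int fd = syscall(SYS_spu_create, "/spu/isolated-ctx", flags, 0700);

	if (fd < 0) {
		perror("spu_create");	/* -EPERM without CAP_SYS_NICE */
		return 1;
	}
	close(fd);
	return 0;
}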
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c    22
-rw-r--r--  arch/powerpc/platforms/cell/spufs/hw_ops.c   5
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c   17
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c     10
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h    1
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c  50
6 files changed, 99 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 0de8e114e6b..8ca330671ad 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1531,3 +1531,25 @@ struct tree_descr spufs_dir_contents[] = {
 	{ "object-id", &spufs_object_id_ops, 0666, },
 	{},
 };
+
+struct tree_descr spufs_dir_nosched_contents[] = {
+	{ "mem", &spufs_mem_fops, 0666, },
+	{ "mbox", &spufs_mbox_fops, 0444, },
+	{ "ibox", &spufs_ibox_fops, 0444, },
+	{ "wbox", &spufs_wbox_fops, 0222, },
+	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
+	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
+	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
+	{ "signal1", &spufs_signal1_fops, 0666, },
+	{ "signal2", &spufs_signal2_fops, 0666, },
+	{ "signal1_type", &spufs_signal1_type, 0666, },
+	{ "signal2_type", &spufs_signal2_type, 0666, },
+	{ "mss", &spufs_mss_fops, 0666, },
+	{ "mfc", &spufs_mfc_fops, 0666, },
+	{ "cntl", &spufs_cntl_fops, 0666, },
+	{ "npc", &spufs_npc_ops, 0666, },
+	{ "psmap", &spufs_psmap_fops, 0666, },
+	{ "phys-id", &spufs_id_ops, 0666, },
+	{ "object-id", &spufs_object_id_ops, 0666, },
+	{},
+};
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index efc452e71ab..2ad534a04be 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -219,8 +219,11 @@ static char *spu_hw_get_ls(struct spu_context *ctx)
 
 static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
 {
-	eieio();
+	spin_lock_irq(&ctx->spu->register_lock);
+	if (val & SPU_RUNCNTL_ISOLATE)
+		out_be64(&ctx->spu->priv2->spu_privcntl_RW, 4LL);
 	out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
+	spin_unlock_irq(&ctx->spu->register_lock);
 }
 
 static void spu_hw_runcntl_stop(struct spu_context *ctx)
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 427d00a4f6a..787ae71a685 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -258,7 +258,12 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
 	inode->i_op = &spufs_dir_inode_operations;
 	inode->i_fop = &simple_dir_operations;
 
-	ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
+	if (flags & SPU_CREATE_NOSCHED)
+		ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
+					 mode, ctx);
+	else
+		ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
+
 	if (ret)
 		goto out_free_ctx;
@@ -307,6 +312,16 @@ static int spufs_create_context(struct inode *inode,
 {
 	int ret;
 
+	ret = -EPERM;
+	if ((flags & SPU_CREATE_NOSCHED) &&
+	    !capable(CAP_SYS_NICE))
+		goto out_unlock;
+
+	ret = -EINVAL;
+	if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
+	    == SPU_CREATE_ISOLATE)
+		goto out_unlock;
+
 	ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO);
 	if (ret)
 		goto out_unlock;
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 63df8cf4ba1..0c03a04b6a3 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -51,11 +51,17 @@ static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
 static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 {
 	int ret;
+	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
 
 	if ((ret = spu_acquire_runnable(ctx)) != 0)
 		return ret;
-	ctx->ops->npc_write(ctx, *npc);
-	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
+
+	if (ctx->flags & SPU_CREATE_ISOLATE)
+		runcntl |= SPU_RUNCNTL_ISOLATE;
+	else
+		ctx->ops->npc_write(ctx, *npc);
+
+	ctx->ops->runcntl_write(ctx, runcntl);
 	return 0;
 }
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index a0f55ca2d48..b17b809ecd7 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -135,6 +135,7 @@ struct spufs_inode_info {
 	container_of(inode, struct spufs_inode_info, vfs_inode)
 
 extern struct tree_descr spufs_dir_contents[];
+extern struct tree_descr spufs_dir_nosched_contents[];
 
 /* system call implementation */
 long spufs_run_spu(struct file *file,
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index b85347ff6b2..b47fb50ac2c 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -1916,6 +1916,51 @@ static void save_lscsa(struct spu_state *prev, struct spu *spu)
 	wait_spu_stopped(prev, spu);	/* Step 57. */
 }
 
+static void force_spu_isolate_exit(struct spu *spu)
+{
+	struct spu_problem __iomem *prob = spu->problem;
+	struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+	/* Stop SPE execution and wait for completion. */
+	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
+	iobarrier_rw();
+	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
+
+	/* Restart SPE master runcntl. */
+	spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
+	iobarrier_w();
+
+	/* Initiate isolate exit request and wait for completion. */
+	out_be64(&priv2->spu_privcntl_RW, 4LL);
+	iobarrier_w();
+	out_be32(&prob->spu_runcntl_RW, 2);
+	iobarrier_rw();
+	POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
+				& SPU_STATUS_STOPPED_BY_STOP));
+
+	/* Reset load request to normal. */
+	out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
+	iobarrier_w();
+}
+
+/**
+ * stop_spu_isolate
+ *	Check SPU run-control state and force isolated
+ *	exit function as necessary.
+ */
+static void stop_spu_isolate(struct spu *spu)
+{
+	struct spu_problem __iomem *prob = spu->problem;
+
+	if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
+		/* The SPU is in isolated state; the only way
+		 * to get it out is to perform an isolated
+		 * exit (clean) operation.
+		 */
+		force_spu_isolate_exit(spu);
+	}
+}
+
 static void harvest(struct spu_state *prev, struct spu *spu)
 {
 	/*
@@ -1928,6 +1973,7 @@ static void harvest(struct spu_state *prev, struct spu *spu)
 	inhibit_user_access(prev, spu);	        /* Step 3.  */
 	terminate_spu_app(prev, spu);	        /* Step 4.  */
 	set_switch_pending(prev, spu);	        /* Step 5.  */
+	stop_spu_isolate(spu);			/* NEW.     */
 	remove_other_spu_access(prev, spu);	/* Step 6.  */
 	suspend_mfc(prev, spu);	                /* Step 7.  */
 	wait_suspend_mfc_complete(prev, spu);	/* Step 8.  */
@@ -2096,11 +2142,11 @@ int spu_save(struct spu_state *prev, struct spu *spu)
 	acquire_spu_lock(spu);	        /* Step 1.     */
 	rc = __do_spu_save(prev, spu);	/* Steps 2-53. */
 	release_spu_lock(spu);
-	if (rc) {
+	if (rc != 0 && rc != 2 && rc != 6) {
 		panic("%s failed on SPU[%d], rc=%d.\n",
 		      __func__, spu->number, rc);
 	}
-	return rc;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(spu_save);
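
Usage note (an assumption-laden sketch, not part of the patch): because
spu_run_init() above skips npc_write() and sets SPU_RUNCNTL_ISOLATE for
isolated contexts, the entry point of the first run comes from the
isolated-mode loader rather than from the npc argument of spu_run. A
minimal run helper for the fd returned by the earlier sketch might look
like this, assuming SYS_spu_run is defined in <sys/syscall.h>:

/* Hypothetical continuation of the earlier sketch: run an isolated
 * context.  The initial *npc is ignored by the kernel for
 * SPU_CREATE_ISOLATE contexts, per the run.c hunk above. */
#include <sys/syscall.h>
#include <stdio.h>
#include <unistd.h>

static int run_isolated(int fd)
{
	unsigned int npc = 0;	/* updated by the kernel on return */
	unsigned int event = 0;	/* extended status word, see spu_run(2) */
	long rc = syscall(SYS_spu_run, fd, &npc, &event);

	if (rc < 0) {
		perror("spu_run");
		return -1;
	}
	return (int)rc;		/* contents of the SPU status register */
}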