Diffstat (limited to 'arch')
-rw-r--r--  arch/sh/Kconfig                     |  1 -
-rw-r--r--  arch/sh/boards/mach-landisk/gio.c   | 10 ++++------
-rw-r--r--  arch/sh/mm/cache-sh4.c              | 22 ++++++++++------------
-rw-r--r--  arch/sh/mm/cache.c                  | 10 ++++++++++
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c  | 10 +++++-----
5 files changed, 29 insertions, 24 deletions
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index b940424f8cc..0dc7e3cbeff 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -37,7 +37,6 @@ config SUPERH32
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	select HAVE_FTRACE_SYSCALLS
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_ARCH_KGDB
 	select ARCH_HIBERNATION_POSSIBLE if MMU
diff --git a/arch/sh/boards/mach-landisk/gio.c b/arch/sh/boards/mach-landisk/gio.c
index 25cdf735800..52801318819 100644
--- a/arch/sh/boards/mach-landisk/gio.c
+++ b/arch/sh/boards/mach-landisk/gio.c
@@ -14,7 +14,6 @@
  */
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/smp_lock.h>
 #include <linux/kdev_t.h>
 #include <linux/cdev.h>
 #include <linux/fs.h>
@@ -35,7 +34,7 @@ static int gio_open(struct inode *inode, struct file *filp)
 	int minor;
 	int ret = -ENOENT;
 
-	lock_kernel();
+	preempt_disable();
 	minor = MINOR(inode->i_rdev);
 	if (minor < DEVCOUNT) {
 		if (openCnt > 0) {
@@ -45,7 +44,7 @@ static int gio_open(struct inode *inode, struct file *filp)
 			ret = 0;
 		}
 	}
-	unlock_kernel();
+	preempt_enable();
 
 	return ret;
 }
@@ -60,8 +59,7 @@ static int gio_close(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-static int gio_ioctl(struct inode *inode, struct file *filp,
-		     unsigned int cmd, unsigned long arg)
+static long gio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	unsigned int data;
 	static unsigned int addr = 0;
@@ -129,7 +127,7 @@ static const struct file_operations gio_fops = {
 	.owner = THIS_MODULE,
 	.open = gio_open,	/* open */
 	.release = gio_close,	/* release */
-	.ioctl = gio_ioctl,	/* ioctl */
+	.unlocked_ioctl = gio_ioctl,
 };
 
 static int __init gio_init(void)
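The gio.c hunks above are the standard BKL-removal conversion: the legacy .ioctl hook took an inode argument and was called with the Big Kernel Lock held, while .unlocked_ioctl takes only the file pointer, returns long, and leaves serialization to the driver (here a preempt_disable()/preempt_enable() pair around the short open-count update). A minimal sketch of the same conversion for a hypothetical driver follows; the "mydev" names and the mutex are illustrative, not part of this patch:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mutex.h>

/* Hypothetical example driver; all "mydev" names are illustrative only. */
static DEFINE_MUTEX(mydev_lock);

/*
 * Old style: static int mydev_ioctl(struct inode *, struct file *,
 * unsigned int, unsigned long), invoked under the BKL.  New style
 * .unlocked_ioctl drops the inode argument, returns long, and must
 * supply its own locking.
 */
static long mydev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	long ret;

	mutex_lock(&mydev_lock);	/* driver-local lock replaces the BKL */
	switch (cmd) {
	default:
		ret = -ENOTTY;		/* unknown ioctl command */
	}
	mutex_unlock(&mydev_lock);

	return ret;
}

static const struct file_operations mydev_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= mydev_ioctl,	/* was: .ioctl = mydev_ioctl */
};

A mutex is the more common BKL replacement when the critical section may sleep; gio.c can use preempt_disable() because its section is short and atomic.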
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index a98c7d8984f..519e2d16cd0 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -26,7 +26,7 @@
 #define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
 #define MAX_ICACHE_PAGES	32
 
-static void __flush_cache_4096(unsigned long addr, unsigned long phys,
+static void __flush_cache_one(unsigned long addr, unsigned long phys,
 			       unsigned long exec_offset);
 
 /*
@@ -89,8 +89,7 @@ static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)
 	local_irq_restore(flags);
 }
 
-static inline void flush_cache_4096(unsigned long start,
-				    unsigned long phys)
+static inline void flush_cache_one(unsigned long start, unsigned long phys)
 {
 	unsigned long flags, exec_offset = 0;
 
@@ -103,8 +102,7 @@ static inline void flush_cache_4096(unsigned long start,
 		exec_offset = 0x20000000;
 
 	local_irq_save(flags);
-	__flush_cache_4096(start | SH_CACHE_ASSOC,
-			   P1SEGADDR(phys), exec_offset);
+	__flush_cache_one(start | SH_CACHE_ASSOC, P1SEGADDR(phys), exec_offset);
 	local_irq_restore(flags);
 }
 
@@ -129,8 +127,8 @@ static void sh4_flush_dcache_page(void *arg)
 
 		/* Loop all the D-cache */
 		n = boot_cpu_data.dcache.n_aliases;
-		for (i = 0; i < n; i++, addr += 4096)
-			flush_cache_4096(addr, phys);
+		for (i = 0; i < n; i++, addr += PAGE_SIZE)
+			flush_cache_one(addr, phys);
 	}
 
 	wmb();
@@ -318,11 +316,11 @@ static void sh4_flush_cache_page(void *args)
 	/* We only need to flush D-cache when we have alias */
 	if ((address^phys) & alias_mask) {
 		/* Loop 4K of the D-cache */
-		flush_cache_4096(
+		flush_cache_one(
 			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
 			phys);
 		/* Loop another 4K of the D-cache */
-		flush_cache_4096(
+		flush_cache_one(
 			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
 			phys);
 	}
@@ -337,7 +335,7 @@ static void sh4_flush_cache_page(void *args)
 		 * kernel has never executed the code through its identity
 		 * translation.
 		 */
-		flush_cache_4096(
+		flush_cache_one(
 			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
 			phys);
 	}
@@ -393,7 +391,7 @@ static void sh4_flush_cache_range(void *args)
 }
 
 /**
- * __flush_cache_4096
+ * __flush_cache_one
  *
  * @addr:  address in memory mapped cache array
  * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
@@ -406,7 +404,7 @@ static void sh4_flush_cache_range(void *args)
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
-static void __flush_cache_4096(unsigned long addr, unsigned long phys,
+static void __flush_cache_one(unsigned long addr, unsigned long phys,
 			       unsigned long exec_offset)
 {
 	int way_count;
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 5e1091be9dc..a2dc7f9ecc5 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -265,6 +265,8 @@ static void __init emit_cache_params(void)
 
 void __init cpu_cache_init(void)
 {
+	unsigned int cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+
 	compute_alias(&boot_cpu_data.icache);
 	compute_alias(&boot_cpu_data.dcache);
 	compute_alias(&boot_cpu_data.scache);
@@ -273,6 +275,13 @@ void __init cpu_cache_init(void)
 	__flush_purge_region		= noop__flush_region;
 	__flush_invalidate_region	= noop__flush_region;
 
+	/*
+	 * No flushing is necessary in the disabled cache case so we can
+	 * just keep the noop functions in local_flush_..() and __flush_..()
+	 */
+	if (unlikely(cache_disabled))
+		goto skip;
+
 	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
 		extern void __weak sh2_cache_init(void);
 
@@ -312,5 +321,6 @@ void __init cpu_cache_init(void)
 		sh5_cache_init();
 	}
 
+skip:
 	emit_cache_params();
 }
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 585edebe12c..49c552c060e 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -82,7 +82,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 		return -EINVAL;
 	}
 
-	if (irq_fpu_usable())
+	if (!irq_fpu_usable())
 		err = crypto_aes_expand_key(ctx, in_key, key_len);
 	else {
 		kernel_fpu_begin();
@@ -103,7 +103,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (irq_fpu_usable())
+	if (!irq_fpu_usable())
 		crypto_aes_encrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -116,7 +116,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (irq_fpu_usable())
+	if (!irq_fpu_usable())
 		crypto_aes_decrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -342,7 +342,7 @@ static int ablk_encrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (irq_fpu_usable()) {
+	if (!irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));
@@ -363,7 +363,7 @@ static int ablk_decrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (irq_fpu_usable()) {
+	if (!irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));
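The aesni-intel_glue.c hunks fix an inverted predicate: irq_fpu_usable() returns true when FPU/SSE state may be touched from the current context, so the non-FPU fallback path (crypto_aes_expand_key(), crypto_aes_encrypt_x86()/crypto_aes_decrypt_x86(), and the cryptd deferral in ablk_encrypt()/ablk_decrypt()) must run on the negated test. After the patch, aes_encrypt() reads essentially as the sketch below; braces were added to carry the comments, and aesni_enc() is assumed to be the AES-NI assembler entry point declared earlier in this file:

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable()) {
		/* FPU state must not be clobbered here, e.g. we may be in
		 * an interrupt that arrived while user FPU state was live,
		 * so use the plain C x86 implementation instead. */
		crypto_aes_encrypt_x86(ctx, dst, src);
	} else {
		/* Safe to use SSE/AES-NI: save FPU state, run the
		 * hardware-accelerated path, then restore it. */
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

With the old test, the AES-NI path ran exactly when the FPU was unusable, corrupting FPU state; the same reasoning applies to each of the five hunks.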