-rw-r--r--   arch/i386/lib/usercopy.c   | 119
-rw-r--r--   include/asm-i386/uaccess.h |  46
2 files changed, 153 insertions, 12 deletions
diff --git a/arch/i386/lib/usercopy.c b/arch/i386/lib/usercopy.c
index 6979297ce27..c5aa65f7c02 100644
--- a/arch/i386/lib/usercopy.c
+++ b/arch/i386/lib/usercopy.c
@@ -528,6 +528,97 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
 	return size;
 }
 
+static unsigned long __copy_user_intel_nocache(void *to,
+				const void __user *from, unsigned long size)
+{
+	int d0, d1;
+
+	__asm__ __volatile__(
+		" .align 2,0x90\n"
+		"0: movl 32(%4), %%eax\n"
+		" cmpl $67, %0\n"
+		" jbe 2f\n"
+		"1: movl 64(%4), %%eax\n"
+		" .align 2,0x90\n"
+		"2: movl 0(%4), %%eax\n"
+		"21: movl 4(%4), %%edx\n"
+		" movnti %%eax, 0(%3)\n"
+		" movnti %%edx, 4(%3)\n"
+		"3: movl 8(%4), %%eax\n"
+		"31: movl 12(%4),%%edx\n"
+		" movnti %%eax, 8(%3)\n"
+		" movnti %%edx, 12(%3)\n"
+		"4: movl 16(%4), %%eax\n"
+		"41: movl 20(%4), %%edx\n"
+		" movnti %%eax, 16(%3)\n"
+		" movnti %%edx, 20(%3)\n"
+		"10: movl 24(%4), %%eax\n"
+		"51: movl 28(%4), %%edx\n"
+		" movnti %%eax, 24(%3)\n"
+		" movnti %%edx, 28(%3)\n"
+		"11: movl 32(%4), %%eax\n"
+		"61: movl 36(%4), %%edx\n"
+		" movnti %%eax, 32(%3)\n"
+		" movnti %%edx, 36(%3)\n"
+		"12: movl 40(%4), %%eax\n"
+		"71: movl 44(%4), %%edx\n"
+		" movnti %%eax, 40(%3)\n"
+		" movnti %%edx, 44(%3)\n"
+		"13: movl 48(%4), %%eax\n"
+		"81: movl 52(%4), %%edx\n"
+		" movnti %%eax, 48(%3)\n"
+		" movnti %%edx, 52(%3)\n"
+		"14: movl 56(%4), %%eax\n"
+		"91: movl 60(%4), %%edx\n"
+		" movnti %%eax, 56(%3)\n"
+		" movnti %%edx, 60(%3)\n"
+		" addl $-64, %0\n"
+		" addl $64, %4\n"
+		" addl $64, %3\n"
+		" cmpl $63, %0\n"
+		" ja 0b\n"
+		" sfence \n"
+		"5: movl %0, %%eax\n"
+		" shrl $2, %0\n"
+		" andl $3, %%eax\n"
+		" cld\n"
+		"6: rep; movsl\n"
+		" movl %%eax,%0\n"
+		"7: rep; movsb\n"
+		"8:\n"
+		".section .fixup,\"ax\"\n"
+		"9: lea 0(%%eax,%0,4),%0\n"
+		"16: jmp 8b\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+		" .align 4\n"
+		" .long 0b,16b\n"
+		" .long 1b,16b\n"
+		" .long 2b,16b\n"
+		" .long 21b,16b\n"
+		" .long 3b,16b\n"
+		" .long 31b,16b\n"
+		" .long 4b,16b\n"
+		" .long 41b,16b\n"
+		" .long 10b,16b\n"
+		" .long 51b,16b\n"
+		" .long 11b,16b\n"
+		" .long 61b,16b\n"
+		" .long 12b,16b\n"
+		" .long 71b,16b\n"
+		" .long 13b,16b\n"
+		" .long 81b,16b\n"
+		" .long 14b,16b\n"
+		" .long 91b,16b\n"
+		" .long 6b,9b\n"
+		" .long 7b,16b\n"
+		".previous"
+		: "=&c"(size), "=&D" (d0), "=&S" (d1)
+		: "1"(to), "2"(from), "0"(size)
+		: "eax", "edx", "memory");
+	return size;
+}
+
 #else
 
 /*
@@ -694,6 +785,19 @@ unsigned long __copy_from_user_ll(void *to, const void __user *from,
 }
 EXPORT_SYMBOL(__copy_from_user_ll);
 
+unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
+					unsigned long n)
+{
+	BUG_ON((long)n < 0);
+	if (movsl_is_ok(to, from, n))
+		__copy_user(to, from, n);
+	else
+		n = __copy_user_intel((void __user *)to,
+					(const void *)from, n);
+	return n;
+}
+EXPORT_SYMBOL(__copy_from_user_ll_nozero);
+
 unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
 					unsigned long n)
 {
@@ -709,6 +813,21 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
 	return n;
 }
 
+unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
+					unsigned long n)
+{
+	BUG_ON((long)n < 0);
+#ifdef CONFIG_X86_INTEL_USERCOPY
+	if ( n > 64 && cpu_has_xmm2)
+		n = __copy_user_intel_nocache(to, from, n);
+	else
+		__copy_user(to, from, n);
+#else
+	__copy_user(to, from, n);
+#endif
+	return n;
+}
+
 /**
  * copy_to_user: - Copy a block of data into user space.
  * @to: Destination address, in user space.
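For readers who do not want to decode the inline assembly, the following is a rough user-space analogue of what __copy_user_intel_nocache does: read 64 bytes per iteration with ordinary loads, write them back with movnti non-temporal stores, issue sfence, and finish the tail with a conventional copy. The helper name copy_nocache is made up for illustration; it uses SSE2 intrinsics rather than the kernel's inline asm and omits the exception-table handling that makes the real routine safe against faulting user addresses.

/*
 * Illustrative analogue only: assumes an SSE2-capable x86 CPU, a
 * 4-byte-aligned destination, and already-validated pointers.
 */
#include <emmintrin.h>	/* _mm_stream_si32, _mm_sfence */
#include <stdint.h>
#include <string.h>

static void copy_nocache(void *dst, const void *src, size_t n)
{
	uint32_t *d = dst;
	const uint32_t *s = src;

	/* Bulk loop, 64 bytes per iteration: loads are ordinary cached
	 * reads, stores use movnti so the destination buffer does not
	 * displace useful cache lines. */
	while (n >= 64) {
		for (int i = 0; i < 16; i++)
			_mm_stream_si32((int *)&d[i], (int)s[i]);
		d += 16;
		s += 16;
		n -= 64;
	}
	_mm_sfence();		/* order the non-temporal stores */
	memcpy(d, s, n);	/* tail goes through the normal cached path */
}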
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index d0d253277be..54d905ebc63 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -390,8 +390,12 @@ unsigned long __must_check __copy_to_user_ll(void __user *to,
 				const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll(void *to,
 				const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nozero(void *to,
+				const void __user *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll_nocache(void *to,
 				const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to,
+				const void __user *from, unsigned long n);
 
 /*
  * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
@@ -463,11 +467,36 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
  * atomic context and will fail rather than sleep. In this case the
  * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h
  * for explanation of why this is needed.
- * FIXME this isn't implimented yet EMXIF
  */
 static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
+	/* Avoid zeroing the tail if the copy fails..
+	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
+	 * but as the zeroing behaviour is only significant when n is not
+	 * constant, that shouldn't be a problem.
+	 */
+	if (__builtin_constant_p(n)) {
+		unsigned long ret;
+
+		switch (n) {
+		case 1:
+			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			return ret;
+		case 2:
+			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			return ret;
+		case 4:
+			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			return ret;
+		}
+	}
+	return __copy_from_user_ll_nozero(to, from, n);
+}
+static __always_inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	might_sleep();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
@@ -488,9 +517,10 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 
 #define ARCH_HAS_NOCACHE_UACCESS
 
-static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to,
+static __always_inline unsigned long __copy_from_user_nocache(void *to,
 				const void __user *from, unsigned long n)
 {
+	might_sleep();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
@@ -510,17 +540,9 @@ static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to,
 }
 
 static __always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+__copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n)
 {
-	might_sleep();
-	return __copy_from_user_inatomic(to, from, n);
-}
-
-static __always_inline unsigned long
-__copy_from_user_nocache(void *to, const void __user *from, unsigned long n)
-{
-	might_sleep();
-	return __copy_from_user_inatomic_nocache(to, from, n);
+	return __copy_from_user_ll_nocache_nozero(to, from, n);
 }
 
 unsigned long __must_check copy_to_user(void __user *to,
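The reason __copy_from_user_inatomic may now skip zeroing the tail is the retry pattern used by callers such as fs/filemap.c: if the atomic copy comes up short (because the user page was not resident), the caller drops the atomic mapping and repeats the copy with the sleeping variant. A rough sketch of that pattern, using the two-argument kmap_atomic()/KM_USER0 API of this era, is below; copy_into_page() and its parameters are illustrative names, not kernel interfaces.

#include <linux/highmem.h>
#include <linux/errno.h>
#include <asm/uaccess.h>

/* Illustrative only: shows why a short __copy_from_user_inatomic is
 * harmless without zero padding -- the caller simply retries with the
 * full, sleeping __copy_from_user once it is allowed to fault. */
static int copy_into_page(struct page *page, unsigned long offset,
			  const char __user *buf, unsigned long bytes)
{
	char *kaddr;
	unsigned long left;

	kaddr = kmap_atomic(page, KM_USER0);
	left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
	kunmap_atomic(kaddr, KM_USER0);

	if (unlikely(left)) {
		/* The user page was not resident: retry outside the atomic
		 * mapping, where taking a page fault is allowed.  Nothing
		 * here relies on the tail having been zeroed. */
		kaddr = kmap(page);
		left = __copy_from_user(kaddr + offset, buf, bytes);
		kunmap(page);
	}
	return left ? -EFAULT : 0;
}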