Diffstat (limited to 'include/asm-i386')
 include/asm-i386/bitops.h  |  2 +-
 include/asm-i386/current.h |  2 +-
 include/asm-i386/string.h  |  8 ++++----
 include/asm-i386/uaccess.h |  8 ++++----
 4 files changed, 10 insertions(+), 10 deletions(-)
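
The conversion is mechanical: each helper swaps plain inline for __always_inline, which makes gcc inline the body even at -O0 or when its size heuristics would otherwise emit an out-of-line copy. As a sketch of what the annotation expands to (matching the era's include/linux/compiler-gcc.h; the exact header location is an assumption):

/* "inline" is only a hint to gcc; the attribute makes it mandatory */
#define __always_inline inline __attribute__((always_inline))
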
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index fe0819fe9c6..88e6ca248cd 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -247,7 +247,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
static int test_bit(int nr, const volatile void * addr);
#endif
-static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}
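
constant_test_bit() is only reached through the header's __builtin_constant_p() dispatch, so it is only useful when fully inlined at a call site where nr is a compile-time constant; emitted out of line, it degrades into a real call for what should fold down to a single test instruction. Roughly (paraphrased from the same header):

#define test_bit(nr, addr)                      \
        (__builtin_constant_p(nr) ?             \
         constant_test_bit((nr), (addr)) :      \
         variable_test_bit((nr), (addr)))
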
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h
index d97328951f5..3cbbecd7901 100644
--- a/include/asm-i386/current.h
+++ b/include/asm-i386/current.h
@@ -5,7 +5,7 @@
struct task_struct;
-static inline struct task_struct * get_current(void)
+static __always_inline struct task_struct * get_current(void)
{
return current_thread_info()->task;
}
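
get_current() backs the current macro, which runs on virtually every hot path in the kernel, so an out-of-line call per access would be a measurable cost. The macro in the same header is simply:

#define current get_current()
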
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
index 02c8f5d2206..bb5f88a27f7 100644
--- a/include/asm-i386/string.h
+++ b/include/asm-i386/string.h
@@ -201,7 +201,7 @@ __asm__ __volatile__(
return __res;
}
-static inline void * __memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __memcpy(void * to, const void * from, size_t n)
{
int d0, d1, d2;
__asm__ __volatile__(
@@ -223,7 +223,7 @@ return (to);
* This looks ugly, but the compiler can optimize it totally,
* as the count is constant.
*/
-static inline void * __constant_memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n)
{
long esi, edi;
if (!n) return to;
@@ -367,7 +367,7 @@ return s;
* things 32 bits at a time even when we don't know the size of the
* area at compile-time..
*/
-static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
{
int d0, d1;
__asm__ __volatile__(
@@ -416,7 +416,7 @@ extern char *strstr(const char *cs, const char *ct);
* This looks horribly ugly, but the compiler can optimize it totally,
* as we by now know that both pattern and count is constant..
*/
-static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
{
switch (count) {
case 0:
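
All of these string.h helpers exist so gcc can fold constant-size operations at compile time; the large switch (count) in __constant_memcpy() and the memset variants only disappears when the body is inlined at a call site where the size is a literal. A sketch of the header's dispatch pattern (paraphrased, not verbatim):

/* Constant sizes take the unrolled __constant_memcpy() path;
 * runtime sizes fall back to the rep-movs implementation. */
#define memcpy(t, f, n)                         \
        (__builtin_constant_p(n) ?              \
         __constant_memcpy((t), (f), (n)) :     \
         __memcpy((t), (f), (n)))
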
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 89ab7e2bc5a..3f1337c3420 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -411,7 +411,7 @@ unsigned long __must_check __copy_from_user_ll(void *to,
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
@@ -432,7 +432,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
return __copy_to_user_ll(to, from, n);
}
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_sleep();
@@ -456,7 +456,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
-static inline unsigned long
+static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
@@ -477,7 +477,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
return __copy_from_user_ll(to, from, n);
}
-static inline unsigned long
+static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_sleep();
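
The uaccess helpers show the failure mode most directly: each branches on __builtin_constant_p(n) to select a fixed-size fast path, and that builtin only evaluates to 1 when the body is inlined where n is a literal. The sketch below illustrates why (copy_sketch is a hypothetical stand-in, not a kernel function):

/* Hypothetical illustration of the pattern above, not kernel code.
 * If gcc emitted this out of line, n would be a runtime argument,
 * __builtin_constant_p(n) would fold to 0, and the fixed-size fast
 * path would become dead code. */
static __always_inline unsigned long
copy_sketch(void *to, const void *from, unsigned long n)
{
        if (__builtin_constant_p(n) && n == 4) {
                *(unsigned int *)to = *(const unsigned int *)from;
                return 0;       /* nothing left to copy */
        }
        return n;               /* stand-in for the __copy_*_ll() fallback */
}
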