Diffstat (limited to 'include/asm-xtensa')
-rw-r--r--  include/asm-xtensa/atomic.h        12
-rw-r--r--  include/asm-xtensa/auxvec.h         4
-rw-r--r--  include/asm-xtensa/checksum.h       4
-rw-r--r--  include/asm-xtensa/delay.h          2
-rw-r--r--  include/asm-xtensa/fcntl.h         48
-rw-r--r--  include/asm-xtensa/hdreg.h         17
-rw-r--r--  include/asm-xtensa/io.h            14
-rw-r--r--  include/asm-xtensa/mmu_context.h   18
-rw-r--r--  include/asm-xtensa/page.h           2
-rw-r--r--  include/asm-xtensa/page.h.n       135
-rw-r--r--  include/asm-xtensa/pci.h            4
-rw-r--r--  include/asm-xtensa/pgtable.h        6
-rw-r--r--  include/asm-xtensa/semaphore.h     10
-rw-r--r--  include/asm-xtensa/socket.h         2
-rw-r--r--  include/asm-xtensa/string.h         8
-rw-r--r--  include/asm-xtensa/system.h        10
-rw-r--r--  include/asm-xtensa/tlbflush.h      40
-rw-r--r--  include/asm-xtensa/types.h          2
-rw-r--r--  include/asm-xtensa/uaccess.h       10
19 files changed, 80 insertions(+), 268 deletions(-)
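
The bulk of this series converts the Xtensa headers from GCC's old "extern __inline__" idiom to "static inline". A minimal sketch of the difference, assuming a header included from many translation units (the function name below is made up for illustration):

    /* Under traditional GNU C89 semantics, an "extern inline" body is
     * used only for inlining; if the compiler declines to inline (e.g.
     * at -O0) or a caller takes the function's address, the link fails
     * unless an out-of-line copy is provided somewhere else.
     * "static inline" gives every translation unit its own fallback
     * definition, so the header stands on its own.
     */
    static inline int example_add(int a, int b)    /* hypothetical helper */
    {
            return a + b;
    }
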
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index d72bcb32ba4..24f86f0e43c 100644
--- a/include/asm-xtensa/atomic.h
+++ b/include/asm-xtensa/atomic.h
@@ -66,7 +66,7 @@ typedef struct { volatile int counter; } atomic_t;
*
* Atomically adds @i to @v.
*/
-extern __inline__ void atomic_add(int i, atomic_t * v)
+static inline void atomic_add(int i, atomic_t * v)
{
unsigned int vval;
@@ -90,7 +90,7 @@ extern __inline__ void atomic_add(int i, atomic_t * v)
*
* Atomically subtracts @i from @v.
*/
-extern __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
{
unsigned int vval;
@@ -111,7 +111,7 @@ extern __inline__ void atomic_sub(int i, atomic_t *v)
* We use atomic_{add|sub}_return to define other functions.
*/
-extern __inline__ int atomic_add_return(int i, atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t * v)
{
unsigned int vval;
@@ -130,7 +130,7 @@ extern __inline__ int atomic_add_return(int i, atomic_t * v)
return vval;
}
-extern __inline__ int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t * v)
{
unsigned int vval;
@@ -224,7 +224,7 @@ extern __inline__ int atomic_sub_return(int i, atomic_t * v)
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
-extern __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
unsigned int all_f = -1;
unsigned int vval;
@@ -243,7 +243,7 @@ extern __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
);
}
-extern __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
unsigned int vval;
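
The hunk above notes that atomic_{add|sub}_return are used to define the other operations. A hedged sketch of the usual derived helpers (these follow the standard Linux atomic API names; the actual definitions fall outside the lines shown here):

    #define atomic_inc(v)            atomic_add(1, (v))
    #define atomic_dec(v)            atomic_sub(1, (v))
    #define atomic_inc_return(v)     atomic_add_return(1, (v))
    #define atomic_dec_return(v)     atomic_sub_return(1, (v))
    #define atomic_dec_and_test(v)   (atomic_sub_return(1, (v)) == 0)
    #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
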
diff --git a/include/asm-xtensa/auxvec.h b/include/asm-xtensa/auxvec.h
new file mode 100644
index 00000000000..257dec75c5a
--- /dev/null
+++ b/include/asm-xtensa/auxvec.h
@@ -0,0 +1,4 @@
+#ifndef __XTENSA_AUXVEC_H
+#define __XTENSA_AUXVEC_H
+
+#endif
diff --git a/include/asm-xtensa/checksum.h b/include/asm-xtensa/checksum.h
index 1a00fad1992..81a797ae3ab 100644
--- a/include/asm-xtensa/checksum.h
+++ b/include/asm-xtensa/checksum.h
@@ -47,14 +47,14 @@ asmlinkage unsigned int csum_partial_copy_generic( const char *src, char *dst, i
* If you use these functions directly please don't forget the
* verify_area().
*/
-extern __inline__
+static inline
unsigned int csum_partial_copy_nocheck ( const char *src, char *dst,
int len, int sum)
{
return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
}
-extern __inline__
+static inline
unsigned int csum_partial_copy_from_user ( const char *src, char *dst,
int len, int sum, int *err_ptr)
{
diff --git a/include/asm-xtensa/delay.h b/include/asm-xtensa/delay.h
index 0a123d53a63..1bc601ec362 100644
--- a/include/asm-xtensa/delay.h
+++ b/include/asm-xtensa/delay.h
@@ -18,7 +18,7 @@
extern unsigned long loops_per_jiffy;
-extern __inline__ void __delay(unsigned long loops)
+static inline void __delay(unsigned long loops)
{
/* 2 cycles per loop. */
__asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
diff --git a/include/asm-xtensa/fcntl.h b/include/asm-xtensa/fcntl.h
index 48876bb727d..ec066ae96ca 100644
--- a/include/asm-xtensa/fcntl.h
+++ b/include/asm-xtensa/fcntl.h
@@ -14,31 +14,17 @@
/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
located on an ext2 file system */
-#define O_ACCMODE 0x0003
-#define O_RDONLY 0x0000
-#define O_WRONLY 0x0001
-#define O_RDWR 0x0002
#define O_APPEND 0x0008
#define O_SYNC 0x0010
#define O_NONBLOCK 0x0080
#define O_CREAT 0x0100 /* not fcntl */
-#define O_TRUNC 0x0200 /* not fcntl */
#define O_EXCL 0x0400 /* not fcntl */
#define O_NOCTTY 0x0800 /* not fcntl */
#define FASYNC 0x1000 /* fcntl, for BSD compatibility */
#define O_LARGEFILE 0x2000 /* allow large file opens - currently ignored */
#define O_DIRECT 0x8000 /* direct disk access hint - currently ignored*/
-#define O_DIRECTORY 0x10000 /* must be a directory */
-#define O_NOFOLLOW 0x20000 /* don't follow links */
#define O_NOATIME 0x100000
-#define O_NDELAY O_NONBLOCK
-
-#define F_DUPFD 0 /* dup */
-#define F_GETFD 1 /* get close_on_exec */
-#define F_SETFD 2 /* set/clear close_on_exec */
-#define F_GETFL 3 /* get file->f_flags */
-#define F_SETFL 4 /* set file->f_flags */
#define F_GETLK 14
#define F_GETLK64 15
#define F_SETLK 6
@@ -48,35 +34,6 @@
#define F_SETOWN 24 /* for sockets. */
#define F_GETOWN 23 /* for sockets. */
-#define F_SETSIG 10 /* for sockets. */
-#define F_GETSIG 11 /* for sockets. */
-
-/* for F_[GET|SET]FL */
-#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
-
-/* for posix fcntl() and lockf() */
-#define F_RDLCK 0
-#define F_WRLCK 1
-#define F_UNLCK 2
-
-/* for old implementation of bsd flock () */
-#define F_EXLCK 4 /* or 3 */
-#define F_SHLCK 8 /* or 4 */
-
-/* for leases */
-#define F_INPROGRESS 16
-
-/* operations for bsd flock(), also used by the kernel implementation */
-#define LOCK_SH 1 /* shared lock */
-#define LOCK_EX 2 /* exclusive lock */
-#define LOCK_NB 4 /* or'd with one of the above to prevent
- blocking */
-#define LOCK_UN 8 /* remove lock */
-
-#define LOCK_MAND 32 /* This is a mandatory flock ... */
-#define LOCK_READ 64 /* which allows concurrent read operations */
-#define LOCK_WRITE 128 /* which allows concurrent write operations */
-#define LOCK_RW 192 /* which allows concurrent read & write ops */
typedef struct flock {
short l_type;
@@ -96,6 +53,9 @@ struct flock64 {
pid_t l_pid;
};
-#define F_LINUX_SPECIFIC_BASE 1024
+#define HAVE_ARCH_STRUCT_FLOCK
+#define HAVE_ARCH_STRUCT_FLOCK64
+
+#include <asm-generic/fcntl.h>
#endif /* _XTENSA_FCNTL_H */
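
The replacement relies on <asm-generic/fcntl.h> supplying defaults only where the architecture has not already defined them, which is why only the non-default Xtensa values survive above. A simplified sketch of that pattern (not the verbatim generic header):

    /* Each value the arch did not override falls back to a shared
     * default, and struct flock is only emitted when the arch did not
     * declare HAVE_ARCH_STRUCT_FLOCK. */
    #ifndef O_NONBLOCK
    #define O_NONBLOCK      00004000
    #endif

    #ifndef HAVE_ARCH_STRUCT_FLOCK
    struct flock {
            short   l_type;
            short   l_whence;
            off_t   l_start;
            off_t   l_len;
            pid_t   l_pid;
    };
    #endif
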
diff --git a/include/asm-xtensa/hdreg.h b/include/asm-xtensa/hdreg.h
deleted file mode 100644
index 64b80607b80..00000000000
--- a/include/asm-xtensa/hdreg.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * include/asm-xtensa/hdreg.h
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of
- * this archive for more details.
- *
- * Copyright (C) 2002 - 2005 Tensilica Inc.
- * Copyright (C) 1994-1996 Linus Torvalds & authors
- */
-
-#ifndef _XTENSA_HDREG_H
-#define _XTENSA_HDREG_H
-
-typedef unsigned int ide_ioreg_t;
-
-#endif
diff --git a/include/asm-xtensa/io.h b/include/asm-xtensa/io.h
index 2c471c42ecf..c5c13985bbe 100644
--- a/include/asm-xtensa/io.h
+++ b/include/asm-xtensa/io.h
@@ -41,12 +41,12 @@ static inline unsigned int _swapl (unsigned int v)
* These are trivial on the 1:1 Linux/Xtensa mapping
*/
-extern inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile void * address)
{
return PHYSADDR((unsigned long)address);
}
-extern inline void * phys_to_virt(unsigned long address)
+static inline void * phys_to_virt(unsigned long address)
{
return (void*) CACHED_ADDR(address);
}
@@ -55,12 +55,12 @@ extern inline void * phys_to_virt(unsigned long address)
* IO bus memory addresses are also 1:1 with the physical address
*/
-extern inline unsigned long virt_to_bus(volatile void * address)
+static inline unsigned long virt_to_bus(volatile void * address)
{
return PHYSADDR((unsigned long)address);
}
-extern inline void * bus_to_virt (unsigned long address)
+static inline void * bus_to_virt (unsigned long address)
{
return (void *) CACHED_ADDR(address);
}
@@ -69,17 +69,17 @@ extern inline void * bus_to_virt (unsigned long address)
* Change "struct page" to physical address.
*/
-extern inline void *ioremap(unsigned long offset, unsigned long size)
+static inline void *ioremap(unsigned long offset, unsigned long size)
{
return (void *) CACHED_ADDR_IO(offset);
}
-extern inline void *ioremap_nocache(unsigned long offset, unsigned long size)
+static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
{
return (void *) BYPASS_ADDR_IO(offset);
}
-extern inline void iounmap(void *addr)
+static inline void iounmap(void *addr)
{
}
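
The comment above calls these conversions trivial because the kernel's cached KSEG window maps physical memory 1:1: PHYSADDR() strips the window offset and CACHED_ADDR() adds it back. A hedged usage sketch (the buffer is hypothetical):

    void *vaddr = kmalloc(64, GFP_KERNEL);       /* lowmem, i.e. a KSEG address */
    unsigned long paddr = virt_to_phys(vaddr);
    BUG_ON(phys_to_virt(paddr) != vaddr);        /* holds only for kernel (KSEG) addresses */
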
diff --git a/include/asm-xtensa/mmu_context.h b/include/asm-xtensa/mmu_context.h
index 1b0801548cd..364a7b057bf 100644
--- a/include/asm-xtensa/mmu_context.h
+++ b/include/asm-xtensa/mmu_context.h
@@ -199,13 +199,13 @@ extern pgd_t *current_pgd;
#define ASID_FIRST_VERSION \
((unsigned long)(~ASID_VERSION_MASK) + 1 + ASID_FIRST_NONRESERVED)
-extern inline void set_rasid_register (unsigned long val)
+static inline void set_rasid_register (unsigned long val)
{
__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
" isync\n" : : "a" (val));
}
-extern inline unsigned long get_rasid_register (void)
+static inline unsigned long get_rasid_register (void)
{
unsigned long tmp;
__asm__ __volatile__ (" rsr %0, "__stringify(RASID)"\n\t" : "=a" (tmp));
@@ -215,7 +215,7 @@ extern inline unsigned long get_rasid_register (void)
#if ((XCHAL_MMU_ASID_INVALID == 0) && (XCHAL_MMU_ASID_KERNEL == 1))
-extern inline void
+static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
{
extern void flush_tlb_all(void);
@@ -234,7 +234,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
/* XCHAL_MMU_ASID_INVALID == 0 and XCHAL_MMU_ASID_KERNEL ==1 are
really the best, but if you insist... */
-extern inline int validate_asid (unsigned long asid)
+static inline int validate_asid (unsigned long asid)
{
switch (asid) {
case XCHAL_MMU_ASID_INVALID:
@@ -247,7 +247,7 @@ extern inline int validate_asid (unsigned long asid)
return 1; /* valid */
}
-extern inline void
+static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
{
extern void flush_tlb_all(void);
@@ -274,14 +274,14 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
* instance.
*/
-extern inline int
+static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
mm->context = NO_CONTEXT;
return 0;
}
-extern inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned long asid = asid_cache;
@@ -301,7 +301,7 @@ extern inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* Destroy context related info for an mm_struct that is about
* to be put to rest.
*/
-extern inline void destroy_context(struct mm_struct *mm)
+static inline void destroy_context(struct mm_struct *mm)
{
/* Nothing to do. */
}
@@ -310,7 +310,7 @@ extern inline void destroy_context(struct mm_struct *mm)
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
-extern inline void
+static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
/* Unconditionally get a new ASID. */
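
A hedged sketch of the generation check these helpers implement (the function name is hypothetical; the real test lives in switch_mm() above):

    static inline void hypothetical_check_asid(struct mm_struct *mm)
    {
            /* asid_cache holds "version | asid"; an mm carrying an
             * older version (or NO_CONTEXT) must be handed a fresh
             * ASID before its mappings are installed.  Wrapping the
             * ASID space bumps the version and flushes the TLB, so
             * stale translations cannot be reused. */
            if (mm->context == NO_CONTEXT ||
                ((mm->context ^ asid_cache) & ASID_VERSION_MASK))
                    get_new_mmu_context(mm, asid_cache);
    }
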
diff --git a/include/asm-xtensa/page.h b/include/asm-xtensa/page.h
index b495e5b5a94..8ded36f255a 100644
--- a/include/asm-xtensa/page.h
+++ b/include/asm-xtensa/page.h
@@ -55,7 +55,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
* Pure 2^n version of get_order
*/
-extern __inline__ int get_order(unsigned long size)
+static inline int get_order(unsigned long size)
{
int order;
#ifndef XCHAL_HAVE_NSU
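
get_order() returns the smallest n for which (PAGE_SIZE << n) covers the requested size. A hedged usage sketch with a 4 KiB PAGE_SIZE (the values are worked examples, not taken from the patch):

    /* get_order(1)     == 0   -- one page
     * get_order(4096)  == 0
     * get_order(4097)  == 1   -- rounds up to two pages
     * get_order(65536) == 4   -- sixteen pages
     */
    unsigned long buf = __get_free_pages(GFP_KERNEL, get_order(65536));
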
diff --git a/include/asm-xtensa/page.h.n b/include/asm-xtensa/page.h.n
deleted file mode 100644
index 546cc6624f2..00000000000
--- a/include/asm-xtensa/page.h.n
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * linux/include/asm-xtensa/page.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version2 as
- * published by the Free Software Foundation.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_PAGE_H
-#define _XTENSA_PAGE_H
-
-#ifdef __KERNEL__
-
-#include <asm/processor.h>
-#include <linux/config.h>
-
-/*
- * PAGE_SHIFT determines the page size
- * PAGE_ALIGN(x) aligns the pointer to the (next) page boundary
- */
-#define PAGE_SHIFT XCHAL_MMU_MIN_PTE_PAGE_SIZE
-#define PAGE_SIZE (1 << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
-#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK)
-
-#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS)
-#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
-
-#ifdef __ASSEMBLY__
-
-#define __pgprot(x) (x)
-
-#else
-
-
-/*
- * These are used to make use of C type-checking..
- */
-typedef struct { unsigned long pte; } pte_t; /* page table entry */
-typedef struct { unsigned long pmd; } pmd_t; /* PMD table entry */
-typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */
-typedef struct { unsigned long pgprot; } pgprot_t;
-
-#define pte_val(x) ((x).pte)
-#define pmd_val(x) ((x).pmd)
-#define pgd_val(x) ((x).pgd)
-#define pgprot_val(x) ((x).pgprot)
-
-#define __pte(x) ((pte_t) { (x) } )
-#define __pmd(x) ((pmd_t) { (x) } )
-#define __pgd(x) ((pgd_t) { (x) } )
-#define __pgprot(x) ((pgprot_t) { (x) } )
-
-/*
- * Pure 2^n version of get_order
- */
-extern __inline__ int get_order(unsigned long size)
-{
- int order;
-#ifndef XCHAL_HAVE_NSU
- unsigned long x1, x2, x4, x8, x16;
-
- size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- x1 = size & 0xAAAAAAAA;
- x2 = size & 0xCCCCCCCC;
- x4 = size & 0xF0F0F0F0;
- x8 = size & 0xFF00FF00;
- x16 = size & 0xFFFF0000;
- order = x2 ? 2 : 0;
- order += (x16 != 0) * 16;
- order += (x8 != 0) * 8;
- order += (x4 != 0) * 4;
- order += (x1 != 0);
-
- return order;
-#else
- size = (size - 1) >> PAGE_SHIFT;
- asm ("nsau %0, %1" : "=r" (order) : "r" (size));
- return 32 - order;
-#endif
-}
-
-
-struct page;
-extern void clear_page(void *page);
-extern void copy_page(void *to, void *from);
-
-/*
- * If we have cache aliasing and writeback caches, we might have to do
- * some extra work
- */
-
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
-void clear_user_page(void *addr, unsigned long vaddr, struct page* page);
-void copy_user_page(void *to, void* from, unsigned long vaddr, struct page* page);
-#else
-# define clear_user_page(page,vaddr,pg) clear_page(page)
-# define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#endif
-
-
-/*
- * This handles the memory map. We handle pages at
- * XCHAL_KSEG_CACHED_VADDR for kernels with 32 bit address space.
- * These macros are for conversion of kernel address, not user
- * addresses.
- */
-
-#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
-#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
-#define pfn_valid(pfn) ((unsigned long)pfn < max_mapnr)
-#ifndef CONFIG_DISCONTIGMEM
-# define pfn_to_page(pfn) (mem_map + (pfn))
-# define page_to_pfn(page) ((unsigned long)((page) - mem_map))
-#else
-# error CONFIG_DISCONTIGMEM not supported
-#endif
-
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-
-#define WANT_PAGE_VIRTUAL
-
-
-#endif /* __ASSEMBLY__ */
-
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#endif /* __KERNEL__ */
-#endif /* _XTENSA_PAGE_H */
diff --git a/include/asm-xtensa/pci.h b/include/asm-xtensa/pci.h
index 6817742301c..24eb7fc25da 100644
--- a/include/asm-xtensa/pci.h
+++ b/include/asm-xtensa/pci.h
@@ -22,12 +22,12 @@
extern struct pci_controller* pcibios_alloc_controller(void);
-extern inline void pcibios_set_master(struct pci_dev *dev)
+static inline void pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
}
-extern inline void pcibios_penalize_isa_irq(int irq)
+static inline void pcibios_penalize_isa_irq(int irq)
{
/* We don't do dynamic PCI IRQ allocation */
}
diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h
index 0bb6416ae26..883ebc2d75d 100644
--- a/include/asm-xtensa/pgtable.h
+++ b/include/asm-xtensa/pgtable.h
@@ -260,7 +260,7 @@ static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pt
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
@@ -278,14 +278,14 @@ static inline void update_pte(pte_t *ptep, pte_t pteval)
#endif
}
-extern inline void
+static inline void
set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
{
update_pte(ptep, pteval);
}
-extern inline void
+static inline void
set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
*pmdp = pmdval;
diff --git a/include/asm-xtensa/semaphore.h b/include/asm-xtensa/semaphore.h
index c8a7574a9a5..db740b8bc6f 100644
--- a/include/asm-xtensa/semaphore.h
+++ b/include/asm-xtensa/semaphore.h
@@ -47,7 +47,7 @@ struct semaphore {
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
-extern inline void sema_init (struct semaphore *sem, int val)
+static inline void sema_init (struct semaphore *sem, int val)
{
/*
* *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
@@ -79,7 +79,7 @@ asmlinkage void __up(struct semaphore * sem);
extern spinlock_t semaphore_wake_lock;
-extern __inline__ void down(struct semaphore * sem)
+static inline void down(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
@@ -89,7 +89,7 @@ extern __inline__ void down(struct semaphore * sem)
__down(sem);
}
-extern __inline__ int down_interruptible(struct semaphore * sem)
+static inline int down_interruptible(struct semaphore * sem)
{
int ret = 0;
#if WAITQUEUE_DEBUG
@@ -101,7 +101,7 @@ extern __inline__ int down_interruptible(struct semaphore * sem)
return ret;
}
-extern __inline__ int down_trylock(struct semaphore * sem)
+static inline int down_trylock(struct semaphore * sem)
{
int ret = 0;
#if WAITQUEUE_DEBUG
@@ -117,7 +117,7 @@ extern __inline__ int down_trylock(struct semaphore * sem)
* Note! This is subtle. We jump to wake people up only if
* the semaphore was negative (== somebody was waiting on it).
*/
-extern __inline__ void up(struct semaphore * sem)
+static inline void up(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
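
A hedged usage sketch for the primitives above (the lock and caller are hypothetical; the error convention follows common kernel practice):

    static DECLARE_MUTEX(my_lock);               /* hypothetical lock, initially unlocked */

    static int do_locked_work(void)
    {
            if (down_interruptible(&my_lock))
                    return -ERESTARTSYS;         /* interrupted by a signal */
            /* ... critical section ... */
            up(&my_lock);
            return 0;
    }
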
diff --git a/include/asm-xtensa/socket.h b/include/asm-xtensa/socket.h
index daccd05a14c..00f83f3a6d7 100644
--- a/include/asm-xtensa/socket.h
+++ b/include/asm-xtensa/socket.h
@@ -24,6 +24,8 @@
#define SO_BROADCAST 6
#define SO_SNDBUF 7
#define SO_RCVBUF 8
+#define SO_SNDBUFFORCE 32
+#define SO_RCVBUFFORCE 33
#define SO_KEEPALIVE 9
#define SO_OOBINLINE 10
#define SO_NO_CHECK 11
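
SO_SNDBUFFORCE and SO_RCVBUFFORCE behave like SO_SNDBUF/SO_RCVBUF but let a CAP_NET_ADMIN caller exceed the wmem_max/rmem_max sysctl limits. A hedged userspace sketch (fd is a hypothetical, already-open socket):

    #include <sys/socket.h>

    int val = 4 * 1024 * 1024;                   /* 4 MiB, may exceed wmem_max */
    setsockopt(fd, SOL_SOCKET, SO_SNDBUFFORCE, &val, sizeof(val));
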
diff --git a/include/asm-xtensa/string.h b/include/asm-xtensa/string.h
index 3f81b27d980..5fb8c27cbef 100644
--- a/include/asm-xtensa/string.h
+++ b/include/asm-xtensa/string.h
@@ -16,7 +16,7 @@
#define _XTENSA_STRING_H
#define __HAVE_ARCH_STRCPY
-extern __inline__ char *strcpy(char *__dest, const char *__src)
+static inline char *strcpy(char *__dest, const char *__src)
{
register char *__xdest = __dest;
unsigned long __dummy;
@@ -35,7 +35,7 @@ extern __inline__ char *strcpy(char *__dest, const char *__src)
}
#define __HAVE_ARCH_STRNCPY
-extern __inline__ char *strncpy(char *__dest, const char *__src, size_t __n)
+static inline char *strncpy(char *__dest, const char *__src, size_t __n)
{
register char *__xdest = __dest;
unsigned long __dummy;
@@ -60,7 +60,7 @@ extern __inline__ char *strncpy(char *__dest, const char *__src, size_t __n)
}
#define __HAVE_ARCH_STRCMP
-extern __inline__ int strcmp(const char *__cs, const char *__ct)
+static inline int strcmp(const char *__cs, const char *__ct)
{
register int __res;
unsigned long __dummy;
@@ -82,7 +82,7 @@ extern __inline__ int strcmp(const char *__cs, const char *__ct)
}
#define __HAVE_ARCH_STRNCMP
-extern __inline__ int strncmp(const char *__cs, const char *__ct, size_t __n)
+static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
{
register int __res;
unsigned long __dummy;
diff --git a/include/asm-xtensa/system.h b/include/asm-xtensa/system.h
index 690fe325e67..f09393232e5 100644
--- a/include/asm-xtensa/system.h
+++ b/include/asm-xtensa/system.h
@@ -56,7 +56,7 @@ static inline int irqs_disabled(void)
#define clear_cpenable() __clear_cpenable()
-extern __inline__ void __clear_cpenable(void)
+static inline void __clear_cpenable(void)
{
#if XCHAL_HAVE_CP
unsigned long i = 0;
@@ -64,7 +64,7 @@ extern __inline__ void __clear_cpenable(void)
#endif
}
-extern __inline__ void enable_coprocessor(int i)
+static inline void enable_coprocessor(int i)
{
#if XCHAL_HAVE_CP
int cp;
@@ -74,7 +74,7 @@ extern __inline__ void enable_coprocessor(int i)
#endif
}
-extern __inline__ void disable_coprocessor(int i)
+static inline void disable_coprocessor(int i)
{
#if XCHAL_HAVE_CP
int cp;
@@ -123,7 +123,7 @@ do { \
* cmpxchg
*/
-extern __inline__ unsigned long
+static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
__asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
@@ -173,7 +173,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
* where no register reference will cause an overflow.
*/
-extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
+static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{
unsigned long tmp;
__asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
diff --git a/include/asm-xtensa/tlbflush.h b/include/asm-xtensa/tlbflush.h
index 23bfe9db45f..43f6ec859af 100644
--- a/include/asm-xtensa/tlbflush.h
+++ b/include/asm-xtensa/tlbflush.h
@@ -39,7 +39,7 @@ extern void flush_tlb_range(struct vm_area_struct*,unsigned long,unsigned long);
* page-table pages.
*/
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
}
@@ -51,26 +51,26 @@ extern inline void flush_tlb_pgtables(struct mm_struct *mm,
#define ITLB_PROBE_SUCCESS (1 << ITLB_WAYS_LOG2)
#define DTLB_PROBE_SUCCESS (1 << DTLB_WAYS_LOG2)
-extern inline unsigned long itlb_probe(unsigned long addr)
+static inline unsigned long itlb_probe(unsigned long addr)
{
unsigned long tmp;
__asm__ __volatile__("pitlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
return tmp;
}
-extern inline unsigned long dtlb_probe(unsigned long addr)
+static inline unsigned long dtlb_probe(unsigned long addr)
{
unsigned long tmp;
__asm__ __volatile__("pdtlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
return tmp;
}
-extern inline void invalidate_itlb_entry (unsigned long probe)
+static inline void invalidate_itlb_entry (unsigned long probe)
{
__asm__ __volatile__("iitlb %0; isync\n\t" : : "a" (probe));
}
-extern inline void invalidate_dtlb_entry (unsigned long probe)
+static inline void invalidate_dtlb_entry (unsigned long probe)
{
__asm__ __volatile__("idtlb %0; dsync\n\t" : : "a" (probe));
}
@@ -80,68 +80,68 @@ extern inline void invalidate_dtlb_entry (unsigned long probe)
* caller must follow up with an 'isync', which can be relatively
* expensive on some Xtensa implementations.
*/
-extern inline void invalidate_itlb_entry_no_isync (unsigned entry)
+static inline void invalidate_itlb_entry_no_isync (unsigned entry)
{
/* Caller must follow up with 'isync'. */
__asm__ __volatile__ ("iitlb %0\n" : : "a" (entry) );
}
-extern inline void invalidate_dtlb_entry_no_isync (unsigned entry)
+static inline void invalidate_dtlb_entry_no_isync (unsigned entry)
{
/* Caller must follow up with 'isync'. */
__asm__ __volatile__ ("idtlb %0\n" : : "a" (entry) );
}
-extern inline void set_itlbcfg_register (unsigned long val)
+static inline void set_itlbcfg_register (unsigned long val)
{
__asm__ __volatile__("wsr %0, "__stringify(ITLBCFG)"\n\t" "isync\n\t"
: : "a" (val));
}
-extern inline void set_dtlbcfg_register (unsigned long val)
+static inline void set_dtlbcfg_register (unsigned long val)
{
__asm__ __volatile__("wsr %0, "__stringify(DTLBCFG)"; dsync\n\t"
: : "a" (val));
}
-extern inline void set_ptevaddr_register (unsigned long val)
+static inline void set_ptevaddr_register (unsigned long val)
{
__asm__ __volatile__(" wsr %0, "__stringify(PTEVADDR)"; isync\n"
: : "a" (val));
}
-extern inline unsigned long read_ptevaddr_register (void)
+static inline unsigned long read_ptevaddr_register (void)
{
unsigned long tmp;
__asm__ __volatile__("rsr %0, "__stringify(PTEVADDR)"\n\t" : "=a" (tmp));
return tmp;
}
-extern inline void write_dtlb_entry (pte_t entry, int way)
+static inline void write_dtlb_entry (pte_t entry, int way)
{
__asm__ __volatile__("wdtlb %1, %0; dsync\n\t"
: : "r" (way), "r" (entry) );
}
-extern inline void write_itlb_entry (pte_t entry, int way)
+static inline void write_itlb_entry (pte_t entry, int way)
{
__asm__ __volatile__("witlb %1, %0; isync\n\t"
: : "r" (way), "r" (entry) );
}
-extern inline void invalidate_page_directory (void)
+static inline void invalidate_page_directory (void)
{
invalidate_dtlb_entry (DTLB_WAY_PGTABLE);
}
-extern inline void invalidate_itlb_mapping (unsigned address)
+static inline void invalidate_itlb_mapping (unsigned address)
{
unsigned long tlb_entry;
while ((tlb_entry = itlb_probe (address)) & ITLB_PROBE_SUCCESS)
invalidate_itlb_entry (tlb_entry);
}
-extern inline void invalidate_dtlb_mapping (unsigned address)
+static inline void invalidate_dtlb_mapping (unsigned address)
{
unsigned long tlb_entry;
while ((tlb_entry = dtlb_probe (address)) & DTLB_PROBE_SUCCESS)
@@ -165,28 +165,28 @@ extern inline void invalidate_dtlb_mapping (unsigned address)
* as[07..00] contain the asid
*/
-extern inline unsigned long read_dtlb_virtual (int way)
+static inline unsigned long read_dtlb_virtual (int way)
{
unsigned long tmp;
__asm__ __volatile__("rdtlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
return tmp;
}
-extern inline unsigned long read_dtlb_translation (int way)
+static inline unsigned long read_dtlb_translation (int way)
{
unsigned long tmp;
__asm__ __volatile__("rdtlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
return tmp;
}
-extern inline unsigned long read_itlb_virtual (int way)
+static inline unsigned long read_itlb_virtual (int way)
{
unsigned long tmp;
__asm__ __volatile__("ritlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
return tmp;
}
-extern inline unsigned long read_itlb_translation (int way)
+static inline unsigned long read_itlb_translation (int way)
{
unsigned long tmp;
__asm__ __volatile__("ritlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
diff --git a/include/asm-xtensa/types.h b/include/asm-xtensa/types.h
index ebac0046985..9d99a8e9e33 100644
--- a/include/asm-xtensa/types.h
+++ b/include/asm-xtensa/types.h
@@ -58,8 +58,6 @@ typedef unsigned long long u64;
typedef u32 dma_addr_t;
-typedef unsigned int kmem_bufctl_t;
-
#endif /* __KERNEL__ */
#endif
diff --git a/include/asm-xtensa/uaccess.h b/include/asm-xtensa/uaccess.h
index 35576b25c7b..fc268ac923c 100644
--- a/include/asm-xtensa/uaccess.h
+++ b/include/asm-xtensa/uaccess.h
@@ -211,7 +211,7 @@
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
-extern inline int verify_area(int type, const void * addr, unsigned long size)
+static inline int verify_area(int type, const void * addr, unsigned long size)
{
return access_ok(type,addr,size) ? 0 : -EFAULT;
}
@@ -464,7 +464,7 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
* success.
*/
-extern inline unsigned long
+static inline unsigned long
__xtensa_clear_user(void *addr, unsigned long size)
{
if ( ! memset(addr, 0, size) )
@@ -472,7 +472,7 @@ __xtensa_clear_user(void *addr, unsigned long size)
return 0;
}
-extern inline unsigned long
+static inline unsigned long
clear_user(void *addr, unsigned long size)
{
if (access_ok(VERIFY_WRITE, addr, size))
@@ -486,7 +486,7 @@ clear_user(void *addr, unsigned long size)
extern long __strncpy_user(char *, const char *, long);
#define __strncpy_from_user __strncpy_user
-extern inline long
+static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
if (access_ok(VERIFY_READ, src, 1))
@@ -502,7 +502,7 @@ strncpy_from_user(char *dst, const char *src, long count)
*/
extern long __strnlen_user(const char *, long);
-extern inline long strnlen_user(const char *str, long len)
+static inline long strnlen_user(const char *str, long len)
{
unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;