Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/bio.h                |   7
-rw-r--r--  include/linux/bit_spinlock.h       |  77
-rw-r--r--  include/linux/blkdev.h             |   2
-rw-r--r--  include/linux/chio.h               |   2
-rw-r--r--  include/linux/dmapool.h            |   3
-rw-r--r--  include/linux/fs.h                 |   2
-rw-r--r--  include/linux/jbd.h                |   1
-rw-r--r--  include/linux/jiffies.h            |  40
-rw-r--r--  include/linux/radix-tree.h         |   4
-rw-r--r--  include/linux/reiserfs_fs.h        |   6
-rw-r--r--  include/linux/sched.h              |   4
-rw-r--r--  include/linux/slab.h               |   3
-rw-r--r--  include/linux/spinlock.h           | 627
-rw-r--r--  include/linux/spinlock_api_smp.h   |  57
-rw-r--r--  include/linux/spinlock_api_up.h    |  80
-rw-r--r--  include/linux/spinlock_types.h     |  67
-rw-r--r--  include/linux/spinlock_types_up.h  |  51
-rw-r--r--  include/linux/spinlock_up.h        |  74
-rw-r--r--  include/linux/time.h               |   9
-rw-r--r--  include/linux/writeback.h          |   2
20 files changed, 571 insertions(+), 547 deletions(-)
diff --git a/include/linux/bio.h b/include/linux/bio.h
index cdaf03a14a5..6e1c79c8b6b 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -314,9 +314,8 @@ void zero_fill_bio(struct bio *bio);
  * bvec_kmap_irq and bvec_kunmap_irq!!
  *
  * This function MUST be inlined - it plays with the CPU interrupt flags.
- * Hence the `extern inline'.
  */
-extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
+static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
 {
 	unsigned long addr;
@@ -332,7 +331,7 @@ extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
 	return (char *) addr + bvec->bv_offset;
 }
 
-extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
 {
 	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
@@ -345,7 +344,7 @@ extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
 #define bvec_kunmap_irq(buf, flags)	do { *(flags) = 0; } while (0)
 #endif
 
-extern inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
+static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
 				   unsigned long *flags)
 {
 	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
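The conversion matters because gcc's old `extern inline' semantics emit no out-of-line copy of the function: any call the compiler declines to inline becomes an unresolved external symbol, while `static inline' always has a fallback definition. A minimal sketch of how the helpers above are typically called (the function name is hypothetical; the pattern mirrors what zero_fill_bio() itself does). The mapping disables interrupts, so the critical section must not sleep:

	#include <linux/bio.h>
	#include <linux/string.h>

	static void zero_first_segment(struct bio *bio)
	{
		struct bio_vec *bvec = bio_iovec_idx(bio, 0);
		unsigned long flags;
		char *data;

		/* atomic kmap: interrupts stay off until the unmap */
		data = bvec_kmap_irq(bvec, &flags);
		memset(data, 0, bvec->bv_len);
		bvec_kunmap_irq(data, &flags);
	}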
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
new file mode 100644
index 00000000000..6b20af0bbb7
--- /dev/null
+++ b/include/linux/bit_spinlock.h
@@ -0,0 +1,77 @@
+#ifndef __LINUX_BIT_SPINLOCK_H
+#define __LINUX_BIT_SPINLOCK_H
+
+/*
+ *  bit-based spin_lock()
+ *
+ * Don't use this unless you really need to: spin_lock() and spin_unlock()
+ * are significantly faster.
+ */
+static inline void bit_spin_lock(int bitnum, unsigned long *addr)
+{
+	/*
+	 * Assuming the lock is uncontended, this never enters
+	 * the body of the outer loop. If it is contended, then
+	 * within the inner loop a non-atomic test is used to
+	 * busywait with less bus contention for a good time to
+	 * attempt to acquire the lock bit.
+	 */
+	preempt_disable();
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	while (test_and_set_bit(bitnum, addr)) {
+		while (test_bit(bitnum, addr)) {
+			preempt_enable();
+			cpu_relax();
+			preempt_disable();
+		}
+	}
+#endif
+	__acquire(bitlock);
+}
+
+/*
+ * Return true if it was acquired
+ */
+static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+{
+	preempt_disable();
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	if (test_and_set_bit(bitnum, addr)) {
+		preempt_enable();
+		return 0;
+	}
+#endif
+	__acquire(bitlock);
+	return 1;
+}
+
+/*
+ *  bit-based spin_unlock()
+ */
+static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+{
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	BUG_ON(!test_bit(bitnum, addr));
+	smp_mb__before_clear_bit();
+	clear_bit(bitnum, addr);
+#endif
+	preempt_enable();
+	__release(bitlock);
+}
+
+/*
+ * Return true if the lock is held.
+ */
+static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
+{
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	return test_bit(bitnum, addr);
+#elif defined CONFIG_PREEMPT
+	return preempt_count();
+#else
+	return 1;
+#endif
+}
+
+#endif /* __LINUX_BIT_SPINLOCK_H */
+
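bit_spin_lock() packs a spinlock into a single bit of an existing word, which is why jbd.h starts including this header below: jbd keeps its buffer-state locks in bits of a field it already has. A minimal sketch of the pattern, with all names hypothetical:

	#include <linux/bit_spinlock.h>

	#define MY_LOCK_BIT	0	/* hypothetical bit in ->flags */

	struct my_obj {			/* hypothetical structure */
		unsigned long flags;
		int payload;
	};

	static void my_obj_update(struct my_obj *obj, int v)
	{
		bit_spin_lock(MY_LOCK_BIT, &obj->flags);
		obj->payload = v;	/* critical section */
		bit_spin_unlock(MY_LOCK_BIT, &obj->flags);
	}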
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index aefa26fbae8..efdc9b5bc05 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -728,7 +728,7 @@ static inline unsigned int blksize_bits(unsigned int size)
 	return bits;
 }
 
-extern inline unsigned int block_size(struct block_device *bdev)
+static inline unsigned int block_size(struct block_device *bdev)
 {
 	return bdev->bd_block_size;
 }
diff --git a/include/linux/chio.h b/include/linux/chio.h
index 63035ae67e6..a404c111c93 100644
--- a/include/linux/chio.h
+++ b/include/linux/chio.h
@@ -96,7 +96,7 @@ struct changer_position {
  */
 struct changer_element_status {
 	int		ces_type;
-	unsigned char	*ces_data;
+	unsigned char	__user *ces_data;
 };
 
 #define CESTATUS_FULL	  0x01 /* full */
 #define CESTATUS_IMPEXP	  0x02 /* media was imported (inserted by sysop) */
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index e60bfdac348..4932ee5c77f 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -19,7 +19,8 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 
 void dma_pool_destroy(struct dma_pool *pool);
 
-void *dma_pool_alloc(struct dma_pool *pool, int mem_flags, dma_addr_t *handle);
+void *dma_pool_alloc(struct dma_pool *pool, unsigned int __nocast mem_flags,
+		     dma_addr_t *handle);
 
 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7f61227827d..e0b77c5af9a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1509,8 +1509,6 @@ extern void do_generic_mapping_read(struct address_space *mapping,
 			loff_t *, read_descriptor_t *, read_actor_t);
 extern void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
-extern ssize_t generic_file_direct_IO(int rw, struct kiocb *iocb,
-	const struct iovec *iov, loff_t offset, unsigned long nr_segs);
 extern ssize_t generic_file_readv(struct file *filp, const struct iovec *iov,
 	unsigned long nr_segs, loff_t *ppos);
 ssize_t generic_file_writev(struct file *filp, const struct iovec *iov,
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 84321a4cac9..de097269bd7 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -28,6 +28,7 @@
 #include <linux/buffer_head.h>
 #include <linux/journal-head.h>
 #include <linux/stddef.h>
+#include <linux/bit_spinlock.h>
 #include <asm/semaphore.h>
 #endif
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index d7a2555a886..6acfdbba734 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -254,23 +254,23 @@ static inline u64 get_jiffies_64(void)
  */
 static inline unsigned int jiffies_to_msecs(const unsigned long j)
 {
-#if HZ <= 1000 && !(1000 % HZ)
-	return (1000 / HZ) * j;
-#elif HZ > 1000 && !(HZ % 1000)
-	return (j + (HZ / 1000) - 1)/(HZ / 1000);
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+	return (MSEC_PER_SEC / HZ) * j;
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+	return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
 #else
-	return (j * 1000) / HZ;
+	return (j * MSEC_PER_SEC) / HZ;
 #endif
 }
 
 static inline unsigned int jiffies_to_usecs(const unsigned long j)
 {
-#if HZ <= 1000000 && !(1000000 % HZ)
-	return (1000000 / HZ) * j;
-#elif HZ > 1000000 && !(HZ % 1000000)
-	return (j + (HZ / 1000000) - 1)/(HZ / 1000000);
+#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+	return (USEC_PER_SEC / HZ) * j;
+#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
+	return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
 #else
-	return (j * 1000000) / HZ;
+	return (j * USEC_PER_SEC) / HZ;
 #endif
 }
 
@@ -278,12 +278,12 @@ static inline unsigned long msecs_to_jiffies(const unsigned int m)
 {
 	if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
 		return MAX_JIFFY_OFFSET;
-#if HZ <= 1000 && !(1000 % HZ)
-	return (m + (1000 / HZ) - 1) / (1000 / HZ);
-#elif HZ > 1000 && !(HZ % 1000)
-	return m * (HZ / 1000);
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+	return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+	return m * (HZ / MSEC_PER_SEC);
 #else
-	return (m * HZ + 999) / 1000;
+	return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
 #endif
 }
 
@@ -291,12 +291,12 @@ static inline unsigned long usecs_to_jiffies(const unsigned int u)
 {
 	if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
 		return MAX_JIFFY_OFFSET;
-#if HZ <= 1000000 && !(1000000 % HZ)
-	return (u + (1000000 / HZ) - 1) / (1000000 / HZ);
-#elif HZ > 1000000 && !(HZ % 1000000)
-	return u * (HZ / 1000000);
+#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+	return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
+#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
+	return u * (HZ / USEC_PER_SEC);
 #else
-	return (u * HZ + 999999) / 1000000;
+	return (u * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC;
 #endif
 }
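The named constants make the rounding rule easier to audit: msecs_to_jiffies() must round up so a requested timeout is never shortened. A standalone sketch of that arithmetic, assuming HZ=250 (one tick = 4 ms):

	#include <stdio.h>

	#define HZ		250
	#define MSEC_PER_SEC	1000L

	static unsigned long msecs_to_jiffies(unsigned int m)
	{
		/* matches the HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) branch */
		return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
	}

	int main(void)
	{
		/* 1 ms and 4 ms both cost one 4 ms tick; 5 ms rounds up to two */
		printf("%lu %lu %lu\n", msecs_to_jiffies(1),
		       msecs_to_jiffies(4), msecs_to_jiffies(5));	/* 1 1 2 */
		return 0;
	}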
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 8081a281fa5..9c51917b1cc 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -24,7 +24,7 @@
 struct radix_tree_root {
 	unsigned int		height;
-	int			gfp_mask;
+	unsigned int		gfp_mask;
 	struct radix_tree_node	*rnode;
 };
 
@@ -50,7 +50,7 @@ void *radix_tree_delete(struct radix_tree_root *, unsigned long);
 unsigned int
 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 			unsigned long first_index, unsigned int max_items);
-int radix_tree_preload(int gfp_mask);
+int radix_tree_preload(unsigned int __nocast gfp_mask);
 void radix_tree_init(void);
 void *radix_tree_tag_set(struct radix_tree_root *root,
 			unsigned long index, int tag);
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 17e458e17e2..af00b10294c 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -2097,7 +2097,7 @@ void reiserfs_free_block(struct reiserfs_transaction_handle *th, struct inode *,
 		b_blocknr_t, int for_unformatted);
 int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *, b_blocknr_t *, int,
 		int);
-extern inline int reiserfs_new_form_blocknrs(struct tree_balance *tb,
+static inline int reiserfs_new_form_blocknrs(struct tree_balance *tb,
 		b_blocknr_t * new_blocknrs,
 		int amount_needed)
 {
@@ -2113,7 +2113,7 @@ extern inline int reiserfs_new_form_blocknrs(struct tree_balance *tb,
 		0);
 }
 
-extern inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle
+static inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle
 		*th, struct inode *inode,
 		b_blocknr_t * new_blocknrs,
 		struct path *path, long block)
@@ -2130,7 +2130,7 @@ extern inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle
 }
 
 #ifdef REISERFS_PREALLOCATE
-extern inline int reiserfs_new_unf_blocknrs2(struct reiserfs_transaction_handle
+static inline int reiserfs_new_unf_blocknrs2(struct reiserfs_transaction_handle
 		*th, struct inode *inode,
 		b_blocknr_t * new_blocknrs,
 		struct path *path, long block)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c551e6a1447..4b83cb23000 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -114,6 +114,7 @@ extern unsigned long nr_iowait(void);
 #define TASK_TRACED		8
 #define EXIT_ZOMBIE		16
 #define EXIT_DEAD		32
+#define TASK_NONINTERACTIVE	64
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
@@ -202,6 +203,8 @@ extern int in_sched_functions(unsigned long addr);
 
 #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
 extern signed long FASTCALL(schedule_timeout(signed long timeout));
+extern signed long schedule_timeout_interruptible(signed long timeout);
+extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
 
 struct namespace;
@@ -782,6 +785,7 @@ struct task_struct {
 	short il_next;
 #endif
 #ifdef CONFIG_CPUSETS
+	short cpuset_sem_nest_depth;
 	struct cpuset *cpuset;
 	nodemask_t mems_allowed;
 	int cpuset_mems_generation;
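The two new schedule_timeout helpers fold the easy-to-forget set_current_state() call into the sleep itself. A sketch of the before/after, with the function name hypothetical:

	#include <linux/sched.h>

	static void wait_one_second(void)
	{
		/* old idiom:
		 *	set_current_state(TASK_INTERRUPTIBLE);
		 *	schedule_timeout(HZ);
		 */
		schedule_timeout_interruptible(HZ);	/* wakes early on a signal */
	}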
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 42a6bea58af..1f356f3bbc6 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -118,7 +118,8 @@ extern void kfree(const void *);
 extern unsigned int ksize(const void *);
 
 #ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(kmem_cache_t *, int flags, int node);
+extern void *kmem_cache_alloc_node(kmem_cache_t *,
+			unsigned int __nocast flags, int node);
 extern void *kmalloc_node(size_t size, unsigned int __nocast flags, int node);
 #else
 static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node)
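__nocast is a sparse annotation, not a language feature: generated code is unchanged, but the checker can warn when a plain integer is implicitly converted into the annotated gfp-flags parameter. A minimal NUMA allocation sketch, assuming a CONFIG_NUMA kernel (names hypothetical):

	#include <linux/slab.h>

	static void *alloc_on_node(int node)
	{
		/* GFP_KERNEL stays an unsigned value end to end; sparse can
		 * now flag callers that smuggle flags through a plain int */
		return kmalloc_node(128, GFP_KERNEL, node);
	}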
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index d6ba068719b..cdc99a27840 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -2,7 +2,48 @@
 #define __LINUX_SPINLOCK_H
 
 /*
- * include/linux/spinlock.h - generic locking declarations
+ * include/linux/spinlock.h - generic spinlock/rwlock declarations
+ *
+ * here's the role of the various spinlock/rwlock related include files:
+ *
+ * on SMP builds:
+ *
+ *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ *                        initializers
+ *
+ *  linux/spinlock_types.h:
+ *                        defines the generic type and initializers
+ *
+ *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
+ *                        implementations, mostly inline assembly code
+ *
+ *   (also included on UP-debug builds:)
+ *
+ *  linux/spinlock_api_smp.h:
+ *                        contains the prototypes for the _spin_*() APIs.
+ *
+ *  linux/spinlock.h:     builds the final spin_*() APIs.
+ *
+ * on UP builds:
+ *
+ *  linux/spinlock_types_up.h:
+ *                        contains the generic, simplified UP spinlock type.
+ *                        (which is an empty structure on non-debug builds)
+ *
+ *  linux/spinlock_types.h:
+ *                        defines the generic type and initializers
+ *
+ *  linux/spinlock_up.h:
+ *                        contains the __raw_spin_*()/etc. version of UP
+ *                        builds. (which are NOPs on non-debug, non-preempt
+ *                        builds)
+ *
+ *   (included on UP-non-debug builds:)
+ *
+ *  linux/spinlock_api_up.h:
+ *                        builds the _spin_*() APIs.
+ *
+ *  linux/spinlock.h:     builds the final spin_*() APIs.
  */
 
 #include <linux/config.h>
@@ -13,7 +54,6 @@
 #include <linux/kernel.h>
 #include <linux/stringify.h>
 
-#include <asm/processor.h>	/* for cpu relax */
 #include <asm/system.h>
 
 /*
@@ -35,423 +75,84 @@
 #define __lockfunc fastcall __attribute__((section(".spinlock.text")))
 
 /*
- * If CONFIG_SMP is set, pull in the _raw_* definitions
+ * Pull the raw_spinlock_t and raw_rwlock_t definitions:
  */
-#ifdef CONFIG_SMP
-
-#define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))
-#include <asm/spinlock.h>
-
-int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _read_trylock(rwlock_t *lock);
-int __lockfunc _write_trylock(rwlock_t *lock);
-
-void __lockfunc _spin_lock(spinlock_t *lock)	__acquires(spinlock_t);
-void __lockfunc _read_lock(rwlock_t *lock)	__acquires(rwlock_t);
-void __lockfunc _write_lock(rwlock_t *lock)	__acquires(rwlock_t);
-
-void __lockfunc _spin_unlock(spinlock_t *lock)	__releases(spinlock_t);
-void __lockfunc _read_unlock(rwlock_t *lock)	__releases(rwlock_t);
-void __lockfunc _write_unlock(rwlock_t *lock)	__releases(rwlock_t);
-
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)	__acquires(spinlock_t);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)	__acquires(rwlock_t);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)	__acquires(rwlock_t);
-
-void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(spinlock_t);
-void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(spinlock_t);
-void __lockfunc _read_lock_irq(rwlock_t *lock)		__acquires(rwlock_t);
-void __lockfunc _read_lock_bh(rwlock_t *lock)		__acquires(rwlock_t);
-void __lockfunc _write_lock_irq(rwlock_t *lock)		__acquires(rwlock_t);
-void __lockfunc _write_lock_bh(rwlock_t *lock)		__acquires(rwlock_t);
-
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)	__releases(spinlock_t);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)	__releases(spinlock_t);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)	__releases(spinlock_t);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)	__releases(rwlock_t);
-void __lockfunc _read_unlock_irq(rwlock_t *lock)	__releases(rwlock_t);
-void __lockfunc _read_unlock_bh(rwlock_t *lock)		__releases(rwlock_t);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)	__releases(rwlock_t);
-void __lockfunc _write_unlock_irq(rwlock_t *lock)	__releases(rwlock_t);
-void __lockfunc _write_unlock_bh(rwlock_t *lock)	__releases(rwlock_t);
-
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-int __lockfunc generic_raw_read_trylock(rwlock_t *lock);
-int in_lock_functions(unsigned long addr);
-
-#else
+#include <linux/spinlock_types.h>
 
-#define in_lock_functions(ADDR) 0
+extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
 
-#if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK)
-# define _atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
-# define ATOMIC_DEC_AND_LOCK
-#endif
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-
-#define SPINLOCK_MAGIC	0x1D244B3C
-typedef struct {
-	unsigned long magic;
-	volatile unsigned long lock;
-	volatile unsigned int babble;
-	const char *module;
-	char *owner;
-	int oline;
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__ , NULL, 0}
-
-#define spin_lock_init(x) \
-	do { \
-		(x)->magic = SPINLOCK_MAGIC; \
-		(x)->lock = 0; \
-		(x)->babble = 5; \
-		(x)->module = __FILE__; \
-		(x)->owner = NULL; \
-		(x)->oline = 0; \
-	} while (0)
-
-#define CHECK_LOCK(x) \
-	do { \
-		if ((x)->magic != SPINLOCK_MAGIC) { \
-			printk(KERN_ERR "%s:%d: spin_is_locked on uninitialized spinlock %p.\n", \
-					__FILE__, __LINE__, (x)); \
-		} \
-	} while(0)
-
-#define _raw_spin_lock(x) \
-	do { \
-		CHECK_LOCK(x); \
-		if ((x)->lock&&(x)->babble) { \
-			(x)->babble--; \
-			printk("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \
-					__FILE__,__LINE__, (x)->module, \
-					(x), (x)->owner, (x)->oline); \
-		} \
-		(x)->lock = 1; \
-		(x)->owner = __FILE__; \
-		(x)->oline = __LINE__; \
-	} while (0)
-
-/* without debugging, spin_is_locked on UP always says
- * FALSE. --> printk if already locked. */
-#define spin_is_locked(x) \
-	({ \
-		CHECK_LOCK(x); \
-		if ((x)->lock&&(x)->babble) { \
-			(x)->babble--; \
-			printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \
-					__FILE__,__LINE__, (x)->module, \
-					(x), (x)->owner, (x)->oline); \
-		} \
-		0; \
-	})
-
-/* with debugging, assert_spin_locked() on UP does check
- * the lock value properly */
-#define assert_spin_locked(x) \
-	({ \
-		CHECK_LOCK(x); \
-		BUG_ON(!(x)->lock); \
-	})
-
-/* without debugging, spin_trylock on UP always says
- * TRUE. --> printk if already locked. */
-#define _raw_spin_trylock(x) \
-	({ \
-		CHECK_LOCK(x); \
-		if ((x)->lock&&(x)->babble) { \
-			(x)->babble--; \
-			printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \
-					__FILE__,__LINE__, (x)->module, \
-					(x), (x)->owner, (x)->oline); \
-		} \
-		(x)->lock = 1; \
-		(x)->owner = __FILE__; \
-		(x)->oline = __LINE__; \
-		1; \
-	})
-
-#define spin_unlock_wait(x) \
-	do { \
-		CHECK_LOCK(x); \
-		if ((x)->lock&&(x)->babble) { \
-			(x)->babble--; \
-			printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \
-					__FILE__,__LINE__, (x)->module, (x), \
-					(x)->owner, (x)->oline); \
-		}\
-	} while (0)
-
-#define _raw_spin_unlock(x) \
-	do { \
-		CHECK_LOCK(x); \
-		if (!(x)->lock&&(x)->babble) { \
-			(x)->babble--; \
-			printk("%s:%d: spin_unlock(%s:%p) not locked\n", \
-					__FILE__,__LINE__, (x)->module, (x));\
-		} \
-		(x)->lock = 0; \
-	} while (0)
-#else
 
 /*
- * gcc versions before ~2.95 have a nasty bug with empty initializers.
+ * Pull the __raw*() functions/declarations (UP-nondebug doesn't need them):
  */
-#if (__GNUC__ > 2)
-	typedef struct { } spinlock_t;
-	#define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+#if defined(CONFIG_SMP)
+# include <asm/spinlock.h>
 #else
-	typedef struct { int gcc_is_buggy; } spinlock_t;
-	#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+# include <linux/spinlock_up.h>
 #endif
 
+#define spin_lock_init(lock)	do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
+#define rwlock_init(lock)	do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+
+#define spin_is_locked(lock)	__raw_spin_is_locked(&(lock)->raw_lock)
+
+/**
+ * spin_unlock_wait - wait until the spinlock gets unlocked
+ * @lock: the spinlock in question.
+ */
+#define spin_unlock_wait(lock)	__raw_spin_unlock_wait(&(lock)->raw_lock)
+
 /*
- * If CONFIG_SMP is unset, declare the _raw_* definitions as nops
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
  */
-#define spin_lock_init(lock)	do { (void)(lock); } while(0)
-#define _raw_spin_lock(lock)	do { (void)(lock); } while(0)
-#define spin_is_locked(lock)	((void)(lock), 0)
-#define assert_spin_locked(lock)	do { (void)(lock); } while(0)
-#define _raw_spin_trylock(lock)	(((void)(lock), 1))
-#define spin_unlock_wait(lock)	(void)(lock)
-#define _raw_spin_unlock(lock)	do { (void)(lock); } while(0)
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-/* RW spinlocks: No debug version */
-
-#if (__GNUC__ > 2)
-	typedef struct { } rwlock_t;
-	#define RW_LOCK_UNLOCKED (rwlock_t) { }
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
 #else
-	typedef struct { int gcc_is_buggy; } rwlock_t;
-	#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
+# include <linux/spinlock_api_up.h>
 #endif
 
-#define rwlock_init(lock)	do { (void)(lock); } while(0)
-#define _raw_read_lock(lock)	do { (void)(lock); } while(0)
-#define _raw_read_unlock(lock)	do { (void)(lock); } while(0)
-#define _raw_write_lock(lock)	do { (void)(lock); } while(0)
-#define _raw_write_unlock(lock)	do { (void)(lock); } while(0)
-#define read_can_lock(lock)	(((void)(lock), 1))
-#define write_can_lock(lock)	(((void)(lock), 1))
-#define _raw_read_trylock(lock) ({ (void)(lock); (1); })
-#define _raw_write_trylock(lock) ({ (void)(lock); (1); })
-
-#define _spin_trylock(lock)	({preempt_disable(); _raw_spin_trylock(lock) ? \
-				1 : ({preempt_enable(); 0;});})
-
-#define _read_trylock(lock)	({preempt_disable();_raw_read_trylock(lock) ? \
-				1 : ({preempt_enable(); 0;});})
-
-#define _write_trylock(lock)	({preempt_disable(); _raw_write_trylock(lock) ? \
-				1 : ({preempt_enable(); 0;});})
-
-#define _spin_trylock_bh(lock)	({preempt_disable(); local_bh_disable(); \
-				_raw_spin_trylock(lock) ? \
-				1 : ({preempt_enable_no_resched(); local_bh_enable(); 0;});})
-
-#define _spin_lock(lock)	\
-do { \
-	preempt_disable(); \
-	_raw_spin_lock(lock); \
-	__acquire(lock); \
-} while(0)
-
-#define _write_lock(lock) \
-do { \
-	preempt_disable(); \
-	_raw_write_lock(lock); \
-	__acquire(lock); \
-} while(0)
-
-#define _read_lock(lock)	\
-do { \
-	preempt_disable(); \
-	_raw_read_lock(lock); \
-	__acquire(lock); \
-} while(0)
-
-#define _spin_unlock(lock) \
-do { \
-	_raw_spin_unlock(lock); \
-	preempt_enable(); \
-	__release(lock); \
-} while (0)
-
-#define _write_unlock(lock) \
-do { \
-	_raw_write_unlock(lock); \
-	preempt_enable(); \
-	__release(lock); \
-} while(0)
-
-#define _read_unlock(lock) \
-do { \
-	_raw_read_unlock(lock); \
-	preempt_enable(); \
-	__release(lock); \
-} while(0)
-
-#define _spin_lock_irqsave(lock, flags) \
-do { \
-	local_irq_save(flags); \
-	preempt_disable(); \
-	_raw_spin_lock(lock); \
-	__acquire(lock); \
-} while (0)
-
-#define _spin_lock_irq(lock) \
-do { \
-	local_irq_disable(); \
-	preempt_disable(); \
-	_raw_spin_lock(lock); \
-	__acquire(lock); \
-} while (0)
-
-#define _spin_lock_bh(lock) \
-do { \
-	local_bh_disable(); \
-	preempt_disable(); \
-	_raw_spin_lock(lock); \
-	__acquire(lock); \
-} while (0)
-
-#define _read_lock_irqsave(lock, flags) \
-do { \
-	local_irq_save(flags); \
-	preempt_disable(); \
-	_raw_read_lock(lock); \
-	__acquire(lock); \
-} while (0)
-
-#define _read_lock_irq(lock) \
-do { \
-	local_irq_disable(); \
-	preempt_disable(); \
-	_raw_read_lock(lock); \
-	__acquire(lock); \
-} while (0)
-
-#define _read_lock_bh(lock) \
-do { \
-	local_bh_disable(); \
-	preempt_disable(); \
-	_raw_read_lock(lock); \
-	__acquire(lock); \
-} while (0)
-
-#define _write_lock_irqsave(lock, flags) \
-do { \
-	local_irq_save(flags); \
-	preempt_disable(); \
-	_raw_write_lock(lock); \
-	__acquire(lock); \
-} while (0)
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void _raw_spin_lock(spinlock_t *lock);
+#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+ extern int _raw_spin_trylock(spinlock_t *lock);
+ extern void _raw_spin_unlock(spinlock_t *lock);
+
+ extern void _raw_read_lock(rwlock_t *lock);
+ extern int _raw_read_trylock(rwlock_t *lock);
+ extern void _raw_read_unlock(rwlock_t *lock);
+ extern void _raw_write_lock(rwlock_t *lock);
+ extern int _raw_write_trylock(rwlock_t *lock);
+ extern void _raw_write_unlock(rwlock_t *lock);
+#else
+# define _raw_spin_unlock(lock)		__raw_spin_unlock(&(lock)->raw_lock)
+# define _raw_spin_trylock(lock)	__raw_spin_trylock(&(lock)->raw_lock)
+# define _raw_spin_lock(lock)		__raw_spin_lock(&(lock)->raw_lock)
+# define _raw_spin_lock_flags(lock, flags) \
+		__raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_read_lock(rwlock)		__raw_read_lock(&(rwlock)->raw_lock)
+# define _raw_write_lock(rwlock)	__raw_write_lock(&(rwlock)->raw_lock)
+# define _raw_read_unlock(rwlock)	__raw_read_unlock(&(rwlock)->raw_lock)
+# define _raw_write_unlock(rwlock)	__raw_write_unlock(&(rwlock)->raw_lock)
+# define _raw_read_trylock(rwlock)	__raw_read_trylock(&(rwlock)->raw_lock)
+# define _raw_write_trylock(rwlock)	__raw_write_trylock(&(rwlock)->raw_lock)
+#endif
 
-#define _write_lock_irq(lock) \
-do { \
-	local_irq_disable(); \
-	preempt_disable(); \
-	_raw_write_lock(lock); \
-	__acquire(lock); \
-} while (0)
-
-#define _write_lock_bh(lock) \
-do { \
-	local_bh_disable(); \
-	preempt_disable(); \
-	_raw_write_lock(lock); \
-	__acquire(lock); \
-} while (0)
-
-#define _spin_unlock_irqrestore(lock, flags) \
-do { \
-	_raw_spin_unlock(lock); \
-	local_irq_restore(flags); \
-	preempt_enable(); \
-	__release(lock); \
-} while (0)
-
-#define _spin_unlock_irq(lock) \
-do { \
-	_raw_spin_unlock(lock); \
-	local_irq_enable(); \
-	preempt_enable(); \
-	__release(lock); \
-} while (0)
-
-#define _spin_unlock_bh(lock) \
-do { \
-	_raw_spin_unlock(lock); \
-	preempt_enable_no_resched(); \
-	local_bh_enable(); \
-	__release(lock); \
-} while (0)
-
-#define _write_unlock_bh(lock) \
-do { \
-	_raw_write_unlock(lock); \
-	preempt_enable_no_resched(); \
-	local_bh_enable(); \
-	__release(lock); \
-} while (0)
-
-#define _read_unlock_irqrestore(lock, flags) \
-do { \
-	_raw_read_unlock(lock); \
-	local_irq_restore(flags); \
-	preempt_enable(); \
-	__release(lock); \
-} while (0)
-
-#define _write_unlock_irqrestore(lock, flags) \
-do { \
-	_raw_write_unlock(lock); \
-	local_irq_restore(flags); \
-	preempt_enable(); \
-	__release(lock); \
-} while (0)
-
-#define _read_unlock_irq(lock)	\
-do { \
-	_raw_read_unlock(lock);	\
-	local_irq_enable();	\
-	preempt_enable();	\
-	__release(lock); \
-} while (0)
-
-#define _read_unlock_bh(lock)	\
-do { \
-	_raw_read_unlock(lock);	\
-	preempt_enable_no_resched();	\
-	local_bh_enable();	\
-	__release(lock); \
-} while (0)
-
-#define _write_unlock_irq(lock)	\
-do { \
-	_raw_write_unlock(lock);	\
-	local_irq_enable();	\
-	preempt_enable();	\
-	__release(lock); \
-} while (0)
-
-#endif /* !SMP */
+#define read_can_lock(rwlock)		__raw_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock)		__raw_write_can_lock(&(rwlock)->raw_lock)
 
 /*
  * Define the various spin_lock and rw_lock methods.  Note we define these
  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
  * methods are defined as nops in the case they are not required.
  */
-#define spin_trylock(lock)	__cond_lock(_spin_trylock(lock))
-#define read_trylock(lock)	__cond_lock(_read_trylock(lock))
-#define write_trylock(lock)	__cond_lock(_write_trylock(lock))
+#define spin_trylock(lock)		__cond_lock(_spin_trylock(lock))
+#define read_trylock(lock)		__cond_lock(_read_trylock(lock))
+#define write_trylock(lock)		__cond_lock(_write_trylock(lock))
 
-#define spin_lock(lock)		_spin_lock(lock)
-#define write_lock(lock)	_write_lock(lock)
-#define read_lock(lock)		_read_lock(lock)
+#define spin_lock(lock)			_spin_lock(lock)
+#define write_lock(lock)		_write_lock(lock)
+#define read_lock(lock)			_read_lock(lock)
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 #define spin_lock_irqsave(lock, flags)	flags = _spin_lock_irqsave(lock)
 #define read_lock_irqsave(lock, flags)	flags = _read_lock_irqsave(lock)
 #define write_lock_irqsave(lock, flags)	flags = _write_lock_irqsave(lock)
@@ -470,137 +171,59 @@ do { \
 #define write_lock_irq(lock)		_write_lock_irq(lock)
 #define write_lock_bh(lock)		_write_lock_bh(lock)
 
-#define spin_unlock(lock)	_spin_unlock(lock)
-#define write_unlock(lock)	_write_unlock(lock)
-#define read_unlock(lock)	_read_unlock(lock)
+#define spin_unlock(lock)		_spin_unlock(lock)
+#define write_unlock(lock)		_write_unlock(lock)
+#define read_unlock(lock)		_read_unlock(lock)
 
-#define spin_unlock_irqrestore(lock, flags)	_spin_unlock_irqrestore(lock, flags)
+#define spin_unlock_irqrestore(lock, flags) \
+					_spin_unlock_irqrestore(lock, flags)
 #define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
 #define spin_unlock_bh(lock)		_spin_unlock_bh(lock)
 
-#define read_unlock_irqrestore(lock, flags)	_read_unlock_irqrestore(lock, flags)
-#define read_unlock_irq(lock)	_read_unlock_irq(lock)
-#define read_unlock_bh(lock)	_read_unlock_bh(lock)
+#define read_unlock_irqrestore(lock, flags) \
+					_read_unlock_irqrestore(lock, flags)
+#define read_unlock_irq(lock)		_read_unlock_irq(lock)
+#define read_unlock_bh(lock)		_read_unlock_bh(lock)
 
-#define write_unlock_irqrestore(lock, flags)	_write_unlock_irqrestore(lock, flags)
-#define write_unlock_irq(lock)	_write_unlock_irq(lock)
-#define write_unlock_bh(lock)	_write_unlock_bh(lock)
+#define write_unlock_irqrestore(lock, flags) \
+					_write_unlock_irqrestore(lock, flags)
+#define write_unlock_irq(lock)		_write_unlock_irq(lock)
+#define write_unlock_bh(lock)		_write_unlock_bh(lock)
 
-#define spin_trylock_bh(lock)	__cond_lock(_spin_trylock_bh(lock))
+#define spin_trylock_bh(lock)		__cond_lock(_spin_trylock_bh(lock))
 
 #define spin_trylock_irq(lock) \
 ({ \
 	local_irq_disable(); \
 	_spin_trylock(lock) ? \
-	1 : ({local_irq_enable(); 0; }); \
+	1 : ({ local_irq_enable(); 0;  }); \
 })
 
 #define spin_trylock_irqsave(lock, flags) \
 ({ \
 	local_irq_save(flags); \
 	_spin_trylock(lock) ? \
-	1 : ({local_irq_restore(flags); 0;}); \
+	1 : ({ local_irq_restore(flags); 0; }); \
 })
 
-#ifdef CONFIG_LOCKMETER
-extern void _metered_spin_lock   (spinlock_t *lock);
-extern void _metered_spin_unlock (spinlock_t *lock);
-extern int  _metered_spin_trylock(spinlock_t *lock);
-extern void _metered_read_lock    (rwlock_t *lock);
-extern void _metered_read_unlock  (rwlock_t *lock);
-extern void _metered_write_lock   (rwlock_t *lock);
-extern void _metered_write_unlock (rwlock_t *lock);
-extern int  _metered_read_trylock (rwlock_t *lock);
-extern int  _metered_write_trylock(rwlock_t *lock);
-#endif
-
-/* "lock on reference count zero" */
-#ifndef ATOMIC_DEC_AND_LOCK
-#include <asm/atomic.h>
-extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
-#endif
-
-#define atomic_dec_and_lock(atomic,lock) __cond_lock(_atomic_dec_and_lock(atomic,lock))
-
-/*
- *  bit-based spin_lock()
- *
- * Don't use this unless you really need to: spin_lock() and spin_unlock()
- * are significantly faster.
- */
-static inline void bit_spin_lock(int bitnum, unsigned long *addr)
-{
-	/*
-	 * Assuming the lock is uncontended, this never enters
-	 * the body of the outer loop. If it is contended, then
-	 * within the inner loop a non-atomic test is used to
-	 * busywait with less bus contention for a good time to
-	 * attempt to acquire the lock bit.
-	 */
-	preempt_disable();
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	while (test_and_set_bit(bitnum, addr)) {
-		while (test_bit(bitnum, addr)) {
-			preempt_enable();
-			cpu_relax();
-			preempt_disable();
-		}
-	}
-#endif
-	__acquire(bitlock);
-}
-
 /*
- * Return true if it was acquired
+ * Pull the atomic_t declaration:
+ * (asm-mips/atomic.h needs above definitions)
  */
-static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
-{
-	preempt_disable();
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	if (test_and_set_bit(bitnum, addr)) {
-		preempt_enable();
-		return 0;
-	}
-#endif
-	__acquire(bitlock);
-	return 1;
-}
-
-/*
- *  bit-based spin_unlock()
- */
-static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
-{
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	BUG_ON(!test_bit(bitnum, addr));
-	smp_mb__before_clear_bit();
-	clear_bit(bitnum, addr);
-#endif
-	preempt_enable();
-	__release(bitlock);
-}
-
-/*
- * Return true if the lock is held.
+#include <asm/atomic.h>
+/**
+ * atomic_dec_and_lock - lock on reaching reference count zero
+ * @atomic: the atomic counter
+ * @lock: the spinlock in question
  */
-static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
-{
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	return test_bit(bitnum, addr);
-#elif defined CONFIG_PREEMPT
-	return preempt_count();
-#else
-	return 1;
-#endif
-}
-
-#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
-#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
+extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
+#define atomic_dec_and_lock(atomic, lock) \
+		__cond_lock(_atomic_dec_and_lock(atomic, lock))
 
 /**
  * spin_can_lock - would spin_trylock() succeed?
  * @lock: the spinlock in question.
  */
-#define spin_can_lock(lock)	(!spin_is_locked(lock))
+#define spin_can_lock(lock)		(!spin_is_locked(lock))
 
 #endif /* __LINUX_SPINLOCK_H */
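atomic_dec_and_lock(), documented just above, takes the lock only when the counter actually reaches zero, keeping the common refcount-drop path lock-free. The canonical pattern, sketched with hypothetical names:

	struct my_obj {
		atomic_t refcount;
		struct list_head node;
	};

	static spinlock_t all_objs_lock = SPIN_LOCK_UNLOCKED;

	static void my_obj_put(struct my_obj *obj)
	{
		/* returns 1 with the lock held iff refcount hit zero */
		if (atomic_dec_and_lock(&obj->refcount, &all_objs_lock)) {
			list_del(&obj->node);
			spin_unlock(&all_objs_lock);
			kfree(obj);
		}
	}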
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
new file mode 100644
index 00000000000..78e6989ffb5
--- /dev/null
+++ b/include/linux/spinlock_api_smp.h
@@ -0,0 +1,57 @@
+#ifndef __LINUX_SPINLOCK_API_SMP_H
+#define __LINUX_SPINLOCK_API_SMP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_api_smp.h
+ *
+ * spinlock API declarations on SMP (and debug)
+ * (implemented in kernel/spinlock.c)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+int in_lock_functions(unsigned long addr);
+
+#define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))
+
+void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(spinlock_t);
+void __lockfunc _read_lock(rwlock_t *lock)		__acquires(rwlock_t);
+void __lockfunc _write_lock(rwlock_t *lock)		__acquires(rwlock_t);
+void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(spinlock_t);
+void __lockfunc _read_lock_bh(rwlock_t *lock)		__acquires(rwlock_t);
+void __lockfunc _write_lock_bh(rwlock_t *lock)		__acquires(rwlock_t);
+void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(spinlock_t);
+void __lockfunc _read_lock_irq(rwlock_t *lock)		__acquires(rwlock_t);
+void __lockfunc _write_lock_irq(rwlock_t *lock)		__acquires(rwlock_t);
+unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+							__acquires(spinlock_t);
+unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+							__acquires(rwlock_t);
+unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+							__acquires(rwlock_t);
+int __lockfunc _spin_trylock(spinlock_t *lock);
+int __lockfunc _read_trylock(rwlock_t *lock);
+int __lockfunc _write_trylock(rwlock_t *lock);
+int __lockfunc _spin_trylock_bh(spinlock_t *lock);
+void __lockfunc _spin_unlock(spinlock_t *lock)		__releases(spinlock_t);
+void __lockfunc _read_unlock(rwlock_t *lock)		__releases(rwlock_t);
+void __lockfunc _write_unlock(rwlock_t *lock)		__releases(rwlock_t);
+void __lockfunc _spin_unlock_bh(spinlock_t *lock)	__releases(spinlock_t);
+void __lockfunc _read_unlock_bh(rwlock_t *lock)		__releases(rwlock_t);
+void __lockfunc _write_unlock_bh(rwlock_t *lock)	__releases(rwlock_t);
+void __lockfunc _spin_unlock_irq(spinlock_t *lock)	__releases(spinlock_t);
+void __lockfunc _read_unlock_irq(rwlock_t *lock)	__releases(rwlock_t);
+void __lockfunc _write_unlock_irq(rwlock_t *lock)	__releases(rwlock_t);
+void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+							__releases(spinlock_t);
+void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+							__releases(rwlock_t);
+void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+							__releases(rwlock_t);
+
+#endif /* __LINUX_SPINLOCK_API_SMP_H */
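These are the out-of-line lock functions that the public spin_*() wrappers in spinlock.h expand to on SMP and debug builds. Typical use through the public API, with the lock and counter hypothetical:

	static DEFINE_SPINLOCK(my_lock);
	static int my_count;

	static void bump(void)
	{
		unsigned long flags;

		/* safe in any context, including hardirq */
		spin_lock_irqsave(&my_lock, flags);
		my_count++;
		spin_unlock_irqrestore(&my_lock, flags);
	}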
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
new file mode 100644
index 00000000000..cd81cee566f
--- /dev/null
+++ b/include/linux/spinlock_api_up.h
@@ -0,0 +1,80 @@
+#ifndef __LINUX_SPINLOCK_API_UP_H
+#define __LINUX_SPINLOCK_API_UP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_api_up.h
+ *
+ * spinlock API implementation on UP-nondebug (inlined implementation)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#define in_lock_functions(ADDR)		0
+
+#define assert_spin_locked(lock)	do { (void)(lock); } while (0)
+
+/*
+ * In the UP-nondebug case there's no real locking going on, so the
+ * only thing we have to do is to keep the preempt counts and irq
+ * flags straight, to suppress compiler warnings of unused lock
+ * variables, and to add the proper checker annotations:
+ */
+#define __LOCK(lock) \
+  do { preempt_disable(); __acquire(lock); (void)(lock); } while (0)
+
+#define __LOCK_BH(lock) \
+  do { local_bh_disable(); __LOCK(lock); } while (0)
+
+#define __LOCK_IRQ(lock) \
+  do { local_irq_disable(); __LOCK(lock); } while (0)
+
+#define __LOCK_IRQSAVE(lock, flags) \
+  do { local_irq_save(flags); __LOCK(lock); } while (0)
+
+#define __UNLOCK(lock) \
+  do { preempt_enable(); __release(lock); (void)(lock); } while (0)
+
+#define __UNLOCK_BH(lock) \
+  do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0)
+
+#define __UNLOCK_IRQ(lock) \
+  do { local_irq_enable(); __UNLOCK(lock); } while (0)
+
+#define __UNLOCK_IRQRESTORE(lock, flags) \
+  do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
+
+#define _spin_lock(lock)			__LOCK(lock)
+#define _read_lock(lock)			__LOCK(lock)
+#define _write_lock(lock)			__LOCK(lock)
+#define _spin_lock_bh(lock)			__LOCK_BH(lock)
+#define _read_lock_bh(lock)			__LOCK_BH(lock)
+#define _write_lock_bh(lock)			__LOCK_BH(lock)
+#define _spin_lock_irq(lock)			__LOCK_IRQ(lock)
+#define _read_lock_irq(lock)			__LOCK_IRQ(lock)
+#define _write_lock_irq(lock)			__LOCK_IRQ(lock)
+#define _spin_lock_irqsave(lock, flags)		__LOCK_IRQSAVE(lock, flags)
+#define _read_lock_irqsave(lock, flags)		__LOCK_IRQSAVE(lock, flags)
+#define _write_lock_irqsave(lock, flags)	__LOCK_IRQSAVE(lock, flags)
+#define _spin_trylock(lock)			({ __LOCK(lock); 1; })
+#define _read_trylock(lock)			({ __LOCK(lock); 1; })
+#define _write_trylock(lock)			({ __LOCK(lock); 1; })
+#define _spin_trylock_bh(lock)			({ __LOCK_BH(lock); 1; })
+#define _spin_unlock(lock)			__UNLOCK(lock)
+#define _read_unlock(lock)			__UNLOCK(lock)
+#define _write_unlock(lock)			__UNLOCK(lock)
+#define _spin_unlock_bh(lock)			__UNLOCK_BH(lock)
+#define _write_unlock_bh(lock)			__UNLOCK_BH(lock)
+#define _read_unlock_bh(lock)			__UNLOCK_BH(lock)
+#define _spin_unlock_irq(lock)			__UNLOCK_IRQ(lock)
+#define _read_unlock_irq(lock)			__UNLOCK_IRQ(lock)
+#define _write_unlock_irq(lock)			__UNLOCK_IRQ(lock)
+#define _spin_unlock_irqrestore(lock, flags)	__UNLOCK_IRQRESTORE(lock, flags)
+#define _read_unlock_irqrestore(lock, flags)	__UNLOCK_IRQRESTORE(lock, flags)
+#define _write_unlock_irqrestore(lock, flags)	__UNLOCK_IRQRESTORE(lock, flags)
+
+#endif /* __LINUX_SPINLOCK_API_UP_H */
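On UP-nondebug, the same bump() from the previous sketch touches no lock word at all; expanding __LOCK_IRQSAVE()/__UNLOCK_IRQRESTORE() by hand gives roughly:

	static void bump_up_expanded(void)
	{
		unsigned long flags;

		local_irq_save(flags);		/* from __LOCK_IRQSAVE() */
		preempt_disable();		/* from __LOCK() */
		my_count++;
		local_irq_restore(flags);	/* from __UNLOCK_IRQRESTORE() */
		preempt_enable();		/* from __UNLOCK() */
	}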
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
new file mode 100644
index 00000000000..9cb51e07039
--- /dev/null
+++ b/include/linux/spinlock_types.h
@@ -0,0 +1,67 @@
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#define __LINUX_SPINLOCK_TYPES_H
+
+/*
+ * include/linux/spinlock_types.h - generic spinlock type definitions
+ *                                  and initializers
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+typedef struct {
+	raw_spinlock_t raw_lock;
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+	unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+	unsigned int magic, owner_cpu;
+	void *owner;
+#endif
+} spinlock_t;
+
+#define SPINLOCK_MAGIC		0xdead4ead
+
+typedef struct {
+	raw_rwlock_t raw_lock;
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+	unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+	unsigned int magic, owner_cpu;
+	void *owner;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC		0xdeaf1eed
+
+#define SPINLOCK_OWNER_INIT	((void *)-1L)
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_LOCK_UNLOCKED \
+	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED,	\
+				.magic = SPINLOCK_MAGIC,		\
+				.owner = SPINLOCK_OWNER_INIT,		\
+				.owner_cpu = -1 }
+#define RW_LOCK_UNLOCKED \
+	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED,	\
+				.magic = RWLOCK_MAGIC,			\
+				.owner = SPINLOCK_OWNER_INIT,		\
+				.owner_cpu = -1 }
+#else
+# define SPIN_LOCK_UNLOCKED \
+	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED }
+#define RW_LOCK_UNLOCKED \
+	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED }
+#endif
+
+#define DEFINE_SPINLOCK(x)	spinlock_t x = SPIN_LOCK_UNLOCKED
+#define DEFINE_RWLOCK(x)	rwlock_t x = RW_LOCK_UNLOCKED
+
+#endif /* __LINUX_SPINLOCK_TYPES_H */
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
new file mode 100644
index 00000000000..def2d173a8d
--- /dev/null
+++ b/include/linux/spinlock_types_up.h
@@ -0,0 +1,51 @@
+#ifndef __LINUX_SPINLOCK_TYPES_UP_H
+#define __LINUX_SPINLOCK_TYPES_UP_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_types_up.h - spinlock type definitions for UP
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+typedef struct {
+	volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+
+#else
+
+/*
+ * All gcc 2.95 versions and early versions of 2.96 have a nasty bug
+ * with empty initializers.
+ */
+#if (__GNUC__ > 2)
+typedef struct { } raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { }
+#else
+typedef struct { int gcc_is_buggy; } raw_spinlock_t;
+#define __RAW_SPIN_LOCK_UNLOCKED (raw_spinlock_t) { 0 }
+#endif
+
+#endif
+
+#if (__GNUC__ > 2)
+typedef struct {
+	/* no debug version on UP */
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED { }
+#else
+typedef struct { int gcc_is_buggy; } raw_rwlock_t;
+#define __RAW_RW_LOCK_UNLOCKED (raw_rwlock_t) { 0 }
+#endif
+
+#endif /* __LINUX_SPINLOCK_TYPES_UP_H */
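spinlock_types.h provides both initialization styles for the new two-level lock type; a short sketch with hypothetical names:

	static DEFINE_SPINLOCK(global_lock);	/* static initialization */

	struct my_dev {
		spinlock_t lock;
	};

	static void my_dev_init(struct my_dev *d)
	{
		spin_lock_init(&d->lock);	/* runtime initialization */
	}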
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
new file mode 100644
index 00000000000..31accf2f0b1
--- /dev/null
+++ b/include/linux/spinlock_up.h
@@ -0,0 +1,74 @@
+#ifndef __LINUX_SPINLOCK_UP_H
+#define __LINUX_SPINLOCK_UP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_up.h - UP-debug version of spinlocks.
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ *
+ * In the debug case, 1 means unlocked, 0 means locked. (the values
+ * are inverted, to catch initialization bugs)
+ *
+ * No atomicity anywhere, we are on UP.
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+#define __raw_spin_is_locked(x)		((x)->slock == 0)
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+	lock->slock = 0;
+}
+
+static inline void
+__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+{
+	local_irq_save(flags);
+	lock->slock = 0;
+}
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+	char oldval = lock->slock;
+
+	lock->slock = 0;
+
+	return oldval > 0;
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+	lock->slock = 1;
+}
+
+/*
+ * Read-write spinlocks. No debug version.
+ */
+#define __raw_read_lock(lock)		do { (void)(lock); } while (0)
+#define __raw_write_lock(lock)		do { (void)(lock); } while (0)
+#define __raw_read_trylock(lock)	({ (void)(lock); 1; })
+#define __raw_write_trylock(lock)	({ (void)(lock); 1; })
+#define __raw_read_unlock(lock)		do { (void)(lock); } while (0)
+#define __raw_write_unlock(lock)	do { (void)(lock); } while (0)
+
+#else /* DEBUG_SPINLOCK */
+#define __raw_spin_is_locked(lock)	((void)(lock), 0)
+/* for sched.c and kernel_lock.c: */
+# define __raw_spin_lock(lock)		do { (void)(lock); } while (0)
+# define __raw_spin_unlock(lock)	do { (void)(lock); } while (0)
+# define __raw_spin_trylock(lock)	({ (void)(lock); 1; })
+#endif /* DEBUG_SPINLOCK */
+
+#define __raw_read_can_lock(lock)	(((void)(lock), 1))
+#define __raw_write_can_lock(lock)	(((void)(lock), 1))
+
+#define __raw_spin_unlock_wait(lock) \
+		do { cpu_relax(); } while (__raw_spin_is_locked(lock))
+
+#endif /* __LINUX_SPINLOCK_UP_H */
diff --git a/include/linux/time.h b/include/linux/time.h
index c10d4c21c18..8e83f4e778b 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -28,17 +28,10 @@ struct timezone {
 #ifdef __KERNEL__
 
 /* Parameters used to convert the timespec values */
-#ifndef USEC_PER_SEC
+#define MSEC_PER_SEC (1000L)
 #define USEC_PER_SEC (1000000L)
-#endif
-
-#ifndef NSEC_PER_SEC
 #define NSEC_PER_SEC (1000000000L)
-#endif
-
-#ifndef NSEC_PER_USEC
 #define NSEC_PER_USEC (1000L)
-#endif
 
 static __inline__ int timespec_equal(struct timespec *a, struct timespec *b)
 {
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 542dbaee651..343d883d69c 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -109,8 +109,6 @@ int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
 int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
 int sync_page_range(struct inode *inode, struct address_space *mapping,
 			loff_t pos, size_t count);
-int sync_page_range_nolock(struct inode *inode, struct address_space
-		*mapping, loff_t pos, size_t count);
 
 /* pdflush.c */
 extern int nr_pdflush_threads;	/* Global so it can be exported to sysctl