Diffstat (limited to 'include/linux')
144 files changed, 2893 insertions, 1026 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 71d70d1fbce..4c4142c5aa6 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -189,7 +189,6 @@ unifdef-y += connector.h unifdef-y += cuda.h unifdef-y += cyclades.h unifdef-y += dccp.h -unifdef-y += dirent.h unifdef-y += dlm.h unifdef-y += dlm_plock.h unifdef-y += edd.h @@ -256,7 +255,9 @@ unifdef-y += kd.h unifdef-y += kernelcapi.h unifdef-y += kernel.h unifdef-y += keyboard.h +ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/kvm.h),) unifdef-y += kvm.h +endif unifdef-y += llc.h unifdef-y += loop.h unifdef-y += lp.h diff --git a/include/linux/acct.h b/include/linux/acct.h index e8cae54e8d8..882dc724876 100644 --- a/include/linux/acct.h +++ b/include/linux/acct.h @@ -120,17 +120,20 @@ struct acct_v3 struct vfsmount; struct super_block; struct pacct_struct; +struct pid_namespace; extern void acct_auto_close_mnt(struct vfsmount *m); extern void acct_auto_close(struct super_block *sb); extern void acct_init_pacct(struct pacct_struct *pacct); extern void acct_collect(long exitcode, int group_dead); extern void acct_process(void); +extern void acct_exit_ns(struct pid_namespace *); #else #define acct_auto_close_mnt(x) do { } while (0) #define acct_auto_close(x) do { } while (0) #define acct_init_pacct(x) do { } while (0) #define acct_collect(x,y) do { } while (0) #define acct_process() do { } while (0) +#define acct_exit_ns(ns) do { } while (0) #endif /* diff --git a/include/linux/acpi.h b/include/linux/acpi.h index a1717763937..702f79dad16 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -236,6 +236,7 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n, const char *name); #ifdef CONFIG_PM_SLEEP +void __init acpi_no_s4_hw_signature(void); void __init acpi_old_suspend_ordering(void); #endif /* CONFIG_PM_SLEEP */ #else /* CONFIG_ACPI */ diff --git a/include/linux/aio.h b/include/linux/aio.h index b51ddd28444..09b276c3522 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -7,7 +7,6 @@ #include <linux/uio.h> #include <asm/atomic.h> -#include <linux/uio.h> #define AIO_MAXSEGS 4 #define AIO_KIOGRP_NR_ATOMIC 8 diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h index 6129e58ca7c..e0a0cdc2da4 100644 --- a/include/linux/anon_inodes.h +++ b/include/linux/anon_inodes.h @@ -9,7 +9,7 @@ #define _LINUX_ANON_INODES_H int anon_inode_getfd(const char *name, const struct file_operations *fops, - void *priv); + void *priv, int flags); #endif /* _LINUX_ANON_INODES_H */ diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index eb640f0acfa..0f50d4cc436 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -101,21 +101,14 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, /** * async_tx_sync_epilog - actions to take if an operation is run synchronously - * @flags: async_tx flags - * @depend_tx: transaction depends on depend_tx * @cb_fn: function to call when the transaction completes * @cb_fn_param: parameter to pass to the callback routine */ static inline void -async_tx_sync_epilog(unsigned long flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param) +async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_fn_param) { if (cb_fn) cb_fn(cb_fn_param); - - if (depend_tx && (flags & ASYNC_TX_DEP_ACK)) - async_tx_ack(depend_tx); } void @@ -152,4 +145,6 @@ struct dma_async_tx_descriptor * async_trigger_callback(enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, 
dma_async_tx_callback cb_fn, void *cb_fn_param); + +void async_tx_quiesce(struct dma_async_tx_descriptor **tx); #endif /* _ASYNC_TX_H_ */ diff --git a/include/linux/atmel-pwm-bl.h b/include/linux/atmel-pwm-bl.h new file mode 100644 index 00000000000..0153a47806c --- /dev/null +++ b/include/linux/atmel-pwm-bl.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2007 Atmel Corporation + * + * Driver for the AT32AP700X PS/2 controller (PSIF). + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#ifndef __INCLUDE_ATMEL_PWM_BL_H +#define __INCLUDE_ATMEL_PWM_BL_H + +/** + * struct atmel_pwm_bl_platform_data + * @pwm_channel: which PWM channel in the PWM module to use. + * @pwm_frequency: PWM frequency to generate, the driver will try to be as + * close as the prescaler allows. + * @pwm_compare_max: value to use in the PWM channel compare register. + * @pwm_duty_max: maximum duty cycle value, must be less than or equal to + * pwm_compare_max. + * @pwm_duty_min: minimum duty cycle value, must be less than pwm_duty_max. + * @pwm_active_low: set to one if the low part of the PWM signal increases the + * brightness of the backlight. + * @gpio_on: GPIO line to control the backlight on/off, set to -1 if not used. + * @on_active_low: set to one if the on/off signal is on when GPIO is low. + * + * This struct must be added to the platform device in the board code. It is + * used by the atmel-pwm-bl driver to setup the GPIO to control on/off and the + * PWM device. + */ +struct atmel_pwm_bl_platform_data { + unsigned int pwm_channel; + unsigned int pwm_frequency; + unsigned int pwm_compare_max; + unsigned int pwm_duty_max; + unsigned int pwm_duty_min; + unsigned int pwm_active_low; + int gpio_on; + unsigned int on_active_low; +}; + +#endif /* __INCLUDE_ATMEL_PWM_BL_H */ diff --git a/include/linux/audit.h b/include/linux/audit.h index 8b82974bdc1..6272a395d43 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -286,7 +286,6 @@ #define AUDIT_ARCH_SHEL64 (EM_SH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) #define AUDIT_ARCH_SPARC (EM_SPARC) #define AUDIT_ARCH_SPARC64 (EM_SPARCV9|__AUDIT_ARCH_64BIT) -#define AUDIT_ARCH_V850 (EM_V850|__AUDIT_ARCH_LE) #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) #define AUDIT_PERM_EXEC 1 diff --git a/include/linux/auto_fs4.h b/include/linux/auto_fs4.h index 31a29541b50..b785c6f8644 100644 --- a/include/linux/auto_fs4.h +++ b/include/linux/auto_fs4.h @@ -98,8 +98,6 @@ union autofs_v5_packet_union { #define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI #define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI #define AUTOFS_IOC_PROTOSUBVER _IOR(0x93,0x67,int) -#define AUTOFS_IOC_ASKREGHOST _IOR(0x93,0x68,int) -#define AUTOFS_IOC_TOGGLEREGHOST _IOR(0x93,0x69,int) #define AUTOFS_IOC_ASKUMOUNT _IOR(0x93,0x70,int) diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h index 0da17d14fd1..d7afa9dd663 100644 --- a/include/linux/auxvec.h +++ b/include/linux/auxvec.h @@ -26,9 +26,13 @@ #define AT_SECURE 23 /* secure mode boolean */ +#define AT_BASE_PLATFORM 24 /* string identifying real platform, may + * differ from AT_PLATFORM. 
*/ + #define AT_EXECFN 31 /* filename of program */ + #ifdef __KERNEL__ -#define AT_VECTOR_SIZE_BASE 17 /* NEW_AUX_ENT entries in auxiliary table */ +#define AT_VECTOR_SIZE_BASE 18 /* NEW_AUX_ENT entries in auxiliary table */ /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ #endif diff --git a/include/linux/bcd.h b/include/linux/bcd.h index c545308125b..7ac518e3c15 100644 --- a/include/linux/bcd.h +++ b/include/linux/bcd.h @@ -10,8 +10,13 @@ #ifndef _BCD_H #define _BCD_H -#define BCD2BIN(val) (((val) & 0x0f) + ((val)>>4)*10) -#define BIN2BCD(val) ((((val)/10)<<4) + (val)%10) +#include <linux/compiler.h> + +unsigned bcd2bin(unsigned char val) __attribute_const__; +unsigned char bin2bcd(unsigned val) __attribute_const__; + +#define BCD2BIN(val) bcd2bin(val) +#define BIN2BCD(val) bin2bcd(val) /* backwards compat */ #define BCD_TO_BIN(val) ((val)=BCD2BIN(val)) diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index ee0ed48e834..826f6235080 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -38,7 +38,7 @@ struct linux_binprm{ misc_bang:1; struct file * file; int e_uid, e_gid; - kernel_cap_t cap_inheritable, cap_permitted; + kernel_cap_t cap_post_exec_permitted; bool cap_effective; void *security; int argc, envc; diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index a1d9b79078e..652470b687c 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -28,52 +28,73 @@ extern unsigned long saved_max_pfn; * memory pages (including holes) on the node. */ typedef struct bootmem_data { - unsigned long node_boot_start; + unsigned long node_min_pfn; unsigned long node_low_pfn; void *node_bootmem_map; - unsigned long last_offset; - unsigned long last_pos; - unsigned long last_success; /* Previous allocation point. To speed - * up searching */ + unsigned long last_end_off; + unsigned long hint_idx; struct list_head list; } bootmem_data_t; +extern bootmem_data_t bootmem_node_data[]; + extern unsigned long bootmem_bootmap_pages(unsigned long); + +extern unsigned long init_bootmem_node(pg_data_t *pgdat, + unsigned long freepfn, + unsigned long startpfn, + unsigned long endpfn); extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); + +extern unsigned long free_all_bootmem_node(pg_data_t *pgdat); +extern unsigned long free_all_bootmem(void); + +extern void free_bootmem_node(pg_data_t *pgdat, + unsigned long addr, + unsigned long size); extern void free_bootmem(unsigned long addr, unsigned long size); -extern void *__alloc_bootmem(unsigned long size, + +/* + * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE, + * the architecture-specific code should honor this). + * + * If flags is 0, then the return value is always 0 (success). If + * flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the + * memory already was reserved. 
+ */ +#define BOOTMEM_DEFAULT 0 +#define BOOTMEM_EXCLUSIVE (1<<0) + +extern int reserve_bootmem_node(pg_data_t *pgdat, + unsigned long physaddr, + unsigned long size, + int flags); +#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE +extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags); +#endif + +extern void *__alloc_bootmem_nopanic(unsigned long size, unsigned long align, unsigned long goal); -extern void *__alloc_bootmem_nopanic(unsigned long size, +extern void *__alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal); extern void *__alloc_bootmem_low(unsigned long size, unsigned long align, unsigned long goal); +extern void *__alloc_bootmem_node(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal); +extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal); extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal); -extern void *__alloc_bootmem_core(struct bootmem_data *bdata, - unsigned long size, - unsigned long align, - unsigned long goal, - unsigned long limit); - -/* - * flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE, - * the architecture-specific code should honor this) - */ -#define BOOTMEM_DEFAULT 0 -#define BOOTMEM_EXCLUSIVE (1<<0) - #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE -/* - * If flags is 0, then the return value is always 0 (success). If - * flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the - * memory already was reserved. - */ -extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags); #define alloc_bootmem(x) \ __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low(x) \ @@ -82,31 +103,6 @@ extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags); __alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low_pages(x) \ __alloc_bootmem_low(x, PAGE_SIZE, 0) -#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ - -extern int reserve_bootmem_generic(unsigned long addr, unsigned long size, - int flags); -extern unsigned long free_all_bootmem(void); -extern unsigned long free_all_bootmem_node(pg_data_t *pgdat); -extern void *__alloc_bootmem_node(pg_data_t *pgdat, - unsigned long size, - unsigned long align, - unsigned long goal); -extern unsigned long init_bootmem_node(pg_data_t *pgdat, - unsigned long freepfn, - unsigned long startpfn, - unsigned long endpfn); -extern int reserve_bootmem_node(pg_data_t *pgdat, - unsigned long physaddr, - unsigned long size, - int flags); -extern void free_bootmem_node(pg_data_t *pgdat, - unsigned long addr, - unsigned long size); -extern void *alloc_bootmem_section(unsigned long size, - unsigned long section_nr); - -#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE #define alloc_bootmem_node(pgdat, x) \ __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_pages_node(pgdat, x) \ @@ -115,6 +111,12 @@ extern void *alloc_bootmem_section(unsigned long size, __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0) #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ +extern int reserve_bootmem_generic(unsigned long addr, unsigned long size, + int flags); + +extern void *alloc_bootmem_section(unsigned long size, + unsigned long section_nr); + #ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP extern void *alloc_remap(int nid, unsigned long size); #else diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h index 
961ed4b48d8..44f95b92393 100644 --- a/include/linux/byteorder/big_endian.h +++ b/include/linux/byteorder/big_endian.h @@ -94,12 +94,12 @@ static inline __u16 __be16_to_cpup(const __be16 *p) #define __le32_to_cpus(x) __swab32s((x)) #define __cpu_to_le16s(x) __swab16s((x)) #define __le16_to_cpus(x) __swab16s((x)) -#define __cpu_to_be64s(x) do {} while (0) -#define __be64_to_cpus(x) do {} while (0) -#define __cpu_to_be32s(x) do {} while (0) -#define __be32_to_cpus(x) do {} while (0) -#define __cpu_to_be16s(x) do {} while (0) -#define __be16_to_cpus(x) do {} while (0) +#define __cpu_to_be64s(x) do { (void)(x); } while (0) +#define __be64_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_be32s(x) do { (void)(x); } while (0) +#define __be32_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_be16s(x) do { (void)(x); } while (0) +#define __be16_to_cpus(x) do { (void)(x); } while (0) #ifdef __KERNEL__ #include <linux/byteorder/generic.h> diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h index 05dc7c35b3b..4cc170a3176 100644 --- a/include/linux/byteorder/little_endian.h +++ b/include/linux/byteorder/little_endian.h @@ -88,12 +88,12 @@ static inline __u16 __be16_to_cpup(const __be16 *p) { return __swab16p((__u16 *)p); } -#define __cpu_to_le64s(x) do {} while (0) -#define __le64_to_cpus(x) do {} while (0) -#define __cpu_to_le32s(x) do {} while (0) -#define __le32_to_cpus(x) do {} while (0) -#define __cpu_to_le16s(x) do {} while (0) -#define __le16_to_cpus(x) do {} while (0) +#define __cpu_to_le64s(x) do { (void)(x); } while (0) +#define __le64_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_le32s(x) do { (void)(x); } while (0) +#define __le32_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_le16s(x) do { (void)(x); } while (0) +#define __le16_to_cpus(x) do { (void)(x); } while (0) #define __cpu_to_be64s(x) __swab64s((x)) #define __be64_to_cpus(x) __swab64s((x)) #define __cpu_to_be32s(x) __swab32s((x)) diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index e155aa78d85..c98dd7cb707 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -21,11 +21,13 @@ struct cgroupfs_root; struct cgroup_subsys; struct inode; +struct cgroup; extern int cgroup_init_early(void); extern int cgroup_init(void); extern void cgroup_init_smp(void); extern void cgroup_lock(void); +extern bool cgroup_lock_live_group(struct cgroup *cgrp); extern void cgroup_unlock(void); extern void cgroup_fork(struct task_struct *p); extern void cgroup_fork_callbacks(struct task_struct *p); @@ -205,50 +207,64 @@ struct cftype { * subsystem, followed by a period */ char name[MAX_CFTYPE_NAME]; int private; - int (*open) (struct inode *inode, struct file *file); - ssize_t (*read) (struct cgroup *cgrp, struct cftype *cft, - struct file *file, - char __user *buf, size_t nbytes, loff_t *ppos); + + /* + * If non-zero, defines the maximum length of string that can + * be passed to write_string; defaults to 64 + */ + size_t max_write_len; + + int (*open)(struct inode *inode, struct file *file); + ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft, + struct file *file, + char __user *buf, size_t nbytes, loff_t *ppos); /* * read_u64() is a shortcut for the common case of returning a * single integer. 
Use it in place of read() */ - u64 (*read_u64) (struct cgroup *cgrp, struct cftype *cft); + u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft); /* * read_s64() is a signed version of read_u64() */ - s64 (*read_s64) (struct cgroup *cgrp, struct cftype *cft); + s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft); /* * read_map() is used for defining a map of key/value * pairs. It should call cb->fill(cb, key, value) for each * entry. The key/value pairs (and their ordering) should not * change between reboots. */ - int (*read_map) (struct cgroup *cont, struct cftype *cft, - struct cgroup_map_cb *cb); + int (*read_map)(struct cgroup *cont, struct cftype *cft, + struct cgroup_map_cb *cb); /* * read_seq_string() is used for outputting a simple sequence * using seqfile. */ - int (*read_seq_string) (struct cgroup *cont, struct cftype *cft, - struct seq_file *m); + int (*read_seq_string)(struct cgroup *cont, struct cftype *cft, + struct seq_file *m); - ssize_t (*write) (struct cgroup *cgrp, struct cftype *cft, - struct file *file, - const char __user *buf, size_t nbytes, loff_t *ppos); + ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft, + struct file *file, + const char __user *buf, size_t nbytes, loff_t *ppos); /* * write_u64() is a shortcut for the common case of accepting * a single integer (as parsed by simple_strtoull) from * userspace. Use in place of write(); return 0 or error. */ - int (*write_u64) (struct cgroup *cgrp, struct cftype *cft, u64 val); + int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val); /* * write_s64() is a signed version of write_u64() */ - int (*write_s64) (struct cgroup *cgrp, struct cftype *cft, s64 val); + int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val); /* + * write_string() is passed a nul-terminated kernelspace + * buffer of maximum length determined by max_write_len. + * Returns 0 or -ve error code. + */ + int (*write_string)(struct cgroup *cgrp, struct cftype *cft, + const char *buffer); + /* * trigger() callback can be used to get some kick from the * userspace, when the actual string written is not important * at all. The private field can be used to determine the @@ -256,7 +272,7 @@ struct cftype { */ int (*trigger)(struct cgroup *cgrp, unsigned int event); - int (*release) (struct inode *inode, struct file *file); + int (*release)(struct inode *inode, struct file *file); }; struct cgroup_scanner { @@ -348,7 +364,8 @@ static inline struct cgroup* task_cgroup(struct task_struct *task, return task_subsys_state(task, subsys_id)->cgroup; } -int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *ss); +int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *ss, + char *nodename); /* A cgroup_iter should be treated as an opaque object */ struct cgroup_iter { diff --git a/include/linux/coda.h b/include/linux/coda.h index b5cf0780c51..96c87693800 100644 --- a/include/linux/coda.h +++ b/include/linux/coda.h @@ -199,28 +199,6 @@ typedef u_int32_t vuid_t; typedef u_int32_t vgid_t; #endif /*_VUID_T_ */ -#ifdef CONFIG_CODA_FS_OLD_API -struct CodaFid { - u_int32_t opaque[3]; -}; - -static __inline__ ino_t coda_f2i(struct CodaFid *fid) -{ - if ( ! 
fid ) - return 0; - if (fid->opaque[1] == 0xfffffffe || fid->opaque[1] == 0xffffffff) - return ((fid->opaque[0] << 20) | (fid->opaque[2] & 0xfffff)); - else - return (fid->opaque[2] + (fid->opaque[1]<<10) + (fid->opaque[0]<<20)); -} - -struct coda_cred { - vuid_t cr_uid, cr_euid, cr_suid, cr_fsuid; /* Real, efftve, set, fs uid*/ - vgid_t cr_groupid, cr_egid, cr_sgid, cr_fsgid; /* same for groups */ -}; - -#else /* not defined(CONFIG_CODA_FS_OLD_API) */ - struct CodaFid { u_int32_t opaque[4]; }; @@ -228,8 +206,6 @@ struct CodaFid { #define coda_f2i(fid)\ (fid ? (fid->opaque[3] ^ (fid->opaque[2]<<10) ^ (fid->opaque[1]<<20) ^ fid->opaque[0]) : 0) -#endif - #ifndef _VENUS_VATTR_T_ #define _VENUS_VATTR_T_ /* @@ -313,15 +289,7 @@ struct coda_statfs { #define CIOC_KERNEL_VERSION _IOWR('c', 10, size_t) -#if 0 -#define CODA_KERNEL_VERSION 0 /* don't care about kernel version number */ -#define CODA_KERNEL_VERSION 1 /* The old venus 4.6 compatible interface */ -#endif -#ifdef CONFIG_CODA_FS_OLD_API -#define CODA_KERNEL_VERSION 2 /* venus_lookup got an extra parameter */ -#else #define CODA_KERNEL_VERSION 3 /* 128-bit file identifiers */ -#endif /* * Venus <-> Coda RPC arguments @@ -329,16 +297,9 @@ struct coda_statfs { struct coda_in_hdr { u_int32_t opcode; u_int32_t unique; /* Keep multiple outstanding msgs distinct */ -#ifdef CONFIG_CODA_FS_OLD_API - u_int16_t pid; /* Common to all */ - u_int16_t pgid; /* Common to all */ - u_int16_t sid; /* Common to all */ - struct coda_cred cred; /* Common to all */ -#else pid_t pid; pid_t pgid; vuid_t uid; -#endif }; /* Really important that opcode and unique are 1st two fields! */ @@ -613,11 +574,7 @@ struct coda_vget_out { /* CODA_PURGEUSER is a venus->kernel call */ struct coda_purgeuser_out { struct coda_out_hdr oh; -#ifdef CONFIG_CODA_FS_OLD_API - struct coda_cred cred; -#else vuid_t uid; -#endif }; /* coda_zapfile: */ diff --git a/include/linux/consolemap.h b/include/linux/consolemap.h index e2bf7e5db39..c4811da1338 100644 --- a/include/linux/consolemap.h +++ b/include/linux/consolemap.h @@ -3,6 +3,9 @@ * * Interface between console.c, selection.c and consolemap.c */ +#ifndef __LINUX_CONSOLEMAP_H__ +#define __LINUX_CONSOLEMAP_H__ + #define LAT1_MAP 0 #define GRAF_MAP 1 #define IBMPC_MAP 2 @@ -10,6 +13,7 @@ #include <linux/types.h> +#ifdef CONFIG_CONSOLE_TRANSLATIONS struct vc_data; extern u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode); @@ -18,3 +22,13 @@ extern int conv_uni_to_pc(struct vc_data *conp, long ucs); extern u32 conv_8bit_to_uni(unsigned char c); extern int conv_uni_to_8bit(u32 uni); void console_map_init(void); +#else +#define inverse_translate(conp, glyph, uni) ((uint16_t)glyph) +#define set_translate(m, vc) ((unsigned short *)NULL) +#define conv_uni_to_pc(conp, ucs) ((int) (ucs > 0xff ? 
-1: ucs)) +#define conv_8bit_to_uni(c) ((uint32_t)(c)) +#define conv_uni_to_8bit(c) ((int) ((c) & 0xff)) +#define console_map_init(c) do { ; } while (0) +#endif /* CONFIG_CONSOLE_TRANSLATIONS */ + +#endif /* __LINUX_CONSOLEMAP_H__ */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 7464ba3b433..d7faf880849 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -69,10 +69,11 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb) #endif int cpu_up(unsigned int cpu); - extern void cpu_hotplug_init(void); +extern void cpu_maps_update_begin(void); +extern void cpu_maps_update_done(void); -#else +#else /* CONFIG_SMP */ static inline int register_cpu_notifier(struct notifier_block *nb) { @@ -87,10 +88,16 @@ static inline void cpu_hotplug_init(void) { } +static inline void cpu_maps_update_begin(void) +{ +} + +static inline void cpu_maps_update_done(void) +{ +} + #endif /* CONFIG_SMP */ extern struct sysdev_class cpu_sysdev_class; -extern void cpu_maps_update_begin(void); -extern void cpu_maps_update_done(void); #ifdef CONFIG_HOTPLUG_CPU /* Stop CPUs going up and down. */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index c24875bd9c5..1b5c98e7fef 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -17,6 +17,20 @@ * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c. * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c. * + * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . + * Note: The alternate operations with the suffix "_nr" are used + * to limit the range of the loop to nr_cpu_ids instead of + * NR_CPUS when NR_CPUS > 64 for performance reasons. + * If NR_CPUS is <= 64 then most assembler bitmask + * operators execute faster with a constant range, so + * the operator will continue to use NR_CPUS. + * + * Another consideration is that nr_cpu_ids is initialized + * to NR_CPUS and isn't lowered until the possible cpus are + * discovered (including any disabled cpus). So early uses + * will span the entire range of NR_CPUS. + * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . + * * The available cpumask operations are: * * void cpu_set(cpu, mask) turn on bit 'cpu' in mask @@ -38,18 +52,60 @@ * int cpus_empty(mask) Is mask empty (no bits sets)? * int cpus_full(mask) Is mask full (all bits sets)? * int cpus_weight(mask) Hamming weigh - number of set bits + * int cpus_weight_nr(mask) Same using nr_cpu_ids instead of NR_CPUS * * void cpus_shift_right(dst, src, n) Shift right * void cpus_shift_left(dst, src, n) Shift left * * int first_cpu(mask) Number lowest set bit, or NR_CPUS * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS + * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids * * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set + *ifdef CONFIG_HAS_CPUMASK_OF_CPU + * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t *v + * cpumask_of_cpu_ptr_next(v, cpu) Sets v = &cpumask_of_cpu_map[cpu] + * cpumask_of_cpu_ptr(v, cpu) Combines above two operations + *else + * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t _v and *v = &_v + * cpumask_of_cpu_ptr_next(v, cpu) Sets _v = cpumask_of_cpu(cpu) + * cpumask_of_cpu_ptr(v, cpu) Combines above two operations + *endif * CPU_MASK_ALL Initializer - all bits set * CPU_MASK_NONE Initializer - no bits set * unsigned long *cpus_addr(mask) Array of unsigned long's in mask * + * CPUMASK_ALLOC kmalloc's a structure that is a composite of many cpumask_t + * variables, and CPUMASK_PTR provides pointers to each field. 
+ * + * The structure should be defined something like this: + * struct my_cpumasks { + * cpumask_t mask1; + * cpumask_t mask2; + * }; + * + * Usage is then: + * CPUMASK_ALLOC(my_cpumasks); + * CPUMASK_PTR(mask1, my_cpumasks); + * CPUMASK_PTR(mask2, my_cpumasks); + * + * --- DO NOT reference cpumask_t pointers until this check --- + * if (my_cpumasks == NULL) + * "kmalloc failed"... + * + * References are now pointers to the cpumask_t variables (*mask1, ...) + * + *if NR_CPUS > BITS_PER_LONG + * CPUMASK_ALLOC(m) Declares and allocates struct m *m = + * kmalloc(sizeof(*m), GFP_KERNEL) + * CPUMASK_FREE(m) Macro for kfree(m) + *else + * CPUMASK_ALLOC(m) Declares struct m _m, *m = &_m + * CPUMASK_FREE(m) Nop + *endif + * CPUMASK_PTR(v, m) Declares cpumask_t *v = &(m->v) + * ------------------------------------------------------------------------ + * * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing * int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing @@ -59,7 +115,8 @@ * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz * - * for_each_cpu_mask(cpu, mask) for-loop cpu over mask + * for_each_cpu_mask(cpu, mask) for-loop cpu over mask using NR_CPUS + * for_each_cpu_mask_nr(cpu, mask) for-loop cpu over mask using nr_cpu_ids * * int num_online_cpus() Number of online CPUs * int num_possible_cpus() Number of all possible CPUs @@ -216,23 +273,19 @@ static inline void __cpus_shift_left(cpumask_t *dstp, bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); } -#ifdef CONFIG_SMP -int __first_cpu(const cpumask_t *srcp); -#define first_cpu(src) __first_cpu(&(src)) -int __next_cpu(int n, const cpumask_t *srcp); -#define next_cpu(n, src) __next_cpu((n), &(src)) -#else -#define first_cpu(src) ({ (void)(src); 0; }) -#define next_cpu(n, src) ({ (void)(src); 1; }) -#endif #ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP extern cpumask_t *cpumask_of_cpu_map; -#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu]) - +#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu]) +#define cpumask_of_cpu_ptr(v, cpu) \ + const cpumask_t *v = &cpumask_of_cpu(cpu) +#define cpumask_of_cpu_ptr_declare(v) \ + const cpumask_t *v +#define cpumask_of_cpu_ptr_next(v, cpu) \ + v = &cpumask_of_cpu(cpu) #else #define cpumask_of_cpu(cpu) \ -(*({ \ +({ \ typeof(_unused_cpumask_arg_) m; \ if (sizeof(m) == sizeof(unsigned long)) { \ m.bits[0] = 1UL<<(cpu); \ @@ -240,8 +293,16 @@ extern cpumask_t *cpumask_of_cpu_map; cpus_clear(m); \ cpu_set((cpu), m); \ } \ - &m; \ -})) + m; \ +}) +#define cpumask_of_cpu_ptr(v, cpu) \ + cpumask_t _##v = cpumask_of_cpu(cpu); \ + const cpumask_t *v = &_##v +#define cpumask_of_cpu_ptr_declare(v) \ + cpumask_t _##v; \ + const cpumask_t *v = &_##v +#define cpumask_of_cpu_ptr_next(v, cpu) \ + _##v = cpumask_of_cpu(cpu) #endif #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) @@ -281,6 +342,15 @@ extern cpumask_t cpu_mask_all; #define cpus_addr(src) ((src).bits) +#if NR_CPUS > BITS_PER_LONG +#define CPUMASK_ALLOC(m) struct m *m = kmalloc(sizeof(*m), GFP_KERNEL) +#define CPUMASK_FREE(m) kfree(m) +#else +#define CPUMASK_ALLOC(m) struct m _m, *m = &_m +#define CPUMASK_FREE(m) +#endif +#define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v) + #define cpumask_scnprintf(buf, len, src) \ __cpumask_scnprintf((buf), (len), &(src), NR_CPUS) static inline int __cpumask_scnprintf(char *buf, int len, @@ -343,29 +413,59 @@ static inline void 
__cpus_fold(cpumask_t *dstp, const cpumask_t *origp, bitmap_fold(dstp->bits, origp->bits, sz, nbits); } -#if NR_CPUS > 1 -#define for_each_cpu_mask(cpu, mask) \ - for ((cpu) = first_cpu(mask); \ - (cpu) < NR_CPUS; \ - (cpu) = next_cpu((cpu), (mask))) -#else /* NR_CPUS == 1 */ -#define for_each_cpu_mask(cpu, mask) \ +#if NR_CPUS == 1 + +#define nr_cpu_ids 1 +#define first_cpu(src) ({ (void)(src); 0; }) +#define next_cpu(n, src) ({ (void)(src); 1; }) +#define any_online_cpu(mask) 0 +#define for_each_cpu_mask(cpu, mask) \ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) -#endif /* NR_CPUS */ + +#else /* NR_CPUS > 1 */ + +extern int nr_cpu_ids; +int __first_cpu(const cpumask_t *srcp); +int __next_cpu(int n, const cpumask_t *srcp); +int __any_online_cpu(const cpumask_t *mask); + +#define first_cpu(src) __first_cpu(&(src)) +#define next_cpu(n, src) __next_cpu((n), &(src)) +#define any_online_cpu(mask) __any_online_cpu(&(mask)) +#define for_each_cpu_mask(cpu, mask) \ + for ((cpu) = -1; \ + (cpu) = next_cpu((cpu), (mask)), \ + (cpu) < NR_CPUS; ) +#endif + +#if NR_CPUS <= 64 #define next_cpu_nr(n, src) next_cpu(n, src) #define cpus_weight_nr(cpumask) cpus_weight(cpumask) #define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) +#else /* NR_CPUS > 64 */ + +int __next_cpu_nr(int n, const cpumask_t *srcp); +#define next_cpu_nr(n, src) __next_cpu_nr((n), &(src)) +#define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids) +#define for_each_cpu_mask_nr(cpu, mask) \ + for ((cpu) = -1; \ + (cpu) = next_cpu_nr((cpu), (mask)), \ + (cpu) < nr_cpu_ids; ) + +#endif /* NR_CPUS > 64 */ + /* * The following particular system cpumasks and operations manage - * possible, present and online cpus. Each of them is a fixed size + * possible, present, active and online cpus. Each of them is a fixed size * bitmap of size NR_CPUS. 
* * #ifdef CONFIG_HOTPLUG_CPU * cpu_possible_map - has bit 'cpu' set iff cpu is populatable * cpu_present_map - has bit 'cpu' set iff cpu is populated * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler + * cpu_active_map - has bit 'cpu' set iff cpu available to migration * #else * cpu_possible_map - has bit 'cpu' set iff cpu is populated * cpu_present_map - copy of cpu_possible_map @@ -416,14 +516,16 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp, extern cpumask_t cpu_possible_map; extern cpumask_t cpu_online_map; extern cpumask_t cpu_present_map; +extern cpumask_t cpu_active_map; #if NR_CPUS > 1 -#define num_online_cpus() cpus_weight(cpu_online_map) -#define num_possible_cpus() cpus_weight(cpu_possible_map) -#define num_present_cpus() cpus_weight(cpu_present_map) +#define num_online_cpus() cpus_weight_nr(cpu_online_map) +#define num_possible_cpus() cpus_weight_nr(cpu_possible_map) +#define num_present_cpus() cpus_weight_nr(cpu_present_map) #define cpu_online(cpu) cpu_isset((cpu), cpu_online_map) #define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map) #define cpu_present(cpu) cpu_isset((cpu), cpu_present_map) +#define cpu_active(cpu) cpu_isset((cpu), cpu_active_map) #else #define num_online_cpus() 1 #define num_possible_cpus() 1 @@ -431,21 +533,13 @@ extern cpumask_t cpu_present_map; #define cpu_online(cpu) ((cpu) == 0) #define cpu_possible(cpu) ((cpu) == 0) #define cpu_present(cpu) ((cpu) == 0) +#define cpu_active(cpu) ((cpu) == 0) #endif #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) -#ifdef CONFIG_SMP -extern int nr_cpu_ids; -#define any_online_cpu(mask) __any_online_cpu(&(mask)) -int __any_online_cpu(const cpumask_t *mask); -#else -#define nr_cpu_ids 1 -#define any_online_cpu(mask) 0 -#endif - -#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map) -#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map) -#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map) +#define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map) +#define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map) +#define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map) #endif /* __LINUX_CPUMASK_H */ diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 038578362b4..e8f450c499b 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -78,6 +78,8 @@ extern void cpuset_track_online_nodes(void); extern int current_cpuset_is_being_rebound(void); +extern void rebuild_sched_domains(void); + #else /* !CONFIG_CPUSETS */ static inline int cpuset_init_early(void) { return 0; } @@ -156,6 +158,11 @@ static inline int current_cpuset_is_being_rebound(void) return 0; } +static inline void rebuild_sched_domains(void) +{ + partition_sched_domains(0, NULL, NULL); +} + #endif /* !CONFIG_CPUSETS */ #endif /* _LINUX_CPUSET_H */ diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 22c7ac5cd80..025e4f57510 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -8,7 +8,13 @@ #include <linux/proc_fs.h> #define ELFCORE_ADDR_MAX (-1ULL) + +#ifdef CONFIG_PROC_VMCORE extern unsigned long long elfcorehdr_addr; +#else +static const unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; +#endif + extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, unsigned long, int); extern const struct file_operations proc_vmcore_operations; @@ -22,5 +28,13 @@ extern struct proc_dir_entry *proc_vmcore; #define 
vmcore_elf_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x)) +static inline int is_kdump_kernel(void) +{ + return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0; +} +#else /* !CONFIG_CRASH_DUMP */ +static inline int is_kdump_kernel(void) { return 0; } #endif /* CONFIG_CRASH_DUMP */ + +extern unsigned long saved_max_pfn; #endif /* LINUX_CRASHDUMP_H */ diff --git a/include/linux/dca.h b/include/linux/dca.h index af61cd1f37e..b00a753eda5 100644 --- a/include/linux/dca.h +++ b/include/linux/dca.h @@ -10,6 +10,7 @@ void dca_unregister_notify(struct notifier_block *nb); #define DCA_PROVIDER_REMOVE 0x0002 struct dca_provider { + struct list_head node; struct dca_ops *ops; struct device *cd; int id; @@ -18,7 +19,9 @@ struct dca_provider { struct dca_ops { int (*add_requester) (struct dca_provider *, struct device *); int (*remove_requester) (struct dca_provider *, struct device *); - u8 (*get_tag) (struct dca_provider *, int cpu); + u8 (*get_tag) (struct dca_provider *, struct device *, + int cpu); + int (*dev_managed) (struct dca_provider *, struct device *); }; struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size); @@ -32,9 +35,11 @@ static inline void *dca_priv(struct dca_provider *dca) } /* Requester API */ +#define DCA_GET_TAG_TWO_ARGS int dca_add_requester(struct device *dev); int dca_remove_requester(struct device *dev); u8 dca_get_tag(int cpu); +u8 dca3_get_tag(struct device *dev, int cpu); /* internal stuff */ int __init dca_sysfs_init(void); diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index ab94bc08355..f352f06fa06 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -39,6 +39,8 @@ extern void __delayacct_blkio_start(void); extern void __delayacct_blkio_end(void); extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *); extern __u64 __delayacct_blkio_ticks(struct task_struct *); +extern void __delayacct_freepages_start(void); +extern void __delayacct_freepages_end(void); static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) { @@ -107,6 +109,18 @@ static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk) return 0; } +static inline void delayacct_freepages_start(void) +{ + if (current->delays) + __delayacct_freepages_start(); +} + +static inline void delayacct_freepages_end(void) +{ + if (current->delays) + __delayacct_freepages_end(); +} + #else static inline void delayacct_set_flag(int flag) {} @@ -129,6 +143,11 @@ static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk) { return 0; } static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) { return 0; } +static inline void delayacct_freepages_start(void) +{} +static inline void delayacct_freepages_end(void) +{} + #endif /* CONFIG_TASK_DELAY_ACCT */ #endif diff --git a/include/linux/dirent.h b/include/linux/dirent.h index 5d6023b8780..f072fb8d10a 100644 --- a/include/linux/dirent.h +++ b/include/linux/dirent.h @@ -1,23 +1,6 @@ #ifndef _LINUX_DIRENT_H #define _LINUX_DIRENT_H -struct dirent { - long d_ino; - __kernel_off_t d_off; - unsigned short d_reclen; - char d_name[256]; /* We must not include limits.h! 
*/ -}; - -struct dirent64 { - __u64 d_ino; - __s64 d_off; - unsigned short d_reclen; - unsigned char d_type; - char d_name[256]; -}; - -#ifdef __KERNEL__ - struct linux_dirent64 { u64 d_ino; s64 d_off; @@ -26,7 +9,4 @@ struct linux_dirent64 { char d_name[0]; }; -#endif /* __KERNEL__ */ - - #endif diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index d08a5c5eb92..adb0b084eb5 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -89,10 +89,23 @@ enum dma_transaction_type { DMA_MEMSET, DMA_MEMCPY_CRC32C, DMA_INTERRUPT, + DMA_SLAVE, }; /* last transaction type for creation of the capabilities mask */ -#define DMA_TX_TYPE_END (DMA_INTERRUPT + 1) +#define DMA_TX_TYPE_END (DMA_SLAVE + 1) + +/** + * enum dma_slave_width - DMA slave register access width. + * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses + * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses + * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses + */ +enum dma_slave_width { + DMA_SLAVE_WIDTH_8BIT, + DMA_SLAVE_WIDTH_16BIT, + DMA_SLAVE_WIDTH_32BIT, +}; /** * enum dma_ctrl_flags - DMA flags to augment operation preparation, @@ -102,10 +115,14 @@ enum dma_transaction_type { * @DMA_CTRL_ACK - the descriptor cannot be reused until the client * acknowledges receipt, i.e. has has a chance to establish any * dependency chains + * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) + * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) */ enum dma_ctrl_flags { DMA_PREP_INTERRUPT = (1 << 0), DMA_CTRL_ACK = (1 << 1), + DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), + DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), }; /** @@ -115,6 +132,32 @@ enum dma_ctrl_flags { typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; /** + * struct dma_slave - Information about a DMA slave + * @dev: device acting as DMA slave + * @dma_dev: required DMA master device. If non-NULL, the client can not be + * bound to other masters than this. + * @tx_reg: physical address of data register used for + * memory-to-peripheral transfers + * @rx_reg: physical address of data register used for + * peripheral-to-memory transfers + * @reg_width: peripheral register width + * + * If dma_dev is non-NULL, the client can not be bound to other DMA + * masters than the one corresponding to this device. The DMA master + * driver may use this to determine if there is controller-specific + * data wrapped around this struct. Drivers of platform code that sets + * the dma_dev field must therefore make sure to use an appropriate + * controller-specific dma slave structure wrapping this struct. 
+ */ +struct dma_slave { + struct device *dev; + struct device *dma_dev; + dma_addr_t tx_reg; + dma_addr_t rx_reg; + enum dma_slave_width reg_width; +}; + +/** * struct dma_chan_percpu - the per-CPU part of struct dma_chan * @refcount: local_t used for open-coded "bigref" counting * @memcpy_count: transaction counter @@ -139,6 +182,7 @@ struct dma_chan_percpu { * @rcu: the DMA channel's RCU head * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu + * @client-count: how many clients are using this channel */ struct dma_chan { struct dma_device *device; @@ -154,6 +198,7 @@ struct dma_chan { struct list_head device_node; struct dma_chan_percpu *local; + int client_count; }; #define to_dma_chan(p) container_of(p, struct dma_chan, dev) @@ -202,11 +247,14 @@ typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client, * @event_callback: func ptr to call when something happens * @cap_mask: only return channels that satisfy the requested capabilities * a value of zero corresponds to any capability + * @slave: data for preparing slave transfer. Must be non-NULL iff the + * DMA_SLAVE capability is requested. * @global_node: list_head for global dma_client_list */ struct dma_client { dma_event_callback event_callback; dma_cap_mask_t cap_mask; + struct dma_slave *slave; struct list_head global_node; }; @@ -263,6 +311,8 @@ struct dma_async_tx_descriptor { * @device_prep_dma_zero_sum: prepares a zero_sum operation * @device_prep_dma_memset: prepares a memset operation * @device_prep_dma_interrupt: prepares an end of chain interrupt operation + * @device_prep_slave_sg: prepares a slave dma operation + * @device_terminate_all: terminate all pending operations * @device_issue_pending: push pending transactions to hardware */ struct dma_device { @@ -279,7 +329,8 @@ struct dma_device { int dev_id; struct device *dev; - int (*device_alloc_chan_resources)(struct dma_chan *chan); + int (*device_alloc_chan_resources)(struct dma_chan *chan, + struct dma_client *client); void (*device_free_chan_resources)(struct dma_chan *chan); struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( @@ -297,6 +348,12 @@ struct dma_device { struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( struct dma_chan *chan, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_slave_sg)( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_data_direction direction, + unsigned long flags); + void (*device_terminate_all)(struct dma_chan *chan); + enum dma_status (*device_is_tx_complete)(struct dma_chan *chan, dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used); @@ -318,16 +375,14 @@ dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, struct dma_chan *chan); -static inline void -async_tx_ack(struct dma_async_tx_descriptor *tx) +static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) { tx->flags |= DMA_CTRL_ACK; } -static inline int -async_tx_test_ack(struct dma_async_tx_descriptor *tx) +static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx) { - return tx->flags & DMA_CTRL_ACK; + return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; } #define first_dma_cap(mask) __first_dma_cap(&(mask)) diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h new file mode 100644 index 00000000000..04d217b442b --- /dev/null +++ b/include/linux/dw_dmac.h @@ -0,0 +1,62 @@ +/* + * Driver for the Synopsys 
DesignWare DMA Controller (aka DMACA on + * AVR32 systems.) + * + * Copyright (C) 2007 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef DW_DMAC_H +#define DW_DMAC_H + +#include <linux/dmaengine.h> + +/** + * struct dw_dma_platform_data - Controller configuration parameters + * @nr_channels: Number of channels supported by hardware (max 8) + */ +struct dw_dma_platform_data { + unsigned int nr_channels; +}; + +/** + * struct dw_dma_slave - Controller-specific information about a slave + * @slave: Generic information about the slave + * @ctl_lo: Platform-specific initializer for the CTL_LO register + * @cfg_hi: Platform-specific initializer for the CFG_HI register + * @cfg_lo: Platform-specific initializer for the CFG_LO register + */ +struct dw_dma_slave { + struct dma_slave slave; + u32 cfg_hi; + u32 cfg_lo; +}; + +/* Platform-configurable bits in CFG_HI */ +#define DWC_CFGH_FCMODE (1 << 0) +#define DWC_CFGH_FIFO_MODE (1 << 1) +#define DWC_CFGH_PROTCTL(x) ((x) << 2) +#define DWC_CFGH_SRC_PER(x) ((x) << 7) +#define DWC_CFGH_DST_PER(x) ((x) << 11) + +/* Platform-configurable bits in CFG_LO */ +#define DWC_CFGL_PRIO(x) ((x) << 5) /* priority */ +#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */ +#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12) +#define DWC_CFGL_LOCK_CH_XACT (2 << 12) +#define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */ +#define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14) +#define DWC_CFGL_LOCK_BUS_XACT (2 << 14) +#define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */ +#define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */ +#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ +#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ + +static inline struct dw_dma_slave *to_dw_dma_slave(struct dma_slave *slave) +{ + return container_of(slave, struct dw_dma_slave, slave); +} + +#endif /* DW_DMAC_H */ diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index a701399b7fe..a667637b54e 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -10,6 +10,13 @@ #ifdef CONFIG_EVENTFD +/* For O_CLOEXEC and O_NONBLOCK */ +#include <linux/fcntl.h> + +/* Flags for eventfd2. */ +#define EFD_CLOEXEC O_CLOEXEC +#define EFD_NONBLOCK O_NONBLOCK + struct file *eventfd_fget(int fd); int eventfd_signal(struct file *file, int n); diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index cf79853967f..f1e1d3c4712 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -14,8 +14,12 @@ #ifndef _LINUX_EVENTPOLL_H #define _LINUX_EVENTPOLL_H +/* For O_CLOEXEC */ +#include <linux/fcntl.h> #include <linux/types.h> +/* Flags for epoll_create1. 
*/ +#define EPOLL_CLOEXEC O_CLOEXEC /* Valid opcodes to issue to sys_epoll_ctl() */ #define EPOLL_CTL_ADD 1 diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h index 84cec2aa9f1..2efe7b863cf 100644 --- a/include/linux/ext2_fs.h +++ b/include/linux/ext2_fs.h @@ -284,8 +284,8 @@ struct ext2_inode { #ifdef __hurd__ #define i_translator osd1.hurd1.h_i_translator -#define i_frag osd2.hurd2.h_i_frag; -#define i_fsize osd2.hurd2.h_i_fsize; +#define i_frag osd2.hurd2.h_i_frag +#define i_fsize osd2.hurd2.h_i_fsize #define i_uid_high osd2.hurd2.h_i_uid_high #define i_gid_high osd2.hurd2.h_i_gid_high #define i_author osd2.hurd2.h_i_author diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index 36c54039637..80171ee89a2 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -832,6 +832,7 @@ extern void ext3_discard_reservation (struct inode *); extern void ext3_dirty_inode(struct inode *); extern int ext3_change_inode_journal_flag(struct inode *, int); extern int ext3_get_inode_loc(struct inode *, struct ext3_iloc *); +extern int ext3_can_truncate(struct inode *inode); extern void ext3_truncate (struct inode *); extern void ext3_set_inode_flags(struct inode *); extern void ext3_get_inode_flags(struct ext3_inode_info *); diff --git a/include/linux/fb.h b/include/linux/fb.h index 72295b09922..3b8870e32af 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -8,7 +8,6 @@ struct dentry; /* Definitions of frame buffers */ -#define FB_MAJOR 29 #define FB_MAX 32 /* sufficient for now */ /* ioctls @@ -120,6 +119,10 @@ struct dentry; #define FB_ACCEL_XGI_VOLARI_V 47 /* XGI Volari V3XT, V5, V8 */ #define FB_ACCEL_XGI_VOLARI_Z 48 /* XGI Volari Z7 */ #define FB_ACCEL_OMAP1610 49 /* TI OMAP16xx */ +#define FB_ACCEL_TRIDENT_TGUI 50 /* Trident TGUI */ +#define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */ +#define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */ +#define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */ #define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ #define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ #define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ diff --git a/include/linux/fd1772.h b/include/linux/fd1772.h deleted file mode 100644 index 871d6e4c677..00000000000 --- a/include/linux/fd1772.h +++ /dev/null @@ -1,80 +0,0 @@ -#ifndef _LINUX_FD1772REG_H -#define _LINUX_FD1772REG_H - -/* -** WD1772 stuff - originally from the M68K Linux - * Modified for Archimedes by Dave Gilbert (gilbertd@cs.man.ac.uk) - */ - -/* register codes */ - -#define FDC1772SELREG_STP (0x80) /* command/status register */ -#define FDC1772SELREG_TRA (0x82) /* track register */ -#define FDC1772SELREG_SEC (0x84) /* sector register */ -#define FDC1772SELREG_DTA (0x86) /* data register */ - -/* register names for FDC1772_READ/WRITE macros */ - -#define FDC1772REG_CMD 0 -#define FDC1772REG_STATUS 0 -#define FDC1772REG_TRACK 2 -#define FDC1772REG_SECTOR 4 -#define FDC1772REG_DATA 6 - -/* command opcodes */ - -#define FDC1772CMD_RESTORE (0x00) /* - */ -#define FDC1772CMD_SEEK (0x10) /* | */ -#define FDC1772CMD_STEP (0x20) /* | TYP 1 Commands */ -#define FDC1772CMD_STIN (0x40) /* | */ -#define FDC1772CMD_STOT (0x60) /* - */ -#define FDC1772CMD_RDSEC (0x80) /* - TYP 2 Commands */ -#define FDC1772CMD_WRSEC (0xa0) /* - " */ -#define FDC1772CMD_RDADR (0xc0) /* - */ -#define FDC1772CMD_RDTRA (0xe0) /* | TYP 3 Commands */ -#define FDC1772CMD_WRTRA (0xf0) /* - */ -#define FDC1772CMD_FORCI (0xd0) /* - TYP 4 Command */ - -/* command modifier bits */ - -#define 
FDC1772CMDADD_SR6 (0x00) /* step rate settings */ -#define FDC1772CMDADD_SR12 (0x01) -#define FDC1772CMDADD_SR2 (0x02) -#define FDC1772CMDADD_SR3 (0x03) -#define FDC1772CMDADD_V (0x04) /* verify */ -#define FDC1772CMDADD_H (0x08) /* wait for spin-up */ -#define FDC1772CMDADD_U (0x10) /* update track register */ -#define FDC1772CMDADD_M (0x10) /* multiple sector access */ -#define FDC1772CMDADD_E (0x04) /* head settling flag */ -#define FDC1772CMDADD_P (0x02) /* precompensation */ -#define FDC1772CMDADD_A0 (0x01) /* DAM flag */ - -/* status register bits */ - -#define FDC1772STAT_MOTORON (0x80) /* motor on */ -#define FDC1772STAT_WPROT (0x40) /* write protected (FDC1772CMD_WR*) */ -#define FDC1772STAT_SPINUP (0x20) /* motor speed stable (Type I) */ -#define FDC1772STAT_DELDAM (0x20) /* sector has deleted DAM (Type II+III) */ -#define FDC1772STAT_RECNF (0x10) /* record not found */ -#define FDC1772STAT_CRC (0x08) /* CRC error */ -#define FDC1772STAT_TR00 (0x04) /* Track 00 flag (Type I) */ -#define FDC1772STAT_LOST (0x04) /* Lost Data (Type II+III) */ -#define FDC1772STAT_IDX (0x02) /* Index status (Type I) */ -#define FDC1772STAT_DRQ (0x02) /* DRQ status (Type II+III) */ -#define FDC1772STAT_BUSY (0x01) /* FDC1772 is busy */ - - -/* PSG Port A Bit Nr 0 .. Side Sel .. 0 -> Side 1 1 -> Side 2 */ -#define DSKSIDE (0x01) - -#define DSKDRVNONE (0x06) -#define DSKDRV0 (0x02) -#define DSKDRV1 (0x04) - -/* step rates */ -#define FDC1772STEP_6 0x00 -#define FDC1772STEP_12 0x01 -#define FDC1772STEP_2 0x02 -#define FDC1772STEP_3 0x03 - -#endif diff --git a/include/linux/fs.h b/include/linux/fs.h index 9c2ac5c0ef5..53d2edb709b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -499,7 +499,7 @@ struct backing_dev_info; struct address_space { struct inode *host; /* owner: inode, block_device */ struct radix_tree_root page_tree; /* radix tree of all pages */ - rwlock_t tree_lock; /* and rwlock protecting it */ + spinlock_t tree_lock; /* and lock protecting it */ unsigned int i_mmap_writable;/* count VM_SHARED mappings */ struct prio_tree_root i_mmap; /* tree of private and shared mappings */ struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ @@ -886,6 +886,12 @@ static inline int file_check_writeable(struct file *filp) #define FL_SLEEP 128 /* A blocking lock */ /* + * Special return value from posix_lock_file() and vfs_lock_file() for + * asynchronous locking. + */ +#define FILE_LOCK_DEFERRED 1 + +/* * The POSIX file lock owner is determined by * the "struct files_struct" in the thread group * (or NULL for no owner - BSD locks). 
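As a hedged illustration of the FILE_LOCK_DEFERRED return value introduced in the fs.h hunk above, a caller of vfs_lock_file() might distinguish the asynchronous case roughly as sketched below. This is not part of the patch; the function name example_try_lock() and the decision to treat a deferred lock as success are assumptions made only for this sketch.

/*
 * Illustrative sketch, not part of this patch: using the new
 * FILE_LOCK_DEFERRED return value from vfs_lock_file() to detect that a
 * blocking lock request has been queued asynchronously.
 */
#include <linux/fcntl.h>
#include <linux/fs.h>

static int example_try_lock(struct file *filp, struct file_lock *fl)
{
	int error = vfs_lock_file(filp, F_SETLK, fl, NULL);

	if (error == FILE_LOCK_DEFERRED) {
		/*
		 * The lock could not be granted immediately; the lock
		 * manager will notify the caller when it becomes
		 * available, so do not report this as a failure here.
		 */
		return 0;
	}
	return error;
}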
@@ -1025,6 +1031,7 @@ extern int send_sigurg(struct fown_struct *fown); extern struct list_head super_blocks; extern spinlock_t sb_lock; +#define sb_entry(list) list_entry((list), struct super_block, s_list) #define S_BIAS (1<<30) struct super_block { struct list_head s_list; /* Keep this first */ @@ -1058,6 +1065,9 @@ struct super_block { struct list_head s_more_io; /* parked for more writeback */ struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */ struct list_head s_files; + /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */ + struct list_head s_dentry_lru; /* unused dentry lru */ + int s_nr_dentry_unused; /* # of dentry on lru */ struct block_device *s_bdev; struct mtd_info *s_mtd; @@ -1773,8 +1783,9 @@ static inline void allow_write_access(struct file *file) atomic_inc(&file->f_path.dentry->d_inode->i_writecount); } extern int do_pipe(int *); -extern struct file *create_read_pipe(struct file *f); -extern struct file *create_write_pipe(void); +extern int do_pipe_flags(int *, int); +extern struct file *create_read_pipe(struct file *f, int flags); +extern struct file *create_write_pipe(int flags); extern void free_write_pipe(struct file *); extern struct file *do_filp_open(int dfd, const char *pathname, @@ -2006,8 +2017,6 @@ extern void simple_release_fs(struct vfsmount **mount, int *count); extern ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos, const void *from, size_t available); -extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, - const void *from, size_t available); #ifdef CONFIG_MIGRATION extern int buffer_migrate_page(struct address_space *, diff --git a/include/linux/fuse.h b/include/linux/fuse.h index d4828219769..265635dc990 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h @@ -104,11 +104,14 @@ struct fuse_file_lock { /** * INIT request/reply flags + * + * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".." 
*/ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) #define FUSE_FILE_OPS (1 << 2) #define FUSE_ATOMIC_O_TRUNC (1 << 3) +#define FUSE_EXPORT_SUPPORT (1 << 4) #define FUSE_BIG_WRITES (1 << 5) /** diff --git a/include/linux/genhd.h b/include/linux/genhd.h index e8787417f65..118216f1bd3 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -541,7 +541,7 @@ extern dev_t blk_lookup_devt(const char *name, int part); extern char *disk_name (struct gendisk *hd, int part, char *buf); extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); -extern void add_partition(struct gendisk *, int, sector_t, sector_t, int); +extern int __must_check add_partition(struct gendisk *, int, sector_t, sector_t, int); extern void delete_partition(struct gendisk *, int); extern void printk_all_partitions(void); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index b414be38718..e8003afeffb 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -173,11 +173,24 @@ static inline void arch_free_page(struct page *page, int order) { } static inline void arch_alloc_page(struct page *page, int order) { } #endif -extern struct page *__alloc_pages(gfp_t, unsigned int, struct zonelist *); +struct page * +__alloc_pages_internal(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist, nodemask_t *nodemask); + +static inline struct page * +__alloc_pages(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist) +{ + return __alloc_pages_internal(gfp_mask, order, zonelist, NULL); +} + +static inline struct page * +__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist, nodemask_t *nodemask) +{ + return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask); +} -extern struct page * -__alloc_pages_nodemask(gfp_t, unsigned int, - struct zonelist *, nodemask_t *nodemask); static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) @@ -215,6 +228,9 @@ extern struct page *alloc_page_vma(gfp_t gfp_mask, extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); +void *alloc_pages_exact(size_t size, gfp_t gfp_mask); +void free_pages_exact(void *virt, size_t size); + #define __get_free_page(gfp_mask) \ __get_free_pages((gfp_mask),0) diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 98be6c5762b..730a20b8357 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -79,6 +79,19 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value) WARN_ON(1); } +static inline int gpio_export(unsigned gpio, bool direction_may_change) +{ + /* GPIO can never have been requested or set as {in,out}put */ + WARN_ON(1); + return -EINVAL; +} + +static inline void gpio_unexport(unsigned gpio) +{ + /* GPIO can never have been exported */ + WARN_ON(1); +} + static inline int gpio_to_irq(unsigned gpio) { /* GPIO can never have been requested or set as input */ diff --git a/include/linux/hid.h b/include/linux/hid.h index fe56b86f2c6..ac4e678a04e 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -512,7 +512,7 @@ struct hid_descriptor { /* Applications from HID Usage Tables 4/8/99 Version 1.1 */ /* We ignore a few input applications that are not widely used */ -#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001)) +#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002)) 
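For the gpio_export()/gpio_unexport() interface whose stubs appear in the gpio.h hunk above, a driver might export a requested GPIO to sysfs roughly as sketched below. This is a hedged illustration only: the GPIO number 42, the label "example-led", and the helper name example_export_led() are arbitrary assumptions, not part of this patch.

/*
 * Illustrative sketch, not part of this patch: requesting a GPIO,
 * configuring it as an output, and exporting it via gpio_export().
 */
#include <linux/gpio.h>

static int example_export_led(void)
{
	int err;

	err = gpio_request(42, "example-led");
	if (err)
		return err;

	err = gpio_direction_output(42, 0);
	if (err) {
		gpio_free(42);
		return err;
	}

	/* expose the GPIO under /sys/class/gpio; direction stays fixed */
	return gpio_export(42, false);
}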
/* HID core API */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index a79e80b689d..32e0ef0f6e1 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -8,7 +8,6 @@ #include <linux/mempolicy.h> #include <linux/shm.h> #include <asm/tlbflush.h> -#include <asm/hugetlb.h> struct ctl_table; @@ -17,38 +16,45 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) return vma->vm_flags & VM_HUGETLB; } +void reset_vma_resv_huge_pages(struct vm_area_struct *vma); int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); int hugetlb_overcommit_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int, int); -void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long); -void __unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long); +void unmap_hugepage_range(struct vm_area_struct *, + unsigned long, unsigned long, struct page *); +void __unmap_hugepage_range(struct vm_area_struct *, + unsigned long, unsigned long, struct page *); int hugetlb_prefault(struct address_space *, struct vm_area_struct *); int hugetlb_report_meminfo(char *); int hugetlb_report_node_meminfo(int, char *); unsigned long hugetlb_total_pages(void); int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access); -int hugetlb_reserve_pages(struct inode *inode, long from, long to); +int hugetlb_reserve_pages(struct inode *inode, long from, long to, + struct vm_area_struct *vma); void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); -extern unsigned long max_huge_pages; -extern unsigned long sysctl_overcommit_huge_pages; extern unsigned long hugepages_treat_as_movable; extern const unsigned long hugetlb_zero, hugetlb_infinity; extern int sysctl_hugetlb_shm_group; +extern struct list_head huge_boot_pages; /* arch callbacks */ -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr); +pte_t *huge_pte_alloc(struct mm_struct *mm, + unsigned long addr, unsigned long sz); pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr); int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, int write); struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write); +struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, + pud_t *pud, int write); int pmd_huge(pmd_t pmd); +int pud_huge(pud_t pmd); void hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot); @@ -58,6 +64,11 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) { return 0; } + +static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) +{ +} + static inline unsigned long hugetlb_total_pages(void) { return 0; @@ -67,12 +78,14 @@ static inline unsigned long hugetlb_total_pages(void) #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL) #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) #define hugetlb_prefault(mapping, vma) ({ BUG(); 0; }) -#define 
unmap_hugepage_range(vma, start, end) BUG() +#define unmap_hugepage_range(vma, start, end, page) BUG() #define hugetlb_report_meminfo(buf) 0 #define hugetlb_report_node_meminfo(n, buf) 0 #define follow_huge_pmd(mm, addr, pmd, write) NULL -#define prepare_hugepage_range(addr,len) (-EINVAL) +#define follow_huge_pud(mm, addr, pud, write) NULL +#define prepare_hugepage_range(file, addr, len) (-EINVAL) #define pmd_huge(x) 0 +#define pud_huge(x) 0 #define is_hugepage_only_range(mm, addr, len) 0 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) #define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; }) @@ -93,6 +106,7 @@ struct hugetlbfs_config { umode_t mode; long nr_blocks; long nr_inodes; + struct hstate *hstate; }; struct hugetlbfs_sb_info { @@ -101,6 +115,7 @@ struct hugetlbfs_sb_info { long max_inodes; /* inodes allowed */ long free_inodes; /* inodes free */ spinlock_t stat_lock; + struct hstate *hstate; }; @@ -125,8 +140,6 @@ struct file *hugetlb_file_setup(const char *name, size_t); int hugetlb_get_quota(struct address_space *mapping, long delta); void hugetlb_put_quota(struct address_space *mapping, long delta); -#define BLOCKS_PER_HUGEPAGE (HPAGE_SIZE / 512) - static inline int is_file_hugepages(struct file *file) { if (file->f_op == &hugetlbfs_file_operations) @@ -155,4 +168,115 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long flags); #endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */ +#ifdef CONFIG_HUGETLB_PAGE + +#define HSTATE_NAME_LEN 32 +/* Defines one hugetlb page size */ +struct hstate { + int hugetlb_next_nid; + unsigned int order; + unsigned long mask; + unsigned long max_huge_pages; + unsigned long nr_huge_pages; + unsigned long free_huge_pages; + unsigned long resv_huge_pages; + unsigned long surplus_huge_pages; + unsigned long nr_overcommit_huge_pages; + struct list_head hugepage_freelists[MAX_NUMNODES]; + unsigned int nr_huge_pages_node[MAX_NUMNODES]; + unsigned int free_huge_pages_node[MAX_NUMNODES]; + unsigned int surplus_huge_pages_node[MAX_NUMNODES]; + char name[HSTATE_NAME_LEN]; +}; + +struct huge_bootmem_page { + struct list_head list; + struct hstate *hstate; +}; + +/* arch callback */ +int __init alloc_bootmem_huge_page(struct hstate *h); + +void __init hugetlb_add_hstate(unsigned order); +struct hstate *size_to_hstate(unsigned long size); + +#ifndef HUGE_MAX_HSTATE +#define HUGE_MAX_HSTATE 1 +#endif + +extern struct hstate hstates[HUGE_MAX_HSTATE]; +extern unsigned int default_hstate_idx; + +#define default_hstate (hstates[default_hstate_idx]) + +static inline struct hstate *hstate_inode(struct inode *i) +{ + struct hugetlbfs_sb_info *hsb; + hsb = HUGETLBFS_SB(i->i_sb); + return hsb->hstate; +} + +static inline struct hstate *hstate_file(struct file *f) +{ + return hstate_inode(f->f_dentry->d_inode); +} + +static inline struct hstate *hstate_vma(struct vm_area_struct *vma) +{ + return hstate_file(vma->vm_file); +} + +static inline unsigned long huge_page_size(struct hstate *h) +{ + return (unsigned long)PAGE_SIZE << h->order; +} + +static inline unsigned long huge_page_mask(struct hstate *h) +{ + return h->mask; +} + +static inline unsigned int huge_page_order(struct hstate *h) +{ + return h->order; +} + +static inline unsigned huge_page_shift(struct hstate *h) +{ + return h->order + PAGE_SHIFT; +} + +static inline unsigned int pages_per_huge_page(struct hstate *h) +{ + return 1 << h->order; +} + +static inline unsigned int blocks_per_huge_page(struct hstate *h) +{ + return huge_page_size(h) / 
512; +} + +#include <asm/hugetlb.h> + +static inline struct hstate *page_hstate(struct page *page) +{ + return size_to_hstate(PAGE_SIZE << compound_order(page)); +} + +#else +struct hstate {}; +#define alloc_bootmem_huge_page(h) NULL +#define hstate_file(f) NULL +#define hstate_vma(v) NULL +#define hstate_inode(i) NULL +#define huge_page_size(h) PAGE_SIZE +#define huge_page_mask(h) PAGE_MASK +#define huge_page_order(h) 0 +#define huge_page_shift(h) PAGE_SHIFT +static inline unsigned int pages_per_huge_page(struct hstate *h) +{ + return 1; +} +#endif + #endif /* _LINUX_HUGETLB_H */ diff --git a/include/linux/i2c/max732x.h b/include/linux/i2c/max732x.h new file mode 100644 index 00000000000..e10336631c6 --- /dev/null +++ b/include/linux/i2c/max732x.h @@ -0,0 +1,19 @@ +#ifndef __LINUX_I2C_MAX732X_H +#define __LINUX_I2C_MAX732X_H + +/* platform data for the MAX732x 8/16-bit I/O expander driver */ + +struct max732x_platform_data { + /* number of the first GPIO */ + unsigned gpio_base; + + void *context; /* param to setup/teardown */ + + int (*setup)(struct i2c_client *client, + unsigned gpio, unsigned ngpio, + void *context); + int (*teardown)(struct i2c_client *client, + unsigned gpio, unsigned ngpio, + void *context); +}; +#endif /* __LINUX_I2C_MAX732X_H */ diff --git a/include/linux/i2o.h b/include/linux/i2o.h index 7d51cbca49a..75ae6d8aba4 100644 --- a/include/linux/i2o.h +++ b/include/linux/i2o.h @@ -758,7 +758,7 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, } dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction); - if (!dma_mapping_error(dma_addr)) { + if (!dma_mapping_error(&c->pdev->dev, dma_addr)) { #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 if ((sizeof(dma_addr_t) > 4) && c->pae_support) { *mptr++ = cpu_to_le32(0x7C020002); diff --git a/include/linux/ide.h b/include/linux/ide.h index 4726126f5a5..b846bc44a27 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -178,6 +178,7 @@ typedef struct hw_regs_s { ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ hwif_chipset_t chipset; struct device *dev, *parent; + unsigned long config; } hw_regs_t; void ide_init_port_data(struct hwif_s *, unsigned int); @@ -210,7 +211,21 @@ static inline int __ide_default_irq(unsigned long base) return 0; } +#if defined(CONFIG_ARM) || defined(CONFIG_FRV) || defined(CONFIG_M68K) || \ + defined(CONFIG_MIPS) || defined(CONFIG_MN10300) || defined(CONFIG_PARISC) \ + || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || defined(CONFIG_SPARC64) #include <asm/ide.h> +#else +#include <asm-generic/ide_iops.h> +#endif + +#ifndef MAX_HWIFS +#if defined(CONFIG_BLACKFIN) || defined(CONFIG_H8300) || defined(CONFIG_XTENSA) +# define MAX_HWIFS 1 +#else +# define MAX_HWIFS 10 +#endif +#endif #if !defined(MAX_HWIFS) || defined(CONFIG_EMBEDDED) #undef MAX_HWIFS @@ -307,7 +322,65 @@ struct ide_acpi_drive_link; struct ide_acpi_hwif_link; #endif -typedef struct ide_drive_s { +/* ATAPI device flags */ +enum { + IDE_AFLAG_DRQ_INTERRUPT = (1 << 0), + IDE_AFLAG_MEDIA_CHANGED = (1 << 1), + + /* ide-cd */ + /* Drive cannot lock the door. */ + IDE_AFLAG_NO_DOORLOCK = (1 << 2), + /* Drive cannot eject the disc. */ + IDE_AFLAG_NO_EJECT = (1 << 3), + /* Drive is a pre ATAPI 1.2 drive. */ + IDE_AFLAG_PRE_ATAPI12 = (1 << 4), + /* TOC addresses are in BCD. */ + IDE_AFLAG_TOCADDR_AS_BCD = (1 << 5), + /* TOC track numbers are in BCD. */ + IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 6), + /* + * Drive does not provide data in multiples of SECTOR_SIZE + * when more than one interrupt is needed. 
+ */ + IDE_AFLAG_LIMIT_NFRAMES = (1 << 7), + /* Seeking in progress. */ + IDE_AFLAG_SEEKING = (1 << 8), + /* Saved TOC information is current. */ + IDE_AFLAG_TOC_VALID = (1 << 9), + /* We think that the drive door is locked. */ + IDE_AFLAG_DOOR_LOCKED = (1 << 10), + /* SET_CD_SPEED command is unsupported. */ + IDE_AFLAG_NO_SPEED_SELECT = (1 << 11), + IDE_AFLAG_VERTOS_300_SSD = (1 << 12), + IDE_AFLAG_VERTOS_600_ESD = (1 << 13), + IDE_AFLAG_SANYO_3CD = (1 << 14), + IDE_AFLAG_FULL_CAPS_PAGE = (1 << 15), + IDE_AFLAG_PLAY_AUDIO_OK = (1 << 16), + IDE_AFLAG_LE_SPEED_FIELDS = (1 << 17), + + /* ide-floppy */ + /* Format in progress */ + IDE_AFLAG_FORMAT_IN_PROGRESS = (1 << 18), + /* Avoid commands not supported in Clik drive */ + IDE_AFLAG_CLIK_DRIVE = (1 << 19), + /* Requires BH algorithm for packets */ + IDE_AFLAG_ZIP_DRIVE = (1 << 20), + + /* ide-tape */ + IDE_AFLAG_IGNORE_DSC = (1 << 21), + /* 0 When the tape position is unknown */ + IDE_AFLAG_ADDRESS_VALID = (1 << 22), + /* Device already opened */ + IDE_AFLAG_BUSY = (1 << 23), + /* Attempt to auto-detect the current user block size */ + IDE_AFLAG_DETECT_BS = (1 << 24), + /* Currently on a filemark */ + IDE_AFLAG_FILEMARK = (1 << 25), + /* 0 = no tape is loaded, so we don't rewind after ejecting */ + IDE_AFLAG_MEDIUM_PRESENT = (1 << 26) +}; + +struct ide_drive_s { char name[4]; /* drive name, such as "hda" */ char driver_req[10]; /* requests specific driver */ @@ -355,7 +428,6 @@ typedef struct ide_drive_s { unsigned nodma : 1; /* disallow DMA */ unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */ unsigned blocked : 1; /* 1=powermanagment told us not to do anything, so sleep nicely */ - unsigned vdma : 1; /* 1=doing PIO over DMA 0=doing normal DMA */ unsigned scsi : 1; /* 0=default, 1=ide-scsi emulation */ unsigned sleeping : 1; /* 1=sleeping & sleep field valid */ unsigned post_reset : 1; @@ -400,7 +472,14 @@ typedef struct ide_drive_s { struct list_head list; struct device gendev; struct completion gendev_rel_comp; /* to deal with device release() */ -} ide_drive_t; + + /* callback for packet commands */ + void (*pc_callback)(struct ide_drive_s *); + + unsigned long atapi_flags; +}; + +typedef struct ide_drive_s ide_drive_t; #define to_ide_device(dev)container_of(dev, ide_drive_t, gendev) @@ -408,8 +487,28 @@ typedef struct ide_drive_s { ((1<<ide_pci)|(1<<ide_cmd646)|(1<<ide_ali14xx)) #define IDE_CHIPSET_IS_PCI(c) ((IDE_CHIPSET_PCI_MASK >> (c)) & 1) +struct ide_task_s; struct ide_port_info; +struct ide_tp_ops { + void (*exec_command)(struct hwif_s *, u8); + u8 (*read_status)(struct hwif_s *); + u8 (*read_altstatus)(struct hwif_s *); + u8 (*read_sff_dma_status)(struct hwif_s *); + + void (*set_irq)(struct hwif_s *, int); + + void (*tf_load)(ide_drive_t *, struct ide_task_s *); + void (*tf_read)(ide_drive_t *, struct ide_task_s *); + + void (*input_data)(ide_drive_t *, struct request *, void *, + unsigned int); + void (*output_data)(ide_drive_t *, struct request *, void *, + unsigned int); +}; + +extern const struct ide_tp_ops default_tp_ops; + struct ide_port_ops { /* host specific initialization of a device */ void (*init_dev)(ide_drive_t *); @@ -447,7 +546,7 @@ struct ide_dma_ops { void (*dma_timeout)(struct ide_drive_s *); }; -struct ide_task_s; +struct ide_host; typedef struct hwif_s { struct hwif_s *next; /* for linked-list in ide_hwgroup_t */ @@ -455,6 +554,8 @@ typedef struct hwif_s { struct hwgroup_s *hwgroup; /* actually (ide_hwgroup_t *) */ struct proc_dir_entry *proc; /* /proc/ide/ directory entry */ + struct 
ide_host *host; + char name[6]; /* name of interface, eg. "ide0" */ struct ide_io_ports io_ports; @@ -486,22 +587,12 @@ typedef struct hwif_s { void (*rw_disk)(ide_drive_t *, struct request *); + const struct ide_tp_ops *tp_ops; const struct ide_port_ops *port_ops; const struct ide_dma_ops *dma_ops; - void (*tf_load)(ide_drive_t *, struct ide_task_s *); - void (*tf_read)(ide_drive_t *, struct ide_task_s *); - - void (*input_data)(ide_drive_t *, struct request *, void *, unsigned); - void (*output_data)(ide_drive_t *, struct request *, void *, unsigned); - void (*ide_dma_clear_irq)(ide_drive_t *drive); - void (*OUTB)(u8 addr, unsigned long port); - void (*OUTBSYNC)(struct hwif_s *hwif, u8 addr, unsigned long port); - - u8 (*INB)(unsigned long port); - /* dma physical region descriptor table (cpu view) */ unsigned int *dmatable_cpu; /* dma physical region descriptor table (dma view) */ @@ -524,8 +615,6 @@ typedef struct hwif_s { int irq; /* our irq number */ unsigned long dma_base; /* base addr for dma ports */ - unsigned long dma_command; /* dma command register */ - unsigned long dma_status; /* dma status register */ unsigned long config_data; /* for use by chipset-specific code */ unsigned long select_data; /* for use by chipset-specific code */ @@ -552,6 +641,14 @@ typedef struct hwif_s { #endif } ____cacheline_internodealigned_in_smp ide_hwif_t; +struct ide_host { + ide_hwif_t *ports[MAX_HWIFS]; + unsigned int n_ports; + struct device *dev[2]; + unsigned long host_flags; + void *host_priv; +}; + /* * internal ide interrupt handler type */ @@ -611,8 +708,6 @@ enum { PC_FLAG_WRITING = (1 << 6), /* command timed out */ PC_FLAG_TIMEDOUT = (1 << 7), - PC_FLAG_ZIP_DRIVE = (1 << 8), - PC_FLAG_DRQ_INTERRUPT = (1 << 9), }; struct ide_atapi_pc { @@ -646,8 +741,6 @@ struct ide_atapi_pc { */ u8 pc_buf[256]; - void (*callback)(ide_drive_t *); - /* idetape only */ struct idetape_bh *bh; char *b_data; @@ -802,18 +895,14 @@ struct ide_driver_s { #define to_ide_driver(drv) container_of(drv, ide_driver_t, gen_driver) +int ide_device_get(ide_drive_t *); +void ide_device_put(ide_drive_t *); + int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsigned, unsigned long); extern int ide_vlb_clk; extern int ide_pci_clk; -ide_hwif_t *ide_find_port_slot(const struct ide_port_info *); - -static inline ide_hwif_t *ide_find_port(void) -{ - return ide_find_port_slot(NULL); -} - extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs); int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq, int uptodate, int nr_sectors); @@ -884,6 +973,7 @@ enum { IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE | IDE_TFLAG_IN_HOB_NSECT | IDE_TFLAG_IN_HOB_LBA, + IDE_TFLAG_IN_FEATURE = (1 << 1), IDE_TFLAG_IN_NSECT = (1 << 25), IDE_TFLAG_IN_LBAL = (1 << 26), IDE_TFLAG_IN_LBAM = (1 << 27), @@ -948,9 +1038,25 @@ typedef struct ide_task_s { void ide_tf_dump(const char *, struct ide_taskfile *); +void ide_exec_command(ide_hwif_t *, u8); +u8 ide_read_status(ide_hwif_t *); +u8 ide_read_altstatus(ide_hwif_t *); +u8 ide_read_sff_dma_status(ide_hwif_t *); + +void ide_set_irq(ide_hwif_t *, int); + +void ide_tf_load(ide_drive_t *, ide_task_t *); +void ide_tf_read(ide_drive_t *, ide_task_t *); + +void ide_input_data(ide_drive_t *, struct request *, void *, unsigned int); +void ide_output_data(ide_drive_t *, struct request *, void *, unsigned int); + extern void SELECT_DRIVE(ide_drive_t *); void SELECT_MASK(ide_drive_t *, int); +u8 ide_read_error(ide_drive_t *); +void 
ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *); + extern int drive_is_ready(ide_drive_t *); void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8); @@ -1000,12 +1106,15 @@ extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *o #define ide_pci_register_driver(d) pci_register_driver(d) #endif -void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int, u8 *); +void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int, + hw_regs_t *, hw_regs_t **); void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); #ifdef CONFIG_BLK_DEV_IDEDMA_PCI int ide_pci_set_master(struct pci_dev *, const char *); unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *); +extern const struct ide_dma_ops sff_dma_ops; +int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *); int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *); #else static inline int ide_hwif_setup_dma(ide_hwif_t *hwif, @@ -1015,10 +1124,6 @@ static inline int ide_hwif_setup_dma(ide_hwif_t *hwif, } #endif -extern void default_hwif_iops(ide_hwif_t *); -extern void default_hwif_mmiops(ide_hwif_t *); -extern void default_hwif_transport(ide_hwif_t *); - typedef struct ide_pci_enablebit_s { u8 reg; /* byte pci reg holding the enable-bit */ u8 mask; /* mask to isolate the enable-bit */ @@ -1081,7 +1186,6 @@ enum { IDE_HFLAG_IO_32BIT = (1 << 24), /* unmask IRQs */ IDE_HFLAG_UNMASK_IRQS = (1 << 25), - IDE_HFLAG_ABUSE_SET_DMA_MODE = (1 << 26), /* serialize ports if DMA is possible (for sl82c105) */ IDE_HFLAG_SERIALIZE_DMA = (1 << 27), /* force host out of "simplex" mode */ @@ -1092,8 +1196,6 @@ enum { IDE_HFLAG_NO_IO_32BIT = (1 << 30), /* never unmask IRQs */ IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31), - /* host uses VDMA (disabled for now) */ - IDE_HFLAG_VDMA = 0, }; #ifdef CONFIG_BLK_DEV_OFFBOARD @@ -1104,12 +1206,13 @@ enum { struct ide_port_info { char *name; - unsigned int (*init_chipset)(struct pci_dev *, const char *); + unsigned int (*init_chipset)(struct pci_dev *); void (*init_iops)(ide_hwif_t *); void (*init_hwif)(ide_hwif_t *); int (*init_dma)(ide_hwif_t *, const struct ide_port_info *); + const struct ide_tp_ops *tp_ops; const struct ide_port_ops *port_ops; const struct ide_dma_ops *dma_ops; @@ -1122,8 +1225,10 @@ struct ide_port_info { u8 udma_mask; }; -int ide_setup_pci_device(struct pci_dev *, const struct ide_port_info *); -int ide_setup_pci_devices(struct pci_dev *, struct pci_dev *, const struct ide_port_info *); +int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *); +int ide_pci_init_two(struct pci_dev *, struct pci_dev *, + const struct ide_port_info *, void *); +void ide_pci_remove(struct pci_dev *); void ide_map_sg(ide_drive_t *, struct request *); void ide_init_sg_cmd(ide_drive_t *, struct request *); @@ -1163,7 +1268,6 @@ void ide_destroy_dmatable(ide_drive_t *); extern int ide_build_dmatable(ide_drive_t *, struct request *); int ide_allocate_dma_engine(ide_hwif_t *); void ide_release_dma_engine(ide_hwif_t *); -void ide_setup_dma(ide_hwif_t *, unsigned long); void ide_dma_host_set(ide_drive_t *, int); extern int ide_dma_setup(ide_drive_t *); @@ -1217,8 +1321,14 @@ void ide_undecoded_slave(ide_drive_t *); void ide_port_apply_params(ide_hwif_t *); -int ide_device_add_all(u8 *idx, const struct ide_port_info *); -int ide_device_add(u8 idx[4], const struct ide_port_info *); +struct ide_host *ide_host_alloc_all(const struct ide_port_info *, hw_regs_t **); +struct ide_host 
*ide_host_alloc(const struct ide_port_info *, hw_regs_t **); +void ide_host_free(struct ide_host *); +int ide_host_register(struct ide_host *, const struct ide_port_info *, + hw_regs_t **); +int ide_host_add(const struct ide_port_info *, hw_regs_t **, + struct ide_host **); +void ide_host_remove(struct ide_host *); int ide_legacy_device_add(const struct ide_port_info *, unsigned long); void ide_port_unregister_devices(ide_hwif_t *); void ide_port_scan(ide_hwif_t *); @@ -1350,33 +1460,4 @@ static inline ide_drive_t *ide_get_paired_drive(ide_drive_t *drive) return &hwif->drives[(drive->dn ^ 1) & 1]; } - -static inline void ide_set_irq(ide_drive_t *drive, int on) -{ - ide_hwif_t *hwif = drive->hwif; - - hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS | (on ? 0 : 2), - hwif->io_ports.ctl_addr); -} - -static inline u8 ide_read_status(ide_drive_t *drive) -{ - ide_hwif_t *hwif = drive->hwif; - - return hwif->INB(hwif->io_ports.status_addr); -} - -static inline u8 ide_read_altstatus(ide_drive_t *drive) -{ - ide_hwif_t *hwif = drive->hwif; - - return hwif->INB(hwif->io_ports.ctl_addr); -} - -static inline u8 ide_read_error(ide_drive_t *drive) -{ - ide_hwif_t *hwif = drive->hwif; - - return hwif->INB(hwif->io_ports.error_addr); -} #endif /* _IDE_H */ diff --git a/include/linux/idr.h b/include/linux/idr.h index 9a2d762124d..fa035f96f2a 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -15,6 +15,7 @@ #include <linux/types.h> #include <linux/bitops.h> #include <linux/init.h> +#include <linux/rcupdate.h> #if BITS_PER_LONG == 32 # define IDR_BITS 5 @@ -51,6 +52,7 @@ struct idr_layer { unsigned long bitmap; /* A zero bit means "space here" */ struct idr_layer *ary[1<<IDR_BITS]; int count; /* When zero, we can release it */ + struct rcu_head rcu_head; }; struct idr { @@ -71,6 +73,28 @@ struct idr { } #define DEFINE_IDR(name) struct idr name = IDR_INIT(name) +/* Actions to be taken after a call to _idr_sub_alloc */ +#define IDR_NEED_TO_GROW -2 +#define IDR_NOMORE_SPACE -3 + +#define _idr_rc_to_errno(rc) ((rc) == -1 ? -EAGAIN : -ENOSPC) + +/** + * idr synchronization (stolen from radix-tree.h) + * + * idr_find() is able to be called locklessly, using RCU. The caller must + * ensure calls to this function are made within rcu_read_lock() regions. + * Other readers (lock-free or otherwise) and modifications may be running + * concurrently. + * + * It is still required that the caller manage the synchronization and + * lifetimes of the items. So if RCU lock-free lookups are used, typically + * this would mean that the items have their own locks, or are amenable to + * lock-free access; and that the items are freed by RCU (or only freed after + * having been deleted from the idr tree *and* a synchronize_rcu() grace + * period). + */ + /* * This is what we export. */ diff --git a/include/linux/init.h b/include/linux/init.h index 21d658cdfa2..11b84e10605 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -170,6 +170,13 @@ extern void (*late_time_init)(void); __attribute__((__section__(".initcall" level ".init"))) = fn /* + * Early initcalls run before initializing SMP. + * + * Only for built-in code, not modules. + */ +#define early_initcall(fn) __define_initcall("early",fn,early) + +/* * A "pure" initcall has no dependencies on anything else, and purely * initializes variables that couldn't be statically initialized. 
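A sketch of the lock-free lookup pattern described in the idr.h comment above; the idr instance and helper below are hypothetical, and the stored objects are assumed to be freed only after an RCU grace period by their owner:

#include <linux/idr.h>
#include <linux/rcupdate.h>

static DEFINE_IDR(example_idr);	/* hypothetical idr, populated elsewhere */

/*
 * Illustrative only: idr_find() may be called without the idr's own
 * locking provided the caller is inside an RCU read-side critical
 * section, per the synchronization comment added to idr.h above.
 */
static int example_id_in_use(int id)
{
	int in_use;

	rcu_read_lock();
	in_use = idr_find(&example_idr, id) != NULL;
	rcu_read_unlock();

	return in_use;
}
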
* @@ -275,13 +282,7 @@ void __init parse_early_param(void); #define security_initcall(fn) module_init(fn) -/* These macros create a dummy inline: gcc 2.9x does not count alias - as usage, hence the `unused function' warning when __init functions - are declared static. We use the dummy __*_module_inline functions - both to kill the warning and check the type of the init/cleanup - function. */ - -/* Each module must use one module_init(), or one no_module_init */ +/* Each module must use one module_init(). */ #define module_init(initfn) \ static inline initcall_t __inittest(void) \ { return initfn; } \ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 93c45acf249..021d8e720c7 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -122,7 +122,7 @@ extern struct group_info init_groups; .state = 0, \ .stack = &init_thread_info, \ .usage = ATOMIC_INIT(2), \ - .flags = 0, \ + .flags = PF_KTHREAD, \ .lock_depth = -1, \ .prio = MAX_PRIO-20, \ .static_prio = MAX_PRIO-20, \ diff --git a/include/linux/inotify.h b/include/linux/inotify.h index 742b917e7d1..bd578578a8b 100644 --- a/include/linux/inotify.h +++ b/include/linux/inotify.h @@ -7,6 +7,8 @@ #ifndef _LINUX_INOTIFY_H #define _LINUX_INOTIFY_H +/* For O_CLOEXEC and O_NONBLOCK */ +#include <linux/fcntl.h> #include <linux/types.h> /* @@ -63,6 +65,10 @@ struct inotify_event { IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF | \ IN_MOVE_SELF) +/* Flags for sys_inotify_init1. */ +#define IN_CLOEXEC O_CLOEXEC +#define IN_NONBLOCK O_NONBLOCK + #ifdef __KERNEL__ #include <linux/dcache.h> diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index ea6c18a8b0d..ea330f9e710 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -36,6 +36,7 @@ struct ipc_namespace { int msg_ctlmni; atomic_t msg_bytes; atomic_t msg_hdrs; + int auto_msgmni; size_t shm_ctlmax; size_t shm_ctlall; @@ -53,7 +54,7 @@ extern atomic_t nr_ipc_ns; extern int register_ipcns_notifier(struct ipc_namespace *); extern int cond_register_ipcns_notifier(struct ipc_namespace *); -extern int unregister_ipcns_notifier(struct ipc_namespace *); +extern void unregister_ipcns_notifier(struct ipc_namespace *); extern int ipcns_notify(unsigned long); #else /* CONFIG_SYSVIPC */ diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 2b1c2e58566..74bde13224c 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -11,6 +11,8 @@ #ifndef _LINUX_TRACE_IRQFLAGS_H #define _LINUX_TRACE_IRQFLAGS_H +#include <linux/typecheck.h> + #ifdef CONFIG_TRACE_IRQFLAGS extern void trace_softirqs_on(unsigned long ip); extern void trace_softirqs_off(unsigned long ip); @@ -58,18 +60,24 @@ do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) #define local_irq_disable() \ do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) -#define local_irq_save(flags) \ - do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0) +#define local_irq_save(flags) \ + do { \ + typecheck(unsigned long, flags); \ + raw_local_irq_save(flags); \ + trace_hardirqs_off(); \ + } while (0) -#define local_irq_restore(flags) \ - do { \ - if (raw_irqs_disabled_flags(flags)) { \ - raw_local_irq_restore(flags); \ - trace_hardirqs_off(); \ - } else { \ - trace_hardirqs_on(); \ - raw_local_irq_restore(flags); \ - } \ + +#define local_irq_restore(flags) \ + do { \ + typecheck(unsigned long, flags); \ + if (raw_irqs_disabled_flags(flags)) { \ + raw_local_irq_restore(flags); \ + trace_hardirqs_off(); \ + } else { \ 
+ trace_hardirqs_on(); \ + raw_local_irq_restore(flags); \ + } \ } while (0) #else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ /* @@ -78,8 +86,16 @@ */ # define raw_local_irq_disable() local_irq_disable() # define raw_local_irq_enable() local_irq_enable() -# define raw_local_irq_save(flags) local_irq_save(flags) -# define raw_local_irq_restore(flags) local_irq_restore(flags) +# define raw_local_irq_save(flags) \ + do { \ + typecheck(unsigned long, flags); \ + local_irq_save(flags); \ + } while (0) +# define raw_local_irq_restore(flags) \ + do { \ + typecheck(unsigned long, flags); \ + local_irq_restore(flags); \ + } while (0) #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT @@ -89,7 +105,11 @@ raw_safe_halt(); \ } while (0) -#define local_save_flags(flags) raw_local_save_flags(flags) +#define local_save_flags(flags) \ + do { \ + typecheck(unsigned long, flags); \ + raw_local_save_flags(flags); \ + } while (0) #define irqs_disabled() \ ({ \ @@ -99,7 +119,11 @@ raw_irqs_disabled_flags(_flags); \ }) -#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags) +#define irqs_disabled_flags(flags) \ +({ \ + typecheck(unsigned long, flags); \ + raw_irqs_disabled_flags(flags); \ +}) #endif /* CONFIG_X86 */ #endif diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 00c1801099f..57aefa160a9 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -6,6 +6,7 @@ #define _LINUX_KALLSYMS_H #include <linux/errno.h> +#include <linux/kernel.h> #include <linux/stddef.h> #define KSYM_NAME_LEN 128 @@ -105,18 +106,10 @@ static inline void print_fn_descriptor_symbol(const char *fmt, void *addr) print_symbol(fmt, (unsigned long)addr); } -#ifndef CONFIG_64BIT -#define print_ip_sym(ip) \ -do { \ - printk("[<%08lx>]", ip); \ - print_symbol(" %s\n", ip); \ -} while(0) -#else -#define print_ip_sym(ip) \ -do { \ - printk("[<%016lx>]", ip); \ - print_symbol(" %s\n", ip); \ -} while(0) -#endif +static inline void print_ip_sym(unsigned long ip) +{ + printk("[<%p>]", (void *) ip); + print_symbol(" %s\n", ip); +} #endif /*_LINUX_KALLSYMS_H*/ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index f9cd7a513f9..fdbbf72ca2e 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -14,6 +14,8 @@ #include <linux/compiler.h> #include <linux/bitops.h> #include <linux/log2.h> +#include <linux/typecheck.h> +#include <linux/ratelimit.h> #include <asm/byteorder.h> #include <asm/bug.h> @@ -188,11 +190,8 @@ asmlinkage int vprintk(const char *fmt, va_list args) asmlinkage int printk(const char * fmt, ...) __attribute__ ((format (printf, 1, 2))) __cold; -extern int printk_ratelimit_jiffies; -extern int printk_ratelimit_burst; +extern struct ratelimit_state printk_ratelimit_state; extern int printk_ratelimit(void); -extern int __ratelimit(int ratelimit_jiffies, int ratelimit_burst); -extern int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst); extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msec); #else @@ -203,8 +202,6 @@ static inline int printk(const char *s, ...) __attribute__ ((format (printf, 1, 2))); static inline int __cold printk(const char *s, ...) 
{ return 0; } static inline int printk_ratelimit(void) { return 0; } -static inline int __printk_ratelimit(int ratelimit_jiffies, \ - int ratelimit_burst) { return 0; } static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \ unsigned int interval_msec) \ { return false; } @@ -441,26 +438,6 @@ static inline char *pack_hex_byte(char *buf, u8 byte) const typeof( ((type *)0)->member ) *__mptr = (ptr); \ (type *)( (char *)__mptr - offsetof(type,member) );}) -/* - * Check at compile time that something is of a particular type. - * Always evaluates to 1 so you may use it easily in comparisons. - */ -#define typecheck(type,x) \ -({ type __dummy; \ - typeof(x) __dummy2; \ - (void)(&__dummy == &__dummy2); \ - 1; \ -}) - -/* - * Check at compile time that 'function' is a certain type, or is a pointer - * to that type (needs to use typedef for the function type.) - */ -#define typecheck_fn(type,function) \ -({ typeof(type) __tmp = function; \ - (void)__tmp; \ -}) - struct sysinfo; extern int do_sysinfo(struct sysinfo *info); diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 3265968cd2c..82f88a8a827 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -83,6 +83,7 @@ struct kimage { unsigned long start; struct page *control_code_page; + struct page *swap_page; unsigned long nr_segments; struct kexec_segment segment[KEXEC_SEGMENT_MAX]; @@ -98,18 +99,20 @@ struct kimage { unsigned int type : 1; #define KEXEC_TYPE_DEFAULT 0 #define KEXEC_TYPE_CRASH 1 + unsigned int preserve_context : 1; }; /* kexec interface functions */ -extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET; +extern void machine_kexec(struct kimage *image); extern int machine_kexec_prepare(struct kimage *image); extern void machine_kexec_cleanup(struct kimage *image); extern asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, struct kexec_segment __user *segments, unsigned long flags); +extern int kernel_kexec(void); #ifdef CONFIG_COMPAT extern asmlinkage long compat_sys_kexec_load(unsigned long entry, unsigned long nr_segments, @@ -156,8 +159,9 @@ extern struct kimage *kexec_crash_image; #define kexec_flush_icache_page(page) #endif -#define KEXEC_ON_CRASH 0x00000001 -#define KEXEC_ARCH_MASK 0xffff0000 +#define KEXEC_ON_CRASH 0x00000001 +#define KEXEC_PRESERVE_CONTEXT 0x00000002 +#define KEXEC_ARCH_MASK 0xffff0000 /* These values match the ELF architecture values. * Unless there is a good reason that should continue to be the case. @@ -174,7 +178,12 @@ extern struct kimage *kexec_crash_image; #define KEXEC_ARCH_MIPS_LE (10 << 16) #define KEXEC_ARCH_MIPS ( 8 << 16) -#define KEXEC_FLAGS (KEXEC_ON_CRASH) /* List of defined/legal kexec flags */ +/* List of defined/legal kexec flags */ +#ifndef CONFIG_KEXEC_JUMP +#define KEXEC_FLAGS KEXEC_ON_CRASH +#else +#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT) +#endif #define VMCOREINFO_BYTES (4096) #define VMCOREINFO_NOTE_NAME "VMCOREINFO" diff --git a/include/linux/kmod.h b/include/linux/kmod.h index 0509c4ce485..a1a91577813 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h @@ -19,6 +19,7 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ +#include <linux/gfp.h> #include <linux/stddef.h> #include <linux/errno.h> #include <linux/compiler.h> @@ -41,8 +42,8 @@ struct file; struct subprocess_info; /* Allocate a subprocess_info structure */ -struct subprocess_info *call_usermodehelper_setup(char *path, - char **argv, char **envp); +struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, + char **envp, gfp_t gfp_mask); /* Set various pieces of state into the subprocess_info structure */ void call_usermodehelper_setkeys(struct subprocess_info *info, @@ -69,8 +70,9 @@ static inline int call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait) { struct subprocess_info *info; + gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; - info = call_usermodehelper_setup(path, argv, envp); + info = call_usermodehelper_setup(path, argv, envp, gfp_mask); if (info == NULL) return -ENOMEM; return call_usermodehelper_exec(info, wait); @@ -81,8 +83,9 @@ call_usermodehelper_keys(char *path, char **argv, char **envp, struct key *session_keyring, enum umh_wait wait) { struct subprocess_info *info; + gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; - info = call_usermodehelper_setup(path, argv, envp); + info = call_usermodehelper_setup(path, argv, envp, gfp_mask); if (info == NULL) return -ENOMEM; diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 60f0d418ae3..5437ac0276e 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -186,6 +186,8 @@ extern struct kobject *kset_find_obj(struct kset *, const char *); /* The global /sys/kernel/ kobject for people to chain off of */ extern struct kobject *kernel_kobj; +/* The global /sys/kernel/mm/ kobject for people to chain off of */ +extern struct kobject *mm_kobj; /* The global /sys/hypervisor/ kobject for people to chain off of */ extern struct kobject *hypervisor_kobj; /* The global /sys/power/ kobject for people to chain off of */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 04a3556bdea..0be7795655f 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -157,11 +157,10 @@ struct kretprobe { int nmissed; size_t data_size; struct hlist_head free_instances; - struct hlist_head used_instances; + spinlock_t lock; }; struct kretprobe_instance { - struct hlist_node uflist; /* either on free list or used list */ struct hlist_node hlist; struct kretprobe *rp; kprobe_opcode_t *ret_addr; @@ -201,7 +200,6 @@ static inline int init_test_probes(void) } #endif /* CONFIG_KPROBES_SANITY_TEST */ -extern spinlock_t kretprobe_lock; extern struct mutex kprobe_mutex; extern int arch_prepare_kprobe(struct kprobe *p); extern void arch_arm_kprobe(struct kprobe *p); @@ -214,6 +212,9 @@ extern void kprobes_inc_nmissed_count(struct kprobe *p); /* Get the kprobe at this addr (if any) - called with preemption disabled */ struct kprobe *get_kprobe(void *addr); +void kretprobe_hash_lock(struct task_struct *tsk, + struct hlist_head **head, unsigned long *flags); +void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags); struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk); /* kprobe_running() will just return the current_kprobe on this CPU */ diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 00dd957e245..aabc8a13ba7 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -6,7 +6,8 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), void *data, - const char namefmt[], ...); + const char namefmt[], ...) 
+ __attribute__((format(printf, 3, 4))); /** * kthread_run - create and wake a thread. diff --git a/include/linux/lcd.h b/include/linux/lcd.h index 1d379787f2e..173febac665 100644 --- a/include/linux/lcd.h +++ b/include/linux/lcd.h @@ -47,7 +47,7 @@ struct lcd_ops { int (*set_contrast)(struct lcd_device *, int contrast); /* Check if given framebuffer device is the one LCD is bound to; return 0 if not, !=0 if it is. If NULL, lcd always matches the fb. */ - int (*check_fb)(struct fb_info *); + int (*check_fb)(struct lcd_device *, struct fb_info *); }; struct lcd_device { diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h new file mode 100644 index 00000000000..81b4207deb9 --- /dev/null +++ b/include/linux/leds-pca9532.h @@ -0,0 +1,45 @@ +/* + * pca9532.h - platform data structure for pca9532 led controller + * + * Copyright (C) 2008 Riku Voipio <riku.voipio@movial.fi> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * Datasheet: http://www.nxp.com/acrobat/datasheets/PCA9532_3.pdf + * + */ + +#ifndef __LINUX_PCA9532_H +#define __LINUX_PCA9532_H + +#include <linux/leds.h> + +enum pca9532_state { + PCA9532_OFF = 0x0, + PCA9532_ON = 0x1, + PCA9532_PWM0 = 0x2, + PCA9532_PWM1 = 0x3 +}; + +enum pca9532_type { PCA9532_TYPE_NONE, PCA9532_TYPE_LED, + PCA9532_TYPE_N2100_BEEP }; + +struct pca9532_led { + u8 id; + struct i2c_client *client; + char *name; + struct led_classdev ldev; + enum pca9532_type type; + enum pca9532_state state; +}; + +struct pca9532_platform_data { + struct pca9532_led leds[16]; + u8 pwm[2]; + u8 psc[2]; +}; + +#endif /* __LINUX_PCA9532_H */ + diff --git a/include/linux/leds.h b/include/linux/leds.h index 519df72e939..d41ccb56146 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -48,7 +48,7 @@ struct led_classdev { struct device *dev; struct list_head node; /* LED Device list */ - char *default_trigger; /* Trigger to use */ + const char *default_trigger; /* Trigger to use */ #ifdef CONFIG_LEDS_TRIGGERS /* Protects the trigger data below */ @@ -118,6 +118,20 @@ extern void ledtrig_ide_activity(void); #define ledtrig_ide_activity() do {} while(0) #endif +/* + * Generic LED platform data for describing LED names and default triggers. + */ +struct led_info { + const char *name; + char *default_trigger; + int flags; +}; + +struct led_platform_data { + int num_leds; + struct led_info *leds; +}; + /* For the leds-gpio driver */ struct gpio_led { const char *name; diff --git a/include/linux/list.h b/include/linux/list.h index 139ec41d9c2..453916bc041 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -61,14 +61,10 @@ extern void __list_add(struct list_head *new, * Insert a new entry after the specified head. * This is good for implementing stacks. 
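 *
 * A minimal sketch of that stack usage (the item type and list name
 * below are illustrative only):
 *
 *	LIST_HEAD(stack);
 *	struct item { struct list_head link; } *it, *top;
 *
 *	list_add(&it->link, &stack);				push
 *	top = list_first_entry(&stack, struct item, link);	peek
 *	list_del(&top->link);					pop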
*/ -#ifndef CONFIG_DEBUG_LIST static inline void list_add(struct list_head *new, struct list_head *head) { __list_add(new, head, head->next); } -#else -extern void list_add(struct list_head *new, struct list_head *head); -#endif /** diff --git a/include/linux/major.h b/include/linux/major.h index 0cb98053537..53d5fafd85c 100644 --- a/include/linux/major.h +++ b/include/linux/major.h @@ -53,7 +53,7 @@ #define STL_SIOMEMMAJOR 28 #define ACSI_MAJOR 28 #define AZTECH_CDROM_MAJOR 29 -#define GRAPHDEV_MAJOR 29 /* SparcLinux & Linux/68k /dev/fb */ +#define FB_MAJOR 29 /* /dev/fb* framebuffers */ #define CM206_CDROM_MAJOR 32 #define IDE2_MAJOR 33 #define IDE3_MAJOR 34 diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e6608776bc9..fdf3967e139 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -35,7 +35,10 @@ extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm, extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); extern void mem_cgroup_uncharge_page(struct page *page); +extern void mem_cgroup_uncharge_cache_page(struct page *page); extern void mem_cgroup_move_lists(struct page *page, bool active); +extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask); + extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, struct list_head *dst, unsigned long *scanned, int order, @@ -50,9 +53,9 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); #define mm_match_cgroup(mm, cgroup) \ ((cgroup) == mem_cgroup_from_task((mm)->owner)) -extern int mem_cgroup_prepare_migration(struct page *page); +extern int +mem_cgroup_prepare_migration(struct page *page, struct page *newpage); extern void mem_cgroup_end_migration(struct page *page); -extern void mem_cgroup_page_migration(struct page *page, struct page *newpage); /* * For memory reclaim. @@ -97,6 +100,15 @@ static inline void mem_cgroup_uncharge_page(struct page *page) { } +static inline void mem_cgroup_uncharge_cache_page(struct page *page) +{ +} + +static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) +{ + return 0; +} + static inline void mem_cgroup_move_lists(struct page *page, bool active) { } @@ -112,7 +124,8 @@ static inline int task_in_mem_cgroup(struct task_struct *task, return 1; } -static inline int mem_cgroup_prepare_migration(struct page *page) +static inline int +mem_cgroup_prepare_migration(struct page *page, struct page *newpage) { return 0; } @@ -121,11 +134,6 @@ static inline void mem_cgroup_end_migration(struct page *page) { } -static inline void -mem_cgroup_page_migration(struct page *page, struct page *newpage) -{ -} - static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem) { return 0; diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index ea9f5ad9ec8..763ba81fc0f 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -13,12 +13,12 @@ struct mem_section; #ifdef CONFIG_MEMORY_HOTPLUG /* - * Magic number for free bootmem. + * Types for free bootmem. * The normal smallest mapcount is -1. Here is smaller value than it. 
*/ -#define SECTION_INFO 0xfffffffe -#define MIX_INFO 0xfffffffd -#define NODE_INFO 0xfffffffc +#define SECTION_INFO (-1 - 1) +#define MIX_SECTION_INFO (-1 - 2) +#define NODE_INFO (-1 - 3) /* * pgdat resizing functions @@ -199,6 +199,18 @@ extern int walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg, int (*func)(unsigned long, unsigned long, void *)); +#ifdef CONFIG_MEMORY_HOTREMOVE + +extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); + +#else +static inline int is_mem_section_removable(unsigned long pfn, + unsigned long nr_pages) +{ + return 0; +} +#endif /* CONFIG_MEMORY_HOTREMOVE */ + extern int add_memory(int nid, u64 start, u64 size); extern int arch_add_memory(int nid, u64 start, u64 size); extern int remove_memory(u64 start, u64 size); diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 3a39570b81b..085c903fe0f 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -59,6 +59,7 @@ enum { #include <linux/rbtree.h> #include <linux/spinlock.h> #include <linux/nodemask.h> +#include <linux/pagemap.h> struct mm_struct; @@ -220,6 +221,24 @@ extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context); extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context); #endif + +/* Check if a vma is migratable */ +static inline int vma_migratable(struct vm_area_struct *vma) +{ + if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED)) + return 0; + /* + * Migration allocates pages in the highest zone. If we cannot + * do so then migration (at least from node to node) is not + * possible. + */ + if (vma->vm_file && + gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) + < policy_zone) + return 0; + return 1; +} + #else struct mempolicy {}; diff --git a/include/linux/memstick.h b/include/linux/memstick.h index 37a5cdb0391..a9f998a3f48 100644 --- a/include/linux/memstick.h +++ b/include/linux/memstick.h @@ -263,6 +263,10 @@ struct memstick_dev { /* Get next request from the media driver. */ int (*next_request)(struct memstick_dev *card, struct memstick_request **mrq); + /* Tell the media driver to stop doing things */ + void (*stop)(struct memstick_dev *card); + /* Allow the media driver to continue */ + void (*start)(struct memstick_dev *card); struct device dev; }; @@ -284,7 +288,7 @@ struct memstick_host { /* Notify the host that some requests are pending. */ void (*request)(struct memstick_host *host); /* Set host IO parameters (power, clock, etc). */ - void (*set_param)(struct memstick_host *host, + int (*set_param)(struct memstick_host *host, enum memstick_param param, int value); unsigned long private[0] ____cacheline_aligned; diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h new file mode 100644 index 00000000000..bb3dd054592 --- /dev/null +++ b/include/linux/mfd/core.h @@ -0,0 +1,55 @@ +#ifndef MFD_CORE_H +#define MFD_CORE_H +/* + * drivers/mfd/mfd-core.h + * + * core MFD support + * Copyright (c) 2006 Ian Molton + * Copyright (c) 2007 Dmitry Baryshkov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/platform_device.h> + +/* + * This struct describes the MFD part ("cell"). 
+ * After registration the copy of this structure will become the platform data + * of the resulting platform_device + */ +struct mfd_cell { + const char *name; + + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + void *driver_data; /* driver-specific data */ + + /* + * This resources can be specified relatievly to the parent device. + * For accessing device you should use resources from device + */ + int num_resources; + const struct resource *resources; +}; + +static inline struct mfd_cell * +mfd_get_cell(struct platform_device *pdev) +{ + return (struct mfd_cell *)pdev->dev.platform_data; +} + +extern int mfd_add_devices( + struct platform_device *parent, + const struct mfd_cell *cells, int n_devs, + struct resource *mem_base, + int irq_base); + +extern void mfd_remove_devices(struct platform_device *parent); + +#endif diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h new file mode 100644 index 00000000000..7cc824a58f7 --- /dev/null +++ b/include/linux/mfd/tc6393xb.h @@ -0,0 +1,49 @@ +/* + * Toshiba TC6393XB SoC support + * + * Copyright(c) 2005-2006 Chris Humbert + * Copyright(c) 2005 Dirk Opfer + * Copyright(c) 2005 Ian Molton <spyro@f2s.com> + * Copyright(c) 2007 Dmitry Baryshkov + * + * Based on code written by Sharp/Lineo for 2.4 kernels + * Based on locomo.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef TC6393XB_H +#define TC6393XB_H + +/* Also one should provide the CK3P6MI clock */ +struct tc6393xb_platform_data { + u16 scr_pll2cr; /* PLL2 Control */ + u16 scr_gper; /* GP Enable */ + u32 scr_gpo_doecr; /* GPO Data OE Control */ + u32 scr_gpo_dsr; /* GPO Data Set */ + + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + int irq_base; /* a base for cascaded irq */ + int gpio_base; + + struct tmio_nand_data *nand_data; +}; + +/* + * Relative to irq_base + */ +#define IRQ_TC6393_NAND 0 +#define IRQ_TC6393_MMC 1 +#define IRQ_TC6393_OHCI 2 +#define IRQ_TC6393_SERIAL 3 +#define IRQ_TC6393_FB 4 + +#define TC6393XB_NR_IRQS 8 + +#endif diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h new file mode 100644 index 00000000000..9438d8c9ac1 --- /dev/null +++ b/include/linux/mfd/tmio.h @@ -0,0 +1,17 @@ +#ifndef MFD_TMIO_H +#define MFD_TMIO_H + +/* + * data for the NAND controller + */ +struct tmio_nand_data { + struct nand_bbt_descr *badblock_pattern; + struct mtd_partition *partition; + unsigned int num_partitions; +}; + +#define TMIO_NAND_CONFIG "tmio-nand-config" +#define TMIO_NAND_CONTROL "tmio-nand-control" +#define TMIO_NAND_IRQ "tmio-nand" + +#endif diff --git a/include/linux/migrate.h b/include/linux/migrate.h index e10a90a93b5..03aea612d28 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -3,28 +3,10 @@ #include <linux/mm.h> #include <linux/mempolicy.h> -#include <linux/pagemap.h> typedef struct page *new_page_t(struct page *, unsigned long private, int **); #ifdef CONFIG_MIGRATION -/* Check if a vma is migratable */ -static inline int vma_migratable(struct vm_area_struct *vma) -{ - if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED)) - return 0; - /* - * Migration allocates pages in the 
highest zone. If we cannot - * do so then migration (at least from node to node) is not - * possible. - */ - if (vma->vm_file && - gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) - < policy_zone) - return 0; - return 1; -} - extern int isolate_lru_page(struct page *p, struct list_head *pagelist); extern int putback_lru_pages(struct list_head *l); extern int migrate_page(struct address_space *, @@ -39,9 +21,6 @@ extern int migrate_vmas(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, unsigned long flags); #else -static inline int vma_migratable(struct vm_area_struct *vma) - { return 0; } - static inline int isolate_lru_page(struct page *p, struct list_head *list) { return -ENOSYS; } static inline int putback_lru_pages(struct list_head *l) { return 0; } diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 81b3dd5206e..655ea0d1ee1 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -68,6 +68,14 @@ enum { MLX4_DEV_CAP_FLAG_UD_MCAST = 1 << 21 }; +enum { + MLX4_BMME_FLAG_LOCAL_INV = 1 << 6, + MLX4_BMME_FLAG_REMOTE_INV = 1 << 7, + MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, + MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10, + MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11, +}; + enum mlx4_event { MLX4_EVENT_TYPE_COMP = 0x00, MLX4_EVENT_TYPE_PATH_MIG = 0x01, @@ -184,6 +192,8 @@ struct mlx4_caps { u32 max_msg_sz; u32 page_size_cap; u32 flags; + u32 bmme_flags; + u32 reserved_lkey; u16 stat_rate_support; u8 port_width_cap[MLX4_MAX_PORTS + 1]; int max_gso_sz; diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 7f128b266fa..e27082cd650 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -219,7 +219,7 @@ struct mlx4_wqe_datagram_seg { __be32 reservd[2]; }; -struct mlx4_lso_seg { +struct mlx4_wqe_lso_seg { __be32 mss_hdr_size; __be32 header[0]; }; @@ -233,6 +233,14 @@ struct mlx4_wqe_bind_seg { __be64 length; }; +enum { + MLX4_WQE_FMR_PERM_LOCAL_READ = 1 << 27, + MLX4_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28, + MLX4_WQE_FMR_PERM_REMOTE_READ = 1 << 29, + MLX4_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30, + MLX4_WQE_FMR_PERM_ATOMIC = 1 << 31 +}; + struct mlx4_wqe_fmr_seg { __be32 flags; __be32 mem_key; @@ -255,11 +263,11 @@ struct mlx4_wqe_fmr_ext_seg { }; struct mlx4_wqe_local_inval_seg { - u8 flags; - u8 reserved1[3]; + __be32 flags; + u32 reserved1; __be32 mem_key; - u8 reserved2[3]; - u8 guest_id; + u32 reserved2[2]; + __be32 guest_id; __be64 pa; }; diff --git a/include/linux/mm.h b/include/linux/mm.h index 2128ef7780c..6e695eaab4c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -41,6 +41,9 @@ extern unsigned long mmap_min_addr; #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) +/* to align the pointer to the (next) page boundary */ +#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) + /* * Linux kernel virtual memory manager primitives. 
* The idea being to have a "virtual" mm in the same way @@ -100,6 +103,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ #define VM_RESERVED 0x00080000 /* Count as reserved_vm like IO */ #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ +#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */ #define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */ @@ -166,12 +170,16 @@ struct vm_operations_struct { void (*open)(struct vm_area_struct * area); void (*close)(struct vm_area_struct * area); int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); - unsigned long (*nopfn)(struct vm_area_struct *area, - unsigned long address); /* notification that a previously read-only page is about to become * writable, if an error is returned it will cause a SIGBUS */ int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page); + + /* called by access_process_vm when get_user_pages() fails, typically + * for use by special VMAs that can switch between memory and hardware + */ + int (*access)(struct vm_area_struct *vma, unsigned long addr, + void *buf, int len, int write); #ifdef CONFIG_NUMA /* * set_policy() op must add a reference to any non-NULL @new mempolicy @@ -675,13 +683,6 @@ static inline int page_mapped(struct page *page) } /* - * Error return values for the *_nopfn functions - */ -#define NOPFN_SIGBUS ((unsigned long) -1) -#define NOPFN_OOM ((unsigned long) -2) -#define NOPFN_REFAULT ((unsigned long) -3) - -/* * Different kinds of faults, as returned by handle_mm_fault(). * Used to decide whether a process gets delivered SIGBUS or * just gets major/minor fault counters bumped up. 
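The new ->access() method added to vm_operations_struct above lets access_process_vm() (and therefore ptrace) reach mappings for which get_user_pages() fails, such as PFN remappings of device memory. A sketch of a driver wiring it up, assuming the generic_access_phys() helper declared in the following hunk is available on the architecture; all names are illustrative:

#include <linux/mm.h>

/*
 * Illustrative only: forward the access to the physical memory behind
 * an io-remapped VMA so a debugger can still peek and poke the mapping.
 */
static int example_vma_access(struct vm_area_struct *vma, unsigned long addr,
			      void *buf, int len, int write)
{
	return generic_access_phys(vma, addr, buf, len, write);
}

static struct vm_operations_struct example_vm_ops = {
	.access	= example_vma_access,
};
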
@@ -772,14 +773,14 @@ struct mm_walk { int walk_page_range(unsigned long addr, unsigned long end, struct mm_walk *walk); -void free_pgd_range(struct mmu_gather **tlb, unsigned long addr, +void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); -void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma, - unsigned long floor, unsigned long ceiling); int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma); void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); +int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, + void *buf, int len, int write); static inline void unmap_shared_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen) @@ -809,7 +810,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void * int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int len, int write, int force, struct page **pages, struct vm_area_struct **vmas); -void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long); extern int try_to_release_page(struct page * page, gfp_t gfp_mask); extern void do_invalidatepage(struct page *page, unsigned long offset); @@ -832,6 +832,39 @@ extern int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags); +#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST +/* + * get_user_pages_fast provides equivalent functionality to get_user_pages, + * operating on current and current->mm (force=0 and doesn't return any vmas). + * + * get_user_pages_fast may take mmap_sem and page tables, so no assumptions + * can be made about locking. get_user_pages_fast is to be implemented in a + * way that is advantageous (vs get_user_pages()) when the user memory area is + * already faulted in and present in ptes. However if the pages have to be + * faulted in, it may turn out to be slightly slower). + */ +int get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages); + +#else +/* + * Should probably be moved to asm-generic, and architectures can include it if + * they don't implement their own get_user_pages_fast. + */ +#define get_user_pages_fast(start, nr_pages, write, pages) \ +({ \ + struct mm_struct *mm = current->mm; \ + int ret; \ + \ + down_read(&mm->mmap_sem); \ + ret = get_user_pages(current, mm, start, nr_pages, \ + write, 0, pages, NULL); \ + up_read(&mm->mmap_sem); \ + \ + ret; \ +}) +#endif + /* * A callback you can register to apply pressure to ageable caches. 
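A minimal caller sketch for the get_user_pages_fast() interface added above; on configurations without CONFIG_HAVE_GET_USER_PAGES_FAST it resolves to the mmap_sem-taking fallback shown in the hunk. The helper name and single-page usage are illustrative only:

#include <linux/mm.h>

/*
 * Illustrative only: pin one user page, use it briefly, then drop the
 * reference that get_user_pages_fast() took on it.
 */
static int example_with_pinned_page(unsigned long uaddr, int write)
{
	struct page *page;
	int ret;

	ret = get_user_pages_fast(uaddr, 1, write, &page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... access the page contents, e.g. via kmap()/kunmap() ... */

	put_page(page);
	return 0;
}
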
* @@ -965,9 +998,8 @@ static inline void pgtable_page_dtor(struct page *page) NULL: pte_offset_kernel(pmd, address)) extern void free_area_init(unsigned long * zones_size); -extern void free_area_init_node(int nid, pg_data_t *pgdat, - unsigned long * zones_size, unsigned long zone_start_pfn, - unsigned long *zholes_size); +extern void free_area_init_node(int nid, unsigned long * zones_size, + unsigned long zone_start_pfn, unsigned long *zholes_size); #ifdef CONFIG_ARCH_POPULATES_NODE_MAP /* * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 02a27ae7853..746f975b58e 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -159,6 +159,17 @@ struct vm_area_struct { #endif }; +struct core_thread { + struct task_struct *task; + struct core_thread *next; +}; + +struct core_state { + atomic_t nr_threads; + struct core_thread dumper; + struct completion startup; +}; + struct mm_struct { struct vm_area_struct * mmap; /* list of VMAs */ struct rb_root mm_rb; @@ -175,7 +186,6 @@ struct mm_struct { atomic_t mm_users; /* How many users with user space? */ atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ int map_count; /* number of VMAs */ - int core_waiters; struct rw_semaphore mmap_sem; spinlock_t page_table_lock; /* Protects page tables and some counters */ @@ -219,8 +229,7 @@ struct mm_struct { unsigned long flags; /* Must use atomic bitops to access the bits */ - /* coredumping support */ - struct completion *core_startup_done, core_done; + struct core_state *core_state; /* coredumping support */ /* aio bits */ rwlock_t ioctx_list_lock; /* aio lock */ diff --git a/include/linux/module.h b/include/linux/module.h index fce15ebd0e1..68e09557c95 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -23,7 +23,7 @@ /* Not Yet Implemented */ #define MODULE_SUPPORTED_DEVICE(name) -/* v850 toolchain uses a `_' prefix for all user symbols */ +/* some toolchains uses a `_' prefix for all user symbols */ #ifndef MODULE_SYMBOL_PREFIX #define MODULE_SYMBOL_PREFIX "" #endif diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h index 81cd36b735b..ba63858056c 100644 --- a/include/linux/msdos_fs.h +++ b/include/linux/msdos_fs.h @@ -2,11 +2,11 @@ #define _LINUX_MSDOS_FS_H #include <linux/magic.h> +#include <asm/byteorder.h> /* * The MS-DOS filesystem constants/structures */ -#include <asm/byteorder.h> #define SECTOR_SIZE 512 /* sector size (bytes) */ #define SECTOR_BITS 9 /* log2(SECTOR_SIZE) */ @@ -89,24 +89,22 @@ #define IS_FSINFO(x) (le32_to_cpu((x)->signature1) == FAT_FSINFO_SIG1 \ && le32_to_cpu((x)->signature2) == FAT_FSINFO_SIG2) +struct __fat_dirent { + long d_ino; + __kernel_off_t d_off; + unsigned short d_reclen; + char d_name[256]; /* We must not include limits.h! 
*/ +}; + /* * ioctl commands */ -#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct dirent [2]) -#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct dirent [2]) +#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct __fat_dirent[2]) +#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct __fat_dirent[2]) /* <linux/videotext.h> has used 0x72 ('r') in collision, so skip a few */ #define FAT_IOCTL_GET_ATTRIBUTES _IOR('r', 0x10, __u32) #define FAT_IOCTL_SET_ATTRIBUTES _IOW('r', 0x11, __u32) -/* - * vfat shortname flags - */ -#define VFAT_SFN_DISPLAY_LOWER 0x0001 /* convert to lowercase for display */ -#define VFAT_SFN_DISPLAY_WIN95 0x0002 /* emulate win95 rule for display */ -#define VFAT_SFN_DISPLAY_WINNT 0x0004 /* emulate winnt rule for display */ -#define VFAT_SFN_CREATE_WIN95 0x0100 /* emulate win95 rule for create */ -#define VFAT_SFN_CREATE_WINNT 0x0200 /* emulate winnt rule for create */ - struct fat_boot_sector { __u8 ignored[3]; /* Boot strap short or near jump */ __u8 system_id[8]; /* Name - can be used to special case @@ -168,14 +166,6 @@ struct msdos_dir_slot { __u8 name11_12[4]; /* last 2 characters in name */ }; -struct fat_slot_info { - loff_t i_pos; /* on-disk position of directory entry */ - loff_t slot_off; /* offset for slot or de start */ - int nr_slots; /* number of slots + 1(de) in filename */ - struct msdos_dir_entry *de; - struct buffer_head *bh; -}; - #ifdef __KERNEL__ #include <linux/buffer_head.h> @@ -184,6 +174,15 @@ struct fat_slot_info { #include <linux/fs.h> #include <linux/mutex.h> +/* + * vfat shortname flags + */ +#define VFAT_SFN_DISPLAY_LOWER 0x0001 /* convert to lowercase for display */ +#define VFAT_SFN_DISPLAY_WIN95 0x0002 /* emulate win95 rule for display */ +#define VFAT_SFN_DISPLAY_WINNT 0x0004 /* emulate winnt rule for display */ +#define VFAT_SFN_CREATE_WIN95 0x0100 /* emulate win95 rule for create */ +#define VFAT_SFN_CREATE_WINNT 0x0200 /* emulate winnt rule for create */ + struct fat_mount_options { uid_t fs_uid; gid_t fs_gid; @@ -202,10 +201,10 @@ struct fat_mount_options { utf8:1, /* Use of UTF-8 character set (Default) */ unicode_xlate:1, /* create escape sequences for unhandled Unicode */ numtail:1, /* Does first alias have a numeric '~1' type tail? */ - atari:1, /* Use Atari GEMDOS variation of MS-DOS fs */ flush:1, /* write things quickly */ nocase:1, /* Does this need case conversion? 
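Exporting struct __fat_dirent means the VFAT readdir ioctls no longer depend on the removed <linux/dirent.h>. From userspace the interface looks roughly like the sketch below; the mount point is arbitrary, and the convention that an entry with d_reclen == 0 marks end of directory follows the FAT driver's behaviour, so treat it as an assumption worth verifying against fs/fat/dir.c:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>
    #include <linux/msdos_fs.h>

    int main(void)
    {
        struct __fat_dirent de[2];   /* the two name forms per entry */
        int fd = open("/mnt/vfat", O_RDONLY);

        if (fd < 0)
            return 1;

        while (ioctl(fd, VFAT_IOCTL_READDIR_BOTH, de) >= 0) {
            if (de[0].d_reclen == 0)    /* assumed end-of-directory marker */
                break;
            printf("%s\t%s\n", de[0].d_name, de[1].d_name);
        }
        close(fd);
        return 0;
    }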
0=need case conversion*/ - usefree:1; /* Use free_clusters for FAT32 */ + usefree:1, /* Use free_clusters for FAT32 */ + tz_utc:1; /* Filesystem timestamps are in UTC */ }; #define FAT_HASH_BITS 8 @@ -267,6 +266,14 @@ struct msdos_inode_info { struct inode vfs_inode; }; +struct fat_slot_info { + loff_t i_pos; /* on-disk position of directory entry */ + loff_t slot_off; /* offset for slot or de start */ + int nr_slots; /* number of slots + 1(de) in filename */ + struct msdos_dir_entry *de; + struct buffer_head *bh; +}; + static inline struct msdos_sb_info *MSDOS_SB(struct super_block *sb) { return sb->s_fs_info; @@ -428,8 +435,9 @@ extern int fat_flush_inodes(struct super_block *sb, struct inode *i1, extern void fat_fs_panic(struct super_block *s, const char *fmt, ...); extern void fat_clusters_flush(struct super_block *sb); extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster); -extern int date_dos2unix(unsigned short time, unsigned short date); -extern void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date); +extern int date_dos2unix(unsigned short time, unsigned short date, int tz_utc); +extern void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date, + int tz_utc); extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs); int fat_cache_init(void); diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h index f71201d0f3e..6316fafe5c2 100644 --- a/include/linux/mtd/ubi.h +++ b/include/linux/mtd/ubi.h @@ -45,13 +45,13 @@ enum { * @size: how many physical eraseblocks are reserved for this volume * @used_bytes: how many bytes of data this volume contains * @used_ebs: how many physical eraseblocks of this volume actually contain any - * data + * data * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) * @corrupted: non-zero if the volume is corrupted (static volumes only) * @upd_marker: non-zero if the volume has update marker set * @alignment: volume alignment * @usable_leb_size: how many bytes are available in logical eraseblocks of - * this volume + * this volume * @name_len: volume name length * @name: volume name * @cdev: UBI volume character device major and minor numbers @@ -152,6 +152,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum); int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum); int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype); int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum); +int ubi_sync(int ubi_num); /* * This function is the same as the 'ubi_leb_read()' function, but it does not diff --git a/include/linux/net.h b/include/linux/net.h index 150a48c68d5..4a9a30f2d68 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -20,6 +20,7 @@ #include <linux/wait.h> #include <linux/socket.h> +#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */ #include <asm/socket.h> struct poll_table_struct; @@ -46,6 +47,7 @@ struct net; #define SYS_GETSOCKOPT 15 /* sys_getsockopt(2) */ #define SYS_SENDMSG 16 /* sys_sendmsg(2) */ #define SYS_RECVMSG 17 /* sys_recvmsg(2) */ +#define SYS_PACCEPT 18 /* sys_paccept(2) */ typedef enum { SS_FREE = 0, /* not allocated */ @@ -94,6 +96,15 @@ enum sock_type { }; #define SOCK_MAX (SOCK_PACKET + 1) +/* Mask which covers at least up to SOCK_MASK-1. The + * remaining bits are used as flags. 
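The extra tz_utc argument simply threads the new mount option through the FAT timestamp converters, so on-disk times can be treated as UTC instead of local time. An in-kernel caller passes the per-superblock flag along these lines (the wrapper name is invented; MSDOS_SB(), fat_mount_options.tz_utc and fat_date_unix2dos() are the items added or changed by this patch):

    static void fat_stamp_mtime(struct inode *inode, struct msdos_dir_entry *de)
    {
        struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);

        /* Encode mtime into the on-disk 16-bit time/date fields,
         * interpreting it as UTC when the tz_utc option is set. */
        fat_date_unix2dos(inode->i_mtime.tv_sec, &de->time, &de->date,
                          sbi->options.tz_utc);
    }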
*/ +#define SOCK_TYPE_MASK 0xf + +/* Flags for socket, socketpair, paccept */ +#define SOCK_CLOEXEC O_CLOEXEC +#ifndef SOCK_NONBLOCK +#define SOCK_NONBLOCK O_NONBLOCK +#endif #endif /* ARCH_HAS_SOCKET_TYPES */ @@ -208,10 +219,12 @@ extern int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len); extern int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags); -extern int sock_map_fd(struct socket *sock); +extern int sock_map_fd(struct socket *sock, int flags); extern struct socket *sockfd_lookup(int fd, int *err); #define sockfd_put(sock) fput(sock->file) extern int net_ratelimit(void); +extern long do_accept(int fd, struct sockaddr __user *upeer_sockaddr, + int __user *upeer_addrlen, int flags); #define net_random() random32() #define net_srandom(seed) srandom32((__force u32)seed) @@ -338,8 +351,7 @@ static const struct proto_ops name##_ops = { \ #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> -extern int net_msg_cost; -extern int net_msg_burst; +extern struct ratelimit_state net_ratelimit_state; #endif #endif /* __KERNEL__ */ diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h index a2861d95ecc..108f47e5fd9 100644 --- a/include/linux/nfsd/nfsd.h +++ b/include/linux/nfsd/nfsd.h @@ -12,7 +12,6 @@ #include <linux/types.h> #include <linux/unistd.h> -#include <linux/dirent.h> #include <linux/fs.h> #include <linux/posix_acl.h> #include <linux/mount.h> diff --git a/include/linux/notifier.h b/include/linux/notifier.h index bd3d72ddf33..da2698b0fdd 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -214,6 +214,8 @@ static inline int notifier_to_errno(int ret) #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ #define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, * not handling interrupts, soon dead */ +#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug + * lock is dropped */ /* Used for CPU hotplug events occuring while tasks are frozen due to a suspend * operation in progress diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index 0e66b57631f..c8a768e5964 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h @@ -82,9 +82,12 @@ static inline void get_nsproxy(struct nsproxy *ns) } #ifdef CONFIG_CGROUP_NS -int ns_cgroup_clone(struct task_struct *tsk); +int ns_cgroup_clone(struct task_struct *tsk, struct pid *pid); #else -static inline int ns_cgroup_clone(struct task_struct *tsk) { return 0; } +static inline int ns_cgroup_clone(struct task_struct *tsk, struct pid *pid) +{ + return 0; +} #endif #endif diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 0d2a4e7012a..54590a9a103 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -96,7 +96,22 @@ enum pageflags { #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR PG_uncached, /* Page has been mapped as uncached */ #endif - __NR_PAGEFLAGS + __NR_PAGEFLAGS, + + /* Filesystems */ + PG_checked = PG_owner_priv_1, + + /* XEN */ + PG_pinned = PG_owner_priv_1, + PG_savepinned = PG_dirty, + + /* SLOB */ + PG_slob_page = PG_active, + PG_slob_free = PG_private, + + /* SLUB */ + PG_slub_frozen = PG_active, + PG_slub_debug = PG_error, }; #ifndef __GENERATING_BOUNDS_H @@ -155,13 +170,19 @@ PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru) PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active) __PAGEFLAG(Slab, slab) -PAGEFLAG(Checked, owner_priv_1) /* Used by some filesystems */ -PAGEFLAG(Pinned, owner_priv_1) TESTSCFLAG(Pinned, 
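SOCK_TYPE_MASK splits the classic type argument of socket(2) into the real type in the low four bits and flags above them, which is what lets SOCK_CLOEXEC and SOCK_NONBLOCK ride along without a new syscall per combination; on the kernel side the type is recovered with type & SOCK_TYPE_MASK and the remaining bits are treated as flags. From userspace, with a libc that exposes the new flags, usage looks like this sketch:

    #include <sys/socket.h>
    #include <errno.h>

    static int open_stream_socket(void)
    {
        /* Close-on-exec and non-blocking from the start, with no window
         * for a forked child to inherit the descriptor first. */
        int fd = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0);

        if (fd < 0 && errno == EINVAL)
            /* Older kernel: fall back to a plain socket plus fcntl(). */
            fd = socket(AF_INET, SOCK_STREAM, 0);

        return fd;
    }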
owner_priv_1) /* Xen */ -PAGEFLAG(SavePinned, dirty); /* Xen */ +PAGEFLAG(Checked, checked) /* Used by some filesystems */ +PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ +PAGEFLAG(SavePinned, savepinned); /* Xen */ PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private) __SETPAGEFLAG(Private, private) +__PAGEFLAG(SlobPage, slob_page) +__PAGEFLAG(SlobFree, slob_free) + +__PAGEFLAG(SlubFrozen, slub_frozen) +__PAGEFLAG(SlubDebug, slub_debug) + /* * Only test-and-set exist for PG_writeback. The unconditional operators are * risky: they bypass page accounting. diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index d2fca802f80..a81d8189042 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -12,6 +12,7 @@ #include <asm/uaccess.h> #include <linux/gfp.h> #include <linux/bitops.h> +#include <linux/hardirq.h> /* for in_interrupt() */ /* * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page @@ -22,7 +23,7 @@ static inline void mapping_set_error(struct address_space *mapping, int error) { - if (error) { + if (unlikely(error)) { if (error == -ENOSPC) set_bit(AS_ENOSPC, &mapping->flags); else @@ -62,6 +63,98 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) #define page_cache_release(page) put_page(page) void release_pages(struct page **pages, int nr, int cold); +/* + * speculatively take a reference to a page. + * If the page is free (_count == 0), then _count is untouched, and 0 + * is returned. Otherwise, _count is incremented by 1 and 1 is returned. + * + * This function must be called inside the same rcu_read_lock() section as has + * been used to lookup the page in the pagecache radix-tree (or page table): + * this allows allocators to use a synchronize_rcu() to stabilize _count. + * + * Unless an RCU grace period has passed, the count of all pages coming out + * of the allocator must be considered unstable. page_count may return higher + * than expected, and put_page must be able to do the right thing when the + * page has been finished with, no matter what it is subsequently allocated + * for (because put_page is what is used here to drop an invalid speculative + * reference). + * + * This is the interesting part of the lockless pagecache (and lockless + * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page) + * has the following pattern: + * 1. find page in radix tree + * 2. conditionally increment refcount + * 3. check the page is still in pagecache (if no, goto 1) + * + * Remove-side that cares about stability of _count (eg. reclaim) has the + * following (with tree_lock held for write): + * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg) + * B. remove page from pagecache + * C. free the page + * + * There are 2 critical interleavings that matter: + * - 2 runs before A: in this case, A sees elevated refcount and bails out + * - A runs before 2: in this case, 2 sees zero refcount and retries; + * subsequently, B will complete and 1 will find no page, causing the + * lookup to return NULL. + * + * It is possible that between 1 and 2, the page is removed then the exact same + * page is inserted into the same position in pagecache. That's OK: the + * old find_get_page using tree_lock could equally have run before or after + * such a re-insertion, depending on order that locks are granted. 
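Moving the aliases (PG_checked, PG_pinned, PG_slob_*, PG_slub_*) into the enum keeps the bit overloading documented in one place while the accessors are still generated by the same macros. As a rough, simplified sketch of what a line like PAGEFLAG(Checked, checked) expands to with the new alias:

    static inline int PageChecked(struct page *page)
    {
        return test_bit(PG_checked, &page->flags);
    }

    static inline void SetPageChecked(struct page *page)
    {
        set_bit(PG_checked, &page->flags);
    }

    static inline void ClearPageChecked(struct page *page)
    {
        clear_bit(PG_checked, &page->flags);
    }

The __PAGEFLAG variants used for the SLOB/SLUB bits generate the non-atomic __SetPage*/__ClearPage* forms instead.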
+ * + * Lookups racing against pagecache insertion isn't a big problem: either 1 + * will find the page or it will not. Likewise, the old find_get_page could run + * either before the insertion or afterwards, depending on timing. + */ +static inline int page_cache_get_speculative(struct page *page) +{ + VM_BUG_ON(in_interrupt()); + +#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU) +# ifdef CONFIG_PREEMPT + VM_BUG_ON(!in_atomic()); +# endif + /* + * Preempt must be disabled here - we rely on rcu_read_lock doing + * this for us. + * + * Pagecache won't be truncated from interrupt context, so if we have + * found a page in the radix tree here, we have pinned its refcount by + * disabling preempt, and hence no need for the "speculative get" that + * SMP requires. + */ + VM_BUG_ON(page_count(page) == 0); + atomic_inc(&page->_count); + +#else + if (unlikely(!get_page_unless_zero(page))) { + /* + * Either the page has been freed, or will be freed. + * In either case, retry here and the caller should + * do the right thing (see comments above). + */ + return 0; + } +#endif + VM_BUG_ON(PageTail(page)); + + return 1; +} + +static inline int page_freeze_refs(struct page *page, int count) +{ + return likely(atomic_cmpxchg(&page->_count, count, 0) == count); +} + +static inline void page_unfreeze_refs(struct page *page, int count) +{ + VM_BUG_ON(page_count(page) != 0); + VM_BUG_ON(count == 0); + + atomic_set(&page->_count, count); +} + #ifdef CONFIG_NUMA extern struct page *__page_cache_alloc(gfp_t gfp); #else @@ -133,7 +226,7 @@ static inline struct page *read_mapping_page(struct address_space *mapping, return read_cache_page(mapping, index, filler, data); } -int add_to_page_cache(struct page *page, struct address_space *mapping, +int add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); @@ -141,6 +234,22 @@ extern void remove_from_page_cache(struct page *page); extern void __remove_from_page_cache(struct page *page); /* + * Like add_to_page_cache_locked, but used to add newly allocated pages: + * the page is new, so we can just run SetPageLocked() against it. + */ +static inline int add_to_page_cache(struct page *page, + struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) +{ + int error; + + SetPageLocked(page); + error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); + if (unlikely(error)) + ClearPageLocked(page); + return error; +} + +/* * Return byte-offset into filesystem object for page. 
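The long comment above gives the lookup protocol in the abstract; concretely, a find_get_page()-style lookup follows the pattern sketched below once it is inside rcu_read_lock(). This is a simplification: the real code uses radix_tree_lookup_slot() so that step 3 can recheck the slot instead of redoing the walk, but the three numbered steps are the same:

    static struct page *lookup_page_speculative(struct address_space *mapping,
                                                pgoff_t offset)
    {
        struct page *page;

        rcu_read_lock();
    repeat:
        page = radix_tree_lookup(&mapping->page_tree, offset);    /* step 1 */
        if (page) {
            if (!page_cache_get_speculative(page))                /* step 2 */
                goto repeat;
            /* step 3: is the page still in the pagecache at this index? */
            if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
                                                   offset))) {
                page_cache_release(page);
                goto repeat;
            }
        }
        rcu_read_unlock();
        return page;
    }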
*/ static inline loff_t page_offset(struct page *page) diff --git a/include/linux/parport.h b/include/linux/parport.h index dcb9e01a69c..6a0d7cdb577 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h @@ -560,5 +560,8 @@ extern int parport_device_proc_unregister(struct pardevice *device); #endif /* !CONFIG_PARPORT_NOT_PC */ +extern unsigned long parport_default_timeslice; +extern int parport_default_spintime; + #endif /* __KERNEL__ */ #endif /* _PARPORT_H_ */ diff --git a/include/linux/parser.h b/include/linux/parser.h index 7dcd0507575..cc554ca8bc7 100644 --- a/include/linux/parser.h +++ b/include/linux/parser.h @@ -14,7 +14,7 @@ struct match_token { const char *pattern; }; -typedef struct match_token match_table_t[]; +typedef const struct match_token match_table_t[]; /* Maximum number of arguments that match_token will find in a pattern */ enum {MAX_OPT_ARGS = 3}; diff --git a/include/linux/pci.h b/include/linux/pci.h index a6a088e1a80..1d296d31abe 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -638,7 +638,9 @@ int pci_save_state(struct pci_dev *dev); int pci_restore_state(struct pci_dev *dev); int pci_set_power_state(struct pci_dev *dev, pci_power_t state); pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); +bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); +pci_power_t pci_target_state(struct pci_dev *dev); int pci_prepare_to_sleep(struct pci_dev *dev); int pci_back_from_sleep(struct pci_dev *dev); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index d8507eb394c..c3b1761aba2 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2371,6 +2371,14 @@ #define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916 #define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918 #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a +#define PCI_DEVICE_ID_INTEL_IOAT_TBG6 0x342b +#define PCI_DEVICE_ID_INTEL_IOAT_TBG7 0x342c +#define PCI_DEVICE_ID_INTEL_IOAT_TBG0 0x3430 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433 #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 #define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580 @@ -2392,6 +2400,9 @@ #define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30 #define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60 #define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f +#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0 +#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5 +#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6 #define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030 #define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035 #define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036 diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 4cdd393e71e..fac3337547e 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -74,11 +74,6 @@ struct percpu_data { (__typeof__(ptr))__p->ptrs[(cpu)]; \ }) -extern void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu); -extern void percpu_depopulate(void *__pdata, int cpu); -extern int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, - cpumask_t *mask); -extern void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask); extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask); extern void percpu_free(void *__pdata); @@ -86,26 +81,6 @@ extern void percpu_free(void *__pdata); #define percpu_ptr(ptr, cpu) ({ (void)(cpu); 
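Qualifying match_table_t as const lets filesystems place their option tables in read-only data; the match_token() calling convention itself does not change. A small sketch of the usual pattern, with an invented pair of options:

    #include <linux/parser.h>

    enum { Opt_mode, Opt_quiet, Opt_err };

    static const match_table_t tokens = {
        { Opt_mode,  "mode=%o" },
        { Opt_quiet, "quiet" },
        { Opt_err,   NULL },
    };

    static int parse_one_option(char *p, unsigned int *mode)
    {
        substring_t args[MAX_OPT_ARGS];
        int option;

        switch (match_token(p, tokens, args)) {
        case Opt_mode:
            if (match_octal(&args[0], &option))
                return -EINVAL;
            *mode = option;
            return 0;
        case Opt_quiet:
            return 0;
        default:
            return -EINVAL;
        }
    }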
(ptr); }) -static inline void percpu_depopulate(void *__pdata, int cpu) -{ -} - -static inline void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask) -{ -} - -static inline void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, - int cpu) -{ - return percpu_ptr(__pdata, cpu); -} - -static inline int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, - cpumask_t *mask) -{ - return 0; -} - static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) { return kzalloc(size, gfp); @@ -118,10 +93,6 @@ static inline void percpu_free(void *__pdata) #endif /* CONFIG_SMP */ -#define percpu_populate_mask(__pdata, size, gfp, mask) \ - __percpu_populate_mask((__pdata), (size), (gfp), &(mask)) -#define percpu_depopulate_mask(__pdata, mask) \ - __percpu_depopulate_mask((__pdata), &(mask)) #define percpu_alloc_mask(size, gfp, mask) \ __percpu_alloc_mask((size), (gfp), &(mask)) diff --git a/include/linux/pid.h b/include/linux/pid.h index c21c7e8124a..22921ac4cfd 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -48,7 +48,7 @@ enum pid_type */ struct upid { - /* Try to keep pid_chain in the same cacheline as nr for find_pid */ + /* Try to keep pid_chain in the same cacheline as nr for find_vpid */ int nr; struct pid_namespace *ns; struct hlist_node pid_chain; @@ -57,10 +57,10 @@ struct upid { struct pid { atomic_t count; + unsigned int level; /* lists of tasks that use this pid */ struct hlist_head tasks[PIDTYPE_MAX]; struct rcu_head rcu; - unsigned int level; struct upid numbers[1]; }; @@ -105,14 +105,12 @@ extern struct pid_namespace init_pid_ns; * or rcu_read_lock() held. * * find_pid_ns() finds the pid in the namespace specified - * find_pid() find the pid by its global id, i.e. in the init namespace * find_vpid() finr the pid by its virtual id, i.e. in the current namespace * - * see also find_task_by_pid() set in include/linux/sched.h + * see also find_task_by_vpid() set in include/linux/sched.h */ extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns); extern struct pid *find_vpid(int nr); -extern struct pid *find_pid(int nr); /* * Lookup a PID in the hash table, and return with it's count elevated. diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index caff5283d15..1af82c4e17d 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -14,6 +14,8 @@ struct pidmap { #define PIDMAP_ENTRIES ((PID_MAX_LIMIT + 8*PAGE_SIZE - 1)/PAGE_SIZE/8) +struct bsd_acct_struct; + struct pid_namespace { struct kref kref; struct pidmap pidmap[PIDMAP_ENTRIES]; @@ -25,6 +27,9 @@ struct pid_namespace { #ifdef CONFIG_PROC_FS struct vfsmount *proc_mnt; #endif +#ifdef CONFIG_BSD_PROCESS_ACCT + struct bsd_acct_struct *bacct; +#endif }; extern struct pid_namespace init_pid_ns; @@ -85,4 +90,7 @@ static inline struct task_struct *task_child_reaper(struct task_struct *tsk) return tsk->nsproxy->pid_ns->child_reaper; } +void pidhash_init(void); +void pidmap_init(void); + #endif /* _LINUX_PID_NS_H */ diff --git a/include/linux/pm.h b/include/linux/pm.h index 4ad9de94449..4dcce54b6d7 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -22,78 +22,6 @@ #define _LINUX_PM_H #include <linux/list.h> -#include <asm/atomic.h> -#include <asm/errno.h> - -/* - * Power management requests... these are passed to pm_send_all() and friends. - * - * these functions are old and deprecated, see below. 
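With the global find_pid() removed, pid lookups are expected to be namespace aware: find_pid_ns() with an explicit namespace, or find_vpid() for a number as seen from the current task's namespace. A typical lookup of a task from a user-supplied pid, done under RCU, looks roughly like this (the wrapper name is invented; pid_task() and get_task_struct() are existing helpers):

    static struct task_struct *task_from_vnr(pid_t nr)
    {
        struct task_struct *task;

        rcu_read_lock();
        task = pid_task(find_vpid(nr), PIDTYPE_PID);
        if (task)
            get_task_struct(task);
        rcu_read_unlock();

        return task;    /* caller releases with put_task_struct() */
    }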
- */ -typedef int __bitwise pm_request_t; - -#define PM_SUSPEND ((__force pm_request_t) 1) /* enter D1-D3 */ -#define PM_RESUME ((__force pm_request_t) 2) /* enter D0 */ - - -/* - * Device types... these are passed to pm_register - */ -typedef int __bitwise pm_dev_t; - -#define PM_UNKNOWN_DEV ((__force pm_dev_t) 0) /* generic */ -#define PM_SYS_DEV ((__force pm_dev_t) 1) /* system device (fan, KB controller, ...) */ -#define PM_PCI_DEV ((__force pm_dev_t) 2) /* PCI device */ -#define PM_USB_DEV ((__force pm_dev_t) 3) /* USB device */ -#define PM_SCSI_DEV ((__force pm_dev_t) 4) /* SCSI device */ -#define PM_ISA_DEV ((__force pm_dev_t) 5) /* ISA device */ -#define PM_MTD_DEV ((__force pm_dev_t) 6) /* Memory Technology Device */ - -/* - * System device hardware ID (PnP) values - */ -enum -{ - PM_SYS_UNKNOWN = 0x00000000, /* generic */ - PM_SYS_KBC = 0x41d00303, /* keyboard controller */ - PM_SYS_COM = 0x41d00500, /* serial port */ - PM_SYS_IRDA = 0x41d00510, /* IRDA controller */ - PM_SYS_FDC = 0x41d00700, /* floppy controller */ - PM_SYS_VGA = 0x41d00900, /* VGA controller */ - PM_SYS_PCMCIA = 0x41d00e00, /* PCMCIA controller */ -}; - -/* - * Device identifier - */ -#define PM_PCI_ID(dev) ((dev)->bus->number << 16 | (dev)->devfn) - -/* - * Request handler callback - */ -struct pm_dev; - -typedef int (*pm_callback)(struct pm_dev *dev, pm_request_t rqst, void *data); - -/* - * Dynamic device information - */ -struct pm_dev -{ - pm_dev_t type; - unsigned long id; - pm_callback callback; - void *data; - - unsigned long flags; - unsigned long state; - unsigned long prev_state; - - struct list_head entry; -}; - -/* Functions above this comment are list-based old-style power - * management. Please avoid using them. */ /* * Callbacks for platform drivers to implement. @@ -317,6 +245,21 @@ struct pm_ext_ops { * RECOVER Creation of a hibernation image or restoration of the main * memory contents from a hibernation image has failed, call * ->thaw() and ->complete() for all devices. + * + * The following PM_EVENT_ messages are defined for internal use by + * kernel subsystems. They are never issued by the PM core. + * + * USER_SUSPEND Manual selective suspend was issued by userspace. + * + * USER_RESUME Manual selective resume was issued by userspace. + * + * REMOTE_WAKEUP Remote-wakeup request was received from the device. + * + * AUTO_SUSPEND Automatic (device idle) runtime suspend was + * initiated by the subsystem. + * + * AUTO_RESUME Automatic (device needed) runtime resume was + * requested by a driver. 
*/ #define PM_EVENT_ON 0x0000 @@ -328,9 +271,18 @@ struct pm_ext_ops { #define PM_EVENT_THAW 0x0020 #define PM_EVENT_RESTORE 0x0040 #define PM_EVENT_RECOVER 0x0080 +#define PM_EVENT_USER 0x0100 +#define PM_EVENT_REMOTE 0x0200 +#define PM_EVENT_AUTO 0x0400 -#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) +#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) +#define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND) +#define PM_EVENT_USER_RESUME (PM_EVENT_USER | PM_EVENT_RESUME) +#define PM_EVENT_REMOTE_WAKEUP (PM_EVENT_REMOTE | PM_EVENT_RESUME) +#define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND) +#define PM_EVENT_AUTO_RESUME (PM_EVENT_AUTO | PM_EVENT_RESUME) +#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) #define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, }) #define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, }) #define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, }) @@ -339,7 +291,16 @@ struct pm_ext_ops { #define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, }) #define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, }) #define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, }) -#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) +#define PMSG_USER_SUSPEND ((struct pm_messge) \ + { .event = PM_EVENT_USER_SUSPEND, }) +#define PMSG_USER_RESUME ((struct pm_messge) \ + { .event = PM_EVENT_USER_RESUME, }) +#define PMSG_REMOTE_RESUME ((struct pm_messge) \ + { .event = PM_EVENT_REMOTE_RESUME, }) +#define PMSG_AUTO_SUSPEND ((struct pm_messge) \ + { .event = PM_EVENT_AUTO_SUSPEND, }) +#define PMSG_AUTO_RESUME ((struct pm_messge) \ + { .event = PM_EVENT_AUTO_RESUME, }) /** * Device power management states diff --git a/include/linux/pm_legacy.h b/include/linux/pm_legacy.h deleted file mode 100644 index 446f4f42b95..00000000000 --- a/include/linux/pm_legacy.h +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef __LINUX_PM_LEGACY_H__ -#define __LINUX_PM_LEGACY_H__ - - -#ifdef CONFIG_PM_LEGACY - -/* - * Register a device with power management - */ -struct pm_dev __deprecated * -pm_register(pm_dev_t type, unsigned long id, pm_callback callback); - -/* - * Send a request to all devices - */ -int __deprecated pm_send_all(pm_request_t rqst, void *data); - -#else /* CONFIG_PM_LEGACY */ - -static inline struct pm_dev *pm_register(pm_dev_t type, - unsigned long id, - pm_callback callback) -{ - return NULL; -} - -static inline int pm_send_all(pm_request_t rqst, void *data) -{ - return 0; -} - -#endif /* CONFIG_PM_LEGACY */ - -#endif /* __LINUX_PM_LEGACY_H__ */ - diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 15a9eaf4a80..f560d1705af 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -79,6 +79,7 @@ struct proc_dir_entry { int pde_users; /* number of callers into module in progress */ spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */ struct completion *pde_unload_completion; + struct list_head pde_openers; /* who did ->open, but not ->release */ }; struct kcore_list { @@ -138,7 +139,6 @@ extern int proc_readdir(struct file *, void *, filldir_t); extern struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *); extern const struct file_operations proc_kcore_operations; -extern const struct file_operations proc_kmsg_operations; extern const struct file_operations ppc_htab_operations; extern int pid_ns_prepare_proc(struct pid_namespace *ns); diff --git a/include/linux/profile.h 
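Because the new messages are ordinary events with a qualifier bit OR-ed in, subsystem code can dispatch on either the full message or just the qualifier. A driver that wants to treat an automatic (device idle) suspend more lightly than a full system sleep might test the bit like this; the driver structure and helpers are invented, and only pm_message_t.event and PM_EVENT_AUTO come from this header:

    static int my_driver_suspend(struct device *dev, pm_message_t msg)
    {
        struct my_device *md = dev_get_drvdata(dev);

        if (msg.event & PM_EVENT_AUTO) {
            /* Runtime autosuspend: keep context, just gate the clocks. */
            my_device_clock_gate(md);
        } else {
            /* System sleep or user-requested suspend: full state save. */
            my_device_save_state(md);
            my_device_power_off(md);
        }
        return 0;
    }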
b/include/linux/profile.h index 05c1cc73693..7e7087239af 100644 --- a/include/linux/profile.h +++ b/include/linux/profile.h @@ -8,8 +8,6 @@ #include <asm/errno.h> -extern int prof_on __read_mostly; - #define CPU_PROFILING 1 #define SCHED_PROFILING 2 #define SLEEP_PROFILING 3 @@ -19,14 +17,31 @@ struct proc_dir_entry; struct pt_regs; struct notifier_block; +#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS) +void create_prof_cpu_mask(struct proc_dir_entry *de); +#else +static inline void create_prof_cpu_mask(struct proc_dir_entry *de) +{ +} +#endif + +enum profile_type { + PROFILE_TASK_EXIT, + PROFILE_MUNMAP +}; + +#ifdef CONFIG_PROFILING + +extern int prof_on __read_mostly; + /* init basic kernel profiler */ void __init profile_init(void); -void profile_tick(int); +void profile_tick(int type); /* * Add multiple profiler hits to a given address: */ -void profile_hits(int, void *ip, unsigned int nr_hits); +void profile_hits(int type, void *ip, unsigned int nr_hits); /* * Single profiler hit: @@ -40,19 +55,6 @@ static inline void profile_hit(int type, void *ip) profile_hits(type, ip, 1); } -#ifdef CONFIG_PROC_FS -void create_prof_cpu_mask(struct proc_dir_entry *); -#else -#define create_prof_cpu_mask(x) do { (void)(x); } while (0) -#endif - -enum profile_type { - PROFILE_TASK_EXIT, - PROFILE_MUNMAP -}; - -#ifdef CONFIG_PROFILING - struct task_struct; struct mm_struct; @@ -80,6 +82,28 @@ struct pt_regs; #else +#define prof_on 0 + +static inline void profile_init(void) +{ + return; +} + +static inline void profile_tick(int type) +{ + return; +} + +static inline void profile_hits(int type, void *ip, unsigned int nr_hits) +{ + return; +} + +static inline void profile_hit(int type, void *ip) +{ + return; +} + static inline int task_handoff_register(struct notifier_block * n) { return -ENOSYS; diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index c6f5f9dd0ce..fd31756e1a0 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -121,6 +121,74 @@ static inline void ptrace_unlink(struct task_struct *child) int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data); int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data); +/** + * task_ptrace - return %PT_* flags that apply to a task + * @task: pointer to &task_struct in question + * + * Returns the %PT_* flags that apply to @task. + */ +static inline int task_ptrace(struct task_struct *task) +{ + return task->ptrace; +} + +/** + * ptrace_event - possibly stop for a ptrace event notification + * @mask: %PT_* bit to check in @current->ptrace + * @event: %PTRACE_EVENT_* value to report if @mask is set + * @message: value for %PTRACE_GETEVENTMSG to return + * + * This checks the @mask bit to see if ptrace wants stops for this event. + * If so we stop, reporting @event and @message to the ptrace parent. + * + * Returns nonzero if we did a ptrace notification, zero if not. + * + * Called without locks. + */ +static inline int ptrace_event(int mask, int event, unsigned long message) +{ + if (mask && likely(!(current->ptrace & mask))) + return 0; + current->ptrace_message = message; + ptrace_notify((event << 8) | SIGTRAP); + return 1; +} + +/** + * ptrace_init_task - initialize ptrace state for a new child + * @child: new child task + * @ptrace: true if child should be ptrace'd by parent's tracer + * + * This is called immediately after adding @child to its parent's children + * list. @ptrace is false in the normal case, and true to ptrace @child. 
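ptrace_event() packages the usual check-the-flag, stash-the-message, notify sequence so call sites shrink to one conditional. The kind of caller it is aimed at, reporting an exec to the tracer and falling back to the traditional post-exec SIGTRAP when PTRACE_O_TRACEEXEC was not requested, looks roughly like this sketch (PT_TRACE_EXEC, PTRACE_EVENT_EXEC and PT_PTRACED are existing ptrace.h constants):

    /* On the exec success path (sketch): */
    if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) &&
        unlikely(task_ptrace(current) & PT_PTRACED))
        send_sig(SIGTRAP, current, 0);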
+ * + * Called with current's siglock and write_lock_irq(&tasklist_lock) held. + */ +static inline void ptrace_init_task(struct task_struct *child, bool ptrace) +{ + INIT_LIST_HEAD(&child->ptrace_entry); + INIT_LIST_HEAD(&child->ptraced); + child->parent = child->real_parent; + child->ptrace = 0; + if (unlikely(ptrace)) { + child->ptrace = current->ptrace; + __ptrace_link(child, current->parent); + } +} + +/** + * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped + * @task: task in %EXIT_DEAD state + * + * Called with write_lock(&tasklist_lock) held. + */ +static inline void ptrace_release_task(struct task_struct *task) +{ + BUG_ON(!list_empty(&task->ptraced)); + ptrace_unlink(task); + BUG_ON(!list_empty(&task->ptrace_entry)); +} + #ifndef force_successful_syscall_return /* * System call handlers that, upon successful completion, need to return a @@ -246,6 +314,10 @@ static inline void user_enable_block_step(struct task_struct *task) #define arch_ptrace_stop(code, info) do { } while (0) #endif +extern int task_current_syscall(struct task_struct *target, long *callno, + unsigned long args[6], unsigned int maxargs, + unsigned long *sp, unsigned long *pc); + #endif #endif diff --git a/include/linux/quota.h b/include/linux/quota.h index dcddfb20094..376a05048bc 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -41,9 +41,6 @@ #define __DQUOT_VERSION__ "dquot_6.5.1" #define __DQUOT_NUM_VERSION__ 6*10000+5*100+1 -typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ -typedef __u64 qsize_t; /* Type in which we store sizes */ - /* Size of blocks in which are counted size limits */ #define QUOTABLOCK_BITS 10 #define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) @@ -138,6 +135,10 @@ struct if_dqinfo { #define QUOTA_NL_BHARDWARN 4 /* Block hardlimit reached */ #define QUOTA_NL_BSOFTLONGWARN 5 /* Block grace time expired */ #define QUOTA_NL_BSOFTWARN 6 /* Block softlimit reached */ +#define QUOTA_NL_IHARDBELOW 7 /* Usage got below inode hardlimit */ +#define QUOTA_NL_ISOFTBELOW 8 /* Usage got below inode softlimit */ +#define QUOTA_NL_BHARDBELOW 9 /* Usage got below block hardlimit */ +#define QUOTA_NL_BSOFTBELOW 10 /* Usage got below block softlimit */ enum { QUOTA_NL_C_UNSPEC, @@ -172,6 +173,9 @@ enum { #include <asm/atomic.h> +typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ +typedef __u64 qsize_t; /* Type in which we store sizes */ + extern spinlock_t dq_data_lock; /* Maximal numbers of writes for quota operation (insert/delete/update) @@ -223,12 +227,10 @@ struct super_block; #define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B) /* Is info dirty? */ extern void mark_info_dirty(struct super_block *sb, int type); -#define info_dirty(info) test_bit(DQF_INFO_DIRTY_B, &(info)->dqi_flags) -#define info_any_dquot_dirty(info) (!list_empty(&(info)->dqi_dirty_list)) -#define info_any_dirty(info) (info_dirty(info) || info_any_dquot_dirty(info)) - -#define sb_dqopt(sb) (&(sb)->s_dquot) -#define sb_dqinfo(sb, type) (sb_dqopt(sb)->info+(type)) +static inline int info_dirty(struct mem_dqinfo *info) +{ + return test_bit(DQF_INFO_DIRTY_B, &info->dqi_flags); +} struct dqstats { int lookups; @@ -337,19 +339,6 @@ struct quota_info { struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ }; -#define sb_has_quota_enabled(sb, type) ((type)==USRQUOTA ? 
\ - (sb_dqopt(sb)->flags & DQUOT_USR_ENABLED) : (sb_dqopt(sb)->flags & DQUOT_GRP_ENABLED)) - -#define sb_any_quota_enabled(sb) (sb_has_quota_enabled(sb, USRQUOTA) | \ - sb_has_quota_enabled(sb, GRPQUOTA)) - -#define sb_has_quota_suspended(sb, type) \ - ((type) == USRQUOTA ? (sb_dqopt(sb)->flags & DQUOT_USR_SUSPENDED) : \ - (sb_dqopt(sb)->flags & DQUOT_GRP_SUSPENDED)) - -#define sb_any_quota_suspended(sb) (sb_has_quota_suspended(sb, USRQUOTA) | \ - sb_has_quota_suspended(sb, GRPQUOTA)) - int register_quota_format(struct quota_format_type *fmt); void unregister_quota_format(struct quota_format_type *fmt); diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index f8670205385..742187f7a05 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -11,42 +11,85 @@ #define _LINUX_QUOTAOPS_ #include <linux/smp_lock.h> - #include <linux/fs.h> +static inline struct quota_info *sb_dqopt(struct super_block *sb) +{ + return &sb->s_dquot; +} + #if defined(CONFIG_QUOTA) /* * declaration of quota_function calls in kernel. */ -extern void sync_dquots(struct super_block *sb, int type); - -extern int dquot_initialize(struct inode *inode, int type); -extern int dquot_drop(struct inode *inode); - -extern int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc); -extern int dquot_alloc_inode(const struct inode *inode, unsigned long number); - -extern int dquot_free_space(struct inode *inode, qsize_t number); -extern int dquot_free_inode(const struct inode *inode, unsigned long number); - -extern int dquot_transfer(struct inode *inode, struct iattr *iattr); -extern int dquot_commit(struct dquot *dquot); -extern int dquot_acquire(struct dquot *dquot); -extern int dquot_release(struct dquot *dquot); -extern int dquot_commit_info(struct super_block *sb, int type); -extern int dquot_mark_dquot_dirty(struct dquot *dquot); - -extern int vfs_quota_on(struct super_block *sb, int type, int format_id, - char *path, int remount); -extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name, - int format_id, int type); -extern int vfs_quota_off(struct super_block *sb, int type, int remount); -extern int vfs_quota_sync(struct super_block *sb, int type); -extern int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); -extern int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); -extern int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di); -extern int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di); +void sync_dquots(struct super_block *sb, int type); + +int dquot_initialize(struct inode *inode, int type); +int dquot_drop(struct inode *inode); + +int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc); +int dquot_alloc_inode(const struct inode *inode, unsigned long number); + +int dquot_free_space(struct inode *inode, qsize_t number); +int dquot_free_inode(const struct inode *inode, unsigned long number); + +int dquot_transfer(struct inode *inode, struct iattr *iattr); +int dquot_commit(struct dquot *dquot); +int dquot_acquire(struct dquot *dquot); +int dquot_release(struct dquot *dquot); +int dquot_commit_info(struct super_block *sb, int type); +int dquot_mark_dquot_dirty(struct dquot *dquot); + +int vfs_quota_on(struct super_block *sb, int type, int format_id, + char *path, int remount); +int vfs_quota_on_mount(struct super_block *sb, char *qf_name, + int format_id, int type); +int vfs_quota_off(struct super_block *sb, int type, int remount); +int 
vfs_quota_sync(struct super_block *sb, int type); +int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); +int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); +int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di); +int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di); + +void vfs_dq_drop(struct inode *inode); +int vfs_dq_transfer(struct inode *inode, struct iattr *iattr); +int vfs_dq_quota_on_remount(struct super_block *sb); + +static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type) +{ + return sb_dqopt(sb)->info + type; +} + +/* + * Functions for checking status of quota + */ + +static inline int sb_has_quota_enabled(struct super_block *sb, int type) +{ + if (type == USRQUOTA) + return sb_dqopt(sb)->flags & DQUOT_USR_ENABLED; + return sb_dqopt(sb)->flags & DQUOT_GRP_ENABLED; +} + +static inline int sb_any_quota_enabled(struct super_block *sb) +{ + return sb_has_quota_enabled(sb, USRQUOTA) || + sb_has_quota_enabled(sb, GRPQUOTA); +} + +static inline int sb_has_quota_suspended(struct super_block *sb, int type) +{ + if (type == USRQUOTA) + return sb_dqopt(sb)->flags & DQUOT_USR_SUSPENDED; + return sb_dqopt(sb)->flags & DQUOT_GRP_SUSPENDED; +} + +static inline int sb_any_quota_suspended(struct super_block *sb) +{ + return sb_has_quota_suspended(sb, USRQUOTA) || + sb_has_quota_suspended(sb, GRPQUOTA); +} /* * Operations supported for diskquotas. @@ -59,38 +102,16 @@ extern struct quotactl_ops vfs_quotactl_ops; /* It is better to call this function outside of any transaction as it might * need a lot of space in journal for dquot structure allocation. */ -static inline void DQUOT_INIT(struct inode *inode) +static inline void vfs_dq_init(struct inode *inode) { BUG_ON(!inode->i_sb); if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) inode->i_sb->dq_op->initialize(inode, -1); } -/* The same as with DQUOT_INIT */ -static inline void DQUOT_DROP(struct inode *inode) -{ - /* Here we can get arbitrary inode from clear_inode() so we have - * to be careful. OTOH we don't need locking as quota operations - * are allowed to change only at mount time */ - if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op - && inode->i_sb->dq_op->drop) { - int cnt; - /* Test before calling to rule out calls from proc and such - * where we are not allowed to block. 
Note that this is - * actually reliable test even without the lock - the caller - * must assure that nobody can come after the DQUOT_DROP and - * add quota pointers back anyway */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt] != NODQUOT) - break; - if (cnt < MAXQUOTAS) - inode->i_sb->dq_op->drop(inode); - } -} - /* The following allocation/freeing/transfer functions *must* be called inside * a transaction (deadlocks possible otherwise) */ -static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) +static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr) { if (sb_any_quota_enabled(inode->i_sb)) { /* Used space is updated in alloc_space() */ @@ -102,15 +123,15 @@ static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) return 0; } -static inline int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr) +static inline int vfs_dq_prealloc_space(struct inode *inode, qsize_t nr) { int ret; - if (!(ret = DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr))) + if (!(ret = vfs_dq_prealloc_space_nodirty(inode, nr))) mark_inode_dirty(inode); return ret; } -static inline int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) +static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr) { if (sb_any_quota_enabled(inode->i_sb)) { /* Used space is updated in alloc_space() */ @@ -122,25 +143,25 @@ static inline int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) return 0; } -static inline int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr) +static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr) { int ret; - if (!(ret = DQUOT_ALLOC_SPACE_NODIRTY(inode, nr))) + if (!(ret = vfs_dq_alloc_space_nodirty(inode, nr))) mark_inode_dirty(inode); return ret; } -static inline int DQUOT_ALLOC_INODE(struct inode *inode) +static inline int vfs_dq_alloc_inode(struct inode *inode) { if (sb_any_quota_enabled(inode->i_sb)) { - DQUOT_INIT(inode); + vfs_dq_init(inode); if (inode->i_sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) return 1; } return 0; } -static inline void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr) +static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr) { if (sb_any_quota_enabled(inode->i_sb)) inode->i_sb->dq_op->free_space(inode, nr); @@ -148,35 +169,25 @@ static inline void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr) inode_sub_bytes(inode, nr); } -static inline void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr) +static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr) { - DQUOT_FREE_SPACE_NODIRTY(inode, nr); + vfs_dq_free_space_nodirty(inode, nr); mark_inode_dirty(inode); } -static inline void DQUOT_FREE_INODE(struct inode *inode) +static inline void vfs_dq_free_inode(struct inode *inode) { if (sb_any_quota_enabled(inode->i_sb)) inode->i_sb->dq_op->free_inode(inode, 1); } -static inline int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr) -{ - if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) { - DQUOT_INIT(inode); - if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA) - return 1; - } - return 0; -} - /* The following two functions cannot be called inside a transaction */ -static inline void DQUOT_SYNC(struct super_block *sb) +static inline void vfs_dq_sync(struct super_block *sb) { sync_dquots(sb, -1); } -static inline int DQUOT_OFF(struct super_block *sb, int remount) +static inline int vfs_dq_off(struct super_block *sb, int remount) { int ret = -ENOSYS; @@ -185,22 +196,27 @@ static 
inline int DQUOT_OFF(struct super_block *sb, int remount) return ret; } -static inline int DQUOT_ON_REMOUNT(struct super_block *sb) +#else + +static inline int sb_has_quota_enabled(struct super_block *sb, int type) { - int cnt; - int ret = 0, err; + return 0; +} - if (!sb->s_qcop || !sb->s_qcop->quota_on) - return -ENOSYS; - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1); - if (err < 0 && !ret) - ret = err; - } - return ret; +static inline int sb_any_quota_enabled(struct super_block *sb) +{ + return 0; } -#else +static inline int sb_has_quota_suspended(struct super_block *sb, int type) +{ + return 0; +} + +static inline int sb_any_quota_suspended(struct super_block *sb) +{ + return 0; +} /* * NO-OP when quota not configured. @@ -208,113 +224,144 @@ static inline int DQUOT_ON_REMOUNT(struct super_block *sb) #define sb_dquot_ops (NULL) #define sb_quotactl_ops (NULL) -static inline void DQUOT_INIT(struct inode *inode) +static inline void vfs_dq_init(struct inode *inode) { } -static inline void DQUOT_DROP(struct inode *inode) +static inline void vfs_dq_drop(struct inode *inode) { } -static inline int DQUOT_ALLOC_INODE(struct inode *inode) +static inline int vfs_dq_alloc_inode(struct inode *inode) { return 0; } -static inline void DQUOT_FREE_INODE(struct inode *inode) +static inline void vfs_dq_free_inode(struct inode *inode) { } -static inline void DQUOT_SYNC(struct super_block *sb) +static inline void vfs_dq_sync(struct super_block *sb) { } -static inline int DQUOT_OFF(struct super_block *sb, int remount) +static inline int vfs_dq_off(struct super_block *sb, int remount) { return 0; } -static inline int DQUOT_ON_REMOUNT(struct super_block *sb) +static inline int vfs_dq_quota_on_remount(struct super_block *sb) { return 0; } -static inline int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr) +static inline int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) { return 0; } -static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) +static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr) { inode_add_bytes(inode, nr); return 0; } -static inline int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr) +static inline int vfs_dq_prealloc_space(struct inode *inode, qsize_t nr) { - DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr); + vfs_dq_prealloc_space_nodirty(inode, nr); mark_inode_dirty(inode); return 0; } -static inline int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) +static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr) { inode_add_bytes(inode, nr); return 0; } -static inline int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr) +static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr) { - DQUOT_ALLOC_SPACE_NODIRTY(inode, nr); + vfs_dq_alloc_space_nodirty(inode, nr); mark_inode_dirty(inode); return 0; } -static inline void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr) +static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr) { inode_sub_bytes(inode, nr); } -static inline void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr) +static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr) { - DQUOT_FREE_SPACE_NODIRTY(inode, nr); + vfs_dq_free_space_nodirty(inode, nr); mark_inode_dirty(inode); } #endif /* CONFIG_QUOTA */ -static inline int DQUOT_PREALLOC_BLOCK_NODIRTY(struct inode *inode, qsize_t nr) +static inline int vfs_dq_prealloc_block_nodirty(struct inode *inode, qsize_t nr) { - return 
DQUOT_PREALLOC_SPACE_NODIRTY(inode, + return vfs_dq_prealloc_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits); } -static inline int DQUOT_PREALLOC_BLOCK(struct inode *inode, qsize_t nr) +static inline int vfs_dq_prealloc_block(struct inode *inode, qsize_t nr) { - return DQUOT_PREALLOC_SPACE(inode, + return vfs_dq_prealloc_space(inode, nr << inode->i_sb->s_blocksize_bits); } -static inline int DQUOT_ALLOC_BLOCK_NODIRTY(struct inode *inode, qsize_t nr) +static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, qsize_t nr) { - return DQUOT_ALLOC_SPACE_NODIRTY(inode, + return vfs_dq_alloc_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits); } -static inline int DQUOT_ALLOC_BLOCK(struct inode *inode, qsize_t nr) +static inline int vfs_dq_alloc_block(struct inode *inode, qsize_t nr) { - return DQUOT_ALLOC_SPACE(inode, + return vfs_dq_alloc_space(inode, nr << inode->i_sb->s_blocksize_bits); } -static inline void DQUOT_FREE_BLOCK_NODIRTY(struct inode *inode, qsize_t nr) +static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr) { - DQUOT_FREE_SPACE_NODIRTY(inode, nr << inode->i_sb->s_blocksize_bits); + vfs_dq_free_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits); } -static inline void DQUOT_FREE_BLOCK(struct inode *inode, qsize_t nr) +static inline void vfs_dq_free_block(struct inode *inode, qsize_t nr) { - DQUOT_FREE_SPACE(inode, nr << inode->i_sb->s_blocksize_bits); + vfs_dq_free_space(inode, nr << inode->i_sb->s_blocksize_bits); } +/* + * Define uppercase equivalents for compatibility with old function names + * Can go away when we think all users have been converted (15/04/2008) + */ +#define DQUOT_INIT(inode) vfs_dq_init(inode) +#define DQUOT_DROP(inode) vfs_dq_drop(inode) +#define DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr) \ + vfs_dq_prealloc_space_nodirty(inode, nr) +#define DQUOT_PREALLOC_SPACE(inode, nr) vfs_dq_prealloc_space(inode, nr) +#define DQUOT_ALLOC_SPACE_NODIRTY(inode, nr) \ + vfs_dq_alloc_space_nodirty(inode, nr) +#define DQUOT_ALLOC_SPACE(inode, nr) vfs_dq_alloc_space(inode, nr) +#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr) \ + vfs_dq_prealloc_block_nodirty(inode, nr) +#define DQUOT_PREALLOC_BLOCK(inode, nr) vfs_dq_prealloc_block(inode, nr) +#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) \ + vfs_dq_alloc_block_nodirty(inode, nr) +#define DQUOT_ALLOC_BLOCK(inode, nr) vfs_dq_alloc_block(inode, nr) +#define DQUOT_ALLOC_INODE(inode) vfs_dq_alloc_inode(inode) +#define DQUOT_FREE_SPACE_NODIRTY(inode, nr) \ + vfs_dq_free_space_nodirty(inode, nr) +#define DQUOT_FREE_SPACE(inode, nr) vfs_dq_free_space(inode, nr) +#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) \ + vfs_dq_free_block_nodirty(inode, nr) +#define DQUOT_FREE_BLOCK(inode, nr) vfs_dq_free_block(inode, nr) +#define DQUOT_FREE_INODE(inode) vfs_dq_free_inode(inode) +#define DQUOT_TRANSFER(inode, iattr) vfs_dq_transfer(inode, iattr) +#define DQUOT_SYNC(sb) vfs_dq_sync(sb) +#define DQUOT_OFF(sb, remount) vfs_dq_off(sb, remount) +#define DQUOT_ON_REMOUNT(sb) vfs_dq_quota_on_remount(sb) + #endif /* _LINUX_QUOTAOPS_ */ diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index b8ce2b444bb..a916c6660df 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -99,12 +99,15 @@ do { \ * * The notable exceptions to this rule are the following functions: * radix_tree_lookup + * radix_tree_lookup_slot * radix_tree_tag_get * radix_tree_gang_lookup + * radix_tree_gang_lookup_slot * radix_tree_gang_lookup_tag + * radix_tree_gang_lookup_tag_slot * radix_tree_tagged 
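For filesystems the quotaops rename is mostly cosmetic: the lowercase vfs_dq_* inlines behave exactly like the uppercase macros they replace, and the compatibility defines above keep old callers building. A block allocation path therefore keeps its familiar shape, sketched here with invented filesystem helpers around the real vfs_dq_alloc_block()/vfs_dq_free_block():

    static int my_fs_alloc_block(struct inode *inode, sector_t *out)
    {
        int err;

        /* Charge the owner's quota first; a nonzero return means the
         * allocation would exceed a limit. */
        if (vfs_dq_alloc_block(inode, 1))
            return -EDQUOT;

        err = my_fs_find_free_block(inode, out);
        if (err)
            vfs_dq_free_block(inode, 1);    /* undo the charge */

        return err;
    }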
* - * The first 4 functions are able to be called locklessly, using RCU. The + * The first 7 functions are able to be called locklessly, using RCU. The * caller must ensure calls to these functions are made within rcu_read_lock() * regions. Other readers (lock-free or otherwise) and modifications may be * running concurrently. @@ -159,6 +162,9 @@ void *radix_tree_delete(struct radix_tree_root *, unsigned long); unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items); +unsigned int +radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, + unsigned long first_index, unsigned int max_items); unsigned long radix_tree_next_hole(struct radix_tree_root *root, unsigned long index, unsigned long max_scan); int radix_tree_preload(gfp_t gfp_mask); @@ -173,6 +179,10 @@ unsigned int radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items, unsigned int tag); +unsigned int +radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, + unsigned long first_index, unsigned int max_items, + unsigned int tag); int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); static inline void radix_tree_preload_end(void) diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h new file mode 100644 index 00000000000..18a5b9ba9d4 --- /dev/null +++ b/include/linux/ratelimit.h @@ -0,0 +1,27 @@ +#ifndef _LINUX_RATELIMIT_H +#define _LINUX_RATELIMIT_H +#include <linux/param.h> + +#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) +#define DEFAULT_RATELIMIT_BURST 10 + +struct ratelimit_state { + int interval; + int burst; + int printed; + int missed; + unsigned long begin; +}; + +#define DEFINE_RATELIMIT_STATE(name, interval, burst) \ + struct ratelimit_state name = {interval, burst,} + +extern int __ratelimit(struct ratelimit_state *rs); + +static inline int ratelimit(void) +{ + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + return __ratelimit(&rs); +} +#endif diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h index f04b64eca63..0967f03b070 100644 --- a/include/linux/rcupreempt.h +++ b/include/linux/rcupreempt.h @@ -115,16 +115,21 @@ DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched); static inline void rcu_enter_nohz(void) { + static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); + smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ __get_cpu_var(rcu_dyntick_sched).dynticks++; - WARN_ON(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1); + WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs); } static inline void rcu_exit_nohz(void) { + static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); + smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ __get_cpu_var(rcu_dyntick_sched).dynticks++; - WARN_ON(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1)); + WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1), + &rs); } #else /* CONFIG_NO_HZ */ diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index 4aacaeecb56..e9963af16cd 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -526,8 +526,8 @@ struct item_head { ** p is the array of __u32, i is the index into the array, v is the value ** to store there. 
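The new ratelimit.h factors the old net_msg_cost/net_msg_burst logic into a reusable struct ratelimit_state; net_ratelimit() (via net_ratelimit_state in the net.h hunk above) and the WARN_ON_RATELIMIT() calls in rcupreempt.h are the first users. Any other subsystem can throttle a noisy message the same way; a sketch with invented names, using the defaults of at most 10 messages per 5-second interval:

    #include <linux/kernel.h>
    #include <linux/ratelimit.h>

    static void my_report_overrun(int queue)
    {
        static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);

        /* __ratelimit() returns nonzero while we are under the burst. */
        if (__ratelimit(&rs))
            printk(KERN_WARNING "mydev: queue %d overrun\n", queue);
    }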
*/ -#define get_block_num(p, i) le32_to_cpu(get_unaligned((p) + (i))) -#define put_block_num(p, i, v) put_unaligned(cpu_to_le32(v), (p) + (i)) +#define get_block_num(p, i) get_unaligned_le32((p) + (i)) +#define put_block_num(p, i, v) put_unaligned_le32((v), (p) + (i)) // // in old version uniqueness field shows key type diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h index 336ee43ed7d..315517e8bfa 100644 --- a/include/linux/reiserfs_fs_sb.h +++ b/include/linux/reiserfs_fs_sb.h @@ -152,7 +152,7 @@ struct reiserfs_journal_list { atomic_t j_nonzerolen; atomic_t j_commit_left; atomic_t j_older_commits_done; /* all commits older than this on disk */ - struct semaphore j_commit_lock; + struct mutex j_commit_mutex; unsigned long j_trans_id; time_t j_timestamp; struct reiserfs_list_bitmap *j_list_bitmap; @@ -193,8 +193,8 @@ struct reiserfs_journal { struct buffer_head *j_header_bh; time_t j_trans_start_time; /* time this transaction started */ - struct semaphore j_lock; - struct semaphore j_flush_sem; + struct mutex j_mutex; + struct mutex j_flush_mutex; wait_queue_head_t j_join_wait; /* wait for current transaction to finish before starting new one */ atomic_t j_jlock; /* lock for j_join_wait */ int j_list_bitmap_index; /* number of next list bitmap to use */ diff --git a/include/linux/relay.h b/include/linux/relay.h index 6cd8c4425fc..953fc055e87 100644 --- a/include/linux/relay.h +++ b/include/linux/relay.h @@ -48,6 +48,7 @@ struct rchan_buf size_t *padding; /* padding counts per sub-buffer */ size_t prev_padding; /* temporary variable */ size_t bytes_consumed; /* bytes consumed in cur read subbuf */ + size_t early_bytes; /* bytes consumed before VFS inited */ unsigned int cpu; /* this buf's cpu */ } ____cacheline_aligned; @@ -68,6 +69,7 @@ struct rchan int is_global; /* One global buffer ? */ struct list_head list; /* for channel list */ struct dentry *parent; /* parent dentry passed to open */ + int has_base_filename; /* has a filename associated? */ char base_filename[NAME_MAX]; /* saved base filename */ }; @@ -169,6 +171,9 @@ struct rchan *relay_open(const char *base_filename, size_t n_subbufs, struct rchan_callbacks *cb, void *private_data); +extern int relay_late_setup_files(struct rchan *chan, + const char *base_filename, + struct dentry *parent); extern void relay_close(struct rchan *chan); extern void relay_flush(struct rchan *chan); extern void relay_subbufs_consumed(struct rchan *chan, diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h index 6d9e1fca098..fdeadd9740d 100644 --- a/include/linux/res_counter.h +++ b/include/linux/res_counter.h @@ -63,9 +63,14 @@ u64 res_counter_read_u64(struct res_counter *counter, int member); ssize_t res_counter_read(struct res_counter *counter, int member, const char __user *buf, size_t nbytes, loff_t *pos, int (*read_strategy)(unsigned long long val, char *s)); -ssize_t res_counter_write(struct res_counter *counter, int member, - const char __user *buf, size_t nbytes, loff_t *pos, - int (*write_strategy)(char *buf, unsigned long long *val)); + +typedef int (*write_strategy_fn)(const char *buf, unsigned long long *val); + +int res_counter_memparse_write_strategy(const char *buf, + unsigned long long *res); + +int res_counter_write(struct res_counter *counter, int member, + const char *buffer, write_strategy_fn write_strategy); /* * the field descriptors. 
one for each member of res_counter @@ -95,8 +100,10 @@ void res_counter_init(struct res_counter *counter); * counter->limit _locked call expects the counter->lock to be taken */ -int res_counter_charge_locked(struct res_counter *counter, unsigned long val); -int res_counter_charge(struct res_counter *counter, unsigned long val); +int __must_check res_counter_charge_locked(struct res_counter *counter, + unsigned long val); +int __must_check res_counter_charge(struct res_counter *counter, + unsigned long val); /* * uncharge - tell that some portion of the resource is released @@ -151,4 +158,20 @@ static inline void res_counter_reset_failcnt(struct res_counter *cnt) cnt->failcnt = 0; spin_unlock_irqrestore(&cnt->lock, flags); } + +static inline int res_counter_set_limit(struct res_counter *cnt, + unsigned long long limit) +{ + unsigned long flags; + int ret = -EBUSY; + + spin_lock_irqsave(&cnt->lock, flags); + if (cnt->usage < limit) { + cnt->limit = limit; + ret = 0; + } + spin_unlock_irqrestore(&cnt->lock, flags); + return ret; +} + #endif diff --git a/include/linux/rtc.h b/include/linux/rtc.h index f2d0d152772..91f597ad6ac 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -115,6 +115,23 @@ extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm); extern struct class *rtc_class; +/* + * For these RTC methods the device parameter is the physical device + * on whatever bus holds the hardware (I2C, Platform, SPI, etc), which + * was passed to rtc_device_register(). Its driver_data normally holds + * device state, including the rtc_device pointer for the RTC. + * + * Most of these methods are called with rtc_device.ops_lock held, + * through the rtc_*(struct rtc_device *, ...) calls. + * + * The (current) exceptions are mostly filesystem hooks: + * - the proc() hook for procfs + * - non-ioctl() chardev hooks: open(), release(), read_callback() + * - periodic irq calls: irq_set_state(), irq_set_freq() + * + * REVISIT those periodic irq calls *do* have ops_lock when they're + * issued through ioctl() ... + */ struct rtc_class_ops { int (*open)(struct device *); void (*release)(struct device *); @@ -208,8 +225,6 @@ typedef struct rtc_task { int rtc_register(rtc_task_t *task); int rtc_unregister(rtc_task_t *task); int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); -void rtc_get_rtc_time(struct rtc_time *rtc_tm); -irqreturn_t rtc_interrupt(int irq, void *dev_id); #endif /* __KERNEL__ */ diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 71fc8136004..e5996984ddd 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -224,4 +224,42 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, */ #define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist)) + +/* + * Mapping sg iterator + * + * Iterates over sg entries mapping page-by-page. On each successful + * iteration, @miter->page points to the mapped page and + * @miter->length bytes of data can be accessed at @miter->addr. As + * long as an interation is enclosed between start and stop, the user + * is free to choose control structure and when to stop. + * + * @miter->consumed is set to @miter->length on each iteration. It + * can be adjusted if the user can't consume all the bytes in one go. + * Also, a stopped iteration can be resumed by calling next on it. + * This is useful when iteration needs to release all resources and + * continue later (e.g. at the next interrupt). 
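[Editor's note: the __must_check annotation on res_counter_charge() and the new res_counter_set_limit() helper are used as below; a hypothetical controller snippet, names invented.]

#include <linux/res_counter.h>
#include <linux/errno.h>

static int my_controller_charge(struct res_counter *cnt, unsigned long nr_bytes)
{
        int ret = res_counter_charge(cnt, nr_bytes);    /* __must_check: don't ignore */

        return ret;     /* typically -ENOMEM when the limit would be exceeded */
}

static int my_controller_set_limit(struct res_counter *cnt,
                                   unsigned long long new_limit)
{
        /* fails with -EBUSY if current usage is already at or above new_limit */
        return res_counter_set_limit(cnt, new_limit);
}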
+ */ + +#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */ + +struct sg_mapping_iter { + /* the following three fields can be accessed directly */ + struct page *page; /* currently mapped page */ + void *addr; /* pointer to the mapped area */ + size_t length; /* length of the mapped area */ + size_t consumed; /* number of consumed bytes */ + + /* these are internal states, keep away */ + struct scatterlist *__sg; /* current entry */ + unsigned int __nents; /* nr of remaining entries */ + unsigned int __offset; /* offset within sg */ + unsigned int __flags; +}; + +void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, + unsigned int nents, unsigned int flags); +bool sg_miter_next(struct sg_mapping_iter *miter); +void sg_miter_stop(struct sg_mapping_iter *miter); + #endif /* _LINUX_SCATTERLIST_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 1941d8b5cf1..f59318a0099 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -292,13 +292,13 @@ extern void sched_show_task(struct task_struct *p); #ifdef CONFIG_DETECT_SOFTLOCKUP extern void softlockup_tick(void); -extern void spawn_softlockup_task(void); extern void touch_softlockup_watchdog(void); extern void touch_all_softlockup_watchdogs(void); -extern unsigned long softlockup_thresh; +extern unsigned int softlockup_panic; extern unsigned long sysctl_hung_task_check_count; extern unsigned long sysctl_hung_task_timeout_secs; extern unsigned long sysctl_hung_task_warnings; +extern int softlockup_thresh; #else static inline void softlockup_tick(void) { @@ -505,6 +505,10 @@ struct signal_struct { unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; unsigned long inblock, oublock, cinblock, coublock; +#ifdef CONFIG_TASK_XACCT + u64 rchar, wchar, syscr, syscw; +#endif + struct task_io_accounting ioac; /* * Cumulative ns of scheduled CPU time for dead threads in the @@ -667,6 +671,10 @@ struct task_delay_info { /* io operations performed */ u32 swapin_count; /* total count of the number of swapin block */ /* io operations performed */ + + struct timespec freepages_start, freepages_end; + u64 freepages_delay; /* wait for memory reclaim */ + u32 freepages_count; /* total count of memory reclaim */ }; #endif /* CONFIG_TASK_DELAY_ACCT */ @@ -824,7 +832,16 @@ extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, struct sched_domain_attr *dattr_new); extern int arch_reinit_sched_domains(void); -#endif /* CONFIG_SMP */ +#else /* CONFIG_SMP */ + +struct sched_domain_attr; + +static inline void +partition_sched_domains(int ndoms_new, cpumask_t *doms_new, + struct sched_domain_attr *dattr_new) +{ +} +#endif /* !CONFIG_SMP */ struct io_context; /* See blkdev.h */ #define NGROUPS_SMALL 32 @@ -1247,7 +1264,7 @@ struct task_struct { #if defined(CONFIG_TASK_XACCT) u64 acct_rss_mem1; /* accumulated rss usage */ u64 acct_vm_mem1; /* accumulated virtual memory usage */ - cputime_t acct_stimexpd;/* stime since last update */ + cputime_t acct_timexpd; /* stime + utime since last update */ #endif #ifdef CONFIG_CPUSETS nodemask_t mems_allowed; @@ -1486,7 +1503,7 @@ static inline void put_task_struct(struct task_struct *t) #define PF_KSWAPD 0x00040000 /* I am kswapd */ #define PF_SWAPOFF 0x00080000 /* I am in swapoff */ #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ -#define PF_BORROWED_MM 0x00200000 /* I am a kthread doing use_mm */ +#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* 
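[Editor's note: a minimal sketch of the sg mapping iterator usage described above; the function name is invented. SG_MITER_ATOMIC is used on the assumption the caller may run in atomic context.]

#include <linux/scatterlist.h>
#include <linux/string.h>

static void my_sg_zero(struct scatterlist *sgl, unsigned int nents)
{
        struct sg_mapping_iter miter;

        sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
        while (sg_miter_next(&miter)) {
                /* miter.addr/miter.length cover one mapped chunk of one page */
                memset(miter.addr, 0, miter.length);
                /* miter.consumed defaults to miter.length; adjust only if consuming less */
        }
        sg_miter_stop(&miter);          /* must pair with sg_miter_start() */
}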
randomize virtual address space */ #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ @@ -1705,19 +1722,13 @@ extern struct pid_namespace init_pid_ns; * finds a task by its pid in the specified namespace * find_task_by_vpid(): * finds a task by its virtual pid - * find_task_by_pid(): - * finds a task by its global pid * - * see also find_pid() etc in include/linux/pid.h + * see also find_vpid() etc in include/linux/pid.h */ extern struct task_struct *find_task_by_pid_type_ns(int type, int pid, struct pid_namespace *ns); -static inline struct task_struct *__deprecated find_task_by_pid(pid_t nr) -{ - return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns); -} extern struct task_struct *find_task_by_vpid(pid_t nr); extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); @@ -1785,12 +1796,11 @@ extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_ extern int kill_pgrp(struct pid *pid, int sig, int priv); extern int kill_pid(struct pid *pid, int sig, int priv); extern int kill_proc_info(int, struct siginfo *, pid_t); -extern void do_notify_parent(struct task_struct *, int); +extern int do_notify_parent(struct task_struct *, int); extern void force_sig(int, struct task_struct *); extern void force_sig_specific(int, struct task_struct *); extern int send_sig(int, struct task_struct *, int); extern void zap_other_threads(struct task_struct *p); -extern int kill_proc(pid_t, int, int); extern struct sigqueue *sigqueue_alloc(void); extern void sigqueue_free(struct sigqueue *); extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); @@ -1872,9 +1882,13 @@ extern void set_task_comm(struct task_struct *tsk, char *from); extern char *get_task_comm(char *to, struct task_struct *tsk); #ifdef CONFIG_SMP -extern void wait_task_inactive(struct task_struct * p); +extern unsigned long wait_task_inactive(struct task_struct *, long match_state); #else -#define wait_task_inactive(p) do { } while (0) +static inline unsigned long wait_task_inactive(struct task_struct *p, + long match_state) +{ + return 1; +} #endif #define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks) @@ -1973,6 +1987,13 @@ static inline unsigned long *end_of_stack(struct task_struct *p) #endif +static inline int object_is_on_stack(void *obj) +{ + void *stack = task_stack_page(current); + + return (obj >= stack) && (obj < (stack + THREAD_SIZE)); +} + extern void thread_info_cache_init(void); /* set thread flags in other task's structures @@ -2037,9 +2058,6 @@ static inline int signal_pending_state(long state, struct task_struct *p) if (!signal_pending(p)) return 0; - if (state & (__TASK_STOPPED | __TASK_TRACED)) - return 0; - return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); } @@ -2124,16 +2142,7 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) #endif /* CONFIG_SMP */ -#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT extern void arch_pick_mmap_layout(struct mm_struct *mm); -#else -static inline void arch_pick_mmap_layout(struct mm_struct *mm) -{ - mm->mmap_base = TASK_UNMAPPED_BASE; - mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; -} -#endif #ifdef CONFIG_TRACING extern void @@ -2216,14 +2225,6 @@ static inline void inc_syscw(struct task_struct *tsk) } #endif -#ifdef CONFIG_SMP -void migration_init(void); -#else -static inline void migration_init(void) -{ -} -#endif - #ifndef 
TASK_SIZE_OF #define TASK_SIZE_OF(tsk) TASK_SIZE #endif diff --git a/include/linux/security.h b/include/linux/security.h index 31c8851ec5d..f0e9adb22ac 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -102,9 +102,7 @@ extern unsigned long mmap_min_addr; #define LSM_SETID_FS 8 /* forward declares to avoid warnings */ -struct nfsctl_arg; struct sched_param; -struct swap_info_struct; struct request_sock; /* bprm_apply_creds unsafe reasons */ diff --git a/include/linux/sem.h b/include/linux/sem.h index c8eaad9e4b7..1b191c176bc 100644 --- a/include/linux/sem.h +++ b/include/linux/sem.h @@ -78,6 +78,7 @@ struct seminfo { #ifdef __KERNEL__ #include <asm/atomic.h> +#include <linux/rcupdate.h> struct task_struct; @@ -93,23 +94,19 @@ struct sem_array { time_t sem_otime; /* last semop time */ time_t sem_ctime; /* last change time */ struct sem *sem_base; /* ptr to first semaphore in array */ - struct sem_queue *sem_pending; /* pending operations to be processed */ - struct sem_queue **sem_pending_last; /* last pending operation */ - struct sem_undo *undo; /* undo requests on this array */ + struct list_head sem_pending; /* pending operations to be processed */ + struct list_head list_id; /* undo requests on this array */ unsigned long sem_nsems; /* no. of semaphores in array */ }; /* One queue for each sleeping process in the system. */ struct sem_queue { - struct sem_queue * next; /* next entry in the queue */ - struct sem_queue ** prev; /* previous entry in the queue, *(q->prev) == q */ - struct task_struct* sleeper; /* this process */ - struct sem_undo * undo; /* undo structure */ + struct list_head list; /* queue of pending operations */ + struct task_struct *sleeper; /* this process */ + struct sem_undo *undo; /* undo structure */ int pid; /* process id of requesting process */ int status; /* completion status of operation */ - struct sem_array * sma; /* semaphore array for operations */ - int id; /* internal sem id */ - struct sembuf * sops; /* array of pending operations */ + struct sembuf *sops; /* array of pending operations */ int nsops; /* number of operations */ int alter; /* does the operation alter the array? */ }; @@ -118,8 +115,11 @@ struct sem_queue { * when the process exits. */ struct sem_undo { - struct sem_undo * proc_next; /* next entry on this process */ - struct sem_undo * id_next; /* next entry on this semaphore set */ + struct list_head list_proc; /* per-process list: all undos from one process. */ + /* rcu protected */ + struct rcu_head rcu; /* rcu struct for sem_undo() */ + struct sem_undo_list *ulp; /* sem_undo_list for the process */ + struct list_head list_id; /* per semaphore array list: all undos for one array */ int semid; /* semaphore set identifier */ short * semadj; /* array of adjustments, one per semaphore */ }; @@ -128,9 +128,9 @@ struct sem_undo { * that may be shared among all a CLONE_SYSVSEM task group. 
*/ struct sem_undo_list { - atomic_t refcnt; - spinlock_t lock; - struct sem_undo *proc_list; + atomic_t refcnt; + spinlock_t lock; + struct list_head list_proc; }; struct sysv_sem { diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h index 9cae64b00d6..7415839ac89 100644 --- a/include/linux/semaphore.h +++ b/include/linux/semaphore.h @@ -26,10 +26,8 @@ struct semaphore { .wait_list = LIST_HEAD_INIT((name).wait_list), \ } -#define __DECLARE_SEMAPHORE_GENERIC(name, count) \ - struct semaphore name = __SEMAPHORE_INITIALIZER(name, count) - -#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) +#define DECLARE_MUTEX(name) \ + struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) static inline void sema_init(struct semaphore *sem, int val) { diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index f3a1c0e4502..3b2f6c04855 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -59,9 +59,6 @@ #define PORT_SUNZILOG 38 #define PORT_SUNSAB 39 -/* NEC v850. */ -#define PORT_V850E_UART 40 - /* DEC */ #define PORT_DZ 46 #define PORT_ZS 47 diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h index ea037f28df9..bef0c46d471 100644 --- a/include/linux/signalfd.h +++ b/include/linux/signalfd.h @@ -8,6 +8,12 @@ #ifndef _LINUX_SIGNALFD_H #define _LINUX_SIGNALFD_H +/* For O_CLOEXEC and O_NONBLOCK */ +#include <linux/fcntl.h> + +/* Flags for signalfd4. */ +#define SFD_CLOEXEC O_CLOEXEC +#define SFD_NONBLOCK O_NONBLOCK struct signalfd_siginfo { __u32 ssi_signo; diff --git a/include/linux/slab.h b/include/linux/slab.h index be6f1d40b66..5ff9676c1e2 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -58,7 +58,7 @@ int slab_is_available(void); struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, unsigned long, - void (*)(struct kmem_cache *, void *)); + void (*)(void *)); void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); void kmem_cache_free(struct kmem_cache *, void *); @@ -181,7 +181,7 @@ size_t ksize(const void *); */ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) { - if (n != 0 && size > ULONG_MAX / n) + if (size != 0 && n > ULONG_MAX / size) return NULL; return __kmalloc(n * size, flags | __GFP_ZERO); } diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index d117ea2825a..5bad61a93f6 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -85,7 +85,7 @@ struct kmem_cache { struct kmem_cache_order_objects min; gfp_t allocflags; /* gfp flags to use on each alloc */ int refcount; /* Refcount for slab cache destroy */ - void (*ctor)(struct kmem_cache *, void *); + void (*ctor)(void *); int inuse; /* Offset to metadata */ int align; /* Alignment */ const char *name; /* Name (only for display!) 
*/ diff --git a/include/linux/sm501.h b/include/linux/sm501.h index 95c1c39ba44..214f93209b8 100644 --- a/include/linux/sm501.h +++ b/include/linux/sm501.h @@ -46,24 +46,6 @@ extern unsigned long sm501_modify_reg(struct device *dev, unsigned long set, unsigned long clear); -/* sm501_gpio_set - * - * set the state of the given GPIO line -*/ - -extern void sm501_gpio_set(struct device *dev, - unsigned long gpio, - unsigned int to, - unsigned int dir); - -/* sm501_gpio_get - * - * get the state of the given GPIO line -*/ - -extern unsigned long sm501_gpio_get(struct device *dev, - unsigned long gpio); - /* Platform data definitions */ @@ -73,6 +55,8 @@ extern unsigned long sm501_gpio_get(struct device *dev, #define SM501FB_FLAG_USE_HWACCEL (1<<3) #define SM501FB_FLAG_PANEL_NO_FPEN (1<<4) #define SM501FB_FLAG_PANEL_NO_VBIASEN (1<<5) +#define SM501FB_FLAG_PANEL_INV_FPEN (1<<6) +#define SM501FB_FLAG_PANEL_INV_VBIASEN (1<<7) struct sm501_platdata_fbsub { struct fb_videomode *def_mode; @@ -102,11 +86,19 @@ struct sm501_platdata_fb { struct sm501_platdata_fbsub *fb_pnl; }; -/* gpio i2c */ +/* gpio i2c + * + * Note, we have to pass in the bus number, as the number used will be + * passed to the i2c-gpio driver's platform_device.id, subsequently used + * to register the i2c bus. +*/ struct sm501_platdata_gpio_i2c { + unsigned int bus_num; unsigned int pin_sda; unsigned int pin_scl; + int udelay; + int timeout; }; /* sm501_initdata @@ -129,6 +121,7 @@ struct sm501_reg_init { #define SM501_USE_FBACCEL (1<<6) #define SM501_USE_AC97 (1<<7) #define SM501_USE_I2S (1<<8) +#define SM501_USE_GPIO (1<<9) #define SM501_USE_ALL (0xffffffff) @@ -155,6 +148,8 @@ struct sm501_init_gpio { struct sm501_reg_init gpio_ddr_high; }; +#define SM501_FLAG_SUSPEND_OFF (1<<4) + /* sm501_platdata * * This is passed with the platform device to allow the board @@ -168,6 +163,12 @@ struct sm501_platdata { struct sm501_init_gpio *init_gpiop; struct sm501_platdata_fb *fb; + int flags; + int gpio_base; + + int (*get_power)(struct device *dev); + int (*set_power)(struct device *dev, unsigned int on); + struct sm501_platdata_gpio_i2c *gpio_i2c; unsigned int gpio_i2c_nr; }; diff --git a/include/linux/smb_fs.h b/include/linux/smb_fs.h index 2c5cd55f44f..923cd8a247b 100644 --- a/include/linux/smb_fs.h +++ b/include/linux/smb_fs.h @@ -43,18 +43,13 @@ static inline struct smb_inode_info *SMB_I(struct inode *inode) } /* macro names are short for word, double-word, long value (?) 
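[Editor's note: a hypothetical board file filling in the extended sm501 platform data; bus_num is passed through as the i2c-gpio platform_device.id per the comment above, and the udelay/timeout values are assumed to follow i2c-gpio conventions. All values and names here are illustrative.]

#include <linux/kernel.h>
#include <linux/sm501.h>

static struct sm501_platdata_gpio_i2c my_sm501_i2c[] = {
        {
                .bus_num = 2,           /* becomes the i2c-gpio platform_device.id */
                .pin_sda = 1,
                .pin_scl = 2,
                .udelay  = 5,
                .timeout = 100,
        },
};

static struct sm501_platdata my_sm501_pdata = {
        .flags       = SM501_FLAG_SUSPEND_OFF,
        .gpio_base   = 96,
        .gpio_i2c    = my_sm501_i2c,
        .gpio_i2c_nr = ARRAY_SIZE(my_sm501_i2c),
};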
*/ -#define WVAL(buf,pos) \ - (le16_to_cpu(get_unaligned((__le16 *)((u8 *)(buf) + (pos))))) -#define DVAL(buf,pos) \ - (le32_to_cpu(get_unaligned((__le32 *)((u8 *)(buf) + (pos))))) -#define LVAL(buf,pos) \ - (le64_to_cpu(get_unaligned((__le64 *)((u8 *)(buf) + (pos))))) -#define WSET(buf,pos,val) \ - put_unaligned(cpu_to_le16((u16)(val)), (__le16 *)((u8 *)(buf) + (pos))) -#define DSET(buf,pos,val) \ - put_unaligned(cpu_to_le32((u32)(val)), (__le32 *)((u8 *)(buf) + (pos))) -#define LSET(buf,pos,val) \ - put_unaligned(cpu_to_le64((u64)(val)), (__le64 *)((u8 *)(buf) + (pos))) +#define WVAL(buf, pos) (get_unaligned_le16((u8 *)(buf) + (pos))) +#define DVAL(buf, pos) (get_unaligned_le32((u8 *)(buf) + (pos))) +#define LVAL(buf, pos) (get_unaligned_le64((u8 *)(buf) + (pos))) + +#define WSET(buf, pos, val) put_unaligned_le16((val), (u8 *)(buf) + (pos)) +#define DSET(buf, pos, val) put_unaligned_le32((val), (u8 *)(buf) + (pos)) +#define LSET(buf, pos, val) put_unaligned_le64((val), (u8 *)(buf) + (pos)) /* where to find the base of the SMB packet proper */ #define smb_base(buf) ((u8 *)(((u8 *)(buf))+4)) diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h index 8e0556b8781..3827b922ba1 100644 --- a/include/linux/smc91x.h +++ b/include/linux/smc91x.h @@ -5,9 +5,19 @@ #define SMC91X_USE_16BIT (1 << 1) #define SMC91X_USE_32BIT (1 << 2) +#define SMC91X_NOWAIT (1 << 3) + +/* two bits for IO_SHIFT, let's hope later designs will keep this sane */ +#define SMC91X_IO_SHIFT_0 (0 << 4) +#define SMC91X_IO_SHIFT_1 (1 << 4) +#define SMC91X_IO_SHIFT_2 (2 << 4) +#define SMC91X_IO_SHIFT_3 (3 << 4) +#define SMC91X_IO_SHIFT(x) (((x) >> 4) & 0x3) + +#define SMC91X_USE_DMA (1 << 6) + struct smc91x_platdata { unsigned long flags; - unsigned long irq_flags; /* IRQF_... */ }; #endif /* __SMC91X_H__ */ diff --git a/include/linux/smp.h b/include/linux/smp.h index 48262f86c96..66484d4a845 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -74,15 +74,10 @@ void __smp_call_function_single(int cpuid, struct call_single_data *data); #ifdef CONFIG_USE_GENERIC_SMP_HELPERS void generic_smp_call_function_single_interrupt(void); void generic_smp_call_function_interrupt(void); -void init_call_single_data(void); void ipi_call_lock(void); void ipi_call_unlock(void); void ipi_call_lock_irq(void); void ipi_call_unlock_irq(void); -#else -static inline void init_call_single_data(void) -{ -} #endif /* diff --git a/include/linux/spi/ds1305.h b/include/linux/spi/ds1305.h new file mode 100644 index 00000000000..287ec830eab --- /dev/null +++ b/include/linux/spi/ds1305.h @@ -0,0 +1,35 @@ +#ifndef __LINUX_SPI_DS1305_H +#define __LINUX_SPI_DS1305_H + +/* + * One-time configuration for ds1305 and ds1306 RTC chips. + * + * Put a pointer to this in spi_board_info.platform_data if you want to + * be sure that Linux (re)initializes this as needed ... after losing + * backup power, and potentially on the first boot. + */ +struct ds1305_platform_data { + + /* Trickle charge configuration: it's OK to leave out the MAGIC + * bitmask; mask in either DS1 or DS2, and then one of 2K/4k/8K. 
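[Editor's note: the new SMC91X_IO_SHIFT_* and SMC91X_NOWAIT bits replace the removed irq_flags field; a hypothetical board entry, values illustrative.]

#include <linux/smc91x.h>

/* 16-bit attachment, registers spaced 1 << 2 = 4 bytes apart, no wait states */
static struct smc91x_platdata my_smc91x_pdata = {
        .flags = SMC91X_USE_16BIT | SMC91X_IO_SHIFT_2 | SMC91X_NOWAIT,
};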
+ */ +#define DS1305_TRICKLE_MAGIC 0xa0 +#define DS1305_TRICKLE_DS2 0x08 /* two diodes */ +#define DS1305_TRICKLE_DS1 0x04 /* one diode */ +#define DS1305_TRICKLE_2K 0x01 /* 2 KOhm resistance */ +#define DS1305_TRICKLE_4K 0x02 /* 4 KOhm resistance */ +#define DS1305_TRICKLE_8K 0x03 /* 8 KOhm resistance */ + u8 trickle; + + /* set only on ds1306 parts */ + bool is_ds1306; + + /* ds1306 only: enable 1 Hz output */ + bool en_1hz; + + /* REVISIT: the driver currently expects nINT0 to be wired + * as the alarm IRQ. ALM1 may also need to be set up ... + */ +}; + +#endif /* __LINUX_SPI_DS1305_H */ diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h index 835ddf47d45..22ef107d770 100644 --- a/include/linux/spi/mcp23s08.h +++ b/include/linux/spi/mcp23s08.h @@ -1,18 +1,25 @@ -/* FIXME driver should be able to handle all four slaves that - * can be hooked up to each chipselect, as well as IRQs... - */ +/* FIXME driver should be able to handle IRQs... */ + +struct mcp23s08_chip_info { + bool is_present; /* true iff populated */ + u8 pullups; /* BIT(x) means enable pullup x */ +}; struct mcp23s08_platform_data { - /* four slaves can share one SPI chipselect */ - u8 slave; + /* Four slaves (numbered 0..3) can share one SPI chipselect, and + * will provide 8..32 GPIOs using 1..4 gpio_chip instances. + */ + struct mcp23s08_chip_info chip[4]; - /* number assigned to the first GPIO */ + /* "base" is the number of the first GPIO. Dynamic assignment is + * not currently supported, and even if there are gaps in chip + * addressing the GPIO numbers are sequential .. so for example + * if only slaves 0 and 3 are present, their GPIOs range from + * base to base+15. + */ unsigned base; - /* pins with pullups */ - u8 pullups; - void *context; /* param to setup/teardown */ int (*setup)(struct spi_device *spi, diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index b9a76c97208..a9cc29d4665 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -82,7 +82,7 @@ struct spi_device { int irq; void *controller_state; void *controller_data; - const char *modalias; + char modalias[32]; /* * likely need more hooks for more protocol options affecting how diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index d311a090fae..61e5610ad16 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -46,6 +46,7 @@ * linux/spinlock.h: builds the final spin_*() APIs. 
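[Editor's note: a hypothetical platform data block for the reworked mcp23s08 driver, following the comments above: slaves 0 and 3 populated on one chipselect, GPIO numbers assigned sequentially from base. Values are illustrative.]

#include <linux/spi/mcp23s08.h>

static struct mcp23s08_platform_data my_mcp23s08_pdata = {
        .chip = {
                [0] = {
                        .is_present = true,
                        .pullups    = 0x03,     /* pull up GPIOs 0 and 1 of slave 0 */
                },
                [3] = {
                        .is_present = true,
                },
        },
        /* slave 0 -> GPIOs 240..247, slave 3 -> 248..255 (numbering is sequential) */
        .base = 240,
};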
*/ +#include <linux/typecheck.h> #include <linux/preempt.h> #include <linux/linkage.h> #include <linux/compiler.h> @@ -191,23 +192,53 @@ do { \ #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) -#define spin_lock_irqsave(lock, flags) flags = _spin_lock_irqsave(lock) -#define read_lock_irqsave(lock, flags) flags = _read_lock_irqsave(lock) -#define write_lock_irqsave(lock, flags) flags = _write_lock_irqsave(lock) +#define spin_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _spin_lock_irqsave(lock); \ + } while (0) +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _read_lock_irqsave(lock); \ + } while (0) +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _write_lock_irqsave(lock); \ + } while (0) #ifdef CONFIG_DEBUG_LOCK_ALLOC -#define spin_lock_irqsave_nested(lock, flags, subclass) \ - flags = _spin_lock_irqsave_nested(lock, subclass) +#define spin_lock_irqsave_nested(lock, flags, subclass) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _spin_lock_irqsave_nested(lock, subclass); \ + } while (0) #else -#define spin_lock_irqsave_nested(lock, flags, subclass) \ - flags = _spin_lock_irqsave(lock) +#define spin_lock_irqsave_nested(lock, flags, subclass) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _spin_lock_irqsave(lock); \ + } while (0) #endif #else -#define spin_lock_irqsave(lock, flags) _spin_lock_irqsave(lock, flags) -#define read_lock_irqsave(lock, flags) _read_lock_irqsave(lock, flags) -#define write_lock_irqsave(lock, flags) _write_lock_irqsave(lock, flags) +#define spin_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _spin_lock_irqsave(lock, flags); \ + } while (0) +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _read_lock_irqsave(lock, flags); \ + } while (0) +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _write_lock_irqsave(lock, flags); \ + } while (0) #define spin_lock_irqsave_nested(lock, flags, subclass) \ spin_lock_irqsave(lock, flags) @@ -260,16 +291,25 @@ do { \ } while (0) #endif -#define spin_unlock_irqrestore(lock, flags) \ - _spin_unlock_irqrestore(lock, flags) +#define spin_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _spin_unlock_irqrestore(lock, flags); \ + } while (0) #define spin_unlock_bh(lock) _spin_unlock_bh(lock) -#define read_unlock_irqrestore(lock, flags) \ - _read_unlock_irqrestore(lock, flags) +#define read_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _read_unlock_irqrestore(lock, flags); \ + } while (0) #define read_unlock_bh(lock) _read_unlock_bh(lock) -#define write_unlock_irqrestore(lock, flags) \ - _write_unlock_irqrestore(lock, flags) +#define write_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _write_unlock_irqrestore(lock, flags); \ + } while (0) #define write_unlock_bh(lock) _write_unlock_bh(lock) #define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock)) diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 4bf8cade9db..e530026eedf 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -427,9 +427,9 @@ static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr) { switch (dev->bus->bustype) { case SSB_BUSTYPE_PCI: - return pci_dma_mapping_error(addr); + return pci_dma_mapping_error(dev->bus->host_pci, addr); 
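[Editor's note: the typecheck() wrappers make the long-standing requirement that "flags" be an unsigned long enforceable at compile time; a small sketch of what now compiles cleanly, with the lock and function names invented.]

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);

static void my_critical_section(void)
{
        unsigned long flags;            /* correct type: compiles cleanly */

        spin_lock_irqsave(&my_lock, flags);
        /* ... */
        spin_unlock_irqrestore(&my_lock, flags);
}

/*
 * Declaring "unsigned int flags" instead would now trigger a
 * "comparison of distinct pointer types lacks a cast" warning from
 * typecheck(), on both the UP and the SMP/debug variants of the macros.
 */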
case SSB_BUSTYPE_SSB: - return dma_mapping_error(addr); + return dma_mapping_error(dev->dev, addr); default: __ssb_dma_not_implemented(dev); } diff --git a/include/linux/string.h b/include/linux/string.h index efdc44593b5..810d80df0a1 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -111,5 +111,8 @@ extern void argv_free(char **argv); extern bool sysfs_streq(const char *s1, const char *s2); +extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, + const void *from, size_t available); + #endif #endif /* _LINUX_STRING_H_ */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index e8e69159af7..c6343509597 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -278,4 +278,6 @@ static inline void register_nosave_region_late(unsigned long b, unsigned long e) } #endif +extern struct mutex pm_mutex; + #endif /* _LINUX_SUSPEND_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 0b3377650c8..de40f169a4e 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -237,7 +237,6 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t, /* linux/mm/swapfile.c */ extern long total_swap_pages; -extern unsigned int nr_swapfiles; extern void si_swapinfo(struct sysinfo *); extern swp_entry_t get_swap_page(void); extern swp_entry_t get_swap_page_of_type(int); @@ -254,8 +253,6 @@ extern int can_share_swap_page(struct page *); extern int remove_exclusive_swap_page(struct page *); struct backing_dev_info; -extern spinlock_t swap_lock; - /* linux/mm/thrash.c */ extern struct mm_struct * swap_token_mm; extern void grab_swap_token(void); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 0522f368f9d..d6ff145919c 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -305,6 +305,7 @@ asmlinkage long sys_fcntl64(unsigned int fd, #endif asmlinkage long sys_dup(unsigned int fildes); asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd); +asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags); asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on); asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg); @@ -409,6 +410,8 @@ asmlinkage long sys_getsockopt(int fd, int level, int optname, asmlinkage long sys_bind(int, struct sockaddr __user *, int); asmlinkage long sys_connect(int, struct sockaddr __user *, int); asmlinkage long sys_accept(int, struct sockaddr __user *, int __user *); +asmlinkage long sys_paccept(int, struct sockaddr __user *, int __user *, + const __user sigset_t *, size_t, int); asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *); asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *); asmlinkage long sys_send(int, void __user *, size_t, unsigned); @@ -428,6 +431,7 @@ asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds, asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp); asmlinkage long sys_epoll_create(int size); +asmlinkage long sys_epoll_create1(int flags); asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, struct epoll_event __user *event); asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events, @@ -443,7 +447,7 @@ asmlinkage long sys_newuname(struct new_utsname __user *name); asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim); -#if defined(COMPAT_RLIM_OLD_INFINITY) || !(defined(CONFIG_IA64) || 
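[Editor's note: memory_read_from_buffer() factors out the usual offset/bounds handling of a read that copies out of a fixed kernel buffer; a hypothetical sysfs binary-attribute read hook using it, assuming the bin_attribute read signature of this era. Names are invented.]

#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>

static char my_blob[128];       /* data exported to userspace */

static ssize_t my_blob_read(struct kobject *kobj, struct bin_attribute *attr,
                            char *buf, loff_t off, size_t count)
{
        return memory_read_from_buffer(buf, count, &off, my_blob, sizeof(my_blob));
}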
defined(CONFIG_V850)) +#if defined(COMPAT_RLIM_OLD_INFINITY) || !(defined(CONFIG_IA64)) asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim); #endif asmlinkage long sys_setrlimit(unsigned int resource, @@ -543,6 +547,7 @@ asmlinkage long sys_get_mempolicy(int __user *policy, unsigned long addr, unsigned long flags); asmlinkage long sys_inotify_init(void); +asmlinkage long sys_inotify_init1(int flags); asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask); asmlinkage long sys_inotify_rm_watch(int fd, u32 wd); @@ -608,12 +613,14 @@ asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, size_t len); asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache); asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask); +asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, int flags); asmlinkage long sys_timerfd_create(int clockid, int flags); asmlinkage long sys_timerfd_settime(int ufd, int flags, const struct itimerspec __user *utmr, struct itimerspec __user *otmr); asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr); asmlinkage long sys_eventfd(unsigned int count); +asmlinkage long sys_eventfd2(unsigned int count, int flags); asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); int kernel_execve(const char *filename, char *const argv[], char *const envp[]); diff --git a/include/linux/taskstats.h b/include/linux/taskstats.h index 5d69c0744ff..18269e956a7 100644 --- a/include/linux/taskstats.h +++ b/include/linux/taskstats.h @@ -31,7 +31,7 @@ */ -#define TASKSTATS_VERSION 6 +#define TASKSTATS_VERSION 7 #define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN * in linux/sched.h */ @@ -157,6 +157,10 @@ struct taskstats { __u64 ac_utimescaled; /* utime scaled on frequency etc */ __u64 ac_stimescaled; /* stime scaled on frequency etc */ __u64 cpu_scaled_run_real_total; /* scaled cpu_run_real_total */ + + /* Delay waiting for memory reclaim */ + __u64 freepages_count; + __u64 freepages_delay_total; }; diff --git a/include/linux/tick.h b/include/linux/tick.h index a881c652f7e..d3c02695dc5 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -49,6 +49,7 @@ struct tick_sched { unsigned long check_clocks; enum tick_nohz_mode nohz_mode; ktime_t idle_tick; + int inidle; int tick_stopped; unsigned long idle_jiffies; unsigned long idle_calls; @@ -105,14 +106,14 @@ static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ # ifdef CONFIG_NO_HZ -extern void tick_nohz_stop_sched_tick(void); +extern void tick_nohz_stop_sched_tick(int inidle); extern void tick_nohz_restart_sched_tick(void); extern void tick_nohz_update_jiffies(void); extern ktime_t tick_nohz_get_sleep_length(void); extern void tick_nohz_stop_idle(int cpu); extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); # else -static inline void tick_nohz_stop_sched_tick(void) { } +static inline void tick_nohz_stop_sched_tick(int inidle) { } static inline void tick_nohz_restart_sched_tick(void) { } static inline void tick_nohz_update_jiffies(void) { } static inline ktime_t tick_nohz_get_sleep_length(void) diff --git a/include/linux/timerfd.h b/include/linux/timerfd.h index cf2b10d7573..86cb0501d3e 100644 --- a/include/linux/timerfd.h +++ b/include/linux/timerfd.h @@ -8,9 +8,15 @@ #ifndef _LINUX_TIMERFD_H #define _LINUX_TIMERFD_H +/* For O_CLOEXEC and 
O_NONBLOCK */ +#include <linux/fcntl.h> +/* Flags for timerfd_settime. */ #define TFD_TIMER_ABSTIME (1 << 0) +/* Flags for timerfd_create. */ +#define TFD_CLOEXEC O_CLOEXEC +#define TFD_NONBLOCK O_NONBLOCK #endif /* _LINUX_TIMERFD_H */ diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h new file mode 100644 index 00000000000..589f429619c --- /dev/null +++ b/include/linux/tracehook.h @@ -0,0 +1,575 @@ +/* + * Tracing hooks + * + * Copyright (C) 2008 Red Hat, Inc. All rights reserved. + * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. + * + * This file defines hook entry points called by core code where + * user tracing/debugging support might need to do something. These + * entry points are called tracehook_*(). Each hook declared below + * has a detailed kerneldoc comment giving the context (locking et + * al) from which it is called, and the meaning of its return value. + * + * Each function here typically has only one call site, so it is ok + * to have some nontrivial tracehook_*() inlines. In all cases, the + * fast path when no tracing is enabled should be very short. + * + * The purpose of this file and the tracehook_* layer is to consolidate + * the interface that the kernel core and arch code uses to enable any + * user debugging or tracing facility (such as ptrace). The interfaces + * here are carefully documented so that maintainers of core and arch + * code do not need to think about the implementation details of the + * tracing facilities. Likewise, maintainers of the tracing code do not + * need to understand all the calling core or arch code in detail, just + * documented circumstances of each call, such as locking conditions. + * + * If the calling core code changes so that locking is different, then + * it is ok to change the interface documented here. The maintainer of + * core code changing should notify the maintainers of the tracing code + * that they need to work out the change. + * + * Some tracehook_*() inlines take arguments that the current tracing + * implementations might not necessarily use. These function signatures + * are chosen to pass in all the information that is on hand in the + * caller and might conceivably be relevant to a tracer, so that the + * core code won't have to be updated when tracing adds more features. + * If a call site changes so that some of those parameters are no longer + * already on hand without extra work, then the tracehook_* interface + * can change so there is no make-work burden on the core code. The + * maintainer of core code changing should notify the maintainers of the + * tracing code that they need to work out the change. + */ + +#ifndef _LINUX_TRACEHOOK_H +#define _LINUX_TRACEHOOK_H 1 + +#include <linux/sched.h> +#include <linux/ptrace.h> +#include <linux/security.h> +struct linux_binprm; + +/** + * tracehook_expect_breakpoints - guess if task memory might be touched + * @task: current task, making a new mapping + * + * Return nonzero if @task is expected to want breakpoint insertion in + * its memory at some point. A zero return is no guarantee it won't + * be done, but this is a hint that it's known to be likely. + * + * May be called with @task->mm->mmap_sem held for writing. 
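[Editor's note: from userspace, the new SFD_*/TFD_* flags (backed by the signalfd4 syscall and the flags argument of timerfd_create) allow descriptors to be created close-on-exec and non-blocking atomically; a hedged userspace sketch, assuming a libc that already exposes the flag arguments.]

#include <signal.h>
#include <time.h>
#include <sys/signalfd.h>
#include <sys/timerfd.h>

int make_fds(void)
{
        sigset_t mask;
        int sfd, tfd;

        sigemptyset(&mask);
        sigaddset(&mask, SIGINT);

        sfd = signalfd(-1, &mask, SFD_CLOEXEC | SFD_NONBLOCK);
        tfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK);

        return (sfd < 0 || tfd < 0) ? -1 : 0;
}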
+ */ +static inline int tracehook_expect_breakpoints(struct task_struct *task) +{ + return (task_ptrace(task) & PT_PTRACED) != 0; +} + +/* + * ptrace report for syscall entry and exit looks identical. + */ +static inline void ptrace_report_syscall(struct pt_regs *regs) +{ + int ptrace = task_ptrace(current); + + if (!(ptrace & PT_PTRACED)) + return; + + ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); + + /* + * this isn't the same as continuing with a signal, but it will do + * for normal use. strace only continues with a signal if the + * stopping signal is not SIGTRAP. -brl + */ + if (current->exit_code) { + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } +} + +/** + * tracehook_report_syscall_entry - task is about to attempt a system call + * @regs: user register state of current task + * + * This will be called if %TIF_SYSCALL_TRACE has been set, when the + * current task has just entered the kernel for a system call. + * Full user register state is available here. Changing the values + * in @regs can affect the system call number and arguments to be tried. + * It is safe to block here, preventing the system call from beginning. + * + * Returns zero normally, or nonzero if the calling arch code should abort + * the system call. That must prevent normal entry so no system call is + * made. If @task ever returns to user mode after this, its register state + * is unspecified, but should be something harmless like an %ENOSYS error + * return. It should preserve enough information so that syscall_rollback() + * can work (see asm-generic/syscall.h). + * + * Called without locks, just after entering kernel mode. + */ +static inline __must_check int tracehook_report_syscall_entry( + struct pt_regs *regs) +{ + ptrace_report_syscall(regs); + return 0; +} + +/** + * tracehook_report_syscall_exit - task has just finished a system call + * @regs: user register state of current task + * @step: nonzero if simulating single-step or block-step + * + * This will be called if %TIF_SYSCALL_TRACE has been set, when the + * current task has just finished an attempted system call. Full + * user register state is available here. It is safe to block here, + * preventing signals from being processed. + * + * If @step is nonzero, this report is also in lieu of the normal + * trap that would follow the system call instruction because + * user_enable_block_step() or user_enable_single_step() was used. + * In this case, %TIF_SYSCALL_TRACE might not be set. + * + * Called without locks, just before checking for pending signals. + */ +static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) +{ + ptrace_report_syscall(regs); +} + +/** + * tracehook_unsafe_exec - check for exec declared unsafe due to tracing + * @task: current task doing exec + * + * Return %LSM_UNSAFE_* bits applied to an exec because of tracing. + * + * Called with task_lock() held on @task. + */ +static inline int tracehook_unsafe_exec(struct task_struct *task) +{ + int unsafe = 0; + int ptrace = task_ptrace(task); + if (ptrace & PT_PTRACED) { + if (ptrace & PT_PTRACE_CAP) + unsafe |= LSM_UNSAFE_PTRACE_CAP; + else + unsafe |= LSM_UNSAFE_PTRACE; + } + return unsafe; +} + +/** + * tracehook_tracer_task - return the task that is tracing the given task + * @tsk: task to consider + * + * Returns NULL if noone is tracing @task, or the &struct task_struct + * pointer to its tracer. + * + * Must called under rcu_read_lock(). The pointer returned might be kept + * live only by RCU. 
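[Editor's note: a sketch of how an arch's syscall tracing path might sit on top of the two hooks just defined; the entry/exit function names and the way they would be wired from assembly are illustrative, not part of this patch.]

#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/tracehook.h>

/* called when TIF_SYSCALL_TRACE (or single-step) work is pending on entry */
asmlinkage long my_syscall_trace_enter(struct pt_regs *regs)
{
        long ret = 0;

        if (test_thread_flag(TIF_SYSCALL_TRACE))
                ret = tracehook_report_syscall_entry(regs);

        /* nonzero: arch code must abort the call, e.g. force an -ENOSYS return */
        return ret;
}

/* called on the way back out, before the pending-signal check */
asmlinkage void my_syscall_trace_leave(struct pt_regs *regs, int stepping)
{
        if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, stepping);
}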
During exec, this may be called with task_lock() + * held on @task, still held from when tracehook_unsafe_exec() was called. + */ +static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk) +{ + if (task_ptrace(tsk) & PT_PTRACED) + return rcu_dereference(tsk->parent); + return NULL; +} + +/** + * tracehook_report_exec - a successful exec was completed + * @fmt: &struct linux_binfmt that performed the exec + * @bprm: &struct linux_binprm containing exec details + * @regs: user-mode register state + * + * An exec just completed, we are shortly going to return to user mode. + * The freshly initialized register state can be seen and changed in @regs. + * The name, file and other pointers in @bprm are still on hand to be + * inspected, but will be freed as soon as this returns. + * + * Called with no locks, but with some kernel resources held live + * and a reference on @fmt->module. + */ +static inline void tracehook_report_exec(struct linux_binfmt *fmt, + struct linux_binprm *bprm, + struct pt_regs *regs) +{ + if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) && + unlikely(task_ptrace(current) & PT_PTRACED)) + send_sig(SIGTRAP, current, 0); +} + +/** + * tracehook_report_exit - task has begun to exit + * @exit_code: pointer to value destined for @current->exit_code + * + * @exit_code points to the value passed to do_exit(), which tracing + * might change here. This is almost the first thing in do_exit(), + * before freeing any resources or setting the %PF_EXITING flag. + * + * Called with no locks held. + */ +static inline void tracehook_report_exit(long *exit_code) +{ + ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code); +} + +/** + * tracehook_prepare_clone - prepare for new child to be cloned + * @clone_flags: %CLONE_* flags from clone/fork/vfork system call + * + * This is called before a new user task is to be cloned. + * Its return value will be passed to tracehook_finish_clone(). + * + * Called with no locks held. + */ +static inline int tracehook_prepare_clone(unsigned clone_flags) +{ + if (clone_flags & CLONE_UNTRACED) + return 0; + + if (clone_flags & CLONE_VFORK) { + if (current->ptrace & PT_TRACE_VFORK) + return PTRACE_EVENT_VFORK; + } else if ((clone_flags & CSIGNAL) != SIGCHLD) { + if (current->ptrace & PT_TRACE_CLONE) + return PTRACE_EVENT_CLONE; + } else if (current->ptrace & PT_TRACE_FORK) + return PTRACE_EVENT_FORK; + + return 0; +} + +/** + * tracehook_finish_clone - new child created and being attached + * @child: new child task + * @clone_flags: %CLONE_* flags from clone/fork/vfork system call + * @trace: return value from tracehook_clone_prepare() + * + * This is called immediately after adding @child to its parent's children list. + * The @trace value is that returned by tracehook_prepare_clone(). + * + * Called with current's siglock and write_lock_irq(&tasklist_lock) held. + */ +static inline void tracehook_finish_clone(struct task_struct *child, + unsigned long clone_flags, int trace) +{ + ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace); +} + +/** + * tracehook_report_clone - in parent, new child is about to start running + * @trace: return value from tracehook_clone_prepare() + * @regs: parent's user register state + * @clone_flags: flags from parent's system call + * @pid: new child's PID in the parent's namespace + * @child: new child task + * + * Called after a child is set up, but before it has been started running. + * The @trace value is that returned by tracehook_clone_prepare(). 
+ * This is not a good place to block, because the child has not started yet. + * Suspend the child here if desired, and block in tracehook_clone_complete(). + * This must prevent the child from self-reaping if tracehook_clone_complete() + * uses the @child pointer; otherwise it might have died and been released by + * the time tracehook_report_clone_complete() is called. + * + * Called with no locks held, but the child cannot run until this returns. + */ +static inline void tracehook_report_clone(int trace, struct pt_regs *regs, + unsigned long clone_flags, + pid_t pid, struct task_struct *child) +{ + if (unlikely(trace)) { + /* + * The child starts up with an immediate SIGSTOP. + */ + sigaddset(&child->pending.signal, SIGSTOP); + set_tsk_thread_flag(child, TIF_SIGPENDING); + } +} + +/** + * tracehook_report_clone_complete - new child is running + * @trace: return value from tracehook_clone_prepare() + * @regs: parent's user register state + * @clone_flags: flags from parent's system call + * @pid: new child's PID in the parent's namespace + * @child: child task, already running + * + * This is called just after the child has started running. This is + * just before the clone/fork syscall returns, or blocks for vfork + * child completion if @clone_flags has the %CLONE_VFORK bit set. + * The @child pointer may be invalid if a self-reaping child died and + * tracehook_report_clone() took no action to prevent it from self-reaping. + * + * Called with no locks held. + */ +static inline void tracehook_report_clone_complete(int trace, + struct pt_regs *regs, + unsigned long clone_flags, + pid_t pid, + struct task_struct *child) +{ + if (unlikely(trace)) + ptrace_event(0, trace, pid); +} + +/** + * tracehook_report_vfork_done - vfork parent's child has exited or exec'd + * @child: child task, already running + * @pid: new child's PID in the parent's namespace + * + * Called after a %CLONE_VFORK parent has waited for the child to complete. + * The clone/vfork system call will return immediately after this. + * The @child pointer may be invalid if a self-reaping child died and + * tracehook_report_clone() took no action to prevent it from self-reaping. + * + * Called with no locks held. + */ +static inline void tracehook_report_vfork_done(struct task_struct *child, + pid_t pid) +{ + ptrace_event(PT_TRACE_VFORK_DONE, PTRACE_EVENT_VFORK_DONE, pid); +} + +/** + * tracehook_prepare_release_task - task is being reaped, clean up tracing + * @task: task in %EXIT_DEAD state + * + * This is called in release_task() just before @task gets finally reaped + * and freed. This would be the ideal place to remove and clean up any + * tracing-related state for @task. + * + * Called with no locks held. + */ +static inline void tracehook_prepare_release_task(struct task_struct *task) +{ +} + +/** + * tracehook_finish_release_task - task is being reaped, clean up tracing + * @task: task in %EXIT_DEAD state + * + * This is called in release_task() when @task is being in the middle of + * being reaped. After this, there must be no tracing entanglements. + * + * Called with write_lock_irq(&tasklist_lock) held. 
+ */ +static inline void tracehook_finish_release_task(struct task_struct *task) +{ + ptrace_release_task(task); +} + +/** + * tracehook_signal_handler - signal handler setup is complete + * @sig: number of signal being delivered + * @info: siginfo_t of signal being delivered + * @ka: sigaction setting that chose the handler + * @regs: user register state + * @stepping: nonzero if debugger single-step or block-step in use + * + * Called by the arch code after a signal handler has been set up. + * Register and stack state reflects the user handler about to run. + * Signal mask changes have already been made. + * + * Called without locks, shortly before returning to user mode + * (or handling more signals). + */ +static inline void tracehook_signal_handler(int sig, siginfo_t *info, + const struct k_sigaction *ka, + struct pt_regs *regs, int stepping) +{ + if (stepping) + ptrace_notify(SIGTRAP); +} + +/** + * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal + * @task: task receiving the signal + * @sig: signal number being sent + * @handler: %SIG_IGN or %SIG_DFL + * + * Return zero iff tracing doesn't care to examine this ignored signal, + * so it can short-circuit normal delivery and never even get queued. + * Either @handler is %SIG_DFL and @sig's default is ignore, or it's %SIG_IGN. + * + * Called with @task->sighand->siglock held. + */ +static inline int tracehook_consider_ignored_signal(struct task_struct *task, + int sig, + void __user *handler) +{ + return (task_ptrace(task) & PT_PTRACED) != 0; +} + +/** + * tracehook_consider_fatal_signal - suppress special handling of fatal signal + * @task: task receiving the signal + * @sig: signal number being sent + * @handler: %SIG_DFL or %SIG_IGN + * + * Return nonzero to prevent special handling of this termination signal. + * Normally @handler is %SIG_DFL. It can be %SIG_IGN if @sig is ignored, + * in which case force_sig() is about to reset it to %SIG_DFL. + * When this returns zero, this signal might cause a quick termination + * that does not give the debugger a chance to intercept the signal. + * + * Called with or without @task->sighand->siglock held. + */ +static inline int tracehook_consider_fatal_signal(struct task_struct *task, + int sig, + void __user *handler) +{ + return (task_ptrace(task) & PT_PTRACED) != 0; +} + +/** + * tracehook_force_sigpending - let tracing force signal_pending(current) on + * + * Called when recomputing our signal_pending() flag. Return nonzero + * to force the signal_pending() flag on, so that tracehook_get_signal() + * will be called before the next return to user mode. + * + * Called with @current->sighand->siglock held. + */ +static inline int tracehook_force_sigpending(void) +{ + return 0; +} + +/** + * tracehook_get_signal - deliver synthetic signal to traced task + * @task: @current + * @regs: task_pt_regs(@current) + * @info: details of synthetic signal + * @return_ka: sigaction for synthetic signal + * + * Return zero to check for a real pending signal normally. + * Return -1 after releasing the siglock to repeat the check. + * Return a signal number to induce an artifical signal delivery, + * setting *@info and *@return_ka to specify its details and behavior. + * + * The @return_ka->sa_handler value controls the disposition of the + * signal, no matter the signal number. For %SIG_DFL, the return value + * is a representative signal to indicate the behavior (e.g. 
%SIGTERM + * for death, %SIGQUIT for core dump, %SIGSTOP for job control stop, + * %SIGTSTP for stop unless in an orphaned pgrp), but the signal number + * reported will be @info->si_signo instead. + * + * Called with @task->sighand->siglock held, before dequeuing pending signals. + */ +static inline int tracehook_get_signal(struct task_struct *task, + struct pt_regs *regs, + siginfo_t *info, + struct k_sigaction *return_ka) +{ + return 0; +} + +/** + * tracehook_notify_jctl - report about job control stop/continue + * @notify: nonzero if this is the last thread in the group to stop + * @why: %CLD_STOPPED or %CLD_CONTINUED + * + * This is called when we might call do_notify_parent_cldstop(). + * It's called when about to stop for job control; we are already in + * %TASK_STOPPED state, about to call schedule(). It's also called when + * a delayed %CLD_STOPPED or %CLD_CONTINUED report is ready to be made. + * + * Return nonzero to generate a %SIGCHLD with @why, which is + * normal if @notify is nonzero. + * + * Called with no locks held. + */ +static inline int tracehook_notify_jctl(int notify, int why) +{ + return notify || (current->ptrace & PT_PTRACED); +} + +/** + * tracehook_notify_death - task is dead, ready to notify parent + * @task: @current task now exiting + * @death_cookie: value to pass to tracehook_report_death() + * @group_dead: nonzero if this was the last thread in the group to die + * + * Return the signal number to send our parent with do_notify_parent(), or + * zero to send no signal and leave a zombie, or -1 to self-reap right now. + * + * Called with write_lock_irq(&tasklist_lock) held. + */ +static inline int tracehook_notify_death(struct task_struct *task, + void **death_cookie, int group_dead) +{ + if (task->exit_signal == -1) + return task->ptrace ? SIGCHLD : -1; + + /* + * If something other than our normal parent is ptracing us, then + * send it a SIGCHLD instead of honoring exit_signal. exit_signal + * only has special meaning to our real parent. + */ + if (thread_group_empty(task) && !ptrace_reparented(task)) + return task->exit_signal; + + return task->ptrace ? SIGCHLD : 0; +} + +/** + * tracehook_report_death - task is dead and ready to be reaped + * @task: @current task now exiting + * @signal: signal number sent to parent, or 0 or -1 + * @death_cookie: value passed back from tracehook_notify_death() + * @group_dead: nonzero if this was the last thread in the group to die + * + * Thread has just become a zombie or is about to self-reap. If positive, + * @signal is the signal number just sent to the parent (usually %SIGCHLD). + * If @signal is -1, this thread will self-reap. If @signal is 0, this is + * a delayed_group_leader() zombie. The @death_cookie was passed back by + * tracehook_notify_death(). + * + * If normal reaping is not inhibited, @task->exit_state might be changing + * in parallel. + * + * Called without locks. + */ +static inline void tracehook_report_death(struct task_struct *task, + int signal, void *death_cookie, + int group_dead) +{ +} + +#ifdef TIF_NOTIFY_RESUME +/** + * set_notify_resume - cause tracehook_notify_resume() to be called + * @task: task that will call tracehook_notify_resume() + * + * Calling this arranges that @task will call tracehook_notify_resume() + * before returning to user mode. If it's already running in user mode, + * it will enter the kernel and call tracehook_notify_resume() soon. + * If it's blocked, it will not be woken. 
+ */ +static inline void set_notify_resume(struct task_struct *task) +{ + if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME)) + kick_process(task); +} + +/** + * tracehook_notify_resume - report when about to return to user mode + * @regs: user-mode registers of @current task + * + * This is called when %TIF_NOTIFY_RESUME has been set. Now we are + * about to return to user mode, and the user state in @regs can be + * inspected or adjusted. The caller in arch code has cleared + * %TIF_NOTIFY_RESUME before the call. If the flag gets set again + * asynchronously, this will be called again before we return to + * user mode. + * + * Called without locks. + */ +static inline void tracehook_notify_resume(struct pt_regs *regs) +{ +} +#endif /* TIF_NOTIFY_RESUME */ + +#endif /* <linux/tracehook.h> */ diff --git a/include/linux/typecheck.h b/include/linux/typecheck.h new file mode 100644 index 00000000000..eb5b74a575b --- /dev/null +++ b/include/linux/typecheck.h @@ -0,0 +1,24 @@ +#ifndef TYPECHECK_H_INCLUDED +#define TYPECHECK_H_INCLUDED + +/* + * Check at compile time that something is of a particular type. + * Always evaluates to 1 so you may use it easily in comparisons. + */ +#define typecheck(type,x) \ +({ type __dummy; \ + typeof(x) __dummy2; \ + (void)(&__dummy == &__dummy2); \ + 1; \ +}) + +/* + * Check at compile time that 'function' is a certain type, or is a pointer + * to that type (needs to use typedef for the function type.) + */ +#define typecheck_fn(type,function) \ +({ typeof(type) __tmp = function; \ + (void)__tmp; \ +}) + +#endif /* TYPECHECK_H_INCLUDED */ diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 747c3a49cdc..c932390c6da 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -330,7 +330,7 @@ extern int usb_string_id(struct usb_composite_dev *c); dev_vdbg(&(d)->gadget->dev , fmt , ## args) #define ERROR(d, fmt, args...) \ dev_err(&(d)->gadget->dev , fmt , ## args) -#define WARN(d, fmt, args...) \ +#define WARNING(d, fmt, args...) \ dev_warn(&(d)->gadget->dev , fmt , ## args) #define INFO(d, fmt, args...) \ dev_info(&(d)->gadget->dev , fmt , ## args) diff --git a/include/linux/virtio_9p.h b/include/linux/virtio_9p.h index 8eff0b53910..b3c4a60ceeb 100644 --- a/include/linux/virtio_9p.h +++ b/include/linux/virtio_9p.h @@ -1,5 +1,7 @@ #ifndef _LINUX_VIRTIO_9P_H #define _LINUX_VIRTIO_9P_H +/* This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. */ #include <linux/virtio_config.h> /* The ID for virtio console */ diff --git a/include/linux/virtio_balloon.h b/include/linux/virtio_balloon.h index 979524ee75b..c30c7bfbf39 100644 --- a/include/linux/virtio_balloon.h +++ b/include/linux/virtio_balloon.h @@ -1,5 +1,7 @@ #ifndef _LINUX_VIRTIO_BALLOON_H #define _LINUX_VIRTIO_BALLOON_H +/* This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. */ #include <linux/virtio_config.h> /* The ID for virtio_balloon */ diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h index 5f79a5f9de7..c1aef85243b 100644 --- a/include/linux/virtio_blk.h +++ b/include/linux/virtio_blk.h @@ -1,5 +1,7 @@ #ifndef _LINUX_VIRTIO_BLK_H #define _LINUX_VIRTIO_BLK_H +/* This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. 
*/ #include <linux/virtio_config.h> /* The ID for virtio_block */ @@ -11,6 +13,7 @@ #define VIRTIO_BLK_F_SEG_MAX 2 /* Indicates maximum # of segments */ #define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */ #define VIRTIO_BLK_F_RO 5 /* Disk is read-only */ +#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ struct virtio_blk_config { @@ -26,6 +29,8 @@ struct virtio_blk_config __u8 heads; __u8 sectors; } geometry; + /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */ + __u32 blk_size; } __attribute__((packed)); /* These two define direction. */ diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index f364bbf63c3..bf8ec283b23 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -1,5 +1,8 @@ #ifndef _LINUX_VIRTIO_CONFIG_H #define _LINUX_VIRTIO_CONFIG_H +/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so + * anyone can use the definitions to implement compatible drivers/servers. */ + /* Virtio devices use a standardized configuration space to define their * features and pass configuration information, but each implementation can * store and access that space differently. */ @@ -15,6 +18,12 @@ /* We've given up on this device. */ #define VIRTIO_CONFIG_S_FAILED 0x80 +/* Some virtio feature bits (currently bits 28 through 31) are reserved for the + * transport being used (eg. virtio_ring), the rest are per-device feature + * bits. */ +#define VIRTIO_TRANSPORT_F_START 28 +#define VIRTIO_TRANSPORT_F_END 32 + /* Do we get callbacks when the ring is completely used, even if we've * suppressed them? */ #define VIRTIO_F_NOTIFY_ON_EMPTY 24 @@ -52,9 +61,10 @@ * @get_features: get the array of feature bits for this device. * vdev: the virtio_device * Returns the first 32 feature bits (all we currently need). - * @set_features: confirm what device features we'll be using. + * @finalize_features: confirm what device features we'll be using. * vdev: the virtio_device - * feature: the first 32 feature bits + * This gives the final feature bits for the device: it can change + * the dev->feature bits if it wants. */ struct virtio_config_ops { @@ -70,7 +80,7 @@ struct virtio_config_ops void (*callback)(struct virtqueue *)); void (*del_vq)(struct virtqueue *vq); u32 (*get_features)(struct virtio_device *vdev); - void (*set_features)(struct virtio_device *vdev, u32 features); + void (*finalize_features)(struct virtio_device *vdev); }; /* If driver didn't advertise the feature, it will never appear. */ diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h index ed2d4ead7eb..19a0da0dba4 100644 --- a/include/linux/virtio_console.h +++ b/include/linux/virtio_console.h @@ -1,6 +1,8 @@ #ifndef _LINUX_VIRTIO_CONSOLE_H #define _LINUX_VIRTIO_CONSOLE_H #include <linux/virtio_config.h> +/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so + * anyone can use the definitions to implement compatible drivers/servers. */ /* The ID for virtio console */ #define VIRTIO_ID_CONSOLE 3 diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 38c0571820f..5e33761b9b8 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -1,5 +1,7 @@ #ifndef _LINUX_VIRTIO_NET_H #define _LINUX_VIRTIO_NET_H +/* This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. 
*/ #include <linux/virtio_config.h> /* The ID for virtio_net */ diff --git a/include/linux/virtio_pci.h b/include/linux/virtio_pci.h index b3151659cf4..cdef3574293 100644 --- a/include/linux/virtio_pci.h +++ b/include/linux/virtio_pci.h @@ -9,9 +9,8 @@ * Authors: * Anthony Liguori <aliguori@us.ibm.com> * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * + * This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. */ #ifndef _LINUX_VIRTIO_PCI_H diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index abe481ed990..c4a598fb382 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h @@ -120,6 +120,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, void (*notify)(struct virtqueue *vq), void (*callback)(struct virtqueue *vq)); void vring_del_virtqueue(struct virtqueue *vq); +/* Filter out transport-specific feature bits. */ +void vring_transport_features(struct virtio_device *vdev); irqreturn_t vring_interrupt(int irq, void *_vq); #endif /* __KERNEL__ */ diff --git a/include/linux/virtio_rng.h b/include/linux/virtio_rng.h index 331afb6c9f6..1a85dab8a94 100644 --- a/include/linux/virtio_rng.h +++ b/include/linux/virtio_rng.h @@ -1,5 +1,7 @@ #ifndef _LINUX_VIRTIO_RNG_H #define _LINUX_VIRTIO_RNG_H +/* This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. */ #include <linux/virtio_config.h> /* The ID for virtio_rng */ diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index e83b69346d2..58334d43951 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -44,6 +44,12 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, NR_VM_EVENT_ITEMS }; +extern const struct seq_operations fragmentation_op; +extern const struct seq_operations pagetypeinfo_op; +extern const struct seq_operations zoneinfo_op; +extern const struct seq_operations vmstat_op; +extern int sysctl_stat_interval; + #ifdef CONFIG_VM_EVENT_COUNTERS /* * Light weight per cpu counter implementation. diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 9448ffbdcbf..14c0e91be9b 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -12,6 +12,7 @@ #include <linux/mutex.h> #include <linux/console_struct.h> #include <linux/mm.h> +#include <linux/consolemap.h> /* * Presently, a lot of graphics programs do not restore the contents of @@ -54,6 +55,7 @@ void redraw_screen(struct vc_data *vc, int is_switch); struct tty_struct; int tioclinux(struct tty_struct *tty, unsigned long arg); +#ifdef CONFIG_CONSOLE_TRANSLATIONS /* consolemap.c */ struct unimapinit; @@ -71,6 +73,23 @@ void con_free_unimap(struct vc_data *vc); void con_protect_unimap(struct vc_data *vc, int rdonly); int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); +#define vc_translate(vc, c) ((vc)->vc_translate[(c) | \ + (vc)->vc_toggle_meta ? 
0x80 : 0]) +#else +#define con_set_trans_old(arg) (0) +#define con_get_trans_old(arg) (-EINVAL) +#define con_set_trans_new(arg) (0) +#define con_get_trans_new(arg) (-EINVAL) +#define con_clear_unimap(vc, ui) (0) +#define con_set_unimap(vc, ct, list) (0) +#define con_set_default_unimap(vc) (0) +#define con_copy_unimap(d, s) (0) +#define con_get_unimap(vc, ct, uct, list) (-EINVAL) +#define con_free_unimap(vc) do { ; } while (0) + +#define vc_translate(vc, c) (c) +#endif + /* vt.c */ int vt_waitactive(int vt); void change_console(struct vc_data *new_vc); diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 542526c6e8e..5c158c477ac 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -179,6 +179,8 @@ __create_workqueue_key(const char *name, int singlethread, extern void destroy_workqueue(struct workqueue_struct *wq); extern int queue_work(struct workqueue_struct *wq, struct work_struct *work); +extern int queue_work_on(int cpu, struct workqueue_struct *wq, + struct work_struct *work); extern int queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay); extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, @@ -188,6 +190,7 @@ extern void flush_workqueue(struct workqueue_struct *wq); extern void flush_scheduled_work(void); extern int schedule_work(struct work_struct *work); +extern int schedule_work_on(int cpu, struct work_struct *work); extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay); extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); @@ -198,6 +201,8 @@ extern int keventd_up(void); extern void init_workqueues(void); int execute_in_process_context(work_func_t fn, struct execute_work *); +extern int flush_work(struct work_struct *work); + extern int cancel_work_sync(struct work_struct *work); /* |
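
A brief illustration of the tracehook_signal_handler() contract documented above: the arch signal code builds the user-mode frame first, then lets the tracing layer know the handler is about to run. This is only a hedged sketch; handle_signal_example() and setup_example_frame() are placeholder names invented here, not functions from this patch, and a real port keeps its own frame-setup logic.

#include <linux/signal.h>
#include <linux/tracehook.h>

/* Stand-in for the arch-specific step that writes the signal frame and
 * points the user registers at the handler. */
static void setup_example_frame(int sig, struct k_sigaction *ka,
				siginfo_t *info, struct pt_regs *regs)
{
}

static void handle_signal_example(int sig, siginfo_t *info,
				  struct k_sigaction *ka,
				  struct pt_regs *regs, int stepping)
{
	setup_example_frame(sig, ka, info, regs);

	/* Per the kernel-doc above: frame and mask changes are done, and
	 * with single-step active this ends up raising SIGTRAP through
	 * ptrace_notify(), exactly as the inline definition shows. */
	tracehook_signal_handler(sig, info, ka, regs, stepping);
}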
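set_notify_resume() and tracehook_notify_resume() above form a pair: one side raises %TIF_NOTIFY_RESUME, the other consumes it on the way back to user mode. Assuming the architecture defines TIF_NOTIFY_RESUME and a matching _TIF_NOTIFY_RESUME mask (neither is part of this patch, and do_notify_resume_example() is a made-up name), the consuming side might look roughly like:

#include <linux/thread_info.h>
#include <linux/tracehook.h>

/* Hypothetical arch helper invoked from the return-to-user path with the
 * pending thread_info flags. */
void do_notify_resume_example(struct pt_regs *regs,
			      unsigned long thread_info_flags)
{
#ifdef TIF_NOTIFY_RESUME
	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		/* Clear the flag before the callback, as the kernel-doc
		 * above expects of arch code. */
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}
#endif
}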
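The new typecheck.h above is most useful inside other macros, where it turns a silent type mismatch into a compile-time complaint while still evaluating to 1. A small sketch of the pattern (time_after_example() and the handler names are invented for illustration; the in-tree jiffies helpers use the same idea):

#include <linux/typecheck.h>

/* Both arguments must be unsigned long, or the build warns. */
#define time_after_example(a, b)			\
	(typecheck(unsigned long, a) &&			\
	 typecheck(unsigned long, b) &&			\
	 ((long)(b) - (long)(a) < 0))

/* typecheck_fn() does the same for function types, given a typedef. */
typedef void (*example_handler_fn)(unsigned long data);

static void example_handler(unsigned long data)
{
}

static void check_example_handler(void)
{
	typecheck_fn(example_handler_fn, example_handler);
}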
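On the virtio_blk.h change: the VIRTIO_BLK_F_BLK_SIZE bit advertises that struct virtio_blk_config now carries blk_size, and a driver should only read that field after negotiating the bit. A hedged sketch, assuming the generic virtio_has_feature() helper and the config-space get() op behave as the virtio core defines them (this is not code from the patch itself):

#include <linux/stddef.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/virtio_config.h>

static u32 example_blk_size(struct virtio_device *vdev)
{
	u32 blk_size = 512;	/* traditional sector size as a fallback */

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_BLK_SIZE))
		vdev->config->get(vdev,
				  offsetof(struct virtio_blk_config, blk_size),
				  &blk_size, sizeof(blk_size));
	return blk_size;
}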
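Finally, the workqueue.h additions at the end of the diff, queue_work_on(), schedule_work_on() and flush_work(), imply CPU-targeted submission plus a way to wait on a single work item rather than a whole queue. A rough usage sketch (the work item and function names are illustrative only):

#include <linux/workqueue.h>

static void example_work_fn(struct work_struct *work)
{
	/* Runs in process context on the CPU it was queued for. */
}

static DECLARE_WORK(example_work, example_work_fn);

static void example_kick_cpu(int cpu)
{
	/* Queue on a specific CPU via the default kernel workqueue. */
	schedule_work_on(cpu, &example_work);

	/* Wait for this one item to finish; unlike flush_scheduled_work()
	 * this does not drain everything else on the queue. */
	flush_work(&example_work);
}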