Diffstat (limited to 'include')
42 files changed, 765 insertions, 121 deletions
diff --git a/include/asm-alpha/core_t2.h b/include/asm-alpha/core_t2.h index dba70c62a16..457c34b6eb0 100644 --- a/include/asm-alpha/core_t2.h +++ b/include/asm-alpha/core_t2.h @@ -435,7 +435,7 @@ static inline void t2_outl(u32 b, unsigned long addr) set_hae(msb); \ } -static spinlock_t t2_hae_lock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(t2_hae_lock); __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr) { diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 845cb67ad8e..8ceab7bcd8b 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -51,4 +51,10 @@ __ret; \ }) +#ifdef CONFIG_SMP +# define WARN_ON_SMP(x) WARN_ON(x) +#else +# define WARN_ON_SMP(x) do { } while (0) +#endif + #endif diff --git a/include/asm-i386/cpu.h b/include/asm-i386/cpu.h index e7252c216ca..b1bc7b1b64b 100644 --- a/include/asm-i386/cpu.h +++ b/include/asm-i386/cpu.h @@ -7,8 +7,6 @@ #include <linux/nodemask.h> #include <linux/percpu.h> -#include <asm/node.h> - struct i386_cpu { struct cpu cpu; }; diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h index 4153d80e4d2..1eac92cb5b1 100644 --- a/include/asm-i386/elf.h +++ b/include/asm-i386/elf.h @@ -10,6 +10,7 @@ #include <asm/processor.h> #include <asm/system.h> /* for savesegment */ #include <asm/auxvec.h> +#include <asm/desc.h> #include <linux/utsname.h> @@ -129,15 +130,41 @@ extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) #define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs) -#define VSYSCALL_BASE (__fix_to_virt(FIX_VSYSCALL)) -#define VSYSCALL_EHDR ((const struct elfhdr *) VSYSCALL_BASE) -#define VSYSCALL_ENTRY ((unsigned long) &__kernel_vsyscall) +#define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO)) +#define VDSO_BASE ((unsigned long)current->mm->context.vdso) + +#ifdef CONFIG_COMPAT_VDSO +# define VDSO_COMPAT_BASE VDSO_HIGH_BASE +# define VDSO_PRELINK VDSO_HIGH_BASE +#else +# define VDSO_COMPAT_BASE VDSO_BASE +# define VDSO_PRELINK 0 +#endif + +#define VDSO_COMPAT_SYM(x) \ + (VDSO_COMPAT_BASE + (unsigned long)(x) - VDSO_PRELINK) + +#define VDSO_SYM(x) \ + (VDSO_BASE + (unsigned long)(x) - VDSO_PRELINK) + +#define VDSO_HIGH_EHDR ((const struct elfhdr *) VDSO_HIGH_BASE) +#define VDSO_EHDR ((const struct elfhdr *) VDSO_COMPAT_BASE) + extern void __kernel_vsyscall; +#define VDSO_ENTRY VDSO_SYM(&__kernel_vsyscall) + +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES +struct linux_binprm; +extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int executable_stack); + +extern unsigned int vdso_enabled; + #define ARCH_DLINFO \ -do { \ - NEW_AUX_ENT(AT_SYSINFO, VSYSCALL_ENTRY); \ - NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL_BASE); \ +do if (vdso_enabled) { \ + NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_COMPAT_BASE); \ } while (0) /* @@ -148,15 +175,15 @@ do { \ * Dumping its extra ELF program headers includes all the other information * a debugger needs to easily find how the vsyscall DSO was being used. 
*/ -#define ELF_CORE_EXTRA_PHDRS (VSYSCALL_EHDR->e_phnum) +#define ELF_CORE_EXTRA_PHDRS (VDSO_HIGH_EHDR->e_phnum) #define ELF_CORE_WRITE_EXTRA_PHDRS \ do { \ const struct elf_phdr *const vsyscall_phdrs = \ - (const struct elf_phdr *) (VSYSCALL_BASE \ - + VSYSCALL_EHDR->e_phoff); \ + (const struct elf_phdr *) (VDSO_HIGH_BASE \ + + VDSO_HIGH_EHDR->e_phoff); \ int i; \ Elf32_Off ofs = 0; \ - for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) { \ + for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) { \ struct elf_phdr phdr = vsyscall_phdrs[i]; \ if (phdr.p_type == PT_LOAD) { \ BUG_ON(ofs != 0); \ @@ -174,10 +201,10 @@ do { \ #define ELF_CORE_WRITE_EXTRA_DATA \ do { \ const struct elf_phdr *const vsyscall_phdrs = \ - (const struct elf_phdr *) (VSYSCALL_BASE \ - + VSYSCALL_EHDR->e_phoff); \ + (const struct elf_phdr *) (VDSO_HIGH_BASE \ + + VDSO_HIGH_EHDR->e_phoff); \ int i; \ - for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) { \ + for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) { \ if (vsyscall_phdrs[i].p_type == PT_LOAD) \ DUMP_WRITE((void *) vsyscall_phdrs[i].p_vaddr, \ PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \ diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h index f7e068f4d2f..a48cc3f7ccc 100644 --- a/include/asm-i386/fixmap.h +++ b/include/asm-i386/fixmap.h @@ -51,7 +51,7 @@ */ enum fixed_addresses { FIX_HOLE, - FIX_VSYSCALL, + FIX_VDSO, #ifdef CONFIG_X86_LOCAL_APIC FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ #endif @@ -115,14 +115,6 @@ extern void __set_fixmap (enum fixed_addresses idx, #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) -/* - * This is the range that is readable by user mode, and things - * acting like user mode such as get_user_pages. - */ -#define FIXADDR_USER_START (__fix_to_virt(FIX_VSYSCALL)) -#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) - - extern void __this_fixmap_does_not_exist(void); /* diff --git a/include/asm-i386/mmu.h b/include/asm-i386/mmu.h index f431a0b86d4..8358dd3df7a 100644 --- a/include/asm-i386/mmu.h +++ b/include/asm-i386/mmu.h @@ -12,6 +12,7 @@ typedef struct { int size; struct semaphore sem; void *ldt; + void *vdso; } mm_context_t; #endif diff --git a/include/asm-i386/node.h b/include/asm-i386/node.h deleted file mode 100644 index e13c6ffa72a..00000000000 --- a/include/asm-i386/node.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef _ASM_I386_NODE_H_ -#define _ASM_I386_NODE_H_ - -#include <linux/device.h> -#include <linux/mmzone.h> -#include <linux/node.h> -#include <linux/topology.h> -#include <linux/nodemask.h> - -struct i386_node { - struct node node; -}; -extern struct i386_node node_devices[MAX_NUMNODES]; - -static inline int arch_register_node(int num){ - int p_node; - struct node *parent = NULL; - - if (!node_online(num)) - return 0; - p_node = parent_node(num); - - if (p_node != num) - parent = &node_devices[p_node].node; - - return register_node(&node_devices[num].node, num, parent); -} - -#endif /* _ASM_I386_NODE_H_ */ diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h index e3a552fa553..f5bf544c729 100644 --- a/include/asm-i386/page.h +++ b/include/asm-i386/page.h @@ -96,6 +96,8 @@ typedef struct { unsigned long pgprot; } pgprot_t; #ifndef __ASSEMBLY__ +struct vm_area_struct; + /* * This much address space is reserved for vmalloc() and iomap() * as well as fixmap mappings. 
@@ -139,6 +141,7 @@ extern int page_is_ram(unsigned long pagenr); #include <asm-generic/memory_model.h> #include <asm-generic/page.h> +#define __HAVE_ARCH_GATE_AREA 1 #endif /* __KERNEL__ */ #endif /* _I386_PAGE_H */ diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h index 55ea992da32..b32346d62e1 100644 --- a/include/asm-i386/processor.h +++ b/include/asm-i386/processor.h @@ -71,8 +71,12 @@ struct cpuinfo_x86 { cpumask_t llc_shared_map; /* cpus sharing the last level cache */ #endif unsigned char x86_max_cores; /* cpuid returned max cores value */ - unsigned char booted_cores; /* number of cores as seen by OS */ unsigned char apicid; +#ifdef CONFIG_SMP + unsigned char booted_cores; /* number of cores as seen by OS */ + __u8 phys_proc_id; /* Physical processor id. */ + __u8 cpu_core_id; /* Core id */ +#endif } __attribute__((__aligned__(SMP_CACHE_BYTES))); #define X86_VENDOR_INTEL 0 @@ -104,8 +108,6 @@ extern struct cpuinfo_x86 cpu_data[]; #define current_cpu_data boot_cpu_data #endif -extern int phys_proc_id[NR_CPUS]; -extern int cpu_core_id[NR_CPUS]; extern int cpu_llc_id[NR_CPUS]; extern char ignore_fpu_irq; diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h index fdbc7f422ea..2833fa2c0dd 100644 --- a/include/asm-i386/thread_info.h +++ b/include/asm-i386/thread_info.h @@ -37,6 +37,7 @@ struct thread_info { 0-0xBFFFFFFF for user-thead 0-0xFFFFFFFF for kernel-thread */ + void *sysenter_return; struct restart_block restart_block; unsigned long previous_esp; /* ESP of the previous stack in case @@ -83,17 +84,15 @@ struct thread_info { #define init_stack (init_thread_union.stack) +/* how to get the current stack pointer from C */ +register unsigned long current_stack_pointer asm("esp") __attribute_used__; + /* how to get the thread information struct from C */ static inline struct thread_info *current_thread_info(void) { - struct thread_info *ti; - __asm__("andl %%esp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1))); - return ti; + return (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1)); } -/* how to get the current stack pointer from C */ -register unsigned long current_stack_pointer asm("esp") __attribute_used__; - /* thread information allocation */ #ifdef CONFIG_DEBUG_STACK_USAGE #define alloc_thread_info(tsk) \ diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h index b94e5eeef91..6adbd9b1ae8 100644 --- a/include/asm-i386/topology.h +++ b/include/asm-i386/topology.h @@ -28,10 +28,8 @@ #define _ASM_I386_TOPOLOGY_H #ifdef CONFIG_X86_HT -#define topology_physical_package_id(cpu) \ - (phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu]) -#define topology_core_id(cpu) \ - (cpu_core_id[cpu] == BAD_APICID ? 
0 : cpu_core_id[cpu]) +#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id) +#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id) #define topology_core_siblings(cpu) (cpu_core_map[cpu]) #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) #endif @@ -114,4 +112,9 @@ extern unsigned long node_remap_size[]; extern cpumask_t cpu_coregroup_map(int cpu); +#ifdef CONFIG_SMP +#define mc_capable() (boot_cpu_data.x86_max_cores > 1) +#define smt_capable() (smp_num_siblings > 1) +#endif + #endif /* _ASM_I386_TOPOLOGY_H */ diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h index d480f2e3821..69f0f1df672 100644 --- a/include/asm-i386/unwind.h +++ b/include/asm-i386/unwind.h @@ -78,8 +78,8 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info) return user_mode_vm(&info->regs); #else return info->regs.eip < PAGE_OFFSET - || (info->regs.eip >= __fix_to_virt(FIX_VSYSCALL) - && info->regs.eip < __fix_to_virt(FIX_VSYSCALL) + PAGE_SIZE) + || (info->regs.eip >= __fix_to_virt(FIX_VDSO) + && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE) || info->regs.esp < PAGE_OFFSET; #endif } diff --git a/include/asm-ia64/nodedata.h b/include/asm-ia64/nodedata.h index a140310bf84..2fb337b0e9b 100644 --- a/include/asm-ia64/nodedata.h +++ b/include/asm-ia64/nodedata.h @@ -46,6 +46,18 @@ struct ia64_node_data { */ #define NODE_DATA(nid) (local_node_data->pg_data_ptrs[nid]) +/* + * LOCAL_DATA_ADDR - This is to calculate the address of other node's + * "local_node_data" at hot-plug phase. The local_node_data + * is pointed by per_cpu_page. Kernel usually use it for + * just executing cpu. However, when new node is hot-added, + * the addresses of local data for other nodes are necessary + * to update all of them. + */ +#define LOCAL_DATA_ADDR(pgdat) \ + ((struct ia64_node_data *)((u64)(pgdat) + \ + L1_CACHE_ALIGN(sizeof(struct pglist_data)))) + #endif /* CONFIG_NUMA */ #endif /* _ASM_IA64_NODEDATA_H */ diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h index 616b5ed2aa7..937c2125752 100644 --- a/include/asm-ia64/topology.h +++ b/include/asm-ia64/topology.h @@ -112,6 +112,7 @@ void build_cpu_to_node_map(void); #define topology_core_id(cpu) (cpu_data(cpu)->core_id) #define topology_core_siblings(cpu) (cpu_core_map[cpu]) #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) +#define smt_capable() (smp_num_siblings > 1) #endif #include <asm-generic/topology.h> diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h index 92f3e5507d2..bbc3844b086 100644 --- a/include/asm-powerpc/topology.h +++ b/include/asm-powerpc/topology.h @@ -93,5 +93,10 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev, #endif /* CONFIG_NUMA */ +#ifdef CONFIG_SMP +#include <asm/cputable.h> +#define smt_capable() (cpu_has_feature(CPU_FTR_SMT)) +#endif + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_TOPOLOGY_H */ diff --git a/include/asm-sparc64/topology.h b/include/asm-sparc64/topology.h index 0e234e201bd..98a6c613589 100644 --- a/include/asm-sparc64/topology.h +++ b/include/asm-sparc64/topology.h @@ -1,6 +1,9 @@ #ifndef _ASM_SPARC64_TOPOLOGY_H #define _ASM_SPARC64_TOPOLOGY_H +#include <asm/spitfire.h> +#define smt_capable() (tlb_type == hypervisor) + #include <asm-generic/topology.h> #endif /* _ASM_SPARC64_TOPOLOGY_H */ diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h index 1b2ac55d320..93187746278 100644 --- a/include/asm-x86_64/hw_irq.h +++ b/include/asm-x86_64/hw_irq.h @@ -124,7 +124,7 @@ 
asmlinkage void IRQ_NAME(nr); \ __asm__( \ "\n.p2align\n" \ "IRQ" #nr "_interrupt:\n\t" \ - "push $" #nr "-256 ; " \ + "push $~(" #nr ") ; " \ "jmp common_interrupt"); #if defined(CONFIG_X86_IO_APIC) diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h index c4e46e7fa7b..6e7a2e976b0 100644 --- a/include/asm-x86_64/topology.h +++ b/include/asm-x86_64/topology.h @@ -59,6 +59,8 @@ extern int __node_distance(int, int); #define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id) #define topology_core_siblings(cpu) (cpu_core_map[cpu]) #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) +#define mc_capable() (boot_cpu_data.x86_max_cores > 1) +#define smt_capable() (smp_num_siblings > 1) #endif #include <asm-generic/topology.h> diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 90d6df1551e..88b5dfd8ee1 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -528,12 +528,18 @@ static inline void acpi_set_cstate_limit(unsigned int new_limit) { return; } #ifdef CONFIG_ACPI_NUMA int acpi_get_pxm(acpi_handle handle); +int acpi_get_node(acpi_handle *handle); #else static inline int acpi_get_pxm(acpi_handle handle) { return 0; } +static inline int acpi_get_node(acpi_handle *handle) +{ + return 0; +} #endif +extern int acpi_paddr_to_node(u64 start_addr, u64 size); extern int pnpacpi_disabled; diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index fb7e9b7ccbe..737e407d0cd 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -149,7 +149,6 @@ void create_empty_buffers(struct page *, unsigned long, unsigned long b_state); void end_buffer_read_sync(struct buffer_head *bh, int uptodate); void end_buffer_write_sync(struct buffer_head *bh, int uptodate); -void end_buffer_async_write(struct buffer_head *bh, int uptodate); /* Things to do with buffers at mapping->private_list */ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode); @@ -214,6 +213,7 @@ int nobh_truncate_page(struct address_space *, loff_t); int nobh_writepage(struct page *page, get_block_t *get_block, struct writeback_control *wbc); +void buffer_init(void); /* * inline definitions diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 08d50c53aab..a3caf6866ba 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -31,17 +31,23 @@ struct cpu { struct sys_device sysdev; }; -extern int register_cpu(struct cpu *, int, struct node *); +extern int register_cpu(struct cpu *cpu, int num); extern struct sys_device *get_cpu_sysdev(unsigned cpu); #ifdef CONFIG_HOTPLUG_CPU -extern void unregister_cpu(struct cpu *, struct node *); +extern void unregister_cpu(struct cpu *cpu); #endif struct notifier_block; #ifdef CONFIG_SMP /* Need to know about CPUs going up/down? 
*/ extern int register_cpu_notifier(struct notifier_block *nb); +#ifdef CONFIG_HOTPLUG_CPU extern void unregister_cpu_notifier(struct notifier_block *nb); +#else +static inline void unregister_cpu_notifier(struct notifier_block *nb) +{ +} +#endif extern int current_in_cpu_hotplug(void); int cpu_up(unsigned int cpu); @@ -73,6 +79,8 @@ extern int lock_cpu_hotplug_interruptible(void); { .notifier_call = fn, .priority = pri }; \ register_cpu_notifier(&fn##_nb); \ } +#define register_hotcpu_notifier(nb) register_cpu_notifier(nb) +#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) int cpu_down(unsigned int cpu); #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) #else @@ -80,6 +88,8 @@ int cpu_down(unsigned int cpu); #define unlock_cpu_hotplug() do { } while (0) #define lock_cpu_hotplug_interruptible() 0 #define hotcpu_notifier(fn, pri) +#define register_hotcpu_notifier(nb) +#define unregister_hotcpu_notifier(nb) /* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */ static inline int cpu_is_offline(int cpu) { return 0; } diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 78b236ca04f..272010a6078 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -20,7 +20,7 @@ */ #ifndef DMAENGINE_H #define DMAENGINE_H -#include <linux/config.h> + #ifdef CONFIG_DMA_ENGINE #include <linux/device.h> diff --git a/include/linux/futex.h b/include/linux/futex.h index 966a5b3da43..34c3a215f2c 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -12,6 +12,9 @@ #define FUTEX_REQUEUE 3 #define FUTEX_CMP_REQUEUE 4 #define FUTEX_WAKE_OP 5 +#define FUTEX_LOCK_PI 6 +#define FUTEX_UNLOCK_PI 7 +#define FUTEX_TRYLOCK_PI 8 /* * Support for robust futexes: the kernel cleans up held futexes at @@ -90,18 +93,21 @@ struct robust_list_head { */ #define ROBUST_LIST_LIMIT 2048 -long do_futex(unsigned long uaddr, int op, int val, - unsigned long timeout, unsigned long uaddr2, int val2, - int val3); +long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, + u32 __user *uaddr2, u32 val2, u32 val3); extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr); #ifdef CONFIG_FUTEX extern void exit_robust_list(struct task_struct *curr); +extern void exit_pi_state_list(struct task_struct *curr); #else static inline void exit_robust_list(struct task_struct *curr) { } +static inline void exit_pi_state_list(struct task_struct *curr) +{ +} #endif #define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index e127ef7e8da..3a256957fb5 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -87,6 +87,7 @@ extern struct group_info init_groups; .lock_depth = -1, \ .prio = MAX_PRIO-20, \ .static_prio = MAX_PRIO-20, \ + .normal_prio = MAX_PRIO-20, \ .policy = SCHED_NORMAL, \ .cpus_allowed = CPU_MASK_ALL, \ .mm = NULL, \ @@ -122,6 +123,8 @@ extern struct group_info init_groups; .journal_info = NULL, \ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .fs_excl = ATOMIC_INIT(0), \ + .pi_lock = SPIN_LOCK_UNLOCKED, \ + INIT_RT_MUTEXES(tsk) \ } diff --git a/include/linux/ioport.h b/include/linux/ioport.h index cd6bd001ba4..edfc733b157 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -105,6 +105,9 @@ extern int allocate_resource(struct resource *root, struct resource *new, int adjust_resource(struct resource *res, unsigned long start, unsigned long size); +/* get registered SYSTEM_RAM resources in specified area */ +extern int 
find_next_system_ram(struct resource *res); + /* Convenience shorthand with allocation */ #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name)) #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name)) diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 5653b2f23b6..d09fbeabf1d 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -210,11 +210,7 @@ struct kernel_ipmi_msg #include <linux/list.h> #include <linux/module.h> #include <linux/device.h> - -#ifdef CONFIG_PROC_FS #include <linux/proc_fs.h> -extern struct proc_dir_entry *proc_ipmi_root; -#endif /* CONFIG_PROC_FS */ /* Opaque type for a IPMI message user. One of these is needed to send and receive messages. */ diff --git a/include/linux/list.h b/include/linux/list.h index 37ca31b21bb..6b74adf5297 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -4,18 +4,11 @@ #ifdef __KERNEL__ #include <linux/stddef.h> +#include <linux/poison.h> #include <linux/prefetch.h> #include <asm/system.h> /* - * These are non-NULL pointers that will result in page faults - * under normal circumstances, used to verify that nobody uses - * non-initialized list entries. - */ -#define LIST_POISON1 ((void *) 0x00100100) -#define LIST_POISON2 ((void *) 0x00200200) - -/* * Simple doubly linked list implementation. * * Some of the internal functions ("__xxx") are useful when diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 91120638617..218501cfaeb 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -63,6 +63,76 @@ extern int online_pages(unsigned long, unsigned long); /* reasonably generic interface to expand the physical pages in a zone */ extern int __add_pages(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages); + +#ifdef CONFIG_NUMA +extern int memory_add_physaddr_to_nid(u64 start); +#else +static inline int memory_add_physaddr_to_nid(u64 start) +{ + return 0; +} +#endif + +#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION +/* + * For supporting node-hotadd, we have to allocate a new pgdat. + * + * If an arch has generic style NODE_DATA(), + * node_data[nid] = kzalloc() works well. But it depends on the architecture. + * + * In general, generic_alloc_nodedata() is used. + * Now, arch_free_nodedata() is just defined for error path of node_hot_add. + * + */ +extern pg_data_t *arch_alloc_nodedata(int nid); +extern void arch_free_nodedata(pg_data_t *pgdat); +extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat); + +#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ + +#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid) +#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat) + +#ifdef CONFIG_NUMA +/* + * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat. + * XXX: kmalloc_node() can't work well to get new node's memory at this time. + * Because, pgdat for the new node is not allocated/initialized yet itself. + * To use new node's memory, more consideration will be necessary. + */ +#define generic_alloc_nodedata(nid) \ +({ \ + kzalloc(sizeof(pg_data_t), GFP_KERNEL); \ +}) +/* + * This definition is just for error path in node hotadd. + * For node hotremove, we have to replace this. 
+ */ +#define generic_free_nodedata(pgdat) kfree(pgdat) + +extern pg_data_t *node_data[]; +static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) +{ + node_data[nid] = pgdat; +} + +#else /* !CONFIG_NUMA */ + +/* never called */ +static inline pg_data_t *generic_alloc_nodedata(int nid) +{ + BUG(); + return NULL; +} +static inline void generic_free_nodedata(pg_data_t *pgdat) +{ +} +static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) +{ +} +#endif /* CONFIG_NUMA */ +#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ + #else /* ! CONFIG_MEMORY_HOTPLUG */ /* * Stub functions for when hotplug is off @@ -99,7 +169,8 @@ static inline int __remove_pages(struct zone *zone, unsigned long start_pfn, return -ENOSYS; } -extern int add_memory(u64 start, u64 size); +extern int add_memory(int nid, u64 start, u64 size); +extern int arch_add_memory(int nid, u64 start, u64 size); extern int remove_memory(u64 start, u64 size); #endif /* __LINUX_MEMORY_HOTPLUG_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index a929ea197e4..c41a1299b8c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1030,13 +1030,20 @@ static inline void vm_stat_account(struct mm_struct *mm, } #endif /* CONFIG_PROC_FS */ +static inline void +debug_check_no_locks_freed(const void *from, unsigned long len) +{ + mutex_debug_check_no_locks_freed(from, len); + rt_mutex_debug_check_no_locks_freed(from, len); +} + #ifndef CONFIG_DEBUG_PAGEALLOC static inline void kernel_map_pages(struct page *page, int numpages, int enable) { if (!PageHighMem(page) && !enable) - mutex_debug_check_no_locks_freed(page_address(page), - numpages * PAGE_SIZE); + debug_check_no_locks_freed(page_address(page), + numpages * PAGE_SIZE); } #endif @@ -1065,5 +1072,7 @@ void drop_slab(void); extern int randomize_va_space; #endif +const char *arch_vma_name(struct vm_area_struct *vma); + #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/node.h b/include/linux/node.h index 254dc3de650..81dcec84cd8 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -26,8 +26,25 @@ struct node { struct sys_device sysdev; }; +extern struct node node_devices[]; + extern int register_node(struct node *, int, struct node *); extern void unregister_node(struct node *node); +extern int register_one_node(int nid); +extern void unregister_one_node(int nid); +#ifdef CONFIG_NUMA +extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); +extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); +#else +static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid) +{ + return 0; +} +static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) +{ + return 0; +} +#endif #define to_node(sys_device) container_of(sys_device, struct node, sysdev) diff --git a/include/linux/nsc_gpio.h b/include/linux/nsc_gpio.h new file mode 100644 index 00000000000..135742cfada --- /dev/null +++ b/include/linux/nsc_gpio.h @@ -0,0 +1,42 @@ +/** + nsc_gpio.c + + National Semiconductor GPIO common access methods. + + struct nsc_gpio_ops abstracts the low-level access + operations for the GPIO units on 2 NSC chip families; the GEODE + integrated CPU, and the PC-8736[03456] integrated PC-peripheral + chips. + + The GPIO units on these chips have the same pin architecture, but + the access methods differ. Thus, scx200_gpio and pc8736x_gpio + implement their own versions of these routines; and use the common + file-operations routines implemented in nsc_gpio module. 
+ + Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com> + + NB: this work was tested on the Geode SC-1100 and PC-87366 chips. + NSC sold the GEODE line to AMD, and the PC-8736x line to Winbond. +*/ + +struct nsc_gpio_ops { + struct module* owner; + u32 (*gpio_config) (unsigned iminor, u32 mask, u32 bits); + void (*gpio_dump) (struct nsc_gpio_ops *amp, unsigned iminor); + int (*gpio_get) (unsigned iminor); + void (*gpio_set) (unsigned iminor, int state); + void (*gpio_set_high)(unsigned iminor); + void (*gpio_set_low) (unsigned iminor); + void (*gpio_change) (unsigned iminor); + int (*gpio_current) (unsigned iminor); + struct device* dev; /* for dev_dbg() support, set in init */ +}; + +extern ssize_t nsc_gpio_write(struct file *file, const char __user *data, + size_t len, loff_t *ppos); + +extern ssize_t nsc_gpio_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos); + +extern void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index); + diff --git a/include/linux/plist.h b/include/linux/plist.h new file mode 100644 index 00000000000..3404faef542 --- /dev/null +++ b/include/linux/plist.h @@ -0,0 +1,247 @@ +/* + * Descending-priority-sorted double-linked list + * + * (C) 2002-2003 Intel Corp + * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>. + * + * 2001-2005 (c) MontaVista Software, Inc. + * Daniel Walker <dwalker@mvista.com> + * + * (C) 2005 Thomas Gleixner <tglx@linutronix.de> + * + * Simplifications of the original code by + * Oleg Nesterov <oleg@tv-sign.ru> + * + * Licensed under the FSF's GNU Public License v2 or later. + * + * Based on simple lists (include/linux/list.h). + * + * This is a priority-sorted list of nodes; each node has a + * priority from INT_MIN (highest) to INT_MAX (lowest). + * + * Addition is O(K), removal is O(1), change of priority of a node is + * O(K) and K is the number of RT priority levels used in the system. + * (1 <= K <= 99) + * + * This list is really a list of lists: + * + * - The tier 1 list is the prio_list, different priority nodes. + * + * - The tier 2 list is the node_list, serialized nodes. + * + * Simple ASCII art explanation: + * + * |HEAD | + * | | + * |prio_list.prev|<------------------------------------| + * |prio_list.next|<->|pl|<->|pl|<--------------->|pl|<-| + * |10 | |10| |21| |21| |21| |40| (prio) + * | | | | | | | | | | | | + * | | | | | | | | | | | | + * |node_list.next|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-| + * |node_list.prev|<------------------------------------| + * + * The nodes on the prio_list list are sorted by priority to simplify + * the insertion of new nodes. There are no nodes with duplicate + * priorites on the list. + * + * The nodes on the node_list is ordered by priority and can contain + * entries which have the same priority. Those entries are ordered + * FIFO + * + * Addition means: look for the prio_list node in the prio_list + * for the priority of the node and insert it before the node_list + * entry of the next prio_list node. If it is the first node of + * that priority, add it to the prio_list in the right position and + * insert it into the serialized node_list list + * + * Removal means remove it from the node_list and remove it from + * the prio_list if the node_list list_head is non empty. In case + * of removal from the prio_list it must be checked whether other + * entries of the same priority are on the list or not. If there + * is another entry of the same priority then this entry has to + * replace the removed entry on the prio_list. 
If the entry which + * is removed is the only entry of this priority then a simple + * remove from both list is sufficient. + * + * INT_MIN is the highest priority, 0 is the medium highest, INT_MAX + * is lowest priority. + * + * No locking is done, up to the caller. + * + */ +#ifndef _LINUX_PLIST_H_ +#define _LINUX_PLIST_H_ + +#include <linux/list.h> +#include <linux/spinlock_types.h> + +struct plist_head { + struct list_head prio_list; + struct list_head node_list; +#ifdef CONFIG_DEBUG_PI_LIST + spinlock_t *lock; +#endif +}; + +struct plist_node { + int prio; + struct plist_head plist; +}; + +#ifdef CONFIG_DEBUG_PI_LIST +# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock +#else +# define PLIST_HEAD_LOCK_INIT(_lock) +#endif + +/** + * #PLIST_HEAD_INIT - static struct plist_head initializer + * + * @head: struct plist_head variable name + */ +#define PLIST_HEAD_INIT(head, _lock) \ +{ \ + .prio_list = LIST_HEAD_INIT((head).prio_list), \ + .node_list = LIST_HEAD_INIT((head).node_list), \ + PLIST_HEAD_LOCK_INIT(&(_lock)) \ +} + +/** + * #PLIST_NODE_INIT - static struct plist_node initializer + * + * @node: struct plist_node variable name + * @__prio: initial node priority + */ +#define PLIST_NODE_INIT(node, __prio) \ +{ \ + .prio = (__prio), \ + .plist = PLIST_HEAD_INIT((node).plist, NULL), \ +} + +/** + * plist_head_init - dynamic struct plist_head initializer + * + * @head: &struct plist_head pointer + */ +static inline void +plist_head_init(struct plist_head *head, spinlock_t *lock) +{ + INIT_LIST_HEAD(&head->prio_list); + INIT_LIST_HEAD(&head->node_list); +#ifdef CONFIG_DEBUG_PI_LIST + head->lock = lock; +#endif +} + +/** + * plist_node_init - Dynamic struct plist_node initializer + * + * @node: &struct plist_node pointer + * @prio: initial node priority + */ +static inline void plist_node_init(struct plist_node *node, int prio) +{ + node->prio = prio; + plist_head_init(&node->plist, NULL); +} + +extern void plist_add(struct plist_node *node, struct plist_head *head); +extern void plist_del(struct plist_node *node, struct plist_head *head); + +/** + * plist_for_each - iterate over the plist + * + * @pos1: the type * to use as a loop counter. + * @head: the head for your list. + */ +#define plist_for_each(pos, head) \ + list_for_each_entry(pos, &(head)->node_list, plist.node_list) + +/** + * plist_for_each_entry_safe - iterate over a plist of given type safe + * against removal of list entry + * + * @pos1: the type * to use as a loop counter. + * @n1: another type * to use as temporary storage + * @head: the head for your list. + */ +#define plist_for_each_safe(pos, n, head) \ + list_for_each_entry_safe(pos, n, &(head)->node_list, plist.node_list) + +/** + * plist_for_each_entry - iterate over list of given type + * + * @pos: the type * to use as a loop counter. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define plist_for_each_entry(pos, head, mem) \ + list_for_each_entry(pos, &(head)->node_list, mem.plist.node_list) + +/** + * plist_for_each_entry_safe - iterate over list of given type safe against + * removal of list entry + * + * @pos: the type * to use as a loop counter. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @m: the name of the list_struct within the struct. 
+ */ +#define plist_for_each_entry_safe(pos, n, head, m) \ + list_for_each_entry_safe(pos, n, &(head)->node_list, m.plist.node_list) + +/** + * plist_head_empty - return !0 if a plist_head is empty + * + * @head: &struct plist_head pointer + */ +static inline int plist_head_empty(const struct plist_head *head) +{ + return list_empty(&head->node_list); +} + +/** + * plist_node_empty - return !0 if plist_node is not on a list + * + * @node: &struct plist_node pointer + */ +static inline int plist_node_empty(const struct plist_node *node) +{ + return plist_head_empty(&node->plist); +} + +/* All functions below assume the plist_head is not empty. */ + +/** + * plist_first_entry - get the struct for the first entry + * + * @ptr: the &struct plist_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + */ +#ifdef CONFIG_DEBUG_PI_LIST +# define plist_first_entry(head, type, member) \ +({ \ + WARN_ON(plist_head_empty(head)); \ + container_of(plist_first(head), type, member); \ +}) +#else +# define plist_first_entry(head, type, member) \ + container_of(plist_first(head), type, member) +#endif + +/** + * plist_first - return the first node (and thus, highest priority) + * + * @head: the &struct plist_head pointer + * + * Assumes the plist is _not_ empty. + */ +static inline struct plist_node* plist_first(const struct plist_head *head) +{ + return list_entry(head->node_list.next, + struct plist_node, plist.node_list); +} + +#endif diff --git a/include/linux/poison.h b/include/linux/poison.h new file mode 100644 index 00000000000..a5347c02432 --- /dev/null +++ b/include/linux/poison.h @@ -0,0 +1,58 @@ +#ifndef _LINUX_POISON_H +#define _LINUX_POISON_H + +/********** include/linux/list.h **********/ +/* + * These are non-NULL pointers that will result in page faults + * under normal circumstances, used to verify that nobody uses + * non-initialized list entries. + */ +#define LIST_POISON1 ((void *) 0x00100100) +#define LIST_POISON2 ((void *) 0x00200200) + +/********** mm/slab.c **********/ +/* + * Magic nums for obj red zoning. + * Placed in the first word before and the first word after an obj. + */ +#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */ +#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */ + +/* ...and for poisoning */ +#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */ +#define POISON_FREE 0x6b /* for use-after-free poisoning */ +#define POISON_END 0xa5 /* end-byte of poisoning */ + +/********** arch/$ARCH/mm/init.c **********/ +#define POISON_FREE_INITMEM 0xcc + +/********** arch/x86_64/mm/init.c **********/ +#define POISON_FREE_INITDATA 0xba + +/********** arch/ia64/hp/common/sba_iommu.c **********/ +/* + * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a + * value of "SBAIOMMU POISON\0" for spill-over poisoning. 
+ */ + +/********** fs/jbd/journal.c **********/ +#define JBD_POISON_FREE 0x5b + +/********** drivers/base/dmapool.c **********/ +#define POOL_POISON_FREED 0xa7 /* !inuse */ +#define POOL_POISON_ALLOCATED 0xa9 /* !initted */ + +/********** drivers/atm/ **********/ +#define ATM_POISON_FREE 0x12 + +/********** kernel/mutexes **********/ +#define MUTEX_DEBUG_INIT 0x11 +#define MUTEX_DEBUG_FREE 0x22 + +/********** security/ **********/ +#define KEY_DESTROY 0xbd + +/********** sound/oss/ **********/ +#define OSS_POISON_FREE 0xAB + +#endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 6312758393b..48dfe00070c 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -258,6 +258,7 @@ extern void rcu_init(void); extern void rcu_check_callbacks(int cpu, int user); extern void rcu_restart_cpu(int cpu); extern long rcu_batches_completed(void); +extern long rcu_batches_completed_bh(void); /* Exported interfaces */ extern void FASTCALL(call_rcu(struct rcu_head *head, diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h new file mode 100644 index 00000000000..fa4a3b82ba7 --- /dev/null +++ b/include/linux/rtmutex.h @@ -0,0 +1,117 @@ +/* + * RT Mutexes: blocking mutual exclusion locks with PI support + * + * started by Ingo Molnar and Thomas Gleixner: + * + * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com> + * + * This file contains the public data structure and API definitions. + */ + +#ifndef __LINUX_RT_MUTEX_H +#define __LINUX_RT_MUTEX_H + +#include <linux/linkage.h> +#include <linux/plist.h> +#include <linux/spinlock_types.h> + +/* + * The rt_mutex structure + * + * @wait_lock: spinlock to protect the structure + * @wait_list: pilist head to enqueue waiters in priority order + * @owner: the mutex owner + */ +struct rt_mutex { + spinlock_t wait_lock; + struct plist_head wait_list; + struct task_struct *owner; +#ifdef CONFIG_DEBUG_RT_MUTEXES + int save_state; + struct list_head held_list_entry; + unsigned long acquire_ip; + const char *name, *file; + int line; + void *magic; +#endif +}; + +struct rt_mutex_waiter; +struct hrtimer_sleeper; + +#ifdef CONFIG_DEBUG_RT_MUTEXES + extern int rt_mutex_debug_check_no_locks_freed(const void *from, + unsigned long len); + extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task); +#else + static inline int rt_mutex_debug_check_no_locks_freed(const void *from, + unsigned long len) + { + return 0; + } +# define rt_mutex_debug_check_no_locks_held(task) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_RT_MUTEXES +# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ + , .name = #mutexname, .file = __FILE__, .line = __LINE__ +# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __FUNCTION__) + extern void rt_mutex_debug_task_free(struct task_struct *tsk); +#else +# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) +# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL) +# define rt_mutex_debug_task_free(t) do { } while (0) +#endif + +#define __RT_MUTEX_INITIALIZER(mutexname) \ + { .wait_lock = SPIN_LOCK_UNLOCKED \ + , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \ + , .owner = NULL \ + __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} + +#define DEFINE_RT_MUTEX(mutexname) \ + struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) + +/*** + * rt_mutex_is_locked - is the mutex locked + * @lock: the mutex to be queried + * + * Returns 1 if the mutex is locked, 0 if unlocked. 
+ */ +static inline int rt_mutex_is_locked(struct rt_mutex *lock) +{ + return lock->owner != NULL; +} + +extern void __rt_mutex_init(struct rt_mutex *lock, const char *name); +extern void rt_mutex_destroy(struct rt_mutex *lock); + +extern void rt_mutex_lock(struct rt_mutex *lock); +extern int rt_mutex_lock_interruptible(struct rt_mutex *lock, + int detect_deadlock); +extern int rt_mutex_timed_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *timeout, + int detect_deadlock); + +extern int rt_mutex_trylock(struct rt_mutex *lock); + +extern void rt_mutex_unlock(struct rt_mutex *lock); + +#ifdef CONFIG_DEBUG_RT_MUTEXES +# define INIT_RT_MUTEX_DEBUG(tsk) \ + .held_list_head = LIST_HEAD_INIT(tsk.held_list_head), \ + .held_list_lock = SPIN_LOCK_UNLOCKED +#else +# define INIT_RT_MUTEX_DEBUG(tsk) +#endif + +#ifdef CONFIG_RT_MUTEXES +# define INIT_RT_MUTEXES(tsk) \ + .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock), \ + INIT_RT_MUTEX_DEBUG(tsk) +#else +# define INIT_RT_MUTEXES(tsk) +#endif + +#endif diff --git a/include/linux/sched.h b/include/linux/sched.h index 122a25c1b99..821f0481ebe 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -73,6 +73,7 @@ struct sched_param { #include <linux/seccomp.h> #include <linux/rcupdate.h> #include <linux/futex.h> +#include <linux/rtmutex.h> #include <linux/time.h> #include <linux/param.h> @@ -83,6 +84,7 @@ struct sched_param { #include <asm/processor.h> struct exec_domain; +struct futex_pi_state; /* * List of flags we want to share for kernel threads, @@ -123,6 +125,7 @@ extern unsigned long nr_running(void); extern unsigned long nr_uninterruptible(void); extern unsigned long nr_active(void); extern unsigned long nr_iowait(void); +extern unsigned long weighted_cpuload(const int cpu); /* @@ -494,8 +497,11 @@ struct signal_struct { #define MAX_PRIO (MAX_RT_PRIO + 40) -#define rt_task(p) (unlikely((p)->prio < MAX_RT_PRIO)) +#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO) +#define rt_task(p) rt_prio((p)->prio) #define batch_task(p) (unlikely((p)->policy == SCHED_BATCH)) +#define has_rt_policy(p) \ + unlikely((p)->policy != SCHED_NORMAL && (p)->policy != SCHED_BATCH) /* * Some day this will be a full-fledged user tracking system.. @@ -558,9 +564,9 @@ enum idle_type /* * sched-domains (multiprocessor balancing) declarations: */ -#ifdef CONFIG_SMP #define SCHED_LOAD_SCALE 128UL /* increase resolution of load */ +#ifdef CONFIG_SMP #define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */ #define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */ #define SD_BALANCE_EXEC 4 /* Balance on exec */ @@ -569,6 +575,11 @@ enum idle_type #define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */ #define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */ #define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */ +#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */ + +#define BALANCE_FOR_POWER ((sched_mc_power_savings || sched_smt_power_savings) \ + ? 
SD_POWERSAVINGS_BALANCE : 0) + struct sched_group { struct sched_group *next; /* Must be a circular list */ @@ -638,7 +649,7 @@ struct sched_domain { #endif }; -extern void partition_sched_domains(cpumask_t *partition1, +extern int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2); /* @@ -713,10 +724,13 @@ struct task_struct { int lock_depth; /* BKL lock depth */ -#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) +#ifdef CONFIG_SMP +#ifdef __ARCH_WANT_UNLOCKED_CTXSW int oncpu; #endif - int prio, static_prio; +#endif + int load_weight; /* for niceness load balancing purposes */ + int prio, static_prio, normal_prio; struct list_head run_list; prio_array_t *array; @@ -843,6 +857,20 @@ struct task_struct { /* Protection of (de-)allocation: mm, files, fs, tty, keyrings */ spinlock_t alloc_lock; + /* Protection of the PI data structures: */ + spinlock_t pi_lock; + +#ifdef CONFIG_RT_MUTEXES + /* PI waiters blocked on a rt_mutex held by this task */ + struct plist_head pi_waiters; + /* Deadlock detection and priority inheritance handling */ + struct rt_mutex_waiter *pi_blocked_on; +# ifdef CONFIG_DEBUG_RT_MUTEXES + spinlock_t held_list_lock; + struct list_head held_list_head; +# endif +#endif + #ifdef CONFIG_DEBUG_MUTEXES /* mutex deadlock detection */ struct mutex_waiter *blocked_on; @@ -888,6 +916,8 @@ struct task_struct { #ifdef CONFIG_COMPAT struct compat_robust_list_head __user *compat_robust_list; #endif + struct list_head pi_state_list; + struct futex_pi_state *pi_state_cache; atomic_t fs_excl; /* holding fs exclusive resources */ struct rcu_head rcu; @@ -955,6 +985,7 @@ static inline void put_task_struct(struct task_struct *t) #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ +#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ /* * Only the _current_ task can read/write to tsk->flags, but other @@ -1009,6 +1040,19 @@ static inline void idle_task_exit(void) {} #endif extern void sched_idle_next(void); + +#ifdef CONFIG_RT_MUTEXES +extern int rt_mutex_getprio(task_t *p); +extern void rt_mutex_setprio(task_t *p, int prio); +extern void rt_mutex_adjust_pi(task_t *p); +#else +static inline int rt_mutex_getprio(task_t *p) +{ + return p->normal_prio; +} +# define rt_mutex_adjust_pi(p) do { } while (0) +#endif + extern void set_user_nice(task_t *p, long nice); extern int task_prio(const task_t *p); extern int task_nice(const task_t *p); @@ -1408,6 +1452,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) extern long sched_setaffinity(pid_t pid, cpumask_t new_mask); extern long sched_getaffinity(pid_t pid, cpumask_t *mask); +#include <linux/sysdev.h> +extern int sched_mc_power_savings, sched_smt_power_savings; +extern struct sysdev_attribute attr_sched_mc_power_savings, attr_sched_smt_power_savings; +extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls); + extern void normalize_rt_tasks(void); #ifdef CONFIG_PM diff --git a/include/linux/scx200.h b/include/linux/scx200.h index a22f9e173ad..693c0557e70 100644 --- a/include/linux/scx200.h +++ b/include/linux/scx200.h @@ -49,10 +49,3 @@ extern unsigned scx200_cb_base; #define SCx200_REV 0x3d /* Revision Register */ #define SCx200_CBA 0x3e /* Configuration Base Address Register */ #define SCx200_CBA_SCRATCH 0x64 /* Configuration Base Address Scratchpad */ - -/* - Local variables: - 
compile-command: "make -C ../.. bzImage modules" - c-basic-offset: 8 - End: -*/ diff --git a/include/linux/scx200_gpio.h b/include/linux/scx200_gpio.h index 30cdd648ba7..90dd069cc14 100644 --- a/include/linux/scx200_gpio.h +++ b/include/linux/scx200_gpio.h @@ -1,6 +1,6 @@ #include <linux/spinlock.h> -u32 scx200_gpio_configure(int index, u32 set, u32 clear); +u32 scx200_gpio_configure(unsigned index, u32 set, u32 clear); extern unsigned scx200_gpio_base; extern long scx200_gpio_shadow[2]; @@ -17,7 +17,7 @@ extern long scx200_gpio_shadow[2]; /* returns the value of the GPIO pin */ -static inline int scx200_gpio_get(int index) { +static inline int scx200_gpio_get(unsigned index) { __SCx200_GPIO_BANK; __SCx200_GPIO_IOADDR + 0x04; __SCx200_GPIO_INDEX; @@ -29,7 +29,7 @@ static inline int scx200_gpio_get(int index) { driven if the GPIO is configured as an output, it might not be the state of the GPIO right now if the GPIO is configured as an input) */ -static inline int scx200_gpio_current(int index) { +static inline int scx200_gpio_current(unsigned index) { __SCx200_GPIO_BANK; __SCx200_GPIO_INDEX; @@ -38,7 +38,7 @@ static inline int scx200_gpio_current(int index) { /* drive the GPIO signal high */ -static inline void scx200_gpio_set_high(int index) { +static inline void scx200_gpio_set_high(unsigned index) { __SCx200_GPIO_BANK; __SCx200_GPIO_IOADDR; __SCx200_GPIO_SHADOW; @@ -49,7 +49,7 @@ static inline void scx200_gpio_set_high(int index) { /* drive the GPIO signal low */ -static inline void scx200_gpio_set_low(int index) { +static inline void scx200_gpio_set_low(unsigned index) { __SCx200_GPIO_BANK; __SCx200_GPIO_IOADDR; __SCx200_GPIO_SHADOW; @@ -60,7 +60,7 @@ static inline void scx200_gpio_set_low(int index) { /* drive the GPIO signal to state */ -static inline void scx200_gpio_set(int index, int state) { +static inline void scx200_gpio_set(unsigned index, int state) { __SCx200_GPIO_BANK; __SCx200_GPIO_IOADDR; __SCx200_GPIO_SHADOW; @@ -73,7 +73,7 @@ static inline void scx200_gpio_set(int index, int state) { } /* toggle the GPIO signal */ -static inline void scx200_gpio_change(int index) { +static inline void scx200_gpio_change(unsigned index) { __SCx200_GPIO_BANK; __SCx200_GPIO_IOADDR; __SCx200_GPIO_SHADOW; @@ -87,10 +87,3 @@ static inline void scx200_gpio_change(int index) { #undef __SCx200_GPIO_SHADOW #undef __SCx200_GPIO_INDEX #undef __SCx200_GPIO_OUT - -/* - Local variables: - compile-command: "make -C ../.. 
bzImage modules" - c-basic-offset: 8 - End: -*/ diff --git a/include/linux/swap.h b/include/linux/swap.h index dc3f3aa0c83..c41e2d6d1ac 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -199,6 +199,8 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) } #endif +extern int kswapd_run(int nid); + #ifdef CONFIG_MMU /* linux/mm/shmem.c */ extern int shmem_unuse(swp_entry_t entry, struct page *page); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 33785b79d54..008f04c5673 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -174,9 +174,9 @@ asmlinkage long sys_waitid(int which, pid_t pid, int options, struct rusage __user *ru); asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options); asmlinkage long sys_set_tid_address(int __user *tidptr); -asmlinkage long sys_futex(u32 __user *uaddr, int op, int val, +asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, struct timespec __user *utime, u32 __user *uaddr2, - int val3); + u32 val3); asmlinkage long sys_init_module(void __user *umod, unsigned long len, const char __user *uargs); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 349ef908a22..46e4d8f2771 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -149,6 +149,7 @@ enum KERN_ACPI_VIDEO_FLAGS=71, /* int: flags for setting up video after ACPI sleep */ KERN_IA64_UNALIGNED=72, /* int: ia64 unaligned userland trap enable */ KERN_COMPAT_LOG=73, /* int: print compat layer messages */ + KERN_MAX_LOCK_DEPTH=74, }; @@ -189,6 +190,7 @@ enum VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */ VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */ VM_PANIC_ON_OOM=33, /* panic at out-of-memory */ + VM_VDSO_ENABLED=34, /* map VDSO into new processes? */ }; diff --git a/include/linux/topology.h b/include/linux/topology.h index a305ae2e44b..ec1eca85290 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -134,7 +134,8 @@ .flags = SD_LOAD_BALANCE \ | SD_BALANCE_NEWIDLE \ | SD_BALANCE_EXEC \ - | SD_WAKE_AFFINE, \ + | SD_WAKE_AFFINE \ + | BALANCE_FOR_POWER, \ .last_balance = jiffies, \ .balance_interval = 1, \ .nr_balance_failed = 0, \ |
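
The sketches below are editorial illustrations of a few interfaces this series touches; they are not part of the diff, and every demo_* identifier is invented for the example. The core_t2.h hunk replaces the open-coded SPIN_LOCK_UNLOCKED initializer with DEFINE_SPINLOCK(), and asm-generic/bug.h gains WARN_ON_SMP(), which warns only on CONFIG_SMP builds. A minimal sketch of both, assuming a hypothetical driver lock:

#include <linux/spinlock.h>
#include <linux/kernel.h>

/* Hypothetical example lock; the old spelling would have been
 * "static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;". */
static DEFINE_SPINLOCK(demo_lock);
static int demo_count;

static void demo_update(void)
{
        spin_lock(&demo_lock);
        /* WARN_ON_SMP() is a no-op on UP kernels and a WARN_ON() on SMP. */
        WARN_ON_SMP(!spin_is_locked(&demo_lock));
        demo_count++;
        spin_unlock(&demo_lock);
}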
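
linux/cpu.h adds register_hotcpu_notifier()/unregister_hotcpu_notifier(), which collapse to empty macros when CONFIG_HOTPLUG_CPU is off, while register_cpu()/unregister_cpu() lose their struct node argument. A hedged sketch of a notifier user; demo_cpu_callback and demo_hotplug_init are made-up names:

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kernel.h>

/* Hypothetical callback, shown only to illustrate the new wrappers. */
static int demo_cpu_callback(struct notifier_block *nb,
                             unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_ONLINE:
                printk(KERN_INFO "demo: cpu %u came online\n", cpu);
                break;
        case CPU_DEAD:
                printk(KERN_INFO "demo: cpu %u went offline\n", cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block demo_cpu_nb = {
        .notifier_call = demo_cpu_callback,
};

static int __init demo_hotplug_init(void)
{
        /* Expands to nothing if CPU hotplug support is not configured. */
        register_hotcpu_notifier(&demo_cpu_nb);
        return 0;
}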
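
The new linux/plist.h provides the descending-priority list that the rt_mutex and futex-PI code build on. A minimal sketch of the API, assuming the caller supplies its own locking as the header requires; struct demo_waiter and demo_plist_lock are invented for the example:

#include <linux/plist.h>
#include <linux/spinlock.h>

/* Invented container; only the embedded plist_node matters here. */
struct demo_waiter {
        struct plist_node node;
        int id;
};

static DEFINE_SPINLOCK(demo_plist_lock);
static struct plist_head demo_queue;

static void demo_queue_init(void)
{
        /* The lock pointer is only recorded for CONFIG_DEBUG_PI_LIST checks. */
        plist_head_init(&demo_queue, &demo_plist_lock);
}

static void demo_queue_add(struct demo_waiter *w, int prio)
{
        plist_node_init(&w->node, prio);        /* lower value == higher priority */
        spin_lock(&demo_plist_lock);
        plist_add(&w->node, &demo_queue);
        spin_unlock(&demo_plist_lock);
}

static struct demo_waiter *demo_queue_pop(void)
{
        struct demo_waiter *w = NULL;

        spin_lock(&demo_plist_lock);
        if (!plist_head_empty(&demo_queue)) {
                w = plist_first_entry(&demo_queue, struct demo_waiter, node);
                plist_del(&w->node, &demo_queue);
        }
        spin_unlock(&demo_plist_lock);
        return w;
}

As the header comment notes, insertion costs O(K) in the number of distinct priority levels while removal stays O(1), which is why the waiter queues of the PI code use it.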
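
linux/rtmutex.h is the public face of the new priority-inheriting mutex. A sketch of basic use, with demo_rt_lock being a hypothetical lock rather than anything in the tree:

#include <linux/errno.h>
#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(demo_rt_lock);

static void demo_pi_section(void)
{
        /* A blocked higher-priority waiter boosts the owner for the duration. */
        rt_mutex_lock(&demo_rt_lock);
        /* ... short critical section ... */
        rt_mutex_unlock(&demo_rt_lock);
}

static int demo_pi_poll(void)
{
        if (!rt_mutex_trylock(&demo_rt_lock))
                return -EBUSY;
        rt_mutex_unlock(&demo_rt_lock);
        return 0;
}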