Diffstat (limited to 'include')
-rw-r--r--   include/asm-generic/bitops/sched.h          |  21
-rw-r--r--   include/asm-mips/mach-au1x00/au1xxx_ide.h   |  28
-rw-r--r--   include/linux/eeprom_93cx6.h                |  72
-rw-r--r--   include/linux/hardirq.h                     |  13
-rw-r--r--   include/linux/ide.h                         |  18
-rw-r--r--   include/linux/sched.h                       | 251
-rw-r--r--   include/linux/topology.h                    |  12
-rw-r--r--   include/linux/wait.h                        |  16
-rw-r--r--   include/pcmcia/ciscode.h                    |   2
9 files changed, 312 insertions, 121 deletions
diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h
index 815bb014806..604fab7031a 100644
--- a/include/asm-generic/bitops/sched.h
+++ b/include/asm-generic/bitops/sched.h
@@ -6,28 +6,23 @@
 
 /*
  * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
+ * way of searching a 100-bit bitmap. It's guaranteed that at least
+ * one of the 100 bits is cleared.
  */
 static inline int sched_find_first_bit(const unsigned long *b)
 {
 #if BITS_PER_LONG == 64
-	if (unlikely(b[0]))
+	if (b[0])
 		return __ffs(b[0]);
-	if (likely(b[1]))
-		return __ffs(b[1]) + 64;
-	return __ffs(b[2]) + 128;
+	return __ffs(b[1]) + 64;
 #elif BITS_PER_LONG == 32
-	if (unlikely(b[0]))
+	if (b[0])
 		return __ffs(b[0]);
-	if (unlikely(b[1]))
+	if (b[1])
 		return __ffs(b[1]) + 32;
-	if (unlikely(b[2]))
+	if (b[2])
 		return __ffs(b[2]) + 64;
-	if (b[3])
-		return __ffs(b[3]) + 96;
-	return __ffs(b[4]) + 128;
+	return __ffs(b[3]) + 96;
 #else
 #error BITS_PER_LONG not defined
 #endif
diff --git a/include/asm-mips/mach-au1x00/au1xxx_ide.h b/include/asm-mips/mach-au1x00/au1xxx_ide.h
index 8fcae21adbd..4663e8b415c 100644
--- a/include/asm-mips/mach-au1x00/au1xxx_ide.h
+++ b/include/asm-mips/mach-au1x00/au1xxx_ide.h
@@ -88,26 +88,26 @@ static const struct drive_list_entry dma_white_list [] = {
 	/*
 	 * Hitachi
 	 */
-	{ "HITACHI_DK14FA-20"	, "ALL"	},
-	{ "HTS726060M9AT00"	, "ALL"	},
+	{ "HITACHI_DK14FA-20"	, NULL	},
+	{ "HTS726060M9AT00"	, NULL	},
 	/*
 	 * Maxtor
 	 */
-	{ "Maxtor 6E040L0"	, "ALL"	},
-	{ "Maxtor 6Y080P0"	, "ALL"	},
-	{ "Maxtor 6Y160P0"	, "ALL"	},
+	{ "Maxtor 6E040L0"	, NULL	},
+	{ "Maxtor 6Y080P0"	, NULL	},
+	{ "Maxtor 6Y160P0"	, NULL	},
 	/*
 	 * Seagate
 	 */
-	{ "ST3120026A"		, "ALL"	},
-	{ "ST320014A"		, "ALL"	},
-	{ "ST94011A"		, "ALL"	},
-	{ "ST340016A"		, "ALL"	},
+	{ "ST3120026A"		, NULL	},
+	{ "ST320014A"		, NULL	},
+	{ "ST94011A"		, NULL	},
+	{ "ST340016A"		, NULL	},
 	/*
 	 * Western Digital
 	 */
-	{ "WDC WD400UE-00HCT0"	, "ALL"	},
-	{ "WDC WD400JB-00JJC0"	, "ALL"	},
+	{ "WDC WD400UE-00HCT0"	, NULL	},
+	{ "WDC WD400JB-00JJC0"	, NULL	},
 	{ NULL			, NULL	}
 };
 
@@ -116,9 +116,9 @@ static const struct drive_list_entry dma_black_list [] = {
 	/*
 	 * Western Digital
 	 */
-	{ "WDC WD100EB-00CGH0"	, "ALL"	},
-	{ "WDC WD200BB-00AUA1"	, "ALL"	},
-	{ "WDC AC24300L"	, "ALL"	},
+	{ "WDC WD100EB-00CGH0"	, NULL	},
+	{ "WDC WD200BB-00AUA1"	, NULL	},
+	{ "WDC AC24300L"	, NULL	},
 	{ NULL			, NULL	}
 };
 #endif
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
new file mode 100644
index 00000000000..d774b7778c9
--- /dev/null
+++ b/include/linux/eeprom_93cx6.h
@@ -0,0 +1,72 @@
+/*
+	Copyright (C) 2004 - 2006 rt2x00 SourceForge Project
+	<http://rt2x00.serialmonkey.com>
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+	GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the
+	Free Software Foundation, Inc.,
+	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+	Module: eeprom_93cx6
+	Abstract: EEPROM reader datastructures for 93cx6 chipsets.
+	Supported chipsets: 93c46 & 93c66.
+ */
+
+/*
+ * EEPROM operation defines.
+ */
+#define PCI_EEPROM_WIDTH_93C46	6
+#define PCI_EEPROM_WIDTH_93C66	8
+#define PCI_EEPROM_WIDTH_OPCODE	3
+#define PCI_EEPROM_WRITE_OPCODE	0x05
+#define PCI_EEPROM_READ_OPCODE	0x06
+#define PCI_EEPROM_EWDS_OPCODE	0x10
+#define PCI_EEPROM_EWEN_OPCODE	0x13
+
+/**
+ * struct eeprom_93cx6 - control structure for setting the commands
+ * for reading the eeprom data.
+ * @data: private pointer for the driver.
+ * @register_read(struct eeprom_93cx6 *eeprom): handler to
+ * read the eeprom register, this function should set all reg_* fields.
+ * @register_write(struct eeprom_93cx6 *eeprom): handler to
+ * write to the eeprom register by using all reg_* fields.
+ * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines
+ * @reg_data_in: register field to indicate data input
+ * @reg_data_out: register field to indicate data output
+ * @reg_data_clock: register field to set the data clock
+ * @reg_chip_select: register field to set the chip select
+ *
+ * This structure is used for the communication between the driver
+ * and the eeprom_93cx6 handlers for reading the eeprom.
+ */
+struct eeprom_93cx6 {
+	void *data;
+
+	void (*register_read)(struct eeprom_93cx6 *eeprom);
+	void (*register_write)(struct eeprom_93cx6 *eeprom);
+
+	int width;
+
+	char reg_data_in;
+	char reg_data_out;
+	char reg_data_clock;
+	char reg_chip_select;
+};
+
+extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom,
+	const u8 word, u16 *data);
+extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom,
+	const u8 word, __le16 *data, const u16 words);
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 7803014f3a1..8d302298a16 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -79,6 +79,19 @@
 #endif
 
 #ifdef CONFIG_PREEMPT
+# define PREEMPT_CHECK_OFFSET 1
+#else
+# define PREEMPT_CHECK_OFFSET 0
+#endif
+
+/*
+ * Check whether we were atomic before we did preempt_disable():
+ * (used by the scheduler)
+ */
+#define in_atomic_preempt_off() \
+	((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
+
+#ifdef CONFIG_PREEMPT
 # define preemptible()	(preempt_count() == 0 && !irqs_disabled())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 1e365acdd36..19ab2580405 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -25,6 +25,7 @@
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/semaphore.h>
+#include <asm/mutex.h>
 
 /******************************************************************************
  * IDE driver configuration options (play with these as desired):
@@ -685,6 +686,8 @@ typedef struct hwif_s {
 	u8 mwdma_mask;
 	u8 swdma_mask;
 
+	u8 cbl;		/* cable type */
+
 	hwif_chipset_t chipset;	/* sub-module for tuning.. */
 
 	struct pci_dev *pci_dev;	/* for pci chipsets */
@@ -735,8 +738,8 @@ typedef struct hwif_s {
 	void (*ide_dma_clear_irq)(ide_drive_t *drive);
 	void (*dma_host_on)(ide_drive_t *drive);
 	void (*dma_host_off)(ide_drive_t *drive);
-	int (*ide_dma_lostirq)(ide_drive_t *drive);
-	int (*ide_dma_timeout)(ide_drive_t *drive);
+	void (*dma_lost_irq)(ide_drive_t *drive);
+	void (*dma_timeout)(ide_drive_t *drive);
 
 	void (*OUTB)(u8 addr, unsigned long port);
 	void (*OUTBSYNC)(ide_drive_t *drive, u8 addr, unsigned long port);
@@ -791,7 +794,6 @@ typedef struct hwif_s {
 	unsigned	sharing_irq: 1;	/* 1 = sharing irq with another hwif */
 	unsigned	reset      : 1;	/* reset after probe */
 	unsigned	autodma    : 1;	/* auto-attempt using DMA at boot */
-	unsigned	udma_four  : 1;	/* 1=ATA-66 capable, 0=default */
 	unsigned	no_lba48   : 1;	/* 1 = cannot do LBA48 */
 	unsigned	no_lba48_dma : 1;	/* 1 = cannot do LBA48 DMA */
 	unsigned	auto_poll  : 1;	/* supports nop auto-poll */
@@ -863,7 +865,7 @@ typedef struct hwgroup_s {
 
 typedef struct ide_driver_s ide_driver_t;
 
-extern struct semaphore ide_setting_sem;
+extern struct mutex ide_setting_mtx;
 
 int set_io_32bit(ide_drive_t *, int);
 int set_pio_mode(ide_drive_t *, int);
@@ -1304,8 +1306,8 @@ extern int __ide_dma_check(ide_drive_t *);
 extern int ide_dma_setup(ide_drive_t *);
 extern void ide_dma_start(ide_drive_t *);
 extern int __ide_dma_end(ide_drive_t *);
-extern int __ide_dma_lostirq(ide_drive_t *);
-extern int __ide_dma_timeout(ide_drive_t *);
+extern void ide_dma_lost_irq(ide_drive_t *);
+extern void ide_dma_timeout(ide_drive_t *);
 #endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
 
 #else
@@ -1382,11 +1384,11 @@ extern const ide_pio_timings_t ide_pio_timings[6];
 
 extern spinlock_t ide_lock;
-extern struct semaphore ide_cfg_sem;
+extern struct mutex ide_cfg_mtx;
 /*
  * Structure locking:
  *
- * ide_cfg_sem and ide_lock together protect changes to
+ * ide_cfg_mtx and ide_lock together protect changes to
 * ide_hwif_t->{next,hwgroup}
 * ide_drive_t->next
 *
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 693f0e6c54d..cfb680585ab 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -34,6 +34,8 @@
 #define SCHED_FIFO	1
 #define SCHED_RR	2
 #define SCHED_BATCH	3
+/* SCHED_ISO: reserved but not implemented yet */
+#define SCHED_IDLE	5
 
 #ifdef __KERNEL__
 
@@ -130,6 +132,26 @@ extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
 extern unsigned long weighted_cpuload(const int cpu);
 
+struct seq_file;
+struct cfs_rq;
+#ifdef CONFIG_SCHED_DEBUG
+extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
+extern void proc_sched_set_task(struct task_struct *p);
+extern void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now);
+#else
+static inline void
+proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+{
+}
+static inline void proc_sched_set_task(struct task_struct *p)
+{
+}
+static inline void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now)
+{
+}
+#endif
 
 /*
  * Task state bitmask. NOTE! These bits are also
@@ -193,6 +215,7 @@ struct task_struct;
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern void init_idle(struct task_struct *idle, int cpu);
+extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern cpumask_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
@@ -479,7 +502,7 @@ struct signal_struct {
 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
 	 * other than jiffies.)
 	 */
-	unsigned long long sched_time;
+	unsigned long long sum_sched_runtime;
 
 	/*
	 * We don't bother to synchronize most readers of this at all,
@@ -521,31 +544,6 @@ struct signal_struct {
 #define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
 #define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */
 
-
-/*
- * Priority of a process goes from 0..MAX_PRIO-1, valid RT
- * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
- * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
- * values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space. This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
- */
-
-#define MAX_USER_RT_PRIO	100
-#define MAX_RT_PRIO		MAX_USER_RT_PRIO
-
-#define MAX_PRIO		(MAX_RT_PRIO + 40)
-
-#define rt_prio(prio)		unlikely((prio) < MAX_RT_PRIO)
-#define rt_task(p)		rt_prio((p)->prio)
-#define batch_task(p)		(unlikely((p)->policy == SCHED_BATCH))
-#define is_rt_policy(p)		((p) != SCHED_NORMAL && (p) != SCHED_BATCH)
-#define has_rt_policy(p)	unlikely(is_rt_policy((p)->policy))
-
 /*
  * Some day this will be a full-fledged user tracking system..
 */
@@ -583,13 +581,13 @@ struct reclaim_state;
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 struct sched_info {
 	/* cumulative counters */
-	unsigned long	cpu_time,	/* time spent on the cpu */
-			run_delay,	/* time spent waiting on a runqueue */
-			pcnt;		/* # of timeslices run on this cpu */
+	unsigned long pcnt;		/* # of times run on this cpu */
+	unsigned long long cpu_time,	/* time spent on the cpu */
+			   run_delay;	/* time spent waiting on a runqueue */
 
 	/* timestamps */
-	unsigned long	last_arrival,	/* when we last ran on a cpu */
-			last_queued;	/* when we were last queued to run */
+	unsigned long long last_arrival,/* when we last ran on a cpu */
+			   last_queued;	/* when we were last queued to run */
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
@@ -639,18 +637,24 @@ static inline int sched_info_on(void)
 #endif
 }
 
-enum idle_type
-{
-	SCHED_IDLE,
-	NOT_IDLE,
-	NEWLY_IDLE,
-	MAX_IDLE_TYPES
+enum cpu_idle_type {
+	CPU_IDLE,
+	CPU_NOT_IDLE,
+	CPU_NEWLY_IDLE,
+	CPU_MAX_IDLE_TYPES
 };
 
 /*
  * sched-domains (multiprocessor balancing) declarations:
 */
-#define SCHED_LOAD_SCALE	128UL	/* increase resolution of load */
+
+/*
+ * Increase resolution of nice-level calculations:
+ */
+#define SCHED_LOAD_SHIFT	10
+#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
+
+#define SCHED_LOAD_SCALE_FUZZ	(SCHED_LOAD_SCALE >> 5)
 
 #ifdef CONFIG_SMP
 #define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
@@ -719,14 +723,14 @@ struct sched_domain {
 
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
-	unsigned long lb_cnt[MAX_IDLE_TYPES];
-	unsigned long lb_failed[MAX_IDLE_TYPES];
-	unsigned long lb_balanced[MAX_IDLE_TYPES];
-	unsigned long lb_imbalance[MAX_IDLE_TYPES];
-	unsigned long lb_gained[MAX_IDLE_TYPES];
-	unsigned long lb_hot_gained[MAX_IDLE_TYPES];
-	unsigned long lb_nobusyg[MAX_IDLE_TYPES];
-	unsigned long lb_nobusyq[MAX_IDLE_TYPES];
+	unsigned long lb_cnt[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_gained[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_hot_gained[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_nobusyg[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];
 
 	/* Active load balancing */
 	unsigned long alb_cnt;
@@ -753,12 +757,6 @@ struct sched_domain {
 extern int partition_sched_domains(cpumask_t *partition1,
 				    cpumask_t *partition2);
 
-/*
- * Maximum cache size the migration-costs auto-tuning code will
- * search from:
- */
-extern unsigned int max_cache_size;
-
 #endif	/* CONFIG_SMP */
 
@@ -809,14 +807,86 @@ struct mempolicy;
 struct pipe_inode_info;
 struct uts_namespace;
 
-enum sleep_type {
-	SLEEP_NORMAL,
-	SLEEP_NONINTERACTIVE,
-	SLEEP_INTERACTIVE,
-	SLEEP_INTERRUPTED,
+struct rq;
+struct sched_domain;
+
+struct sched_class {
+	struct sched_class *next;
+
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p,
+			      int wakeup, u64 now);
+	void (*dequeue_task) (struct rq *rq, struct task_struct *p,
+			      int sleep, u64 now);
+	void (*yield_task) (struct rq *rq, struct task_struct *p);
+
+	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+
+	struct task_struct * (*pick_next_task) (struct rq *rq, u64 now);
+	void (*put_prev_task) (struct rq *rq, struct task_struct *p, u64 now);
+
+	int (*load_balance) (struct rq *this_rq, int this_cpu,
+			struct rq *busiest,
+			unsigned long max_nr_move, unsigned long max_load_move,
+			struct sched_domain *sd, enum cpu_idle_type idle,
+			int *all_pinned, unsigned long *total_load_moved);
+
+	void (*set_curr_task) (struct rq *rq);
+	void (*task_tick) (struct rq *rq, struct task_struct *p);
+	void (*task_new) (struct rq *rq, struct task_struct *p);
 };
 
-struct prio_array;
+struct load_weight {
+	unsigned long weight, inv_weight;
+};
+
+/*
+ * CFS stats for a schedulable entity (task, task-group etc)
+ *
+ * Current field usage histogram:
+ *
+ *     4 se->block_start
+ *     4 se->run_node
+ *     4 se->sleep_start
+ *     4 se->sleep_start_fair
+ *     6 se->load.weight
+ *     7 se->delta_fair
+ *    15 se->wait_runtime
+ */
+struct sched_entity {
+	long			wait_runtime;
+	unsigned long		delta_fair_run;
+	unsigned long		delta_fair_sleep;
+	unsigned long		delta_exec;
+	s64			fair_key;
+	struct load_weight	load;		/* for load-balancing */
+	struct rb_node		run_node;
+	unsigned int		on_rq;
+
+	u64			wait_start_fair;
+	u64			wait_start;
+	u64			exec_start;
+	u64			sleep_start;
+	u64			sleep_start_fair;
+	u64			block_start;
+	u64			sleep_max;
+	u64			block_max;
+	u64			exec_max;
+	u64			wait_max;
+	u64			last_ran;
+
+	u64			sum_exec_runtime;
+	s64			sum_wait_runtime;
+	s64			sum_sleep_runtime;
+	unsigned long		wait_runtime_overruns;
+	unsigned long		wait_runtime_underruns;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	struct sched_entity	*parent;
+	/* rq on which this entity is (to be) queued: */
+	struct cfs_rq		*cfs_rq;
+	/* rq "owned" by this entity/group: */
+	struct cfs_rq		*my_q;
+#endif
+};
 
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
@@ -832,23 +902,20 @@ struct task_struct {
 	int oncpu;
 #endif
 #endif
-	int load_weight;	/* for niceness load balancing purposes */
+
 	int prio, static_prio, normal_prio;
 	struct list_head run_list;
-	struct prio_array *array;
+	struct sched_class *sched_class;
+	struct sched_entity se;
 
 	unsigned short ioprio;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	unsigned int btrace_seq;
 #endif
-	unsigned long sleep_avg;
-	unsigned long long timestamp, last_ran;
-	unsigned long long sched_time;	/* sched_clock time spent running */
-	enum sleep_type sleep_type;
 
 	unsigned int policy;
 	cpumask_t cpus_allowed;
-	unsigned int time_slice, first_time_slice;
+	unsigned int time_slice;
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
@@ -1078,6 +1145,37 @@
 #endif
 };
 
+/*
+ * Priority of a process goes from 0..MAX_PRIO-1, valid RT
+ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
+ * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
+ * values are inverted: lower p->prio value means higher priority.
+ *
+ * The MAX_USER_RT_PRIO value allows the actual maximum
+ * RT priority to be separate from the value exported to
+ * user-space. This allows kernel threads to set their
+ * priority to a value higher than any user task. Note:
+ * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
+ */
+
+#define MAX_USER_RT_PRIO	100
+#define MAX_RT_PRIO		MAX_USER_RT_PRIO
+
+#define MAX_PRIO		(MAX_RT_PRIO + 40)
+#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
+
+static inline int rt_prio(int prio)
+{
+	if (unlikely(prio < MAX_RT_PRIO))
+		return 1;
+	return 0;
+}
+
+static inline int rt_task(struct task_struct *p)
+{
+	return rt_prio(p->prio);
+}
+
 static inline pid_t process_group(struct task_struct *tsk)
 {
 	return tsk->signal->pgrp;
@@ -1223,7 +1321,7 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 
 extern unsigned long long sched_clock(void);
 extern unsigned long long
-current_sched_time(const struct task_struct *current_task);
+task_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -1232,6 +1330,8 @@ extern void sched_exec(void);
 #define sched_exec() {}
 #endif
 
+extern void sched_clock_unstable_event(void);
+
 #ifdef CONFIG_HOTPLUG_CPU
 extern void idle_task_exit(void);
 #else
@@ -1240,6 +1340,14 @@ static inline void idle_task_exit(void) {}
 
 extern void sched_idle_next(void);
 
+extern unsigned int sysctl_sched_granularity;
+extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_batch_wakeup_granularity;
+extern unsigned int sysctl_sched_stat_granularity;
+extern unsigned int sysctl_sched_runtime_limit;
+extern unsigned int sysctl_sched_child_runs_first;
+extern unsigned int sysctl_sched_features;
+
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
@@ -1317,8 +1425,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
 #else
 static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
-extern void FASTCALL(sched_exit(struct task_struct * p));
+extern void sched_fork(struct task_struct *p, int clone_flags);
+extern void sched_dead(struct task_struct *p);
 
 extern int in_group_p(gid_t);
 extern int in_egroup_p(gid_t);
@@ -1406,7 +1514,7 @@ extern struct mm_struct * mm_alloc(void);
 extern void FASTCALL(__mmdrop(struct mm_struct *));
 static inline void mmdrop(struct mm_struct * mm)
 {
-	if (atomic_dec_and_test(&mm->mm_count))
+	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
 		__mmdrop(mm);
 }
 
@@ -1638,10 +1746,7 @@ static inline unsigned int task_cpu(const struct task_struct *p)
 {
 	return task_thread_info(p)->cpu;
 }
 
-static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
-	task_thread_info(p)->cpu = cpu;
-}
+extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
 
 #else
diff --git a/include/linux/topology.h b/include/linux/topology.h
index a9d1f049cc1..da6c39b2d05 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -98,7 +98,7 @@
 	.cache_nice_tries	= 0,			\
 	.busy_idx		= 0,			\
 	.idle_idx		= 0,			\
-	.newidle_idx		= 1,			\
+	.newidle_idx		= 0,			\
 	.wake_idx		= 0,			\
 	.forkexec_idx		= 0,			\
 	.flags			= SD_LOAD_BALANCE	\
@@ -128,14 +128,15 @@
 	.imbalance_pct		= 125,			\
 	.cache_nice_tries	= 1,			\
 	.busy_idx		= 2,			\
-	.idle_idx		= 1,			\
-	.newidle_idx		= 2,			\
+	.idle_idx		= 0,			\
+	.newidle_idx		= 0,			\
 	.wake_idx		= 1,			\
 	.forkexec_idx		= 1,			\
 	.flags			= SD_LOAD_BALANCE	\
				| SD_BALANCE_NEWIDLE	\
				| SD_BALANCE_EXEC	\
				| SD_WAKE_AFFINE	\
+				| SD_WAKE_IDLE		\
				| SD_SHARE_PKG_RESOURCES\
				| BALANCE_FOR_MC_POWER,	\
 	.last_balance		= jiffies,		\
@@ -158,14 +159,15 @@
 	.imbalance_pct		= 125,			\
 	.cache_nice_tries	= 1,			\
 	.busy_idx		= 2,			\
-	.idle_idx		= 1,			\
-	.newidle_idx		= 2,			\
+	.idle_idx		= 0,			\
+	.newidle_idx		= 0,			\
 	.wake_idx		= 1,			\
 	.forkexec_idx		= 1,			\
 	.flags			= SD_LOAD_BALANCE	\
				| SD_BALANCE_NEWIDLE	\
				| SD_BALANCE_EXEC	\
				| SD_WAKE_AFFINE	\
+				| SD_WAKE_IDLE		\
				| BALANCE_FOR_PKG_POWER,\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
diff --git a/include/linux/wait.h b/include/linux/wait.h
index e820d00e138..0e686280450 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -366,15 +366,15 @@ static inline void remove_wait_queue_locked(wait_queue_head_t *q,
 
 /*
  * These are the old interfaces to sleep waiting for an event.
- * They are racy. DO NOT use them, use the wait_event* interfaces above.
- * We plan to remove these interfaces during 2.7.
+ * They are racy.  DO NOT use them, use the wait_event* interfaces above.
+ * We plan to remove these interfaces.
 */
-extern void FASTCALL(sleep_on(wait_queue_head_t *q));
-extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
-				      signed long timeout));
-extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
-extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
-						    signed long timeout));
+extern void sleep_on(wait_queue_head_t *q);
+extern long sleep_on_timeout(wait_queue_head_t *q,
+			     signed long timeout);
+extern void interruptible_sleep_on(wait_queue_head_t *q);
+extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
+					   signed long timeout);
 
 /*
  * Waitqueues which are removed from the waitqueue_head at wakeup time
diff --git a/include/pcmcia/ciscode.h b/include/pcmcia/ciscode.h
index eae7e2e8449..ad6e278ba7f 100644
--- a/include/pcmcia/ciscode.h
+++ b/include/pcmcia/ciscode.h
@@ -126,4 +126,6 @@
 #define MANFID_POSSIO		0x030c
 #define PRODID_POSSIO_GCC	0x0003
 
+#define MANFID_NEC		0x0010
+
 #endif /* _LINUX_CISCODE_H */
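A side note on the first hunk: sched_find_first_bit() shrinks from 140 to 100 bits because, with CFS, only the realtime priorities (MAX_RT_PRIO, i.e. 100 of them, per the macros re-established further down in sched.h) are still tracked in a priority bitmap. Below is a standalone userspace model of the 32-bit branch; the search logic is copied from the patch, while the test harness is invented here, with uint32_t standing in for a 32-bit unsigned long and __builtin_ctz() standing in for the kernel's __ffs().

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's __ffs(): index of the lowest set bit. */
static int my_ffs(uint32_t word)
{
	return __builtin_ctz(word);
}

/* The BITS_PER_LONG == 32 variant, exactly as in the patched header. */
static int sched_find_first_bit(const uint32_t *b)
{
	if (b[0])
		return my_ffs(b[0]);
	if (b[1])
		return my_ffs(b[1]) + 32;
	if (b[2])
		return my_ffs(b[2]) + 64;
	return my_ffs(b[3]) + 96;
}

int main(void)
{
	uint32_t b[4] = { 0, 0, 0, 0 };

	/* Mark RT priority 63 runnable, plus a sentinel bit past the
	 * 100 priorities so the unconditional final branch is safe,
	 * mirroring the delimiter bit the old scheduler kept set. */
	b[63 / 32] |= 1U << (63 % 32);
	b[100 / 32] |= 1U << (100 % 32);

	printf("%d\n", sched_find_first_bit(b));	/* prints 63 */
	return 0;
}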
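The new include/linux/eeprom_93cx6.h only declares the reader interface: a driver supplies register_read()/register_write() callbacks that mirror the four serial-bus lines between its hardware register and the reg_* fields, sets width to one of the PCI_EEPROM_WIDTH_* values, and then calls eeprom_93cx6_read() or eeprom_93cx6_multiread(). A minimal sketch of such wiring follows; the mydev structure and all MYDEV_* names are invented for illustration, and only struct eeprom_93cx6, the width defines, and eeprom_93cx6_multiread() come from the header above.

#include <linux/io.h>
#include <linux/eeprom_93cx6.h>

/* Hypothetical register layout; a real driver uses its own. */
#define MYDEV_EEPROM_CSR		0x0c
#define MYDEV_EEPROM_DATA_IN		(1 << 0)
#define MYDEV_EEPROM_DATA_OUT		(1 << 1)
#define MYDEV_EEPROM_DATA_CLOCK		(1 << 2)
#define MYDEV_EEPROM_CHIP_SELECT	(1 << 3)

struct mydev {
	void __iomem *csr_base;
	struct eeprom_93cx6 eeprom;
};

/* Copy the hardware register state into the reg_* fields. */
static void mydev_eeprom_register_read(struct eeprom_93cx6 *eeprom)
{
	struct mydev *dev = eeprom->data;
	u32 reg = readl(dev->csr_base + MYDEV_EEPROM_CSR);

	eeprom->reg_data_in = !!(reg & MYDEV_EEPROM_DATA_IN);
	eeprom->reg_data_out = !!(reg & MYDEV_EEPROM_DATA_OUT);
	eeprom->reg_data_clock = !!(reg & MYDEV_EEPROM_DATA_CLOCK);
	eeprom->reg_chip_select = !!(reg & MYDEV_EEPROM_CHIP_SELECT);
}

/* Write the reg_* fields back to the hardware register. */
static void mydev_eeprom_register_write(struct eeprom_93cx6 *eeprom)
{
	struct mydev *dev = eeprom->data;
	u32 reg = 0;

	if (eeprom->reg_data_in)
		reg |= MYDEV_EEPROM_DATA_IN;
	if (eeprom->reg_data_out)
		reg |= MYDEV_EEPROM_DATA_OUT;
	if (eeprom->reg_data_clock)
		reg |= MYDEV_EEPROM_DATA_CLOCK;
	if (eeprom->reg_chip_select)
		reg |= MYDEV_EEPROM_CHIP_SELECT;

	writel(reg, dev->csr_base + MYDEV_EEPROM_CSR);
}

/* Read 'words' 16-bit words starting at word offset 0. */
static void mydev_read_eeprom(struct mydev *dev, __le16 *buf, u16 words)
{
	dev->eeprom.data = dev;
	dev->eeprom.register_read = mydev_eeprom_register_read;
	dev->eeprom.register_write = mydev_eeprom_register_write;
	dev->eeprom.width = PCI_EEPROM_WIDTH_93C46;

	eeprom_93cx6_multiread(&dev->eeprom, 0, buf, words);
}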