From c41917df8a1adde34864116ce2231a7fe308d2ff Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Thu, 19 Jul 2007 21:28:35 +0200 Subject: [PATCH] sched: sched_cacheflush is now unused Since Ingo's recent scheduler rewrite which was merged as commit 0437e109e1841607f2988891eaa36c531c6aa6ac sched_cacheflush is unused. Signed-off-by: Ralf Baechle Signed-off-by: Ingo Molnar --- arch/ia64/kernel/setup.c | 9 --------- include/asm-alpha/system.h | 10 ---------- include/asm-arm/system.h | 10 ---------- include/asm-arm26/system.h | 10 ---------- include/asm-i386/system.h | 9 --------- include/asm-ia64/system.h | 1 - include/asm-m32r/system.h | 10 ---------- include/asm-mips/system.h | 10 ---------- include/asm-parisc/system.h | 11 ----------- include/asm-powerpc/system.h | 10 ---------- include/asm-ppc/system.h | 10 ---------- include/asm-s390/system.h | 10 ---------- include/asm-sh/system.h | 10 ---------- include/asm-sparc/system.h | 10 ---------- include/asm-sparc64/system.h | 10 ---------- include/asm-x86_64/system.h | 9 --------- 16 files changed, 149 deletions(-) diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 4d9864cc92c..cf06fe79904 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -980,15 +980,6 @@ cpu_init (void) pm_idle = default_idle; } -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - */ -void sched_cacheflush(void) -{ - ia64_sal_cache_flush(3); -} - void __init check_bugs (void) { diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h index cf1021a97b2..620c4d86cbf 100644 --- a/include/asm-alpha/system.h +++ b/include/asm-alpha/system.h @@ -139,16 +139,6 @@ extern void halt(void) __attribute__((noreturn)); struct task_struct; extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*); -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - #define imb() \ __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory") diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h index 6f8e6a69dc5..94ea8c6dc1a 100644 --- a/include/asm-arm/system.h +++ b/include/asm-arm/system.h @@ -254,16 +254,6 @@ do { \ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ } while (0) -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) /* * On the StrongARM, "swp" is terminally broken since it bypasses the diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h index 4703593b3bb..e09da5ff1f5 100644 --- a/include/asm-arm26/system.h +++ b/include/asm-arm26/system.h @@ -109,16 +109,6 @@ do { \ last = __switch_to(prev,task_thread_info(prev),task_thread_info(next)); \ } while (0) -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! 
- */ -static inline void sched_cacheflush(void) -{ -} - /* * Save the current interrupt enable state & disable IRQs */ diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h index 94ed3686a5f..609756c6167 100644 --- a/include/asm-i386/system.h +++ b/include/asm-i386/system.h @@ -310,15 +310,6 @@ void enable_hlt(void); extern int es7000_plat; void cpu_idle_wait(void); -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible: - */ -static inline void sched_cacheflush(void) -{ - wbinvd(); -} - extern unsigned long arch_align_stack(unsigned long sp); extern void free_init_pages(char *what, unsigned long begin, unsigned long end); diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index 384fbf7f2a0..91bb8e00066 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h @@ -259,7 +259,6 @@ extern void ia64_load_extra (struct task_struct *task); #define ia64_platform_is(x) (strcmp(x, platform_name) == 0) void cpu_idle_wait(void); -void sched_cacheflush(void); #define arch_align_stack(x) (x) diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h index 8ee73d3f316..2365de5c295 100644 --- a/include/asm-m32r/system.h +++ b/include/asm-m32r/system.h @@ -54,16 +54,6 @@ ); \ } while(0) -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - /* Interrupt Control */ #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104) #define local_irq_enable() \ diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h index 46bdb3f566f..76339165bc2 100644 --- a/include/asm-mips/system.h +++ b/include/asm-mips/system.h @@ -71,16 +71,6 @@ do { \ write_c0_userlocal(task_thread_info(current)->tp_value);\ } while(0) -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) { __u32 retval; diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h index 21fbfc5afd0..ee80c920b46 100644 --- a/include/asm-parisc/system.h +++ b/include/asm-parisc/system.h @@ -48,17 +48,6 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct * (last) = _switch_to(prev, next); \ } while(0) -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - - /* interrupt control */ #define local_save_flags(x) __asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory") #define local_irq_disable() __asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" ) diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h index 32aa42b748b..41520b7a7b7 100644 --- a/include/asm-powerpc/system.h +++ b/include/asm-powerpc/system.h @@ -184,16 +184,6 @@ struct thread_struct; extern struct task_struct *_switch(struct thread_struct *prev, struct thread_struct *next); -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! 
- */ -static inline void sched_cacheflush(void) -{ -} - extern unsigned int rtas_data; extern int mem_init_done; /* set on boot once kmalloc can be called */ extern unsigned long memory_limit; diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h index d84a3cf4d03..f1311a8f310 100644 --- a/include/asm-ppc/system.h +++ b/include/asm-ppc/system.h @@ -129,16 +129,6 @@ extern struct task_struct *__switch_to(struct task_struct *, struct task_struct *); #define switch_to(prev, next, last) ((last) = __switch_to((prev), (next))) -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - struct thread_struct; extern struct task_struct *_switch(struct thread_struct *prev, struct thread_struct *next); diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h index bbe137c3ed6..64a3cd05cae 100644 --- a/include/asm-s390/system.h +++ b/include/asm-s390/system.h @@ -97,16 +97,6 @@ static inline void restore_access_regs(unsigned int *acrs) prev = __switch_to(prev,next); \ } while (0) -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - #ifdef CONFIG_VIRT_CPU_ACCOUNTING extern void account_vtime(struct task_struct *); extern void account_tick_vtime(struct task_struct *); diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h index 7c75045ae22..24504253720 100644 --- a/include/asm-sh/system.h +++ b/include/asm-sh/system.h @@ -64,16 +64,6 @@ struct task_struct *__switch_to(struct task_struct *prev, last = __last; \ } while (0) -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - #ifdef CONFIG_CPU_SH4A #define __icbi() \ { \ diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h index 8b4e23b3bb3..d1a2572e3f5 100644 --- a/include/asm-sparc/system.h +++ b/include/asm-sparc/system.h @@ -164,16 +164,6 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr, "o0", "o1", "o2", "o3", "o7"); \ } while(0) -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - /* * Changing the IRQ level on the Sparc. */ diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h index 8ba380ec6da..409067408ee 100644 --- a/include/asm-sparc64/system.h +++ b/include/asm-sparc64/system.h @@ -204,16 +204,6 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \ } \ } while(0) -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - * - * TODO: fill this in! 
- */ -static inline void sched_cacheflush(void) -{ -} - static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val) { unsigned long tmp1, tmp2; diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h index ead9f9a5623..e4f246d62c4 100644 --- a/include/asm-x86_64/system.h +++ b/include/asm-x86_64/system.h @@ -111,15 +111,6 @@ static inline void write_cr4(unsigned long val) #define wbinvd() \ __asm__ __volatile__ ("wbinvd": : :"memory"); -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible. - */ -static inline void sched_cacheflush(void) -{ - wbinvd(); -} - #endif /* __KERNEL__ */ #define nop() __asm__ __volatile__ ("nop") -- cgit v1.2.3 From 9439aab8dbc33c2c03c3a19dba267360383ba38c Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Thu, 19 Jul 2007 21:28:35 +0200 Subject: [PATCH] sched: fix newly idle load balance in case of SMT In the presence of SMT, newly idle balance was never happening for multi-core and SMP domains (even when both the logical siblings are idle). If thread 0 is already idle and when thread 1 is about to go to idle, newly idle load balance always think that one of the threads is not idle and skips doing the newly idle load balance for multi-core and SMP domains. This is because of the idle_cpu() macro, which checks if the current process on a cpu is an idle process. But this is not the case for the thread doing the load_balance_newidle(). Fix this by using runqueue's nr_running field instead of idle_cpu(). And also skip the logic of 'only one idle cpu in the group will be doing load balancing' during newly idle case. Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- kernel/sched.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index 645256b228c..e36d99d1ddb 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2235,7 +2235,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, rq = cpu_rq(i); - if (*sd_idle && !idle_cpu(i)) + if (*sd_idle && rq->nr_running) *sd_idle = 0; /* Bias balancing toward cpus of our domain */ @@ -2257,9 +2257,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, /* * First idle cpu or the first cpu(busiest) in this sched group * is eligible for doing load balancing at this and above - * domains. + * domains. In the newly idle case, we will allow all the cpu's + * to do the newly idle load balance. */ - if (local_group && balance_cpu != this_cpu && balance) { + if (idle != CPU_NEWLY_IDLE && local_group && + balance_cpu != this_cpu && balance) { *balance = 0; goto ret; } -- cgit v1.2.3 From 969bb4e4032dac67287951d8f6642a3b5119694e Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Thu, 19 Jul 2007 21:28:35 +0200 Subject: [PATCH] sched: fix the all pinned logic in load_balance_newidle() nr_moved is not the correct check for triggering all pinned logic. Fix the all pinned logic in the case of load_balance_newidle(). 
Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- kernel/sched.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index e36d99d1ddb..a35a92ff38f 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2679,6 +2679,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) unsigned long imbalance; int nr_moved = 0; int sd_idle = 0; + int all_pinned = 0; cpumask_t cpus = CPU_MASK_ALL; /* @@ -2717,10 +2718,11 @@ redo: double_lock_balance(this_rq, busiest); nr_moved = move_tasks(this_rq, this_cpu, busiest, minus_1_or_zero(busiest->nr_running), - imbalance, sd, CPU_NEWLY_IDLE, NULL); + imbalance, sd, CPU_NEWLY_IDLE, + &all_pinned); spin_unlock(&busiest->lock); - if (!nr_moved) { + if (unlikely(all_pinned)) { cpu_clear(cpu_of(busiest), cpus); if (!cpus_empty(cpus)) goto redo; -- cgit v1.2.3 From e436d80085133858bf2613a630365e8a0459fd58 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 19 Jul 2007 21:28:35 +0200 Subject: [PATCH] sched: implement cpu_clock(cpu) high-speed time source Implement the cpu_clock(cpu) interface for kernel-internal use: high-speed (but slightly incorrect) per-cpu clock constructed from sched_clock(). This API, unused at the moment, will be used in the future by blktrace, by the softlockup-watchdog, by printk and by lockstat. Signed-off-by: Ingo Molnar --- include/linux/sched.h | 7 +++++++ kernel/sched.c | 17 +++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/include/linux/sched.h b/include/linux/sched.h index 94f624aef01..33b9b4841ee 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1348,6 +1348,13 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) #endif extern unsigned long long sched_clock(void); + +/* + * For kernel-internal use: high-speed (but slightly incorrect) per-cpu + * clock constructed from sched_clock(): + */ +extern unsigned long long cpu_clock(int cpu); + extern unsigned long long task_sched_runtime(struct task_struct *task); diff --git a/kernel/sched.c b/kernel/sched.c index a35a92ff38f..93cf241cfbe 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -379,6 +379,23 @@ static inline unsigned long long rq_clock(struct rq *rq) #define task_rq(p) cpu_rq(task_cpu(p)) #define cpu_curr(cpu) (cpu_rq(cpu)->curr) +/* + * For kernel-internal use: high-speed (but slightly incorrect) per-cpu + * clock constructed from sched_clock(): + */ +unsigned long long cpu_clock(int cpu) +{ + struct rq *rq = cpu_rq(cpu); + unsigned long long now; + unsigned long flags; + + spin_lock_irqsave(&rq->lock, flags); + now = rq_clock(rq); + spin_unlock_irqrestore(&rq->lock, flags); + + return now; +} + #ifdef CONFIG_FAIR_GROUP_SCHED /* Change a task's ->cfs_rq if it moves across CPUs */ static inline void set_task_cfs_rq(struct task_struct *p) -- cgit v1.2.3
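
A few editorial sketches follow; none of them belong to the patch series above. The first relates to "sched: fix newly idle load balance in case of SMT" and restates, as standalone C, the difference between the check find_busiest_group() used before and after the patch. The helper names are invented; cpu_rq(), rq->curr, rq->idle and rq->nr_running are the 2.6.23-era kernel/sched.c internals the changelog refers to.

/*
 * Illustration only -- invented helpers, assuming 2.6.23-era sched.c
 * internals (cpu_rq(), rq->curr, rq->idle, rq->nr_running).
 */

/* Roughly what idle_cpu() tests: is the task currently running on the
 * cpu the idle thread?  A cpu that is inside schedule() and doing
 * load_balance_newidle() still has the task that is about to block as
 * "current", so this reports the cpu as busy even though its runqueue
 * is already empty; per the changelog, that is what made the newly
 * idle balance get skipped for the multi-core and SMP domains. */
static int running_the_idle_task(int cpu)
{
	return cpu_rq(cpu)->curr == cpu_rq(cpu)->idle;
}

/* What the patch checks instead: does the runqueue hold any runnable
 * tasks?  This is already zero on the thread performing the newly idle
 * balance, so an idle sibling is correctly recognized as idle. */
static int has_runnable_tasks(int cpu)
{
	return cpu_rq(cpu)->nr_running != 0;
}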
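
The second sketch concerns "sched: fix the all pinned logic in load_balance_newidle()". It spells out, with an invented helper rather than the kernel's actual control flow, why nr_moved == 0 is the wrong trigger for the retry-without-the-busiest-cpu path and why the all_pinned flag reported by move_tasks() is the right one.

/*
 * Illustration only -- invented helper, not kernel control flow.
 * move_tasks() can move nothing for several reasons (for instance the
 * remaining imbalance is too small to shift any task); only when every
 * candidate task was rejected because of its cpu affinity does it make
 * sense to drop the busiest cpu from the candidate mask and redo the
 * search, and that is the condition move_tasks() reports through its
 * all_pinned argument.
 */
static int retry_without_busiest(int nr_moved, int all_pinned)
{
	if (nr_moved)
		return 0;	/* progress was made; no retry needed */
	return all_pinned;	/* retry only if affinity blocked everything */
}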
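
Finally, the cpu_clock() patch notes that the interface is unused at the moment. The fragment below is a hypothetical caller, sketched only to show the intended usage pattern (cheap, per-cpu, slightly inexact timestamps for tracing- or watchdog-style code); struct trace_stamp and record_stamp() are invented names.

/*
 * Hypothetical caller of the new cpu_clock() interface -- not part of
 * the patches above; trace_stamp/record_stamp are invented names.
 */
#include <linux/sched.h>	/* cpu_clock(), declared by the patch above */
#include <linux/smp.h>		/* get_cpu()/put_cpu() */

struct trace_stamp {
	unsigned long long	ns;	/* sched_clock()-based, per-cpu time */
	int			cpu;
};

static void record_stamp(struct trace_stamp *ts)
{
	int cpu = get_cpu();		/* keep the cpu stable while sampling */

	ts->cpu = cpu;
	ts->ns = cpu_clock(cpu);	/* fast, but may drift slightly between cpus */
	put_cpu();
}

The changelog lists blktrace, the softlockup watchdog, printk and lockstat as intended users; each would follow roughly this pattern of taking a timestamp on the cpu it is currently running on.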