From 490dea45d00f01847ebebd007685d564aaf2cd98 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 24 Nov 2008 17:06:57 +0100 Subject: itimers: remove the per-cpu-ish-ness Either we bounce one cacheline per cpu per tick, yielding n^2 bounces, or we just bounce a single one. Also, using per-cpu allocations for the thread-groups complicates the per-cpu allocator in that it's currently aimed to be a fixed-size allocator and the only possible extension to that would be vmap based, which is seriously constrained on 32-bit archs. So making the per-cpu memory requirement depend on the number of processes is an issue. Lastly, it didn't deal with cpu-hotplug, although admittedly that might be fixable. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- include/linux/init_task.h | 6 ++++ include/linux/sched.h | 29 ++++++++++++-------- kernel/fork.c | 15 +++++----- kernel/posix-cpu-timers.c | 70 ----------------------------------------------- kernel/sched_stats.h | 33 ++++++++++------------ 5 files changed, 46 insertions(+), 107 deletions(-) diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 2f3c2d4ef73..ea0ea1a4c36 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -48,6 +48,12 @@ extern struct fs_struct init_fs; .posix_timers = LIST_HEAD_INIT(sig.posix_timers), \ .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \ .rlim = INIT_RLIMITS, \ + .cputime = { .totals = { \ + .utime = cputime_zero, \ + .stime = cputime_zero, \ + .sum_exec_runtime = 0, \ + .lock = __SPIN_LOCK_UNLOCKED(sig.cputime.totals.lock), \ + }, }, \ } extern struct nsproxy init_nsproxy; diff --git a/include/linux/sched.h b/include/linux/sched.h index 4cae9b81a1f..c20943eabb4 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -450,6 +450,7 @@ struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; + spinlock_t lock; }; /* Alternate field names when used to cache expirations. */ #define prof_exp stime @@ -465,7 +466,7 @@ struct task_cputime { * used for thread group CPU clock calculations. */ struct thread_group_cputime { - struct task_cputime *totals; + struct task_cputime totals; }; /* @@ -2180,24 +2181,30 @@ static inline int spin_needbreak(spinlock_t *lock) * Thread group CPU time accounting.
*/ -extern int thread_group_cputime_alloc(struct task_struct *); -extern void thread_group_cputime(struct task_struct *, struct task_cputime *); - -static inline void thread_group_cputime_init(struct signal_struct *sig) +static inline +void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) { - sig->cputime.totals = NULL; + struct task_cputime *totals = &tsk->signal->cputime.totals; + unsigned long flags; + + spin_lock_irqsave(&totals->lock, flags); + *times = *totals; + spin_unlock_irqrestore(&totals->lock, flags); } -static inline int thread_group_cputime_clone_thread(struct task_struct *curr) +static inline void thread_group_cputime_init(struct signal_struct *sig) { - if (curr->signal->cputime.totals) - return 0; - return thread_group_cputime_alloc(curr); + sig->cputime.totals = (struct task_cputime){ + .utime = cputime_zero, + .stime = cputime_zero, + .sum_exec_runtime = 0, + }; + + spin_lock_init(&sig->cputime.totals.lock); } static inline void thread_group_cputime_free(struct signal_struct *sig) { - free_percpu(sig->cputime.totals); } /* diff --git a/kernel/fork.c b/kernel/fork.c index 7b8f2a78be3..7087d8c0e5e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -820,14 +820,15 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) int ret; if (clone_flags & CLONE_THREAD) { - ret = thread_group_cputime_clone_thread(current); - if (likely(!ret)) { - atomic_inc(&current->signal->count); - atomic_inc(&current->signal->live); - } - return ret; + atomic_inc(&current->signal->count); + atomic_inc(&current->signal->live); + return 0; } sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); + + if (sig) + posix_cpu_timers_init_group(sig); + tsk->signal = sig; if (!sig) return -ENOMEM; @@ -864,8 +865,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); task_unlock(current->group_leader); - posix_cpu_timers_init_group(sig); - acct_init_pacct(&sig->pacct); tty_audit_fork(sig); diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 157de3a4783..fa07da94d7b 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -9,76 +9,6 @@ #include #include -/* - * Allocate the thread_group_cputime structure appropriately and fill in the - * current values of the fields. Called from copy_signal() via - * thread_group_cputime_clone_thread() when adding a second or subsequent - * thread to a thread group. Assumes interrupts are enabled when called. - */ -int thread_group_cputime_alloc(struct task_struct *tsk) -{ - struct signal_struct *sig = tsk->signal; - struct task_cputime *cputime; - - /* - * If we have multiple threads and we don't already have a - * per-CPU task_cputime struct (checked in the caller), allocate - * one and fill it in with the times accumulated so far. We may - * race with another thread so recheck after we pick up the sighand - * lock. - */ - cputime = alloc_percpu(struct task_cputime); - if (cputime == NULL) - return -ENOMEM; - spin_lock_irq(&tsk->sighand->siglock); - if (sig->cputime.totals) { - spin_unlock_irq(&tsk->sighand->siglock); - free_percpu(cputime); - return 0; - } - sig->cputime.totals = cputime; - cputime = per_cpu_ptr(sig->cputime.totals, smp_processor_id()); - cputime->utime = tsk->utime; - cputime->stime = tsk->stime; - cputime->sum_exec_runtime = tsk->se.sum_exec_runtime; - spin_unlock_irq(&tsk->sighand->siglock); - return 0; -} - -/** - * thread_group_cputime - Sum the thread group time fields across all CPUs.
- * - * @tsk: The task we use to identify the thread group. - * @times: task_cputime structure in which we return the summed fields. - * - * Walk the list of CPUs to sum the per-CPU time fields in the thread group - * time structure. - */ -void thread_group_cputime( - struct task_struct *tsk, - struct task_cputime *times) -{ - struct task_cputime *totals, *tot; - int i; - - totals = tsk->signal->cputime.totals; - if (!totals) { - times->utime = tsk->utime; - times->stime = tsk->stime; - times->sum_exec_runtime = tsk->se.sum_exec_runtime; - return; - } - - times->stime = times->utime = cputime_zero; - times->sum_exec_runtime = 0; - for_each_possible_cpu(i) { - tot = per_cpu_ptr(totals, i); - times->utime = cputime_add(times->utime, tot->utime); - times->stime = cputime_add(times->stime, tot->stime); - times->sum_exec_runtime += tot->sum_exec_runtime; - } -} - /* * Called after updating RLIMIT_CPU to set timer expiration if necessary. */ diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index f2773b5d122..8ab0cef8eca 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h @@ -296,6 +296,7 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next) static inline void account_group_user_time(struct task_struct *tsk, cputime_t cputime) { + struct task_cputime *times; struct signal_struct *sig; /* tsk == current, ensure it is safe to use ->signal */ @@ -303,13 +304,11 @@ static inline void account_group_user_time(struct task_struct *tsk, return; sig = tsk->signal; - if (sig->cputime.totals) { - struct task_cputime *times; + times = &sig->cputime.totals; - times = per_cpu_ptr(sig->cputime.totals, get_cpu()); - times->utime = cputime_add(times->utime, cputime); - put_cpu_no_resched(); - } + spin_lock(&times->lock); + times->utime = cputime_add(times->utime, cputime); + spin_unlock(&times->lock); } /** @@ -325,6 +324,7 @@ static inline void account_group_user_time(struct task_struct *tsk, static inline void account_group_system_time(struct task_struct *tsk, cputime_t cputime) { + struct task_cputime *times; struct signal_struct *sig; /* tsk == current, ensure it is safe to use ->signal */ @@ -332,13 +332,11 @@ static inline void account_group_system_time(struct task_struct *tsk, return; sig = tsk->signal; - if (sig->cputime.totals) { - struct task_cputime *times; + times = &sig->cputime.totals; - times = per_cpu_ptr(sig->cputime.totals, get_cpu()); - times->stime = cputime_add(times->stime, cputime); - put_cpu_no_resched(); - } + spin_lock(&times->lock); + times->stime = cputime_add(times->stime, cputime); + spin_unlock(&times->lock); } /** @@ -354,6 +352,7 @@ static inline void account_group_system_time(struct task_struct *tsk, static inline void account_group_exec_runtime(struct task_struct *tsk, unsigned long long ns) { + struct task_cputime *times; struct signal_struct *sig; sig = tsk->signal; @@ -362,11 +361,9 @@ static inline void account_group_exec_runtime(struct task_struct *tsk, if (unlikely(!sig)) return; - if (sig->cputime.totals) { - struct task_cputime *times; + times = &sig->cputime.totals; - times = per_cpu_ptr(sig->cputime.totals, get_cpu()); - times->sum_exec_runtime += ns; - put_cpu_no_resched(); - } + spin_lock(&times->lock); + times->sum_exec_runtime += ns; + spin_unlock(&times->lock); } -- cgit v1.2.3 From 783adf42cf039083dd3c734c07c3bdc707e2bb15 Mon Sep 17 00:00:00 2001 From: Steven Noonan Date: Sun, 11 Jan 2009 01:04:21 -0800 Subject: kernel/fork.c: unused variable 'ret' Removed the unused variable.
Signed-off-by: Steven Noonan Signed-off-by: Ingo Molnar --- kernel/fork.c | 1 - 1 file changed, 1 deletion(-) diff --git a/kernel/fork.c b/kernel/fork.c index e995899ea83..81da4aae85c 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -817,7 +817,6 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig) static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) { struct signal_struct *sig; - int ret; if (clone_flags & CLONE_THREAD) { atomic_inc(&current->signal->count); -- cgit v1.2.3 From 6d612b0f943289856c6e8186c564cda922cd040e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 12 Jan 2009 12:52:23 +0100 Subject: locking, hpet: annotate false positive warning Alexander Beregalov reported that this warning is caused by the HPET code: > hpet0: at MMIO 0xfed00000, IRQs 2, 8, 0 > hpet0: 3 comparators, 64-bit 14.318180 MHz counter > ODEBUG: object is on stack, but not annotated > ------------[ cut here ]------------ > WARNING: at lib/debugobjects.c:251 __debug_object_init+0x2a4/0x352() > Bisected down to 26afe5f2fbf06ea0765aaa316640c4dd472310c0 > (x86: HPET_MSI Initialise per-cpu HPET timers) The commit is fine - but the on-stack workqueue entry needs annotation. Reported-and-bisected-by: Alexander Beregalov Signed-off-by: Peter Zijlstra Tested-by: Alexander Beregalov Signed-off-by: Ingo Molnar --- arch/x86/kernel/hpet.c | 2 +- include/linux/workqueue.h | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index cd759ad9069..bb2e0f0975a 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -628,7 +628,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n, switch (action & 0xf) { case CPU_ONLINE: - INIT_DELAYED_WORK(&work.work, hpet_work); + INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work); init_completion(&work.complete); /* FIXME: add schedule_work_on() */ schedule_delayed_work_on(cpu, &work.work, 0); diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index b36291130f2..47151c8495a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -118,6 +118,12 @@ struct execute_work { init_timer(&(_work)->timer); \ } while (0) +#define INIT_DELAYED_WORK_ON_STACK(_work, _func) \ + do { \ + INIT_WORK(&(_work)->work, (_func)); \ + init_timer_on_stack(&(_work)->timer); \ + } while (0) + #define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \ do { \ INIT_WORK(&(_work)->work, (_func)); \ -- cgit v1.2.3 From 934d96eafadcf3eb3ccd094af9919f020907fc41 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 14 Jan 2009 20:38:17 +0530 Subject: tick-sched.c: tick_nohz_update_jiffies should be static Impact: cleanup, reduce kernel size a bit, avoid sparse warning Fixes sparse warning: kernel/time/tick-sched.c:137:6: warning: symbol 'tick_nohz_update_jiffies' was not declared. Should it be static? Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- kernel/time/tick-sched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 1b6c05bd0d0..d3f1ef4d5cb 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -134,7 +134,7 @@ __setup("nohz=", setup_tick_nohz); * value. We do this unconditionally on any cpu, as we don't know whether the * cpu, which has the update task assigned is in a long sleep.
*/ -void tick_nohz_update_jiffies(void) +static void tick_nohz_update_jiffies(void) { int cpu = smp_processor_id(); struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); -- cgit v1.2.3 From 1d4a7f1c4faf53eb9e822743ec8a70b3019a26d2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sun, 18 Jan 2009 16:39:29 +0100 Subject: hrtimers: fix inconsistent lock state on resume in hres_timers_resume Andrey Borzenkov reported this lockdep assert: > [17854.688347] ================================= > [17854.688347] [ INFO: inconsistent lock state ] > [17854.688347] 2.6.29-rc2-1avb #1 > [17854.688347] --------------------------------- > [17854.688347] inconsistent {in-hardirq-W} -> {hardirq-on-W} usage. > [17854.688347] pm-suspend/18240 [HC0[0]:SC0[0]:HE1:SE1] takes: > [17854.688347] (&cpu_base->lock){++..}, at: [] retrigger_next_event+0x5c/0xa0 > [17854.688347] {in-hardirq-W} state was registered at: > [17854.688347] [] __lock_acquire+0x79d/0x1930 > [17854.688347] [] lock_acquire+0x5c/0x80 > [17854.688347] [] _spin_lock+0x35/0x70 > [17854.688347] [] hrtimer_run_queues+0x31/0x140 > [17854.688347] [] run_local_timers+0x8/0x20 > [17854.688347] [] update_process_times+0x23/0x60 > [17854.688347] [] tick_periodic+0x24/0x80 > [17854.688347] [] tick_handle_periodic+0x12/0x70 > [17854.688347] [] timer_interrupt+0x14/0x20 > [17854.688347] [] handle_IRQ_event+0x29/0x60 > [17854.688347] [] handle_level_irq+0x69/0xe0 > [17854.688347] [] 0xffffffff > [17854.688347] irq event stamp: 55771 > [17854.688347] hardirqs last enabled at (55771): [] _spin_unlock_irqrestore+0x35/0x60 > [17854.688347] hardirqs last disabled at (55770): [] _spin_lock_irqsave+0x19/0x80 > [17854.688347] softirqs last enabled at (54836): [] __do_softirq+0xc4/0x110 > [17854.688347] softirqs last disabled at (54831): [] do_softirq+0x8e/0xe0 > [17854.688347] > [17854.688347] other info that might help us debug this: > [17854.688347] 3 locks held by pm-suspend/18240: > [17854.688347] #0: (&buffer->mutex){--..}, at: [] sysfs_write_file+0x25/0x100 > [17854.688347] #1: (pm_mutex){--..}, at: [] enter_state+0x4f/0x140 > [17854.688347] #2: (dpm_list_mtx){--..}, at: [] device_pm_lock+0xf/0x20 > [17854.688347] > [17854.688347] stack backtrace: > [17854.688347] Pid: 18240, comm: pm-suspend Not tainted 2.6.29-rc2-1avb #1 > [17854.688347] Call Trace: > [17854.688347] [] ? printk+0x18/0x20 > [17854.688347] [] print_usage_bug+0x16c/0x1d0 > [17854.688347] [] mark_lock+0x8bf/0xc90 > [17854.688347] [] ? pit_next_event+0x2f/0x40 > [17854.688347] [] __lock_acquire+0x580/0x1930 > [17854.688347] [] ? _spin_unlock+0x1d/0x20 > [17854.688347] [] ? pit_next_event+0x2f/0x40 > [17854.688347] [] ? clockevents_program_event+0x98/0x160 > [17854.688347] [] ? mark_held_locks+0x48/0x90 > [17854.688347] [] ? _spin_unlock_irqrestore+0x35/0x60 > [17854.688347] [] ? trace_hardirqs_on_caller+0x139/0x190 > [17854.688347] [] ? trace_hardirqs_on+0xb/0x10 > [17854.688347] [] lock_acquire+0x5c/0x80 > [17854.688347] [] ? retrigger_next_event+0x5c/0xa0 > [17854.688347] [] _spin_lock+0x35/0x70 > [17854.688347] [] ? retrigger_next_event+0x5c/0xa0 > [17854.688347] [] retrigger_next_event+0x5c/0xa0 > [17854.688347] [] hres_timers_resume+0xa/0x10 > [17854.688347] [] timekeeping_resume+0xee/0x150 > [17854.688347] [] __sysdev_resume+0x14/0x50 > [17854.688347] [] sysdev_resume+0x47/0x80 > [17854.688347] [] device_power_up+0xb/0x20 > [17854.688347] [] suspend_devices_and_enter+0xcf/0x150 > [17854.688347] [] ? 
freeze_processes+0x3f/0x90 > [17854.688347] [] enter_state+0xf4/0x140 > [17854.688347] [] state_store+0x7d/0xc0 > [17854.688347] [] ? state_store+0x0/0xc0 > [17854.688347] [] kobj_attr_store+0x24/0x30 > [17854.688347] [] sysfs_write_file+0x9c/0x100 > [17854.688347] [] vfs_write+0x9c/0x160 > [17854.688347] [] ? restore_nocheck_notrace+0x0/0xe > [17854.688347] [] ? sysfs_write_file+0x0/0x100 > [17854.688347] [] sys_write+0x3d/0x70 > [17854.688347] [] sysenter_do_call+0x12/0x31 Andrey's analysis: > timekeeping_resume() is called via class ->resume > method; and according to comments in sysdev_resume() and > device_power_up(), they are called with interrupts disabled. > > Looking at suspend_enter, irqs *are* disabled at this point. > > So it actually looks like something (may be some driver) > unconditionally enabled irqs in resume path. Add a debug check to test this theory. If it triggers then it triggers because the resume code calls it with irqs enabled, which is a no-no not just for timekeeping_resume(), but also bad for a number of other resume handlers. Reported-by: Andrey Borzenkov Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/hrtimer.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 1455b7651b6..cb83c6d4c07 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -614,7 +614,9 @@ void clock_was_set(void) */ void hres_timers_resume(void) { - /* Retrigger the CPU local events: */ + WARN_ONCE(!irqs_disabled(), + KERN_INFO "hres_timers_resume() called with IRQs enabled!"); + retrigger_next_event(NULL); } -- cgit v1.2.3
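
Note on the first patch in this series: it replaces per-CPU cputime counters (which a reader had to sum with for_each_possible_cpu()) with a single totals structure shared by the whole thread group and guarded by one spinlock, so each tick takes one short lock round-trip and a reader gets a consistent snapshot by copying the structure under the same lock. Below is a minimal userspace sketch of that locking pattern, not kernel code; the names group_cputime, account_group_time and read_group_time are invented for the illustration, and a POSIX spinlock stands in for the kernel's spinlock_t.

/*
 * Sketch of the pattern adopted by the itimers patch above: one shared
 * totals structure per thread group, guarded by a single spinlock.
 * Writers charge time under the lock; readers copy the whole structure
 * under the same lock for a consistent snapshot.  Units are arbitrary.
 * Build with: gcc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct group_cputime {
        unsigned long long utime;               /* "user" time            */
        unsigned long long stime;               /* "system" time          */
        unsigned long long sum_exec_runtime;    /* total runtime          */
        pthread_spinlock_t lock;                /* one lock, one cacheline */
};

static struct group_cputime totals;

/* Accounting path: one short lock round-trip per update ("tick"). */
static void account_group_time(unsigned long long u, unsigned long long s)
{
        pthread_spin_lock(&totals.lock);
        totals.utime += u;
        totals.stime += s;
        totals.sum_exec_runtime += u + s;
        pthread_spin_unlock(&totals.lock);
}

/* Reader: copy the structure under the lock, like "*times = *totals". */
static struct group_cputime read_group_time(void)
{
        struct group_cputime snap;

        pthread_spin_lock(&totals.lock);
        snap = totals;  /* the copied lock field in snap is never used */
        pthread_spin_unlock(&totals.lock);
        return snap;
}

int main(void)
{
        pthread_spin_init(&totals.lock, PTHREAD_PROCESS_PRIVATE);
        account_group_time(3, 1);
        account_group_time(2, 2);

        struct group_cputime snap = read_group_time();
        printf("utime=%llu stime=%llu runtime=%llu\n",
               snap.utime, snap.stime, snap.sum_exec_runtime);
        return 0;
}

The per-CPU scheme this replaces let each CPU bump its own counters without a lock, but every reader then walked all possible CPUs to sum them; with every CPU doing that walk each tick, the changelog's n^2 cacheline bounces follow. The trade-off accepted here is a shared lock (and shared cacheline) on the update path in exchange for a constant-cost read and no per-process per-cpu allocation.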