-rw-r--r--  Documentation/cpusets.txt  |  2
-rw-r--r--  kernel/cpuset.c            |  4
-rw-r--r--  kernel/rcupreempt.c        |  2
-rw-r--r--  kernel/sched.c             |  7
-rw-r--r--  kernel/softlockup.c        | 15
5 files changed, 19 insertions, 11 deletions
diff --git a/Documentation/cpusets.txt b/Documentation/cpusets.txt
index d803c5c68ab..353504de308 100644
--- a/Documentation/cpusets.txt
+++ b/Documentation/cpusets.txt
@@ -542,7 +542,7 @@ otherwise initial value -1 that indicates the cpuset has no request.
   2  : search cores in a package.
   3  : search cpus in a node [= system wide on non-NUMA system]
  ( 4  : search nodes in a chunk of node [on NUMA system] )
- ( 5~ : search system wide [on NUMA system])
+ ( 5  : search system wide [on NUMA system] )
 
 This file is per-cpuset and affect the sched domain where the cpuset
 belongs to.  Therefore if the flag 'sched_load_balance' of a cpuset
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index bceb8955797..9fceb97e989 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1037,8 +1037,8 @@ int current_cpuset_is_being_rebound(void)
 
 static int update_relax_domain_level(struct cpuset *cs, s64 val)
 {
-	if ((int)val < 0)
-		val = -1;
+	if (val < -1 || val >= SD_LV_MAX)
+		return -EINVAL;
 
 	if (val != cs->relax_domain_level) {
 		cs->relax_domain_level = val;
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index e1cdf196a51..5e02b774070 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -217,8 +217,6 @@ long rcu_batches_completed(void)
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 
-EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
-
 void __rcu_read_lock(void)
 {
 	int idx;
diff --git a/kernel/sched.c b/kernel/sched.c
index 4a3cb061415..b048ad8a11a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6879,7 +6879,12 @@ static int default_relax_domain_level = -1;
 
 static int __init setup_relax_domain_level(char *str)
 {
-	default_relax_domain_level = simple_strtoul(str, NULL, 0);
+	unsigned long val;
+
+	val = simple_strtoul(str, NULL, 0);
+	if (val < SD_LV_MAX)
+		default_relax_domain_level = val;
+
 	return 1;
 }
 __setup("relax_domain_level=", setup_relax_domain_level);
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 01b6522fd92..c828c2339cc 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -49,12 +49,17 @@ static unsigned long get_timestamp(int this_cpu)
 	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
 }
 
-void touch_softlockup_watchdog(void)
+static void __touch_softlockup_watchdog(void)
 {
 	int this_cpu = raw_smp_processor_id();
 
 	__raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
 }
+
+void touch_softlockup_watchdog(void)
+{
+	__raw_get_cpu_var(touch_timestamp) = 0;
+}
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
 void touch_all_softlockup_watchdogs(void)
@@ -80,7 +85,7 @@ void softlockup_tick(void)
 	unsigned long now;
 
 	if (touch_timestamp == 0) {
-		touch_softlockup_watchdog();
+		__touch_softlockup_watchdog();
 		return;
 	}
 
@@ -95,7 +100,7 @@ void softlockup_tick(void)
 
 	/* do not print during early bootup: */
 	if (unlikely(system_state != SYSTEM_RUNNING)) {
-		touch_softlockup_watchdog();
+		__touch_softlockup_watchdog();
 		return;
 	}
 
@@ -214,7 +219,7 @@ static int watchdog(void *__bind_cpu)
 	sched_setscheduler(current, SCHED_FIFO, &param);
 
 	/* initialize timestamp */
-	touch_softlockup_watchdog();
+	__touch_softlockup_watchdog();
 	set_current_state(TASK_INTERRUPTIBLE);
 
 	/*
@@ -223,7 +228,7 @@ static int watchdog(void *__bind_cpu)
 	 * debug-printout triggers in softlockup_tick().
 	 */
 	while (!kthread_should_stop()) {
-		touch_softlockup_watchdog();
+		__touch_softlockup_watchdog();
 		schedule();
 
 		if (kthread_should_stop())
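
For reference on the cpuset.c and sched.c hunks above: both now treat anything outside the range [-1, SD_LV_MAX) as an invalid relax_domain_level, either returning -EINVAL (write through the cpuset interface) or ignoring the value (relax_domain_level= boot parameter). Below is a minimal stand-alone sketch of that bounds check, not kernel code; the helper name check_relax_domain_level() and the SD_LV_MAX_STANDIN value are placeholders invented here for illustration.

/*
 * Stand-alone sketch (not kernel code) of the bounds check that
 * update_relax_domain_level() and setup_relax_domain_level() now apply.
 * SD_LV_MAX_STANDIN is a placeholder for the kernel's SD_LV_MAX enumerator.
 */
#include <stdio.h>
#include <stdint.h>

#define SD_LV_MAX_STANDIN 6	/* placeholder value, assumed for this sketch */

/*
 * Mirrors the new check: -1 means "no request", non-negative values must
 * name a valid sched-domain level, everything else is rejected
 * (the kernel returns -EINVAL at this point).
 */
static int check_relax_domain_level(int64_t val)
{
	if (val < -1 || val >= SD_LV_MAX_STANDIN)
		return -1;
	return 0;
}

int main(void)
{
	const int64_t tests[] = { -2, -1, 0, 3, SD_LV_MAX_STANDIN };
	size_t i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%lld -> %s\n", (long long)tests[i],
		       check_relax_domain_level(tests[i]) ? "rejected" : "accepted");
	return 0;
}

Note the asymmetry the two hunks create: an out-of-range relax_domain_level= boot parameter is silently dropped, while an out-of-range write from user space via the cpuset interface is reported back to the caller as -EINVAL.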