Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c               | 32
-rw-r--r--  kernel/exit.c              |  2
-rw-r--r--  kernel/hrtimer.c           | 19
-rw-r--r--  kernel/irq/devres.c        |  2
-rw-r--r--  kernel/lockdep.c           |  8
-rw-r--r--  kernel/module.c            | 18
-rw-r--r--  kernel/power/disk.c        |  9
-rw-r--r--  kernel/power/swsusp.c      |  2
-rw-r--r--  kernel/power/user.c        | 12
-rw-r--r--  kernel/sched.c             | 36
-rw-r--r--  kernel/time.c              |  2
-rw-r--r--  kernel/time/clockevents.c  | 69
-rw-r--r--  kernel/time/clocksource.c  |  3
-rw-r--r--  kernel/time/jiffies.c      |  2
-rw-r--r--  kernel/time/ntp.c          | 30
-rw-r--r--  kernel/time/timer_list.c   |  6
-rw-r--r--  kernel/timer.c             | 21
17 files changed, 108 insertions(+), 165 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3d4206ada5c..36e70845cfc 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -254,6 +254,12 @@ int __cpuinit cpu_up(unsigned int cpu)
}
#ifdef CONFIG_SUSPEND_SMP
+/* Needed to prevent the microcode driver from requesting firmware in its CPU
+ * hotplug notifier during the suspend/resume.
+ */
+int suspend_cpu_hotplug;
+EXPORT_SYMBOL(suspend_cpu_hotplug);
+
static cpumask_t frozen_cpus;
int disable_nonboot_cpus(void)
@@ -261,16 +267,8 @@ int disable_nonboot_cpus(void)
int cpu, first_cpu, error = 0;
mutex_lock(&cpu_add_remove_lock);
- first_cpu = first_cpu(cpu_present_map);
- if (!cpu_online(first_cpu)) {
- error = _cpu_up(first_cpu);
- if (error) {
- printk(KERN_ERR "Could not bring CPU%d up.\n",
- first_cpu);
- goto out;
- }
- }
-
+ suspend_cpu_hotplug = 1;
+ first_cpu = first_cpu(cpu_online_map);
/* We take down all of the non-boot CPUs in one shot to avoid races
* with the userspace trying to use the CPU hotplug at the same time
*/
@@ -296,7 +294,7 @@ int disable_nonboot_cpus(void)
} else {
printk(KERN_ERR "Non-boot CPUs are not disabled\n");
}
-out:
+ suspend_cpu_hotplug = 0;
mutex_unlock(&cpu_add_remove_lock);
return error;
}
@@ -308,20 +306,22 @@ void enable_nonboot_cpus(void)
/* Allow everyone to use the CPU hotplug again */
mutex_lock(&cpu_add_remove_lock);
cpu_hotplug_disabled = 0;
- mutex_unlock(&cpu_add_remove_lock);
if (cpus_empty(frozen_cpus))
- return;
+ goto out;
+ suspend_cpu_hotplug = 1;
printk("Enabling non-boot CPUs ...\n");
for_each_cpu_mask(cpu, frozen_cpus) {
- error = cpu_up(cpu);
+ error = _cpu_up(cpu);
if (!error) {
printk("CPU%d is up\n", cpu);
continue;
}
- printk(KERN_WARNING "Error taking CPU%d up: %d\n",
- cpu, error);
+ printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
}
cpus_clear(frozen_cpus);
+ suspend_cpu_hotplug = 0;
+out:
+ mutex_unlock(&cpu_add_remove_lock);
}
#endif
diff --git a/kernel/exit.c b/kernel/exit.c
index f132349c032..b55ed4cc910 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -790,7 +790,7 @@ static void exit_notify(struct task_struct *tsk)
pgrp = task_pgrp(tsk);
if ((task_pgrp(t) != pgrp) &&
- (task_session(t) != task_session(tsk)) &&
+ (task_session(t) == task_session(tsk)) &&
will_become_orphaned_pgrp(pgrp, tsk) &&
has_stopped_jobs(pgrp)) {
__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 6a7938a0d51..b74860aaf5f 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -459,6 +459,18 @@ void clock_was_set(void)
}
/*
+ * During resume we might have to reprogram the high resolution timer
+ * interrupt (on the local CPU):
+ */
+void hres_timers_resume(void)
+{
+ WARN_ON_ONCE(num_online_cpus() > 1);
+
+ /* Retrigger the CPU local events: */
+ retrigger_next_event(NULL);
+}
+
+/*
* Check, whether the timer is on the callback pending list
*/
static inline int hrtimer_cb_pending(const struct hrtimer *timer)
@@ -814,7 +826,12 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
timer_stats_hrtimer_set_start_info(timer);
- enqueue_hrtimer(timer, new_base, base == new_base);
+ /*
+ * Only allow reprogramming if the new base is on this CPU.
+ * (it might still be on another CPU if the timer was pending)
+ */
+ enqueue_hrtimer(timer, new_base,
+ new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
unlock_hrtimer_base(timer, &flags);
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index 85a430da0fb..d8ee241115f 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -54,7 +54,7 @@ int devm_request_irq(struct device *dev, unsigned int irq,
rc = request_irq(irq, handler, irqflags, devname, dev_id);
if (rc) {
- kfree(dr);
+ devres_free(dr);
return rc;
}
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8dc24c92dc6..7065a687ac5 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2742,6 +2742,10 @@ void debug_show_all_locks(void)
int count = 10;
int unlock = 1;
+ if (unlikely(!debug_locks)) {
+ printk("INFO: lockdep is turned off.\n");
+ return;
+ }
printk("\nShowing all locks held in the system:\n");
/*
@@ -2785,6 +2789,10 @@ EXPORT_SYMBOL_GPL(debug_show_all_locks);
void debug_show_held_locks(struct task_struct *task)
{
+ if (unlikely(!debug_locks)) {
+ printk("INFO: lockdep is turned off.\n");
+ return;
+ }
lockdep_print_held_locks(task);
}
diff --git a/kernel/module.c b/kernel/module.c
index fbc51de6444..dcdb32b8b13 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2384,8 +2384,13 @@ void module_add_driver(struct module *mod, struct device_driver *drv)
/* Lookup built-in module entry in /sys/modules */
mkobj = kset_find_obj(&module_subsys.kset, drv->mod_name);
- if (mkobj)
+ if (mkobj) {
mk = container_of(mkobj, struct module_kobject, kobj);
+ /* remember our module structure */
+ drv->mkobj = mk;
+ /* kset_find_obj took a reference */
+ kobject_put(mkobj);
+ }
}
if (!mk)
@@ -2405,17 +2410,22 @@ EXPORT_SYMBOL(module_add_driver);
void module_remove_driver(struct device_driver *drv)
{
+ struct module_kobject *mk = NULL;
char *driver_name;
if (!drv)
return;
sysfs_remove_link(&drv->kobj, "module");
- if (drv->owner && drv->owner->mkobj.drivers_dir) {
+
+ if (drv->owner)
+ mk = &drv->owner->mkobj;
+ else if (drv->mkobj)
+ mk = drv->mkobj;
+ if (mk && mk->drivers_dir) {
driver_name = make_driver_name(drv);
if (driver_name) {
- sysfs_remove_link(drv->owner->mkobj.drivers_dir,
- driver_name);
+ sysfs_remove_link(mk->drivers_dir, driver_name);
kfree(driver_name);
}
}
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 873cdf8ea5a..aec19b063e3 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -58,7 +58,6 @@ static inline int platform_prepare(void)
static void power_down(suspend_disk_method_t mode)
{
- disable_nonboot_cpus();
switch(mode) {
case PM_DISK_PLATFORM:
if (pm_ops && pm_ops->enter) {
@@ -241,18 +240,11 @@ static int software_resume(void)
goto Done;
}
- error = platform_prepare();
- if (error) {
- swsusp_free();
- goto Thaw;
- }
-
pr_debug("PM: Reading swsusp image.\n");
error = swsusp_read();
if (error) {
swsusp_free();
- platform_finish();
goto Thaw;
}
@@ -270,7 +262,6 @@ static int software_resume(void)
enable_nonboot_cpus();
Free:
swsusp_free();
- platform_finish();
device_resume();
resume_console();
Thaw:
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 7fb834397a0..175370824f3 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -229,13 +229,13 @@ int swsusp_shrink_memory(void)
size += highmem_size;
for_each_zone (zone)
if (populated_zone(zone)) {
+ tmp += snapshot_additional_pages(zone);
if (is_highmem(zone)) {
highmem_size -=
zone_page_state(zone, NR_FREE_PAGES);
} else {
tmp -= zone_page_state(zone, NR_FREE_PAGES);
tmp += zone->lowmem_reserve[ZONE_NORMAL];
- tmp += snapshot_additional_pages(zone);
}
}
diff --git a/kernel/power/user.c b/kernel/power/user.c
index d6a8dcc26ae..7cf6713b232 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -368,9 +368,12 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
if (error) {
printk(KERN_ERR "Failed to suspend some devices.\n");
} else {
- /* Enter S3, system is already frozen */
- suspend_enter(PM_SUSPEND_MEM);
-
+ error = disable_nonboot_cpus();
+ if (!error) {
+ /* Enter S3, system is already frozen */
+ suspend_enter(PM_SUSPEND_MEM);
+ enable_nonboot_cpus();
+ }
/* Wake up devices */
device_resume();
}
@@ -398,10 +401,9 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
case PMOPS_ENTER:
if (data->platform_suspend) {
- disable_nonboot_cpus();
kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
error = pm_ops->enter(PM_SUSPEND_DISK);
- enable_nonboot_cpus();
+ error = 0;
}
break;
diff --git a/kernel/sched.c b/kernel/sched.c
index a4ca632c477..b9a68373014 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4687,32 +4687,10 @@ out_unlock:
return retval;
}
-static inline struct task_struct *eldest_child(struct task_struct *p)
-{
- if (list_empty(&p->children))
- return NULL;
- return list_entry(p->children.next,struct task_struct,sibling);
-}
-
-static inline struct task_struct *older_sibling(struct task_struct *p)
-{
- if (p->sibling.prev==&p->parent->children)
- return NULL;
- return list_entry(p->sibling.prev,struct task_struct,sibling);
-}
-
-static inline struct task_struct *younger_sibling(struct task_struct *p)
-{
- if (p->sibling.next==&p->parent->children)
- return NULL;
- return list_entry(p->sibling.next,struct task_struct,sibling);
-}
-
static const char stat_nam[] = "RSDTtZX";
static void show_task(struct task_struct *p)
{
- struct task_struct *relative;
unsigned long free = 0;
unsigned state;
@@ -4738,19 +4716,7 @@ static void show_task(struct task_struct *p)
free = (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif
- printk("%5lu %5d %6d ", free, p->pid, p->parent->pid);
- if ((relative = eldest_child(p)))
- printk("%5d ", relative->pid);
- else
- printk(" ");
- if ((relative = younger_sibling(p)))
- printk("%7d", relative->pid);
- else
- printk(" ");
- if ((relative = older_sibling(p)))
- printk(" %5d", relative->pid);
- else
- printk(" ");
+ printk("%5lu %5d %6d", free, p->pid, p->parent->pid);
if (!p->mm)
printk(" (L-TLB)\n");
else
diff --git a/kernel/time.c b/kernel/time.c
index c6c80ea5d0e..2f47888e46c 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -635,6 +635,7 @@ timeval_to_jiffies(const struct timeval *value)
(((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
(USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
+EXPORT_SYMBOL(timeval_to_jiffies);
void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
{
@@ -649,6 +650,7 @@ void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
tv_usec /= NSEC_PER_USEC;
value->tv_usec = tv_usec;
}
+EXPORT_SYMBOL(jiffies_to_timeval);
/*
* Convert jiffies/jiffies_64 to clock_t and back.
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 67932ea78c1..76212b2a99d 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -274,72 +274,3 @@ void clockevents_notify(unsigned long reason, void *arg)
}
EXPORT_SYMBOL_GPL(clockevents_notify);
-#ifdef CONFIG_SYSFS
-
-/**
- * clockevents_show_registered - sysfs interface for listing clockevents
- * @dev: unused
- * @buf: char buffer to be filled with clock events list
- *
- * Provides sysfs interface for listing registered clock event devices
- */
-static ssize_t clockevents_show_registered(struct sys_device *dev, char *buf)
-{
- struct list_head *tmp;
- char *p = buf;
- int cpu;
-
- spin_lock(&clockevents_lock);
-
- list_for_each(tmp, &clockevent_devices) {
- struct clock_event_device *ce;
-
- ce = list_entry(tmp, struct clock_event_device, list);
- p += sprintf(p, "%-20s F:%04x M:%d", ce->name,
- ce->features, ce->mode);
- p += sprintf(p, " C:");
- if (!cpus_equal(ce->cpumask, cpu_possible_map)) {
- for_each_cpu_mask(cpu, ce->cpumask)
- p += sprintf(p, " %d", cpu);
- } else {
- /*
- * FIXME: Add the cpu which is handling this sucker
- */
- }
- p += sprintf(p, "\n");
- }
-
- spin_unlock(&clockevents_lock);
-
- return p - buf;
-}
-
-/*
- * Sysfs setup bits:
- */
-static SYSDEV_ATTR(registered, 0600,
- clockevents_show_registered, NULL);
-
-static struct sysdev_class clockevents_sysclass = {
- set_kset_name("clockevents"),
-};
-
-static struct sys_device clockevents_sys_device = {
- .id = 0,
- .cls = &clockevents_sysclass,
-};
-
-static int __init clockevents_sysfs_init(void)
-{
- int error = sysdev_class_register(&clockevents_sysclass);
-
- if (!error)
- error = sysdev_register(&clockevents_sys_device);
- if (!error)
- error = sysdev_create_file(
- &clockevents_sys_device,
- &attr_registered);
- return error;
-}
-device_initcall(clockevents_sysfs_init);
-#endif
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 5b0e46b56fd..fe5c7db2424 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -151,7 +151,8 @@ static void clocksource_check_watchdog(struct clocksource *cs)
watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
add_timer(&watchdog_timer);
}
- } else if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) {
+ } else {
+ if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
if (!watchdog || cs->rating > watchdog->rating) {
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 3be8da8fed7..4c256fdb887 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -69,4 +69,4 @@ static int __init init_jiffies_clocksource(void)
return clocksource_register(&clocksource_jiffies);
}
-module_init(init_jiffies_clocksource);
+core_initcall(init_jiffies_clocksource);
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index eb12509e00b..cb25649c6f5 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -32,7 +32,7 @@ static u64 tick_length, tick_length_base;
/* TIME_ERROR prevents overwriting the CMOS clock */
static int time_state = TIME_OK; /* clock synchronization status */
int time_status = STA_UNSYNC; /* clock status bits */
-static long time_offset; /* time adjustment (ns) */
+static s64 time_offset; /* time adjustment (ns) */
static long time_constant = 2; /* pll time constant */
long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */
long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */
@@ -196,7 +196,7 @@ void __attribute__ ((weak)) notify_arch_cmos_timer(void)
*/
int do_adjtimex(struct timex *txc)
{
- long ltemp, mtemp, save_adjust;
+ long mtemp, save_adjust, rem;
s64 freq_adj, temp64;
int result;
@@ -277,14 +277,14 @@ int do_adjtimex(struct timex *txc)
time_adjust = txc->offset;
}
else if (time_status & STA_PLL) {
- ltemp = txc->offset * NSEC_PER_USEC;
+ time_offset = txc->offset * NSEC_PER_USEC;
/*
* Scale the phase adjustment and
* clamp to the operating range.
*/
- time_offset = min(ltemp, MAXPHASE * NSEC_PER_USEC);
- time_offset = max(time_offset, -MAXPHASE * NSEC_PER_USEC);
+ time_offset = min(time_offset, (s64)MAXPHASE * NSEC_PER_USEC);
+ time_offset = max(time_offset, (s64)-MAXPHASE * NSEC_PER_USEC);
/*
* Select whether the frequency is to be controlled
@@ -297,11 +297,11 @@ int do_adjtimex(struct timex *txc)
mtemp = xtime.tv_sec - time_reftime;
time_reftime = xtime.tv_sec;
- freq_adj = (s64)time_offset * mtemp;
+ freq_adj = time_offset * mtemp;
freq_adj = shift_right(freq_adj, time_constant * 2 +
(SHIFT_PLL + 2) * 2 - SHIFT_NSEC);
if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
- temp64 = (s64)time_offset << (SHIFT_NSEC - SHIFT_FLL);
+ temp64 = time_offset << (SHIFT_NSEC - SHIFT_FLL);
if (time_offset < 0) {
temp64 = -temp64;
do_div(temp64, mtemp);
@@ -314,8 +314,10 @@ int do_adjtimex(struct timex *txc)
freq_adj += time_freq;
freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC);
time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC);
- time_offset = (time_offset / NTP_INTERVAL_FREQ)
- << SHIFT_UPDATE;
+ time_offset = div_long_long_rem_signed(time_offset,
+ NTP_INTERVAL_FREQ,
+ &rem);
+ time_offset <<= SHIFT_UPDATE;
} /* STA_PLL */
} /* txc->modes & ADJ_OFFSET */
if (txc->modes & ADJ_TICK)
@@ -328,12 +330,12 @@ leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
result = TIME_ERROR;
if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
- txc->offset = save_adjust;
+ txc->offset = save_adjust;
else
- txc->offset = shift_right(time_offset, SHIFT_UPDATE)
- * NTP_INTERVAL_FREQ / 1000;
- txc->freq = (time_freq / NSEC_PER_USEC)
- << (SHIFT_USEC - SHIFT_NSEC);
+ txc->offset = ((long)shift_right(time_offset, SHIFT_UPDATE)) *
+ NTP_INTERVAL_FREQ / 1000;
+ txc->freq = (time_freq / NSEC_PER_USEC) <<
+ (SHIFT_USEC - SHIFT_NSEC);
txc->maxerror = time_maxerror;
txc->esterror = time_esterror;
txc->status = time_status;
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index f82c635c3d5..59df5e8555a 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -194,9 +194,9 @@ print_tickdevice(struct seq_file *m, struct tick_device *td)
return;
}
SEQ_printf(m, "%s\n", dev->name);
- SEQ_printf(m, " max_delta_ns: %ld\n", dev->max_delta_ns);
- SEQ_printf(m, " min_delta_ns: %ld\n", dev->min_delta_ns);
- SEQ_printf(m, " mult: %ld\n", dev->mult);
+ SEQ_printf(m, " max_delta_ns: %lu\n", dev->max_delta_ns);
+ SEQ_printf(m, " min_delta_ns: %lu\n", dev->min_delta_ns);
+ SEQ_printf(m, " mult: %lu\n", dev->mult);
SEQ_printf(m, " shift: %d\n", dev->shift);
SEQ_printf(m, " mode: %d\n", dev->mode);
SEQ_printf(m, " next_event: %Ld nsecs\n",
diff --git a/kernel/timer.c b/kernel/timer.c
index 797cccb8643..dd6c2c1c561 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -695,15 +695,28 @@ static unsigned long cmp_next_hrtimer_event(unsigned long now,
{
ktime_t hr_delta = hrtimer_get_next_event();
struct timespec tsdelta;
+ unsigned long delta;
if (hr_delta.tv64 == KTIME_MAX)
return expires;
- if (hr_delta.tv64 <= TICK_NSEC)
- return now;
+ /*
+ * Expired timer available, let it expire in the next tick
+ */
+ if (hr_delta.tv64 <= 0)
+ return now + 1;
tsdelta = ktime_to_timespec(hr_delta);
- now += timespec_to_jiffies(&tsdelta);
+ delta = timespec_to_jiffies(&tsdelta);
+ /*
+ * Take rounding errors in to account and make sure, that it
+ * expires in the next tick. Otherwise we go into an endless
+ * ping pong due to tick_nohz_stop_sched_tick() retriggering
+ * the timer softirq
+ */
+ if (delta < 1)
+ delta = 1;
+ now += delta;
if (time_before(now, expires))
return now;
return expires;
@@ -1003,7 +1016,7 @@ static int timekeeping_resume(struct sys_device *dev)
clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
/* Resume hrtimers */
- clock_was_set();
+ hres_timers_resume();
return 0;
}