author     Paul Mackerras <paulus@samba.org>    2007-04-13 03:50:03 +1000
committer  Paul Mackerras <paulus@samba.org>    2007-04-13 03:50:03 +1000
commit     e049d1ca3094f3d1d94617f456a9961202f96e3a
tree       a30397ad22f2fbea268bd28fa69c60aad9dfa62a /kernel
parent     edfac96a92b88d3b0b53e3f8231b74beee9ecd1d
parent     80584ff3b99c36ead7e130e453b3a48b18072d18
Merge branch 'linux-2.6' into for-2.6.22
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/auditsc.c              |  24
-rw-r--r--   kernel/cpu.c                  |  32
-rw-r--r--   kernel/exit.c                 |   2
-rw-r--r--   kernel/fork.c                 |   2
-rw-r--r--   kernel/futex.c                |   2
-rw-r--r--   kernel/hrtimer.c              |  30
-rw-r--r--   kernel/irq/devres.c           |   2
-rw-r--r--   kernel/lockdep.c              |   8
-rw-r--r--   kernel/module.c               |  24
-rw-r--r--   kernel/power/console.c        |  10
-rw-r--r--   kernel/power/disk.c           |   7
-rw-r--r--   kernel/power/swsusp.c         |   2
-rw-r--r--   kernel/power/user.c           |   9
-rw-r--r--   kernel/sched.c                |  36
-rw-r--r--   kernel/sysctl.c               |   3
-rw-r--r--   kernel/time.c                 |   2
-rw-r--r--   kernel/time/clockevents.c     |  69
-rw-r--r--   kernel/time/clocksource.c     |   3
-rw-r--r--   kernel/time/jiffies.c         |   2
-rw-r--r--   kernel/time/ntp.c             |  30
-rw-r--r--   kernel/time/tick-broadcast.c  |  27
-rw-r--r--   kernel/time/tick-common.c     |  13
-rw-r--r--   kernel/time/tick-internal.h   |  11
-rw-r--r--   kernel/time/tick-oneshot.c    |  12
-rw-r--r--   kernel/time/timer_list.c      |   6
-rw-r--r--   kernel/timer.c                |  21
26 files changed, 192 insertions, 197 deletions
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 359955800dd..628c7ac590a 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -739,28 +739,26 @@ static inline void audit_free_context(struct audit_context *context)
 void audit_log_task_context(struct audit_buffer *ab)
 {
         char *ctx = NULL;
-        ssize_t len = 0;
+        unsigned len;
+        int error;
+        u32 sid;
+
+        selinux_get_task_sid(current, &sid);
+        if (!sid)
+                return;
 
-        len = security_getprocattr(current, "current", NULL, 0);
-        if (len < 0) {
-                if (len != -EINVAL)
+        error = selinux_sid_to_string(sid, &ctx, &len);
+        if (error) {
+                if (error != -EINVAL)
                         goto error_path;
                 return;
         }
 
-        ctx = kmalloc(len, GFP_KERNEL);
-        if (!ctx)
-                goto error_path;
-
-        len = security_getprocattr(current, "current", ctx, len);
-        if (len < 0 )
-                goto error_path;
-
         audit_log_format(ab, " subj=%s", ctx);
+        kfree(ctx);
         return;
 
 error_path:
-        kfree(ctx);
         audit_panic("error in audit_log_task_context");
         return;
 }
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3d4206ada5c..36e70845cfc 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -254,6 +254,12 @@ int __cpuinit cpu_up(unsigned int cpu)
 }
 
 #ifdef CONFIG_SUSPEND_SMP
+/* Needed to prevent the microcode driver from requesting firmware in its CPU
+ * hotplug notifier during the suspend/resume.
+ */
+int suspend_cpu_hotplug;
+EXPORT_SYMBOL(suspend_cpu_hotplug);
+
 static cpumask_t frozen_cpus;
 
 int disable_nonboot_cpus(void)
@@ -261,16 +267,8 @@ int disable_nonboot_cpus(void)
         int cpu, first_cpu, error = 0;
 
         mutex_lock(&cpu_add_remove_lock);
-        first_cpu = first_cpu(cpu_present_map);
-        if (!cpu_online(first_cpu)) {
-                error = _cpu_up(first_cpu);
-                if (error) {
-                        printk(KERN_ERR "Could not bring CPU%d up.\n",
-                                first_cpu);
-                        goto out;
-                }
-        }
-
+        suspend_cpu_hotplug = 1;
+        first_cpu = first_cpu(cpu_online_map);
         /* We take down all of the non-boot CPUs in one shot to avoid races
          * with the userspace trying to use the CPU hotplug at the same time
          */
@@ -296,7 +294,7 @@ int disable_nonboot_cpus(void)
         } else {
                 printk(KERN_ERR "Non-boot CPUs are not disabled\n");
         }
-out:
+        suspend_cpu_hotplug = 0;
         mutex_unlock(&cpu_add_remove_lock);
         return error;
 }
@@ -308,20 +306,22 @@ void enable_nonboot_cpus(void)
         /* Allow everyone to use the CPU hotplug again */
         mutex_lock(&cpu_add_remove_lock);
         cpu_hotplug_disabled = 0;
-        mutex_unlock(&cpu_add_remove_lock);
         if (cpus_empty(frozen_cpus))
-                return;
+                goto out;
 
+        suspend_cpu_hotplug = 1;
         printk("Enabling non-boot CPUs ...\n");
         for_each_cpu_mask(cpu, frozen_cpus) {
-                error = cpu_up(cpu);
+                error = _cpu_up(cpu);
                 if (!error) {
                         printk("CPU%d is up\n", cpu);
                         continue;
                 }
-                printk(KERN_WARNING "Error taking CPU%d up: %d\n",
-                        cpu, error);
+                printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
         }
         cpus_clear(frozen_cpus);
+        suspend_cpu_hotplug = 0;
+out:
+        mutex_unlock(&cpu_add_remove_lock);
 }
 #endif
diff --git a/kernel/exit.c b/kernel/exit.c
index f132349c032..b55ed4cc910 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -790,7 +790,7 @@ static void exit_notify(struct task_struct *tsk)
 
         pgrp = task_pgrp(tsk);
         if ((task_pgrp(t) != pgrp) &&
-            (task_session(t) != task_session(tsk)) &&
+            (task_session(t) == task_session(tsk)) &&
             will_become_orphaned_pgrp(pgrp, tsk) &&
             has_stopped_jobs(pgrp)) {
                 __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
diff --git a/kernel/fork.c b/kernel/fork.c
index d154cc78648..6af959c034d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -933,8 +933,8 @@ asmlinkage long sys_set_tid_address(int __user *tidptr)
 
 static inline void rt_mutex_init_task(struct task_struct *p)
 {
-#ifdef CONFIG_RT_MUTEXES
         spin_lock_init(&p->pi_lock);
+#ifdef CONFIG_RT_MUTEXES
         plist_head_init(&p->pi_waiters, &p->pi_lock);
         p->pi_blocked_on = NULL;
 #endif
diff --git a/kernel/futex.c b/kernel/futex.c
index e749e7df14b..5a270b5e3f9 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -565,6 +565,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
         if (!pi_state)
                 return -EINVAL;
 
+        spin_lock(&pi_state->pi_mutex.wait_lock);
         new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
 
         /*
@@ -604,6 +605,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
         pi_state->owner = new_owner;
         spin_unlock_irq(&new_owner->pi_lock);
 
+        spin_unlock(&pi_state->pi_mutex.wait_lock);
         rt_mutex_unlock(&pi_state->pi_mutex);
 
         return 0;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ec4cb9f3e3b..b74860aaf5f 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -135,7 +135,7 @@ EXPORT_SYMBOL_GPL(ktime_get_ts);
 static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
 {
         ktime_t xtim, tomono;
-        struct timespec xts;
+        struct timespec xts, tom;
         unsigned long seq;
 
         do {
@@ -145,10 +145,11 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
 #else
                 xts = xtime;
 #endif
+                tom = wall_to_monotonic;
         } while (read_seqretry(&xtime_lock, seq));
 
         xtim = timespec_to_ktime(xts);
-        tomono = timespec_to_ktime(wall_to_monotonic);
+        tomono = timespec_to_ktime(tom);
         base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
         base->clock_base[CLOCK_MONOTONIC].softirq_time =
                 ktime_add(xtim, tomono);
@@ -458,6 +459,18 @@ void clock_was_set(void)
 }
 
 /*
+ * During resume we might have to reprogram the high resolution timer
+ * interrupt (on the local CPU):
+ */
+void hres_timers_resume(void)
+{
+        WARN_ON_ONCE(num_online_cpus() > 1);
+
+        /* Retrigger the CPU local events: */
+        retrigger_next_event(NULL);
+}
+
+/*
  * Check, whether the timer is on the callback pending list
  */
 static inline int hrtimer_cb_pending(const struct hrtimer *timer)
@@ -644,6 +657,12 @@ hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
                 orun++;
         }
         timer->expires = ktime_add(timer->expires, interval);
+        /*
+         * Make sure, that the result did not wrap with a very large
+         * interval.
+         */
+        if (timer->expires.tv64 < 0)
+                timer->expires = ktime_set(KTIME_SEC_MAX, 0);
 
         return orun;
 }
@@ -807,7 +826,12 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 
         timer_stats_hrtimer_set_start_info(timer);
 
-        enqueue_hrtimer(timer, new_base, base == new_base);
+        /*
+         * Only allow reprogramming if the new base is on this CPU.
+         * (it might still be on another CPU if the timer was pending)
+         */
+        enqueue_hrtimer(timer, new_base,
+                        new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
 
         unlock_hrtimer_base(timer, &flags);
 
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index 85a430da0fb..d8ee241115f 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -54,7 +54,7 @@ int devm_request_irq(struct device *dev, unsigned int irq,
 
         rc = request_irq(irq, handler, irqflags, devname, dev_id);
         if (rc) {
-                kfree(dr);
+                devres_free(dr);
                 return rc;
         }
 
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8dc24c92dc6..7065a687ac5 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2742,6 +2742,10 @@ void debug_show_all_locks(void)
         int count = 10;
         int unlock = 1;
 
+        if (unlikely(!debug_locks)) {
+                printk("INFO: lockdep is turned off.\n");
+                return;
+        }
         printk("\nShowing all locks held in the system:\n");
 
         /*
@@ -2785,6 +2789,10 @@ EXPORT_SYMBOL_GPL(debug_show_all_locks);
 
 void debug_show_held_locks(struct task_struct *task)
 {
+        if (unlikely(!debug_locks)) {
+                printk("INFO: lockdep is turned off.\n");
+                return;
+        }
         lockdep_print_held_locks(task);
 }
 
diff --git a/kernel/module.c b/kernel/module.c
index f77e893e462..dcdb32b8b13 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2384,8 +2384,13 @@ void module_add_driver(struct module *mod, struct device_driver *drv)
 
                 /* Lookup built-in module entry in /sys/modules */
                 mkobj = kset_find_obj(&module_subsys.kset, drv->mod_name);
-                if (mkobj)
+                if (mkobj) {
                         mk = container_of(mkobj, struct module_kobject, kobj);
+                        /* remember our module structure */
+                        drv->mkobj = mk;
+                        /* kset_find_obj took a reference */
+                        kobject_put(mkobj);
+                }
         }
 
         if (!mk)
@@ -2405,26 +2410,25 @@ EXPORT_SYMBOL(module_add_driver);
 
 void module_remove_driver(struct device_driver *drv)
 {
+        struct module_kobject *mk = NULL;
         char *driver_name;
 
         if (!drv)
                 return;
 
         sysfs_remove_link(&drv->kobj, "module");
-        if (drv->owner && drv->owner->mkobj.drivers_dir) {
+
+        if (drv->owner)
+                mk = &drv->owner->mkobj;
+        else if (drv->mkobj)
+                mk = drv->mkobj;
+        if (mk && mk->drivers_dir) {
                 driver_name = make_driver_name(drv);
                 if (driver_name) {
-                        sysfs_remove_link(drv->owner->mkobj.drivers_dir,
-                                          driver_name);
+                        sysfs_remove_link(mk->drivers_dir, driver_name);
                         kfree(driver_name);
                 }
         }
-        /*
-         * Undo the additional reference we added in module_add_driver()
-         * via kset_find_obj()
-         */
-        if (drv->mod_name)
-                kobject_put(&drv->kobj);
 }
 EXPORT_SYMBOL(module_remove_driver);
 #endif
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 623786d4415..89bcf4973ee 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -27,7 +27,15 @@ int pm_prepare_console(void)
                 return 1;
         }
 
-        set_console(SUSPEND_CONSOLE);
+        if (set_console(SUSPEND_CONSOLE)) {
+                /*
+                 * We're unable to switch to the SUSPEND_CONSOLE.
+                 * Let the calling function know so it can decide
+                 * what to do.
+                 */
+                release_console_sem();
+                return 1;
+        }
         release_console_sem();
 
         if (vt_waitactive(SUSPEND_CONSOLE)) {
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 406b20adb27..aec19b063e3 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -240,12 +240,6 @@ static int software_resume(void)
                 goto Done;
         }
 
-        error = platform_prepare();
-        if (error) {
-                swsusp_free();
-                goto Thaw;
-        }
-
         pr_debug("PM: Reading swsusp image.\n");
 
         error = swsusp_read();
@@ -268,7 +262,6 @@ static int software_resume(void)
         enable_nonboot_cpus();
  Free:
         swsusp_free();
-        platform_finish();
         device_resume();
         resume_console();
  Thaw:
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 7fb834397a0..175370824f3 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -229,13 +229,13 @@ int swsusp_shrink_memory(void)
                 size += highmem_size;
                 for_each_zone (zone)
                         if (populated_zone(zone)) {
+                                tmp += snapshot_additional_pages(zone);
                                 if (is_highmem(zone)) {
                                         highmem_size -=
                                         zone_page_state(zone, NR_FREE_PAGES);
                                 } else {
                                         tmp -= zone_page_state(zone, NR_FREE_PAGES);
                                         tmp += zone->lowmem_reserve[ZONE_NORMAL];
-                                        tmp += snapshot_additional_pages(zone);
                                 }
                         }
 
diff --git a/kernel/power/user.c b/kernel/power/user.c
index dd09efe7df5..7cf6713b232 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -368,9 +368,12 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
                 if (error) {
                         printk(KERN_ERR "Failed to suspend some devices.\n");
                 } else {
-                        /* Enter S3, system is already frozen */
-                        suspend_enter(PM_SUSPEND_MEM);
-
+                        error = disable_nonboot_cpus();
+                        if (!error) {
+                                /* Enter S3, system is already frozen */
+                                suspend_enter(PM_SUSPEND_MEM);
+                                enable_nonboot_cpus();
+                        }
                         /* Wake up devices */
                         device_resume();
                 }
diff --git a/kernel/sched.c b/kernel/sched.c
index a4ca632c477..b9a68373014 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4687,32 +4687,10 @@ out_unlock:
         return retval;
 }
 
-static inline struct task_struct *eldest_child(struct task_struct *p)
-{
-        if (list_empty(&p->children))
-                return NULL;
-        return list_entry(p->children.next,struct task_struct,sibling);
-}
-
-static inline struct task_struct *older_sibling(struct task_struct *p)
-{
-        if (p->sibling.prev==&p->parent->children)
-                return NULL;
-        return list_entry(p->sibling.prev,struct task_struct,sibling);
-}
-
-static inline struct task_struct *younger_sibling(struct task_struct *p)
-{
-        if (p->sibling.next==&p->parent->children)
-                return NULL;
-        return list_entry(p->sibling.next,struct task_struct,sibling);
-}
-
 static const char stat_nam[] = "RSDTtZX";
 
 static void show_task(struct task_struct *p)
 {
-        struct task_struct *relative;
         unsigned long free = 0;
         unsigned state;
 
@@ -4738,19 +4716,7 @@ static void show_task(struct task_struct *p)
                 free = (unsigned long)n - (unsigned long)end_of_stack(p);
         }
 #endif
-        printk("%5lu %5d %6d ", free, p->pid, p->parent->pid);
-        if ((relative = eldest_child(p)))
-                printk("%5d ", relative->pid);
-        else
-                printk("      ");
-        if ((relative = younger_sibling(p)))
-                printk("%7d", relative->pid);
-        else
-                printk("       ");
-        if ((relative = older_sibling(p)))
-                printk(" %5d", relative->pid);
-        else
-                printk("      ");
+        printk("%5lu %5d %6d", free, p->pid, p->parent->pid);
         if (!p->mm)
                 printk(" (L-TLB)\n");
         else
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 34b2301276c..1b255df4fcd 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -846,7 +846,8 @@ static ctl_table vm_table[] = {
                 .extra2         = &one_hundred,
         },
 #endif
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86_32) || \
+   (defined(CONFIG_SUPERH) && defined(CONFIG_VSYSCALL))
         {
                 .ctl_name       = VM_VDSO_ENABLED,
                 .procname       = "vdso_enabled",
diff --git a/kernel/time.c b/kernel/time.c
index c6c80ea5d0e..2f47888e46c 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -635,6 +635,7 @@ timeval_to_jiffies(const struct timeval *value)
                 (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
                  (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
 }
+EXPORT_SYMBOL(timeval_to_jiffies);
 
 void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
 {
@@ -649,6 +650,7 @@ void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
         tv_usec /= NSEC_PER_USEC;
         value->tv_usec = tv_usec;
 }
+EXPORT_SYMBOL(jiffies_to_timeval);
 
 /*
  * Convert jiffies/jiffies_64 to clock_t and back.
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 67932ea78c1..76212b2a99d 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -274,72 +274,3 @@ void clockevents_notify(unsigned long reason, void *arg)
         }
 }
 EXPORT_SYMBOL_GPL(clockevents_notify);
-#ifdef CONFIG_SYSFS
-
-/**
- * clockevents_show_registered - sysfs interface for listing clockevents
- * @dev:        unused
- * @buf:        char buffer to be filled with clock events list
- *
- * Provides sysfs interface for listing registered clock event devices
- */
-static ssize_t clockevents_show_registered(struct sys_device *dev, char *buf)
-{
-        struct list_head *tmp;
-        char *p = buf;
-        int cpu;
-
-        spin_lock(&clockevents_lock);
-
-        list_for_each(tmp, &clockevent_devices) {
-                struct clock_event_device *ce;
-
-                ce = list_entry(tmp, struct clock_event_device, list);
-                p += sprintf(p, "%-20s F:%04x M:%d", ce->name,
-                             ce->features, ce->mode);
-                p += sprintf(p, " C:");
-                if (!cpus_equal(ce->cpumask, cpu_possible_map)) {
-                        for_each_cpu_mask(cpu, ce->cpumask)
-                                p += sprintf(p, " %d", cpu);
-                } else {
-                        /*
-                         * FIXME: Add the cpu which is handling this sucker
-                         */
-                }
-                p += sprintf(p, "\n");
-        }
-
-        spin_unlock(&clockevents_lock);
-
-        return p - buf;
-}
-
-/*
- * Sysfs setup bits:
- */
-static SYSDEV_ATTR(registered, 0600,
-                   clockevents_show_registered, NULL);
-
-static struct sysdev_class clockevents_sysclass = {
-        set_kset_name("clockevents"),
-};
-
-static struct sys_device clockevents_sys_device = {
-        .id     = 0,
-        .cls    = &clockevents_sysclass,
-};
-
-static int __init clockevents_sysfs_init(void)
-{
-        int error = sysdev_class_register(&clockevents_sysclass);
-
-        if (!error)
-                error = sysdev_register(&clockevents_sys_device);
-        if (!error)
-                error = sysdev_create_file(
-                                &clockevents_sys_device,
-                                &attr_registered);
-        return error;
-}
-device_initcall(clockevents_sysfs_init);
-#endif
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 5b0e46b56fd..fe5c7db2424 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -151,7 +151,8 @@ static void clocksource_check_watchdog(struct clocksource *cs)
                         watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
                         add_timer(&watchdog_timer);
                 }
-        } else if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) {
+        } else {
+                if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
                         cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 
                 if (!watchdog || cs->rating > watchdog->rating) {
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 3be8da8fed7..4c256fdb887 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -69,4 +69,4 @@ static int __init init_jiffies_clocksource(void)
         return clocksource_register(&clocksource_jiffies);
 }
 
-module_init(init_jiffies_clocksource);
+core_initcall(init_jiffies_clocksource);
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index eb12509e00b..cb25649c6f5 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -32,7 +32,7 @@ static u64 tick_length, tick_length_base;
 /* TIME_ERROR prevents overwriting the CMOS clock */
 static int time_state = TIME_OK;        /* clock synchronization status */
 int time_status = STA_UNSYNC;           /* clock status bits            */
-static long time_offset;                /* time adjustment (ns)         */
+static s64 time_offset;                 /* time adjustment (ns)         */
 static long time_constant = 2;          /* pll time constant            */
 long time_maxerror = NTP_PHASE_LIMIT;   /* maximum error (us)           */
 long time_esterror = NTP_PHASE_LIMIT;   /* estimated error (us)         */
@@ -196,7 +196,7 @@ void __attribute__ ((weak)) notify_arch_cmos_timer(void)
  */
 int do_adjtimex(struct timex *txc)
 {
-        long ltemp, mtemp, save_adjust;
+        long mtemp, save_adjust, rem;
         s64 freq_adj, temp64;
         int result;
 
@@ -277,14 +277,14 @@ int do_adjtimex(struct timex *txc)
                     time_adjust = txc->offset;
             }
             else if (time_status & STA_PLL) {
-                    ltemp = txc->offset * NSEC_PER_USEC;
+                    time_offset = txc->offset * NSEC_PER_USEC;
 
                     /*
                      * Scale the phase adjustment and
                      * clamp to the operating range.
                      */
-                    time_offset = min(ltemp, MAXPHASE * NSEC_PER_USEC);
-                    time_offset = max(time_offset, -MAXPHASE * NSEC_PER_USEC);
+                    time_offset = min(time_offset, (s64)MAXPHASE * NSEC_PER_USEC);
+                    time_offset = max(time_offset, (s64)-MAXPHASE * NSEC_PER_USEC);
 
                     /*
                      * Select whether the frequency is to be controlled
@@ -297,11 +297,11 @@ int do_adjtimex(struct timex *txc)
                     mtemp = xtime.tv_sec - time_reftime;
                     time_reftime = xtime.tv_sec;
 
-                    freq_adj = (s64)time_offset * mtemp;
+                    freq_adj = time_offset * mtemp;
                     freq_adj = shift_right(freq_adj, time_constant * 2 +
                                            (SHIFT_PLL + 2) * 2 - SHIFT_NSEC);
                     if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
-                            temp64 = (s64)time_offset << (SHIFT_NSEC - SHIFT_FLL);
+                            temp64 = time_offset << (SHIFT_NSEC - SHIFT_FLL);
                             if (time_offset < 0) {
                                     temp64 = -temp64;
                                     do_div(temp64, mtemp);
@@ -314,8 +314,10 @@ int do_adjtimex(struct timex *txc)
                             freq_adj += time_freq;
                             freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC);
                             time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC);
-                            time_offset = (time_offset / NTP_INTERVAL_FREQ)
-                                            << SHIFT_UPDATE;
+                            time_offset = div_long_long_rem_signed(time_offset,
+                                                                   NTP_INTERVAL_FREQ,
+                                                                   &rem);
+                            time_offset <<= SHIFT_UPDATE;
                     } /* STA_PLL */
             } /* txc->modes & ADJ_OFFSET */
             if (txc->modes & ADJ_TICK)
@@ -328,12 +330,12 @@ leave:  if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
                 result = TIME_ERROR;
 
         if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
-            txc->offset        = save_adjust;
+                txc->offset = save_adjust;
         else
-            txc->offset = shift_right(time_offset, SHIFT_UPDATE)
-                        * NTP_INTERVAL_FREQ / 1000;
-        txc->freq          = (time_freq / NSEC_PER_USEC)
-                                << (SHIFT_USEC - SHIFT_NSEC);
+                txc->offset = ((long)shift_right(time_offset, SHIFT_UPDATE)) *
+                                NTP_INTERVAL_FREQ / 1000;
+        txc->freq = (time_freq / NSEC_PER_USEC) <<
+                                (SHIFT_USEC - SHIFT_NSEC);
         txc->maxerror      = time_maxerror;
         txc->esterror      = time_esterror;
         txc->status        = time_status;
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 5567745470f..eadfce2fff7 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -307,12 +307,19 @@ int tick_resume_broadcast(void)
         spin_lock_irqsave(&tick_broadcast_lock, flags);
 
         bc = tick_broadcast_device.evtdev;
-        if (bc) {
-                if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC &&
-                    !cpus_empty(tick_broadcast_mask))
-                        tick_broadcast_start_periodic(bc);
 
-                broadcast = cpu_isset(smp_processor_id(), tick_broadcast_mask);
+        if (bc) {
+                switch (tick_broadcast_device.mode) {
+                case TICKDEV_MODE_PERIODIC:
+                        if(!cpus_empty(tick_broadcast_mask))
+                                tick_broadcast_start_periodic(bc);
+                        broadcast = cpu_isset(smp_processor_id(),
+                                              tick_broadcast_mask);
+                        break;
+                case TICKDEV_MODE_ONESHOT:
+                        broadcast = tick_resume_broadcast_oneshot(bc);
+                        break;
+                }
         }
         spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 
@@ -347,6 +354,16 @@ static int tick_broadcast_set_event(ktime_t expires, int force)
         }
 }
 
+int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
+{
+        clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
+
+        if(!cpus_empty(tick_broadcast_oneshot_mask))
+                tick_broadcast_set_event(ktime_get(), 1);
+
+        return cpu_isset(smp_processor_id(), tick_broadcast_oneshot_mask);
+}
+
 /*
  * Reprogram the broadcast device:
  *
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 43ba1bdec14..bfda3f7f071 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -298,18 +298,17 @@ static void tick_shutdown(unsigned int *cpup)
         spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
-static void tick_suspend_periodic(void)
+static void tick_suspend(void)
 {
         struct tick_device *td = &__get_cpu_var(tick_cpu_device);
         unsigned long flags;
 
         spin_lock_irqsave(&tick_device_lock, flags);
-        if (td->mode == TICKDEV_MODE_PERIODIC)
-                clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+        clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
         spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
-static void tick_resume_periodic(void)
+static void tick_resume(void)
 {
         struct tick_device *td = &__get_cpu_var(tick_cpu_device);
         unsigned long flags;
@@ -317,6 +316,8 @@ static void tick_resume_periodic(void)
         spin_lock_irqsave(&tick_device_lock, flags);
         if (td->mode == TICKDEV_MODE_PERIODIC)
                 tick_setup_periodic(td->evtdev, 0);
+        else
+                tick_resume_oneshot();
         spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
@@ -348,13 +349,13 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason,
                 break;
 
         case CLOCK_EVT_NOTIFY_SUSPEND:
-                tick_suspend_periodic();
+                tick_suspend();
                 tick_suspend_broadcast();
                 break;
 
         case CLOCK_EVT_NOTIFY_RESUME:
                 if (!tick_resume_broadcast())
-                        tick_resume_periodic();
+                        tick_resume();
                 break;
 
         default:
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 75890efd24f..c9d203bde51 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -19,12 +19,13 @@ extern void tick_setup_oneshot(struct clock_event_device *newdev,
 extern int tick_program_event(ktime_t expires, int force);
 extern void tick_oneshot_notify(void);
 extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *));
-
+extern void tick_resume_oneshot(void);
 # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
 extern void tick_broadcast_oneshot_control(unsigned long reason);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
+extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -43,6 +44,10 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
 {
         BUG();
 }
+static inline void tick_resume_oneshot(void)
+{
+        BUG();
+}
 static inline int tick_program_event(ktime_t expires, int force)
 {
         return 0;
@@ -54,6 +59,10 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 }
 static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
+static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
+{
+        return 0;
+}
 #endif /* !TICK_ONESHOT */
 
 /*
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 2e8b7ff863c..f6997ab0c3c 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -41,6 +41,18 @@ int tick_program_event(ktime_t expires, int force)
 }
 
 /**
+ * tick_resume_onshot - resume oneshot mode
+ */
+void tick_resume_oneshot(void)
+{
+        struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+        struct clock_event_device *dev = td->evtdev;
+
+        clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
+        tick_program_event(ktime_get(), 1);
+}
+
+/**
  * tick_setup_oneshot - setup the event device for oneshot mode (hres or nohz)
  */
 void tick_setup_oneshot(struct clock_event_device *newdev,
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index f82c635c3d5..59df5e8555a 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -194,9 +194,9 @@ print_tickdevice(struct seq_file *m, struct tick_device *td)
                 return;
         }
         SEQ_printf(m, "%s\n", dev->name);
-        SEQ_printf(m, " max_delta_ns:   %ld\n", dev->max_delta_ns);
-        SEQ_printf(m, " min_delta_ns:   %ld\n", dev->min_delta_ns);
-        SEQ_printf(m, " mult:           %ld\n", dev->mult);
+        SEQ_printf(m, " max_delta_ns:   %lu\n", dev->max_delta_ns);
+        SEQ_printf(m, " min_delta_ns:   %lu\n", dev->min_delta_ns);
+        SEQ_printf(m, " mult:           %lu\n", dev->mult);
         SEQ_printf(m, " shift:          %d\n", dev->shift);
         SEQ_printf(m, " mode:           %d\n", dev->mode);
         SEQ_printf(m, " next_event:     %Ld nsecs\n",
diff --git a/kernel/timer.c b/kernel/timer.c
index 797cccb8643..dd6c2c1c561 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -695,15 +695,28 @@ static unsigned long cmp_next_hrtimer_event(unsigned long now,
 {
         ktime_t hr_delta = hrtimer_get_next_event();
         struct timespec tsdelta;
+        unsigned long delta;
 
         if (hr_delta.tv64 == KTIME_MAX)
                 return expires;
 
-        if (hr_delta.tv64 <= TICK_NSEC)
-                return now;
+        /*
+         * Expired timer available, let it expire in the next tick
+         */
+        if (hr_delta.tv64 <= 0)
+                return now + 1;
 
         tsdelta = ktime_to_timespec(hr_delta);
-        now += timespec_to_jiffies(&tsdelta);
+        delta = timespec_to_jiffies(&tsdelta);
+        /*
+         * Take rounding errors in to account and make sure, that it
+         * expires in the next tick. Otherwise we go into an endless
+         * ping pong due to tick_nohz_stop_sched_tick() retriggering
+         * the timer softirq
+         */
+        if (delta < 1)
+                delta = 1;
+        now += delta;
         if (time_before(now, expires))
                 return now;
         return expires;
@@ -1003,7 +1016,7 @@ static int timekeeping_resume(struct sys_device *dev)
         clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
 
         /* Resume hrtimers */
-        clock_was_set();
+        hres_timers_resume();
 
         return 0;
 }