author    Paul Mackerras <paulus@samba.org>    2007-12-21 22:21:08 +1100
committer Paul Mackerras <paulus@samba.org>    2007-12-21 22:21:08 +1100
commit    c2a7dcad9f0d92d7a96e735abb8bec7b9c621536 (patch)
tree      bf9b20fdd5ab07e5b0e4e0b95c6a3dbab1005cb9 /kernel
parent    373a6da165ac3012a74fd072da340eabca55d031 (diff)
parent    ea67db4cdbbf7f4e74150e71da0984e25121f500 (diff)
Merge branch 'linux-2.6'
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/chip.c              9
-rw-r--r--  kernel/panic.c                18
-rw-r--r--  kernel/rwsem.c                 5
-rw-r--r--  kernel/sched.c                18
-rw-r--r--  kernel/sched_fair.c            3
-rw-r--r--  kernel/sched_rt.c              2
-rw-r--r--  kernel/sysctl.c               16
-rw-r--r--  kernel/sysctl_check.c          7
-rw-r--r--  kernel/time/tick-broadcast.c  56
-rw-r--r--  kernel/timer.c                 4
10 files changed, 75 insertions, 63 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 9b5dff6b3f6..44019ce30a1 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -297,18 +297,13 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
if (unlikely(desc->status & IRQ_INPROGRESS))
goto out_unlock;
+ desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
kstat_cpu(cpu).irqs[irq]++;
action = desc->action;
- if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
- if (desc->chip->mask)
- desc->chip->mask(irq);
- desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
- desc->status |= IRQ_PENDING;
+ if (unlikely(!action || (desc->status & IRQ_DISABLED)))
goto out_unlock;
- }
- desc->status &= ~(IRQ_REPLAY | IRQ_WAITING | IRQ_PENDING);
desc->status |= IRQ_INPROGRESS;
spin_unlock(&desc->lock);
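
Note: the hunk above shrinks handle_simple_irq(). The IRQ_REPLAY and IRQ_WAITING flags are now cleared unconditionally on entry, and a disabled or handler-less IRQ simply falls through to the unlock path instead of being masked and marked IRQ_PENDING, since simple interrupts need no resend handling. A minimal user-space sketch of the resulting control flow, with stand-in types and flag values rather than the real kernel API:

#include <stdio.h>

#define IRQ_INPROGRESS 0x1
#define IRQ_REPLAY     0x2
#define IRQ_WAITING    0x4
#define IRQ_DISABLED   0x8

struct irq_desc {
	unsigned int status;
	void (*action)(unsigned int irq);
};

static void handle_simple(unsigned int irq, struct irq_desc *desc)
{
	if (desc->status & IRQ_INPROGRESS)
		return;                        /* already being handled */

	/* now cleared unconditionally, on every path */
	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

	if (!desc->action || (desc->status & IRQ_DISABLED))
		return;                        /* no masking, no IRQ_PENDING */

	desc->status |= IRQ_INPROGRESS;
	desc->action(irq);
	desc->status &= ~IRQ_INPROGRESS;
}

static void say_hi(unsigned int irq)
{
	printf("irq %u handled\n", irq);
}

int main(void)
{
	struct irq_desc desc = { .status = 0, .action = say_hi };
	handle_simple(5, &desc);
	return 0;
}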
diff --git a/kernel/panic.c b/kernel/panic.c
index 6f6e03e9159..da4d6bac270 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -19,6 +19,7 @@
#include <linux/nmi.h>
#include <linux/kexec.h>
#include <linux/debug_locks.h>
+#include <linux/random.h>
int panic_on_oops;
int tainted;
@@ -266,12 +267,29 @@ void oops_enter(void)
}
/*
+ * 64-bit random ID for oopses:
+ */
+static u64 oops_id;
+
+static int init_oops_id(void)
+{
+ if (!oops_id)
+ get_random_bytes(&oops_id, sizeof(oops_id));
+
+ return 0;
+}
+late_initcall(init_oops_id);
+
+/*
* Called when the architecture exits its oops handler, after printing
* everything.
*/
void oops_exit(void)
{
do_oops_enter_exit();
+ init_oops_id();
+ printk(KERN_WARNING "---[ end trace %016llx ]---\n",
+ (unsigned long long)oops_id);
}
#ifdef CONFIG_CC_STACKPROTECTOR
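
Note: the new oops_id is a lazily initialized random tag. late_initcall() seeds it once boot has progressed far enough for the random pool, and oops_exit() calls init_oops_id() again in case an oops fires before the initcall has run. A rough user-space sketch of the same initialize-once pattern, reading /dev/urandom in place of the kernel's get_random_bytes():

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

static uint64_t oops_id;

/* generate the id once, on first use or at "late init" */
static int init_oops_id(void)
{
	if (!oops_id) {
		FILE *f = fopen("/dev/urandom", "rb");

		if (!f)
			return -1;
		if (fread(&oops_id, sizeof(oops_id), 1, f) != 1) {
			fclose(f);
			return -1;
		}
		fclose(f);
	}
	return 0;
}

int main(void)
{
	if (init_oops_id())
		return EXIT_FAILURE;
	printf("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
	init_oops_id();        /* second call is a no-op: same id below */
	printf("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
	return 0;
}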
diff --git a/kernel/rwsem.c b/kernel/rwsem.c
index 1ec620c0306..cae050b05f5 100644
--- a/kernel/rwsem.c
+++ b/kernel/rwsem.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rwsem.h>
@@ -15,7 +16,7 @@
/*
* lock for reading
*/
-void down_read(struct rw_semaphore *sem)
+void __sched down_read(struct rw_semaphore *sem)
{
might_sleep();
rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
@@ -42,7 +43,7 @@ EXPORT_SYMBOL(down_read_trylock);
/*
* lock for writing
*/
-void down_write(struct rw_semaphore *sem)
+void __sched down_write(struct rw_semaphore *sem)
{
might_sleep();
rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
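
Note: the __sched annotation only changes where the functions are linked. It places down_read()/down_write() in the .sched.text section, which the wchan code skips when walking a blocked task's stack, so /proc/<pid>/wchan reports the real sleeper rather than the rwsem wrapper. A GCC-specific illustration of the underlying mechanism, a section attribute on an ordinary function (the section name matches the kernel's; everything else is a stand-in):

#include <stdio.h>

#define __sched __attribute__((__section__(".sched.text")))

static void __sched blocking_helper(void)
{
	puts("this function was linked into .sched.text");
}

int main(void)
{
	blocking_helper();
	return 0;
}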
diff --git a/kernel/sched.c b/kernel/sched.c
index c6e551de795..3df84ea6aba 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -508,10 +508,15 @@ EXPORT_SYMBOL_GPL(cpu_clock);
# define finish_arch_switch(prev) do { } while (0)
#endif
+static inline int task_current(struct rq *rq, struct task_struct *p)
+{
+ return rq->curr == p;
+}
+
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline int task_running(struct rq *rq, struct task_struct *p)
{
- return rq->curr == p;
+ return task_current(rq, p);
}
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
@@ -540,7 +545,7 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
#ifdef CONFIG_SMP
return p->oncpu;
#else
- return rq->curr == p;
+ return task_current(rq, p);
#endif
}
@@ -663,6 +668,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
struct rq *rq = cpu_rq(smp_processor_id());
u64 now = sched_clock();
+ touch_softlockup_watchdog();
rq->idle_clock += delta_ns;
/*
* Override the previous timestamp and ignore all
@@ -3334,7 +3340,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
rq = task_rq_lock(p, &flags);
ns = p->se.sum_exec_runtime;
- if (rq->curr == p) {
+ if (task_current(rq, p)) {
update_rq_clock(rq);
delta_exec = rq->clock - p->se.exec_start;
if ((s64)delta_exec > 0)
@@ -4021,7 +4027,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
oldprio = p->prio;
on_rq = p->se.on_rq;
- running = task_running(rq, p);
+ running = task_current(rq, p);
if (on_rq) {
dequeue_task(rq, p, 0);
if (running)
@@ -4332,7 +4338,7 @@ recheck:
}
update_rq_clock(rq);
on_rq = p->se.on_rq;
- running = task_running(rq, p);
+ running = task_current(rq, p);
if (on_rq) {
deactivate_task(rq, p, 0);
if (running)
@@ -7101,7 +7107,7 @@ void sched_move_task(struct task_struct *tsk)
update_rq_clock(rq);
- running = task_running(rq, tsk);
+ running = task_current(rq, tsk);
on_rq = tsk->se.on_rq;
if (on_rq) {
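
Note: the new task_current() helper separates two questions that happen to coincide on most configurations: whether p is the runqueue's current task, and whether p is physically executing on a CPU. With __ARCH_WANT_UNLOCKED_CTXSW the two can disagree mid-switch, so callers that really mean "is p rq->curr" are converted to the new helper. A stand-alone sketch of the distinction, with stand-in types:

#include <stdio.h>
#include <stdbool.h>

struct task_struct { int oncpu; };
struct rq { struct task_struct *curr; };

static inline bool task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline bool task_running(struct rq *rq, struct task_struct *p)
{
#ifdef UNLOCKED_CTXSW   /* models __ARCH_WANT_UNLOCKED_CTXSW on SMP */
	return p->oncpu;
#else
	return task_current(rq, p);
#endif
}

int main(void)
{
	struct task_struct t = { .oncpu = 1 };
	struct rq r = { .curr = NULL };   /* t is no longer rq->curr... */

	/* ...yet with unlocked context switches it may still be on the CPU */
	printf("task_current=%d task_running=%d\n",
	       (int)task_current(&r, &t), (int)task_running(&r, &t));
	return 0;
}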
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c33f0ceb3de..da7c061e720 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -511,8 +511,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
if (!initial) {
/* sleeps upto a single latency don't count. */
- if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se) &&
- task_of(se)->policy != SCHED_BATCH)
+ if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
vruntime -= sysctl_sched_latency;
/* ensure we never gain time by being placed backwards. */
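
Note: the sched_fair.c hunk drops the special case that denied SCHED_BATCH tasks the sleeper bonus, so every waking task entity now gets up to one latency period of vruntime credit. A toy version of the placement arithmetic (example latency value, illustrative names):

#include <stdio.h>

#define SCHED_LATENCY_NS 20000000ULL   /* example: 20 ms */

static unsigned long long place_entity(unsigned long long cfs_min_vruntime,
                                       unsigned long long se_vruntime,
                                       int initial)
{
	unsigned long long vruntime = cfs_min_vruntime;

	/* sleeper credit, now granted to SCHED_BATCH tasks as well */
	if (!initial)
		vruntime -= SCHED_LATENCY_NS;

	/* never gain time by being placed backwards */
	return vruntime > se_vruntime ? vruntime : se_vruntime;
}

int main(void)
{
	printf("placed at %llu\n", place_entity(100000000ULL, 50000000ULL, 0));
	return 0;
}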
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index ee9c8b6529e..9ba3daa0347 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -208,6 +208,8 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
+ update_curr_rt(rq);
+
/*
* RR tasks need a special form of timeslice management.
* FIFO tasks have no timeslices.
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8ac51714b08..c68f68dcc60 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -225,10 +225,10 @@ static struct ctl_table root_table[] = {
};
#ifdef CONFIG_SCHED_DEBUG
-static unsigned long min_sched_granularity_ns = 100000; /* 100 usecs */
-static unsigned long max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
-static unsigned long min_wakeup_granularity_ns; /* 0 usecs */
-static unsigned long max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
+static int min_sched_granularity_ns = 100000; /* 100 usecs */
+static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
+static int min_wakeup_granularity_ns; /* 0 usecs */
+static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
#endif
static struct ctl_table kern_table[] = {
@@ -906,11 +906,11 @@ static struct ctl_table vm_table[] = {
},
{
.ctl_name = CTL_UNNUMBERED,
- .procname = "hugetlb_dynamic_pool",
- .data = &hugetlb_dynamic_pool,
- .maxlen = sizeof(hugetlb_dynamic_pool),
+ .procname = "nr_overcommit_hugepages",
+ .data = &nr_overcommit_huge_pages,
+ .maxlen = sizeof(nr_overcommit_huge_pages),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &proc_doulongvec_minmax,
},
#endif
{
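
Note: the granularity hunk is a type fix. proc_dointvec_minmax() dereferences extra1/extra2 as int *, so bounds declared unsigned long hand the handler the wrong bytes on 64-bit big-endian machines. A small demonstration of that bug class, with stand-in names:

#include <stdio.h>

static unsigned long wrong_min = 100000;  /* what the old code declared */
static int right_min = 100000;            /* what the handler expects */

/* models a handler that, like proc_dointvec_minmax(), reads its
 * bound through an int pointer */
static int clamp_to_min(int value, void *extra1)
{
	int min = *(int *)extra1;

	return value < min ? min : value;
}

int main(void)
{
	/* the unsigned long bound may read as garbage (e.g. 0 on a
	 * 64-bit big-endian machine, where the int lands on the high
	 * half of the word) */
	printf("via unsigned long bound: %d\n", clamp_to_min(5, &wrong_min));
	printf("via int bound:           %d\n", clamp_to_min(5, &right_min));
	return 0;
}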
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index bed939f82c3..a68425a5cc1 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -428,7 +428,7 @@ static struct trans_ctl_table trans_net_netrom_table[] = {
{}
};
-static struct trans_ctl_table trans_net_ax25_table[] = {
+static struct trans_ctl_table trans_net_ax25_param_table[] = {
{ NET_AX25_IP_DEFAULT_MODE, "ip_default_mode" },
{ NET_AX25_DEFAULT_MODE, "ax25_default_mode" },
{ NET_AX25_BACKOFF_TYPE, "backoff_type" },
@@ -446,6 +446,11 @@ static struct trans_ctl_table trans_net_ax25_table[] = {
{}
};
+static struct trans_ctl_table trans_net_ax25_table[] = {
+ { 0, NULL, trans_net_ax25_param_table },
+ {}
+};
+
static struct trans_ctl_table trans_net_bridge_table[] = {
{ NET_BRIDGE_NF_CALL_ARPTABLES, "bridge-nf-call-arptables" },
{ NET_BRIDGE_NF_CALL_IPTABLES, "bridge-nf-call-iptables" },
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index aa82d7bf478..5b86698faa0 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -384,45 +384,19 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
}
/*
- * Reprogram the broadcast device:
- *
- * Called with tick_broadcast_lock held and interrupts disabled.
- */
-static int tick_broadcast_reprogram(void)
-{
- ktime_t expires = { .tv64 = KTIME_MAX };
- struct tick_device *td;
- int cpu;
-
- /*
- * Find the event which expires next:
- */
- for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
- cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
- td = &per_cpu(tick_cpu_device, cpu);
- if (td->evtdev->next_event.tv64 < expires.tv64)
- expires = td->evtdev->next_event;
- }
-
- if (expires.tv64 == KTIME_MAX)
- return 0;
-
- return tick_broadcast_set_event(expires, 0);
-}
-
-/*
* Handle oneshot mode broadcasting
*/
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
struct tick_device *td;
cpumask_t mask;
- ktime_t now;
+ ktime_t now, next_event;
int cpu;
spin_lock(&tick_broadcast_lock);
again:
dev->next_event.tv64 = KTIME_MAX;
+ next_event.tv64 = KTIME_MAX;
mask = CPU_MASK_NONE;
now = ktime_get();
/* Find all expired events */
@@ -431,19 +405,31 @@ again:
td = &per_cpu(tick_cpu_device, cpu);
if (td->evtdev->next_event.tv64 <= now.tv64)
cpu_set(cpu, mask);
+ else if (td->evtdev->next_event.tv64 < next_event.tv64)
+ next_event.tv64 = td->evtdev->next_event.tv64;
}
/*
- * Wakeup the cpus which have an expired event. The broadcast
- * device is reprogrammed in the return from idle code.
+ * Wakeup the cpus which have an expired event.
+ */
+ tick_do_broadcast(mask);
+
+ /*
+ * Two reasons for reprogram:
+ *
+ * - The global event did not expire any CPU local
+ * events. This happens in dyntick mode, as the maximum PIT
+ * delta is quite small.
+ *
+ * - There are pending events on sleeping CPUs which were not
+ * in the event mask
*/
- if (!tick_do_broadcast(mask)) {
+ if (next_event.tv64 != KTIME_MAX) {
/*
- * The global event did not expire any CPU local
- * events. This happens in dyntick mode, as the
- * maximum PIT delta is quite small.
+ * Rearm the broadcast device. If event expired,
+ * repeat the above
*/
- if (tick_broadcast_reprogram())
+ if (tick_broadcast_set_event(next_event, 0))
goto again;
}
spin_unlock(&tick_broadcast_lock);
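
Note: the tick-broadcast rework folds reprogramming into the handler's single scan. One pass both collects the CPUs whose events have expired and remembers the earliest still-pending expiry; the broadcast device is then rearmed with that value, looping back if programming it raced with the event firing. A simplified stand-alone model of that scan-and-rearm loop:

#include <stdio.h>
#include <stdint.h>

#define NCPU 4
#define KTIME_MAX UINT64_MAX

static uint64_t next_event_of[NCPU] = { 10, 50, 30, 50 };

/* returns nonzero when 'expires' is already in the past, i.e.
 * programming the device raced with the event firing */
static int set_event(uint64_t expires, uint64_t now)
{
	return expires <= now;
}

static void handle_broadcast(uint64_t now)
{
	uint64_t next_event;
	unsigned int mask;
	int cpu;

again:
	next_event = KTIME_MAX;
	mask = 0;

	/* one pass: collect expired CPUs and the earliest pending event */
	for (cpu = 0; cpu < NCPU; cpu++) {
		if (next_event_of[cpu] <= now)
			mask |= 1u << cpu;
		else if (next_event_of[cpu] < next_event)
			next_event = next_event_of[cpu];
	}

	printf("t=%llu wake mask=0x%x\n", (unsigned long long)now, mask);
	for (cpu = 0; cpu < NCPU; cpu++)      /* "deliver" the wakeups */
		if (mask & (1u << cpu))
			next_event_of[cpu] = KTIME_MAX;

	/* rearm; if the expiry already raced past, scan again */
	if (next_event != KTIME_MAX && set_event(next_event, now)) {
		now = next_event;
		goto again;
	}
}

int main(void)
{
	handle_broadcast(20);
	return 0;
}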
diff --git a/kernel/timer.c b/kernel/timer.c
index a05817c021d..d4527dcef1a 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1219,11 +1219,11 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info)
*/
static struct lock_class_key base_lock_keys[NR_CPUS];
-static int __devinit init_timers_cpu(int cpu)
+static int __cpuinit init_timers_cpu(int cpu)
{
int j;
tvec_base_t *base;
- static char __devinitdata tvec_base_done[NR_CPUS];
+ static char __cpuinitdata tvec_base_done[NR_CPUS];
if (!tvec_base_done[cpu]) {
static char boot_done;
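
Note: the timer.c hunk swaps __devinit for __cpuinit because init_timers_cpu() can run again whenever a CPU is hotplugged. The annotation decides which section the code lands in, and therefore whether it may be discarded after boot. A sketch of that mechanism (the section name mimics the kernel's scheme; the config macro here is just a compile-time switch):

#include <stdio.h>

#ifdef CONFIG_HOTPLUG_CPU
# define __cpuinit   /* keep resident: may run again at hotplug time */
#else
# define __cpuinit __attribute__((__section__(".init.text")))
#endif

static int __cpuinit init_timers_cpu_model(int cpu)
{
	printf("initializing timers for cpu %d\n", cpu);
	return 0;
}

int main(void)
{
	return init_timers_cpu_model(0);
}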