about summary refs log tree commit diff
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2008-12-14 12:34:15 +0100
committerIngo Molnar <mingo@elte.hu>2008-12-14 20:31:26 +0100
commit6c594c21fcb02c662f11c97be4d7d2b73060a205 (patch)
treedbd56d57fbc4576e18002a5cc08b1f4327a2248f
parent5d6a27d8a096868ae313f71f563b06074a7e34fe (diff)
perfcounters: add task migrations counter
Impact: add new feature, new sw counter. Add a counter that counts the number of cross-CPU migrations a task is suffering. Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--include/linux/perf_counter.h8
-rw-r--r--include/linux/sched.h3
-rw-r--r--kernel/perf_counter.c49
-rw-r--r--kernel/sched.c7
4 files changed, 61 insertions, 6 deletions
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index d2a16563415..f30486fc55d 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -42,6 +42,8 @@ enum hw_event_types {
PERF_COUNT_BRANCH_INSTRUCTIONS = 4,
PERF_COUNT_BRANCH_MISSES = 5,
+ PERF_HW_EVENTS_MAX = 6,
+
/*
* Special "software" counters provided by the kernel, even if
* the hardware does not support performance counters. These
@@ -50,11 +52,11 @@ enum hw_event_types {
*/
PERF_COUNT_CPU_CLOCK = -1,
PERF_COUNT_TASK_CLOCK = -2,
- /*
- * Future software events:
- */
PERF_COUNT_PAGE_FAULTS = -3,
PERF_COUNT_CONTEXT_SWITCHES = -4,
+ PERF_COUNT_CPU_MIGRATIONS = -5,
+
+ PERF_SW_EVENTS_MIN = -6,
};
/*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4c530278391..2e15be8fc79 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1014,6 +1014,8 @@ struct sched_entity {
u64 last_wakeup;
u64 avg_overlap;
+ u64 nr_migrations;
+
#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
u64 wait_max;
@@ -1029,7 +1031,6 @@ struct sched_entity {
u64 exec_max;
u64 slice_max;
- u64 nr_migrations;
u64 nr_migrations_cold;
u64 nr_failed_migrations_affine;
u64 nr_failed_migrations_running;
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 09287091c52..fb11e351e44 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -936,6 +936,52 @@ static const struct hw_perf_counter_ops perf_ops_context_switches = {
.hw_perf_counter_read = context_switches_perf_counter_read,
};
+static inline u64 get_cpu_migrations(void)
+{
+ return current->se.nr_migrations;
+}
+
+static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
+{
+ u64 prev, now;
+ s64 delta;
+
+ prev = atomic64_read(&counter->hw.prev_count);
+ now = get_cpu_migrations();
+
+ atomic64_set(&counter->hw.prev_count, now);
+
+ delta = now - prev;
+ if (WARN_ON_ONCE(delta < 0))
+ delta = 0;
+
+ atomic64_add(delta, &counter->count);
+}
+
+static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
+{
+ cpu_migrations_perf_counter_update(counter);
+}
+
+static void cpu_migrations_perf_counter_enable(struct perf_counter *counter)
+{
+ /*
+ * se.nr_migrations is a per-task value already,
+ * so we dont have to clear it on switch-in.
+ */
+}
+
+static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
+{
+ cpu_migrations_perf_counter_update(counter);
+}
+
+static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
+ .hw_perf_counter_enable = cpu_migrations_perf_counter_enable,
+ .hw_perf_counter_disable = cpu_migrations_perf_counter_disable,
+ .hw_perf_counter_read = cpu_migrations_perf_counter_read,
+};
+
static const struct hw_perf_counter_ops *
sw_perf_counter_init(struct perf_counter *counter)
{
@@ -951,6 +997,9 @@ sw_perf_counter_init(struct perf_counter *counter)
case PERF_COUNT_CONTEXT_SWITCHES:
hw_ops = &perf_ops_context_switches;
break;
+ case PERF_COUNT_CPU_MIGRATIONS:
+ hw_ops = &perf_ops_cpu_migrations;
+ break;
default:
break;
}
diff --git a/kernel/sched.c b/kernel/sched.c
index 5c3f4106314..382cfdb5e38 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1852,12 +1852,14 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
p->se.sleep_start -= clock_offset;
if (p->se.block_start)
p->se.block_start -= clock_offset;
+#endif
if (old_cpu != new_cpu) {
- schedstat_inc(p, se.nr_migrations);
+ p->se.nr_migrations++;
+#ifdef CONFIG_SCHEDSTATS
if (task_hot(p, old_rq->clock, NULL))
schedstat_inc(p, se.nr_forced2_migrations);
- }
#endif
+ }
p->se.vruntime -= old_cfsrq->min_vruntime -
new_cfsrq->min_vruntime;
@@ -2375,6 +2377,7 @@ static void __sched_fork(struct task_struct *p)
p->se.exec_start = 0;
p->se.sum_exec_runtime = 0;
p->se.prev_sum_exec_runtime = 0;
+ p->se.nr_migrations = 0;
p->se.last_wakeup = 0;
p->se.avg_overlap = 0;