From 0793a61d4df8daeac6492dbf8d2f3e5713caae5e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 4 Dec 2008 20:12:29 +0100 Subject: performance counters: core code Implement the core kernel bits of Performance Counters subsystem. The Linux Performance Counter subsystem provides an abstraction of performance counter hardware capabilities. It provides per task and per CPU counters, and it provides event capabilities on top of those. Performance counters are accessed via special file descriptors. There's one file descriptor per virtual counter used. The special file descriptor is opened via the perf_counter_open() system call: int perf_counter_open(u32 hw_event_type, u32 hw_event_period, u32 record_type, pid_t pid, int cpu); The syscall returns the new fd. The fd can be used via the normal VFS system calls: read() can be used to read the counter, fcntl() can be used to set the blocking mode, etc. Multiple counters can be kept open at a time, and the counters can be poll()ed. See more details in Documentation/perf-counters.txt. Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- drivers/char/sysrq.c | 2 + include/linux/perf_counter.h | 171 ++++++++ include/linux/sched.h | 9 + include/linux/syscalls.h | 6 + init/Kconfig | 29 ++ kernel/Makefile | 1 + kernel/fork.c | 1 + kernel/perf_counter.c | 943 +++++++++++++++++++++++++++++++++++++++++++ kernel/sched.c | 24 ++ kernel/sys_ni.c | 3 + 10 files changed, 1189 insertions(+) create mode 100644 include/linux/perf_counter.h create mode 100644 kernel/perf_counter.c diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index ce0d9da52a8..52146c2a8d9 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -244,6 +245,7 @@ static void sysrq_handle_showregs(int key, struct tty_struct *tty) struct pt_regs *regs = get_irq_regs(); if (regs) show_regs(regs); + perf_counter_print_debug(); } static struct sysrq_key_op sysrq_showregs_op = { .handler = sysrq_handle_showregs, diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h new file mode 100644 index 00000000000..22c4469abf4 --- /dev/null +++ b/include/linux/perf_counter.h @@ -0,0 +1,171 @@ +/* + * Performance counters: + * + * Copyright(C) 2008, Thomas Gleixner + * Copyright(C) 2008, Red Hat, Inc., Ingo Molnar + * + * Data type definitions, declarations, prototypes. 
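 + *
 + * (See Documentation/perf-counters.txt, added later in this series, for
 + *  details of the syscall ABI declared here.)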
+ * + * Started by: Thomas Gleixner and Ingo Molnar + * + * For licencing details see kernel-base/COPYING + */ +#ifndef _LINUX_PERF_COUNTER_H +#define _LINUX_PERF_COUNTER_H + +#include + +#include +#include +#include +#include +#include + +struct task_struct; + +/* + * Generalized hardware event types, used by the hw_event_type parameter + * of the sys_perf_counter_open() syscall: + */ +enum hw_event_types { + PERF_COUNT_CYCLES, + PERF_COUNT_INSTRUCTIONS, + PERF_COUNT_CACHE_REFERENCES, + PERF_COUNT_CACHE_MISSES, + PERF_COUNT_BRANCH_INSTRUCTIONS, + PERF_COUNT_BRANCH_MISSES, + /* + * If this bit is set in the type, then trigger NMI sampling: + */ + PERF_COUNT_NMI = (1 << 30), +}; + +/* + * IRQ-notification data record type: + */ +enum perf_record_type { + PERF_RECORD_SIMPLE, + PERF_RECORD_IRQ, + PERF_RECORD_GROUP, +}; + +/** + * struct hw_perf_counter - performance counter hardware details + */ +struct hw_perf_counter { + u64 config; + unsigned long config_base; + unsigned long counter_base; + int nmi; + unsigned int idx; + u64 prev_count; + s32 next_count; + u64 irq_period; +}; + +/* + * Hardcoded buffer length limit for now, for IRQ-fed events: + */ +#define PERF_DATA_BUFLEN 2048 + +/** + * struct perf_data - performance counter IRQ data sampling ... + */ +struct perf_data { + int len; + int rd_idx; + int overrun; + u8 data[PERF_DATA_BUFLEN]; +}; + +/** + * struct perf_counter - performance counter kernel representation: + */ +struct perf_counter { + struct list_head list; + int active; +#if BITS_PER_LONG == 64 + atomic64_t count; +#else + atomic_t count32[2]; +#endif + u64 __irq_period; + + struct hw_perf_counter hw; + + struct perf_counter_context *ctx; + struct task_struct *task; + + /* + * Protect attach/detach: + */ + struct mutex mutex; + + int oncpu; + int cpu; + + s32 hw_event_type; + enum perf_record_type record_type; + + /* read() / irq related data */ + wait_queue_head_t waitq; + /* optional: for NMIs */ + int wakeup_pending; + struct perf_data *irqdata; + struct perf_data *usrdata; + struct perf_data data[2]; +}; + +/** + * struct perf_counter_context - counter context structure + * + * Used as a container for task counters and CPU counters as well: + */ +struct perf_counter_context { +#ifdef CONFIG_PERF_COUNTERS + /* + * Protect the list of counters: + */ + spinlock_t lock; + struct list_head counters; + int nr_counters; + int nr_active; + struct task_struct *task; +#endif +}; + +/** + * struct perf_counter_cpu_context - per cpu counter context structure + */ +struct perf_cpu_context { + struct perf_counter_context ctx; + struct perf_counter_context *task_ctx; + int active_oncpu; + int max_pertask; +}; + +/* + * Set by architecture code: + */ +extern int perf_max_counters; + +#ifdef CONFIG_PERF_COUNTERS +extern void perf_counter_task_sched_in(struct task_struct *task, int cpu); +extern void perf_counter_task_sched_out(struct task_struct *task, int cpu); +extern void perf_counter_task_tick(struct task_struct *task, int cpu); +extern void perf_counter_init_task(struct task_struct *task); +extern void perf_counter_notify(struct pt_regs *regs); +extern void perf_counter_print_debug(void); +#else +static inline void +perf_counter_task_sched_in(struct task_struct *task, int cpu) { } +static inline void +perf_counter_task_sched_out(struct task_struct *task, int cpu) { } +static inline void +perf_counter_task_tick(struct task_struct *task, int cpu) { } +static inline void perf_counter_init_task(struct task_struct *task) { } +static inline void perf_counter_notify(struct pt_regs *regs) 
{ } +static inline void perf_counter_print_debug(void) { } +#endif + +#endif /* _LINUX_PERF_COUNTER_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 55e30d11447..4c530278391 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -71,6 +71,7 @@ struct sched_param { #include #include #include +#include #include #include #include @@ -1326,6 +1327,7 @@ struct task_struct { struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; #endif + struct perf_counter_context perf_counter_ctx; #ifdef CONFIG_NUMA struct mempolicy *mempolicy; short il_next; @@ -2285,6 +2287,13 @@ static inline void inc_syscw(struct task_struct *tsk) #define TASK_SIZE_OF(tsk) TASK_SIZE #endif +/* + * Call the function if the target task is executing on a CPU right now: + */ +extern void task_oncpu_function_call(struct task_struct *p, + void (*func) (void *info), void *info); + + #ifdef CONFIG_MM_OWNER extern void mm_update_next_owner(struct mm_struct *mm); extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 04fb47bfb92..6cce728a626 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -624,4 +624,10 @@ asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); int kernel_execve(const char *filename, char *const argv[], char *const envp[]); +asmlinkage int +sys_perf_counter_open(u32 hw_event_type, + u32 hw_event_period, + u32 record_type, + pid_t pid, + int cpu); #endif diff --git a/init/Kconfig b/init/Kconfig index f763762d544..78bede218f1 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -732,6 +732,35 @@ config AIO by some high performance threaded applications. Disabling this option saves about 7k. +config HAVE_PERF_COUNTERS + bool + +menu "Performance Counters" + +config PERF_COUNTERS + bool "Kernel Performance Counters" + depends on HAVE_PERF_COUNTERS + default y + help + Enable kernel support for performance counter hardware. + + Performance counters are special hardware registers available + on most modern CPUs. These registers count the number of certain + types of hw events: such as instructions executed, cachemisses + suffered, or branches mis-predicted - without slowing down the + kernel or applications. These registers can also trigger interrupts + when a threshold number of events have passed - and can thus be + used to profile the code that runs on that CPU. + + The Linux Performance Counter subsystem provides an abstraction of + these hardware capabilities, available via a system call. It + provides per task and per CPU counters, and it provides event + capabilities on top of those. + + Say Y if unsure. 
+ +endmenu + config VM_EVENT_COUNTERS default y bool "Enable VM event counters for /proc/vmstat" if EMBEDDED diff --git a/kernel/Makefile b/kernel/Makefile index 19fad003b19..1f184a1dc40 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -89,6 +89,7 @@ obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o obj-$(CONFIG_FUNCTION_TRACER) += trace/ obj-$(CONFIG_TRACING) += trace/ obj-$(CONFIG_SMP) += sched_cpupri.o +obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y) # According to Alan Modra , the -fno-omit-frame-pointer is diff --git a/kernel/fork.c b/kernel/fork.c index 2a372a0e206..441fadff1fa 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -975,6 +975,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, goto fork_out; rt_mutex_init_task(p); + perf_counter_init_task(p); #ifdef CONFIG_PROVE_LOCKING DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c new file mode 100644 index 00000000000..20508f05365 --- /dev/null +++ b/kernel/perf_counter.c @@ -0,0 +1,943 @@ +/* + * Performance counter core code + * + * Copyright(C) 2008 Thomas Gleixner + * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar + * + * For licencing details see kernel-base/COPYING + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Each CPU has a list of per CPU counters: + */ +DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context); + +int perf_max_counters __read_mostly; +static int perf_reserved_percpu __read_mostly; +static int perf_overcommit __read_mostly = 1; + +/* + * Mutex for (sysadmin-configurable) counter reservations: + */ +static DEFINE_MUTEX(perf_resource_mutex); + +/* + * Architecture provided APIs - weak aliases: + */ + +int __weak hw_perf_counter_init(struct perf_counter *counter, u32 hw_event_type) +{ + return -EINVAL; +} + +void __weak hw_perf_counter_enable(struct perf_counter *counter) { } +void __weak hw_perf_counter_disable(struct perf_counter *counter) { } +void __weak hw_perf_counter_read(struct perf_counter *counter) { } +void __weak hw_perf_disable_all(void) { } +void __weak hw_perf_enable_all(void) { } +void __weak hw_perf_counter_setup(void) { } + +#if BITS_PER_LONG == 64 + +/* + * Read the cached counter in counter safe against cross CPU / NMI + * modifications. 64 bit version - no complications. + */ +static inline u64 perf_read_counter_safe(struct perf_counter *counter) +{ + return (u64) atomic64_read(&counter->count); +} + +#else + +/* + * Read the cached counter in counter safe against cross CPU / NMI + * modifications. 32 bit version. + */ +static u64 perf_read_counter_safe(struct perf_counter *counter) +{ + u32 cntl, cnth; + + local_irq_disable(); + do { + cnth = atomic_read(&counter->count32[1]); + cntl = atomic_read(&counter->count32[0]); + } while (cnth != atomic_read(&counter->count32[1])); + + local_irq_enable(); + + return cntl | ((u64) cnth) << 32; +} + +#endif + +/* + * Cross CPU call to remove a performance counter + * + * We disable the counter on the hardware level first. After that we + * remove it from the context list. + */ +static void __perf_remove_from_context(void *info) +{ + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct perf_counter *counter = info; + struct perf_counter_context *ctx = counter->ctx; + + /* + * If this is a task context, we need to check whether it is + * the current task context of this cpu. 
If not it has been
+ * scheduled out before the smp call arrived.
+ */
+	if (ctx->task && cpuctx->task_ctx != ctx)
+		return;
+
+	spin_lock(&ctx->lock);
+
+	if (counter->active) {
+		hw_perf_counter_disable(counter);
+		counter->active = 0;
+		ctx->nr_active--;
+		cpuctx->active_oncpu--;
+		counter->task = NULL;
+	}
+	ctx->nr_counters--;
+
+	/*
+	 * Protect the list operation against NMI by disabling the
+	 * counters on a global level. NOP for non NMI based counters.
+	 */
+	hw_perf_disable_all();
+	list_del_init(&counter->list);
+	hw_perf_enable_all();
+
+	if (!ctx->task) {
+		/*
+		 * Allow more per task counters with respect to the
+		 * reservation:
+		 */
+		cpuctx->max_pertask =
+			min(perf_max_counters - ctx->nr_counters,
+			    perf_max_counters - perf_reserved_percpu);
+	}
+
+	spin_unlock(&ctx->lock);
+}
+
+
+/*
+ * Remove the counter from a task's (or a CPU's) list of counters.
+ *
+ * Must be called with counter->mutex held.
+ *
+ * CPU counters are removed with an smp call. For task counters we only
+ * call when the task is on a CPU.
+ */
+static void perf_remove_from_context(struct perf_counter *counter)
+{
+	struct perf_counter_context *ctx = counter->ctx;
+	struct task_struct *task = ctx->task;
+
+	if (!task) {
+		/*
+		 * Per cpu counters are removed via an smp call and
+		 * the removal is always successful.
+		 */
+		smp_call_function_single(counter->cpu,
+					 __perf_remove_from_context,
+					 counter, 1);
+		return;
+	}
+
+retry:
+	task_oncpu_function_call(task, __perf_remove_from_context,
+				 counter);
+
+	spin_lock_irq(&ctx->lock);
+	/*
+	 * If the context is active we need to retry the smp call.
+	 */
+	if (ctx->nr_active && !list_empty(&counter->list)) {
+		spin_unlock_irq(&ctx->lock);
+		goto retry;
+	}
+
+	/*
+	 * The lock prevents this context from being scheduled in, so we
+	 * can remove the counter safely, if the call above did not
+	 * succeed.
+	 */
+	if (!list_empty(&counter->list)) {
+		ctx->nr_counters--;
+		list_del_init(&counter->list);
+		counter->task = NULL;
+	}
+	spin_unlock_irq(&ctx->lock);
+}
+
+/*
+ * Cross CPU call to install and enable a performance counter
+ */
+static void __perf_install_in_context(void *info)
+{
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+	struct perf_counter *counter = info;
+	struct perf_counter_context *ctx = counter->ctx;
+	int cpu = smp_processor_id();
+
+	/*
+	 * If this is a task context, we need to check whether it is
+	 * the current task context of this cpu. If not it has been
+	 * scheduled out before the smp call arrived.
+	 */
+	if (ctx->task && cpuctx->task_ctx != ctx)
+		return;
+
+	spin_lock(&ctx->lock);
+
+	/*
+	 * Protect the list operation against NMI by disabling the
+	 * counters on a global level. NOP for non NMI based counters.
+	 */
+	hw_perf_disable_all();
+	list_add_tail(&counter->list, &ctx->counters);
+	hw_perf_enable_all();
+
+	ctx->nr_counters++;
+
+	if (cpuctx->active_oncpu < perf_max_counters) {
+		hw_perf_counter_enable(counter);
+		counter->active = 1;
+		counter->oncpu = cpu;
+		ctx->nr_active++;
+		cpuctx->active_oncpu++;
+	}
+
+	if (!ctx->task && cpuctx->max_pertask)
+		cpuctx->max_pertask--;
+
+	spin_unlock(&ctx->lock);
+}
+
+/*
+ * Attach a performance counter to a context
+ *
+ * First we add the counter to the list with the hardware enable bit
+ * in counter->hw_config cleared.
+ *
+ * If the counter is attached to a task which is on a CPU we use an smp
+ * call to enable it in the task context. The task might have been
+ * scheduled away, but we check this in the smp call again.
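+ *
+ * (Sketch of the retry protocol below: the cross CPU call is repeated
+ * until either the counter shows up on the context's list, or the
+ * context has gone inactive - in which case the counter is added
+ * directly under ctx->lock.)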
+ */
+static void
+perf_install_in_context(struct perf_counter_context *ctx,
+			struct perf_counter *counter,
+			int cpu)
+{
+	struct task_struct *task = ctx->task;
+
+	counter->ctx = ctx;
+	if (!task) {
+		/*
+		 * Per cpu counters are installed via an smp call and
+		 * the install is always successful.
+		 */
+		smp_call_function_single(cpu, __perf_install_in_context,
+					 counter, 1);
+		return;
+	}
+
+	counter->task = task;
+retry:
+	task_oncpu_function_call(task, __perf_install_in_context,
+				 counter);
+
+	spin_lock_irq(&ctx->lock);
+	/*
+	 * If the context is active and the counter has not been added
+	 * we need to retry the smp call.
+	 */
+	if (ctx->nr_active && list_empty(&counter->list)) {
+		spin_unlock_irq(&ctx->lock);
+		goto retry;
+	}
+
+	/*
+	 * The lock prevents this context from being scheduled in, so we
+	 * can add the counter safely, if the call above did not
+	 * succeed.
+	 */
+	if (list_empty(&counter->list)) {
+		list_add_tail(&counter->list, &ctx->counters);
+		ctx->nr_counters++;
+	}
+	spin_unlock_irq(&ctx->lock);
+}
+
+/*
+ * Called from scheduler to remove the counters of the current task,
+ * with interrupts disabled.
+ *
+ * We stop each counter and update the counter value in counter->count.
+ *
+ * This does not protect us against NMI, but hw_perf_counter_disable()
+ * sets the disabled bit in the control field of counter _before_
+ * accessing the counter control register. If an NMI hits, then it will
+ * not restart the counter.
+ */
+void perf_counter_task_sched_out(struct task_struct *task, int cpu)
+{
+	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+	struct perf_counter_context *ctx = &task->perf_counter_ctx;
+	struct perf_counter *counter;
+
+	if (likely(!cpuctx->task_ctx))
+		return;
+
+	spin_lock(&ctx->lock);
+	list_for_each_entry(counter, &ctx->counters, list) {
+		if (!ctx->nr_active)
+			break;
+		if (counter->active) {
+			hw_perf_counter_disable(counter);
+			counter->active = 0;
+			counter->oncpu = -1;
+			ctx->nr_active--;
+			cpuctx->active_oncpu--;
+		}
+	}
+	spin_unlock(&ctx->lock);
+	cpuctx->task_ctx = NULL;
+}
+
+/*
+ * Called from scheduler to add the counters of the current task
+ * with interrupts disabled.
+ *
+ * We restore the counter value and then enable it.
+ *
+ * This does not protect us against NMI, but hw_perf_counter_enable()
+ * sets the enabled bit in the control field of counter _before_
+ * accessing the counter control register. If an NMI hits, then it will
+ * keep the counter running.
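+ *
+ * (Counters bound to another CPU via counter->cpu are skipped below;
+ * only counters for this CPU, or for any CPU, are scheduled in.)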
+ */ +void perf_counter_task_sched_in(struct task_struct *task, int cpu) +{ + struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); + struct perf_counter_context *ctx = &task->perf_counter_ctx; + struct perf_counter *counter; + + if (likely(!ctx->nr_counters)) + return; + + spin_lock(&ctx->lock); + list_for_each_entry(counter, &ctx->counters, list) { + if (ctx->nr_active == cpuctx->max_pertask) + break; + if (counter->cpu != -1 && counter->cpu != cpu) + continue; + + hw_perf_counter_enable(counter); + counter->active = 1; + counter->oncpu = cpu; + ctx->nr_active++; + cpuctx->active_oncpu++; + } + spin_unlock(&ctx->lock); + cpuctx->task_ctx = ctx; +} + +void perf_counter_task_tick(struct task_struct *curr, int cpu) +{ + struct perf_counter_context *ctx = &curr->perf_counter_ctx; + struct perf_counter *counter; + + if (likely(!ctx->nr_counters)) + return; + + perf_counter_task_sched_out(curr, cpu); + + spin_lock(&ctx->lock); + + /* + * Rotate the first entry last: + */ + hw_perf_disable_all(); + list_for_each_entry(counter, &ctx->counters, list) { + list_del(&counter->list); + list_add_tail(&counter->list, &ctx->counters); + break; + } + hw_perf_enable_all(); + + spin_unlock(&ctx->lock); + + perf_counter_task_sched_in(curr, cpu); +} + +/* + * Initialize the perf_counter context in task_struct + */ +void perf_counter_init_task(struct task_struct *task) +{ + struct perf_counter_context *ctx = &task->perf_counter_ctx; + + spin_lock_init(&ctx->lock); + INIT_LIST_HEAD(&ctx->counters); + ctx->nr_counters = 0; + ctx->task = task; +} + +/* + * Cross CPU call to read the hardware counter + */ +static void __hw_perf_counter_read(void *info) +{ + hw_perf_counter_read(info); +} + +static u64 perf_read_counter(struct perf_counter *counter) +{ + /* + * If counter is enabled and currently active on a CPU, update the + * value in the counter structure: + */ + if (counter->active) { + smp_call_function_single(counter->oncpu, + __hw_perf_counter_read, counter, 1); + } + + return perf_read_counter_safe(counter); +} + +/* + * Cross CPU call to switch performance data pointers + */ +static void __perf_switch_irq_data(void *info) +{ + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct perf_counter *counter = info; + struct perf_counter_context *ctx = counter->ctx; + struct perf_data *oldirqdata = counter->irqdata; + + /* + * If this is a task context, we need to check whether it is + * the current task context of this cpu. If not it has been + * scheduled out before the smp call arrived. 
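+ *
+ * (Task contexts take ctx->lock around the pointer swap below; per CPU
+ * contexts need no lock, since we are running on the counter's CPU.)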
+ */ + if (ctx->task) { + if (cpuctx->task_ctx != ctx) + return; + spin_lock(&ctx->lock); + } + + /* Change the pointer NMI safe */ + atomic_long_set((atomic_long_t *)&counter->irqdata, + (unsigned long) counter->usrdata); + counter->usrdata = oldirqdata; + + if (ctx->task) + spin_unlock(&ctx->lock); +} + +static struct perf_data *perf_switch_irq_data(struct perf_counter *counter) +{ + struct perf_counter_context *ctx = counter->ctx; + struct perf_data *oldirqdata = counter->irqdata; + struct task_struct *task = ctx->task; + + if (!task) { + smp_call_function_single(counter->cpu, + __perf_switch_irq_data, + counter, 1); + return counter->usrdata; + } + +retry: + spin_lock_irq(&ctx->lock); + if (!counter->active) { + counter->irqdata = counter->usrdata; + counter->usrdata = oldirqdata; + spin_unlock_irq(&ctx->lock); + return oldirqdata; + } + spin_unlock_irq(&ctx->lock); + task_oncpu_function_call(task, __perf_switch_irq_data, counter); + /* Might have failed, because task was scheduled out */ + if (counter->irqdata == oldirqdata) + goto retry; + + return counter->usrdata; +} + +static void put_context(struct perf_counter_context *ctx) +{ + if (ctx->task) + put_task_struct(ctx->task); +} + +static struct perf_counter_context *find_get_context(pid_t pid, int cpu) +{ + struct perf_cpu_context *cpuctx; + struct perf_counter_context *ctx; + struct task_struct *task; + + /* + * If cpu is not a wildcard then this is a percpu counter: + */ + if (cpu != -1) { + /* Must be root to operate on a CPU counter: */ + if (!capable(CAP_SYS_ADMIN)) + return ERR_PTR(-EACCES); + + if (cpu < 0 || cpu > num_possible_cpus()) + return ERR_PTR(-EINVAL); + + /* + * We could be clever and allow to attach a counter to an + * offline CPU and activate it when the CPU comes up, but + * that's for later. + */ + if (!cpu_isset(cpu, cpu_online_map)) + return ERR_PTR(-ENODEV); + + cpuctx = &per_cpu(perf_cpu_context, cpu); + ctx = &cpuctx->ctx; + + WARN_ON_ONCE(ctx->task); + return ctx; + } + + rcu_read_lock(); + if (!pid) + task = current; + else + task = find_task_by_vpid(pid); + if (task) + get_task_struct(task); + rcu_read_unlock(); + + if (!task) + return ERR_PTR(-ESRCH); + + ctx = &task->perf_counter_ctx; + ctx->task = task; + + /* Reuse ptrace permission checks for now. */ + if (!ptrace_may_access(task, PTRACE_MODE_READ)) { + put_context(ctx); + return ERR_PTR(-EACCES); + } + + return ctx; +} + +/* + * Called when the last reference to the file is gone. + */ +static int perf_release(struct inode *inode, struct file *file) +{ + struct perf_counter *counter = file->private_data; + struct perf_counter_context *ctx = counter->ctx; + + file->private_data = NULL; + + mutex_lock(&counter->mutex); + + perf_remove_from_context(counter); + put_context(ctx); + + mutex_unlock(&counter->mutex); + + kfree(counter); + + return 0; +} + +/* + * Read the performance counter - simple non blocking version for now + */ +static ssize_t +perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) +{ + u64 cntval; + + if (count != sizeof(cntval)) + return -EINVAL; + + mutex_lock(&counter->mutex); + cntval = perf_read_counter(counter); + mutex_unlock(&counter->mutex); + + return put_user(cntval, (u64 __user *) buf) ? 
-EFAULT : sizeof(cntval); +} + +static ssize_t +perf_copy_usrdata(struct perf_data *usrdata, char __user *buf, size_t count) +{ + if (!usrdata->len) + return 0; + + count = min(count, (size_t)usrdata->len); + if (copy_to_user(buf, usrdata->data + usrdata->rd_idx, count)) + return -EFAULT; + + /* Adjust the counters */ + usrdata->len -= count; + if (!usrdata->len) + usrdata->rd_idx = 0; + else + usrdata->rd_idx += count; + + return count; +} + +static ssize_t +perf_read_irq_data(struct perf_counter *counter, + char __user *buf, + size_t count, + int nonblocking) +{ + struct perf_data *irqdata, *usrdata; + DECLARE_WAITQUEUE(wait, current); + ssize_t res; + + irqdata = counter->irqdata; + usrdata = counter->usrdata; + + if (usrdata->len + irqdata->len >= count) + goto read_pending; + + if (nonblocking) + return -EAGAIN; + + spin_lock_irq(&counter->waitq.lock); + __add_wait_queue(&counter->waitq, &wait); + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + if (usrdata->len + irqdata->len >= count) + break; + + if (signal_pending(current)) + break; + + spin_unlock_irq(&counter->waitq.lock); + schedule(); + spin_lock_irq(&counter->waitq.lock); + } + __remove_wait_queue(&counter->waitq, &wait); + __set_current_state(TASK_RUNNING); + spin_unlock_irq(&counter->waitq.lock); + + if (usrdata->len + irqdata->len < count) + return -ERESTARTSYS; +read_pending: + mutex_lock(&counter->mutex); + + /* Drain pending data first: */ + res = perf_copy_usrdata(usrdata, buf, count); + if (res < 0 || res == count) + goto out; + + /* Switch irq buffer: */ + usrdata = perf_switch_irq_data(counter); + if (perf_copy_usrdata(usrdata, buf + res, count - res) < 0) { + if (!res) + res = -EFAULT; + } else { + res = count; + } +out: + mutex_unlock(&counter->mutex); + + return res; +} + +static ssize_t +perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + struct perf_counter *counter = file->private_data; + + switch (counter->record_type) { + case PERF_RECORD_SIMPLE: + return perf_read_hw(counter, buf, count); + + case PERF_RECORD_IRQ: + case PERF_RECORD_GROUP: + return perf_read_irq_data(counter, buf, count, + file->f_flags & O_NONBLOCK); + } + return -EINVAL; +} + +static unsigned int perf_poll(struct file *file, poll_table *wait) +{ + struct perf_counter *counter = file->private_data; + unsigned int events = 0; + unsigned long flags; + + poll_wait(file, &counter->waitq, wait); + + spin_lock_irqsave(&counter->waitq.lock, flags); + if (counter->usrdata->len || counter->irqdata->len) + events |= POLLIN; + spin_unlock_irqrestore(&counter->waitq.lock, flags); + + return events; +} + +static const struct file_operations perf_fops = { + .release = perf_release, + .read = perf_read, + .poll = perf_poll, +}; + +/* + * Allocate and initialize a counter structure + */ +static struct perf_counter * +perf_counter_alloc(u32 hw_event_period, int cpu, u32 record_type) +{ + struct perf_counter *counter = kzalloc(sizeof(*counter), GFP_KERNEL); + + if (!counter) + return NULL; + + mutex_init(&counter->mutex); + INIT_LIST_HEAD(&counter->list); + init_waitqueue_head(&counter->waitq); + + counter->irqdata = &counter->data[0]; + counter->usrdata = &counter->data[1]; + counter->cpu = cpu; + counter->record_type = record_type; + counter->__irq_period = hw_event_period; + counter->wakeup_pending = 0; + + return counter; +} + +/** + * sys_perf_task_open - open a performance counter associate it to a task + * @hw_event_type: event type for monitoring/sampling... 
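+ * @hw_event_period: number of events before a blocked read() is woken
+ *                   up (zero means a non-blocking counter)
+ * @record_type: format of the data returned by read(), see
+ *               enum perf_record_type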
+ * @pid: target pid + */ +asmlinkage int +sys_perf_counter_open(u32 hw_event_type, + u32 hw_event_period, + u32 record_type, + pid_t pid, + int cpu) +{ + struct perf_counter_context *ctx; + struct perf_counter *counter; + int ret; + + ctx = find_get_context(pid, cpu); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + ret = -ENOMEM; + counter = perf_counter_alloc(hw_event_period, cpu, record_type); + if (!counter) + goto err_put_context; + + ret = hw_perf_counter_init(counter, hw_event_type); + if (ret) + goto err_free_put_context; + + perf_install_in_context(ctx, counter, cpu); + + ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0); + if (ret < 0) + goto err_remove_free_put_context; + + return ret; + +err_remove_free_put_context: + mutex_lock(&counter->mutex); + perf_remove_from_context(counter); + mutex_unlock(&counter->mutex); + +err_free_put_context: + kfree(counter); + +err_put_context: + put_context(ctx); + + return ret; +} + +static void __cpuinit perf_init_cpu(int cpu) +{ + struct perf_cpu_context *ctx; + + ctx = &per_cpu(perf_cpu_context, cpu); + spin_lock_init(&ctx->ctx.lock); + INIT_LIST_HEAD(&ctx->ctx.counters); + + mutex_lock(&perf_resource_mutex); + ctx->max_pertask = perf_max_counters - perf_reserved_percpu; + mutex_unlock(&perf_resource_mutex); + hw_perf_counter_setup(); +} + +#ifdef CONFIG_HOTPLUG_CPU +static void __perf_exit_cpu(void *info) +{ + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct perf_counter_context *ctx = &cpuctx->ctx; + struct perf_counter *counter, *tmp; + + list_for_each_entry_safe(counter, tmp, &ctx->counters, list) + __perf_remove_from_context(counter); + +} +static void perf_exit_cpu(int cpu) +{ + smp_call_function_single(cpu, __perf_exit_cpu, NULL, 1); +} +#else +static inline void perf_exit_cpu(int cpu) { } +#endif + +static int __cpuinit +perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) +{ + unsigned int cpu = (long)hcpu; + + switch (action) { + + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + perf_init_cpu(cpu); + break; + + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + perf_exit_cpu(cpu); + break; + + default: + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata perf_cpu_nb = { + .notifier_call = perf_cpu_notify, +}; + +static int __init perf_counter_init(void) +{ + perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, + (void *)(long)smp_processor_id()); + register_cpu_notifier(&perf_cpu_nb); + + return 0; +} +early_initcall(perf_counter_init); + +static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf) +{ + return sprintf(buf, "%d\n", perf_reserved_percpu); +} + +static ssize_t +perf_set_reserve_percpu(struct sysdev_class *class, + const char *buf, + size_t count) +{ + struct perf_cpu_context *cpuctx; + unsigned long val; + int err, cpu, mpt; + + err = strict_strtoul(buf, 10, &val); + if (err) + return err; + if (val > perf_max_counters) + return -EINVAL; + + mutex_lock(&perf_resource_mutex); + perf_reserved_percpu = val; + for_each_online_cpu(cpu) { + cpuctx = &per_cpu(perf_cpu_context, cpu); + spin_lock_irq(&cpuctx->ctx.lock); + mpt = min(perf_max_counters - cpuctx->ctx.nr_counters, + perf_max_counters - perf_reserved_percpu); + cpuctx->max_pertask = mpt; + spin_unlock_irq(&cpuctx->ctx.lock); + } + mutex_unlock(&perf_resource_mutex); + + return count; +} + +static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf) +{ + return sprintf(buf, "%d\n", perf_overcommit); +} + +static 
ssize_t +perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count) +{ + unsigned long val; + int err; + + err = strict_strtoul(buf, 10, &val); + if (err) + return err; + if (val > 1) + return -EINVAL; + + mutex_lock(&perf_resource_mutex); + perf_overcommit = val; + mutex_unlock(&perf_resource_mutex); + + return count; +} + +static SYSDEV_CLASS_ATTR( + reserve_percpu, + 0644, + perf_show_reserve_percpu, + perf_set_reserve_percpu + ); + +static SYSDEV_CLASS_ATTR( + overcommit, + 0644, + perf_show_overcommit, + perf_set_overcommit + ); + +static struct attribute *perfclass_attrs[] = { + &attr_reserve_percpu.attr, + &attr_overcommit.attr, + NULL +}; + +static struct attribute_group perfclass_attr_group = { + .attrs = perfclass_attrs, + .name = "perf_counters", +}; + +static int __init perf_counter_sysfs_init(void) +{ + return sysfs_create_group(&cpu_sysdev_class.kset.kobj, + &perfclass_attr_group); +} +device_initcall(perf_counter_sysfs_init); + diff --git a/kernel/sched.c b/kernel/sched.c index b7480fb5c3d..254d56de254 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2212,6 +2212,27 @@ static int sched_balance_self(int cpu, int flag) #endif /* CONFIG_SMP */ +/** + * task_oncpu_function_call - call a function on the cpu on which a task runs + * @p: the task to evaluate + * @func: the function to be called + * @info: the function call argument + * + * Calls the function @func when the task is currently running. This might + * be on the current CPU, which just calls the function directly + */ +void task_oncpu_function_call(struct task_struct *p, + void (*func) (void *info), void *info) +{ + int cpu; + + preempt_disable(); + cpu = task_cpu(p); + if (task_curr(p)) + smp_call_function_single(cpu, func, info, 1); + preempt_enable(); +} + /*** * try_to_wake_up - wake up a thread * @p: the to-be-woken-up thread @@ -2534,6 +2555,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) { fire_sched_out_preempt_notifiers(prev, next); + perf_counter_task_sched_out(prev, cpu_of(rq)); prepare_lock_switch(rq, next); prepare_arch_switch(next); } @@ -2574,6 +2596,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) */ prev_state = prev->state; finish_arch_switch(prev); + perf_counter_task_sched_in(current, cpu_of(rq)); finish_lock_switch(rq, prev); #ifdef CONFIG_SMP if (current->sched_class->post_schedule) @@ -4296,6 +4319,7 @@ void scheduler_tick(void) rq->idle_at_tick = idle_cpu(cpu); trigger_load_balance(rq, cpu); #endif + perf_counter_task_tick(curr, cpu); } #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index e14a2328170..4be8bbc7577 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -174,3 +174,6 @@ cond_syscall(compat_sys_timerfd_settime); cond_syscall(compat_sys_timerfd_gettime); cond_syscall(sys_eventfd); cond_syscall(sys_eventfd2); + +/* performance counters: */ +cond_syscall(sys_perf_counter_open); -- cgit v1.2.3 From e7bc62b6b3aeaa8849f8383e0cfb7ca6c003adc6 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 4 Dec 2008 20:13:45 +0100 Subject: performance counters: documentation Add more documentation about performance counters. 
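For quick reference, below is a minimal (hypothetical) user-space sketch
of the usage the document describes. It assumes the x86-64 syscall
number 295 wired up later in this series, and uses the raw syscall(2)
interface since no glibc wrapper exists yet:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#define __NR_perf_counter_open	295	/* x86-64, from this series */

	#define PERF_COUNT_INSTRUCTIONS	1	/* enum hw_event_types */
	#define PERF_RECORD_SIMPLE	0	/* enum perf_record_type */

	int main(void)
	{
		unsigned long long count;
		/*
		 * Simple (non-sampling) instruction counter, attached
		 * to the current task (pid == 0), on all CPUs (cpu == -1):
		 */
		int fd = syscall(__NR_perf_counter_open,
				 PERF_COUNT_INSTRUCTIONS, 0,
				 PERF_RECORD_SIMPLE, 0, -1);

		if (fd < 0)
			return 1;

		/* ... the workload to be measured runs here ... */

		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("instructions: %llu\n", count);
		return 0;
	}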
Signed-off-by: Ingo Molnar
---
 Documentation/perf-counters.txt | 104 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 104 insertions(+)
 create mode 100644 Documentation/perf-counters.txt

diff --git a/Documentation/perf-counters.txt b/Documentation/perf-counters.txt
new file mode 100644
index 00000000000..19033a0bb52
--- /dev/null
+++ b/Documentation/perf-counters.txt
@@ -0,0 +1,104 @@
+
+Performance Counters for Linux
+------------------------------
+
+Performance counters are special hardware registers available on most modern
+CPUs. These registers count the number of certain types of hw events - such
+as instructions executed, cache misses suffered, or branches mis-predicted -
+without slowing down the kernel or applications. These registers can also
+trigger interrupts when a threshold number of events have passed - and can
+thus be used to profile the code that runs on that CPU.
+
+The Linux Performance Counter subsystem provides an abstraction of these
+hardware capabilities. It provides per task and per CPU counters, and
+it provides event capabilities on top of those.
+
+Performance counters are accessed via special file descriptors.
+There's one file descriptor per virtual counter used.
+
+The special file descriptor is opened via the perf_counter_open()
+system call:
+
+   int
+   perf_counter_open(u32 hw_event_type,
+                     u32 hw_event_period,
+                     u32 record_type,
+                     pid_t pid,
+                     int cpu);
+
+The syscall returns the new fd. The fd can be used via the normal
+VFS system calls: read() can be used to read the counter, fcntl()
+can be used to set the blocking mode, etc.
+
+Multiple counters can be kept open at a time, and the counters
+can be poll()ed.
+
+When creating a new counter fd, 'hw_event_type' is one of:
+
+   enum hw_event_types {
+           PERF_COUNT_CYCLES,
+           PERF_COUNT_INSTRUCTIONS,
+           PERF_COUNT_CACHE_REFERENCES,
+           PERF_COUNT_CACHE_MISSES,
+           PERF_COUNT_BRANCH_INSTRUCTIONS,
+           PERF_COUNT_BRANCH_MISSES,
+   };
+
+These are standardized types of events that work uniformly on all CPUs
+that implement Performance Counters support under Linux. If a CPU is
+not able to count branch-misses, then the system call will return
+-EINVAL.
+
+[ Note: more hw_event_types are supported as well, but they are CPU
+  specific and are enumerated via /sys on a per CPU basis. Raw hw event
+  types can be passed in as negative numbers. For example, to count
+  "External bus cycles while bus lock signal asserted" events on Intel
+  Core CPUs, pass in a -0x4064 event type value. ]
+
+The parameter 'hw_event_period' is the number of events before waking up
+a read() that is blocked on a counter fd. A zero value means a non-blocking
+counter.
+
+'record_type' is the type of data that a read() will provide for the
+counter, and it can be one of:
+
+   enum perf_record_type {
+           PERF_RECORD_SIMPLE,
+           PERF_RECORD_IRQ,
+   };
+
+A "simple" counter is one that counts hardware events and allows
+them to be read out into a u64 count value. (read() returns 8 on
+a successful read of a simple counter.)
+
+An "irq" counter is one that will also provide IRQ context information:
+the IP of the interrupted context. In this case read() will return
+the 8-byte counter value, plus the Instruction Pointer address of the
+interrupted context.
+
+The 'pid' parameter allows the counter to be specific to a task:
+
+ pid == 0: if the pid parameter is zero, the counter is attached to the
+           current task.
+ + pid > 0: the counter is attached to a specific task (if the current task + has sufficient privilege to do so) + + pid < 0: all tasks are counted (per cpu counters) + +The 'cpu' parameter allows a counter to be made specific to a full +CPU: + + cpu >= 0: the counter is restricted to a specific CPU + cpu == -1: the counter counts on all CPUs + +Note: the combination of 'pid == -1' and 'cpu == -1' is not valid. + +A 'pid > 0' and 'cpu == -1' counter is a per task counter that counts +events of that task and 'follows' that task to whatever CPU the task +gets schedule to. Per task counters can be created by any user, for +their own tasks. + +A 'pid == -1' and 'cpu == x' counter is a per CPU counter that counts +all events on CPU-x. Per CPU counters need CAP_SYS_ADMIN privilege. + -- cgit v1.2.3 From 241771ef016b5c0c83cd7a4372a74321c973c1e6 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 3 Dec 2008 10:39:53 +0100 Subject: performance counters: x86 support Implement performance counters for x86 Intel CPUs. It's simplified right now: the PERFMON CPU feature is assumed, which is available in Core2 and later Intel CPUs. The design is flexible to be extended to more CPU types as well. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 + arch/x86/ia32/ia32entry.S | 3 +- arch/x86/include/asm/hardirq_32.h | 1 + arch/x86/include/asm/hw_irq.h | 2 + arch/x86/include/asm/intel_arch_perfmon.h | 34 +- arch/x86/include/asm/irq_vectors.h | 5 + arch/x86/include/asm/mach-default/entry_arch.h | 5 + arch/x86/include/asm/pda.h | 1 + arch/x86/include/asm/thread_info.h | 4 +- arch/x86/include/asm/unistd_32.h | 1 + arch/x86/include/asm/unistd_64.h | 3 +- arch/x86/kernel/apic.c | 2 + arch/x86/kernel/cpu/Makefile | 12 +- arch/x86/kernel/cpu/common.c | 2 + arch/x86/kernel/cpu/perf_counter.c | 571 +++++++++++++++++++++++++ arch/x86/kernel/entry_64.S | 5 + arch/x86/kernel/irq.c | 5 + arch/x86/kernel/irqinit_32.c | 3 + arch/x86/kernel/irqinit_64.c | 5 + arch/x86/kernel/signal.c | 7 +- arch/x86/kernel/syscall_table_32.S | 1 + 21 files changed, 652 insertions(+), 21 deletions(-) create mode 100644 arch/x86/kernel/cpu/perf_counter.c diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d4d4cb7629e..f2fdc186724 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -643,6 +643,7 @@ config X86_UP_IOAPIC config X86_LOCAL_APIC def_bool y depends on X86_64 || (X86_32 && (X86_UP_APIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH)) + select HAVE_PERF_COUNTERS config X86_IO_APIC def_bool y diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 256b00b6189..3c14ed07dc4 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -823,7 +823,8 @@ ia32_sys_call_table: .quad compat_sys_signalfd4 .quad sys_eventfd2 .quad sys_epoll_create1 - .quad sys_dup3 /* 330 */ + .quad sys_dup3 /* 330 */ .quad sys_pipe2 .quad sys_inotify_init1 + .quad sys_perf_counter_open ia32_syscall_end: diff --git a/arch/x86/include/asm/hardirq_32.h b/arch/x86/include/asm/hardirq_32.h index 5ca135e72f2..b3e475dc933 100644 --- a/arch/x86/include/asm/hardirq_32.h +++ b/arch/x86/include/asm/hardirq_32.h @@ -9,6 +9,7 @@ typedef struct { unsigned long idle_timestamp; unsigned int __nmi_count; /* arch dependent */ unsigned int apic_timer_irqs; /* arch dependent */ + unsigned int apic_perf_irqs; /* arch dependent */ unsigned int irq0_irqs; unsigned int irq_resched_count; unsigned int irq_call_count; diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 8de644b6b95..aa93e53b85e 100644 --- 
a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -30,6 +30,8 @@ /* Interrupt handlers registered during init_IRQ */ extern void apic_timer_interrupt(void); extern void error_interrupt(void); +extern void perf_counter_interrupt(void); + extern void spurious_interrupt(void); extern void thermal_interrupt(void); extern void reschedule_interrupt(void); diff --git a/arch/x86/include/asm/intel_arch_perfmon.h b/arch/x86/include/asm/intel_arch_perfmon.h index fa0fd068bc2..71598a9eab6 100644 --- a/arch/x86/include/asm/intel_arch_perfmon.h +++ b/arch/x86/include/asm/intel_arch_perfmon.h @@ -1,22 +1,24 @@ #ifndef _ASM_X86_INTEL_ARCH_PERFMON_H #define _ASM_X86_INTEL_ARCH_PERFMON_H -#define MSR_ARCH_PERFMON_PERFCTR0 0xc1 -#define MSR_ARCH_PERFMON_PERFCTR1 0xc2 +#define MSR_ARCH_PERFMON_PERFCTR0 0xc1 +#define MSR_ARCH_PERFMON_PERFCTR1 0xc2 -#define MSR_ARCH_PERFMON_EVENTSEL0 0x186 -#define MSR_ARCH_PERFMON_EVENTSEL1 0x187 +#define MSR_ARCH_PERFMON_EVENTSEL0 0x186 +#define MSR_ARCH_PERFMON_EVENTSEL1 0x187 -#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22) -#define ARCH_PERFMON_EVENTSEL_INT (1 << 20) -#define ARCH_PERFMON_EVENTSEL_OS (1 << 17) -#define ARCH_PERFMON_EVENTSEL_USR (1 << 16) +#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22) +#define ARCH_PERFMON_EVENTSEL_INT (1 << 20) +#define ARCH_PERFMON_EVENTSEL_OS (1 << 17) +#define ARCH_PERFMON_EVENTSEL_USR (1 << 16) -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c) -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0) +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ - (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) + (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) + +#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6 union cpuid10_eax { struct { @@ -28,4 +30,12 @@ union cpuid10_eax { unsigned int full; }; +#ifdef CONFIG_PERF_COUNTERS +extern void init_hw_perf_counters(void); +extern void perf_counters_lapic_init(int nmi); +#else +static inline void init_hw_perf_counters(void) { } +static inline void perf_counters_lapic_init(int nmi) { } +#endif + #endif /* _ASM_X86_INTEL_ARCH_PERFMON_H */ diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 0005adb0f94..b8d277f1252 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -86,6 +86,11 @@ */ #define LOCAL_TIMER_VECTOR 0xef +/* + * Performance monitoring interrupt vector: + */ +#define LOCAL_PERF_VECTOR 0xee + /* * First APIC vector available to drivers: (vectors 0x30-0xee) we * start at 0x31(0x41) to spread out vectors evenly between priority diff --git a/arch/x86/include/asm/mach-default/entry_arch.h b/arch/x86/include/asm/mach-default/entry_arch.h index 6b1add8e31d..ad31e5d90e9 100644 --- a/arch/x86/include/asm/mach-default/entry_arch.h +++ b/arch/x86/include/asm/mach-default/entry_arch.h @@ -25,10 +25,15 @@ BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) * a much simpler SMP time architecture: */ #ifdef CONFIG_X86_LOCAL_APIC + BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) +#ifdef CONFIG_PERF_COUNTERS +BUILD_INTERRUPT(perf_counter_interrupt, LOCAL_PERF_VECTOR) +#endif + #ifdef CONFIG_X86_MCE_P4THERMAL 
BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR) #endif diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index 2fbfff88df3..90a8d9d4206 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -30,6 +30,7 @@ struct x8664_pda { short isidle; struct mm_struct *active_mm; unsigned apic_timer_irqs; + unsigned apic_perf_irqs; unsigned irq0_irqs; unsigned irq_resched_count; unsigned irq_call_count; diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index e44d379faad..810bf266d13 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -80,6 +80,7 @@ struct thread_info { #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ +#define TIF_PERF_COUNTERS 11 /* notify perf counter work */ #define TIF_NOTSC 16 /* TSC is not accessible in userland */ #define TIF_IA32 17 /* 32bit process */ #define TIF_FORK 18 /* ret_from_fork */ @@ -103,6 +104,7 @@ struct thread_info { #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) +#define _TIF_PERF_COUNTERS (1 << TIF_PERF_COUNTERS) #define _TIF_NOTSC (1 << TIF_NOTSC) #define _TIF_IA32 (1 << TIF_IA32) #define _TIF_FORK (1 << TIF_FORK) @@ -135,7 +137,7 @@ struct thread_info { /* Only used for 64 bit */ #define _TIF_DO_NOTIFY_MASK \ - (_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_NOTIFY_RESUME) + (_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_PERF_COUNTERS|_TIF_NOTIFY_RESUME) /* flags to check in __switch_to() */ #define _TIF_WORK_CTXSW \ diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h index f2bba78430a..7e47658b0a6 100644 --- a/arch/x86/include/asm/unistd_32.h +++ b/arch/x86/include/asm/unistd_32.h @@ -338,6 +338,7 @@ #define __NR_dup3 330 #define __NR_pipe2 331 #define __NR_inotify_init1 332 +#define __NR_perf_counter_open 333 #ifdef __KERNEL__ diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h index d2e415e6666..53025feaf88 100644 --- a/arch/x86/include/asm/unistd_64.h +++ b/arch/x86/include/asm/unistd_64.h @@ -653,7 +653,8 @@ __SYSCALL(__NR_dup3, sys_dup3) __SYSCALL(__NR_pipe2, sys_pipe2) #define __NR_inotify_init1 294 __SYSCALL(__NR_inotify_init1, sys_inotify_init1) - +#define __NR_perf_counter_open 295 +__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open) #ifndef __NO_STUBS #define __ARCH_WANT_OLD_READDIR diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 16f94879b52..8ab8c185867 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -31,6 +31,7 @@ #include #include +#include #include #include #include @@ -1147,6 +1148,7 @@ void __cpuinit setup_local_APIC(void) apic_write(APIC_ESR, 0); } #endif + perf_counters_lapic_init(0); preempt_disable(); diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 82ec6075c05..89e53361fe2 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -1,5 +1,5 @@ # -# Makefile for x86-compatible CPU details and quirks +# Makefile for x86-compatible CPU details, features and quirks # obj-y := intel_cacheinfo.o addon_cpuid_features.o @@ -16,11 +16,13 @@ obj-$(CONFIG_CPU_SUP_CENTAUR_64) += centaur_64.o obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o -obj-$(CONFIG_X86_MCE) += mcheck/ -obj-$(CONFIG_MTRR) += mtrr/ -obj-$(CONFIG_CPU_FREQ) += cpufreq/ 
+obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o -obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o +obj-$(CONFIG_X86_MCE) += mcheck/ +obj-$(CONFIG_MTRR) += mtrr/ +obj-$(CONFIG_CPU_FREQ) += cpufreq/ + +obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o quiet_cmd_mkcapflags = MKCAP $@ cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index b9c9ea0217a..4461011db47 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -750,6 +751,7 @@ void __init identify_boot_cpu(void) #else vgetcpu_set_mode(); #endif + init_hw_perf_counters(); } void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c new file mode 100644 index 00000000000..82440cbed0e --- /dev/null +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -0,0 +1,571 @@ +/* + * Performance counter x86 architecture code + * + * Copyright(C) 2008 Thomas Gleixner + * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar + * + * For licencing details see kernel-base/COPYING + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +static bool perf_counters_initialized __read_mostly; + +/* + * Number of (generic) HW counters: + */ +static int nr_hw_counters __read_mostly; +static u32 perf_counter_mask __read_mostly; + +/* No support for fixed function counters yet */ + +#define MAX_HW_COUNTERS 8 + +struct cpu_hw_counters { + struct perf_counter *counters[MAX_HW_COUNTERS]; + unsigned long used[BITS_TO_LONGS(MAX_HW_COUNTERS)]; + int enable_all; +}; + +/* + * Intel PerfMon v3. Used on Core2 and later. + */ +static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters); + +const int intel_perfmon_event_map[] = +{ + [PERF_COUNT_CYCLES] = 0x003c, + [PERF_COUNT_INSTRUCTIONS] = 0x00c0, + [PERF_COUNT_CACHE_REFERENCES] = 0x4f2e, + [PERF_COUNT_CACHE_MISSES] = 0x412e, + [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x00c4, + [PERF_COUNT_BRANCH_MISSES] = 0x00c5, +}; + +const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map); + +/* + * Setup the hardware configuration for a given hw_event_type + */ +int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type) +{ + struct hw_perf_counter *hwc = &counter->hw; + + if (unlikely(!perf_counters_initialized)) + return -EINVAL; + + /* + * Count user events, and generate PMC IRQs: + * (keep 'enabled' bit clear for now) + */ + hwc->config = ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT; + + /* + * If privileged enough, count OS events too, and allow + * NMI events as well: + */ + hwc->nmi = 0; + if (capable(CAP_SYS_ADMIN)) { + hwc->config |= ARCH_PERFMON_EVENTSEL_OS; + if (hw_event_type & PERF_COUNT_NMI) + hwc->nmi = 1; + } + + hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0; + hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0; + + hwc->irq_period = counter->__irq_period; + /* + * Intel PMCs cannot be accessed sanely above 32 bit width, + * so we install an artificial 1<<31 period regardless of + * the generic counter period: + */ + if (!hwc->irq_period) + hwc->irq_period = 0x7FFFFFFF; + + hwc->next_count = -((s32) hwc->irq_period); + + /* + * Negative event types mean raw encoded event+umask values: + */ + if (hw_event_type < 0) { + counter->hw_event_type = -hw_event_type; + counter->hw_event_type &= ~PERF_COUNT_NMI; + } else { + hw_event_type &= ~PERF_COUNT_NMI; + if (hw_event_type >= 
max_intel_perfmon_events) + return -EINVAL; + /* + * The generic map: + */ + counter->hw_event_type = intel_perfmon_event_map[hw_event_type]; + } + hwc->config |= counter->hw_event_type; + counter->wakeup_pending = 0; + + return 0; +} + +static void __hw_perf_enable_all(void) +{ + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0); +} + +void hw_perf_enable_all(void) +{ + struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + + cpuc->enable_all = 1; + __hw_perf_enable_all(); +} + +void hw_perf_disable_all(void) +{ + struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + + cpuc->enable_all = 0; + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0); +} + +static DEFINE_PER_CPU(u64, prev_next_count[MAX_HW_COUNTERS]); + +static void __hw_perf_counter_enable(struct hw_perf_counter *hwc, int idx) +{ + per_cpu(prev_next_count[idx], smp_processor_id()) = hwc->next_count; + + wrmsr(hwc->counter_base + idx, hwc->next_count, 0); + wrmsr(hwc->config_base + idx, hwc->config, 0); +} + +void hw_perf_counter_enable(struct perf_counter *counter) +{ + struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + struct hw_perf_counter *hwc = &counter->hw; + int idx = hwc->idx; + + /* Try to get the previous counter again */ + if (test_and_set_bit(idx, cpuc->used)) { + idx = find_first_zero_bit(cpuc->used, nr_hw_counters); + set_bit(idx, cpuc->used); + hwc->idx = idx; + } + + perf_counters_lapic_init(hwc->nmi); + + wrmsr(hwc->config_base + idx, + hwc->config & ~ARCH_PERFMON_EVENTSEL0_ENABLE, 0); + + cpuc->counters[idx] = counter; + counter->hw.config |= ARCH_PERFMON_EVENTSEL0_ENABLE; + __hw_perf_counter_enable(hwc, idx); +} + +#ifdef CONFIG_X86_64 +static inline void atomic64_counter_set(struct perf_counter *counter, u64 val) +{ + atomic64_set(&counter->count, val); +} + +static inline u64 atomic64_counter_read(struct perf_counter *counter) +{ + return atomic64_read(&counter->count); +} +#else +/* + * Todo: add proper atomic64_t support to 32-bit x86: + */ +static inline void atomic64_counter_set(struct perf_counter *counter, u64 val64) +{ + u32 *val32 = (void *)&val64; + + atomic_set(counter->count32 + 0, *(val32 + 0)); + atomic_set(counter->count32 + 1, *(val32 + 1)); +} + +static inline u64 atomic64_counter_read(struct perf_counter *counter) +{ + return atomic_read(counter->count32 + 0) | + (u64) atomic_read(counter->count32 + 1) << 32; +} +#endif + +static void __hw_perf_save_counter(struct perf_counter *counter, + struct hw_perf_counter *hwc, int idx) +{ + s64 raw = -1; + s64 delta; + int err; + + /* + * Get the raw hw counter value: + */ + err = rdmsrl_safe(hwc->counter_base + idx, &raw); + WARN_ON_ONCE(err); + + /* + * Rebase it to zero (it started counting at -irq_period), + * to see the delta since ->prev_count: + */ + delta = (s64)hwc->irq_period + (s64)(s32)raw; + + atomic64_counter_set(counter, hwc->prev_count + delta); + + /* + * Adjust the ->prev_count offset - if we went beyond + * irq_period of units, then we got an IRQ and the counter + * was set back to -irq_period: + */ + while (delta >= (s64)hwc->irq_period) { + hwc->prev_count += hwc->irq_period; + delta -= (s64)hwc->irq_period; + } + + /* + * Calculate the next raw counter value we'll write into + * the counter at the next sched-in time: + */ + delta -= (s64)hwc->irq_period; + + hwc->next_count = (s32)delta; +} + +void perf_counter_print_debug(void) +{ + u64 ctrl, status, overflow, pmc_ctrl, pmc_count, next_count; + int cpu, err, idx; + + local_irq_disable(); + + cpu = smp_processor_id(); + + err = 
rdmsrl_safe(MSR_CORE_PERF_GLOBAL_CTRL, &ctrl); + WARN_ON_ONCE(err); + + err = rdmsrl_safe(MSR_CORE_PERF_GLOBAL_STATUS, &status); + WARN_ON_ONCE(err); + + err = rdmsrl_safe(MSR_CORE_PERF_GLOBAL_OVF_CTRL, &overflow); + WARN_ON_ONCE(err); + + printk(KERN_INFO "\n"); + printk(KERN_INFO "CPU#%d: ctrl: %016llx\n", cpu, ctrl); + printk(KERN_INFO "CPU#%d: status: %016llx\n", cpu, status); + printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow); + + for (idx = 0; idx < nr_hw_counters; idx++) { + err = rdmsrl_safe(MSR_ARCH_PERFMON_EVENTSEL0 + idx, &pmc_ctrl); + WARN_ON_ONCE(err); + + err = rdmsrl_safe(MSR_ARCH_PERFMON_PERFCTR0 + idx, &pmc_count); + WARN_ON_ONCE(err); + + next_count = per_cpu(prev_next_count[idx], cpu); + + printk(KERN_INFO "CPU#%d: PMC%d ctrl: %016llx\n", + cpu, idx, pmc_ctrl); + printk(KERN_INFO "CPU#%d: PMC%d count: %016llx\n", + cpu, idx, pmc_count); + printk(KERN_INFO "CPU#%d: PMC%d next: %016llx\n", + cpu, idx, next_count); + } + local_irq_enable(); +} + +void hw_perf_counter_disable(struct perf_counter *counter) +{ + struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + struct hw_perf_counter *hwc = &counter->hw; + unsigned int idx = hwc->idx; + + counter->hw.config &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; + wrmsr(hwc->config_base + idx, hwc->config, 0); + + clear_bit(idx, cpuc->used); + cpuc->counters[idx] = NULL; + __hw_perf_save_counter(counter, hwc, idx); +} + +void hw_perf_counter_read(struct perf_counter *counter) +{ + struct hw_perf_counter *hwc = &counter->hw; + unsigned long addr = hwc->counter_base + hwc->idx; + s64 offs, val = -1LL; + s32 val32; + int err; + + /* Careful: NMI might modify the counter offset */ + do { + offs = hwc->prev_count; + err = rdmsrl_safe(addr, &val); + WARN_ON_ONCE(err); + } while (offs != hwc->prev_count); + + val32 = (s32) val; + val = (s64)hwc->irq_period + (s64)val32; + atomic64_counter_set(counter, hwc->prev_count + val); +} + +static void perf_store_irq_data(struct perf_counter *counter, u64 data) +{ + struct perf_data *irqdata = counter->irqdata; + + if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) { + irqdata->overrun++; + } else { + u64 *p = (u64 *) &irqdata->data[irqdata->len]; + + *p = data; + irqdata->len += sizeof(u64); + } +} + +static void perf_save_and_restart(struct perf_counter *counter) +{ + struct hw_perf_counter *hwc = &counter->hw; + int idx = hwc->idx; + + wrmsr(hwc->config_base + idx, + hwc->config & ~ARCH_PERFMON_EVENTSEL0_ENABLE, 0); + + if (hwc->config & ARCH_PERFMON_EVENTSEL0_ENABLE) { + __hw_perf_save_counter(counter, hwc, idx); + __hw_perf_counter_enable(hwc, idx); + } +} + +static void +perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown) +{ + struct perf_counter_context *ctx = leader->ctx; + struct perf_counter *counter; + int bit; + + list_for_each_entry(counter, &ctx->counters, list) { + if (counter->record_type != PERF_RECORD_SIMPLE || + counter == leader) + continue; + + if (counter->active) { + /* + * When counter was not in the overflow mask, we have to + * read it from hardware. We read it as well, when it + * has not been read yet and clear the bit in the + * status mask. 
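+ *
+ * (I.e. the value is refreshed from hardware unless the interrupt
+ * handler's main loop has already saved and restarted this counter.)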
+ */ + bit = counter->hw.idx; + if (!test_bit(bit, (unsigned long *) overflown) || + test_bit(bit, (unsigned long *) status)) { + clear_bit(bit, (unsigned long *) status); + perf_save_and_restart(counter); + } + } + perf_store_irq_data(leader, counter->hw_event_type); + perf_store_irq_data(leader, atomic64_counter_read(counter)); + } +} + +/* + * This handler is triggered by the local APIC, so the APIC IRQ handling + * rules apply: + */ +static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi) +{ + int bit, cpu = smp_processor_id(); + struct cpu_hw_counters *cpuc; + u64 ack, status; + + rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); + if (!status) { + ack_APIC_irq(); + return; + } + + /* Disable counters globally */ + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0); + ack_APIC_irq(); + + cpuc = &per_cpu(cpu_hw_counters, cpu); + +again: + ack = status; + for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) { + struct perf_counter *counter = cpuc->counters[bit]; + + clear_bit(bit, (unsigned long *) &status); + if (!counter) + continue; + + perf_save_and_restart(counter); + + switch (counter->record_type) { + case PERF_RECORD_SIMPLE: + continue; + case PERF_RECORD_IRQ: + perf_store_irq_data(counter, instruction_pointer(regs)); + break; + case PERF_RECORD_GROUP: + perf_store_irq_data(counter, counter->hw_event_type); + perf_store_irq_data(counter, + atomic64_counter_read(counter)); + perf_handle_group(counter, &status, &ack); + break; + } + /* + * From NMI context we cannot call into the scheduler to + * do a task wakeup - but we mark these counters as + * wakeup_pending and initate a wakeup callback: + */ + if (nmi) { + counter->wakeup_pending = 1; + set_tsk_thread_flag(current, TIF_PERF_COUNTERS); + } else { + wake_up(&counter->waitq); + } + } + + wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack, 0); + + /* + * Repeat if there is more work to be done: + */ + rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); + if (status) + goto again; + + /* + * Do not reenable when global enable is off: + */ + if (cpuc->enable_all) + __hw_perf_enable_all(); +} + +void smp_perf_counter_interrupt(struct pt_regs *regs) +{ + irq_enter(); +#ifdef CONFIG_X86_64 + add_pda(apic_perf_irqs, 1); +#else + per_cpu(irq_stat, smp_processor_id()).apic_perf_irqs++; +#endif + apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR); + __smp_perf_counter_interrupt(regs, 0); + + irq_exit(); +} + +/* + * This handler is triggered by NMI contexts: + */ +void perf_counter_notify(struct pt_regs *regs) +{ + struct cpu_hw_counters *cpuc; + unsigned long flags; + int bit, cpu; + + local_irq_save(flags); + cpu = smp_processor_id(); + cpuc = &per_cpu(cpu_hw_counters, cpu); + + for_each_bit(bit, cpuc->used, nr_hw_counters) { + struct perf_counter *counter = cpuc->counters[bit]; + + if (!counter) + continue; + + if (counter->wakeup_pending) { + counter->wakeup_pending = 0; + wake_up(&counter->waitq); + } + } + + local_irq_restore(flags); +} + +void __cpuinit perf_counters_lapic_init(int nmi) +{ + u32 apic_val; + + if (!perf_counters_initialized) + return; + /* + * Enable the performance counter vector in the APIC LVT: + */ + apic_val = apic_read(APIC_LVTERR); + + apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED); + if (nmi) + apic_write(APIC_LVTPC, APIC_DM_NMI); + else + apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR); + apic_write(APIC_LVTERR, apic_val); +} + +static int __kprobes +perf_counter_nmi_handler(struct notifier_block *self, + unsigned long cmd, void *__args) +{ + struct die_args *args = __args; + struct pt_regs *regs; + + if (likely(cmd != 
DIE_NMI_IPI)) + return NOTIFY_DONE; + + regs = args->regs; + + apic_write(APIC_LVTPC, APIC_DM_NMI); + __smp_perf_counter_interrupt(regs, 1); + + return NOTIFY_STOP; +} + +static __read_mostly struct notifier_block perf_counter_nmi_notifier = { + .notifier_call = perf_counter_nmi_handler +}; + +void __init init_hw_perf_counters(void) +{ + union cpuid10_eax eax; + unsigned int unused; + unsigned int ebx; + + if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) + return; + + /* + * Check whether the Architectural PerfMon supports + * Branch Misses Retired Event or not. + */ + cpuid(10, &(eax.full), &ebx, &unused, &unused); + if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) + return; + + printk(KERN_INFO "Intel Performance Monitoring support detected.\n"); + + printk(KERN_INFO "... version: %d\n", eax.split.version_id); + printk(KERN_INFO "... num_counters: %d\n", eax.split.num_counters); + nr_hw_counters = eax.split.num_counters; + if (nr_hw_counters > MAX_HW_COUNTERS) { + nr_hw_counters = MAX_HW_COUNTERS; + WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", + nr_hw_counters, MAX_HW_COUNTERS); + } + perf_counter_mask = (1 << nr_hw_counters) - 1; + perf_max_counters = nr_hw_counters; + + printk(KERN_INFO "... bit_width: %d\n", eax.split.bit_width); + printk(KERN_INFO "... mask_length: %d\n", eax.split.mask_length); + + perf_counters_lapic_init(0); + register_die_notifier(&perf_counter_nmi_notifier); + + perf_counters_initialized = true; +} diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 3194636a429..fc013cfde30 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -984,6 +984,11 @@ apicinterrupt ERROR_APIC_VECTOR \ apicinterrupt SPURIOUS_APIC_VECTOR \ spurious_interrupt smp_spurious_interrupt +#ifdef CONFIG_PERF_COUNTERS +apicinterrupt LOCAL_PERF_VECTOR \ + perf_counter_interrupt smp_perf_counter_interrupt +#endif + /* * Exception entry points. 
*/ diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index d1d4dc52f64..d92bc71e41a 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -56,6 +56,10 @@ static int show_other_interrupts(struct seq_file *p) for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs); seq_printf(p, " Local timer interrupts\n"); + seq_printf(p, "CNT: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs); + seq_printf(p, " Performance counter interrupts\n"); #endif #ifdef CONFIG_SMP seq_printf(p, "RES: "); @@ -160,6 +164,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu) #ifdef CONFIG_X86_LOCAL_APIC sum += irq_stats(cpu)->apic_timer_irqs; + sum += irq_stats(cpu)->apic_perf_irqs; #endif #ifdef CONFIG_SMP sum += irq_stats(cpu)->irq_resched_count; diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index 607db63044a..6a33b5e3016 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c @@ -160,6 +160,9 @@ void __init native_init_IRQ(void) /* IPI vectors for APIC spurious and error interrupts */ alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); +# ifdef CONFIG_PERF_COUNTERS + alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt); +# endif #endif #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL) diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index 8670b3ce626..91d785c25ad 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c @@ -138,6 +138,11 @@ static void __init apic_intr_init(void) /* IPI vectors for APIC spurious and error interrupts */ alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); + + /* Performance monitoring interrupt: */ +#ifdef CONFIG_PERF_COUNTERS + alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt); +#endif } void __init native_init_IRQ(void) diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index b1cc6da6420..dee553c503d 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -6,7 +6,7 @@ * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes * 2000-2002 x86-64 support by Andi Kleen */ - +#include #include #include #include @@ -891,6 +891,11 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) tracehook_notify_resume(regs); } + if (thread_info_flags & _TIF_PERF_COUNTERS) { + clear_thread_flag(TIF_PERF_COUNTERS); + perf_counter_notify(regs); + } + #ifdef CONFIG_X86_32 clear_thread_flag(TIF_IRET); #endif /* CONFIG_X86_32 */ diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index d44395ff34c..496726ddcea 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S @@ -332,3 +332,4 @@ ENTRY(sys_call_table) .long sys_dup3 /* 330 */ .long sys_pipe2 .long sys_inotify_init1 + .long sys_perf_counter_open -- cgit v1.2.3 From 87b9cf4623ad4e5fc009e48c020593dffd5d3793 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 8 Dec 2008 14:20:16 +0100 Subject: x86, perfcounters: read out MSR_CORE_PERF_GLOBAL_STATUS with counters disabled Impact: make perfcounter NMI and IRQ sequence more robust Make __smp_perf_counter_interrupt() a bit more conservative: first disable all counters, then read out the status. Most invocations are because there are real events, so there's no performance impact. Code flow gets a bit simpler as well this way. 
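Condensed, the hardened sequence this patch establishes looks like the sketch below (the overflow-handling loop and ack bookkeeping are omitted; the diff that follows is authoritative):

        static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
        {
                struct cpu_hw_counters *cpuc;
                u64 status;

                /* Freeze all counters first, so the status snapshot is stable: */
                wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
                ack_APIC_irq();

                cpuc = &per_cpu(cpu_hw_counters, smp_processor_id());

                /* Only now read out the overflow status: */
                rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
                if (!status)
                        goto out;

                /* ... handle and ack the overflown counters ... */
        out:
                /* Do not reenable when global enable is off: */
                if (cpuc->enable_all)
                        __hw_perf_enable_all();
        }

Reading the status only after the global disable means a counter cannot overflow between the status read and the disable, which is what makes the flow simpler as well as more robust.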
Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 82440cbed0e..615e953208e 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -383,18 +383,16 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi) struct cpu_hw_counters *cpuc; u64 ack, status; - rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); - if (!status) { - ack_APIC_irq(); - return; - } - /* Disable counters globally */ wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0); ack_APIC_irq(); cpuc = &per_cpu(cpu_hw_counters, cpu); + rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); + if (!status) + goto out; + again: ack = status; for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) { @@ -440,7 +438,7 @@ again: rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); if (status) goto again; - +out: /* * Do not reenable when global enable is off: */ -- cgit v1.2.3 From 4c59e4676dc95f6f58a2cff5390b2699fa5b5549 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 8 Dec 2008 19:38:33 +0100 Subject: perfcounters: select ANON_INODES The perfcounters subsystem depends on CONFIG_ANON_INODES facilities, so make sure it's selected. Signed-off-by: Ingo Molnar --- init/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/init/Kconfig b/init/Kconfig index 78bede218f1..7d147a36e96 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -741,6 +741,7 @@ config PERF_COUNTERS bool "Kernel Performance Counters" depends on HAVE_PERF_COUNTERS default y + select ANON_INODES help Enable kernel support for performance counter hardware. -- cgit v1.2.3 From 7e2ae34749edf19e76e594b9c4b2cdde1066afc5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 9 Dec 2008 11:40:46 +0100 Subject: perfcounters, x86: simplify disable/enable of counters Impact: fix spurious missed counter wakeups In the case of NMI events, close a race window that can occur if an NMI hits counter code that temporarily disables+enables a counter, and the NMI leaks into the disabled section. 
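The core of the fix, condensed from the diff below: the re-arm decision is keyed off the enable bit read back from the hardware itself, so an NMI that lands inside a disable/enable window cannot spuriously re-enable a counter that the interrupted code meant to keep disabled:

        static void perf_save_and_restart(struct perf_counter *counter)
        {
                struct hw_perf_counter *hwc = &counter->hw;
                int idx = hwc->idx;
                u64 pmc_ctrl;
                int err;

                /* Ask the hardware, not a cached copy, whether we were enabled: */
                err = rdmsrl_safe(MSR_ARCH_PERFMON_EVENTSEL0 + idx, &pmc_ctrl);
                WARN_ON_ONCE(err);

                __hw_perf_save_counter(counter, hwc, idx);
                __hw_perf_counter_set_period(hwc, idx);

                /* Re-enable only if the counter was live when the NMI hit: */
                if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE)
                        __hw_perf_counter_enable(hwc, idx);
        }

Splitting period-setup out of __hw_perf_counter_enable() is what makes this possible: the NMI path can reprogram the period without implicitly flipping the enable bit.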
Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 40 ++++++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 615e953208e..7d528ffc2d2 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -136,14 +136,25 @@ void hw_perf_disable_all(void) wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0); } +static inline void +__hw_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx) +{ + wrmsr(hwc->config_base + idx, hwc->config, 0); +} + static DEFINE_PER_CPU(u64, prev_next_count[MAX_HW_COUNTERS]); -static void __hw_perf_counter_enable(struct hw_perf_counter *hwc, int idx) +static void __hw_perf_counter_set_period(struct hw_perf_counter *hwc, int idx) { per_cpu(prev_next_count[idx], smp_processor_id()) = hwc->next_count; wrmsr(hwc->counter_base + idx, hwc->next_count, 0); - wrmsr(hwc->config_base + idx, hwc->config, 0); +} + +static void __hw_perf_counter_enable(struct hw_perf_counter *hwc, int idx) +{ + wrmsr(hwc->config_base + idx, + hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0); } void hw_perf_counter_enable(struct perf_counter *counter) @@ -161,11 +172,11 @@ void hw_perf_counter_enable(struct perf_counter *counter) perf_counters_lapic_init(hwc->nmi); - wrmsr(hwc->config_base + idx, - hwc->config & ~ARCH_PERFMON_EVENTSEL0_ENABLE, 0); + __hw_perf_counter_disable(hwc, idx); cpuc->counters[idx] = counter; - counter->hw.config |= ARCH_PERFMON_EVENTSEL0_ENABLE; + + __hw_perf_counter_set_period(hwc, idx); __hw_perf_counter_enable(hwc, idx); } @@ -286,8 +297,7 @@ void hw_perf_counter_disable(struct perf_counter *counter) struct hw_perf_counter *hwc = &counter->hw; unsigned int idx = hwc->idx; - counter->hw.config &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; - wrmsr(hwc->config_base + idx, hwc->config, 0); + __hw_perf_counter_disable(hwc, idx); clear_bit(idx, cpuc->used); cpuc->counters[idx] = NULL; @@ -328,18 +338,24 @@ static void perf_store_irq_data(struct perf_counter *counter, u64 data) } } +/* + * NMI-safe enable method: + */ static void perf_save_and_restart(struct perf_counter *counter) { struct hw_perf_counter *hwc = &counter->hw; int idx = hwc->idx; + u64 pmc_ctrl; + int err; - wrmsr(hwc->config_base + idx, - hwc->config & ~ARCH_PERFMON_EVENTSEL0_ENABLE, 0); + err = rdmsrl_safe(MSR_ARCH_PERFMON_EVENTSEL0 + idx, &pmc_ctrl); + WARN_ON_ONCE(err); - if (hwc->config & ARCH_PERFMON_EVENTSEL0_ENABLE) { - __hw_perf_save_counter(counter, hwc, idx); + __hw_perf_save_counter(counter, hwc, idx); + __hw_perf_counter_set_period(hwc, idx); + + if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE) __hw_perf_counter_enable(hwc, idx); - } } static void -- cgit v1.2.3 From 1e12567678054bc1d4c944ecfad17624b3e49345 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 9 Dec 2008 12:18:18 +0100 Subject: perfcounters, x86: clean up debug code Impact: cleanup Get rid of unused debug code. 
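A sketch of the resulting pattern (paraphrasing the diff below): once init_hw_perf_counters() has verified the PMU exists, the MSRs are known-good, so plain rdmsrl() replaces the rdmsrl_safe()/WARN_ON_ONCE() pairs, with an early bail-out guarding the uninitialized case:

        void perf_counter_print_debug(void)
        {
                u64 ctrl;

                /* Nothing was detected at init time - nothing to dump: */
                if (!nr_hw_counters)
                        return;

                /* The MSRs are known to exist, so the #GP-safe variant is unneeded: */
                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
                /* ... likewise for the status, overflow and per-PMC MSRs ... */
        }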
Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 35 ++++++++++++----------------------- 1 file changed, 12 insertions(+), 23 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 7d528ffc2d2..919ec46679b 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -214,13 +214,11 @@ static void __hw_perf_save_counter(struct perf_counter *counter, { s64 raw = -1; s64 delta; - int err; /* * Get the raw hw counter value: */ - err = rdmsrl_safe(hwc->counter_base + idx, &raw); - WARN_ON_ONCE(err); + rdmsrl(hwc->counter_base + idx, raw); /* * Rebase it to zero (it started counting at -irq_period), @@ -252,20 +250,18 @@ static void __hw_perf_save_counter(struct perf_counter *counter, void perf_counter_print_debug(void) { u64 ctrl, status, overflow, pmc_ctrl, pmc_count, next_count; - int cpu, err, idx; + int cpu, idx; + + if (!nr_hw_counters) + return; local_irq_disable(); cpu = smp_processor_id(); - err = rdmsrl_safe(MSR_CORE_PERF_GLOBAL_CTRL, &ctrl); - WARN_ON_ONCE(err); - - err = rdmsrl_safe(MSR_CORE_PERF_GLOBAL_STATUS, &status); - WARN_ON_ONCE(err); - - err = rdmsrl_safe(MSR_CORE_PERF_GLOBAL_OVF_CTRL, &overflow); - WARN_ON_ONCE(err); + rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); + rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); + rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); printk(KERN_INFO "\n"); printk(KERN_INFO "CPU#%d: ctrl: %016llx\n", cpu, ctrl); @@ -273,11 +269,8 @@ void perf_counter_print_debug(void) printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow); for (idx = 0; idx < nr_hw_counters; idx++) { - err = rdmsrl_safe(MSR_ARCH_PERFMON_EVENTSEL0 + idx, &pmc_ctrl); - WARN_ON_ONCE(err); - - err = rdmsrl_safe(MSR_ARCH_PERFMON_PERFCTR0 + idx, &pmc_count); - WARN_ON_ONCE(err); + rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl); + rdmsrl(MSR_ARCH_PERFMON_PERFCTR0 + idx, pmc_count); next_count = per_cpu(prev_next_count[idx], cpu); @@ -310,13 +303,11 @@ void hw_perf_counter_read(struct perf_counter *counter) unsigned long addr = hwc->counter_base + hwc->idx; s64 offs, val = -1LL; s32 val32; - int err; /* Careful: NMI might modify the counter offset */ do { offs = hwc->prev_count; - err = rdmsrl_safe(addr, &val); - WARN_ON_ONCE(err); + rdmsrl(addr, val); } while (offs != hwc->prev_count); val32 = (s32) val; @@ -346,10 +337,8 @@ static void perf_save_and_restart(struct perf_counter *counter) struct hw_perf_counter *hwc = &counter->hw; int idx = hwc->idx; u64 pmc_ctrl; - int err; - err = rdmsrl_safe(MSR_ARCH_PERFMON_EVENTSEL0 + idx, &pmc_ctrl); - WARN_ON_ONCE(err); + rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl); __hw_perf_save_counter(counter, hwc, idx); __hw_perf_counter_set_period(hwc, idx); -- cgit v1.2.3 From 43874d238d5f208854a73c3225ca2a22833eec8b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 9 Dec 2008 12:23:59 +0100 Subject: perfcounters: consolidate global-disable codepaths Impact: cleanup Simplify global disable handling. 
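Condensed, the consolidated handler saves and restores the global-control MSR around its work instead of tracking a separate per-cpu enable_all flag (a sketch of the flow in the diff below):

        static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
        {
                u64 saved_global;

                /* Remember the global-enable state, then disable globally: */
                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
                wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);

                /* ... ack the APIC and process the overflown counters ... */

                /* Restore - a no-op if counters were globally disabled before: */
                wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, saved_global, 0);
        }

Because the saved value is whatever was in the MSR on entry, the "do not reenable when global enable is off" rule falls out for free.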
Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 919ec46679b..6a93d1f04d9 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -33,7 +33,6 @@ static u32 perf_counter_mask __read_mostly; struct cpu_hw_counters { struct perf_counter *counters[MAX_HW_COUNTERS]; unsigned long used[BITS_TO_LONGS(MAX_HW_COUNTERS)]; - int enable_all; }; /* @@ -115,24 +114,13 @@ int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type) return 0; } -static void __hw_perf_enable_all(void) -{ - wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0); -} - void hw_perf_enable_all(void) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - - cpuc->enable_all = 1; - __hw_perf_enable_all(); + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0); } void hw_perf_disable_all(void) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - - cpuc->enable_all = 0; wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0); } @@ -385,8 +373,10 @@ perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown) static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi) { int bit, cpu = smp_processor_id(); + u64 ack, status, saved_global; struct cpu_hw_counters *cpuc; - u64 ack, status; + + rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global); /* Disable counters globally */ wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0); @@ -445,10 +435,9 @@ again: goto again; out: /* - * Do not reenable when global enable is off: + * Restore - do not reenable when global enable is off: */ - if (cpuc->enable_all) - __hw_perf_enable_all(); + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, saved_global, 0); } void smp_perf_counter_interrupt(struct pt_regs *regs) -- cgit v1.2.3 From 4ac13294e44664bb7edf4daf52edb71e7c6bbe84 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 9 Dec 2008 21:43:39 +0100 Subject: perf counters: protect them against CSTATE transitions Impact: fix rare lost events problem There are CPUs whose performance counters misbehave on CSTATE transitions, so provide a way to just disable/enable them around deep idle methods. (hw_perf_enable_all() is cheap on x86.) 
Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 14 +++++++++++++- drivers/acpi/processor_idle.c | 8 ++++++++ include/linux/perf_counter.h | 4 ++++ 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 6a93d1f04d9..0a7f3bea2dc 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -119,10 +120,21 @@ void hw_perf_enable_all(void) wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0); } -void hw_perf_disable_all(void) +void hw_perf_restore_ctrl(u64 ctrl) { + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0); +} +EXPORT_SYMBOL_GPL(hw_perf_restore_ctrl); + +u64 hw_perf_disable_all(void) +{ + u64 ctrl; + + rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0); + return ctrl; } +EXPORT_SYMBOL_GPL(hw_perf_disable_all); static inline void __hw_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx) diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 5f8d746a9b8..cca804e6f1d 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -270,8 +270,11 @@ static atomic_t c3_cpu_count; /* Common C-state entry for C2, C3, .. */ static void acpi_cstate_enter(struct acpi_processor_cx *cstate) { + u64 pctrl; + /* Don't trace irqs off for idle */ stop_critical_timings(); + pctrl = hw_perf_disable_all(); if (cstate->entry_method == ACPI_CSTATE_FFH) { /* Call into architectural FFH based C-state */ acpi_processor_ffh_cstate_enter(cstate); @@ -284,6 +287,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate) gets asserted in time to freeze execution properly. */ unused = inl(acpi_gbl_FADT.xpm_timer_block.address); } + hw_perf_restore_ctrl(pctrl); start_critical_timings(); } #endif /* !CONFIG_CPU_IDLE */ @@ -1425,8 +1429,11 @@ static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr, */ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) { + u64 pctrl; + /* Don't trace irqs off for idle */ stop_critical_timings(); + pctrl = hw_perf_disable_all(); if (cx->entry_method == ACPI_CSTATE_FFH) { /* Call into architectural FFH based C-state */ acpi_processor_ffh_cstate_enter(cx); @@ -1441,6 +1448,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) gets asserted in time to freeze execution properly. 
*/ unused = inl(acpi_gbl_FADT.xpm_timer_block.address); } + hw_perf_restore_ctrl(pctrl); start_critical_timings(); } diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 22c4469abf4..5031b5614f2 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -156,6 +156,8 @@ extern void perf_counter_task_tick(struct task_struct *task, int cpu); extern void perf_counter_init_task(struct task_struct *task); extern void perf_counter_notify(struct pt_regs *regs); extern void perf_counter_print_debug(void); +extern void hw_perf_restore_ctrl(u64 ctrl); +extern u64 hw_perf_disable_all(void); #else static inline void perf_counter_task_sched_in(struct task_struct *task, int cpu) { } @@ -166,6 +168,8 @@ perf_counter_task_tick(struct task_struct *task, int cpu) { } static inline void perf_counter_init_task(struct task_struct *task) { } static inline void perf_counter_notify(struct pt_regs *regs) { } static inline void perf_counter_print_debug(void) { } +static inline void hw_perf_restore_ctrl(u64 ctrl) { } +static inline u64 hw_perf_disable_all(void) { return 0; } #endif #endif /* _LINUX_PERF_COUNTER_H */ -- cgit v1.2.3 From eab656ae04b9d3b83265e3db01c0d2c46b748ef7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 8 Dec 2008 19:26:59 +0100 Subject: perf counters: clean up 'raw' type API Impact: cleanup Introduce a separate hw_event type. Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 7 +++++++ include/linux/syscalls.h | 8 +++----- kernel/perf_counter.c | 15 ++++++++------- 3 files changed, 18 insertions(+), 12 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 5031b5614f2..daedd7d87c2 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -38,6 +38,7 @@ enum hw_event_types { * If this bit is set in the type, then trigger NMI sampling: */ PERF_COUNT_NMI = (1 << 30), + PERF_COUNT_RAW = (1 << 31), }; /* @@ -49,6 +50,12 @@ enum perf_record_type { PERF_RECORD_GROUP, }; +struct perf_counter_event { + u32 hw_event_type; + u32 hw_event_period; + u64 hw_raw_ctrl; +}; + /** * struct hw_perf_counter - performance counter hardware details */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 6cce728a626..3ecd73d03da 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -54,6 +54,7 @@ struct compat_stat; struct compat_timeval; struct robust_list_head; struct getcpu_cache; +struct perf_counter_event; #include #include @@ -625,9 +626,6 @@ asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); int kernel_execve(const char *filename, char *const argv[], char *const envp[]); asmlinkage int -sys_perf_counter_open(u32 hw_event_type, - u32 hw_event_period, - u32 record_type, - pid_t pid, - int cpu); +sys_perf_counter_open(struct perf_counter_event __user *uevent, u32 record_type, + pid_t pid, int cpu, int masterfd); #endif diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 20508f05365..96c333a5b0f 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -734,26 +734,27 @@ perf_counter_alloc(u32 hw_event_period, int cpu, u32 record_type) * @pid: target pid */ asmlinkage int -sys_perf_counter_open(u32 hw_event_type, - u32 hw_event_period, - u32 record_type, - pid_t pid, - int cpu) +sys_perf_counter_open(struct perf_counter_event __user *uevent, u32 record_type, + pid_t pid, int cpu, int masterfd) { struct perf_counter_context *ctx; + struct perf_counter_event event; struct 
perf_counter *counter; int ret; + if (copy_from_user(&event, uevent, sizeof(event)) != 0) + return -EFAULT; + ctx = find_get_context(pid, cpu); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = -ENOMEM; - counter = perf_counter_alloc(hw_event_period, cpu, record_type); + counter = perf_counter_alloc(event.hw_event_period, cpu, record_type); if (!counter) goto err_put_context; - ret = hw_perf_counter_init(counter, hw_event_type); + ret = hw_perf_counter_init(counter, event.hw_event_type); if (ret) goto err_free_put_context; -- cgit v1.2.3 From dfa7c899b401d7dc5d85aca416aee64ac82812f2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 8 Dec 2008 19:35:37 +0100 Subject: perf counters: expand use of counter->event Impact: change syscall, cleanup Make use of the new perf_counters event type. Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 22 +++++++++++----------- include/linux/perf_counter.h | 4 +--- kernel/perf_counter.c | 10 +++++----- 3 files changed, 17 insertions(+), 19 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 0a7f3bea2dc..30e7ebf7827 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -56,9 +56,10 @@ const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map); /* * Setup the hardware configuration for a given hw_event_type */ -int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type) +int hw_perf_counter_init(struct perf_counter *counter) { struct hw_perf_counter *hwc = &counter->hw; + u32 hw_event_type = counter->event.hw_event_type; if (unlikely(!perf_counters_initialized)) return -EINVAL; @@ -83,7 +84,7 @@ int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type) hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0; hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0; - hwc->irq_period = counter->__irq_period; + hwc->irq_period = counter->event.hw_event_period; /* * Intel PMCs cannot be accessed sanely above 32 bit width, * so we install an artificial 1<<31 period regardless of @@ -95,21 +96,19 @@ int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type) hwc->next_count = -((s32) hwc->irq_period); /* - * Negative event types mean raw encoded event+umask values: + * Raw event type provide the config in the event structure */ - if (hw_event_type < 0) { - counter->hw_event_type = -hw_event_type; - counter->hw_event_type &= ~PERF_COUNT_NMI; + hw_event_type &= ~PERF_COUNT_NMI; + if (hw_event_type == PERF_COUNT_RAW) { + hwc->config |= counter->event.hw_raw_ctrl; } else { - hw_event_type &= ~PERF_COUNT_NMI; if (hw_event_type >= max_intel_perfmon_events) return -EINVAL; /* * The generic map: */ - counter->hw_event_type = intel_perfmon_event_map[hw_event_type]; + hwc->config |= intel_perfmon_event_map[hw_event_type]; } - hwc->config |= counter->hw_event_type; counter->wakeup_pending = 0; return 0; @@ -373,7 +372,7 @@ perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown) perf_save_and_restart(counter); } } - perf_store_irq_data(leader, counter->hw_event_type); + perf_store_irq_data(leader, counter->event.hw_event_type); perf_store_irq_data(leader, atomic64_counter_read(counter)); } } @@ -418,7 +417,8 @@ again: perf_store_irq_data(counter, instruction_pointer(regs)); break; case PERF_RECORD_GROUP: - perf_store_irq_data(counter, counter->hw_event_type); + perf_store_irq_data(counter, + counter->event.hw_event_type); perf_store_irq_data(counter, 
atomic64_counter_read(counter)); perf_handle_group(counter, &status, &ack); diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index daedd7d87c2..1f0017673e7 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -96,8 +96,7 @@ struct perf_counter { #else atomic_t count32[2]; #endif - u64 __irq_period; - + struct perf_counter_event event; struct hw_perf_counter hw; struct perf_counter_context *ctx; @@ -111,7 +110,6 @@ struct perf_counter { int oncpu; int cpu; - s32 hw_event_type; enum perf_record_type record_type; /* read() / irq related data */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 96c333a5b0f..2557c670a3b 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -37,7 +37,7 @@ static DEFINE_MUTEX(perf_resource_mutex); * Architecture provided APIs - weak aliases: */ -int __weak hw_perf_counter_init(struct perf_counter *counter, u32 hw_event_type) +int __weak hw_perf_counter_init(struct perf_counter *counter) { return -EINVAL; } @@ -707,7 +707,7 @@ static const struct file_operations perf_fops = { * Allocate and initialize a counter structure */ static struct perf_counter * -perf_counter_alloc(u32 hw_event_period, int cpu, u32 record_type) +perf_counter_alloc(struct perf_counter_event *event, int cpu, u32 record_type) { struct perf_counter *counter = kzalloc(sizeof(*counter), GFP_KERNEL); @@ -722,7 +722,7 @@ perf_counter_alloc(u32 hw_event_period, int cpu, u32 record_type) counter->usrdata = &counter->data[1]; counter->cpu = cpu; counter->record_type = record_type; - counter->__irq_period = hw_event_period; + counter->event = *event; counter->wakeup_pending = 0; return counter; @@ -750,11 +750,11 @@ sys_perf_counter_open(struct perf_counter_event __user *uevent, u32 record_type, return PTR_ERR(ctx); ret = -ENOMEM; - counter = perf_counter_alloc(event.hw_event_period, cpu, record_type); + counter = perf_counter_alloc(&event, cpu, record_type); if (!counter) goto err_put_context; - ret = hw_perf_counter_init(counter, event.hw_event_type); + ret = hw_perf_counter_init(counter); if (ret) goto err_free_put_context; -- cgit v1.2.3 From 9f66a3810fe0d4100972db84290f3ae4a4d77025 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 10 Dec 2008 12:33:23 +0100 Subject: perf counters: restructure the API Impact: clean up new API Thorough cleanup of the new perf counters API, we now get clean separation of the various concepts: - introduce perf_counter_hw_event to separate out the event source details - move special type flags into separate attributes: PERF_COUNT_NMI, PERF_COUNT_RAW - extend the type to u64 and reserve it fully to the architecture in the raw type case. And make use of all these changes in the core and x86 perfcounters code. Also change the syscall signature to: asmlinkage int sys_perf_counter_open( struct perf_counter_hw_event *hw_event_uptr __user, pid_t pid, int cpu, int group_fd); ( Note that group_fd is unused for now - it's reserved for the counter groups abstraction. 
) Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 29 ++++++----- include/linux/perf_counter.h | 98 ++++++++++++++++++++++++-------------- include/linux/syscalls.h | 12 +++-- kernel/perf_counter.c | 38 ++++++++------- 4 files changed, 106 insertions(+), 71 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 30e7ebf7827..ef1936a871a 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -58,8 +58,8 @@ const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map); */ int hw_perf_counter_init(struct perf_counter *counter) { + struct perf_counter_hw_event *hw_event = &counter->hw_event; struct hw_perf_counter *hwc = &counter->hw; - u32 hw_event_type = counter->event.hw_event_type; if (unlikely(!perf_counters_initialized)) return -EINVAL; @@ -77,14 +77,14 @@ int hw_perf_counter_init(struct perf_counter *counter) hwc->nmi = 0; if (capable(CAP_SYS_ADMIN)) { hwc->config |= ARCH_PERFMON_EVENTSEL_OS; - if (hw_event_type & PERF_COUNT_NMI) + if (hw_event->nmi) hwc->nmi = 1; } - hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0; - hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0; + hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0; + hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0; - hwc->irq_period = counter->event.hw_event_period; + hwc->irq_period = hw_event->irq_period; /* * Intel PMCs cannot be accessed sanely above 32 bit width, * so we install an artificial 1<<31 period regardless of @@ -93,21 +93,20 @@ int hw_perf_counter_init(struct perf_counter *counter) if (!hwc->irq_period) hwc->irq_period = 0x7FFFFFFF; - hwc->next_count = -((s32) hwc->irq_period); + hwc->next_count = -(s32)hwc->irq_period; /* * Raw event type provide the config in the event structure */ - hw_event_type &= ~PERF_COUNT_NMI; - if (hw_event_type == PERF_COUNT_RAW) { - hwc->config |= counter->event.hw_raw_ctrl; + if (hw_event->raw) { + hwc->config |= hw_event->type; } else { - if (hw_event_type >= max_intel_perfmon_events) + if (hw_event->type >= max_intel_perfmon_events) return -EINVAL; /* * The generic map: */ - hwc->config |= intel_perfmon_event_map[hw_event_type]; + hwc->config |= intel_perfmon_event_map[hw_event->type]; } counter->wakeup_pending = 0; @@ -354,7 +353,7 @@ perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown) int bit; list_for_each_entry(counter, &ctx->counters, list) { - if (counter->record_type != PERF_RECORD_SIMPLE || + if (counter->hw_event.record_type != PERF_RECORD_SIMPLE || counter == leader) continue; @@ -372,7 +371,7 @@ perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown) perf_save_and_restart(counter); } } - perf_store_irq_data(leader, counter->event.hw_event_type); + perf_store_irq_data(leader, counter->hw_event.type); perf_store_irq_data(leader, atomic64_counter_read(counter)); } } @@ -410,7 +409,7 @@ again: perf_save_and_restart(counter); - switch (counter->record_type) { + switch (counter->hw_event.record_type) { case PERF_RECORD_SIMPLE: continue; case PERF_RECORD_IRQ: @@ -418,7 +417,7 @@ again: break; case PERF_RECORD_GROUP: perf_store_irq_data(counter, - counter->event.hw_event_type); + counter->hw_event.type); perf_store_irq_data(counter, atomic64_counter_read(counter)); perf_handle_group(counter, &status, &ack); diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 1f0017673e7..a2b4852e2d7 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -24,65 +24,93 @@ struct task_struct; /* - * 
Generalized hardware event types, used by the hw_event_type parameter - * of the sys_perf_counter_open() syscall: + * User-space ABI bits: + */ + +/* + * Generalized performance counter event types, used by the hw_event.type + * parameter of the sys_perf_counter_open() syscall: */ enum hw_event_types { - PERF_COUNT_CYCLES, - PERF_COUNT_INSTRUCTIONS, - PERF_COUNT_CACHE_REFERENCES, - PERF_COUNT_CACHE_MISSES, - PERF_COUNT_BRANCH_INSTRUCTIONS, - PERF_COUNT_BRANCH_MISSES, /* - * If this bit is set in the type, then trigger NMI sampling: + * Common hardware events, generalized by the kernel: */ - PERF_COUNT_NMI = (1 << 30), - PERF_COUNT_RAW = (1 << 31), + PERF_COUNT_CYCLES = 0, + PERF_COUNT_INSTRUCTIONS = 1, + PERF_COUNT_CACHE_REFERENCES = 2, + PERF_COUNT_CACHE_MISSES = 3, + PERF_COUNT_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_BRANCH_MISSES = 5, + + /* + * Special "software" counters provided by the kernel, even if + * the hardware does not support performance counters. These + * counters measure various physical and sw events of the + * kernel (and allow the profiling of them as well): + */ + PERF_COUNT_CPU_CLOCK = -1, + PERF_COUNT_TASK_CLOCK = -2, + PERF_COUNT_PAGE_FAULTS = -3, + PERF_COUNT_CONTEXT_SWITCHES = -4, }; /* * IRQ-notification data record type: */ -enum perf_record_type { - PERF_RECORD_SIMPLE, - PERF_RECORD_IRQ, - PERF_RECORD_GROUP, +enum perf_counter_record_type { + PERF_RECORD_SIMPLE = 0, + PERF_RECORD_IRQ = 1, + PERF_RECORD_GROUP = 2, }; -struct perf_counter_event { - u32 hw_event_type; - u32 hw_event_period; - u64 hw_raw_ctrl; +/* + * Hardware event to monitor via a performance monitoring counter: + */ +struct perf_counter_hw_event { + u64 type; + + u64 irq_period; + u32 record_type; + + u32 disabled : 1, /* off by default */ + nmi : 1, /* NMI sampling */ + raw : 1, /* raw event type */ + __reserved_1 : 29; + + u64 __reserved_2; }; +/* + * Kernel-internal data types: + */ + /** - * struct hw_perf_counter - performance counter hardware details + * struct hw_perf_counter - performance counter hardware details: */ struct hw_perf_counter { - u64 config; - unsigned long config_base; - unsigned long counter_base; - int nmi; - unsigned int idx; - u64 prev_count; - s32 next_count; - u64 irq_period; + u64 config; + unsigned long config_base; + unsigned long counter_base; + int nmi; + unsigned int idx; + u64 prev_count; + u64 irq_period; + s32 next_count; }; /* * Hardcoded buffer length limit for now, for IRQ-fed events: */ -#define PERF_DATA_BUFLEN 2048 +#define PERF_DATA_BUFLEN 2048 /** * struct perf_data - performance counter IRQ data sampling ... 
*/ struct perf_data { - int len; - int rd_idx; - int overrun; - u8 data[PERF_DATA_BUFLEN]; + int len; + int rd_idx; + int overrun; + u8 data[PERF_DATA_BUFLEN]; }; /** @@ -96,7 +124,7 @@ struct perf_counter { #else atomic_t count32[2]; #endif - struct perf_counter_event event; + struct perf_counter_hw_event hw_event; struct hw_perf_counter hw; struct perf_counter_context *ctx; @@ -110,8 +138,6 @@ struct perf_counter { int oncpu; int cpu; - enum perf_record_type record_type; - /* read() / irq related data */ wait_queue_head_t waitq; /* optional: for NMIs */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 3ecd73d03da..a549678b7c3 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -54,7 +54,7 @@ struct compat_stat; struct compat_timeval; struct robust_list_head; struct getcpu_cache; -struct perf_counter_event; +struct perf_counter_hw_event; #include #include @@ -625,7 +625,11 @@ asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); int kernel_execve(const char *filename, char *const argv[], char *const envp[]); -asmlinkage int -sys_perf_counter_open(struct perf_counter_event __user *uevent, u32 record_type, - pid_t pid, int cpu, int masterfd); + +asmlinkage int sys_perf_counter_open( + + struct perf_counter_hw_event *hw_event_uptr __user, + pid_t pid, + int cpu, + int group_fd); #endif diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 2557c670a3b..0d323ceda3a 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -669,7 +669,7 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct perf_counter *counter = file->private_data; - switch (counter->record_type) { + switch (counter->hw_event.record_type) { case PERF_RECORD_SIMPLE: return perf_read_hw(counter, buf, count); @@ -707,7 +707,7 @@ static const struct file_operations perf_fops = { * Allocate and initialize a counter structure */ static struct perf_counter * -perf_counter_alloc(struct perf_counter_event *event, int cpu, u32 record_type) +perf_counter_alloc(struct perf_counter_hw_event *hw_event, int cpu) { struct perf_counter *counter = kzalloc(sizeof(*counter), GFP_KERNEL); @@ -718,31 +718,37 @@ perf_counter_alloc(struct perf_counter_event *event, int cpu, u32 record_type) INIT_LIST_HEAD(&counter->list); init_waitqueue_head(&counter->waitq); - counter->irqdata = &counter->data[0]; - counter->usrdata = &counter->data[1]; - counter->cpu = cpu; - counter->record_type = record_type; - counter->event = *event; - counter->wakeup_pending = 0; + counter->irqdata = &counter->data[0]; + counter->usrdata = &counter->data[1]; + counter->cpu = cpu; + counter->hw_event = *hw_event; + counter->wakeup_pending = 0; return counter; } /** - * sys_perf_task_open - open a performance counter associate it to a task - * @hw_event_type: event type for monitoring/sampling... 
+ * sys_perf_task_open - open a performance counter, associate it to a task/cpu + * + * @hw_event_uptr: event type attributes for monitoring/sampling * @pid: target pid + * @cpu: target cpu + * @group_fd: group leader counter fd */ -asmlinkage int -sys_perf_counter_open(struct perf_counter_event __user *uevent, u32 record_type, - pid_t pid, int cpu, int masterfd) +asmlinkage int sys_perf_counter_open( + + struct perf_counter_hw_event *hw_event_uptr __user, + pid_t pid, + int cpu, + int group_fd) + { struct perf_counter_context *ctx; - struct perf_counter_event event; + struct perf_counter_hw_event hw_event; struct perf_counter *counter; int ret; - if (copy_from_user(&event, uevent, sizeof(event)) != 0) + if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0) return -EFAULT; ctx = find_get_context(pid, cpu); @@ -750,7 +756,7 @@ sys_perf_counter_open(struct perf_counter_event __user *uevent, u32 record_type, return PTR_ERR(ctx); ret = -ENOMEM; - counter = perf_counter_alloc(&event, cpu, record_type); + counter = perf_counter_alloc(&hw_event, cpu); if (!counter) goto err_put_context; -- cgit v1.2.3 From 04289bb9891882202d7e961c4c04d2376930e9f9 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 11 Dec 2008 08:38:42 +0100 Subject: perf counters: add support for group counters Impact: add group counters This patch adds the "counter groups" abstraction. Groups of counters behave much like normal 'single' counters, with a few semantic and behavioral extensions on top of that. A counter group is created by creating a new counter with the open() syscall's group-leader group_fd file descriptor parameter pointing to another, already existing counter. Groups of counters are scheduled in and out in one atomic group, and they are also roundrobin-scheduled atomically. Counters that are member of a group can also record events with an (atomic) extended timestamp that extends to all members of the group, if the record type is set to PERF_RECORD_GROUP. Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 28 ++-- include/linux/perf_counter.h | 8 +- kernel/perf_counter.c | 282 ++++++++++++++++++++++++++++--------- 3 files changed, 236 insertions(+), 82 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index ef1936a871a..54b4ad0cce6 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -346,18 +346,22 @@ static void perf_save_and_restart(struct perf_counter *counter) } static void -perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown) +perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown) { - struct perf_counter_context *ctx = leader->ctx; - struct perf_counter *counter; + struct perf_counter *counter, *group_leader = sibling->group_leader; int bit; - list_for_each_entry(counter, &ctx->counters, list) { - if (counter->hw_event.record_type != PERF_RECORD_SIMPLE || - counter == leader) - continue; + /* + * Store the counter's own timestamp first: + */ + perf_store_irq_data(sibling, sibling->hw_event.type); + perf_store_irq_data(sibling, atomic64_counter_read(sibling)); - if (counter->active) { + /* + * Then store sibling timestamps (if any): + */ + list_for_each_entry(counter, &group_leader->sibling_list, list_entry) { + if (!counter->active) { /* * When counter was not in the overflow mask, we have to * read it from hardware. 
We read it as well, when it @@ -371,8 +375,8 @@ perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown) perf_save_and_restart(counter); } } - perf_store_irq_data(leader, counter->hw_event.type); - perf_store_irq_data(leader, atomic64_counter_read(counter)); + perf_store_irq_data(sibling, counter->hw_event.type); + perf_store_irq_data(sibling, atomic64_counter_read(counter)); } } @@ -416,10 +420,6 @@ again: perf_store_irq_data(counter, instruction_pointer(regs)); break; case PERF_RECORD_GROUP: - perf_store_irq_data(counter, - counter->hw_event.type); - perf_store_irq_data(counter, - atomic64_counter_read(counter)); perf_handle_group(counter, &status, &ack); break; } diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index a2b4852e2d7..7af7d896546 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -117,7 +117,10 @@ struct perf_data { * struct perf_counter - performance counter kernel representation: */ struct perf_counter { - struct list_head list; + struct list_head list_entry; + struct list_head sibling_list; + struct perf_counter *group_leader; + int active; #if BITS_PER_LONG == 64 atomic64_t count; @@ -158,7 +161,8 @@ struct perf_counter_context { * Protect the list of counters: */ spinlock_t lock; - struct list_head counters; + + struct list_head counter_list; int nr_counters; int nr_active; struct task_struct *task; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 0d323ceda3a..fa59fe8c02d 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -55,7 +56,7 @@ void __weak hw_perf_counter_setup(void) { } * Read the cached counter in counter safe against cross CPU / NMI * modifications. 64 bit version - no complications. */ -static inline u64 perf_read_counter_safe(struct perf_counter *counter) +static inline u64 perf_counter_read_safe(struct perf_counter *counter) { return (u64) atomic64_read(&counter->count); } @@ -66,7 +67,7 @@ static inline u64 perf_read_counter_safe(struct perf_counter *counter) * Read the cached counter in counter safe against cross CPU / NMI * modifications. 32 bit version. 
*/ -static u64 perf_read_counter_safe(struct perf_counter *counter) +static u64 perf_counter_read_safe(struct perf_counter *counter) { u32 cntl, cnth; @@ -83,13 +84,55 @@ static u64 perf_read_counter_safe(struct perf_counter *counter) #endif +static void +list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) +{ + struct perf_counter *group_leader = counter->group_leader; + + /* + * Depending on whether it is a standalone or sibling counter, + * add it straight to the context's counter list, or to the group + * leader's sibling list: + */ + if (counter->group_leader == counter) + list_add_tail(&counter->list_entry, &ctx->counter_list); + else + list_add_tail(&counter->list_entry, &group_leader->sibling_list); +} + +static void +list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) +{ + struct perf_counter *sibling, *tmp; + + list_del_init(&counter->list_entry); + + if (list_empty(&counter->sibling_list)) + return; + + /* + * If this was a group counter with sibling counters then + * upgrade the siblings to singleton counters by adding them + * to the context list directly: + */ + list_for_each_entry_safe(sibling, tmp, + &counter->sibling_list, list_entry) { + + list_del_init(&sibling->list_entry); + list_add_tail(&sibling->list_entry, &ctx->counter_list); + WARN_ON_ONCE(!sibling->group_leader); + WARN_ON_ONCE(sibling->group_leader == sibling); + sibling->group_leader = sibling; + } +} + /* * Cross CPU call to remove a performance counter * * We disable the counter on the hardware level first. After that we * remove it from the context list. */ -static void __perf_remove_from_context(void *info) +static void __perf_counter_remove_from_context(void *info) { struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_counter *counter = info; @@ -119,7 +162,7 @@ static void __perf_remove_from_context(void *info) * counters on a global level. NOP for non NMI based counters. */ hw_perf_disable_all(); - list_del_init(&counter->list); + list_del_counter(counter, ctx); hw_perf_enable_all(); if (!ctx->task) { @@ -144,7 +187,7 @@ static void __perf_remove_from_context(void *info) * CPU counters are removed with a smp call. For task counters we only * call when the task is on a CPU. */ -static void perf_remove_from_context(struct perf_counter *counter) +static void perf_counter_remove_from_context(struct perf_counter *counter) { struct perf_counter_context *ctx = counter->ctx; struct task_struct *task = ctx->task; @@ -155,32 +198,32 @@ static void perf_remove_from_context(struct perf_counter *counter) * the removal is always sucessful. */ smp_call_function_single(counter->cpu, - __perf_remove_from_context, + __perf_counter_remove_from_context, counter, 1); return; } retry: - task_oncpu_function_call(task, __perf_remove_from_context, + task_oncpu_function_call(task, __perf_counter_remove_from_context, counter); spin_lock_irq(&ctx->lock); /* * If the context is active we need to retry the smp call. */ - if (ctx->nr_active && !list_empty(&counter->list)) { + if (ctx->nr_active && !list_empty(&counter->list_entry)) { spin_unlock_irq(&ctx->lock); goto retry; } /* * The lock prevents that this context is scheduled in so we - * can remove the counter safely, if it the call above did not + * can remove the counter safely, if the call above did not * succeed. 
*/ - if (!list_empty(&counter->list)) { + if (!list_empty(&counter->list_entry)) { ctx->nr_counters--; - list_del_init(&counter->list); + list_del_counter(counter, ctx); counter->task = NULL; } spin_unlock_irq(&ctx->lock); @@ -211,7 +254,7 @@ static void __perf_install_in_context(void *info) * counters on a global level. NOP for non NMI based counters. */ hw_perf_disable_all(); - list_add_tail(&counter->list, &ctx->counters); + list_add_counter(counter, ctx); hw_perf_enable_all(); ctx->nr_counters++; @@ -268,7 +311,7 @@ retry: * If the context is active and the counter has not been added * we need to retry the smp call. */ - if (ctx->nr_active && list_empty(&counter->list)) { + if (ctx->nr_active && list_empty(&counter->list_entry)) { spin_unlock_irq(&ctx->lock); goto retry; } @@ -278,13 +321,45 @@ retry: * can add the counter safely, if it the call above did not * succeed. */ - if (list_empty(&counter->list)) { - list_add_tail(&counter->list, &ctx->counters); + if (list_empty(&counter->list_entry)) { + list_add_counter(counter, ctx); ctx->nr_counters++; } spin_unlock_irq(&ctx->lock); } +static void +counter_sched_out(struct perf_counter *counter, + struct perf_cpu_context *cpuctx, + struct perf_counter_context *ctx) +{ + if (!counter->active) + return; + + hw_perf_counter_disable(counter); + counter->active = 0; + counter->oncpu = -1; + + cpuctx->active_oncpu--; + ctx->nr_active--; +} + +static void +group_sched_out(struct perf_counter *group_counter, + struct perf_cpu_context *cpuctx, + struct perf_counter_context *ctx) +{ + struct perf_counter *counter; + + counter_sched_out(group_counter, cpuctx, ctx); + + /* + * Schedule out siblings (if any): + */ + list_for_each_entry(counter, &group_counter->sibling_list, list_entry) + counter_sched_out(counter, cpuctx, ctx); +} + /* * Called from scheduler to remove the counters of the current task, * with interrupts disabled. @@ -306,21 +381,48 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu) return; spin_lock(&ctx->lock); - list_for_each_entry(counter, &ctx->counters, list) { - if (!ctx->nr_active) - break; - if (counter->active) { - hw_perf_counter_disable(counter); - counter->active = 0; - counter->oncpu = -1; - ctx->nr_active--; - cpuctx->active_oncpu--; - } + if (ctx->nr_active) { + list_for_each_entry(counter, &ctx->counter_list, list_entry) + group_sched_out(counter, cpuctx, ctx); } spin_unlock(&ctx->lock); cpuctx->task_ctx = NULL; } +static void +counter_sched_in(struct perf_counter *counter, + struct perf_cpu_context *cpuctx, + struct perf_counter_context *ctx, + int cpu) +{ + if (!counter->active) + return; + + hw_perf_counter_enable(counter); + counter->active = 1; + counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ + + cpuctx->active_oncpu++; + ctx->nr_active++; +} + +static void +group_sched_in(struct perf_counter *group_counter, + struct perf_cpu_context *cpuctx, + struct perf_counter_context *ctx, + int cpu) +{ + struct perf_counter *counter; + + counter_sched_in(group_counter, cpuctx, ctx, cpu); + + /* + * Schedule in siblings as one group (if any): + */ + list_for_each_entry(counter, &group_counter->sibling_list, list_entry) + counter_sched_in(counter, cpuctx, ctx, cpu); +} + /* * Called from scheduler to add the counters of the current task * with interrupts disabled. 
@@ -342,19 +444,21 @@ void perf_counter_task_sched_in(struct task_struct *task, int cpu) return; spin_lock(&ctx->lock); - list_for_each_entry(counter, &ctx->counters, list) { + list_for_each_entry(counter, &ctx->counter_list, list_entry) { if (ctx->nr_active == cpuctx->max_pertask) break; + + /* + * Listen to the 'cpu' scheduling filter constraint + * of counters: + */ if (counter->cpu != -1 && counter->cpu != cpu) continue; - hw_perf_counter_enable(counter); - counter->active = 1; - counter->oncpu = cpu; - ctx->nr_active++; - cpuctx->active_oncpu++; + group_sched_in(counter, cpuctx, ctx, cpu); } spin_unlock(&ctx->lock); + cpuctx->task_ctx = ctx; } @@ -371,12 +475,12 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) spin_lock(&ctx->lock); /* - * Rotate the first entry last: + * Rotate the first entry last (works just fine for group counters too): */ hw_perf_disable_all(); - list_for_each_entry(counter, &ctx->counters, list) { - list_del(&counter->list); - list_add_tail(&counter->list, &ctx->counters); + list_for_each_entry(counter, &ctx->counter_list, list_entry) { + list_del(&counter->list_entry); + list_add_tail(&counter->list_entry, &ctx->counter_list); break; } hw_perf_enable_all(); @@ -386,17 +490,24 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) perf_counter_task_sched_in(curr, cpu); } +/* + * Initialize the perf_counter context in a task_struct: + */ +static void +__perf_counter_init_context(struct perf_counter_context *ctx, + struct task_struct *task) +{ + spin_lock_init(&ctx->lock); + INIT_LIST_HEAD(&ctx->counter_list); + ctx->nr_counters = 0; + ctx->task = task; +} /* * Initialize the perf_counter context in task_struct */ void perf_counter_init_task(struct task_struct *task) { - struct perf_counter_context *ctx = &task->perf_counter_ctx; - - spin_lock_init(&ctx->lock); - INIT_LIST_HEAD(&ctx->counters); - ctx->nr_counters = 0; - ctx->task = task; + __perf_counter_init_context(&task->perf_counter_ctx, task); } /* @@ -407,7 +518,7 @@ static void __hw_perf_counter_read(void *info) hw_perf_counter_read(info); } -static u64 perf_read_counter(struct perf_counter *counter) +static u64 perf_counter_read(struct perf_counter *counter) { /* * If counter is enabled and currently active on a CPU, update the @@ -418,7 +529,7 @@ static u64 perf_read_counter(struct perf_counter *counter) __hw_perf_counter_read, counter, 1); } - return perf_read_counter_safe(counter); + return perf_counter_read_safe(counter); } /* @@ -555,7 +666,7 @@ static int perf_release(struct inode *inode, struct file *file) mutex_lock(&counter->mutex); - perf_remove_from_context(counter); + perf_counter_remove_from_context(counter); put_context(ctx); mutex_unlock(&counter->mutex); @@ -577,7 +688,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) return -EINVAL; mutex_lock(&counter->mutex); - cntval = perf_read_counter(counter); + cntval = perf_counter_read(counter); mutex_unlock(&counter->mutex); return put_user(cntval, (u64 __user *) buf) ? 
-EFAULT : sizeof(cntval); @@ -707,15 +818,25 @@ static const struct file_operations perf_fops = { * Allocate and initialize a counter structure */ static struct perf_counter * -perf_counter_alloc(struct perf_counter_hw_event *hw_event, int cpu) +perf_counter_alloc(struct perf_counter_hw_event *hw_event, + int cpu, + struct perf_counter *group_leader) { struct perf_counter *counter = kzalloc(sizeof(*counter), GFP_KERNEL); if (!counter) return NULL; + /* + * Single counters are their own group leaders, with an + * empty sibling list: + */ + if (!group_leader) + group_leader = counter; + mutex_init(&counter->mutex); - INIT_LIST_HEAD(&counter->list); + INIT_LIST_HEAD(&counter->list_entry); + INIT_LIST_HEAD(&counter->sibling_list); init_waitqueue_head(&counter->waitq); counter->irqdata = &counter->data[0]; @@ -723,6 +844,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, int cpu) counter->cpu = cpu; counter->hw_event = *hw_event; counter->wakeup_pending = 0; + counter->group_leader = group_leader; return counter; } @@ -743,20 +865,45 @@ asmlinkage int sys_perf_counter_open( int group_fd) { - struct perf_counter_context *ctx; + struct perf_counter *counter, *group_leader; struct perf_counter_hw_event hw_event; - struct perf_counter *counter; + struct perf_counter_context *ctx; + struct file *group_file = NULL; + int fput_needed = 0; int ret; if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0) return -EFAULT; + /* + * Look up the group leader: + */ + group_leader = NULL; + if (group_fd != -1) { + ret = -EINVAL; + group_file = fget_light(group_fd, &fput_needed); + if (!group_file) + goto out_fput; + if (group_file->f_op != &perf_fops) + goto out_fput; + + group_leader = group_file->private_data; + /* + * Do not allow a recursive hierarchy: + */ + if (group_leader->group_leader) + goto out_fput; + } + + /* + * Get the target context (task or percpu): + */ ctx = find_get_context(pid, cpu); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = -ENOMEM; - counter = perf_counter_alloc(&hw_event, cpu); + counter = perf_counter_alloc(&hw_event, cpu, group_leader); if (!counter) goto err_put_context; @@ -770,11 +917,14 @@ asmlinkage int sys_perf_counter_open( if (ret < 0) goto err_remove_free_put_context; +out_fput: + fput_light(group_file, fput_needed); + return ret; err_remove_free_put_context: mutex_lock(&counter->mutex); - perf_remove_from_context(counter); + perf_counter_remove_from_context(counter); mutex_unlock(&counter->mutex); err_free_put_context: @@ -783,40 +933,40 @@ err_free_put_context: err_put_context: put_context(ctx); - return ret; + goto out_fput; } -static void __cpuinit perf_init_cpu(int cpu) +static void __cpuinit perf_counter_init_cpu(int cpu) { - struct perf_cpu_context *ctx; + struct perf_cpu_context *cpuctx; - ctx = &per_cpu(perf_cpu_context, cpu); - spin_lock_init(&ctx->ctx.lock); - INIT_LIST_HEAD(&ctx->ctx.counters); + cpuctx = &per_cpu(perf_cpu_context, cpu); + __perf_counter_init_context(&cpuctx->ctx, NULL); mutex_lock(&perf_resource_mutex); - ctx->max_pertask = perf_max_counters - perf_reserved_percpu; + cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu; mutex_unlock(&perf_resource_mutex); + hw_perf_counter_setup(); } #ifdef CONFIG_HOTPLUG_CPU -static void __perf_exit_cpu(void *info) +static void __perf_counter_exit_cpu(void *info) { struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_counter_context *ctx = &cpuctx->ctx; struct perf_counter *counter, *tmp; - list_for_each_entry_safe(counter, tmp, &ctx->counters, 
list) - __perf_remove_from_context(counter); + list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) + __perf_counter_remove_from_context(counter); } -static void perf_exit_cpu(int cpu) +static void perf_counter_exit_cpu(int cpu) { - smp_call_function_single(cpu, __perf_exit_cpu, NULL, 1); + smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1); } #else -static inline void perf_exit_cpu(int cpu) { } +static inline void perf_counter_exit_cpu(int cpu) { } #endif static int __cpuinit @@ -828,12 +978,12 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: - perf_init_cpu(cpu); + perf_counter_init_cpu(cpu); break; case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: - perf_exit_cpu(cpu); + perf_counter_exit_cpu(cpu); break; default: -- cgit v1.2.3 From ccff286d85098ba5438e22aa2ea807fc1e18cf2f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 11 Dec 2008 11:26:29 +0100 Subject: perf counters: group counter, fixes Impact: bugfix Check that a group does not span outside the context of a CPU or a task. Also, do not allow deep recursive hierarchies. Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index fa59fe8c02d..278209c547a 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -107,9 +107,6 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) list_del_init(&counter->list_entry); - if (list_empty(&counter->sibling_list)) - return; - /* * If this was a group counter with sibling counters then * upgrade the siblings to singleton counters by adding them @@ -395,9 +392,6 @@ counter_sched_in(struct perf_counter *counter, struct perf_counter_context *ctx, int cpu) { - if (!counter->active) - return; - hw_perf_counter_enable(counter); counter->active = 1; counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ @@ -876,32 +870,39 @@ asmlinkage int sys_perf_counter_open( return -EFAULT; /* - * Look up the group leader: + * Get the target context (task or percpu): + */ + ctx = find_get_context(pid, cpu); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + /* + * Look up the group leader (we will attach this counter to it): */ group_leader = NULL; if (group_fd != -1) { ret = -EINVAL; group_file = fget_light(group_fd, &fput_needed); if (!group_file) - goto out_fput; + goto err_put_context; if (group_file->f_op != &perf_fops) - goto out_fput; + goto err_put_context; group_leader = group_file->private_data; /* - * Do not allow a recursive hierarchy: + * Do not allow a recursive hierarchy (this new sibling + * becoming part of another group-sibling): + */ + if (group_leader->group_leader != group_leader) + goto err_put_context; + /* + * Do not allow to attach to a group in a different + * task or CPU context: */ - if (group_leader->group_leader) - goto out_fput; + if (group_leader->ctx != ctx) + goto err_put_context; } - /* - * Get the target context (task or percpu): - */ - ctx = find_get_context(pid, cpu); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); - ret = -ENOMEM; counter = perf_counter_alloc(&hw_event, cpu, group_leader); if (!counter) -- cgit v1.2.3 From 621a01eac89b5e2f81a4cf576568b31f40a02724 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 11 Dec 2008 12:46:46 +0100 Subject: perf counters: hw driver API Impact: restructure code, introduce hw_ops driver abstraction Introduce this abstraction to 
handle counter details: struct hw_perf_counter_ops { void (*hw_perf_counter_enable) (struct perf_counter *counter); void (*hw_perf_counter_disable) (struct perf_counter *counter); void (*hw_perf_counter_read) (struct perf_counter *counter); }; This will be useful to support asymmetric hw details, and it will also be useful to implement "software counters". (Counters that count kernel managed sw events such as pagefaults, context-switches, wall-clock time or task-local time.) Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 37 ++++++++++++++++++++--------- include/linux/perf_counter.h | 15 +++++++++++++ kernel/perf_counter.c | 45 ++++++++++++++++++++------------------ 3 files changed, 66 insertions(+), 31 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 54b4ad0cce6..718b635dece 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -56,7 +56,7 @@ const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map); /* * Setup the hardware configuration for a given hw_event_type */ -int hw_perf_counter_init(struct perf_counter *counter) +static int __hw_perf_counter_init(struct perf_counter *counter) { struct perf_counter_hw_event *hw_event = &counter->hw_event; struct hw_perf_counter *hwc = &counter->hw; @@ -135,7 +135,7 @@ u64 hw_perf_disable_all(void) EXPORT_SYMBOL_GPL(hw_perf_disable_all); static inline void -__hw_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx) +__x86_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx) { wrmsr(hwc->config_base + idx, hwc->config, 0); } @@ -149,13 +149,13 @@ static void __hw_perf_counter_set_period(struct hw_perf_counter *hwc, int idx) wrmsr(hwc->counter_base + idx, hwc->next_count, 0); } -static void __hw_perf_counter_enable(struct hw_perf_counter *hwc, int idx) +static void __x86_perf_counter_enable(struct hw_perf_counter *hwc, int idx) { wrmsr(hwc->config_base + idx, hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0); } -void hw_perf_counter_enable(struct perf_counter *counter) +static void x86_perf_counter_enable(struct perf_counter *counter) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); struct hw_perf_counter *hwc = &counter->hw; @@ -170,12 +170,12 @@ void hw_perf_counter_enable(struct perf_counter *counter) perf_counters_lapic_init(hwc->nmi); - __hw_perf_counter_disable(hwc, idx); + __x86_perf_counter_disable(hwc, idx); cpuc->counters[idx] = counter; __hw_perf_counter_set_period(hwc, idx); - __hw_perf_counter_enable(hwc, idx); + __x86_perf_counter_enable(hwc, idx); } #ifdef CONFIG_X86_64 @@ -282,20 +282,20 @@ void perf_counter_print_debug(void) local_irq_enable(); } -void hw_perf_counter_disable(struct perf_counter *counter) +static void x86_perf_counter_disable(struct perf_counter *counter) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); struct hw_perf_counter *hwc = &counter->hw; unsigned int idx = hwc->idx; - __hw_perf_counter_disable(hwc, idx); + __x86_perf_counter_disable(hwc, idx); clear_bit(idx, cpuc->used); cpuc->counters[idx] = NULL; __hw_perf_save_counter(counter, hwc, idx); } -void hw_perf_counter_read(struct perf_counter *counter) +static void x86_perf_counter_read(struct perf_counter *counter) { struct hw_perf_counter *hwc = &counter->hw; unsigned long addr = hwc->counter_base + hwc->idx; @@ -342,7 +342,7 @@ static void perf_save_and_restart(struct perf_counter *counter) __hw_perf_counter_set_period(hwc, idx); if (pmc_ctrl &
ARCH_PERFMON_EVENTSEL0_ENABLE) - __hw_perf_counter_enable(hwc, idx); + __x86_perf_counter_enable(hwc, idx); } static void @@ -572,3 +572,20 @@ void __init init_hw_perf_counters(void) perf_counters_initialized = true; } + +static struct hw_perf_counter_ops x86_perf_counter_ops = { + .hw_perf_counter_enable = x86_perf_counter_enable, + .hw_perf_counter_disable = x86_perf_counter_disable, + .hw_perf_counter_read = x86_perf_counter_read, +}; + +struct hw_perf_counter_ops *hw_perf_counter_init(struct perf_counter *counter) +{ + int err; + + err = __hw_perf_counter_init(counter); + if (err) + return NULL; + + return &x86_perf_counter_ops; +} diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 7af7d896546..27385641ecb 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -113,6 +113,17 @@ struct perf_data { u8 data[PERF_DATA_BUFLEN]; }; +struct perf_counter; + +/** + * struct hw_perf_counter_ops - performance counter hw ops + */ +struct hw_perf_counter_ops { + void (*hw_perf_counter_enable) (struct perf_counter *counter); + void (*hw_perf_counter_disable) (struct perf_counter *counter); + void (*hw_perf_counter_read) (struct perf_counter *counter); +}; + /** * struct perf_counter - performance counter kernel representation: */ @@ -120,6 +131,7 @@ struct perf_counter { struct list_head list_entry; struct list_head sibling_list; struct perf_counter *group_leader; + struct hw_perf_counter_ops *hw_ops; int active; #if BITS_PER_LONG == 64 @@ -185,6 +197,9 @@ struct perf_cpu_context { extern int perf_max_counters; #ifdef CONFIG_PERF_COUNTERS +extern struct hw_perf_counter_ops * +hw_perf_counter_init(struct perf_counter *counter); + extern void perf_counter_task_sched_in(struct task_struct *task, int cpu); extern void perf_counter_task_sched_out(struct task_struct *task, int cpu); extern void perf_counter_task_tick(struct task_struct *task, int cpu); diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 278209c547a..e6e41ca9546 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -37,18 +37,15 @@ static DEFINE_MUTEX(perf_resource_mutex); /* * Architecture provided APIs - weak aliases: */ - -int __weak hw_perf_counter_init(struct perf_counter *counter) +extern __weak struct hw_perf_counter_ops * +hw_perf_counter_init(struct perf_counter *counter) { - return -EINVAL; + return ERR_PTR(-EINVAL); } -void __weak hw_perf_counter_enable(struct perf_counter *counter) { } -void __weak hw_perf_counter_disable(struct perf_counter *counter) { } -void __weak hw_perf_counter_read(struct perf_counter *counter) { } -void __weak hw_perf_disable_all(void) { } -void __weak hw_perf_enable_all(void) { } -void __weak hw_perf_counter_setup(void) { } +void __weak hw_perf_disable_all(void) { } +void __weak hw_perf_enable_all(void) { } +void __weak hw_perf_counter_setup(void) { } #if BITS_PER_LONG == 64 @@ -146,7 +143,7 @@ static void __perf_counter_remove_from_context(void *info) spin_lock(&ctx->lock); if (counter->active) { - hw_perf_counter_disable(counter); + counter->hw_ops->hw_perf_counter_disable(counter); counter->active = 0; ctx->nr_active--; cpuctx->active_oncpu--; @@ -257,7 +254,7 @@ static void __perf_install_in_context(void *info) ctx->nr_counters++; if (cpuctx->active_oncpu < perf_max_counters) { - hw_perf_counter_enable(counter); + counter->hw_ops->hw_perf_counter_enable(counter); counter->active = 1; counter->oncpu = cpu; ctx->nr_active++; @@ -333,7 +330,7 @@ counter_sched_out(struct perf_counter *counter, if (!counter->active) return; - 
hw_perf_counter_disable(counter); + counter->hw_ops->hw_perf_counter_disable(counter); counter->active = 0; counter->oncpu = -1; @@ -392,7 +389,7 @@ counter_sched_in(struct perf_counter *counter, struct perf_counter_context *ctx, int cpu) { - hw_perf_counter_enable(counter); + counter->hw_ops->hw_perf_counter_enable(counter); counter->active = 1; counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ @@ -509,7 +506,9 @@ void perf_counter_init_task(struct task_struct *task) */ static void __hw_perf_counter_read(void *info) { - hw_perf_counter_read(info); + struct perf_counter *counter = info; + + counter->hw_ops->hw_perf_counter_read(counter); } static u64 perf_counter_read(struct perf_counter *counter) @@ -816,8 +815,10 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, int cpu, struct perf_counter *group_leader) { - struct perf_counter *counter = kzalloc(sizeof(*counter), GFP_KERNEL); + struct hw_perf_counter_ops *hw_ops; + struct perf_counter *counter; + counter = kzalloc(sizeof(*counter), GFP_KERNEL); if (!counter) return NULL; @@ -839,6 +840,14 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, counter->hw_event = *hw_event; counter->wakeup_pending = 0; counter->group_leader = group_leader; + counter->hw_ops = NULL; + + hw_ops = hw_perf_counter_init(counter); + if (!hw_ops) { + kfree(counter); + return NULL; + } + counter->hw_ops = hw_ops; return counter; } @@ -908,10 +917,6 @@ asmlinkage int sys_perf_counter_open( if (!counter) goto err_put_context; - ret = hw_perf_counter_init(counter); - if (ret) - goto err_free_put_context; - perf_install_in_context(ctx, counter, cpu); ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0); @@ -927,8 +932,6 @@ err_remove_free_put_context: mutex_lock(&counter->mutex); perf_counter_remove_from_context(counter); mutex_unlock(&counter->mutex); - -err_free_put_context: kfree(counter); err_put_context: -- cgit v1.2.3 From 5c92d12411dfe5f0f3d1b1c1e2f756245e6f7249 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 11 Dec 2008 13:21:10 +0100 Subject: perf counters: implement PERF_COUNT_CPU_CLOCK Impact: add new perf-counter type The 'CPU clock' counter counts the amount of CPU clock time that is elapsing, in nanoseconds. (regardless of how much of it the task is spending on a CPU executing) This counter type is a Linux kernel based abstraction, it is available even if the hardware does not support native hardware performance counters. 
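As an illustrative sketch (not part of this patch), user-space could read such a counter via a hypothetical perf_counter_open() wrapper around the new syscall:

	/*
	 * Illustration only: perf_counter_open() is a hypothetical
	 * user-space wrapper around sys_perf_counter_open().
	 */
	struct perf_counter_hw_event hw_event = {
		.type	= PERF_COUNT_CPU_CLOCK,	/* negative type: sw counter */
	};
	__u64 count;

	/* counter for the current task (pid 0), any CPU (-1), no group (-1): */
	int fd = perf_counter_open(&hw_event, 0, -1, -1);

	read(fd, &count, sizeof(count));	/* elapsed CPU clock, in nsecs */
	close(fd);
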
Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 36 ++------------- include/linux/perf_counter.h | 9 ++-- kernel/perf_counter.c | 95 ++++++++++++++++++++++++++++++++------ 3 files changed, 92 insertions(+), 48 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 718b635dece..43c8e9a38b4 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -178,35 +178,6 @@ static void x86_perf_counter_enable(struct perf_counter *counter) __x86_perf_counter_enable(hwc, idx); } -#ifdef CONFIG_X86_64 -static inline void atomic64_counter_set(struct perf_counter *counter, u64 val) -{ - atomic64_set(&counter->count, val); -} - -static inline u64 atomic64_counter_read(struct perf_counter *counter) -{ - return atomic64_read(&counter->count); -} -#else -/* - * Todo: add proper atomic64_t support to 32-bit x86: - */ -static inline void atomic64_counter_set(struct perf_counter *counter, u64 val64) -{ - u32 *val32 = (void *)&val64; - - atomic_set(counter->count32 + 0, *(val32 + 0)); - atomic_set(counter->count32 + 1, *(val32 + 1)); -} - -static inline u64 atomic64_counter_read(struct perf_counter *counter) -{ - return atomic_read(counter->count32 + 0) | - (u64) atomic_read(counter->count32 + 1) << 32; -} -#endif - static void __hw_perf_save_counter(struct perf_counter *counter, struct hw_perf_counter *hwc, int idx) { @@ -309,7 +280,7 @@ static void x86_perf_counter_read(struct perf_counter *counter) } while (offs != hwc->prev_count); val32 = (s32) val; - val = (s64)hwc->irq_period + (s64)val32; + val = (s64)hwc->irq_period + (s64)val32; atomic64_counter_set(counter, hwc->prev_count + val); } @@ -573,13 +544,14 @@ void __init init_hw_perf_counters(void) perf_counters_initialized = true; } -static struct hw_perf_counter_ops x86_perf_counter_ops = { +static const struct hw_perf_counter_ops x86_perf_counter_ops = { .hw_perf_counter_enable = x86_perf_counter_enable, .hw_perf_counter_disable = x86_perf_counter_disable, .hw_perf_counter_read = x86_perf_counter_read, }; -struct hw_perf_counter_ops *hw_perf_counter_init(struct perf_counter *counter) +const struct hw_perf_counter_ops * +hw_perf_counter_init(struct perf_counter *counter) { int err; diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 27385641ecb..9a1713a1be2 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -131,7 +131,7 @@ struct perf_counter { struct list_head list_entry; struct list_head sibling_list; struct perf_counter *group_leader; - struct hw_perf_counter_ops *hw_ops; + const struct hw_perf_counter_ops *hw_ops; int active; #if BITS_PER_LONG == 64 @@ -197,7 +197,7 @@ struct perf_cpu_context { extern int perf_max_counters; #ifdef CONFIG_PERF_COUNTERS -extern struct hw_perf_counter_ops * +extern const struct hw_perf_counter_ops * hw_perf_counter_init(struct perf_counter *counter); extern void perf_counter_task_sched_in(struct task_struct *task, int cpu); @@ -208,6 +208,9 @@ extern void perf_counter_notify(struct pt_regs *regs); extern void perf_counter_print_debug(void); extern void hw_perf_restore_ctrl(u64 ctrl); extern u64 hw_perf_disable_all(void); +extern void atomic64_counter_set(struct perf_counter *counter, u64 val64); +extern u64 atomic64_counter_read(struct perf_counter *counter); + #else static inline void perf_counter_task_sched_in(struct task_struct *task, int cpu) { } @@ -219,7 +222,7 @@ static inline void perf_counter_init_task(struct task_struct *task) { } static inline void 
perf_counter_notify(struct pt_regs *regs) { } static inline void perf_counter_print_debug(void) { } static inline void hw_perf_restore_ctrl(u64 ctrl) { } -static inline u64 hw_perf_disable_all(void) { return 0; } +static inline u64 hw_perf_disable_all(void) { return 0; } #endif #endif /* _LINUX_PERF_COUNTER_H */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index e6e41ca9546..506286e5ba6 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -37,15 +37,15 @@ static DEFINE_MUTEX(perf_resource_mutex); /* * Architecture provided APIs - weak aliases: */ -extern __weak struct hw_perf_counter_ops * +extern __weak const struct hw_perf_counter_ops * hw_perf_counter_init(struct perf_counter *counter) { return ERR_PTR(-EINVAL); } -void __weak hw_perf_disable_all(void) { } -void __weak hw_perf_enable_all(void) { } -void __weak hw_perf_counter_setup(void) { } +u64 __weak hw_perf_disable_all(void) { return 0; } +void __weak hw_perf_restore_ctrl(u64 ctrl) { } +void __weak hw_perf_counter_setup(void) { } #if BITS_PER_LONG == 64 @@ -58,6 +58,16 @@ static inline u64 perf_counter_read_safe(struct perf_counter *counter) return (u64) atomic64_read(&counter->count); } +void atomic64_counter_set(struct perf_counter *counter, u64 val) +{ + atomic64_set(&counter->count, val); +} + +u64 atomic64_counter_read(struct perf_counter *counter) +{ + return atomic64_read(&counter->count); +} + #else /* @@ -79,6 +89,20 @@ static u64 perf_counter_read_safe(struct perf_counter *counter) return cntl | ((u64) cnth) << 32; } +void atomic64_counter_set(struct perf_counter *counter, u64 val64) +{ + u32 *val32 = (void *)&val64; + + atomic_set(counter->count32 + 0, *(val32 + 0)); + atomic_set(counter->count32 + 1, *(val32 + 1)); +} + +u64 atomic64_counter_read(struct perf_counter *counter) +{ + return atomic_read(counter->count32 + 0) | + (u64) atomic_read(counter->count32 + 1) << 32; +} + #endif static void @@ -131,6 +155,7 @@ static void __perf_counter_remove_from_context(void *info) struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_counter *counter = info; struct perf_counter_context *ctx = counter->ctx; + u64 perf_flags; /* * If this is a task context, we need to check whether it is @@ -155,9 +180,9 @@ static void __perf_counter_remove_from_context(void *info) * Protect the list operation against NMI by disabling the * counters on a global level. NOP for non NMI based counters. */ - hw_perf_disable_all(); + perf_flags = hw_perf_disable_all(); list_del_counter(counter, ctx); - hw_perf_enable_all(); + hw_perf_restore_ctrl(perf_flags); if (!ctx->task) { /* @@ -232,6 +257,7 @@ static void __perf_install_in_context(void *info) struct perf_counter *counter = info; struct perf_counter_context *ctx = counter->ctx; int cpu = smp_processor_id(); + u64 perf_flags; /* * If this is a task context, we need to check whether it is @@ -247,9 +273,9 @@ static void __perf_install_in_context(void *info) * Protect the list operation against NMI by disabling the * counters on a global level. NOP for non NMI based counters. 
*/ - hw_perf_disable_all(); + perf_flags = hw_perf_disable_all(); list_add_counter(counter, ctx); - hw_perf_enable_all(); + hw_perf_restore_ctrl(perf_flags); ctx->nr_counters++; @@ -457,6 +483,7 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) { struct perf_counter_context *ctx = &curr->perf_counter_ctx; struct perf_counter *counter; + u64 perf_flags; if (likely(!ctx->nr_counters)) return; @@ -468,13 +495,13 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) /* * Rotate the first entry last (works just fine for group counters too): */ - hw_perf_disable_all(); + perf_flags = hw_perf_disable_all(); list_for_each_entry(counter, &ctx->counter_list, list_entry) { list_del(&counter->list_entry); list_add_tail(&counter->list_entry, &ctx->counter_list); break; } - hw_perf_enable_all(); + hw_perf_restore_ctrl(perf_flags); spin_unlock(&ctx->lock); @@ -807,6 +834,42 @@ static const struct file_operations perf_fops = { .poll = perf_poll, }; +static void cpu_clock_perf_counter_enable(struct perf_counter *counter) +{ +} + +static void cpu_clock_perf_counter_disable(struct perf_counter *counter) +{ +} + +static void cpu_clock_perf_counter_read(struct perf_counter *counter) +{ + int cpu = raw_smp_processor_id(); + + atomic64_counter_set(counter, cpu_clock(cpu)); +} + +static const struct hw_perf_counter_ops perf_ops_cpu_clock = { + .hw_perf_counter_enable = cpu_clock_perf_counter_enable, + .hw_perf_counter_disable = cpu_clock_perf_counter_disable, + .hw_perf_counter_read = cpu_clock_perf_counter_read, +}; + +static const struct hw_perf_counter_ops * +sw_perf_counter_init(struct perf_counter *counter) +{ + const struct hw_perf_counter_ops *hw_ops = NULL; + + switch (counter->hw_event.type) { + case PERF_COUNT_CPU_CLOCK: + hw_ops = &perf_ops_cpu_clock; + break; + default: + break; + } + return hw_ops; +} + /* * Allocate and initialize a counter structure */ @@ -815,7 +878,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, int cpu, struct perf_counter *group_leader) { - struct hw_perf_counter_ops *hw_ops; + const struct hw_perf_counter_ops *hw_ops; struct perf_counter *counter; counter = kzalloc(sizeof(*counter), GFP_KERNEL); @@ -842,7 +905,13 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, counter->group_leader = group_leader; counter->hw_ops = NULL; - hw_ops = hw_perf_counter_init(counter); + hw_ops = NULL; + if (!hw_event->raw && hw_event->type < 0) + hw_ops = sw_perf_counter_init(counter); + if (!hw_ops) { + hw_ops = hw_perf_counter_init(counter); + } + if (!hw_ops) { kfree(counter); return NULL; @@ -912,7 +981,7 @@ asmlinkage int sys_perf_counter_open( goto err_put_context; } - ret = -ENOMEM; + ret = -EINVAL; counter = perf_counter_alloc(&hw_event, cpu, group_leader); if (!counter) goto err_put_context; -- cgit v1.2.3 From 01b2838c4298c5e0d30b4993c195ac34dd9df61e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 11 Dec 2008 13:45:51 +0100 Subject: perf counters: consolidate hw_perf save/restore APIs Impact: cleanup Rename them to better match up the usual IRQ disable/enable APIs: hw_perf_disable_all() => hw_perf_save_disable() hw_perf_restore_ctrl() => hw_perf_restore() Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 8 ++++---- drivers/acpi/processor_idle.c | 10 +++++----- include/linux/perf_counter.h | 10 +++++----- kernel/perf_counter.c | 16 ++++++++-------- 4 files changed, 22 insertions(+), 22 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 
43c8e9a38b4..3e1dbebe22b 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -118,13 +118,13 @@ void hw_perf_enable_all(void) wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0); } -void hw_perf_restore_ctrl(u64 ctrl) +void hw_perf_restore(u64 ctrl) { wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0); } -EXPORT_SYMBOL_GPL(hw_perf_restore_ctrl); +EXPORT_SYMBOL_GPL(hw_perf_restore); -u64 hw_perf_disable_all(void) +u64 hw_perf_save_disable(void) { u64 ctrl; @@ -132,7 +132,7 @@ u64 hw_perf_disable_all(void) wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0); return ctrl; } -EXPORT_SYMBOL_GPL(hw_perf_disable_all); +EXPORT_SYMBOL_GPL(hw_perf_save_disable); static inline void __x86_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx) diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index cca804e6f1d..a3e66a33b7a 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -270,11 +270,11 @@ static atomic_t c3_cpu_count; /* Common C-state entry for C2, C3, .. */ static void acpi_cstate_enter(struct acpi_processor_cx *cstate) { - u64 pctrl; + u64 perf_flags; /* Don't trace irqs off for idle */ stop_critical_timings(); - pctrl = hw_perf_disable_all(); + perf_flags = hw_perf_save_disable(); if (cstate->entry_method == ACPI_CSTATE_FFH) { /* Call into architectural FFH based C-state */ acpi_processor_ffh_cstate_enter(cstate); @@ -287,7 +287,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate) gets asserted in time to freeze execution properly. */ unused = inl(acpi_gbl_FADT.xpm_timer_block.address); } - hw_perf_restore_ctrl(pctrl); + hw_perf_restore(perf_flags); start_critical_timings(); } #endif /* !CONFIG_CPU_IDLE */ @@ -1433,7 +1433,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) /* Don't trace irqs off for idle */ stop_critical_timings(); - pctrl = hw_perf_disable_all(); + pctrl = hw_perf_save_disable(); if (cx->entry_method == ACPI_CSTATE_FFH) { /* Call into architectural FFH based C-state */ acpi_processor_ffh_cstate_enter(cx); @@ -1448,7 +1448,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) gets asserted in time to freeze execution properly. 
*/ unused = inl(acpi_gbl_FADT.xpm_timer_block.address); } - hw_perf_restore_ctrl(pctrl); + hw_perf_restore(pctrl); start_critical_timings(); } diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 9a1713a1be2..68f6e3ad531 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -67,7 +67,7 @@ enum perf_counter_record_type { * Hardware event to monitor via a performance monitoring counter: */ struct perf_counter_hw_event { - u64 type; + s64 type; u64 irq_period; u32 record_type; @@ -206,8 +206,8 @@ extern void perf_counter_task_tick(struct task_struct *task, int cpu); extern void perf_counter_init_task(struct task_struct *task); extern void perf_counter_notify(struct pt_regs *regs); extern void perf_counter_print_debug(void); -extern void hw_perf_restore_ctrl(u64 ctrl); -extern u64 hw_perf_disable_all(void); +extern u64 hw_perf_save_disable(void); +extern void hw_perf_restore(u64 ctrl); extern void atomic64_counter_set(struct perf_counter *counter, u64 val64); extern u64 atomic64_counter_read(struct perf_counter *counter); @@ -221,8 +221,8 @@ perf_counter_task_tick(struct task_struct *task, int cpu) { } static inline void perf_counter_init_task(struct task_struct *task) { } static inline void perf_counter_notify(struct pt_regs *regs) { } static inline void perf_counter_print_debug(void) { } -static inline void hw_perf_restore_ctrl(u64 ctrl) { } -static inline u64 hw_perf_disable_all(void) { return 0; } +static inline void hw_perf_restore(u64 ctrl) { } +static inline u64 hw_perf_save_disable(void) { return 0; } #endif #endif /* _LINUX_PERF_COUNTER_H */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 506286e5ba6..0e93fea1712 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -43,8 +43,8 @@ hw_perf_counter_init(struct perf_counter *counter) return ERR_PTR(-EINVAL); } -u64 __weak hw_perf_disable_all(void) { return 0; } -void __weak hw_perf_restore_ctrl(u64 ctrl) { } +u64 __weak hw_perf_save_disable(void) { return 0; } +void __weak hw_perf_restore(u64 ctrl) { } void __weak hw_perf_counter_setup(void) { } #if BITS_PER_LONG == 64 @@ -180,9 +180,9 @@ static void __perf_counter_remove_from_context(void *info) * Protect the list operation against NMI by disabling the * counters on a global level. NOP for non NMI based counters. */ - perf_flags = hw_perf_disable_all(); + perf_flags = hw_perf_save_disable(); list_del_counter(counter, ctx); - hw_perf_restore_ctrl(perf_flags); + hw_perf_restore(perf_flags); if (!ctx->task) { /* @@ -273,9 +273,9 @@ static void __perf_install_in_context(void *info) * Protect the list operation against NMI by disabling the * counters on a global level. NOP for non NMI based counters. 
*/ - perf_flags = hw_perf_disable_all(); + perf_flags = hw_perf_save_disable(); list_add_counter(counter, ctx); - hw_perf_restore_ctrl(perf_flags); + hw_perf_restore(perf_flags); ctx->nr_counters++; @@ -495,13 +495,13 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) /* * Rotate the first entry last (works just fine for group counters too): */ - perf_flags = hw_perf_disable_all(); + perf_flags = hw_perf_save_disable(); list_for_each_entry(counter, &ctx->counter_list, list_entry) { list_del(&counter->list_entry); list_add_tail(&counter->list_entry, &ctx->counter_list); break; } - hw_perf_restore_ctrl(perf_flags); + hw_perf_restore(perf_flags); spin_unlock(&ctx->lock); -- cgit v1.2.3 From bae43c9945ebeef15e7952e317efb02393d3bfc7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 11 Dec 2008 14:03:20 +0100 Subject: perf counters: implement PERF_COUNT_TASK_CLOCK Impact: add new perf-counter type The 'task clock' counter counts the amount of time a task is executing, in nanoseconds. It stops ticking when a task is scheduled out either due to it blocking, sleeping or it being preempted. This counter type is a Linux kernel based abstraction, it is available even if the hardware does not support native hardware performance counters. Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 7 +++++-- kernel/perf_counter.c | 22 ++++++++++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 68f6e3ad531..30c0ec8c1ee 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -50,8 +50,11 @@ enum hw_event_types { */ PERF_COUNT_CPU_CLOCK = -1, PERF_COUNT_TASK_CLOCK = -2, - PERF_COUNT_PAGE_FAULTS = -3, - PERF_COUNT_CONTEXT_SWITCHES = -4, + /* + * Future software events: + */ + /* PERF_COUNT_PAGE_FAULTS = -3, + PERF_COUNT_CONTEXT_SWITCHES = -4, */ }; /* diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 0e93fea1712..a0fe8474ee2 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -855,6 +855,25 @@ static const struct hw_perf_counter_ops perf_ops_cpu_clock = { .hw_perf_counter_read = cpu_clock_perf_counter_read, }; +static void task_clock_perf_counter_enable(struct perf_counter *counter) +{ +} + +static void task_clock_perf_counter_disable(struct perf_counter *counter) +{ +} + +static void task_clock_perf_counter_read(struct perf_counter *counter) +{ + atomic64_counter_set(counter, current->se.sum_exec_runtime); +} + +static const struct hw_perf_counter_ops perf_ops_task_clock = { + .hw_perf_counter_enable = task_clock_perf_counter_enable, + .hw_perf_counter_disable = task_clock_perf_counter_disable, + .hw_perf_counter_read = task_clock_perf_counter_read, +}; + static const struct hw_perf_counter_ops * sw_perf_counter_init(struct perf_counter *counter) { @@ -864,6 +883,9 @@ sw_perf_counter_init(struct perf_counter *counter) case PERF_COUNT_CPU_CLOCK: hw_ops = &perf_ops_cpu_clock; break; + case PERF_COUNT_TASK_CLOCK: + hw_ops = &perf_ops_task_clock; + break; default: break; } -- cgit v1.2.3 From 1d1c7ddbfab358445a542715551301b7fc363e28 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 11 Dec 2008 14:59:31 +0100 Subject: perf counters: add prctl interface to disable/enable counters Add a way for self-monitoring tasks to disable/enable counters summarily, via a prctl: PR_TASK_PERF_COUNTERS_DISABLE 31 PR_TASK_PERF_COUNTERS_ENABLE 32 Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 4 +++ include/linux/prctl.h | 3 ++ kernel/perf_counter.c 
| 86 ++++++++++++++++++++++++++++++++++++++++---- kernel/sys.c | 7 ++++ 4 files changed, 93 insertions(+), 7 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 30c0ec8c1ee..97d86c293ee 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -213,6 +213,8 @@ extern u64 hw_perf_save_disable(void); extern void hw_perf_restore(u64 ctrl); extern void atomic64_counter_set(struct perf_counter *counter, u64 val64); extern u64 atomic64_counter_read(struct perf_counter *counter); +extern int perf_counter_task_disable(void); +extern int perf_counter_task_enable(void); #else static inline void @@ -226,6 +228,8 @@ static inline void perf_counter_notify(struct pt_regs *regs) { } static inline void perf_counter_print_debug(void) { } static inline void hw_perf_restore(u64 ctrl) { } static inline u64 hw_perf_save_disable(void) { return 0; } +static inline int perf_counter_task_disable(void) { return -EINVAL; } +static inline int perf_counter_task_enable(void) { return -EINVAL; } #endif #endif /* _LINUX_PERF_COUNTER_H */ diff --git a/include/linux/prctl.h b/include/linux/prctl.h index 48d887e3c6e..b00df4c79c6 100644 --- a/include/linux/prctl.h +++ b/include/linux/prctl.h @@ -85,4 +85,7 @@ #define PR_SET_TIMERSLACK 29 #define PR_GET_TIMERSLACK 30 +#define PR_TASK_PERF_COUNTERS_DISABLE 31 +#define PR_TASK_PERF_COUNTERS_ENABLE 32 + #endif /* _LINUX_PRCTL_H */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index a0fe8474ee2..4e679b91d8b 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -415,6 +415,9 @@ counter_sched_in(struct perf_counter *counter, struct perf_counter_context *ctx, int cpu) { + if (counter->active == -1) + return; + counter->hw_ops->hw_perf_counter_enable(counter); counter->active = 1; counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ @@ -479,6 +482,79 @@ void perf_counter_task_sched_in(struct task_struct *task, int cpu) cpuctx->task_ctx = ctx; } +int perf_counter_task_disable(void) +{ + struct task_struct *curr = current; + struct perf_counter_context *ctx = &curr->perf_counter_ctx; + struct perf_counter *counter; + u64 perf_flags; + int cpu; + + if (likely(!ctx->nr_counters)) + return 0; + + local_irq_disable(); + cpu = smp_processor_id(); + + perf_counter_task_sched_out(curr, cpu); + + spin_lock(&ctx->lock); + + /* + * Disable all the counters: + */ + perf_flags = hw_perf_save_disable(); + + list_for_each_entry(counter, &ctx->counter_list, list_entry) { + WARN_ON_ONCE(counter->active == 1); + counter->active = -1; + } + hw_perf_restore(perf_flags); + + spin_unlock(&ctx->lock); + + local_irq_enable(); + + return 0; +} + +int perf_counter_task_enable(void) +{ + struct task_struct *curr = current; + struct perf_counter_context *ctx = &curr->perf_counter_ctx; + struct perf_counter *counter; + u64 perf_flags; + int cpu; + + if (likely(!ctx->nr_counters)) + return 0; + + local_irq_disable(); + cpu = smp_processor_id(); + + spin_lock(&ctx->lock); + + /* + * Disable all the counters: + */ + perf_flags = hw_perf_save_disable(); + + list_for_each_entry(counter, &ctx->counter_list, list_entry) { + if (counter->active != -1) + continue; + counter->active = 0; + } + hw_perf_restore(perf_flags); + + spin_unlock(&ctx->lock); + + perf_counter_task_sched_in(curr, cpu); + + local_irq_enable(); + + return 0; +} + void perf_counter_task_tick(struct task_struct *curr, int cpu) { struct perf_counter_context *ctx = &curr->perf_counter_ctx; @@ -951,13 +1027,9 @@ perf_counter_alloc(struct perf_counter_hw_event 
*hw_event, * @cpu: target cpu * @group_fd: group leader counter fd */ -asmlinkage int sys_perf_counter_open( - - struct perf_counter_hw_event *hw_event_uptr __user, - pid_t pid, - int cpu, - int group_fd) - +asmlinkage int +sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr __user, + pid_t pid, int cpu, int group_fd) { struct perf_counter *counter, *group_leader; struct perf_counter_hw_event hw_event; diff --git a/kernel/sys.c b/kernel/sys.c index 31deba8f7d1..0f66633be31 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -1716,6 +1717,12 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, case PR_SET_TSC: error = SET_TSC_CTL(arg2); break; + case PR_TASK_PERF_COUNTERS_DISABLE: + error = perf_counter_task_disable(); + break; + case PR_TASK_PERF_COUNTERS_ENABLE: + error = perf_counter_task_enable(); + break; case PR_GET_TIMERSLACK: error = current->timer_slack_ns; break; -- cgit v1.2.3 From 6a930700c8b655a9e25e42fc4adc0b225ebbcefc Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 11 Dec 2008 15:17:03 +0100 Subject: perf counters: clean up state transitions Impact: cleanup Introduce a proper enum for the 3 states of a counter: PERF_COUNTER_STATE_OFF = -1 PERF_COUNTER_STATE_INACTIVE = 0 PERF_COUNTER_STATE_ACTIVE = 1 and rename counter->active to counter->state and propagate the changes everywhere. Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 2 +- include/linux/perf_counter.h | 11 ++++++++++- kernel/perf_counter.c | 29 ++++++++++++++--------------- 3 files changed, 25 insertions(+), 17 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 3e1dbebe22b..4854cca7fff 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -332,7 +332,7 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown) * Then store sibling timestamps (if any): */ list_for_each_entry(counter, &group_leader->sibling_list, list_entry) { - if (!counter->active) { + if (counter->state != PERF_COUNTER_STATE_ACTIVE) { /* * When counter was not in the overflow mask, we have to * read it from hardware. 
We read it as well, when it diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 97d86c293ee..8cb095fa442 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -127,6 +127,15 @@ struct hw_perf_counter_ops { void (*hw_perf_counter_read) (struct perf_counter *counter); }; +/** + * enum perf_counter_active_state - the states of a counter + */ +enum perf_counter_active_state { + PERF_COUNTER_STATE_OFF = -1, + PERF_COUNTER_STATE_INACTIVE = 0, + PERF_COUNTER_STATE_ACTIVE = 1, +}; + /** * struct perf_counter - performance counter kernel representation: */ @@ -136,7 +145,7 @@ struct perf_counter { struct perf_counter *group_leader; const struct hw_perf_counter_ops *hw_ops; - int active; + enum perf_counter_active_state state; #if BITS_PER_LONG == 64 atomic64_t count; #else diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 4e679b91d8b..559130b8774 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -167,9 +167,9 @@ static void __perf_counter_remove_from_context(void *info) spin_lock(&ctx->lock); - if (counter->active) { + if (counter->state == PERF_COUNTER_STATE_ACTIVE) { counter->hw_ops->hw_perf_counter_disable(counter); - counter->active = 0; + counter->state = PERF_COUNTER_STATE_INACTIVE; ctx->nr_active--; cpuctx->active_oncpu--; counter->task = NULL; @@ -281,7 +281,7 @@ static void __perf_install_in_context(void *info) if (cpuctx->active_oncpu < perf_max_counters) { counter->hw_ops->hw_perf_counter_enable(counter); - counter->active = 1; + counter->state = PERF_COUNTER_STATE_ACTIVE; counter->oncpu = cpu; ctx->nr_active++; cpuctx->active_oncpu++; @@ -328,7 +328,6 @@ retry: spin_lock_irq(&ctx->lock); /* - * If the context is active and the counter has not been added * we need to retry the smp call. 
*/ if (ctx->nr_active && list_empty(&counter->list_entry)) { @@ -353,12 +352,12 @@ counter_sched_out(struct perf_counter *counter, struct perf_cpu_context *cpuctx, struct perf_counter_context *ctx) { - if (!counter->active) + if (counter->state != PERF_COUNTER_STATE_ACTIVE) return; counter->hw_ops->hw_perf_counter_disable(counter); - counter->active = 0; - counter->oncpu = -1; + counter->state = PERF_COUNTER_STATE_INACTIVE; + counter->oncpu = -1; cpuctx->active_oncpu--; ctx->nr_active--; @@ -415,11 +414,11 @@ counter_sched_in(struct perf_counter *counter, struct perf_counter_context *ctx, int cpu) { - if (counter->active == -1) + if (counter->state == PERF_COUNTER_STATE_OFF) return; counter->hw_ops->hw_perf_counter_enable(counter); - counter->active = 1; + counter->state = PERF_COUNTER_STATE_ACTIVE; counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ cpuctx->active_oncpu++; @@ -506,8 +505,8 @@ int perf_counter_task_disable(void) perf_flags = hw_perf_save_disable(); list_for_each_entry(counter, &ctx->counter_list, list_entry) { - WARN_ON_ONCE(counter->active == 1); - counter->active = -1; + WARN_ON_ONCE(counter->state == PERF_COUNTER_STATE_ACTIVE); + counter->state = PERF_COUNTER_STATE_OFF; } hw_perf_restore(perf_flags); @@ -540,9 +539,9 @@ int perf_counter_task_enable(void) perf_flags = hw_perf_save_disable(); list_for_each_entry(counter, &ctx->counter_list, list_entry) { - if (counter->active != -1) + if (counter->state != PERF_COUNTER_STATE_OFF) continue; - counter->active = 0; + counter->state = PERF_COUNTER_STATE_INACTIVE; } hw_perf_restore(perf_flags); @@ -620,7 +619,7 @@ static u64 perf_counter_read(struct perf_counter *counter) * If counter is enabled and currently active on a CPU, update the * value in the counter structure: */ - if (counter->active) { + if (counter->state == PERF_COUNTER_STATE_ACTIVE) { smp_call_function_single(counter->oncpu, __hw_perf_counter_read, counter, 1); } @@ -673,7 +672,7 @@ static struct perf_data *perf_switch_irq_data(struct perf_counter *counter) retry: spin_lock_irq(&ctx->lock); - if (!counter->active) { + if (counter->state != PERF_COUNTER_STATE_ACTIVE) { counter->irqdata = counter->usrdata; counter->usrdata = oldirqdata; spin_unlock_irq(&ctx->lock); -- cgit v1.2.3 From 447557ac7ce120306b4a31d6003faef39cb1bf14 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 11 Dec 2008 20:40:18 +0100 Subject: perf counters: update docs Impact: update docs Signed-off-by: Ingo Molnar --- Documentation/perf-counters.txt | 107 ++++++++++++++++++++++++++++------------ 1 file changed, 75 insertions(+), 32 deletions(-) diff --git a/Documentation/perf-counters.txt b/Documentation/perf-counters.txt index 19033a0bb52..fddd32189a5 100644 --- a/Documentation/perf-counters.txt +++ b/Documentation/perf-counters.txt @@ -10,8 +10,8 @@ trigger interrupts when a threshold number of events have passed - and can thus be used to profile the code that runs on that CPU. The Linux Performance Counter subsystem provides an abstraction of these -hardware capabilities. It provides per task and per CPU counters, and -it provides event capabilities on top of those. +hardware capabilities. It provides per task and per CPU counters, counter +groups, and it provides event capabilities on top of those. Performance counters are accessed via special file descriptors. There's one file descriptor per virtual counter used. @@ -19,12 +19,8 @@ There's one file descriptor per virtual counter used. 
The special file descriptor is opened via the perf_counter_open() system call: - int - perf_counter_open(u32 hw_event_type, - u32 hw_event_period, - u32 record_type, - pid_t pid, - int cpu); + int sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr, + pid_t pid, int cpu, int group_fd); The syscall returns the new fd. The fd can be used via the normal VFS system calls: read() can be used to read the counter, fcntl() @@ -33,39 +29,78 @@ can be used to set the blocking mode, etc. Multiple counters can be kept open at a time, and the counters can be poll()ed. -When creating a new counter fd, 'hw_event_type' is one of: - - enum hw_event_types { - PERF_COUNT_CYCLES, - PERF_COUNT_INSTRUCTIONS, - PERF_COUNT_CACHE_REFERENCES, - PERF_COUNT_CACHE_MISSES, - PERF_COUNT_BRANCH_INSTRUCTIONS, - PERF_COUNT_BRANCH_MISSES, - }; +When creating a new counter fd, 'perf_counter_hw_event' is: + +/* + * Hardware event to monitor via a performance monitoring counter: + */ +struct perf_counter_hw_event { + s64 type; + + u64 irq_period; + u32 record_type; + + u32 disabled : 1, /* off by default */ + nmi : 1, /* NMI sampling */ + raw : 1, /* raw event type */ + __reserved_1 : 29; + + u64 __reserved_2; +}; + +/* + * Generalized performance counter event types, used by the hw_event.type + * parameter of the sys_perf_counter_open() syscall: + */ +enum hw_event_types { + /* + * Common hardware events, generalized by the kernel: + */ + PERF_COUNT_CYCLES = 0, + PERF_COUNT_INSTRUCTIONS = 1, + PERF_COUNT_CACHE_REFERENCES = 2, + PERF_COUNT_CACHE_MISSES = 3, + PERF_COUNT_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_BRANCH_MISSES = 5, + + /* + * Special "software" counters provided by the kernel, even if + * the hardware does not support performance counters. These + * counters measure various physical and sw events of the + * kernel (and allow the profiling of them as well): + */ + PERF_COUNT_CPU_CLOCK = -1, + PERF_COUNT_TASK_CLOCK = -2, + /* + * Future software events: + */ + /* PERF_COUNT_PAGE_FAULTS = -3, + PERF_COUNT_CONTEXT_SWITCHES = -4, */ +}; These are standardized types of events that work uniformly on all CPUs that implements Performance Counters support under Linux. If a CPU is not able to count branch-misses, then the system call will return -EINVAL. -[ Note: more hw_event_types are supported as well, but they are CPU - specific and are enumerated via /sys on a per CPU basis. Raw hw event - types can be passed in as negative numbers. For example, to count - "External bus cycles while bus lock signal asserted" events on Intel - Core CPUs, pass in a -0x4064 event type value. ] - -The parameter 'hw_event_period' is the number of events before waking up -a read() that is blocked on a counter fd. Zero value means a non-blocking -counter. +More hw_event_types are supported as well, but they are CPU +specific and are enumerated via /sys on a per CPU basis. Raw hw event +types can be passed in under hw_event.type if hw_event.raw is 1. +For example, to count "External bus cycles while bus lock signal asserted" +events on Intel Core CPUs, pass in a 0x4064 event type value and set +hw_event.raw to 1. 
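A minimal sketch of such a raw-event setup (illustrative only; error handling is omitted and a user-space wrapper for the syscall is assumed):

	struct perf_counter_hw_event hw_event = {
		.raw	= 1,
		.type	= 0x4064,	/* CPU-specific raw event code */
	};

	/* count the raw event for task 'pid', on whatever CPU it runs: */
	fd = sys_perf_counter_open(&hw_event, pid, -1, -1);
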
'record_type' is the type of data that a read() will provide for the counter, and it can be one of: - enum perf_record_type { - PERF_RECORD_SIMPLE, - PERF_RECORD_IRQ, - }; +/* + * IRQ-notification data record type: + */ +enum perf_counter_record_type { + PERF_RECORD_SIMPLE = 0, + PERF_RECORD_IRQ = 1, + PERF_RECORD_GROUP = 2, +}; a "simple" counter is one that counts hardware events and allows them to be read out into a u64 count value. (read() returns 8 on @@ -76,6 +111,10 @@ the IP of the interrupted context. In this case read() will return the 8-byte counter value, plus the Instruction Pointer address of the interrupted context. +The parameter 'hw_event_period' is the number of events before waking up +a read() that is blocked on a counter fd. Zero value means a non-blocking +counter. + The 'pid' parameter allows the counter to be specific to a task: pid == 0: if the pid parameter is zero, the counter is attached to the @@ -92,7 +131,7 @@ CPU: cpu >= 0: the counter is restricted to a specific CPU cpu == -1: the counter counts on all CPUs -Note: the combination of 'pid == -1' and 'cpu == -1' is not valid. +(Note: the combination of 'pid == -1' and 'cpu == -1' is not valid.) A 'pid > 0' and 'cpu == -1' counter is a per task counter that counts events of that task and 'follows' that task to whatever CPU the task @@ -102,3 +141,7 @@ their own tasks. A 'pid == -1' and 'cpu == x' counter is a per CPU counter that counts all events on CPU-x. Per CPU counters need CAP_SYS_ADMIN privilege. +Group counters are created by passing in a group_fd of another counter. +Groups are scheduled at once and can be used with PERF_RECORD_GROUP +to record multi-dimensional timestamps. + -- cgit v1.2.3 From 9b194e831fb2c322ed81a373e49620f34edc2778 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 14 Dec 2008 20:22:35 +0100 Subject: x86: implement atomic64_t on 32-bit Impact: new API Implement the atomic64_t APIs on 32-bit as well. Will be used by the performance counters code. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/atomic_32.h | 218 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 218 insertions(+) diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h index ad5b9f6ecdd..9927e01b03c 100644 --- a/arch/x86/include/asm/atomic_32.h +++ b/arch/x86/include/asm/atomic_32.h @@ -255,5 +255,223 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() +/* An 64bit atomic type */ + +typedef struct { + unsigned long long counter; +} atomic64_t; + +#define ATOMIC64_INIT(val) { (val) } + +/** + * atomic64_read - read atomic64 variable + * @v: pointer of type atomic64_t + * + * Atomically reads the value of @v. + * Doesn't imply a read memory barrier. + */ +#define __atomic64_read(ptr) ((ptr)->counter) + +static inline unsigned long long +cmpxchg8b(unsigned long long *ptr, unsigned long long old, unsigned long long new) +{ + asm volatile( + + LOCK_PREFIX "cmpxchg8b (%[ptr])\n" + + : "=A" (old) + + : [ptr] "D" (ptr), + "A" (old), + "b" (ll_low(new)), + "c" (ll_high(new)) + + : "memory"); + + return old; +} + +static inline unsigned long long +atomic64_cmpxchg(atomic64_t *ptr, unsigned long long old_val, + unsigned long long new_val) +{ + return cmpxchg8b(&ptr->counter, old_val, new_val); +} + +/** + * atomic64_set - set atomic64 variable + * @ptr: pointer to type atomic64_t + * @new_val: value to assign + * + * Atomically sets the value of @ptr to @new_val. 
+ */ +static inline void atomic64_set(atomic64_t *ptr, unsigned long long new_val) +{ + unsigned long long old_val; + + do { + old_val = atomic_read(ptr); + } while (atomic64_cmpxchg(ptr, old_val, new_val) != old_val); +} + +/** + * atomic64_read - read atomic64 variable + * @ptr: pointer to type atomic64_t + * + * Atomically reads the value of @ptr and returns it. + */ +static inline unsigned long long atomic64_read(atomic64_t *ptr) +{ + unsigned long long curr_val; + + do { + curr_val = __atomic64_read(ptr); + } while (atomic64_cmpxchg(ptr, curr_val, curr_val) != curr_val); + + return curr_val; +} + +/** + * atomic64_add_return - add and return + * @delta: integer value to add + * @ptr: pointer to type atomic64_t + * + * Atomically adds @delta to @ptr and returns @delta + *@ptr + */ +static inline unsigned long long +atomic64_add_return(unsigned long long delta, atomic64_t *ptr) +{ + unsigned long long old_val, new_val; + + do { + old_val = atomic_read(ptr); + new_val = old_val + delta; + + } while (atomic64_cmpxchg(ptr, old_val, new_val) != old_val); + + return new_val; +} + +static inline long atomic64_sub_return(unsigned long long delta, atomic64_t *ptr) +{ + return atomic64_add_return(-delta, ptr); +} + +static inline long atomic64_inc_return(atomic64_t *ptr) +{ + return atomic64_add_return(1, ptr); +} + +static inline long atomic64_dec_return(atomic64_t *ptr) +{ + return atomic64_sub_return(1, ptr); +} + +/** + * atomic64_add - add integer to atomic64 variable + * @delta: integer value to add + * @ptr: pointer to type atomic64_t + * + * Atomically adds @delta to @ptr. + */ +static inline void atomic64_add(unsigned long long delta, atomic64_t *ptr) +{ + atomic64_add_return(delta, ptr); +} + +/** + * atomic64_sub - subtract the atomic64 variable + * @delta: integer value to subtract + * @ptr: pointer to type atomic64_t + * + * Atomically subtracts @delta from @ptr. + */ +static inline void atomic64_sub(unsigned long long delta, atomic64_t *ptr) +{ + atomic64_add(-delta, ptr); +} + +/** + * atomic64_sub_and_test - subtract value from variable and test result + * @delta: integer value to subtract + * @ptr: pointer to type atomic64_t + * + * Atomically subtracts @delta from @ptr and returns + * true if the result is zero, or false for all + * other cases. + */ +static inline int +atomic64_sub_and_test(unsigned long long delta, atomic64_t *ptr) +{ + unsigned long long old_val = atomic64_sub_return(delta, ptr); + + return old_val == 0; +} + +/** + * atomic64_inc - increment atomic64 variable + * @ptr: pointer to type atomic64_t + * + * Atomically increments @ptr by 1. + */ +static inline void atomic64_inc(atomic64_t *ptr) +{ + atomic64_add(1, ptr); +} + +/** + * atomic64_dec - decrement atomic64 variable + * @ptr: pointer to type atomic64_t + * + * Atomically decrements @ptr by 1. + */ +static inline void atomic64_dec(atomic64_t *ptr) +{ + atomic64_sub(1, ptr); +} + +/** + * atomic64_dec_and_test - decrement and test + * @ptr: pointer to type atomic64_t + * + * Atomically decrements @ptr by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static inline int atomic64_dec_and_test(atomic64_t *ptr) +{ + return atomic64_sub_and_test(1, ptr); +} + +/** + * atomic64_inc_and_test - increment and test + * @ptr: pointer to type atomic64_t + * + * Atomically increments @ptr by 1 + * and returns true if the result is zero, or false for all + * other cases. 
+ */ +static inline int atomic64_inc_and_test(atomic64_t *ptr) +{ + return atomic64_sub_and_test(-1, ptr); +} + +/** + * atomic64_add_negative - add and test if negative + * @delta: integer value to add + * @ptr: pointer to type atomic64_t + * + * Atomically adds @delta to @ptr and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static inline int +atomic64_add_negative(unsigned long long delta, atomic64_t *ptr) +{ + long long old_val = atomic64_add_return(delta, ptr); + + return old_val < 0; +} + #include #endif /* _ASM_X86_ATOMIC_32_H */ -- cgit v1.2.3 From ee06094f8279e1312fc0a31591320cc7b6f0ab1e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 13 Dec 2008 09:00:03 +0100 Subject: perfcounters: restructure x86 counter math Impact: restructure code Change counter math from absolute values to clear delta logic. We try to extract elapsed deltas from the raw hw counter - and put that into the generic counter. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 2 +- arch/x86/kernel/cpu/perf_counter.c | 230 ++++++++++++++++++++----------------- include/linux/perf_counter.h | 15 ++- kernel/perf_counter.c | 68 +---------- 4 files changed, 137 insertions(+), 178 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f2fdc186724..fe94490bab6 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -643,7 +643,7 @@ config X86_UP_IOAPIC config X86_LOCAL_APIC def_bool y depends on X86_64 || (X86_32 && (X86_UP_APIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH)) - select HAVE_PERF_COUNTERS + select HAVE_PERF_COUNTERS if (!M386 && !M486) config X86_IO_APIC def_bool y diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index b903f8df72b..5afae13d8d5 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -53,6 +53,48 @@ const int intel_perfmon_event_map[] = const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map); +/* + * Propagate counter elapsed time into the generic counter. + * Can only be executed on the CPU where the counter is active. + * Returns the delta events processed. + */ +static void +x86_perf_counter_update(struct perf_counter *counter, + struct hw_perf_counter *hwc, int idx) +{ + u64 prev_raw_count, new_raw_count, delta; + + WARN_ON_ONCE(counter->state != PERF_COUNTER_STATE_ACTIVE); + /* + * Careful: an NMI might modify the previous counter value. + * + * Our tactic to handle this is to first atomically read and + * exchange a new raw count - then add that new-prev delta + * count to the generic counter atomically: + */ +again: + prev_raw_count = atomic64_read(&hwc->prev_count); + rdmsrl(hwc->counter_base + idx, new_raw_count); + + if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + /* + * Now we have the new raw value and have updated the prev + * timestamp already. We can now calculate the elapsed delta + * (counter-)time and add that to the generic counter. 
+ * + * Careful, not all hw sign-extends above the physical width + * of the count, so we do that by clipping the delta to 32 bits: + */ + delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count); + WARN_ON_ONCE((int)delta < 0); + + atomic64_add(delta, &counter->count); + atomic64_sub(delta, &hwc->period_left); +} + /* * Setup the hardware configuration for a given hw_event_type */ @@ -90,10 +132,10 @@ static int __hw_perf_counter_init(struct perf_counter *counter) * so we install an artificial 1<<31 period regardless of * the generic counter period: */ - if (!hwc->irq_period) + if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF) hwc->irq_period = 0x7FFFFFFF; - hwc->next_count = -(s32)hwc->irq_period; + atomic64_set(&hwc->period_left, hwc->irq_period); /* * Raw event type provide the config in the event structure @@ -118,12 +160,6 @@ void hw_perf_enable_all(void) wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0); } -void hw_perf_restore(u64 ctrl) -{ - wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0); -} -EXPORT_SYMBOL_GPL(hw_perf_restore); - u64 hw_perf_save_disable(void) { u64 ctrl; @@ -134,27 +170,74 @@ u64 hw_perf_save_disable(void) } EXPORT_SYMBOL_GPL(hw_perf_save_disable); +void hw_perf_restore(u64 ctrl) +{ + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0); +} +EXPORT_SYMBOL_GPL(hw_perf_restore); + static inline void -__x86_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx) +__x86_perf_counter_disable(struct perf_counter *counter, + struct hw_perf_counter *hwc, unsigned int idx) { - wrmsr(hwc->config_base + idx, hwc->config, 0); + int err; + + err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0); + WARN_ON_ONCE(err); } -static DEFINE_PER_CPU(u64, prev_next_count[MAX_HW_COUNTERS]); +static DEFINE_PER_CPU(u64, prev_left[MAX_HW_COUNTERS]); -static void __hw_perf_counter_set_period(struct hw_perf_counter *hwc, int idx) +/* + * Set the next IRQ period, based on the hwc->period_left value. 
+ * To be called with the counter disabled in hw: + */ +static void +__hw_perf_counter_set_period(struct perf_counter *counter, + struct hw_perf_counter *hwc, int idx) { - per_cpu(prev_next_count[idx], smp_processor_id()) = hwc->next_count; + s32 left = atomic64_read(&hwc->period_left); + s32 period = hwc->irq_period; + + WARN_ON_ONCE(period <= 0); + + /* + * If we are way outside a reasonable range then just skip forward: + */ + if (unlikely(left <= -period)) { + left = period; + atomic64_set(&hwc->period_left, left); + } + + if (unlikely(left <= 0)) { + left += period; + atomic64_set(&hwc->period_left, left); + } - wrmsr(hwc->counter_base + idx, hwc->next_count, 0); + WARN_ON_ONCE(left <= 0); + + per_cpu(prev_left[idx], smp_processor_id()) = left; + + /* + * The hw counter starts counting from this counter offset, + * mark it to be able to extract future deltas: + */ + atomic64_set(&hwc->prev_count, (u64)(s64)-left); + + wrmsr(hwc->counter_base + idx, -left, 0); } -static void __x86_perf_counter_enable(struct hw_perf_counter *hwc, int idx) +static void +__x86_perf_counter_enable(struct perf_counter *counter, + struct hw_perf_counter *hwc, int idx) { wrmsr(hwc->config_base + idx, hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0); } +/* + * Find a PMC slot for the freshly enabled / scheduled in counter: + */ static void x86_perf_counter_enable(struct perf_counter *counter) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); @@ -170,55 +253,17 @@ static void x86_perf_counter_enable(struct perf_counter *counter) perf_counters_lapic_init(hwc->nmi); - __x86_perf_counter_disable(hwc, idx); + __x86_perf_counter_disable(counter, hwc, idx); cpuc->counters[idx] = counter; - __hw_perf_counter_set_period(hwc, idx); - __x86_perf_counter_enable(hwc, idx); -} - -static void __hw_perf_save_counter(struct perf_counter *counter, - struct hw_perf_counter *hwc, int idx) -{ - s64 raw = -1; - s64 delta; - - /* - * Get the raw hw counter value: - */ - rdmsrl(hwc->counter_base + idx, raw); - - /* - * Rebase it to zero (it started counting at -irq_period), - * to see the delta since ->prev_count: - */ - delta = (s64)hwc->irq_period + (s64)(s32)raw; - - atomic64_counter_set(counter, hwc->prev_count + delta); - - /* - * Adjust the ->prev_count offset - if we went beyond - * irq_period of units, then we got an IRQ and the counter - * was set back to -irq_period: - */ - while (delta >= (s64)hwc->irq_period) { - hwc->prev_count += hwc->irq_period; - delta -= (s64)hwc->irq_period; - } - - /* - * Calculate the next raw counter value we'll write into - * the counter at the next sched-in time: - */ - delta -= (s64)hwc->irq_period; - - hwc->next_count = (s32)delta; + __hw_perf_counter_set_period(counter, hwc, idx); + __x86_perf_counter_enable(counter, hwc, idx); } void perf_counter_print_debug(void) { - u64 ctrl, status, overflow, pmc_ctrl, pmc_count, next_count; + u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left; int cpu, idx; if (!nr_hw_counters) @@ -241,14 +286,14 @@ void perf_counter_print_debug(void) rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl); rdmsrl(MSR_ARCH_PERFMON_PERFCTR0 + idx, pmc_count); - next_count = per_cpu(prev_next_count[idx], cpu); + prev_left = per_cpu(prev_left[idx], cpu); printk(KERN_INFO "CPU#%d: PMC%d ctrl: %016llx\n", cpu, idx, pmc_ctrl); printk(KERN_INFO "CPU#%d: PMC%d count: %016llx\n", cpu, idx, pmc_count); - printk(KERN_INFO "CPU#%d: PMC%d next: %016llx\n", - cpu, idx, next_count); + printk(KERN_INFO "CPU#%d: PMC%d left: %016llx\n", + cpu, idx, prev_left); }
local_irq_enable(); } @@ -259,29 +304,16 @@ static void x86_perf_counter_disable(struct perf_counter *counter) struct hw_perf_counter *hwc = &counter->hw; unsigned int idx = hwc->idx; - __x86_perf_counter_disable(hwc, idx); + __x86_perf_counter_disable(counter, hwc, idx); clear_bit(idx, cpuc->used); cpuc->counters[idx] = NULL; - __hw_perf_save_counter(counter, hwc, idx); -} -static void x86_perf_counter_read(struct perf_counter *counter) -{ - struct hw_perf_counter *hwc = &counter->hw; - unsigned long addr = hwc->counter_base + hwc->idx; - s64 offs, val = -1LL; - s32 val32; - - /* Careful: NMI might modify the counter offset */ - do { - offs = hwc->prev_count; - rdmsrl(addr, val); - } while (offs != hwc->prev_count); - - val32 = (s32) val; - val = (s64)hwc->irq_period + (s64)val32; - atomic64_counter_set(counter, hwc->prev_count + val); + /* + * Drain the remaining delta count out of a counter + * that we are disabling: + */ + x86_perf_counter_update(counter, hwc, idx); } static void perf_store_irq_data(struct perf_counter *counter, u64 data) @@ -299,7 +331,8 @@ static void perf_store_irq_data(struct perf_counter *counter, u64 data) } /* - * NMI-safe enable method: + * Save and restart an expired counter. Called by NMI contexts, + * so it has to be careful about preempting normal counter ops: */ static void perf_save_and_restart(struct perf_counter *counter) { @@ -309,45 +342,25 @@ static void perf_save_and_restart(struct perf_counter *counter) rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl); - __hw_perf_save_counter(counter, hwc, idx); - __hw_perf_counter_set_period(hwc, idx); + x86_perf_counter_update(counter, hwc, idx); + __hw_perf_counter_set_period(counter, hwc, idx); if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE) - __x86_perf_counter_enable(hwc, idx); + __x86_perf_counter_enable(counter, hwc, idx); } static void perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown) { struct perf_counter *counter, *group_leader = sibling->group_leader; - int bit; - - /* - * Store the counter's own timestamp first: - */ - perf_store_irq_data(sibling, sibling->hw_event.type); - perf_store_irq_data(sibling, atomic64_counter_read(sibling)); /* - * Then store sibling timestamps (if any): + * Store sibling timestamps (if any): */ list_for_each_entry(counter, &group_leader->sibling_list, list_entry) { - if (counter->state != PERF_COUNTER_STATE_ACTIVE) { - /* - * When counter was not in the overflow mask, we have to - * read it from hardware. We read it as well, when it - * has not been read yet and clear the bit in the - * status mask. 
- */ - bit = counter->hw.idx; - if (!test_bit(bit, (unsigned long *) overflown) || - test_bit(bit, (unsigned long *) status)) { - clear_bit(bit, (unsigned long *) status); - perf_save_and_restart(counter); - } - } + x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); perf_store_irq_data(sibling, counter->hw_event.type); - perf_store_irq_data(sibling, atomic64_counter_read(counter)); + perf_store_irq_data(sibling, atomic64_read(&counter->count)); } } @@ -540,6 +553,11 @@ void __init init_hw_perf_counters(void) perf_counters_initialized = true; } +static void x86_perf_counter_read(struct perf_counter *counter) +{ + x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); +} + static const struct hw_perf_counter_ops x86_perf_counter_ops = { .hw_perf_counter_enable = x86_perf_counter_enable, .hw_perf_counter_disable = x86_perf_counter_disable, diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 8cb095fa442..72460289c65 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -91,14 +91,16 @@ struct perf_counter_hw_event { * struct hw_perf_counter - performance counter hardware details: */ struct hw_perf_counter { +#ifdef CONFIG_PERF_COUNTERS u64 config; unsigned long config_base; unsigned long counter_base; int nmi; unsigned int idx; - u64 prev_count; + atomic64_t prev_count; u64 irq_period; - s32 next_count; + atomic64_t period_left; +#endif }; /* @@ -140,17 +142,15 @@ enum perf_counter_active_state { * struct perf_counter - performance counter kernel representation: */ struct perf_counter { +#ifdef CONFIG_PERF_COUNTERS struct list_head list_entry; struct list_head sibling_list; struct perf_counter *group_leader; const struct hw_perf_counter_ops *hw_ops; enum perf_counter_active_state state; -#if BITS_PER_LONG == 64 atomic64_t count; -#else - atomic_t count32[2]; -#endif + struct perf_counter_hw_event hw_event; struct hw_perf_counter hw; @@ -172,6 +172,7 @@ struct perf_counter { struct perf_data *irqdata; struct perf_data *usrdata; struct perf_data data[2]; +#endif }; /** @@ -220,8 +221,6 @@ extern void perf_counter_notify(struct pt_regs *regs); extern void perf_counter_print_debug(void); extern u64 hw_perf_save_disable(void); extern void hw_perf_restore(u64 ctrl); -extern void atomic64_counter_set(struct perf_counter *counter, u64 val64); -extern u64 atomic64_counter_read(struct perf_counter *counter); extern int perf_counter_task_disable(void); extern int perf_counter_task_enable(void); diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 559130b8774..416861ce8b2 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -44,67 +44,9 @@ hw_perf_counter_init(struct perf_counter *counter) } u64 __weak hw_perf_save_disable(void) { return 0; } -void __weak hw_perf_restore(u64 ctrl) { } +void __weak hw_perf_restore(u64 ctrl) { } void __weak hw_perf_counter_setup(void) { } -#if BITS_PER_LONG == 64 - -/* - * Read the cached counter in counter safe against cross CPU / NMI - * modifications. 64 bit version - no complications. - */ -static inline u64 perf_counter_read_safe(struct perf_counter *counter) -{ - return (u64) atomic64_read(&counter->count); -} - -void atomic64_counter_set(struct perf_counter *counter, u64 val) -{ - atomic64_set(&counter->count, val); -} - -u64 atomic64_counter_read(struct perf_counter *counter) -{ - return atomic64_read(&counter->count); -} - -#else - -/* - * Read the cached counter in counter safe against cross CPU / NMI - * modifications. 32 bit version. 
- */ -static u64 perf_counter_read_safe(struct perf_counter *counter) -{ - u32 cntl, cnth; - - local_irq_disable(); - do { - cnth = atomic_read(&counter->count32[1]); - cntl = atomic_read(&counter->count32[0]); - } while (cnth != atomic_read(&counter->count32[1])); - - local_irq_enable(); - - return cntl | ((u64) cnth) << 32; -} - -void atomic64_counter_set(struct perf_counter *counter, u64 val64) -{ - u32 *val32 = (void *)&val64; - - atomic_set(counter->count32 + 0, *(val32 + 0)); - atomic_set(counter->count32 + 1, *(val32 + 1)); -} - -u64 atomic64_counter_read(struct perf_counter *counter) -{ - return atomic_read(counter->count32 + 0) | - (u64) atomic_read(counter->count32 + 1) << 32; -} - -#endif - static void list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) { @@ -280,11 +222,11 @@ static void __perf_install_in_context(void *info) ctx->nr_counters++; if (cpuctx->active_oncpu < perf_max_counters) { - counter->hw_ops->hw_perf_counter_enable(counter); counter->state = PERF_COUNTER_STATE_ACTIVE; counter->oncpu = cpu; ctx->nr_active++; cpuctx->active_oncpu++; + counter->hw_ops->hw_perf_counter_enable(counter); } if (!ctx->task && cpuctx->max_pertask) @@ -624,7 +566,7 @@ static u64 perf_counter_read(struct perf_counter *counter) __hw_perf_counter_read, counter, 1); } - return perf_counter_read_safe(counter); + return atomic64_read(&counter->count); } /* @@ -921,7 +863,7 @@ static void cpu_clock_perf_counter_read(struct perf_counter *counter) { int cpu = raw_smp_processor_id(); - atomic64_counter_set(counter, cpu_clock(cpu)); + atomic64_set(&counter->count, cpu_clock(cpu)); } static const struct hw_perf_counter_ops perf_ops_cpu_clock = { @@ -940,7 +882,7 @@ static void task_clock_perf_counter_disable(struct perf_counter *counter) static void task_clock_perf_counter_read(struct perf_counter *counter) { - atomic64_counter_set(counter, current->se.sum_exec_runtime); + atomic64_set(&counter->count, current->se.sum_exec_runtime); } static const struct hw_perf_counter_ops perf_ops_task_clock = { -- cgit v1.2.3 From 9b51f66dcb09ac5eb6bc68fc111d5c7a1e0131d6 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 12 Dec 2008 13:49:45 +0100 Subject: perfcounters: implement "counter inheritance" Impact: implement new performance feature Counter inheritance can be used to run performance counters in a workload, transparently - and pipe back the counter results to the parent counter. Inheritance for performance counters works the following way: when creating a counter it can be marked with the .inherit=1 flag. Such counters are then 'inherited' by all child tasks (be they fork()-ed or clone()-ed). These counters get inherited through exec() boundaries as well (except through setuid boundaries). The counter values get added back to the parent counter(s) when the child task(s) exit - much like stime/utime statistics are gathered. So inherited counters are ideal to gather summary statistics about an application's behavior via shell commands, without having to modify that application. 
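To make the usage pattern concrete, here is a minimal user-space sketch of such a summary-statistics tool. The perf_counter_hw_event layout mirrors the hunks in this patch; the leading s64 type field, the four-argument sys_perf_counter_open() shape and the __NR_perf_counter_open number are assumptions that would have to match the patched kernel headers:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <sys/wait.h>

    struct perf_counter_hw_event {
            int64_t         type;           /* negative values: sw counters */
            uint64_t        irq_period;
            uint32_t        record_type;
            uint32_t        disabled     :  1,
                            nmi          :  1,
                            raw          :  1,
                            inherit      :  1,  /* children inherit it */
                            __reserved_1 : 28;
            uint64_t        __reserved_2;
    };

    int main(void)
    {
            struct perf_counter_hw_event hw_event;
            uint64_t count;
            int fd;

            memset(&hw_event, 0, sizeof(hw_event));
            hw_event.type    = 1;   /* PERF_COUNT_INSTRUCTIONS */
            hw_event.inherit = 1;   /* fork()ed children feed back into us */

            /* pid 0: attach to current task, cpu -1: count on all CPUs: */
            fd = syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1);
            if (fd < 0)
                    return 1;

            if (fork() == 0) {
                    execlp("ls", "ls", "-lR", "/usr/include/", (char *)NULL);
                    _exit(127);
            }
            wait(NULL);

            /* the child's counts were added back to our fd on its exit: */
            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("%llu instructions\n", (unsigned long long)count);

            return 0;
    }

The counter fd stays open across the child's fork()/exec(), and the child's final counts are folded back into the parent's counter when the child exits - the mechanism the hunks below implement.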
The timec.c command utilizes counter inheritance: http://redhat.com/~mingo/perfcounters/timec.c Sample output: $ ./timec -e 1 -e 3 -e 5 ls -lR /usr/include/ >/dev/null Performance counter stats for 'ls': 163516953 instructions 2295 cache-misses 2855182 branch-misses Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 24 +++-- kernel/exit.c | 7 +- kernel/perf_counter.c | 248 +++++++++++++++++++++++++++++++++++-------- 3 files changed, 228 insertions(+), 51 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 72460289c65..e5d25bf8f74 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -75,10 +75,11 @@ struct perf_counter_hw_event { u64 irq_period; u32 record_type; - u32 disabled : 1, /* off by default */ - nmi : 1, /* NMI sampling */ - raw : 1, /* raw event type */ - __reserved_1 : 29; + u32 disabled : 1, /* off by default */ + nmi : 1, /* NMI sampling */ + raw : 1, /* raw event type */ + inherit : 1, /* children inherit it */ + __reserved_1 : 28; u64 __reserved_2; }; @@ -138,6 +139,8 @@ enum perf_counter_active_state { PERF_COUNTER_STATE_ACTIVE = 1, }; +struct file; + /** * struct perf_counter - performance counter kernel representation: */ @@ -156,7 +159,10 @@ struct perf_counter { struct perf_counter_context *ctx; struct task_struct *task; + struct file *filp; + unsigned int nr_inherited; + struct perf_counter *parent; /* * Protect attach/detach: */ @@ -210,13 +216,16 @@ struct perf_cpu_context { extern int perf_max_counters; #ifdef CONFIG_PERF_COUNTERS +extern void +perf_counter_show(struct perf_counter *counter, char *str, int trace); extern const struct hw_perf_counter_ops * hw_perf_counter_init(struct perf_counter *counter); extern void perf_counter_task_sched_in(struct task_struct *task, int cpu); extern void perf_counter_task_sched_out(struct task_struct *task, int cpu); extern void perf_counter_task_tick(struct task_struct *task, int cpu); -extern void perf_counter_init_task(struct task_struct *task); +extern void perf_counter_init_task(struct task_struct *child); +extern void perf_counter_exit_task(struct task_struct *child); extern void perf_counter_notify(struct pt_regs *regs); extern void perf_counter_print_debug(void); extern u64 hw_perf_save_disable(void); @@ -226,12 +235,15 @@ extern int perf_counter_task_enable(void); #else static inline void +perf_counter_show(struct perf_counter *counter, char *str, int trace) { } +static inline void perf_counter_task_sched_in(struct task_struct *task, int cpu) { } static inline void perf_counter_task_sched_out(struct task_struct *task, int cpu) { } static inline void perf_counter_task_tick(struct task_struct *task, int cpu) { } -static inline void perf_counter_init_task(struct task_struct *task) { } +static inline void perf_counter_init_task(struct task_struct *child) { } +static inline void perf_counter_exit_task(struct task_struct *child) { } static inline void perf_counter_notify(struct pt_regs *regs) { } static inline void perf_counter_print_debug(void) { } static inline void hw_perf_restore(u64 ctrl) { } diff --git a/kernel/exit.c b/kernel/exit.c index 2d8be7ebb0f..d336c90a5f1 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1093,11 +1093,12 @@ NORET_TYPE void do_exit(long code) mpol_put(tsk->mempolicy); tsk->mempolicy = NULL; #endif -#ifdef CONFIG_FUTEX /* - * This must happen late, after the PID is not - * hashed anymore: + * These must happen late, after the PID is not + * hashed anymore, but still at a point that may sleep: */ + 
perf_counter_exit_task(tsk); +#ifdef CONFIG_FUTEX if (unlikely(!list_empty(&tsk->pi_state_list))) exit_pi_state_list(tsk); if (unlikely(current->pi_state_cache)) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 416861ce8b2..f5e81dd193d 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -80,8 +80,6 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) list_del_init(&sibling->list_entry); list_add_tail(&sibling->list_entry, &ctx->counter_list); - WARN_ON_ONCE(!sibling->group_leader); - WARN_ON_ONCE(sibling->group_leader == sibling); sibling->group_leader = sibling; } } @@ -97,6 +95,7 @@ static void __perf_counter_remove_from_context(void *info) struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_counter *counter = info; struct perf_counter_context *ctx = counter->ctx; + unsigned long flags; u64 perf_flags; /* @@ -107,7 +106,7 @@ static void __perf_counter_remove_from_context(void *info) if (ctx->task && cpuctx->task_ctx != ctx) return; - spin_lock(&ctx->lock); + spin_lock_irqsave(&ctx->lock, flags); if (counter->state == PERF_COUNTER_STATE_ACTIVE) { counter->hw_ops->hw_perf_counter_disable(counter); @@ -136,7 +135,7 @@ static void __perf_counter_remove_from_context(void *info) perf_max_counters - perf_reserved_percpu); } - spin_unlock(&ctx->lock); + spin_unlock_irqrestore(&ctx->lock, flags); } @@ -199,6 +198,7 @@ static void __perf_install_in_context(void *info) struct perf_counter *counter = info; struct perf_counter_context *ctx = counter->ctx; int cpu = smp_processor_id(); + unsigned long flags; u64 perf_flags; /* @@ -209,7 +209,7 @@ static void __perf_install_in_context(void *info) if (ctx->task && cpuctx->task_ctx != ctx) return; - spin_lock(&ctx->lock); + spin_lock_irqsave(&ctx->lock, flags); /* * Protect the list operation against NMI by disabling the @@ -232,7 +232,7 @@ static void __perf_install_in_context(void *info) if (!ctx->task && cpuctx->max_pertask) cpuctx->max_pertask--; - spin_unlock(&ctx->lock); + spin_unlock_irqrestore(&ctx->lock, flags); } /* @@ -446,10 +446,9 @@ int perf_counter_task_disable(void) */ perf_flags = hw_perf_save_disable(); - list_for_each_entry(counter, &ctx->counter_list, list_entry) { - WARN_ON_ONCE(counter->state == PERF_COUNTER_STATE_ACTIVE); + list_for_each_entry(counter, &ctx->counter_list, list_entry) counter->state = PERF_COUNTER_STATE_OFF; - } + hw_perf_restore(perf_flags); spin_unlock(&ctx->lock); @@ -525,26 +524,6 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) perf_counter_task_sched_in(curr, cpu); } -/* - * Initialize the perf_counter context in a task_struct: - */ -static void -__perf_counter_init_context(struct perf_counter_context *ctx, - struct task_struct *task) -{ - spin_lock_init(&ctx->lock); - INIT_LIST_HEAD(&ctx->counter_list); - ctx->nr_counters = 0; - ctx->task = task; -} -/* - * Initialize the perf_counter context in task_struct - */ -void perf_counter_init_task(struct task_struct *task) -{ - __perf_counter_init_context(&task->perf_counter_ctx, task); -} - /* * Cross CPU call to read the hardware counter */ @@ -663,7 +642,6 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu) cpuctx = &per_cpu(perf_cpu_context, cpu); ctx = &cpuctx->ctx; - WARN_ON_ONCE(ctx->task); return ctx; } @@ -915,12 +893,13 @@ sw_perf_counter_init(struct perf_counter *counter) static struct perf_counter * perf_counter_alloc(struct perf_counter_hw_event *hw_event, int cpu, - struct perf_counter *group_leader) + struct perf_counter 
*group_leader, + gfp_t gfpflags) { const struct hw_perf_counter_ops *hw_ops; struct perf_counter *counter; - counter = kzalloc(sizeof(*counter), GFP_KERNEL); + counter = kzalloc(sizeof(*counter), gfpflags); if (!counter) return NULL; @@ -947,9 +926,8 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, hw_ops = NULL; if (!hw_event->raw && hw_event->type < 0) hw_ops = sw_perf_counter_init(counter); - if (!hw_ops) { + if (!hw_ops) hw_ops = hw_perf_counter_init(counter); - } if (!hw_ops) { kfree(counter); @@ -975,8 +953,10 @@ sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr __user, struct perf_counter *counter, *group_leader; struct perf_counter_hw_event hw_event; struct perf_counter_context *ctx; + struct file *counter_file = NULL; struct file *group_file = NULL; int fput_needed = 0; + int fput_needed2 = 0; int ret; if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0) @@ -1017,25 +997,29 @@ sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr __user, } ret = -EINVAL; - counter = perf_counter_alloc(&hw_event, cpu, group_leader); + counter = perf_counter_alloc(&hw_event, cpu, group_leader, GFP_KERNEL); if (!counter) goto err_put_context; - perf_install_in_context(ctx, counter, cpu); - ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0); if (ret < 0) - goto err_remove_free_put_context; + goto err_free_put_context; + + counter_file = fget_light(ret, &fput_needed2); + if (!counter_file) + goto err_free_put_context; + + counter->filp = counter_file; + perf_install_in_context(ctx, counter, cpu); + + fput_light(counter_file, fput_needed2); out_fput: fput_light(group_file, fput_needed); return ret; -err_remove_free_put_context: - mutex_lock(&counter->mutex); - perf_counter_remove_from_context(counter); - mutex_unlock(&counter->mutex); +err_free_put_context: kfree(counter); err_put_context: @@ -1044,6 +1028,186 @@ err_put_context: goto out_fput; } +/* + * Initialize the perf_counter context in a task_struct: + */ +static void +__perf_counter_init_context(struct perf_counter_context *ctx, + struct task_struct *task) +{ + memset(ctx, 0, sizeof(*ctx)); + spin_lock_init(&ctx->lock); + INIT_LIST_HEAD(&ctx->counter_list); + ctx->task = task; +} + +/* + * inherit a counter from parent task to child task: + */ +static int +inherit_counter(struct perf_counter *parent_counter, + struct task_struct *parent, + struct perf_counter_context *parent_ctx, + struct task_struct *child, + struct perf_counter_context *child_ctx) +{ + struct perf_counter *child_counter; + + child_counter = perf_counter_alloc(&parent_counter->hw_event, + parent_counter->cpu, NULL, + GFP_ATOMIC); + if (!child_counter) + return -ENOMEM; + + /* + * Link it up in the child's context: + */ + child_counter->ctx = child_ctx; + child_counter->task = child; + list_add_counter(child_counter, child_ctx); + child_ctx->nr_counters++; + + child_counter->parent = parent_counter; + parent_counter->nr_inherited++; + /* + * inherit into child's child as well: + */ + child_counter->hw_event.inherit = 1; + + /* + * Get a reference to the parent filp - we will fput it + * when the child counter exits. 
This is safe to do because + * we are in the parent and we know that the filp still + * exists and has a nonzero count: + */ + atomic_long_inc(&parent_counter->filp->f_count); + + return 0; +} + +static void +__perf_counter_exit_task(struct task_struct *child, + struct perf_counter *child_counter, + struct perf_counter_context *child_ctx) +{ + struct perf_counter *parent_counter; + u64 parent_val, child_val; + u64 perf_flags; + + /* + * Disable and unlink this counter. + * + * Be careful about zapping the list - IRQ/NMI context + * could still be processing it: + */ + local_irq_disable(); + perf_flags = hw_perf_save_disable(); + + if (child_counter->state == PERF_COUNTER_STATE_ACTIVE) + child_counter->hw_ops->hw_perf_counter_disable(child_counter); + list_del_init(&child_counter->list_entry); + + hw_perf_restore(perf_flags); + local_irq_enable(); + + parent_counter = child_counter->parent; + /* + * It can happen that parent exits first, and has counters + * that are still around due to the child reference. These + * counters need to be zapped - but otherwise linger. + */ + if (!parent_counter) + return; + + parent_val = atomic64_read(&parent_counter->count); + child_val = atomic64_read(&child_counter->count); + + /* + * Add back the child's count to the parent's count: + */ + atomic64_add(child_val, &parent_counter->count); + + fput(parent_counter->filp); + + kfree(child_counter); +} + +/* + * When a child task exits, feed back counter values to parent counters. + * + * Note: we are running in child context, but the PID is not hashed + * anymore so new counters will not be added. + */ +void perf_counter_exit_task(struct task_struct *child) +{ + struct perf_counter *child_counter, *tmp; + struct perf_counter_context *child_ctx; + + child_ctx = &child->perf_counter_ctx; + + if (likely(!child_ctx->nr_counters)) + return; + + list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list, + list_entry) + __perf_counter_exit_task(child, child_counter, child_ctx); +} + +/* + * Initialize the perf_counter context in task_struct + */ +void perf_counter_init_task(struct task_struct *child) +{ + struct perf_counter_context *child_ctx, *parent_ctx; + struct perf_counter *counter, *parent_counter; + struct task_struct *parent = current; + unsigned long flags; + + child_ctx = &child->perf_counter_ctx; + parent_ctx = &parent->perf_counter_ctx; + + __perf_counter_init_context(child_ctx, child); + + /* + * This is executed from the parent task context, so inherit + * counters that have been marked for cloning: + */ + + if (likely(!parent_ctx->nr_counters)) + return; + + /* + * Lock the parent list. No need to lock the child - not PID + * hashed yet and not running, so nobody can access it.
+ */ + spin_lock_irqsave(&parent_ctx->lock, flags); + + /* + * We dont have to disable NMIs - we are only looking at + * the list, not manipulating it: + */ + list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) { + if (!counter->hw_event.inherit || counter->group_leader != counter) + continue; + + /* + * Instead of creating recursive hierarchies of counters, + * we link inherited counters back to the original parent, + * which has a filp for sure, which we use as the reference + * count: + */ + parent_counter = counter; + if (counter->parent) + parent_counter = counter->parent; + + if (inherit_counter(parent_counter, parent, + parent_ctx, child, child_ctx)) + break; + } + + spin_unlock_irqrestore(&parent_ctx->lock, flags); +} + static void __cpuinit perf_counter_init_cpu(int cpu) { struct perf_cpu_context *cpuctx; -- cgit v1.2.3 From 8cb391e8786c8072367f0aeb90551903fef074ba Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 14 Dec 2008 12:22:31 +0100 Subject: perfcounters: fix task clock counter Impact: bugfix Update the task clock counter to the new math. Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index f5e81dd193d..1f81cde0dc4 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -850,17 +850,36 @@ static const struct hw_perf_counter_ops perf_ops_cpu_clock = { .hw_perf_counter_read = cpu_clock_perf_counter_read, }; -static void task_clock_perf_counter_enable(struct perf_counter *counter) +static void task_clock_perf_counter_update(struct perf_counter *counter) { + u64 prev, now; + s64 delta; + + prev = atomic64_read(&counter->hw.prev_count); + now = current->se.sum_exec_runtime; + + atomic64_set(&counter->hw.prev_count, now); + + delta = now - prev; + if (WARN_ON_ONCE(delta < 0)) + delta = 0; + + atomic64_add(delta, &counter->count); } -static void task_clock_perf_counter_disable(struct perf_counter *counter) +static void task_clock_perf_counter_read(struct perf_counter *counter) { + task_clock_perf_counter_update(counter); } -static void task_clock_perf_counter_read(struct perf_counter *counter) +static void task_clock_perf_counter_enable(struct perf_counter *counter) +{ + atomic64_set(&counter->hw.prev_count, current->se.sum_exec_runtime); +} + +static void task_clock_perf_counter_disable(struct perf_counter *counter) { - atomic64_set(&counter->count, current->se.sum_exec_runtime); + task_clock_perf_counter_update(counter); } static const struct hw_perf_counter_ops perf_ops_task_clock = { -- cgit v1.2.3 From 5d6a27d8a096868ae313f71f563b06074a7e34fe Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 14 Dec 2008 12:28:33 +0100 Subject: perfcounters: add context switch counter Impact: add new feature, new sw counter Add a counter that counts the number of context-switches a task is doing.
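In the same hypothetical user-space style as the inheritance sketch earlier, requesting this counter only differs in the event type: negative types select the kernel's software counters, so no PMC hardware is needed (a fragment, reusing the assumed syscall number and struct layout from that sketch):

    struct perf_counter_hw_event hw_event;
    int fd;

    memset(&hw_event, 0, sizeof(hw_event));
    hw_event.type = -4;     /* PERF_COUNT_CONTEXT_SWITCHES */

    fd = syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1);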
Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 4 ++-- kernel/perf_counter.c | 51 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index e5d25bf8f74..d2a16563415 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -53,8 +53,8 @@ enum hw_event_types { /* * Future software events: */ - /* PERF_COUNT_PAGE_FAULTS = -3, - PERF_COUNT_CONTEXT_SWITCHES = -4, */ + PERF_COUNT_PAGE_FAULTS = -3, + PERF_COUNT_CONTEXT_SWITCHES = -4, }; /* diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 1f81cde0dc4..09287091c52 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -888,6 +888,54 @@ static const struct hw_perf_counter_ops perf_ops_task_clock = { .hw_perf_counter_read = task_clock_perf_counter_read, }; +static u64 get_context_switches(void) +{ + struct task_struct *curr = current; + + return curr->nvcsw + curr->nivcsw; +} + +static void context_switches_perf_counter_update(struct perf_counter *counter) +{ + u64 prev, now; + s64 delta; + + prev = atomic64_read(&counter->hw.prev_count); + now = get_context_switches(); + + atomic64_set(&counter->hw.prev_count, now); + + delta = now - prev; + if (WARN_ON_ONCE(delta < 0)) + delta = 0; + + atomic64_add(delta, &counter->count); +} + +static void context_switches_perf_counter_read(struct perf_counter *counter) +{ + context_switches_perf_counter_update(counter); +} + +static void context_switches_perf_counter_enable(struct perf_counter *counter) +{ + /* + * ->nvcsw + curr->nivcsw is a per-task value already, + * so we dont have to clear it on switch-in. + */ +} + +static void context_switches_perf_counter_disable(struct perf_counter *counter) +{ + context_switches_perf_counter_update(counter); +} + +static const struct hw_perf_counter_ops perf_ops_context_switches = { + .hw_perf_counter_enable = context_switches_perf_counter_enable, + .hw_perf_counter_disable = context_switches_perf_counter_disable, + .hw_perf_counter_read = context_switches_perf_counter_read, +}; + static const struct hw_perf_counter_ops * sw_perf_counter_init(struct perf_counter *counter) { @@ -900,6 +948,9 @@ sw_perf_counter_init(struct perf_counter *counter) case PERF_COUNT_TASK_CLOCK: hw_ops = &perf_ops_task_clock; break; + case PERF_COUNT_CONTEXT_SWITCHES: + hw_ops = &perf_ops_context_switches; + break; default: break; } -- cgit v1.2.3 From 6c594c21fcb02c662f11c97be4d7d2b73060a205 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 14 Dec 2008 12:34:15 +0100 Subject: perfcounters: add task migrations counter Impact: add new feature, new sw counter Add a counter that counts the number of cross-CPU migrations a task is suffering. Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 8 +++++--- include/linux/sched.h | 3 ++- kernel/perf_counter.c | 49 ++++++++++++++++++++++++++++++++++++++++++++ kernel/sched.c | 7 +++++-- 4 files changed, 61 insertions(+), 6 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index d2a16563415..f30486fc55d 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -42,6 +42,8 @@ enum hw_event_types { PERF_COUNT_BRANCH_INSTRUCTIONS = 4, PERF_COUNT_BRANCH_MISSES = 5, + PERF_HW_EVENTS_MAX = 6, + /* * Special "software" counters provided by the kernel, even if * the hardware does not support performance counters. 
These @@ -50,11 +52,11 @@ enum hw_event_types { */ PERF_COUNT_CPU_CLOCK = -1, PERF_COUNT_TASK_CLOCK = -2, - /* - * Future software events: - */ PERF_COUNT_PAGE_FAULTS = -3, PERF_COUNT_CONTEXT_SWITCHES = -4, + PERF_COUNT_CPU_MIGRATIONS = -5, + + PERF_SW_EVENTS_MIN = -6, }; /* diff --git a/include/linux/sched.h b/include/linux/sched.h index 4c530278391..2e15be8fc79 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1014,6 +1014,8 @@ struct sched_entity { u64 last_wakeup; u64 avg_overlap; + u64 nr_migrations; + #ifdef CONFIG_SCHEDSTATS u64 wait_start; u64 wait_max; @@ -1029,7 +1031,6 @@ struct sched_entity { u64 exec_max; u64 slice_max; - u64 nr_migrations; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 09287091c52..fb11e351e44 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -936,6 +936,52 @@ static const struct hw_perf_counter_ops perf_ops_context_switches = { .hw_perf_counter_read = context_switches_perf_counter_read, }; +static inline u64 get_cpu_migrations(void) +{ + return current->se.nr_migrations; +} + +static void cpu_migrations_perf_counter_update(struct perf_counter *counter) +{ + u64 prev, now; + s64 delta; + + prev = atomic64_read(&counter->hw.prev_count); + now = get_cpu_migrations(); + + atomic64_set(&counter->hw.prev_count, now); + + delta = now - prev; + if (WARN_ON_ONCE(delta < 0)) + delta = 0; + + atomic64_add(delta, &counter->count); +} + +static void cpu_migrations_perf_counter_read(struct perf_counter *counter) +{ + cpu_migrations_perf_counter_update(counter); +} + +static void cpu_migrations_perf_counter_enable(struct perf_counter *counter) +{ + /* + * se.nr_migrations is a per-task value already, + * so we dont have to clear it on switch-in. 
+ */ +} + +static void cpu_migrations_perf_counter_disable(struct perf_counter *counter) +{ + cpu_migrations_perf_counter_update(counter); +} + +static const struct hw_perf_counter_ops perf_ops_cpu_migrations = { + .hw_perf_counter_enable = cpu_migrations_perf_counter_enable, + .hw_perf_counter_disable = cpu_migrations_perf_counter_disable, + .hw_perf_counter_read = cpu_migrations_perf_counter_read, +}; + static const struct hw_perf_counter_ops * sw_perf_counter_init(struct perf_counter *counter) { @@ -951,6 +997,9 @@ sw_perf_counter_init(struct perf_counter *counter) case PERF_COUNT_CONTEXT_SWITCHES: hw_ops = &perf_ops_context_switches; break; + case PERF_COUNT_CPU_MIGRATIONS: + hw_ops = &perf_ops_cpu_migrations; + break; default: break; } diff --git a/kernel/sched.c b/kernel/sched.c index 5c3f4106314..382cfdb5e38 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1852,12 +1852,14 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) p->se.sleep_start -= clock_offset; if (p->se.block_start) p->se.block_start -= clock_offset; +#endif if (old_cpu != new_cpu) { - schedstat_inc(p, se.nr_migrations); + p->se.nr_migrations++; +#ifdef CONFIG_SCHEDSTATS if (task_hot(p, old_rq->clock, NULL)) schedstat_inc(p, se.nr_forced2_migrations); - } #endif + } p->se.vruntime -= old_cfsrq->min_vruntime - new_cfsrq->min_vruntime; @@ -2375,6 +2377,7 @@ static void __sched_fork(struct task_struct *p) p->se.exec_start = 0; p->se.sum_exec_runtime = 0; p->se.prev_sum_exec_runtime = 0; + p->se.nr_migrations = 0; p->se.last_wakeup = 0; p->se.avg_overlap = 0; -- cgit v1.2.3 From e06c61a879910869aa5bf3f8f634abfee1a7bebc Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 14 Dec 2008 14:44:31 +0100 Subject: perfcounters: add nr-of-faults counter Impact: add new feature, new sw counter Add a counter that counts the number of pagefaults a task is experiencing. Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index fb11e351e44..59c52f9ee43 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -888,6 +888,54 @@ static const struct hw_perf_counter_ops perf_ops_task_clock = { .hw_perf_counter_read = task_clock_perf_counter_read, }; +static u64 get_page_faults(void) +{ + struct task_struct *curr = current; + + return curr->maj_flt + curr->min_flt; +} + +static void page_faults_perf_counter_update(struct perf_counter *counter) +{ + u64 prev, now; + s64 delta; + + prev = atomic64_read(&counter->hw.prev_count); + now = get_page_faults(); + + atomic64_set(&counter->hw.prev_count, now); + + delta = now - prev; + if (WARN_ON_ONCE(delta < 0)) + delta = 0; + + atomic64_add(delta, &counter->count); +} + +static void page_faults_perf_counter_read(struct perf_counter *counter) +{ + page_faults_perf_counter_update(counter); +} + +static void page_faults_perf_counter_enable(struct perf_counter *counter) +{ + /* + * page-faults is a per-task value already, + * so we dont have to clear it on switch-in. 
+ */ +} + +static void page_faults_perf_counter_disable(struct perf_counter *counter) +{ + page_faults_perf_counter_update(counter); +} + +static const struct hw_perf_counter_ops perf_ops_page_faults = { + .hw_perf_counter_enable = page_faults_perf_counter_enable, + .hw_perf_counter_disable = page_faults_perf_counter_disable, + .hw_perf_counter_read = page_faults_perf_counter_read, +}; + static u64 get_context_switches(void) { struct task_struct *curr = current; @@ -994,6 +1042,9 @@ sw_perf_counter_init(struct perf_counter *counter) case PERF_COUNT_TASK_CLOCK: hw_ops = &perf_ops_task_clock; break; + case PERF_COUNT_PAGE_FAULTS: + hw_ops = &perf_ops_page_faults; + break; case PERF_COUNT_CONTEXT_SWITCHES: hw_ops = &perf_ops_context_switches; break; -- cgit v1.2.3 From 2b9ff0db19b5e2c77000b7201525f9c3d6e8328d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 14 Dec 2008 18:36:30 +0100 Subject: perfcounters: fix non-intel-perfmon CPUs Do not write MSR_CORE_PERF_GLOBAL_CTRL on CPUs where it does not exist. Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 5afae13d8d5..6d30f603b62 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -157,6 +157,9 @@ static int __hw_perf_counter_init(struct perf_counter *counter) void hw_perf_enable_all(void) { + if (unlikely(!perf_counters_initialized)) + return; + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0); } @@ -164,14 +167,21 @@ u64 hw_perf_save_disable(void) { u64 ctrl; + if (unlikely(!perf_counters_initialized)) + return 0; + rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0); + return ctrl; } EXPORT_SYMBOL_GPL(hw_perf_save_disable); void hw_perf_restore(u64 ctrl) { + if (unlikely(!perf_counters_initialized)) + return; + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0); } EXPORT_SYMBOL_GPL(hw_perf_restore); -- cgit v1.2.3 From 088e2852c858159d47f71ee8da38e0fb1b21f806 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 14 Dec 2008 20:21:00 +0100 Subject: perfcounters, x86: fix sw counters on non-PMC CPUs Make perf_max_counters default to at least 1 - this allows the sw counters to be used. Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 59c52f9ee43..539fa8283a0 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -25,7 +25,7 @@ */ DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context); -int perf_max_counters __read_mostly; +int perf_max_counters __read_mostly = 1; static int perf_reserved_percpu __read_mostly; static int perf_overcommit __read_mostly = 1; -- cgit v1.2.3 From 75f224cf7700ed6006574dc3f2efa29860727570 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 14 Dec 2008 21:58:46 +0100 Subject: perfcounters: fix lapic initialization Fix non-working NMI sampling in certain bootup scenarios. Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 6d30f603b62..8a154bd7ba9 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -557,10 +557,10 @@ void __init init_hw_perf_counters(void) printk(KERN_INFO "... bit_width: %d\n", eax.split.bit_width); printk(KERN_INFO "... 
mask_length: %d\n", eax.split.mask_length); + perf_counters_initialized = true; + perf_counters_lapic_init(0); register_die_notifier(&perf_counter_nmi_notifier); - - perf_counters_initialized = true; } static void x86_perf_counter_read(struct perf_counter *counter) -- cgit v1.2.3 From 0cc0c027d4e028632933f1be2dc4cd730358183b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 14 Dec 2008 23:20:36 +0100 Subject: perfcounters: release CPU context when exiting task counters If counters are exiting via do_exit() not via filp close, then the CPU context needs to be released - otherwise future percpu counter creations might fail. Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 539fa8283a0..16396e9406f 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1273,8 +1273,19 @@ __perf_counter_exit_task(struct task_struct *child, local_irq_disable(); perf_flags = hw_perf_save_disable(); - if (child_counter->state == PERF_COUNTER_STATE_ACTIVE) + if (child_counter->state == PERF_COUNTER_STATE_ACTIVE) { + struct perf_cpu_context *cpuctx; + + cpuctx = &__get_cpu_var(perf_cpu_context); + child_counter->hw_ops->hw_perf_counter_disable(child_counter); + child_counter->state = PERF_COUNTER_STATE_INACTIVE; + child_counter->oncpu = -1; + + cpuctx->active_oncpu--; + child_ctx->nr_active--; + } + list_del_init(&child_counter->list_entry); hw_perf_restore(perf_flags); @@ -1539,4 +1550,3 @@ static int __init perf_counter_sysfs_init(void) &perfclass_attr_group); } device_initcall(perf_counter_sysfs_init); - -- cgit v1.2.3 From f65cb45cba63f249458b669aa67069eabc37b2f5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 16 Dec 2008 13:40:44 +0100 Subject: perfcounters: flush on setuid exec Pavel Machek pointed out that performance counters should be flushed when crossing protection domains on setuid execution. Reported-by: Pavel Machek Acked-by: Pavel Machek Signed-off-by: Ingo Molnar --- fs/exec.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/fs/exec.c b/fs/exec.c index ec5df9a3831..d5165d899a4 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -1017,6 +1018,13 @@ int flush_old_exec(struct linux_binprm * bprm) set_dumpable(current->mm, suid_dumpable); } + /* + * Flush performance counters when crossing a + * security domain: + */ + if (!get_dumpable(current->mm)) + perf_counter_exit_task(current); + /* An exec changes our domain. We are no longer part of the thread group */ -- cgit v1.2.3 From a86ed50859d65a08beec9474df97b88438a996df Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 17 Dec 2008 00:43:10 +0100 Subject: perfcounters: use hw_event.disable flag Impact: implement default-off counters Make sure that counters that are created with counter.hw_event.disabled=1, get created in disabled state. 
They can be enabled via: prctl(PR_TASK_PERF_COUNTERS_ENABLE); Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 16396e9406f..5431e790b5d 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1093,6 +1093,9 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, counter->group_leader = group_leader; counter->hw_ops = NULL; + if (hw_event->disabled) + counter->state = PERF_COUNTER_STATE_OFF; + hw_ops = NULL; if (!hw_event->raw && hw_event->type < 0) hw_ops = sw_perf_counter_init(counter); -- cgit v1.2.3 From 94c46572a6d9bb497eda0a14099d9f1360d57d5d Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Date: Fri, 19 Dec 2008 22:37:58 +0530 Subject: x86: perf_counter.c intel_perfmon_event_map and max_intel_perfmon_events should be static Impact: cleanup, avoid sparse warnings, reduce kernel size a bit Fixes these sparse warnings: arch/x86/kernel/cpu/perf_counter.c:44:11: warning: symbol 'intel_perfmon_event_map' was not declared. Should it be static? arch/x86/kernel/cpu/perf_counter.c:54:11: warning: symbol 'max_intel_perfmon_events' was not declared. Should it be static? Signed-off-by: Jaswinder Singh Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 8a154bd7ba9..bdbdb56eaa3 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -41,7 +41,7 @@ struct cpu_hw_counters { */ static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters); -const int intel_perfmon_event_map[] = +static const int intel_perfmon_event_map[] = { [PERF_COUNT_CYCLES] = 0x003c, [PERF_COUNT_INSTRUCTIONS] = 0x00c0, @@ -51,7 +51,7 @@ const int intel_perfmon_event_map[] = [PERF_COUNT_BRANCH_MISSES] = 0x00c5, }; -const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map); +static const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map); /* * Propagate counter elapsed time into the generic counter. -- cgit v1.2.3 From 8fb9331391af95ca1f4e5c0a0da8120b13cbae01 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 23 Dec 2008 12:04:16 +0100 Subject: perfcounters: remove warnings Impact: remove debug checks Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 7 ------- include/linux/perf_counter.h | 4 ---- kernel/perf_counter.c | 8 -------- 3 files changed, 19 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index bdbdb56eaa3..89fad5d4fb3 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -64,7 +64,6 @@ x86_perf_counter_update(struct perf_counter *counter, { u64 prev_raw_count, new_raw_count, delta; - WARN_ON_ONCE(counter->state != PERF_COUNTER_STATE_ACTIVE); /* * Careful: an NMI might modify the previous counter value. 
* @@ -89,7 +88,6 @@ again: * of the count, so we do that by clipping the delta to 32 bits: */ delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count); - WARN_ON_ONCE((int)delta < 0); atomic64_add(delta, &counter->count); atomic64_sub(delta, &hwc->period_left); @@ -193,7 +191,6 @@ __x86_perf_counter_disable(struct perf_counter *counter, int err; err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0); - WARN_ON_ONCE(err); } static DEFINE_PER_CPU(u64, prev_left[MAX_HW_COUNTERS]); @@ -209,8 +206,6 @@ __hw_perf_counter_set_period(struct perf_counter *counter, s32 left = atomic64_read(&hwc->period_left); s32 period = hwc->irq_period; - WARN_ON_ONCE(period <= 0); - /* * If we are way outside a reasonable range then just skip forward: */ @@ -224,8 +219,6 @@ __hw_perf_counter_set_period(struct perf_counter *counter, atomic64_set(&hwc->period_left, left); } - WARN_ON_ONCE(left <= 0); - per_cpu(prev_left[idx], smp_processor_id()) = left; /* diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index f30486fc55d..d038450de87 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -218,8 +218,6 @@ struct perf_cpu_context { extern int perf_max_counters; #ifdef CONFIG_PERF_COUNTERS -extern void -perf_counter_show(struct perf_counter *counter, char *str, int trace); extern const struct hw_perf_counter_ops * hw_perf_counter_init(struct perf_counter *counter); @@ -237,8 +235,6 @@ extern int perf_counter_task_enable(void); #else static inline void -perf_counter_show(struct perf_counter *counter, char *str, int trace) { } -static inline void perf_counter_task_sched_in(struct task_struct *task, int cpu) { } static inline void perf_counter_task_sched_out(struct task_struct *task, int cpu) { } diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 5431e790b5d..aab6c123b02 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -861,8 +861,6 @@ static void task_clock_perf_counter_update(struct perf_counter *counter) atomic64_set(&counter->hw.prev_count, now); delta = now - prev; - if (WARN_ON_ONCE(delta < 0)) - delta = 0; atomic64_add(delta, &counter->count); } @@ -906,8 +904,6 @@ static void page_faults_perf_counter_update(struct perf_counter *counter) atomic64_set(&counter->hw.prev_count, now); delta = now - prev; - if (WARN_ON_ONCE(delta < 0)) - delta = 0; atomic64_add(delta, &counter->count); } @@ -954,8 +950,6 @@ static void context_switches_perf_counter_update(struct perf_counter *counter) atomic64_set(&counter->hw.prev_count, now); delta = now - prev; - if (WARN_ON_ONCE(delta < 0)) - delta = 0; atomic64_add(delta, &counter->count); } @@ -1000,8 +994,6 @@ static void cpu_migrations_perf_counter_update(struct perf_counter *counter) atomic64_set(&counter->hw.prev_count, now); delta = now - prev; - if (WARN_ON_ONCE(delta < 0)) - delta = 0; atomic64_add(delta, &counter->count); } -- cgit v1.2.3 From 7995888fcb0246543ee8027bf2835a250ba8c925 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 17 Dec 2008 08:54:56 +0100 Subject: perfcounters: tweak group scheduling Impact: schedule in groups atomically If there are multiple groups in a task, make sure they are scheduled in and out atomically.
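To make the guarantee concrete: a group is built from user space by passing the leader's fd as the group_fd argument of the (assumed four-argument) sys_perf_counter_open(), and with this change the kernel schedules leader and siblings onto the PMU as one unit - a sketch in the same hypothetical style as the earlier examples:

    int leader, sibling;

    /* group leader - created first, with no group to join: */
    hw_event.type = 0;      /* PERF_COUNT_CYCLES */
    leader = syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1);

    /* sibling - joins the leader's group via group_fd: */
    hw_event.type = 1;      /* PERF_COUNT_INSTRUCTIONS */
    sibling = syscall(__NR_perf_counter_open, &hw_event, 0, -1, leader);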
Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index aab6c123b02..f8a4d9a5d5d 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -367,21 +367,26 @@ counter_sched_in(struct perf_counter *counter, ctx->nr_active++; } -static void +static int group_sched_in(struct perf_counter *group_counter, struct perf_cpu_context *cpuctx, struct perf_counter_context *ctx, int cpu) { struct perf_counter *counter; + int was_group = 0; counter_sched_in(group_counter, cpuctx, ctx, cpu); /* * Schedule in siblings as one group (if any): */ - list_for_each_entry(counter, &group_counter->sibling_list, list_entry) + list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { counter_sched_in(counter, cpuctx, ctx, cpu); + was_group = 1; + } + + return was_group; } /* @@ -416,7 +421,12 @@ void perf_counter_task_sched_in(struct task_struct *task, int cpu) if (counter->cpu != -1 && counter->cpu != cpu) continue; - group_sched_in(counter, cpuctx, ctx, cpu); + /* + * If we scheduled in a group atomically and + * exclusively, break out: + */ + if (group_sched_in(counter, cpuctx, ctx, cpu)) + break; } spin_unlock(&ctx->lock); -- cgit v1.2.3 From 5c167b8585c8d91206b395d57011ead7711e322f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 17 Dec 2008 09:02:19 +0100 Subject: x86, perfcounters: rename intel_arch_perfmon.h => perf_counter.h Impact: rename include file We'll be providing an asm/perf_counter.h to the generic perfcounter code, so use the already existing x86 file for this purpose and rename it. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/intel_arch_perfmon.h | 41 ------------------------------- arch/x86/include/asm/perf_counter.h | 41 +++++++++++++++++++++++++++++++ arch/x86/kernel/apic.c | 2 +- arch/x86/kernel/cpu/common.c | 2 +- arch/x86/kernel/cpu/perf_counter.c | 2 +- arch/x86/kernel/cpu/perfctr-watchdog.c | 2 +- arch/x86/oprofile/op_model_ppro.c | 2 +- 7 files changed, 46 insertions(+), 46 deletions(-) delete mode 100644 arch/x86/include/asm/intel_arch_perfmon.h create mode 100644 arch/x86/include/asm/perf_counter.h diff --git a/arch/x86/include/asm/intel_arch_perfmon.h b/arch/x86/include/asm/intel_arch_perfmon.h deleted file mode 100644 index 71598a9eab6..00000000000 --- a/arch/x86/include/asm/intel_arch_perfmon.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef _ASM_X86_INTEL_ARCH_PERFMON_H -#define _ASM_X86_INTEL_ARCH_PERFMON_H - -#define MSR_ARCH_PERFMON_PERFCTR0 0xc1 -#define MSR_ARCH_PERFMON_PERFCTR1 0xc2 - -#define MSR_ARCH_PERFMON_EVENTSEL0 0x186 -#define MSR_ARCH_PERFMON_EVENTSEL1 0x187 - -#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22) -#define ARCH_PERFMON_EVENTSEL_INT (1 << 20) -#define ARCH_PERFMON_EVENTSEL_OS (1 << 17) -#define ARCH_PERFMON_EVENTSEL_USR (1 << 16) - -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ - (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) - -#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6 - -union cpuid10_eax { - struct { - unsigned int version_id:8; - unsigned int num_counters:8; - unsigned int bit_width:8; - unsigned int mask_length:8; - } split; - unsigned int full; -}; - -#ifdef CONFIG_PERF_COUNTERS -extern void init_hw_perf_counters(void); -extern void perf_counters_lapic_init(int nmi); -#else -static inline void init_hw_perf_counters(void) { 
} -static inline void perf_counters_lapic_init(int nmi) { } -#endif - -#endif /* _ASM_X86_INTEL_ARCH_PERFMON_H */ diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h new file mode 100644 index 00000000000..9dadce1124e --- /dev/null +++ b/arch/x86/include/asm/perf_counter.h @@ -0,0 +1,41 @@ +#ifndef _ASM_X86_PERF_COUNTER_H +#define _ASM_X86_PERF_COUNTER_H + +#define MSR_ARCH_PERFMON_PERFCTR0 0xc1 +#define MSR_ARCH_PERFMON_PERFCTR1 0xc2 + +#define MSR_ARCH_PERFMON_EVENTSEL0 0x186 +#define MSR_ARCH_PERFMON_EVENTSEL1 0x187 + +#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22) +#define ARCH_PERFMON_EVENTSEL_INT (1 << 20) +#define ARCH_PERFMON_EVENTSEL_OS (1 << 17) +#define ARCH_PERFMON_EVENTSEL_USR (1 << 16) + +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ + (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) + +#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6 + +union cpuid10_eax { + struct { + unsigned int version_id:8; + unsigned int num_counters:8; + unsigned int bit_width:8; + unsigned int mask_length:8; + } split; + unsigned int full; +}; + +#ifdef CONFIG_PERF_COUNTERS +extern void init_hw_perf_counters(void); +extern void perf_counters_lapic_init(int nmi); +#else +static inline void init_hw_perf_counters(void) { } +static inline void perf_counters_lapic_init(int nmi) { } +#endif + +#endif /* _ASM_X86_PERF_COUNTER_H */ diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 0579ec1cd6e..4f859acb156 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -31,7 +31,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 4461011db47..ad331b4d623 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 89fad5d4fb3..a4a3a09a654 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -16,7 +16,7 @@ #include #include -#include +#include #include static bool perf_counters_initialized __read_mostly; diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index 9abd48b2267..d6f5b9fbde3 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c @@ -20,7 +20,7 @@ #include #include -#include +#include struct nmi_watchdog_ctlblk { unsigned int cccr_msr; diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index e9f80c744cf..07c914555a5 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include "op_x86_model.h" #include "op_counter.h" -- cgit v1.2.3 From eb2b861810d4ff72454c83996b891df4e0aaff9a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 17 Dec 2008 09:09:13 +0100 Subject: x86, perfcounters: prepare for fixed-mode PMCs Impact: refactor the x86 code for fixed-mode PMCs Extend the data structures and rename the existing facilities to allow for a 'generic' versus 'fixed' counter distinction. 
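The generic/fixed split prepares for CPUID-based enumeration: leaf 0xA reports the generic counters in EAX and the fixed-function ones in EDX. A condensed sketch of that enumeration, using the cpuid10_eax/cpuid10_edx unions and the nr_hw_counters_fixed variable that the follow-up patches below introduce:

    union cpuid10_eax eax;
    union cpuid10_edx edx;
    unsigned int ebx, unused;

    cpuid(0xA, &eax.full, &ebx, &unused, &edx.full);

    nr_hw_counters       = eax.split.num_counters;        /* generic PMCs */
    nr_hw_counters_fixed = edx.split.num_counters_fixed;  /* fixed PMCs */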
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/perf_counter.h | 11 ++++++++ arch/x86/kernel/cpu/perf_counter.c | 53 ++++++++++++++++++------------------- include/linux/perf_counter.h | 1 + 3 files changed, 38 insertions(+), 27 deletions(-) diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h index 9dadce1124e..dd5a4a559e2 100644 --- a/arch/x86/include/asm/perf_counter.h +++ b/arch/x86/include/asm/perf_counter.h @@ -1,6 +1,13 @@ #ifndef _ASM_X86_PERF_COUNTER_H #define _ASM_X86_PERF_COUNTER_H +/* + * Performance counter hw details: + */ + +#define X86_PMC_MAX_GENERIC 8 +#define X86_PMC_MAX_FIXED 3 + #define MSR_ARCH_PERFMON_PERFCTR0 0xc1 #define MSR_ARCH_PERFMON_PERFCTR1 0xc2 @@ -20,6 +27,10 @@ #define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6 +/* + * Intel "Architectural Performance Monitoring" CPUID + * detection/enumeration details: + */ union cpuid10_eax { struct { unsigned int version_id:8; diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index a4a3a09a654..fc3af868823 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -27,13 +27,12 @@ static bool perf_counters_initialized __read_mostly; static int nr_hw_counters __read_mostly; static u32 perf_counter_mask __read_mostly; -/* No support for fixed function counters yet */ - -#define MAX_HW_COUNTERS 8 - struct cpu_hw_counters { - struct perf_counter *counters[MAX_HW_COUNTERS]; - unsigned long used[BITS_TO_LONGS(MAX_HW_COUNTERS)]; + struct perf_counter *generic[X86_PMC_MAX_GENERIC]; + unsigned long used[BITS_TO_LONGS(X86_PMC_MAX_GENERIC)]; + + struct perf_counter *fixed[X86_PMC_MAX_FIXED]; + unsigned long used_fixed[BITS_TO_LONGS(X86_PMC_MAX_FIXED)]; }; /* @@ -185,7 +184,7 @@ void hw_perf_restore(u64 ctrl) EXPORT_SYMBOL_GPL(hw_perf_restore); static inline void -__x86_perf_counter_disable(struct perf_counter *counter, +__pmc_generic_disable(struct perf_counter *counter, struct hw_perf_counter *hwc, unsigned int idx) { int err; @@ -193,7 +192,7 @@ __x86_perf_counter_disable(struct perf_counter *counter, err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0); } -static DEFINE_PER_CPU(u64, prev_left[MAX_HW_COUNTERS]); +static DEFINE_PER_CPU(u64, prev_left[X86_PMC_MAX_GENERIC]); /* * Set the next IRQ period, based on the hwc->period_left value. 
@@ -231,7 +230,7 @@ __hw_perf_counter_set_period(struct perf_counter *counter, } static void -__x86_perf_counter_enable(struct perf_counter *counter, +__pmc_generic_enable(struct perf_counter *counter, struct hw_perf_counter *hwc, int idx) { wrmsr(hwc->config_base + idx, @@ -241,7 +240,7 @@ __x86_perf_counter_enable(struct perf_counter *counter, /* * Find a PMC slot for the freshly enabled / scheduled in counter: */ -static void x86_perf_counter_enable(struct perf_counter *counter) +static void pmc_generic_enable(struct perf_counter *counter) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); struct hw_perf_counter *hwc = &counter->hw; @@ -256,12 +255,12 @@ static void x86_perf_counter_enable(struct perf_counter *counter) perf_counters_lapic_init(hwc->nmi); - __x86_perf_counter_disable(counter, hwc, idx); + __pmc_generic_disable(counter, hwc, idx); - cpuc->counters[idx] = counter; + cpuc->generic[idx] = counter; __hw_perf_counter_set_period(counter, hwc, idx); - __x86_perf_counter_enable(counter, hwc, idx); + __pmc_generic_enable(counter, hwc, idx); } void perf_counter_print_debug(void) @@ -301,16 +300,16 @@ void perf_counter_print_debug(void) local_irq_enable(); } -static void x86_perf_counter_disable(struct perf_counter *counter) +static void pmc_generic_disable(struct perf_counter *counter) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); struct hw_perf_counter *hwc = &counter->hw; unsigned int idx = hwc->idx; - __x86_perf_counter_disable(counter, hwc, idx); + __pmc_generic_disable(counter, hwc, idx); clear_bit(idx, cpuc->used); - cpuc->counters[idx] = NULL; + cpuc->generic[idx] = NULL; /* * Drain the remaining delta count out of a counter @@ -349,7 +348,7 @@ static void perf_save_and_restart(struct perf_counter *counter) __hw_perf_counter_set_period(counter, hwc, idx); if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE) - __x86_perf_counter_enable(counter, hwc, idx); + __pmc_generic_enable(counter, hwc, idx); } static void @@ -392,7 +391,7 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi) again: ack = status; for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) { - struct perf_counter *counter = cpuc->counters[bit]; + struct perf_counter *counter = cpuc->generic[bit]; clear_bit(bit, (unsigned long *) &status); if (!counter) @@ -412,7 +411,7 @@ again: } /* * From NMI context we cannot call into the scheduler to - * do a task wakeup - but we mark these counters as + * do a task wakeup - but we mark these generic as * wakeup_pending and initate a wakeup callback: */ if (nmi) { @@ -462,7 +461,7 @@ void perf_counter_notify(struct pt_regs *regs) cpuc = &per_cpu(cpu_hw_counters, cpu); for_each_bit(bit, cpuc->used, nr_hw_counters) { - struct perf_counter *counter = cpuc->counters[bit]; + struct perf_counter *counter = cpuc->generic[bit]; if (!counter) continue; @@ -539,10 +538,10 @@ void __init init_hw_perf_counters(void) printk(KERN_INFO "... version: %d\n", eax.split.version_id); printk(KERN_INFO "... 
num_counters: %d\n", eax.split.num_counters); nr_hw_counters = eax.split.num_counters; - if (nr_hw_counters > MAX_HW_COUNTERS) { - nr_hw_counters = MAX_HW_COUNTERS; + if (nr_hw_counters > X86_PMC_MAX_GENERIC) { + nr_hw_counters = X86_PMC_MAX_GENERIC; WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", - nr_hw_counters, MAX_HW_COUNTERS); + nr_hw_counters, X86_PMC_MAX_GENERIC); } perf_counter_mask = (1 << nr_hw_counters) - 1; perf_max_counters = nr_hw_counters; @@ -556,15 +555,15 @@ void __init init_hw_perf_counters(void) register_die_notifier(&perf_counter_nmi_notifier); } -static void x86_perf_counter_read(struct perf_counter *counter) +static void pmc_generic_read(struct perf_counter *counter) { x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); } static const struct hw_perf_counter_ops x86_perf_counter_ops = { - .hw_perf_counter_enable = x86_perf_counter_enable, - .hw_perf_counter_disable = x86_perf_counter_disable, - .hw_perf_counter_read = x86_perf_counter_read, + .hw_perf_counter_enable = pmc_generic_enable, + .hw_perf_counter_disable = pmc_generic_disable, + .hw_perf_counter_read = pmc_generic_read, }; const struct hw_perf_counter_ops * diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index d038450de87..984da540224 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -14,6 +14,7 @@ #define _LINUX_PERF_COUNTER_H #include +#include #include #include -- cgit v1.2.3 From 703e937c83bbad79075a7846e062e447c2fee6a4 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 17 Dec 2008 10:51:15 +0100 Subject: perfcounters: add fixed-mode PMC enumeration Enumerate fixed-mode PMCs based on CPUID, and feed that into the perfcounter code. Does not use fixed-mode PMCs yet. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/perf_counter.h | 23 +++++++++++++++++++++++ arch/x86/kernel/cpu/perf_counter.c | 23 +++++++++++++++++------ 2 files changed, 40 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h index dd5a4a559e2..945a315e6d6 100644 --- a/arch/x86/include/asm/perf_counter.h +++ b/arch/x86/include/asm/perf_counter.h @@ -41,6 +41,29 @@ union cpuid10_eax { unsigned int full; }; +union cpuid10_edx { + struct { + unsigned int num_counters_fixed:4; + unsigned int reserved:28; + } split; + unsigned int full; +}; + + +/* + * Fixed-purpose performance counters: + */ + +/* Instr_Retired.Any: */ +#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309 + +/* CPU_CLK_Unhalted.Core: */ +#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a + +/* CPU_CLK_Unhalted.Ref: */ +#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b + + #ifdef CONFIG_PERF_COUNTERS extern void init_hw_perf_counters(void); extern void perf_counters_lapic_init(int nmi); diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index fc3af868823..2fca50c4597 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -27,6 +27,8 @@ static bool perf_counters_initialized __read_mostly; static int nr_hw_counters __read_mostly; static u32 perf_counter_mask __read_mostly; +static int nr_hw_counters_fixed __read_mostly; + struct cpu_hw_counters { struct perf_counter *generic[X86_PMC_MAX_GENERIC]; unsigned long used[BITS_TO_LONGS(X86_PMC_MAX_GENERIC)]; @@ -519,8 +521,9 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = { void __init init_hw_perf_counters(void) { union cpuid10_eax eax; - unsigned int unused; unsigned int ebx; + unsigned int unused; + union cpuid10_edx 
edx; if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) return; @@ -529,14 +532,14 @@ void __init init_hw_perf_counters(void) * Check whether the Architectural PerfMon supports * Branch Misses Retired Event or not. */ - cpuid(10, &(eax.full), &ebx, &unused, &unused); + cpuid(10, &eax.full, &ebx, &unused, &edx.full); if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) return; printk(KERN_INFO "Intel Performance Monitoring support detected.\n"); - printk(KERN_INFO "... version: %d\n", eax.split.version_id); - printk(KERN_INFO "... num_counters: %d\n", eax.split.num_counters); + printk(KERN_INFO "... version: %d\n", eax.split.version_id); + printk(KERN_INFO "... num counters: %d\n", eax.split.num_counters); nr_hw_counters = eax.split.num_counters; if (nr_hw_counters > X86_PMC_MAX_GENERIC) { nr_hw_counters = X86_PMC_MAX_GENERIC; @@ -546,8 +549,16 @@ void __init init_hw_perf_counters(void) perf_counter_mask = (1 << nr_hw_counters) - 1; perf_max_counters = nr_hw_counters; - printk(KERN_INFO "... bit_width: %d\n", eax.split.bit_width); - printk(KERN_INFO "... mask_length: %d\n", eax.split.mask_length); + printk(KERN_INFO "... bit width: %d\n", eax.split.bit_width); + printk(KERN_INFO "... mask length: %d\n", eax.split.mask_length); + + nr_hw_counters_fixed = edx.split.num_counters_fixed; + if (nr_hw_counters_fixed > X86_PMC_MAX_FIXED) { + nr_hw_counters_fixed = X86_PMC_MAX_FIXED; + WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!", + nr_hw_counters_fixed, X86_PMC_MAX_FIXED); + } + printk(KERN_INFO "... fixed counters: %d\n", nr_hw_counters_fixed); perf_counters_initialized = true; -- cgit v1.2.3 From 862a1a5f346fe7e9181ea51eaae48cf2cd70f746 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 17 Dec 2008 13:09:20 +0100 Subject: x86, perfcounters: refactor code for fixed-function PMCs Impact: clean up Signed-off-by: Ingo Molnar --- arch/x86/include/asm/perf_counter.h | 14 ++++++- arch/x86/kernel/cpu/perf_counter.c | 73 ++++++++++++++++++++----------------- 2 files changed, 52 insertions(+), 35 deletions(-) diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h index 945a315e6d6..13745deb16c 100644 --- a/arch/x86/include/asm/perf_counter.h +++ b/arch/x86/include/asm/perf_counter.h @@ -8,6 +8,10 @@ #define X86_PMC_MAX_GENERIC 8 #define X86_PMC_MAX_FIXED 3 +#define X86_PMC_IDX_GENERIC 0 +#define X86_PMC_IDX_FIXED 32 +#define X86_PMC_IDX_MAX 64 + #define MSR_ARCH_PERFMON_PERFCTR0 0xc1 #define MSR_ARCH_PERFMON_PERFCTR1 0xc2 @@ -54,6 +58,15 @@ union cpuid10_edx { * Fixed-purpose performance counters: */ +/* + * All 3 fixed-mode PMCs are configured via this single MSR: + */ +#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d + +/* + * The counts are available in three separate MSRs: + */ + /* Instr_Retired.Any: */ #define MSR_ARCH_PERFMON_FIXED_CTR0 0x309 @@ -63,7 +76,6 @@ union cpuid10_edx { /* CPU_CLK_Unhalted.Ref: */ #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b - #ifdef CONFIG_PERF_COUNTERS extern void init_hw_perf_counters(void); extern void perf_counters_lapic_init(int nmi); diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 2fca50c4597..358af526640 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -24,17 +24,14 @@ static bool perf_counters_initialized __read_mostly; /* * Number of (generic) HW counters: */ -static int nr_hw_counters __read_mostly; -static u32 perf_counter_mask __read_mostly; +static int nr_counters_generic __read_mostly; +static u64 
perf_counter_mask __read_mostly; -static int nr_hw_counters_fixed __read_mostly; +static int nr_counters_fixed __read_mostly; struct cpu_hw_counters { - struct perf_counter *generic[X86_PMC_MAX_GENERIC]; - unsigned long used[BITS_TO_LONGS(X86_PMC_MAX_GENERIC)]; - - struct perf_counter *fixed[X86_PMC_MAX_FIXED]; - unsigned long used_fixed[BITS_TO_LONGS(X86_PMC_MAX_FIXED)]; + struct perf_counter *counters[X86_PMC_IDX_MAX]; + unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; }; /* @@ -159,7 +156,7 @@ void hw_perf_enable_all(void) if (unlikely(!perf_counters_initialized)) return; - wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0); + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask); } u64 hw_perf_save_disable(void) @@ -170,7 +167,7 @@ u64 hw_perf_save_disable(void) return 0; rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); - wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0); + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); return ctrl; } @@ -181,7 +178,7 @@ void hw_perf_restore(u64 ctrl) if (unlikely(!perf_counters_initialized)) return; - wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0); + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); } EXPORT_SYMBOL_GPL(hw_perf_restore); @@ -239,6 +236,11 @@ __pmc_generic_enable(struct perf_counter *counter, hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0); } +static int fixed_mode_idx(struct hw_perf_counter *hwc) +{ + return -1; +} + /* * Find a PMC slot for the freshly enabled / scheduled in counter: */ @@ -250,7 +252,7 @@ static void pmc_generic_enable(struct perf_counter *counter) /* Try to get the previous counter again */ if (test_and_set_bit(idx, cpuc->used)) { - idx = find_first_zero_bit(cpuc->used, nr_hw_counters); + idx = find_first_zero_bit(cpuc->used, nr_counters_generic); set_bit(idx, cpuc->used); hwc->idx = idx; } @@ -259,7 +261,7 @@ static void pmc_generic_enable(struct perf_counter *counter) __pmc_generic_disable(counter, hwc, idx); - cpuc->generic[idx] = counter; + cpuc->counters[idx] = counter; __hw_perf_counter_set_period(counter, hwc, idx); __pmc_generic_enable(counter, hwc, idx); @@ -270,7 +272,7 @@ void perf_counter_print_debug(void) u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left; int cpu, idx; - if (!nr_hw_counters) + if (!nr_counters_generic) return; local_irq_disable(); @@ -286,7 +288,7 @@ void perf_counter_print_debug(void) printk(KERN_INFO "CPU#%d: status: %016llx\n", cpu, status); printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow); - for (idx = 0; idx < nr_hw_counters; idx++) { + for (idx = 0; idx < nr_counters_generic; idx++) { rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl); rdmsrl(MSR_ARCH_PERFMON_PERFCTR0 + idx, pmc_count); @@ -311,7 +313,7 @@ static void pmc_generic_disable(struct perf_counter *counter) __pmc_generic_disable(counter, hwc, idx); clear_bit(idx, cpuc->used); - cpuc->generic[idx] = NULL; + cpuc->counters[idx] = NULL; /* * Drain the remaining delta count out of a counter @@ -381,7 +383,7 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi) rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global); /* Disable counters globally */ - wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0); + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); ack_APIC_irq(); cpuc = &per_cpu(cpu_hw_counters, cpu); @@ -392,8 +394,8 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi) again: ack = status; - for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) { - struct perf_counter *counter = cpuc->generic[bit]; + for_each_bit(bit, (unsigned long *) &status, nr_counters_generic) { + struct perf_counter *counter = 
cpuc->counters[bit]; clear_bit(bit, (unsigned long *) &status); if (!counter) @@ -424,7 +426,7 @@ again: } } - wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack, 0); + wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); /* * Repeat if there is more work to be done: @@ -436,7 +438,7 @@ out: /* * Restore - do not reenable when global enable is off: */ - wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, saved_global, 0); + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global); } void smp_perf_counter_interrupt(struct pt_regs *regs) @@ -462,8 +464,8 @@ void perf_counter_notify(struct pt_regs *regs) cpu = smp_processor_id(); cpuc = &per_cpu(cpu_hw_counters, cpu); - for_each_bit(bit, cpuc->used, nr_hw_counters) { - struct perf_counter *counter = cpuc->generic[bit]; + for_each_bit(bit, cpuc->used, X86_PMC_IDX_MAX) { + struct perf_counter *counter = cpuc->counters[bit]; if (!counter) continue; @@ -540,26 +542,29 @@ void __init init_hw_perf_counters(void) printk(KERN_INFO "... version: %d\n", eax.split.version_id); printk(KERN_INFO "... num counters: %d\n", eax.split.num_counters); - nr_hw_counters = eax.split.num_counters; - if (nr_hw_counters > X86_PMC_MAX_GENERIC) { - nr_hw_counters = X86_PMC_MAX_GENERIC; + nr_counters_generic = eax.split.num_counters; + if (nr_counters_generic > X86_PMC_MAX_GENERIC) { + nr_counters_generic = X86_PMC_MAX_GENERIC; WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", - nr_hw_counters, X86_PMC_MAX_GENERIC); + nr_counters_generic, X86_PMC_MAX_GENERIC); } - perf_counter_mask = (1 << nr_hw_counters) - 1; - perf_max_counters = nr_hw_counters; + perf_counter_mask = (1 << nr_counters_generic) - 1; + perf_max_counters = nr_counters_generic; printk(KERN_INFO "... bit width: %d\n", eax.split.bit_width); printk(KERN_INFO "... mask length: %d\n", eax.split.mask_length); - nr_hw_counters_fixed = edx.split.num_counters_fixed; - if (nr_hw_counters_fixed > X86_PMC_MAX_FIXED) { - nr_hw_counters_fixed = X86_PMC_MAX_FIXED; + nr_counters_fixed = edx.split.num_counters_fixed; + if (nr_counters_fixed > X86_PMC_MAX_FIXED) { + nr_counters_fixed = X86_PMC_MAX_FIXED; WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!", - nr_hw_counters_fixed, X86_PMC_MAX_FIXED); + nr_counters_fixed, X86_PMC_MAX_FIXED); } - printk(KERN_INFO "... fixed counters: %d\n", nr_hw_counters_fixed); + printk(KERN_INFO "... fixed counters: %d\n", nr_counters_fixed); + + perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED; + printk(KERN_INFO "... counter mask: %016Lx\n", perf_counter_mask); perf_counters_initialized = true; perf_counters_lapic_init(0); -- cgit v1.2.3 From 7671581f1666ef4b54a1c1e598c51ac44c060a9b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 17 Dec 2008 14:20:28 +0100 Subject: perfcounters: hw ops rename Impact: rename field names Shorten them. 
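A minimal standalone sketch of the shortened interface (mock types, not the kernel's structures) — call sites shrink the same way:

    #include <stdio.h>

    struct counter;

    /* after this rename the vtable members are simply enable/disable/read */
    struct counter_ops {
            void (*enable)(struct counter *c);
            void (*disable)(struct counter *c);
            void (*read)(struct counter *c);
    };

    struct counter {
            long long count;
            const struct counter_ops *ops;
    };

    static void dummy_enable(struct counter *c)  { c->count = 0; }
    static void dummy_disable(struct counter *c) { }
    static void dummy_read(struct counter *c)    { c->count++; }

    static const struct counter_ops dummy_ops = {
            .enable  = dummy_enable,
            .disable = dummy_disable,
            .read    = dummy_read,
    };

    int main(void)
    {
            struct counter c = { .count = 0, .ops = &dummy_ops };

            c.ops->enable(&c);      /* was: hw_perf_counter_enable */
            c.ops->read(&c);        /* was: hw_perf_counter_read   */
            printf("count = %lld\n", c.count);
            return 0;
    }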
Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 6 ++--- include/linux/perf_counter.h | 6 ++--- kernel/perf_counter.c | 50 +++++++++++++++++++------------------- 3 files changed, 31 insertions(+), 31 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 358af526640..b6755712142 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -577,9 +577,9 @@ static void pmc_generic_read(struct perf_counter *counter) } static const struct hw_perf_counter_ops x86_perf_counter_ops = { - .hw_perf_counter_enable = pmc_generic_enable, - .hw_perf_counter_disable = pmc_generic_disable, - .hw_perf_counter_read = pmc_generic_read, + .enable = pmc_generic_enable, + .disable = pmc_generic_disable, + .read = pmc_generic_read, }; const struct hw_perf_counter_ops * diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 984da540224..48f76d2e54c 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -128,9 +128,9 @@ struct perf_counter; * struct hw_perf_counter_ops - performance counter hw ops */ struct hw_perf_counter_ops { - void (*hw_perf_counter_enable) (struct perf_counter *counter); - void (*hw_perf_counter_disable) (struct perf_counter *counter); - void (*hw_perf_counter_read) (struct perf_counter *counter); + void (*enable) (struct perf_counter *counter); + void (*disable) (struct perf_counter *counter); + void (*read) (struct perf_counter *counter); }; /** diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index f8a4d9a5d5d..961d651aa57 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -109,7 +109,7 @@ static void __perf_counter_remove_from_context(void *info) spin_lock_irqsave(&ctx->lock, flags); if (counter->state == PERF_COUNTER_STATE_ACTIVE) { - counter->hw_ops->hw_perf_counter_disable(counter); + counter->hw_ops->disable(counter); counter->state = PERF_COUNTER_STATE_INACTIVE; ctx->nr_active--; cpuctx->active_oncpu--; @@ -226,7 +226,7 @@ static void __perf_install_in_context(void *info) counter->oncpu = cpu; ctx->nr_active++; cpuctx->active_oncpu++; - counter->hw_ops->hw_perf_counter_enable(counter); + counter->hw_ops->enable(counter); } if (!ctx->task && cpuctx->max_pertask) @@ -297,7 +297,7 @@ counter_sched_out(struct perf_counter *counter, if (counter->state != PERF_COUNTER_STATE_ACTIVE) return; - counter->hw_ops->hw_perf_counter_disable(counter); + counter->hw_ops->disable(counter); counter->state = PERF_COUNTER_STATE_INACTIVE; counter->oncpu = -1; @@ -327,7 +327,7 @@ group_sched_out(struct perf_counter *group_counter, * * We stop each counter and update the counter value in counter->count. * - * This does not protect us against NMI, but hw_perf_counter_disable() + * This does not protect us against NMI, but disable() * sets the disabled bit in the control field of counter _before_ * accessing the counter control register. If a NMI hits, then it will * not restart the counter. @@ -359,7 +359,7 @@ counter_sched_in(struct perf_counter *counter, if (counter->state == PERF_COUNTER_STATE_OFF) return; - counter->hw_ops->hw_perf_counter_enable(counter); + counter->hw_ops->enable(counter); counter->state = PERF_COUNTER_STATE_ACTIVE; counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ @@ -395,7 +395,7 @@ group_sched_in(struct perf_counter *group_counter, * * We restore the counter value and then enable it. 
* - * This does not protect us against NMI, but hw_perf_counter_enable() + * This does not protect us against NMI, but enable() * sets the enabled bit in the control field of counter _before_ * accessing the counter control register. If a NMI hits, then it will * keep the counter running. @@ -537,11 +537,11 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) /* * Cross CPU call to read the hardware counter */ -static void __hw_perf_counter_read(void *info) +static void __read(void *info) { struct perf_counter *counter = info; - counter->hw_ops->hw_perf_counter_read(counter); + counter->hw_ops->read(counter); } static u64 perf_counter_read(struct perf_counter *counter) @@ -552,7 +552,7 @@ static u64 perf_counter_read(struct perf_counter *counter) */ if (counter->state == PERF_COUNTER_STATE_ACTIVE) { smp_call_function_single(counter->oncpu, - __hw_perf_counter_read, counter, 1); + __read, counter, 1); } return atomic64_read(&counter->count); @@ -855,9 +855,9 @@ static void cpu_clock_perf_counter_read(struct perf_counter *counter) } static const struct hw_perf_counter_ops perf_ops_cpu_clock = { - .hw_perf_counter_enable = cpu_clock_perf_counter_enable, - .hw_perf_counter_disable = cpu_clock_perf_counter_disable, - .hw_perf_counter_read = cpu_clock_perf_counter_read, + .enable = cpu_clock_perf_counter_enable, + .disable = cpu_clock_perf_counter_disable, + .read = cpu_clock_perf_counter_read, }; static void task_clock_perf_counter_update(struct perf_counter *counter) @@ -891,9 +891,9 @@ static void task_clock_perf_counter_disable(struct perf_counter *counter) } static const struct hw_perf_counter_ops perf_ops_task_clock = { - .hw_perf_counter_enable = task_clock_perf_counter_enable, - .hw_perf_counter_disable = task_clock_perf_counter_disable, - .hw_perf_counter_read = task_clock_perf_counter_read, + .enable = task_clock_perf_counter_enable, + .disable = task_clock_perf_counter_disable, + .read = task_clock_perf_counter_read, }; static u64 get_page_faults(void) @@ -937,9 +937,9 @@ static void page_faults_perf_counter_disable(struct perf_counter *counter) } static const struct hw_perf_counter_ops perf_ops_page_faults = { - .hw_perf_counter_enable = page_faults_perf_counter_enable, - .hw_perf_counter_disable = page_faults_perf_counter_disable, - .hw_perf_counter_read = page_faults_perf_counter_read, + .enable = page_faults_perf_counter_enable, + .disable = page_faults_perf_counter_disable, + .read = page_faults_perf_counter_read, }; static u64 get_context_switches(void) @@ -983,9 +983,9 @@ static void context_switches_perf_counter_disable(struct perf_counter *counter) } static const struct hw_perf_counter_ops perf_ops_context_switches = { - .hw_perf_counter_enable = context_switches_perf_counter_enable, - .hw_perf_counter_disable = context_switches_perf_counter_disable, - .hw_perf_counter_read = context_switches_perf_counter_read, + .enable = context_switches_perf_counter_enable, + .disable = context_switches_perf_counter_disable, + .read = context_switches_perf_counter_read, }; static inline u64 get_cpu_migrations(void) @@ -1027,9 +1027,9 @@ static void cpu_migrations_perf_counter_disable(struct perf_counter *counter) } static const struct hw_perf_counter_ops perf_ops_cpu_migrations = { - .hw_perf_counter_enable = cpu_migrations_perf_counter_enable, - .hw_perf_counter_disable = cpu_migrations_perf_counter_disable, - .hw_perf_counter_read = cpu_migrations_perf_counter_read, + .enable = cpu_migrations_perf_counter_enable, + .disable = cpu_migrations_perf_counter_disable, + .read = 
cpu_migrations_perf_counter_read, }; static const struct hw_perf_counter_ops * @@ -1283,7 +1283,7 @@ __perf_counter_exit_task(struct task_struct *child, cpuctx = &__get_cpu_var(perf_cpu_context); - child_counter->hw_ops->hw_perf_counter_disable(child_counter); + child_counter->hw_ops->disable(child_counter); child_counter->state = PERF_COUNTER_STATE_INACTIVE; child_counter->oncpu = -1; -- cgit v1.2.3 From aa9c4c0f967fdb482ea95e8473ec3d201e6e0781 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 17 Dec 2008 14:10:57 +0100 Subject: perfcounters: fix task clock counter Impact: fix per task clock counter precision Signed-off-by: Ingo Molnar --- include/linux/kernel_stat.h | 8 ++++++ kernel/exit.c | 17 +++++++---- kernel/perf_counter.c | 70 ++++++++++++++++++++++++++++++++++----------- kernel/sched.c | 49 +++++++++++++++++++++++++++++-- 4 files changed, 120 insertions(+), 24 deletions(-) diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 4a145caeee0..1b2e3242497 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -66,7 +66,15 @@ static inline unsigned int kstat_irqs(unsigned int irq) return sum; } + +/* + * Lock/unlock the current runqueue - to extract task statistics: + */ +extern void curr_rq_lock_irq_save(unsigned long *flags); +extern void curr_rq_unlock_irq_restore(unsigned long *flags); +extern unsigned long long __task_delta_exec(struct task_struct *tsk, int update); extern unsigned long long task_delta_exec(struct task_struct *); + extern void account_user_time(struct task_struct *, cputime_t); extern void account_user_time_scaled(struct task_struct *, cputime_t); extern void account_system_time(struct task_struct *, int, cputime_t); diff --git a/kernel/exit.c b/kernel/exit.c index d336c90a5f1..244edfd9686 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -922,6 +922,12 @@ static void exit_notify(struct task_struct *tsk, int group_dead) forget_original_parent(tsk); exit_task_namespaces(tsk); + /* + * Flush inherited counters to the parent - before the parent + * gets woken up by child-exit notifications. + */ + perf_counter_exit_task(tsk); + write_lock_irq(&tasklist_lock); if (group_dead) kill_orphaned_pgrp(tsk->group_leader, NULL); @@ -1093,11 +1099,6 @@ NORET_TYPE void do_exit(long code) mpol_put(tsk->mempolicy); tsk->mempolicy = NULL; #endif - /* - * These must happen late, after the PID is not - * hashed anymore, but still at a point that may sleep: - */ - perf_counter_exit_task(tsk); #ifdef CONFIG_FUTEX if (unlikely(!list_empty(&tsk->pi_state_list))) exit_pi_state_list(tsk); @@ -1121,6 +1122,12 @@ NORET_TYPE void do_exit(long code) if (tsk->splice_pipe) __free_pipe_info(tsk->splice_pipe); + /* + * These must happen late, after the PID is not + * hashed anymore, but still at a point that may sleep: + */ + perf_counter_exit_task(tsk); + preempt_disable(); /* causes final put_task_struct in finish_task_switch(). 
*/ tsk->state = TASK_DEAD; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 961d651aa57..f1110ac1267 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -18,6 +18,7 @@ #include #include #include +#include #include /* @@ -106,7 +107,8 @@ static void __perf_counter_remove_from_context(void *info) if (ctx->task && cpuctx->task_ctx != ctx) return; - spin_lock_irqsave(&ctx->lock, flags); + curr_rq_lock_irq_save(&flags); + spin_lock(&ctx->lock); if (counter->state == PERF_COUNTER_STATE_ACTIVE) { counter->hw_ops->disable(counter); @@ -135,7 +137,8 @@ static void __perf_counter_remove_from_context(void *info) perf_max_counters - perf_reserved_percpu); } - spin_unlock_irqrestore(&ctx->lock, flags); + spin_unlock(&ctx->lock); + curr_rq_unlock_irq_restore(&flags); } @@ -209,7 +212,8 @@ static void __perf_install_in_context(void *info) if (ctx->task && cpuctx->task_ctx != ctx) return; - spin_lock_irqsave(&ctx->lock, flags); + curr_rq_lock_irq_save(&flags); + spin_lock(&ctx->lock); /* * Protect the list operation against NMI by disabling the @@ -232,7 +236,8 @@ static void __perf_install_in_context(void *info) if (!ctx->task && cpuctx->max_pertask) cpuctx->max_pertask--; - spin_unlock_irqrestore(&ctx->lock, flags); + spin_unlock(&ctx->lock); + curr_rq_unlock_irq_restore(&flags); } /* @@ -438,15 +443,19 @@ int perf_counter_task_disable(void) struct task_struct *curr = current; struct perf_counter_context *ctx = &curr->perf_counter_ctx; struct perf_counter *counter; + unsigned long flags; u64 perf_flags; int cpu; if (likely(!ctx->nr_counters)) return 0; - local_irq_disable(); + curr_rq_lock_irq_save(&flags); cpu = smp_processor_id(); + /* force the update of the task clock: */ + __task_delta_exec(curr, 1); + perf_counter_task_sched_out(curr, cpu); spin_lock(&ctx->lock); @@ -463,7 +472,7 @@ int perf_counter_task_disable(void) spin_unlock(&ctx->lock); - local_irq_enable(); + curr_rq_unlock_irq_restore(&flags); return 0; } @@ -473,15 +482,19 @@ int perf_counter_task_enable(void) struct task_struct *curr = current; struct perf_counter_context *ctx = &curr->perf_counter_ctx; struct perf_counter *counter; + unsigned long flags; u64 perf_flags; int cpu; if (likely(!ctx->nr_counters)) return 0; - local_irq_disable(); + curr_rq_lock_irq_save(&flags); cpu = smp_processor_id(); + /* force the update of the task clock: */ + __task_delta_exec(curr, 1); + spin_lock(&ctx->lock); /* @@ -493,6 +506,7 @@ int perf_counter_task_enable(void) if (counter->state != PERF_COUNTER_STATE_OFF) continue; counter->state = PERF_COUNTER_STATE_INACTIVE; + counter->hw_event.disabled = 0; } hw_perf_restore(perf_flags); @@ -500,7 +514,7 @@ int perf_counter_task_enable(void) perf_counter_task_sched_in(curr, cpu); - local_irq_enable(); + curr_rq_unlock_irq_restore(&flags); return 0; } @@ -540,8 +554,11 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) static void __read(void *info) { struct perf_counter *counter = info; + unsigned long flags; + curr_rq_lock_irq_save(&flags); counter->hw_ops->read(counter); + curr_rq_unlock_irq_restore(&flags); } static u64 perf_counter_read(struct perf_counter *counter) @@ -860,13 +877,27 @@ static const struct hw_perf_counter_ops perf_ops_cpu_clock = { .read = cpu_clock_perf_counter_read, }; -static void task_clock_perf_counter_update(struct perf_counter *counter) +/* + * Called from within the scheduler: + */ +static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update) { - u64 prev, now; + struct task_struct *curr = counter->task; + u64 
delta; + + WARN_ON_ONCE(counter->task != current); + + delta = __task_delta_exec(curr, update); + + return curr->se.sum_exec_runtime + delta; +} + +static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now) +{ + u64 prev; s64 delta; prev = atomic64_read(&counter->hw.prev_count); - now = current->se.sum_exec_runtime; atomic64_set(&counter->hw.prev_count, now); @@ -877,17 +908,23 @@ static void task_clock_perf_counter_update(struct perf_counter *counter) static void task_clock_perf_counter_read(struct perf_counter *counter) { - task_clock_perf_counter_update(counter); + u64 now = task_clock_perf_counter_val(counter, 1); + + task_clock_perf_counter_update(counter, now); } static void task_clock_perf_counter_enable(struct perf_counter *counter) { - atomic64_set(&counter->hw.prev_count, current->se.sum_exec_runtime); + u64 now = task_clock_perf_counter_val(counter, 0); + + atomic64_set(&counter->hw.prev_count, now); } static void task_clock_perf_counter_disable(struct perf_counter *counter) { - task_clock_perf_counter_update(counter); + u64 now = task_clock_perf_counter_val(counter, 0); + + task_clock_perf_counter_update(counter, now); } static const struct hw_perf_counter_ops perf_ops_task_clock = { @@ -1267,6 +1304,7 @@ __perf_counter_exit_task(struct task_struct *child, { struct perf_counter *parent_counter; u64 parent_val, child_val; + unsigned long flags; u64 perf_flags; /* @@ -1275,7 +1313,7 @@ __perf_counter_exit_task(struct task_struct *child, * Be careful about zapping the list - IRQ/NMI context * could still be processing it: */ - local_irq_disable(); + curr_rq_lock_irq_save(&flags); perf_flags = hw_perf_save_disable(); if (child_counter->state == PERF_COUNTER_STATE_ACTIVE) { @@ -1294,7 +1332,7 @@ __perf_counter_exit_task(struct task_struct *child, list_del_init(&child_counter->list_entry); hw_perf_restore(perf_flags); - local_irq_enable(); + curr_rq_unlock_irq_restore(&flags); parent_counter = child_counter->parent; /* diff --git a/kernel/sched.c b/kernel/sched.c index 382cfdb5e38..4d84ff4c877 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -638,7 +638,7 @@ static inline int cpu_of(struct rq *rq) #define task_rq(p) cpu_rq(task_cpu(p)) #define cpu_curr(cpu) (cpu_rq(cpu)->curr) -static inline void update_rq_clock(struct rq *rq) +inline void update_rq_clock(struct rq *rq) { rq->clock = sched_clock_cpu(cpu_of(rq)); } @@ -969,6 +969,26 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) } } +void curr_rq_lock_irq_save(unsigned long *flags) + __acquires(rq->lock) +{ + struct rq *rq; + + local_irq_save(*flags); + rq = cpu_rq(smp_processor_id()); + spin_lock(&rq->lock); +} + +void curr_rq_unlock_irq_restore(unsigned long *flags) + __releases(rq->lock) +{ + struct rq *rq; + + rq = cpu_rq(smp_processor_id()); + spin_unlock(&rq->lock); + local_irq_restore(*flags); +} + void task_rq_unlock_wait(struct task_struct *p) { struct rq *rq = task_rq(p); @@ -2558,7 +2578,6 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) { fire_sched_out_preempt_notifiers(prev, next); - perf_counter_task_sched_out(prev, cpu_of(rq)); prepare_lock_switch(rq, next); prepare_arch_switch(next); } @@ -4089,6 +4108,29 @@ DEFINE_PER_CPU(struct kernel_stat, kstat); EXPORT_PER_CPU_SYMBOL(kstat); +/* + * Return any ns on the sched_clock that have not yet been banked in + * @p in case that task is currently running. 
+ */ +unsigned long long __task_delta_exec(struct task_struct *p, int update) +{ + s64 delta_exec; + struct rq *rq; + + rq = task_rq(p); + WARN_ON_ONCE(!runqueue_is_locked()); + WARN_ON_ONCE(!task_current(rq, p)); + + if (update) + update_rq_clock(rq); + + delta_exec = rq->clock - p->se.exec_start; + + WARN_ON_ONCE(delta_exec < 0); + + return delta_exec; +} + /* * Return any ns on the sched_clock that have not yet been banked in * @p in case that task is currently running. @@ -4316,13 +4358,13 @@ void scheduler_tick(void) update_rq_clock(rq); update_cpu_load(rq); curr->sched_class->task_tick(rq, curr, 0); + perf_counter_task_tick(curr, cpu); spin_unlock(&rq->lock); #ifdef CONFIG_SMP rq->idle_at_tick = idle_cpu(cpu); trigger_load_balance(rq, cpu); #endif - perf_counter_task_tick(curr, cpu); } #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ @@ -4512,6 +4554,7 @@ need_resched_nonpreemptible: if (likely(prev != next)) { sched_info_switch(prev, next); + perf_counter_task_sched_out(prev, cpu); rq->nr_switches++; rq->curr = next; -- cgit v1.2.3 From eef6cbf5844c620d9db9be99e4908cdf92492fb9 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 19 Dec 2008 10:20:42 +0100 Subject: perfcounters: pull inherited counters Change counter inheritance from a 'push' to a 'pull' model: instead of child tasks pushing their final counts to the parent, reuse the wait4 infrastructure to pull counters as child tasks are exit-processed, much like how cutime/cstime is collected. Signed-off-by: Ingo Molnar --- include/linux/init_task.h | 9 +++++++++ kernel/exit.c | 21 +++++++++------------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 23fd8909b9e..54fa2fa2c8e 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -113,6 +113,14 @@ extern struct group_info init_groups; # define CAP_INIT_BSET CAP_INIT_EFF_SET #endif +#ifdef CONFIG_PERF_COUNTERS +# define INIT_PERF_COUNTERS(tsk) \ + .perf_counter_ctx.counter_list = \ + LIST_HEAD_INIT(tsk.perf_counter_ctx.counter_list), +#else +# define INIT_PERF_COUNTERS(tsk) +#endif + /* * INIT_TASK is used to set up the first task table, touch at * your own risk!. Base=0, limit=0x1fffff (=2MB) @@ -180,6 +188,7 @@ extern struct group_info init_groups; INIT_IDS \ INIT_TRACE_IRQFLAGS \ INIT_LOCKDEP \ + INIT_PERF_COUNTERS(tsk) \ } diff --git a/kernel/exit.c b/kernel/exit.c index 244edfd9686..101b7eeff44 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -153,6 +153,9 @@ static void delayed_put_task_struct(struct rcu_head *rhp) { struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); +#ifdef CONFIG_PERF_COUNTERS + WARN_ON_ONCE(!list_empty(&tsk->perf_counter_ctx.counter_list)); +#endif trace_sched_process_free(tsk); put_task_struct(tsk); } @@ -922,12 +925,6 @@ static void exit_notify(struct task_struct *tsk, int group_dead) forget_original_parent(tsk); exit_task_namespaces(tsk); - /* - * Flush inherited counters to the parent - before the parent - * gets woken up by child-exit notifications. - */ - perf_counter_exit_task(tsk); - write_lock_irq(&tasklist_lock); if (group_dead) kill_orphaned_pgrp(tsk->group_leader, NULL); @@ -1122,12 +1119,6 @@ NORET_TYPE void do_exit(long code) if (tsk->splice_pipe) __free_pipe_info(tsk->splice_pipe); - /* - * These must happen late, after the PID is not - * hashed anymore, but still at a point that may sleep: - */ - perf_counter_exit_task(tsk); - preempt_disable(); /* causes final put_task_struct in finish_task_switch(). 
*/ tsk->state = TASK_DEAD; @@ -1371,6 +1362,12 @@ static int wait_task_zombie(struct task_struct *p, int options, */ read_unlock(&tasklist_lock); + /* + * Flush inherited counters to the parent - before the parent + * gets woken up by child-exit notifications. + */ + perf_counter_exit_task(p); + retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; status = (p->signal->flags & SIGNAL_GROUP_EXIT) ? p->signal->group_exit_code : p->exit_code; -- cgit v1.2.3 From 78b6084c907cea15bb40a564b974e072f5163781 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 21 Dec 2008 15:07:49 +0100 Subject: perfcounters: fix init context lock Signed-off-by: Ingo Molnar --- include/linux/init_task.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 54fa2fa2c8e..467cff545c3 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -116,7 +116,9 @@ extern struct group_info init_groups; #ifdef CONFIG_PERF_COUNTERS # define INIT_PERF_COUNTERS(tsk) \ .perf_counter_ctx.counter_list = \ - LIST_HEAD_INIT(tsk.perf_counter_ctx.counter_list), + LIST_HEAD_INIT(tsk.perf_counter_ctx.counter_list), \ + .perf_counter_ctx.lock = \ + __SPIN_LOCK_UNLOCKED(tsk.perf_counter_ctx.lock), #else # define INIT_PERF_COUNTERS(tsk) #endif -- cgit v1.2.3 From 95cdd2e7851cce79ab839cb0b3cbe68d7911d0f1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 21 Dec 2008 13:50:42 +0100 Subject: perfcounters: enable lowlevel pmc code to schedule counters Allow lowlevel ->enable() op to return an error if a counter can not be added. This can be used to handle counter constraints. Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 6 +++- include/linux/perf_counter.h | 2 +- kernel/perf_counter.c | 62 +++++++++++++++++++++++++++----------- 3 files changed, 51 insertions(+), 19 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index b6755712142..74090a393a7 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -244,7 +244,7 @@ static int fixed_mode_idx(struct hw_perf_counter *hwc) /* * Find a PMC slot for the freshly enabled / scheduled in counter: */ -static void pmc_generic_enable(struct perf_counter *counter) +static int pmc_generic_enable(struct perf_counter *counter) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); struct hw_perf_counter *hwc = &counter->hw; @@ -253,6 +253,8 @@ static void pmc_generic_enable(struct perf_counter *counter) /* Try to get the previous counter again */ if (test_and_set_bit(idx, cpuc->used)) { idx = find_first_zero_bit(cpuc->used, nr_counters_generic); + if (idx == nr_counters_generic) + return -EAGAIN; set_bit(idx, cpuc->used); hwc->idx = idx; } @@ -265,6 +267,8 @@ static void pmc_generic_enable(struct perf_counter *counter) __hw_perf_counter_set_period(counter, hwc, idx); __pmc_generic_enable(counter, hwc, idx); + + return 0; } void perf_counter_print_debug(void) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 48f76d2e54c..53af11d3767 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -128,7 +128,7 @@ struct perf_counter; * struct hw_perf_counter_ops - performance counter hw ops */ struct hw_perf_counter_ops { - void (*enable) (struct perf_counter *counter); + int (*enable) (struct perf_counter *counter); void (*disable) (struct perf_counter *counter); void (*read) (struct perf_counter *counter); }; diff --git a/kernel/perf_counter.c 
b/kernel/perf_counter.c index f1110ac1267..2e73929a695 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -355,21 +355,25 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu) cpuctx->task_ctx = NULL; } -static void +static int counter_sched_in(struct perf_counter *counter, struct perf_cpu_context *cpuctx, struct perf_counter_context *ctx, int cpu) { if (counter->state == PERF_COUNTER_STATE_OFF) - return; + return 0; + + if (counter->hw_ops->enable(counter)) + return -EAGAIN; - counter->hw_ops->enable(counter); counter->state = PERF_COUNTER_STATE_ACTIVE; counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ cpuctx->active_oncpu++; ctx->nr_active++; + + return 0; } static int @@ -378,20 +382,38 @@ group_sched_in(struct perf_counter *group_counter, struct perf_counter_context *ctx, int cpu) { - struct perf_counter *counter; - int was_group = 0; + struct perf_counter *counter, *partial_group; + int ret = 0; - counter_sched_in(group_counter, cpuctx, ctx, cpu); + if (counter_sched_in(group_counter, cpuctx, ctx, cpu)) + return -EAGAIN; /* * Schedule in siblings as one group (if any): */ list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { - counter_sched_in(counter, cpuctx, ctx, cpu); - was_group = 1; + if (counter_sched_in(counter, cpuctx, ctx, cpu)) { + partial_group = counter; + goto group_error; + } + ret = -EAGAIN; } - return was_group; + return ret; + +group_error: + /* + * Groups can be scheduled in as one unit only, so undo any + * partial group before returning: + */ + list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { + if (counter == partial_group) + break; + counter_sched_out(counter, cpuctx, ctx); + } + counter_sched_out(group_counter, cpuctx, ctx); + + return -EAGAIN; } /* @@ -416,9 +438,6 @@ void perf_counter_task_sched_in(struct task_struct *task, int cpu) spin_lock(&ctx->lock); list_for_each_entry(counter, &ctx->counter_list, list_entry) { - if (ctx->nr_active == cpuctx->max_pertask) - break; - /* * Listen to the 'cpu' scheduling filter constraint * of counters: @@ -856,8 +875,9 @@ static const struct file_operations perf_fops = { .poll = perf_poll, }; -static void cpu_clock_perf_counter_enable(struct perf_counter *counter) +static int cpu_clock_perf_counter_enable(struct perf_counter *counter) { + return 0; } static void cpu_clock_perf_counter_disable(struct perf_counter *counter) @@ -913,11 +933,13 @@ static void task_clock_perf_counter_read(struct perf_counter *counter) task_clock_perf_counter_update(counter, now); } -static void task_clock_perf_counter_enable(struct perf_counter *counter) +static int task_clock_perf_counter_enable(struct perf_counter *counter) { u64 now = task_clock_perf_counter_val(counter, 0); atomic64_set(&counter->hw.prev_count, now); + + return 0; } static void task_clock_perf_counter_disable(struct perf_counter *counter) @@ -960,12 +982,14 @@ static void page_faults_perf_counter_read(struct perf_counter *counter) page_faults_perf_counter_update(counter); } -static void page_faults_perf_counter_enable(struct perf_counter *counter) +static int page_faults_perf_counter_enable(struct perf_counter *counter) { /* * page-faults is a per-task value already, * so we dont have to clear it on switch-in. 
*/ + + return 0; } static void page_faults_perf_counter_disable(struct perf_counter *counter) @@ -1006,12 +1030,14 @@ static void context_switches_perf_counter_read(struct perf_counter *counter) context_switches_perf_counter_update(counter); } -static void context_switches_perf_counter_enable(struct perf_counter *counter) +static int context_switches_perf_counter_enable(struct perf_counter *counter) { /* * ->nvcsw + curr->nivcsw is a per-task value already, * so we dont have to clear it on switch-in. */ + + return 0; } static void context_switches_perf_counter_disable(struct perf_counter *counter) @@ -1050,12 +1076,14 @@ static void cpu_migrations_perf_counter_read(struct perf_counter *counter) cpu_migrations_perf_counter_update(counter); } -static void cpu_migrations_perf_counter_enable(struct perf_counter *counter) +static int cpu_migrations_perf_counter_enable(struct perf_counter *counter) { /* * se.nr_migrations is a per-task value already, * so we dont have to clear it on switch-in. */ + + return 0; } static void cpu_migrations_perf_counter_disable(struct perf_counter *counter) -- cgit v1.2.3 From 0dff86aa7b9ec65a6d07167b7afb050b5fc98ddc Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 23 Dec 2008 12:28:12 +0100 Subject: x86, perfcounters: print out the ->used bitmask Impact: extend debug printouts Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 74090a393a7..f3359c2b391 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -255,6 +255,7 @@ static int pmc_generic_enable(struct perf_counter *counter) idx = find_first_zero_bit(cpuc->used, nr_counters_generic); if (idx == nr_counters_generic) return -EAGAIN; + set_bit(idx, cpuc->used); hwc->idx = idx; } @@ -274,6 +275,7 @@ static int pmc_generic_enable(struct perf_counter *counter) void perf_counter_print_debug(void) { u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left; + struct cpu_hw_counters *cpuc; int cpu, idx; if (!nr_counters_generic) @@ -282,6 +284,7 @@ void perf_counter_print_debug(void) local_irq_disable(); cpu = smp_processor_id(); + cpuc = &per_cpu(cpu_hw_counters, cpu); rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); @@ -291,6 +294,7 @@ void perf_counter_print_debug(void) printk(KERN_INFO "CPU#%d: ctrl: %016llx\n", cpu, ctrl); printk(KERN_INFO "CPU#%d: status: %016llx\n", cpu, status); printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow); + printk(KERN_INFO "CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used); for (idx = 0; idx < nr_counters_generic; idx++) { rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl); -- cgit v1.2.3 From 8fe91e61cdc407c7556d3cd71cf20141a25bbcea Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 23 Dec 2008 12:29:25 +0100 Subject: perfcounters: remove ->nr_inherited Impact: remove dead code nr_inherited was not maintained correctly (not decremented) - and also not used - remove it. 
Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 1 - kernel/perf_counter.c | 1 - 2 files changed, 2 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 53af11d3767..1ea08e9f31c 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -164,7 +164,6 @@ struct perf_counter { struct task_struct *task; struct file *filp; - unsigned int nr_inherited; struct perf_counter *parent; /* * Protect attach/detach: diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 2e73929a695..48e1dbcdc1c 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1308,7 +1308,6 @@ inherit_counter(struct perf_counter *parent_counter, child_ctx->nr_counters++; child_counter->parent = parent_counter; - parent_counter->nr_inherited++; /* * inherit into child's child as well: */ -- cgit v1.2.3 From 235c7fc7c500e4fd1700c4ad01b5612bcdc1b449 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 21 Dec 2008 14:43:25 +0100 Subject: perfcounters: generalize the counter scheduler Impact: clean up and refactor code refactor the counter scheduler: separate out in/out functions and introduce a counter-rotation function as well. Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 220 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 142 insertions(+), 78 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 48e1dbcdc1c..d7a79f321b1 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -111,11 +111,12 @@ static void __perf_counter_remove_from_context(void *info) spin_lock(&ctx->lock); if (counter->state == PERF_COUNTER_STATE_ACTIVE) { - counter->hw_ops->disable(counter); counter->state = PERF_COUNTER_STATE_INACTIVE; + counter->hw_ops->disable(counter); ctx->nr_active--; cpuctx->active_oncpu--; counter->task = NULL; + counter->oncpu = -1; } ctx->nr_counters--; @@ -192,8 +193,36 @@ retry: spin_unlock_irq(&ctx->lock); } +static int +counter_sched_in(struct perf_counter *counter, + struct perf_cpu_context *cpuctx, + struct perf_counter_context *ctx, + int cpu) +{ + if (counter->state == PERF_COUNTER_STATE_OFF) + return 0; + + counter->state = PERF_COUNTER_STATE_ACTIVE; + counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ + /* + * The new state must be visible before we turn it on in the hardware: + */ + smp_wmb(); + + if (counter->hw_ops->enable(counter)) { + counter->state = PERF_COUNTER_STATE_INACTIVE; + counter->oncpu = -1; + return -EAGAIN; + } + + cpuctx->active_oncpu++; + ctx->nr_active++; + + return 0; +} + /* - * Cross CPU call to install and enable a preformance counter + * Cross CPU call to install and enable a performance counter */ static void __perf_install_in_context(void *info) { @@ -220,22 +249,17 @@ static void __perf_install_in_context(void *info) * counters on a global level. NOP for non NMI based counters. 
*/ perf_flags = hw_perf_save_disable(); - list_add_counter(counter, ctx); - hw_perf_restore(perf_flags); + list_add_counter(counter, ctx); ctx->nr_counters++; - if (cpuctx->active_oncpu < perf_max_counters) { - counter->state = PERF_COUNTER_STATE_ACTIVE; - counter->oncpu = cpu; - ctx->nr_active++; - cpuctx->active_oncpu++; - counter->hw_ops->enable(counter); - } + counter_sched_in(counter, cpuctx, ctx, cpu); if (!ctx->task && cpuctx->max_pertask) cpuctx->max_pertask--; + hw_perf_restore(perf_flags); + spin_unlock(&ctx->lock); curr_rq_unlock_irq_restore(&flags); } @@ -302,8 +326,8 @@ counter_sched_out(struct perf_counter *counter, if (counter->state != PERF_COUNTER_STATE_ACTIVE) return; - counter->hw_ops->disable(counter); counter->state = PERF_COUNTER_STATE_INACTIVE; + counter->hw_ops->disable(counter); counter->oncpu = -1; cpuctx->active_oncpu--; @@ -326,6 +350,22 @@ group_sched_out(struct perf_counter *group_counter, counter_sched_out(counter, cpuctx, ctx); } +void __perf_counter_sched_out(struct perf_counter_context *ctx, + struct perf_cpu_context *cpuctx) +{ + struct perf_counter *counter; + + if (likely(!ctx->nr_counters)) + return; + + spin_lock(&ctx->lock); + if (ctx->nr_active) { + list_for_each_entry(counter, &ctx->counter_list, list_entry) + group_sched_out(counter, cpuctx, ctx); + } + spin_unlock(&ctx->lock); +} + /* * Called from scheduler to remove the counters of the current task, * with interrupts disabled. @@ -341,39 +381,18 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu) { struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); struct perf_counter_context *ctx = &task->perf_counter_ctx; - struct perf_counter *counter; if (likely(!cpuctx->task_ctx)) return; - spin_lock(&ctx->lock); - if (ctx->nr_active) { - list_for_each_entry(counter, &ctx->counter_list, list_entry) - group_sched_out(counter, cpuctx, ctx); - } - spin_unlock(&ctx->lock); + __perf_counter_sched_out(ctx, cpuctx); + cpuctx->task_ctx = NULL; } -static int -counter_sched_in(struct perf_counter *counter, - struct perf_cpu_context *cpuctx, - struct perf_counter_context *ctx, - int cpu) +static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx) { - if (counter->state == PERF_COUNTER_STATE_OFF) - return 0; - - if (counter->hw_ops->enable(counter)) - return -EAGAIN; - - counter->state = PERF_COUNTER_STATE_ACTIVE; - counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ - - cpuctx->active_oncpu++; - ctx->nr_active++; - - return 0; + __perf_counter_sched_out(&cpuctx->ctx, cpuctx); } static int @@ -416,21 +435,10 @@ group_error: return -EAGAIN; } -/* - * Called from scheduler to add the counters of the current task - * with interrupts disabled. - * - * We restore the counter value and then enable it. - * - * This does not protect us against NMI, but enable() - * sets the enabled bit in the control field of counter _before_ - * accessing the counter control register. If a NMI hits, then it will - * keep the counter running. 
- */ -void perf_counter_task_sched_in(struct task_struct *task, int cpu) +static void +__perf_counter_sched_in(struct perf_counter_context *ctx, + struct perf_cpu_context *cpuctx, int cpu) { - struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); - struct perf_counter_context *ctx = &task->perf_counter_ctx; struct perf_counter *counter; if (likely(!ctx->nr_counters)) @@ -453,10 +461,35 @@ void perf_counter_task_sched_in(struct task_struct *task, int cpu) break; } spin_unlock(&ctx->lock); +} +/* + * Called from scheduler to add the counters of the current task + * with interrupts disabled. + * + * We restore the counter value and then enable it. + * + * This does not protect us against NMI, but enable() + * sets the enabled bit in the control field of counter _before_ + * accessing the counter control register. If a NMI hits, then it will + * keep the counter running. + */ +void perf_counter_task_sched_in(struct task_struct *task, int cpu) +{ + struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); + struct perf_counter_context *ctx = &task->perf_counter_ctx; + + __perf_counter_sched_in(ctx, cpuctx, cpu); cpuctx->task_ctx = ctx; } +static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) +{ + struct perf_counter_context *ctx = &cpuctx->ctx; + + __perf_counter_sched_in(ctx, cpuctx, cpu); +} + int perf_counter_task_disable(void) { struct task_struct *curr = current; @@ -514,6 +547,8 @@ int perf_counter_task_enable(void) /* force the update of the task clock: */ __task_delta_exec(curr, 1); + perf_counter_task_sched_out(curr, cpu); + spin_lock(&ctx->lock); /* @@ -538,19 +573,18 @@ int perf_counter_task_enable(void) return 0; } -void perf_counter_task_tick(struct task_struct *curr, int cpu) +/* + * Round-robin a context's counters: + */ +static void rotate_ctx(struct perf_counter_context *ctx) { - struct perf_counter_context *ctx = &curr->perf_counter_ctx; struct perf_counter *counter; u64 perf_flags; - if (likely(!ctx->nr_counters)) + if (!ctx->nr_counters) return; - perf_counter_task_sched_out(curr, cpu); - spin_lock(&ctx->lock); - /* * Rotate the first entry last (works just fine for group counters too): */ @@ -563,7 +597,24 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) hw_perf_restore(perf_flags); spin_unlock(&ctx->lock); +} + +void perf_counter_task_tick(struct task_struct *curr, int cpu) +{ + struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); + struct perf_counter_context *ctx = &curr->perf_counter_ctx; + const int rotate_percpu = 0; + + if (rotate_percpu) + perf_counter_cpu_sched_out(cpuctx); + perf_counter_task_sched_out(curr, cpu); + if (rotate_percpu) + rotate_ctx(&cpuctx->ctx); + rotate_ctx(ctx); + + if (rotate_percpu) + perf_counter_cpu_sched_in(cpuctx, cpu); perf_counter_task_sched_in(curr, cpu); } @@ -905,8 +956,6 @@ static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update) struct task_struct *curr = counter->task; u64 delta; - WARN_ON_ONCE(counter->task != current); - delta = __task_delta_exec(curr, update); return curr->se.sum_exec_runtime + delta; @@ -1160,6 +1209,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, counter->group_leader = group_leader; counter->hw_ops = NULL; + counter->state = PERF_COUNTER_STATE_INACTIVE; if (hw_event->disabled) counter->state = PERF_COUNTER_STATE_OFF; @@ -1331,35 +1381,49 @@ __perf_counter_exit_task(struct task_struct *child, { struct perf_counter *parent_counter; u64 parent_val, child_val; - unsigned long flags; - u64 
perf_flags; /* - * Disable and unlink this counter. - * - * Be careful about zapping the list - IRQ/NMI context - * could still be processing it: + * If we do not self-reap then we have to wait for the + * child task to unschedule (it will happen for sure), + * so that its counter is at its final count. (This + * condition triggers rarely - child tasks usually get + * off their CPU before the parent has a chance to + * get this far into the reaping action) */ - curr_rq_lock_irq_save(&flags); - perf_flags = hw_perf_save_disable(); - - if (child_counter->state == PERF_COUNTER_STATE_ACTIVE) { + if (child != current) { + wait_task_inactive(child, 0); + list_del_init(&child_counter->list_entry); + } else { struct perf_cpu_context *cpuctx; + unsigned long flags; + u64 perf_flags; + + /* + * Disable and unlink this counter. + * + * Be careful about zapping the list - IRQ/NMI context + * could still be processing it: + */ + curr_rq_lock_irq_save(&flags); + perf_flags = hw_perf_save_disable(); cpuctx = &__get_cpu_var(perf_cpu_context); - child_counter->hw_ops->disable(child_counter); - child_counter->state = PERF_COUNTER_STATE_INACTIVE; - child_counter->oncpu = -1; + if (child_counter->state == PERF_COUNTER_STATE_ACTIVE) { + child_counter->state = PERF_COUNTER_STATE_INACTIVE; + child_counter->hw_ops->disable(child_counter); + cpuctx->active_oncpu--; + child_ctx->nr_active--; + child_counter->oncpu = -1; + } - cpuctx->active_oncpu--; - child_ctx->nr_active--; - } + list_del_init(&child_counter->list_entry); - list_del_init(&child_counter->list_entry); + child_ctx->nr_counters--; - hw_perf_restore(perf_flags); - curr_rq_unlock_irq_restore(&flags); + hw_perf_restore(perf_flags); + curr_rq_unlock_irq_restore(&flags); + } parent_counter = child_counter->parent; /* -- cgit v1.2.3 From f650a672359819454c3d8d4135ecd1558cde0b24 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 23 Dec 2008 12:17:29 +0100 Subject: perfcounters: add PERF_COUNT_BUS_CYCLES Generalize "bus cycles" hw events - and map them to CPU_CLK_Unhalted.Ref on x86. 
(which is a good enough approximation) Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 3 ++- include/linux/perf_counter.h | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index f3359c2b391..86b2fdd344a 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -41,12 +41,13 @@ static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters); static const int intel_perfmon_event_map[] = { - [PERF_COUNT_CYCLES] = 0x003c, + [PERF_COUNT_CPU_CYCLES] = 0x003c, [PERF_COUNT_INSTRUCTIONS] = 0x00c0, [PERF_COUNT_CACHE_REFERENCES] = 0x4f2e, [PERF_COUNT_CACHE_MISSES] = 0x412e, [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x00c4, [PERF_COUNT_BRANCH_MISSES] = 0x00c5, + [PERF_COUNT_BUS_CYCLES] = 0x013c, }; static const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map); diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 1ea08e9f31c..ec77d1643d3 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -36,14 +36,15 @@ enum hw_event_types { /* * Common hardware events, generalized by the kernel: */ - PERF_COUNT_CYCLES = 0, + PERF_COUNT_CPU_CYCLES = 0, PERF_COUNT_INSTRUCTIONS = 1, PERF_COUNT_CACHE_REFERENCES = 2, PERF_COUNT_CACHE_MISSES = 3, PERF_COUNT_BRANCH_INSTRUCTIONS = 4, PERF_COUNT_BRANCH_MISSES = 5, + PERF_COUNT_BUS_CYCLES = 6, - PERF_HW_EVENTS_MAX = 6, + PERF_HW_EVENTS_MAX = 7, /* * Special "software" counters provided by the kernel, even if -- cgit v1.2.3 From 2f18d1e8d07ae67dd0afce875287756d4bd31a46 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 22 Dec 2008 11:10:42 +0100 Subject: x86, perfcounters: add support for fixed-function pmcs Impact: extend performance counter support on x86 Intel CPUs Modern Intel CPUs have 3 "fixed-function" performance counters, which count these hardware events: Instr_Retired.Any CPU_CLK_Unhalted.Core CPU_CLK_Unhalted.Ref Add support for them to the performance counters subsystem. Their use is transparent to user-space: the counter scheduler is extended to automatically recognize the cases where a fixed-function PMC can be utilized instead of a generic PMC. In such cases the generic PMC is kept available for more counters. The above fixed-function events map to these generic counter hw events: PERF_COUNT_INSTRUCTIONS PERF_COUNT_CPU_CYCLES PERF_COUNT_BUS_CYCLES (The 'bus' cycles are in reality often CPU-ish cycles, just with a fixed frequency.) 
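For reference, the fixed-function PMCs are not programmed through
per-counter EVENTSEL MSRs; each one owns a 4-bit field in the shared
MSR_ARCH_PERFMON_FIXED_CTR_CTRL register. A minimal sketch of the
enable-bit computation, following the __pmc_fixed_enable() logic in the
patch below (the helper name is illustrative, not part of the patch):

	/*
	 * Control field layout in MSR_ARCH_PERFMON_FIXED_CTR_CTRL,
	 * 4 bits per fixed counter:
	 *
	 *   bit 0: count while in ring 0 (kernel)
	 *   bit 1: count while in ring 3 (user)
	 *   bit 3: raise a PMI on overflow
	 */
	static u64 fixed_ctrl_bits(int idx, int count_kernel)
	{
		u64 bits = 0x8ULL | 0x2ULL;	/* PMI + user-mode counting */

		if (count_kernel)
			bits |= 0x1ULL;		/* kernel-mode counting too */

		return bits << (idx * 4);	/* idx is 0..2 */
	}

The disable side clears the same field with the mask 0xfULL << (idx * 4),
leaving the control bits of the other fixed counters untouched.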
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/perf_counter.h | 8 ++ arch/x86/kernel/cpu/perf_counter.c | 149 ++++++++++++++++++++++++++++++------ 2 files changed, 133 insertions(+), 24 deletions(-) diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h index 13745deb16c..2e08ed73664 100644 --- a/arch/x86/include/asm/perf_counter.h +++ b/arch/x86/include/asm/perf_counter.h @@ -23,6 +23,11 @@ #define ARCH_PERFMON_EVENTSEL_OS (1 << 17) #define ARCH_PERFMON_EVENTSEL_USR (1 << 16) +/* + * Includes eventsel and unit mask as well: + */ +#define ARCH_PERFMON_EVENT_MASK 0xffff + #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 @@ -69,12 +74,15 @@ union cpuid10_edx { /* Instr_Retired.Any: */ #define MSR_ARCH_PERFMON_FIXED_CTR0 0x309 +#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0) /* CPU_CLK_Unhalted.Core: */ #define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a +#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1) /* CPU_CLK_Unhalted.Ref: */ #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b +#define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2) #ifdef CONFIG_PERF_COUNTERS extern void init_hw_perf_counters(void); diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 86b2fdd344a..da46eca1254 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -26,6 +26,7 @@ static bool perf_counters_initialized __read_mostly; */ static int nr_counters_generic __read_mostly; static u64 perf_counter_mask __read_mostly; +static u64 counter_value_mask __read_mostly; static int nr_counters_fixed __read_mostly; @@ -120,9 +121,6 @@ static int __hw_perf_counter_init(struct perf_counter *counter) hwc->nmi = 1; } - hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0; - hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0; - hwc->irq_period = hw_event->irq_period; /* * Intel PMCs cannot be accessed sanely above 32 bit width, @@ -183,16 +181,34 @@ void hw_perf_restore(u64 ctrl) } EXPORT_SYMBOL_GPL(hw_perf_restore); +static inline void +__pmc_fixed_disable(struct perf_counter *counter, + struct hw_perf_counter *hwc, unsigned int __idx) +{ + int idx = __idx - X86_PMC_IDX_FIXED; + u64 ctrl_val, mask; + int err; + + mask = 0xfULL << (idx * 4); + + rdmsrl(hwc->config_base, ctrl_val); + ctrl_val &= ~mask; + err = checking_wrmsrl(hwc->config_base, ctrl_val); +} + static inline void __pmc_generic_disable(struct perf_counter *counter, struct hw_perf_counter *hwc, unsigned int idx) { int err; + if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) + return __pmc_fixed_disable(counter, hwc, idx); + err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0); } -static DEFINE_PER_CPU(u64, prev_left[X86_PMC_MAX_GENERIC]); +static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]); /* * Set the next IRQ period, based on the hwc->period_left value. 
@@ -202,8 +218,9 @@ static void __hw_perf_counter_set_period(struct perf_counter *counter, struct hw_perf_counter *hwc, int idx) { - s32 left = atomic64_read(&hwc->period_left); + s64 left = atomic64_read(&hwc->period_left); s32 period = hwc->irq_period; + int err; /* * If we are way outside a reasoable range then just skip forward: @@ -224,21 +241,64 @@ __hw_perf_counter_set_period(struct perf_counter *counter, * The hw counter starts counting from this counter offset, * mark it to be able to extra future deltas: */ - atomic64_set(&hwc->prev_count, (u64)(s64)-left); + atomic64_set(&hwc->prev_count, (u64)-left); - wrmsr(hwc->counter_base + idx, -left, 0); + err = checking_wrmsrl(hwc->counter_base + idx, + (u64)(-left) & counter_value_mask); +} + +static inline void +__pmc_fixed_enable(struct perf_counter *counter, + struct hw_perf_counter *hwc, unsigned int __idx) +{ + int idx = __idx - X86_PMC_IDX_FIXED; + u64 ctrl_val, bits, mask; + int err; + + /* + * Enable IRQ generation (0x8) and ring-3 counting (0x2), + * and enable ring-0 counting if allowed: + */ + bits = 0x8ULL | 0x2ULL; + if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) + bits |= 0x1; + bits <<= (idx * 4); + mask = 0xfULL << (idx * 4); + + rdmsrl(hwc->config_base, ctrl_val); + ctrl_val &= ~mask; + ctrl_val |= bits; + err = checking_wrmsrl(hwc->config_base, ctrl_val); } static void __pmc_generic_enable(struct perf_counter *counter, struct hw_perf_counter *hwc, int idx) { + if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) + return __pmc_fixed_enable(counter, hwc, idx); + wrmsr(hwc->config_base + idx, hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0); } -static int fixed_mode_idx(struct hw_perf_counter *hwc) +static int +fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) { + unsigned int event; + + if (unlikely(hwc->nmi)) + return -1; + + event = hwc->config & ARCH_PERFMON_EVENT_MASK; + + if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_INSTRUCTIONS])) + return X86_PMC_IDX_FIXED_INSTRUCTIONS; + if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_CPU_CYCLES])) + return X86_PMC_IDX_FIXED_CPU_CYCLES; + if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_BUS_CYCLES])) + return X86_PMC_IDX_FIXED_BUS_CYCLES; + return -1; } @@ -249,16 +309,39 @@ static int pmc_generic_enable(struct perf_counter *counter) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); struct hw_perf_counter *hwc = &counter->hw; - int idx = hwc->idx; + int idx; - /* Try to get the previous counter again */ - if (test_and_set_bit(idx, cpuc->used)) { - idx = find_first_zero_bit(cpuc->used, nr_counters_generic); - if (idx == nr_counters_generic) - return -EAGAIN; + idx = fixed_mode_idx(counter, hwc); + if (idx >= 0) { + /* + * Try to get the fixed counter, if that is already taken + * then try to get a generic counter: + */ + if (test_and_set_bit(idx, cpuc->used)) + goto try_generic; - set_bit(idx, cpuc->used); + hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; + /* + * We set it so that counter_base + idx in wrmsr/rdmsr maps to + * MSR_ARCH_PERFMON_FIXED_CTR0 ... 
CTR2: + */ + hwc->counter_base = + MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; hwc->idx = idx; + } else { + idx = hwc->idx; + /* Try to get the previous generic counter again */ + if (test_and_set_bit(idx, cpuc->used)) { +try_generic: + idx = find_first_zero_bit(cpuc->used, nr_counters_generic); + if (idx == nr_counters_generic) + return -EAGAIN; + + set_bit(idx, cpuc->used); + hwc->idx = idx; + } + hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0; + hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0; } perf_counters_lapic_init(hwc->nmi); @@ -266,6 +349,10 @@ static int pmc_generic_enable(struct perf_counter *counter) __pmc_generic_disable(counter, hwc, idx); cpuc->counters[idx] = counter; + /* + * Make it visible before enabling the hw: + */ + smp_wmb(); __hw_perf_counter_set_period(counter, hwc, idx); __pmc_generic_enable(counter, hwc, idx); @@ -275,7 +362,7 @@ static int pmc_generic_enable(struct perf_counter *counter) void perf_counter_print_debug(void) { - u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left; + u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; struct cpu_hw_counters *cpuc; int cpu, idx; @@ -290,11 +377,13 @@ void perf_counter_print_debug(void) rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); + rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed); printk(KERN_INFO "\n"); printk(KERN_INFO "CPU#%d: ctrl: %016llx\n", cpu, ctrl); printk(KERN_INFO "CPU#%d: status: %016llx\n", cpu, status); printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow); + printk(KERN_INFO "CPU#%d: fixed: %016llx\n", cpu, fixed); printk(KERN_INFO "CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used); for (idx = 0; idx < nr_counters_generic; idx++) { @@ -303,13 +392,19 @@ void perf_counter_print_debug(void) prev_left = per_cpu(prev_left[idx], cpu); - printk(KERN_INFO "CPU#%d: PMC%d ctrl: %016llx\n", + printk(KERN_INFO "CPU#%d: gen-PMC%d ctrl: %016llx\n", cpu, idx, pmc_ctrl); - printk(KERN_INFO "CPU#%d: PMC%d count: %016llx\n", + printk(KERN_INFO "CPU#%d: gen-PMC%d count: %016llx\n", cpu, idx, pmc_count); - printk(KERN_INFO "CPU#%d: PMC%d left: %016llx\n", + printk(KERN_INFO "CPU#%d: gen-PMC%d left: %016llx\n", cpu, idx, prev_left); } + for (idx = 0; idx < nr_counters_fixed; idx++) { + rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count); + + printk(KERN_INFO "CPU#%d: fixed-PMC%d count: %016llx\n", + cpu, idx, pmc_count); + } local_irq_enable(); } @@ -323,6 +418,11 @@ static void pmc_generic_disable(struct perf_counter *counter) clear_bit(idx, cpuc->used); cpuc->counters[idx] = NULL; + /* + * Make sure the cleared pointer becomes visible before we + * (potentially) free the counter: + */ + smp_wmb(); /* * Drain the remaining delta count out of a counter @@ -353,14 +453,11 @@ static void perf_save_and_restart(struct perf_counter *counter) { struct hw_perf_counter *hwc = &counter->hw; int idx = hwc->idx; - u64 pmc_ctrl; - - rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl); x86_perf_counter_update(counter, hwc, idx); __hw_perf_counter_set_period(counter, hwc, idx); - if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE) + if (counter->state == PERF_COUNTER_STATE_ACTIVE) __pmc_generic_enable(counter, hwc, idx); } @@ -373,6 +470,7 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown) * Store sibling timestamps (if any): */ list_for_each_entry(counter, &group_leader->sibling_list, list_entry) { + x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); 
perf_store_irq_data(sibling, counter->hw_event.type); perf_store_irq_data(sibling, atomic64_read(&counter->count)); @@ -403,7 +501,7 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi) again: ack = status; - for_each_bit(bit, (unsigned long *) &status, nr_counters_generic) { + for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { struct perf_counter *counter = cpuc->counters[bit]; clear_bit(bit, (unsigned long *) &status); @@ -561,6 +659,9 @@ void __init init_hw_perf_counters(void) perf_max_counters = nr_counters_generic; printk(KERN_INFO "... bit width: %d\n", eax.split.bit_width); + counter_value_mask = (1ULL << eax.split.bit_width) - 1; + printk(KERN_INFO "... value mask: %016Lx\n", counter_value_mask); + printk(KERN_INFO "... mask length: %d\n", eax.split.mask_length); nr_counters_fixed = edx.split.num_counters_fixed; -- cgit v1.2.3 From e44aef58ecfbe061eb4c53b939bcc35fb1bee82d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 25 Dec 2008 09:02:11 +0100 Subject: perfcounters: include asm/perf_counter.h only if CONFIG_PERF_COUNTERS=y Impact: build fix on ia64 KOSAKI Motohiro reported that -tip doesnt build on ia64 because asm/perf_counter.h only exists on x86 for now. Fix it. Reported-by: KOSAKI Motohiro Tested-by: KOSAKI Motohiro Acked-by: KOSAKI Motohiro Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index ec77d1643d3..cc3a75a239a 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -14,7 +14,10 @@ #define _LINUX_PERF_COUNTER_H #include -#include + +#ifdef CONFIG_PERF_COUNTERS +# include +#endif #include #include -- cgit v1.2.3 From 01ea1ccaa24dea3552e103be13b7897211607a8b Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Fri, 26 Dec 2008 21:05:06 -0800 Subject: perf_counter: more barrier in blank weak function Impact: fix panic possible panic Some versions of GCC inline the weak global function if it's empty. Add a barrier() to work it around. Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index d7a79f321b1..37f771691f9 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -45,8 +45,8 @@ hw_perf_counter_init(struct perf_counter *counter) } u64 __weak hw_perf_save_disable(void) { return 0; } -void __weak hw_perf_restore(u64 ctrl) { } -void __weak hw_perf_counter_setup(void) { } +void __weak hw_perf_restore(u64 ctrl) { barrier(); } +void __weak hw_perf_counter_setup(void) { barrier(); } static void list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) -- cgit v1.2.3 From 2b583d8bc8d7105b58d7481a4a0ceb718dac49c6 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 27 Dec 2008 19:15:43 +0530 Subject: x86: perf_counter remove unwanted hw_perf_enable_all Impact: clean, reduce kernel size a bit, avoid sparse warnings Fixes sparse warnings: arch/x86/kernel/cpu/perf_counter.c:153:6: warning: symbol 'hw_perf_enable_all' was not declared. Should it be static? 
arch/x86/kernel/cpu/perf_counter.c:279:3: warning: returning void-valued expression arch/x86/kernel/cpu/perf_counter.c:206:3: warning: returning void-valued expression arch/x86/kernel/cpu/perf_counter.c:206:3: warning: returning void-valued expression Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index da46eca1254..9376771f757 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -150,14 +150,6 @@ static int __hw_perf_counter_init(struct perf_counter *counter) return 0; } -void hw_perf_enable_all(void) -{ - if (unlikely(!perf_counters_initialized)) - return; - - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask); -} - u64 hw_perf_save_disable(void) { u64 ctrl; @@ -200,12 +192,10 @@ static inline void __pmc_generic_disable(struct perf_counter *counter, struct hw_perf_counter *hwc, unsigned int idx) { - int err; - if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) - return __pmc_fixed_disable(counter, hwc, idx); - - err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0); + __pmc_fixed_disable(counter, hwc, idx); + else + wrmsr_safe(hwc->config_base + idx, hwc->config, 0); } static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]); @@ -276,10 +266,10 @@ __pmc_generic_enable(struct perf_counter *counter, struct hw_perf_counter *hwc, int idx) { if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) - return __pmc_fixed_enable(counter, hwc, idx); - - wrmsr(hwc->config_base + idx, - hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0); + __pmc_fixed_enable(counter, hwc, idx); + else + wrmsr(hwc->config_base + idx, + hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0); } static int -- cgit v1.2.3 From ff6f05416ece2caec1a7a1f8180d6598e0ab9272 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 9 Jan 2009 16:19:25 +1100 Subject: perf_counter: Fix return value from dummy hw_perf_counter_init Impact: fix oops-causing bug Currently, if you try to use perf_counters on an architecture that has no hardware support, and you select an event that doesn't map to any of the defined software counters, you get an oops rather than an error. This is because the dummy hw_perf_counter_init returns ERR_PTR(-EINVAL) but the caller (perf_counter_alloc) only tests for NULL. This makes the dummy hw_perf_counter_init return NULL instead. Signed-off-by: Paul Mackerras --- kernel/perf_counter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 37f771691f9..4be1a8d872b 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -41,7 +41,7 @@ static DEFINE_MUTEX(perf_resource_mutex); extern __weak const struct hw_perf_counter_ops * hw_perf_counter_init(struct perf_counter *counter) { - return ERR_PTR(-EINVAL); + return NULL; } u64 __weak hw_perf_save_disable(void) { return 0; } -- cgit v1.2.3 From 9abf8a08bc8f18a3b125f834f00e2e71b49c15d2 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 9 Jan 2009 16:26:43 +1100 Subject: perf_counter: Fix the cpu_clock software counter Impact: bug fix Currently if you do (e.g.) timec -e -1 ls, it will report 0 for the value of the cpu_clock counter. The reason is that the core assumes that a counter's count field is up-to-date when the counter is inactive, and doesn't call the counter's read function. 
However, the cpu_clock counter code only updates the count in the read function. This fixes it by making both the read and disable functions update the count. It also makes the counter ignore time passing while the counter is disabled, by making the enable function update the hw.prev_count field. Signed-off-by: Paul Mackerras --- kernel/perf_counter.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 4be1a8d872b..b7a027a2ef0 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -928,18 +928,32 @@ static const struct file_operations perf_fops = { static int cpu_clock_perf_counter_enable(struct perf_counter *counter) { + int cpu = raw_smp_processor_id(); + + atomic64_set(&counter->hw.prev_count, cpu_clock(cpu)); return 0; } +static void cpu_clock_perf_counter_update(struct perf_counter *counter) +{ + int cpu = raw_smp_processor_id(); + s64 prev; + u64 now; + + now = cpu_clock(cpu); + prev = atomic64_read(&counter->hw.prev_count); + atomic64_set(&counter->hw.prev_count, now); + atomic64_add(now - prev, &counter->count); +} + static void cpu_clock_perf_counter_disable(struct perf_counter *counter) { + cpu_clock_perf_counter_update(counter); } static void cpu_clock_perf_counter_read(struct perf_counter *counter) { - int cpu = raw_smp_processor_id(); - - atomic64_set(&counter->count, cpu_clock(cpu)); + cpu_clock_perf_counter_update(counter); } static const struct hw_perf_counter_ops perf_ops_cpu_clock = { -- cgit v1.2.3 From 3cbed429a9ccdb7a243f733b1056fe5c39e9004c Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 9 Jan 2009 16:43:42 +1100 Subject: perf_counter: Add optional hw_perf_group_sched_in arch function Impact: extend perf_counter infrastructure This adds an optional hw_perf_group_sched_in() arch function that enables a whole group of counters in one go. It returns 1 if it added the group successfully, 0 if it did nothing (and therefore the core needs to add the counters individually), or a negative number if an error occurred. It should add all the counters and enable any software counters in the group, or else add none of them and return an error. There are a couple of related changes/improvements in the group handling here: * As an optimization, group_sched_out() and group_sched_in() now check the state of the group leader, and do nothing if the leader is not active or disabled. * We now call hw_perf_save_disable/hw_perf_restore around the complete set of counter enable/disable calls in __perf_counter_sched_in/out, to give the arch code the opportunity to defer updating the hardware state until the hw_perf_restore call if it wants. * We no longer stop adding groups after we get to a group that has more than one counter. We will ultimately add an option for a group to be exclusive. The current code doesn't really implement exclusive groups anyway, since a group could end up going on with other counters that get added before it. 
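To make the return-value contract concrete, this is how the core's
group_sched_in() consumes the hook's result (see the kernel/perf_counter.c
hunk below); it falls back to scheduling the counters one at a time only
when the hook returns 0:

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;	/* <0: error, >0: arch added the group */

	/* ret == 0: add the leader and its siblings individually */

The __weak default simply returns 0, so architectures that do not provide
an implementation keep the existing behaviour.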
Signed-off-by: Paul Mackerras --- include/linux/perf_counter.h | 3 +++ kernel/perf_counter.c | 31 ++++++++++++++++++++++++++----- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index cc3a75a239a..b21d1ea4c05 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -236,6 +236,9 @@ extern u64 hw_perf_save_disable(void); extern void hw_perf_restore(u64 ctrl); extern int perf_counter_task_disable(void); extern int perf_counter_task_enable(void); +extern int hw_perf_group_sched_in(struct perf_counter *group_leader, + struct perf_cpu_context *cpuctx, + struct perf_counter_context *ctx, int cpu); #else static inline void diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index b7a027a2ef0..9ad11e44d9a 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -47,6 +47,12 @@ hw_perf_counter_init(struct perf_counter *counter) u64 __weak hw_perf_save_disable(void) { return 0; } void __weak hw_perf_restore(u64 ctrl) { barrier(); } void __weak hw_perf_counter_setup(void) { barrier(); } +int __weak hw_perf_group_sched_in(struct perf_counter *group_leader, + struct perf_cpu_context *cpuctx, + struct perf_counter_context *ctx, int cpu) +{ + return 0; +} static void list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) @@ -341,6 +347,9 @@ group_sched_out(struct perf_counter *group_counter, { struct perf_counter *counter; + if (group_counter->state != PERF_COUNTER_STATE_ACTIVE) + return; + counter_sched_out(group_counter, cpuctx, ctx); /* @@ -354,15 +363,18 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx, struct perf_cpu_context *cpuctx) { struct perf_counter *counter; + u64 flags; if (likely(!ctx->nr_counters)) return; spin_lock(&ctx->lock); + flags = hw_perf_save_disable(); if (ctx->nr_active) { list_for_each_entry(counter, &ctx->counter_list, list_entry) group_sched_out(counter, cpuctx, ctx); } + hw_perf_restore(flags); spin_unlock(&ctx->lock); } @@ -402,7 +414,14 @@ group_sched_in(struct perf_counter *group_counter, int cpu) { struct perf_counter *counter, *partial_group; - int ret = 0; + int ret; + + if (group_counter->state == PERF_COUNTER_STATE_OFF) + return 0; + + ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu); + if (ret) + return ret < 0 ? 
ret : 0; if (counter_sched_in(group_counter, cpuctx, ctx, cpu)) return -EAGAIN; @@ -415,10 +434,9 @@ group_sched_in(struct perf_counter *group_counter, partial_group = counter; goto group_error; } - ret = -EAGAIN; } - return ret; + return 0; group_error: /* @@ -440,11 +458,13 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, struct perf_cpu_context *cpuctx, int cpu) { struct perf_counter *counter; + u64 flags; if (likely(!ctx->nr_counters)) return; spin_lock(&ctx->lock); + flags = hw_perf_save_disable(); list_for_each_entry(counter, &ctx->counter_list, list_entry) { /* * Listen to the 'cpu' scheduling filter constraint @@ -454,12 +474,13 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, continue; /* - * If we scheduled in a group atomically and - * exclusively, break out: + * If we scheduled in a group atomically and exclusively, + * or if this group can't go on, break out: */ if (group_sched_in(counter, cpuctx, ctx, cpu)) break; } + hw_perf_restore(flags); spin_unlock(&ctx->lock); } -- cgit v1.2.3 From 4eb96fcfe07b7f2a05577e57533840f8e26bea53 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 9 Jan 2009 17:24:34 +1100 Subject: perf_counter: Add dummy perf_counter_print_debug function Impact: minimize requirements on architectures Currently, an architecture just enabling CONFIG_PERF_COUNTERS but not providing any extra functions will fail to build with perf_counter_print_debug being undefined, since we don't provide an empty dummy definition like we do with the hw_perf_* functions. This provides an empty dummy perf_counter_print_debug() to make it easier for architectures to turn on CONFIG_PERF_COUNTERS. Signed-off-by: Paul Mackerras --- kernel/perf_counter.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 9ad11e44d9a..4c0dccb756a 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -54,6 +54,8 @@ int __weak hw_perf_group_sched_in(struct perf_counter *group_leader, return 0; } +void __weak perf_counter_print_debug(void) { } + static void list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) { -- cgit v1.2.3 From d662ed26734473d4cb5f3d78cebfec8f9126e97c Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 9 Jan 2009 17:01:53 +1100 Subject: powerpc/perf_counter: Add perf_counter system call on powerpc ... with an empty/dummy asm/perf_counter.h so it builds. Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/perf_counter.h | 10 ++++++++++ arch/powerpc/include/asm/systbl.h | 1 + arch/powerpc/include/asm/unistd.h | 3 ++- arch/powerpc/platforms/Kconfig.cputype | 1 + 4 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 arch/powerpc/include/asm/perf_counter.h diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h new file mode 100644 index 00000000000..59530ae1d53 --- /dev/null +++ b/arch/powerpc/include/asm/perf_counter.h @@ -0,0 +1,10 @@ +/* + * Performance counter support - PowerPC-specific definitions. + * + * Copyright 2008-2009 Paul Mackerras, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 803def23665..da300c4d288 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -322,3 +322,4 @@ SYSCALL_SPU(epoll_create1) SYSCALL_SPU(dup3) SYSCALL_SPU(pipe2) SYSCALL(inotify_init1) +SYSCALL(perf_counter_open) diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index e07d0c76ed7..7cef5afe89d 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -341,10 +341,11 @@ #define __NR_dup3 316 #define __NR_pipe2 317 #define __NR_inotify_init1 318 +#define __NR_perf_counter_open 319 #ifdef __KERNEL__ -#define __NR_syscalls 319 +#define __NR_syscalls 320 #define __NR__exit __NR_exit #define NR_syscalls __NR_syscalls diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 3d0c776f888..94dd1fb9a00 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -1,6 +1,7 @@ config PPC64 bool "64-bit kernel" default n + select HAVE_PERF_COUNTERS help This option selects whether a 32-bit or a 64-bit kernel will be built. -- cgit v1.2.3 From 93a6d3ce6962044fe9badf528fed46b455d58292 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 9 Jan 2009 16:52:19 +1100 Subject: powerpc: Provide a way to defer perf counter work until interrupts are enabled Because 64-bit powerpc uses lazy (soft) interrupt disabling, it is possible for a performance monitor exception to come in when the kernel thinks interrupts are disabled (i.e. when they are soft-disabled but hard-enabled). In such a situation the performance monitor exception handler might have some processing to do (such as process wakeups) which can't be done in what is effectively an NMI handler. This provides a way to defer that work until interrupts get enabled, either in raw_local_irq_restore() or by returning from an interrupt handler to code that had interrupts enabled. We have a per-processor flag that indicates that there is work pending to do when interrupts subsequently get re-enabled. This flag is checked in the interrupt return path and in raw_local_irq_restore(), and if it is set, perf_counter_do_pending() is called to do the pending work. 
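Condensed into C, the deferral pattern looks like the sketch below. It
paraphrases the entry_64.S and irq.c hunks in this patch together with
the PMU interrupt handler added later in this series; it is illustrative
only (the predicate name stands in for the regs->softe check), not
additional code introduced here:

	/* In the performance monitor exception handler (effectively an NMI): */
	if (interrupts_were_soft_enabled(regs))	/* regs->softe on 64-bit */
		perf_counter_do_pending();	/* safe to do wakeups right away */
	else
		set_perf_counter_pending(1);	/* defer the work */

	/* In raw_local_irq_restore() and on return from interrupt: */
	if (get_perf_counter_pending())
		perf_counter_do_pending();	/* clears the flag, does wakeups */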
Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/hw_irq.h | 31 +++++++++++++++++++++++++++++++ arch/powerpc/include/asm/paca.h | 1 + arch/powerpc/kernel/asm-offsets.c | 1 + arch/powerpc/kernel/entry_64.S | 9 +++++++++ arch/powerpc/kernel/irq.c | 10 ++++++++++ 5 files changed, 52 insertions(+) diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index f75a5fc64d2..e10f151c3db 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -131,5 +131,36 @@ static inline int irqs_disabled_flags(unsigned long flags) */ struct hw_interrupt_type; +#ifdef CONFIG_PERF_COUNTERS +static inline unsigned long get_perf_counter_pending(void) +{ + unsigned long x; + + asm volatile("lbz %0,%1(13)" + : "=r" (x) + : "i" (offsetof(struct paca_struct, perf_counter_pending))); + return x; +} + +static inline void set_perf_counter_pending(int x) +{ + asm volatile("stb %0,%1(13)" : : + "r" (x), + "i" (offsetof(struct paca_struct, perf_counter_pending))); +} + +extern void perf_counter_do_pending(void); + +#else + +static inline unsigned long get_perf_counter_pending(void) +{ + return 0; +} + +static inline void set_perf_counter_pending(int x) {} +static inline void perf_counter_do_pending(void) {} +#endif /* CONFIG_PERF_COUNTERS */ + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_HW_IRQ_H */ diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 082b3aedf14..6ef05572301 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -99,6 +99,7 @@ struct paca_struct { u8 soft_enabled; /* irq soft-enable flag */ u8 hard_enabled; /* set if irqs are enabled in MSR */ u8 io_sync; /* writel() needs spin_unlock sync */ + u8 perf_counter_pending; /* PM interrupt while soft-disabled */ /* Stuff for accurate time accounting */ u64 user_time; /* accumulated usermode TB ticks */ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 661d07d2146..cea46290011 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -127,6 +127,7 @@ int main(void) DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr)); DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled)); DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled)); + DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending)); DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 383ed6eb008..f30b4e553c5 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -526,6 +526,15 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES) 2: TRACE_AND_RESTORE_IRQ(r5); +#ifdef CONFIG_PERF_COUNTERS + /* check paca->perf_counter_pending if we're enabling ints */ + lbz r3,PACAPERFPEND(r13) + and. 
r3,r3,r5 + beq 27f + bl .perf_counter_do_pending +27: +#endif /* CONFIG_PERF_COUNTERS */ + /* extract EE bit and use it to restore paca->hard_enabled */ ld r3,_MSR(r1) rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */ diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index ac222d0ab12..4efb886ea43 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -104,6 +104,13 @@ static inline notrace void set_soft_enabled(unsigned long enable) : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); } +#ifdef CONFIG_PERF_COUNTERS +notrace void __weak perf_counter_do_pending(void) +{ + set_perf_counter_pending(0); +} +#endif + notrace void raw_local_irq_restore(unsigned long en) { /* @@ -135,6 +142,9 @@ notrace void raw_local_irq_restore(unsigned long en) iseries_handle_interrupts(); } + if (get_perf_counter_pending()) + perf_counter_do_pending(); + /* * if (get_paca()->hard_enabled) return; * But again we need to take care that gcc gets hard_enabled directly -- cgit v1.2.3 From 4574910e5087085a1f330ff8373cee4503f5c77c Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 9 Jan 2009 20:21:55 +1100 Subject: powerpc/perf_counter: Add generic support for POWER-family PMU hardware This provides the architecture-specific functions needed to access PMU hardware on the 64-bit PowerPC processors. It has been designed for the IBM POWER family (POWER 4/4+/5/5+/6 and PPC970) but will hopefully also suit other 64-bit PowerPC machines (although probably not Cell given how different it is in this area). This doesn't include back-ends for any specific processors. This implements a system which allows back-ends to express the constraints that their hardware has on what events can be counted simultaneously. The constraints are expressed as a 64-bit mask + 64-bit value for each event, and the encoding is capable of expressing the constraints arising from having a set of multiplexers feeding an event bus, with some events being available through multiple multiplexer settings, such as we get on POWER4 and PPC970. Furthermore, the back-end can supply alternative event codes for each event, and the constraint checking code will try all possible combinations of alternative event codes to try to find a combination that will fit. Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/perf_counter.h | 62 +++ arch/powerpc/kernel/Makefile | 1 + arch/powerpc/kernel/perf_counter.c | 754 ++++++++++++++++++++++++++++++++ 3 files changed, 817 insertions(+) create mode 100644 arch/powerpc/kernel/perf_counter.c diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index 59530ae1d53..9d7ff6d7fb5 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -8,3 +8,65 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ +#include + +#define MAX_HWCOUNTERS 8 +#define MAX_EVENT_ALTERNATIVES 8 + +/* + * This struct provides the constants and functions needed to + * describe the PMU on a particular POWER-family CPU. 
+ */ +struct power_pmu { + int n_counter; + int max_alternatives; + u64 add_fields; + u64 test_adder; + int (*compute_mmcr)(unsigned int events[], int n_ev, + unsigned int hwc[], u64 mmcr[]); + int (*get_constraint)(unsigned int event, u64 *mskp, u64 *valp); + int (*get_alternatives)(unsigned int event, unsigned int alt[]); + void (*disable_pmc)(unsigned int pmc, u64 mmcr[]); + int n_generic; + int *generic_events; +}; + +extern struct power_pmu *ppmu; + +/* + * The power_pmu.get_constraint function returns a 64-bit value and + * a 64-bit mask that express the constraints between this event and + * other events. + * + * The value and mask are divided up into (non-overlapping) bitfields + * of three different types: + * + * Select field: this expresses the constraint that some set of bits + * in MMCR* needs to be set to a specific value for this event. For a + * select field, the mask contains 1s in every bit of the field, and + * the value contains a unique value for each possible setting of the + * MMCR* bits. The constraint checking code will ensure that two events + * that set the same field in their masks have the same value in their + * value dwords. + * + * Add field: this expresses the constraint that there can be at most + * N events in a particular class. A field of k bits can be used for + * N <= 2^(k-1) - 1. The mask has the most significant bit of the field + * set (and the other bits 0), and the value has only the least significant + * bit of the field set. In addition, the 'add_fields' and 'test_adder' + * in the struct power_pmu for this processor come into play. The + * add_fields value contains 1 in the LSB of the field, and the + * test_adder contains 2^(k-1) - 1 - N in the field. + * + * NAND field: this expresses the constraint that you may not have events + * in all of a set of classes. (For example, on PPC970, you can't select + * events from the FPU, ISU and IDU simultaneously, although any two are + * possible.) For N classes, the field is N+1 bits wide, and each class + * is assigned one bit from the least-significant N bits. The mask has + * only the most-significant bit set, and the value has only the bit + * for the event's class set. The test_adder has the least significant + * bit set in the field. + * + * If an event is not subject to the constraint expressed by a particular + * field, then it will have 0 in both the mask and value for that field. + */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 1308a86e907..fde190bbb2b 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -94,6 +94,7 @@ obj-$(CONFIG_AUDIT) += audit.o obj64-$(CONFIG_AUDIT) += compat_audit.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o +obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c new file mode 100644 index 00000000000..c7d4c2966a5 --- /dev/null +++ b/arch/powerpc/kernel/perf_counter.c @@ -0,0 +1,754 @@ +/* + * Performance counter support - powerpc architecture code + * + * Copyright 2008-2009 Paul Mackerras, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include +#include +#include +#include +#include +#include +#include + +struct cpu_hw_counters { + int n_counters; + int n_percpu; + int disabled; + int n_added; + struct perf_counter *counter[MAX_HWCOUNTERS]; + unsigned int events[MAX_HWCOUNTERS]; + u64 mmcr[3]; +}; +DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters); + +struct power_pmu *ppmu; + +void perf_counter_print_debug(void) +{ +} + +/* + * Return 1 for a software counter, 0 for a hardware counter + */ +static inline int is_software_counter(struct perf_counter *counter) +{ + return !counter->hw_event.raw && counter->hw_event.type < 0; +} + +/* + * Read one performance monitor counter (PMC). + */ +static unsigned long read_pmc(int idx) +{ + unsigned long val; + + switch (idx) { + case 1: + val = mfspr(SPRN_PMC1); + break; + case 2: + val = mfspr(SPRN_PMC2); + break; + case 3: + val = mfspr(SPRN_PMC3); + break; + case 4: + val = mfspr(SPRN_PMC4); + break; + case 5: + val = mfspr(SPRN_PMC5); + break; + case 6: + val = mfspr(SPRN_PMC6); + break; + case 7: + val = mfspr(SPRN_PMC7); + break; + case 8: + val = mfspr(SPRN_PMC8); + break; + default: + printk(KERN_ERR "oops trying to read PMC%d\n", idx); + val = 0; + } + return val; +} + +/* + * Write one PMC. + */ +static void write_pmc(int idx, unsigned long val) +{ + switch (idx) { + case 1: + mtspr(SPRN_PMC1, val); + break; + case 2: + mtspr(SPRN_PMC2, val); + break; + case 3: + mtspr(SPRN_PMC3, val); + break; + case 4: + mtspr(SPRN_PMC4, val); + break; + case 5: + mtspr(SPRN_PMC5, val); + break; + case 6: + mtspr(SPRN_PMC6, val); + break; + case 7: + mtspr(SPRN_PMC7, val); + break; + case 8: + mtspr(SPRN_PMC8, val); + break; + default: + printk(KERN_ERR "oops trying to write PMC%d\n", idx); + } +} + +/* + * Check if a set of events can all go on the PMU at once. + * If they can't, this will look at alternative codes for the events + * and see if any combination of alternative codes is feasible. + * The feasible set is returned in event[]. + */ +static int power_check_constraints(unsigned int event[], int n_ev) +{ + u64 mask, value, nv; + unsigned int alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; + u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; + u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; + u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS]; + int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS]; + int i, j; + u64 addf = ppmu->add_fields; + u64 tadd = ppmu->test_adder; + + if (n_ev > ppmu->n_counter) + return -1; + + /* First see if the events will go on as-is */ + for (i = 0; i < n_ev; ++i) { + alternatives[i][0] = event[i]; + if (ppmu->get_constraint(event[i], &amasks[i][0], + &avalues[i][0])) + return -1; + choice[i] = 0; + } + value = mask = 0; + for (i = 0; i < n_ev; ++i) { + nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf); + if ((((nv + tadd) ^ value) & mask) != 0 || + (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0) + break; + value = nv; + mask |= amasks[i][0]; + } + if (i == n_ev) + return 0; /* all OK */ + + /* doesn't work, gather alternatives... 
*/ + if (!ppmu->get_alternatives) + return -1; + for (i = 0; i < n_ev; ++i) { + n_alt[i] = ppmu->get_alternatives(event[i], alternatives[i]); + for (j = 1; j < n_alt[i]; ++j) + ppmu->get_constraint(alternatives[i][j], + &amasks[i][j], &avalues[i][j]); + } + + /* enumerate all possibilities and see if any will work */ + i = 0; + j = -1; + value = mask = nv = 0; + while (i < n_ev) { + if (j >= 0) { + /* we're backtracking, restore context */ + value = svalues[i]; + mask = smasks[i]; + j = choice[i]; + } + /* + * See if any alternative k for event i, + * where k > j, will satisfy the constraints. + */ + while (++j < n_alt[i]) { + nv = (value | avalues[i][j]) + + (value & avalues[i][j] & addf); + if ((((nv + tadd) ^ value) & mask) == 0 && + (((nv + tadd) ^ avalues[i][j]) + & amasks[i][j]) == 0) + break; + } + if (j >= n_alt[i]) { + /* + * No feasible alternative, backtrack + * to event i-1 and continue enumerating its + * alternatives from where we got up to. + */ + if (--i < 0) + return -1; + } else { + /* + * Found a feasible alternative for event i, + * remember where we got up to with this event, + * go on to the next event, and start with + * the first alternative for it. + */ + choice[i] = j; + svalues[i] = value; + smasks[i] = mask; + value = nv; + mask |= amasks[i][j]; + ++i; + j = -1; + } + } + + /* OK, we have a feasible combination, tell the caller the solution */ + for (i = 0; i < n_ev; ++i) + event[i] = alternatives[i][choice[i]]; + return 0; +} + +static void power_perf_read(struct perf_counter *counter) +{ + long val, delta, prev; + + if (!counter->hw.idx) + return; + /* + * Performance monitor interrupts come even when interrupts + * are soft-disabled, as long as interrupts are hard-enabled. + * Therefore we treat them like NMIs. + */ + do { + prev = atomic64_read(&counter->hw.prev_count); + barrier(); + val = read_pmc(counter->hw.idx); + } while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev); + + /* The counters are only 32 bits wide */ + delta = (val - prev) & 0xfffffffful; + atomic64_add(delta, &counter->count); + atomic64_sub(delta, &counter->hw.period_left); +} + +/* + * Disable all counters to prevent PMU interrupts and to allow + * counters to be added or removed. + */ +u64 hw_perf_save_disable(void) +{ + struct cpu_hw_counters *cpuhw; + unsigned long ret; + unsigned long flags; + + local_irq_save(flags); + cpuhw = &__get_cpu_var(cpu_hw_counters); + + ret = cpuhw->disabled; + if (!ret) { + cpuhw->disabled = 1; + cpuhw->n_added = 0; + + /* + * Set the 'freeze counters' bit. + * The barrier is to make sure the mtspr has been + * executed and the PMU has frozen the counters + * before we return. + */ + mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC); + mb(); + } + local_irq_restore(flags); + return ret; +} + +/* + * Re-enable all counters if disable == 0. + * If we were previously disabled and counters were added, then + * put the new config on the PMU. + */ +void hw_perf_restore(u64 disable) +{ + struct perf_counter *counter; + struct cpu_hw_counters *cpuhw; + unsigned long flags; + long i; + unsigned long val; + s64 left; + unsigned int hwc_index[MAX_HWCOUNTERS]; + + if (disable) + return; + local_irq_save(flags); + cpuhw = &__get_cpu_var(cpu_hw_counters); + cpuhw->disabled = 0; + + /* + * If we didn't change anything, or only removed counters, + * no need to recalculate MMCR* settings and reset the PMCs. + * Just reenable the PMU with the current MMCR* settings + * (possibly updated for removal of counters). 
+ */ + if (!cpuhw->n_added) { + mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); + mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); + mtspr(SPRN_MMCR0, cpuhw->mmcr[0]); + goto out; + } + + /* + * Compute MMCR* values for the new set of counters + */ + if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index, + cpuhw->mmcr)) { + /* shouldn't ever get here */ + printk(KERN_ERR "oops compute_mmcr failed\n"); + goto out; + } + + /* + * Write the new configuration to MMCR* with the freeze + * bit set and set the hardware counters to their initial values. + * Then unfreeze the counters. + */ + mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); + mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); + mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) + | MMCR0_FC); + + /* + * Read off any pre-existing counters that need to move + * to another PMC. + */ + for (i = 0; i < cpuhw->n_counters; ++i) { + counter = cpuhw->counter[i]; + if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) { + power_perf_read(counter); + write_pmc(counter->hw.idx, 0); + counter->hw.idx = 0; + } + } + + /* + * Initialize the PMCs for all the new and moved counters. + */ + for (i = 0; i < cpuhw->n_counters; ++i) { + counter = cpuhw->counter[i]; + if (counter->hw.idx) + continue; + val = 0; + if (counter->hw_event.irq_period) { + left = atomic64_read(&counter->hw.period_left); + if (left < 0x80000000L) + val = 0x80000000L - left; + } + atomic64_set(&counter->hw.prev_count, val); + counter->hw.idx = hwc_index[i] + 1; + write_pmc(counter->hw.idx, val); + } + mb(); + cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; + mtspr(SPRN_MMCR0, cpuhw->mmcr[0]); + + out: + local_irq_restore(flags); +} + +static int collect_events(struct perf_counter *group, int max_count, + struct perf_counter *ctrs[], unsigned int *events) +{ + int n = 0; + struct perf_counter *counter; + + if (!is_software_counter(group)) { + if (n >= max_count) + return -1; + ctrs[n] = group; + events[n++] = group->hw.config; + } + list_for_each_entry(counter, &group->sibling_list, list_entry) { + if (!is_software_counter(counter) && + counter->state != PERF_COUNTER_STATE_OFF) { + if (n >= max_count) + return -1; + ctrs[n] = counter; + events[n++] = counter->hw.config; + } + } + return n; +} + +static void counter_sched_in(struct perf_counter *counter, int cpu) +{ + counter->state = PERF_COUNTER_STATE_ACTIVE; + counter->oncpu = cpu; + if (is_software_counter(counter)) + counter->hw_ops->enable(counter); +} + +/* + * Called to enable a whole group of counters. + * Returns 1 if the group was enabled, or -EAGAIN if it could not be. + * Assumes the caller has disabled interrupts and has + * frozen the PMU with hw_perf_save_disable. 
+ */ +int hw_perf_group_sched_in(struct perf_counter *group_leader, + struct perf_cpu_context *cpuctx, + struct perf_counter_context *ctx, int cpu) +{ + struct cpu_hw_counters *cpuhw; + long i, n, n0; + struct perf_counter *sub; + + cpuhw = &__get_cpu_var(cpu_hw_counters); + n0 = cpuhw->n_counters; + n = collect_events(group_leader, ppmu->n_counter - n0, + &cpuhw->counter[n0], &cpuhw->events[n0]); + if (n < 0) + return -EAGAIN; + if (power_check_constraints(cpuhw->events, n + n0)) + return -EAGAIN; + cpuhw->n_counters = n0 + n; + cpuhw->n_added += n; + + /* + * OK, this group can go on; update counter states etc., + * and enable any software counters + */ + for (i = n0; i < n0 + n; ++i) + cpuhw->counter[i]->hw.config = cpuhw->events[i]; + n = 1; + counter_sched_in(group_leader, cpu); + list_for_each_entry(sub, &group_leader->sibling_list, list_entry) { + if (sub->state != PERF_COUNTER_STATE_OFF) { + counter_sched_in(sub, cpu); + ++n; + } + } + cpuctx->active_oncpu += n; + ctx->nr_active += n; + + return 1; +} + +/* + * Add a counter to the PMU. + * If all counters are not already frozen, then we disable and + * re-enable the PMU in order to get hw_perf_restore to do the + * actual work of reconfiguring the PMU. + */ +static int power_perf_enable(struct perf_counter *counter) +{ + struct cpu_hw_counters *cpuhw; + unsigned long flags; + u64 pmudis; + int n0; + int ret = -EAGAIN; + + local_irq_save(flags); + pmudis = hw_perf_save_disable(); + + /* + * Add the counter to the list (if there is room) + * and check whether the total set is still feasible. + */ + cpuhw = &__get_cpu_var(cpu_hw_counters); + n0 = cpuhw->n_counters; + if (n0 >= ppmu->n_counter) + goto out; + cpuhw->counter[n0] = counter; + cpuhw->events[n0] = counter->hw.config; + if (power_check_constraints(cpuhw->events, n0 + 1)) + goto out; + + counter->hw.config = cpuhw->events[n0]; + ++cpuhw->n_counters; + ++cpuhw->n_added; + + ret = 0; + out: + hw_perf_restore(pmudis); + local_irq_restore(flags); + return ret; +} + +/* + * Remove a counter from the PMU. 
+ */ +static void power_perf_disable(struct perf_counter *counter) +{ + struct cpu_hw_counters *cpuhw; + long i; + u64 pmudis; + unsigned long flags; + + local_irq_save(flags); + pmudis = hw_perf_save_disable(); + + power_perf_read(counter); + + cpuhw = &__get_cpu_var(cpu_hw_counters); + for (i = 0; i < cpuhw->n_counters; ++i) { + if (counter == cpuhw->counter[i]) { + while (++i < cpuhw->n_counters) + cpuhw->counter[i-1] = cpuhw->counter[i]; + --cpuhw->n_counters; + ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr); + write_pmc(counter->hw.idx, 0); + counter->hw.idx = 0; + break; + } + } + if (cpuhw->n_counters == 0) { + /* disable exceptions if no counters are running */ + cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); + } + + hw_perf_restore(pmudis); + local_irq_restore(flags); +} + +struct hw_perf_counter_ops power_perf_ops = { + .enable = power_perf_enable, + .disable = power_perf_disable, + .read = power_perf_read +}; + +const struct hw_perf_counter_ops * +hw_perf_counter_init(struct perf_counter *counter) +{ + unsigned long ev; + struct perf_counter *ctrs[MAX_HWCOUNTERS]; + unsigned int events[MAX_HWCOUNTERS]; + int n; + + if (!ppmu) + return NULL; + if ((s64)counter->hw_event.irq_period < 0) + return NULL; + ev = counter->hw_event.type; + if (!counter->hw_event.raw) { + if (ev >= ppmu->n_generic || + ppmu->generic_events[ev] == 0) + return NULL; + ev = ppmu->generic_events[ev]; + } + counter->hw.config_base = ev; + counter->hw.idx = 0; + + /* + * If this is in a group, check if it can go on with all the + * other hardware counters in the group. We assume the counter + * hasn't been linked into its leader's sibling list at this point. + */ + n = 0; + if (counter->group_leader != counter) { + n = collect_events(counter->group_leader, ppmu->n_counter - 1, + ctrs, events); + if (n < 0) + return NULL; + } + events[n++] = ev; + if (power_check_constraints(events, n)) + return NULL; + + counter->hw.config = events[n - 1]; + atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period); + return &power_perf_ops; +} + +/* + * Handle wakeups. + */ +void perf_counter_do_pending(void) +{ + int i; + struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters); + struct perf_counter *counter; + + set_perf_counter_pending(0); + for (i = 0; i < cpuhw->n_counters; ++i) { + counter = cpuhw->counter[i]; + if (counter && counter->wakeup_pending) { + counter->wakeup_pending = 0; + wake_up(&counter->waitq); + } + } +} + +/* + * Record data for an irq counter. + * This function was lifted from the x86 code; maybe it should + * go in the core? + */ +static void perf_store_irq_data(struct perf_counter *counter, u64 data) +{ + struct perf_data *irqdata = counter->irqdata; + + if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) { + irqdata->overrun++; + } else { + u64 *p = (u64 *) &irqdata->data[irqdata->len]; + + *p = data; + irqdata->len += sizeof(u64); + } +} + +/* + * Record all the values of the counters in a group + */ +static void perf_handle_group(struct perf_counter *counter) +{ + struct perf_counter *leader, *sub; + + leader = counter->group_leader; + list_for_each_entry(sub, &leader->sibling_list, list_entry) { + if (sub != counter) + sub->hw_ops->read(sub); + perf_store_irq_data(counter, sub->hw_event.type); + perf_store_irq_data(counter, atomic64_read(&sub->count)); + } +} + +/* + * A counter has overflowed; update its count and record + * things if requested. Note that interrupts are hard-disabled + * here so there is no possibility of being interrupted. 
+ */ +static void record_and_restart(struct perf_counter *counter, long val, + struct pt_regs *regs) +{ + s64 prev, delta, left; + int record = 0; + + /* we don't have to worry about interrupts here */ + prev = atomic64_read(&counter->hw.prev_count); + delta = (val - prev) & 0xfffffffful; + atomic64_add(delta, &counter->count); + + /* + * See if the total period for this counter has expired, + * and update for the next period. + */ + val = 0; + left = atomic64_read(&counter->hw.period_left) - delta; + if (counter->hw_event.irq_period) { + if (left <= 0) { + left += counter->hw_event.irq_period; + if (left <= 0) + left = counter->hw_event.irq_period; + record = 1; + } + if (left < 0x80000000L) + val = 0x80000000L - left; + } + write_pmc(counter->hw.idx, val); + atomic64_set(&counter->hw.prev_count, val); + atomic64_set(&counter->hw.period_left, left); + + /* + * Finally record data if requested. + */ + if (record) { + switch (counter->hw_event.record_type) { + case PERF_RECORD_SIMPLE: + break; + case PERF_RECORD_IRQ: + perf_store_irq_data(counter, instruction_pointer(regs)); + counter->wakeup_pending = 1; + break; + case PERF_RECORD_GROUP: + perf_handle_group(counter); + counter->wakeup_pending = 1; + break; + } + } +} + +/* + * Performance monitor interrupt stuff + */ +static void perf_counter_interrupt(struct pt_regs *regs) +{ + int i; + struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters); + struct perf_counter *counter; + long val; + int need_wakeup = 0, found = 0; + + for (i = 0; i < cpuhw->n_counters; ++i) { + counter = cpuhw->counter[i]; + val = read_pmc(counter->hw.idx); + if ((int)val < 0) { + /* counter has overflowed */ + found = 1; + record_and_restart(counter, val, regs); + if (counter->wakeup_pending) + need_wakeup = 1; + } + } + + /* + * In case we didn't find and reset the counter that caused + * the interrupt, scan all counters and reset any that are + * negative, to avoid getting continual interrupts. + * Any that we processed in the previous loop will not be negative. + */ + if (!found) { + for (i = 0; i < ppmu->n_counter; ++i) { + val = read_pmc(i + 1); + if ((int)val < 0) + write_pmc(i + 1, 0); + } + } + + /* + * Reset MMCR0 to its normal value. This will set PMXE and + * clear FC (freeze counters) and PMAO (perf mon alert occurred) + * and thus allow interrupts to occur again. + * XXX might want to use MSR.PM to keep the counters frozen until + * we get back out of this interrupt. + */ + mtspr(SPRN_MMCR0, cpuhw->mmcr[0]); + + /* + * If we need a wakeup, check whether interrupts were soft-enabled + * when we took the interrupt. If they were, we can wake stuff up + * immediately; otherwise we'll have to set a flag and do the + * wakeup when interrupts get soft-enabled. + */ + if (need_wakeup) { + if (regs->softe) { + irq_enter(); + perf_counter_do_pending(); + irq_exit(); + } else { + set_perf_counter_pending(1); + } + } +} + +static int init_perf_counters(void) +{ + if (reserve_pmc_hardware(perf_counter_interrupt)) { + printk(KERN_ERR "Couldn't init performance monitor subsystem\n"); + return -EBUSY; + } + + return 0; +} + +arch_initcall(init_perf_counters); -- cgit v1.2.3 From 16b067993dee3dfde61b20027e0b168dc06201ee Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sat, 10 Jan 2009 16:34:07 +1100 Subject: powerpc/perf_counter: Add support for PPC970 family This adds the back-end for the PMU on the PPC970 family. The PPC970 allows events from the ISU to be selected in two different ways. 
Rather than use alternative event codes to express this, we instead use a single encoding for ISU events and express the resulting constraint (that you can't select events from all three of FPU/IFU/VPU, ISU and IDU/STS at the same time, since they all come in through only 2 multiplexers) using a NAND constraint field, and work out which multiplexer is used for ISU events at compute_mmcr time. Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/Makefile | 2 +- arch/powerpc/kernel/perf_counter.c | 13 ++ arch/powerpc/kernel/ppc970-pmu.c | 375 +++++++++++++++++++++++++++++++++++++ 3 files changed, 389 insertions(+), 1 deletion(-) create mode 100644 arch/powerpc/kernel/ppc970-pmu.c diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index fde190bbb2b..45798f6fb13 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -94,7 +94,7 @@ obj-$(CONFIG_AUDIT) += audit.o obj64-$(CONFIG_AUDIT) += compat_audit.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o -obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o +obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o ppc970-pmu.o obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index c7d4c2966a5..5561ecb02a4 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -741,13 +741,26 @@ static void perf_counter_interrupt(struct pt_regs *regs) } } +extern struct power_pmu ppc970_pmu; + static int init_perf_counters(void) { + unsigned long pvr; + if (reserve_pmc_hardware(perf_counter_interrupt)) { printk(KERN_ERR "Couldn't init performance monitor subsystem\n"); return -EBUSY; } + /* XXX should get this from cputable */ + pvr = mfspr(SPRN_PVR); + switch (PVR_VER(pvr)) { + case PV_970: + case PV_970FX: + case PV_970MP: + ppmu = &ppc970_pmu; + break; + } return 0; } diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c new file mode 100644 index 00000000000..c3256580be1 --- /dev/null +++ b/arch/powerpc/kernel/ppc970-pmu.c @@ -0,0 +1,375 @@ +/* + * Performance counter support for PPC970-family processors. + * + * Copyright 2008-2009 Paul Mackerras, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include +#include +#include + +/* + * Bits in event code for PPC970 + */ +#define PM_PMC_SH 12 /* PMC number (1-based) for direct events */ +#define PM_PMC_MSK 0xf +#define PM_UNIT_SH 8 /* TTMMUX number and setting - unit select */ +#define PM_UNIT_MSK 0xf +#define PM_BYTE_SH 4 /* Byte number of event bus to use */ +#define PM_BYTE_MSK 3 +#define PM_PMCSEL_MSK 0xf + +/* Values in PM_UNIT field */ +#define PM_NONE 0 +#define PM_FPU 1 +#define PM_VPU 2 +#define PM_ISU 3 +#define PM_IFU 4 +#define PM_IDU 5 +#define PM_STS 6 +#define PM_LSU0 7 +#define PM_LSU1U 8 +#define PM_LSU1L 9 +#define PM_LASTUNIT 9 + +/* + * Bits in MMCR0 for PPC970 + */ +#define MMCR0_PMC1SEL_SH 8 +#define MMCR0_PMC2SEL_SH 1 +#define MMCR_PMCSEL_MSK 0x1f + +/* + * Bits in MMCR1 for PPC970 + */ +#define MMCR1_TTM0SEL_SH 62 +#define MMCR1_TTM1SEL_SH 59 +#define MMCR1_TTM3SEL_SH 53 +#define MMCR1_TTMSEL_MSK 3 +#define MMCR1_TD_CP_DBG0SEL_SH 50 +#define MMCR1_TD_CP_DBG1SEL_SH 48 +#define MMCR1_TD_CP_DBG2SEL_SH 46 +#define MMCR1_TD_CP_DBG3SEL_SH 44 +#define MMCR1_PMC1_ADDER_SEL_SH 39 +#define MMCR1_PMC2_ADDER_SEL_SH 38 +#define MMCR1_PMC6_ADDER_SEL_SH 37 +#define MMCR1_PMC5_ADDER_SEL_SH 36 +#define MMCR1_PMC8_ADDER_SEL_SH 35 +#define MMCR1_PMC7_ADDER_SEL_SH 34 +#define MMCR1_PMC3_ADDER_SEL_SH 33 +#define MMCR1_PMC4_ADDER_SEL_SH 32 +#define MMCR1_PMC3SEL_SH 27 +#define MMCR1_PMC4SEL_SH 22 +#define MMCR1_PMC5SEL_SH 17 +#define MMCR1_PMC6SEL_SH 12 +#define MMCR1_PMC7SEL_SH 7 +#define MMCR1_PMC8SEL_SH 2 + +static short mmcr1_adder_bits[8] = { + MMCR1_PMC1_ADDER_SEL_SH, + MMCR1_PMC2_ADDER_SEL_SH, + MMCR1_PMC3_ADDER_SEL_SH, + MMCR1_PMC4_ADDER_SEL_SH, + MMCR1_PMC5_ADDER_SEL_SH, + MMCR1_PMC6_ADDER_SEL_SH, + MMCR1_PMC7_ADDER_SEL_SH, + MMCR1_PMC8_ADDER_SEL_SH +}; + +/* + * Bits in MMCRA + */ + +/* + * Layout of constraint bits: + * 6666555555555544444444443333333333222222222211111111110000000000 + * 3210987654321098765432109876543210987654321098765432109876543210 + * <><>[ >[ >[ >< >< >< >< ><><><><><><><><> + * T0T1 UC PS1 PS2 B0 B1 B2 B3 P1P2P3P4P5P6P7P8 + * + * T0 - TTM0 constraint + * 46-47: TTM0SEL value (0=FPU, 2=IFU, 3=VPU) 0xC000_0000_0000 + * + * T1 - TTM1 constraint + * 44-45: TTM1SEL value (0=IDU, 3=STS) 0x3000_0000_0000 + * + * UC - unit constraint: can't have all three of FPU|IFU|VPU, ISU, IDU|STS + * 43: UC3 error 0x0800_0000_0000 + * 42: FPU|IFU|VPU events needed 0x0400_0000_0000 + * 41: ISU events needed 0x0200_0000_0000 + * 40: IDU|STS events needed 0x0100_0000_0000 + * + * PS1 + * 39: PS1 error 0x0080_0000_0000 + * 36-38: count of events needing PMC1/2/5/6 0x0070_0000_0000 + * + * PS2 + * 35: PS2 error 0x0008_0000_0000 + * 32-34: count of events needing PMC3/4/7/8 0x0007_0000_0000 + * + * B0 + * 28-31: Byte 0 event source 0xf000_0000 + * Encoding as for the event code + * + * B1, B2, B3 + * 24-27, 20-23, 16-19: Byte 1, 2, 3 event sources + * + * P1 + * 15: P1 error 0x8000 + * 14-15: Count of events needing PMC1 + * + * P2..P8 + * 0-13: Count of events needing PMC2..PMC8 + */ + +/* Masks and values for using events from the various units */ +static u64 unit_cons[PM_LASTUNIT+1][2] = { + [PM_FPU] = { 0xc80000000000ull, 0x040000000000ull }, + [PM_VPU] = { 0xc80000000000ull, 0xc40000000000ull }, + [PM_ISU] = { 0x080000000000ull, 0x020000000000ull }, + [PM_IFU] = { 0xc80000000000ull, 0x840000000000ull }, + [PM_IDU] = { 0x380000000000ull, 0x010000000000ull }, + [PM_STS] = { 0x380000000000ull, 0x310000000000ull }, +}; + +static int p970_get_constraint(unsigned int event, u64 *maskp, u64 *valp) +{ + int pmc, byte, 
unit, sh; + u64 mask = 0, value = 0; + int grp = -1; + + pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; + if (pmc) { + if (pmc > 8) + return -1; + sh = (pmc - 1) * 2; + mask |= 2 << sh; + value |= 1 << sh; + grp = ((pmc - 1) >> 1) & 1; + } + unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; + if (unit) { + if (unit > PM_LASTUNIT) + return -1; + mask |= unit_cons[unit][0]; + value |= unit_cons[unit][1]; + byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; + /* + * Bus events on bytes 0 and 2 can be counted + * on PMC1/2/5/6; bytes 1 and 3 on PMC3/4/7/8. + */ + if (!pmc) + grp = byte & 1; + /* Set byte lane select field */ + mask |= 0xfULL << (28 - 4 * byte); + value |= (u64)unit << (28 - 4 * byte); + } + if (grp == 0) { + /* increment PMC1/2/5/6 field */ + mask |= 0x8000000000ull; + value |= 0x1000000000ull; + } else if (grp == 1) { + /* increment PMC3/4/7/8 field */ + mask |= 0x800000000ull; + value |= 0x100000000ull; + } + *maskp = mask; + *valp = value; + return 0; +} + +static int p970_get_alternatives(unsigned int event, unsigned int alt[]) +{ + alt[0] = event; + + /* 2 alternatives for LSU empty */ + if (event == 0x2002 || event == 0x3002) { + alt[1] = event ^ 0x1000; + return 2; + } + + return 1; +} + +static int p970_compute_mmcr(unsigned int event[], int n_ev, + unsigned int hwc[], u64 mmcr[]) +{ + u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0; + unsigned int pmc, unit, byte, psel; + unsigned int ttm, grp; + unsigned int pmc_inuse = 0; + unsigned int pmc_grp_use[2]; + unsigned char busbyte[4]; + unsigned char unituse[16]; + unsigned char unitmap[] = { 0, 0<<3, 3<<3, 1<<3, 2<<3, 0|4, 3|4 }; + unsigned char ttmuse[2]; + unsigned char pmcsel[8]; + int i; + + if (n_ev > 8) + return -1; + + /* First pass to count resource use */ + pmc_grp_use[0] = pmc_grp_use[1] = 0; + memset(busbyte, 0, sizeof(busbyte)); + memset(unituse, 0, sizeof(unituse)); + for (i = 0; i < n_ev; ++i) { + pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; + if (pmc) { + if (pmc_inuse & (1 << (pmc - 1))) + return -1; + pmc_inuse |= 1 << (pmc - 1); + /* count 1/2/5/6 vs 3/4/7/8 use */ + ++pmc_grp_use[((pmc - 1) >> 1) & 1]; + } + unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; + byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; + if (unit) { + if (unit > PM_LASTUNIT) + return -1; + if (!pmc) + ++pmc_grp_use[byte & 1]; + if (busbyte[byte] && busbyte[byte] != unit) + return -1; + busbyte[byte] = unit; + unituse[unit] = 1; + } + } + if (pmc_grp_use[0] > 4 || pmc_grp_use[1] > 4) + return -1; + + /* + * Assign resources and set multiplexer selects. + * + * PM_ISU can go either on TTM0 or TTM1, but that's the only + * choice we have to deal with. + */ + if (unituse[PM_ISU] & + (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_VPU])) + unitmap[PM_ISU] = 2 | 4; /* move ISU to TTM1 */ + /* Set TTM[01]SEL fields. */ + ttmuse[0] = ttmuse[1] = 0; + for (i = PM_FPU; i <= PM_STS; ++i) { + if (!unituse[i]) + continue; + ttm = unitmap[i]; + ++ttmuse[(ttm >> 2) & 1]; + mmcr1 |= (u64)(ttm & ~4) << MMCR1_TTM1SEL_SH; + } + /* Check only one unit per TTMx */ + if (ttmuse[0] > 1 || ttmuse[1] > 1) + return -1; + + /* Set byte lane select fields and TTM3SEL. 
*/ + for (byte = 0; byte < 4; ++byte) { + unit = busbyte[byte]; + if (!unit) + continue; + if (unit <= PM_STS) + ttm = (unitmap[unit] >> 2) & 1; + else if (unit == PM_LSU0) + ttm = 2; + else { + ttm = 3; + if (unit == PM_LSU1L && byte >= 2) + mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte); + } + mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); + } + + /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ + memset(pmcsel, 0x8, sizeof(pmcsel)); /* 8 means don't count */ + for (i = 0; i < n_ev; ++i) { + pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; + unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; + byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; + psel = event[i] & PM_PMCSEL_MSK; + if (!pmc) { + /* Bus event or any-PMC direct event */ + if (unit) + psel |= 0x10 | ((byte & 2) << 2); + else + psel |= 8; + for (pmc = 0; pmc < 8; ++pmc) { + if (pmc_inuse & (1 << pmc)) + continue; + grp = (pmc >> 1) & 1; + if (unit) { + if (grp == (byte & 1)) + break; + } else if (pmc_grp_use[grp] < 4) { + ++pmc_grp_use[grp]; + break; + } + } + pmc_inuse |= 1 << pmc; + } else { + /* Direct event */ + --pmc; + if (psel == 0 && (byte & 2)) + /* add events on higher-numbered bus */ + mmcr1 |= 1ull << mmcr1_adder_bits[pmc]; + } + pmcsel[pmc] = psel; + hwc[i] = pmc; + } + for (pmc = 0; pmc < 2; ++pmc) + mmcr0 |= pmcsel[pmc] << (MMCR0_PMC1SEL_SH - 7 * pmc); + for (; pmc < 8; ++pmc) + mmcr1 |= (u64)pmcsel[pmc] << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2)); + if (pmc_inuse & 1) + mmcr0 |= MMCR0_PMC1CE; + if (pmc_inuse & 0xfe) + mmcr0 |= MMCR0_PMCjCE; + + mmcra |= 0x2000; /* mark only one IOP per PPC instruction */ + + /* Return MMCRx values */ + mmcr[0] = mmcr0; + mmcr[1] = mmcr1; + mmcr[2] = mmcra; + return 0; +} + +static void p970_disable_pmc(unsigned int pmc, u64 mmcr[]) +{ + int shift, i; + + if (pmc <= 1) { + shift = MMCR0_PMC1SEL_SH - 7 * pmc; + i = 0; + } else { + shift = MMCR1_PMC3SEL_SH - 5 * (pmc - 2); + i = 1; + } + /* + * Setting the PMCxSEL field to 0x08 disables PMC x. + */ + mmcr[i] = (mmcr[i] & ~(0x1fUL << shift)) | (0x08UL << shift); +} + +static int ppc970_generic_events[] = { + [PERF_COUNT_CPU_CYCLES] = 7, + [PERF_COUNT_INSTRUCTIONS] = 1, + [PERF_COUNT_CACHE_REFERENCES] = 0x8810, /* PM_LD_REF_L1 */ + [PERF_COUNT_CACHE_MISSES] = 0x3810, /* PM_LD_MISS_L1 */ + [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x431, /* PM_BR_ISSUED */ + [PERF_COUNT_BRANCH_MISSES] = 0x327, /* PM_GRP_BR_MPRED */ +}; + +struct power_pmu ppc970_pmu = { + .n_counter = 8, + .max_alternatives = 2, + .add_fields = 0x001100005555ull, + .test_adder = 0x013300000000ull, + .compute_mmcr = p970_compute_mmcr, + .get_constraint = p970_get_constraint, + .get_alternatives = p970_get_alternatives, + .disable_pmc = p970_disable_pmc, + .n_generic = ARRAY_SIZE(ppc970_generic_events), + .generic_events = ppc970_generic_events, +}; -- cgit v1.2.3 From f78628374a13bc150db77c6e02d4f2c0a7f932ef Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 9 Jan 2009 21:05:35 +1100 Subject: powerpc/perf_counter: Add support for POWER6 This adds the back-end for the PMU on the POWER6 processor. Fortunately, the event selection hardware is somewhat simpler on POWER6 than on other POWER family processors, so the constraints fit into only 32 bits. 
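To make the (mask, value) constraint encoding concrete, here is a small stand-alone sketch of the idea (the field layout and event pairings are made up for illustration; this is not the kernel code): each event's get_constraint routine emits a mask/value pair, and two events can be counted together only if their values agree on every bit where both masks are set, which is how shared multiplexer settings such as TTMxSEL or NEST_SEL are arbitrated:

#include <stdio.h>
#include <stdint.h>

/* two constraints are compatible if they agree wherever both masks apply */
static int constraints_compatible(uint64_t m1, uint64_t v1,
				  uint64_t m2, uint64_t v2)
{
	return ((v1 ^ v2) & m1 & m2) == 0;
}

int main(void)
{
	uint64_t mux_mask = 0xf;			/* hypothetical mux select field */
	uint64_t want_mux3 = 3, want_mux5 = 5;		/* two events, same mux */
	uint64_t flag_mask = 0x100, flag_val = 0x100;	/* unrelated field */

	/* same multiplexer, different settings: cannot go on together */
	printf("%d\n", constraints_compatible(mux_mask, want_mux3,
					      mux_mask, want_mux5));	/* prints 0 */

	/* disjoint fields never conflict */
	printf("%d\n", constraints_compatible(mux_mask, want_mux3,
					      flag_mask, flag_val));	/* prints 1 */
	return 0;
}

Roughly speaking, power_check_constraints() combines this pairwise "select fields must agree" test with the add_fields/test_adder machinery, which sums the small per-PMC-group count fields so that over-committing a group of counters carries into an error bit.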
Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/Makefile | 2 +- arch/powerpc/kernel/perf_counter.c | 4 + arch/powerpc/kernel/power6-pmu.c | 283 +++++++++++++++++++++++++++++++++++++ 3 files changed, 288 insertions(+), 1 deletion(-) create mode 100644 arch/powerpc/kernel/power6-pmu.c diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 45798f6fb13..0ebf4d04d4b 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -94,7 +94,7 @@ obj-$(CONFIG_AUDIT) += audit.o obj64-$(CONFIG_AUDIT) += compat_audit.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o -obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o ppc970-pmu.o +obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o ppc970-pmu.o power6-pmu.o obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 5561ecb02a4..df3fe057dee 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -742,6 +742,7 @@ static void perf_counter_interrupt(struct pt_regs *regs) } extern struct power_pmu ppc970_pmu; +extern struct power_pmu power6_pmu; static int init_perf_counters(void) { @@ -760,6 +761,9 @@ static int init_perf_counters(void) case PV_970MP: ppmu = &ppc970_pmu; break; + case 0x3e: + ppmu = &power6_pmu; + break; } return 0; } diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c new file mode 100644 index 00000000000..b1f61f3c97b --- /dev/null +++ b/arch/powerpc/kernel/power6-pmu.c @@ -0,0 +1,283 @@ +/* + * Performance counter support for POWER6 processors. + * + * Copyright 2008-2009 Paul Mackerras, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include +#include +#include + +/* + * Bits in event code for POWER6 + */ +#define PM_PMC_SH 20 /* PMC number (1-based) for direct events */ +#define PM_PMC_MSK 0x7 +#define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) +#define PM_UNIT_SH 16 /* Unit event comes (TTMxSEL encoding) */ +#define PM_UNIT_MSK 0xf +#define PM_UNIT_MSKS (PM_UNIT_MSK << PM_UNIT_SH) +#define PM_LLAV 0x8000 /* Load lookahead match value */ +#define PM_LLA 0x4000 /* Load lookahead match enable */ +#define PM_BYTE_SH 12 /* Byte of event bus to use */ +#define PM_BYTE_MSK 3 +#define PM_SUBUNIT_SH 8 /* Subunit event comes from (NEST_SEL enc.) 
*/ +#define PM_SUBUNIT_MSK 7 +#define PM_SUBUNIT_MSKS (PM_SUBUNIT_MSK << PM_SUBUNIT_SH) +#define PM_PMCSEL_MSK 0xff /* PMCxSEL value */ +#define PM_BUSEVENT_MSK 0xf3700 + +/* + * Bits in MMCR1 for POWER6 + */ +#define MMCR1_TTM0SEL_SH 60 +#define MMCR1_TTMSEL_SH(n) (MMCR1_TTM0SEL_SH - (n) * 4) +#define MMCR1_TTMSEL_MSK 0xf +#define MMCR1_TTMSEL(m, n) (((m) >> MMCR1_TTMSEL_SH(n)) & MMCR1_TTMSEL_MSK) +#define MMCR1_NESTSEL_SH 45 +#define MMCR1_NESTSEL_MSK 0x7 +#define MMCR1_NESTSEL(m) (((m) >> MMCR1_NESTSEL_SH) & MMCR1_NESTSEL_MSK) +#define MMCR1_PMC1_LLA ((u64)1 << 44) +#define MMCR1_PMC1_LLA_VALUE ((u64)1 << 39) +#define MMCR1_PMC1_ADDR_SEL ((u64)1 << 35) +#define MMCR1_PMC1SEL_SH 24 +#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) +#define MMCR1_PMCSEL_MSK 0xff + +/* + * Assign PMC numbers and compute MMCR1 value for a set of events + */ +static int p6_compute_mmcr(unsigned int event[], int n_ev, + unsigned int hwc[], u64 mmcr[]) +{ + u64 mmcr1 = 0; + int i; + unsigned int pmc, ev, b, u, s, psel; + unsigned int ttmset = 0; + unsigned int pmc_inuse = 0; + + if (n_ev > 4) + return -1; + for (i = 0; i < n_ev; ++i) { + pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; + if (pmc) { + if (pmc_inuse & (1 << (pmc - 1))) + return -1; /* collision! */ + pmc_inuse |= 1 << (pmc - 1); + } + } + for (i = 0; i < n_ev; ++i) { + ev = event[i]; + pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK; + if (pmc) { + --pmc; + } else { + /* can go on any PMC; find a free one */ + for (pmc = 0; pmc < 4; ++pmc) + if (!(pmc_inuse & (1 << pmc))) + break; + pmc_inuse |= 1 << pmc; + } + hwc[i] = pmc; + psel = ev & PM_PMCSEL_MSK; + if (ev & PM_BUSEVENT_MSK) { + /* this event uses the event bus */ + b = (ev >> PM_BYTE_SH) & PM_BYTE_MSK; + u = (ev >> PM_UNIT_SH) & PM_UNIT_MSK; + /* check for conflict on this byte of event bus */ + if ((ttmset & (1 << b)) && MMCR1_TTMSEL(mmcr1, b) != u) + return -1; + mmcr1 |= (u64)u << MMCR1_TTMSEL_SH(b); + ttmset |= 1 << b; + if (u == 5) { + /* Nest events have a further mux */ + s = (ev >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK; + if ((ttmset & 0x10) && + MMCR1_NESTSEL(mmcr1) != s) + return -1; + ttmset |= 0x10; + mmcr1 |= (u64)s << MMCR1_NESTSEL_SH; + } + if (0x30 <= psel && psel <= 0x3d) { + /* these need the PMCx_ADDR_SEL bits */ + if (b >= 2) + mmcr1 |= MMCR1_PMC1_ADDR_SEL >> pmc; + } + /* bus select values are different for PMC3/4 */ + if (pmc >= 2 && (psel & 0x90) == 0x80) + psel ^= 0x20; + } + if (ev & PM_LLA) { + mmcr1 |= MMCR1_PMC1_LLA >> pmc; + if (ev & PM_LLAV) + mmcr1 |= MMCR1_PMC1_LLA_VALUE >> pmc; + } + mmcr1 |= (u64)psel << MMCR1_PMCSEL_SH(pmc); + } + mmcr[0] = 0; + if (pmc_inuse & 1) + mmcr[0] = MMCR0_PMC1CE; + if (pmc_inuse & 0xe) + mmcr[0] |= MMCR0_PMCjCE; + mmcr[1] = mmcr1; + mmcr[2] = 0; + return 0; +} + +/* + * Layout of constraint bits: + * + * 0-1 add field: number of uses of PMC1 (max 1) + * 2-3, 4-5, 6-7: ditto for PMC2, 3, 4 + * 8-10 select field: nest (subunit) event selector + * 16-19 select field: unit on byte 0 of event bus + * 20-23, 24-27, 28-31 ditto for bytes 1, 2, 3 + */ +static int p6_get_constraint(unsigned int event, u64 *maskp, u64 *valp) +{ + int pmc, byte, sh; + unsigned int mask = 0, value = 0; + + pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; + if (pmc) { + if (pmc > 4) + return -1; + sh = (pmc - 1) * 2; + mask |= 2 << sh; + value |= 1 << sh; + } + if (event & PM_BUSEVENT_MSK) { + byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; + sh = byte * 4; + mask |= PM_UNIT_MSKS << sh; + value |= (event & PM_UNIT_MSKS) << sh; + if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) { + 
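/* unit 5 is the nest unit; these events must also agree on NEST_SEL */ +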
mask |= PM_SUBUNIT_MSKS; + value |= event & PM_SUBUNIT_MSKS; + } + } + *maskp = mask; + *valp = value; + return 0; +} + +#define MAX_ALT 4 /* at most 4 alternatives for any event */ + +static const unsigned int event_alternatives[][MAX_ALT] = { + { 0x0130e8, 0x2000f6, 0x3000fc }, /* PM_PTEG_RELOAD_VALID */ + { 0x080080, 0x10000d, 0x30000c, 0x4000f0 }, /* PM_LD_MISS_L1 */ + { 0x080088, 0x200054, 0x3000f0 }, /* PM_ST_MISS_L1 */ + { 0x10000a, 0x2000f4 }, /* PM_RUN_CYC */ + { 0x10000b, 0x2000f5 }, /* PM_RUN_COUNT */ + { 0x10000e, 0x400010 }, /* PM_PURR */ + { 0x100010, 0x4000f8 }, /* PM_FLUSH */ + { 0x10001a, 0x200010 }, /* PM_MRK_INST_DISP */ + { 0x100026, 0x3000f8 }, /* PM_TB_BIT_TRANS */ + { 0x100054, 0x2000f0 }, /* PM_ST_FIN */ + { 0x100056, 0x2000fc }, /* PM_L1_ICACHE_MISS */ + { 0x1000f0, 0x40000a }, /* PM_INST_IMC_MATCH_CMPL */ + { 0x1000f8, 0x200008 }, /* PM_GCT_EMPTY_CYC */ + { 0x1000fc, 0x400006 }, /* PM_LSU_DERAT_MISS_CYC */ + { 0x20000e, 0x400007 }, /* PM_LSU_DERAT_MISS */ + { 0x200012, 0x300012 }, /* PM_INST_DISP */ + { 0x2000f2, 0x3000f2 }, /* PM_INST_DISP */ + { 0x2000f8, 0x300010 }, /* PM_EXT_INT */ + { 0x2000fe, 0x300056 }, /* PM_DATA_FROM_L2MISS */ + { 0x2d0030, 0x30001a }, /* PM_MRK_FPU_FIN */ + { 0x30000a, 0x400018 }, /* PM_MRK_INST_FIN */ + { 0x3000f6, 0x40000e }, /* PM_L1_DCACHE_RELOAD_VALID */ + { 0x3000fe, 0x400056 }, /* PM_DATA_FROM_L3MISS */ +}; + +/* + * This could be made more efficient with a binary search on + * a presorted list, if necessary + */ +static int find_alternatives_list(unsigned int event) +{ + int i, j; + unsigned int alt; + + for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { + if (event < event_alternatives[i][0]) + return -1; + for (j = 0; j < MAX_ALT; ++j) { + alt = event_alternatives[i][j]; + if (!alt || event < alt) + break; + if (event == alt) + return i; + } + } + return -1; +} + +static int p6_get_alternatives(unsigned int event, unsigned int alt[]) +{ + int i, j; + unsigned int aevent, psel, pmc; + unsigned int nalt = 1; + + alt[0] = event; + + /* check the alternatives table */ + i = find_alternatives_list(event); + if (i >= 0) { + /* copy out alternatives from list */ + for (j = 0; j < MAX_ALT; ++j) { + aevent = event_alternatives[i][j]; + if (!aevent) + break; + if (aevent != event) + alt[nalt++] = aevent; + } + + } else { + /* Check for alternative ways of computing sum events */ + /* PMCSEL 0x32 counter N == PMCSEL 0x34 counter 5-N */ + psel = event & (PM_PMCSEL_MSK & ~1); /* ignore edge bit */ + pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; + if (pmc && (psel == 0x32 || psel == 0x34)) + alt[nalt++] = ((event ^ 0x6) & ~PM_PMC_MSKS) | + ((5 - pmc) << PM_PMC_SH); + + /* PMCSEL 0x38 counter N == PMCSEL 0x3a counter N+/-2 */ + if (pmc && (psel == 0x38 || psel == 0x3a)) + alt[nalt++] = ((event ^ 0x2) & ~PM_PMC_MSKS) | + ((pmc > 2? 
pmc - 2: pmc + 2) << PM_PMC_SH); + } + + return nalt; +} + +static void p6_disable_pmc(unsigned int pmc, u64 mmcr[]) +{ + /* Set PMCxSEL to 0 to disable PMCx */ + mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc)); +} + +static int power6_generic_events[] = { + [PERF_COUNT_CPU_CYCLES] = 0x1e, + [PERF_COUNT_INSTRUCTIONS] = 2, + [PERF_COUNT_CACHE_REFERENCES] = 0x280030, /* LD_REF_L1 */ + [PERF_COUNT_CACHE_MISSES] = 0x30000c, /* LD_MISS_L1 */ + [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x410a0, /* BR_PRED */ + [PERF_COUNT_BRANCH_MISSES] = 0x400052, /* BR_MPRED */ +}; + +struct power_pmu power6_pmu = { + .n_counter = 4, + .max_alternatives = MAX_ALT, + .add_fields = 0x55, + .test_adder = 0, + .compute_mmcr = p6_compute_mmcr, + .get_constraint = p6_get_constraint, + .get_alternatives = p6_get_alternatives, + .disable_pmc = p6_disable_pmc, + .n_generic = ARRAY_SIZE(power6_generic_events), + .generic_events = power6_generic_events, +}; -- cgit v1.2.3 From dd0e6ba22ea21bcc2c420b385a170593c58f4c08 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 12 Jan 2009 15:11:00 +1100 Subject: perf_counter: Always schedule all software counters in Software counters aren't subject to the limitations imposed by the fixed number of hardware counter registers, so there is no reason not to enable them all in __perf_counter_sched_in. Previously we used to break out of the loop when we got to a group that wouldn't fit on the PMU; with this we continue through the list but only schedule in software counters (or groups containing only software counters) from there on. Signed-off-by: Paul Mackerras --- kernel/perf_counter.c | 33 ++++++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 4c0dccb756a..3aef3062ff7 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -455,12 +455,37 @@ group_error: return -EAGAIN; } +/* + * Return 1 for a software counter, 0 for a hardware counter + */ +static inline int is_software_counter(struct perf_counter *counter) +{ + return !counter->hw_event.raw && counter->hw_event.type < 0; +} + +/* + * Return 1 for a group consisting entirely of software counters, + * 0 if the group contains any hardware counters. + */ +static int is_software_only_group(struct perf_counter *leader) +{ + struct perf_counter *counter; + + if (!is_software_counter(leader)) + return 0; + list_for_each_entry(counter, &leader->sibling_list, list_entry) + if (!is_software_counter(counter)) + return 0; + return 1; +} + static void __perf_counter_sched_in(struct perf_counter_context *ctx, struct perf_cpu_context *cpuctx, int cpu) { struct perf_counter *counter; u64 flags; + int can_add_hw = 1; if (likely(!ctx->nr_counters)) return; @@ -477,10 +502,12 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, /* * If we scheduled in a group atomically and exclusively, - * or if this group can't go on, break out: + * or if this group can't go on, don't add any more + * hardware counters. 
*/ - if (group_sched_in(counter, cpuctx, ctx, cpu)) - break; + if (can_add_hw || is_software_only_group(counter)) + if (group_sched_in(counter, cpuctx, ctx, cpu)) + can_add_hw = 0; } hw_perf_restore(flags); spin_unlock(&ctx->lock); -- cgit v1.2.3 From 01d0287f068de2934109ba9b989d8807526cccc2 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 14 Jan 2009 13:44:19 +1100 Subject: powerpc/perf_counter: Make sure PMU gets enabled properly This makes sure that we call the platform-specific ppc_md.enable_pmcs function on each CPU before we try to use the PMU on that CPU. If the CPU goes off-line and then on-line, we need to do the enable_pmcs call again, so we use the hw_perf_counter_setup hook to ensure that. It gets called as each CPU comes online, but it isn't called on the CPU that is coming up, so this adds the CPU number as an argument to it (there were no non-empty instances of hw_perf_counter_setup before). This also arranges to set the pmcregs_in_use field of the lppaca (data structure shared with the hypervisor) on each CPU when we are using the PMU and clear it when we are not. This allows the hypervisor to optimize partition switches by not saving/restoring the PMU registers when we aren't using the PMU. Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/perf_counter.c | 22 ++++++++++++++++++++++ kernel/perf_counter.c | 4 ++-- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index df3fe057dee..85ad25923c2 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -15,6 +15,7 @@ #include #include #include +#include struct cpu_hw_counters { int n_counters; @@ -24,6 +25,7 @@ struct cpu_hw_counters { struct perf_counter *counter[MAX_HWCOUNTERS]; unsigned int events[MAX_HWCOUNTERS]; u64 mmcr[3]; + u8 pmcs_enabled; }; DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters); @@ -261,6 +263,15 @@ u64 hw_perf_save_disable(void) cpuhw->disabled = 1; cpuhw->n_added = 0; + /* + * Check if we ever enabled the PMU on this cpu. + */ + if (!cpuhw->pmcs_enabled) { + if (ppc_md.enable_pmcs) + ppc_md.enable_pmcs(); + cpuhw->pmcs_enabled = 1; + } + /* * Set the 'freeze counters' bit. * The barrier is to make sure the mtspr has been @@ -305,6 +316,8 @@ void hw_perf_restore(u64 disable) mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); mtspr(SPRN_MMCR0, cpuhw->mmcr[0]); + if (cpuhw->n_counters == 0) + get_lppaca()->pmcregs_in_use = 0; goto out; } @@ -323,6 +336,7 @@ void hw_perf_restore(u64 disable) * bit set and set the hardware counters to their initial values. * Then unfreeze the counters. 
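* We also set pmcregs_in_use in the lppaca so that the hypervisor
* knows to save and restore the PMU registers across partition
* switches while we are using the PMU.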
*/ + get_lppaca()->pmcregs_in_use = 1; mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) @@ -741,6 +755,14 @@ static void perf_counter_interrupt(struct pt_regs *regs) } } +void hw_perf_counter_setup(int cpu) +{ + struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu); + + memset(cpuhw, 0, sizeof(*cpuhw)); + cpuhw->mmcr[0] = MMCR0_FC; +} + extern struct power_pmu ppc970_pmu; extern struct power_pmu power6_pmu; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 3aef3062ff7..52f2f526248 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -46,7 +46,7 @@ hw_perf_counter_init(struct perf_counter *counter) u64 __weak hw_perf_save_disable(void) { return 0; } void __weak hw_perf_restore(u64 ctrl) { barrier(); } -void __weak hw_perf_counter_setup(void) { barrier(); } +void __weak hw_perf_counter_setup(int cpu) { barrier(); } int __weak hw_perf_group_sched_in(struct perf_counter *group_leader, struct perf_cpu_context *cpuctx, struct perf_counter_context *ctx, int cpu) @@ -1598,7 +1598,7 @@ static void __cpuinit perf_counter_init_cpu(int cpu) cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu; mutex_unlock(&perf_resource_mutex); - hw_perf_counter_setup(); + hw_perf_counter_setup(cpu); } #ifdef CONFIG_HOTPLUG_CPU -- cgit v1.2.3 From 3b6f9e5cb21964b7ce12bf81076f830885563ec8 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 14 Jan 2009 21:00:30 +1100 Subject: perf_counter: Add support for pinned and exclusive counter groups Impact: New perf_counter features A pinned counter group is one that the user wants to have on the CPU whenever possible, i.e. whenever the associated task is running, for a per-task group, or always for a per-cpu group. If the system cannot satisfy that, it puts the group into an error state where it is not scheduled any more and reads from it return EOF (i.e. 0 bytes read). The group can be released from error state and made readable again using prctl(PR_TASK_PERF_COUNTERS_ENABLE). When we have finer-grained enable/disable controls on counters we'll be able to reset the error state on individual groups. An exclusive group is one that the user wants to be the only group using the CPU performance monitor hardware whenever it is on. The counter group scheduler will not schedule an exclusive group if there are already other groups on the CPU and will not schedule other groups onto the CPU if there is an exclusive group scheduled (that statement does not apply to groups containing only software counters, which can always go on and which do not prevent an exclusive group from going on). With an exclusive group, we will be able to let users program PMU registers at a low level without the concern that those settings will perturb other measurements. Along the way this reorganizes things a little: - is_software_counter() is moved to perf_counter.h. - cpuctx->active_oncpu now records the number of hardware counters on the CPU, i.e. it now excludes software counters. Nothing was reading cpuctx->active_oncpu before, so this change is harmless. - A new cpuctx->exclusive field records whether we currently have an exclusive group on the CPU. - counter_sched_out moves higher up in perf_counter.c and gets called from __perf_counter_remove_from_context and __perf_counter_exit_task, where we used to have essentially the same code. 
- __perf_counter_sched_in now goes through the counter list twice, doing the pinned counters in the first loop and the non-pinned counters in the second loop, in order to give the pinned counters the best chance to be scheduled in. Note that only a group leader can be exclusive or pinned, and that attribute applies to the whole group. This avoids some awkwardness in some corner cases (e.g. where a group leader is closed and the other group members get added to the context list). If we want to relax that restriction later, we can, and it is easier to relax a restriction than to apply a new one. This doesn't yet handle the case where a pinned counter is inherited and goes into error state in the child - the error state is not propagated up to the parent when the child exits, and arguably it should. Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/perf_counter.c | 10 +- include/linux/perf_counter.h | 15 ++- kernel/perf_counter.c | 226 +++++++++++++++++++++++++------------ 3 files changed, 169 insertions(+), 82 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 85ad25923c2..5b0211348c7 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -35,14 +35,6 @@ void perf_counter_print_debug(void) { } -/* - * Return 1 for a software counter, 0 for a hardware counter - */ -static inline int is_software_counter(struct perf_counter *counter) -{ - return !counter->hw_event.raw && counter->hw_event.type < 0; -} - /* * Read one performance monitor counter (PMC). */ @@ -443,6 +435,7 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader, */ for (i = n0; i < n0 + n; ++i) cpuhw->counter[i]->hw.config = cpuhw->events[i]; + cpuctx->active_oncpu += n; n = 1; counter_sched_in(group_leader, cpu); list_for_each_entry(sub, &group_leader->sibling_list, list_entry) { @@ -451,7 +444,6 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader, ++n; } } - cpuctx->active_oncpu += n; ctx->nr_active += n; return 1; diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index b21d1ea4c05..7ab8e5f96f5 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -86,7 +86,10 @@ struct perf_counter_hw_event { nmi : 1, /* NMI sampling */ raw : 1, /* raw event type */ inherit : 1, /* children inherit it */ - __reserved_1 : 28; + pinned : 1, /* must always be on PMU */ + exclusive : 1, /* only counter on PMU */ + + __reserved_1 : 26; u64 __reserved_2; }; @@ -141,6 +144,7 @@ struct hw_perf_counter_ops { * enum perf_counter_active_state - the states of a counter */ enum perf_counter_active_state { + PERF_COUNTER_STATE_ERROR = -2, PERF_COUNTER_STATE_OFF = -1, PERF_COUNTER_STATE_INACTIVE = 0, PERF_COUNTER_STATE_ACTIVE = 1, @@ -214,6 +218,7 @@ struct perf_cpu_context { struct perf_counter_context *task_ctx; int active_oncpu; int max_pertask; + int exclusive; }; /* @@ -240,6 +245,14 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader, struct perf_cpu_context *cpuctx, struct perf_counter_context *ctx, int cpu); +/* + * Return 1 for a software counter, 0 for a hardware counter + */ +static inline int is_software_counter(struct perf_counter *counter) +{ + return !counter->hw_event.raw && counter->hw_event.type < 0; +} + #else static inline void perf_counter_task_sched_in(struct task_struct *task, int cpu) { } diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 52f2f526248..faf671b2956 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -93,6 +93,25 
@@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) } } +static void +counter_sched_out(struct perf_counter *counter, + struct perf_cpu_context *cpuctx, + struct perf_counter_context *ctx) +{ + if (counter->state != PERF_COUNTER_STATE_ACTIVE) + return; + + counter->state = PERF_COUNTER_STATE_INACTIVE; + counter->hw_ops->disable(counter); + counter->oncpu = -1; + + if (!is_software_counter(counter)) + cpuctx->active_oncpu--; + ctx->nr_active--; + if (counter->hw_event.exclusive || !cpuctx->active_oncpu) + cpuctx->exclusive = 0; +} + /* * Cross CPU call to remove a performance counter * @@ -118,14 +137,9 @@ static void __perf_counter_remove_from_context(void *info) curr_rq_lock_irq_save(&flags); spin_lock(&ctx->lock); - if (counter->state == PERF_COUNTER_STATE_ACTIVE) { - counter->state = PERF_COUNTER_STATE_INACTIVE; - counter->hw_ops->disable(counter); - ctx->nr_active--; - cpuctx->active_oncpu--; - counter->task = NULL; - counter->oncpu = -1; - } + counter_sched_out(counter, cpuctx, ctx); + + counter->task = NULL; ctx->nr_counters--; /* @@ -207,7 +221,7 @@ counter_sched_in(struct perf_counter *counter, struct perf_counter_context *ctx, int cpu) { - if (counter->state == PERF_COUNTER_STATE_OFF) + if (counter->state <= PERF_COUNTER_STATE_OFF) return 0; counter->state = PERF_COUNTER_STATE_ACTIVE; @@ -223,12 +237,63 @@ counter_sched_in(struct perf_counter *counter, return -EAGAIN; } - cpuctx->active_oncpu++; + if (!is_software_counter(counter)) + cpuctx->active_oncpu++; ctx->nr_active++; + if (counter->hw_event.exclusive) + cpuctx->exclusive = 1; + return 0; } +/* + * Return 1 for a group consisting entirely of software counters, + * 0 if the group contains any hardware counters. + */ +static int is_software_only_group(struct perf_counter *leader) +{ + struct perf_counter *counter; + + if (!is_software_counter(leader)) + return 0; + list_for_each_entry(counter, &leader->sibling_list, list_entry) + if (!is_software_counter(counter)) + return 0; + return 1; +} + +/* + * Work out whether we can put this counter group on the CPU now. + */ +static int group_can_go_on(struct perf_counter *counter, + struct perf_cpu_context *cpuctx, + int can_add_hw) +{ + /* + * Groups consisting entirely of software counters can always go on. + */ + if (is_software_only_group(counter)) + return 1; + /* + * If an exclusive group is already on, no other hardware + * counters can go on. + */ + if (cpuctx->exclusive) + return 0; + /* + * If this group is exclusive and there are already + * counters on the CPU, it can't go on. + */ + if (counter->hw_event.exclusive && cpuctx->active_oncpu) + return 0; + /* + * Otherwise, try to add it if all previous groups were able + * to go on. + */ + return can_add_hw; +} + /* * Cross CPU call to install and enable a performance counter */ @@ -240,6 +305,7 @@ static void __perf_install_in_context(void *info) int cpu = smp_processor_id(); unsigned long flags; u64 perf_flags; + int err; /* * If this is a task context, we need to check whether it is @@ -261,9 +327,21 @@ static void __perf_install_in_context(void *info) list_add_counter(counter, ctx); ctx->nr_counters++; - counter_sched_in(counter, cpuctx, ctx, cpu); + /* + * An exclusive counter can't go on if there are already active + * hardware counters, and no hardware counter can go on if there + * is already an exclusive counter on. 
+ */ + if (counter->state == PERF_COUNTER_STATE_INACTIVE && + !group_can_go_on(counter, cpuctx, 1)) + err = -EEXIST; + else + err = counter_sched_in(counter, cpuctx, ctx, cpu); + + if (err && counter->hw_event.pinned) + counter->state = PERF_COUNTER_STATE_ERROR; - if (!ctx->task && cpuctx->max_pertask) + if (!err && !ctx->task && cpuctx->max_pertask) cpuctx->max_pertask--; hw_perf_restore(perf_flags); @@ -326,22 +404,6 @@ retry: spin_unlock_irq(&ctx->lock); } -static void -counter_sched_out(struct perf_counter *counter, - struct perf_cpu_context *cpuctx, - struct perf_counter_context *ctx) -{ - if (counter->state != PERF_COUNTER_STATE_ACTIVE) - return; - - counter->state = PERF_COUNTER_STATE_INACTIVE; - counter->hw_ops->disable(counter); - counter->oncpu = -1; - - cpuctx->active_oncpu--; - ctx->nr_active--; -} - static void group_sched_out(struct perf_counter *group_counter, struct perf_cpu_context *cpuctx, @@ -359,6 +421,9 @@ group_sched_out(struct perf_counter *group_counter, */ list_for_each_entry(counter, &group_counter->sibling_list, list_entry) counter_sched_out(counter, cpuctx, ctx); + + if (group_counter->hw_event.exclusive) + cpuctx->exclusive = 0; } void __perf_counter_sched_out(struct perf_counter_context *ctx, @@ -455,30 +520,6 @@ group_error: return -EAGAIN; } -/* - * Return 1 for a software counter, 0 for a hardware counter - */ -static inline int is_software_counter(struct perf_counter *counter) -{ - return !counter->hw_event.raw && counter->hw_event.type < 0; -} - -/* - * Return 1 for a group consisting entirely of software counters, - * 0 if the group contains any hardware counters. - */ -static int is_software_only_group(struct perf_counter *leader) -{ - struct perf_counter *counter; - - if (!is_software_counter(leader)) - return 0; - list_for_each_entry(counter, &leader->sibling_list, list_entry) - if (!is_software_counter(counter)) - return 0; - return 1; -} - static void __perf_counter_sched_in(struct perf_counter_context *ctx, struct perf_cpu_context *cpuctx, int cpu) @@ -492,7 +533,38 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, spin_lock(&ctx->lock); flags = hw_perf_save_disable(); + + /* + * First go through the list and put on any pinned groups + * in order to give them the best chance of going on. + */ + list_for_each_entry(counter, &ctx->counter_list, list_entry) { + if (counter->state <= PERF_COUNTER_STATE_OFF || + !counter->hw_event.pinned) + continue; + if (counter->cpu != -1 && counter->cpu != cpu) + continue; + + if (group_can_go_on(counter, cpuctx, 1)) + group_sched_in(counter, cpuctx, ctx, cpu); + + /* + * If this pinned group hasn't been scheduled, + * put it in error state. + */ + if (counter->state == PERF_COUNTER_STATE_INACTIVE) + counter->state = PERF_COUNTER_STATE_ERROR; + } + list_for_each_entry(counter, &ctx->counter_list, list_entry) { + /* + * Ignore counters in OFF or ERROR state, and + * ignore pinned counters since we did them already. + */ + if (counter->state <= PERF_COUNTER_STATE_OFF || + counter->hw_event.pinned) + continue; + /* * Listen to the 'cpu' scheduling filter constraint * of counters: @@ -500,14 +572,10 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, if (counter->cpu != -1 && counter->cpu != cpu) continue; - /* - * If we scheduled in a group atomically and exclusively, - * or if this group can't go on, don't add any more - * hardware counters. 
- */ - if (can_add_hw || is_software_only_group(counter)) + if (group_can_go_on(counter, cpuctx, can_add_hw)) { if (group_sched_in(counter, cpuctx, ctx, cpu)) can_add_hw = 0; + } } hw_perf_restore(flags); spin_unlock(&ctx->lock); @@ -567,8 +635,10 @@ int perf_counter_task_disable(void) */ perf_flags = hw_perf_save_disable(); - list_for_each_entry(counter, &ctx->counter_list, list_entry) - counter->state = PERF_COUNTER_STATE_OFF; + list_for_each_entry(counter, &ctx->counter_list, list_entry) { + if (counter->state != PERF_COUNTER_STATE_ERROR) + counter->state = PERF_COUNTER_STATE_OFF; + } hw_perf_restore(perf_flags); @@ -607,7 +677,7 @@ int perf_counter_task_enable(void) perf_flags = hw_perf_save_disable(); list_for_each_entry(counter, &ctx->counter_list, list_entry) { - if (counter->state != PERF_COUNTER_STATE_OFF) + if (counter->state > PERF_COUNTER_STATE_OFF) continue; counter->state = PERF_COUNTER_STATE_INACTIVE; counter->hw_event.disabled = 0; @@ -849,6 +919,14 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) if (count != sizeof(cntval)) return -EINVAL; + /* + * Return end-of-file for a read on a counter that is in + * error state (i.e. because it was pinned but it couldn't be + * scheduled on to the CPU at some point). + */ + if (counter->state == PERF_COUNTER_STATE_ERROR) + return 0; + mutex_lock(&counter->mutex); cntval = perf_counter_read(counter); mutex_unlock(&counter->mutex); @@ -884,7 +962,7 @@ perf_read_irq_data(struct perf_counter *counter, { struct perf_data *irqdata, *usrdata; DECLARE_WAITQUEUE(wait, current); - ssize_t res; + ssize_t res, res2; irqdata = counter->irqdata; usrdata = counter->usrdata; @@ -905,6 +983,9 @@ perf_read_irq_data(struct perf_counter *counter, if (signal_pending(current)) break; + if (counter->state == PERF_COUNTER_STATE_ERROR) + break; + spin_unlock_irq(&counter->waitq.lock); schedule(); spin_lock_irq(&counter->waitq.lock); @@ -913,7 +994,8 @@ perf_read_irq_data(struct perf_counter *counter, __set_current_state(TASK_RUNNING); spin_unlock_irq(&counter->waitq.lock); - if (usrdata->len + irqdata->len < count) + if (usrdata->len + irqdata->len < count && + counter->state != PERF_COUNTER_STATE_ERROR) return -ERESTARTSYS; read_pending: mutex_lock(&counter->mutex); @@ -925,11 +1007,12 @@ read_pending: /* Switch irq buffer: */ usrdata = perf_switch_irq_data(counter); - if (perf_copy_usrdata(usrdata, buf + res, count - res) < 0) { + res2 = perf_copy_usrdata(usrdata, buf + res, count - res); + if (res2 < 0) { if (!res) res = -EFAULT; } else { - res = count; + res += res2; } out: mutex_unlock(&counter->mutex); @@ -1348,6 +1431,11 @@ sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr __user, */ if (group_leader->ctx != ctx) goto err_put_context; + /* + * Only a group leader can be exclusive or pinned + */ + if (hw_event.exclusive || hw_event.pinned) + goto err_put_context; } ret = -EINVAL; @@ -1473,13 +1561,7 @@ __perf_counter_exit_task(struct task_struct *child, cpuctx = &__get_cpu_var(perf_cpu_context); - if (child_counter->state == PERF_COUNTER_STATE_ACTIVE) { - child_counter->state = PERF_COUNTER_STATE_INACTIVE; - child_counter->hw_ops->disable(child_counter); - cpuctx->active_oncpu--; - child_ctx->nr_active--; - child_counter->oncpu = -1; - } + counter_sched_out(child_counter, cpuctx, child_ctx); list_del_init(&child_counter->list_entry); -- cgit v1.2.3 From d859e29fe34cb833071b20aef860ee94fbad9bb2 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sat, 17 Jan 2009 18:10:22 +1100 Subject: perf_counter: Add 
counter enable/disable ioctls Impact: New perf_counter features This primarily adds a way for perf_counter users to enable and disable counters and groups. Enabling or disabling a counter or group also enables or disables all of the child counters that have been cloned from it to monitor children of the task monitored by the top-level counter. The userspace interface to enable/disable counters is via ioctl on the counter file descriptor. Along the way this extends the code that handles child counters to handle child counter groups properly. A group with multiple counters will be cloned to child tasks if and only if the group leader has the hw_event.inherit bit set - if it is set the whole group is cloned as a group in the child task. In order to be able to enable or disable all child counters of a given top-level counter, we need a way to find them all. Hence I have added a child_list field to struct perf_counter, which is the head of the list of children for a top-level counter, or the link in that list for a child counter. That list is protected by the perf_counter.mutex field. This also adds a mutex to the perf_counter_context struct. Previously the list of counters was protected just by the lock field in the context, which meant that perf_counter_init_task had to take that lock and then take whatever lock/mutex protects the top-level counter's child_list. But the counter enable/disable functions need to take that lock in order to traverse the list, then for each counter take the lock in that counter's context in order to change the counter's state safely, which would lead to a deadlock. To solve this, we now have both a mutex and a spinlock in the context, and taking either is sufficient to ensure the list of counters can't change - you have to take both before changing the list. Now perf_counter_init_task takes the mutex instead of the lock (which incidentally means that inherit_counter can use GFP_KERNEL instead of GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new enable/disable functions can take the mutex while traversing the list of child counters without incurring a possible deadlock when the counter manipulation code locks the context for a child counter. We also had a misfeature that the first counter added to a context would possibly not go on until the next sched-in, because we were using ctx->nr_active to detect if the context was running on a CPU. But nr_active is the number of active counters, and if that was zero (because the context didn't have any counters yet) it would look like the context wasn't running on a CPU and so the retry code in __perf_install_in_context wouldn't retry. So this adds an 'is_active' field that is set when the context is on a CPU, even if it has no counters. The is_active field is only used for task contexts, not for per-cpu contexts. If we enable a subsidiary counter in a group that is active on a CPU, and the arch code can't enable the counter, then we have to pull the whole group off the CPU. We do this with group_sched_out, which gets moved up in the file so it comes before all its callers. This also adds similar logic to __perf_install_in_context so that the "all on, or none" invariant of groups is preserved when adding a new counter to a group.
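To illustrate the resulting user-space interface, here is a rough sketch (under assumptions: the fd comes from the perf_counter_open syscall through a wrapper that is not shown, the counter was created with hw_event.disabled set, and do_work() is a hypothetical stand-in for the code being measured):

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>		/* for _IO() */

/* ioctl numbers as defined by this patch */
#define PERF_COUNTER_IOC_ENABLE		_IO('$', 0)
#define PERF_COUNTER_IOC_DISABLE	_IO('$', 1)

static void do_work(void)
{
	/* hypothetical workload to be measured */
}

static int count_work(int fd)
{
	uint64_t count;

	/* enables this counter and all children cloned from it */
	if (ioctl(fd, PERF_COUNTER_IOC_ENABLE) < 0)
		return -1;
	do_work();
	if (ioctl(fd, PERF_COUNTER_IOC_DISABLE) < 0)
		return -1;

	/* a 0-byte read (EOF) would mean the counter is in error state */
	if (read(fd, &count, sizeof(count)) != sizeof(count))
		return -1;
	printf("count: %llu\n", (unsigned long long)count);
	return 0;
}

Disabling again before the read is not required; it just keeps the reported count scoped to do_work().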
Signed-off-by: Paul Mackerras --- include/linux/perf_counter.h | 21 +- kernel/perf_counter.c | 455 +++++++++++++++++++++++++++++++++++++------ 2 files changed, 415 insertions(+), 61 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 7ab8e5f96f5..33ba9fe0a78 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -14,6 +14,7 @@ #define _LINUX_PERF_COUNTER_H #include +#include #ifdef CONFIG_PERF_COUNTERS # include @@ -94,6 +95,12 @@ struct perf_counter_hw_event { u64 __reserved_2; }; +/* + * Ioctls that can be done on a perf counter fd: + */ +#define PERF_COUNTER_IOC_ENABLE _IO('$', 0) +#define PERF_COUNTER_IOC_DISABLE _IO('$', 1) + /* * Kernel-internal data types: */ @@ -173,8 +180,10 @@ struct perf_counter { struct file *filp; struct perf_counter *parent; + struct list_head child_list; + /* - * Protect attach/detach: + * Protect attach/detach and child_list: */ struct mutex mutex; @@ -199,13 +208,21 @@ struct perf_counter { struct perf_counter_context { #ifdef CONFIG_PERF_COUNTERS /* - * Protect the list of counters: + * Protect the states of the counters in the list, + * nr_active, and the list: */ spinlock_t lock; + /* + * Protect the list of counters. Locking either mutex or lock + * is sufficient to ensure the list doesn't change; to change + * the list you need to lock both the mutex and the spinlock. + */ + struct mutex mutex; struct list_head counter_list; int nr_counters; int nr_active; + int is_active; struct task_struct *task; #endif }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index faf671b2956..1ac18daa424 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -112,6 +112,28 @@ counter_sched_out(struct perf_counter *counter, cpuctx->exclusive = 0; } +static void +group_sched_out(struct perf_counter *group_counter, + struct perf_cpu_context *cpuctx, + struct perf_counter_context *ctx) +{ + struct perf_counter *counter; + + if (group_counter->state != PERF_COUNTER_STATE_ACTIVE) + return; + + counter_sched_out(group_counter, cpuctx, ctx); + + /* + * Schedule out siblings (if any): + */ + list_for_each_entry(counter, &group_counter->sibling_list, list_entry) + counter_sched_out(counter, cpuctx, ctx); + + if (group_counter->hw_event.exclusive) + cpuctx->exclusive = 0; +} + /* * Cross CPU call to remove a performance counter * @@ -168,7 +190,7 @@ static void __perf_counter_remove_from_context(void *info) /* * Remove the counter from a task's (or a CPU's) list of counters. * - * Must be called with counter->mutex held. + * Must be called with counter->mutex and ctx->mutex held. * * CPU counters are removed with a smp call. For task counters we only * call when the task is on a CPU. @@ -215,6 +237,99 @@ retry: spin_unlock_irq(&ctx->lock); } +/* + * Cross CPU call to disable a performance counter + */ +static void __perf_counter_disable(void *info) +{ + struct perf_counter *counter = info; + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct perf_counter_context *ctx = counter->ctx; + unsigned long flags; + + /* + * If this is a per-task counter, need to check whether this + * counter's task is the current task on this cpu. + */ + if (ctx->task && cpuctx->task_ctx != ctx) + return; + + curr_rq_lock_irq_save(&flags); + spin_lock(&ctx->lock); + + /* + * If the counter is on, turn it off. + * If it is in error state, leave it in error state. 
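+ * (Error state marks a pinned counter that could not be scheduled;
+ * reads on it return end-of-file until it is enabled again.)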
+ */ + if (counter->state >= PERF_COUNTER_STATE_INACTIVE) { + if (counter == counter->group_leader) + group_sched_out(counter, cpuctx, ctx); + else + counter_sched_out(counter, cpuctx, ctx); + counter->state = PERF_COUNTER_STATE_OFF; + } + + spin_unlock(&ctx->lock); + curr_rq_unlock_irq_restore(&flags); +} + +/* + * Disable a counter. + */ +static void perf_counter_disable(struct perf_counter *counter) +{ + struct perf_counter_context *ctx = counter->ctx; + struct task_struct *task = ctx->task; + + if (!task) { + /* + * Disable the counter on the cpu that it's on + */ + smp_call_function_single(counter->cpu, __perf_counter_disable, + counter, 1); + return; + } + + retry: + task_oncpu_function_call(task, __perf_counter_disable, counter); + + spin_lock_irq(&ctx->lock); + /* + * If the counter is still active, we need to retry the cross-call. + */ + if (counter->state == PERF_COUNTER_STATE_ACTIVE) { + spin_unlock_irq(&ctx->lock); + goto retry; + } + + /* + * Since we have the lock this context can't be scheduled + * in, so we can change the state safely. + */ + if (counter->state == PERF_COUNTER_STATE_INACTIVE) + counter->state = PERF_COUNTER_STATE_OFF; + + spin_unlock_irq(&ctx->lock); +} + +/* + * Disable a counter and all its children. + */ +static void perf_counter_disable_family(struct perf_counter *counter) +{ + struct perf_counter *child; + + perf_counter_disable(counter); + + /* + * Lock the mutex to protect the list of children + */ + mutex_lock(&counter->mutex); + list_for_each_entry(child, &counter->child_list, child_list) + perf_counter_disable(child); + mutex_unlock(&counter->mutex); +} + static int counter_sched_in(struct perf_counter *counter, struct perf_cpu_context *cpuctx, @@ -302,6 +417,7 @@ static void __perf_install_in_context(void *info) struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_counter *counter = info; struct perf_counter_context *ctx = counter->ctx; + struct perf_counter *leader = counter->group_leader; int cpu = smp_processor_id(); unsigned long flags; u64 perf_flags; @@ -327,23 +443,40 @@ static void __perf_install_in_context(void *info) list_add_counter(counter, ctx); ctx->nr_counters++; + /* + * Don't put the counter on if it is disabled or if + * it is in a group and the group isn't on. + */ + if (counter->state != PERF_COUNTER_STATE_INACTIVE || + (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)) + goto unlock; + /* * An exclusive counter can't go on if there are already active * hardware counters, and no hardware counter can go on if there * is already an exclusive counter on. */ - if (counter->state == PERF_COUNTER_STATE_INACTIVE && - !group_can_go_on(counter, cpuctx, 1)) + if (!group_can_go_on(counter, cpuctx, 1)) err = -EEXIST; else err = counter_sched_in(counter, cpuctx, ctx, cpu); - if (err && counter->hw_event.pinned) - counter->state = PERF_COUNTER_STATE_ERROR; + if (err) { + /* + * This counter couldn't go on. If it is in a group + * then we have to pull the whole group off. + * If the counter group is pinned then put it in error state. + */ + if (leader != counter) + group_sched_out(leader, cpuctx, ctx); + if (leader->hw_event.pinned) + leader->state = PERF_COUNTER_STATE_ERROR; + } if (!err && !ctx->task && cpuctx->max_pertask) cpuctx->max_pertask--; + unlock: hw_perf_restore(perf_flags); spin_unlock(&ctx->lock); @@ -359,6 +492,8 @@ static void __perf_install_in_context(void *info) * If the counter is attached to a task which is on a CPU we use a smp * call to enable it in the task context. 
The task might have been * scheduled away, but we check this in the smp call again. + * + * Must be called with ctx->mutex held. */ static void perf_install_in_context(struct perf_counter_context *ctx, @@ -387,7 +522,7 @@ retry: /* * we need to retry the smp call. */ - if (ctx->nr_active && list_empty(&counter->list_entry)) { + if (ctx->is_active && list_empty(&counter->list_entry)) { spin_unlock_irq(&ctx->lock); goto retry; } @@ -404,26 +539,131 @@ retry: spin_unlock_irq(&ctx->lock); } -static void -group_sched_out(struct perf_counter *group_counter, - struct perf_cpu_context *cpuctx, - struct perf_counter_context *ctx) +/* + * Cross CPU call to enable a performance counter + */ +static void __perf_counter_enable(void *info) { - struct perf_counter *counter; + struct perf_counter *counter = info; + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct perf_counter_context *ctx = counter->ctx; + struct perf_counter *leader = counter->group_leader; + unsigned long flags; + int err; - if (group_counter->state != PERF_COUNTER_STATE_ACTIVE) + /* + * If this is a per-task counter, need to check whether this + * counter's task is the current task on this cpu. + */ + if (ctx->task && cpuctx->task_ctx != ctx) return; - counter_sched_out(group_counter, cpuctx, ctx); + curr_rq_lock_irq_save(&flags); + spin_lock(&ctx->lock); + + if (counter->state >= PERF_COUNTER_STATE_INACTIVE) + goto unlock; + counter->state = PERF_COUNTER_STATE_INACTIVE; /* - * Schedule out siblings (if any): + * If the counter is in a group and isn't the group leader, + * then don't put it on unless the group is on. */ - list_for_each_entry(counter, &group_counter->sibling_list, list_entry) - counter_sched_out(counter, cpuctx, ctx); + if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE) + goto unlock; - if (group_counter->hw_event.exclusive) - cpuctx->exclusive = 0; + if (!group_can_go_on(counter, cpuctx, 1)) + err = -EEXIST; + else + err = counter_sched_in(counter, cpuctx, ctx, + smp_processor_id()); + + if (err) { + /* + * If this counter can't go on and it's part of a + * group, then the whole group has to come off. + */ + if (leader != counter) + group_sched_out(leader, cpuctx, ctx); + if (leader->hw_event.pinned) + leader->state = PERF_COUNTER_STATE_ERROR; + } + + unlock: + spin_unlock(&ctx->lock); + curr_rq_unlock_irq_restore(&flags); +} + +/* + * Enable a counter. + */ +static void perf_counter_enable(struct perf_counter *counter) +{ + struct perf_counter_context *ctx = counter->ctx; + struct task_struct *task = ctx->task; + + if (!task) { + /* + * Enable the counter on the cpu that it's on + */ + smp_call_function_single(counter->cpu, __perf_counter_enable, + counter, 1); + return; + } + + spin_lock_irq(&ctx->lock); + if (counter->state >= PERF_COUNTER_STATE_INACTIVE) + goto out; + + /* + * If the counter is in error state, clear that first. + * That way, if we see the counter in error state below, we + * know that it has gone back into error state, as distinct + * from the task having been scheduled away before the + * cross-call arrived. + */ + if (counter->state == PERF_COUNTER_STATE_ERROR) + counter->state = PERF_COUNTER_STATE_OFF; + + retry: + spin_unlock_irq(&ctx->lock); + task_oncpu_function_call(task, __perf_counter_enable, counter); + + spin_lock_irq(&ctx->lock); + + /* + * If the context is active and the counter is still off, + * we need to retry the cross-call. 
+ */ + if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF) + goto retry; + + /* + * Since we have the lock this context can't be scheduled + * in, so we can change the state safely. + */ + if (counter->state == PERF_COUNTER_STATE_OFF) + counter->state = PERF_COUNTER_STATE_INACTIVE; + out: + spin_unlock_irq(&ctx->lock); +} + +/* + * Enable a counter and all its children. + */ +static void perf_counter_enable_family(struct perf_counter *counter) +{ + struct perf_counter *child; + + perf_counter_enable(counter); + + /* + * Lock the mutex to protect the list of children + */ + mutex_lock(&counter->mutex); + list_for_each_entry(child, &counter->child_list, child_list) + perf_counter_enable(child); + mutex_unlock(&counter->mutex); } void __perf_counter_sched_out(struct perf_counter_context *ctx, @@ -432,16 +672,18 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx, struct perf_counter *counter; u64 flags; + spin_lock(&ctx->lock); + ctx->is_active = 0; if (likely(!ctx->nr_counters)) - return; + goto out; - spin_lock(&ctx->lock); flags = hw_perf_save_disable(); if (ctx->nr_active) { list_for_each_entry(counter, &ctx->counter_list, list_entry) group_sched_out(counter, cpuctx, ctx); } hw_perf_restore(flags); + out: spin_unlock(&ctx->lock); } @@ -528,10 +770,11 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, u64 flags; int can_add_hw = 1; + spin_lock(&ctx->lock); + ctx->is_active = 1; if (likely(!ctx->nr_counters)) - return; + goto out; - spin_lock(&ctx->lock); flags = hw_perf_save_disable(); /* @@ -578,6 +821,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, } } hw_perf_restore(flags); + out: spin_unlock(&ctx->lock); } @@ -896,12 +1140,14 @@ static int perf_release(struct inode *inode, struct file *file) file->private_data = NULL; + mutex_lock(&ctx->mutex); mutex_lock(&counter->mutex); perf_counter_remove_from_context(counter); put_context(ctx); mutex_unlock(&counter->mutex); + mutex_unlock(&ctx->mutex); kfree(counter); @@ -1053,10 +1299,30 @@ static unsigned int perf_poll(struct file *file, poll_table *wait) return events; } +static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct perf_counter *counter = file->private_data; + int err = 0; + + switch (cmd) { + case PERF_COUNTER_IOC_ENABLE: + perf_counter_enable_family(counter); + break; + case PERF_COUNTER_IOC_DISABLE: + perf_counter_disable_family(counter); + break; + default: + err = -ENOTTY; + } + return err; +} + static const struct file_operations perf_fops = { .release = perf_release, .read = perf_read, .poll = perf_poll, + .unlocked_ioctl = perf_ioctl, + .compat_ioctl = perf_ioctl, }; static int cpu_clock_perf_counter_enable(struct perf_counter *counter) @@ -1348,6 +1614,8 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, INIT_LIST_HEAD(&counter->sibling_list); init_waitqueue_head(&counter->waitq); + INIT_LIST_HEAD(&counter->child_list); + counter->irqdata = &counter->data[0]; counter->usrdata = &counter->data[1]; counter->cpu = cpu; @@ -1452,7 +1720,9 @@ sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr __user, goto err_free_put_context; counter->filp = counter_file; + mutex_lock(&ctx->mutex); perf_install_in_context(ctx, counter, cpu); + mutex_unlock(&ctx->mutex); fput_light(counter_file, fput_needed2); @@ -1479,6 +1749,7 @@ __perf_counter_init_context(struct perf_counter_context *ctx, { memset(ctx, 0, sizeof(*ctx)); spin_lock_init(&ctx->lock); + mutex_init(&ctx->mutex); INIT_LIST_HEAD(&ctx->counter_list); ctx->task = task; } 
@@ -1486,20 +1757,30 @@ __perf_counter_init_context(struct perf_counter_context *ctx, /* * inherit a counter from parent task to child task: */ -static int +static struct perf_counter * inherit_counter(struct perf_counter *parent_counter, struct task_struct *parent, struct perf_counter_context *parent_ctx, struct task_struct *child, + struct perf_counter *group_leader, struct perf_counter_context *child_ctx) { struct perf_counter *child_counter; + /* + * Instead of creating recursive hierarchies of counters, + * we link inherited counters back to the original parent, + * which has a filp for sure, which we use as the reference + * count: + */ + if (parent_counter->parent) + parent_counter = parent_counter->parent; + child_counter = perf_counter_alloc(&parent_counter->hw_event, - parent_counter->cpu, NULL, - GFP_ATOMIC); + parent_counter->cpu, group_leader, + GFP_KERNEL); if (!child_counter) - return -ENOMEM; + return NULL; /* * Link it up in the child's context: @@ -1523,16 +1804,82 @@ inherit_counter(struct perf_counter *parent_counter, */ atomic_long_inc(&parent_counter->filp->f_count); + /* + * Link this into the parent counter's child list + */ + mutex_lock(&parent_counter->mutex); + list_add_tail(&child_counter->child_list, &parent_counter->child_list); + + /* + * Make the child state follow the state of the parent counter, + * not its hw_event.disabled bit. We hold the parent's mutex, + * so we won't race with perf_counter_{en,dis}able_family. + */ + if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE) + child_counter->state = PERF_COUNTER_STATE_INACTIVE; + else + child_counter->state = PERF_COUNTER_STATE_OFF; + + mutex_unlock(&parent_counter->mutex); + + return child_counter; +} + +static int inherit_group(struct perf_counter *parent_counter, + struct task_struct *parent, + struct perf_counter_context *parent_ctx, + struct task_struct *child, + struct perf_counter_context *child_ctx) +{ + struct perf_counter *leader; + struct perf_counter *sub; + + leader = inherit_counter(parent_counter, parent, parent_ctx, + child, NULL, child_ctx); + if (!leader) + return -ENOMEM; + list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) { + if (!inherit_counter(sub, parent, parent_ctx, + child, leader, child_ctx)) + return -ENOMEM; + } return 0; } +static void sync_child_counter(struct perf_counter *child_counter, + struct perf_counter *parent_counter) +{ + u64 parent_val, child_val; + + parent_val = atomic64_read(&parent_counter->count); + child_val = atomic64_read(&child_counter->count); + + /* + * Add back the child's count to the parent's count: + */ + atomic64_add(child_val, &parent_counter->count); + + /* + * Remove this counter from the parent's list + */ + mutex_lock(&parent_counter->mutex); + list_del_init(&child_counter->child_list); + mutex_unlock(&parent_counter->mutex); + + /* + * Release the parent counter, if this was the last + * reference to it. 
+ */ + fput(parent_counter->filp); +} + static void __perf_counter_exit_task(struct task_struct *child, struct perf_counter *child_counter, struct perf_counter_context *child_ctx) { struct perf_counter *parent_counter; - u64 parent_val, child_val; + struct perf_counter *sub, *tmp; /* * If we do not self-reap then we have to wait for the @@ -1561,7 +1908,7 @@ __perf_counter_exit_task(struct task_struct *child, cpuctx = &__get_cpu_var(perf_cpu_context); - counter_sched_out(child_counter, cpuctx, child_ctx); + group_sched_out(child_counter, cpuctx, child_ctx); list_del_init(&child_counter->list_entry); @@ -1577,26 +1924,23 @@ __perf_counter_exit_task(struct task_struct *child, * that are still around due to the child reference. These * counters need to be zapped - but otherwise linger. */ - if (!parent_counter) - return; - - parent_val = atomic64_read(&parent_counter->count); - child_val = atomic64_read(&child_counter->count); - - /* - * Add back the child's count to the parent's count: - */ - atomic64_add(child_val, &parent_counter->count); - - fput(parent_counter->filp); + if (parent_counter) { + sync_child_counter(child_counter, parent_counter); + list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list, + list_entry) { + if (sub->parent) + sync_child_counter(sub, sub->parent); + kfree(sub); + } + } kfree(child_counter); } /* - * When a child task exist, feed back counter values to parent counters. + * When a child task exits, feed back counter values to parent counters. * - * Note: we are running in child context, but the PID is not hashed + * Note: we may be running in child context, but the PID is not hashed * anymore so new counters will not be added. */ void perf_counter_exit_task(struct task_struct *child) @@ -1620,9 +1964,8 @@ void perf_counter_exit_task(struct task_struct *child) void perf_counter_init_task(struct task_struct *child) { struct perf_counter_context *child_ctx, *parent_ctx; - struct perf_counter *counter, *parent_counter; + struct perf_counter *counter; struct task_struct *parent = current; - unsigned long flags; child_ctx = &child->perf_counter_ctx; parent_ctx = &parent->perf_counter_ctx; @@ -1641,32 +1984,22 @@ void perf_counter_init_task(struct task_struct *child) * Lock the parent list. No need to lock the child - not PID * hashed yet and not running, so nobody can access it. 
*/ - spin_lock_irqsave(&parent_ctx->lock, flags); + mutex_lock(&parent_ctx->mutex); /* * We dont have to disable NMIs - we are only looking at * the list, not manipulating it: */ list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) { - if (!counter->hw_event.inherit || counter->group_leader != counter) + if (!counter->hw_event.inherit) continue; - /* - * Instead of creating recursive hierarchies of counters, - * we link inheritd counters back to the original parent, - * which has a filp for sure, which we use as the reference - * count: - */ - parent_counter = counter; - if (counter->parent) - parent_counter = counter->parent; - - if (inherit_counter(parent_counter, parent, + if (inherit_group(counter, parent, parent_ctx, child, child_ctx)) break; } - spin_unlock_irqrestore(&parent_ctx->lock, flags); + mutex_unlock(&parent_ctx->mutex); } static void __cpuinit perf_counter_init_cpu(int cpu) @@ -1692,11 +2025,15 @@ static void __perf_counter_exit_cpu(void *info) list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) __perf_counter_remove_from_context(counter); - } static void perf_counter_exit_cpu(int cpu) { + struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); + struct perf_counter_context *ctx = &cpuctx->ctx; + + mutex_lock(&ctx->mutex); smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1); + mutex_unlock(&ctx->mutex); } #else static inline void perf_counter_exit_cpu(int cpu) { } -- cgit v1.2.3 From 1b023a96d9b44f50f4d8ff28c15f5b80e354760f Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Fri, 23 Jan 2009 10:13:01 +0100 Subject: perfcounters: throttle on too high IRQ rates Starting kerneltop with only -c 100 seems to be a bad idea: it can easily lock the system due to perfcounter IRQ overload. So add throttling: if a new IRQ arrives less than PERFMON_MIN_PERIOD_NS after the previous one, turn off perfcounters and unthrottle them from the next timer tick.
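In essence, the new per-CPU state boils down to the following sketch (the helper name is illustrative only; the fields and constants match the diff below):

	/*
	 * Called from the counter interrupt handler, with counters already
	 * globally disabled: if this IRQ arrived too soon after the previous
	 * one, stay disabled until the next timer tick.
	 */
	static void check_irq_rate(struct cpu_hw_counters *cpuc)
	{
		u64 now = sched_clock();

		if (now - cpuc->last_interrupt < PERFMON_MIN_PERIOD_NS)
			cpuc->throttled = 1;
		cpuc->last_interrupt = now;
	}

The timer tick then calls perf_counter_unthrottle(), which restores cpuc->global_enable and clears cpuc->throttled.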
Signed-off-by: Mike Galbraith Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic.c | 2 ++ arch/x86/kernel/cpu/perf_counter.c | 38 ++++++++++++++++++++++++++++++++------ include/linux/perf_counter.h | 4 ++++ 3 files changed, 38 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 7b434e5b14c..849c23009bf 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -781,6 +781,8 @@ static void local_apic_timer_interrupt(void) inc_irq_stat(apic_timer_irqs); evt->event_handler(evt); + + perf_counter_unthrottle(); } /* diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 9376771f757..1a040b179b5 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -33,6 +33,9 @@ static int nr_counters_fixed __read_mostly; struct cpu_hw_counters { struct perf_counter *counters[X86_PMC_IDX_MAX]; unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + u64 last_interrupt; + u64 global_enable; + int throttled; }; /* @@ -474,16 +477,19 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown) static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi) { int bit, cpu = smp_processor_id(); - u64 ack, status, saved_global; - struct cpu_hw_counters *cpuc; + u64 ack, status, now; + struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu); - rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global); + rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable); /* Disable counters globally */ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); ack_APIC_irq(); - cpuc = &per_cpu(cpu_hw_counters, cpu); + now = sched_clock(); + if (now - cpuc->last_interrupt < PERFMON_MIN_PERIOD_NS) + cpuc->throttled = 1; + cpuc->last_interrupt = now; rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); if (!status) @@ -533,9 +539,29 @@ again: goto again; out: /* - * Restore - do not reenable when global enable is off: + * Restore - do not reenable when global enable is off or throttled: */ - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global); + if (!cpuc->throttled) + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable); +} + +void perf_counter_unthrottle(void) +{ + struct cpu_hw_counters *cpuc; + + if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) + return; + + if (unlikely(!perf_counters_initialized)) + return; + + cpuc = &per_cpu(cpu_hw_counters, smp_processor_id()); + if (cpuc->throttled) { + if (printk_ratelimit()) + printk(KERN_WARNING "PERFMON: max event frequency exceeded!\n"); + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable); + cpuc->throttled = 0; + } } void smp_perf_counter_interrupt(struct pt_regs *regs) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 33ba9fe0a78..91f1ca4c01c 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -254,6 +254,7 @@ extern void perf_counter_init_task(struct task_struct *child); extern void perf_counter_exit_task(struct task_struct *child); extern void perf_counter_notify(struct pt_regs *regs); extern void perf_counter_print_debug(void); +extern void perf_counter_unthrottle(void); extern u64 hw_perf_save_disable(void); extern void hw_perf_restore(u64 ctrl); extern int perf_counter_task_disable(void); @@ -270,6 +271,8 @@ static inline int is_software_counter(struct perf_counter *counter) return !counter->hw_event.raw && counter->hw_event.type < 0; } +#define PERFMON_MIN_PERIOD_NS 10000 + #else static inline void perf_counter_task_sched_in(struct task_struct *task, int cpu) { } @@ -281,6 +284,7 @@ static 
inline void perf_counter_init_task(struct task_struct *child) { } static inline void perf_counter_exit_task(struct task_struct *child) { } static inline void perf_counter_notify(struct pt_regs *regs) { } static inline void perf_counter_print_debug(void) { } +static inline void perf_counter_unthrottle(void) { } static inline void hw_perf_restore(u64 ctrl) { } static inline u64 hw_perf_save_disable(void) { return 0; } static inline int perf_counter_task_disable(void) { return -EINVAL; } -- cgit v1.2.3 From 4b39fd96855254a244f71245b41a91cdecb87d63 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Fri, 23 Jan 2009 14:36:16 +0100 Subject: perfcounters: ratelimit performance counter interrupts Ratelimit performance counter interrupts to 100KHz per CPU. This replaces the irq-delta-time based method. Signed-off-by: Mike Galbraith Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 27 +++++++++++++++------------ include/linux/perf_counter.h | 2 -- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 1a040b179b5..a56d4cf92f3 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -33,9 +33,8 @@ static int nr_counters_fixed __read_mostly; struct cpu_hw_counters { struct perf_counter *counters[X86_PMC_IDX_MAX]; unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; - u64 last_interrupt; + unsigned long interrupts; u64 global_enable; - int throttled; }; /* @@ -470,6 +469,11 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown) } } +/* + * Maximum interrupt frequency of 100KHz per CPU + */ +#define PERFMON_MAX_INTERRUPTS 100000/HZ + /* * This handler is triggered by the local APIC, so the APIC IRQ handling * rules apply: @@ -477,7 +481,7 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown) static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi) { int bit, cpu = smp_processor_id(); - u64 ack, status, now; + u64 ack, status; struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu); rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable); @@ -486,11 +490,6 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi) wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); ack_APIC_irq(); - now = sched_clock(); - if (now - cpuc->last_interrupt < PERFMON_MIN_PERIOD_NS) - cpuc->throttled = 1; - cpuc->last_interrupt = now; - rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); if (!status) goto out; @@ -541,13 +540,14 @@ out: /* * Restore - do not reenable when global enable is off or throttled: */ - if (!cpuc->throttled) + if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS) wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable); } void perf_counter_unthrottle(void) { struct cpu_hw_counters *cpuc; + u64 global_enable; if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) return; @@ -556,12 +556,15 @@ void perf_counter_unthrottle(void) return; cpuc = &per_cpu(cpu_hw_counters, smp_processor_id()); - if (cpuc->throttled) { + if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) { if (printk_ratelimit()) - printk(KERN_WARNING "PERFMON: max event frequency exceeded!\n"); + printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n"); wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable); - cpuc->throttled = 0; } + rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_enable); + if (unlikely(cpuc->global_enable && !global_enable)) + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable); + cpuc->interrupts = 0; } void 
smp_perf_counter_interrupt(struct pt_regs *regs) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 91f1ca4c01c..f55381fbcac 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -271,8 +271,6 @@ static inline int is_software_counter(struct perf_counter *counter) return !counter->hw_event.raw && counter->hw_event.type < 0; } -#define PERFMON_MIN_PERIOD_NS 10000 - #else static inline void perf_counter_task_sched_in(struct task_struct *task, int cpu) { } -- cgit v1.2.3 From 3415dd9146c574bffe8f012c096bfc2bc62b9508 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Fri, 23 Jan 2009 14:16:53 +0100 Subject: perfcounters: fix section mismatch warning in perf_counter.c::perf_counters_lapic_init() Fix: WARNING: arch/x86/kernel/built-in.o(.text+0xdd0f): Section mismatch in reference from the function pmc_generic_enable() to the function .cpuinit.text:perf_counters_lapic_init() The function pmc_generic_enable() references the function __cpuinit perf_counters_lapic_init(). This is often because pmc_generic_enable lacks a __cpuinit annotation or the annotation of perf_counters_lapic_init is wrong. Signed-off-by: Mike Galbraith Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index a56d4cf92f3..46c436cdd73 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -605,7 +605,7 @@ void perf_counter_notify(struct pt_regs *regs) local_irq_restore(flags); } -void __cpuinit perf_counters_lapic_init(int nmi) +void perf_counters_lapic_init(int nmi) { u32 apic_val; -- cgit v1.2.3 From bb3f0b59ad005d2d2ecbbe9bd048eab6d1ecbd31 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sun, 25 Jan 2009 02:38:09 -0800 Subject: x86: make irqinit_32.c more like irqinit_64.c, v2 Impact: cleanup 1. add smp_intr_init and apic_intr_init for 32bit, the same as 64bit 2. move the apic_intr_init() call before the gates are set up with interrupt[i] 3. for 64bit, if ia32_emulation is not used, make the 0x80 vector available for per_cpu use [ v2: should use !test_bit() instead of test_bit() with 32bit ] Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/irqinit_32.c | 56 ++++++++++++++++++++++++++++++++++++------------ arch/x86/kernel/irqinit_64.c | 7 +++--- arch/x86/kernel/traps.c | 15 +++++------- 3 files changed, 43 insertions(+), 35 deletions(-) diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index c56496f8c6f..ddf3eb72f86 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c @@ -120,28 +120,8 @@ int vector_used_by_percpu_irq(unsigned int vector) return 0; } -/* Overridden in paravirt.c */ -void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); - -void __init native_init_IRQ(void) +static void __init smp_intr_init(void) { - int i; - - /* all the set up before the call gates are initialised */ - pre_intr_init_hook(); - - /* - * Cover the whole vector space, no vector can escape - * us. (some of these will be overridden and become - * 'special' SMP interrupts) - */ - for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) { - /* SYSCALL_VECTOR was reserved in trap_init. 
*/ - if (i != SYSCALL_VECTOR) - set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]); - } - - #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP) /* * The reschedule interrupt is a CPU-to-CPU reschedule-helper @@ -170,8 +150,13 @@ void __init native_init_IRQ(void) set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); #endif +} +static void __init apic_intr_init(void) +{ #ifdef CONFIG_X86_LOCAL_APIC + smp_intr_init(); + /* self generated IPI for local APIC timer */ alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); @@ -181,12 +166,37 @@ void __init native_init_IRQ(void) # ifdef CONFIG_PERF_COUNTERS alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt); # endif -#endif -#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL) +# ifdef CONFIG_X86_MCE_P4THERMAL /* thermal monitor LVT interrupt */ alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); +# endif #endif +} + +/* Overridden in paravirt.c */ +void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); + +void __init native_init_IRQ(void) +{ + int i; + + /* all the set up before the call gates are initialised */ + pre_intr_init_hook(); + + apic_intr_init(); + + /* + * Cover the whole vector space, no vector can escape + * us. (some of these will be overridden and become + * 'special' SMP interrupts) + */ + for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) { + int vector = FIRST_EXTERNAL_VECTOR + i; + /* SYSCALL_VECTOR was reserved in trap_init. */ + if (!test_bit(vector, used_vectors)) + set_intr_gate(vector, interrupt[i]); + } if (!acpi_ioapic) setup_irq(2, &irq2); diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index 6a71bfc51e5..16e1fc68750 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c @@ -162,6 +162,9 @@ void __init native_init_IRQ(void) int i; init_ISA_irqs(); + + apic_intr_init(); + /* * Cover the whole vector space, no vector can escape * us. 
(some of these will be overridden and become @@ -169,12 +172,10 @@ void __init native_init_IRQ(void) */ for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) { int vector = FIRST_EXTERNAL_VECTOR + i; - if (vector != IA32_SYSCALL_VECTOR) + if (!test_bit(vector, used_vectors)) set_intr_gate(vector, interrupt[i]); } - apic_intr_init(); - if (!acpi_ioapic) setup_irq(2, &irq2); } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ed5aee5f3fc..d36a502d87a 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -979,8 +979,13 @@ void __init trap_init(void) #endif set_intr_gate(19, &simd_coprocessor_error); + /* Reserve all the builtin and the syscall vector: */ + for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) + set_bit(i, used_vectors); + #ifdef CONFIG_IA32_EMULATION set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall); + set_bit(IA32_SYSCALL_VECTOR, used_vectors); #endif #ifdef CONFIG_X86_32 @@ -997,17 +1002,9 @@ void __init trap_init(void) } set_system_trap_gate(SYSCALL_VECTOR, &system_call); -#endif - - /* Reserve all the builtin and the syscall vector: */ - for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) - set_bit(i, used_vectors); - -#ifdef CONFIG_X86_64 - set_bit(IA32_SYSCALL_VECTOR, used_vectors); -#else set_bit(SYSCALL_VECTOR, used_vectors); #endif + /* * Should be a barrier for any external CPU state: */ -- cgit v1.2.3 From 65d370862f64973611a271ced61864b5f9bb6fc0 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Thu, 29 Jan 2009 14:06:52 +0100 Subject: perfcounters: fix refcounting bug don't kfree in use counters. Running... while true; do perfstat -e 1 -c true; done ...on all cores for a while doesn't seem to be eating ram, and my oops is gone. Signed-off-by: Mike Galbraith Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 1ac18daa424..f27a7e9f3c4 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1934,7 +1934,8 @@ __perf_counter_exit_task(struct task_struct *child, } } - kfree(child_counter); + if (!child_counter->filp || !atomic_long_read(&child_counter->filp->f_count)) + kfree(child_counter); } /* -- cgit v1.2.3 From 15081c61362618a0c81cc8d04e45e7427bc1ed71 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 1 Feb 2009 22:07:39 +0530 Subject: x86: irqinit_32.c fix compilation warning Fix: arch/x86/kernel/irqinit_32.c:124: warning: 'smp_intr_init' defined but not used Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/irqinit_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index ddf3eb72f86..520e6c1c5d2 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c @@ -154,9 +154,9 @@ static void __init smp_intr_init(void) static void __init apic_intr_init(void) { -#ifdef CONFIG_X86_LOCAL_APIC smp_intr_init(); +#ifdef CONFIG_X86_LOCAL_APIC /* self generated IPI for local APIC timer */ alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); -- cgit v1.2.3 From 5b75af0a02fcf3b8899f38ff6f22164c5d8e2fdd Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Wed, 4 Feb 2009 17:11:34 +0100 Subject: perfcounters: fix "perf counters kill oprofile" bug With oprofile as a module, and unloaded by profiling script, both oprofile and kerneltop work fine.. unless you leave kerneltop running when you start profiling, then you may see badness. 
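The mechanism behind the fix is the die-notifier chain: the notifier with the higher .priority sees the NMI first and can consume it by returning NOTIFY_STOP. A minimal sketch of the arrangement set up below (handler bodies elided; both blocks are hooked into the same chain via register_die_notifier()):

	/* perfcounters: registered for the lifetime of the system */
	static struct notifier_block perf_counter_nmi_notifier = {
		.notifier_call	= perf_counter_nmi_handler,
		.next		= NULL,
		.priority	= 1,
	};

	/*
	 * oprofile: registered only while a profiling run is active, and
	 * with a higher priority, so it borrows the NMI whenever loaded:
	 */
	static struct notifier_block profile_exceptions_nb = {
		.notifier_call	= profile_exceptions_notify,
		.next		= NULL,
		.priority	= 2,
	};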
Signed-off-by: Mike Galbraith Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 4 +++- arch/x86/oprofile/nmi_int.c | 7 ++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 46c436cdd73..8bb213323fe 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -643,7 +643,9 @@ perf_counter_nmi_handler(struct notifier_block *self, } static __read_mostly struct notifier_block perf_counter_nmi_notifier = { - .notifier_call = perf_counter_nmi_handler + .notifier_call = perf_counter_nmi_handler, + .next = NULL, + .priority = 1 }; void __init init_hw_perf_counters(void) diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 202864ad49a..c638685136e 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -40,8 +40,9 @@ static int profile_exceptions_notify(struct notifier_block *self, switch (val) { case DIE_NMI: - if (model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu))) - ret = NOTIFY_STOP; + case DIE_NMI_IPI: + model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu)); + ret = NOTIFY_STOP; break; default: break; @@ -134,7 +135,7 @@ static void nmi_cpu_setup(void *dummy) static struct notifier_block profile_exceptions_nb = { .notifier_call = profile_exceptions_notify, .next = NULL, - .priority = 0 + .priority = 2 }; static int nmi_setup(void) -- cgit v1.2.3 From 82aa9a1829199233f9bdaf26e2ee271114f4701e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 5 Feb 2009 15:23:08 +0100 Subject: perfcounters: fix "perf counters kills oprofile" bug, v2 Impact: fix kernel crash Both oprofile and perfcounters register an NMI die handler, but only one can handle the NMI. Conveniently, oprofile unregisters its notifier when not actively in use, so setting its notifier priority higher than perfcounter's allows oprofile to borrow the NMI for the duration of its run. Tested/works both as module and built-in. While testing, I found that if kerneltop was generating NMIs at very high frequency, the kernel could panic when oprofile registered its handler. This turned out to be because oprofile registers its handler before reset_value has been allocated, so if an NMI comes in while it's still setting up, kabOom. Rather than try more invasive changes, I followed the lead of other places in op_model_ppro.c, and simply returned in that highly unlikely event. (debug warnings attached) Signed-off-by: Mike Galbraith Signed-off-by: Ingo Molnar --- arch/x86/oprofile/op_model_ppro.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 07c914555a5..85eb6268374 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -126,6 +126,13 @@ static int ppro_check_ctrs(struct pt_regs * const regs, u64 val; int i; + /* + * This can happen if perf counters are in use when + * we steal the die notifier NMI. 
+ */ + if (unlikely(!reset_value)) + goto out; + for (i = 0 ; i < num_counters; ++i) { if (!reset_value[i]) continue; @@ -136,6 +143,7 @@ static int ppro_check_ctrs(struct pt_regs * const regs, } } +out: /* Only P6 based Pentium M need to re-unmask the apic vector but it * doesn't hurt other P6 variant */ apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); -- cgit v1.2.3 From 23a185ca8abbeef64b6ffc33059b1d630e43ec10 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 9 Feb 2009 22:42:47 +1100 Subject: perf_counters: make software counters work as per-cpu counters Impact: kernel crash fix Yanmin Zhang reported that using a PERF_COUNT_TASK_CLOCK software counter as a per-cpu counter would reliably crash the system, because it calls __task_delta_exec with a null pointer. The page fault, context switch and cpu migration counters also won't function correctly as per-cpu counters since they reference the current task. This fixes the problem by redirecting the task_clock counter to the cpu_clock counter when used as a per-cpu counter, and by implementing per-cpu page fault, context switch and cpu migration counters. Along the way, this: - Initializes counter->ctx earlier, in perf_counter_alloc, so that sw_perf_counter_init can use it - Adds code to kernel/sched.c to count task migrations into each cpu, in rq->nr_migrations_in - Exports the per-cpu context switch and task migration counts via new functions added to kernel/sched.c - Makes sure that if sw_perf_counter_init fails, we don't try to initialize the counter as a hardware counter. Since the user has passed a negative, non-raw event type, they clearly don't intend for it to be interpreted as a hardware event. Reported-by: "Zhang Yanmin" Signed-off-by: Paul Mackerras Signed-off-by: Ingo Molnar --- include/linux/sched.h | 2 ++ kernel/perf_counter.c | 78 +++++++++++++++++++++++++++++---------------------- kernel/sched.c | 17 +++++++++++ 3 files changed, 64 insertions(+), 33 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index b85b10abf77..1e5f70062a9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -137,6 +137,8 @@ extern unsigned long nr_running(void); extern unsigned long nr_uninterruptible(void); extern unsigned long nr_active(void); extern unsigned long nr_iowait(void); +extern u64 cpu_nr_switches(int cpu); +extern u64 cpu_nr_migrations(int cpu); struct seq_file; struct cfs_rq; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index f27a7e9f3c4..544193cbc47 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -20,6 +20,8 @@ #include #include #include +#include +#include /* * Each CPU has a list of per CPU counters: @@ -502,7 +504,6 @@ perf_install_in_context(struct perf_counter_context *ctx, { struct task_struct *task = ctx->task; - counter->ctx = ctx; if (!task) { /* * Per cpu counters are installed via an smp call and @@ -1417,11 +1418,19 @@ static const struct hw_perf_counter_ops perf_ops_task_clock = { .read = task_clock_perf_counter_read, }; -static u64 get_page_faults(void) +#ifdef CONFIG_VM_EVENT_COUNTERS +#define cpu_page_faults() __get_cpu_var(vm_event_states).event[PGFAULT] +#else +#define cpu_page_faults() 0 +#endif + +static u64 get_page_faults(struct perf_counter *counter) { - struct task_struct *curr = current; + struct task_struct *curr = counter->ctx->task; - return curr->maj_flt + curr->min_flt; + if (curr) + return curr->maj_flt + curr->min_flt; + return cpu_page_faults(); } static void page_faults_perf_counter_update(struct perf_counter 
*counter) @@ -1430,7 +1439,7 @@ static void page_faults_perf_counter_update(struct perf_counter *counter) s64 delta; prev = atomic64_read(&counter->hw.prev_count); - now = get_page_faults(); + now = get_page_faults(counter); atomic64_set(&counter->hw.prev_count, now); @@ -1446,11 +1455,7 @@ static void page_faults_perf_counter_read(struct perf_counter *counter) static int page_faults_perf_counter_enable(struct perf_counter *counter) { - /* - * page-faults is a per-task value already, - * so we dont have to clear it on switch-in. - */ - + atomic64_set(&counter->hw.prev_count, get_page_faults(counter)); return 0; } @@ -1465,11 +1470,13 @@ static const struct hw_perf_counter_ops perf_ops_page_faults = { .read = page_faults_perf_counter_read, }; -static u64 get_context_switches(void) +static u64 get_context_switches(struct perf_counter *counter) { - struct task_struct *curr = current; + struct task_struct *curr = counter->ctx->task; - return curr->nvcsw + curr->nivcsw; + if (curr) + return curr->nvcsw + curr->nivcsw; + return cpu_nr_switches(smp_processor_id()); } static void context_switches_perf_counter_update(struct perf_counter *counter) @@ -1478,7 +1485,7 @@ static void context_switches_perf_counter_update(struct perf_counter *counter) s64 delta; prev = atomic64_read(&counter->hw.prev_count); - now = get_context_switches(); + now = get_context_switches(counter); atomic64_set(&counter->hw.prev_count, now); @@ -1494,11 +1501,7 @@ static void context_switches_perf_counter_read(struct perf_counter *counter) static int context_switches_perf_counter_enable(struct perf_counter *counter) { - /* - * ->nvcsw + curr->nivcsw is a per-task value already, - * so we dont have to clear it on switch-in. - */ - + atomic64_set(&counter->hw.prev_count, get_context_switches(counter)); return 0; } @@ -1513,9 +1516,13 @@ static const struct hw_perf_counter_ops perf_ops_context_switches = { .read = context_switches_perf_counter_read, }; -static inline u64 get_cpu_migrations(void) +static inline u64 get_cpu_migrations(struct perf_counter *counter) { - return current->se.nr_migrations; + struct task_struct *curr = counter->ctx->task; + + if (curr) + return curr->se.nr_migrations; + return cpu_nr_migrations(smp_processor_id()); } static void cpu_migrations_perf_counter_update(struct perf_counter *counter) @@ -1524,7 +1531,7 @@ static void cpu_migrations_perf_counter_update(struct perf_counter *counter) s64 delta; prev = atomic64_read(&counter->hw.prev_count); - now = get_cpu_migrations(); + now = get_cpu_migrations(counter); atomic64_set(&counter->hw.prev_count, now); @@ -1540,11 +1547,7 @@ static void cpu_migrations_perf_counter_read(struct perf_counter *counter) static int cpu_migrations_perf_counter_enable(struct perf_counter *counter) { - /* - * se.nr_migrations is a per-task value already, - * so we dont have to clear it on switch-in. - */ - + atomic64_set(&counter->hw.prev_count, get_cpu_migrations(counter)); return 0; } @@ -1569,7 +1572,14 @@ sw_perf_counter_init(struct perf_counter *counter) hw_ops = &perf_ops_cpu_clock; break; case PERF_COUNT_TASK_CLOCK: - hw_ops = &perf_ops_task_clock; + /* + * If the user instantiates this as a per-cpu counter, + * use the cpu_clock counter instead. 
+ */ + if (counter->ctx->task) + hw_ops = &perf_ops_task_clock; + else + hw_ops = &perf_ops_cpu_clock; break; case PERF_COUNT_PAGE_FAULTS: hw_ops = &perf_ops_page_faults; @@ -1592,6 +1602,7 @@ sw_perf_counter_init(struct perf_counter *counter) static struct perf_counter * perf_counter_alloc(struct perf_counter_hw_event *hw_event, int cpu, + struct perf_counter_context *ctx, struct perf_counter *group_leader, gfp_t gfpflags) { @@ -1623,6 +1634,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, counter->wakeup_pending = 0; counter->group_leader = group_leader; counter->hw_ops = NULL; + counter->ctx = ctx; counter->state = PERF_COUNTER_STATE_INACTIVE; if (hw_event->disabled) @@ -1631,7 +1643,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, hw_ops = NULL; if (!hw_event->raw && hw_event->type < 0) hw_ops = sw_perf_counter_init(counter); - if (!hw_ops) + else hw_ops = hw_perf_counter_init(counter); if (!hw_ops) { @@ -1707,7 +1719,8 @@ sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr __user, } ret = -EINVAL; - counter = perf_counter_alloc(&hw_event, cpu, group_leader, GFP_KERNEL); + counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader, + GFP_KERNEL); if (!counter) goto err_put_context; @@ -1777,15 +1790,14 @@ inherit_counter(struct perf_counter *parent_counter, parent_counter = parent_counter->parent; child_counter = perf_counter_alloc(&parent_counter->hw_event, - parent_counter->cpu, group_leader, - GFP_KERNEL); + parent_counter->cpu, child_ctx, + group_leader, GFP_KERNEL); if (!child_counter) return NULL; /* * Link it up in the child's context: */ - child_counter->ctx = child_ctx; child_counter->task = child; list_add_counter(child_counter, child_ctx); child_ctx->nr_counters++; diff --git a/kernel/sched.c b/kernel/sched.c index 8db1a4cf208..173768f142a 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -558,6 +558,7 @@ struct rq { struct load_weight load; unsigned long nr_load_updates; u64 nr_switches; + u64 nr_migrations_in; struct cfs_rq cfs; struct rt_rq rt; @@ -1908,6 +1909,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) #endif if (old_cpu != new_cpu) { p->se.nr_migrations++; + new_rq->nr_migrations_in++; #ifdef CONFIG_SCHEDSTATS if (task_hot(p, old_rq->clock, NULL)) schedstat_inc(p, se.nr_forced2_migrations); @@ -2810,6 +2812,21 @@ unsigned long nr_active(void) return running + uninterruptible; } +/* + * Externally visible per-cpu scheduler statistics: + * cpu_nr_switches(cpu) - number of context switches on that cpu + * cpu_nr_migrations(cpu) - number of migrations into that cpu + */ +u64 cpu_nr_switches(int cpu) +{ + return cpu_rq(cpu)->nr_switches; +} + +u64 cpu_nr_migrations(int cpu) +{ + return cpu_rq(cpu)->nr_migrations_in; +} + /* * Update rq->cpu_load[] statistics. This function is usually called every * scheduler tick (TICK_NSEC). -- cgit v1.2.3 From d278c48435625cb6b7edcf6a547620768b175709 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Mon, 9 Feb 2009 07:38:50 +0100 Subject: perf_counters: account NMI interrupts I noticed that kerneltop interrupts were accounted as NMI, but not their perf counter origin. Account NMI performance counter interrupts. 
Signed-off-by: Mike Galbraith Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 8bb213323fe..9901e46998d 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -495,6 +495,7 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi) goto out; again: + inc_irq_stat(apic_perf_irqs); ack = status; for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { struct perf_counter *counter = cpuc->counters[bit]; @@ -570,7 +571,6 @@ void perf_counter_unthrottle(void) void smp_perf_counter_interrupt(struct pt_regs *regs) { irq_enter(); - inc_irq_stat(apic_perf_irqs); apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR); __smp_perf_counter_interrupt(regs, 0); -- cgit v1.2.3 From 0475f9ea8e2cc030298908949e0d5da9f2fc2cfe Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 11 Feb 2009 14:35:35 +1100 Subject: perf_counters: allow users to count user, kernel and/or hypervisor events Impact: new perf_counter feature This extends the perf_counter_hw_event struct with bits that specify that events in user, kernel and/or hypervisor mode should not be counted (i.e. should be excluded), and adds code to program the PMU mode selection bits accordingly on x86 and powerpc. For software counters, we don't currently have the infrastructure to distinguish which mode an event occurs in, so we currently fail the counter initialization if the setting of the hw_event.exclude_* bits would require us to distinguish. Context switches and CPU migrations are currently considered to occur in kernel mode. On x86, this changes the previous policy that only root can count kernel events. Now non-root users can count kernel events or exclude them. Non-root users still can't use NMI events, though. On x86 we don't appear to have any way to control whether hypervisor events are counted or not, so hw_event.exclude_hv is ignored. On powerpc, the selection of whether to count events in user, kernel and/or hypervisor mode is PMU-wide, not per-counter, so this adds a check that the hw_event.exclude_* settings are the same as other events on the PMU. Counters being added to a group have to have the same settings as the other hardware counters in the group. Counters and groups can only be enabled in hw_perf_group_sched_in or power_perf_enable if they have the same settings as any other counters already on the PMU. If we are not running on a hypervisor, the exclude_hv setting is ignored (by forcing it to 0) since we can't ever get any hypervisor events. 
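As a usage sketch, a non-root user can now count kernel-mode-only events roughly like this (the open() signature is an assumption based on this series' sys_perf_counter_open(), and PERF_COUNT_INSTRUCTIONS is just an example event type):

	struct perf_counter_hw_event hw_event = {
		.type		= PERF_COUNT_INSTRUCTIONS,
		.exclude_user	= 1,	/* count kernel-mode events only */
		.exclude_hv	= 1,	/* ignored on x86; MMCR0_FCHV on powerpc */
	};
	u64 count;

	/* pid 0: current task, cpu -1: any cpu, no group leader, no flags */
	int fd = perf_counter_open(&hw_event, 0, -1, -1, 0);

	read(fd, &count, sizeof(count));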
Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/perf_counter.c | 68 ++++++++++++++++++++++++++++++++++++-- arch/x86/kernel/cpu/perf_counter.c | 31 ++++++++++------- include/linux/perf_counter.h | 19 ++++++----- kernel/perf_counter.c | 26 ++++++++++++--- 4 files changed, 117 insertions(+), 27 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 5b0211348c7..bd6ba85beb5 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -16,6 +16,7 @@ #include #include #include +#include struct cpu_hw_counters { int n_counters; @@ -214,6 +215,36 @@ static int power_check_constraints(unsigned int event[], int n_ev) return 0; } +/* + * Check if newly-added counters have consistent settings for + * exclude_{user,kernel,hv} with each other and any previously + * added counters. + */ +static int check_excludes(struct perf_counter **ctrs, int n_prev, int n_new) +{ + int eu, ek, eh; + int i, n; + struct perf_counter *counter; + + n = n_prev + n_new; + if (n <= 1) + return 0; + + eu = ctrs[0]->hw_event.exclude_user; + ek = ctrs[0]->hw_event.exclude_kernel; + eh = ctrs[0]->hw_event.exclude_hv; + if (n_prev == 0) + n_prev = 1; + for (i = n_prev; i < n; ++i) { + counter = ctrs[i]; + if (counter->hw_event.exclude_user != eu || + counter->hw_event.exclude_kernel != ek || + counter->hw_event.exclude_hv != eh) + return -EAGAIN; + } + return 0; +} + static void power_perf_read(struct perf_counter *counter) { long val, delta, prev; @@ -323,6 +354,20 @@ void hw_perf_restore(u64 disable) goto out; } + /* + * Add in MMCR0 freeze bits corresponding to the + * hw_event.exclude_* bits for the first counter. + * We have already checked that all counters have the + * same values for these bits as the first counter. + */ + counter = cpuhw->counter[0]; + if (counter->hw_event.exclude_user) + cpuhw->mmcr[0] |= MMCR0_FCP; + if (counter->hw_event.exclude_kernel) + cpuhw->mmcr[0] |= MMCR0_FCS; + if (counter->hw_event.exclude_hv) + cpuhw->mmcr[0] |= MMCR0_FCHV; + /* * Write the new configuration to MMCR* with the freeze * bit set and set the hardware counters to their initial values. @@ -424,6 +469,8 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader, &cpuhw->counter[n0], &cpuhw->events[n0]); if (n < 0) return -EAGAIN; + if (check_excludes(cpuhw->counter, n0, n)) + return -EAGAIN; if (power_check_constraints(cpuhw->events, n + n0)) return -EAGAIN; cpuhw->n_counters = n0 + n; @@ -476,6 +523,8 @@ static int power_perf_enable(struct perf_counter *counter) goto out; cpuhw->counter[n0] = counter; cpuhw->events[n0] = counter->hw.config; + if (check_excludes(cpuhw->counter, n0, 1)) + goto out; if (power_check_constraints(cpuhw->events, n0 + 1)) goto out; @@ -554,6 +603,17 @@ hw_perf_counter_init(struct perf_counter *counter) counter->hw.config_base = ev; counter->hw.idx = 0; + /* + * If we are not running on a hypervisor, force the + * exclude_hv bit to 0 so that we don't care what + * the user set it to. This also means that we don't + * set the MMCR0_FCHV bit, which unconditionally freezes + * the counters on the PPC970 variants used in Apple G5 + * machines (since MSR.HV is always 1 on those machines). + */ + if (!firmware_has_feature(FW_FEATURE_LPAR)) + counter->hw_event.exclude_hv = 0; + /* * If this is in a group, check if it can go on with all the * other hardware counters in the group. 
We assume the counter @@ -566,11 +626,13 @@ hw_perf_counter_init(struct perf_counter *counter) if (n < 0) return NULL; } - events[n++] = ev; - if (power_check_constraints(events, n)) + events[n] = ev; + if (check_excludes(ctrs, n, 1)) + return NULL; + if (power_check_constraints(events, n + 1)) return NULL; - counter->hw.config = events[n - 1]; + counter->hw.config = events[n]; atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period); return &power_perf_ops; } diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 9901e46998d..383d4c6423a 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -107,21 +107,25 @@ static int __hw_perf_counter_init(struct perf_counter *counter) return -EINVAL; /* - * Count user events, and generate PMC IRQs: + * Generate PMC IRQs: * (keep 'enabled' bit clear for now) */ - hwc->config = ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT; + hwc->config = ARCH_PERFMON_EVENTSEL_INT; /* - * If privileged enough, count OS events too, and allow - * NMI events as well: + * Count user and OS events unless requested not to. */ - hwc->nmi = 0; - if (capable(CAP_SYS_ADMIN)) { + if (!hw_event->exclude_user) + hwc->config |= ARCH_PERFMON_EVENTSEL_USR; + if (!hw_event->exclude_kernel) hwc->config |= ARCH_PERFMON_EVENTSEL_OS; - if (hw_event->nmi) - hwc->nmi = 1; - } + + /* + * If privileged enough, allow NMI events: + */ + hwc->nmi = 0; + if (capable(CAP_SYS_ADMIN) && hw_event->nmi) + hwc->nmi = 1; hwc->irq_period = hw_event->irq_period; /* @@ -248,10 +252,13 @@ __pmc_fixed_enable(struct perf_counter *counter, int err; /* - * Enable IRQ generation (0x8) and ring-3 counting (0x2), - * and enable ring-0 counting if allowed: + * Enable IRQ generation (0x8), + * and enable ring-3 counting (0x2) and ring-0 counting (0x1) + * if requested: */ - bits = 0x8ULL | 0x2ULL; + bits = 0x8ULL; + if (hwc->config & ARCH_PERFMON_EVENTSEL_USR) + bits |= 0x2; if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) bits |= 0x1; bits <<= (idx * 4); diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index f55381fbcac..c83f51d6e35 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -83,14 +83,17 @@ struct perf_counter_hw_event { u64 irq_period; u32 record_type; - u32 disabled : 1, /* off by default */ - nmi : 1, /* NMI sampling */ - raw : 1, /* raw event type */ - inherit : 1, /* children inherit it */ - pinned : 1, /* must always be on PMU */ - exclusive : 1, /* only counter on PMU */ - - __reserved_1 : 26; + u32 disabled : 1, /* off by default */ + nmi : 1, /* NMI sampling */ + raw : 1, /* raw event type */ + inherit : 1, /* children inherit it */ + pinned : 1, /* must always be on PMU */ + exclusive : 1, /* only group on PMU */ + exclude_user : 1, /* don't count user */ + exclude_kernel : 1, /* ditto kernel */ + exclude_hv : 1, /* ditto hypervisor */ + + __reserved_1 : 23; u64 __reserved_2; }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 544193cbc47..89d5e3fe970 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1567,11 +1567,25 @@ sw_perf_counter_init(struct perf_counter *counter) { const struct hw_perf_counter_ops *hw_ops = NULL; + /* + * Software counters (currently) can't in general distinguish + * between user, kernel and hypervisor events. + * However, context switches and cpu migrations are considered + * to be kernel events, and page faults are never hypervisor + * events. 
+ */ switch (counter->hw_event.type) { case PERF_COUNT_CPU_CLOCK: - hw_ops = &perf_ops_cpu_clock; + if (!(counter->hw_event.exclude_user || + counter->hw_event.exclude_kernel || + counter->hw_event.exclude_hv)) + hw_ops = &perf_ops_cpu_clock; break; case PERF_COUNT_TASK_CLOCK: + if (counter->hw_event.exclude_user || + counter->hw_event.exclude_kernel || + counter->hw_event.exclude_hv) + break; /* * If the user instantiates this as a per-cpu counter, * use the cpu_clock counter instead. @@ -1582,13 +1596,17 @@ sw_perf_counter_init(struct perf_counter *counter) hw_ops = &perf_ops_cpu_clock; break; case PERF_COUNT_PAGE_FAULTS: - hw_ops = &perf_ops_page_faults; + if (!(counter->hw_event.exclude_user || + counter->hw_event.exclude_kernel)) + hw_ops = &perf_ops_page_faults; break; case PERF_COUNT_CONTEXT_SWITCHES: - hw_ops = &perf_ops_context_switches; + if (!counter->hw_event.exclude_kernel) + hw_ops = &perf_ops_context_switches; break; case PERF_COUNT_CPU_MIGRATIONS: - hw_ops = &perf_ops_cpu_migrations; + if (!counter->hw_event.exclude_kernel) + hw_ops = &perf_ops_cpu_migrations; break; default: break; -- cgit v1.2.3 From 5af759176cc767e7426f89764bde4996ebaaf419 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Wed, 11 Feb 2009 10:53:37 +0100 Subject: perfcounters: fix use after free in perf_release() running... while true; do foo -d 1 -f 1 -c 100000 & sleep 1 kerneltop -d 1 -f 1 -e 1 -c 25000 -p `pidof foo` done while true; do killall foo; killall kerneltop; sleep 2 done ...in two shells with SLUB_DEBUG enabled produces flood of: BUG task_struct: Poison overwritten. Fix the use-after-free bug in perf_release(). Signed-off-by: Mike Galbraith Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 89d5e3fe970..e0576c3fdb5 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1145,12 +1145,12 @@ static int perf_release(struct inode *inode, struct file *file) mutex_lock(&counter->mutex); perf_counter_remove_from_context(counter); - put_context(ctx); mutex_unlock(&counter->mutex); mutex_unlock(&ctx->mutex); kfree(counter); + put_context(ctx); return 0; } -- cgit v1.2.3 From 4bcf349a0f90d1e69eb35c6df0fa285c886c1cd6 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 11 Feb 2009 13:53:19 +0100 Subject: perfcounters: fix refcounting bug, take 2 Only free child_counter if it has a parent; if it doesn't, then it has a file pointing to it and we'll free it in perf_release. 
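The resulting lifetime rule, restated as a sketch (sibling handling elided; the helper name is illustrative, not from the patch):

	static void put_child_counter(struct perf_counter *child_counter)
	{
		struct perf_counter *parent = child_counter->parent;

		if (parent) {
			/*
			 * Inherited counter: no file ever pointed at it, so
			 * task exit is the only place it can be freed.
			 */
			sync_child_counter(child_counter, parent);
			kfree(child_counter);
			return;
		}
		/*
		 * Top-level counter: a file still references it, so leave
		 * it alone; perf_release() does the kfree() when the last
		 * reference to the fd goes away.
		 */
	}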
Signed-off-by: Mike Galbraith Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index e0576c3fdb5..fcefb0a726f 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1958,14 +1958,13 @@ __perf_counter_exit_task(struct task_struct *child, sync_child_counter(child_counter, parent_counter); list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list, list_entry) { - if (sub->parent) + if (sub->parent) { sync_child_counter(sub, sub->parent); - kfree(sub); + kfree(sub); + } } - } - - if (!child_counter->filp || !atomic_long_read(&child_counter->filp->f_count)) kfree(child_counter); + } } /* -- cgit v1.2.3 From c07c99b67233ccaad38a961c17405dc1e1542aa4 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 13 Feb 2009 22:10:34 +1100 Subject: perfcounters: make context switch and migration software counters work again Jaswinder Singh Rajput reported that commit 23a185ca8abbeef caused the context switch and migration software counters to report zero always. With that commit, the software counters only count events that occur between sched-in and sched-out for a task. This is necessary for the counter enable/disable prctls and ioctls to work. However, the context switch and migration counts are incremented after sched-out for one task and before sched-in for the next. Since the increment doesn't occur while a task is scheduled in (as far as the software counters are concerned) it doesn't count towards any counter. Thus the context switch and migration counters need to count events that occur at any time, provided the counter is enabled, not just those that occur while the task is scheduled in (from the perf_counter subsystem's point of view). The problem though is that the software counter code can't tell the difference between being enabled and being scheduled in, and between being disabled and being scheduled out, since we use the one pair of enable/disable entry points for both. That is, the high-level disable operation simply arranges for the counter to not be scheduled in any more, and the high-level enable operation arranges for it to be scheduled in again. One way to solve this would be to have sched_in/out operations in the hw_perf_counter_ops struct as well as enable/disable. However, this takes a simpler approach: it adds a 'prev_state' field to the perf_counter struct that allows a counter's enable method to know whether the counter was previously disabled or just inactive (scheduled out), and therefore whether the enable method is being called as a result of a high-level enable or a schedule-in operation. This then allows the context switch, migration and page fault counters to reset their hw.prev_count value in their enable functions only if they are called as a result of a high-level enable operation. Although page faults would normally only occur while the counter is scheduled in, this changes the page fault counter code too in case there are ever circumstances where page faults get counted against a task while its counters are not scheduled in. 
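Concretely, each software counter's enable method now distinguishes the two cases via prev_state (a distilled sketch; read_event_count() stands in for get_page_faults() and friends):

	static int sw_counter_enable(struct perf_counter *counter)
	{
		/*
		 * prev_state <= OFF means a high-level enable: restart the
		 * baseline from the current event count. INACTIVE means an
		 * ordinary sched-in: keep the old baseline, so events that
		 * occurred between sched-out and sched-in are counted too.
		 */
		if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
			atomic64_set(&counter->hw.prev_count,
				     read_event_count(counter));
		return 0;
	}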
Reported-by: Jaswinder Singh Rajput Signed-off-by: Paul Mackerras Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 1 + kernel/perf_counter.c | 21 +++++++++++++++------ 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index c83f51d6e35..32cd1acb738 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -173,6 +173,7 @@ struct perf_counter { const struct hw_perf_counter_ops *hw_ops; enum perf_counter_active_state state; + enum perf_counter_active_state prev_state; atomic64_t count; struct perf_counter_hw_event hw_event; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index fcefb0a726f..ad62965828d 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -444,6 +444,7 @@ static void __perf_install_in_context(void *info) list_add_counter(counter, ctx); ctx->nr_counters++; + counter->prev_state = PERF_COUNTER_STATE_OFF; /* * Don't put the counter on if it is disabled or if @@ -562,6 +563,7 @@ static void __perf_counter_enable(void *info) curr_rq_lock_irq_save(&flags); spin_lock(&ctx->lock); + counter->prev_state = counter->state; if (counter->state >= PERF_COUNTER_STATE_INACTIVE) goto unlock; counter->state = PERF_COUNTER_STATE_INACTIVE; @@ -733,6 +735,7 @@ group_sched_in(struct perf_counter *group_counter, if (ret) return ret < 0 ? ret : 0; + group_counter->prev_state = group_counter->state; if (counter_sched_in(group_counter, cpuctx, ctx, cpu)) return -EAGAIN; @@ -740,6 +743,7 @@ group_sched_in(struct perf_counter *group_counter, * Schedule in siblings as one group (if any): */ list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { + counter->prev_state = counter->state; if (counter_sched_in(counter, cpuctx, ctx, cpu)) { partial_group = counter; goto group_error; @@ -1398,9 +1402,9 @@ static void task_clock_perf_counter_read(struct perf_counter *counter) static int task_clock_perf_counter_enable(struct perf_counter *counter) { - u64 now = task_clock_perf_counter_val(counter, 0); - - atomic64_set(&counter->hw.prev_count, now); + if (counter->prev_state <= PERF_COUNTER_STATE_OFF) + atomic64_set(&counter->hw.prev_count, + task_clock_perf_counter_val(counter, 0)); return 0; } @@ -1455,7 +1459,8 @@ static void page_faults_perf_counter_read(struct perf_counter *counter) static int page_faults_perf_counter_enable(struct perf_counter *counter) { - atomic64_set(&counter->hw.prev_count, get_page_faults(counter)); + if (counter->prev_state <= PERF_COUNTER_STATE_OFF) + atomic64_set(&counter->hw.prev_count, get_page_faults(counter)); return 0; } @@ -1501,7 +1506,9 @@ static void context_switches_perf_counter_read(struct perf_counter *counter) static int context_switches_perf_counter_enable(struct perf_counter *counter) { - atomic64_set(&counter->hw.prev_count, get_context_switches(counter)); + if (counter->prev_state <= PERF_COUNTER_STATE_OFF) + atomic64_set(&counter->hw.prev_count, + get_context_switches(counter)); return 0; } @@ -1547,7 +1554,9 @@ static void cpu_migrations_perf_counter_read(struct perf_counter *counter) static int cpu_migrations_perf_counter_enable(struct perf_counter *counter) { - atomic64_set(&counter->hw.prev_count, get_cpu_migrations(counter)); + if (counter->prev_state <= PERF_COUNTER_STATE_OFF) + atomic64_set(&counter->hw.prev_count, + get_cpu_migrations(counter)); return 0; } -- cgit v1.2.3 From 73ca2f8380311115723c7afe811f3ed1f0ba945e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 16 Feb 2009 01:08:17 +0100 Subject: 
perfcounters: remove duplicate definition of LOCAL_PERF_VECTOR Signed-off-by: Ingo Molnar --- arch/x86/include/asm/irq_vectors.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index b66b518ff00..b07278c55e9 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -111,11 +111,6 @@ */ #define LOCAL_PERF_VECTOR 0xee -/* - * Performance monitoring interrupt vector: - */ -#define LOCAL_PERF_VECTOR 0xee - /* * First APIC vector available to drivers: (vectors 0x30-0xee) we * start at 0x31(0x41) to spread out vectors evenly between priority -- cgit v1.2.3 From 37a25424252b6cff4dd4b1937ab6a1dbfcadabcc Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 16 Feb 2009 15:32:23 +0100 Subject: perfcounters: fix acpi_idle_do_entry() workaround Fix merge error in drivers/acpi/processor_idle.c. This resulted in non-working perfcounters on certain Nehalem systems. Signed-off-by: Ingo Molnar --- drivers/acpi/processor_idle.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 259f6e80631..08def2f20cd 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -826,12 +826,9 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) { u64 perf_flags; - u64 pctrl; - /* Don't trace irqs off for idle */ stop_critical_timings(); perf_flags = hw_perf_save_disable(); - pctrl = hw_perf_save_disable(); if (cx->entry_method == ACPI_CSTATE_FFH) { /* Call into architectural FFH based C-state */ acpi_processor_ffh_cstate_enter(cx); @@ -847,7 +844,6 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) unused = inl(acpi_gbl_FADT.xpm_timer_block.address); } hw_perf_restore(perf_flags); - hw_perf_restore(pctrl); start_critical_timings(); } -- cgit v1.2.3 From d095cd46dac104e4d2a4967c7c19b55a12f78240 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 23 Feb 2009 23:01:28 +1100 Subject: perfcounters/powerpc: Make exclude_kernel bit work on Apple G5 processors Currently, setting hw_event.exclude_kernel does nothing on the PPC970 variants used in Apple G5 machines, because they have the HV (hypervisor) bit in the MSR forced to 1, so as far as the PMU is concerned, the kernel runs in hypervisor mode. Thus we have to use the MMCR0_FCHV (freeze counters in hypervisor mode) bit rather than the MMCR0_FCS (freeze counters in supervisor mode) bit. This checks the MSR.HV bit at startup, and if it is set, we set the freeze_counters_kernel variable to MMCR0_FCHV (it was initialized to MMCR0_FCS). We then use that whenever we need to exclude kernel events. Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/perf_counter.c | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index bd6ba85beb5..6e27913ec0d 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -32,6 +32,15 @@ DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters); struct power_pmu *ppmu; +/* + * Normally, to ignore kernel events we set the FCS (freeze counters + * in supervisor mode) bit in MMCR0, but if the kernel runs with the + * hypervisor bit set in the MSR, or if we are running on a processor + * where the hypervisor bit is forced to 1 (as on Apple G5 processors), + * then we need to use the FCHV bit to ignore kernel events. 
+ */ +static unsigned int freeze_counters_kernel = MMCR0_FCS; + void perf_counter_print_debug(void) { } @@ -364,7 +373,7 @@ void hw_perf_restore(u64 disable) if (counter->hw_event.exclude_user) cpuhw->mmcr[0] |= MMCR0_FCP; if (counter->hw_event.exclude_kernel) - cpuhw->mmcr[0] |= MMCR0_FCS; + cpuhw->mmcr[0] |= freeze_counters_kernel; if (counter->hw_event.exclude_hv) cpuhw->mmcr[0] |= MMCR0_FCHV; @@ -606,10 +615,7 @@ hw_perf_counter_init(struct perf_counter *counter) /* * If we are not running on a hypervisor, force the * exclude_hv bit to 0 so that we don't care what - * the user set it to. This also means that we don't - * set the MMCR0_FCHV bit, which unconditionally freezes - * the counters on the PPC970 variants used in Apple G5 - * machines (since MSR.HV is always 1 on those machines). + * the user set it to. */ if (!firmware_has_feature(FW_FEATURE_LPAR)) counter->hw_event.exclude_hv = 0; @@ -841,6 +847,13 @@ static int init_perf_counters(void) ppmu = &power6_pmu; break; } + + /* + * Use FCHV to ignore kernel events if MSR.HV is set. + */ + if (mfmsr() & MSR_HV) + freeze_counters_kernel = MMCR0_FCHV; + return 0; } -- cgit v1.2.3 From 742bd95ba96e19b3f7196c3a0834ebc17c8ba006 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 24 Feb 2009 11:33:56 +1100 Subject: perfcounters/powerpc: Add support for POWER5 processors This adds the back-end for the PMU on the POWER5 processor. This knows how to use the fixed-function PMC5 and PMC6 (instructions completed and run cycles). Unlike POWER6, PMC5/6 obey the freeze conditions and can generate interrupts, so their use doesn't impose any extra restrictions. POWER5+ is different and is not supported by this patch. Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/Makefile | 3 +- arch/powerpc/kernel/perf_counter.c | 4 + arch/powerpc/kernel/power5-pmu.c | 475 +++++++++++++++++++++++++++++++++++++ 3 files changed, 481 insertions(+), 1 deletion(-) create mode 100644 arch/powerpc/kernel/power5-pmu.c diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 7c941ec3b23..b4c6f466164 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -94,7 +94,8 @@ obj-$(CONFIG_AUDIT) += audit.o obj64-$(CONFIG_AUDIT) += compat_audit.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o -obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o ppc970-pmu.o power6-pmu.o +obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o ppc970-pmu.o power5-pmu.o \ + power6-pmu.o obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 6e27913ec0d..112332d07fc 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -824,6 +824,7 @@ void hw_perf_counter_setup(int cpu) } extern struct power_pmu ppc970_pmu; +extern struct power_pmu power5_pmu; extern struct power_pmu power6_pmu; static int init_perf_counters(void) @@ -843,6 +844,9 @@ static int init_perf_counters(void) case PV_970MP: ppmu = &ppc970_pmu; break; + case PV_POWER5: + ppmu = &power5_pmu; + break; case 0x3e: ppmu = &power6_pmu; break; diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c new file mode 100644 index 00000000000..379ed1087cc --- /dev/null +++ b/arch/powerpc/kernel/power5-pmu.c @@ -0,0 +1,475 @@ +/* + * Performance counter support for POWER5 (not POWER5++) processors. + * + * Copyright 2009 Paul Mackerras, IBM Corporation. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include +#include +#include + +/* + * Bits in event code for POWER5 (not POWER5++) + */ +#define PM_PMC_SH 20 /* PMC number (1-based) for direct events */ +#define PM_PMC_MSK 0xf +#define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) +#define PM_UNIT_SH 16 /* TTMMUX number and setting - unit select */ +#define PM_UNIT_MSK 0xf +#define PM_BYTE_SH 12 /* Byte number of event bus to use */ +#define PM_BYTE_MSK 7 +#define PM_GRS_SH 8 /* Storage subsystem mux select */ +#define PM_GRS_MSK 7 +#define PM_BUSEVENT_MSK 0x80 /* Set if event uses event bus */ +#define PM_PMCSEL_MSK 0x7f + +/* Values in PM_UNIT field */ +#define PM_FPU 0 +#define PM_ISU0 1 +#define PM_IFU 2 +#define PM_ISU1 3 +#define PM_IDU 4 +#define PM_ISU0_ALT 6 +#define PM_GRS 7 +#define PM_LSU0 8 +#define PM_LSU1 0xc +#define PM_LASTUNIT 0xc + +/* + * Bits in MMCR1 for POWER5 + */ +#define MMCR1_TTM0SEL_SH 62 +#define MMCR1_TTM1SEL_SH 60 +#define MMCR1_TTM2SEL_SH 58 +#define MMCR1_TTM3SEL_SH 56 +#define MMCR1_TTMSEL_MSK 3 +#define MMCR1_TD_CP_DBG0SEL_SH 54 +#define MMCR1_TD_CP_DBG1SEL_SH 52 +#define MMCR1_TD_CP_DBG2SEL_SH 50 +#define MMCR1_TD_CP_DBG3SEL_SH 48 +#define MMCR1_GRS_L2SEL_SH 46 +#define MMCR1_GRS_L2SEL_MSK 3 +#define MMCR1_GRS_L3SEL_SH 44 +#define MMCR1_GRS_L3SEL_MSK 3 +#define MMCR1_GRS_MCSEL_SH 41 +#define MMCR1_GRS_MCSEL_MSK 7 +#define MMCR1_GRS_FABSEL_SH 39 +#define MMCR1_GRS_FABSEL_MSK 3 +#define MMCR1_PMC1_ADDER_SEL_SH 35 +#define MMCR1_PMC2_ADDER_SEL_SH 34 +#define MMCR1_PMC3_ADDER_SEL_SH 33 +#define MMCR1_PMC4_ADDER_SEL_SH 32 +#define MMCR1_PMC1SEL_SH 25 +#define MMCR1_PMC2SEL_SH 17 +#define MMCR1_PMC3SEL_SH 9 +#define MMCR1_PMC4SEL_SH 1 +#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) +#define MMCR1_PMCSEL_MSK 0x7f + +/* + * Bits in MMCRA + */ + +/* + * Layout of constraint bits: + * 6666555555555544444444443333333333222222222211111111110000000000 + * 3210987654321098765432109876543210987654321098765432109876543210 + * <><>[ ><><>< ><> [ >[ >[ >< >< >< >< ><><><><><><> + * T0T1 NC G0G1G2 G3 UC PS1PS2 B0 B1 B2 B3 P6P5P4P3P2P1 + * + * T0 - TTM0 constraint + * 54-55: TTM0SEL value (0=FPU, 2=IFU, 3=ISU1) 0xc0_0000_0000_0000 + * + * T1 - TTM1 constraint + * 52-53: TTM1SEL value (0=IDU, 3=GRS) 0x30_0000_0000_0000 + * + * NC - number of counters + * 51: NC error 0x0008_0000_0000_0000 + * 48-50: number of events needing PMC1-4 0x0007_0000_0000_0000 + * + * G0..G3 - GRS mux constraints + * 46-47: GRS_L2SEL value + * 44-45: GRS_L3SEL value + * 41-44: GRS_MCSEL value + * 39-40: GRS_FABSEL value + * Note that these match up with their bit positions in MMCR1 + * + * UC - unit constraint: can't have all three of FPU|IFU|ISU1, ISU0, IDU|GRS + * 37: UC3 error 0x20_0000_0000 + * 36: FPU|IFU|ISU1 events needed 0x10_0000_0000 + * 35: ISU0 events needed 0x08_0000_0000 + * 34: IDU|GRS events needed 0x04_0000_0000 + * + * PS1 + * 33: PS1 error 0x2_0000_0000 + * 31-32: count of events needing PMC1/2 0x1_8000_0000 + * + * PS2 + * 30: PS2 error 0x4000_0000 + * 28-29: count of events needing PMC3/4 0x3000_0000 + * + * B0 + * 24-27: Byte 0 event source 0x0f00_0000 + * Encoding as for the event code + * + * B1, B2, B3 + * 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources + * + * P1..P6 + * 0-11: Count of events needing PMC1..PMC6 + */ + +static const int grsel_shift[8] = { + 
MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, + MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, + MMCR1_GRS_MCSEL_SH, MMCR1_GRS_FABSEL_SH +}; + +/* Masks and values for using events from the various units */ +static u64 unit_cons[PM_LASTUNIT+1][2] = { + [PM_FPU] = { 0xc0002000000000ull, 0x00001000000000ull }, + [PM_ISU0] = { 0x00002000000000ull, 0x00000800000000ull }, + [PM_ISU1] = { 0xc0002000000000ull, 0xc0001000000000ull }, + [PM_IFU] = { 0xc0002000000000ull, 0x80001000000000ull }, + [PM_IDU] = { 0x30002000000000ull, 0x00000400000000ull }, + [PM_GRS] = { 0x30002000000000ull, 0x30000400000000ull }, +}; + +static int power5_get_constraint(unsigned int event, u64 *maskp, u64 *valp) +{ + int pmc, byte, unit, sh; + int bit, fmask; + u64 mask = 0, value = 0; + int grp = -1; + + pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; + if (pmc) { + if (pmc > 6) + return -1; + sh = (pmc - 1) * 2; + mask |= 2 << sh; + value |= 1 << sh; + if (pmc <= 4) + grp = (pmc - 1) >> 1; + else if (event != 0x500009 && event != 0x600005) + return -1; + } + if (event & PM_BUSEVENT_MSK) { + unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; + if (unit > PM_LASTUNIT) + return -1; + if (unit == PM_ISU0_ALT) + unit = PM_ISU0; + mask |= unit_cons[unit][0]; + value |= unit_cons[unit][1]; + byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; + if (byte >= 4) { + if (unit != PM_LSU1) + return -1; + /* Map LSU1 low word (bytes 4-7) to unit LSU1+1 */ + ++unit; + byte &= 3; + } + if (unit == PM_GRS) { + bit = event & 7; + fmask = (bit == 6)? 7: 3; + sh = grsel_shift[bit]; + mask |= (u64)fmask << sh; + value |= (u64)((event >> PM_GRS_SH) & fmask) << sh; + } + /* + * Bus events on bytes 0 and 2 can be counted + * on PMC1/2; bytes 1 and 3 on PMC3/4. + */ + if (!pmc) + grp = byte & 1; + /* Set byte lane select field */ + mask |= 0xfULL << (24 - 4 * byte); + value |= (u64)unit << (24 - 4 * byte); + } + if (grp == 0) { + /* increment PMC1/2 field */ + mask |= 0x200000000ull; + value |= 0x080000000ull; + } else if (grp == 1) { + /* increment PMC3/4 field */ + mask |= 0x40000000ull; + value |= 0x10000000ull; + } + if (pmc < 5) { + /* need a counter from PMC1-4 set */ + mask |= 0x8000000000000ull; + value |= 0x1000000000000ull; + } + *maskp = mask; + *valp = value; + return 0; +} + +#define MAX_ALT 3 /* at most 3 alternatives for any event */ + +static const unsigned int event_alternatives[][MAX_ALT] = { + { 0x120e4, 0x400002 }, /* PM_GRP_DISP_REJECT */ + { 0x410c7, 0x441084 }, /* PM_THRD_L2MISS_BOTH_CYC */ + { 0x100005, 0x600005 }, /* PM_RUN_CYC */ + { 0x100009, 0x200009, 0x500009 }, /* PM_INST_CMPL */ + { 0x300009, 0x400009 }, /* PM_INST_DISP */ +}; + +/* + * Scan the alternatives table for a match and return the + * index into the alternatives table if found, else -1. + */ +static int find_alternative(unsigned int event) +{ + int i, j; + + for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { + if (event < event_alternatives[i][0]) + break; + for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j) + if (event == event_alternatives[i][j]) + return i; + } + return -1; +} + +static const unsigned char bytedecode_alternatives[4][4] = { + /* PMC 1 */ { 0x21, 0x23, 0x25, 0x27 }, + /* PMC 2 */ { 0x07, 0x17, 0x0e, 0x1e }, + /* PMC 3 */ { 0x20, 0x22, 0x24, 0x26 }, + /* PMC 4 */ { 0x07, 0x17, 0x0e, 0x1e } +}; + +/* + * Some direct events for decodes of event bus byte 3 have alternative + * PMCSEL values on other counters. This returns the alternative + * event code for those that do, or -1 otherwise. 
+ */ +static int find_alternative_bdecode(unsigned int event) +{ + int pmc, altpmc, pp, j; + + pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; + if (pmc == 0 || pmc > 4) + return -1; + altpmc = 5 - pmc; /* 1 <-> 4, 2 <-> 3 */ + pp = event & PM_PMCSEL_MSK; + for (j = 0; j < 4; ++j) { + if (bytedecode_alternatives[pmc - 1][j] == pp) { + return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) | + (altpmc << PM_PMC_SH) | + bytedecode_alternatives[altpmc - 1][j]; + } + } + return -1; +} + +static int power5_get_alternatives(unsigned int event, unsigned int alt[]) +{ + int i, j, ae, nalt = 1; + + alt[0] = event; + nalt = 1; + i = find_alternative(event); + if (i >= 0) { + for (j = 0; j < MAX_ALT; ++j) { + ae = event_alternatives[i][j]; + if (ae && ae != event) + alt[nalt++] = ae; + } + } else { + ae = find_alternative_bdecode(event); + if (ae > 0) + alt[nalt++] = ae; + } + return nalt; +} + +static int power5_compute_mmcr(unsigned int event[], int n_ev, + unsigned int hwc[], u64 mmcr[]) +{ + u64 mmcr1 = 0; + unsigned int pmc, unit, byte, psel; + unsigned int ttm, grp; + int i, isbus, bit, grsel; + unsigned int pmc_inuse = 0; + unsigned int pmc_grp_use[2]; + unsigned char busbyte[4]; + unsigned char unituse[16]; + int ttmuse; + + if (n_ev > 6) + return -1; + + /* First pass to count resource use */ + pmc_grp_use[0] = pmc_grp_use[1] = 0; + memset(busbyte, 0, sizeof(busbyte)); + memset(unituse, 0, sizeof(unituse)); + for (i = 0; i < n_ev; ++i) { + pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; + if (pmc) { + if (pmc > 6) + return -1; + if (pmc_inuse & (1 << (pmc - 1))) + return -1; + pmc_inuse |= 1 << (pmc - 1); + /* count 1/2 vs 3/4 use */ + if (pmc <= 4) + ++pmc_grp_use[(pmc - 1) >> 1]; + } + if (event[i] & PM_BUSEVENT_MSK) { + unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; + byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; + if (unit > PM_LASTUNIT) + return -1; + if (unit == PM_ISU0_ALT) + unit = PM_ISU0; + if (byte >= 4) { + if (unit != PM_LSU1) + return -1; + ++unit; + byte &= 3; + } + if (!pmc) + ++pmc_grp_use[byte & 1]; + if (busbyte[byte] && busbyte[byte] != unit) + return -1; + busbyte[byte] = unit; + unituse[unit] = 1; + } + } + if (pmc_grp_use[0] > 2 || pmc_grp_use[1] > 2) + return -1; + + /* + * Assign resources and set multiplexer selects. + * + * PM_ISU0 can go either on TTM0 or TTM1, but that's the only + * choice we have to deal with. + */ + if (unituse[PM_ISU0] & + (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_ISU1])) { + unituse[PM_ISU0_ALT] = 1; /* move ISU to TTM1 */ + unituse[PM_ISU0] = 0; + } + /* Set TTM[01]SEL fields. */ + ttmuse = 0; + for (i = PM_FPU; i <= PM_ISU1; ++i) { + if (!unituse[i]) + continue; + if (ttmuse++) + return -1; + mmcr1 |= (u64)i << MMCR1_TTM0SEL_SH; + } + ttmuse = 0; + for (; i <= PM_GRS; ++i) { + if (!unituse[i]) + continue; + if (ttmuse++) + return -1; + mmcr1 |= (u64)(i & 3) << MMCR1_TTM1SEL_SH; + } + if (ttmuse > 1) + return -1; + + /* Set byte lane select fields, TTM[23]SEL and GRS_*SEL. 
*/ + for (byte = 0; byte < 4; ++byte) { + unit = busbyte[byte]; + if (!unit) + continue; + if (unit == PM_ISU0 && unituse[PM_ISU0_ALT]) { + /* get ISU0 through TTM1 rather than TTM0 */ + unit = PM_ISU0_ALT; + } else if (unit == PM_LSU1 + 1) { + /* select lower word of LSU1 for this byte */ + mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte); + } + ttm = unit >> 2; + mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); + } + + /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ + for (i = 0; i < n_ev; ++i) { + pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; + unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; + byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; + psel = event[i] & PM_PMCSEL_MSK; + isbus = event[i] & PM_BUSEVENT_MSK; + if (!pmc) { + /* Bus event or any-PMC direct event */ + for (pmc = 0; pmc < 4; ++pmc) { + if (pmc_inuse & (1 << pmc)) + continue; + grp = (pmc >> 1) & 1; + if (isbus) { + if (grp == (byte & 1)) + break; + } else if (pmc_grp_use[grp] < 2) { + ++pmc_grp_use[grp]; + break; + } + } + pmc_inuse |= 1 << pmc; + } else if (pmc <= 4) { + /* Direct event */ + --pmc; + if ((psel == 8 || psel == 0x10) && isbus && (byte & 2)) + /* add events on higher-numbered bus */ + mmcr1 |= 1ull << (MMCR1_PMC1_ADDER_SEL_SH - pmc); + } else { + /* Instructions or run cycles on PMC5/6 */ + --pmc; + } + if (isbus && unit == PM_GRS) { + bit = psel & 7; + grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; + mmcr1 |= (u64)grsel << grsel_shift[bit]; + } + if (pmc <= 3) + mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc); + hwc[i] = pmc; + } + + /* Return MMCRx values */ + mmcr[0] = 0; + if (pmc_inuse & 1) + mmcr[0] = MMCR0_PMC1CE; + if (pmc_inuse & 0x3e) + mmcr[0] |= MMCR0_PMCjCE; + mmcr[1] = mmcr1; + mmcr[2] = 0; + return 0; +} + +static void power5_disable_pmc(unsigned int pmc, u64 mmcr[]) +{ + if (pmc <= 3) + mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc)); +} + +static int power5_generic_events[] = { + [PERF_COUNT_CPU_CYCLES] = 0xf, + [PERF_COUNT_INSTRUCTIONS] = 0x100009, + [PERF_COUNT_CACHE_REFERENCES] = 0x4c1090, /* LD_REF_L1 */ + [PERF_COUNT_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */ + [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */ + [PERF_COUNT_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */ +}; + +struct power_pmu power5_pmu = { + .n_counter = 6, + .max_alternatives = MAX_ALT, + .add_fields = 0x7000090000555ull, + .test_adder = 0x3000490000000ull, + .compute_mmcr = power5_compute_mmcr, + .get_constraint = power5_get_constraint, + .get_alternatives = power5_get_alternatives, + .disable_pmc = power5_disable_pmc, + .n_generic = ARRAY_SIZE(power5_generic_events), + .generic_events = power5_generic_events, +}; -- cgit v1.2.3 From f3dfd2656deb81a0addee4f4ceff66b50a387388 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 26 Feb 2009 22:43:46 +1100 Subject: perfcounters: fix a few minor cleanliness issues This fixes three issues noticed by Arnd Bergmann: - Add #ifdef __KERNEL__ and move some things around in perf_counter.h to make sure only the bits that userspace needs are exported to userspace. - Use __u64, __s64, __u32 types in the structs exported to userspace rather than u64, s64, u32. - Make the sys_perf_counter_open syscall available to the SPUs on Cell platforms. And one issue that I noticed in looking at the code again: - Wrap the perf_counter_open syscall with SYSCALL_DEFINE4 so we get the proper handling of int arguments on ppc64 (and some other 64-bit architectures). 
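To illustrate the last point: in distilled form (matching the kernel/perf_counter.c hunk in the diff below), the syscall definition becomes

SYSCALL_DEFINE4(perf_counter_open,
		const struct perf_counter_hw_event __user *, hw_event_uptr,
		pid_t, pid, int, cpu, int, group_fd)
{
	/* body unchanged; see the hunk below */
}

so that the SYSCALL_DEFINE machinery generates a wrapper in which the int arguments are handled correctly on ppc64 and similar 64-bit architectures.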
Reported-by: Arnd Bergmann Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/systbl.h | 2 +- include/linux/perf_counter.h | 43 +++++++++++++++++++++------------------ include/linux/syscalls.h | 9 +++----- kernel/perf_counter.c | 6 +++--- 4 files changed, 30 insertions(+), 30 deletions(-) diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 4c8095f6bec..d312eec8abb 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -322,4 +322,4 @@ SYSCALL_SPU(epoll_create1) SYSCALL_SPU(dup3) SYSCALL_SPU(pipe2) SYSCALL(inotify_init1) -SYSCALL(perf_counter_open) +SYSCALL_SPU(perf_counter_open) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 32cd1acb738..186efaf4966 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -13,20 +13,8 @@ #ifndef _LINUX_PERF_COUNTER_H #define _LINUX_PERF_COUNTER_H -#include -#include - -#ifdef CONFIG_PERF_COUNTERS -# include -#endif - -#include -#include -#include -#include -#include - -struct task_struct; +#include +#include /* * User-space ABI bits: @@ -78,12 +66,12 @@ enum perf_counter_record_type { * Hardware event to monitor via a performance monitoring counter: */ struct perf_counter_hw_event { - s64 type; + __s64 type; - u64 irq_period; - u32 record_type; + __u64 irq_period; + __u32 record_type; - u32 disabled : 1, /* off by default */ + __u32 disabled : 1, /* off by default */ nmi : 1, /* NMI sampling */ raw : 1, /* raw event type */ inherit : 1, /* children inherit it */ @@ -95,7 +83,7 @@ struct perf_counter_hw_event { __reserved_1 : 23; - u64 __reserved_2; + __u64 __reserved_2; }; /* @@ -104,10 +92,24 @@ struct perf_counter_hw_event { #define PERF_COUNTER_IOC_ENABLE _IO('$', 0) #define PERF_COUNTER_IOC_DISABLE _IO('$', 1) +#ifdef __KERNEL__ /* - * Kernel-internal data types: + * Kernel-internal data types and definitions: */ +#ifdef CONFIG_PERF_COUNTERS +# include +#endif + +#include +#include +#include +#include +#include +#include + +struct task_struct; + /** * struct hw_perf_counter - performance counter hardware details: */ @@ -293,4 +295,5 @@ static inline int perf_counter_task_disable(void) { return -EINVAL; } static inline int perf_counter_task_enable(void) { return -EINVAL; } #endif +#endif /* __KERNEL__ */ #endif /* _LINUX_PERF_COUNTER_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 88255d3261a..28ef2be839c 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -696,10 +696,7 @@ asmlinkage long sys_pipe(int __user *); int kernel_execve(const char *filename, char *const argv[], char *const envp[]); -asmlinkage int sys_perf_counter_open( - - struct perf_counter_hw_event *hw_event_uptr __user, - pid_t pid, - int cpu, - int group_fd); +asmlinkage long sys_perf_counter_open( + const struct perf_counter_hw_event __user *hw_event_uptr, + pid_t pid, int cpu, int group_fd); #endif diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index ad62965828d..16b14ba99d3 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1690,9 +1690,9 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, * @cpu: target cpu * @group_fd: group leader counter fd */ -asmlinkage int -sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr __user, - pid_t pid, int cpu, int group_fd) +SYSCALL_DEFINE4(perf_counter_open, + const struct perf_counter_hw_event __user *, hw_event_uptr, + pid_t, pid, int, cpu, int, group_fd) { struct perf_counter *counter, *group_leader; struct 
perf_counter_hw_event hw_event; -- cgit v1.2.3 From b56a3802dc6df29aa27d2c12edf420258091ad66 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Fri, 27 Feb 2009 18:09:09 +0530 Subject: x86: prepare perf_counter to add more cpus Introduced struct pmc_x86_ops to add more cpus. Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 106 +++++++++++++++++++++++++++---------- 1 file changed, 78 insertions(+), 28 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 383d4c6423a..a3c88529bb7 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -3,6 +3,7 @@ * * Copyright(C) 2008 Thomas Gleixner * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar + * Copyright(C) 2009 Jaswinder Singh Rajput * * For licencing details see kernel-base/COPYING */ @@ -38,10 +39,24 @@ struct cpu_hw_counters { }; /* - * Intel PerfMon v3. Used on Core2 and later. + * struct pmc_x86_ops - performance counter x86 ops */ +struct pmc_x86_ops { + u64 (*save_disable_all) (void); + void (*restore_all) (u64 ctrl); + unsigned eventsel; + unsigned perfctr; + int (*event_map) (int event); + int max_events; +}; + +static struct pmc_x86_ops *pmc_ops; + static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters); +/* + * Intel PerfMon v3. Used on Core2 and later. + */ static const int intel_perfmon_event_map[] = { [PERF_COUNT_CPU_CYCLES] = 0x003c, @@ -53,7 +68,10 @@ static const int intel_perfmon_event_map[] = [PERF_COUNT_BUS_CYCLES] = 0x013c, }; -static const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map); +static int pmc_intel_event_map(int event) +{ + return intel_perfmon_event_map[event]; +} /* * Propagate counter elapsed time into the generic counter. 
@@ -144,38 +162,48 @@ static int __hw_perf_counter_init(struct perf_counter *counter) if (hw_event->raw) { hwc->config |= hw_event->type; } else { - if (hw_event->type >= max_intel_perfmon_events) + if (hw_event->type >= pmc_ops->max_events) return -EINVAL; /* * The generic map: */ - hwc->config |= intel_perfmon_event_map[hw_event->type]; + hwc->config |= pmc_ops->event_map(hw_event->type); } counter->wakeup_pending = 0; return 0; } -u64 hw_perf_save_disable(void) +static u64 pmc_intel_save_disable_all(void) { u64 ctrl; - if (unlikely(!perf_counters_initialized)) - return 0; - rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); return ctrl; } + +u64 hw_perf_save_disable(void) +{ + if (unlikely(!perf_counters_initialized)) + return 0; + + return pmc_ops->save_disable_all(); +} EXPORT_SYMBOL_GPL(hw_perf_save_disable); +static void pmc_intel_restore_all(u64 ctrl) +{ + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); +} + void hw_perf_restore(u64 ctrl) { if (unlikely(!perf_counters_initialized)) return; - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); + pmc_ops->restore_all(ctrl); } EXPORT_SYMBOL_GPL(hw_perf_restore); @@ -291,11 +319,11 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) event = hwc->config & ARCH_PERFMON_EVENT_MASK; - if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_INSTRUCTIONS])) + if (unlikely(event == pmc_ops->event_map(PERF_COUNT_INSTRUCTIONS))) return X86_PMC_IDX_FIXED_INSTRUCTIONS; - if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_CPU_CYCLES])) + if (unlikely(event == pmc_ops->event_map(PERF_COUNT_CPU_CYCLES))) return X86_PMC_IDX_FIXED_CPU_CYCLES; - if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_BUS_CYCLES])) + if (unlikely(event == pmc_ops->event_map(PERF_COUNT_BUS_CYCLES))) return X86_PMC_IDX_FIXED_BUS_CYCLES; return -1; @@ -339,8 +367,8 @@ try_generic: set_bit(idx, cpuc->used); hwc->idx = idx; } - hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0; - hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0; + hwc->config_base = pmc_ops->eventsel; + hwc->counter_base = pmc_ops->perfctr; } perf_counters_lapic_init(hwc->nmi); @@ -386,8 +414,8 @@ void perf_counter_print_debug(void) printk(KERN_INFO "CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used); for (idx = 0; idx < nr_counters_generic; idx++) { - rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl); - rdmsrl(MSR_ARCH_PERFMON_PERFCTR0 + idx, pmc_count); + rdmsrl(pmc_ops->eventsel + idx, pmc_ctrl); + rdmsrl(pmc_ops->perfctr + idx, pmc_count); prev_left = per_cpu(prev_left[idx], cpu); @@ -655,29 +683,56 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = { .priority = 1 }; -void __init init_hw_perf_counters(void) +static struct pmc_x86_ops pmc_intel_ops = { + .save_disable_all = pmc_intel_save_disable_all, + .restore_all = pmc_intel_restore_all, + .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, + .perfctr = MSR_ARCH_PERFMON_PERFCTR0, + .event_map = pmc_intel_event_map, + .max_events = ARRAY_SIZE(intel_perfmon_event_map), +}; + +static struct pmc_x86_ops *pmc_intel_init(void) { union cpuid10_eax eax; unsigned int ebx; unsigned int unused; union cpuid10_edx edx; - if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) - return; - /* * Check whether the Architectural PerfMon supports * Branch Misses Retired Event or not. */ cpuid(10, &eax.full, &ebx, &unused, &edx.full); if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) - return; + return NULL; printk(KERN_INFO "Intel Performance Monitoring support detected.\n"); - printk(KERN_INFO "... 
version: %d\n", eax.split.version_id); - printk(KERN_INFO "... num counters: %d\n", eax.split.num_counters); + printk(KERN_INFO "... bit width: %d\n", eax.split.bit_width); + printk(KERN_INFO "... mask length: %d\n", eax.split.mask_length); + nr_counters_generic = eax.split.num_counters; + nr_counters_fixed = edx.split.num_counters_fixed; + counter_value_mask = (1ULL << eax.split.bit_width) - 1; + + return &pmc_intel_ops; +} + +void __init init_hw_perf_counters(void) +{ + if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) + return; + + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_INTEL: + pmc_ops = pmc_intel_init(); + break; + } + if (!pmc_ops) + return; + + printk(KERN_INFO "... num counters: %d\n", nr_counters_generic); if (nr_counters_generic > X86_PMC_MAX_GENERIC) { nr_counters_generic = X86_PMC_MAX_GENERIC; WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", @@ -686,13 +741,8 @@ void __init init_hw_perf_counters(void) perf_counter_mask = (1 << nr_counters_generic) - 1; perf_max_counters = nr_counters_generic; - printk(KERN_INFO "... bit width: %d\n", eax.split.bit_width); - counter_value_mask = (1ULL << eax.split.bit_width) - 1; printk(KERN_INFO "... value mask: %016Lx\n", counter_value_mask); - printk(KERN_INFO "... mask length: %d\n", eax.split.mask_length); - - nr_counters_fixed = edx.split.num_counters_fixed; if (nr_counters_fixed > X86_PMC_MAX_FIXED) { nr_counters_fixed = X86_PMC_MAX_FIXED; WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!", -- cgit v1.2.3 From f87ad35d37fa543925210550f7db20a54c83ed70 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Fri, 27 Feb 2009 20:15:14 +0530 Subject: x86: AMD Support for perf_counter Supported basic performance counter for AMD K7 and later: $ perfstat -e 0,1,2,3,4,5,-1,-2,-3,-4,-5 ls > /dev/null Performance counter stats for 'ls': 12.298610 task clock ticks (msecs) 3298477 CPU cycles (events) 1406354 instructions (events) 749035 cache references (events) 16939 cache misses (events) 100589 branches (events) 11159 branch misses (events) 7.627540 cpu clock ticks (msecs) 12.298610 task clock ticks (msecs) 500 pagefaults (events) 6 context switches (events) 3 CPU migrations (events) Wall-clock time elapsed: 8.672290 msecs Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/amd.c | 4 ++ arch/x86/kernel/cpu/perf_counter.c | 83 +++++++++++++++++++++++++++++++++++++- 2 files changed, 85 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 25423a5b80e..edcde52bd17 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -368,6 +368,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) if (c->x86 >= 6) set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK); + /* Enable Performance counter for K7 and later */ + if (c->x86 > 6 && c->x86 <= 0x11) + set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); + if (!c->x86_model_id[0]) { switch (c->x86) { case 0xf: diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index a3c88529bb7..266618aa1a0 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -73,6 +73,24 @@ static int pmc_intel_event_map(int event) return intel_perfmon_event_map[event]; } +/* + * AMD Performance Monitor K7 and later. 
+ */ +static const int amd_perfmon_event_map[] = +{ + [PERF_COUNT_CPU_CYCLES] = 0x0076, + [PERF_COUNT_INSTRUCTIONS] = 0x00c0, + [PERF_COUNT_CACHE_REFERENCES] = 0x0080, + [PERF_COUNT_CACHE_MISSES] = 0x0081, + [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x00c4, + [PERF_COUNT_BRANCH_MISSES] = 0x00c5, +}; + +static int pmc_amd_event_map(int event) +{ + return amd_perfmon_event_map[event]; +} + /* * Propagate counter elapsed time into the generic counter. * Can only be executed on the CPU where the counter is active. @@ -151,8 +169,9 @@ static int __hw_perf_counter_init(struct perf_counter *counter) * so we install an artificial 1<<31 period regardless of * the generic counter period: */ - if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF) - hwc->irq_period = 0x7FFFFFFF; + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF) + hwc->irq_period = 0x7FFFFFFF; atomic64_set(&hwc->period_left, hwc->irq_period); @@ -184,6 +203,22 @@ static u64 pmc_intel_save_disable_all(void) return ctrl; } +static u64 pmc_amd_save_disable_all(void) +{ + int idx; + u64 val, ctrl = 0; + + for (idx = 0; idx < nr_counters_generic; idx++) { + rdmsrl(MSR_K7_EVNTSEL0 + idx, val); + if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) + ctrl |= (1 << idx); + val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; + wrmsrl(MSR_K7_EVNTSEL0 + idx, val); + } + + return ctrl; +} + u64 hw_perf_save_disable(void) { if (unlikely(!perf_counters_initialized)) @@ -198,6 +233,20 @@ static void pmc_intel_restore_all(u64 ctrl) wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); } +static void pmc_amd_restore_all(u64 ctrl) +{ + u64 val; + int idx; + + for (idx = 0; idx < nr_counters_generic; idx++) { + if (ctrl & (1 << idx)) { + rdmsrl(MSR_K7_EVNTSEL0 + idx, val); + val |= ARCH_PERFMON_EVENTSEL0_ENABLE; + wrmsrl(MSR_K7_EVNTSEL0 + idx, val); + } + } +} + void hw_perf_restore(u64 ctrl) { if (unlikely(!perf_counters_initialized)) @@ -314,6 +363,9 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) { unsigned int event; + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + return -1; + if (unlikely(hwc->nmi)) return -1; @@ -401,6 +453,7 @@ void perf_counter_print_debug(void) cpu = smp_processor_id(); cpuc = &per_cpu(cpu_hw_counters, cpu); + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); @@ -411,6 +464,7 @@ void perf_counter_print_debug(void) printk(KERN_INFO "CPU#%d: status: %016llx\n", cpu, status); printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow); printk(KERN_INFO "CPU#%d: fixed: %016llx\n", cpu, fixed); + } printk(KERN_INFO "CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used); for (idx = 0; idx < nr_counters_generic; idx++) { @@ -588,6 +642,9 @@ void perf_counter_unthrottle(void) if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) return; + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + return; + if (unlikely(!perf_counters_initialized)) return; @@ -692,6 +749,15 @@ static struct pmc_x86_ops pmc_intel_ops = { .max_events = ARRAY_SIZE(intel_perfmon_event_map), }; +static struct pmc_x86_ops pmc_amd_ops = { + .save_disable_all = pmc_amd_save_disable_all, + .restore_all = pmc_amd_restore_all, + .eventsel = MSR_K7_EVNTSEL0, + .perfctr = MSR_K7_PERFCTR0, + .event_map = pmc_amd_event_map, + .max_events = ARRAY_SIZE(amd_perfmon_event_map), +}; + static struct pmc_x86_ops *pmc_intel_init(void) { union cpuid10_eax eax; @@ -719,6 +785,16 @@ static 
struct pmc_x86_ops *pmc_intel_init(void) return &pmc_intel_ops; } +static struct pmc_x86_ops *pmc_amd_init(void) +{ + nr_counters_generic = 4; + nr_counters_fixed = 0; + + printk(KERN_INFO "AMD Performance Monitoring support detected.\n"); + + return &pmc_amd_ops; +} + void __init init_hw_perf_counters(void) { if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) @@ -728,6 +804,9 @@ void __init init_hw_perf_counters(void) case X86_VENDOR_INTEL: pmc_ops = pmc_intel_init(); break; + case X86_VENDOR_AMD: + pmc_ops = pmc_amd_init(); + break; } if (!pmc_ops) return; -- cgit v1.2.3 From 169e41eb7f5464c077a7e0e129f025759d04cc54 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 28 Feb 2009 18:37:49 +0530 Subject: x86: decent declarations in perf_counter.c Impact: cleanup making decent declarations for struct pmc_x86_ops and fixing the checkpatch error: ERROR: Macros with complex values should be enclosed in parenthesis Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 266618aa1a0..a1f3646a3e8 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -42,12 +42,12 @@ struct cpu_hw_counters { * struct pmc_x86_ops - performance counter x86 ops */ struct pmc_x86_ops { - u64 (*save_disable_all) (void); - void (*restore_all) (u64 ctrl); - unsigned eventsel; - unsigned perfctr; - int (*event_map) (int event); - int max_events; + u64 (*save_disable_all)(void); + void (*restore_all)(u64 ctrl); + unsigned eventsel; + unsigned perfctr; + int (*event_map)(int event); + int max_events; }; static struct pmc_x86_ops *pmc_ops; @@ -561,7 +561,7 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown) /* * Maximum interrupt frequency of 100KHz per CPU */ -#define PERFMON_MAX_INTERRUPTS 100000/HZ +#define PERFMON_MAX_INTERRUPTS (100000/HZ) /* * This handler is triggered by the local APIC, so the APIC IRQ handling -- cgit v1.2.3 From a1ef58f442542d8b3e3b963339fbc522c36e827c Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 28 Feb 2009 18:45:39 +0530 Subject: x86: use pr_info in perf_counter.c Impact: cleanup Using pr_info in perf_counter.c fixes various 80-character line warnings and also fixes the indentation of a conditional statement Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 48 +++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index a1f3646a3e8..3b65f19a668 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -454,18 +454,18 @@ void perf_counter_print_debug(void) cpuc = &per_cpu(cpu_hw_counters, cpu); if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { - rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); - rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); - rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); - rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed); - - printk(KERN_INFO "\n"); - printk(KERN_INFO "CPU#%d: ctrl: %016llx\n", cpu, ctrl); - printk(KERN_INFO "CPU#%d: status: %016llx\n", cpu, status); - printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow); - printk(KERN_INFO "CPU#%d: fixed: %016llx\n", cpu, fixed); + rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); + rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); +
rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); + rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed); + + pr_info("\n"); + pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl); + pr_info("CPU#%d: status: %016llx\n", cpu, status); + pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow); + pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed); } - printk(KERN_INFO "CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used); + pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used); for (idx = 0; idx < nr_counters_generic; idx++) { rdmsrl(pmc_ops->eventsel + idx, pmc_ctrl); @@ -473,17 +473,17 @@ void perf_counter_print_debug(void) prev_left = per_cpu(prev_left[idx], cpu); - printk(KERN_INFO "CPU#%d: gen-PMC%d ctrl: %016llx\n", + pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n", cpu, idx, pmc_ctrl); - printk(KERN_INFO "CPU#%d: gen-PMC%d count: %016llx\n", + pr_info("CPU#%d: gen-PMC%d count: %016llx\n", cpu, idx, pmc_count); - printk(KERN_INFO "CPU#%d: gen-PMC%d left: %016llx\n", + pr_info("CPU#%d: gen-PMC%d left: %016llx\n", cpu, idx, prev_left); } for (idx = 0; idx < nr_counters_fixed; idx++) { rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count); - printk(KERN_INFO "CPU#%d: fixed-PMC%d count: %016llx\n", + pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", cpu, idx, pmc_count); } local_irq_enable(); @@ -773,10 +773,10 @@ static struct pmc_x86_ops *pmc_intel_init(void) if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) return NULL; - printk(KERN_INFO "Intel Performance Monitoring support detected.\n"); - printk(KERN_INFO "... version: %d\n", eax.split.version_id); - printk(KERN_INFO "... bit width: %d\n", eax.split.bit_width); - printk(KERN_INFO "... mask length: %d\n", eax.split.mask_length); + pr_info("Intel Performance Monitoring support detected.\n"); + pr_info("... version: %d\n", eax.split.version_id); + pr_info("... bit width: %d\n", eax.split.bit_width); + pr_info("... mask length: %d\n", eax.split.mask_length); nr_counters_generic = eax.split.num_counters; nr_counters_fixed = edx.split.num_counters_fixed; @@ -790,7 +790,7 @@ static struct pmc_x86_ops *pmc_amd_init(void) nr_counters_generic = 4; nr_counters_fixed = 0; - printk(KERN_INFO "AMD Performance Monitoring support detected.\n"); + pr_info("AMD Performance Monitoring support detected.\n"); return &pmc_amd_ops; } @@ -811,7 +811,7 @@ void __init init_hw_perf_counters(void) if (!pmc_ops) return; - printk(KERN_INFO "... num counters: %d\n", nr_counters_generic); + pr_info("... num counters: %d\n", nr_counters_generic); if (nr_counters_generic > X86_PMC_MAX_GENERIC) { nr_counters_generic = X86_PMC_MAX_GENERIC; WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", @@ -820,18 +820,18 @@ void __init init_hw_perf_counters(void) perf_counter_mask = (1 << nr_counters_generic) - 1; perf_max_counters = nr_counters_generic; - printk(KERN_INFO "... value mask: %016Lx\n", counter_value_mask); + pr_info("... value mask: %016Lx\n", counter_value_mask); if (nr_counters_fixed > X86_PMC_MAX_FIXED) { nr_counters_fixed = X86_PMC_MAX_FIXED; WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!", nr_counters_fixed, X86_PMC_MAX_FIXED); } - printk(KERN_INFO "... fixed counters: %d\n", nr_counters_fixed); + pr_info("... fixed counters: %d\n", nr_counters_fixed); perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED; - printk(KERN_INFO "... counter mask: %016Lx\n", perf_counter_mask); + pr_info("... 
counter mask: %016Lx\n", perf_counter_mask); perf_counters_initialized = true; perf_counters_lapic_init(0); -- cgit v1.2.3 From 2743a5b0fa6f309da904f2190a9cc25deee34dbd Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 4 Mar 2009 20:36:51 +1100 Subject: perfcounters: provide expansion room in the ABI Impact: ABI change This expands several fields in the perf_counter_hw_event struct and adds a "flags" argument to the perf_counter_open system call, in order that features can be added in future without ABI changes. In particular the record_type field is expanded to 64 bits, and the space for flag bits has been expanded from 32 to 64 bits. This also adds some new fields: * read_format (64 bits) is intended to provide a way to specify what userspace wants to get back when it does a read() on a simple (non-interrupting) counter; * exclude_idle (1 bit) provides a way for userspace to ask that events that occur when the cpu is idle be excluded; * extra_config_len will provide a way for userspace to supply an arbitrary amount of extra machine-specific PMU configuration data immediately following the perf_counter_hw_event struct, to allow sophisticated users to program things such as instruction matching CAMs and address range registers; * __reserved_3 and __reserved_4 provide space for future expansion. Signed-off-by: Paul Mackerras --- include/linux/perf_counter.h | 12 +++++++++--- include/linux/syscalls.h | 2 +- kernel/perf_counter.c | 10 +++++++--- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 186efaf4966..c42455ab155 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -69,9 +69,10 @@ struct perf_counter_hw_event { __s64 type; __u64 irq_period; - __u32 record_type; + __u64 record_type; + __u64 read_format; - __u32 disabled : 1, /* off by default */ + __u64 disabled : 1, /* off by default */ nmi : 1, /* NMI sampling */ raw : 1, /* raw event type */ inherit : 1, /* children inherit it */ @@ -80,10 +81,15 @@ struct perf_counter_hw_event { exclude_user : 1, /* don't count user */ exclude_kernel : 1, /* ditto kernel */ exclude_hv : 1, /* ditto hypervisor */ + exclude_idle : 1, /* don't count when idle */ - __reserved_1 : 23; + __reserved_1 : 55; + + __u32 extra_config_len; + __u32 __reserved_4; __u64 __reserved_2; + __u64 __reserved_3; }; /* diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 28ef2be839c..ab1d7724739 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -698,5 +698,5 @@ int kernel_execve(const char *filename, char *const argv[], char *const envp[]); asmlinkage long sys_perf_counter_open( const struct perf_counter_hw_event __user *hw_event_uptr, - pid_t pid, int cpu, int group_fd); + pid_t pid, int cpu, int group_fd, unsigned long flags); #endif diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 16b14ba99d3..b2e838959f3 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1683,16 +1683,16 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, } /** - * sys_perf_task_open - open a performance counter, associate it to a task/cpu + * sys_perf_counter_open - open a performance counter, associate it to a task/cpu * * @hw_event_uptr: event type attributes for monitoring/sampling * @pid: target pid * @cpu: target cpu * @group_fd: group leader counter fd */ -SYSCALL_DEFINE4(perf_counter_open, +SYSCALL_DEFINE5(perf_counter_open, const struct perf_counter_hw_event __user *, hw_event_uptr, - pid_t, pid, int, 
cpu, int, group_fd) + pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) { struct perf_counter *counter, *group_leader; struct perf_counter_hw_event hw_event; @@ -1703,6 +1703,10 @@ SYSCALL_DEFINE4(perf_counter_open, int fput_needed2 = 0; int ret; + /* for future expandability... */ + if (flags) + return -EINVAL; + if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0) return -EFAULT; -- cgit v1.2.3 From 2485e5184452ccbda34ff83883883d9107800dff Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 5 Mar 2009 12:33:16 +0100 Subject: perfcounters: fix reserved bits sizing The sum of bits is 65 currently not 64 - so reduce the # of reserved bits from 55 to 54. Cc: Paul Mackerras Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index c42455ab155..dde564517b6 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -83,7 +83,7 @@ struct perf_counter_hw_event { exclude_hv : 1, /* ditto hypervisor */ exclude_idle : 1, /* don't count when idle */ - __reserved_1 : 55; + __reserved_1 : 54; __u32 extra_config_len; __u32 __reserved_4; -- cgit v1.2.3 From b0f3f28e0f14eb335f67bfaae33ce8b8d74fd58b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 5 Mar 2009 18:08:27 +0100 Subject: perfcounters: IRQ and NMI support on AMD CPUs The below completes the K7+ performance counter support: - IRQ support - NMI support KernelTop output works now as well. Signed-off-by: Peter Zijlstra Cc: Jaswinder Singh Rajput Cc: Paul Mackerras LKML-Reference: <1236273633.5187.286.camel@laptop> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 272 +++++++++++++++++++++++++++++++------ 1 file changed, 228 insertions(+), 44 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 3b65f19a668..6ebe9abf6ae 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -28,6 +28,7 @@ static bool perf_counters_initialized __read_mostly; static int nr_counters_generic __read_mostly; static u64 perf_counter_mask __read_mostly; static u64 counter_value_mask __read_mostly; +static int counter_value_bits __read_mostly; static int nr_counters_fixed __read_mostly; @@ -35,7 +36,9 @@ struct cpu_hw_counters { struct perf_counter *counters[X86_PMC_IDX_MAX]; unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; unsigned long interrupts; - u64 global_enable; + u64 throttle_ctrl; + u64 active_mask; + int enabled; }; /* @@ -43,21 +46,28 @@ struct cpu_hw_counters { */ struct pmc_x86_ops { u64 (*save_disable_all)(void); - void (*restore_all)(u64 ctrl); + void (*restore_all)(u64); + u64 (*get_status)(u64); + void (*ack_status)(u64); + void (*enable)(int, u64); + void (*disable)(int, u64); unsigned eventsel; unsigned perfctr; - int (*event_map)(int event); + u64 (*event_map)(int); + u64 (*raw_event)(u64); int max_events; }; static struct pmc_x86_ops *pmc_ops; -static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters); +static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { + .enabled = 1, +}; /* * Intel PerfMon v3. Used on Core2 and later. 
*/ -static const int intel_perfmon_event_map[] = +static const u64 intel_perfmon_event_map[] = { [PERF_COUNT_CPU_CYCLES] = 0x003c, [PERF_COUNT_INSTRUCTIONS] = 0x00c0, @@ -68,15 +78,29 @@ static const int intel_perfmon_event_map[] = [PERF_COUNT_BUS_CYCLES] = 0x013c, }; -static int pmc_intel_event_map(int event) +static u64 pmc_intel_event_map(int event) { return intel_perfmon_event_map[event]; } +static u64 pmc_intel_raw_event(u64 event) +{ +#define CORE_EVNTSEL_EVENT_MASK 0x000000FF +#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00 +#define CORE_EVNTSEL_COUNTER_MASK 0xFF000000 + +#define CORE_EVNTSEL_MASK \ + (CORE_EVNTSEL_EVENT_MASK | \ + CORE_EVNTSEL_UNIT_MASK | \ + CORE_EVNTSEL_COUNTER_MASK) + + return event & CORE_EVNTSEL_MASK; +} + /* * AMD Performance Monitor K7 and later. */ -static const int amd_perfmon_event_map[] = +static const u64 amd_perfmon_event_map[] = { [PERF_COUNT_CPU_CYCLES] = 0x0076, [PERF_COUNT_INSTRUCTIONS] = 0x00c0, @@ -86,11 +110,25 @@ static const int amd_perfmon_event_map[] = [PERF_COUNT_BRANCH_MISSES] = 0x00c5, }; -static int pmc_amd_event_map(int event) +static u64 pmc_amd_event_map(int event) { return amd_perfmon_event_map[event]; } +static u64 pmc_amd_raw_event(u64 event) +{ +#define K7_EVNTSEL_EVENT_MASK 0x7000000FF +#define K7_EVNTSEL_UNIT_MASK 0x00000FF00 +#define K7_EVNTSEL_COUNTER_MASK 0x0FF000000 + +#define K7_EVNTSEL_MASK \ + (K7_EVNTSEL_EVENT_MASK | \ + K7_EVNTSEL_UNIT_MASK | \ + K7_EVNTSEL_COUNTER_MASK) + + return event & K7_EVNTSEL_MASK; +} + /* * Propagate counter elapsed time into the generic counter. * Can only be executed on the CPU where the counter is active. @@ -179,7 +217,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter) * Raw event type provide the config in the event structure */ if (hw_event->raw) { - hwc->config |= hw_event->type; + hwc->config |= pmc_ops->raw_event(hw_event->type); } else { if (hw_event->type >= pmc_ops->max_events) return -EINVAL; @@ -205,18 +243,24 @@ static u64 pmc_intel_save_disable_all(void) static u64 pmc_amd_save_disable_all(void) { - int idx; - u64 val, ctrl = 0; + struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + int enabled, idx; + + enabled = cpuc->enabled; + cpuc->enabled = 0; + barrier(); for (idx = 0; idx < nr_counters_generic; idx++) { + u64 val; + rdmsrl(MSR_K7_EVNTSEL0 + idx, val); - if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) - ctrl |= (1 << idx); - val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; - wrmsrl(MSR_K7_EVNTSEL0 + idx, val); + if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) { + val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; + wrmsrl(MSR_K7_EVNTSEL0 + idx, val); + } } - return ctrl; + return enabled; } u64 hw_perf_save_disable(void) @@ -226,6 +270,9 @@ u64 hw_perf_save_disable(void) return pmc_ops->save_disable_all(); } +/* + * Exported because of ACPI idle + */ EXPORT_SYMBOL_GPL(hw_perf_save_disable); static void pmc_intel_restore_all(u64 ctrl) @@ -235,11 +282,18 @@ static void pmc_intel_restore_all(u64 ctrl) static void pmc_amd_restore_all(u64 ctrl) { - u64 val; + struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); int idx; + cpuc->enabled = ctrl; + barrier(); + if (!ctrl) + return; + for (idx = 0; idx < nr_counters_generic; idx++) { - if (ctrl & (1 << idx)) { + if (test_bit(idx, (unsigned long *)&cpuc->active_mask)) { + u64 val; + rdmsrl(MSR_K7_EVNTSEL0 + idx, val); val |= ARCH_PERFMON_EVENTSEL0_ENABLE; wrmsrl(MSR_K7_EVNTSEL0 + idx, val); @@ -254,8 +308,112 @@ void hw_perf_restore(u64 ctrl) pmc_ops->restore_all(ctrl); } +/* + * Exported because of ACPI idle + */ 
EXPORT_SYMBOL_GPL(hw_perf_restore); +static u64 pmc_intel_get_status(u64 mask) +{ + u64 status; + + rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); + + return status; +} + +static u64 pmc_amd_get_status(u64 mask) +{ + u64 status = 0; + int idx; + + for (idx = 0; idx < nr_counters_generic; idx++) { + s64 val; + + if (!(mask & (1 << idx))) + continue; + + rdmsrl(MSR_K7_PERFCTR0 + idx, val); + val <<= (64 - counter_value_bits); + if (val >= 0) + status |= (1 << idx); + } + + return status; +} + +static u64 hw_perf_get_status(u64 mask) +{ + if (unlikely(!perf_counters_initialized)) + return 0; + + return pmc_ops->get_status(mask); +} + +static void pmc_intel_ack_status(u64 ack) +{ + wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); +} + +static void pmc_amd_ack_status(u64 ack) +{ +} + +static void hw_perf_ack_status(u64 ack) +{ + if (unlikely(!perf_counters_initialized)) + return; + + pmc_ops->ack_status(ack); +} + +static void pmc_intel_enable(int idx, u64 config) +{ + wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, + config | ARCH_PERFMON_EVENTSEL0_ENABLE); +} + +static void pmc_amd_enable(int idx, u64 config) +{ + struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + + set_bit(idx, (unsigned long *)&cpuc->active_mask); + if (cpuc->enabled) + config |= ARCH_PERFMON_EVENTSEL0_ENABLE; + + wrmsrl(MSR_K7_EVNTSEL0 + idx, config); +} + +static void hw_perf_enable(int idx, u64 config) +{ + if (unlikely(!perf_counters_initialized)) + return; + + pmc_ops->enable(idx, config); +} + +static void pmc_intel_disable(int idx, u64 config) +{ + wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config); +} + +static void pmc_amd_disable(int idx, u64 config) +{ + struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + + clear_bit(idx, (unsigned long *)&cpuc->active_mask); + wrmsrl(MSR_K7_EVNTSEL0 + idx, config); + +} + +static void hw_perf_disable(int idx, u64 config) +{ + if (unlikely(!perf_counters_initialized)) + return; + + pmc_ops->disable(idx, config); +} + static inline void __pmc_fixed_disable(struct perf_counter *counter, struct hw_perf_counter *hwc, unsigned int __idx) @@ -278,7 +436,7 @@ __pmc_generic_disable(struct perf_counter *counter, if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) __pmc_fixed_disable(counter, hwc, idx); else - wrmsr_safe(hwc->config_base + idx, hwc->config, 0); + hw_perf_disable(idx, hwc->config); } static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]); @@ -354,8 +512,7 @@ __pmc_generic_enable(struct perf_counter *counter, if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) __pmc_fixed_enable(counter, hwc, idx); else - wrmsr(hwc->config_base + idx, - hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0); + hw_perf_enable(idx, hwc->config); } static int @@ -567,22 +724,20 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown) * This handler is triggered by the local APIC, so the APIC IRQ handling * rules apply: */ -static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi) +static int __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi) { int bit, cpu = smp_processor_id(); u64 ack, status; struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu); + int ret = 0; - rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable); - - /* Disable counters globally */ - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); - ack_APIC_irq(); + cpuc->throttle_ctrl = hw_perf_save_disable(); - rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); + status = hw_perf_get_status(cpuc->throttle_ctrl); if (!status) goto out; + ret = 1; again: 
inc_irq_stat(apic_perf_irqs); ack = status; @@ -618,12 +773,12 @@ again: } } - wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); + hw_perf_ack_status(ack); /* * Repeat if there is more work to be done: */ - rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); + status = hw_perf_get_status(cpuc->throttle_ctrl); if (status) goto again; out: @@ -631,32 +786,27 @@ out: * Restore - do not reenable when global enable is off or throttled: */ if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS) - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable); + hw_perf_restore(cpuc->throttle_ctrl); + + return ret; } void perf_counter_unthrottle(void) { struct cpu_hw_counters *cpuc; - u64 global_enable; if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) return; - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) - return; - if (unlikely(!perf_counters_initialized)) return; - cpuc = &per_cpu(cpu_hw_counters, smp_processor_id()); + cpuc = &__get_cpu_var(cpu_hw_counters); if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) { if (printk_ratelimit()) printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n"); - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable); + hw_perf_restore(cpuc->throttle_ctrl); } - rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_enable); - if (unlikely(cpuc->global_enable && !global_enable)) - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable); cpuc->interrupts = 0; } @@ -664,8 +814,8 @@ void smp_perf_counter_interrupt(struct pt_regs *regs) { irq_enter(); apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR); + ack_APIC_irq(); __smp_perf_counter_interrupt(regs, 0); - irq_exit(); } @@ -722,16 +872,23 @@ perf_counter_nmi_handler(struct notifier_block *self, { struct die_args *args = __args; struct pt_regs *regs; + int ret; + + switch (cmd) { + case DIE_NMI: + case DIE_NMI_IPI: + break; - if (likely(cmd != DIE_NMI_IPI)) + default: return NOTIFY_DONE; + } regs = args->regs; apic_write(APIC_LVTPC, APIC_DM_NMI); - __smp_perf_counter_interrupt(regs, 1); + ret = __smp_perf_counter_interrupt(regs, 1); - return NOTIFY_STOP; + return ret ? 
NOTIFY_STOP : NOTIFY_OK; } static __read_mostly struct notifier_block perf_counter_nmi_notifier = { @@ -743,18 +900,28 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = { static struct pmc_x86_ops pmc_intel_ops = { .save_disable_all = pmc_intel_save_disable_all, .restore_all = pmc_intel_restore_all, + .get_status = pmc_intel_get_status, + .ack_status = pmc_intel_ack_status, + .enable = pmc_intel_enable, + .disable = pmc_intel_disable, .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, .perfctr = MSR_ARCH_PERFMON_PERFCTR0, .event_map = pmc_intel_event_map, + .raw_event = pmc_intel_raw_event, .max_events = ARRAY_SIZE(intel_perfmon_event_map), }; static struct pmc_x86_ops pmc_amd_ops = { .save_disable_all = pmc_amd_save_disable_all, .restore_all = pmc_amd_restore_all, + .get_status = pmc_amd_get_status, + .ack_status = pmc_amd_ack_status, + .enable = pmc_amd_enable, + .disable = pmc_amd_disable, .eventsel = MSR_K7_EVNTSEL0, .perfctr = MSR_K7_PERFCTR0, .event_map = pmc_amd_event_map, + .raw_event = pmc_amd_raw_event, .max_events = ARRAY_SIZE(amd_perfmon_event_map), }; @@ -787,8 +954,25 @@ static struct pmc_x86_ops *pmc_intel_init(void) static struct pmc_x86_ops *pmc_amd_init(void) { + u64 old; + int bits; + nr_counters_generic = 4; nr_counters_fixed = 0; + counter_value_mask = ~0ULL; + + rdmsrl(MSR_K7_PERFCTR0, old); + wrmsrl(MSR_K7_PERFCTR0, counter_value_mask); + /* + * read the truncated mask + */ + rdmsrl(MSR_K7_PERFCTR0, counter_value_mask); + wrmsrl(MSR_K7_PERFCTR0, old); + + bits = 32 + fls(counter_value_mask >> 32); + if (bits == 32) + bits = fls((u32)counter_value_mask); + counter_value_bits = bits; pr_info("AMD Performance Monitoring support detected.\n"); -- cgit v1.2.3 From b5e8acf66ff5db707c7e08df49fdf6b415878442 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 5 Mar 2009 20:34:21 +0100 Subject: perfcounters: IRQ and NMI support on AMD CPUs, fix The BKDG suggests that counter width on AMD CPUs is 48 for all existing models (it certainly is for mine). Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 6ebe9abf6ae..f5853718d4d 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -959,20 +959,8 @@ static struct pmc_x86_ops *pmc_amd_init(void) nr_counters_generic = 4; nr_counters_fixed = 0; - counter_value_mask = ~0ULL; - - rdmsrl(MSR_K7_PERFCTR0, old); - wrmsrl(MSR_K7_PERFCTR0, counter_value_mask); - /* - * read the truncated mask - */ - rdmsrl(MSR_K7_PERFCTR0, counter_value_mask); - wrmsrl(MSR_K7_PERFCTR0, old); - - bits = 32 + fls(counter_value_mask >> 32); - if (bits == 32) - bits = fls((u32)counter_value_mask); - counter_value_bits = bits; + counter_value_mask = 0x0000FFFFFFFFFFFFULL; + counter_value_bits = 48; pr_info("AMD Performance Monitoring support detected.\n"); -- cgit v1.2.3 From 86028598de16538f02519141756ccf4accfc29a6 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 5 Mar 2009 14:05:57 +1100 Subject: perfcounters/powerpc: fix oops with multiple counters in a group Impact: fix oops-causing bug This fixes a bug in the powerpc hw_perf_counter_init where the code didn't initialize ctrs[n] before passing the ctrs array to check_excludes, leading to possible oopses and other incorrect behaviour. It fixes this by initializing ctrs[n] correctly.
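To make the failure mode concrete, here is a minimal sketch of the call pattern in question (names mirror hw_perf_counter_init() and check_excludes(); the wrapper function and its return convention are invented for illustration):

struct perf_counter;

/* check_excludes() walks ctrs[0..n_prev+n_new-1] and dereferences every
 * entry to compare exclude_* settings, so each slot must point at a
 * real counter. */
extern int check_excludes(struct perf_counter **ctrs, int n_prev, int n_new);

static int collect_new_counter(struct perf_counter **ctrs, unsigned int *events,
                               int n, unsigned int ev, struct perf_counter *counter)
{
        events[n] = ev;
        ctrs[n] = counter;              /* the line this patch adds */
        if (check_excludes(ctrs, n, 1)) /* reads the new slot ctrs[n] */
                return -1;
        return 0;
}

Without that assignment, check_excludes() chases whatever stale pointer happens to sit in ctrs[n], hence the oops.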
Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/perf_counter.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 112332d07fc..4fec112386f 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -633,6 +633,7 @@ hw_perf_counter_init(struct perf_counter *counter) return NULL; } events[n] = ev; + ctrs[n] = counter; if (check_excludes(ctrs, n, 1)) return NULL; if (power_check_constraints(events, n + 1)) -- cgit v1.2.3 From aabbaa6036fd847c583f585c6bae82b5a033e6c7 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 6 Mar 2009 16:27:10 +1100 Subject: perfcounters/powerpc: add support for POWER5+ processors Impact: more hardware support This adds the back-end for the PMU on the POWER5+ processors (i.e. GS, including GS DD3 aka POWER5++). This doesn't use the fixed-function PMC5 and PMC6 since they don't respect the freeze conditions and don't generate interrupts, as on POWER6. Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/Makefile | 4 +- arch/powerpc/kernel/perf_counter.c | 4 + arch/powerpc/kernel/power5+-pmu.c | 452 +++++++++++++++++++++++++++++++++++++ 3 files changed, 458 insertions(+), 2 deletions(-) create mode 100644 arch/powerpc/kernel/power5+-pmu.c diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index b4c6f466164..49851e0d8fd 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -94,8 +94,8 @@ obj-$(CONFIG_AUDIT) += audit.o obj64-$(CONFIG_AUDIT) += compat_audit.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o -obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o ppc970-pmu.o power5-pmu.o \ - power6-pmu.o +obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o ppc970-pmu.o \ + power5-pmu.o power5+-pmu.o power6-pmu.o obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 4fec112386f..162f3981fa2 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -826,6 +826,7 @@ void hw_perf_counter_setup(int cpu) extern struct power_pmu ppc970_pmu; extern struct power_pmu power5_pmu; +extern struct power_pmu power5p_pmu; extern struct power_pmu power6_pmu; static int init_perf_counters(void) @@ -848,6 +849,9 @@ static int init_perf_counters(void) case PV_POWER5: ppmu = &power5_pmu; break; + case PV_POWER5p: + ppmu = &power5p_pmu; + break; case 0x3e: ppmu = &power6_pmu; break; diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c new file mode 100644 index 00000000000..cec21ea65b0 --- /dev/null +++ b/arch/powerpc/kernel/power5+-pmu.c @@ -0,0 +1,452 @@ +/* + * Performance counter support for POWER5+/++ (not POWER5) processors. + * + * Copyright 2009 Paul Mackerras, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version.
+ */ +#include +#include +#include + +/* + * Bits in event code for POWER5+ (POWER5 GS) and POWER5++ (POWER5 GS DD3) + */ +#define PM_PMC_SH 20 /* PMC number (1-based) for direct events */ +#define PM_PMC_MSK 0xf +#define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) +#define PM_UNIT_SH 16 /* TTMMUX number and setting - unit select */ +#define PM_UNIT_MSK 0xf +#define PM_BYTE_SH 12 /* Byte number of event bus to use */ +#define PM_BYTE_MSK 7 +#define PM_GRS_SH 8 /* Storage subsystem mux select */ +#define PM_GRS_MSK 7 +#define PM_BUSEVENT_MSK 0x80 /* Set if event uses event bus */ +#define PM_PMCSEL_MSK 0x7f + +/* Values in PM_UNIT field */ +#define PM_FPU 0 +#define PM_ISU0 1 +#define PM_IFU 2 +#define PM_ISU1 3 +#define PM_IDU 4 +#define PM_ISU0_ALT 6 +#define PM_GRS 7 +#define PM_LSU0 8 +#define PM_LSU1 0xc +#define PM_LASTUNIT 0xc + +/* + * Bits in MMCR1 for POWER5+ + */ +#define MMCR1_TTM0SEL_SH 62 +#define MMCR1_TTM1SEL_SH 60 +#define MMCR1_TTM2SEL_SH 58 +#define MMCR1_TTM3SEL_SH 56 +#define MMCR1_TTMSEL_MSK 3 +#define MMCR1_TD_CP_DBG0SEL_SH 54 +#define MMCR1_TD_CP_DBG1SEL_SH 52 +#define MMCR1_TD_CP_DBG2SEL_SH 50 +#define MMCR1_TD_CP_DBG3SEL_SH 48 +#define MMCR1_GRS_L2SEL_SH 46 +#define MMCR1_GRS_L2SEL_MSK 3 +#define MMCR1_GRS_L3SEL_SH 44 +#define MMCR1_GRS_L3SEL_MSK 3 +#define MMCR1_GRS_MCSEL_SH 41 +#define MMCR1_GRS_MCSEL_MSK 7 +#define MMCR1_GRS_FABSEL_SH 39 +#define MMCR1_GRS_FABSEL_MSK 3 +#define MMCR1_PMC1_ADDER_SEL_SH 35 +#define MMCR1_PMC2_ADDER_SEL_SH 34 +#define MMCR1_PMC3_ADDER_SEL_SH 33 +#define MMCR1_PMC4_ADDER_SEL_SH 32 +#define MMCR1_PMC1SEL_SH 25 +#define MMCR1_PMC2SEL_SH 17 +#define MMCR1_PMC3SEL_SH 9 +#define MMCR1_PMC4SEL_SH 1 +#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) +#define MMCR1_PMCSEL_MSK 0x7f + +/* + * Bits in MMCRA + */ + +/* + * Layout of constraint bits: + * 6666555555555544444444443333333333222222222211111111110000000000 + * 3210987654321098765432109876543210987654321098765432109876543210 + * [ ><><>< ><> <><>[ > < >< >< >< ><><><><> + * NC G0G1G2 G3 T0T1 UC B0 B1 B2 B3 P4P3P2P1 + * + * NC - number of counters + * 51: NC error 0x0008_0000_0000_0000 + * 48-50: number of events needing PMC1-4 0x0007_0000_0000_0000 + * + * G0..G3 - GRS mux constraints + * 46-47: GRS_L2SEL value + * 44-45: GRS_L3SEL value + * 41-43: GRS_MCSEL value + * 39-40: GRS_FABSEL value + * Note that these match up with their bit positions in MMCR1 + * + * T0 - TTM0 constraint + * 36-37: TTM0SEL value (0=FPU, 2=IFU, 3=ISU1) 0x30_0000_0000 + * + * T1 - TTM1 constraint + * 34-35: TTM1SEL value (0=IDU, 3=GRS) 0x0c_0000_0000 + * + * UC - unit constraint: can't have all three of FPU|IFU|ISU1, ISU0, IDU|GRS + * 33: UC3 error 0x02_0000_0000 + * 32: FPU|IFU|ISU1 events needed 0x01_0000_0000 + * 31: ISU0 events needed 0x00_8000_0000 + * 30: IDU|GRS events needed 0x00_4000_0000 + * + * B0 + * 20-23: Byte 0 event source 0x00f0_0000 + * Encoding as for the event code + * + * B1, B2, B3 + * 16-19, 12-15, 8-11: Byte 1, 2, 3 event sources + * + * P4 + * 7: P1 error 0x80 + * 6-7: Count of events needing PMC4 + * + * P1..P3 + * 0-6: Count of events needing PMC1..PMC3 + */ + +static const int grsel_shift[8] = { + MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, + MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, + MMCR1_GRS_MCSEL_SH, MMCR1_GRS_FABSEL_SH +}; + +/* Masks and values for using events from the various units */ +static u64 unit_cons[PM_LASTUNIT+1][2] = { + [PM_FPU] = { 0x3200000000ull, 0x0100000000ull }, + [PM_ISU0] = { 0x0200000000ull, 0x0080000000ull }, + 
[PM_ISU1] = { 0x3200000000ull, 0x3100000000ull }, + [PM_IFU] = { 0x3200000000ull, 0x2100000000ull }, + [PM_IDU] = { 0x0e00000000ull, 0x0040000000ull }, + [PM_GRS] = { 0x0e00000000ull, 0x0c40000000ull }, +}; + +static int power5p_get_constraint(unsigned int event, u64 *maskp, u64 *valp) +{ + int pmc, byte, unit, sh; + int bit, fmask; + u64 mask = 0, value = 0; + + pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; + if (pmc) { + if (pmc > 4) + return -1; + sh = (pmc - 1) * 2; + mask |= 2 << sh; + value |= 1 << sh; + } + if (event & PM_BUSEVENT_MSK) { + unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; + if (unit > PM_LASTUNIT) + return -1; + if (unit == PM_ISU0_ALT) + unit = PM_ISU0; + mask |= unit_cons[unit][0]; + value |= unit_cons[unit][1]; + byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; + if (byte >= 4) { + if (unit != PM_LSU1) + return -1; + /* Map LSU1 low word (bytes 4-7) to unit LSU1+1 */ + ++unit; + byte &= 3; + } + if (unit == PM_GRS) { + bit = event & 7; + fmask = (bit == 6)? 7: 3; + sh = grsel_shift[bit]; + mask |= (u64)fmask << sh; + value |= (u64)((event >> PM_GRS_SH) & fmask) << sh; + } + /* Set byte lane select field */ + mask |= 0xfULL << (20 - 4 * byte); + value |= (u64)unit << (20 - 4 * byte); + } + mask |= 0x8000000000000ull; + value |= 0x1000000000000ull; + *maskp = mask; + *valp = value; + return 0; +} + +#define MAX_ALT 3 /* at most 3 alternatives for any event */ + +static const unsigned int event_alternatives[][MAX_ALT] = { + { 0x100c0, 0x40001f }, /* PM_GCT_FULL_CYC */ + { 0x120e4, 0x400002 }, /* PM_GRP_DISP_REJECT */ + { 0x230e2, 0x323087 }, /* PM_BR_PRED_CR */ + { 0x230e3, 0x223087, 0x3230a0 }, /* PM_BR_PRED_TA */ + { 0x410c7, 0x441084 }, /* PM_THRD_L2MISS_BOTH_CYC */ + { 0x800c4, 0xc20e0 }, /* PM_DTLB_MISS */ + { 0xc50c6, 0xc60e0 }, /* PM_MRK_DTLB_MISS */ + { 0x100009, 0x200009 }, /* PM_INST_CMPL */ + { 0x200015, 0x300015 }, /* PM_LSU_LMQ_SRQ_EMPTY_CYC */ + { 0x300009, 0x400009 }, /* PM_INST_DISP */ +}; + +/* + * Scan the alternatives table for a match and return the + * index into the alternatives table if found, else -1. + */ +static int find_alternative(unsigned int event) +{ + int i, j; + + for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { + if (event < event_alternatives[i][0]) + break; + for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j) + if (event == event_alternatives[i][j]) + return i; + } + return -1; +} + +static const unsigned char bytedecode_alternatives[4][4] = { + /* PMC 1 */ { 0x21, 0x23, 0x25, 0x27 }, + /* PMC 2 */ { 0x07, 0x17, 0x0e, 0x1e }, + /* PMC 3 */ { 0x20, 0x22, 0x24, 0x26 }, + /* PMC 4 */ { 0x07, 0x17, 0x0e, 0x1e } +}; + +/* + * Some direct events for decodes of event bus byte 3 have alternative + * PMCSEL values on other counters. This returns the alternative + * event code for those that do, or -1 otherwise. This also handles + * alternative PMCSEL values for add events.
+ */ +static int find_alternative_bdecode(unsigned int event) +{ + int pmc, altpmc, pp, j; + + pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; + if (pmc == 0 || pmc > 4) + return -1; + altpmc = 5 - pmc; /* 1 <-> 4, 2 <-> 3 */ + pp = event & PM_PMCSEL_MSK; + for (j = 0; j < 4; ++j) { + if (bytedecode_alternatives[pmc - 1][j] == pp) { + return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) | + (altpmc << PM_PMC_SH) | + bytedecode_alternatives[altpmc - 1][j]; + } + } + + /* new decode alternatives for power5+ */ + if (pmc == 1 && (pp == 0x0d || pp == 0x0e)) + return event + (2 << PM_PMC_SH) + (0x2e - 0x0d); + if (pmc == 3 && (pp == 0x2e || pp == 0x2f)) + return event - (2 << PM_PMC_SH) - (0x2e - 0x0d); + + /* alternative add event encodings */ + if (pp == 0x10 || pp == 0x28) + return ((event ^ (0x10 ^ 0x28)) & ~PM_PMC_MSKS) | + (altpmc << PM_PMC_SH); + + return -1; +} + +static int power5p_get_alternatives(unsigned int event, unsigned int alt[]) +{ + int i, j, ae, nalt = 1; + + alt[0] = event; + nalt = 1; + i = find_alternative(event); + if (i >= 0) { + for (j = 0; j < MAX_ALT; ++j) { + ae = event_alternatives[i][j]; + if (ae && ae != event) + alt[nalt++] = ae; + } + } else { + ae = find_alternative_bdecode(event); + if (ae > 0) + alt[nalt++] = ae; + } + return nalt; +} + +static int power5p_compute_mmcr(unsigned int event[], int n_ev, + unsigned int hwc[], u64 mmcr[]) +{ + u64 mmcr1 = 0; + unsigned int pmc, unit, byte, psel; + unsigned int ttm; + int i, isbus, bit, grsel; + unsigned int pmc_inuse = 0; + unsigned char busbyte[4]; + unsigned char unituse[16]; + int ttmuse; + + if (n_ev > 4) + return -1; + + /* First pass to count resource use */ + memset(busbyte, 0, sizeof(busbyte)); + memset(unituse, 0, sizeof(unituse)); + for (i = 0; i < n_ev; ++i) { + pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; + if (pmc) { + if (pmc > 4) + return -1; + if (pmc_inuse & (1 << (pmc - 1))) + return -1; + pmc_inuse |= 1 << (pmc - 1); + } + if (event[i] & PM_BUSEVENT_MSK) { + unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; + byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; + if (unit > PM_LASTUNIT) + return -1; + if (unit == PM_ISU0_ALT) + unit = PM_ISU0; + if (byte >= 4) { + if (unit != PM_LSU1) + return -1; + ++unit; + byte &= 3; + } + if (busbyte[byte] && busbyte[byte] != unit) + return -1; + busbyte[byte] = unit; + unituse[unit] = 1; + } + } + + /* + * Assign resources and set multiplexer selects. + * + * PM_ISU0 can go either on TTM0 or TTM1, but that's the only + * choice we have to deal with. + */ + if (unituse[PM_ISU0] & + (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_ISU1])) { + unituse[PM_ISU0_ALT] = 1; /* move ISU to TTM1 */ + unituse[PM_ISU0] = 0; + } + /* Set TTM[01]SEL fields. */ + ttmuse = 0; + for (i = PM_FPU; i <= PM_ISU1; ++i) { + if (!unituse[i]) + continue; + if (ttmuse++) + return -1; + mmcr1 |= (u64)i << MMCR1_TTM0SEL_SH; + } + ttmuse = 0; + for (; i <= PM_GRS; ++i) { + if (!unituse[i]) + continue; + if (ttmuse++) + return -1; + mmcr1 |= (u64)(i & 3) << MMCR1_TTM1SEL_SH; + } + if (ttmuse > 1) + return -1; + + /* Set byte lane select fields, TTM[23]SEL and GRS_*SEL. 
*/ + for (byte = 0; byte < 4; ++byte) { + unit = busbyte[byte]; + if (!unit) + continue; + if (unit == PM_ISU0 && unituse[PM_ISU0_ALT]) { + /* get ISU0 through TTM1 rather than TTM0 */ + unit = PM_ISU0_ALT; + } else if (unit == PM_LSU1 + 1) { + /* select lower word of LSU1 for this byte */ + mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte); + } + ttm = unit >> 2; + mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); + } + + /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ + for (i = 0; i < n_ev; ++i) { + pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; + unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; + byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; + psel = event[i] & PM_PMCSEL_MSK; + isbus = event[i] & PM_BUSEVENT_MSK; + if (!pmc) { + /* Bus event or any-PMC direct event */ + for (pmc = 0; pmc < 4; ++pmc) { + if (!(pmc_inuse & (1 << pmc))) + break; + } + if (pmc >= 4) + return -1; + pmc_inuse |= 1 << pmc; + } else { + /* Direct event */ + --pmc; + if (isbus && (byte & 2) && + (psel == 8 || psel == 0x10 || psel == 0x28)) + /* add events on higher-numbered bus */ + mmcr1 |= 1ull << (MMCR1_PMC1_ADDER_SEL_SH - pmc); + } + if (isbus && unit == PM_GRS) { + bit = psel & 7; + grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; + mmcr1 |= (u64)grsel << grsel_shift[bit]; + } + if ((psel & 0x58) == 0x40 && (byte & 1) != ((pmc >> 1) & 1)) + /* select alternate byte lane */ + psel |= 0x10; + if (pmc <= 3) + mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc); + hwc[i] = pmc; + } + + /* Return MMCRx values */ + mmcr[0] = 0; + if (pmc_inuse & 1) + mmcr[0] = MMCR0_PMC1CE; + if (pmc_inuse & 0x3e) + mmcr[0] |= MMCR0_PMCjCE; + mmcr[1] = mmcr1; + mmcr[2] = 0; + return 0; +} + +static void power5p_disable_pmc(unsigned int pmc, u64 mmcr[]) +{ + if (pmc <= 3) + mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc)); +} + +static int power5p_generic_events[] = { + [PERF_COUNT_CPU_CYCLES] = 0xf, + [PERF_COUNT_INSTRUCTIONS] = 0x100009, + [PERF_COUNT_CACHE_REFERENCES] = 0x1c10a8, /* LD_REF_L1 */ + [PERF_COUNT_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */ + [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */ + [PERF_COUNT_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */ +}; + +struct power_pmu power5p_pmu = { + .n_counter = 4, + .max_alternatives = MAX_ALT, + .add_fields = 0x7000000000055ull, + .test_adder = 0x3000040000000ull, + .compute_mmcr = power5p_compute_mmcr, + .get_constraint = power5p_get_constraint, + .get_alternatives = power5p_get_alternatives, + .disable_pmc = power5p_disable_pmc, + .n_generic = ARRAY_SIZE(power5p_generic_events), + .generic_events = power5p_generic_events, +}; -- cgit v1.2.3 From 880860e392d92c457e8116cdee39ec4d109174ee Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 6 Mar 2009 16:30:52 +1100 Subject: perfcounters/powerpc: add support for POWER4 processors Impact: more hardware support This adds the back-end for the PMU on the POWER4 and POWER4+ processors (GP and GQ). This is quite similar to the PPC970, with 8 PMCs, but has fewer events than the PPC970. 
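To give a feel for the event encoding this back-end parses, here is a stand-alone decode of one of the generic event codes added below (plain C; the field layout is copied from power4-pmu.c, and the program itself is illustrative only, not part of the patch):

#include <stdio.h>

#define PM_PMC_SH     12   /* PMC number (1-based) for direct events */
#define PM_PMC_MSK    0xf
#define PM_UNIT_SH    8    /* TTMMUX number and setting - unit select */
#define PM_UNIT_MSK   0xf
#define PM_BYTE_SH    4    /* byte of the event bus to use */
#define PM_BYTE_MSK   3
#define PM_PMCSEL_MSK 7

int main(void)
{
        unsigned int event = 0x8c10;    /* PM_LD_REF_L1, from p4_generic_events */

        printf("pmc  = %u\n",   (event >> PM_PMC_SH)  & PM_PMC_MSK);    /* 8 */
        printf("unit = 0x%x\n", (event >> PM_UNIT_SH) & PM_UNIT_MSK);   /* 0xc = LSU1 */
        printf("byte = %u\n",   (event >> PM_BYTE_SH) & PM_BYTE_MSK);   /* 1 */
        printf("psel = %u\n",   event & PM_PMCSEL_MSK);                 /* 0 */
        return 0;
}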
Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/Makefile | 2 +- arch/powerpc/kernel/perf_counter.c | 5 + arch/powerpc/kernel/power4-pmu.c | 557 +++++++++++++++++++++++++++++++++++++ 3 files changed, 563 insertions(+), 1 deletion(-) create mode 100644 arch/powerpc/kernel/power4-pmu.c diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 49851e0d8fd..8e5e2c74971 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -94,7 +94,7 @@ obj-$(CONFIG_AUDIT) += audit.o obj64-$(CONFIG_AUDIT) += compat_audit.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o -obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o ppc970-pmu.o \ +obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o power4-pmu.o ppc970-pmu.o \ power5-pmu.o power5+-pmu.o power6-pmu.o obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 162f3981fa2..0e33d27cd46 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -824,6 +824,7 @@ void hw_perf_counter_setup(int cpu) cpuhw->mmcr[0] = MMCR0_FC; } +extern struct power_pmu power4_pmu; extern struct power_pmu ppc970_pmu; extern struct power_pmu power5_pmu; extern struct power_pmu power5p_pmu; @@ -841,6 +842,10 @@ static int init_perf_counters(void) /* XXX should get this from cputable */ pvr = mfspr(SPRN_PVR); switch (PVR_VER(pvr)) { + case PV_POWER4: + case PV_POWER4p: + ppmu = &power4_pmu; + break; case PV_970: case PV_970FX: case PV_970MP: diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c new file mode 100644 index 00000000000..1407b19ab61 --- /dev/null +++ b/arch/powerpc/kernel/power4-pmu.c @@ -0,0 +1,557 @@ +/* + * Performance counter support for POWER4 (GP) and POWER4+ (GQ) processors. + * + * Copyright 2009 Paul Mackerras, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include +#include +#include + +/* + * Bits in event code for POWER4 + */ +#define PM_PMC_SH 12 /* PMC number (1-based) for direct events */ +#define PM_PMC_MSK 0xf +#define PM_UNIT_SH 8 /* TTMMUX number and setting - unit select */ +#define PM_UNIT_MSK 0xf +#define PM_LOWER_SH 6 +#define PM_LOWER_MSK 1 +#define PM_LOWER_MSKS 0x40 +#define PM_BYTE_SH 4 /* Byte number of event bus to use */ +#define PM_BYTE_MSK 3 +#define PM_PMCSEL_MSK 7 + +/* + * Unit code values + */ +#define PM_FPU 1 +#define PM_ISU1 2 +#define PM_IFU 3 +#define PM_IDU0 4 +#define PM_ISU1_ALT 6 +#define PM_ISU2 7 +#define PM_IFU_ALT 8 +#define PM_LSU0 9 +#define PM_LSU1 0xc +#define PM_GPS 0xf + +/* + * Bits in MMCR0 for POWER4 + */ +#define MMCR0_PMC1SEL_SH 8 +#define MMCR0_PMC2SEL_SH 1 +#define MMCR_PMCSEL_MSK 0x1f + +/* + * Bits in MMCR1 for POWER4 + */ +#define MMCR1_TTM0SEL_SH 62 +#define MMCR1_TTC0SEL_SH 61 +#define MMCR1_TTM1SEL_SH 59 +#define MMCR1_TTC1SEL_SH 58 +#define MMCR1_TTM2SEL_SH 56 +#define MMCR1_TTC2SEL_SH 55 +#define MMCR1_TTM3SEL_SH 53 +#define MMCR1_TTC3SEL_SH 52 +#define MMCR1_TTMSEL_MSK 3 +#define MMCR1_TD_CP_DBG0SEL_SH 50 +#define MMCR1_TD_CP_DBG1SEL_SH 48 +#define MMCR1_TD_CP_DBG2SEL_SH 46 +#define MMCR1_TD_CP_DBG3SEL_SH 44 +#define MMCR1_DEBUG0SEL_SH 43 +#define MMCR1_DEBUG1SEL_SH 42 +#define MMCR1_DEBUG2SEL_SH 41 +#define MMCR1_DEBUG3SEL_SH 40 +#define MMCR1_PMC1_ADDER_SEL_SH 39 +#define MMCR1_PMC2_ADDER_SEL_SH 38 +#define MMCR1_PMC6_ADDER_SEL_SH 37 +#define MMCR1_PMC5_ADDER_SEL_SH 36 +#define MMCR1_PMC8_ADDER_SEL_SH 35 +#define MMCR1_PMC7_ADDER_SEL_SH 34 +#define MMCR1_PMC3_ADDER_SEL_SH 33 +#define MMCR1_PMC4_ADDER_SEL_SH 32 +#define MMCR1_PMC3SEL_SH 27 +#define MMCR1_PMC4SEL_SH 22 +#define MMCR1_PMC5SEL_SH 17 +#define MMCR1_PMC6SEL_SH 12 +#define MMCR1_PMC7SEL_SH 7 +#define MMCR1_PMC8SEL_SH 2 /* note bit 0 is in MMCRA for GP */ + +static short mmcr1_adder_bits[8] = { + MMCR1_PMC1_ADDER_SEL_SH, + MMCR1_PMC2_ADDER_SEL_SH, + MMCR1_PMC3_ADDER_SEL_SH, + MMCR1_PMC4_ADDER_SEL_SH, + MMCR1_PMC5_ADDER_SEL_SH, + MMCR1_PMC6_ADDER_SEL_SH, + MMCR1_PMC7_ADDER_SEL_SH, + MMCR1_PMC8_ADDER_SEL_SH +}; + +/* + * Bits in MMCRA + */ +#define MMCRA_PMC8SEL0_SH 17 /* PMC8SEL bit 0 for GP */ + +/* + * Layout of constraint bits: + * 6666555555555544444444443333333333222222222211111111110000000000 + * 3210987654321098765432109876543210987654321098765432109876543210 + * |[ >[ >[ >|||[ >[ >< >< >< >< ><><><><><><><><> + * | UC1 UC2 UC3 ||| PS1 PS2 B0 B1 B2 B3 P1P2P3P4P5P6P7P8 + * \SMPL ||\TTC3SEL + * |\TTC_IFU_SEL + * \TTM2SEL0 + * + * SMPL - SAMPLE_ENABLE constraint + * 56: SAMPLE_ENABLE value 0x0100_0000_0000_0000 + * + * UC1 - unit constraint 1: can't have all three of FPU/ISU1/IDU0|ISU2 + * 55: UC1 error 0x0080_0000_0000_0000 + * 54: FPU events needed 0x0040_0000_0000_0000 + * 53: ISU1 events needed 0x0020_0000_0000_0000 + * 52: IDU0|ISU2 events needed 0x0010_0000_0000_0000 + * + * UC2 - unit constraint 2: can't have all three of FPU/IFU/LSU0 + * 51: UC2 error 0x0008_0000_0000_0000 + * 50: FPU events needed 0x0004_0000_0000_0000 + * 49: IFU events needed 0x0002_0000_0000_0000 + * 48: LSU0 events needed 0x0001_0000_0000_0000 + * + * UC3 - unit constraint 3: can't have all four of LSU0/IFU/IDU0|ISU2/ISU1 + * 47: UC3 error 0x8000_0000_0000 + * 46: LSU0 events needed 0x4000_0000_0000 + * 45: IFU events needed 0x2000_0000_0000 + * 44: IDU0|ISU2 events needed 0x1000_0000_0000 + * 43: ISU1 events needed 0x0800_0000_0000 + * + * TTM2SEL0 + * 42: 0 = IDU0 events needed + * 1 = ISU2 events needed 0x0400_0000_0000 + * + * 
TTC_IFU_SEL + * 41: 0 = IFU.U events needed + * 1 = IFU.L events needed 0x0200_0000_0000 + * + * TTC3SEL + * 40: 0 = LSU1.U events needed + * 1 = LSU1.L events needed 0x0100_0000_0000 + * + * PS1 + * 39: PS1 error 0x0080_0000_0000 + * 36-38: count of events needing PMC1/2/5/6 0x0070_0000_0000 + * + * PS2 + * 35: PS2 error 0x0008_0000_0000 + * 32-34: count of events needing PMC3/4/7/8 0x0007_0000_0000 + * + * B0 + * 28-31: Byte 0 event source 0xf000_0000 + * 1 = FPU + * 2 = ISU1 + * 3 = IFU + * 4 = IDU0 + * 7 = ISU2 + * 9 = LSU0 + * c = LSU1 + * f = GPS + * + * B1, B2, B3 + * 24-27, 20-23, 16-19: Byte 1, 2, 3 event sources + * + * P8 + * 15: P8 error 0x8000 + * 14-15: Count of events needing PMC8 + * + * P1..P7 + * 0-13: Count of events needing PMC1..PMC7 + * + * Note: this doesn't allow events using IFU.U to be combined with events + * using IFU.L, though that is feasible (using TTM0 and TTM2). However + * there are no listed events for IFU.L (they are debug events not + * verified for performance monitoring) so this shouldn't cause a + * problem. + */ + +static struct unitinfo { + u64 value, mask; + int unit; + int lowerbit; +} p4_unitinfo[16] = { + [PM_FPU] = { 0x44000000000000ull, 0x88000000000000ull, PM_FPU, 0 }, + [PM_ISU1] = { 0x20080000000000ull, 0x88000000000000ull, PM_ISU1, 0 }, + [PM_ISU1_ALT] = + { 0x20080000000000ull, 0x88000000000000ull, PM_ISU1, 0 }, + [PM_IFU] = { 0x02200000000000ull, 0x08820000000000ull, PM_IFU, 41 }, + [PM_IFU_ALT] = + { 0x02200000000000ull, 0x08820000000000ull, PM_IFU, 41 }, + [PM_IDU0] = { 0x10100000000000ull, 0x80840000000000ull, PM_IDU0, 1 }, + [PM_ISU2] = { 0x10140000000000ull, 0x80840000000000ull, PM_ISU2, 0 }, + [PM_LSU0] = { 0x01400000000000ull, 0x08800000000000ull, PM_LSU0, 0 }, + [PM_LSU1] = { 0x00000000000000ull, 0x00010000000000ull, PM_LSU1, 40 }, + [PM_GPS] = { 0x00000000000000ull, 0x00000000000000ull, PM_GPS, 0 } +}; + +static unsigned char direct_marked_event[8] = { + (1<<2) | (1<<3), /* PMC1: PM_MRK_GRP_DISP, PM_MRK_ST_CMPL */ + (1<<3) | (1<<5), /* PMC2: PM_THRESH_TIMEO, PM_MRK_BRU_FIN */ + (1<<3), /* PMC3: PM_MRK_ST_CMPL_INT */ + (1<<4) | (1<<5), /* PMC4: PM_MRK_GRP_CMPL, PM_MRK_CRU_FIN */ + (1<<4) | (1<<5), /* PMC5: PM_MRK_GRP_TIMEO */ + (1<<3) | (1<<4) | (1<<5), + /* PMC6: PM_MRK_ST_GPS, PM_MRK_FXU_FIN, PM_MRK_GRP_ISSUED */ + (1<<4) | (1<<5), /* PMC7: PM_MRK_FPU_FIN, PM_MRK_INST_FIN */ + (1<<4), /* PMC8: PM_MRK_LSU_FIN */ +}; + +/* + * Returns 1 if event counts things relating to marked instructions + * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. + */ +static int p4_marked_instr_event(unsigned int event) +{ + int pmc, psel, unit, byte, bit; + unsigned int mask; + + pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; + psel = event & PM_PMCSEL_MSK; + if (pmc) { + if (direct_marked_event[pmc - 1] & (1 << psel)) + return 1; + if (psel == 0) /* add events */ + bit = (pmc <= 4)? 
pmc - 1: 8 - pmc; + else if (psel == 6) /* decode events */ + bit = 4; + else + return 0; + } else + bit = psel; + + byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; + unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; + mask = 0; + switch (unit) { + case PM_LSU1: + if (event & PM_LOWER_MSKS) + mask = 1 << 28; /* byte 7 bit 4 */ + else + mask = 6 << 24; /* byte 3 bits 1 and 2 */ + break; + case PM_LSU0: + /* byte 3, bit 3; byte 2 bits 0,2,3,4,5; byte 1 */ + mask = 0x083dff00; + } + return (mask >> (byte * 8 + bit)) & 1; +} + +static int p4_get_constraint(unsigned int event, u64 *maskp, u64 *valp) +{ + int pmc, byte, unit, lower, sh; + u64 mask = 0, value = 0; + int grp = -1; + + pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; + if (pmc) { + if (pmc > 8) + return -1; + sh = (pmc - 1) * 2; + mask |= 2 << sh; + value |= 1 << sh; + grp = ((pmc - 1) >> 1) & 1; + } + unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; + byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; + if (unit) { + lower = (event >> PM_LOWER_SH) & PM_LOWER_MSK; + + /* + * Bus events on bytes 0 and 2 can be counted + * on PMC1/2/5/6; bytes 1 and 3 on PMC3/4/7/8. + */ + if (!pmc) + grp = byte & 1; + + if (!p4_unitinfo[unit].unit) + return -1; + mask |= p4_unitinfo[unit].mask; + value |= p4_unitinfo[unit].value; + sh = p4_unitinfo[unit].lowerbit; + if (sh > 1) + value |= (u64)lower << sh; + else if (lower != sh) + return -1; + unit = p4_unitinfo[unit].unit; + + /* Set byte lane select field */ + mask |= 0xfULL << (28 - 4 * byte); + value |= (u64)unit << (28 - 4 * byte); + } + if (grp == 0) { + /* increment PMC1/2/5/6 field */ + mask |= 0x8000000000ull; + value |= 0x1000000000ull; + } else { + /* increment PMC3/4/7/8 field */ + mask |= 0x800000000ull; + value |= 0x100000000ull; + } + + /* Marked instruction events need sample_enable set */ + if (p4_marked_instr_event(event)) { + mask |= 1ull << 56; + value |= 1ull << 56; + } + + /* PMCSEL=6 decode events on byte 2 need sample_enable clear */ + if (pmc && (event & PM_PMCSEL_MSK) == 6 && byte == 2) + mask |= 1ull << 56; + + *maskp = mask; + *valp = value; + return 0; +} + +static unsigned int ppc_inst_cmpl[] = { + 0x1001, 0x4001, 0x6001, 0x7001, 0x8001 +}; + +static int p4_get_alternatives(unsigned int event, unsigned int alt[]) +{ + int i, j, na; + + alt[0] = event; + na = 1; + + /* 2 possibilities for PM_GRP_DISP_REJECT */ + if (event == 0x8003 || event == 0x0224) { + alt[1] = event ^ (0x8003 ^ 0x0224); + return 2; + } + + /* 2 possibilities for PM_ST_MISS_L1 */ + if (event == 0x0c13 || event == 0x0c23) { + alt[1] = event ^ (0x0c13 ^ 0x0c23); + return 2; + } + + /* several possibilities for PM_INST_CMPL */ + for (i = 0; i < ARRAY_SIZE(ppc_inst_cmpl); ++i) { + if (event == ppc_inst_cmpl[i]) { + for (j = 0; j < ARRAY_SIZE(ppc_inst_cmpl); ++j) + if (j != i) + alt[na++] = ppc_inst_cmpl[j]; + break; + } + } + + return na; +} + +static int p4_compute_mmcr(unsigned int event[], int n_ev, + unsigned int hwc[], u64 mmcr[]) +{ + u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0; + unsigned int pmc, unit, byte, psel, lower; + unsigned int ttm, grp; + unsigned int pmc_inuse = 0; + unsigned int pmc_grp_use[2]; + unsigned char busbyte[4]; + unsigned char unituse[16]; + unsigned int unitlower = 0; + int i; + + if (n_ev > 8) + return -1; + + /* First pass to count resource use */ + pmc_grp_use[0] = pmc_grp_use[1] = 0; + memset(busbyte, 0, sizeof(busbyte)); + memset(unituse, 0, sizeof(unituse)); + for (i = 0; i < n_ev; ++i) { + pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; + if (pmc) { + if (pmc_inuse & (1 << (pmc - 1))) + return -1; + 
pmc_inuse |= 1 << (pmc - 1); + /* count 1/2/5/6 vs 3/4/7/8 use */ + ++pmc_grp_use[((pmc - 1) >> 1) & 1]; + } + unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; + byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; + lower = (event[i] >> PM_LOWER_SH) & PM_LOWER_MSK; + if (unit) { + if (!pmc) + ++pmc_grp_use[byte & 1]; + if (unit == 6 || unit == 8) + /* map alt ISU1/IFU codes: 6->2, 8->3 */ + unit = (unit >> 1) - 1; + if (busbyte[byte] && busbyte[byte] != unit) + return -1; + busbyte[byte] = unit; + lower <<= unit; + if (unituse[unit] && lower != (unitlower & lower)) + return -1; + unituse[unit] = 1; + unitlower |= lower; + } + } + if (pmc_grp_use[0] > 4 || pmc_grp_use[1] > 4) + return -1; + + /* + * Assign resources and set multiplexer selects. + * + * Units 1,2,3 are on TTM0, 4,6,7 on TTM1, 8,10 on TTM2. + * Each TTMx can only select one unit, but since + * units 2 and 6 are both ISU1, and 3 and 8 are both IFU, + * we have some choices. + */ + if (unituse[2] & (unituse[1] | (unituse[3] & unituse[9]))) { + unituse[6] = 1; /* Move 2 to 6 */ + unituse[2] = 0; + } + if (unituse[3] & (unituse[1] | unituse[2])) { + unituse[8] = 1; /* Move 3 to 8 */ + unituse[3] = 0; + unitlower = (unitlower & ~8) | ((unitlower & 8) << 5); + } + /* Check only one unit per TTMx */ + if (unituse[1] + unituse[2] + unituse[3] > 1 || + unituse[4] + unituse[6] + unituse[7] > 1 || + unituse[8] + unituse[9] > 1 || + (unituse[5] | unituse[10] | unituse[11] | + unituse[13] | unituse[14])) + return -1; + + /* Set TTMxSEL fields. Note, units 1-3 => TTM0SEL codes 0-2 */ + mmcr1 |= (u64)(unituse[3] * 2 + unituse[2]) << MMCR1_TTM0SEL_SH; + mmcr1 |= (u64)(unituse[7] * 3 + unituse[6] * 2) << MMCR1_TTM1SEL_SH; + mmcr1 |= (u64)unituse[9] << MMCR1_TTM2SEL_SH; + + /* Set TTCxSEL fields. */ + if (unitlower & 0xe) + mmcr1 |= 1ull << MMCR1_TTC0SEL_SH; + if (unitlower & 0xf0) + mmcr1 |= 1ull << MMCR1_TTC1SEL_SH; + if (unitlower & 0xf00) + mmcr1 |= 1ull << MMCR1_TTC2SEL_SH; + if (unitlower & 0x7000) + mmcr1 |= 1ull << MMCR1_TTC3SEL_SH; + + /* Set byte lane select fields. 
*/ + for (byte = 0; byte < 4; ++byte) { + unit = busbyte[byte]; + if (!unit) + continue; + if (unit == 0xf) { + /* special case for GPS */ + mmcr1 |= 1ull << (MMCR1_DEBUG0SEL_SH - byte); + } else { + if (!unituse[unit]) + ttm = unit - 1; /* 2->1, 3->2 */ + else + ttm = unit >> 2; + mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2*byte); + } + } + + /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ + for (i = 0; i < n_ev; ++i) { + pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; + unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; + byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; + psel = event[i] & PM_PMCSEL_MSK; + if (!pmc) { + /* Bus event or 00xxx direct event (off or cycles) */ + if (unit) + psel |= 0x10 | ((byte & 2) << 2); + for (pmc = 0; pmc < 8; ++pmc) { + if (pmc_inuse & (1 << pmc)) + continue; + grp = (pmc >> 1) & 1; + if (unit) { + if (grp == (byte & 1)) + break; + } else if (pmc_grp_use[grp] < 4) { + ++pmc_grp_use[grp]; + break; + } + } + pmc_inuse |= 1 << pmc; + } else { + /* Direct event */ + --pmc; + if (psel == 0 && (byte & 2)) + /* add events on higher-numbered bus */ + mmcr1 |= 1ull << mmcr1_adder_bits[pmc]; + else if (psel == 6 && byte == 3) + /* seem to need to set sample_enable here */ + mmcra |= MMCRA_SAMPLE_ENABLE; + psel |= 8; + } + if (pmc <= 1) + mmcr0 |= psel << (MMCR0_PMC1SEL_SH - 7 * pmc); + else + mmcr1 |= psel << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2)); + if (pmc == 7) /* PMC8 */ + mmcra |= (psel & 1) << MMCRA_PMC8SEL0_SH; + hwc[i] = pmc; + if (p4_marked_instr_event(event[i])) + mmcra |= MMCRA_SAMPLE_ENABLE; + } + + if (pmc_inuse & 1) + mmcr0 |= MMCR0_PMC1CE; + if (pmc_inuse & 0xfe) + mmcr0 |= MMCR0_PMCjCE; + + mmcra |= 0x2000; /* mark only one IOP per PPC instruction */ + + /* Return MMCRx values */ + mmcr[0] = mmcr0; + mmcr[1] = mmcr1; + mmcr[2] = mmcra; + return 0; +} + +static void p4_disable_pmc(unsigned int pmc, u64 mmcr[]) +{ + /* + * Setting the PMCxSEL field to 0 disables PMC x. + * (Note that pmc is 0-based here, not 1-based.) + */ + if (pmc <= 1) { + mmcr[0] &= ~(0x1fUL << (MMCR0_PMC1SEL_SH - 7 * pmc)); + } else { + mmcr[1] &= ~(0x1fUL << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2))); + if (pmc == 7) + mmcr[2] &= ~(1UL << MMCRA_PMC8SEL0_SH); + } +} + +static int p4_generic_events[] = { + [PERF_COUNT_CPU_CYCLES] = 7, + [PERF_COUNT_INSTRUCTIONS] = 0x1001, + [PERF_COUNT_CACHE_REFERENCES] = 0x8c10, /* PM_LD_REF_L1 */ + [PERF_COUNT_CACHE_MISSES] = 0x3c10, /* PM_LD_MISS_L1 */ + [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x330, /* PM_BR_ISSUED */ + [PERF_COUNT_BRANCH_MISSES] = 0x331, /* PM_BR_MPRED_CR */ +}; + +struct power_pmu power4_pmu = { + .n_counter = 8, + .max_alternatives = 5, + .add_fields = 0x0000001100005555ull, + .test_adder = 0x0011083300000000ull, + .compute_mmcr = p4_compute_mmcr, + .get_constraint = p4_get_constraint, + .get_alternatives = p4_get_alternatives, + .disable_pmc = p4_disable_pmc, + .n_generic = ARRAY_SIZE(p4_generic_events), + .generic_events = p4_generic_events, +}; -- cgit v1.2.3 From 184fe4ab1f2e4dfa45584889bb3820031648386b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sun, 8 Mar 2009 11:34:19 +0100 Subject: x86: perf_counter cleanup Use an actual unsigned long bitmap instead of casting our way around.
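For readers unfamiliar with the idiom being removed: taking the address of a u64 and casting it to unsigned long * happens to work on 64-bit kernels and on 32-bit little-endian, but selects the wrong word on 32-bit big-endian. A user-space sketch of the native bitmap layout the patch switches to (the kernel's set_bit()/test_bit() are atomic; these stand-ins are not):

#include <stdio.h>

#define BITS_PER_LONG    (8 * (int)sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define X86_PMC_IDX_MAX  64

static unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];

static void set_bit(int nr, unsigned long *addr)
{
        addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit(int nr, const unsigned long *addr)
{
        return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
        set_bit(3, active_mask);
        printf("bit 3 = %d, bit 4 = %d\n",
               test_bit(3, active_mask), test_bit(4, active_mask));
        return 0;
}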
Signed-off-by: Peter Zijlstra Cc: Jaswinder Singh Rajput LKML-Reference: <1236508459.22914.3645.camel@twins> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index f5853718d4d..1df421042b2 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -37,7 +37,7 @@ struct cpu_hw_counters { unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; unsigned long interrupts; u64 throttle_ctrl; - u64 active_mask; + unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; int enabled; }; @@ -291,7 +291,7 @@ static void pmc_amd_restore_all(u64 ctrl) return; for (idx = 0; idx < nr_counters_generic; idx++) { - if (test_bit(idx, (unsigned long *)&cpuc->active_mask)) { + if (test_bit(idx, cpuc->active_mask)) { u64 val; rdmsrl(MSR_K7_EVNTSEL0 + idx, val); @@ -377,7 +377,7 @@ static void pmc_amd_enable(int idx, u64 config) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - set_bit(idx, (unsigned long *)&cpuc->active_mask); + set_bit(idx, cpuc->active_mask); if (cpuc->enabled) config |= ARCH_PERFMON_EVENTSEL0_ENABLE; @@ -401,7 +401,7 @@ static void pmc_amd_disable(int idx, u64 config) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - clear_bit(idx, (unsigned long *)&cpuc->active_mask); + clear_bit(idx, cpuc->active_mask); wrmsrl(MSR_K7_EVNTSEL0 + idx, config); } -- cgit v1.2.3 From e255357764f92afcafafbd4879b222b8c752065a Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 8 Mar 2009 17:09:49 +0530 Subject: x86: perf_counter cleanup Remove unused variables and duplicate header file. Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 1df421042b2..155bc3c239b 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -17,7 +17,6 @@ #include #include -#include #include static bool perf_counters_initialized __read_mostly; @@ -954,9 +953,6 @@ static struct pmc_x86_ops *pmc_intel_init(void) static struct pmc_x86_ops *pmc_amd_init(void) { - u64 old; - int bits; - nr_counters_generic = 4; nr_counters_fixed = 0; counter_value_mask = 0x0000FFFFFFFFFFFFULL; -- cgit v1.2.3 From 595258aaeac4cc6e187b98b1bf29bb176febe763 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 13 Mar 2009 12:21:28 +0100 Subject: perf_counter: x86: fix 32-bit irq_period assumption No need to assume the irq_period is 32bit. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 155bc3c239b..1cedc3468ce 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -449,7 +449,7 @@ __hw_perf_counter_set_period(struct perf_counter *counter, struct hw_perf_counter *hwc, int idx) { s64 left = atomic64_read(&hwc->period_left); - s32 period = hwc->irq_period; + s64 period = hwc->irq_period; int err; /* -- cgit v1.2.3 From 755642322aa66fbc5421a35fd3e1733f73e20083 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 13 Mar 2009 12:21:29 +0100 Subject: perf_counter: use list_move_tail() Instead of del/add use a move list-op. 
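A user-space rendition of the list.h pieces involved shows that the move really is the del/add pair fused into one call (simplified: no poisoning, no debug checks):

struct list_head { struct list_head *next, *prev; };

static void __list_del(struct list_head *prev, struct list_head *next)
{
        next->prev = prev;
        prev->next = next;
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

/* delete from the current position, then append at the tail */
static void list_move_tail(struct list_head *entry, struct list_head *head)
{
        __list_del(entry->prev, entry->next);
        list_add_tail(entry, head);
}

The only behavioural difference is in the first hunk: list_del_init() re-pointed the entry at itself before the re-add, a step that is redundant when the entry is immediately linked back in.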
Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index b2e838959f3..0fe22c916e2 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -89,8 +89,7 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) list_for_each_entry_safe(sibling, tmp, &counter->sibling_list, list_entry) { - list_del_init(&sibling->list_entry); - list_add_tail(&sibling->list_entry, &ctx->counter_list); + list_move_tail(&sibling->list_entry, &ctx->counter_list); sibling->group_leader = sibling; } } @@ -959,8 +958,7 @@ static void rotate_ctx(struct perf_counter_context *ctx) */ perf_flags = hw_perf_save_disable(); list_for_each_entry(counter, &ctx->counter_list, list_entry) { - list_del(&counter->list_entry); - list_add_tail(&counter->list_entry, &ctx->counter_list); + list_move_tail(&counter->list_entry, &ctx->counter_list); break; } hw_perf_restore(perf_flags); -- cgit v1.2.3 From 60b3df9c1e24a18aabb412da9905208c5f04ebea Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 13 Mar 2009 12:21:30 +0100 Subject: perf_counter: add comment to barrier We need to ensure the enabled=0 write happens before we start disabling the actual counters, so that a pmc_amd_enable() will not enable one underneath us. I think the race is impossible anyway: we always balance the ops within any one context and perform enable() with IRQs disabled. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 1cedc3468ce..a2e3b76bfdc 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -247,6 +247,10 @@ static u64 pmc_amd_save_disable_all(void) enabled = cpuc->enabled; cpuc->enabled = 0; + /* + * ensure we write the disable before we start disabling the + * counters proper, so that pmc_amd_enable() does the right thing. + */ barrier(); for (idx = 0; idx < nr_counters_generic; idx++) { -- cgit v1.2.3 From 82bae4f8c2fd64a2bb1e2e72c508853ed2b4a299 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 13 Mar 2009 12:21:31 +0100 Subject: perf_counter: x86: use ULL postfix for 64bit constants Fix a build warning on 32bit machines by explicitly marking the constants as 64-bit.
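For background on the warning: under the C90 rules the kernel is built with, an unsuffixed hex constant may only have type int, unsigned int, long or unsigned long, and 0x7000000FF fits none of those on a 32-bit target, so gcc promotes it and warns that the constant is too large for its type. A stand-alone illustration (variable names are arbitrary):

#include <stdio.h>

#define BAD_MASK  0x7000000FF     /* warns with gcc -std=gnu89 -m32 */
#define GOOD_MASK 0x7000000FFULL  /* explicit 64-bit type everywhere */

int main(void)
{
        unsigned long long bad  = BAD_MASK;
        unsigned long long good = GOOD_MASK;

        printf("%llx %llx\n", bad, good);
        return 0;
}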
Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index a2e3b76bfdc..22dab06c08a 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -84,9 +84,9 @@ static u64 pmc_intel_event_map(int event) static u64 pmc_intel_raw_event(u64 event) { -#define CORE_EVNTSEL_EVENT_MASK 0x000000FF -#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00 -#define CORE_EVNTSEL_COUNTER_MASK 0xFF000000 +#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL +#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL +#define CORE_EVNTSEL_COUNTER_MASK 0xFF000000ULL #define CORE_EVNTSEL_MASK \ (CORE_EVNTSEL_EVENT_MASK | \ @@ -116,9 +116,9 @@ static u64 pmc_amd_event_map(int event) static u64 pmc_amd_raw_event(u64 event) { -#define K7_EVNTSEL_EVENT_MASK 0x7000000FF -#define K7_EVNTSEL_UNIT_MASK 0x00000FF00 -#define K7_EVNTSEL_COUNTER_MASK 0x0FF000000 +#define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL +#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL +#define K7_EVNTSEL_COUNTER_MASK 0x0FF000000ULL #define K7_EVNTSEL_MASK \ (K7_EVNTSEL_EVENT_MASK | \ -- cgit v1.2.3 From 15dbf27cc18559a14e99609f78678aa86b9c6ff1 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 13 Mar 2009 12:21:32 +0100 Subject: perf_counter: software counter event infrastructure Provide generic software counter infrastructure that supports software events. This will be used to allow sample based profiling based on software events such as pagefaults. The current infrastructure can only provide a count of such events, no place information. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 8 +- kernel/perf_counter.c | 201 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 208 insertions(+), 1 deletion(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index dde564517b6..3fefc3b8150 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -126,6 +126,7 @@ struct hw_perf_counter { unsigned long counter_base; int nmi; unsigned int idx; + atomic64_t count; /* software */ atomic64_t prev_count; u64 irq_period; atomic64_t period_left; @@ -283,6 +284,8 @@ static inline int is_software_counter(struct perf_counter *counter) return !counter->hw_event.raw && counter->hw_event.type < 0; } +extern void perf_swcounter_event(enum hw_event_types, u64, int, struct pt_regs *); + #else static inline void perf_counter_task_sched_in(struct task_struct *task, int cpu) { } @@ -295,10 +298,13 @@ static inline void perf_counter_exit_task(struct task_struct *child) { } static inline void perf_counter_notify(struct pt_regs *regs) { } static inline void perf_counter_print_debug(void) { } static inline void perf_counter_unthrottle(void) { } -static inline void hw_perf_restore(u64 ctrl) { } +static inline void hw_perf_restore(u64 ctrl) { } static inline u64 hw_perf_save_disable(void) { return 0; } static inline int perf_counter_task_disable(void) { return -EINVAL; } static inline int perf_counter_task_enable(void) { return -EINVAL; } + +static inline void perf_swcounter_event(enum hw_event_types event, u64 nr, + int nmi, struct pt_regs *regs) { } #endif #endif /* __KERNEL__ */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 0fe22c916e2..eeb1b46cf70 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1328,6 +1328,185 @@ static const struct file_operations 
perf_fops = { .compat_ioctl = perf_ioctl, }; +/* + * Generic software counter infrastructure + */ + +static void perf_swcounter_update(struct perf_counter *counter) +{ + struct hw_perf_counter *hwc = &counter->hw; + u64 prev, now; + s64 delta; + +again: + prev = atomic64_read(&hwc->prev_count); + now = atomic64_read(&hwc->count); + if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev) + goto again; + + delta = now - prev; + + atomic64_add(delta, &counter->count); + atomic64_sub(delta, &hwc->period_left); +} + +static void perf_swcounter_set_period(struct perf_counter *counter) +{ + struct hw_perf_counter *hwc = &counter->hw; + s64 left = atomic64_read(&hwc->period_left); + s64 period = hwc->irq_period; + + if (unlikely(left <= -period)) { + left = period; + atomic64_set(&hwc->period_left, left); + } + + if (unlikely(left <= 0)) { + left += period; + atomic64_add(period, &hwc->period_left); + } + + atomic64_set(&hwc->prev_count, -left); + atomic64_set(&hwc->count, -left); +} + +static void perf_swcounter_save_and_restart(struct perf_counter *counter) +{ + perf_swcounter_update(counter); + perf_swcounter_set_period(counter); +} + +static void perf_swcounter_store_irq(struct perf_counter *counter, u64 data) +{ + struct perf_data *irqdata = counter->irqdata; + + if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) { + irqdata->overrun++; + } else { + u64 *p = (u64 *) &irqdata->data[irqdata->len]; + + *p = data; + irqdata->len += sizeof(u64); + } +} + +static void perf_swcounter_handle_group(struct perf_counter *sibling) +{ + struct perf_counter *counter, *group_leader = sibling->group_leader; + + list_for_each_entry(counter, &group_leader->sibling_list, list_entry) { + perf_swcounter_update(counter); + perf_swcounter_store_irq(sibling, counter->hw_event.type); + perf_swcounter_store_irq(sibling, atomic64_read(&counter->count)); + } +} + +static void perf_swcounter_interrupt(struct perf_counter *counter, + int nmi, struct pt_regs *regs) +{ + perf_swcounter_save_and_restart(counter); + + switch (counter->hw_event.record_type) { + case PERF_RECORD_SIMPLE: + break; + + case PERF_RECORD_IRQ: + perf_swcounter_store_irq(counter, instruction_pointer(regs)); + break; + + case PERF_RECORD_GROUP: + perf_swcounter_handle_group(counter); + break; + } + + if (nmi) { + counter->wakeup_pending = 1; + set_tsk_thread_flag(current, TIF_PERF_COUNTERS); + } else + wake_up(&counter->waitq); +} + +static int perf_swcounter_match(struct perf_counter *counter, + enum hw_event_types event, + struct pt_regs *regs) +{ + if (counter->state != PERF_COUNTER_STATE_ACTIVE) + return 0; + + if (counter->hw_event.raw) + return 0; + + if (counter->hw_event.type != event) + return 0; + + if (counter->hw_event.exclude_user && user_mode(regs)) + return 0; + + if (counter->hw_event.exclude_kernel && !user_mode(regs)) + return 0; + + return 1; +} + +static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, + enum hw_event_types event, u64 nr, + int nmi, struct pt_regs *regs) +{ + struct perf_counter *counter; + unsigned long flags; + int neg; + + if (list_empty(&ctx->counter_list)) + return; + + spin_lock_irqsave(&ctx->lock, flags); + + /* + * XXX: make counter_list RCU safe + */ + list_for_each_entry(counter, &ctx->counter_list, list_entry) { + if (perf_swcounter_match(counter, event, regs)) { + neg = atomic64_add_negative(nr, &counter->hw.count); + if (counter->hw.irq_period && !neg) + perf_swcounter_interrupt(counter, nmi, regs); + } + } + + spin_unlock_irqrestore(&ctx->lock, flags); +} + +void 
perf_swcounter_event(enum hw_event_types event, u64 nr, + int nmi, struct pt_regs *regs) +{ + struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); + + perf_swcounter_ctx_event(&cpuctx->ctx, event, nr, nmi, regs); + if (cpuctx->task_ctx) + perf_swcounter_ctx_event(cpuctx->task_ctx, event, nr, nmi, regs); + + put_cpu_var(perf_cpu_context); +} + +static void perf_swcounter_read(struct perf_counter *counter) +{ + perf_swcounter_update(counter); +} + +static int perf_swcounter_enable(struct perf_counter *counter) +{ + perf_swcounter_set_period(counter); + return 0; +} + +static void perf_swcounter_disable(struct perf_counter *counter) +{ + perf_swcounter_update(counter); +} + +/* + * Software counter: cpu wall time clock + */ + static int cpu_clock_perf_counter_enable(struct perf_counter *counter) { int cpu = raw_smp_processor_id(); @@ -1364,6 +1543,10 @@ static const struct hw_perf_counter_ops perf_ops_cpu_clock = { .read = cpu_clock_perf_counter_read, }; +/* + * Software counter: task time clock + */ + /* * Called from within the scheduler: */ @@ -1420,6 +1603,10 @@ static const struct hw_perf_counter_ops perf_ops_task_clock = { .read = task_clock_perf_counter_read, }; +/* + * Software counter: page faults + */ + #ifdef CONFIG_VM_EVENT_COUNTERS #define cpu_page_faults() __get_cpu_var(vm_event_states).event[PGFAULT] #else @@ -1473,6 +1660,10 @@ static const struct hw_perf_counter_ops perf_ops_page_faults = { .read = page_faults_perf_counter_read, }; +/* + * Software counter: context switches + */ + static u64 get_context_switches(struct perf_counter *counter) { struct task_struct *curr = counter->ctx->task; @@ -1521,6 +1712,10 @@ static const struct hw_perf_counter_ops perf_ops_context_switches = { .read = context_switches_perf_counter_read, }; +/* + * Software counter: cpu migrations + */ + static inline u64 get_cpu_migrations(struct perf_counter *counter) { struct task_struct *curr = counter->ctx->task; @@ -1572,7 +1767,9 @@ static const struct hw_perf_counter_ops perf_ops_cpu_migrations = { static const struct hw_perf_counter_ops * sw_perf_counter_init(struct perf_counter *counter) { + struct perf_counter_hw_event *hw_event = &counter->hw_event; const struct hw_perf_counter_ops *hw_ops = NULL; + struct hw_perf_counter *hwc = &counter->hw; /* * Software counters (currently) can't in general distinguish @@ -1618,6 +1815,10 @@ sw_perf_counter_init(struct perf_counter *counter) default: break; } + + if (hw_ops) + hwc->irq_period = hw_event->irq_period; + return hw_ops; } -- cgit v1.2.3 From 7dd1fcc258b65da718f01e4684a7b9244501a9fb Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 13 Mar 2009 12:21:33 +0100 Subject: perf_counter: provide pagefault software events We use the generic software counter infrastructure to provide page fault events. 
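The infrastructure referenced here counts events in hwc->count and folds them into the user-visible total via the lock-free loop in perf_swcounter_update() from the previous patch. A user-space rendition of that update step, with C11 atomics standing in for atomic64_t (a sketch, not the kernel code; the period_left bookkeeping is omitted):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic int64_t hw_count;   /* bumped by the event hooks */
static _Atomic int64_t prev_count; /* last value already folded in */
static _Atomic int64_t count;      /* user-visible total */

static void swcounter_update(void)
{
        int64_t prev, now;

        do {
                prev = atomic_load(&prev_count);
                now  = atomic_load(&hw_count);
                /* claim the (prev, now] delta; retry if another
                 * updater folded it first */
        } while (!atomic_compare_exchange_weak(&prev_count, &prev, now));

        atomic_fetch_add(&count, now - prev);
}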
Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- arch/powerpc/mm/fault.c | 3 +++ arch/x86/mm/fault.c | 3 +++ kernel/perf_counter.c | 53 +++---------------------------------------------- 3 files changed, 9 insertions(+), 50 deletions(-) diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 76993941cac..eda5b0ca4af 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -170,6 +171,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, die("Weird page fault", regs, SIGSEGV); } + perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs); + /* When running in the kernel we expect faults to occur only to * addresses in user space. All other faults represent errors in the * kernel and should generate an OOPS. Unfortunately, in the case of an diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index a03b7279efa..c8725752b6c 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -1044,6 +1045,8 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) if (unlikely(error_code & PF_RSVD)) pgtable_bad(regs, error_code, address); + perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs); + /* * If we're in an interrupt, have no user context or are running * in an atomic region then we must not take the fault: diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index eeb1b46cf70..1773c5d7427 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1607,57 +1607,10 @@ static const struct hw_perf_counter_ops perf_ops_task_clock = { * Software counter: page faults */ -#ifdef CONFIG_VM_EVENT_COUNTERS -#define cpu_page_faults() __get_cpu_var(vm_event_states).event[PGFAULT] -#else -#define cpu_page_faults() 0 -#endif - -static u64 get_page_faults(struct perf_counter *counter) -{ - struct task_struct *curr = counter->ctx->task; - - if (curr) - return curr->maj_flt + curr->min_flt; - return cpu_page_faults(); -} - -static void page_faults_perf_counter_update(struct perf_counter *counter) -{ - u64 prev, now; - s64 delta; - - prev = atomic64_read(&counter->hw.prev_count); - now = get_page_faults(counter); - - atomic64_set(&counter->hw.prev_count, now); - - delta = now - prev; - - atomic64_add(delta, &counter->count); -} - -static void page_faults_perf_counter_read(struct perf_counter *counter) -{ - page_faults_perf_counter_update(counter); -} - -static int page_faults_perf_counter_enable(struct perf_counter *counter) -{ - if (counter->prev_state <= PERF_COUNTER_STATE_OFF) - atomic64_set(&counter->hw.prev_count, get_page_faults(counter)); - return 0; -} - -static void page_faults_perf_counter_disable(struct perf_counter *counter) -{ - page_faults_perf_counter_update(counter); -} - static const struct hw_perf_counter_ops perf_ops_page_faults = { - .enable = page_faults_perf_counter_enable, - .disable = page_faults_perf_counter_disable, - .read = page_faults_perf_counter_read, + .enable = perf_swcounter_enable, + .disable = perf_swcounter_disable, + .read = perf_swcounter_read, }; /* -- cgit v1.2.3 From ac17dc8e58f3069ea895cfff963adf98ff3cf6b2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 13 Mar 2009 12:21:34 +0100 Subject: perf_counter: provide major/minor page fault software events Provide separate sw counters for major and minor page faults. 
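On the enum change below: software event types are allocated downward from -1, so the two new fault counters push the PERF_SW_EVENTS_MIN sentinel from -6 to -8. A minimal sketch of the range check such a sentinel permits (is_software_counter() in the tree at this point tests only type < 0, so the stricter form here is illustrative):

enum {
        PERF_COUNT_PAGE_FAULTS_MIN = -6,
        PERF_COUNT_PAGE_FAULTS_MAJ = -7,
        PERF_SW_EVENTS_MIN         = -8,  /* one past the last sw event */
};

static inline int is_valid_software_event(int type)
{
        return type < 0 && type > PERF_SW_EVENTS_MIN;
}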
Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- arch/powerpc/mm/fault.c | 5 ++++- arch/x86/mm/fault.c | 7 +++++-- include/linux/perf_counter.h | 4 +++- kernel/perf_counter.c | 22 +++++++++------------- 4 files changed, 21 insertions(+), 17 deletions(-) diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index eda5b0ca4af..17bbf6f91fb 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -312,6 +312,7 @@ good_area: } if (ret & VM_FAULT_MAJOR) { current->maj_flt++; + perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MAJ, 1, 0, regs); #ifdef CONFIG_PPC_SMLPAR if (firmware_has_feature(FW_FEATURE_CMO)) { preempt_disable(); @@ -319,8 +320,10 @@ good_area: preempt_enable(); } #endif - } else + } else { current->min_flt++; + perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MIN, 1, 0, regs); + } up_read(&mm->mmap_sem); return 0; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index c8725752b6c..f2d3324d921 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1140,10 +1140,13 @@ good_area: return; } - if (fault & VM_FAULT_MAJOR) + if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - else + perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MAJ, 1, 0, regs); + } else { tsk->min_flt++; + perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MIN, 1, 0, regs); + } check_v8086_mode(regs, address, tsk); diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 3fefc3b8150..4b14a8e9dbf 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -49,8 +49,10 @@ enum hw_event_types { PERF_COUNT_PAGE_FAULTS = -3, PERF_COUNT_CONTEXT_SWITCHES = -4, PERF_COUNT_CPU_MIGRATIONS = -5, + PERF_COUNT_PAGE_FAULTS_MIN = -6, + PERF_COUNT_PAGE_FAULTS_MAJ = -7, - PERF_SW_EVENTS_MIN = -6, + PERF_SW_EVENTS_MIN = -8, }; /* diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 1773c5d7427..68950a3a52b 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1503,6 +1503,12 @@ static void perf_swcounter_disable(struct perf_counter *counter) perf_swcounter_update(counter); } +static const struct hw_perf_counter_ops perf_ops_generic = { + .enable = perf_swcounter_enable, + .disable = perf_swcounter_disable, + .read = perf_swcounter_read, +}; + /* * Software counter: cpu wall time clock */ @@ -1603,16 +1609,6 @@ static const struct hw_perf_counter_ops perf_ops_task_clock = { .read = task_clock_perf_counter_read, }; -/* - * Software counter: page faults - */ - -static const struct hw_perf_counter_ops perf_ops_page_faults = { - .enable = perf_swcounter_enable, - .disable = perf_swcounter_disable, - .read = perf_swcounter_read, -}; - /* * Software counter: context switches */ @@ -1753,9 +1749,9 @@ sw_perf_counter_init(struct perf_counter *counter) hw_ops = &perf_ops_cpu_clock; break; case PERF_COUNT_PAGE_FAULTS: - if (!(counter->hw_event.exclude_user || - counter->hw_event.exclude_kernel)) - hw_ops = &perf_ops_page_faults; + case PERF_COUNT_PAGE_FAULTS_MIN: + case PERF_COUNT_PAGE_FAULTS_MAJ: + hw_ops = &perf_ops_generic; break; case PERF_COUNT_CONTEXT_SWITCHES: if (!counter->hw_event.exclude_kernel) -- cgit v1.2.3 From d6d020e9957745c61285ef3da9f294c5e6801f0f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 13 Mar 2009 12:21:35 +0100 Subject: perf_counter: hrtimer based sampling for software time events Use hrtimers to profile timer based sampling for the software time counters. This allows platforms without hardware counter support to still perform sample based profiling. 
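Mechanically this is the standard self-rearming hrtimer pattern; a condensed
sketch under hypothetical names (the real code lives in the enable/disable
callbacks in the hunks below):

	static u64 sample_period_ns = 10000;	/* the patch enforces a 10 usec floor */

	static enum hrtimer_restart sample_fn(struct hrtimer *timer)
	{
		/* ... take one sample ... */

		/* push the expiry forward by one period, then re-arm */
		hrtimer_forward_now(timer, ns_to_ktime(sample_period_ns));
		return HRTIMER_RESTART;
	}

	static void start_sampling(struct hrtimer *timer)
	{
		hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		timer->function = sample_fn;
		hrtimer_start(timer, ns_to_ktime(sample_period_ns),
			      HRTIMER_MODE_REL);
	}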
Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 20 ++++--- kernel/perf_counter.c | 123 ++++++++++++++++++++++++++++++------------- 2 files changed, 100 insertions(+), 43 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 4b14a8e9dbf..dfb4c7ce18b 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -114,6 +114,7 @@ struct perf_counter_hw_event { #include #include #include +#include #include struct task_struct; @@ -123,12 +124,19 @@ struct task_struct; */ struct hw_perf_counter { #ifdef CONFIG_PERF_COUNTERS - u64 config; - unsigned long config_base; - unsigned long counter_base; - int nmi; - unsigned int idx; - atomic64_t count; /* software */ + union { + struct { /* hardware */ + u64 config; + unsigned long config_base; + unsigned long counter_base; + int nmi; + unsigned int idx; + }; + union { /* software */ + atomic64_t count; + struct hrtimer hrtimer; + }; + }; atomic64_t prev_count; u64 irq_period; atomic64_t period_left; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 68950a3a52b..f9330d5827c 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1395,7 +1395,7 @@ static void perf_swcounter_handle_group(struct perf_counter *sibling) struct perf_counter *counter, *group_leader = sibling->group_leader; list_for_each_entry(counter, &group_leader->sibling_list, list_entry) { - perf_swcounter_update(counter); + counter->hw_ops->read(counter); perf_swcounter_store_irq(sibling, counter->hw_event.type); perf_swcounter_store_irq(sibling, atomic64_read(&counter->count)); } @@ -1404,8 +1404,6 @@ static void perf_swcounter_handle_group(struct perf_counter *sibling) static void perf_swcounter_interrupt(struct perf_counter *counter, int nmi, struct pt_regs *regs) { - perf_swcounter_save_and_restart(counter); - switch (counter->hw_event.record_type) { case PERF_RECORD_SIMPLE: break; @@ -1426,6 +1424,38 @@ static void perf_swcounter_interrupt(struct perf_counter *counter, wake_up(&counter->waitq); } +static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) +{ + struct perf_counter *counter; + struct pt_regs *regs; + + counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); + counter->hw_ops->read(counter); + + regs = get_irq_regs(); + /* + * In case we exclude kernel IPs or are somehow not in interrupt + * context, provide the next best thing, the user IP. 
+ */ + if ((counter->hw_event.exclude_kernel || !regs) && + !counter->hw_event.exclude_user) + regs = task_pt_regs(current); + + if (regs) + perf_swcounter_interrupt(counter, 0, regs); + + hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period)); + + return HRTIMER_RESTART; +} + +static void perf_swcounter_overflow(struct perf_counter *counter, + int nmi, struct pt_regs *regs) +{ + perf_swcounter_save_and_restart(counter); + perf_swcounter_interrupt(counter, nmi, regs); +} + static int perf_swcounter_match(struct perf_counter *counter, enum hw_event_types event, struct pt_regs *regs) @@ -1448,13 +1478,20 @@ static int perf_swcounter_match(struct perf_counter *counter, return 1; } +static void perf_swcounter_add(struct perf_counter *counter, u64 nr, + int nmi, struct pt_regs *regs) +{ + int neg = atomic64_add_negative(nr, &counter->hw.count); + if (counter->hw.irq_period && !neg) + perf_swcounter_overflow(counter, nmi, regs); +} + static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, enum hw_event_types event, u64 nr, int nmi, struct pt_regs *regs) { struct perf_counter *counter; unsigned long flags; - int neg; if (list_empty(&ctx->counter_list)) return; @@ -1465,11 +1502,8 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, * XXX: make counter_list RCU safe */ list_for_each_entry(counter, &ctx->counter_list, list_entry) { - if (perf_swcounter_match(counter, event, regs)) { - neg = atomic64_add_negative(nr, &counter->hw.count); - if (counter->hw.irq_period && !neg) - perf_swcounter_interrupt(counter, nmi, regs); - } + if (perf_swcounter_match(counter, event, regs)) + perf_swcounter_add(counter, nr, nmi, regs); } spin_unlock_irqrestore(&ctx->lock, flags); @@ -1513,14 +1547,6 @@ static const struct hw_perf_counter_ops perf_ops_generic = { * Software counter: cpu wall time clock */ -static int cpu_clock_perf_counter_enable(struct perf_counter *counter) -{ - int cpu = raw_smp_processor_id(); - - atomic64_set(&counter->hw.prev_count, cpu_clock(cpu)); - return 0; -} - static void cpu_clock_perf_counter_update(struct perf_counter *counter) { int cpu = raw_smp_processor_id(); @@ -1533,8 +1559,26 @@ static void cpu_clock_perf_counter_update(struct perf_counter *counter) atomic64_add(now - prev, &counter->count); } +static int cpu_clock_perf_counter_enable(struct perf_counter *counter) +{ + struct hw_perf_counter *hwc = &counter->hw; + int cpu = raw_smp_processor_id(); + + atomic64_set(&hwc->prev_count, cpu_clock(cpu)); + if (hwc->irq_period) { + hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hwc->hrtimer.function = perf_swcounter_hrtimer; + __hrtimer_start_range_ns(&hwc->hrtimer, + ns_to_ktime(hwc->irq_period), 0, + HRTIMER_MODE_REL, 0); + } + + return 0; +} + static void cpu_clock_perf_counter_disable(struct perf_counter *counter) { + hrtimer_cancel(&counter->hw.hrtimer); cpu_clock_perf_counter_update(counter); } @@ -1580,27 +1624,33 @@ static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now atomic64_add(delta, &counter->count); } -static void task_clock_perf_counter_read(struct perf_counter *counter) -{ - u64 now = task_clock_perf_counter_val(counter, 1); - - task_clock_perf_counter_update(counter, now); -} - static int task_clock_perf_counter_enable(struct perf_counter *counter) { - if (counter->prev_state <= PERF_COUNTER_STATE_OFF) - atomic64_set(&counter->hw.prev_count, - task_clock_perf_counter_val(counter, 0)); + struct hw_perf_counter *hwc = &counter->hw; + + atomic64_set(&hwc->prev_count, 
task_clock_perf_counter_val(counter, 0)); + if (hwc->irq_period) { + hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hwc->hrtimer.function = perf_swcounter_hrtimer; + __hrtimer_start_range_ns(&hwc->hrtimer, + ns_to_ktime(hwc->irq_period), 0, + HRTIMER_MODE_REL, 0); + } return 0; } static void task_clock_perf_counter_disable(struct perf_counter *counter) { - u64 now = task_clock_perf_counter_val(counter, 0); + hrtimer_cancel(&counter->hw.hrtimer); + task_clock_perf_counter_update(counter, + task_clock_perf_counter_val(counter, 0)); +} - task_clock_perf_counter_update(counter, now); +static void task_clock_perf_counter_read(struct perf_counter *counter) +{ + task_clock_perf_counter_update(counter, + task_clock_perf_counter_val(counter, 1)); } static const struct hw_perf_counter_ops perf_ops_task_clock = { @@ -1729,16 +1779,12 @@ sw_perf_counter_init(struct perf_counter *counter) */ switch (counter->hw_event.type) { case PERF_COUNT_CPU_CLOCK: - if (!(counter->hw_event.exclude_user || - counter->hw_event.exclude_kernel || - counter->hw_event.exclude_hv)) - hw_ops = &perf_ops_cpu_clock; + hw_ops = &perf_ops_cpu_clock; + + if (hw_event->irq_period && hw_event->irq_period < 10000) + hw_event->irq_period = 10000; break; case PERF_COUNT_TASK_CLOCK: - if (counter->hw_event.exclude_user || - counter->hw_event.exclude_kernel || - counter->hw_event.exclude_hv) - break; /* * If the user instantiates this as a per-cpu counter, * use the cpu_clock counter instead. @@ -1747,6 +1793,9 @@ sw_perf_counter_init(struct perf_counter *counter) hw_ops = &perf_ops_task_clock; else hw_ops = &perf_ops_cpu_clock; + + if (hw_event->irq_period && hw_event->irq_period < 10000) + hw_event->irq_period = 10000; break; case PERF_COUNT_PAGE_FAULTS: case PERF_COUNT_PAGE_FAULTS_MIN: -- cgit v1.2.3 From 592903cdcbf606a838056bae6d03fc557806c914 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 13 Mar 2009 12:21:36 +0100 Subject: perf_counter: add an event_list I noticed that the counter_list only includes top-level counters, thus perf_swcounter_event() will miss sw-counters in groups. Since perf_swcounter_event() also wants an RCU safe list, create a new event_list that includes all counters and uses RCU list ops and use call_rcu to free the counter structure. 
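The discipline, condensed from the hunks below: writers manipulate the list
with the RCU list ops under ctx->lock and defer the final kfree() through
call_rcu(); readers then need no lock at all (function names here are
illustrative wrappers around the real calls):

	static void example_event_walk(struct perf_counter_context *ctx)
	{
		struct perf_counter *counter;

		rcu_read_lock();
		list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
			/* counter cannot be freed until rcu_read_unlock() */
		}
		rcu_read_unlock();
	}

	static void example_removal(struct perf_counter *counter)
	{
		list_del_rcu(&counter->event_entry);	/* under ctx->lock */
		call_rcu(&counter->rcu_head, free_counter_rcu);
	}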
Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 4 ++++ kernel/perf_counter.c | 30 +++++++++++++++++++----------- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index dfb4c7ce18b..08c11a6afeb 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -187,6 +187,7 @@ struct file; struct perf_counter { #ifdef CONFIG_PERF_COUNTERS struct list_head list_entry; + struct list_head event_entry; struct list_head sibling_list; struct perf_counter *group_leader; const struct hw_perf_counter_ops *hw_ops; @@ -220,6 +221,8 @@ struct perf_counter { struct perf_data *irqdata; struct perf_data *usrdata; struct perf_data data[2]; + + struct rcu_head rcu_head; #endif }; @@ -243,6 +246,7 @@ struct perf_counter_context { struct mutex mutex; struct list_head counter_list; + struct list_head event_list; int nr_counters; int nr_active; int is_active; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index f9330d5827c..8d6ecfa64c0 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -22,6 +22,7 @@ #include #include #include +#include /* * Each CPU has a list of per CPU counters: @@ -72,6 +73,8 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) list_add_tail(&counter->list_entry, &ctx->counter_list); else list_add_tail(&counter->list_entry, &group_leader->sibling_list); + + list_add_rcu(&counter->event_entry, &ctx->event_list); } static void @@ -80,6 +83,7 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) struct perf_counter *sibling, *tmp; list_del_init(&counter->list_entry); + list_del_rcu(&counter->event_entry); /* * If this was a group counter with sibling counters then @@ -1133,6 +1137,14 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu) return ctx; } +static void free_counter_rcu(struct rcu_head *head) +{ + struct perf_counter *counter; + + counter = container_of(head, struct perf_counter, rcu_head); + kfree(counter); +} + /* * Called when the last reference to the file is gone. 
*/ @@ -1151,7 +1163,7 @@ static int perf_release(struct inode *inode, struct file *file) mutex_unlock(&counter->mutex); mutex_unlock(&ctx->mutex); - kfree(counter); + call_rcu(&counter->rcu_head, free_counter_rcu); put_context(ctx); return 0; @@ -1491,22 +1503,16 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, int nmi, struct pt_regs *regs) { struct perf_counter *counter; - unsigned long flags; - if (list_empty(&ctx->counter_list)) + if (list_empty(&ctx->event_list)) return; - spin_lock_irqsave(&ctx->lock, flags); - - /* - * XXX: make counter_list RCU safe - */ - list_for_each_entry(counter, &ctx->counter_list, list_entry) { + rcu_read_lock(); + list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { if (perf_swcounter_match(counter, event, regs)) perf_swcounter_add(counter, nr, nmi, regs); } - - spin_unlock_irqrestore(&ctx->lock, flags); + rcu_read_unlock(); } void perf_swcounter_event(enum hw_event_types event, u64 nr, @@ -1846,6 +1852,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, mutex_init(&counter->mutex); INIT_LIST_HEAD(&counter->list_entry); + INIT_LIST_HEAD(&counter->event_entry); INIT_LIST_HEAD(&counter->sibling_list); init_waitqueue_head(&counter->waitq); @@ -1992,6 +1999,7 @@ __perf_counter_init_context(struct perf_counter_context *ctx, spin_lock_init(&ctx->lock); mutex_init(&ctx->mutex); INIT_LIST_HEAD(&ctx->counter_list); + INIT_LIST_HEAD(&ctx->event_list); ctx->task = task; } -- cgit v1.2.3 From 039fc91e064b81c2820ff16c304be5aba35fd126 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 13 Mar 2009 16:43:47 +0100 Subject: perf_counter: fix hrtimer sampling Impact: fix deadlock with perfstat Fix for the perfstat fubar.. We cannot unconditionally call hrtimer_cancel() without ever having done hrtimer_init() on the thing. 
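The invariant being restored: hrtimer_cancel() may only be called on a timer
that has been through hrtimer_init(). Hoisting the init out of the
irq_period conditional makes the disable path unconditionally safe; in
sketch form (condensed from the hunks below, wrapper name illustrative):

	static int example_enable(struct hw_perf_counter *hwc)
	{
		/* always init, so a later hrtimer_cancel() is always legal */
		hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		hwc->hrtimer.function = perf_swcounter_hrtimer;

		if (hwc->irq_period)	/* only sampling counters start it */
			__hrtimer_start_range_ns(&hwc->hrtimer,
					ns_to_ktime(hwc->irq_period), 0,
					HRTIMER_MODE_REL, 0);
		return 0;
	}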
Signed-off-by: Peter Zijlstra
Orig-LKML-Reference: <1236959027.22447.149.camel@twins>
Signed-off-by: Ingo Molnar
---
 kernel/perf_counter.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 8d6ecfa64c0..d6cc22271ef 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1571,9 +1571,9 @@ static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
 	int cpu = raw_smp_processor_id();
 
 	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
+	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hwc->hrtimer.function = perf_swcounter_hrtimer;
 	if (hwc->irq_period) {
-		hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-		hwc->hrtimer.function = perf_swcounter_hrtimer;
 		__hrtimer_start_range_ns(&hwc->hrtimer,
 				ns_to_ktime(hwc->irq_period), 0,
 				HRTIMER_MODE_REL, 0);
@@ -1635,9 +1635,9 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
 	struct hw_perf_counter *hwc = &counter->hw;
 
 	atomic64_set(&hwc->prev_count, task_clock_perf_counter_val(counter, 0));
+	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hwc->hrtimer.function = perf_swcounter_hrtimer;
 	if (hwc->irq_period) {
-		hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-		hwc->hrtimer.function = perf_swcounter_hrtimer;
 		__hrtimer_start_range_ns(&hwc->hrtimer,
 				ns_to_ktime(hwc->irq_period), 0,
 				HRTIMER_MODE_REL, 0);
-- cgit v1.2.3

From 4e193bd4dfdc983d12969b51439b4a1fbaf2daad Mon Sep 17 00:00:00 2001
From: Tim Blechmann
Date: Sat, 14 Mar 2009 14:29:25 +0100
Subject: perf_counter: include missing header

Impact: build fix

In order to compile a kernel with performance counter patches,
<asm/irq_regs.h> has to be included to provide the declaration of

	struct pt_regs *get_irq_regs(void);

[ This bug was masked by unrelated x86 header file changes in the
  x86 tree, but occurs in the tip:perfcounters/core standalone tree. ]

Signed-off-by: Tim Blechmann
Orig-LKML-Reference: <20090314142925.49c29c17@thinkpad>
Signed-off-by: Ingo Molnar
---
 kernel/perf_counter.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index d6cc22271ef..0018c5e8124 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -24,6 +24,8 @@
 #include
 #include
 
+#include <asm/irq_regs.h>
+
 /*
  * Each CPU has a list of per CPU counters:
  */
-- cgit v1.2.3

From 7bb497bd885eedd0f56dfe3cc1b5ff20710d33b9 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Wed, 18 Mar 2009 08:59:21 +0100
Subject: perf_counter: fix crash on perfmon v1 systems

Impact: fix boot crash on Intel Perfmon Version 1 systems

Intel Perfmon v1 does not support the global MSRs, nor does it offer
the generalized MSR ranges. So support v2 and later CPUs only.

Also mark pmc_ops as read-mostly - to avoid false cacheline sharing.

Cc: Paul Mackerras
Cc: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/cpu/perf_counter.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 22dab06c08a..6cba9d47b71 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -57,12 +57,14 @@ struct pmc_x86_ops {
 	int		max_events;
 };
 
-static struct pmc_x86_ops *pmc_ops;
+static struct pmc_x86_ops *pmc_ops __read_mostly;
 
 static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
 	.enabled = 1,
 };
 
+static __read_mostly int intel_perfmon_version;
+
 /*
  * Intel PerfMon v3. Used on Core2 and later.
 */
@@ -613,7 +615,7 @@ void perf_counter_print_debug(void)
 	cpu = smp_processor_id();
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+	if (intel_perfmon_version >= 2) {
 		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
 		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
 		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
@@ -930,10 +932,10 @@ static struct pmc_x86_ops pmc_amd_ops = {
 
 static struct pmc_x86_ops *pmc_intel_init(void)
 {
+	union cpuid10_edx edx;
 	union cpuid10_eax eax;
-	unsigned int ebx;
 	unsigned int unused;
-	union cpuid10_edx edx;
+	unsigned int ebx;
 
 	/*
 	 * Check whether the Architectural PerfMon supports
@@ -943,8 +945,12 @@ static struct pmc_x86_ops *pmc_intel_init(void)
 	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
 		return NULL;
 
+	intel_perfmon_version = eax.split.version_id;
+	if (intel_perfmon_version < 2)
+		return NULL;
+
 	pr_info("Intel Performance Monitoring support detected.\n");
-	pr_info("... version: %d\n", eax.split.version_id);
+	pr_info("... version: %d\n", intel_perfmon_version);
 	pr_info("... bit width: %d\n", eax.split.bit_width);
 	pr_info("... mask length: %d\n", eax.split.mask_length);
-- cgit v1.2.3

From b6c5a71da1477d261bc36254fe1f20d32b57598d Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Mon, 16 Mar 2009 21:00:00 +1100
Subject: perf_counter: abstract wakeup flag setting in core to fix powerpc build

Impact: build fix for powerpc

Commit bd753921015e7905 ("perf_counter: software counter event
infrastructure") introduced a use of TIF_PERF_COUNTERS into the core
perfcounter code. This breaks the build on powerpc because we use a
flag in a per-cpu area to signal wakeups on powerpc rather than a
thread_info flag, because the thread_info flags have to be manipulated
with atomic operations and are thus slower than per-cpu flags.

This fixes the problem by changing the core to use an abstracted
set_perf_counter_pending() function, which is defined on x86 to set
the TIF_PERF_COUNTERS flag and on powerpc to set the per-cpu flag
(paca->perf_counter_pending). It changes the previous powerpc
definition of set_perf_counter_pending to not take an argument and
adds a clear_perf_counter_pending, so as to simplify the definition
on x86.

On x86, set_perf_counter_pending() is defined as a macro. Defining it
as a static inline in arch/x86/include/asm/perf_counter.h causes
compile failures because <asm/perf_counter.h> gets included early in
<linux/sched.h>, and the definitions of set_tsk_thread_flag etc. are
therefore not available in <asm/perf_counter.h>. (On powerpc this
problem is avoided by defining set_perf_counter_pending etc. in
<asm/hw_irq.h>.)
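The shape of the resulting deferred wakeup, condensed (powerpc flavour shown;
on x86 set_perf_counter_pending() sets a thread_info flag and the pending
work is handled on the return-to-user path instead):

	/* NMI/interrupt path: wake_up() is unsafe here, just mark the work */
	set_perf_counter_pending();

	/* later, when interrupts are soft-enabled again: */
	if (get_perf_counter_pending()) {
		clear_perf_counter_pending();
		perf_counter_do_pending();	/* the actual wakeups */
	}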
Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/hw_irq.h | 14 +++++++++++--- arch/powerpc/kernel/irq.c | 11 +++-------- arch/powerpc/kernel/perf_counter.c | 3 +-- arch/x86/include/asm/perf_counter.h | 3 +++ kernel/perf_counter.c | 2 +- 5 files changed, 19 insertions(+), 14 deletions(-) diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index b43076ff92c..cb32d571c9c 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -142,10 +142,17 @@ static inline unsigned long get_perf_counter_pending(void) return x; } -static inline void set_perf_counter_pending(int x) +static inline void set_perf_counter_pending(void) { asm volatile("stb %0,%1(13)" : : - "r" (x), + "r" (1), + "i" (offsetof(struct paca_struct, perf_counter_pending))); +} + +static inline void clear_perf_counter_pending(void) +{ + asm volatile("stb %0,%1(13)" : : + "r" (0), "i" (offsetof(struct paca_struct, perf_counter_pending))); } @@ -158,7 +165,8 @@ static inline unsigned long get_perf_counter_pending(void) return 0; } -static inline void set_perf_counter_pending(int x) {} +static inline void set_perf_counter_pending(void) {} +static inline void clear_perf_counter_pending(void) {} static inline void perf_counter_do_pending(void) {} #endif /* CONFIG_PERF_COUNTERS */ diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 0d2e37c5773..469e9635ff0 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -104,13 +104,6 @@ static inline notrace void set_soft_enabled(unsigned long enable) : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); } -#ifdef CONFIG_PERF_COUNTERS -notrace void __weak perf_counter_do_pending(void) -{ - set_perf_counter_pending(0); -} -#endif - notrace void raw_local_irq_restore(unsigned long en) { /* @@ -142,8 +135,10 @@ notrace void raw_local_irq_restore(unsigned long en) iseries_handle_interrupts(); } - if (get_perf_counter_pending()) + if (get_perf_counter_pending()) { + clear_perf_counter_pending(); perf_counter_do_pending(); + } /* * if (get_paca()->hard_enabled) return; diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 0e33d27cd46..5008762e8bf 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -653,7 +653,6 @@ void perf_counter_do_pending(void) struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters); struct perf_counter *counter; - set_perf_counter_pending(0); for (i = 0; i < cpuhw->n_counters; ++i) { counter = cpuhw->counter[i]; if (counter && counter->wakeup_pending) { @@ -811,7 +810,7 @@ static void perf_counter_interrupt(struct pt_regs *regs) perf_counter_do_pending(); irq_exit(); } else { - set_perf_counter_pending(1); + set_perf_counter_pending(); } } } diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h index 2e08ed73664..1662043b340 100644 --- a/arch/x86/include/asm/perf_counter.h +++ b/arch/x86/include/asm/perf_counter.h @@ -84,6 +84,9 @@ union cpuid10_edx { #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b #define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2) +#define set_perf_counter_pending() \ + set_tsk_thread_flag(current, TIF_PERF_COUNTERS); + #ifdef CONFIG_PERF_COUNTERS extern void init_hw_perf_counters(void); extern void perf_counters_lapic_init(int nmi); diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 0018c5e8124..b39456ad74a 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1433,7 +1433,7 @@ static void 
perf_swcounter_interrupt(struct perf_counter *counter, if (nmi) { counter->wakeup_pending = 1; - set_tsk_thread_flag(current, TIF_PERF_COUNTERS); + set_perf_counter_pending(); } else wake_up(&counter->waitq); } -- cgit v1.2.3 From 01ef09d9ffb5ce9f8d62d1e5206da3d5ca612acc Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 19 Mar 2009 20:26:11 +0100 Subject: perf_counter: fix uninitialized usage of event_list Impact: fix boot crash When doing the generic context switch event I ran into some early boot hangs, which were caused by inf func recursion (event, fault, event, fault). I eventually tracked it down to event_list not being initialized at the time of the first event. Fix this. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Orig-LKML-Reference: <20090319194233.195392657@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/init_task.h | 2 ++ kernel/perf_counter.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 219748d0026..ca226a91abe 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -124,6 +124,8 @@ extern struct cred init_cred; # define INIT_PERF_COUNTERS(tsk) \ .perf_counter_ctx.counter_list = \ LIST_HEAD_INIT(tsk.perf_counter_ctx.counter_list), \ + .perf_counter_ctx.event_list = \ + LIST_HEAD_INIT(tsk.perf_counter_ctx.event_list), \ .perf_counter_ctx.lock = \ __SPIN_LOCK_UNLOCKED(tsk.perf_counter_ctx.lock), #else diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index b39456ad74a..4c4e9eb37ab 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1506,7 +1506,7 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, { struct perf_counter *counter; - if (list_empty(&ctx->event_list)) + if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) return; rcu_read_lock(); -- cgit v1.2.3 From 4a0deca657f3dbb8a707b5dc8f173beec01e7ed2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 19 Mar 2009 20:26:12 +0100 Subject: perf_counter: generic context switch event Impact: cleanup Use the generic software events for context switches. 
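The replacement is a single hook at sched-out time; condensed from the hunk
below:

	/* in perf_counter_task_sched_out(): */
	struct pt_regs *regs = task_pt_regs(task);

	/* nmi = 1: deep inside the scheduler, must not sleep or wake */
	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs);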
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Orig-LKML-Reference: <20090319194233.283522645@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/sched.h | 1 - kernel/perf_counter.c | 60 ++++----------------------------------------------- kernel/sched.c | 6 ------ 3 files changed, 4 insertions(+), 63 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 75b2fc5306d..7ed41f7c5ac 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -138,7 +138,6 @@ extern unsigned long nr_running(void); extern unsigned long nr_uninterruptible(void); extern unsigned long nr_active(void); extern unsigned long nr_iowait(void); -extern u64 cpu_nr_switches(int cpu); extern u64 cpu_nr_migrations(int cpu); extern unsigned long get_parent_ip(unsigned long addr); diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 4c4e9eb37ab..99d5930f0a5 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -710,10 +710,13 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu) { struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); struct perf_counter_context *ctx = &task->perf_counter_ctx; + struct pt_regs *regs; if (likely(!cpuctx->task_ctx)) return; + regs = task_pt_regs(task); + perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs); __perf_counter_sched_out(ctx, cpuctx); cpuctx->task_ctx = NULL; @@ -1667,58 +1670,6 @@ static const struct hw_perf_counter_ops perf_ops_task_clock = { .read = task_clock_perf_counter_read, }; -/* - * Software counter: context switches - */ - -static u64 get_context_switches(struct perf_counter *counter) -{ - struct task_struct *curr = counter->ctx->task; - - if (curr) - return curr->nvcsw + curr->nivcsw; - return cpu_nr_switches(smp_processor_id()); -} - -static void context_switches_perf_counter_update(struct perf_counter *counter) -{ - u64 prev, now; - s64 delta; - - prev = atomic64_read(&counter->hw.prev_count); - now = get_context_switches(counter); - - atomic64_set(&counter->hw.prev_count, now); - - delta = now - prev; - - atomic64_add(delta, &counter->count); -} - -static void context_switches_perf_counter_read(struct perf_counter *counter) -{ - context_switches_perf_counter_update(counter); -} - -static int context_switches_perf_counter_enable(struct perf_counter *counter) -{ - if (counter->prev_state <= PERF_COUNTER_STATE_OFF) - atomic64_set(&counter->hw.prev_count, - get_context_switches(counter)); - return 0; -} - -static void context_switches_perf_counter_disable(struct perf_counter *counter) -{ - context_switches_perf_counter_update(counter); -} - -static const struct hw_perf_counter_ops perf_ops_context_switches = { - .enable = context_switches_perf_counter_enable, - .disable = context_switches_perf_counter_disable, - .read = context_switches_perf_counter_read, -}; - /* * Software counter: cpu migrations */ @@ -1808,11 +1759,8 @@ sw_perf_counter_init(struct perf_counter *counter) case PERF_COUNT_PAGE_FAULTS: case PERF_COUNT_PAGE_FAULTS_MIN: case PERF_COUNT_PAGE_FAULTS_MAJ: - hw_ops = &perf_ops_generic; - break; case PERF_COUNT_CONTEXT_SWITCHES: - if (!counter->hw_event.exclude_kernel) - hw_ops = &perf_ops_context_switches; + hw_ops = &perf_ops_generic; break; case PERF_COUNT_CPU_MIGRATIONS: if (!counter->hw_event.exclude_kernel) diff --git a/kernel/sched.c b/kernel/sched.c index 39e70860216..f76e3c0188a 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2900,14 +2900,8 @@ unsigned long nr_active(void) /* * Externally visible per-cpu scheduler statistics: - * 
cpu_nr_switches(cpu) - number of context switches on that cpu * cpu_nr_migrations(cpu) - number of migrations into that cpu */ -u64 cpu_nr_switches(int cpu) -{ - return cpu_rq(cpu)->nr_switches; -} - u64 cpu_nr_migrations(int cpu) { return cpu_rq(cpu)->nr_migrations_in; -- cgit v1.2.3 From f16009527595ee562308653bc3d0039166d2ab15 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 19 Mar 2009 20:26:16 +0100 Subject: perf_counter: fix up counter free paths Impact: fix crash during perfcounters use I found another counter free path, create a free_counter() call to accomodate generic tear-down. Fixes an RCU bug. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Orig-LKML-Reference: <20090319194233.652078652@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 99d5930f0a5..97f891ffeb4 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1150,6 +1150,11 @@ static void free_counter_rcu(struct rcu_head *head) kfree(counter); } +static void free_counter(struct perf_counter *counter) +{ + call_rcu(&counter->rcu_head, free_counter_rcu); +} + /* * Called when the last reference to the file is gone. */ @@ -1168,7 +1173,7 @@ static int perf_release(struct inode *inode, struct file *file) mutex_unlock(&counter->mutex); mutex_unlock(&ctx->mutex); - call_rcu(&counter->rcu_head, free_counter_rcu); + free_counter(counter); put_context(ctx); return 0; @@ -2128,10 +2133,10 @@ __perf_counter_exit_task(struct task_struct *child, list_entry) { if (sub->parent) { sync_child_counter(sub, sub->parent); - kfree(sub); + free_counter(sub); } } - kfree(child_counter); + free_counter(child_counter); } } -- cgit v1.2.3 From e077df4f439681e43f0db8255b2d215b342ebdc6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 19 Mar 2009 20:26:17 +0100 Subject: perf_counter: hook up the tracepoint events Impact: new perfcounters feature Enable usage of tracepoints as perf counter events. tracepoint event ids can be found in /debug/tracing/event/*/*/id and (for now) are represented as -65536+id in the type field. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Orig-LKML-Reference: <20090319194233.744044174@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 3 +++ init/Kconfig | 5 +++++ kernel/perf_counter.c | 43 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 08c11a6afeb..065984c1ff5 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -53,6 +53,8 @@ enum hw_event_types { PERF_COUNT_PAGE_FAULTS_MAJ = -7, PERF_SW_EVENTS_MIN = -8, + + PERF_TP_EVENTS_MIN = -65536 }; /* @@ -222,6 +224,7 @@ struct perf_counter { struct perf_data *usrdata; struct perf_data data[2]; + void (*destroy)(struct perf_counter *); struct rcu_head rcu_head; #endif }; diff --git a/init/Kconfig b/init/Kconfig index 38a2ecd47c3..4f647142f2e 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -947,6 +947,11 @@ config PERF_COUNTERS Say Y if unsure. 
+config EVENT_PROFILE + bool "Tracepoint profile sources" + depends on PERF_COUNTERS && EVENT_TRACER + default y + endmenu config VM_EVENT_COUNTERS diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 97f891ffeb4..0bbe3e45ba0 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1152,6 +1152,9 @@ static void free_counter_rcu(struct rcu_head *head) static void free_counter(struct perf_counter *counter) { + if (counter->destroy) + counter->destroy(counter); + call_rcu(&counter->rcu_head, free_counter_rcu); } @@ -1727,6 +1730,45 @@ static const struct hw_perf_counter_ops perf_ops_cpu_migrations = { .read = cpu_migrations_perf_counter_read, }; +#ifdef CONFIG_EVENT_PROFILE +void perf_tpcounter_event(int event_id) +{ + perf_swcounter_event(PERF_TP_EVENTS_MIN + event_id, 1, 1, + task_pt_regs(current)); +} + +extern int ftrace_profile_enable(int); +extern void ftrace_profile_disable(int); + +static void tp_perf_counter_destroy(struct perf_counter *counter) +{ + int event_id = counter->hw_event.type - PERF_TP_EVENTS_MIN; + + ftrace_profile_disable(event_id); +} + +static const struct hw_perf_counter_ops * +tp_perf_counter_init(struct perf_counter *counter) +{ + int event_id = counter->hw_event.type - PERF_TP_EVENTS_MIN; + int ret; + + ret = ftrace_profile_enable(event_id); + if (ret) + return NULL; + + counter->destroy = tp_perf_counter_destroy; + + return &perf_ops_generic; +} +#else +static const struct hw_perf_counter_ops * +tp_perf_counter_init(struct perf_counter *counter) +{ + return NULL; +} +#endif + static const struct hw_perf_counter_ops * sw_perf_counter_init(struct perf_counter *counter) { @@ -1772,6 +1814,7 @@ sw_perf_counter_init(struct perf_counter *counter) hw_ops = &perf_ops_cpu_migrations; break; default: + hw_ops = tp_perf_counter_init(counter); break; } -- cgit v1.2.3 From b8e83514b64577b48bfb794fe85fcde40a9343ca Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 19 Mar 2009 20:26:18 +0100 Subject: perf_counter: revamp syscall input ABI Impact: modify ABI The hardware/software classification in hw_event->type became a little strained due to the addition of tracepoint tracing. Instead split up the field and provide a type field to explicitly specify the counter type, while using the event_id field to specify which event to use. Raw counters still work as before, only the raw config now goes into raw_event. 
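Hypothetical sketches of the resulting (type, event_id) naming; the raw
config value is illustrative only, not a real model-specific event code:

	struct perf_counter_hw_event hw_event = { };

	/* a generalized hardware event: */
	hw_event.type		= PERF_TYPE_HARDWARE;
	hw_event.event_id	= PERF_COUNT_INSTRUCTIONS;

	/* a software event: */
	hw_event.type		= PERF_TYPE_SOFTWARE;
	hw_event.event_id	= PERF_COUNT_CONTEXT_SWITCHES;

	/* a raw CPU event: one bit flags it, the rest carries the config */
	hw_event.raw_type	= 1;
	hw_event.raw_event_id	= 0x4064;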
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Orig-LKML-Reference: <20090319194233.836807573@chello.nl> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 4 +- arch/x86/kernel/cpu/perf_counter.c | 10 ++-- include/linux/perf_counter.h | 95 ++++++++++++++++++++++++-------------- kernel/perf_counter.c | 83 ++++++++++++++++++++------------- 4 files changed, 117 insertions(+), 75 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 5008762e8bf..26f69dc7130 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -602,7 +602,7 @@ hw_perf_counter_init(struct perf_counter *counter) return NULL; if ((s64)counter->hw_event.irq_period < 0) return NULL; - ev = counter->hw_event.type; + ev = counter->hw_event.event_id; if (!counter->hw_event.raw) { if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) @@ -692,7 +692,7 @@ static void perf_handle_group(struct perf_counter *counter) list_for_each_entry(sub, &leader->sibling_list, list_entry) { if (sub != counter) sub->hw_ops->read(sub); - perf_store_irq_data(counter, sub->hw_event.type); + perf_store_irq_data(counter, sub->hw_event.event_config); perf_store_irq_data(counter, atomic64_read(&sub->count)); } } diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 6cba9d47b71..d844ae41d5a 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -217,15 +217,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter) /* * Raw event type provide the config in the event structure */ - if (hw_event->raw) { - hwc->config |= pmc_ops->raw_event(hw_event->type); + if (hw_event->raw_type) { + hwc->config |= pmc_ops->raw_event(hw_event->raw_event_id); } else { - if (hw_event->type >= pmc_ops->max_events) + if (hw_event->event_id >= pmc_ops->max_events) return -EINVAL; /* * The generic map: */ - hwc->config |= pmc_ops->event_map(hw_event->type); + hwc->config |= pmc_ops->event_map(hw_event->event_id); } counter->wakeup_pending = 0; @@ -715,7 +715,7 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown) list_for_each_entry(counter, &group_leader->sibling_list, list_entry) { x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); - perf_store_irq_data(sibling, counter->hw_event.type); + perf_store_irq_data(sibling, counter->hw_event.event_config); perf_store_irq_data(sibling, atomic64_read(&counter->count)); } } diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 065984c1ff5..8f939490550 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -21,56 +21,81 @@ */ /* - * Generalized performance counter event types, used by the hw_event.type - * parameter of the sys_perf_counter_open() syscall: + * hw_event.type */ -enum hw_event_types { +enum perf_event_types { + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, + PERF_TYPE_TRACEPOINT = 2, + /* - * Common hardware events, generalized by the kernel: + * available TYPE space, raw is the max value. 
*/ - PERF_COUNT_CPU_CYCLES = 0, - PERF_COUNT_INSTRUCTIONS = 1, - PERF_COUNT_CACHE_REFERENCES = 2, - PERF_COUNT_CACHE_MISSES = 3, - PERF_COUNT_BRANCH_INSTRUCTIONS = 4, - PERF_COUNT_BRANCH_MISSES = 5, - PERF_COUNT_BUS_CYCLES = 6, - PERF_HW_EVENTS_MAX = 7, + PERF_TYPE_RAW = 128, +}; +/* + * Generalized performance counter event types, used by the hw_event.event_id + * parameter of the sys_perf_counter_open() syscall: + */ +enum hw_event_ids { /* - * Special "software" counters provided by the kernel, even if - * the hardware does not support performance counters. These - * counters measure various physical and sw events of the - * kernel (and allow the profiling of them as well): + * Common hardware events, generalized by the kernel: */ - PERF_COUNT_CPU_CLOCK = -1, - PERF_COUNT_TASK_CLOCK = -2, - PERF_COUNT_PAGE_FAULTS = -3, - PERF_COUNT_CONTEXT_SWITCHES = -4, - PERF_COUNT_CPU_MIGRATIONS = -5, - PERF_COUNT_PAGE_FAULTS_MIN = -6, - PERF_COUNT_PAGE_FAULTS_MAJ = -7, - - PERF_SW_EVENTS_MIN = -8, + PERF_COUNT_CPU_CYCLES = 0, + PERF_COUNT_INSTRUCTIONS = 1, + PERF_COUNT_CACHE_REFERENCES = 2, + PERF_COUNT_CACHE_MISSES = 3, + PERF_COUNT_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_BRANCH_MISSES = 5, + PERF_COUNT_BUS_CYCLES = 6, + + PERF_HW_EVENTS_MAX = 7, +}; - PERF_TP_EVENTS_MIN = -65536 +/* + * Special "software" counters provided by the kernel, even if the hardware + * does not support performance counters. These counters measure various + * physical and sw events of the kernel (and allow the profiling of them as + * well): + */ +enum sw_event_ids { + PERF_COUNT_CPU_CLOCK = 0, + PERF_COUNT_TASK_CLOCK = 1, + PERF_COUNT_PAGE_FAULTS = 2, + PERF_COUNT_CONTEXT_SWITCHES = 3, + PERF_COUNT_CPU_MIGRATIONS = 4, + PERF_COUNT_PAGE_FAULTS_MIN = 5, + PERF_COUNT_PAGE_FAULTS_MAJ = 6, + + PERF_SW_EVENTS_MAX = 7, }; /* * IRQ-notification data record type: */ enum perf_counter_record_type { - PERF_RECORD_SIMPLE = 0, - PERF_RECORD_IRQ = 1, - PERF_RECORD_GROUP = 2, + PERF_RECORD_SIMPLE = 0, + PERF_RECORD_IRQ = 1, + PERF_RECORD_GROUP = 2, }; /* * Hardware event to monitor via a performance monitoring counter: */ struct perf_counter_hw_event { - __s64 type; + union { + struct { + __u64 event_id : 56, + type : 8; + }; + struct { + __u64 raw_event_id : 63, + raw_type : 1; + }; + __u64 event_config; + }; __u64 irq_period; __u64 record_type; @@ -78,7 +103,6 @@ struct perf_counter_hw_event { __u64 disabled : 1, /* off by default */ nmi : 1, /* NMI sampling */ - raw : 1, /* raw event type */ inherit : 1, /* children inherit it */ pinned : 1, /* must always be on PMU */ exclusive : 1, /* only group on PMU */ @@ -87,7 +111,7 @@ struct perf_counter_hw_event { exclude_hv : 1, /* ditto hypervisor */ exclude_idle : 1, /* don't count when idle */ - __reserved_1 : 54; + __reserved_1 : 55; __u32 extra_config_len; __u32 __reserved_4; @@ -298,10 +322,11 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader, */ static inline int is_software_counter(struct perf_counter *counter) { - return !counter->hw_event.raw && counter->hw_event.type < 0; + return !counter->hw_event.raw_type && + counter->hw_event.type != PERF_TYPE_HARDWARE; } -extern void perf_swcounter_event(enum hw_event_types, u64, int, struct pt_regs *); +extern void perf_swcounter_event(u32, u64, int, struct pt_regs *); #else static inline void @@ -320,7 +345,7 @@ static inline u64 hw_perf_save_disable(void) { return 0; } static inline int perf_counter_task_disable(void) { return -EINVAL; } static inline int perf_counter_task_enable(void) { return -EINVAL; } -static 
inline void perf_swcounter_event(enum hw_event_types event, u64 nr, +static inline void perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs) { } #endif diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 0bbe3e45ba0..68a56a68bc7 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1395,12 +1395,6 @@ static void perf_swcounter_set_period(struct perf_counter *counter) atomic64_set(&hwc->count, -left); } -static void perf_swcounter_save_and_restart(struct perf_counter *counter) -{ - perf_swcounter_update(counter); - perf_swcounter_set_period(counter); -} - static void perf_swcounter_store_irq(struct perf_counter *counter, u64 data) { struct perf_data *irqdata = counter->irqdata; @@ -1421,7 +1415,7 @@ static void perf_swcounter_handle_group(struct perf_counter *sibling) list_for_each_entry(counter, &group_leader->sibling_list, list_entry) { counter->hw_ops->read(counter); - perf_swcounter_store_irq(sibling, counter->hw_event.type); + perf_swcounter_store_irq(sibling, counter->hw_event.event_config); perf_swcounter_store_irq(sibling, atomic64_read(&counter->count)); } } @@ -1477,21 +1471,25 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) static void perf_swcounter_overflow(struct perf_counter *counter, int nmi, struct pt_regs *regs) { - perf_swcounter_save_and_restart(counter); + perf_swcounter_update(counter); + perf_swcounter_set_period(counter); perf_swcounter_interrupt(counter, nmi, regs); } static int perf_swcounter_match(struct perf_counter *counter, - enum hw_event_types event, - struct pt_regs *regs) + enum perf_event_types type, + u32 event, struct pt_regs *regs) { if (counter->state != PERF_COUNTER_STATE_ACTIVE) return 0; - if (counter->hw_event.raw) + if (counter->hw_event.raw_type) + return 0; + + if (counter->hw_event.type != type) return 0; - if (counter->hw_event.type != event) + if (counter->hw_event.event_id != event) return 0; if (counter->hw_event.exclude_user && user_mode(regs)) @@ -1512,8 +1510,8 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr, } static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, - enum hw_event_types event, u64 nr, - int nmi, struct pt_regs *regs) + enum perf_event_types type, u32 event, + u64 nr, int nmi, struct pt_regs *regs) { struct perf_counter *counter; @@ -1522,24 +1520,31 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, rcu_read_lock(); list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { - if (perf_swcounter_match(counter, event, regs)) + if (perf_swcounter_match(counter, type, event, regs)) perf_swcounter_add(counter, nr, nmi, regs); } rcu_read_unlock(); } -void perf_swcounter_event(enum hw_event_types event, u64 nr, - int nmi, struct pt_regs *regs) +static void __perf_swcounter_event(enum perf_event_types type, u32 event, + u64 nr, int nmi, struct pt_regs *regs) { struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); - perf_swcounter_ctx_event(&cpuctx->ctx, event, nr, nmi, regs); - if (cpuctx->task_ctx) - perf_swcounter_ctx_event(cpuctx->task_ctx, event, nr, nmi, regs); + perf_swcounter_ctx_event(&cpuctx->ctx, type, event, nr, nmi, regs); + if (cpuctx->task_ctx) { + perf_swcounter_ctx_event(cpuctx->task_ctx, type, event, + nr, nmi, regs); + } put_cpu_var(perf_cpu_context); } +void perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs) +{ + __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs); +} + static void perf_swcounter_read(struct perf_counter 
*counter) { perf_swcounter_update(counter); @@ -1733,8 +1738,12 @@ static const struct hw_perf_counter_ops perf_ops_cpu_migrations = { #ifdef CONFIG_EVENT_PROFILE void perf_tpcounter_event(int event_id) { - perf_swcounter_event(PERF_TP_EVENTS_MIN + event_id, 1, 1, - task_pt_regs(current)); + struct pt_regs *regs = get_irq_regs(); + + if (!regs) + regs = task_pt_regs(current); + + __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs); } extern int ftrace_profile_enable(int); @@ -1742,15 +1751,13 @@ extern void ftrace_profile_disable(int); static void tp_perf_counter_destroy(struct perf_counter *counter) { - int event_id = counter->hw_event.type - PERF_TP_EVENTS_MIN; - - ftrace_profile_disable(event_id); + ftrace_profile_disable(counter->hw_event.event_id); } static const struct hw_perf_counter_ops * tp_perf_counter_init(struct perf_counter *counter) { - int event_id = counter->hw_event.type - PERF_TP_EVENTS_MIN; + int event_id = counter->hw_event.event_id; int ret; ret = ftrace_profile_enable(event_id); @@ -1758,6 +1765,7 @@ tp_perf_counter_init(struct perf_counter *counter) return NULL; counter->destroy = tp_perf_counter_destroy; + counter->hw.irq_period = counter->hw_event.irq_period; return &perf_ops_generic; } @@ -1783,7 +1791,7 @@ sw_perf_counter_init(struct perf_counter *counter) * to be kernel events, and page faults are never hypervisor * events. */ - switch (counter->hw_event.type) { + switch (counter->hw_event.event_id) { case PERF_COUNT_CPU_CLOCK: hw_ops = &perf_ops_cpu_clock; @@ -1813,9 +1821,6 @@ sw_perf_counter_init(struct perf_counter *counter) if (!counter->hw_event.exclude_kernel) hw_ops = &perf_ops_cpu_migrations; break; - default: - hw_ops = tp_perf_counter_init(counter); - break; } if (hw_ops) @@ -1870,10 +1875,22 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, counter->state = PERF_COUNTER_STATE_OFF; hw_ops = NULL; - if (!hw_event->raw && hw_event->type < 0) - hw_ops = sw_perf_counter_init(counter); - else + + if (hw_event->raw_type) + hw_ops = hw_perf_counter_init(counter); + else switch (hw_event->type) { + case PERF_TYPE_HARDWARE: hw_ops = hw_perf_counter_init(counter); + break; + + case PERF_TYPE_SOFTWARE: + hw_ops = sw_perf_counter_init(counter); + break; + + case PERF_TYPE_TRACEPOINT: + hw_ops = tp_perf_counter_init(counter); + break; + } if (!hw_ops) { kfree(counter); -- cgit v1.2.3 From 0322cd6ec504b0bf08ca7b2c3d7f43bda37d79c9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 19 Mar 2009 20:26:19 +0100 Subject: perf_counter: unify irq output code Impact: cleanup Having 3 slightly different copies of the same code around does nobody any good. First step in revamping the output format. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Orig-LKML-Reference: <20090319194233.929962222@chello.nl> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 51 +----------------- arch/x86/kernel/cpu/perf_counter.c | 53 +------------------ include/linux/perf_counter.h | 2 + kernel/perf_counter.c | 106 ++++++++++++++++++++----------------- 4 files changed, 61 insertions(+), 151 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 26f69dc7130..88b72eb4af1 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -662,41 +662,6 @@ void perf_counter_do_pending(void) } } -/* - * Record data for an irq counter. - * This function was lifted from the x86 code; maybe it should - * go in the core? 
- */ -static void perf_store_irq_data(struct perf_counter *counter, u64 data) -{ - struct perf_data *irqdata = counter->irqdata; - - if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) { - irqdata->overrun++; - } else { - u64 *p = (u64 *) &irqdata->data[irqdata->len]; - - *p = data; - irqdata->len += sizeof(u64); - } -} - -/* - * Record all the values of the counters in a group - */ -static void perf_handle_group(struct perf_counter *counter) -{ - struct perf_counter *leader, *sub; - - leader = counter->group_leader; - list_for_each_entry(sub, &leader->sibling_list, list_entry) { - if (sub != counter) - sub->hw_ops->read(sub); - perf_store_irq_data(counter, sub->hw_event.event_config); - perf_store_irq_data(counter, atomic64_read(&sub->count)); - } -} - /* * A counter has overflowed; update its count and record * things if requested. Note that interrupts are hard-disabled @@ -736,20 +701,8 @@ static void record_and_restart(struct perf_counter *counter, long val, /* * Finally record data if requested. */ - if (record) { - switch (counter->hw_event.record_type) { - case PERF_RECORD_SIMPLE: - break; - case PERF_RECORD_IRQ: - perf_store_irq_data(counter, instruction_pointer(regs)); - counter->wakeup_pending = 1; - break; - case PERF_RECORD_GROUP: - perf_handle_group(counter); - counter->wakeup_pending = 1; - break; - } - } + if (record) + perf_counter_output(counter, 1, regs); } /* diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index d844ae41d5a..902282d68b0 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -674,20 +674,6 @@ static void pmc_generic_disable(struct perf_counter *counter) x86_perf_counter_update(counter, hwc, idx); } -static void perf_store_irq_data(struct perf_counter *counter, u64 data) -{ - struct perf_data *irqdata = counter->irqdata; - - if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) { - irqdata->overrun++; - } else { - u64 *p = (u64 *) &irqdata->data[irqdata->len]; - - *p = data; - irqdata->len += sizeof(u64); - } -} - /* * Save and restart an expired counter. 
Called by NMI contexts, * so it has to be careful about preempting normal counter ops: @@ -704,22 +690,6 @@ static void perf_save_and_restart(struct perf_counter *counter) __pmc_generic_enable(counter, hwc, idx); } -static void -perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown) -{ - struct perf_counter *counter, *group_leader = sibling->group_leader; - - /* - * Store sibling timestamps (if any): - */ - list_for_each_entry(counter, &group_leader->sibling_list, list_entry) { - - x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); - perf_store_irq_data(sibling, counter->hw_event.event_config); - perf_store_irq_data(sibling, atomic64_read(&counter->count)); - } -} - /* * Maximum interrupt frequency of 100KHz per CPU */ @@ -754,28 +724,7 @@ again: continue; perf_save_and_restart(counter); - - switch (counter->hw_event.record_type) { - case PERF_RECORD_SIMPLE: - continue; - case PERF_RECORD_IRQ: - perf_store_irq_data(counter, instruction_pointer(regs)); - break; - case PERF_RECORD_GROUP: - perf_handle_group(counter, &status, &ack); - break; - } - /* - * From NMI context we cannot call into the scheduler to - * do a task wakeup - but we mark these generic as - * wakeup_pending and initate a wakeup callback: - */ - if (nmi) { - counter->wakeup_pending = 1; - set_tsk_thread_flag(current, TIF_PERF_COUNTERS); - } else { - wake_up(&counter->waitq); - } + perf_counter_output(counter, nmi, regs); } hw_perf_ack_status(ack); diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 8f939490550..a4b76c0175f 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -317,6 +317,8 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader, struct perf_cpu_context *cpuctx, struct perf_counter_context *ctx, int cpu); +extern void perf_counter_output(struct perf_counter *counter, + int nmi, struct pt_regs *regs); /* * Return 1 for a software counter, 0 for a hardware counter */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 68a56a68bc7..f054b8c9bf9 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1353,6 +1353,60 @@ static const struct file_operations perf_fops = { .compat_ioctl = perf_ioctl, }; +/* + * Output + */ + +static void perf_counter_store_irq(struct perf_counter *counter, u64 data) +{ + struct perf_data *irqdata = counter->irqdata; + + if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) { + irqdata->overrun++; + } else { + u64 *p = (u64 *) &irqdata->data[irqdata->len]; + + *p = data; + irqdata->len += sizeof(u64); + } +} + +static void perf_counter_handle_group(struct perf_counter *counter) +{ + struct perf_counter *leader, *sub; + + leader = counter->group_leader; + list_for_each_entry(sub, &leader->sibling_list, list_entry) { + if (sub != counter) + sub->hw_ops->read(sub); + perf_counter_store_irq(counter, sub->hw_event.event_config); + perf_counter_store_irq(counter, atomic64_read(&sub->count)); + } +} + +void perf_counter_output(struct perf_counter *counter, + int nmi, struct pt_regs *regs) +{ + switch (counter->hw_event.record_type) { + case PERF_RECORD_SIMPLE: + return; + + case PERF_RECORD_IRQ: + perf_counter_store_irq(counter, instruction_pointer(regs)); + break; + + case PERF_RECORD_GROUP: + perf_counter_handle_group(counter); + break; + } + + if (nmi) { + counter->wakeup_pending = 1; + set_perf_counter_pending(); + } else + wake_up(&counter->waitq); +} + /* * Generic software counter infrastructure */ @@ -1395,54 +1449,6 @@ static void 
perf_swcounter_set_period(struct perf_counter *counter) atomic64_set(&hwc->count, -left); } -static void perf_swcounter_store_irq(struct perf_counter *counter, u64 data) -{ - struct perf_data *irqdata = counter->irqdata; - - if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) { - irqdata->overrun++; - } else { - u64 *p = (u64 *) &irqdata->data[irqdata->len]; - - *p = data; - irqdata->len += sizeof(u64); - } -} - -static void perf_swcounter_handle_group(struct perf_counter *sibling) -{ - struct perf_counter *counter, *group_leader = sibling->group_leader; - - list_for_each_entry(counter, &group_leader->sibling_list, list_entry) { - counter->hw_ops->read(counter); - perf_swcounter_store_irq(sibling, counter->hw_event.event_config); - perf_swcounter_store_irq(sibling, atomic64_read(&counter->count)); - } -} - -static void perf_swcounter_interrupt(struct perf_counter *counter, - int nmi, struct pt_regs *regs) -{ - switch (counter->hw_event.record_type) { - case PERF_RECORD_SIMPLE: - break; - - case PERF_RECORD_IRQ: - perf_swcounter_store_irq(counter, instruction_pointer(regs)); - break; - - case PERF_RECORD_GROUP: - perf_swcounter_handle_group(counter); - break; - } - - if (nmi) { - counter->wakeup_pending = 1; - set_perf_counter_pending(); - } else - wake_up(&counter->waitq); -} - static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) { struct perf_counter *counter; @@ -1461,7 +1467,7 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) regs = task_pt_regs(current); if (regs) - perf_swcounter_interrupt(counter, 0, regs); + perf_counter_output(counter, 0, regs); hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period)); @@ -1473,7 +1479,7 @@ static void perf_swcounter_overflow(struct perf_counter *counter, { perf_swcounter_update(counter); perf_swcounter_set_period(counter); - perf_swcounter_interrupt(counter, nmi, regs); + perf_counter_output(counter, nmi, regs); } static int perf_swcounter_match(struct perf_counter *counter, -- cgit v1.2.3 From db4fb5acf20295063d1d5105e67724eb51440207 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 19 Mar 2009 20:26:20 +0100 Subject: perf_counter: powerpc: clean up perc_counter_interrupt Impact: cleanup This updates the powerpc perf_counter_interrupt following on from the "perf_counter: unify irq output code" patch. Since we now use the generic perf_counter_output code, which sets the perf_counter_pending flag directly, we no longer need the need_wakeup variable. This removes need_wakeup and makes perf_counter_interrupt use get_perf_counter_pending() instead. Signed-off-by: Paul Mackerras Signed-off-by: Peter Zijlstra Cc: Steven Rostedt Orig-LKML-Reference: <20090319194234.024464535@chello.nl> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 88b72eb4af1..830ca9c4494 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -723,8 +723,6 @@ static void perf_counter_interrupt(struct pt_regs *regs) /* counter has overflowed */ found = 1; record_and_restart(counter, val, regs); - if (counter->wakeup_pending) - need_wakeup = 1; } } @@ -754,17 +752,14 @@ static void perf_counter_interrupt(struct pt_regs *regs) /* * If we need a wakeup, check whether interrupts were soft-enabled * when we took the interrupt. 
If they were, we can wake stuff up - * immediately; otherwise we'll have to set a flag and do the - * wakeup when interrupts get soft-enabled. + * immediately; otherwise we'll have to do the wakeup when interrupts + * get soft-enabled. */ - if (need_wakeup) { - if (regs->softe) { - irq_enter(); - perf_counter_do_pending(); - irq_exit(); - } else { - set_perf_counter_pending(); - } + if (get_perf_counter_pending() && regs->softe) { + irq_enter(); + clear_perf_counter_pending(); + perf_counter_do_pending(); + irq_exit(); } } -- cgit v1.2.3 From 9aaa131a279834dff75c290c91f0058f62d72d46 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sat, 21 Mar 2009 15:31:47 +1100 Subject: perf_counter: fix type/event_id layout on big-endian systems Impact: build fix for powerpc Commit db3a944aca35ae61 ("perf_counter: revamp syscall input ABI") expanded the hw_event.type field into a union of structs containing bitfields. In particular it introduced a type field and a raw_type field, with the intention that the 1-bit raw_type field should overlay the most-significant bit of the 8-bit type field, and in fact perf_counter_alloc() now assumes that (or at least, assumes that raw_type doesn't overlay any of the bits that are 1 in the values of PERF_TYPE_{HARDWARE,SOFTWARE,TRACEPOINT}). Unfortunately this is not true on big-endian systems such as PowerPC, where bitfields are laid out from left to right, i.e. from most significant bit to least significant. This means that setting hw_event.type = PERF_TYPE_SOFTWARE will set hw_event.raw_type to 1. This fixes it by making the layout depend on whether or not __BIG_ENDIAN_BITFIELD is defined. It's a bit ugly, but that's what we get for using bitfields in a user/kernel ABI. Also, that commit didn't fix up some places in arch/powerpc/kernel/perf_counter.c where hw_event.raw and hw_event.event_id were used. This fixes them too.
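For illustration only (this snippet is not part of the patch): a minimal stand-alone C program that makes the overlay visible. The union mirrors the field widths of the ABI above; the value 1 for PERF_TYPE_SOFTWARE is an assumption for the example. It needs C11 or GNU C for the anonymous struct members, e.g. "gcc -std=gnu11 bitfield-layout.c":

    /* bitfield-layout.c: show where the 1-bit raw_type field lands. */
    #include <stdio.h>
    #include <string.h>

    union hw_event_layout {
            struct {
                    unsigned long long event_id : 56,
                                       type     :  8;
            };
            struct {
                    unsigned long long raw_event_id : 63,
                                       raw_type     :  1;
            };
            unsigned long long event_config;
    };

    int main(void)
    {
            union hw_event_layout u;

            memset(&u, 0, sizeof(u));
            u.type = 1;     /* assumed value of PERF_TYPE_SOFTWARE */

            /*
             * With LSB-first bitfields (the little-endian layout) raw_type
             * occupies bit 63 of the word and prints 0 here.  With MSB-first
             * bitfields (e.g. powerpc) raw_type occupies bit 0, which the
             * assignment to 'type' has just set, so it prints 1 - exactly
             * the mis-layout the #ifdef below avoids.
             */
            printf("type=%llu raw_type=%llu event_config=%#llx\n",
                   (unsigned long long)u.type,
                   (unsigned long long)u.raw_type,
                   u.event_config);
            return 0;
    }

Running it on a little-endian and a big-endian machine shows the two layouts disagreeing, which is why the ABI now spells out both bitfield orders explicitly.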
Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/perf_counter.c | 9 +++++---- include/linux/perf_counter.h | 12 ++++++++++++ 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 830ca9c4494..6413d9c0313 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -602,12 +602,13 @@ hw_perf_counter_init(struct perf_counter *counter) return NULL; if ((s64)counter->hw_event.irq_period < 0) return NULL; - ev = counter->hw_event.event_id; - if (!counter->hw_event.raw) { - if (ev >= ppmu->n_generic || - ppmu->generic_events[ev] == 0) + if (!counter->hw_event.raw_type) { + ev = counter->hw_event.event_id; + if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) return NULL; ev = ppmu->generic_events[ev]; + } else { + ev = counter->hw_event.raw_event_id; } counter->hw.config_base = ev; counter->hw.idx = 0; diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index a4b76c0175f..98f5990be1e 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -15,6 +15,7 @@ #include #include +#include <asm/byteorder.h> /* * User-space ABI bits: @@ -86,6 +87,7 @@ enum perf_counter_record_type { */ struct perf_counter_hw_event { union { +#ifndef __BIG_ENDIAN_BITFIELD struct { __u64 event_id : 56, type : 8; @@ -94,6 +96,16 @@ struct perf_counter_hw_event { __u64 raw_event_id : 63, raw_type : 1; }; +#else + struct { + __u64 type : 8, + event_id : 56; + }; + struct { + __u64 raw_type : 1, + raw_event_id : 63; + }; +#endif /* __BIG_ENDIAN_BITFIELD */ __u64 event_config; }; -- cgit v1.2.3 From 6f9f791eb53b56097cd311a1a5517a8e89bdaf35 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 23 Mar 2009 21:26:52 +0100 Subject: perf_counter: create Documentation/perf_counter/ and move perfcounters.txt there We'll have more files in that directory; prepare for that. Acked-by: Peter Zijlstra Cc: Paul Mackerras Signed-off-by: Ingo Molnar --- Documentation/perf-counters.txt | 147 ---------------------------- Documentation/perf_counter/design.txt | 147 ++++++++++++++++++++++++++++++ 2 files changed, 147 insertions(+), 147 deletions(-) delete mode 100644 Documentation/perf-counters.txt create mode 100644 Documentation/perf_counter/design.txt diff --git a/Documentation/perf-counters.txt b/Documentation/perf-counters.txt deleted file mode 100644 index fddd32189a5..00000000000 --- a/Documentation/perf-counters.txt +++ /dev/null @@ -1,147 +0,0 @@ - -Performance Counters for Linux ------------------------------- - -Performance counters are special hardware registers available on most modern -CPUs. These registers count the number of certain types of hw events: such -as instructions executed, cachemisses suffered, or branches mis-predicted - -without slowing down the kernel or applications. These registers can also -trigger interrupts when a threshold number of events have passed - and can -thus be used to profile the code that runs on that CPU. - -The Linux Performance Counter subsystem provides an abstraction of these -hardware capabilities. It provides per task and per CPU counters, counter -groups, and it provides event capabilities on top of those. - -Performance counters are accessed via special file descriptors. -There's one file descriptor per virtual counter used.
- -The special file descriptor is opened via the perf_counter_open() -system call: - - int sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr, - pid_t pid, int cpu, int group_fd); - -The syscall returns the new fd. The fd can be used via the normal -VFS system calls: read() can be used to read the counter, fcntl() -can be used to set the blocking mode, etc. - -Multiple counters can be kept open at a time, and the counters -can be poll()ed. - -When creating a new counter fd, 'perf_counter_hw_event' is: - -/* - * Hardware event to monitor via a performance monitoring counter: - */ -struct perf_counter_hw_event { - s64 type; - - u64 irq_period; - u32 record_type; - - u32 disabled : 1, /* off by default */ - nmi : 1, /* NMI sampling */ - raw : 1, /* raw event type */ - __reserved_1 : 29; - - u64 __reserved_2; -}; - -/* - * Generalized performance counter event types, used by the hw_event.type - * parameter of the sys_perf_counter_open() syscall: - */ -enum hw_event_types { - /* - * Common hardware events, generalized by the kernel: - */ - PERF_COUNT_CYCLES = 0, - PERF_COUNT_INSTRUCTIONS = 1, - PERF_COUNT_CACHE_REFERENCES = 2, - PERF_COUNT_CACHE_MISSES = 3, - PERF_COUNT_BRANCH_INSTRUCTIONS = 4, - PERF_COUNT_BRANCH_MISSES = 5, - - /* - * Special "software" counters provided by the kernel, even if - * the hardware does not support performance counters. These - * counters measure various physical and sw events of the - * kernel (and allow the profiling of them as well): - */ - PERF_COUNT_CPU_CLOCK = -1, - PERF_COUNT_TASK_CLOCK = -2, - /* - * Future software events: - */ - /* PERF_COUNT_PAGE_FAULTS = -3, - PERF_COUNT_CONTEXT_SWITCHES = -4, */ -}; - -These are standardized types of events that work uniformly on all CPUs -that implements Performance Counters support under Linux. If a CPU is -not able to count branch-misses, then the system call will return --EINVAL. - -More hw_event_types are supported as well, but they are CPU -specific and are enumerated via /sys on a per CPU basis. Raw hw event -types can be passed in under hw_event.type if hw_event.raw is 1. -For example, to count "External bus cycles while bus lock signal asserted" -events on Intel Core CPUs, pass in a 0x4064 event type value and set -hw_event.raw to 1. - -'record_type' is the type of data that a read() will provide for the -counter, and it can be one of: - -/* - * IRQ-notification data record type: - */ -enum perf_counter_record_type { - PERF_RECORD_SIMPLE = 0, - PERF_RECORD_IRQ = 1, - PERF_RECORD_GROUP = 2, -}; - -a "simple" counter is one that counts hardware events and allows -them to be read out into a u64 count value. (read() returns 8 on -a successful read of a simple counter.) - -An "irq" counter is one that will also provide an IRQ context information: -the IP of the interrupted context. In this case read() will return -the 8-byte counter value, plus the Instruction Pointer address of the -interrupted context. - -The parameter 'hw_event_period' is the number of events before waking up -a read() that is blocked on a counter fd. Zero value means a non-blocking -counter. - -The 'pid' parameter allows the counter to be specific to a task: - - pid == 0: if the pid parameter is zero, the counter is attached to the - current task. 
- - pid > 0: the counter is attached to a specific task (if the current task - has sufficient privilege to do so) - - pid < 0: all tasks are counted (per cpu counters) - -The 'cpu' parameter allows a counter to be made specific to a full -CPU: - - cpu >= 0: the counter is restricted to a specific CPU - cpu == -1: the counter counts on all CPUs - -(Note: the combination of 'pid == -1' and 'cpu == -1' is not valid.) - -A 'pid > 0' and 'cpu == -1' counter is a per task counter that counts -events of that task and 'follows' that task to whatever CPU the task -gets schedule to. Per task counters can be created by any user, for -their own tasks. - -A 'pid == -1' and 'cpu == x' counter is a per CPU counter that counts -all events on CPU-x. Per CPU counters need CAP_SYS_ADMIN privilege. - -Group counters are created by passing in a group_fd of another counter. -Groups are scheduled at once and can be used with PERF_RECORD_GROUP -to record multi-dimensional timestamps. - diff --git a/Documentation/perf_counter/design.txt b/Documentation/perf_counter/design.txt new file mode 100644 index 00000000000..fddd32189a5 --- /dev/null +++ b/Documentation/perf_counter/design.txt @@ -0,0 +1,147 @@ + +Performance Counters for Linux +------------------------------ + +Performance counters are special hardware registers available on most modern +CPUs. These registers count the number of certain types of hw events: such +as instructions executed, cachemisses suffered, or branches mis-predicted - +without slowing down the kernel or applications. These registers can also +trigger interrupts when a threshold number of events have passed - and can +thus be used to profile the code that runs on that CPU. + +The Linux Performance Counter subsystem provides an abstraction of these +hardware capabilities. It provides per task and per CPU counters, counter +groups, and it provides event capabilities on top of those. + +Performance counters are accessed via special file descriptors. +There's one file descriptor per virtual counter used. + +The special file descriptor is opened via the perf_counter_open() +system call: + + int sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr, + pid_t pid, int cpu, int group_fd); + +The syscall returns the new fd. The fd can be used via the normal +VFS system calls: read() can be used to read the counter, fcntl() +can be used to set the blocking mode, etc. + +Multiple counters can be kept open at a time, and the counters +can be poll()ed. + +When creating a new counter fd, 'perf_counter_hw_event' is: + +/* + * Hardware event to monitor via a performance monitoring counter: + */ +struct perf_counter_hw_event { + s64 type; + + u64 irq_period; + u32 record_type; + + u32 disabled : 1, /* off by default */ + nmi : 1, /* NMI sampling */ + raw : 1, /* raw event type */ + __reserved_1 : 29; + + u64 __reserved_2; +}; + +/* + * Generalized performance counter event types, used by the hw_event.type + * parameter of the sys_perf_counter_open() syscall: + */ +enum hw_event_types { + /* + * Common hardware events, generalized by the kernel: + */ + PERF_COUNT_CYCLES = 0, + PERF_COUNT_INSTRUCTIONS = 1, + PERF_COUNT_CACHE_REFERENCES = 2, + PERF_COUNT_CACHE_MISSES = 3, + PERF_COUNT_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_BRANCH_MISSES = 5, + + /* + * Special "software" counters provided by the kernel, even if + * the hardware does not support performance counters. 
These + * counters measure various physical and sw events of the + * kernel (and allow the profiling of them as well): + */ + PERF_COUNT_CPU_CLOCK = -1, + PERF_COUNT_TASK_CLOCK = -2, + /* + * Future software events: + */ + /* PERF_COUNT_PAGE_FAULTS = -3, + PERF_COUNT_CONTEXT_SWITCHES = -4, */ +}; + +These are standardized types of events that work uniformly on all CPUs +that implements Performance Counters support under Linux. If a CPU is +not able to count branch-misses, then the system call will return +-EINVAL. + +More hw_event_types are supported as well, but they are CPU +specific and are enumerated via /sys on a per CPU basis. Raw hw event +types can be passed in under hw_event.type if hw_event.raw is 1. +For example, to count "External bus cycles while bus lock signal asserted" +events on Intel Core CPUs, pass in a 0x4064 event type value and set +hw_event.raw to 1. + +'record_type' is the type of data that a read() will provide for the +counter, and it can be one of: + +/* + * IRQ-notification data record type: + */ +enum perf_counter_record_type { + PERF_RECORD_SIMPLE = 0, + PERF_RECORD_IRQ = 1, + PERF_RECORD_GROUP = 2, +}; + +a "simple" counter is one that counts hardware events and allows +them to be read out into a u64 count value. (read() returns 8 on +a successful read of a simple counter.) + +An "irq" counter is one that will also provide an IRQ context information: +the IP of the interrupted context. In this case read() will return +the 8-byte counter value, plus the Instruction Pointer address of the +interrupted context. + +The parameter 'hw_event_period' is the number of events before waking up +a read() that is blocked on a counter fd. Zero value means a non-blocking +counter. + +The 'pid' parameter allows the counter to be specific to a task: + + pid == 0: if the pid parameter is zero, the counter is attached to the + current task. + + pid > 0: the counter is attached to a specific task (if the current task + has sufficient privilege to do so) + + pid < 0: all tasks are counted (per cpu counters) + +The 'cpu' parameter allows a counter to be made specific to a full +CPU: + + cpu >= 0: the counter is restricted to a specific CPU + cpu == -1: the counter counts on all CPUs + +(Note: the combination of 'pid == -1' and 'cpu == -1' is not valid.) + +A 'pid > 0' and 'cpu == -1' counter is a per task counter that counts +events of that task and 'follows' that task to whatever CPU the task +gets schedule to. Per task counters can be created by any user, for +their own tasks. + +A 'pid == -1' and 'cpu == x' counter is a per CPU counter that counts +all events on CPU-x. Per CPU counters need CAP_SYS_ADMIN privilege. + +Group counters are created by passing in a group_fd of another counter. +Groups are scheduled at once and can be used with PERF_RECORD_GROUP +to record multi-dimensional timestamps. + -- cgit v1.2.3 From e0143bad9dbf2a8fad4c5430562bceba196b66ea Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 23 Mar 2009 21:29:59 +0100 Subject: perf_counter: add sample user-space to Documentation/perf_counter/ Initial version of kerneltop.c and perfstat.c. 
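Before the full listings, a minimal sketch of the pattern both tools are built around - open a counter fd, run the workload, read() the count. This is illustrative only: it assumes the user-space ABI block that kerneltop.c and perfstat.c carry locally (struct perf_counter_hw_event, the PERF_COUNT_* event IDs and the sys_perf_counter_open() wrapper shown below) has been pasted above main():

    /* count-self.c: count the instructions this task executes. */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            struct perf_counter_hw_event hw_event;
            unsigned long long count;
            int fd;

            memset(&hw_event, 0, sizeof(hw_event));
            hw_event.type        = PERF_COUNT_INSTRUCTIONS; /* generic event 1 */
            hw_event.record_type = PERF_RECORD_SIMPLE;      /* plain u64 read() */

            /* pid 0: this task, cpu -1: any CPU, no group leader, no flags */
            fd = sys_perf_counter_open(&hw_event, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_counter_open");
                    return 1;
            }

            /* ... the workload to be measured runs here ... */

            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("%llu instructions\n", count);

            close(fd);
            return 0;
    }

kerneltop elaborates this into one fd per CPU per event with PERF_RECORD_IRQ sampling; perfstat into one inherited, initially-disabled counter per event around a fork()+exec() of the measured command.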
Acked-by: Peter Zijlstra Cc: Paul Mackerras Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 12 + Documentation/perf_counter/kerneltop.c | 956 +++++++++++++++++++++++++++++++++ Documentation/perf_counter/perfstat.c | 521 ++++++++++++++++++ 3 files changed, 1489 insertions(+) create mode 100644 Documentation/perf_counter/Makefile create mode 100644 Documentation/perf_counter/kerneltop.c create mode 100644 Documentation/perf_counter/perfstat.c diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile new file mode 100644 index 00000000000..b45749753fc --- /dev/null +++ b/Documentation/perf_counter/Makefile @@ -0,0 +1,12 @@ +BINS = kerneltop perfstat + +all: $(BINS) + +kerneltop: kerneltop.c perfcounters.h + cc -O6 -Wall -lrt `pkg-config --cflags --libs glib-2.0` -o $@ $< + +perfstat: kerneltop + ln -sf kerneltop perfstat + +clean: + rm $(BINS) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c new file mode 100644 index 00000000000..cf0e30bab5d --- /dev/null +++ b/Documentation/perf_counter/kerneltop.c @@ -0,0 +1,956 @@ +/* + * kerneltop.c: show top kernel functions - performance counters showcase + + Build with: + + cc -O6 -Wall `pkg-config --cflags --libs glib-2.0` -o kerneltop kerneltop.c + + Sample output: + +------------------------------------------------------------------------------ + KernelTop: 2669 irqs/sec [NMI, cache-misses/cache-refs], (all, cpu: 2) +------------------------------------------------------------------------------ + + weight RIP kernel function + ______ ________________ _______________ + + 35.20 - ffffffff804ce74b : skb_copy_and_csum_dev + 33.00 - ffffffff804cb740 : sock_alloc_send_skb + 31.26 - ffffffff804ce808 : skb_push + 22.43 - ffffffff80510004 : tcp_established_options + 19.00 - ffffffff8027d250 : find_get_page + 15.76 - ffffffff804e4fc9 : eth_type_trans + 15.20 - ffffffff804d8baa : dst_release + 14.86 - ffffffff804cf5d8 : skb_release_head_state + 14.00 - ffffffff802217d5 : read_hpet + 12.00 - ffffffff804ffb7f : __ip_local_out + 11.97 - ffffffff804fc0c8 : ip_local_deliver_finish + 8.54 - ffffffff805001a3 : ip_queue_xmit + + Started by Ingo Molnar + + Improvements and fixes by: + + Arjan van de Ven + Yanmin Zhang + Mike Galbraith + + Released under the GPL v2. 
(and only v2, not any later version) + + */ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include + +#ifdef __x86_64__ +# define __NR_perf_counter_open 295 +#endif + +#ifdef __i386__ +# define __NR_perf_counter_open 333 +#endif + +/* + * Pick up some kernel type conventions: + */ +#define __user +#define asmlinkage + +typedef unsigned int __u32; +typedef unsigned long long __u64; +typedef long long __s64; + +/* + * User-space ABI bits: + */ + +/* + * Generalized performance counter event types, used by the hw_event.type + * parameter of the sys_perf_counter_open() syscall: + */ +enum hw_event_types { + /* + * Common hardware events, generalized by the kernel: + */ + PERF_COUNT_CPU_CYCLES = 0, + PERF_COUNT_INSTRUCTIONS = 1, + PERF_COUNT_CACHE_REFERENCES = 2, + PERF_COUNT_CACHE_MISSES = 3, + PERF_COUNT_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_BRANCH_MISSES = 5, + PERF_COUNT_BUS_CYCLES = 6, + + PERF_HW_EVENTS_MAX = 7, + + /* + * Special "software" counters provided by the kernel, even if + * the hardware does not support performance counters. These + * counters measure various physical and sw events of the + * kernel (and allow the profiling of them as well): + */ + PERF_COUNT_CPU_CLOCK = -1, + PERF_COUNT_TASK_CLOCK = -2, + PERF_COUNT_PAGE_FAULTS = -3, + PERF_COUNT_CONTEXT_SWITCHES = -4, + PERF_COUNT_CPU_MIGRATIONS = -5, + + PERF_SW_EVENTS_MIN = -6, +}; + +/* + * IRQ-notification data record type: + */ +enum perf_counter_record_type { + PERF_RECORD_SIMPLE = 0, + PERF_RECORD_IRQ = 1, + PERF_RECORD_GROUP = 2, +}; + +/* + * Hardware event to monitor via a performance monitoring counter: + */ +struct perf_counter_hw_event { + __s64 type; + + __u64 irq_period; + __u64 record_type; + __u64 read_format; + + __u64 disabled : 1, /* off by default */ + nmi : 1, /* NMI sampling */ + raw : 1, /* raw event type */ + inherit : 1, /* children inherit it */ + pinned : 1, /* must always be on PMU */ + exclusive : 1, /* only group on PMU */ + exclude_user : 1, /* don't count user */ + exclude_kernel : 1, /* ditto kernel */ + exclude_hv : 1, /* ditto hypervisor */ + exclude_idle : 1, /* don't count when idle */ + + __reserved_1 : 54; + + __u32 extra_config_len; + __u32 __reserved_4; + + __u64 __reserved_2; + __u64 __reserved_3; +}; + +/* + * Ioctls that can be done on a perf counter fd: + */ +#define PERF_COUNTER_IOC_ENABLE _IO('$', 0) +#define PERF_COUNTER_IOC_DISABLE _IO('$', 1) + +asmlinkage int sys_perf_counter_open( + + struct perf_counter_hw_event *hw_event_uptr __user, + pid_t pid, + int cpu, + int group_fd, + unsigned long flags) +{ + int ret; + + ret = syscall( + __NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags); +#if defined(__x86_64__) || defined(__i386__) + if (ret < 0 && ret > -4096) { + errno = -ret; + ret = -1; + } +#endif + return ret; +} + +const char *event_types [] = { + "CPU cycles", + "instructions", + "cache-refs", + "cache-misses", + "branches", + "branch-misses", + "bus cycles" +}; + +const unsigned int default_count[] = { + 1000000, + 1000000, + 10000, + 10000, + 1000000, + 10000, +}; + +/* + * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all + * counters in the current task. 
+ */ +#define PR_TASK_PERF_COUNTERS_DISABLE 31 +#define PR_TASK_PERF_COUNTERS_ENABLE 32 + +#define MAX_COUNTERS 8 + +static int nr_counters = -1; + +static __u64 count_filter = 100; + +#define MAX_NR_CPUS 256 + +static int event_count[MAX_COUNTERS]; +static unsigned long event_id[MAX_COUNTERS]; +static int event_raw[MAX_COUNTERS]; + +static int tid = -1; +static int profile_cpu = -1; +static int nr_cpus = 0; +static int nmi = 1; +static int group = 0; + +static char *vmlinux; + +static char *sym_filter; +static unsigned long filter_start; +static unsigned long filter_end; + +static int delay_secs = 2; +static int zero; +static int dump_symtab; + +struct source_line { + uint64_t EIP; + unsigned long count; + char *line; +}; + +static GList *lines; + +static void display_help(void) +{ + printf( + "Usage: kerneltop []\n\n" + "KernelTop Options (up to %d event types can be specified at once):\n\n", + MAX_COUNTERS); + printf( + " -e EID --event_id=EID # event type ID [default: 0]\n" + " 0: CPU cycles\n" + " 1: instructions\n" + " 2: cache accesses\n" + " 3: cache misses\n" + " 4: branch instructions\n" + " 5: branch prediction misses\n" + " 6: bus cycles\n\n" + " rNNN: raw PMU events (eventsel+umask)\n\n" + " -c CNT --count=CNT # event period to sample\n\n" + " -C CPU --cpu=CPU # CPU (-1 for all) [default: -1]\n" + " -p PID --pid=PID # PID of sampled task (-1 for all) [default: -1]\n\n" + " -d delay --delay= # sampling/display delay [default: 2]\n" + " -f CNT --filter=CNT # min-event-count filter [default: 100]\n\n" + " -s symbol --symbol= # function to be showed annotated one-shot\n" + " -x path --vmlinux= # the vmlinux binary, required for -s use:\n" + " -z --zero # zero counts after display\n" + " -D --dump_symtab # dump symbol table to stderr on startup\n" + "\n"); + + exit(0); +} + +static void process_options(int argc, char *argv[]) +{ + int error = 0, counter; + + for (;;) { + int option_index = 0; + /** Options for getopt */ + static struct option long_options[] = { + {"count", required_argument, NULL, 'c'}, + {"cpu", required_argument, NULL, 'C'}, + {"delay", required_argument, NULL, 'd'}, + {"dump_symtab", no_argument, NULL, 'D'}, + {"event_id", required_argument, NULL, 'e'}, + {"filter", required_argument, NULL, 'f'}, + {"group", required_argument, NULL, 'g'}, + {"help", no_argument, NULL, 'h'}, + {"nmi", required_argument, NULL, 'n'}, + {"pid", required_argument, NULL, 'p'}, + {"vmlinux", required_argument, NULL, 'x'}, + {"symbol", required_argument, NULL, 's'}, + {"zero", no_argument, NULL, 'z'}, + {NULL, 0, NULL, 0 } + }; + int c = getopt_long(argc, argv, "c:C:d:De:f:g:hn:p:s:x:z", + long_options, &option_index); + if (c == -1) + break; + + switch (c) { + case 'c': + if (nr_counters == -1) + nr_counters = 0; + event_count[nr_counters] = atoi(optarg); break; + case 'C': + /* CPU and PID are mutually exclusive */ + if (tid != -1) { + printf("WARNING: CPU switch overriding PID\n"); + sleep(1); + tid = -1; + } + profile_cpu = atoi(optarg); break; + case 'd': delay_secs = atoi(optarg); break; + case 'D': dump_symtab = 1; break; + + case 'e': + nr_counters++; + if (nr_counters == MAX_COUNTERS) { + error = 1; + break; + } + if (*optarg == 'r') { + event_raw[nr_counters] = 1; + ++optarg; + } + event_id[nr_counters] = strtol(optarg, NULL, 16); + break; + + case 'f': count_filter = atoi(optarg); break; + case 'g': group = atoi(optarg); break; + case 'h': display_help(); break; + case 'n': nmi = atoi(optarg); break; + case 'p': + /* CPU and PID are mutually exclusive */ + if (profile_cpu != 
-1) { + printf("WARNING: PID switch overriding CPU\n"); + sleep(1); + profile_cpu = -1; + } + tid = atoi(optarg); break; + case 's': sym_filter = strdup(optarg); break; + case 'x': vmlinux = strdup(optarg); break; + case 'z': zero = 1; break; + default: error = 1; break; + } + } + if (error) + display_help(); + + nr_counters++; + if (nr_counters < 1) + nr_counters = 1; + + for (counter = 0; counter < nr_counters; counter++) { + if (event_count[counter]) + continue; + + if (event_id[counter] < PERF_HW_EVENTS_MAX) + event_count[counter] = default_count[event_id[counter]]; + else + event_count[counter] = 100000; + } +} + +static uint64_t min_ip; +static uint64_t max_ip = -1ll; + +struct sym_entry { + unsigned long long addr; + char *sym; + unsigned long count[MAX_COUNTERS]; + int skip; + GList *source; +}; + +#define MAX_SYMS 100000 + +static int sym_table_count; + +struct sym_entry *sym_filter_entry; + +static struct sym_entry sym_table[MAX_SYMS]; + +static void show_details(struct sym_entry *sym); + +/* + * Ordering weight: count-1 * count-1 * ... / count-n + */ +static double sym_weight(const struct sym_entry *sym) +{ + double weight; + int counter; + + weight = sym->count[0]; + + for (counter = 1; counter < nr_counters-1; counter++) + weight *= sym->count[counter]; + + weight /= (sym->count[counter] + 1); + + return weight; +} + +static int compare(const void *__sym1, const void *__sym2) +{ + const struct sym_entry *sym1 = __sym1, *sym2 = __sym2; + + return sym_weight(sym1) < sym_weight(sym2); +} + +static time_t last_refresh; +static long events; +static long userspace_events; +static const char CONSOLE_CLEAR[] = ""; + +static struct sym_entry tmp[MAX_SYMS]; + +static void print_sym_table(void) +{ + int i, printed; + int counter; + float events_per_sec = events/delay_secs; + float kevents_per_sec = (events-userspace_events)/delay_secs; + + memcpy(tmp, sym_table, sizeof(sym_table[0])*sym_table_count); + qsort(tmp, sym_table_count, sizeof(tmp[0]), compare); + + write(1, CONSOLE_CLEAR, strlen(CONSOLE_CLEAR)); + + printf( +"------------------------------------------------------------------------------\n"); + printf( " KernelTop:%8.0f irqs/sec kernel:%3.1f%% [%s, ", + events_per_sec, + 100.0 - (100.0*((events_per_sec-kevents_per_sec)/events_per_sec)), + nmi ? 
"NMI" : "IRQ"); + + if (nr_counters == 1) + printf("%d ", event_count[0]); + + for (counter = 0; counter < nr_counters; counter++) { + if (counter) + printf("/"); + + if (event_id[counter] < PERF_HW_EVENTS_MAX) + printf( "%s", event_types[event_id[counter]]); + else + printf( "raw:%04lx", event_id[counter]); + } + + printf( "], "); + + if (tid != -1) + printf(" (tid: %d", tid); + else + printf(" (all"); + + if (profile_cpu != -1) + printf(", cpu: %d)\n", profile_cpu); + else { + if (tid != -1) + printf(")\n"); + else + printf(", %d CPUs)\n", nr_cpus); + } + + printf("------------------------------------------------------------------------------\n\n"); + + if (nr_counters == 1) + printf(" events"); + else + printf(" weight events"); + + printf(" RIP kernel function\n" + " ______ ______ ________________ _______________\n\n" + ); + + printed = 0; + for (i = 0; i < sym_table_count; i++) { + int count; + + if (nr_counters == 1) { + if (printed <= 18 && + tmp[i].count[0] >= count_filter) { + printf("%19.2f - %016llx : %s\n", + sym_weight(tmp + i), tmp[i].addr, tmp[i].sym); + printed++; + } + } else { + if (printed <= 18 && + tmp[i].count[0] >= count_filter) { + printf("%8.1f %10ld - %016llx : %s\n", + sym_weight(tmp + i), + tmp[i].count[0], + tmp[i].addr, tmp[i].sym); + printed++; + } + } + /* + * Add decay to the counts: + */ + for (count = 0; count < nr_counters; count++) + sym_table[i].count[count] = zero ? 0 : sym_table[i].count[count] * 7 / 8; + } + + if (sym_filter_entry) + show_details(sym_filter_entry); + + last_refresh = time(NULL); + + { + struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; + + if (poll(&stdin_poll, 1, 0) == 1) { + printf("key pressed - exiting.\n"); + exit(0); + } + } +} + +static int read_symbol(FILE *in, struct sym_entry *s) +{ + static int filter_match = 0; + char *sym, stype; + char str[500]; + int rc, pos; + + rc = fscanf(in, "%llx %c %499s", &s->addr, &stype, str); + if (rc == EOF) + return -1; + + assert(rc == 3); + + /* skip until end of line: */ + pos = strlen(str); + do { + rc = fgetc(in); + if (rc == '\n' || rc == EOF || pos >= 499) + break; + str[pos] = rc; + pos++; + } while (1); + str[pos] = 0; + + sym = str; + + /* Filter out known duplicates and non-text symbols. */ + if (!strcmp(sym, "_text")) + return 1; + if (!min_ip && !strcmp(sym, "_stext")) + return 1; + if (!strcmp(sym, "_etext") || !strcmp(sym, "_sinittext")) + return 1; + if (stype != 'T' && stype != 't') + return 1; + if (!strncmp("init_module", sym, 11) || !strncmp("cleanup_module", sym, 14)) + return 1; + if (strstr(sym, "_text_start") || strstr(sym, "_text_end")) + return 1; + + s->sym = malloc(strlen(str)); + assert(s->sym); + + strcpy((char *)s->sym, str); + s->skip = 0; + + /* Tag events to be skipped. 
*/ + if (!strcmp("default_idle", s->sym) || !strcmp("cpu_idle", s->sym)) + s->skip = 1; + if (!strcmp("enter_idle", s->sym) || !strcmp("exit_idle", s->sym)) + s->skip = 1; + + if (filter_match == 1) { + filter_end = s->addr; + filter_match = -1; + if (filter_end - filter_start > 10000) { + printf("hm, too large filter symbol <%s> - skipping.\n", + sym_filter); + printf("symbol filter start: %016lx\n", filter_start); + printf(" end: %016lx\n", filter_end); + filter_end = filter_start = 0; + sym_filter = NULL; + sleep(1); + } + } + if (filter_match == 0 && sym_filter && !strcmp(s->sym, sym_filter)) { + filter_match = 1; + filter_start = s->addr; + } + + return 0; +} + +int compare_addr(const void *__sym1, const void *__sym2) +{ + const struct sym_entry *sym1 = __sym1, *sym2 = __sym2; + + return sym1->addr > sym2->addr; +} + +static void sort_symbol_table(void) +{ + int i, dups; + + do { + qsort(sym_table, sym_table_count, sizeof(sym_table[0]), compare_addr); + for (i = 0, dups = 0; i < sym_table_count; i++) { + if (sym_table[i].addr == sym_table[i+1].addr) { + sym_table[i+1].addr = -1ll; + dups++; + } + } + sym_table_count -= dups; + } while(dups); +} + +static void parse_symbols(void) +{ + struct sym_entry *last; + + FILE *kallsyms = fopen("/proc/kallsyms", "r"); + + if (!kallsyms) { + printf("Could not open /proc/kallsyms - no CONFIG_KALLSYMS_ALL=y?\n"); + exit(-1); + } + + while (!feof(kallsyms)) { + if (read_symbol(kallsyms, &sym_table[sym_table_count]) == 0) { + sym_table_count++; + assert(sym_table_count <= MAX_SYMS); + } + } + + sort_symbol_table(); + min_ip = sym_table[0].addr; + max_ip = sym_table[sym_table_count-1].addr; + last = sym_table + sym_table_count++; + + last->addr = -1ll; + last->sym = ""; + + if (filter_end) { + int count; + for (count=0; count < sym_table_count; count ++) { + if (!strcmp(sym_table[count].sym, sym_filter)) { + sym_filter_entry = &sym_table[count]; + break; + } + } + } + if (dump_symtab) { + int i; + + for (i = 0; i < sym_table_count; i++) + fprintf(stderr, "%llx %s\n", + sym_table[i].addr, sym_table[i].sym); + } +} + + +static void parse_vmlinux(char *filename) +{ + FILE *file; + char command[PATH_MAX*2]; + if (!filename) + return; + + sprintf(command, "objdump --start-address=0x%016lx --stop-address=0x%016lx -dS %s", filter_start, filter_end, filename); + + file = popen(command, "r"); + if (!file) + return; + + while (!feof(file)) { + struct source_line *src; + size_t dummy = 0; + char *c; + + src = malloc(sizeof(struct source_line)); + assert(src != NULL); + memset(src, 0, sizeof(struct source_line)); + + if (getline(&src->line, &dummy, file) < 0) + break; + if (!src->line) + break; + + c = strchr(src->line, '\n'); + if (c) + *c = 0; + + lines = g_list_prepend(lines, src); + + if (strlen(src->line)>8 && src->line[8] == ':') + src->EIP = strtoull(src->line, NULL, 16); + if (strlen(src->line)>8 && src->line[16] == ':') + src->EIP = strtoull(src->line, NULL, 16); + } + pclose(file); + lines = g_list_reverse(lines); +} + +static void record_precise_ip(uint64_t ip) +{ + struct source_line *line; + GList *item; + + item = g_list_first(lines); + while (item) { + line = item->data; + if (line->EIP == ip) + line->count++; + if (line->EIP > ip) + break; + item = g_list_next(item); + } +} + +static void lookup_sym_in_vmlinux(struct sym_entry *sym) +{ + struct source_line *line; + GList *item; + char pattern[PATH_MAX]; + sprintf(pattern, "<%s>:", sym->sym); + + item = g_list_first(lines); + while (item) { + line = item->data; + if (strstr(line->line, pattern)) { 
+ sym->source = item; + break; + } + item = g_list_next(item); + } +} + +void show_lines(GList *item_queue, int item_queue_count) +{ + int i; + struct source_line *line; + + for (i = 0; i < item_queue_count; i++) { + line = item_queue->data; + printf("%8li\t%s\n", line->count, line->line); + item_queue = g_list_next(item_queue); + } +} + +#define TRACE_COUNT 3 + +static void show_details(struct sym_entry *sym) +{ + struct source_line *line; + GList *item; + int displayed = 0; + GList *item_queue = NULL; + int item_queue_count = 0; + + if (!sym->source) + lookup_sym_in_vmlinux(sym); + if (!sym->source) + return; + + printf("Showing details for %s\n", sym->sym); + + item = sym->source; + while (item) { + line = item->data; + if (displayed && strstr(line->line, ">:")) + break; + + if (!item_queue_count) + item_queue = item; + item_queue_count ++; + + if (line->count >= count_filter) { + show_lines(item_queue, item_queue_count); + item_queue_count = 0; + item_queue = NULL; + } else if (item_queue_count > TRACE_COUNT) { + item_queue = g_list_next(item_queue); + item_queue_count --; + } + + line->count = 0; + displayed++; + if (displayed > 300) + break; + item = g_list_next(item); + } +} + +/* + * Binary search in the histogram table and record the hit: + */ +static void record_ip(uint64_t ip, int counter) +{ + int left_idx, middle_idx, right_idx, idx; + unsigned long left, middle, right; + + record_precise_ip(ip); + + left_idx = 0; + right_idx = sym_table_count-1; + assert(ip <= max_ip && ip >= min_ip); + + while (left_idx + 1 < right_idx) { + middle_idx = (left_idx + right_idx) / 2; + + left = sym_table[ left_idx].addr; + middle = sym_table[middle_idx].addr; + right = sym_table[ right_idx].addr; + + if (!(left <= middle && middle <= right)) { + printf("%016lx...\n%016lx...\n%016lx\n", left, middle, right); + printf("%d %d %d\n", left_idx, middle_idx, right_idx); + } + assert(left <= middle && middle <= right); + if (!(left <= ip && ip <= right)) { + printf(" left: %016lx\n", left); + printf(" ip: %016lx\n", ip); + printf("right: %016lx\n", right); + } + assert(left <= ip && ip <= right); + /* + * [ left .... target .... middle .... right ] + * => right := middle + */ + if (ip < middle) { + right_idx = middle_idx; + continue; + } + /* + * [ left .... middle ... target ... 
right ] + * => left := middle + */ + left_idx = middle_idx; + } + + idx = left_idx; + + if (!sym_table[idx].skip) + sym_table[idx].count[counter]++; + else events--; +} + +static void process_event(uint64_t ip, int counter) +{ + events++; + + if (ip < min_ip || ip > max_ip) { + userspace_events++; + return; + } + + record_ip(ip, counter); +} + +int main(int argc, char *argv[]) +{ + struct pollfd event_array[MAX_NR_CPUS][MAX_COUNTERS]; + struct perf_counter_hw_event hw_event; + int fd[MAX_NR_CPUS][MAX_COUNTERS]; + int i, counter, group_fd; + unsigned int cpu; + uint64_t ip; + ssize_t res; + int ret; + + process_options(argc, argv); + + nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); + if (tid != -1 || profile_cpu != -1) + nr_cpus = 1; + + assert(nr_cpus <= MAX_NR_CPUS); + + for (i = 0; i < nr_cpus; i++) { + group_fd = -1; + for (counter = 0; counter < nr_counters; counter++) { + + cpu = profile_cpu; + if (tid == -1 && profile_cpu == -1) + cpu = i; + + memset(&hw_event, 0, sizeof(hw_event)); + hw_event.type = event_id[counter]; + hw_event.raw = event_raw[counter]; + hw_event.irq_period = event_count[counter]; + hw_event.record_type = PERF_RECORD_IRQ; + hw_event.nmi = nmi; + + fd[i][counter] = sys_perf_counter_open(&hw_event, tid, cpu, group_fd, 0); + fcntl(fd[i][counter], F_SETFL, O_NONBLOCK); + if (fd[i][counter] < 0) { + printf("kerneltop error: syscall returned with %d (%s)\n", + fd[i][counter], strerror(-fd[i][counter])); + if (fd[i][counter] == -1) + printf("Are you root?\n"); + exit(-1); + } + assert(fd[i][counter] >= 0); + + /* + * First counter acts as the group leader: + */ + if (group && group_fd == -1) + group_fd = fd[i][counter]; + + event_array[i][counter].fd = fd[i][counter]; + event_array[i][counter].events = POLLIN; + } + } + + parse_symbols(); + if (vmlinux && sym_filter_entry) + parse_vmlinux(vmlinux); + + printf("KernelTop refresh period: %d seconds\n", delay_secs); + last_refresh = time(NULL); + + while (1) { + int hits = events; + + for (i = 0; i < nr_cpus; i++) { + for (counter = 0; counter < nr_counters; counter++) { + res = read(fd[i][counter], (char *) &ip, sizeof(ip)); + if (res > 0) { + assert(res == sizeof(ip)); + + process_event(ip, counter); + } + } + } + + if (time(NULL) >= last_refresh + delay_secs) { + print_sym_table(); + events = userspace_events = 0; + } + + if (hits == events) + ret = poll(event_array[0], nr_cpus, 1000); + hits = events; + } + + return 0; +} diff --git a/Documentation/perf_counter/perfstat.c b/Documentation/perf_counter/perfstat.c new file mode 100644 index 00000000000..9a5808fbcf9 --- /dev/null +++ b/Documentation/perf_counter/perfstat.c @@ -0,0 +1,521 @@ +/* + * perfstat: /usr/bin/time -alike performance counter statistics utility + * + * It summarizes the counter events of all tasks (and child tasks), + * covering all CPUs that the command (or workload) executes on. + * It only counts the per-task events of the workload started, + * independent of how many other tasks run on those CPUs. + * + * Build with: cc -O2 -g -lrt -Wall -W -o perfstat perfstat.c + * + * Sample output: + * + + $ ./perfstat -e 1 -e 3 -e 5 ls -lR /usr/include/ >/dev/null + + Performance counter stats for 'ls': + + 163516953 instructions + 2295 cache-misses + 2855182 branch-misses + + * + * Copyright (C) 2008, Red Hat Inc, Ingo Molnar + * + * Released under the GPLv2 (not later). 
+ * + * Percpu counter support by: Yanmin Zhang + * Symbolic event options by: Wu Fengguang + */ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifdef __x86_64__ +# define __NR_perf_counter_open 295 +#endif + +#ifdef __i386__ +# define __NR_perf_counter_open 333 +#endif + +#ifdef __powerpc__ +#define __NR_perf_counter_open 319 +#endif + +/* + * Pick up some kernel type conventions: + */ +#define __user +#define asmlinkage + +typedef unsigned int __u32; +typedef unsigned long long __u64; +typedef long long __s64; + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +/* + * User-space ABI bits: + */ + +/* + * Generalized performance counter event types, used by the hw_event.type + * parameter of the sys_perf_counter_open() syscall: + */ +enum hw_event_types { + /* + * Common hardware events, generalized by the kernel: + */ + PERF_COUNT_CPU_CYCLES = 0, + PERF_COUNT_INSTRUCTIONS = 1, + PERF_COUNT_CACHE_REFERENCES = 2, + PERF_COUNT_CACHE_MISSES = 3, + PERF_COUNT_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_BRANCH_MISSES = 5, + PERF_COUNT_BUS_CYCLES = 6, + + PERF_HW_EVENTS_MAX = 7, + + /* + * Special "software" counters provided by the kernel, even if + * the hardware does not support performance counters. These + * counters measure various physical and sw events of the + * kernel (and allow the profiling of them as well): + */ + PERF_COUNT_CPU_CLOCK = -1, + PERF_COUNT_TASK_CLOCK = -2, + PERF_COUNT_PAGE_FAULTS = -3, + PERF_COUNT_CONTEXT_SWITCHES = -4, + PERF_COUNT_CPU_MIGRATIONS = -5, + + PERF_SW_EVENTS_MIN = -6, +}; + +/* + * IRQ-notification data record type: + */ +enum perf_counter_record_type { + PERF_RECORD_SIMPLE = 0, + PERF_RECORD_IRQ = 1, + PERF_RECORD_GROUP = 2, +}; + +/* + * Hardware event to monitor via a performance monitoring counter: + */ +struct perf_counter_hw_event { + __s64 type; + + __u64 irq_period; + __u64 record_type; + __u64 read_format; + + __u64 disabled : 1, /* off by default */ + nmi : 1, /* NMI sampling */ + raw : 1, /* raw event type */ + inherit : 1, /* children inherit it */ + pinned : 1, /* must always be on PMU */ + exclusive : 1, /* only group on PMU */ + exclude_user : 1, /* don't count user */ + exclude_kernel : 1, /* ditto kernel */ + exclude_hv : 1, /* ditto hypervisor */ + exclude_idle : 1, /* don't count when idle */ + + __reserved_1 : 54; + + __u32 extra_config_len; + __u32 __reserved_4; + + __u64 __reserved_2; + __u64 __reserved_3; +}; + +/* + * Ioctls that can be done on a perf counter fd: + */ +#define PERF_COUNTER_IOC_ENABLE _IO('$', 0) +#define PERF_COUNTER_IOC_DISABLE _IO('$', 1) + +asmlinkage int sys_perf_counter_open( + + struct perf_counter_hw_event *hw_event_uptr __user, + pid_t pid, + int cpu, + int group_fd, + unsigned long flags) +{ + int ret; + + ret = syscall( + __NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags); +#if defined(__x86_64__) || defined(__i386__) + if (ret < 0 && ret > -4096) { + errno = -ret; + ret = -1; + } +#endif + return ret; +} + + +static char *hw_event_names [] = { + "CPU cycles", + "instructions", + "cache references", + "cache misses", + "branches", + "branch misses", + "bus cycles", +}; + +static char *sw_event_names [] = { + "cpu clock ticks", + "task clock ticks", + "pagefaults", + "context switches", + "CPU migrations", +}; + +struct event_symbol { + int event; + char *symbol; +}; + +static struct event_symbol event_symbols 
[] = { + {PERF_COUNT_CPU_CYCLES, "cpu-cycles", }, + {PERF_COUNT_CPU_CYCLES, "cycles", }, + {PERF_COUNT_INSTRUCTIONS, "instructions", }, + {PERF_COUNT_CACHE_REFERENCES, "cache-references", }, + {PERF_COUNT_CACHE_MISSES, "cache-misses", }, + {PERF_COUNT_BRANCH_INSTRUCTIONS, "branch-instructions", }, + {PERF_COUNT_BRANCH_INSTRUCTIONS, "branches", }, + {PERF_COUNT_BRANCH_MISSES, "branch-misses", }, + {PERF_COUNT_BUS_CYCLES, "bus-cycles", }, + {PERF_COUNT_CPU_CLOCK, "cpu-ticks", }, + {PERF_COUNT_CPU_CLOCK, "ticks", }, + {PERF_COUNT_TASK_CLOCK, "task-ticks", }, + {PERF_COUNT_PAGE_FAULTS, "page-faults", }, + {PERF_COUNT_PAGE_FAULTS, "faults", }, + {PERF_COUNT_CONTEXT_SWITCHES, "context-switches", }, + {PERF_COUNT_CONTEXT_SWITCHES, "cs", }, + {PERF_COUNT_CPU_MIGRATIONS, "cpu-migrations", }, + {PERF_COUNT_CPU_MIGRATIONS, "migrations", }, +}; + +#define MAX_COUNTERS 64 +#define MAX_NR_CPUS 256 + +static int nr_counters = 0; +static int nr_cpus = 0; + +static int event_id[MAX_COUNTERS] = + { -2, -5, -4, -3, 0, 1, 2, 3}; + +static int event_raw[MAX_COUNTERS]; + +static int system_wide = 0; + +static void display_help(void) +{ + unsigned int i; + int e; + + printf( + "Usage: perfstat [] \n\n" + "PerfStat Options (up to %d event types can be specified):\n\n", + MAX_COUNTERS); + printf( + " -e EVENT --event=EVENT # symbolic-name abbreviations"); + + for (i = 0, e = PERF_HW_EVENTS_MAX; i < ARRAY_SIZE(event_symbols); i++) { + if (e != event_symbols[i].event) { + e = event_symbols[i].event; + printf( + "\n %2d: %-20s", e, event_symbols[i].symbol); + } else + printf(" %s", event_symbols[i].symbol); + } + + printf("\n" + " rNNN: raw event type\n\n" + " -s # system-wide collection\n\n" + " -c --command= # command+arguments to be timed.\n" + "\n"); + exit(0); +} + +static int type_valid(int type) +{ + if (type >= PERF_HW_EVENTS_MAX) + return 0; + if (type <= PERF_SW_EVENTS_MIN) + return 0; + + return 1; +} + +static char *event_name(int ctr) +{ + int type = event_id[ctr]; + static char buf[32]; + + if (event_raw[ctr]) { + sprintf(buf, "raw 0x%x", type); + return buf; + } + if (!type_valid(type)) + return "unknown"; + + if (type >= 0) + return hw_event_names[type]; + + return sw_event_names[-type-1]; +} + +/* + * Each event can have multiple symbolic names. + * Symbolic names are (almost) exactly matched. 
+ */ +static int match_event_symbols(char *str) +{ + unsigned int i; + + if (isdigit(str[0]) || str[0] == '-') + return atoi(str); + + for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { + if (!strncmp(str, event_symbols[i].symbol, + strlen(event_symbols[i].symbol))) + return event_symbols[i].event; + } + + return PERF_HW_EVENTS_MAX; +} + +static void parse_events(char *str) +{ + int type, raw; + +again: + nr_counters++; + if (nr_counters == MAX_COUNTERS) + display_help(); + + raw = 0; + if (*str == 'r') { + raw = 1; + ++str; + type = strtol(str, NULL, 16); + } else { + type = match_event_symbols(str); + if (!type_valid(type)) + display_help(); + } + + event_id[nr_counters] = type; + event_raw[nr_counters] = raw; + + str = strstr(str, ","); + if (str) { + str++; + goto again; + } +} + +static void process_options(int argc, char *argv[]) +{ + for (;;) { + int option_index = 0; + /** Options for getopt */ + static struct option long_options[] = { + {"event", required_argument, NULL, 'e'}, + {"help", no_argument, NULL, 'h'}, + {"command", no_argument, NULL, 'c'}, + {NULL, 0, NULL, 0 } + }; + int c = getopt_long(argc, argv, "+:e:c:s", + long_options, &option_index); + if (c == -1) + break; + + switch (c) { + case 'c': + break; + case 's': + system_wide = 1; + break; + case 'e': + parse_events(optarg); + break; + default: + break; + } + } + if (optind == argc) + goto err; + + if (!nr_counters) + nr_counters = 8; + else + nr_counters++; + return; + +err: + display_help(); +} + +char fault_here[1000000]; + +#define PR_TASK_PERF_COUNTERS_DISABLE 31 +#define PR_TASK_PERF_COUNTERS_ENABLE 32 + +static int fd[MAX_NR_CPUS][MAX_COUNTERS]; + +static void create_counter(int counter) +{ + struct perf_counter_hw_event hw_event; + + memset(&hw_event, 0, sizeof(hw_event)); + hw_event.type = event_id[counter]; + hw_event.raw = event_raw[counter]; + hw_event.record_type = PERF_RECORD_SIMPLE; + hw_event.nmi = 0; + + if (system_wide) { + int cpu; + for (cpu = 0; cpu < nr_cpus; cpu ++) { + fd[cpu][counter] = sys_perf_counter_open(&hw_event, -1, cpu, -1, 0); + if (fd[cpu][counter] < 0) { + printf("perfstat error: syscall returned with %d (%s)\n", + fd[cpu][counter], strerror(errno)); + exit(-1); + } + + } + } else { + hw_event.inherit = 1; + hw_event.disabled = 1; + + fd[0][counter] = sys_perf_counter_open(&hw_event, 0, -1, -1, 0); + if (fd[0][counter] < 0) { + printf("perfstat error: syscall returned with %d (%s)\n", + fd[0][counter], strerror(errno)); + exit(-1); + } + } +} + + +#define rdclock() \ +({ \ + struct timespec ts; \ + \ + clock_gettime(CLOCK_MONOTONIC, &ts); \ + ts.tv_sec * 1000000000ULL + ts.tv_nsec; \ +}) + +int main(int argc, char *argv[]) +{ + unsigned long long t0, t1; + int counter; + ssize_t res; + int status; + int pid; + + process_options(argc, argv); + + if (system_wide) { + nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); + assert(nr_cpus <= MAX_NR_CPUS); + assert(nr_cpus >= 0); + } else + nr_cpus = 1; + + for (counter = 0; counter < nr_counters; counter++) + create_counter(counter); + + argc -= optind; + argv += optind; + + /* + * Enable counters and exec the command: + */ + t0 = rdclock(); + prctl(PR_TASK_PERF_COUNTERS_ENABLE); + + if ((pid = fork()) < 0) + perror("failed to fork"); + if (!pid) { + if (execvp(argv[0], argv)) { + perror(argv[0]); + exit(-1); + } + } + while (wait(&status) >= 0) + ; + prctl(PR_TASK_PERF_COUNTERS_DISABLE); + t1 = rdclock(); + + fflush(stdout); + + fprintf(stderr, "\n"); + fprintf(stderr, " Performance counter stats for \'%s\':\n", + argv[0]); + fprintf(stderr, "\n"); 
+ + for (counter = 0; counter < nr_counters; counter++) { + int cpu; + __u64 count, single_count; + + count = 0; + for (cpu = 0; cpu < nr_cpus; cpu ++) { + res = read(fd[cpu][counter], + (char *) &single_count, sizeof(single_count)); + assert(res == sizeof(single_count)); + count += single_count; + } + + if (!event_raw[counter] && + (event_id[counter] == PERF_COUNT_CPU_CLOCK || + event_id[counter] == PERF_COUNT_TASK_CLOCK)) { + + double msecs = (double)count / 1000000; + + fprintf(stderr, " %14.6f %-20s (msecs)\n", + msecs, event_name(counter)); + } else { + fprintf(stderr, " %14Ld %-20s (events)\n", + count, event_name(counter)); + } + if (!counter) + fprintf(stderr, "\n"); + } + fprintf(stderr, "\n"); + fprintf(stderr, " Wall-clock time elapsed: %12.6f msecs\n", + (double)(t1-t0)/1e6); + fprintf(stderr, "\n"); + + return 0; +} -- cgit v1.2.3 From cea92ce5b07078cd62c4712d51390b09a43dba2e Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Fri, 20 Mar 2009 10:08:02 +0800 Subject: perf_counter tools: Merge common code into perfcounters.h kerneltop's MAX_COUNTERS is increased from 8 to 64(the value used by perfstat). Signed-off-by: Wu Fengguang Acked-by: Peter Zijlstra Cc: Paul Mackerras Signed-off-by: Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 132 +--------------------------- Documentation/perf_counter/perfcounters.h | 137 ++++++++++++++++++++++++++++++ Documentation/perf_counter/perfstat.c | 134 +---------------------------- 3 files changed, 139 insertions(+), 264 deletions(-) create mode 100644 Documentation/perf_counter/perfcounters.h diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index cf0e30bab5d..fe70a2c92a8 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -65,126 +65,7 @@ #include -#ifdef __x86_64__ -# define __NR_perf_counter_open 295 -#endif - -#ifdef __i386__ -# define __NR_perf_counter_open 333 -#endif - -/* - * Pick up some kernel type conventions: - */ -#define __user -#define asmlinkage - -typedef unsigned int __u32; -typedef unsigned long long __u64; -typedef long long __s64; - -/* - * User-space ABI bits: - */ - -/* - * Generalized performance counter event types, used by the hw_event.type - * parameter of the sys_perf_counter_open() syscall: - */ -enum hw_event_types { - /* - * Common hardware events, generalized by the kernel: - */ - PERF_COUNT_CPU_CYCLES = 0, - PERF_COUNT_INSTRUCTIONS = 1, - PERF_COUNT_CACHE_REFERENCES = 2, - PERF_COUNT_CACHE_MISSES = 3, - PERF_COUNT_BRANCH_INSTRUCTIONS = 4, - PERF_COUNT_BRANCH_MISSES = 5, - PERF_COUNT_BUS_CYCLES = 6, - - PERF_HW_EVENTS_MAX = 7, - - /* - * Special "software" counters provided by the kernel, even if - * the hardware does not support performance counters. 
These - * counters measure various physical and sw events of the - * kernel (and allow the profiling of them as well): - */ - PERF_COUNT_CPU_CLOCK = -1, - PERF_COUNT_TASK_CLOCK = -2, - PERF_COUNT_PAGE_FAULTS = -3, - PERF_COUNT_CONTEXT_SWITCHES = -4, - PERF_COUNT_CPU_MIGRATIONS = -5, - - PERF_SW_EVENTS_MIN = -6, -}; - -/* - * IRQ-notification data record type: - */ -enum perf_counter_record_type { - PERF_RECORD_SIMPLE = 0, - PERF_RECORD_IRQ = 1, - PERF_RECORD_GROUP = 2, -}; - -/* - * Hardware event to monitor via a performance monitoring counter: - */ -struct perf_counter_hw_event { - __s64 type; - - __u64 irq_period; - __u64 record_type; - __u64 read_format; - - __u64 disabled : 1, /* off by default */ - nmi : 1, /* NMI sampling */ - raw : 1, /* raw event type */ - inherit : 1, /* children inherit it */ - pinned : 1, /* must always be on PMU */ - exclusive : 1, /* only group on PMU */ - exclude_user : 1, /* don't count user */ - exclude_kernel : 1, /* ditto kernel */ - exclude_hv : 1, /* ditto hypervisor */ - exclude_idle : 1, /* don't count when idle */ - - __reserved_1 : 54; - - __u32 extra_config_len; - __u32 __reserved_4; - - __u64 __reserved_2; - __u64 __reserved_3; -}; - -/* - * Ioctls that can be done on a perf counter fd: - */ -#define PERF_COUNTER_IOC_ENABLE _IO('$', 0) -#define PERF_COUNTER_IOC_DISABLE _IO('$', 1) - -asmlinkage int sys_perf_counter_open( - - struct perf_counter_hw_event *hw_event_uptr __user, - pid_t pid, - int cpu, - int group_fd, - unsigned long flags) -{ - int ret; - - ret = syscall( - __NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags); -#if defined(__x86_64__) || defined(__i386__) - if (ret < 0 && ret > -4096) { - errno = -ret; - ret = -1; - } -#endif - return ret; -} +#include "perfcounters.h" const char *event_types [] = { "CPU cycles", @@ -205,21 +86,10 @@ const unsigned int default_count[] = { 10000, }; -/* - * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all - * counters in the current task. - */ -#define PR_TASK_PERF_COUNTERS_DISABLE 31 -#define PR_TASK_PERF_COUNTERS_ENABLE 32 - -#define MAX_COUNTERS 8 - static int nr_counters = -1; static __u64 count_filter = 100; -#define MAX_NR_CPUS 256 - static int event_count[MAX_COUNTERS]; static unsigned long event_id[MAX_COUNTERS]; static int event_raw[MAX_COUNTERS]; diff --git a/Documentation/perf_counter/perfcounters.h b/Documentation/perf_counter/perfcounters.h new file mode 100644 index 00000000000..8c1559b25f1 --- /dev/null +++ b/Documentation/perf_counter/perfcounters.h @@ -0,0 +1,137 @@ +/* + * Ioctls that can be done on a perf counter fd: + */ +#define PERF_COUNTER_IOC_ENABLE _IO('$', 0) +#define PERF_COUNTER_IOC_DISABLE _IO('$', 1) + +/* + * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all + * counters in the current task. 
+ */ +#define PR_TASK_PERF_COUNTERS_DISABLE 31 +#define PR_TASK_PERF_COUNTERS_ENABLE 32 + +#define MAX_COUNTERS 64 +#define MAX_NR_CPUS 256 + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +/* + * Pick up some kernel type conventions: + */ +#define __user +#define asmlinkage + +typedef unsigned int __u32; +typedef unsigned long long __u64; +typedef long long __s64; + +/* + * User-space ABI bits: + */ + +/* + * Generalized performance counter event types, used by the hw_event.type + * parameter of the sys_perf_counter_open() syscall: + */ +enum hw_event_types { + /* + * Common hardware events, generalized by the kernel: + */ + PERF_COUNT_CPU_CYCLES = 0, + PERF_COUNT_INSTRUCTIONS = 1, + PERF_COUNT_CACHE_REFERENCES = 2, + PERF_COUNT_CACHE_MISSES = 3, + PERF_COUNT_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_BRANCH_MISSES = 5, + PERF_COUNT_BUS_CYCLES = 6, + + PERF_HW_EVENTS_MAX = 7, + + /* + * Special "software" counters provided by the kernel, even if + * the hardware does not support performance counters. These + * counters measure various physical and sw events of the + * kernel (and allow the profiling of them as well): + */ + PERF_COUNT_CPU_CLOCK = -1, + PERF_COUNT_TASK_CLOCK = -2, + PERF_COUNT_PAGE_FAULTS = -3, + PERF_COUNT_CONTEXT_SWITCHES = -4, + PERF_COUNT_CPU_MIGRATIONS = -5, + + PERF_SW_EVENTS_MIN = -6, +}; + +/* + * IRQ-notification data record type: + */ +enum perf_counter_record_type { + PERF_RECORD_SIMPLE = 0, + PERF_RECORD_IRQ = 1, + PERF_RECORD_GROUP = 2, +}; + +/* + * Hardware event to monitor via a performance monitoring counter: + */ +struct perf_counter_hw_event { + __s64 type; + + __u64 irq_period; + __u64 record_type; + __u64 read_format; + + __u64 disabled : 1, /* off by default */ + nmi : 1, /* NMI sampling */ + raw : 1, /* raw event type */ + inherit : 1, /* children inherit it */ + pinned : 1, /* must always be on PMU */ + exclusive : 1, /* only group on PMU */ + exclude_user : 1, /* don't count user */ + exclude_kernel : 1, /* ditto kernel */ + exclude_hv : 1, /* ditto hypervisor */ + exclude_idle : 1, /* don't count when idle */ + + __reserved_1 : 54; + + __u32 extra_config_len; + __u32 __reserved_4; + + __u64 __reserved_2; + __u64 __reserved_3; +}; + +#ifdef __x86_64__ +# define __NR_perf_counter_open 295 +#endif + +#ifdef __i386__ +# define __NR_perf_counter_open 333 +#endif + +#ifdef __powerpc__ +#define __NR_perf_counter_open 319 +#endif + +asmlinkage int sys_perf_counter_open( + + struct perf_counter_hw_event *hw_event_uptr __user, + pid_t pid, + int cpu, + int group_fd, + unsigned long flags) +{ + int ret; + + ret = syscall( + __NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags); +#if defined(__x86_64__) || defined(__i386__) + if (ret < 0 && ret > -4096) { + errno = -ret; + ret = -1; + } +#endif + return ret; +} + diff --git a/Documentation/perf_counter/perfstat.c b/Documentation/perf_counter/perfstat.c index 9a5808fbcf9..a3d4a7a602f 100644 --- a/Documentation/perf_counter/perfstat.c +++ b/Documentation/perf_counter/perfstat.c @@ -52,133 +52,7 @@ #include -#ifdef __x86_64__ -# define __NR_perf_counter_open 295 -#endif - -#ifdef __i386__ -# define __NR_perf_counter_open 333 -#endif - -#ifdef __powerpc__ -#define __NR_perf_counter_open 319 -#endif - -/* - * Pick up some kernel type conventions: - */ -#define __user -#define asmlinkage - -typedef unsigned int __u32; -typedef unsigned long long __u64; -typedef long long __s64; - -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) - -/* - * User-space ABI bits: - */ - -/* - * Generalized performance 
counter event types, used by the hw_event.type - * parameter of the sys_perf_counter_open() syscall: - */ -enum hw_event_types { - /* - * Common hardware events, generalized by the kernel: - */ - PERF_COUNT_CPU_CYCLES = 0, - PERF_COUNT_INSTRUCTIONS = 1, - PERF_COUNT_CACHE_REFERENCES = 2, - PERF_COUNT_CACHE_MISSES = 3, - PERF_COUNT_BRANCH_INSTRUCTIONS = 4, - PERF_COUNT_BRANCH_MISSES = 5, - PERF_COUNT_BUS_CYCLES = 6, - - PERF_HW_EVENTS_MAX = 7, - - /* - * Special "software" counters provided by the kernel, even if - * the hardware does not support performance counters. These - * counters measure various physical and sw events of the - * kernel (and allow the profiling of them as well): - */ - PERF_COUNT_CPU_CLOCK = -1, - PERF_COUNT_TASK_CLOCK = -2, - PERF_COUNT_PAGE_FAULTS = -3, - PERF_COUNT_CONTEXT_SWITCHES = -4, - PERF_COUNT_CPU_MIGRATIONS = -5, - - PERF_SW_EVENTS_MIN = -6, -}; - -/* - * IRQ-notification data record type: - */ -enum perf_counter_record_type { - PERF_RECORD_SIMPLE = 0, - PERF_RECORD_IRQ = 1, - PERF_RECORD_GROUP = 2, -}; - -/* - * Hardware event to monitor via a performance monitoring counter: - */ -struct perf_counter_hw_event { - __s64 type; - - __u64 irq_period; - __u64 record_type; - __u64 read_format; - - __u64 disabled : 1, /* off by default */ - nmi : 1, /* NMI sampling */ - raw : 1, /* raw event type */ - inherit : 1, /* children inherit it */ - pinned : 1, /* must always be on PMU */ - exclusive : 1, /* only group on PMU */ - exclude_user : 1, /* don't count user */ - exclude_kernel : 1, /* ditto kernel */ - exclude_hv : 1, /* ditto hypervisor */ - exclude_idle : 1, /* don't count when idle */ - - __reserved_1 : 54; - - __u32 extra_config_len; - __u32 __reserved_4; - - __u64 __reserved_2; - __u64 __reserved_3; -}; - -/* - * Ioctls that can be done on a perf counter fd: - */ -#define PERF_COUNTER_IOC_ENABLE _IO('$', 0) -#define PERF_COUNTER_IOC_DISABLE _IO('$', 1) - -asmlinkage int sys_perf_counter_open( - - struct perf_counter_hw_event *hw_event_uptr __user, - pid_t pid, - int cpu, - int group_fd, - unsigned long flags) -{ - int ret; - - ret = syscall( - __NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags); -#if defined(__x86_64__) || defined(__i386__) - if (ret < 0 && ret > -4096) { - errno = -ret; - ret = -1; - } -#endif - return ret; -} - +#include "perfcounters.h" static char *hw_event_names [] = { "CPU cycles", @@ -224,9 +98,6 @@ static struct event_symbol event_symbols [] = { {PERF_COUNT_CPU_MIGRATIONS, "migrations", }, }; -#define MAX_COUNTERS 64 -#define MAX_NR_CPUS 256 - static int nr_counters = 0; static int nr_cpus = 0; @@ -388,9 +259,6 @@ err: char fault_here[1000000]; -#define PR_TASK_PERF_COUNTERS_DISABLE 31 -#define PR_TASK_PERF_COUNTERS_ENABLE 32 - static int fd[MAX_NR_CPUS][MAX_COUNTERS]; static void create_counter(int counter) -- cgit v1.2.3 From f49012fad4ed2231c7380c0b1901122242b3eab0 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Fri, 20 Mar 2009 10:08:03 +0800 Subject: perf_counter tools: Move perfstat supporting code into perfcounters.h Signed-off-by: Wu Fengguang Acked-by: Peter Zijlstra Cc: Paul Mackerras Signed-off-by: Ingo Molnar --- Documentation/perf_counter/perfcounters.h | 130 ++++++++++++++++++++++++++++++ Documentation/perf_counter/perfstat.c | 130 ------------------------------ 2 files changed, 130 insertions(+), 130 deletions(-) diff --git a/Documentation/perf_counter/perfcounters.h b/Documentation/perf_counter/perfcounters.h index 8c1559b25f1..0f3764aa52a 100644 --- a/Documentation/perf_counter/perfcounters.h +++ 
b/Documentation/perf_counter/perfcounters.h @@ -16,6 +16,14 @@ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#define rdclock() \ +({ \ + struct timespec ts; \ + \ + clock_gettime(CLOCK_MONOTONIC, &ts); \ + ts.tv_sec * 1000000000ULL + ts.tv_nsec; \ +}) + /* * Pick up some kernel type conventions: */ @@ -135,3 +143,125 @@ asmlinkage int sys_perf_counter_open( return ret; } +static char *hw_event_names [] = { + "CPU cycles", + "instructions", + "cache references", + "cache misses", + "branches", + "branch misses", + "bus cycles", +}; + +static char *sw_event_names [] = { + "cpu clock ticks", + "task clock ticks", + "pagefaults", + "context switches", + "CPU migrations", +}; + +struct event_symbol { + int event; + char *symbol; +}; + +static struct event_symbol event_symbols [] = { + {PERF_COUNT_CPU_CYCLES, "cpu-cycles", }, + {PERF_COUNT_CPU_CYCLES, "cycles", }, + {PERF_COUNT_INSTRUCTIONS, "instructions", }, + {PERF_COUNT_CACHE_REFERENCES, "cache-references", }, + {PERF_COUNT_CACHE_MISSES, "cache-misses", }, + {PERF_COUNT_BRANCH_INSTRUCTIONS, "branch-instructions", }, + {PERF_COUNT_BRANCH_INSTRUCTIONS, "branches", }, + {PERF_COUNT_BRANCH_MISSES, "branch-misses", }, + {PERF_COUNT_BUS_CYCLES, "bus-cycles", }, + {PERF_COUNT_CPU_CLOCK, "cpu-ticks", }, + {PERF_COUNT_CPU_CLOCK, "ticks", }, + {PERF_COUNT_TASK_CLOCK, "task-ticks", }, + {PERF_COUNT_PAGE_FAULTS, "page-faults", }, + {PERF_COUNT_PAGE_FAULTS, "faults", }, + {PERF_COUNT_CONTEXT_SWITCHES, "context-switches", }, + {PERF_COUNT_CONTEXT_SWITCHES, "cs", }, + {PERF_COUNT_CPU_MIGRATIONS, "cpu-migrations", }, + {PERF_COUNT_CPU_MIGRATIONS, "migrations", }, +}; + +static int type_valid(int type) +{ + if (type >= PERF_HW_EVENTS_MAX) + return 0; + if (type <= PERF_SW_EVENTS_MIN) + return 0; + + return 1; +} + +static char *event_name(int ctr) +{ + int type = event_id[ctr]; + static char buf[32]; + + if (event_raw[ctr]) { + sprintf(buf, "raw 0x%x", type); + return buf; + } + if (!type_valid(type)) + return "unknown"; + + if (type >= 0) + return hw_event_names[type]; + + return sw_event_names[-type-1]; +} + +/* + * Each event can have multiple symbolic names. + * Symbolic names are (almost) exactly matched. 
+ */ +static int match_event_symbols(char *str) +{ + unsigned int i; + + if (isdigit(str[0]) || str[0] == '-') + return atoi(str); + + for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { + if (!strncmp(str, event_symbols[i].symbol, + strlen(event_symbols[i].symbol))) + return event_symbols[i].event; + } + + return PERF_HW_EVENTS_MAX; +} + +static void parse_events(char *str) +{ + int type, raw; + +again: + nr_counters++; + if (nr_counters == MAX_COUNTERS) + display_help(); + + raw = 0; + if (*str == 'r') { + raw = 1; + ++str; + type = strtol(str, NULL, 16); + } else { + type = match_event_symbols(str); + if (!type_valid(type)) + display_help(); + } + + event_id[nr_counters] = type; + event_raw[nr_counters] = raw; + + str = strstr(str, ","); + if (str) { + str++; + goto again; + } +} + diff --git a/Documentation/perf_counter/perfstat.c b/Documentation/perf_counter/perfstat.c index a3d4a7a602f..3364dcb9dd9 100644 --- a/Documentation/perf_counter/perfstat.c +++ b/Documentation/perf_counter/perfstat.c @@ -54,50 +54,6 @@ #include "perfcounters.h" -static char *hw_event_names [] = { - "CPU cycles", - "instructions", - "cache references", - "cache misses", - "branches", - "branch misses", - "bus cycles", -}; - -static char *sw_event_names [] = { - "cpu clock ticks", - "task clock ticks", - "pagefaults", - "context switches", - "CPU migrations", -}; - -struct event_symbol { - int event; - char *symbol; -}; - -static struct event_symbol event_symbols [] = { - {PERF_COUNT_CPU_CYCLES, "cpu-cycles", }, - {PERF_COUNT_CPU_CYCLES, "cycles", }, - {PERF_COUNT_INSTRUCTIONS, "instructions", }, - {PERF_COUNT_CACHE_REFERENCES, "cache-references", }, - {PERF_COUNT_CACHE_MISSES, "cache-misses", }, - {PERF_COUNT_BRANCH_INSTRUCTIONS, "branch-instructions", }, - {PERF_COUNT_BRANCH_INSTRUCTIONS, "branches", }, - {PERF_COUNT_BRANCH_MISSES, "branch-misses", }, - {PERF_COUNT_BUS_CYCLES, "bus-cycles", }, - {PERF_COUNT_CPU_CLOCK, "cpu-ticks", }, - {PERF_COUNT_CPU_CLOCK, "ticks", }, - {PERF_COUNT_TASK_CLOCK, "task-ticks", }, - {PERF_COUNT_PAGE_FAULTS, "page-faults", }, - {PERF_COUNT_PAGE_FAULTS, "faults", }, - {PERF_COUNT_CONTEXT_SWITCHES, "context-switches", }, - {PERF_COUNT_CONTEXT_SWITCHES, "cs", }, - {PERF_COUNT_CPU_MIGRATIONS, "cpu-migrations", }, - {PERF_COUNT_CPU_MIGRATIONS, "migrations", }, -}; - static int nr_counters = 0; static int nr_cpus = 0; @@ -137,84 +93,6 @@ static void display_help(void) exit(0); } -static int type_valid(int type) -{ - if (type >= PERF_HW_EVENTS_MAX) - return 0; - if (type <= PERF_SW_EVENTS_MIN) - return 0; - - return 1; -} - -static char *event_name(int ctr) -{ - int type = event_id[ctr]; - static char buf[32]; - - if (event_raw[ctr]) { - sprintf(buf, "raw 0x%x", type); - return buf; - } - if (!type_valid(type)) - return "unknown"; - - if (type >= 0) - return hw_event_names[type]; - - return sw_event_names[-type-1]; -} - -/* - * Each event can have multiple symbolic names. - * Symbolic names are (almost) exactly matched. 
- */ -static int match_event_symbols(char *str) -{ - unsigned int i; - - if (isdigit(str[0]) || str[0] == '-') - return atoi(str); - - for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { - if (!strncmp(str, event_symbols[i].symbol, - strlen(event_symbols[i].symbol))) - return event_symbols[i].event; - } - - return PERF_HW_EVENTS_MAX; -} - -static void parse_events(char *str) -{ - int type, raw; - -again: - nr_counters++; - if (nr_counters == MAX_COUNTERS) - display_help(); - - raw = 0; - if (*str == 'r') { - raw = 1; - ++str; - type = strtol(str, NULL, 16); - } else { - type = match_event_symbols(str); - if (!type_valid(type)) - display_help(); - } - - event_id[nr_counters] = type; - event_raw[nr_counters] = raw; - - str = strstr(str, ","); - if (str) { - str++; - goto again; - } -} - static void process_options(int argc, char *argv[]) { for (;;) { @@ -296,14 +174,6 @@ static void create_counter(int counter) } -#define rdclock() \ -({ \ - struct timespec ts; \ - \ - clock_gettime(CLOCK_MONOTONIC, &ts); \ - ts.tv_sec * 1000000000ULL + ts.tv_nsec; \ -}) - int main(int argc, char *argv[]) { unsigned long long t0, t1; -- cgit v1.2.3 From 95bb3be1b3ca4a71cc168787b675d5b7852fc6be Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Fri, 20 Mar 2009 10:08:04 +0800 Subject: perf_counter tools: support symbolic event names in kerneltop - kerneltop: --event_id => --event - kerneltop: can accept SW event types now - perfstat: it used to implicitly add event -2(task-clock), the new code no longer does this. Shall we? Signed-off-by: Wu Fengguang Acked-by: Peter Zijlstra Cc: Paul Mackerras Signed-off-by: Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 28 ++++++---------------------- Documentation/perf_counter/perfcounters.h | 15 ++++++++++----- Documentation/perf_counter/perfstat.c | 8 -------- 3 files changed, 16 insertions(+), 35 deletions(-) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index fe70a2c92a8..edc5b09fb58 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -86,13 +86,9 @@ const unsigned int default_count[] = { 10000, }; -static int nr_counters = -1; - static __u64 count_filter = 100; static int event_count[MAX_COUNTERS]; -static unsigned long event_id[MAX_COUNTERS]; -static int event_raw[MAX_COUNTERS]; static int tid = -1; static int profile_cpu = -1; @@ -125,7 +121,7 @@ static void display_help(void) "KernelTop Options (up to %d event types can be specified at once):\n\n", MAX_COUNTERS); printf( - " -e EID --event_id=EID # event type ID [default: 0]\n" + " -e EID --event=EID # event type ID [default: 0]\n" " 0: CPU cycles\n" " 1: instructions\n" " 2: cache accesses\n" @@ -160,7 +156,7 @@ static void process_options(int argc, char *argv[]) {"cpu", required_argument, NULL, 'C'}, {"delay", required_argument, NULL, 'd'}, {"dump_symtab", no_argument, NULL, 'D'}, - {"event_id", required_argument, NULL, 'e'}, + {"event", required_argument, NULL, 'e'}, {"filter", required_argument, NULL, 'f'}, {"group", required_argument, NULL, 'g'}, {"help", no_argument, NULL, 'h'}, @@ -178,8 +174,6 @@ static void process_options(int argc, char *argv[]) switch (c) { case 'c': - if (nr_counters == -1) - nr_counters = 0; event_count[nr_counters] = atoi(optarg); break; case 'C': /* CPU and PID are mutually exclusive */ @@ -192,18 +186,7 @@ static void process_options(int argc, char *argv[]) case 'd': delay_secs = atoi(optarg); break; case 'D': dump_symtab = 1; break; - case 'e': - nr_counters++; - if (nr_counters == 
MAX_COUNTERS) { - error = 1; - break; - } - if (*optarg == 'r') { - event_raw[nr_counters] = 1; - ++optarg; - } - event_id[nr_counters] = strtol(optarg, NULL, 16); - break; + case 'e': error = parse_events(optarg); break; case 'f': count_filter = atoi(optarg); break; case 'g': group = atoi(optarg); break; @@ -226,9 +209,10 @@ static void process_options(int argc, char *argv[]) if (error) display_help(); - nr_counters++; - if (nr_counters < 1) + if (!nr_counters) { nr_counters = 1; + event_id[0] = 0; + } for (counter = 0; counter < nr_counters; counter++) { if (event_count[counter]) diff --git a/Documentation/perf_counter/perfcounters.h b/Documentation/perf_counter/perfcounters.h index 0f3764aa52a..99a90d833e1 100644 --- a/Documentation/perf_counter/perfcounters.h +++ b/Documentation/perf_counter/perfcounters.h @@ -143,6 +143,10 @@ asmlinkage int sys_perf_counter_open( return ret; } +static int nr_counters = 0; +static long event_id[MAX_COUNTERS] = { -2, -5, -4, -3, 0, 1, 2, 3}; +static int event_raw[MAX_COUNTERS]; + static char *hw_event_names [] = { "CPU cycles", "instructions", @@ -235,14 +239,13 @@ static int match_event_symbols(char *str) return PERF_HW_EVENTS_MAX; } -static void parse_events(char *str) +static int parse_events(char *str) { int type, raw; again: - nr_counters++; if (nr_counters == MAX_COUNTERS) - display_help(); + return -1; raw = 0; if (*str == 'r') { @@ -252,16 +255,18 @@ again: } else { type = match_event_symbols(str); if (!type_valid(type)) - display_help(); + return -1; } event_id[nr_counters] = type; event_raw[nr_counters] = raw; + nr_counters++; str = strstr(str, ","); if (str) { str++; goto again; } -} + return 0; +} diff --git a/Documentation/perf_counter/perfstat.c b/Documentation/perf_counter/perfstat.c index 3364dcb9dd9..fd594468e65 100644 --- a/Documentation/perf_counter/perfstat.c +++ b/Documentation/perf_counter/perfstat.c @@ -54,14 +54,8 @@ #include "perfcounters.h" -static int nr_counters = 0; static int nr_cpus = 0; -static int event_id[MAX_COUNTERS] = - { -2, -5, -4, -3, 0, 1, 2, 3}; - -static int event_raw[MAX_COUNTERS]; - static int system_wide = 0; static void display_help(void) @@ -127,8 +121,6 @@ static void process_options(int argc, char *argv[]) if (!nr_counters) nr_counters = 8; - else - nr_counters++; return; err: -- cgit v1.2.3 From e3908612d93dff9d7746d85d37c42593254bf282 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Fri, 20 Mar 2009 10:08:05 +0800 Subject: perf_counter tools: Reuse event_name() in kerneltop - can handle sw counters now - the outputs will look slightly different Acked-by: Peter Zijlstra Cc: Paul Mackerras Signed-off-by: Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index edc5b09fb58..cba5cb0a97f 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -67,16 +67,6 @@ #include "perfcounters.h" -const char *event_types [] = { - "CPU cycles", - "instructions", - "cache-refs", - "cache-misses", - "branches", - "branch-misses", - "bus cycles" -}; - const unsigned int default_count[] = { 1000000, 1000000, @@ -304,10 +294,7 @@ static void print_sym_table(void) if (counter) printf("/"); - if (event_id[counter] < PERF_HW_EVENTS_MAX) - printf( "%s", event_types[event_id[counter]]); - else - printf( "raw:%04lx", event_id[counter]); + printf("%s", event_name(counter)); } printf( "], "); -- cgit v1.2.3 From 
f7524bda8be8be98db356d6a83ac1da451ecdb2e Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Fri, 20 Mar 2009 10:08:06 +0800 Subject: perf_counter tools: move remaining code into kerneltop.c - perfstat.c can be safely removed now - perfstat: -s => -a for system wide accounting - kerneltop: add -S/--stat for perfstat mode - minor adjustments to kerneltop --help, perfstat --help Signed-off-by: Wu Fengguang Acked-by: Peter Zijlstra Cc: Paul Mackerras Signed-off-by: Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 530 ++++++++++++++++++++++++------ Documentation/perf_counter/perfcounters.h | 132 +------- 2 files changed, 432 insertions(+), 230 deletions(-) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index cba5cb0a97f..9db65a4f104 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -3,7 +3,7 @@ Build with: - cc -O6 -Wall `pkg-config --cflags --libs glib-2.0` -o kerneltop kerneltop.c + cc -O6 -Wall -lrt `pkg-config --cflags --libs glib-2.0` -o kerneltop kerneltop.c Sample output: @@ -26,18 +26,40 @@ 12.00 - ffffffff804ffb7f : __ip_local_out 11.97 - ffffffff804fc0c8 : ip_local_deliver_finish 8.54 - ffffffff805001a3 : ip_queue_xmit + */ - Started by Ingo Molnar +/* + * perfstat: /usr/bin/time -alike performance counter statistics utility - Improvements and fixes by: + It summarizes the counter events of all tasks (and child tasks), + covering all CPUs that the command (or workload) executes on. + It only counts the per-task events of the workload started, + independent of how many other tasks run on those CPUs. - Arjan van de Ven - Yanmin Zhang - Mike Galbraith + Sample output: - Released under the GPL v2. (and only v2, not any later version) + $ ./perfstat -e 1 -e 3 -e 5 ls -lR /usr/include/ >/dev/null + Performance counter stats for 'ls': + + 163516953 instructions + 2295 cache-misses + 2855182 branch-misses */ + + /* + * Copyright (C) 2008, Red Hat Inc, Ingo Molnar + * + * Improvements and fixes by: + * + * Arjan van de Ven + * Yanmin Zhang + * Wu Fengguang + * Mike Galbraith + * + * Released under the GPL v2. 
(and only v2, not any later version) + */ + #define _GNU_SOURCE #include #include @@ -67,18 +89,22 @@ #include "perfcounters.h" -const unsigned int default_count[] = { - 1000000, - 1000000, - 10000, - 10000, - 1000000, - 10000, -}; -static __u64 count_filter = 100; +#define MAX_COUNTERS 64 +#define MAX_NR_CPUS 256 + +#define DEF_PERFSTAT_EVENTS { -2, -5, -4, -3, 0, 1, 2, 3} + +static int run_perfstat = 0; +static int system_wide = 0; +static int nr_counters = 0; +static long event_id[MAX_COUNTERS] = DEF_PERFSTAT_EVENTS; +static int event_raw[MAX_COUNTERS]; static int event_count[MAX_COUNTERS]; +static int fd[MAX_NR_CPUS][MAX_COUNTERS]; + +static __u64 count_filter = 100; static int tid = -1; static int profile_cpu = -1; @@ -96,125 +122,335 @@ static int delay_secs = 2; static int zero; static int dump_symtab; +static GList *lines; + struct source_line { uint64_t EIP; unsigned long count; char *line; }; -static GList *lines; + +const unsigned int default_count[] = { + 1000000, + 1000000, + 10000, + 10000, + 1000000, + 10000, +}; + +static char *hw_event_names[] = { + "CPU cycles", + "instructions", + "cache references", + "cache misses", + "branches", + "branch misses", + "bus cycles", +}; + +static char *sw_event_names[] = { + "cpu clock ticks", + "task clock ticks", + "pagefaults", + "context switches", + "CPU migrations", +}; + +struct event_symbol { + int event; + char *symbol; +}; + +static struct event_symbol event_symbols[] = { + {PERF_COUNT_CPU_CYCLES, "cpu-cycles", }, + {PERF_COUNT_CPU_CYCLES, "cycles", }, + {PERF_COUNT_INSTRUCTIONS, "instructions", }, + {PERF_COUNT_CACHE_REFERENCES, "cache-references", }, + {PERF_COUNT_CACHE_MISSES, "cache-misses", }, + {PERF_COUNT_BRANCH_INSTRUCTIONS, "branch-instructions", }, + {PERF_COUNT_BRANCH_INSTRUCTIONS, "branches", }, + {PERF_COUNT_BRANCH_MISSES, "branch-misses", }, + {PERF_COUNT_BUS_CYCLES, "bus-cycles", }, + {PERF_COUNT_CPU_CLOCK, "cpu-ticks", }, + {PERF_COUNT_CPU_CLOCK, "ticks", }, + {PERF_COUNT_TASK_CLOCK, "task-ticks", }, + {PERF_COUNT_PAGE_FAULTS, "page-faults", }, + {PERF_COUNT_PAGE_FAULTS, "faults", }, + {PERF_COUNT_CONTEXT_SWITCHES, "context-switches", }, + {PERF_COUNT_CONTEXT_SWITCHES, "cs", }, + {PERF_COUNT_CPU_MIGRATIONS, "cpu-migrations", }, + {PERF_COUNT_CPU_MIGRATIONS, "migrations", }, +}; + +static void display_events_help(void) +{ + unsigned int i; + int e; + + printf( + " -e EVENT --event=EVENT # symbolic-name abbreviations"); + + for (i = 0, e = PERF_HW_EVENTS_MAX; i < ARRAY_SIZE(event_symbols); i++) { + if (e != event_symbols[i].event) { + e = event_symbols[i].event; + printf( + "\n %2d: %-20s", e, event_symbols[i].symbol); + } else + printf(" %s", event_symbols[i].symbol); + } + + printf("\n" + " rNNN: raw PMU events (eventsel+umask)\n\n"); +} + +static void display_perfstat_help(void) +{ + printf( + "Usage: perfstat [] \n\n" + "PerfStat Options (up to %d event types can be specified):\n\n", + MAX_COUNTERS); + + display_events_help(); + + printf( + " -a # system-wide collection\n"); + exit(0); +} static void display_help(void) { + if (run_perfstat) + return display_perfstat_help(); + printf( - "Usage: kerneltop []\n\n" + "Usage: kerneltop []\n" + " Or: kerneltop -S [] COMMAND [ARGS]\n\n" "KernelTop Options (up to %d event types can be specified at once):\n\n", MAX_COUNTERS); + + display_events_help(); + printf( - " -e EID --event=EID # event type ID [default: 0]\n" - " 0: CPU cycles\n" - " 1: instructions\n" - " 2: cache accesses\n" - " 3: cache misses\n" - " 4: branch instructions\n" - " 5: branch prediction 
misses\n" - " 6: bus cycles\n\n" - " rNNN: raw PMU events (eventsel+umask)\n\n" + " -S --stat # perfstat COMMAND\n" + " -a # system-wide collection (for perfstat)\n\n" " -c CNT --count=CNT # event period to sample\n\n" " -C CPU --cpu=CPU # CPU (-1 for all) [default: -1]\n" " -p PID --pid=PID # PID of sampled task (-1 for all) [default: -1]\n\n" " -d delay --delay= # sampling/display delay [default: 2]\n" - " -f CNT --filter=CNT # min-event-count filter [default: 100]\n\n" + " -f CNT --filter=CNT # min-event-count filter [default: 100]\n\n" " -s symbol --symbol= # function to be showed annotated one-shot\n" - " -x path --vmlinux= # the vmlinux binary, required for -s use:\n" + " -x path --vmlinux= # the vmlinux binary, required for -s use\n" " -z --zero # zero counts after display\n" " -D --dump_symtab # dump symbol table to stderr on startup\n" - "\n"); + ); exit(0); } -static void process_options(int argc, char *argv[]) +static int type_valid(int type) { - int error = 0, counter; + if (type >= PERF_HW_EVENTS_MAX) + return 0; + if (type <= PERF_SW_EVENTS_MIN) + return 0; - for (;;) { - int option_index = 0; - /** Options for getopt */ - static struct option long_options[] = { - {"count", required_argument, NULL, 'c'}, - {"cpu", required_argument, NULL, 'C'}, - {"delay", required_argument, NULL, 'd'}, - {"dump_symtab", no_argument, NULL, 'D'}, - {"event", required_argument, NULL, 'e'}, - {"filter", required_argument, NULL, 'f'}, - {"group", required_argument, NULL, 'g'}, - {"help", no_argument, NULL, 'h'}, - {"nmi", required_argument, NULL, 'n'}, - {"pid", required_argument, NULL, 'p'}, - {"vmlinux", required_argument, NULL, 'x'}, - {"symbol", required_argument, NULL, 's'}, - {"zero", no_argument, NULL, 'z'}, - {NULL, 0, NULL, 0 } - }; - int c = getopt_long(argc, argv, "c:C:d:De:f:g:hn:p:s:x:z", - long_options, &option_index); - if (c == -1) - break; + return 1; +} - switch (c) { - case 'c': - event_count[nr_counters] = atoi(optarg); break; - case 'C': - /* CPU and PID are mutually exclusive */ - if (tid != -1) { - printf("WARNING: CPU switch overriding PID\n"); - sleep(1); - tid = -1; - } - profile_cpu = atoi(optarg); break; - case 'd': delay_secs = atoi(optarg); break; - case 'D': dump_symtab = 1; break; +static char *event_name(int ctr) +{ + int type = event_id[ctr]; + static char buf[32]; - case 'e': error = parse_events(optarg); break; + if (event_raw[ctr]) { + sprintf(buf, "raw 0x%x", type); + return buf; + } + if (!type_valid(type)) + return "unknown"; - case 'f': count_filter = atoi(optarg); break; - case 'g': group = atoi(optarg); break; - case 'h': display_help(); break; - case 'n': nmi = atoi(optarg); break; - case 'p': - /* CPU and PID are mutually exclusive */ - if (profile_cpu != -1) { - printf("WARNING: PID switch overriding CPU\n"); - sleep(1); - profile_cpu = -1; + if (type >= 0) + return hw_event_names[type]; + + return sw_event_names[-type-1]; +} + +/* + * Each event can have multiple symbolic names. + * Symbolic names are (almost) exactly matched. 
+ */ +static int match_event_symbols(char *str) +{ + unsigned int i; + + if (isdigit(str[0]) || str[0] == '-') + return atoi(str); + + for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { + if (!strncmp(str, event_symbols[i].symbol, + strlen(event_symbols[i].symbol))) + return event_symbols[i].event; + } + + return PERF_HW_EVENTS_MAX; +} + +static int parse_events(char *str) +{ + int type, raw; + +again: + if (nr_counters == MAX_COUNTERS) + return -1; + + raw = 0; + if (*str == 'r') { + raw = 1; + ++str; + type = strtol(str, NULL, 16); + } else { + type = match_event_symbols(str); + if (!type_valid(type)) + return -1; + } + + event_id[nr_counters] = type; + event_raw[nr_counters] = raw; + nr_counters++; + + str = strstr(str, ","); + if (str) { + str++; + goto again; + } + + return 0; +} + + +/* + * perfstat + */ + +char fault_here[1000000]; + +static void create_perfstat_counter(int counter) +{ + struct perf_counter_hw_event hw_event; + + memset(&hw_event, 0, sizeof(hw_event)); + hw_event.type = event_id[counter]; + hw_event.raw = event_raw[counter]; + hw_event.record_type = PERF_RECORD_SIMPLE; + hw_event.nmi = 0; + + if (system_wide) { + int cpu; + for (cpu = 0; cpu < nr_cpus; cpu ++) { + fd[cpu][counter] = sys_perf_counter_open(&hw_event, -1, cpu, -1, 0); + if (fd[cpu][counter] < 0) { + printf("perfstat error: syscall returned with %d (%s)\n", + fd[cpu][counter], strerror(errno)); + exit(-1); } - tid = atoi(optarg); break; - case 's': sym_filter = strdup(optarg); break; - case 'x': vmlinux = strdup(optarg); break; - case 'z': zero = 1; break; - default: error = 1; break; + } + } else { + hw_event.inherit = 1; + hw_event.disabled = 1; + + fd[0][counter] = sys_perf_counter_open(&hw_event, 0, -1, -1, 0); + if (fd[0][counter] < 0) { + printf("perfstat error: syscall returned with %d (%s)\n", + fd[0][counter], strerror(errno)); + exit(-1); } } - if (error) - display_help(); +} - if (!nr_counters) { - nr_counters = 1; - event_id[0] = 0; +int do_perfstat(int argc, char *argv[]) +{ + unsigned long long t0, t1; + int counter; + ssize_t res; + int status; + int pid; + + if (!system_wide) + nr_cpus = 1; + + for (counter = 0; counter < nr_counters; counter++) + create_perfstat_counter(counter); + + argc -= optind; + argv += optind; + + /* + * Enable counters and exec the command: + */ + t0 = rdclock(); + prctl(PR_TASK_PERF_COUNTERS_ENABLE); + + if ((pid = fork()) < 0) + perror("failed to fork"); + if (!pid) { + if (execvp(argv[0], argv)) { + perror(argv[0]); + exit(-1); + } } + while (wait(&status) >= 0) + ; + prctl(PR_TASK_PERF_COUNTERS_DISABLE); + t1 = rdclock(); + + fflush(stdout); + + fprintf(stderr, "\n"); + fprintf(stderr, " Performance counter stats for \'%s\':\n", + argv[0]); + fprintf(stderr, "\n"); for (counter = 0; counter < nr_counters; counter++) { - if (event_count[counter]) - continue; + int cpu; + __u64 count, single_count; + + count = 0; + for (cpu = 0; cpu < nr_cpus; cpu ++) { + res = read(fd[cpu][counter], + (char *) &single_count, sizeof(single_count)); + assert(res == sizeof(single_count)); + count += single_count; + } - if (event_id[counter] < PERF_HW_EVENTS_MAX) - event_count[counter] = default_count[event_id[counter]]; - else - event_count[counter] = 100000; + if (!event_raw[counter] && + (event_id[counter] == PERF_COUNT_CPU_CLOCK || + event_id[counter] == PERF_COUNT_TASK_CLOCK)) { + + double msecs = (double)count / 1000000; + + fprintf(stderr, " %14.6f %-20s (msecs)\n", + msecs, event_name(counter)); + } else { + fprintf(stderr, " %14Ld %-20s (events)\n", + count, 
event_name(counter)); + } + if (!counter) + fprintf(stderr, "\n"); } + fprintf(stderr, "\n"); + fprintf(stderr, " Wall-clock time elapsed: %12.6f msecs\n", + (double)(t1-t0)/1e6); + fprintf(stderr, "\n"); + + return 0; } +/* + * Symbols + */ + static uint64_t min_ip; static uint64_t max_ip = -1ll; @@ -507,6 +743,9 @@ static void parse_symbols(void) } } +/* + * Source lines + */ static void parse_vmlinux(char *filename) { @@ -527,7 +766,7 @@ static void parse_vmlinux(char *filename) char *c; src = malloc(sizeof(struct source_line)); - assert(src != NULL); + assert(src != NULL); memset(src, 0, sizeof(struct source_line)); if (getline(&src->line, &dummy, file) < 0) @@ -706,11 +945,100 @@ static void process_event(uint64_t ip, int counter) record_ip(ip, counter); } +static void process_options(int argc, char *argv[]) +{ + int error = 0, counter; + + if (strstr(argv[0], "perfstat")) + run_perfstat = 1; + + for (;;) { + int option_index = 0; + /** Options for getopt */ + static struct option long_options[] = { + {"count", required_argument, NULL, 'c'}, + {"cpu", required_argument, NULL, 'C'}, + {"delay", required_argument, NULL, 'd'}, + {"dump_symtab", no_argument, NULL, 'D'}, + {"event", required_argument, NULL, 'e'}, + {"filter", required_argument, NULL, 'f'}, + {"group", required_argument, NULL, 'g'}, + {"help", no_argument, NULL, 'h'}, + {"nmi", required_argument, NULL, 'n'}, + {"pid", required_argument, NULL, 'p'}, + {"vmlinux", required_argument, NULL, 'x'}, + {"symbol", required_argument, NULL, 's'}, + {"stat", no_argument, NULL, 'S'}, + {"zero", no_argument, NULL, 'z'}, + {NULL, 0, NULL, 0 } + }; + int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hn:p:s:Sx:z", + long_options, &option_index); + if (c == -1) + break; + + switch (c) { + case 'a': system_wide = 1; break; + case 'c': event_count[nr_counters] = atoi(optarg); break; + case 'C': + /* CPU and PID are mutually exclusive */ + if (tid != -1) { + printf("WARNING: CPU switch overriding PID\n"); + sleep(1); + tid = -1; + } + profile_cpu = atoi(optarg); break; + case 'd': delay_secs = atoi(optarg); break; + case 'D': dump_symtab = 1; break; + + case 'e': error = parse_events(optarg); break; + + case 'f': count_filter = atoi(optarg); break; + case 'g': group = atoi(optarg); break; + case 'h': display_help(); break; + case 'n': nmi = atoi(optarg); break; + case 'p': + /* CPU and PID are mutually exclusive */ + if (profile_cpu != -1) { + printf("WARNING: PID switch overriding CPU\n"); + sleep(1); + profile_cpu = -1; + } + tid = atoi(optarg); break; + case 's': sym_filter = strdup(optarg); break; + case 'S': run_perfstat = 1; break; + case 'x': vmlinux = strdup(optarg); break; + case 'z': zero = 1; break; + default: error = 1; break; + } + } + if (error) + display_help(); + + if (!nr_counters) { + if (run_perfstat) + nr_counters = 8; + else { + nr_counters = 1; + event_id[0] = 0; + } + } + + for (counter = 0; counter < nr_counters; counter++) { + if (event_count[counter]) + continue; + + if (event_id[counter] < PERF_HW_EVENTS_MAX) + event_count[counter] = default_count[event_id[counter]]; + else + event_count[counter] = 100000; + } +} + int main(int argc, char *argv[]) { struct pollfd event_array[MAX_NR_CPUS][MAX_COUNTERS]; struct perf_counter_hw_event hw_event; - int fd[MAX_NR_CPUS][MAX_COUNTERS]; int i, counter, group_fd; unsigned int cpu; uint64_t ip; @@ -720,11 +1048,15 @@ int main(int argc, char *argv[]) process_options(argc, argv); nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); + assert(nr_cpus <= MAX_NR_CPUS); + assert(nr_cpus >= 0); + + 
if (run_perfstat) + return do_perfstat(argc, argv); + if (tid != -1 || profile_cpu != -1) nr_cpus = 1; - assert(nr_cpus <= MAX_NR_CPUS); - for (i = 0; i < nr_cpus; i++) { group_fd = -1; for (counter = 0; counter < nr_counters; counter++) { diff --git a/Documentation/perf_counter/perfcounters.h b/Documentation/perf_counter/perfcounters.h index 99a90d833e1..32e24b9154a 100644 --- a/Documentation/perf_counter/perfcounters.h +++ b/Documentation/perf_counter/perfcounters.h @@ -11,9 +11,6 @@ #define PR_TASK_PERF_COUNTERS_DISABLE 31 #define PR_TASK_PERF_COUNTERS_ENABLE 32 -#define MAX_COUNTERS 64 -#define MAX_NR_CPUS 256 - #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define rdclock() \ @@ -110,6 +107,7 @@ struct perf_counter_hw_event { __u64 __reserved_3; }; + #ifdef __x86_64__ # define __NR_perf_counter_open 295 #endif @@ -142,131 +140,3 @@ asmlinkage int sys_perf_counter_open( #endif return ret; } - -static int nr_counters = 0; -static long event_id[MAX_COUNTERS] = { -2, -5, -4, -3, 0, 1, 2, 3}; -static int event_raw[MAX_COUNTERS]; - -static char *hw_event_names [] = { - "CPU cycles", - "instructions", - "cache references", - "cache misses", - "branches", - "branch misses", - "bus cycles", -}; - -static char *sw_event_names [] = { - "cpu clock ticks", - "task clock ticks", - "pagefaults", - "context switches", - "CPU migrations", -}; - -struct event_symbol { - int event; - char *symbol; -}; - -static struct event_symbol event_symbols [] = { - {PERF_COUNT_CPU_CYCLES, "cpu-cycles", }, - {PERF_COUNT_CPU_CYCLES, "cycles", }, - {PERF_COUNT_INSTRUCTIONS, "instructions", }, - {PERF_COUNT_CACHE_REFERENCES, "cache-references", }, - {PERF_COUNT_CACHE_MISSES, "cache-misses", }, - {PERF_COUNT_BRANCH_INSTRUCTIONS, "branch-instructions", }, - {PERF_COUNT_BRANCH_INSTRUCTIONS, "branches", }, - {PERF_COUNT_BRANCH_MISSES, "branch-misses", }, - {PERF_COUNT_BUS_CYCLES, "bus-cycles", }, - {PERF_COUNT_CPU_CLOCK, "cpu-ticks", }, - {PERF_COUNT_CPU_CLOCK, "ticks", }, - {PERF_COUNT_TASK_CLOCK, "task-ticks", }, - {PERF_COUNT_PAGE_FAULTS, "page-faults", }, - {PERF_COUNT_PAGE_FAULTS, "faults", }, - {PERF_COUNT_CONTEXT_SWITCHES, "context-switches", }, - {PERF_COUNT_CONTEXT_SWITCHES, "cs", }, - {PERF_COUNT_CPU_MIGRATIONS, "cpu-migrations", }, - {PERF_COUNT_CPU_MIGRATIONS, "migrations", }, -}; - -static int type_valid(int type) -{ - if (type >= PERF_HW_EVENTS_MAX) - return 0; - if (type <= PERF_SW_EVENTS_MIN) - return 0; - - return 1; -} - -static char *event_name(int ctr) -{ - int type = event_id[ctr]; - static char buf[32]; - - if (event_raw[ctr]) { - sprintf(buf, "raw 0x%x", type); - return buf; - } - if (!type_valid(type)) - return "unknown"; - - if (type >= 0) - return hw_event_names[type]; - - return sw_event_names[-type-1]; -} - -/* - * Each event can have multiple symbolic names. - * Symbolic names are (almost) exactly matched. 
- */
-static int match_event_symbols(char *str)
-{
-	unsigned int i;
-
-	if (isdigit(str[0]) || str[0] == '-')
-		return atoi(str);
-
-	for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
-		if (!strncmp(str, event_symbols[i].symbol,
-			     strlen(event_symbols[i].symbol)))
-			return event_symbols[i].event;
-	}
-
-	return PERF_HW_EVENTS_MAX;
-}
-
-static int parse_events(char *str)
-{
-	int type, raw;
-
-again:
-	if (nr_counters == MAX_COUNTERS)
-		return -1;
-
-	raw = 0;
-	if (*str == 'r') {
-		raw = 1;
-		++str;
-		type = strtol(str, NULL, 16);
-	} else {
-		type = match_event_symbols(str);
-		if (!type_valid(type))
-			return -1;
-	}
-
-	event_id[nr_counters] = type;
-	event_raw[nr_counters] = raw;
-	nr_counters++;
-
-	str = strstr(str, ",");
-	if (str) {
-		str++;
-		goto again;
-	}
-
-	return 0;
-}
-- cgit v1.2.3

From ef45fa9e6c1694d3e8063f39749097a6e496b12c Mon Sep 17 00:00:00 2001
From: Wu Fengguang
Date: Fri, 20 Mar 2009 10:08:07 +0800
Subject: perf_counter tools: fix comment for sym_weight()

Signed-off-by: Wu Fengguang
Acked-by: Peter Zijlstra
Cc: Paul Mackerras
Signed-off-by: Ingo Molnar
---
 Documentation/perf_counter/kerneltop.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c
index 9db65a4f104..7bf2a516f18 100644
--- a/Documentation/perf_counter/kerneltop.c
+++ b/Documentation/perf_counter/kerneltop.c
@@ -473,7 +473,7 @@ static struct sym_entry sym_table[MAX_SYMS];
 static void show_details(struct sym_entry *sym);
 
 /*
- * Ordering weight: count-1 * count-1 * ... / count-n
+ * Ordering weight: count-1 * count-2 * ... / count-n
 */
 static double sym_weight(const struct sym_entry *sym)
 {
-- cgit v1.2.3

From 3ab8d792b1348eaabfe550ba60902d781d160dd4 Mon Sep 17 00:00:00 2001
From: Wu Fengguang
Date: Fri, 20 Mar 2009 10:08:08 +0800
Subject: perf_counter tools: fix event_id type

Signed-off-by: Wu Fengguang
Acked-by: Peter Zijlstra
Cc: Paul Mackerras
Signed-off-by: Ingo Molnar
---
 Documentation/perf_counter/kerneltop.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c
index 7bf2a516f18..7bfb0f0d800 100644
--- a/Documentation/perf_counter/kerneltop.c
+++ b/Documentation/perf_counter/kerneltop.c
@@ -99,7 +99,7 @@
 static int run_perfstat = 0;
 static int system_wide = 0;
 static int nr_counters = 0;
-static long event_id[MAX_COUNTERS] = DEF_PERFSTAT_EVENTS;
+static __s64 event_id[MAX_COUNTERS] = DEF_PERFSTAT_EVENTS;
 static int event_raw[MAX_COUNTERS];
 static int event_count[MAX_COUNTERS];
 static int fd[MAX_NR_CPUS][MAX_COUNTERS];
@@ -261,11 +261,11 @@ static int type_valid(int type)
 
 static char *event_name(int ctr)
 {
-	int type = event_id[ctr];
+	__s64 type = event_id[ctr];
 	static char buf[32];
 
 	if (event_raw[ctr]) {
-		sprintf(buf, "raw 0x%x", type);
+		sprintf(buf, "raw 0x%llx", (long long)type);
 		return buf;
 	}
 	if (!type_valid(type))
@@ -299,7 +299,8 @@ static int match_event_symbols(char *str)
 
 static int parse_events(char *str)
 {
-	int type, raw;
+	__s64 type;
+	int raw;
 
 again:
 	if (nr_counters == MAX_COUNTERS)
-- cgit v1.2.3

From dda7c02f33833bfa9412ba6f0e410b0a18b42c88 Mon Sep 17 00:00:00 2001
From: Wu Fengguang
Date: Fri, 20 Mar 2009 10:08:09 +0800
Subject: perf_counter tools: cut down default count for cpu-cycles

On my system, it takes kerneltop dozens of minutes to show usable
numbers. Making the default count 100 times smaller fixed this long
startup latency.

I'm not sure if it's the right solution though.
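[Editor's note: the default count is the sample period in events, so for the cpu-cycles counter it directly sets the sample rate. A back-of-the-envelope sketch of what the change means (the 2 GHz clock rate is an assumed figure, not taken from the patch):

	#include <stdio.h>

	int main(void)
	{
		const double cpu_hz = 2e9;			/* assumed 2 GHz clock */
		const unsigned int period[] = { 1000000, 10000 };	/* old and new default */
		int i;

		for (i = 0; i < 2; i++)
			printf("period %7u cycles -> ~%.0f samples/sec per busy CPU\n",
			       period[i], cpu_hz / period[i]);
		return 0;
	}

With the old period a busy CPU delivers only ~2000 samples per second, spread across every hot symbol, so a symbol takes a while to accumulate the 100 events required by the default count_filter; the new period raises the sample rate a hundredfold, at the cost of proportionally higher sampling overhead.]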
Signed-off-by: Wu Fengguang
Acked-by: Peter Zijlstra
Cc: Paul Mackerras
Signed-off-by: Ingo Molnar
---
 Documentation/perf_counter/kerneltop.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c
index 7bfb0f0d800..0bd3c13150b 100644
--- a/Documentation/perf_counter/kerneltop.c
+++ b/Documentation/perf_counter/kerneltop.c
@@ -132,7 +132,7 @@ struct source_line {
 
 const unsigned int default_count[] = {
-	1000000,
+	  10000,
 	1000000,
 	  10000,
 	  10000,
-- cgit v1.2.3

From af9522cf133e9be6da8525a46a9ed7e7659f0e1a Mon Sep 17 00:00:00 2001
From: Wu Fengguang
Date: Fri, 20 Mar 2009 10:08:10 +0800
Subject: perf_counter tools: when no command is fed to perfstat, display help and exit

Signed-off-by: Wu Fengguang
Acked-by: Peter Zijlstra
Cc: Paul Mackerras
Signed-off-by: Ingo Molnar
---
 Documentation/perf_counter/kerneltop.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c
index 0bd3c13150b..81a68aac137 100644
--- a/Documentation/perf_counter/kerneltop.c
+++ b/Documentation/perf_counter/kerneltop.c
@@ -387,6 +387,9 @@ int do_perfstat(int argc, char *argv[])
 	argc -= optind;
 	argv += optind;
 
+	if (!argc)
+		display_help();
+
 	/*
 	 * Enable counters and exec the command:
 	 */
-- cgit v1.2.3

From f4a2deb4860497f4332cf6a1acddab3dd628ddf0 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 23 Mar 2009 18:22:06 +0100
Subject: perf_counter: remove the event config bitfields

Since the bitfields turned into a bit of a mess, remove them and rely on
good old masks.

Signed-off-by: Peter Zijlstra
Cc: Paul Mackerras
Orig-LKML-Reference: <20090323172417.059499915@chello.nl>
Signed-off-by: Ingo Molnar
---
 arch/powerpc/kernel/perf_counter.c |  6 ++--
 arch/x86/kernel/cpu/perf_counter.c |  8 ++---
 include/linux/perf_counter.h       | 74 +++++++++++++++++++++++++-------------
 kernel/perf_counter.c              | 22 +++++++-----
 4 files changed, 70 insertions(+), 40 deletions(-)

diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 6413d9c0313..d05651584d4 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -602,13 +602,13 @@ hw_perf_counter_init(struct perf_counter *counter)
 		return NULL;
 	if ((s64)counter->hw_event.irq_period < 0)
 		return NULL;
-	if (!counter->hw_event.raw_type) {
-		ev = counter->hw_event.event_id;
+	if (!perf_event_raw(&counter->hw_event)) {
+		ev = perf_event_id(&counter->hw_event);
 		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
 			return NULL;
 		ev = ppmu->generic_events[ev];
 	} else {
-		ev = counter->hw_event.raw_event_id;
+		ev = perf_event_config(&counter->hw_event);
 	}
 	counter->hw.config_base = ev;
 	counter->hw.idx = 0;

diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 902282d68b0..3f95b0cdc55 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -217,15 +217,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	/*
 	 * Raw event type provide the config in the event structure
 	 */
-	if (hw_event->raw_type) {
-		hwc->config |= pmc_ops->raw_event(hw_event->raw_event_id);
+	if (perf_event_raw(hw_event)) {
+		hwc->config |= pmc_ops->raw_event(perf_event_config(hw_event));
 	} else {
-		if (hw_event->event_id >= pmc_ops->max_events)
+		if (perf_event_id(hw_event) >= pmc_ops->max_events)
 			return -EINVAL;
 		/*
 		 * The generic map:
 		 */
-		hwc->config |= pmc_ops->event_map(hw_event->event_id);
+		hwc->config |=
pmc_ops->event_map(perf_event_id(hw_event)); } counter->wakeup_pending = 0; diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 98f5990be1e..56099e52970 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -82,32 +82,37 @@ enum perf_counter_record_type { PERF_RECORD_GROUP = 2, }; +#define __PERF_COUNTER_MASK(name) \ + (((1ULL << PERF_COUNTER_##name##_BITS) - 1) << \ + PERF_COUNTER_##name##_SHIFT) + +#define PERF_COUNTER_RAW_BITS 1 +#define PERF_COUNTER_RAW_SHIFT 63 +#define PERF_COUNTER_RAW_MASK __PERF_COUNTER_MASK(RAW) + +#define PERF_COUNTER_CONFIG_BITS 63 +#define PERF_COUNTER_CONFIG_SHIFT 0 +#define PERF_COUNTER_CONFIG_MASK __PERF_COUNTER_MASK(CONFIG) + +#define PERF_COUNTER_TYPE_BITS 7 +#define PERF_COUNTER_TYPE_SHIFT 56 +#define PERF_COUNTER_TYPE_MASK __PERF_COUNTER_MASK(TYPE) + +#define PERF_COUNTER_EVENT_BITS 56 +#define PERF_COUNTER_EVENT_SHIFT 0 +#define PERF_COUNTER_EVENT_MASK __PERF_COUNTER_MASK(EVENT) + /* * Hardware event to monitor via a performance monitoring counter: */ struct perf_counter_hw_event { - union { -#ifndef __BIG_ENDIAN_BITFIELD - struct { - __u64 event_id : 56, - type : 8; - }; - struct { - __u64 raw_event_id : 63, - raw_type : 1; - }; -#else - struct { - __u64 type : 8, - event_id : 56; - }; - struct { - __u64 raw_type : 1, - raw_event_id : 63; - }; -#endif /* __BIT_ENDIAN_BITFIELD */ - __u64 event_config; - }; + /* + * The MSB of the config word signifies if the rest contains cpu + * specific (raw) counter configuration data, if unset, the next + * 7 bits are an event type and the rest of the bits are the event + * identifier. + */ + __u64 config; __u64 irq_period; __u64 record_type; @@ -157,6 +162,27 @@ struct perf_counter_hw_event { struct task_struct; +static inline u64 perf_event_raw(struct perf_counter_hw_event *hw_event) +{ + return hw_event->config & PERF_COUNTER_RAW_MASK; +} + +static inline u64 perf_event_config(struct perf_counter_hw_event *hw_event) +{ + return hw_event->config & PERF_COUNTER_CONFIG_MASK; +} + +static inline u64 perf_event_type(struct perf_counter_hw_event *hw_event) +{ + return (hw_event->config & PERF_COUNTER_TYPE_MASK) >> + PERF_COUNTER_TYPE_SHIFT; +} + +static inline u64 perf_event_id(struct perf_counter_hw_event *hw_event) +{ + return hw_event->config & PERF_COUNTER_EVENT_MASK; +} + /** * struct hw_perf_counter - performance counter hardware details: */ @@ -336,8 +362,8 @@ extern void perf_counter_output(struct perf_counter *counter, */ static inline int is_software_counter(struct perf_counter *counter) { - return !counter->hw_event.raw_type && - counter->hw_event.type != PERF_TYPE_HARDWARE; + return !perf_event_raw(&counter->hw_event) && + perf_event_type(&counter->hw_event) != PERF_TYPE_HARDWARE; } extern void perf_swcounter_event(u32, u64, int, struct pt_regs *); diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index f054b8c9bf9..ca14fc41ccd 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1379,7 +1379,7 @@ static void perf_counter_handle_group(struct perf_counter *counter) list_for_each_entry(sub, &leader->sibling_list, list_entry) { if (sub != counter) sub->hw_ops->read(sub); - perf_counter_store_irq(counter, sub->hw_event.event_config); + perf_counter_store_irq(counter, sub->hw_event.config); perf_counter_store_irq(counter, atomic64_read(&sub->count)); } } @@ -1489,13 +1489,13 @@ static int perf_swcounter_match(struct perf_counter *counter, if (counter->state != PERF_COUNTER_STATE_ACTIVE) return 0; - if (counter->hw_event.raw_type) + if 
(perf_event_raw(&counter->hw_event)) return 0; - if (counter->hw_event.type != type) + if (perf_event_type(&counter->hw_event) != type) return 0; - if (counter->hw_event.event_id != event) + if (perf_event_id(&counter->hw_event) != event) return 0; if (counter->hw_event.exclude_user && user_mode(regs)) @@ -1757,13 +1757,13 @@ extern void ftrace_profile_disable(int); static void tp_perf_counter_destroy(struct perf_counter *counter) { - ftrace_profile_disable(counter->hw_event.event_id); + ftrace_profile_disable(perf_event_id(&counter->hw_event)); } static const struct hw_perf_counter_ops * tp_perf_counter_init(struct perf_counter *counter) { - int event_id = counter->hw_event.event_id; + int event_id = perf_event_id(&counter->hw_event); int ret; ret = ftrace_profile_enable(event_id); @@ -1797,7 +1797,7 @@ sw_perf_counter_init(struct perf_counter *counter) * to be kernel events, and page faults are never hypervisor * events. */ - switch (counter->hw_event.event_id) { + switch (perf_event_id(&counter->hw_event)) { case PERF_COUNT_CPU_CLOCK: hw_ops = &perf_ops_cpu_clock; @@ -1882,9 +1882,12 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, hw_ops = NULL; - if (hw_event->raw_type) + if (perf_event_raw(hw_event)) { hw_ops = hw_perf_counter_init(counter); - else switch (hw_event->type) { + goto done; + } + + switch (perf_event_type(hw_event)) { case PERF_TYPE_HARDWARE: hw_ops = hw_perf_counter_init(counter); break; @@ -1902,6 +1905,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, kfree(counter); return NULL; } +done: counter->hw_ops = hw_ops; return counter; -- cgit v1.2.3 From 96f6d4444302bb2ea2cf409529eef816462f6ce0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 23 Mar 2009 18:22:07 +0100 Subject: perf_counter: avoid recursion Tracepoint events like lock_acquire and software counters like pagefaults can recurse into the perf counter code again, avoid that. 
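[Editor's note: a minimal standalone sketch of the guard this patch introduces (the real code, in the diff below, keeps the flags in the per-CPU struct perf_cpu_context): one flag per execution context, so an NMI that interrupts IRQ-context processing is still counted, while an event raised from inside the software-counter path itself is dropped:

	#include <linux/types.h>	/* u64 */
	#include <linux/hardirq.h>	/* in_nmi(), in_irq(), in_softirq() */
	#include <linux/compiler.h>	/* barrier() */

	/* One recursion flag per execution context: task, softirq, irq, nmi. */
	static int recursion[4];

	static int *swcounter_recursion_context(void)
	{
		if (in_nmi())
			return &recursion[3];
		if (in_irq())
			return &recursion[2];
		if (in_softirq())
			return &recursion[1];
		return &recursion[0];
	}

	static void swcounter_event(u64 nr)
	{
		int *rec = swcounter_recursion_context();

		if (*rec)
			return;		/* re-entered from our own path: drop the event */

		(*rec)++;
		barrier();

		/* ... process and record nr against matching counters ... */

		barrier();
		(*rec)--;
	}
]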
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Orig-LKML-Reference: <20090323172417.152096433@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 7 +++++++ kernel/perf_counter.c | 26 ++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 56099e52970..18dc17d0a61 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -328,6 +328,13 @@ struct perf_cpu_context { int active_oncpu; int max_pertask; int exclusive; + + /* + * Recursion avoidance: + * + * task, softirq, irq, nmi context + */ + int recursion[4]; }; /* diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index ca14fc41ccd..ce34bff07bd 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -1532,10 +1533,31 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, rcu_read_unlock(); } +static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx) +{ + if (in_nmi()) + return &cpuctx->recursion[3]; + + if (in_irq()) + return &cpuctx->recursion[2]; + + if (in_softirq()) + return &cpuctx->recursion[1]; + + return &cpuctx->recursion[0]; +} + static void __perf_swcounter_event(enum perf_event_types type, u32 event, u64 nr, int nmi, struct pt_regs *regs) { struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); + int *recursion = perf_swcounter_recursion_context(cpuctx); + + if (*recursion) + goto out; + + (*recursion)++; + barrier(); perf_swcounter_ctx_event(&cpuctx->ctx, type, event, nr, nmi, regs); if (cpuctx->task_ctx) { @@ -1543,6 +1565,10 @@ static void __perf_swcounter_event(enum perf_event_types type, u32 event, nr, nmi, regs); } + barrier(); + (*recursion)--; + +out: put_cpu_var(perf_cpu_context); } -- cgit v1.2.3 From 37d81828385f8ff823caaaf1a83e72d065b6cfa1 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 23 Mar 2009 18:22:08 +0100 Subject: perf_counter: add an mmap method to allow userspace to read hardware counters Impact: new feature giving performance improvement This adds the ability for userspace to do an mmap on a hardware counter fd and get access to a read-only page that contains the information needed to translate a hardware counter value to the full 64-bit counter value that would be returned by a read on the fd. This is useful on architectures that allow user programs to read the hardware counters, such as PowerPC. The mmap will only succeed if the counter is a hardware counter monitoring the current process. On my quad 2.5GHz PowerPC 970MP machine, userspace can read a counter and translate it to the full 64-bit value in about 30ns using the mmapped page, compared to about 830ns for the read syscall on the counter, so this does give a significant performance improvement. 
Signed-off-by: Paul Mackerras Signed-off-by: Peter Zijlstra Orig-LKML-Reference: <20090323172417.297057964@chello.nl> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 6 +++ include/linux/perf_counter.h | 15 ++++++++ kernel/perf_counter.c | 76 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 97 insertions(+) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index d05651584d4..e4349281b07 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -417,6 +417,8 @@ void hw_perf_restore(u64 disable) atomic64_set(&counter->hw.prev_count, val); counter->hw.idx = hwc_index[i] + 1; write_pmc(counter->hw.idx, val); + if (counter->user_page) + perf_counter_update_userpage(counter); } mb(); cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; @@ -572,6 +574,8 @@ static void power_perf_disable(struct perf_counter *counter) ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr); write_pmc(counter->hw.idx, 0); counter->hw.idx = 0; + if (counter->user_page) + perf_counter_update_userpage(counter); break; } } @@ -698,6 +702,8 @@ static void record_and_restart(struct perf_counter *counter, long val, write_pmc(counter->hw.idx, val); atomic64_set(&counter->hw.prev_count, val); atomic64_set(&counter->hw.period_left, left); + if (counter->user_page) + perf_counter_update_userpage(counter); /* * Finally record data if requested. diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 18dc17d0a61..40b324e91bf 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -143,6 +143,17 @@ struct perf_counter_hw_event { #define PERF_COUNTER_IOC_ENABLE _IO('$', 0) #define PERF_COUNTER_IOC_DISABLE _IO('$', 1) +/* + * Structure of the page that can be mapped via mmap + */ +struct perf_counter_mmap_page { + __u32 version; /* version number of this structure */ + __u32 compat_version; /* lowest version this is compat with */ + __u32 lock; /* seqlock for synchronization */ + __u32 index; /* hardware counter identifier */ + __s64 offset; /* add to hardware counter value */ +}; + #ifdef __KERNEL__ /* * Kernel-internal data types and definitions: @@ -278,6 +289,9 @@ struct perf_counter { int oncpu; int cpu; + /* pointer to page shared with userspace via mmap */ + unsigned long user_page; + /* read() / irq related data */ wait_queue_head_t waitq; /* optional: for NMIs */ @@ -361,6 +375,7 @@ extern int perf_counter_task_enable(void); extern int hw_perf_group_sched_in(struct perf_counter *group_leader, struct perf_cpu_context *cpuctx, struct perf_counter_context *ctx, int cpu); +extern void perf_counter_update_userpage(struct perf_counter *counter); extern void perf_counter_output(struct perf_counter *counter, int nmi, struct pt_regs *regs); diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index ce34bff07bd..d9cfd902140 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1177,6 +1177,7 @@ static int perf_release(struct inode *inode, struct file *file) mutex_unlock(&counter->mutex); mutex_unlock(&ctx->mutex); + free_page(counter->user_page); free_counter(counter); put_context(ctx); @@ -1346,12 +1347,87 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return err; } +void perf_counter_update_userpage(struct perf_counter *counter) +{ + struct perf_counter_mmap_page *userpg; + + if (!counter->user_page) + return; + userpg = (struct perf_counter_mmap_page *) counter->user_page; + + ++userpg->lock; + smp_wmb(); + userpg->index = counter->hw.idx; + 
userpg->offset = atomic64_read(&counter->count); + if (counter->state == PERF_COUNTER_STATE_ACTIVE) + userpg->offset -= atomic64_read(&counter->hw.prev_count); + smp_wmb(); + ++userpg->lock; +} + +static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct perf_counter *counter = vma->vm_file->private_data; + + if (!counter->user_page) + return VM_FAULT_SIGBUS; + + vmf->page = virt_to_page(counter->user_page); + get_page(vmf->page); + return 0; +} + +static struct vm_operations_struct perf_mmap_vmops = { + .fault = perf_mmap_fault, +}; + +static int perf_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct perf_counter *counter = file->private_data; + unsigned long userpg; + + if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE)) + return -EINVAL; + if (vma->vm_end - vma->vm_start != PAGE_SIZE) + return -EINVAL; + + /* + * For now, restrict to the case of a hardware counter + * on the current task. + */ + if (is_software_counter(counter) || counter->task != current) + return -EINVAL; + + userpg = counter->user_page; + if (!userpg) { + userpg = get_zeroed_page(GFP_KERNEL); + mutex_lock(&counter->mutex); + if (counter->user_page) { + free_page(userpg); + userpg = counter->user_page; + } else { + counter->user_page = userpg; + } + mutex_unlock(&counter->mutex); + if (!userpg) + return -ENOMEM; + } + + perf_counter_update_userpage(counter); + + vma->vm_flags &= ~VM_MAYWRITE; + vma->vm_flags |= VM_RESERVED; + vma->vm_ops = &perf_mmap_vmops; + return 0; +} + static const struct file_operations perf_fops = { .release = perf_release, .read = perf_read, .poll = perf_poll, .unlocked_ioctl = perf_ioctl, .compat_ioctl = perf_ioctl, + .mmap = perf_mmap, }; /* -- cgit v1.2.3 From 9ab772cd535c4b256a577eae516f9c7462346b2d Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Mon, 23 Mar 2009 18:22:09 +0100 Subject: mutex: add atomic_dec_and_mutex_lock() Much like the atomic_dec_and_lock() function in which we take an hold a spin_lock if we drop the atomic to 0 this function takes and holds the mutex if we dec the atomic to 0. Signed-off-by: Eric Paris Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Orig-LKML-Reference: <20090323172417.410913479@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/mutex.h | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 3069ec7e0ab..93054fc3635 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -151,4 +151,27 @@ extern int __must_check mutex_lock_killable(struct mutex *lock); extern int mutex_trylock(struct mutex *lock); extern void mutex_unlock(struct mutex *lock); +/** + * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 + * @cnt: the atomic which we are to dec + * @lock: the mutex to return holding if we dec to 0 + * + * return true and hold lock if we dec to 0, return false otherwise + */ +static inline int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) +{ + /* dec if we can't possibly hit 0 */ + if (atomic_add_unless(cnt, -1, 1)) + return 0; + /* we might hit 0, so take the lock */ + mutex_lock(lock); + if (!atomic_dec_and_test(cnt)) { + /* when we actually did the dec, we didn't hit 0 */ + mutex_unlock(lock); + return 0; + } + /* we hit 0, and we hold the lock */ + return 1; +} + #endif -- cgit v1.2.3 From b09d2501ed3d294619cbfbcf828ad39324d0e548 Mon Sep 17 00:00:00 2001 From: "H. 
Peter Anvin" Date: Wed, 1 Apr 2009 17:21:56 -0700 Subject: mutex: drop "inline" from mutex_lock() inside kernel/mutex.c Impact: build fix mutex_lock() is was defined inline in kernel/mutex.c, but wasn't declared so not in . This didn't cause a problem until checkin 3a2d367d9aabac486ac4444c6c7ec7a1dab16267 added the atomic_dec_and_mutex_lock() inline in between declaration and definion. This broke building with CONFIG_ALLOW_WARNINGS=n, e.g. make allnoconfig. Either from the source code nor the allnoconfig binary output I cannot find any internal references to mutex_lock() in kernel/mutex.c, so presumably this "inline" is now-useless legacy. Cc: Eric Paris Cc: Peter Zijlstra Cc: Paul Mackerras Orig-LKML-Reference: Signed-off-by: H. Peter Anvin --- kernel/mutex.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/mutex.c b/kernel/mutex.c index 5d79781394a..fd95eaa672e 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -89,7 +89,7 @@ __mutex_lock_slowpath(atomic_t *lock_count); * * This function is similar to (but not equivalent to) down(). */ -void inline __sched mutex_lock(struct mutex *lock) +void __sched mutex_lock(struct mutex *lock) { might_sleep(); /* -- cgit v1.2.3 From 7b732a75047738e4f85438ed2f9cd34bf5f2a19a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 23 Mar 2009 18:22:10 +0100 Subject: perf_counter: new output ABI - part 1 Impact: Rework the perfcounter output ABI use sys_read() only for instant data and provide mmap() output for all async overflow data. The first mmap() determines the size of the output buffer. The mmap() size must be a PAGE_SIZE multiple of 1+pages, where pages must be a power of 2 or 0. Further mmap()s of the same fd must have the same size. Once all maps are gone, you can again mmap() with a new size. In case of 0 extra pages there is no data output and the first page only contains meta data. When there are data pages, a poll() event will be generated for each full page of data. Furthermore, the output is circular. This means that although 1 page is a valid configuration, its useless, since we'll start overwriting it the instant we report a full page. Future work will focus on the output format (currently maintained) where we'll likey want each entry denoted by a header which includes a type and length. Further future work will allow to splice() the fd, also containing the async overflow data -- splice() would be mutually exclusive with mmap() of the data. 
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Orig-LKML-Reference: <20090323172417.470536358@chello.nl> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 9 +- include/linux/perf_counter.h | 36 ++- kernel/perf_counter.c | 464 ++++++++++++++++++++----------------- 3 files changed, 263 insertions(+), 246 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index e4349281b07..d48596ab655 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -417,8 +417,7 @@ void hw_perf_restore(u64 disable) atomic64_set(&counter->hw.prev_count, val); counter->hw.idx = hwc_index[i] + 1; write_pmc(counter->hw.idx, val); - if (counter->user_page) - perf_counter_update_userpage(counter); + perf_counter_update_userpage(counter); } mb(); cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; @@ -574,8 +573,7 @@ static void power_perf_disable(struct perf_counter *counter) ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr); write_pmc(counter->hw.idx, 0); counter->hw.idx = 0; - if (counter->user_page) - perf_counter_update_userpage(counter); + perf_counter_update_userpage(counter); break; } } @@ -702,8 +700,7 @@ static void record_and_restart(struct perf_counter *counter, long val, write_pmc(counter->hw.idx, val); atomic64_set(&counter->hw.prev_count, val); atomic64_set(&counter->hw.period_left, left); - if (counter->user_page) - perf_counter_update_userpage(counter); + perf_counter_update_userpage(counter); /* * Finally record data if requested. diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 40b324e91bf..2b5e66d5ebd 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -152,6 +152,8 @@ struct perf_counter_mmap_page { __u32 lock; /* seqlock for synchronization */ __u32 index; /* hardware counter identifier */ __s64 offset; /* add to hardware counter value */ + + __u32 data_head; /* head in the data section */ }; #ifdef __KERNEL__ @@ -218,21 +220,6 @@ struct hw_perf_counter { #endif }; -/* - * Hardcoded buffer length limit for now, for IRQ-fed events: - */ -#define PERF_DATA_BUFLEN 2048 - -/** - * struct perf_data - performance counter IRQ data sampling ... 
- */ -struct perf_data { - int len; - int rd_idx; - int overrun; - u8 data[PERF_DATA_BUFLEN]; -}; - struct perf_counter; /** @@ -256,6 +243,14 @@ enum perf_counter_active_state { struct file; +struct perf_mmap_data { + struct rcu_head rcu_head; + int nr_pages; + atomic_t head; + struct perf_counter_mmap_page *user_page; + void *data_pages[0]; +}; + /** * struct perf_counter - performance counter kernel representation: */ @@ -289,16 +284,15 @@ struct perf_counter { int oncpu; int cpu; - /* pointer to page shared with userspace via mmap */ - unsigned long user_page; + /* mmap bits */ + struct mutex mmap_mutex; + atomic_t mmap_count; + struct perf_mmap_data *data; - /* read() / irq related data */ + /* poll related */ wait_queue_head_t waitq; /* optional: for NMIs */ int wakeup_pending; - struct perf_data *irqdata; - struct perf_data *usrdata; - struct perf_data data[2]; void (*destroy)(struct perf_counter *); struct rcu_head rcu_head; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index d9cfd902140..0dfe91094fd 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -4,7 +4,8 @@ * Copyright(C) 2008 Thomas Gleixner * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar * - * For licencing details see kernel-base/COPYING + * + * For licensing details see kernel-base/COPYING */ #include @@ -1022,66 +1023,6 @@ static u64 perf_counter_read(struct perf_counter *counter) return atomic64_read(&counter->count); } -/* - * Cross CPU call to switch performance data pointers - */ -static void __perf_switch_irq_data(void *info) -{ - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); - struct perf_counter *counter = info; - struct perf_counter_context *ctx = counter->ctx; - struct perf_data *oldirqdata = counter->irqdata; - - /* - * If this is a task context, we need to check whether it is - * the current task context of this cpu. If not it has been - * scheduled out before the smp call arrived. 
- */ - if (ctx->task) { - if (cpuctx->task_ctx != ctx) - return; - spin_lock(&ctx->lock); - } - - /* Change the pointer NMI safe */ - atomic_long_set((atomic_long_t *)&counter->irqdata, - (unsigned long) counter->usrdata); - counter->usrdata = oldirqdata; - - if (ctx->task) - spin_unlock(&ctx->lock); -} - -static struct perf_data *perf_switch_irq_data(struct perf_counter *counter) -{ - struct perf_counter_context *ctx = counter->ctx; - struct perf_data *oldirqdata = counter->irqdata; - struct task_struct *task = ctx->task; - - if (!task) { - smp_call_function_single(counter->cpu, - __perf_switch_irq_data, - counter, 1); - return counter->usrdata; - } - -retry: - spin_lock_irq(&ctx->lock); - if (counter->state != PERF_COUNTER_STATE_ACTIVE) { - counter->irqdata = counter->usrdata; - counter->usrdata = oldirqdata; - spin_unlock_irq(&ctx->lock); - return oldirqdata; - } - spin_unlock_irq(&ctx->lock); - task_oncpu_function_call(task, __perf_switch_irq_data, counter); - /* Might have failed, because task was scheduled out */ - if (counter->irqdata == oldirqdata) - goto retry; - - return counter->usrdata; -} - static void put_context(struct perf_counter_context *ctx) { if (ctx->task) @@ -1177,7 +1118,6 @@ static int perf_release(struct inode *inode, struct file *file) mutex_unlock(&counter->mutex); mutex_unlock(&ctx->mutex); - free_page(counter->user_page); free_counter(counter); put_context(ctx); @@ -1192,7 +1132,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) { u64 cntval; - if (count != sizeof(cntval)) + if (count < sizeof(cntval)) return -EINVAL; /* @@ -1210,122 +1150,21 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval); } -static ssize_t -perf_copy_usrdata(struct perf_data *usrdata, char __user *buf, size_t count) -{ - if (!usrdata->len) - return 0; - - count = min(count, (size_t)usrdata->len); - if (copy_to_user(buf, usrdata->data + usrdata->rd_idx, count)) - return -EFAULT; - - /* Adjust the counters */ - usrdata->len -= count; - if (!usrdata->len) - usrdata->rd_idx = 0; - else - usrdata->rd_idx += count; - - return count; -} - -static ssize_t -perf_read_irq_data(struct perf_counter *counter, - char __user *buf, - size_t count, - int nonblocking) -{ - struct perf_data *irqdata, *usrdata; - DECLARE_WAITQUEUE(wait, current); - ssize_t res, res2; - - irqdata = counter->irqdata; - usrdata = counter->usrdata; - - if (usrdata->len + irqdata->len >= count) - goto read_pending; - - if (nonblocking) - return -EAGAIN; - - spin_lock_irq(&counter->waitq.lock); - __add_wait_queue(&counter->waitq, &wait); - for (;;) { - set_current_state(TASK_INTERRUPTIBLE); - if (usrdata->len + irqdata->len >= count) - break; - - if (signal_pending(current)) - break; - - if (counter->state == PERF_COUNTER_STATE_ERROR) - break; - - spin_unlock_irq(&counter->waitq.lock); - schedule(); - spin_lock_irq(&counter->waitq.lock); - } - __remove_wait_queue(&counter->waitq, &wait); - __set_current_state(TASK_RUNNING); - spin_unlock_irq(&counter->waitq.lock); - - if (usrdata->len + irqdata->len < count && - counter->state != PERF_COUNTER_STATE_ERROR) - return -ERESTARTSYS; -read_pending: - mutex_lock(&counter->mutex); - - /* Drain pending data first: */ - res = perf_copy_usrdata(usrdata, buf, count); - if (res < 0 || res == count) - goto out; - - /* Switch irq buffer: */ - usrdata = perf_switch_irq_data(counter); - res2 = perf_copy_usrdata(usrdata, buf + res, count - res); - if (res2 < 0) { - if (!res) 
- res = -EFAULT; - } else { - res += res2; - } -out: - mutex_unlock(&counter->mutex); - - return res; -} - static ssize_t perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct perf_counter *counter = file->private_data; - switch (counter->hw_event.record_type) { - case PERF_RECORD_SIMPLE: - return perf_read_hw(counter, buf, count); - - case PERF_RECORD_IRQ: - case PERF_RECORD_GROUP: - return perf_read_irq_data(counter, buf, count, - file->f_flags & O_NONBLOCK); - } - return -EINVAL; + return perf_read_hw(counter, buf, count); } static unsigned int perf_poll(struct file *file, poll_table *wait) { struct perf_counter *counter = file->private_data; - unsigned int events = 0; - unsigned long flags; + unsigned int events = POLLIN; poll_wait(file, &counter->waitq, wait); - spin_lock_irqsave(&counter->waitq.lock, flags); - if (counter->usrdata->len || counter->irqdata->len) - events |= POLLIN; - spin_unlock_irqrestore(&counter->waitq.lock, flags); - return events; } @@ -1347,78 +1186,207 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return err; } -void perf_counter_update_userpage(struct perf_counter *counter) +static void __perf_counter_update_userpage(struct perf_counter *counter, + struct perf_mmap_data *data) { - struct perf_counter_mmap_page *userpg; - - if (!counter->user_page) - return; - userpg = (struct perf_counter_mmap_page *) counter->user_page; + struct perf_counter_mmap_page *userpg = data->user_page; + /* + * Disable preemption so as to not let the corresponding user-space + * spin too long if we get preempted. + */ + preempt_disable(); ++userpg->lock; smp_wmb(); userpg->index = counter->hw.idx; userpg->offset = atomic64_read(&counter->count); if (counter->state == PERF_COUNTER_STATE_ACTIVE) userpg->offset -= atomic64_read(&counter->hw.prev_count); + + userpg->data_head = atomic_read(&data->head); smp_wmb(); ++userpg->lock; + preempt_enable(); +} + +void perf_counter_update_userpage(struct perf_counter *counter) +{ + struct perf_mmap_data *data; + + rcu_read_lock(); + data = rcu_dereference(counter->data); + if (data) + __perf_counter_update_userpage(counter, data); + rcu_read_unlock(); } static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct perf_counter *counter = vma->vm_file->private_data; + struct perf_mmap_data *data; + int ret = VM_FAULT_SIGBUS; - if (!counter->user_page) - return VM_FAULT_SIGBUS; + rcu_read_lock(); + data = rcu_dereference(counter->data); + if (!data) + goto unlock; + + if (vmf->pgoff == 0) { + vmf->page = virt_to_page(data->user_page); + } else { + int nr = vmf->pgoff - 1; - vmf->page = virt_to_page(counter->user_page); + if ((unsigned)nr > data->nr_pages) + goto unlock; + + vmf->page = virt_to_page(data->data_pages[nr]); + } get_page(vmf->page); + ret = 0; +unlock: + rcu_read_unlock(); + + return ret; +} + +static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages) +{ + struct perf_mmap_data *data; + unsigned long size; + int i; + + WARN_ON(atomic_read(&counter->mmap_count)); + + size = sizeof(struct perf_mmap_data); + size += nr_pages * sizeof(void *); + + data = kzalloc(size, GFP_KERNEL); + if (!data) + goto fail; + + data->user_page = (void *)get_zeroed_page(GFP_KERNEL); + if (!data->user_page) + goto fail_user_page; + + for (i = 0; i < nr_pages; i++) { + data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL); + if (!data->data_pages[i]) + goto fail_data_pages; + } + + data->nr_pages = nr_pages; + + rcu_assign_pointer(counter->data, 
data); + return 0; + +fail_data_pages: + for (i--; i >= 0; i--) + free_page((unsigned long)data->data_pages[i]); + + free_page((unsigned long)data->user_page); + +fail_user_page: + kfree(data); + +fail: + return -ENOMEM; +} + +static void __perf_mmap_data_free(struct rcu_head *rcu_head) +{ + struct perf_mmap_data *data = container_of(rcu_head, + struct perf_mmap_data, rcu_head); + int i; + + free_page((unsigned long)data->user_page); + for (i = 0; i < data->nr_pages; i++) + free_page((unsigned long)data->data_pages[i]); + kfree(data); +} + +static void perf_mmap_data_free(struct perf_counter *counter) +{ + struct perf_mmap_data *data = counter->data; + + WARN_ON(atomic_read(&counter->mmap_count)); + + rcu_assign_pointer(counter->data, NULL); + call_rcu(&data->rcu_head, __perf_mmap_data_free); +} + +static void perf_mmap_open(struct vm_area_struct *vma) +{ + struct perf_counter *counter = vma->vm_file->private_data; + + atomic_inc(&counter->mmap_count); +} + +static void perf_mmap_close(struct vm_area_struct *vma) +{ + struct perf_counter *counter = vma->vm_file->private_data; + + if (atomic_dec_and_mutex_lock(&counter->mmap_count, + &counter->mmap_mutex)) { + perf_mmap_data_free(counter); + mutex_unlock(&counter->mmap_mutex); + } } static struct vm_operations_struct perf_mmap_vmops = { + .open = perf_mmap_open, + .close = perf_mmap_close, .fault = perf_mmap_fault, }; static int perf_mmap(struct file *file, struct vm_area_struct *vma) { struct perf_counter *counter = file->private_data; - unsigned long userpg; + unsigned long vma_size; + unsigned long nr_pages; + unsigned long locked, lock_limit; + int ret = 0; if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE)) return -EINVAL; - if (vma->vm_end - vma->vm_start != PAGE_SIZE) + + vma_size = vma->vm_end - vma->vm_start; + nr_pages = (vma_size / PAGE_SIZE) - 1; + + if (nr_pages == 0 || !is_power_of_2(nr_pages)) return -EINVAL; - /* - * For now, restrict to the case of a hardware counter - * on the current task. 
- */ - if (is_software_counter(counter) || counter->task != current) + if (vma_size != PAGE_SIZE * (1 + nr_pages)) return -EINVAL; - userpg = counter->user_page; - if (!userpg) { - userpg = get_zeroed_page(GFP_KERNEL); - mutex_lock(&counter->mutex); - if (counter->user_page) { - free_page(userpg); - userpg = counter->user_page; - } else { - counter->user_page = userpg; - } - mutex_unlock(&counter->mutex); - if (!userpg) - return -ENOMEM; - } + if (vma->vm_pgoff != 0) + return -EINVAL; + + locked = vma_size >> PAGE_SHIFT; + locked += vma->vm_mm->locked_vm; - perf_counter_update_userpage(counter); + lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; + lock_limit >>= PAGE_SHIFT; + + if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) + return -EPERM; + + mutex_lock(&counter->mmap_mutex); + if (atomic_inc_not_zero(&counter->mmap_count)) + goto out; + + WARN_ON(counter->data); + ret = perf_mmap_data_alloc(counter, nr_pages); + if (!ret) + atomic_set(&counter->mmap_count, 1); +out: + mutex_unlock(&counter->mmap_mutex); vma->vm_flags &= ~VM_MAYWRITE; vma->vm_flags |= VM_RESERVED; vma->vm_ops = &perf_mmap_vmops; - return 0; + + return ret; } static const struct file_operations perf_fops = { @@ -1434,30 +1402,94 @@ static const struct file_operations perf_fops = { * Output */ -static void perf_counter_store_irq(struct perf_counter *counter, u64 data) +static int perf_output_write(struct perf_counter *counter, int nmi, + void *buf, ssize_t size) { - struct perf_data *irqdata = counter->irqdata; + struct perf_mmap_data *data; + unsigned int offset, head, nr; + unsigned int len; + int ret, wakeup; - if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) { - irqdata->overrun++; - } else { - u64 *p = (u64 *) &irqdata->data[irqdata->len]; + rcu_read_lock(); + ret = -ENOSPC; + data = rcu_dereference(counter->data); + if (!data) + goto out; + + if (!data->nr_pages) + goto out; + + ret = -EINVAL; + if (size > PAGE_SIZE) + goto out; + + do { + offset = head = atomic_read(&data->head); + head += sizeof(u64); + } while (atomic_cmpxchg(&data->head, offset, head) != offset); + + wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT); - *p = data; - irqdata->len += sizeof(u64); + nr = (offset >> PAGE_SHIFT) & (data->nr_pages - 1); + offset &= PAGE_SIZE - 1; + + len = min_t(unsigned int, PAGE_SIZE - offset, size); + memcpy(data->data_pages[nr] + offset, buf, len); + size -= len; + + if (size) { + nr = (nr + 1) & (data->nr_pages - 1); + memcpy(data->data_pages[nr], buf + len, size); + } + + /* + * generate a poll() wakeup for every page boundary crossed + */ + if (wakeup) { + __perf_counter_update_userpage(counter, data); + if (nmi) { + counter->wakeup_pending = 1; + set_perf_counter_pending(); + } else + wake_up(&counter->waitq); } + ret = 0; +out: + rcu_read_unlock(); + + return ret; } -static void perf_counter_handle_group(struct perf_counter *counter) +static void perf_output_simple(struct perf_counter *counter, + int nmi, struct pt_regs *regs) +{ + u64 entry; + + entry = instruction_pointer(regs); + + perf_output_write(counter, nmi, &entry, sizeof(entry)); +} + +struct group_entry { + u64 event; + u64 counter; +}; + +static void perf_output_group(struct perf_counter *counter, int nmi) { struct perf_counter *leader, *sub; leader = counter->group_leader; list_for_each_entry(sub, &leader->sibling_list, list_entry) { + struct group_entry entry; + if (sub != counter) sub->hw_ops->read(sub); - perf_counter_store_irq(counter, sub->hw_event.config); - perf_counter_store_irq(counter, 
atomic64_read(&sub->count)); + + entry.event = sub->hw_event.config; + entry.counter = atomic64_read(&sub->count); + + perf_output_write(counter, nmi, &entry, sizeof(entry)); } } @@ -1469,19 +1501,13 @@ void perf_counter_output(struct perf_counter *counter, return; case PERF_RECORD_IRQ: - perf_counter_store_irq(counter, instruction_pointer(regs)); + perf_output_simple(counter, nmi, regs); break; case PERF_RECORD_GROUP: - perf_counter_handle_group(counter); + perf_output_group(counter, nmi); break; } - - if (nmi) { - counter->wakeup_pending = 1; - set_perf_counter_pending(); - } else - wake_up(&counter->waitq); } /* @@ -1967,10 +1993,10 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, INIT_LIST_HEAD(&counter->sibling_list); init_waitqueue_head(&counter->waitq); + mutex_init(&counter->mmap_mutex); + INIT_LIST_HEAD(&counter->child_list); - counter->irqdata = &counter->data[0]; - counter->usrdata = &counter->data[1]; counter->cpu = cpu; counter->hw_event = *hw_event; counter->wakeup_pending = 0; -- cgit v1.2.3 From 803d4f3980f6e220b27311a283aab0a4d68b6709 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 23 Mar 2009 18:22:11 +0100 Subject: perf_counter tools: update to new syscall ABI update the kerneltop userspace to work with the latest syscall ABI Signed-off-by: Peter Zijlstra Cc: Wu Fengguang Cc: Paul Mackerras Orig-LKML-Reference: <20090323172417.559643732@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 235 ++++++++++++++++++++++----------- 1 file changed, 157 insertions(+), 78 deletions(-) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index 81a68aac137..a72c9bd2807 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -87,20 +87,90 @@ #include -#include "perfcounters.h" +#include "include/linux/perf_counter.h" +/* + * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all + * counters in the current task. 
+ */ +#define PR_TASK_PERF_COUNTERS_DISABLE 31 +#define PR_TASK_PERF_COUNTERS_ENABLE 32 + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +#define rdclock() \ +({ \ + struct timespec ts; \ + \ + clock_gettime(CLOCK_MONOTONIC, &ts); \ + ts.tv_sec * 1000000000ULL + ts.tv_nsec; \ +}) + +/* + * Pick up some kernel type conventions: + */ +#define __user +#define asmlinkage + +typedef unsigned int __u32; +typedef unsigned long long __u64; +typedef long long __s64; + + +#ifdef __x86_64__ +# define __NR_perf_counter_open 295 +#endif + +#ifdef __i386__ +# define __NR_perf_counter_open 333 +#endif + +#ifdef __powerpc__ +#define __NR_perf_counter_open 319 +#endif + +asmlinkage int sys_perf_counter_open( + struct perf_counter_hw_event *hw_event_uptr __user, + pid_t pid, + int cpu, + int group_fd, + unsigned long flags) +{ + int ret; + + ret = syscall( + __NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags); +#if defined(__x86_64__) || defined(__i386__) + if (ret < 0 && ret > -4096) { + errno = -ret; + ret = -1; + } +#endif + return ret; +} + #define MAX_COUNTERS 64 #define MAX_NR_CPUS 256 -#define DEF_PERFSTAT_EVENTS { -2, -5, -4, -3, 0, 1, 2, 3} +#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id)) static int run_perfstat = 0; static int system_wide = 0; static int nr_counters = 0; -static __s64 event_id[MAX_COUNTERS] = DEF_PERFSTAT_EVENTS; -static int event_raw[MAX_COUNTERS]; +static __u64 event_id[MAX_COUNTERS] = { + EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), + EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), + EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), + EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), + + EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), + EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), + EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), + EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), +}; +static int default_interval = 100000; static int event_count[MAX_COUNTERS]; static int fd[MAX_NR_CPUS][MAX_COUNTERS]; @@ -156,49 +226,63 @@ static char *sw_event_names[] = { "pagefaults", "context switches", "CPU migrations", + "minor faults", + "major faults", }; struct event_symbol { - int event; + __u64 event; char *symbol; }; static struct event_symbol event_symbols[] = { - {PERF_COUNT_CPU_CYCLES, "cpu-cycles", }, - {PERF_COUNT_CPU_CYCLES, "cycles", }, - {PERF_COUNT_INSTRUCTIONS, "instructions", }, - {PERF_COUNT_CACHE_REFERENCES, "cache-references", }, - {PERF_COUNT_CACHE_MISSES, "cache-misses", }, - {PERF_COUNT_BRANCH_INSTRUCTIONS, "branch-instructions", }, - {PERF_COUNT_BRANCH_INSTRUCTIONS, "branches", }, - {PERF_COUNT_BRANCH_MISSES, "branch-misses", }, - {PERF_COUNT_BUS_CYCLES, "bus-cycles", }, - {PERF_COUNT_CPU_CLOCK, "cpu-ticks", }, - {PERF_COUNT_CPU_CLOCK, "ticks", }, - {PERF_COUNT_TASK_CLOCK, "task-ticks", }, - {PERF_COUNT_PAGE_FAULTS, "page-faults", }, - {PERF_COUNT_PAGE_FAULTS, "faults", }, - {PERF_COUNT_CONTEXT_SWITCHES, "context-switches", }, - {PERF_COUNT_CONTEXT_SWITCHES, "cs", }, - {PERF_COUNT_CPU_MIGRATIONS, "cpu-migrations", }, - {PERF_COUNT_CPU_MIGRATIONS, "migrations", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cpu-cycles", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cycles", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), "instructions", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), "cache-references", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), "cache-misses", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branch-instructions", }, + 
{EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branches", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES), "branch-misses", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES), "bus-cycles", }, + + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK), "cpu-clock", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), "task-clock", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "page-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN), "minor-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ), "major-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "context-switches", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "cs", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "cpu-migrations", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "migrations", }, }; +#define __PERF_COUNTER_FIELD(config, name) \ + ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) + +#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) +#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) +#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) +#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) + static void display_events_help(void) { unsigned int i; - int e; + __u64 e; printf( " -e EVENT --event=EVENT # symbolic-name abbreviations"); - for (i = 0, e = PERF_HW_EVENTS_MAX; i < ARRAY_SIZE(event_symbols); i++) { - if (e != event_symbols[i].event) { - e = event_symbols[i].event; - printf( - "\n %2d: %-20s", e, event_symbols[i].symbol); - } else - printf(" %s", event_symbols[i].symbol); + for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { + int type, id; + + e = event_symbols[i].event; + type = PERF_COUNTER_TYPE(e); + id = PERF_COUNTER_ID(e); + + printf("\n %d:%d: %-20s", + type, id, event_symbols[i].symbol); } printf("\n" @@ -249,44 +333,51 @@ static void display_help(void) exit(0); } -static int type_valid(int type) -{ - if (type >= PERF_HW_EVENTS_MAX) - return 0; - if (type <= PERF_SW_EVENTS_MIN) - return 0; - - return 1; -} - static char *event_name(int ctr) { - __s64 type = event_id[ctr]; + __u64 config = event_id[ctr]; + int type = PERF_COUNTER_TYPE(config); + int id = PERF_COUNTER_ID(config); static char buf[32]; - if (event_raw[ctr]) { - sprintf(buf, "raw 0x%llx", (long long)type); + if (PERF_COUNTER_RAW(config)) { + sprintf(buf, "raw 0x%llx", PERF_COUNTER_CONFIG(config)); return buf; } - if (!type_valid(type)) - return "unknown"; - if (type >= 0) - return hw_event_names[type]; + switch (type) { + case PERF_TYPE_HARDWARE: + if (id < PERF_HW_EVENTS_MAX) + return hw_event_names[id]; + return "unknown-hardware"; + + case PERF_TYPE_SOFTWARE: + if (id < PERF_SW_EVENTS_MAX) + return sw_event_names[id]; + return "unknown-software"; - return sw_event_names[-type-1]; + default: + break; + } + + return "unknown"; } /* * Each event can have multiple symbolic names. * Symbolic names are (almost) exactly matched. 
*/ -static int match_event_symbols(char *str) +static __u64 match_event_symbols(char *str) { + __u64 config, id; + int type; unsigned int i; - if (isdigit(str[0]) || str[0] == '-') - return atoi(str); + if (sscanf(str, "r%llx", &config) == 1) + return config | PERF_COUNTER_RAW_MASK; + + if (sscanf(str, "%d:%llu", &type, &id) == 2) + return EID(type, id); for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { if (!strncmp(str, event_symbols[i].symbol, @@ -294,31 +385,22 @@ static int match_event_symbols(char *str) return event_symbols[i].event; } - return PERF_HW_EVENTS_MAX; + return ~0ULL; } static int parse_events(char *str) { - __s64 type; - int raw; + __u64 config; again: if (nr_counters == MAX_COUNTERS) return -1; - raw = 0; - if (*str == 'r') { - raw = 1; - ++str; - type = strtol(str, NULL, 16); - } else { - type = match_event_symbols(str); - if (!type_valid(type)) - return -1; - } + config = match_event_symbols(str); + if (config == ~0ULL) + return -1; - event_id[nr_counters] = type; - event_raw[nr_counters] = raw; + event_id[nr_counters] = config; nr_counters++; str = strstr(str, ","); @@ -342,8 +424,7 @@ static void create_perfstat_counter(int counter) struct perf_counter_hw_event hw_event; memset(&hw_event, 0, sizeof(hw_event)); - hw_event.type = event_id[counter]; - hw_event.raw = event_raw[counter]; + hw_event.config = event_id[counter]; hw_event.record_type = PERF_RECORD_SIMPLE; hw_event.nmi = 0; @@ -428,7 +509,7 @@ int do_perfstat(int argc, char *argv[]) count += single_count; } - if (!event_raw[counter] && + if (!PERF_COUNTER_RAW(event_id[counter]) && (event_id[counter] == PERF_COUNT_CPU_CLOCK || event_id[counter] == PERF_COUNT_TASK_CLOCK)) { @@ -911,7 +992,7 @@ static void record_ip(uint64_t ip, int counter) assert(left <= middle && middle <= right); if (!(left <= ip && ip <= right)) { printf(" left: %016lx\n", left); - printf(" ip: %016lx\n", ip); + printf(" ip: %016llx\n", ip); printf("right: %016lx\n", right); } assert(left <= ip && ip <= right); @@ -983,7 +1064,7 @@ static void process_options(int argc, char *argv[]) switch (c) { case 'a': system_wide = 1; break; - case 'c': event_count[nr_counters] = atoi(optarg); break; + case 'c': default_interval = atoi(optarg); break; case 'C': /* CPU and PID are mutually exclusive */ if (tid != -1) { @@ -1032,10 +1113,7 @@ static void process_options(int argc, char *argv[]) if (event_count[counter]) continue; - if (event_id[counter] < PERF_HW_EVENTS_MAX) - event_count[counter] = default_count[event_id[counter]]; - else - event_count[counter] = 100000; + event_count[counter] = default_interval; } } @@ -1070,12 +1148,13 @@ int main(int argc, char *argv[]) cpu = i; memset(&hw_event, 0, sizeof(hw_event)); - hw_event.type = event_id[counter]; - hw_event.raw = event_raw[counter]; + hw_event.config = event_id[counter]; hw_event.irq_period = event_count[counter]; hw_event.record_type = PERF_RECORD_IRQ; hw_event.nmi = nmi; + printf("FOO: %d %llx %llx\n", counter, event_id[counter], event_count[counter]); + fd[i][counter] = sys_perf_counter_open(&hw_event, tid, cpu, group_fd, 0); fcntl(fd[i][counter], F_SETFL, O_NONBLOCK); if (fd[i][counter] < 0) { -- cgit v1.2.3 From bcbcb37cdb67d8100acfa66df40c4d636c28c4d1 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 23 Mar 2009 18:22:12 +0100 Subject: perf_counter tools: use mmap() output update kerneltop to use the mmap() output to gather overflow information Signed-off-by: Peter Zijlstra Cc: Wu Fengguang Cc: Paul Mackerras Orig-LKML-Reference: <20090323172417.677932499@chello.nl> Signed-off-by: 
Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 93 +++++++++++++++++++++++++++++----- 1 file changed, 79 insertions(+), 14 deletions(-) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index a72c9bd2807..80b790553ec 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -84,6 +84,7 @@ #include #include #include +#include #include @@ -119,17 +120,25 @@ typedef long long __s64; #ifdef __x86_64__ -# define __NR_perf_counter_open 295 +#define __NR_perf_counter_open 295 +#define rmb() asm volatile("lfence" ::: "memory") +#define cpu_relax() asm volatile("rep; nop" ::: "memory"); #endif #ifdef __i386__ -# define __NR_perf_counter_open 333 +#define __NR_perf_counter_open 333 +#define rmb() asm volatile("lfence" ::: "memory") +#define cpu_relax() asm volatile("rep; nop" ::: "memory"); #endif #ifdef __powerpc__ #define __NR_perf_counter_open 319 +#define rmb() asm volatile ("sync" ::: "memory") +#define cpu_relax() asm volatile ("" ::: "memory"); #endif +#define unlikely(x) __builtin_expect(!!(x), 0) + asmlinkage int sys_perf_counter_open( struct perf_counter_hw_event *hw_event_uptr __user, pid_t pid, @@ -181,6 +190,7 @@ static int profile_cpu = -1; static int nr_cpus = 0; static int nmi = 1; static int group = 0; +static unsigned int page_size; static char *vmlinux; @@ -1117,16 +1127,68 @@ static void process_options(int argc, char *argv[]) } } +struct mmap_data { + int counter; + void *base; + unsigned int mask; + unsigned int prev; +}; + +static unsigned int mmap_read_head(struct mmap_data *md) +{ + struct perf_counter_mmap_page *pc = md->base; + unsigned int seq, head; + +repeat: + rmb(); + seq = pc->lock; + + if (unlikely(seq & 1)) { + cpu_relax(); + goto repeat; + } + + head = pc->data_head; + + rmb(); + if (pc->lock != seq) + goto repeat; + + return head; +} + +static void mmap_read(struct mmap_data *md) +{ + unsigned int head = mmap_read_head(md); + unsigned int old = md->prev; + unsigned char *data = md->base + page_size; + + if (head - old > md->mask) { + printf("ERROR: failed to keep up with mmap data\n"); + exit(-1); + } + + for (; old != head;) { + __u64 *ptr = (__u64 *)&data[old & md->mask]; + old += sizeof(__u64); + + process_event(*ptr, md->counter); + } + + md->prev = old; +} + int main(int argc, char *argv[]) { struct pollfd event_array[MAX_NR_CPUS][MAX_COUNTERS]; + struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; struct perf_counter_hw_event hw_event; int i, counter, group_fd; unsigned int cpu; - uint64_t ip; - ssize_t res; int ret; + page_size = sysconf(_SC_PAGE_SIZE); + process_options(argc, argv); nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); @@ -1153,8 +1215,6 @@ int main(int argc, char *argv[]) hw_event.record_type = PERF_RECORD_IRQ; hw_event.nmi = nmi; - printf("FOO: %d %llx %llx\n", counter, event_id[counter], event_count[counter]); - fd[i][counter] = sys_perf_counter_open(&hw_event, tid, cpu, group_fd, 0); fcntl(fd[i][counter], F_SETFL, O_NONBLOCK); if (fd[i][counter] < 0) { @@ -1174,6 +1234,17 @@ int main(int argc, char *argv[]) event_array[i][counter].fd = fd[i][counter]; event_array[i][counter].events = POLLIN; + + mmap_array[i][counter].counter = counter; + mmap_array[i][counter].prev = 0; + mmap_array[i][counter].mask = 2*page_size - 1; + mmap_array[i][counter].base = mmap(NULL, 3*page_size, + PROT_READ, MAP_SHARED, fd[i][counter], 0); + if (mmap_array[i][counter].base == MAP_FAILED) { + printf("kerneltop error: failed to mmap with %d (%s)\n", + errno, 
strerror(errno)); + exit(-1); + } } } @@ -1188,14 +1259,8 @@ int main(int argc, char *argv[]) int hits = events; for (i = 0; i < nr_cpus; i++) { - for (counter = 0; counter < nr_counters; counter++) { - res = read(fd[i][counter], (char *) &ip, sizeof(ip)); - if (res > 0) { - assert(res == sizeof(ip)); - - process_event(ip, counter); - } - } + for (counter = 0; counter < nr_counters; counter++) + mmap_read(&mmap_array[i][counter]); } if (time(NULL) >= last_refresh + delay_secs) { -- cgit v1.2.3 From 383c5f8cd7d253be0017d8d5a39cbb78aaf70206 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 23 Mar 2009 21:49:25 +0100 Subject: perf_counter tools: tidy up in-kernel dependencies Remove now unified perfstat.c and perf_counter.h, and link to the in-kernel perf_counter.h. Cc: Wu Fengguang Cc: Paul Mackerras Acked-by: Peter Zijlstra Orig-LKML-Reference: <20090323172417.677932499@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 2 +- Documentation/perf_counter/kerneltop.c | 2 +- Documentation/perf_counter/perfcounters.h | 142 ----------------- Documentation/perf_counter/perfstat.c | 251 ------------------------------ 4 files changed, 2 insertions(+), 395 deletions(-) delete mode 100644 Documentation/perf_counter/perfcounters.h delete mode 100644 Documentation/perf_counter/perfstat.c diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index b45749753fc..666da95a787 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -2,7 +2,7 @@ BINS = kerneltop perfstat all: $(BINS) -kerneltop: kerneltop.c perfcounters.h +kerneltop: kerneltop.c ../../include/linux/perf_counter.h cc -O6 -Wall -lrt `pkg-config --cflags --libs glib-2.0` -o $@ $< perfstat: kerneltop diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index 80b790553ec..25e80bc4455 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -88,7 +88,7 @@ #include -#include "include/linux/perf_counter.h" +#include "../../include/linux/perf_counter.h" /* diff --git a/Documentation/perf_counter/perfcounters.h b/Documentation/perf_counter/perfcounters.h deleted file mode 100644 index 32e24b9154a..00000000000 --- a/Documentation/perf_counter/perfcounters.h +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Ioctls that can be done on a perf counter fd: - */ -#define PERF_COUNTER_IOC_ENABLE _IO('$', 0) -#define PERF_COUNTER_IOC_DISABLE _IO('$', 1) - -/* - * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all - * counters in the current task. 
- */ -#define PR_TASK_PERF_COUNTERS_DISABLE 31 -#define PR_TASK_PERF_COUNTERS_ENABLE 32 - -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) - -#define rdclock() \ -({ \ - struct timespec ts; \ - \ - clock_gettime(CLOCK_MONOTONIC, &ts); \ - ts.tv_sec * 1000000000ULL + ts.tv_nsec; \ -}) - -/* - * Pick up some kernel type conventions: - */ -#define __user -#define asmlinkage - -typedef unsigned int __u32; -typedef unsigned long long __u64; -typedef long long __s64; - -/* - * User-space ABI bits: - */ - -/* - * Generalized performance counter event types, used by the hw_event.type - * parameter of the sys_perf_counter_open() syscall: - */ -enum hw_event_types { - /* - * Common hardware events, generalized by the kernel: - */ - PERF_COUNT_CPU_CYCLES = 0, - PERF_COUNT_INSTRUCTIONS = 1, - PERF_COUNT_CACHE_REFERENCES = 2, - PERF_COUNT_CACHE_MISSES = 3, - PERF_COUNT_BRANCH_INSTRUCTIONS = 4, - PERF_COUNT_BRANCH_MISSES = 5, - PERF_COUNT_BUS_CYCLES = 6, - - PERF_HW_EVENTS_MAX = 7, - - /* - * Special "software" counters provided by the kernel, even if - * the hardware does not support performance counters. These - * counters measure various physical and sw events of the - * kernel (and allow the profiling of them as well): - */ - PERF_COUNT_CPU_CLOCK = -1, - PERF_COUNT_TASK_CLOCK = -2, - PERF_COUNT_PAGE_FAULTS = -3, - PERF_COUNT_CONTEXT_SWITCHES = -4, - PERF_COUNT_CPU_MIGRATIONS = -5, - - PERF_SW_EVENTS_MIN = -6, -}; - -/* - * IRQ-notification data record type: - */ -enum perf_counter_record_type { - PERF_RECORD_SIMPLE = 0, - PERF_RECORD_IRQ = 1, - PERF_RECORD_GROUP = 2, -}; - -/* - * Hardware event to monitor via a performance monitoring counter: - */ -struct perf_counter_hw_event { - __s64 type; - - __u64 irq_period; - __u64 record_type; - __u64 read_format; - - __u64 disabled : 1, /* off by default */ - nmi : 1, /* NMI sampling */ - raw : 1, /* raw event type */ - inherit : 1, /* children inherit it */ - pinned : 1, /* must always be on PMU */ - exclusive : 1, /* only group on PMU */ - exclude_user : 1, /* don't count user */ - exclude_kernel : 1, /* ditto kernel */ - exclude_hv : 1, /* ditto hypervisor */ - exclude_idle : 1, /* don't count when idle */ - - __reserved_1 : 54; - - __u32 extra_config_len; - __u32 __reserved_4; - - __u64 __reserved_2; - __u64 __reserved_3; -}; - - -#ifdef __x86_64__ -# define __NR_perf_counter_open 295 -#endif - -#ifdef __i386__ -# define __NR_perf_counter_open 333 -#endif - -#ifdef __powerpc__ -#define __NR_perf_counter_open 319 -#endif - -asmlinkage int sys_perf_counter_open( - - struct perf_counter_hw_event *hw_event_uptr __user, - pid_t pid, - int cpu, - int group_fd, - unsigned long flags) -{ - int ret; - - ret = syscall( - __NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags); -#if defined(__x86_64__) || defined(__i386__) - if (ret < 0 && ret > -4096) { - errno = -ret; - ret = -1; - } -#endif - return ret; -} diff --git a/Documentation/perf_counter/perfstat.c b/Documentation/perf_counter/perfstat.c deleted file mode 100644 index fd594468e65..00000000000 --- a/Documentation/perf_counter/perfstat.c +++ /dev/null @@ -1,251 +0,0 @@ -/* - * perfstat: /usr/bin/time -alike performance counter statistics utility - * - * It summarizes the counter events of all tasks (and child tasks), - * covering all CPUs that the command (or workload) executes on. - * It only counts the per-task events of the workload started, - * independent of how many other tasks run on those CPUs. 
- * - * Build with: cc -O2 -g -lrt -Wall -W -o perfstat perfstat.c - * - * Sample output: - * - - $ ./perfstat -e 1 -e 3 -e 5 ls -lR /usr/include/ >/dev/null - - Performance counter stats for 'ls': - - 163516953 instructions - 2295 cache-misses - 2855182 branch-misses - - * - * Copyright (C) 2008, Red Hat Inc, Ingo Molnar - * - * Released under the GPLv2 (not later). - * - * Percpu counter support by: Yanmin Zhang - * Symbolic event options by: Wu Fengguang - */ -#define _GNU_SOURCE - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "perfcounters.h" - -static int nr_cpus = 0; - -static int system_wide = 0; - -static void display_help(void) -{ - unsigned int i; - int e; - - printf( - "Usage: perfstat [] \n\n" - "PerfStat Options (up to %d event types can be specified):\n\n", - MAX_COUNTERS); - printf( - " -e EVENT --event=EVENT # symbolic-name abbreviations"); - - for (i = 0, e = PERF_HW_EVENTS_MAX; i < ARRAY_SIZE(event_symbols); i++) { - if (e != event_symbols[i].event) { - e = event_symbols[i].event; - printf( - "\n %2d: %-20s", e, event_symbols[i].symbol); - } else - printf(" %s", event_symbols[i].symbol); - } - - printf("\n" - " rNNN: raw event type\n\n" - " -s # system-wide collection\n\n" - " -c --command= # command+arguments to be timed.\n" - "\n"); - exit(0); -} - -static void process_options(int argc, char *argv[]) -{ - for (;;) { - int option_index = 0; - /** Options for getopt */ - static struct option long_options[] = { - {"event", required_argument, NULL, 'e'}, - {"help", no_argument, NULL, 'h'}, - {"command", no_argument, NULL, 'c'}, - {NULL, 0, NULL, 0 } - }; - int c = getopt_long(argc, argv, "+:e:c:s", - long_options, &option_index); - if (c == -1) - break; - - switch (c) { - case 'c': - break; - case 's': - system_wide = 1; - break; - case 'e': - parse_events(optarg); - break; - default: - break; - } - } - if (optind == argc) - goto err; - - if (!nr_counters) - nr_counters = 8; - return; - -err: - display_help(); -} - -char fault_here[1000000]; - -static int fd[MAX_NR_CPUS][MAX_COUNTERS]; - -static void create_counter(int counter) -{ - struct perf_counter_hw_event hw_event; - - memset(&hw_event, 0, sizeof(hw_event)); - hw_event.type = event_id[counter]; - hw_event.raw = event_raw[counter]; - hw_event.record_type = PERF_RECORD_SIMPLE; - hw_event.nmi = 0; - - if (system_wide) { - int cpu; - for (cpu = 0; cpu < nr_cpus; cpu ++) { - fd[cpu][counter] = sys_perf_counter_open(&hw_event, -1, cpu, -1, 0); - if (fd[cpu][counter] < 0) { - printf("perfstat error: syscall returned with %d (%s)\n", - fd[cpu][counter], strerror(errno)); - exit(-1); - } - - } - } else { - hw_event.inherit = 1; - hw_event.disabled = 1; - - fd[0][counter] = sys_perf_counter_open(&hw_event, 0, -1, -1, 0); - if (fd[0][counter] < 0) { - printf("perfstat error: syscall returned with %d (%s)\n", - fd[0][counter], strerror(errno)); - exit(-1); - } - } -} - - -int main(int argc, char *argv[]) -{ - unsigned long long t0, t1; - int counter; - ssize_t res; - int status; - int pid; - - process_options(argc, argv); - - if (system_wide) { - nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); - assert(nr_cpus <= MAX_NR_CPUS); - assert(nr_cpus >= 0); - } else - nr_cpus = 1; - - for (counter = 0; counter < nr_counters; counter++) - create_counter(counter); - - argc -= optind; - argv += optind; - - /* - * Enable counters and exec the command: - */ - t0 = rdclock(); - 
prctl(PR_TASK_PERF_COUNTERS_ENABLE); - - if ((pid = fork()) < 0) - perror("failed to fork"); - if (!pid) { - if (execvp(argv[0], argv)) { - perror(argv[0]); - exit(-1); - } - } - while (wait(&status) >= 0) - ; - prctl(PR_TASK_PERF_COUNTERS_DISABLE); - t1 = rdclock(); - - fflush(stdout); - - fprintf(stderr, "\n"); - fprintf(stderr, " Performance counter stats for \'%s\':\n", - argv[0]); - fprintf(stderr, "\n"); - - for (counter = 0; counter < nr_counters; counter++) { - int cpu; - __u64 count, single_count; - - count = 0; - for (cpu = 0; cpu < nr_cpus; cpu ++) { - res = read(fd[cpu][counter], - (char *) &single_count, sizeof(single_count)); - assert(res == sizeof(single_count)); - count += single_count; - } - - if (!event_raw[counter] && - (event_id[counter] == PERF_COUNT_CPU_CLOCK || - event_id[counter] == PERF_COUNT_TASK_CLOCK)) { - - double msecs = (double)count / 1000000; - - fprintf(stderr, " %14.6f %-20s (msecs)\n", - msecs, event_name(counter)); - } else { - fprintf(stderr, " %14Ld %-20s (events)\n", - count, event_name(counter)); - } - if (!counter) - fprintf(stderr, "\n"); - } - fprintf(stderr, "\n"); - fprintf(stderr, " Wall-clock time elapsed: %12.6f msecs\n", - (double)(t1-t0)/1e6); - fprintf(stderr, "\n"); - - return 0; -} -- cgit v1.2.3 From 193e8df1b465f206f8a286202680702e1c153367 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 23 Mar 2009 22:23:16 +0100 Subject: perf_counter tools: fix build warning in kerneltop.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix: kerneltop.c: In function ‘record_ip’: kerneltop.c:1005: warning: format ‘%016llx’ expects type ‘long long unsigned int’, but argument 2 has type ‘uint64_t’ Cc: Wu Fengguang Cc: Paul Mackerras Acked-by: Peter Zijlstra Orig-LKML-Reference: <20090323172417.677932499@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index 25e80bc4455..8f9a303f28d 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -1002,7 +1002,7 @@ static void record_ip(uint64_t ip, int counter) assert(left <= middle && middle <= right); if (!(left <= ip && ip <= right)) { printf(" left: %016lx\n", left); - printf(" ip: %016llx\n", ip); + printf(" ip: %016lx\n", (unsigned long)ip); printf("right: %016lx\n", right); } assert(left <= ip && ip <= right); -- cgit v1.2.3 From 81cdbe0509542324ad7d3282ab67c2b6716df663 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 23 Mar 2009 22:29:50 +0100 Subject: perf_counter tools: increase cpu-cycles again Commit b7368fdd7d decreased the CPU cycles interval 100-fold, but this is causing kerneltop failures on my Nehalem box: aldebaran:/home/mingo/linux/linux/Documentation/perf_counter> ./kerneltop KernelTop refresh period: 2 seconds ERROR: failed to keep up with mmap data 10,000 cycles is way too short. What we should do instead on mostly-idle systems is some sort of read/poll timeout, so that we display something every 2 seconds for sure.
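To put rough numbers on this (illustrative arithmetic, not from the commit): on a busy ~3 GHz CPU, a 10,000-cycle period produces about 3,000,000,000 / 10,000 = 300,000 samples per second, i.e. roughly 2.4 MB/s of 8-byte records, so a two-data-page (8 KB) mmap buffer wraps in a few milliseconds, long before a 2-second refresh can drain it. The 1,000,000-cycle period restored below cuts this to about 3,000 samples per second (~24 KB/s), which the display loop can keep up with.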
Cc: Wu Fengguang Cc: Peter Zijlstra Cc: Paul Mackerras Signed-off-by: Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index 8f9a303f28d..2ab29b5e32e 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -212,7 +212,7 @@ struct source_line { const unsigned int default_count[] = { - 10000, + 1000000, 1000000, 10000, 10000, -- cgit v1.2.3 From cbe46555dc4de6403cd757139d42289b5f21abb9 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 24 Mar 2009 16:52:34 +1100 Subject: perf_counter tools: remove glib dependency and fix bugs in kerneltop.c The glib dependency in kerneltop.c is only for a little bit of list manipulation, and I find it inconvenient. This adds a 'next' field to struct source_line, which lets us link them together into a list. The code to do the linking ourselves turns out to be no longer or more difficult than using glib. This also fixes a few other problems: - We need to #include to get PATH_MAX on powerpc. - We need to #include rather than have our own definitions of __u64 and __s64; on powerpc the installed headers define them to be unsigned long and long respectively, and if we have our own, different definition here that causes a compile error. - This takes out the x86 setting of errno from -ret in sys_perf_counter_open. My experiments on x86 indicate that the glibc syscall() does this for us already. - We had two CPU migration counters in the default set, which seems unnecessary; I changed one of them to a context switch counter. - In perfstat mode we were printing CPU cycles and instructions as milliseconds, and the cpu clock and task clock counters as events. This fixes that. - In perfstat mode we were still printing a blank line after the first counter, which was a holdover from when a task clock counter was automatically included as the first counter. This removes the blank line. - On a test machine here, parse_symbols() and parse_vmlinux() were taking long enough (almost 0.5 seconds) for the mmap buffer to overflow before we got to the first mmap_read() call, so this moves them before we open all the counters. - The error message if sys_perf_counter_open fails needs to use errno, not -fd[i][counter]. 
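In isolation, the tail-pointer append that replaces the glib list looks like this (a distilled sketch; append_line() is a hypothetical helper, the patch below open-codes the same three statements in parse_vmlinux()):

	#include <stdint.h>

	struct source_line {
		uint64_t EIP;
		unsigned long count;
		char *line;
		struct source_line *next;
	};

	static struct source_line *lines;
	static struct source_line **lines_tail = &lines;

	/*
	 * O(1) append that preserves file order, so no g_list_prepend()
	 * plus g_list_reverse() pass is needed.
	 */
	static void append_line(struct source_line *src)
	{
		src->next = NULL;
		*lines_tail = src;		/* hook the node into the list... */
		lines_tail = &src->next;	/* ...and advance the tail pointer */
	}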
Signed-off-by: Paul Mackerras Acked-by: Peter Zijlstra Acked-by: Mike Galbraith Cc: Arjan van de Ven Orig-LKML-Reference: <18888.29986.340328.540512@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 2 +- Documentation/perf_counter/kerneltop.c | 112 +++++++++++++-------------------- 2 files changed, 46 insertions(+), 68 deletions(-) diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 666da95a787..194b6621558 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -3,7 +3,7 @@ BINS = kerneltop perfstat all: $(BINS) kerneltop: kerneltop.c ../../include/linux/perf_counter.h - cc -O6 -Wall -lrt `pkg-config --cflags --libs glib-2.0` -o $@ $< + cc -O6 -Wall -lrt -o $@ $< perfstat: kerneltop ln -sf kerneltop perfstat diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index 2ab29b5e32e..ea13e4e6722 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -3,7 +3,7 @@ Build with: - cc -O6 -Wall -lrt `pkg-config --cflags --libs glib-2.0` -o kerneltop kerneltop.c + cc -O6 -Wall -c -o kerneltop.o kerneltop.c -lrt Sample output: @@ -56,6 +56,7 @@ * Yanmin Zhang * Wu Fengguang * Mike Galbraith + * Paul Mackerras * * Released under the GPL v2. (and only v2, not any later version) */ @@ -68,6 +69,7 @@ #include #include #include +#include #include #include #include @@ -76,8 +78,6 @@ #include #include -#include - #include #include #include @@ -87,6 +87,7 @@ #include #include +#include #include "../../include/linux/perf_counter.h" @@ -114,11 +115,6 @@ #define __user #define asmlinkage -typedef unsigned int __u32; -typedef unsigned long long __u64; -typedef long long __s64; - - #ifdef __x86_64__ #define __NR_perf_counter_open 295 #define rmb() asm volatile("lfence" ::: "memory") @@ -146,17 +142,8 @@ asmlinkage int sys_perf_counter_open( int group_fd, unsigned long flags) { - int ret; - - ret = syscall( + return syscall( __NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags); -#if defined(__x86_64__) || defined(__i386__) - if (ret < 0 && ret > -4096) { - errno = -ret; - ret = -1; - } -#endif - return ret; } #define MAX_COUNTERS 64 @@ -170,7 +157,7 @@ static int system_wide = 0; static int nr_counters = 0; static __u64 event_id[MAX_COUNTERS] = { EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), - EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), + EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), @@ -202,14 +189,15 @@ static int delay_secs = 2; static int zero; static int dump_symtab; -static GList *lines; - struct source_line { uint64_t EIP; unsigned long count; char *line; + struct source_line *next; }; +static struct source_line *lines; +static struct source_line **lines_tail; const unsigned int default_count[] = { 1000000, @@ -519,9 +507,8 @@ int do_perfstat(int argc, char *argv[]) count += single_count; } - if (!PERF_COUNTER_RAW(event_id[counter]) && - (event_id[counter] == PERF_COUNT_CPU_CLOCK || - event_id[counter] == PERF_COUNT_TASK_CLOCK)) { + if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK) || + event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK)) { double msecs = (double)count / 1000000; @@ -531,8 +518,6 @@ int do_perfstat(int argc, char *argv[]) fprintf(stderr, " %14Ld %-20s (events)\n", count, event_name(counter)); } - if (!counter) - 
fprintf(stderr, "\n"); } fprintf(stderr, "\n"); fprintf(stderr, " Wall-clock time elapsed: %12.6f msecs\n", @@ -554,7 +539,7 @@ struct sym_entry { char *sym; unsigned long count[MAX_COUNTERS]; int skip; - GList *source; + struct source_line *source; }; #define MAX_SYMS 100000 @@ -855,6 +840,7 @@ static void parse_vmlinux(char *filename) if (!file) return; + lines_tail = &lines; while (!feof(file)) { struct source_line *src; size_t dummy = 0; @@ -873,7 +859,9 @@ static void parse_vmlinux(char *filename) if (c) *c = 0; - lines = g_list_prepend(lines, src); + src->next = NULL; + *lines_tail = src; + lines_tail = &src->next; if (strlen(src->line)>8 && src->line[8] == ':') src->EIP = strtoull(src->line, NULL, 16); @@ -881,52 +869,43 @@ static void parse_vmlinux(char *filename) src->EIP = strtoull(src->line, NULL, 16); } pclose(file); - lines = g_list_reverse(lines); } static void record_precise_ip(uint64_t ip) { struct source_line *line; - GList *item; - item = g_list_first(lines); - while (item) { - line = item->data; + for (line = lines; line; line = line->next) { if (line->EIP == ip) line->count++; if (line->EIP > ip) break; - item = g_list_next(item); } } static void lookup_sym_in_vmlinux(struct sym_entry *sym) { struct source_line *line; - GList *item; char pattern[PATH_MAX]; sprintf(pattern, "<%s>:", sym->sym); - item = g_list_first(lines); - while (item) { - line = item->data; + for (line = lines; line; line = line->next) { if (strstr(line->line, pattern)) { - sym->source = item; + sym->source = line; break; } - item = g_list_next(item); } } -void show_lines(GList *item_queue, int item_queue_count) +static void show_lines(struct source_line *line_queue, int line_queue_count) { int i; struct source_line *line; - for (i = 0; i < item_queue_count; i++) { - line = item_queue->data; + line = line_queue; + for (i = 0; i < line_queue_count; i++) { printf("%8li\t%s\n", line->count, line->line); - item_queue = g_list_next(item_queue); + line = line->next; } } @@ -935,10 +914,9 @@ void show_lines(GList *item_queue, int item_queue_count) static void show_details(struct sym_entry *sym) { struct source_line *line; - GList *item; + struct source_line *line_queue = NULL; int displayed = 0; - GList *item_queue = NULL; - int item_queue_count = 0; + int line_queue_count = 0; if (!sym->source) lookup_sym_in_vmlinux(sym); @@ -947,30 +925,29 @@ static void show_details(struct sym_entry *sym) printf("Showing details for %s\n", sym->sym); - item = sym->source; - while (item) { - line = item->data; + line = sym->source; + while (line) { if (displayed && strstr(line->line, ">:")) break; - if (!item_queue_count) - item_queue = item; - item_queue_count ++; + if (!line_queue_count) + line_queue = line; + line_queue_count ++; if (line->count >= count_filter) { - show_lines(item_queue, item_queue_count); - item_queue_count = 0; - item_queue = NULL; - } else if (item_queue_count > TRACE_COUNT) { - item_queue = g_list_next(item_queue); - item_queue_count --; + show_lines(line_queue, line_queue_count); + line_queue_count = 0; + line_queue = NULL; + } else if (line_queue_count > TRACE_COUNT) { + line_queue = line_queue->next; + line_queue_count --; } line->count = 0; displayed++; if (displayed > 300) break; - item = g_list_next(item); + line = line->next; } } @@ -1201,6 +1178,10 @@ int main(int argc, char *argv[]) if (tid != -1 || profile_cpu != -1) nr_cpus = 1; + parse_symbols(); + if (vmlinux && sym_filter_entry) + parse_vmlinux(vmlinux); + for (i = 0; i < nr_cpus; i++) { group_fd = -1; for (counter = 0; counter < 
nr_counters; counter++) { @@ -1216,15 +1197,16 @@ int main(int argc, char *argv[]) hw_event.nmi = nmi; fd[i][counter] = sys_perf_counter_open(&hw_event, tid, cpu, group_fd, 0); - fcntl(fd[i][counter], F_SETFL, O_NONBLOCK); if (fd[i][counter] < 0) { + int err = errno; printf("kerneltop error: syscall returned with %d (%s)\n", - fd[i][counter], strerror(-fd[i][counter])); - if (fd[i][counter] == -1) + fd[i][counter], strerror(err)); + if (err == EPERM) printf("Are you root?\n"); exit(-1); } assert(fd[i][counter] >= 0); + fcntl(fd[i][counter], F_SETFL, O_NONBLOCK); /* * First counter acts as the group leader: @@ -1248,10 +1230,6 @@ int main(int argc, char *argv[]) } } - parse_symbols(); - if (vmlinux && sym_filter_entry) - parse_vmlinux(vmlinux); - printf("KernelTop refresh period: %d seconds\n", delay_secs); last_refresh = time(NULL); -- cgit v1.2.3 From 0fd112e41cd6f6d4779cbe327c3632d087e31476 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 24 Mar 2009 10:50:24 +0100 Subject: perf_counter tools: remove glib dependency and fix bugs in kerneltop.c, fix poll() Paul Mackerras wrote: > I noticed the poll stuff is bogus - we have a 2D array of struct > pollfds (MAX_NR_CPUS x MAX_COUNTERS), we fill in a sub-array (with the > rest being uninitialized, since the array is on the stack) and then > pass the first nr_cpus elements to poll. Not what we really meant, I > suspect. :) Not even if we only have one counter, since it's the > counter dimension that varies fastest. This should fix the most obvious poll fubar.. not enough to fix the full problem though.. Reported-by: Paul Mackerras Reported-by: Mike Galbraith Signed-off-by: Peter Zijlstra Cc: Arjan van de Ven Orig-LKML-Reference: <18888.29986.340328.540512@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index ea13e4e6722..7ebde7a336c 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -1157,10 +1157,10 @@ static void mmap_read(struct mmap_data *md) int main(int argc, char *argv[]) { - struct pollfd event_array[MAX_NR_CPUS][MAX_COUNTERS]; + struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; struct perf_counter_hw_event hw_event; - int i, counter, group_fd; + int i, counter, group_fd, nr_poll = 0; unsigned int cpu; int ret; @@ -1214,8 +1214,9 @@ int main(int argc, char *argv[]) if (group && group_fd == -1) group_fd = fd[i][counter]; - event_array[i][counter].fd = fd[i][counter]; - event_array[i][counter].events = POLLIN; + event_array[nr_poll].fd = fd[i][counter]; + event_array[nr_poll].events = POLLIN; + nr_poll++; mmap_array[i][counter].counter = counter; mmap_array[i][counter].prev = 0; @@ -1247,7 +1248,7 @@ int main(int argc, char *argv[]) } if (hits == events) - ret = poll(event_array[0], nr_cpus, 1000); + ret = poll(event_array, nr_poll, 1000); hits = events; } -- cgit v1.2.3 From f66c6b2066b44d4ab8e8ac1ee4cae543738fe2ac Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 23 Mar 2009 10:29:36 +1100 Subject: perf_counter: update documentation Impact: documentation fix This updates the perfcounter documentation to reflect recent changes. 
Signed-off-by: Paul Mackerras --- Documentation/perf_counter/design.txt | 268 +++++++++++++++++++++++++--------- 1 file changed, 202 insertions(+), 66 deletions(-) diff --git a/Documentation/perf_counter/design.txt b/Documentation/perf_counter/design.txt index fddd32189a5..aaf105c02fb 100644 --- a/Documentation/perf_counter/design.txt +++ b/Documentation/perf_counter/design.txt @@ -11,7 +11,9 @@ thus be used to profile the code that runs on that CPU. The Linux Performance Counter subsystem provides an abstraction of these hardware capabilities. It provides per task and per CPU counters, counter -groups, and it provides event capabilities on top of those. +groups, and it provides event capabilities on top of those. It +provides "virtual" 64-bit counters, regardless of the width of the +underlying hardware counters. Performance counters are accessed via special file descriptors. There's one file descriptor per virtual counter used. @@ -20,7 +22,8 @@ The special file descriptor is opened via the perf_counter_open() system call: int sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr, - pid_t pid, int cpu, int group_fd); + pid_t pid, int cpu, int group_fd, + unsigned long flags); The syscall returns the new fd. The fd can be used via the normal VFS system calls: read() can be used to read the counter, fcntl() @@ -32,90 +35,180 @@ can be poll()ed. When creating a new counter fd, 'perf_counter_hw_event' is: /* - * Hardware event to monitor via a performance monitoring counter: + * Event to monitor via a performance monitoring counter: */ struct perf_counter_hw_event { - s64 type; + __u64 event_config; - u64 irq_period; - u32 record_type; + __u64 irq_period; + __u64 record_type; + __u64 read_format; - u32 disabled : 1, /* off by default */ - nmi : 1, /* NMI sampling */ - raw : 1, /* raw event type */ - __reserved_1 : 29; + __u64 disabled : 1, /* off by default */ + nmi : 1, /* NMI sampling */ + inherit : 1, /* children inherit it */ + pinned : 1, /* must always be on PMU */ + exclusive : 1, /* only group on PMU */ + exclude_user : 1, /* don't count user */ + exclude_kernel : 1, /* ditto kernel */ + exclude_hv : 1, /* ditto hypervisor */ + exclude_idle : 1, /* don't count when idle */ - u64 __reserved_2; + __reserved_1 : 55; + + __u32 extra_config_len; + + __u32 __reserved_4; + __u64 __reserved_2; + __u64 __reserved_3; }; +The 'event_config' field specifies what the counter should count. It +is divided into 3 bit-fields: + +raw_type: 1 bit (most significant bit) 0x8000_0000_0000_0000 +type: 7 bits (next most significant) 0x7f00_0000_0000_0000 +event_id: 56 bits (least significant) 0x00ff_ffff_ffff_ffff + +If 'raw_type' is 1, then the counter will count a hardware event +specified by the remaining 63 bits of event_config. The encoding is +machine-specific.
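To make the layout concrete, composing an 'event_config' value can be sketched in user space as follows (an illustrative sketch only; the macro and helper names here are not part of the ABI, only the bit layout above is taken from this document):

    #define EVENT_CONFIG_RAW        (1ULL << 63)        /* raw_type: most significant bit */
    #define EVENT_CONFIG_TYPE_SHIFT 56                  /* type: next 7 bits */
    #define EVENT_CONFIG_ID_MASK    ((1ULL << 56) - 1)  /* event_id: low 56 bits */

    /* Compose a generalized (non-raw) event descriptor: */
    static inline __u64 event_config(__u64 type, __u64 event_id)
    {
            return (type << EVENT_CONFIG_TYPE_SHIFT) |
                   (event_id & EVENT_CONFIG_ID_MASK);
    }

    /* A raw, machine-specific event uses the low 63 bits verbatim: */
    static inline __u64 raw_event_config(__u64 hw_code)
    {
            return EVENT_CONFIG_RAW | hw_code;
    }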
+ +If 'raw_type' is 0, then the 'type' field says what kind of counter +this is, with the following encoding: + +enum perf_event_types { + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, + PERF_TYPE_TRACEPOINT = 2, +}; + +A counter of PERF_TYPE_HARDWARE will count the hardware event +specified by 'event_id': + /* - * Generalized performance counter event types, used by the hw_event.type + * Generalized performance counter event types, used by the hw_event.event_id * parameter of the sys_perf_counter_open() syscall: */ -enum hw_event_types { +enum hw_event_ids { /* * Common hardware events, generalized by the kernel: */ - PERF_COUNT_CYCLES = 0, - PERF_COUNT_INSTRUCTIONS = 1, - PERF_COUNT_CACHE_REFERENCES = 2, - PERF_COUNT_CACHE_MISSES = 3, - PERF_COUNT_BRANCH_INSTRUCTIONS = 4, - PERF_COUNT_BRANCH_MISSES = 5, - - /* - * Special "software" counters provided by the kernel, even if - * the hardware does not support performance counters. These - * counters measure various physical and sw events of the - * kernel (and allow the profiling of them as well): - */ - PERF_COUNT_CPU_CLOCK = -1, - PERF_COUNT_TASK_CLOCK = -2, - /* - * Future software events: - */ - /* PERF_COUNT_PAGE_FAULTS = -3, - PERF_COUNT_CONTEXT_SWITCHES = -4, */ + PERF_COUNT_CPU_CYCLES = 0, + PERF_COUNT_INSTRUCTIONS = 1, + PERF_COUNT_CACHE_REFERENCES = 2, + PERF_COUNT_CACHE_MISSES = 3, + PERF_COUNT_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_BRANCH_MISSES = 5, + PERF_COUNT_BUS_CYCLES = 6, }; -These are standardized types of events that work uniformly on all CPUs -that implements Performance Counters support under Linux. If a CPU is -not able to count branch-misses, then the system call will return --EINVAL. +These are standardized types of events that work relatively uniformly +on all CPUs that implement Performance Counters support under Linux, +although there may be variations (e.g., different CPUs might count +cache references and misses at different levels of the cache hierarchy). +If a CPU is not able to count the selected event, then the system call +will return -EINVAL. -More hw_event_types are supported as well, but they are CPU -specific and are enumerated via /sys on a per CPU basis. Raw hw event -types can be passed in under hw_event.type if hw_event.raw is 1. -For example, to count "External bus cycles while bus lock signal asserted" -events on Intel Core CPUs, pass in a 0x4064 event type value and set -hw_event.raw to 1. +More hw_event_types are supported as well, but they are CPU-specific +and accessed as raw events. For example, to count "External bus +cycles while bus lock signal asserted" events on Intel Core CPUs, pass +in a 0x4064 event_id value and set hw_event.raw_type to 1. -'record_type' is the type of data that a read() will provide for the -counter, and it can be one of: +A counter of type PERF_TYPE_SOFTWARE will count one of the available +software events, selected by 'event_id': /* - * IRQ-notification data record type: + * Special "software" counters provided by the kernel, even if the hardware + * does not support performance counters. 
These counters measure various + * physical and sw events of the kernel (and allow the profiling of them as + * well): */ -enum perf_counter_record_type { - PERF_RECORD_SIMPLE = 0, - PERF_RECORD_IRQ = 1, - PERF_RECORD_GROUP = 2, +enum sw_event_ids { + PERF_COUNT_CPU_CLOCK = 0, + PERF_COUNT_TASK_CLOCK = 1, + PERF_COUNT_PAGE_FAULTS = 2, + PERF_COUNT_CONTEXT_SWITCHES = 3, + PERF_COUNT_CPU_MIGRATIONS = 4, + PERF_COUNT_PAGE_FAULTS_MIN = 5, + PERF_COUNT_PAGE_FAULTS_MAJ = 6, }; -a "simple" counter is one that counts hardware events and allows -them to be read out into a u64 count value. (read() returns 8 on -a successful read of a simple counter.) +Counters come in two flavours: counting counters and sampling +counters. A "counting" counter is one that is used for counting the +number of events that occur, and is characterised by having +irq_period = 0 and record_type = PERF_RECORD_SIMPLE. A read() on a +counting counter simply returns the current value of the counter as +an 8-byte number. -An "irq" counter is one that will also provide an IRQ context information: -the IP of the interrupted context. In this case read() will return -the 8-byte counter value, plus the Instruction Pointer address of the -interrupted context. +A "sampling" counter is one that is set up to generate an interrupt +every N events, where N is given by 'irq_period'. A sampling counter +has irq_period > 0 and record_type != PERF_RECORD_SIMPLE. The +record_type controls what data is recorded on each interrupt, and the +available values are currently: -The parameter 'hw_event_period' is the number of events before waking up -a read() that is blocked on a counter fd. Zero value means a non-blocking -counter. +/* + * IRQ-notification data record type: + */ +enum perf_counter_record_type { + PERF_RECORD_SIMPLE = 0, + PERF_RECORD_IRQ = 1, + PERF_RECORD_GROUP = 2, +}; -The 'pid' parameter allows the counter to be specific to a task: +A record_type value of PERF_RECORD_IRQ will record the instruction +pointer (IP) at which the interrupt occurred. A record_type value of +PERF_RECORD_GROUP will record the event_config and counter value of +all of the other counters in the group, and should only be used on a +group leader (see below). Currently these two values are mutually +exclusive, but record_type will become a bit-mask in future and +support other values. + +A sampling counter has an event queue, into which an event is placed +on each interrupt. A read() on a sampling counter will read the next +event from the event queue. If the queue is empty, the read() will +either block or return an EAGAIN error, depending on whether the fd +has been set to non-blocking mode or not. + +The 'disabled' bit specifies whether the counter starts out disabled +or enabled. If it is initially disabled, it can be enabled by ioctl +or prctl (see below). + +The 'nmi' bit specifies, for hardware events, whether the counter +should be set up to request non-maskable interrupts (NMIs) or normal +interrupts. This bit is ignored if the user doesn't have +CAP_SYS_ADMIN privilege (i.e. is not root) or if the CPU doesn't +generate NMIs from hardware counters. + +The 'inherit' bit, if set, specifies that this counter should count +events on descendant tasks as well as the task specified. This only +applies to new descendants, not to any existing descendants at the +time the counter is created (nor to any new descendants of existing +descendants). + +The 'pinned' bit, if set, specifies that the counter should always be +on the CPU if at all possible.
It only applies to hardware counters +and only to group leaders. If a pinned counter cannot be put onto the +CPU (e.g. because there are not enough hardware counters or because of +a conflict with some other event), then the counter goes into an +'error' state, where reads return end-of-file (i.e. read() returns 0) +until the counter is subsequently enabled or disabled. + +The 'exclusive' bit, if set, specifies that when this counter's group +is on the CPU, it should be the only group using the CPU's counters. +In future, this will allow sophisticated monitoring programs to supply +extra configuration information via 'extra_config_len' to exploit +advanced features of the CPU's Performance Monitor Unit (PMU) that are +not otherwise accessible and that might disrupt other hardware +counters. + +The 'exclude_user', 'exclude_kernel' and 'exclude_hv' bits provide a +way to request that counting of events be restricted to times when the +CPU is in user, kernel and/or hypervisor mode. + + +The 'pid' parameter to the perf_counter_open() system call allows the +counter to be specific to a task: pid == 0: if the pid parameter is zero, the counter is attached to the current task. @@ -125,8 +218,7 @@ The 'pid' parameter allows the counter to be specific to a task: pid < 0: all tasks are counted (per cpu counters) -The 'cpu' parameter allows a counter to be made specific to a full -CPU: +The 'cpu' parameter allows a counter to be made specific to a CPU: cpu >= 0: the counter is restricted to a specific CPU cpu == -1: the counter counts on all CPUs @@ -141,7 +233,51 @@ their own tasks. A 'pid == -1' and 'cpu == x' counter is a per CPU counter that counts all events on CPU-x. Per CPU counters need CAP_SYS_ADMIN privilege. -Group counters are created by passing in a group_fd of another counter. -Groups are scheduled at once and can be used with PERF_RECORD_GROUP -to record multi-dimensional timestamps. +The 'flags' parameter is currently unused and must be zero. + +The 'group_fd' parameter allows counter "groups" to be set up. A +counter group has one counter which is the group "leader". The leader +is created first, with group_fd = -1 in the perf_counter_open call +that creates it. The rest of the group members are created +subsequently, with group_fd giving the fd of the group leader. +(A single counter on its own is created with group_fd = -1 and is +considered to be a group with only 1 member.) + +A counter group is scheduled onto the CPU as a unit, that is, it will +only be put onto the CPU if all of the counters in the group can be +put onto the CPU. This means that the values of the member counters +can be meaningfully compared, added, divided (to get ratios), etc., +with each other, since they have counted events for the same set of +executed instructions. + +Counters can be enabled and disabled in two ways: via ioctl and via +prctl. When a counter is disabled, it doesn't count or generate +events but does continue to exist and maintain its count value. + +An individual counter or counter group can be enabled with + + ioctl(fd, PERF_COUNTER_IOC_ENABLE); + +or disabled with + + ioctl(fd, PERF_COUNTER_IOC_DISABLE); + +Enabling or disabling the leader of a group enables or disables the +whole group; that is, while the group leader is disabled, none of the +counters in the group will count. Enabling or disabling a member of a +group other than the leader only affects that counter - disabling a +non-leader stops that counter from counting but doesn't affect any +other counter.
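Putting the group and enable/disable interfaces together, a user-space sketch might look like this (error handling elided; event_config() is the illustrative helper sketched earlier, and sys_perf_counter_open() is a thin wrapper around the syscall, as in kerneltop.c):

    struct perf_counter_hw_event hw = { 0 };
    int leader, member;

    /* Group leader: created with group_fd = -1. */
    hw.event_config = event_config(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES);
    leader = sys_perf_counter_open(&hw, 0 /* current task */, -1, -1, 0);

    /* Group member: group_fd names the leader. */
    hw.event_config = event_config(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS);
    member = sys_perf_counter_open(&hw, 0, -1, leader, 0);

    /* Disabling the leader stops the whole group from counting: */
    ioctl(leader, PERF_COUNTER_IOC_DISABLE);
    /* ... */
    ioctl(leader, PERF_COUNTER_IOC_ENABLE);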
+ +A process can enable or disable all the counter groups that are attached to it, using prctl: + + prctl(PR_TASK_PERF_COUNTERS_ENABLE); + + prctl(PR_TASK_PERF_COUNTERS_DISABLE); + +This applies to all counters on the current process, whether created +by this process or by another, and doesn't affect any counters that +this process has created on other processes. It only enables or +disables the group leaders, not any other members in the groups. -- cgit v1.2.3 From c7138f37f905bb7987b1f9f5a8ee73667db39f25 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 24 Mar 2009 13:18:16 +0100 Subject: perf_counter: fix perf_poll() Impact: fix kerneltop 100% CPU usage Only return a poll event when there's actually been one: poll_wait() doesn't actually wait for the waitq you pass it, it only enqueues you on it. Only once all FDs have been iterated and none of them returned a poll-event will it schedule(). Also make it return POLL_HUP when there's no mmap() area to read from. Further, fix a silly bug in the write code. Reported-by: Mike Galbraith Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Arjan van de Ven Orig-LKML-Reference: <1237897096.24918.181.camel@twins> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 1 + kernel/perf_counter.c | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 2b5e66d5ebd..48212c15b7d 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -246,6 +246,7 @@ struct file; struct perf_mmap_data { struct rcu_head rcu_head; int nr_pages; + atomic_t wakeup; atomic_t head; struct perf_counter_mmap_page *user_page; void *data_pages[0]; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 0dfe91094fd..affe227d56a 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1161,7 +1161,16 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) static unsigned int perf_poll(struct file *file, poll_table *wait) { struct perf_counter *counter = file->private_data; - unsigned int events = POLLIN; + struct perf_mmap_data *data; + unsigned int events; + + rcu_read_lock(); + data = rcu_dereference(counter->data); + if (data) + events = atomic_xchg(&data->wakeup, 0); + else + events = POLL_HUP; + rcu_read_unlock(); poll_wait(file, &counter->waitq, wait); @@ -1425,7 +1434,7 @@ static int perf_output_write(struct perf_counter *counter, int nmi, do { offset = head = atomic_read(&data->head); - head += sizeof(u64); + head += size; } while (atomic_cmpxchg(&data->head, offset, head) != offset); wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT); @@ -1446,6 +1455,7 @@ static int perf_output_write(struct perf_counter *counter, int nmi, * generate a poll() wakeup for every page boundary crossed */ if (wakeup) { + atomic_xchg(&data->wakeup, POLL_IN); __perf_counter_update_userpage(counter, data); if (nmi) { counter->wakeup_pending = 1; -- cgit v1.2.3 From b9cacc7bf193df16532bfa7d7ca77fe50fc3c2e6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 25 Mar 2009 12:30:22 +0100 Subject: perf_counter: more elaborate write API Provide a begin, copy, end interface to the output buffer. begin() reserves the space, copy() copies the data over, considering page boundaries, end() finalizes the event and does the wakeup.
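As a sketch, the intended calling pattern (this mirrors what perf_output_write() is rewritten into below):

    struct perf_output_handle handle;
    int ret;

    ret = perf_output_begin(&handle, counter, size);  /* reserve 'size' bytes */
    if (ret)
            return ret;

    perf_output_copy(&handle, buf, size);  /* copy, handling page boundaries */
    perf_output_end(&handle, nmi);         /* finalize event, wake up readers */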
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith Cc: Arjan van de Ven Cc: Wu Fengguang Orig-LKML-Reference: <20090325113316.740550870@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 109 ++++++++++++++++++++++++++++++++++---------------- 1 file changed, 75 insertions(+), 34 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index affe227d56a..0422fd9bf62 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -9,6 +9,7 @@ */ #include +#include #include #include #include @@ -16,15 +17,14 @@ #include #include #include +#include +#include +#include #include #include #include #include #include -#include -#include -#include -#include #include @@ -1411,16 +1411,20 @@ static const struct file_operations perf_fops = { * Output */ -static int perf_output_write(struct perf_counter *counter, int nmi, - void *buf, ssize_t size) +struct perf_output_handle { + struct perf_counter *counter; + struct perf_mmap_data *data; + unsigned int offset; + int wakeup; +}; + +static int perf_output_begin(struct perf_output_handle *handle, + struct perf_counter *counter, unsigned int size) { struct perf_mmap_data *data; - unsigned int offset, head, nr; - unsigned int len; - int ret, wakeup; + unsigned int offset, head; rcu_read_lock(); - ret = -ENOSPC; data = rcu_dereference(counter->data); if (!data) goto out; @@ -1428,45 +1432,82 @@ static int perf_output_write(struct perf_counter *counter, int nmi, if (!data->nr_pages) goto out; - ret = -EINVAL; - if (size > PAGE_SIZE) - goto out; - do { offset = head = atomic_read(&data->head); head += size; } while (atomic_cmpxchg(&data->head, offset, head) != offset); - wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT); + handle->counter = counter; + handle->data = data; + handle->offset = offset; + handle->wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT); - nr = (offset >> PAGE_SHIFT) & (data->nr_pages - 1); - offset &= PAGE_SIZE - 1; + return 0; - len = min_t(unsigned int, PAGE_SIZE - offset, size); - memcpy(data->data_pages[nr] + offset, buf, len); - size -= len; +out: + rcu_read_unlock(); - if (size) { - nr = (nr + 1) & (data->nr_pages - 1); - memcpy(data->data_pages[nr], buf + len, size); - } + return -ENOSPC; +} - /* - * generate a poll() wakeup for every page boundary crossed - */ - if (wakeup) { - atomic_xchg(&data->wakeup, POLL_IN); - __perf_counter_update_userpage(counter, data); +static void perf_output_copy(struct perf_output_handle *handle, + void *buf, unsigned int len) +{ + unsigned int pages_mask; + unsigned int offset; + unsigned int size; + void **pages; + + offset = handle->offset; + pages_mask = handle->data->nr_pages - 1; + pages = handle->data->data_pages; + + do { + unsigned int page_offset; + int nr; + + nr = (offset >> PAGE_SHIFT) & pages_mask; + page_offset = offset & (PAGE_SIZE - 1); + size = min_t(unsigned int, PAGE_SIZE - page_offset, len); + + memcpy(pages[nr] + page_offset, buf, size); + + len -= size; + buf += size; + offset += size; + } while (len); + + handle->offset = offset; +} + +static void perf_output_end(struct perf_output_handle *handle, int nmi) +{ + if (handle->wakeup) { + (void)atomic_xchg(&handle->data->wakeup, POLL_IN); + __perf_counter_update_userpage(handle->counter, handle->data); if (nmi) { - counter->wakeup_pending = 1; + handle->counter->wakeup_pending = 1; set_perf_counter_pending(); } else - wake_up(&counter->waitq); + wake_up(&handle->counter->waitq); } - ret = 0; -out: rcu_read_unlock(); +} + +static int perf_output_write(struct perf_counter 
*counter, int nmi, + void *buf, ssize_t size) +{ + struct perf_output_handle handle; + int ret; + ret = perf_output_begin(&handle, counter, size); + if (ret) + goto out; + + perf_output_copy(&handle, buf, size); + perf_output_end(&handle, nmi); + +out: return ret; } -- cgit v1.2.3 From 5c1481943250ab65fa5130e05ec479c93216e9f7 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 25 Mar 2009 12:30:23 +0100 Subject: perf_counter: output objects Provide a {type,size} header for each output entry. This should provide extensible output, and the ability to mix multiple streams. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith Cc: Arjan van de Ven Cc: Wu Fengguang Orig-LKML-Reference: <20090325113316.831607932@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 11 +++++++++ kernel/perf_counter.c | 53 ++++++++++++++++++++++++++++++++++---------- 2 files changed, 52 insertions(+), 12 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 48212c15b7d..c256635377d 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -156,6 +156,16 @@ struct perf_counter_mmap_page { __u32 data_head; /* head in the data section */ }; +struct perf_event_header { + __u32 type; + __u32 size; +}; + +enum perf_event_type { + PERF_EVENT_IP = 0, + PERF_EVENT_GROUP = 1, +}; + #ifdef __KERNEL__ /* * Kernel-internal data types and definitions: @@ -260,6 +270,7 @@ struct perf_counter { struct list_head list_entry; struct list_head event_entry; struct list_head sibling_list; + int nr_siblings; struct perf_counter *group_leader; const struct hw_perf_counter_ops *hw_ops; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 0422fd9bf62..d76e3112d38 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -75,8 +75,10 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) */ if (counter->group_leader == counter) list_add_tail(&counter->list_entry, &ctx->counter_list); - else + else { list_add_tail(&counter->list_entry, &group_leader->sibling_list); + group_leader->nr_siblings++; + } list_add_rcu(&counter->event_entry, &ctx->event_list); } @@ -89,6 +91,9 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) list_del_init(&counter->list_entry); list_del_rcu(&counter->event_entry); + if (counter->group_leader != counter) + counter->group_leader->nr_siblings--; + /* * If this was a group counter with sibling counters then * upgrade the siblings to singleton counters by adding them @@ -381,9 +386,11 @@ static int is_software_only_group(struct perf_counter *leader) if (!is_software_counter(leader)) return 0; + list_for_each_entry(counter, &leader->sibling_list, list_entry) if (!is_software_counter(counter)) return 0; + return 1; } @@ -1480,6 +1487,9 @@ static void perf_output_copy(struct perf_output_handle *handle, handle->offset = offset; } +#define perf_output_put(handle, x) \ + perf_output_copy((handle), &(x), sizeof(x)) + static void perf_output_end(struct perf_output_handle *handle, int nmi) { if (handle->wakeup) { @@ -1514,34 +1524,53 @@ out: static void perf_output_simple(struct perf_counter *counter, int nmi, struct pt_regs *regs) { - u64 entry; + struct { + struct perf_event_header header; + u64 ip; + } event; - entry = instruction_pointer(regs); + event.header.type = PERF_EVENT_IP; + event.header.size = sizeof(event); + event.ip = instruction_pointer(regs); - perf_output_write(counter, nmi, &entry, sizeof(entry)); + perf_output_write(counter, nmi, 
&event, sizeof(event)); } -struct group_entry { - u64 event; - u64 counter; -}; - static void perf_output_group(struct perf_counter *counter, int nmi) { + struct perf_output_handle handle; + struct perf_event_header header; struct perf_counter *leader, *sub; + unsigned int size; + struct { + u64 event; + u64 counter; + } entry; + int ret; + + size = sizeof(header) + counter->nr_siblings * sizeof(entry); + + ret = perf_output_begin(&handle, counter, size); + if (ret) + return; + + header.type = PERF_EVENT_GROUP; + header.size = size; + + perf_output_put(&handle, header); leader = counter->group_leader; list_for_each_entry(sub, &leader->sibling_list, list_entry) { - struct group_entry entry; - if (sub != counter) sub->hw_ops->read(sub); entry.event = sub->hw_event.config; entry.counter = atomic64_read(&sub->count); - perf_output_write(counter, nmi, &entry, sizeof(entry)); + perf_output_put(&handle, entry); } + + perf_output_end(&handle, nmi); } void perf_counter_output(struct perf_counter *counter, -- cgit v1.2.3 From 63e35b25d6b5c3136d22ef249dbbf96716aa08bf Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 25 Mar 2009 12:30:24 +0100 Subject: perf_counter: sanity check on the output API Ensure we never write more than we said we would. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith Cc: Arjan van de Ven Cc: Wu Fengguang Orig-LKML-Reference: <20090325113316.921433024@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index d76e3112d38..7669afe82cc 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1422,6 +1422,7 @@ struct perf_output_handle { struct perf_counter *counter; struct perf_mmap_data *data; unsigned int offset; + unsigned int head; int wakeup; }; @@ -1447,6 +1448,7 @@ static int perf_output_begin(struct perf_output_handle *handle, handle->counter = counter; handle->data = data; handle->offset = offset; + handle->head = head; handle->wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT); return 0; @@ -1485,6 +1487,8 @@ static void perf_output_copy(struct perf_output_handle *handle, } while (len); handle->offset = offset; + + WARN_ON_ONCE(handle->offset > handle->head); } #define perf_output_put(handle, x) \ -- cgit v1.2.3 From ea5d20cf99db5d26d43b6d322d3ace17e08a6552 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 25 Mar 2009 12:30:25 +0100 Subject: perf_counter: optionally provide the pid/tid of the sampled task Allow cpu wide counters to profile userspace by providing what process the sample belongs to. This raises the first issue with the output type, lots of these options: group, tid, callchain, etc.. are non-exclusive and could be combined, suggesting a bitfield. However, things like the mmap() data stream doesn't fit in that. How to split the type field... 
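A minimal sketch of how a consumer would decode the resulting records (the struct name here is illustrative; the layout matches what this patch emits, and the kerneltop patch further below decodes the same way):

    struct ip_event {
            struct perf_event_header header;
            __u64 ip;
            __u32 pid, tid;  /* only present when __PERF_EVENT_TID is set */
    };

    static void decode(struct perf_event_header *hdr)
    {
            struct ip_event *e = (struct ip_event *)hdr;

            switch (hdr->type) {
            case PERF_EVENT_IP | __PERF_EVENT_TID:
                    /* e->pid and e->tid are valid here, fall through... */
            case PERF_EVENT_IP:
                    /* ...e->ip always is; hdr->size gives the record length */
                    break;
            }
    }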
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith Cc: Arjan van de Ven Cc: Wu Fengguang Orig-LKML-Reference: <20090325113317.013775235@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 5 ++++- kernel/perf_counter.c | 18 ++++++++++++++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index c256635377d..7fdbdf8be77 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -127,8 +127,9 @@ struct perf_counter_hw_event { exclude_kernel : 1, /* ditto kernel */ exclude_hv : 1, /* ditto hypervisor */ exclude_idle : 1, /* don't count when idle */ + include_tid : 1, /* include the tid */ - __reserved_1 : 55; + __reserved_1 : 54; __u32 extra_config_len; __u32 __reserved_4; @@ -164,6 +165,8 @@ struct perf_event_header { enum perf_event_type { PERF_EVENT_IP = 0, PERF_EVENT_GROUP = 1, + + __PERF_EVENT_TID = 0x100, }; #ifdef __KERNEL__ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 7669afe82cc..f3e1b27bc1b 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1528,16 +1528,30 @@ out: static void perf_output_simple(struct perf_counter *counter, int nmi, struct pt_regs *regs) { + unsigned int size; struct { struct perf_event_header header; u64 ip; + u32 pid, tid; } event; event.header.type = PERF_EVENT_IP; - event.header.size = sizeof(event); event.ip = instruction_pointer(regs); - perf_output_write(counter, nmi, &event, sizeof(event)); + size = sizeof(event); + + if (counter->hw_event.include_tid) { + /* namespace issues */ + event.pid = current->group_leader->pid; + event.tid = current->pid; + + event.header.type |= __PERF_EVENT_TID; + } else + size -= sizeof(u64); + + event.header.size = size; + + perf_output_write(counter, nmi, &event, size); } static void perf_output_group(struct perf_counter *counter, int nmi) -- cgit v1.2.3 From 4c4ba21d2c3659e4c0421533939b58a8fd9f06c9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 25 Mar 2009 12:30:26 +0100 Subject: perf_counter: kerneltop: mmap_pages argument provide a knob to set the number of mmap data pages. Signed-off-by: Mike Galbraith Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith Cc: Arjan van de Ven Cc: Wu Fengguang Orig-LKML-Reference: <20090325113317.104545398@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index 7ebde7a336c..3e45bf6591b 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -178,6 +178,7 @@ static int nr_cpus = 0; static int nmi = 1; static int group = 0; static unsigned int page_size; +static unsigned int mmap_pages = 4; static char *vmlinux; @@ -326,6 +327,7 @@ static void display_help(void) " -x path --vmlinux= # the vmlinux binary, required for -s use\n" " -z --zero # zero counts after display\n" " -D --dump_symtab # dump symbol table to stderr on startup\n" + " -m pages --mmap_pages= # number of mmap data pages\n" ); exit(0); @@ -732,7 +734,9 @@ static int read_symbol(FILE *in, struct sym_entry *s) /* Tag events to be skipped. 
*/ if (!strcmp("default_idle", s->sym) || !strcmp("cpu_idle", s->sym)) s->skip = 1; - if (!strcmp("enter_idle", s->sym) || !strcmp("exit_idle", s->sym)) + else if (!strcmp("enter_idle", s->sym) || !strcmp("exit_idle", s->sym)) + s->skip = 1; + else if (!strcmp("mwait_idle", s->sym)) s->skip = 1; if (filter_match == 1) { @@ -1042,9 +1046,10 @@ static void process_options(int argc, char *argv[]) {"symbol", required_argument, NULL, 's'}, {"stat", no_argument, NULL, 'S'}, {"zero", no_argument, NULL, 'z'}, + {"mmap_pages", required_argument, NULL, 'm'}, {NULL, 0, NULL, 0 } }; - int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hn:p:s:Sx:z", + int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hn:m:p:s:Sx:z", long_options, &option_index); if (c == -1) break; @@ -1081,6 +1086,7 @@ static void process_options(int argc, char *argv[]) case 'S': run_perfstat = 1; break; case 'x': vmlinux = strdup(optarg); break; case 'z': zero = 1; break; + case 'm': mmap_pages = atoi(optarg); break; default: error = 1; break; } } @@ -1134,17 +1140,30 @@ repeat: return head; } +struct timeval last_read, this_read; + static void mmap_read(struct mmap_data *md) { unsigned int head = mmap_read_head(md); unsigned int old = md->prev; unsigned char *data = md->base + page_size; + gettimeofday(&this_read, NULL); + if (head - old > md->mask) { - printf("ERROR: failed to keep up with mmap data\n"); - exit(-1); + struct timeval iv; + unsigned long msecs; + + timersub(&this_read, &last_read, &iv); + msecs = iv.tv_sec*1000 + iv.tv_usec/1000; + + fprintf(stderr, "WARNING: failed to keep up with mmap data. Last read %lu msecs ago.\n", msecs); + + old = head; } + last_read = this_read; + for (; old != head;) { __u64 *ptr = (__u64 *)&data[old & md->mask]; old += sizeof(__u64); @@ -1220,8 +1239,8 @@ int main(int argc, char *argv[]) mmap_array[i][counter].counter = counter; mmap_array[i][counter].prev = 0; - mmap_array[i][counter].mask = 2*page_size - 1; - mmap_array[i][counter].base = mmap(NULL, 3*page_size, + mmap_array[i][counter].mask = mmap_pages*page_size - 1; + mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size, PROT_READ, MAP_SHARED, fd[i][counter], 0); if (mmap_array[i][counter].base == MAP_FAILED) { printf("kerneltop error: failed to mmap with %d (%s)\n", -- cgit v1.2.3 From 00f0ad73ac90e3fba8b4cbe4cf21b2fb9a56cb72 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 25 Mar 2009 12:30:27 +0100 Subject: perf_counter: kerneltop: output event support Teach kerneltop about the new output ABI. XXX: anybody fancy integrating the PID/TID data into the output? Bump the mmap_data pages a little because we bloated the output and have to be more careful about overruns with structured data. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith Cc: Arjan van de Ven Cc: Wu Fengguang Orig-LKML-Reference: <20090325113317.192910290@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 65 ++++++++++++++++++++++++++++++---- 1 file changed, 59 insertions(+), 6 deletions(-) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index 3e45bf6591b..fda1438365d 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -134,6 +134,11 @@ #endif #define unlikely(x) __builtin_expect(!!(x), 0) +#define min(x, y) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? 
_min1 : _min2; }) asmlinkage int sys_perf_counter_open( struct perf_counter_hw_event *hw_event_uptr __user, @@ -178,7 +183,7 @@ static int nr_cpus = 0; static int nmi = 1; static int group = 0; static unsigned int page_size; -static unsigned int mmap_pages = 4; +static unsigned int mmap_pages = 16; static char *vmlinux; @@ -1147,28 +1152,75 @@ static void mmap_read(struct mmap_data *md) unsigned int head = mmap_read_head(md); unsigned int old = md->prev; unsigned char *data = md->base + page_size; + int diff; gettimeofday(&this_read, NULL); - if (head - old > md->mask) { + /* + * If we're further behind than half the buffer, there's a chance + * the writer will bite our tail and screw up the events under us. + * + * If we somehow ended up ahead of the head, we got messed up. + * + * In either case, truncate and restart at head. + */ + diff = head - old; + if (diff > md->mask / 2 || diff < 0) { struct timeval iv; unsigned long msecs; timersub(&this_read, &last_read, &iv); msecs = iv.tv_sec*1000 + iv.tv_usec/1000; - fprintf(stderr, "WARNING: failed to keep up with mmap data. Last read %lu msecs ago.\n", msecs); + fprintf(stderr, "WARNING: failed to keep up with mmap data." + " Last read %lu msecs ago.\n", msecs); + /* + * head points to a known good entry, start there. + */ old = head; } last_read = this_read; for (; old != head;) { - __u64 *ptr = (__u64 *)&data[old & md->mask]; - old += sizeof(__u64); + struct event_struct { + struct perf_event_header header; + __u64 ip; + __u32 pid, tid; + } *event = (struct event_struct *)&data[old & md->mask]; + struct event_struct event_copy; + + unsigned int size = event->header.size; + + /* + * Event straddles the mmap boundary -- header should always + * be inside due to u64 alignment of output. + */ + if ((old & md->mask) + size != ((old + size) & md->mask)) { + unsigned int offset = old; + unsigned int len = sizeof(*event), cpy; + void *dst = &event_copy; + + do { + cpy = min(md->mask + 1 - (offset & md->mask), len); + memcpy(dst, &data[offset & md->mask], cpy); + offset += cpy; + dst += cpy; + len -= cpy; + } while (len); + + event = &event_copy; + } - process_event(*ptr, md->counter); + old += size; + + switch (event->header.type) { + case PERF_EVENT_IP: + case PERF_EVENT_IP | __PERF_EVENT_TID: + process_event(event->ip, md->counter); + break; + } } md->prev = old; @@ -1214,6 +1266,7 @@ int main(int argc, char *argv[]) hw_event.irq_period = event_count[counter]; hw_event.record_type = PERF_RECORD_IRQ; hw_event.nmi = nmi; + hw_event.include_tid = 1; fd[i][counter] = sys_perf_counter_open(&hw_event, tid, cpu, group_fd, 0); if (fd[i][counter] < 0) { -- cgit v1.2.3 From 7730d8655880f41f2ea519aca2ca6a1413dfd2c9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 25 Mar 2009 12:48:31 +0100 Subject: perf_counter: allow and require one-page mmap on counting counters A brainfart stopped single page mmap()s working. The rest of the code should be perfectly fine with not having any data pages. 
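In user-space terms this means the following sketch is now valid (the first mmap()ed page is always the control/status page, optionally followed by a power-of-two number of data pages, as the kerneltop code above also assumes):

    /* Counting counter: mapping just the control page is now allowed. */
    void *ctl = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);

    /* Sampling counter: control page plus 2^n data pages. */
    void *buf = mmap(NULL, (nr_pages + 1) * page_size,
                     PROT_READ, MAP_SHARED, fd, 0);  /* nr_pages = 2^n */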
Reported-by: Paul Mackerras Signed-off-by: Peter Zijlstra Orig-LKML-Reference: <1237981712.7972.812.camel@twins> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index f3e1b27bc1b..95e02575546 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1369,7 +1369,11 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) vma_size = vma->vm_end - vma->vm_start; nr_pages = (vma_size / PAGE_SIZE) - 1; - if (nr_pages == 0 || !is_power_of_2(nr_pages)) + /* + * If we have data pages ensure they're a power-of-two number, so we + * can do bitmasks instead of modulo. + */ + if (nr_pages != 0 && !is_power_of_2(nr_pages)) return -EINVAL; if (vma_size != PAGE_SIZE * (1 + nr_pages)) -- cgit v1.2.3 From 53cfbf593758916aac41db728f029986a62f1254 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 25 Mar 2009 22:46:58 +1100 Subject: perf_counter: record time running and time enabled for each counter Impact: new functionality Currently, if there are more counters enabled than can fit on the CPU, the kernel will multiplex the counters on to the hardware using round-robin scheduling. That isn't too bad for sampling counters, but for counting counters it means that the value read from a counter represents some unknown fraction of the true count of events that occurred while the counter was enabled. This remedies the situation by keeping track of how long each counter is enabled for, and how long it is actually on the cpu and counting events. These times are recorded in nanoseconds using the task clock for per-task counters and the cpu clock for per-cpu counters. These values can be supplied to userspace on a read from the counter. Userspace requests that they be supplied after the counter value by setting the PERF_FORMAT_TOTAL_TIME_ENABLED and/or PERF_FORMAT_TOTAL_TIME_RUNNING bits in the hw_event.read_format field when creating the counter. (There is no way to change the read format after the counter is created, though it would be possible to add some way to do that.) Using this information it is possible for userspace to scale the count it reads from the counter to get an estimate of the true count: true_count_estimate = count * total_time_enabled / total_time_running This also lets userspace detect the situation where the counter never got to go on the cpu: total_time_running == 0. This functionality has been requested by the PAPI developers, and will be generally needed for interpreting the count values from counting counters correctly. In the implementation, this keeps 5 time values (in nanoseconds) for each counter: total_time_enabled and total_time_running are used when the counter is in state OFF or ERROR and for reporting back to userspace. When the counter is in state INACTIVE or ACTIVE, it is the tstamp_enabled, tstamp_running and tstamp_stopped values that are relevant, and total_time_enabled and total_time_running are determined from them. (tstamp_stopped is only used in INACTIVE state.) The reason for doing it like this is that it means that only counters being enabled or disabled at sched-in and sched-out time need to be updated. There are no new loops that iterate over all counters to update total_time_enabled or total_time_running. This also keeps separate child_total_time_running and child_total_time_enabled fields that get added in when reporting the totals to userspace. They are separate fields so that they can be atomic. 
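(Concretely, the user-space read and scaling described above might look like this sketch, assuming both PERF_FORMAT_TOTAL_TIME_ENABLED and PERF_FORMAT_TOTAL_TIME_RUNNING were set in read_format when the counter was created:)

    __u64 v[3];  /* count, total_time_enabled, total_time_running */
    __u64 estimate = 0;

    if (read(fd, v, sizeof(v)) == sizeof(v)) {
            if (v[2])  /* ran for at least some of the time */
                    estimate = v[0] * v[1] / v[2];
            /* v[2] == 0: the counter never got onto the CPU */
    }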
We don't want to use atomics for total_time_running, total_time_enabled etc., because then we would have to use atomic sequences to update them, which are slower than regular arithmetic and memory accesses. It is possible to measure total_time_running by adding a task_clock counter to each group of counters, and total_time_enabled can be measured approximately with a top-level task_clock counter (though inaccuracies will creep in if you need to disable and enable groups since it is not possible in general to disable/enable the top-level task_clock counter simultaneously with another group). However, that adds extra overhead - I measured around 15% increase in the context switch latency reported by lat_ctx (from lmbench) when a task_clock counter was added to each of 2 groups, and around 25% increase when a task_clock counter was added to each of 4 groups. (In both cases a top-level task-clock counter was also added.) In contrast, the code added in this commit gives better information with no overhead that I could measure (in fact in some cases I measured lower times with this code, but the differences were all less than one standard deviation). [ v2: address review comments by Andrew Morton. ] Signed-off-by: Paul Mackerras Acked-by: Peter Zijlstra Cc: Andrew Morton Orig-LKML-Reference: <18890.6578.728637.139402@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 2 + include/linux/perf_counter.h | 53 +++++++++++++ kernel/perf_counter.c | 157 ++++++++++++++++++++++++++++++++----- 3 files changed, 191 insertions(+), 21 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index d48596ab655..df007fe0cc0 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -455,6 +455,8 @@ static void counter_sched_in(struct perf_counter *counter, int cpu) { counter->state = PERF_COUNTER_STATE_ACTIVE; counter->oncpu = cpu; + counter->tstamp_running += counter->ctx->time_now - + counter->tstamp_stopped; if (is_software_counter(counter)) counter->hw_ops->enable(counter); } diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 7fdbdf8be77..6bf67ce1762 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -102,6 +102,16 @@ enum perf_counter_record_type { #define PERF_COUNTER_EVENT_SHIFT 0 #define PERF_COUNTER_EVENT_MASK __PERF_COUNTER_MASK(EVENT) +/* + * Bits that can be set in hw_event.read_format to request that + * reads on the counter should return the indicated quantities, + * in increasing order of bit value, after the counter value. + */ +enum perf_counter_read_format { + PERF_FORMAT_TOTAL_TIME_ENABLED = 1, + PERF_FORMAT_TOTAL_TIME_RUNNING = 2, +}; + /* * Hardware event to monitor via a performance monitoring counter: */ @@ -281,6 +291,32 @@ struct perf_counter { enum perf_counter_active_state prev_state; atomic64_t count; + /* + * These are the total time in nanoseconds that the counter + * has been enabled (i.e. eligible to run, and the task has + * been scheduled in, if this is a per-task counter) + * and running (scheduled onto the CPU), respectively. + * + * They are computed from tstamp_enabled, tstamp_running and + * tstamp_stopped when the counter is in INACTIVE or ACTIVE state. 
+ */ + u64 total_time_enabled; + u64 total_time_running; + + /* + * These are timestamps used for computing total_time_enabled + * and total_time_running when the counter is in INACTIVE or + * ACTIVE state, measured in nanoseconds from an arbitrary point + * in time. + * tstamp_enabled: the notional time when the counter was enabled + * tstamp_running: the notional time when the counter was scheduled on + * tstamp_stopped: in INACTIVE state, the notional time when the + * counter was scheduled off. + */ + u64 tstamp_enabled; + u64 tstamp_running; + u64 tstamp_stopped; + struct perf_counter_hw_event hw_event; struct hw_perf_counter hw; @@ -291,6 +327,13 @@ struct perf_counter { struct perf_counter *parent; struct list_head child_list; + /* + * These accumulate total time (in nanoseconds) that children + * counters have been enabled and running, respectively. + */ + atomic64_t child_total_time_enabled; + atomic64_t child_total_time_running; + /* * Protect attach/detach and child_list: */ @@ -339,6 +382,16 @@ struct perf_counter_context { int nr_active; int is_active; struct task_struct *task; + + /* + * time_now is the current time in nanoseconds since an arbitrary + * point in the past. For per-task counters, this is based on the + * task clock, and for per-cpu counters it is based on the cpu clock. + * time_lost is an offset from the task/cpu clock, used to make it + * appear that time only passes while the context is scheduled in. + */ + u64 time_now; + u64 time_lost; #endif }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 95e02575546..3b862a7988c 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -116,6 +116,7 @@ counter_sched_out(struct perf_counter *counter, return; counter->state = PERF_COUNTER_STATE_INACTIVE; + counter->tstamp_stopped = ctx->time_now; counter->hw_ops->disable(counter); counter->oncpu = -1; @@ -251,6 +252,60 @@ retry: spin_unlock_irq(&ctx->lock); } +/* + * Get the current time for this context. + * If this is a task context, we use the task's task clock, + * or for a per-cpu context, we use the cpu clock. + */ +static u64 get_context_time(struct perf_counter_context *ctx, int update) +{ + struct task_struct *curr = ctx->task; + + if (!curr) + return cpu_clock(smp_processor_id()); + + return __task_delta_exec(curr, update) + curr->se.sum_exec_runtime; +} + +/* + * Update the record of the current time in a context. + */ +static void update_context_time(struct perf_counter_context *ctx, int update) +{ + ctx->time_now = get_context_time(ctx, update) - ctx->time_lost; +} + +/* + * Update the total_time_enabled and total_time_running fields for a counter. + */ +static void update_counter_times(struct perf_counter *counter) +{ + struct perf_counter_context *ctx = counter->ctx; + u64 run_end; + + if (counter->state >= PERF_COUNTER_STATE_INACTIVE) { + counter->total_time_enabled = ctx->time_now - + counter->tstamp_enabled; + if (counter->state == PERF_COUNTER_STATE_INACTIVE) + run_end = counter->tstamp_stopped; + else + run_end = ctx->time_now; + counter->total_time_running = run_end - counter->tstamp_running; + } +} + +/* + * Update total_time_enabled and total_time_running for all counters in a group. 
+ */ +static void update_group_times(struct perf_counter *leader) +{ + struct perf_counter *counter; + + update_counter_times(leader); + list_for_each_entry(counter, &leader->sibling_list, list_entry) + update_counter_times(counter); +} + /* * Cross CPU call to disable a performance counter */ @@ -276,6 +331,8 @@ static void __perf_counter_disable(void *info) * If it is in error state, leave it in error state. */ if (counter->state >= PERF_COUNTER_STATE_INACTIVE) { + update_context_time(ctx, 1); + update_counter_times(counter); if (counter == counter->group_leader) group_sched_out(counter, cpuctx, ctx); else @@ -320,8 +377,10 @@ static void perf_counter_disable(struct perf_counter *counter) * Since we have the lock this context can't be scheduled * in, so we can change the state safely. */ - if (counter->state == PERF_COUNTER_STATE_INACTIVE) + if (counter->state == PERF_COUNTER_STATE_INACTIVE) { + update_counter_times(counter); counter->state = PERF_COUNTER_STATE_OFF; + } spin_unlock_irq(&ctx->lock); } @@ -366,6 +425,8 @@ counter_sched_in(struct perf_counter *counter, return -EAGAIN; } + counter->tstamp_running += ctx->time_now - counter->tstamp_stopped; + if (!is_software_counter(counter)) cpuctx->active_oncpu++; ctx->nr_active++; @@ -425,6 +486,17 @@ static int group_can_go_on(struct perf_counter *counter, return can_add_hw; } +static void add_counter_to_ctx(struct perf_counter *counter, + struct perf_counter_context *ctx) +{ + list_add_counter(counter, ctx); + ctx->nr_counters++; + counter->prev_state = PERF_COUNTER_STATE_OFF; + counter->tstamp_enabled = ctx->time_now; + counter->tstamp_running = ctx->time_now; + counter->tstamp_stopped = ctx->time_now; +} + /* * Cross CPU call to install and enable a performance counter */ @@ -449,6 +521,7 @@ static void __perf_install_in_context(void *info) curr_rq_lock_irq_save(&flags); spin_lock(&ctx->lock); + update_context_time(ctx, 1); /* * Protect the list operation against NMI by disabling the @@ -456,9 +529,7 @@ static void __perf_install_in_context(void *info) */ perf_flags = hw_perf_save_disable(); - list_add_counter(counter, ctx); - ctx->nr_counters++; - counter->prev_state = PERF_COUNTER_STATE_OFF; + add_counter_to_ctx(counter, ctx); /* * Don't put the counter on if it is disabled or if @@ -486,8 +557,10 @@ static void __perf_install_in_context(void *info) */ if (leader != counter) group_sched_out(leader, cpuctx, ctx); - if (leader->hw_event.pinned) + if (leader->hw_event.pinned) { + update_group_times(leader); leader->state = PERF_COUNTER_STATE_ERROR; + } } if (!err && !ctx->task && cpuctx->max_pertask) @@ -548,10 +621,8 @@ retry: * can add the counter safely, if it the call above did not * succeed. 
*/ - if (list_empty(&counter->list_entry)) { - list_add_counter(counter, ctx); - ctx->nr_counters++; - } + if (list_empty(&counter->list_entry)) + add_counter_to_ctx(counter, ctx); spin_unlock_irq(&ctx->lock); } @@ -576,11 +647,13 @@ static void __perf_counter_enable(void *info) curr_rq_lock_irq_save(&flags); spin_lock(&ctx->lock); + update_context_time(ctx, 1); counter->prev_state = counter->state; if (counter->state >= PERF_COUNTER_STATE_INACTIVE) goto unlock; counter->state = PERF_COUNTER_STATE_INACTIVE; + counter->tstamp_enabled = ctx->time_now - counter->total_time_enabled; /* * If the counter is in a group and isn't the group leader, @@ -602,8 +675,10 @@ static void __perf_counter_enable(void *info) */ if (leader != counter) group_sched_out(leader, cpuctx, ctx); - if (leader->hw_event.pinned) + if (leader->hw_event.pinned) { + update_group_times(leader); leader->state = PERF_COUNTER_STATE_ERROR; + } } unlock: @@ -659,8 +734,11 @@ static void perf_counter_enable(struct perf_counter *counter) * Since we have the lock this context can't be scheduled * in, so we can change the state safely. */ - if (counter->state == PERF_COUNTER_STATE_OFF) + if (counter->state == PERF_COUNTER_STATE_OFF) { counter->state = PERF_COUNTER_STATE_INACTIVE; + counter->tstamp_enabled = ctx->time_now - + counter->total_time_enabled; + } out: spin_unlock_irq(&ctx->lock); } @@ -693,6 +771,7 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx, ctx->is_active = 0; if (likely(!ctx->nr_counters)) goto out; + update_context_time(ctx, 0); flags = hw_perf_save_disable(); if (ctx->nr_active) { @@ -797,6 +876,13 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, if (likely(!ctx->nr_counters)) goto out; + /* + * Add any time since the last sched_out to the lost time + * so it doesn't get included in the total_time_enabled and + * total_time_running measures for counters in the context. + */ + ctx->time_lost = get_context_time(ctx, 0) - ctx->time_now; + flags = hw_perf_save_disable(); /* @@ -817,8 +903,10 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, * If this pinned group hasn't been scheduled, * put it in error state. 
*/ - if (counter->state == PERF_COUNTER_STATE_INACTIVE) + if (counter->state == PERF_COUNTER_STATE_INACTIVE) { + update_group_times(counter); counter->state = PERF_COUNTER_STATE_ERROR; + } } list_for_each_entry(counter, &ctx->counter_list, list_entry) { @@ -902,8 +990,10 @@ int perf_counter_task_disable(void) perf_flags = hw_perf_save_disable(); list_for_each_entry(counter, &ctx->counter_list, list_entry) { - if (counter->state != PERF_COUNTER_STATE_ERROR) + if (counter->state != PERF_COUNTER_STATE_ERROR) { + update_group_times(counter); counter->state = PERF_COUNTER_STATE_OFF; + } } hw_perf_restore(perf_flags); @@ -946,6 +1036,8 @@ int perf_counter_task_enable(void) if (counter->state > PERF_COUNTER_STATE_OFF) continue; counter->state = PERF_COUNTER_STATE_INACTIVE; + counter->tstamp_enabled = ctx->time_now - + counter->total_time_enabled; counter->hw_event.disabled = 0; } hw_perf_restore(perf_flags); @@ -1009,10 +1101,14 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) static void __read(void *info) { struct perf_counter *counter = info; + struct perf_counter_context *ctx = counter->ctx; unsigned long flags; curr_rq_lock_irq_save(&flags); + if (ctx->is_active) + update_context_time(ctx, 1); counter->hw_ops->read(counter); + update_counter_times(counter); curr_rq_unlock_irq_restore(&flags); } @@ -1025,6 +1121,8 @@ static u64 perf_counter_read(struct perf_counter *counter) if (counter->state == PERF_COUNTER_STATE_ACTIVE) { smp_call_function_single(counter->oncpu, __read, counter, 1); + } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) { + update_counter_times(counter); } return atomic64_read(&counter->count); @@ -1137,10 +1235,8 @@ static int perf_release(struct inode *inode, struct file *file) static ssize_t perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) { - u64 cntval; - - if (count < sizeof(cntval)) - return -EINVAL; + u64 values[3]; + int n; /* * Return end-of-file for a read on a counter that is in @@ -1151,10 +1247,24 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) return 0; mutex_lock(&counter->mutex); - cntval = perf_counter_read(counter); + values[0] = perf_counter_read(counter); + n = 1; + if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + values[n++] = counter->total_time_enabled + + atomic64_read(&counter->child_total_time_enabled); + if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + values[n++] = counter->total_time_running + + atomic64_read(&counter->child_total_time_running); mutex_unlock(&counter->mutex); - return put_user(cntval, (u64 __user *) buf) ? 
-EFAULT : sizeof(cntval); + if (count < n * sizeof(u64)) + return -EINVAL; + count = n * sizeof(u64); + + if (copy_to_user(buf, values, count)) + return -EFAULT; + + return count; } static ssize_t @@ -2290,8 +2400,7 @@ inherit_counter(struct perf_counter *parent_counter, * Link it up in the child's context: */ child_counter->task = child; - list_add_counter(child_counter, child_ctx); - child_ctx->nr_counters++; + add_counter_to_ctx(child_counter, child_ctx); child_counter->parent = parent_counter; /* @@ -2361,6 +2470,10 @@ static void sync_child_counter(struct perf_counter *child_counter, * Add back the child's count to the parent's count: */ atomic64_add(child_val, &parent_counter->count); + atomic64_add(child_counter->total_time_enabled, + &parent_counter->child_total_time_enabled); + atomic64_add(child_counter->total_time_running, + &parent_counter->child_total_time_running); /* * Remove this counter from the parent's list @@ -2395,6 +2508,7 @@ __perf_counter_exit_task(struct task_struct *child, if (child != current) { wait_task_inactive(child, 0); list_del_init(&child_counter->list_entry); + update_counter_times(child_counter); } else { struct perf_cpu_context *cpuctx; unsigned long flags; @@ -2412,6 +2526,7 @@ __perf_counter_exit_task(struct task_struct *child, cpuctx = &__get_cpu_var(perf_cpu_context); group_sched_out(child_counter, cpuctx, child_ctx); + update_counter_times(child_counter); list_del_init(&child_counter->list_entry); -- cgit v1.2.3 From 925d519ab82b6dd7aca9420d809ee83819c08db2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 30 Mar 2009 19:07:02 +0200 Subject: perf_counter: unify and fix delayed counter wakeup While going over the wakeup code I noticed delayed wakeups only work for hardware counters but basically all software counters rely on them. This patch unifies and generalizes the delayed wakeup to fix this issue. Since we're dealing with NMI context bits here, use a cmpxchg() based single linked list implementation to track counters that have pending wakeups. [ This should really be generic code for delayed wakeups, but since we cannot use cmpxchg()/xchg() in generic code, I've let it live in the perf_counter code. -- Eric Dumazet could use it to aggregate the network wakeups. ] Furthermore, the x86 method of using TIF flags was flawed in that it's quite possible to end up setting the bit on the idle task, losing the wakeup. The powerpc method uses per-cpu storage and does appear to be sufficient.
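[ The queueing trick in isolation -- a minimal user-space sketch, with C11 atomics standing in for the kernel's cmpxchg()/xchg(); the names are illustrative, and the kernel version below additionally keeps one list head per CPU: ]

#include <stdatomic.h>
#include <stddef.h>

struct pending_entry {
        _Atomic(struct pending_entry *) next;   /* NULL means: not queued */
};

/* Sentinel marking the end of the list; never dereferenced. */
#define PENDING_TAIL ((struct pending_entry *)-1UL)

static _Atomic(struct pending_entry *) pending_head = PENDING_TAIL;

/* Queue an entry unless it is already queued; takes no locks and never
 * blocks, which is what makes it usable from NMI context. */
static void pending_queue(struct pending_entry *entry)
{
        struct pending_entry *expected = NULL;
        struct pending_entry *prev;

        /* Claim the entry: a concurrent caller sees non-NULL and backs
         * off, so each entry is queued at most once. */
        if (!atomic_compare_exchange_strong(&entry->next, &expected,
                                            PENDING_TAIL))
                return;

        /* Push onto the list head with a cmpxchg() loop. */
        prev = atomic_load(&pending_head);
        do {
                atomic_store(&entry->next, prev);
        } while (!atomic_compare_exchange_weak(&pending_head, &prev, entry));
}

/* Detach the whole list with a single xchg(), then walk it. */
static void pending_run(void (*wakeup)(struct pending_entry *))
{
        struct pending_entry *list;

        list = atomic_exchange(&pending_head, PENDING_TAIL);
        while (list != PENDING_TAIL) {
                struct pending_entry *next = atomic_load(&list->next);

                atomic_store(&list->next, NULL);        /* mark unqueued */
                wakeup(list);
                list = next;
        }
}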
Signed-off-by: Peter Zijlstra Acked-by: Paul Mackerras Orig-LKML-Reference: <20090330171023.153932974@chello.nl> Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/hw_irq.h | 4 +- arch/powerpc/kernel/irq.c | 2 +- arch/powerpc/kernel/perf_counter.c | 22 +------ arch/x86/include/asm/perf_counter.h | 5 +- arch/x86/include/asm/thread_info.h | 4 +- arch/x86/kernel/cpu/perf_counter.c | 29 -------- arch/x86/kernel/signal.c | 6 -- include/linux/perf_counter.h | 15 +++-- kernel/perf_counter.c | 128 +++++++++++++++++++++++++++++++++--- kernel/timer.c | 3 + 10 files changed, 142 insertions(+), 76 deletions(-) diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index cb32d571c9c..20a44d0c9fd 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -132,7 +132,7 @@ static inline int irqs_disabled_flags(unsigned long flags) struct irq_chip; #ifdef CONFIG_PERF_COUNTERS -static inline unsigned long get_perf_counter_pending(void) +static inline unsigned long test_perf_counter_pending(void) { unsigned long x; @@ -160,7 +160,7 @@ extern void perf_counter_do_pending(void); #else -static inline unsigned long get_perf_counter_pending(void) +static inline unsigned long test_perf_counter_pending(void) { return 0; } diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 469e9635ff0..2cd471f92fe 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -135,7 +135,7 @@ notrace void raw_local_irq_restore(unsigned long en) iseries_handle_interrupts(); } - if (get_perf_counter_pending()) { + if (test_perf_counter_pending()) { clear_perf_counter_pending(); perf_counter_do_pending(); } diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index df007fe0cc0..cde720fc495 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -649,24 +649,6 @@ hw_perf_counter_init(struct perf_counter *counter) return &power_perf_ops; } -/* - * Handle wakeups. - */ -void perf_counter_do_pending(void) -{ - int i; - struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters); - struct perf_counter *counter; - - for (i = 0; i < cpuhw->n_counters; ++i) { - counter = cpuhw->counter[i]; - if (counter && counter->wakeup_pending) { - counter->wakeup_pending = 0; - wake_up(&counter->waitq); - } - } -} - /* * A counter has overflowed; update its count and record * things if requested. Note that interrupts are hard-disabled @@ -720,7 +702,7 @@ static void perf_counter_interrupt(struct pt_regs *regs) struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters); struct perf_counter *counter; long val; - int need_wakeup = 0, found = 0; + int found = 0; for (i = 0; i < cpuhw->n_counters; ++i) { counter = cpuhw->counter[i]; @@ -761,7 +743,7 @@ static void perf_counter_interrupt(struct pt_regs *regs) * immediately; otherwise we'll have do the wakeup when interrupts * get soft-enabled. 
*/ - if (get_perf_counter_pending() && regs->softe) { + if (test_perf_counter_pending() && regs->softe) { irq_enter(); clear_perf_counter_pending(); perf_counter_do_pending(); diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h index 1662043b340..e2b0e66b235 100644 --- a/arch/x86/include/asm/perf_counter.h +++ b/arch/x86/include/asm/perf_counter.h @@ -84,8 +84,9 @@ union cpuid10_edx { #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b #define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2) -#define set_perf_counter_pending() \ - set_tsk_thread_flag(current, TIF_PERF_COUNTERS); +#define set_perf_counter_pending() do { } while (0) +#define clear_perf_counter_pending() do { } while (0) +#define test_perf_counter_pending() (0) #ifdef CONFIG_PERF_COUNTERS extern void init_hw_perf_counters(void); diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 3ffd5d2a367..8820a73ae09 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -83,7 +83,6 @@ struct thread_info { #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ -#define TIF_PERF_COUNTERS 11 /* notify perf counter work */ #define TIF_NOTSC 16 /* TSC is not accessible in userland */ #define TIF_IA32 17 /* 32bit process */ #define TIF_FORK 18 /* ret_from_fork */ @@ -107,7 +106,6 @@ struct thread_info { #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) -#define _TIF_PERF_COUNTERS (1 << TIF_PERF_COUNTERS) #define _TIF_NOTSC (1 << TIF_NOTSC) #define _TIF_IA32 (1 << TIF_IA32) #define _TIF_FORK (1 << TIF_FORK) @@ -141,7 +139,7 @@ struct thread_info { /* Only used for 64 bit */ #define _TIF_DO_NOTIFY_MASK \ - (_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_PERF_COUNTERS|_TIF_NOTIFY_RESUME) + (_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_NOTIFY_RESUME) /* flags to check in __switch_to() */ #define _TIF_WORK_CTXSW \ diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 3f95b0cdc55..7aab177fb56 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -227,7 +227,6 @@ static int __hw_perf_counter_init(struct perf_counter *counter) */ hwc->config |= pmc_ops->event_map(perf_event_id(hw_event)); } - counter->wakeup_pending = 0; return 0; } @@ -773,34 +772,6 @@ void smp_perf_counter_interrupt(struct pt_regs *regs) irq_exit(); } -/* - * This handler is triggered by NMI contexts: - */ -void perf_counter_notify(struct pt_regs *regs) -{ - struct cpu_hw_counters *cpuc; - unsigned long flags; - int bit, cpu; - - local_irq_save(flags); - cpu = smp_processor_id(); - cpuc = &per_cpu(cpu_hw_counters, cpu); - - for_each_bit(bit, cpuc->used, X86_PMC_IDX_MAX) { - struct perf_counter *counter = cpuc->counters[bit]; - - if (!counter) - continue; - - if (counter->wakeup_pending) { - counter->wakeup_pending = 0; - wake_up(&counter->waitq); - } - } - - local_irq_restore(flags); -} - void perf_counters_lapic_init(int nmi) { u32 apic_val; diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 611615a92c9..0a813b17b17 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -6,7 +6,6 @@ * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes * 2000-2002 x86-64 support by Andi Kleen */ -#include #include #include #include @@ -872,11 +871,6 @@ do_notify_resume(struct pt_regs *regs, void *unused, 
__u32 thread_info_flags) tracehook_notify_resume(regs); } - if (thread_info_flags & _TIF_PERF_COUNTERS) { - clear_thread_flag(TIF_PERF_COUNTERS); - perf_counter_notify(regs); - } - #ifdef CONFIG_X86_32 clear_thread_flag(TIF_IRET); #endif /* CONFIG_X86_32 */ diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 6bf67ce1762..0d833228eee 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -275,6 +275,10 @@ struct perf_mmap_data { void *data_pages[0]; }; +struct perf_wakeup_entry { + struct perf_wakeup_entry *next; +}; + /** * struct perf_counter - performance counter kernel representation: */ @@ -350,7 +354,7 @@ struct perf_counter { /* poll related */ wait_queue_head_t waitq; /* optional: for NMIs */ - int wakeup_pending; + struct perf_wakeup_entry wakeup; void (*destroy)(struct perf_counter *); struct rcu_head rcu_head; @@ -427,7 +431,7 @@ extern void perf_counter_task_sched_out(struct task_struct *task, int cpu); extern void perf_counter_task_tick(struct task_struct *task, int cpu); extern void perf_counter_init_task(struct task_struct *child); extern void perf_counter_exit_task(struct task_struct *child); -extern void perf_counter_notify(struct pt_regs *regs); +extern void perf_counter_do_pending(void); extern void perf_counter_print_debug(void); extern void perf_counter_unthrottle(void); extern u64 hw_perf_save_disable(void); @@ -461,7 +465,7 @@ static inline void perf_counter_task_tick(struct task_struct *task, int cpu) { } static inline void perf_counter_init_task(struct task_struct *child) { } static inline void perf_counter_exit_task(struct task_struct *child) { } -static inline void perf_counter_notify(struct pt_regs *regs) { } +static inline void perf_counter_do_pending(void) { } static inline void perf_counter_print_debug(void) { } static inline void perf_counter_unthrottle(void) { } static inline void hw_perf_restore(u64 ctrl) { } @@ -469,8 +473,9 @@ static inline u64 hw_perf_save_disable(void) { return 0; } static inline int perf_counter_task_disable(void) { return -EINVAL; } static inline int perf_counter_task_enable(void) { return -EINVAL; } -static inline void perf_swcounter_event(u32 event, u64 nr, - int nmi, struct pt_regs *regs) { } +static inline void +perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs) { } + #endif #endif /* __KERNEL__ */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 3b862a7988c..f70ff80e79d 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1197,8 +1197,12 @@ static void free_counter_rcu(struct rcu_head *head) kfree(counter); } +static void perf_pending_sync(struct perf_counter *counter); + static void free_counter(struct perf_counter *counter) { + perf_pending_sync(counter); + if (counter->destroy) counter->destroy(counter); @@ -1528,6 +1532,118 @@ static const struct file_operations perf_fops = { .mmap = perf_mmap, }; +/* + * Perf counter wakeup + * + * If there's data, ensure we set the poll() state and publish everything + * to user-space before waking everybody up. + */ + +void perf_counter_wakeup(struct perf_counter *counter) +{ + struct perf_mmap_data *data; + + rcu_read_lock(); + data = rcu_dereference(counter->data); + if (data) { + (void)atomic_xchg(&data->wakeup, POLL_IN); + __perf_counter_update_userpage(counter, data); + } + rcu_read_unlock(); + + wake_up_all(&counter->waitq); +} + +/* + * Pending wakeups + * + * Handle the case where we need to wakeup up from NMI (or rq->lock) context. 
+ * + * The NMI bit means we cannot possibly take locks. Therefore, maintain a + * single linked list and use cmpxchg() to add entries lockless. + */ + +#define PENDING_TAIL ((struct perf_wakeup_entry *)-1UL) + +static DEFINE_PER_CPU(struct perf_wakeup_entry *, perf_wakeup_head) = { + PENDING_TAIL, +}; + +static void perf_pending_queue(struct perf_counter *counter) +{ + struct perf_wakeup_entry **head; + struct perf_wakeup_entry *prev, *next; + + if (cmpxchg(&counter->wakeup.next, NULL, PENDING_TAIL) != NULL) + return; + + head = &get_cpu_var(perf_wakeup_head); + + do { + prev = counter->wakeup.next = *head; + next = &counter->wakeup; + } while (cmpxchg(head, prev, next) != prev); + + set_perf_counter_pending(); + + put_cpu_var(perf_wakeup_head); +} + +static int __perf_pending_run(void) +{ + struct perf_wakeup_entry *list; + int nr = 0; + + list = xchg(&__get_cpu_var(perf_wakeup_head), PENDING_TAIL); + while (list != PENDING_TAIL) { + struct perf_counter *counter = container_of(list, + struct perf_counter, wakeup); + + list = list->next; + + counter->wakeup.next = NULL; + /* + * Ensure we observe the unqueue before we issue the wakeup, + * so that we won't be waiting forever. + * -- see perf_not_pending(). + */ + smp_wmb(); + + perf_counter_wakeup(counter); + nr++; + } + + return nr; +} + +static inline int perf_not_pending(struct perf_counter *counter) +{ + /* + * If we flush on whatever cpu we run, there is a chance we don't + * need to wait. + */ + get_cpu(); + __perf_pending_run(); + put_cpu(); + + /* + * Ensure we see the proper queue state before going to sleep + * so that we do not miss the wakeup. -- see perf_pending_handle() + */ + smp_rmb(); + return counter->wakeup.next == NULL; +} + +static void perf_pending_sync(struct perf_counter *counter) +{ + wait_event(counter->waitq, perf_not_pending(counter)); +} + +void perf_counter_do_pending(void) +{ + __perf_pending_run(); +} + /* * Output */ @@ -1611,13 +1727,10 @@ static void perf_output_copy(struct perf_output_handle *handle, static void perf_output_end(struct perf_output_handle *handle, int nmi) { if (handle->wakeup) { - (void)atomic_xchg(&handle->data->wakeup, POLL_IN); - __perf_counter_update_userpage(handle->counter, handle->data); - if (nmi) { - handle->counter->wakeup_pending = 1; - set_perf_counter_pending(); - } else - wake_up(&handle->counter->waitq); + if (nmi) + perf_pending_queue(handle->counter); + else + perf_counter_wakeup(handle->counter); } rcu_read_unlock(); } @@ -2211,7 +2324,6 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, counter->cpu = cpu; counter->hw_event = *hw_event; - counter->wakeup_pending = 0; counter->group_leader = group_leader; counter->hw_ops = NULL; counter->ctx = ctx; diff --git a/kernel/timer.c b/kernel/timer.c index b4555568b4e..672ca25fbc4 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -1167,6 +1168,8 @@ static void run_timer_softirq(struct softirq_action *h) { struct tvec_base *base = __get_cpu_var(tvec_bases); + perf_counter_do_pending(); + hrtimer_run_pending(); if (time_after_eq(jiffies, base->timer_jiffies)) -- cgit v1.2.3 From 38ff667b321b00f5e6830e93fb4ab11a653a2920 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 30 Mar 2009 19:07:03 +0200 Subject: perf_counter: fix update_userpage() It just occurred to me it is possible to have multiple contending updates of the userpage (mmap information vs overflow vs counter). This would break the seqlock logic.
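[ Why nesting breaks it, in miniature: the write side of such a seqlock looks like the sketch below (illustrative user-space code, not the kernel's). If a second updater -- say, an NMI -- interrupts the first between the two increments, the sequence count can pass through an even value while index/offset are half old and half new, and a reader will accept the torn pair. ]

#include <stdint.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

struct counter_page {
        uint32_t lock;          /* seqcount: odd while an update is in flight */
        uint32_t index;
        int64_t offset;
};

/* Single-writer update; readers retry while `lock` is odd or has changed. */
static void userpg_update(struct counter_page *pg,
                          uint32_t index, int64_t offset)
{
        ++pg->lock;             /* enter: count becomes odd */
        barrier();
        pg->index = index;
        pg->offset = offset;
        barrier();
        ++pg->lock;             /* exit: count becomes even again */
}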
It appears the arch code uses this from NMI context, so we cannot possibly serialize its use; therefore, separate the data_head update from it and let it return to its original use. The arch code needs to make sure there are no contending callers by disabling the counter before using it -- powerpc appears to do this nicely. Signed-off-by: Peter Zijlstra Acked-by: Paul Mackerras Orig-LKML-Reference: <20090330171023.241410660@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 35 +++++++++++++++++++++++++++++++++++ kernel/perf_counter.c | 38 +++++++++++++++++++++++--------------- 2 files changed, 58 insertions(+), 15 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 0d833228eee..8ac18852dcf 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -160,10 +160,45 @@ struct perf_counter_hw_event { struct perf_counter_mmap_page { __u32 version; /* version number of this structure */ __u32 compat_version; /* lowest version this is compat with */ + + /* + * Bits needed to read the hw counters in user-space. + * + * The index and offset should be read atomically using the seqlock: + * + * __u32 seq, index; + * __s64 offset; + * + * again: + * rmb(); + * seq = pc->lock; + * + * if (unlikely(seq & 1)) { + * cpu_relax(); + * goto again; + * } + * + * index = pc->index; + * offset = pc->offset; + * + * rmb(); + * if (pc->lock != seq) + * goto again; + * + * After this, index contains architecture specific counter index + 1, + * so that 0 means unavailable, offset contains the value to be added + * to the result of the raw timer read to obtain this counter's value. + */ __u32 lock; /* seqlock for synchronization */ __u32 index; /* hardware counter identifier */ __s64 offset; /* add to hardware counter value */ + /* + * Control data for the mmap() data buffer. + * + * User-space reading this value should issue an rmb(), on SMP capable + * platforms, after reading this value -- see perf_counter_wakeup(). + */ __u32 data_head; /* head in the data section */ }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index f70ff80e79d..c95e92329b9 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1316,10 +1316,22 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return err; } -static void __perf_counter_update_userpage(struct perf_counter *counter, - struct perf_mmap_data *data) +/* + * Callers need to ensure there can be no nesting of this function, otherwise + * the seqlock logic goes bad. We can not serialize this because the arch + * code calls this from NMI context.
+ */ +void perf_counter_update_userpage(struct perf_counter *counter) { - struct perf_counter_mmap_page *userpg = data->user_page; + struct perf_mmap_data *data; + struct perf_counter_mmap_page *userpg; + + rcu_read_lock(); + data = rcu_dereference(counter->data); + if (!data) + goto unlock; + + userpg = data->user_page; /* * Disable preemption so as to not let the corresponding user-space @@ -1333,20 +1345,10 @@ static void __perf_counter_update_userpage(struct perf_counter *counter, if (counter->state == PERF_COUNTER_STATE_ACTIVE) userpg->offset -= atomic64_read(&counter->hw.prev_count); - userpg->data_head = atomic_read(&data->head); smp_wmb(); ++userpg->lock; preempt_enable(); -} - -void perf_counter_update_userpage(struct perf_counter *counter) -{ - struct perf_mmap_data *data; - - rcu_read_lock(); - data = rcu_dereference(counter->data); - if (data) - __perf_counter_update_userpage(counter, data); +unlock: rcu_read_unlock(); } @@ -1547,7 +1549,13 @@ void perf_counter_wakeup(struct perf_counter *counter) data = rcu_dereference(counter->data); if (data) { (void)atomic_xchg(&data->wakeup, POLL_IN); - __perf_counter_update_userpage(counter, data); + /* + * Ensure all data writes are issued before updating the + * user-space data head information. The matching rmb() + * will be in userspace after reading this value. + */ + smp_wmb(); + data->user_page->data_head = atomic_read(&data->head); } rcu_read_unlock(); -- cgit v1.2.3 From 195564390210977954fe4ef45b39cdee34f41b59 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 30 Mar 2009 19:07:04 +0200 Subject: perf_counter: kerneltop: simplify data_head read Now that the kernel side changed, match up again. Signed-off-by: Peter Zijlstra Acked-by: Paul Mackerras Orig-LKML-Reference: <20090330171023.327144324@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index fda1438365d..2779c57ad4b 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -1125,22 +1125,10 @@ struct mmap_data { static unsigned int mmap_read_head(struct mmap_data *md) { struct perf_counter_mmap_page *pc = md->base; - unsigned int seq, head; - -repeat: - rmb(); - seq = pc->lock; - - if (unlikely(seq & 1)) { - cpu_relax(); - goto repeat; - } + int head; head = pc->data_head; - rmb(); - if (pc->lock != seq) - goto repeat; return head; } -- cgit v1.2.3 From 0a4a93919bdc5cee48fe4367591e8e0449c1086c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 30 Mar 2009 19:07:05 +0200 Subject: perf_counter: executable mmap() information Currently the profiling information returns userspace IPs but no way to correlate them to userspace code. Userspace could look into /proc/$pid/maps but that might not be current or even present anymore at the time of analyzing the IPs. Therefore provide means to track the mmap information and provide it in the output stream. XXX: only covers mmap()/munmap(), mremap() and mprotect() are missing. 
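[ What a consumer can do with these records, sketched: keep one entry per PERF_EVENT_MMAP record (dropping it again on PERF_EVENT_MUNMAP) and attribute sampled user-space IPs to a file and offset. The helper and its linear lookup are hypothetical, and a 4K page size is assumed for the pgoff conversion: ]

#include <stdint.h>
#include <stddef.h>

struct map_entry {
        uint64_t start, len, pgoff;     /* as carried in the mmap record */
        char filename[256];
};

/* Resolve an IP against the recorded mappings; returns NULL when the
 * IP falls outside every recorded executable mapping. */
static const struct map_entry *
resolve_ip(const struct map_entry *maps, size_t n, uint64_t ip,
           uint64_t *file_offset)
{
        size_t i;

        for (i = 0; i < n; i++) {
                if (ip >= maps[i].start && ip - maps[i].start < maps[i].len) {
                        /* pgoff counts pages; assume 4K pages here. */
                        *file_offset = (ip - maps[i].start) +
                                       (maps[i].pgoff << 12);
                        return &maps[i];
                }
        }
        return NULL;
}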
Signed-off-by: Peter Zijlstra Acked-by: Paul Mackerras Cc: Andrew Morton Orig-LKML-Reference: <20090330171023.417259499@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 24 ++++++- kernel/perf_counter.c | 145 +++++++++++++++++++++++++++++++++++++++++++ mm/mmap.c | 10 +++ 3 files changed, 177 insertions(+), 2 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 8ac18852dcf..037a81145ac 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -137,9 +137,11 @@ struct perf_counter_hw_event { exclude_kernel : 1, /* ditto kernel */ exclude_hv : 1, /* ditto hypervisor */ exclude_idle : 1, /* don't count when idle */ - include_tid : 1, /* include the tid */ + include_tid : 1, /* include the tid */ + mmap : 1, /* include mmap data */ + munmap : 1, /* include munmap data */ - __reserved_1 : 54; + __reserved_1 : 52; __u32 extra_config_len; __u32 __reserved_4; @@ -211,6 +213,9 @@ enum perf_event_type { PERF_EVENT_IP = 0, PERF_EVENT_GROUP = 1, + PERF_EVENT_MMAP = 2, + PERF_EVENT_MUNMAP = 3, + __PERF_EVENT_TID = 0x100, }; @@ -491,6 +496,12 @@ static inline int is_software_counter(struct perf_counter *counter) extern void perf_swcounter_event(u32, u64, int, struct pt_regs *); +extern void perf_counter_mmap(unsigned long addr, unsigned long len, + unsigned long pgoff, struct file *file); + +extern void perf_counter_munmap(unsigned long addr, unsigned long len, + unsigned long pgoff, struct file *file); + #else static inline void perf_counter_task_sched_in(struct task_struct *task, int cpu) { } @@ -511,6 +522,15 @@ static inline int perf_counter_task_enable(void) { return -EINVAL; } static inline void perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs) { } + +static inline void +perf_counter_mmap(unsigned long addr, unsigned long len, + unsigned long pgoff, struct file *file) { } + +static inline void +perf_counter_munmap(unsigned long addr, unsigned long len, + unsigned long pgoff, struct file *file) { } + #endif #endif /* __KERNEL__ */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index c95e92329b9..f35e89e3d6a 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -25,6 +25,7 @@ #include #include #include +#include #include @@ -1843,6 +1844,150 @@ void perf_counter_output(struct perf_counter *counter, } } +/* + * mmap tracking + */ + +struct perf_mmap_event { + struct file *file; + char *file_name; + int file_size; + + struct { + struct perf_event_header header; + + u32 pid; + u32 tid; + u64 start; + u64 len; + u64 pgoff; + } event; +}; + +static void perf_counter_mmap_output(struct perf_counter *counter, + struct perf_mmap_event *mmap_event) +{ + struct perf_output_handle handle; + int size = mmap_event->event.header.size; + int ret = perf_output_begin(&handle, counter, size); + + if (ret) + return; + + perf_output_put(&handle, mmap_event->event); + perf_output_copy(&handle, mmap_event->file_name, + mmap_event->file_size); + perf_output_end(&handle, 0); +} + +static int perf_counter_mmap_match(struct perf_counter *counter, + struct perf_mmap_event *mmap_event) +{ + if (counter->hw_event.mmap && + mmap_event->event.header.type == PERF_EVENT_MMAP) + return 1; + + if (counter->hw_event.munmap && + mmap_event->event.header.type == PERF_EVENT_MUNMAP) + return 1; + + return 0; +} + +static void perf_counter_mmap_ctx(struct perf_counter_context *ctx, + struct perf_mmap_event *mmap_event) +{ + struct perf_counter *counter; + + if (system_state != SYSTEM_RUNNING || 
list_empty(&ctx->event_list)) + return; + + rcu_read_lock(); + list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { + if (perf_counter_mmap_match(counter, mmap_event)) + perf_counter_mmap_output(counter, mmap_event); + } + rcu_read_unlock(); +} + +static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event) +{ + struct perf_cpu_context *cpuctx; + struct file *file = mmap_event->file; + unsigned int size; + char tmp[16]; + char *buf = NULL; + char *name; + + if (file) { + buf = kzalloc(PATH_MAX, GFP_KERNEL); + if (!buf) { + name = strncpy(tmp, "//enomem", sizeof(tmp)); + goto got_name; + } + name = dentry_path(file->f_dentry, buf, PATH_MAX); + if (IS_ERR(name)) { + name = strncpy(tmp, "//toolong", sizeof(tmp)); + goto got_name; + } + } else { + name = strncpy(tmp, "//anon", sizeof(tmp)); + goto got_name; + } + +got_name: + size = ALIGN(strlen(name), sizeof(u64)); + + mmap_event->file_name = name; + mmap_event->file_size = size; + + mmap_event->event.header.size = sizeof(mmap_event->event) + size; + + cpuctx = &get_cpu_var(perf_cpu_context); + perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event); + put_cpu_var(perf_cpu_context); + + perf_counter_mmap_ctx(¤t->perf_counter_ctx, mmap_event); + + kfree(buf); +} + +void perf_counter_mmap(unsigned long addr, unsigned long len, + unsigned long pgoff, struct file *file) +{ + struct perf_mmap_event mmap_event = { + .file = file, + .event = { + .header = { .type = PERF_EVENT_MMAP, }, + .pid = current->group_leader->pid, + .tid = current->pid, + .start = addr, + .len = len, + .pgoff = pgoff, + }, + }; + + perf_counter_mmap_event(&mmap_event); +} + +void perf_counter_munmap(unsigned long addr, unsigned long len, + unsigned long pgoff, struct file *file) +{ + struct perf_mmap_event mmap_event = { + .file = file, + .event = { + .header = { .type = PERF_EVENT_MUNMAP, }, + .pid = current->group_leader->pid, + .tid = current->pid, + .start = addr, + .len = len, + .pgoff = pgoff, + }, + }; + + perf_counter_mmap_event(&mmap_event); +} + /* * Generic software counter infrastructure */ diff --git a/mm/mmap.c b/mm/mmap.c index 4a3841186c1..1df63f614f9 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -1223,6 +1224,9 @@ munmap_back: if (correct_wcount) atomic_inc(&inode->i_writecount); out: + if (vm_flags & VM_EXEC) + perf_counter_mmap(addr, len, pgoff, file); + mm->total_vm += len >> PAGE_SHIFT; vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); if (vm_flags & VM_LOCKED) { @@ -1756,6 +1760,12 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) do { long nrpages = vma_pages(vma); + if (vma->vm_flags & VM_EXEC) { + perf_counter_munmap(vma->vm_start, + nrpages << PAGE_SHIFT, + vma->vm_pgoff, vma->vm_file); + } + mm->total_vm -= nrpages; vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); vma = remove_vma(vma); -- cgit v1.2.3 From 3c1ba6fafecaed295017881f8863a18602f32c1d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 30 Mar 2009 19:07:06 +0200 Subject: perf_counter: kerneltop: parse the mmap data stream frob the kerneltop code to print the mmap data in the stream Better use would be collecting the IPs per PID and mapping them onto the provided userspace code.. 
TODO Signed-off-by: Peter Zijlstra Acked-by: Paul Mackerras Orig-LKML-Reference: <20090330171023.501902515@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 50 ++++++++++++++++++++++++++++++---- 1 file changed, 44 insertions(+), 6 deletions(-) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index 2779c57ad4b..995111dee7f 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -184,6 +184,8 @@ static int nmi = 1; static int group = 0; static unsigned int page_size; static unsigned int mmap_pages = 16; +static int use_mmap = 0; +static int use_munmap = 0; static char *vmlinux; @@ -333,6 +335,8 @@ static void display_help(void) " -z --zero # zero counts after display\n" " -D --dump_symtab # dump symbol table to stderr on startup\n" " -m pages --mmap_pages= # number of mmap data pages\n" + " -M --mmap_info # print mmap info stream\n" + " -U --munmap_info # print munmap info stream\n" ); exit(0); @@ -1052,9 +1056,11 @@ static void process_options(int argc, char *argv[]) {"stat", no_argument, NULL, 'S'}, {"zero", no_argument, NULL, 'z'}, {"mmap_pages", required_argument, NULL, 'm'}, + {"mmap_info", no_argument, NULL, 'M'}, + {"munmap_info", no_argument, NULL, 'U'}, {NULL, 0, NULL, 0 } }; - int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hn:m:p:s:Sx:z", + int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hn:m:p:s:Sx:zMU", long_options, &option_index); if (c == -1) break; @@ -1092,6 +1098,8 @@ static void process_options(int argc, char *argv[]) case 'x': vmlinux = strdup(optarg); break; case 'z': zero = 1; break; case 'm': mmap_pages = atoi(optarg); break; + case 'M': use_mmap = 1; break; + case 'U': use_munmap = 1; break; default: error = 1; break; } } @@ -1172,12 +1180,29 @@ static void mmap_read(struct mmap_data *md) last_read = this_read; for (; old != head;) { - struct event_struct { + struct ip_event { struct perf_event_header header; __u64 ip; __u32 pid, tid; - } *event = (struct event_struct *)&data[old & md->mask]; - struct event_struct event_copy; + }; + struct mmap_event { + struct perf_event_header header; + __u32 pid, tid; + __u64 start; + __u64 len; + __u64 pgoff; + char filename[PATH_MAX]; + }; + + typedef union event_union { + struct perf_event_header header; + struct ip_event ip; + struct mmap_event mmap; + } event_t; + + event_t *event = (event_t *)&data[old & md->mask]; + + event_t event_copy; unsigned int size = event->header.size; @@ -1187,7 +1212,7 @@ static void mmap_read(struct mmap_data *md) */ if ((old & md->mask) + size != ((old + size) & md->mask)) { unsigned int offset = old; - unsigned int len = sizeof(*event), cpy; + unsigned int len = min(sizeof(*event), size), cpy; void *dst = &event_copy; do { @@ -1206,7 +1231,18 @@ static void mmap_read(struct mmap_data *md) switch (event->header.type) { case PERF_EVENT_IP: case PERF_EVENT_IP | __PERF_EVENT_TID: - process_event(event->ip, md->counter); + process_event(event->ip.ip, md->counter); + break; + + case PERF_EVENT_MMAP: + case PERF_EVENT_MUNMAP: + printf("%s: %Lu %Lu %Lu %s\n", + event->header.type == PERF_EVENT_MMAP + ? 
"mmap" : "munmap", + event->mmap.start, + event->mmap.len, + event->mmap.pgoff, + event->mmap.filename); break; } } @@ -1255,6 +1291,8 @@ int main(int argc, char *argv[]) hw_event.record_type = PERF_RECORD_IRQ; hw_event.nmi = nmi; hw_event.include_tid = 1; + hw_event.mmap = use_mmap; + hw_event.munmap = use_munmap; fd[i][counter] = sys_perf_counter_open(&hw_event, tid, cpu, group_fd, 0); if (fd[i][counter] < 0) { -- cgit v1.2.3 From 7595d63b3a9ce65d14c4fbd0e7de448a343d7215 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 30 Mar 2009 19:07:07 +0200 Subject: perf_counter: powerpc: only reserve PMU hardware when we need it Impact: cooperate with oprofile At present, on PowerPC, if you have perf_counters compiled in, oprofile doesn't work. There is code to allow the PMU to be shared between competing subsystems, such as perf_counters and oprofile, but currently the perf_counter subsystem reserves the PMU for itself at boot time, and never releases it. This makes perf_counter play nicely with oprofile. Now we keep a count of how many perf_counter instances are counting hardware events, and reserve the PMU when that count becomes non-zero, and release the PMU when that count becomes zero. This means that it is possible to have perf_counters compiled in and still use oprofile, as long as there are no hardware perf_counters active. This also means that if oprofile is active, sys_perf_counter_open will fail if the hw_event specifies a hardware event. To avoid races with other tasks creating and destroying perf_counters, we use a mutex. We use atomic_inc_not_zero and atomic_add_unless to avoid having to take the mutex unless there is a possibility of the count going between 0 and 1. Signed-off-by: Paul Mackerras Signed-off-by: Peter Zijlstra Orig-LKML-Reference: <20090330171023.627912475@chello.nl> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 47 ++++++++++++++++++++++++++++++++++---- 1 file changed, 42 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index cde720fc495..560dd1e7b52 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -41,6 +41,8 @@ struct power_pmu *ppmu; */ static unsigned int freeze_counters_kernel = MMCR0_FCS; +static void perf_counter_interrupt(struct pt_regs *regs); + void perf_counter_print_debug(void) { } @@ -594,6 +596,24 @@ struct hw_perf_counter_ops power_perf_ops = { .read = power_perf_read }; +/* Number of perf_counters counting hardware events */ +static atomic_t num_counters; +/* Used to avoid races in calling reserve/release_pmc_hardware */ +static DEFINE_MUTEX(pmc_reserve_mutex); + +/* + * Release the PMU if this is the last perf_counter. + */ +static void hw_perf_counter_destroy(struct perf_counter *counter) +{ + if (!atomic_add_unless(&num_counters, -1, 1)) { + mutex_lock(&pmc_reserve_mutex); + if (atomic_dec_return(&num_counters) == 0) + release_pmc_hardware(); + mutex_unlock(&pmc_reserve_mutex); + } +} + const struct hw_perf_counter_ops * hw_perf_counter_init(struct perf_counter *counter) { @@ -601,6 +621,7 @@ hw_perf_counter_init(struct perf_counter *counter) struct perf_counter *ctrs[MAX_HWCOUNTERS]; unsigned int events[MAX_HWCOUNTERS]; int n; + int err; if (!ppmu) return NULL; @@ -646,6 +667,27 @@ hw_perf_counter_init(struct perf_counter *counter) counter->hw.config = events[n]; atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period); + + /* + * See if we need to reserve the PMU. 
+ * If no counters are currently in use, then we have to take a + * mutex to ensure that we don't race with another task doing + * reserve_pmc_hardware or release_pmc_hardware. + */ + err = 0; + if (!atomic_inc_not_zero(&num_counters)) { + mutex_lock(&pmc_reserve_mutex); + if (atomic_read(&num_counters) == 0 && + reserve_pmc_hardware(perf_counter_interrupt)) + err = -EBUSY; + else + atomic_inc(&num_counters); + mutex_unlock(&pmc_reserve_mutex); + } + counter->destroy = hw_perf_counter_destroy; + + if (err) + return NULL; return &power_perf_ops; } @@ -769,11 +811,6 @@ static int init_perf_counters(void) { unsigned long pvr; - if (reserve_pmc_hardware(perf_counter_interrupt)) { - printk(KERN_ERR "Couldn't init performance monitor subsystem\n"); - return -EBUSY; - } - /* XXX should get this from cputable */ pvr = mfspr(SPRN_PVR); switch (PVR_VER(pvr)) { -- cgit v1.2.3 From d5d2bc0dd0379deddb9ede66fec90a3083eaec57 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 30 Mar 2009 19:07:08 +0200 Subject: perf_counter: make it possible for hw_perf_counter_init to return error codes Impact: better error reporting At present, if hw_perf_counter_init encounters an error, all it can do is return NULL, which causes sys_perf_counter_open to return an EINVAL error to userspace. This isn't very informative for userspace; it means that userspace can't tell the difference between "sorry, oprofile is already using the PMU" and "we don't support this CPU" and "this CPU doesn't support the requested generic hardware event". This commit uses the PTR_ERR/ERR_PTR/IS_ERR set of macros to let hw_perf_counter_init return an error code on error rather than just NULL if it wishes. If it does so, that error code will be returned from sys_perf_counter_open to userspace. If it returns NULL, an EINVAL error will be returned to userspace, as before. This also adapts the powerpc hw_perf_counter_init to make use of this to return ENXIO, EINVAL, EBUSY, or EOPNOTSUPP as appropriate. It would be good to add extra error numbers in future to allow userspace to distinguish the various errors that are currently reported as EINVAL, i.e. irq_period < 0, too many events in a group, conflict between exclude_* settings in a group, and PMU resource conflict in a group. [ v2: fix a bug pointed out by Corey Ashford where error returns from hw_perf_counter_init were not handled correctly in the case of raw hardware events.] 
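[ The pointer-encoded error idiom in miniature -- a self-contained sketch of the err.h helpers this relies on (simplified; the kernel's versions live in include/linux/err.h): ]

#include <errno.h>

#define MAX_ERRNO 4095  /* errnos occupy the top page of the address space */

static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline long IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct hw_perf_counter_ops;     /* opaque for the sketch */

/* A hw_perf_counter_init()-style function can now say why it failed
 * instead of collapsing every failure into NULL: */
static const struct hw_perf_counter_ops *
init_sketch(int have_pmu, int event_supported)
{
        if (!have_pmu)
                return ERR_PTR(-ENXIO);
        if (!event_supported)
                return ERR_PTR(-EOPNOTSUPP);
        return NULL;    /* NULL still maps to the old -EINVAL fallback */
}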
Signed-off-by: Paul Mackerras Signed-off-by: Peter Zijlstra Orig-LKML-Reference: <20090330171023.682428180@chello.nl> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 14 +++++++------- kernel/perf_counter.c | 35 ++++++++++++++++++++++------------- 2 files changed, 29 insertions(+), 20 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 560dd1e7b52..0a4d14f279a 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -624,13 +624,13 @@ hw_perf_counter_init(struct perf_counter *counter) int err; if (!ppmu) - return NULL; + return ERR_PTR(-ENXIO); if ((s64)counter->hw_event.irq_period < 0) - return NULL; + return ERR_PTR(-EINVAL); if (!perf_event_raw(&counter->hw_event)) { ev = perf_event_id(&counter->hw_event); if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) - return NULL; + return ERR_PTR(-EOPNOTSUPP); ev = ppmu->generic_events[ev]; } else { ev = perf_event_config(&counter->hw_event); @@ -656,14 +656,14 @@ hw_perf_counter_init(struct perf_counter *counter) n = collect_events(counter->group_leader, ppmu->n_counter - 1, ctrs, events); if (n < 0) - return NULL; + return ERR_PTR(-EINVAL); } events[n] = ev; ctrs[n] = counter; if (check_excludes(ctrs, n, 1)) - return NULL; + return ERR_PTR(-EINVAL); if (power_check_constraints(events, n + 1)) - return NULL; + return ERR_PTR(-EINVAL); counter->hw.config = events[n]; atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period); @@ -687,7 +687,7 @@ hw_perf_counter_init(struct perf_counter *counter) counter->destroy = hw_perf_counter_destroy; if (err) - return NULL; + return ERR_PTR(err); return &power_perf_ops; } diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index f35e89e3d6a..d07b45278b4 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2453,10 +2453,11 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, { const struct hw_perf_counter_ops *hw_ops; struct perf_counter *counter; + long err; counter = kzalloc(sizeof(*counter), gfpflags); if (!counter) - return NULL; + return ERR_PTR(-ENOMEM); /* * Single counters are their own group leaders, with an @@ -2505,12 +2506,18 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, hw_ops = tp_perf_counter_init(counter); break; } +done: + err = 0; + if (!hw_ops) + err = -EINVAL; + else if (IS_ERR(hw_ops)) + err = PTR_ERR(hw_ops); - if (!hw_ops) { + if (err) { kfree(counter); - return NULL; + return ERR_PTR(err); } -done: + counter->hw_ops = hw_ops; return counter; @@ -2583,10 +2590,10 @@ SYSCALL_DEFINE5(perf_counter_open, goto err_put_context; } - ret = -EINVAL; counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader, GFP_KERNEL); - if (!counter) + ret = PTR_ERR(counter); + if (IS_ERR(counter)) goto err_put_context; ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0); @@ -2658,8 +2665,8 @@ inherit_counter(struct perf_counter *parent_counter, child_counter = perf_counter_alloc(&parent_counter->hw_event, parent_counter->cpu, child_ctx, group_leader, GFP_KERNEL); - if (!child_counter) - return NULL; + if (IS_ERR(child_counter)) + return child_counter; /* * Link it up in the child's context: @@ -2710,15 +2717,17 @@ static int inherit_group(struct perf_counter *parent_counter, { struct perf_counter *leader; struct perf_counter *sub; + struct perf_counter *child_ctr; leader = inherit_counter(parent_counter, parent, parent_ctx, child, NULL, child_ctx); - if (!leader) - return -ENOMEM; + if (IS_ERR(leader)) + return 
PTR_ERR(leader); list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) { - if (!inherit_counter(sub, parent, parent_ctx, - child, leader, child_ctx)) - return -ENOMEM; + child_ctr = inherit_counter(sub, parent, parent_ctx, + child, leader, child_ctx); + if (IS_ERR(child_ctr)) + return PTR_ERR(child_ctr); } return 0; } -- cgit v1.2.3 From 9ea98e191255ee642e64a5745014424fc63f83b0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 30 Mar 2009 19:07:09 +0200 Subject: perf_counter: x86: proper error propagation for the x86 hw_perf_counter_init() Now that Paul cleaned up the error propagation paths, pass down the x86 error as well. Signed-off-by: Peter Zijlstra Acked-by: Paul Mackerras Orig-LKML-Reference: <20090330171023.792822360@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 7aab177fb56..b8885ccd804 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -954,7 +954,7 @@ hw_perf_counter_init(struct perf_counter *counter) err = __hw_perf_counter_init(counter); if (err) - return NULL; + return ERR_PTR(err); return &x86_perf_counter_ops; } -- cgit v1.2.3 From 31f004df8d14212f0a8a2fb12a8ed44a3d80e2fb Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 30 Mar 2009 19:07:10 +0200 Subject: perf_counter tools: optionally scale counter values in perfstat mode Impact: new functionality This adds an option to the perfstat mode of kerneltop to scale the reported counter values according to the fraction of time that each counter gets to count. This is invoked with the -l option (I used 'l' because s, c, a and e were all taken already.) This uses the new PERF_FORMAT_TOTAL_TIME_{ENABLED,RUNNING} read format options. With this, we get output like this: $ ./perfstat -l -e 0:0,0:1,0:2,0:3,0:4,0:5 ./spin Performance counter stats for './spin': 4016072055 CPU cycles (events) (scaled from 66.53%) 2005887318 instructions (events) (scaled from 66.53%) 1762849 cache references (events) (scaled from 66.69%) 165229 cache misses (events) (scaled from 66.85%) 1001298009 branches (events) (scaled from 66.78%) 41566 branch misses (events) (scaled from 66.61%) Wall-clock time elapsed: 2438.227446 msecs This also lets us detect when a counter is zero because the counter never got to go on the CPU at all. In that case we print <not counted> rather than 0.
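[ The read-and-scale step in isolation -- a sketch assuming the counter fd was opened with both PERF_FORMAT_TOTAL_TIME_ENABLED and PERF_FORMAT_TOTAL_TIME_RUNNING set, so that read() returns three u64 values, as in do_perfstat() below: ]

#include <stdint.h>
#include <unistd.h>

/* Returns the scaled count; 0 stands for a counter that never ran
 * (the <not counted> case above). */
static uint64_t read_scaled(int fd)
{
        uint64_t v[3];  /* { count, time_enabled, time_running } */

        if (read(fd, v, sizeof(v)) != (ssize_t)sizeof(v))
                return 0;
        if (v[2] == 0)          /* never scheduled onto the PMU */
                return 0;
        if (v[2] < v[1])        /* time-shared with other counters: extrapolate */
                return (uint64_t)((double)v[0] * v[1] / v[2] + 0.5);
        return v[0];            /* counted the whole time: exact */
}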
Signed-off-by: Paul Mackerras Signed-off-by: Peter Zijlstra Orig-LKML-Reference: <20090330171023.871484899@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 56 +++++++++++++++++++++++++++------- 1 file changed, 45 insertions(+), 11 deletions(-) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index 995111dee7f..c0ca01504ff 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -197,6 +197,8 @@ static int delay_secs = 2; static int zero; static int dump_symtab; +static int scale; + struct source_line { uint64_t EIP; unsigned long count; @@ -305,6 +307,7 @@ static void display_perfstat_help(void) display_events_help(); printf( + " -l # scale counter values\n" " -a # system-wide collection\n"); exit(0); } @@ -328,6 +331,7 @@ static void display_help(void) " -c CNT --count=CNT # event period to sample\n\n" " -C CPU --cpu=CPU # CPU (-1 for all) [default: -1]\n" " -p PID --pid=PID # PID of sampled task (-1 for all) [default: -1]\n\n" + " -l # show scale factor for RR events\n" " -d delay --delay= # sampling/display delay [default: 2]\n" " -f CNT --filter=CNT # min-event-count filter [default: 100]\n\n" " -s symbol --symbol= # function to be showed annotated one-shot\n" @@ -436,6 +440,9 @@ static void create_perfstat_counter(int counter) hw_event.config = event_id[counter]; hw_event.record_type = PERF_RECORD_SIMPLE; hw_event.nmi = 0; + if (scale) + hw_event.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | + PERF_FORMAT_TOTAL_TIME_RUNNING; if (system_wide) { int cpu; @@ -507,28 +514,53 @@ int do_perfstat(int argc, char *argv[]) fprintf(stderr, "\n"); for (counter = 0; counter < nr_counters; counter++) { - int cpu; - __u64 count, single_count; + int cpu, nv; + __u64 count[3], single_count[3]; + int scaled; - count = 0; + count[0] = count[1] = count[2] = 0; + nv = scale ? 
3 : 1; for (cpu = 0; cpu < nr_cpus; cpu ++) { res = read(fd[cpu][counter], - (char *) &single_count, sizeof(single_count)); - assert(res == sizeof(single_count)); - count += single_count; + single_count, nv * sizeof(__u64)); + assert(res == nv * sizeof(__u64)); + + count[0] += single_count[0]; + if (scale) { + count[1] += single_count[1]; + count[2] += single_count[2]; + } + } + + scaled = 0; + if (scale) { + if (count[2] == 0) { + fprintf(stderr, " %14s %-20s\n", + "<not counted>", event_name(counter)); + continue; + } + if (count[2] < count[1]) { + scaled = 1; + count[0] = (unsigned long long) + ((double)count[0] * count[1] / count[2] + 0.5); + } } if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK) || event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK)) { - double msecs = (double)count / 1000000; + double msecs = (double)count[0] / 1000000; - fprintf(stderr, " %14.6f %-20s (msecs)\n", + fprintf(stderr, " %14.6f %-20s (msecs)", msecs, event_name(counter)); } else { - fprintf(stderr, " %14Ld %-20s (events)\n", - count, event_name(counter)); + fprintf(stderr, " %14Ld %-20s (events)", + count[0], event_name(counter)); } + if (scaled) + fprintf(stderr, " (scaled from %.2f%%)", + (double) count[2] / count[1] * 100); + fprintf(stderr, "\n"); } fprintf(stderr, "\n"); fprintf(stderr, " Wall-clock time elapsed: %12.6f msecs\n", @@ -1049,6 +1081,7 @@ static void process_options(int argc, char *argv[]) {"filter", required_argument, NULL, 'f'}, {"group", required_argument, NULL, 'g'}, {"help", no_argument, NULL, 'h'}, + {"scale", no_argument, NULL, 'l'}, {"nmi", required_argument, NULL, 'n'}, {"pid", required_argument, NULL, 'p'}, {"vmlinux", required_argument, NULL, 'x'}, @@ -1060,7 +1093,7 @@ static void process_options(int argc, char *argv[]) {"munmap_info", no_argument, NULL, 'U'}, {NULL, 0, NULL, 0 } }; - int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hn:m:p:s:Sx:zMU", + int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hln:m:p:s:Sx:zMU", long_options, &option_index); if (c == -1) break; @@ -1084,6 +1117,7 @@ static void process_options(int argc, char *argv[]) case 'f': count_filter = atoi(optarg); break; case 'g': group = atoi(optarg); break; case 'h': display_help(); break; + case 'l': scale = 1; break; case 'n': nmi = atoi(optarg); break; case 'p': /* CPU and PID are mutually exclusive */ -- cgit v1.2.3 From 78d613eb129fc4edf0e2cabfcc6a4c5285482d21 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 30 Mar 2009 19:07:11 +0200 Subject: perf_counter: small cleanup of the output routines Move the nmi argument to the _begin() function, so that _end() only needs the handle. This allows the _begin() function to generate a wakeup on event loss.
Signed-off-by: Peter Zijlstra Acked-by: Paul Mackerras Orig-LKML-Reference: <20090330171023.959404268@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 42 ++++++++++++++++++++++++++---------------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index d07b45278b4..4471e7e2c10 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1663,10 +1663,20 @@ struct perf_output_handle { unsigned int offset; unsigned int head; int wakeup; + int nmi; }; +static inline void __perf_output_wakeup(struct perf_output_handle *handle) +{ + if (handle->nmi) + perf_pending_queue(handle->counter); + else + perf_counter_wakeup(handle->counter); +} + static int perf_output_begin(struct perf_output_handle *handle, - struct perf_counter *counter, unsigned int size) + struct perf_counter *counter, unsigned int size, + int nmi) { struct perf_mmap_data *data; unsigned int offset, head; @@ -1676,15 +1686,17 @@ static int perf_output_begin(struct perf_output_handle *handle, if (!data) goto out; + handle->counter = counter; + handle->nmi = nmi; + if (!data->nr_pages) - goto out; + goto fail; do { offset = head = atomic_read(&data->head); head += size; } while (atomic_cmpxchg(&data->head, offset, head) != offset); - handle->counter = counter; handle->data = data; handle->offset = offset; handle->head = head; @@ -1692,6 +1704,8 @@ static int perf_output_begin(struct perf_output_handle *handle, return 0; +fail: + __perf_output_wakeup(handle); out: rcu_read_unlock(); @@ -1733,14 +1747,10 @@ static void perf_output_copy(struct perf_output_handle *handle, #define perf_output_put(handle, x) \ perf_output_copy((handle), &(x), sizeof(x)) -static void perf_output_end(struct perf_output_handle *handle, int nmi) +static void perf_output_end(struct perf_output_handle *handle) { - if (handle->wakeup) { - if (nmi) - perf_pending_queue(handle->counter); - else - perf_counter_wakeup(handle->counter); - } + if (handle->wakeup) + __perf_output_wakeup(handle); rcu_read_unlock(); } @@ -1750,12 +1760,12 @@ static int perf_output_write(struct perf_counter *counter, int nmi, struct perf_output_handle handle; int ret; - ret = perf_output_begin(&handle, counter, size); + ret = perf_output_begin(&handle, counter, size, nmi); if (ret) goto out; perf_output_copy(&handle, buf, size); - perf_output_end(&handle, nmi); + perf_output_end(&handle); out: return ret; @@ -1804,7 +1814,7 @@ static void perf_output_group(struct perf_counter *counter, int nmi) size = sizeof(header) + counter->nr_siblings * sizeof(entry); - ret = perf_output_begin(&handle, counter, size); + ret = perf_output_begin(&handle, counter, size, nmi); if (ret) return; @@ -1824,7 +1834,7 @@ static void perf_output_group(struct perf_counter *counter, int nmi) perf_output_put(&handle, entry); } - perf_output_end(&handle, nmi); + perf_output_end(&handle); } void perf_counter_output(struct perf_counter *counter, @@ -1869,7 +1879,7 @@ static void perf_counter_mmap_output(struct perf_counter *counter, { struct perf_output_handle handle; int size = mmap_event->event.header.size; - int ret = perf_output_begin(&handle, counter, size); + int ret = perf_output_begin(&handle, counter, size, 0); if (ret) return; @@ -1877,7 +1887,7 @@ static void perf_counter_mmap_output(struct perf_counter *counter, perf_output_put(&handle, mmap_event->event); perf_output_copy(&handle, mmap_event->file_name, mmap_event->file_size); - perf_output_end(&handle, 0); + perf_output_end(&handle); } static int 
perf_counter_mmap_match(struct perf_counter *counter, -- cgit v1.2.3 From 5ed00415e304203a0a9dcaef226d6d3f1106070e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 30 Mar 2009 19:07:12 +0200 Subject: perf_counter: re-arrange the perf_event_type Breaks ABI yet again :-) Change the event type so that [0, 2^31-1] are regular event types, but [2^31, 2^32-1] forms a bitmask for overflow events. Signed-off-by: Peter Zijlstra Acked-by: Paul Mackerras Orig-LKML-Reference: <20090330171024.047961770@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 6 +++-- kernel/perf_counter.c | 56 ++++++++++++++++++++------------------------ 2 files changed, 29 insertions(+), 33 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 037a81145ac..edf5bfb7ff5 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -210,13 +210,15 @@ struct perf_event_header { }; enum perf_event_type { - PERF_EVENT_IP = 0, + PERF_EVENT_GROUP = 1, PERF_EVENT_MMAP = 2, PERF_EVENT_MUNMAP = 3, - __PERF_EVENT_TID = 0x100, + PERF_EVENT_OVERFLOW = 1UL << 31, + __PERF_EVENT_IP = 1UL << 30, + __PERF_EVENT_TID = 1UL << 29, }; #ifdef __KERNEL__ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 4471e7e2c10..d93e9ddf784 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1754,50 +1754,44 @@ static void perf_output_end(struct perf_output_handle *handle) rcu_read_unlock(); } -static int perf_output_write(struct perf_counter *counter, int nmi, - void *buf, ssize_t size) -{ - struct perf_output_handle handle; - int ret; - - ret = perf_output_begin(&handle, counter, size, nmi); - if (ret) - goto out; - - perf_output_copy(&handle, buf, size); - perf_output_end(&handle); - -out: - return ret; -} - static void perf_output_simple(struct perf_counter *counter, int nmi, struct pt_regs *regs) { - unsigned int size; + int ret; + struct perf_output_handle handle; + struct perf_event_header header; + u64 ip; struct { - struct perf_event_header header; - u64 ip; u32 pid, tid; - } event; + } tid_entry; - event.header.type = PERF_EVENT_IP; - event.ip = instruction_pointer(regs); + header.type = PERF_EVENT_OVERFLOW; + header.size = sizeof(header); - size = sizeof(event); + ip = instruction_pointer(regs); + header.type |= __PERF_EVENT_IP; + header.size += sizeof(ip); if (counter->hw_event.include_tid) { /* namespace issues */ - event.pid = current->group_leader->pid; - event.tid = current->pid; + tid_entry.pid = current->group_leader->pid; + tid_entry.tid = current->pid; - event.header.type |= __PERF_EVENT_TID; - } else - size -= sizeof(u64); + header.type |= __PERF_EVENT_TID; + header.size += sizeof(tid_entry); + } - event.header.size = size; + ret = perf_output_begin(&handle, counter, header.size, nmi); + if (ret) + return; + + perf_output_put(&handle, header); + perf_output_put(&handle, ip); + + if (counter->hw_event.include_tid) + perf_output_put(&handle, tid_entry); - perf_output_write(counter, nmi, &event, size); + perf_output_end(&handle); } static void perf_output_group(struct perf_counter *counter, int nmi) -- cgit v1.2.3 From 023c54c42288416b4f43c67bfd5049a76926fad6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 30 Mar 2009 19:07:13 +0200 Subject: perf_counter tools: kerneltop: update event_types Go along with the new perf_event_type ABI. 
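[ How a consumer is expected to take the new encoding apart -- a small sketch with constants copied from the re-arranged enum above; the decode helper itself is illustrative (kerneltop's switch below matches exact flag combinations instead): ]

#include <stdint.h>
#include <stdio.h>

#define PERF_EVENT_GROUP        1
#define PERF_EVENT_MMAP         2
#define PERF_EVENT_MUNMAP       3

#define PERF_EVENT_OVERFLOW     (1U << 31)      /* top bit: overflow record */
#define __PERF_EVENT_IP         (1U << 30)      /* low bits: field bitmask */
#define __PERF_EVENT_TID        (1U << 29)

static void decode_type(uint32_t type)
{
        if (type & PERF_EVENT_OVERFLOW) {
                /* Each set flag appends one field to the record body. */
                printf("overflow:%s%s\n",
                       (type & __PERF_EVENT_IP) ? " ip" : "",
                       (type & __PERF_EVENT_TID) ? " tid" : "");
        } else {
                printf("plain record, type %u\n", type);
        }
}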
Signed-off-by: Peter Zijlstra Acked-by: Paul Mackerras Orig-LKML-Reference: <20090330171024.133985461@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index c0ca01504ff..430810dae1f 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -1263,8 +1263,8 @@ static void mmap_read(struct mmap_data *md) old += size; switch (event->header.type) { - case PERF_EVENT_IP: - case PERF_EVENT_IP | __PERF_EVENT_TID: + case PERF_EVENT_OVERFLOW | __PERF_EVENT_IP: + case PERF_EVENT_OVERFLOW | __PERF_EVENT_IP | __PERF_EVENT_TID: process_event(event->ip.ip, md->counter); break; -- cgit v1.2.3 From 394ee07623cf556c8daae2b3c00cf5fea47f0811 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 30 Mar 2009 19:07:14 +0200 Subject: perf_counter: provide generic callchain bits Provide the generic callchain support bits. If hw_event->callchain is set the arch specific perf_callchain() function is called upon to provide a perf_callchain_entry structure filled with the current callchain. If it does so, it is added to the overflow output event. Signed-off-by: Peter Zijlstra Acked-by: Paul Mackerras Orig-LKML-Reference: <20090330171024.254266860@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 13 ++++++++++++- kernel/perf_counter.c | 27 +++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index edf5bfb7ff5..43083afffe0 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -140,8 +140,9 @@ struct perf_counter_hw_event { include_tid : 1, /* include the tid */ mmap : 1, /* include mmap data */ munmap : 1, /* include munmap data */ + callchain : 1, /* add callchain data */ - __reserved_1 : 52; + __reserved_1 : 51; __u32 extra_config_len; __u32 __reserved_4; @@ -219,6 +220,7 @@ enum perf_event_type { PERF_EVENT_OVERFLOW = 1UL << 31, __PERF_EVENT_IP = 1UL << 30, __PERF_EVENT_TID = 1UL << 29, + __PERF_EVENT_CALLCHAIN = 1UL << 28, }; #ifdef __KERNEL__ @@ -504,6 +506,15 @@ extern void perf_counter_mmap(unsigned long addr, unsigned long len, extern void perf_counter_munmap(unsigned long addr, unsigned long len, unsigned long pgoff, struct file *file); +#define MAX_STACK_DEPTH 255 + +struct perf_callchain_entry { + u64 nr; + u64 ip[MAX_STACK_DEPTH]; +}; + +extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); + #else static inline void perf_counter_task_sched_in(struct task_struct *task, int cpu) { } diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index d93e9ddf784..860cdc26bd7 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1653,6 +1653,17 @@ void perf_counter_do_pending(void) __perf_pending_run(); } +/* + * Callchain support -- arch specific + */ + +struct perf_callchain_entry * +__attribute__((weak)) +perf_callchain(struct pt_regs *regs) +{ + return NULL; +} + /* * Output */ @@ -1764,6 +1775,8 @@ static void perf_output_simple(struct perf_counter *counter, struct { u32 pid, tid; } tid_entry; + struct perf_callchain_entry *callchain = NULL; + int callchain_size = 0; header.type = PERF_EVENT_OVERFLOW; header.size = sizeof(header); @@ -1781,6 +1794,17 @@ static void perf_output_simple(struct perf_counter *counter, header.size += sizeof(tid_entry); } + if (counter->hw_event.callchain) { + callchain = 
perf_callchain(regs); + + if (callchain) { + callchain_size = (1 + callchain->nr) * sizeof(u64); + + header.type |= __PERF_EVENT_CALLCHAIN; + header.size += callchain_size; + } + } + ret = perf_output_begin(&handle, counter, header.size, nmi); if (ret) return; @@ -1791,6 +1815,9 @@ static void perf_output_simple(struct perf_counter *counter, if (counter->hw_event.include_tid) perf_output_put(&handle, tid_entry); + if (callchain) + perf_output_copy(&handle, callchain, callchain_size); + perf_output_end(&handle); } -- cgit v1.2.3 From d7d59fb323833682b117b528d77eeb8ef587036a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 30 Mar 2009 19:07:15 +0200 Subject: perf_counter: x86: callchain support Provide the x86 perf_callchain() implementation. Code based on the ftrace/sysprof code from Soeren Sandmann Pedersen. Signed-off-by: Peter Zijlstra Acked-by: Paul Mackerras Cc: Soeren Sandmann Pedersen Cc: Frederic Weisbecker Cc: Steven Rostedt Orig-LKML-Reference: <20090330171024.341993293@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 154 +++++++++++++++++++++++++++++++++++++ 1 file changed, 154 insertions(+) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index b8885ccd804..e16dfafc6d7 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -16,8 +16,10 @@ #include #include #include +#include #include +#include static bool perf_counters_initialized __read_mostly; @@ -958,3 +960,155 @@ hw_perf_counter_init(struct perf_counter *counter) return &x86_perf_counter_ops; } + +/* + * callchain support + */ + +static inline +void callchain_store(struct perf_callchain_entry *entry, unsigned long ip) +{ + if (entry->nr < MAX_STACK_DEPTH) + entry->ip[entry->nr++] = ip; +} + +static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry); +static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry); + + +static void +backtrace_warning_symbol(void *data, char *msg, unsigned long symbol) +{ + /* Ignore warnings */ +} + +static void backtrace_warning(void *data, char *msg) +{ + /* Ignore warnings */ +} + +static int backtrace_stack(void *data, char *name) +{ + /* Don't bother with IRQ stacks for now */ + return -1; +} + +static void backtrace_address(void *data, unsigned long addr, int reliable) +{ + struct perf_callchain_entry *entry = data; + + if (reliable) + callchain_store(entry, addr); +} + +static const struct stacktrace_ops backtrace_ops = { + .warning = backtrace_warning, + .warning_symbol = backtrace_warning_symbol, + .stack = backtrace_stack, + .address = backtrace_address, +}; + +static void +perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry) +{ + unsigned long bp; + char *stack; + + callchain_store(entry, instruction_pointer(regs)); + + stack = ((char *)regs + sizeof(struct pt_regs)); +#ifdef CONFIG_FRAME_POINTER + bp = frame_pointer(regs); +#else + bp = 0; +#endif + + dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry); +} + + +struct stack_frame { + const void __user *next_fp; + unsigned long return_address; +}; + +static int copy_stack_frame(const void __user *fp, struct stack_frame *frame) +{ + int ret; + + if (!access_ok(VERIFY_READ, fp, sizeof(*frame))) + return 0; + + ret = 1; + pagefault_disable(); + if (__copy_from_user_inatomic(frame, fp, sizeof(*frame))) + ret = 0; + pagefault_enable(); + + return ret; +} + +static void +perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) +{ + struct stack_frame frame; + const 
void __user *fp; + + regs = (struct pt_regs *)current->thread.sp0 - 1; + fp = (void __user *)regs->bp; + + callchain_store(entry, regs->ip); + + while (entry->nr < MAX_STACK_DEPTH) { + frame.next_fp = NULL; + frame.return_address = 0; + + if (!copy_stack_frame(fp, &frame)) + break; + + if ((unsigned long)fp < user_stack_pointer(regs)) + break; + + callchain_store(entry, frame.return_address); + fp = frame.next_fp; + } +} + +static void +perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry) +{ + int is_user; + + if (!regs) + return; + + is_user = user_mode(regs); + + if (!current || current->pid == 0) + return; + + if (is_user && current->state != TASK_RUNNING) + return; + + if (!is_user) + perf_callchain_kernel(regs, entry); + + if (current->mm) + perf_callchain_user(regs, entry); +} + +struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) +{ + struct perf_callchain_entry *entry; + + if (in_nmi()) + entry = &__get_cpu_var(nmi_entry); + else + entry = &__get_cpu_var(irq_entry); + + entry->nr = 0; + + perf_do_callchain(regs, entry); + + return entry; +} -- cgit v1.2.3 From 4e935e47177c3b26cf383e79849bae2a464d0160 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 30 Mar 2009 19:07:16 +0200 Subject: perf_counter: pmc arbitration Follow the example set by powerpc and try to play nice with oprofile and the nmi watchdog. Signed-off-by: Peter Zijlstra Acked-by: Paul Mackerras Orig-LKML-Reference: <20090330171024.459968444@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 75 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index e16dfafc6d7..2a946a160ca 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -20,6 +20,7 @@ #include #include +#include static bool perf_counters_initialized __read_mostly; @@ -172,6 +173,65 @@ again: atomic64_sub(delta, &hwc->period_left); } +static atomic_t num_counters; +static DEFINE_MUTEX(pmc_reserve_mutex); + +static bool reserve_pmc_hardware(void) +{ + int i; + + if (nmi_watchdog == NMI_LOCAL_APIC) + disable_lapic_nmi_watchdog(); + + for (i = 0; i < nr_counters_generic; i++) { + if (!reserve_perfctr_nmi(pmc_ops->perfctr + i)) + goto perfctr_fail; + } + + for (i = 0; i < nr_counters_generic; i++) { + if (!reserve_evntsel_nmi(pmc_ops->eventsel + i)) + goto eventsel_fail; + } + + return true; + +eventsel_fail: + for (i--; i >= 0; i--) + release_evntsel_nmi(pmc_ops->eventsel + i); + + i = nr_counters_generic; + +perfctr_fail: + for (i--; i >= 0; i--) + release_perfctr_nmi(pmc_ops->perfctr + i); + + if (nmi_watchdog == NMI_LOCAL_APIC) + enable_lapic_nmi_watchdog(); + + return false; +} + +static void release_pmc_hardware(void) +{ + int i; + + for (i = 0; i < nr_counters_generic; i++) { + release_perfctr_nmi(pmc_ops->perfctr + i); + release_evntsel_nmi(pmc_ops->eventsel + i); + } + + if (nmi_watchdog == NMI_LOCAL_APIC) + enable_lapic_nmi_watchdog(); +} + +static void hw_perf_counter_destroy(struct perf_counter *counter) +{ + if (atomic_dec_and_mutex_lock(&num_counters, &pmc_reserve_mutex)) { + release_pmc_hardware(); + mutex_unlock(&pmc_reserve_mutex); + } +} + /* * Setup the hardware configuration for a given hw_event_type */ @@ -179,10 +239,23 @@ static int __hw_perf_counter_init(struct perf_counter *counter) { struct perf_counter_hw_event *hw_event = &counter->hw_event; struct hw_perf_counter *hwc = &counter->hw; + int err; if 
(unlikely(!perf_counters_initialized)) return -EINVAL; + err = 0; + if (atomic_inc_not_zero(&num_counters)) { + mutex_lock(&pmc_reserve_mutex); + if (atomic_read(&num_counters) == 0 && !reserve_pmc_hardware()) + err = -EBUSY; + else + atomic_inc(&num_counters); + mutex_unlock(&pmc_reserve_mutex); + } + if (err) + return err; + /* * Generate PMC IRQs: * (keep 'enabled' bit clear for now) @@ -230,6 +303,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter) hwc->config |= pmc_ops->event_map(perf_event_id(hw_event)); } + counter->destroy = hw_perf_counter_destroy; + return 0; } -- cgit v1.2.3 From 9dd499889bdb12ac0e412ccdd718fe0d348258f2 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Fri, 27 Mar 2009 12:13:43 +0100 Subject: perf_counter tools: kerneltop: add real-time data acquisition thread Decouple kerneltop display from event acquisition by introducing a separate data acquisition thread. This fixes annoying kerneltop display refresh jitter and missed events. Also add a -r option, to switch the data acquisition thread to real-time priority. Signed-off-by: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Orig-LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 57 +++++++++++++++++++++++----------- 1 file changed, 39 insertions(+), 18 deletions(-) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index 430810dae1f..33b4fcf6e48 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -77,6 +77,8 @@ #include #include #include +#include +#include #include #include @@ -181,6 +183,7 @@ static int tid = -1; static int profile_cpu = -1; static int nr_cpus = 0; static int nmi = 1; +static unsigned int realtime_prio = 0; static int group = 0; static unsigned int page_size; static unsigned int mmap_pages = 16; @@ -334,6 +337,7 @@ static void display_help(void) " -l # show scale factor for RR events\n" " -d delay --delay= # sampling/display delay [default: 2]\n" " -f CNT --filter=CNT # min-event-count filter [default: 100]\n\n" + " -r prio --realtime= # event acquisition runs with SCHED_FIFO policy\n" " -s symbol --symbol= # function to be showed annotated one-shot\n" " -x path --vmlinux= # the vmlinux binary, required for -s use\n" " -z --zero # zero counts after display\n" @@ -620,7 +624,6 @@ static int compare(const void *__sym1, const void *__sym2) return sym_weight(sym1) < sym_weight(sym2); } -static time_t last_refresh; static long events; static long userspace_events; static const char CONSOLE_CLEAR[] = ""; @@ -634,6 +637,7 @@ static void print_sym_table(void) float events_per_sec = events/delay_secs; float kevents_per_sec = (events-userspace_events)/delay_secs; + events = userspace_events = 0; memcpy(tmp, sym_table, sizeof(sym_table[0])*sym_table_count); qsort(tmp, sym_table_count, sizeof(tmp[0]), compare); @@ -714,8 +718,6 @@ static void print_sym_table(void) if (sym_filter_entry) show_details(sym_filter_entry); - last_refresh = time(NULL); - { struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; @@ -726,6 +728,16 @@ static void print_sym_table(void) } } +static void *display_thread(void *arg) +{ + printf("KernelTop refresh period: %d seconds\n", delay_secs); + + while (!sleep(delay_secs)) + print_sym_table(); + + return NULL; +} + static int read_symbol(FILE *in, struct sym_entry *s) { static int filter_match = 0; @@ -1081,19 +1093,20 @@ static void process_options(int argc, char *argv[]) {"filter", required_argument, NULL, 'f'}, {"group", 
required_argument, NULL, 'g'}, {"help", no_argument, NULL, 'h'}, - {"scale", no_argument, NULL, 'l'}, {"nmi", required_argument, NULL, 'n'}, + {"mmap_info", no_argument, NULL, 'M'}, + {"mmap_pages", required_argument, NULL, 'm'}, + {"munmap_info", no_argument, NULL, 'U'}, {"pid", required_argument, NULL, 'p'}, - {"vmlinux", required_argument, NULL, 'x'}, + {"realtime", required_argument, NULL, 'r'}, + {"scale", no_argument, NULL, 'l'}, {"symbol", required_argument, NULL, 's'}, {"stat", no_argument, NULL, 'S'}, + {"vmlinux", required_argument, NULL, 'x'}, {"zero", no_argument, NULL, 'z'}, - {"mmap_pages", required_argument, NULL, 'm'}, - {"mmap_info", no_argument, NULL, 'M'}, - {"munmap_info", no_argument, NULL, 'U'}, {NULL, 0, NULL, 0 } }; - int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hln:m:p:s:Sx:zMU", + int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hln:m:p:r:s:Sx:zMU", long_options, &option_index); if (c == -1) break; @@ -1127,6 +1140,7 @@ static void process_options(int argc, char *argv[]) profile_cpu = -1; } tid = atoi(optarg); break; + case 'r': realtime_prio = atoi(optarg); break; case 's': sym_filter = strdup(optarg); break; case 'S': run_perfstat = 1; break; case 'x': vmlinux = strdup(optarg); break; @@ -1289,6 +1303,7 @@ int main(int argc, char *argv[]) struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; struct perf_counter_hw_event hw_event; + pthread_t thread; int i, counter, group_fd, nr_poll = 0; unsigned int cpu; int ret; @@ -1363,8 +1378,20 @@ int main(int argc, char *argv[]) } } - printf("KernelTop refresh period: %d seconds\n", delay_secs); - last_refresh = time(NULL); + if (pthread_create(&thread, NULL, display_thread, NULL)) { + printf("Could not create display thread.\n"); + exit(-1); + } + + if (realtime_prio) { + struct sched_param param; + + param.sched_priority = realtime_prio; + if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { + printf("Could not set realtime priority.\n"); + exit(-1); + } + } while (1) { int hits = events; @@ -1374,14 +1401,8 @@ int main(int argc, char *argv[]) mmap_read(&mmap_array[i][counter]); } - if (time(NULL) >= last_refresh + delay_secs) { - print_sym_table(); - events = userspace_events = 0; - } - if (hits == events) - ret = poll(event_array, nr_poll, 1000); - hits = events; + ret = poll(event_array, nr_poll, 100); } return 0; -- cgit v1.2.3 From 8a057d84912f36e53f970c4d177cb4bb6b2f9e08 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 2 Apr 2009 11:11:59 +0200 Subject: perf_counter: move the event overflow output bits to record_type Per suggestion from Paul, move the event overflow bits to record_type and sanitize the enums a bit. 
Breaks the ABI -- again ;-) Suggested-by: Paul Mackerras Signed-off-by: Peter Zijlstra Cc: Corey Ashford Orig-LKML-Reference: <20090402091319.151921176@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 50 +++++++++++---------- kernel/perf_counter.c | 101 +++++++++++++++++-------------------------- 2 files changed, 68 insertions(+), 83 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 43083afffe0..06a6fba9f53 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -73,15 +73,6 @@ enum sw_event_ids { PERF_SW_EVENTS_MAX = 7, }; -/* - * IRQ-notification data record type: - */ -enum perf_counter_record_type { - PERF_RECORD_SIMPLE = 0, - PERF_RECORD_IRQ = 1, - PERF_RECORD_GROUP = 2, -}; - #define __PERF_COUNTER_MASK(name) \ (((1ULL << PERF_COUNTER_##name##_BITS) - 1) << \ PERF_COUNTER_##name##_SHIFT) @@ -102,6 +93,17 @@ enum perf_counter_record_type { #define PERF_COUNTER_EVENT_SHIFT 0 #define PERF_COUNTER_EVENT_MASK __PERF_COUNTER_MASK(EVENT) +/* + * Bits that can be set in hw_event.record_type to request information + * in the overflow packets. + */ +enum perf_counter_record_format { + PERF_RECORD_IP = 1U << 0, + PERF_RECORD_TID = 1U << 1, + PERF_RECORD_GROUP = 1U << 2, + PERF_RECORD_CALLCHAIN = 1U << 3, +}; + /* * Bits that can be set in hw_event.read_format to request that * reads on the counter should return the indicated quantities, @@ -125,8 +127,8 @@ struct perf_counter_hw_event { __u64 config; __u64 irq_period; - __u64 record_type; - __u64 read_format; + __u32 record_type; + __u32 read_format; __u64 disabled : 1, /* off by default */ nmi : 1, /* NMI sampling */ @@ -137,12 +139,10 @@ struct perf_counter_hw_event { exclude_kernel : 1, /* ditto kernel */ exclude_hv : 1, /* ditto hypervisor */ exclude_idle : 1, /* don't count when idle */ - include_tid : 1, /* include the tid */ mmap : 1, /* include mmap data */ munmap : 1, /* include munmap data */ - callchain : 1, /* add callchain data */ - __reserved_1 : 51; + __reserved_1 : 53; __u32 extra_config_len; __u32 __reserved_4; @@ -212,15 +212,21 @@ struct perf_event_header { enum perf_event_type { - PERF_EVENT_GROUP = 1, - - PERF_EVENT_MMAP = 2, - PERF_EVENT_MUNMAP = 3, + PERF_EVENT_MMAP = 1, + PERF_EVENT_MUNMAP = 2, - PERF_EVENT_OVERFLOW = 1UL << 31, - __PERF_EVENT_IP = 1UL << 30, - __PERF_EVENT_TID = 1UL << 29, - __PERF_EVENT_CALLCHAIN = 1UL << 28, + /* + * Half the event type space is reserved for the counter overflow + * bitfields, as found in hw_event.record_type. 
+ * + * These events will have types of the form: + * PERF_EVENT_COUNTER_OVERFLOW { | __PERF_EVENT_* } * + */ + PERF_EVENT_COUNTER_OVERFLOW = 1UL << 31, + __PERF_EVENT_IP = PERF_RECORD_IP, + __PERF_EVENT_TID = PERF_RECORD_TID, + __PERF_EVENT_GROUP = PERF_RECORD_GROUP, + __PERF_EVENT_CALLCHAIN = PERF_RECORD_CALLCHAIN, }; #ifdef __KERNEL__ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 860cdc26bd7..995063df910 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1765,27 +1765,34 @@ static void perf_output_end(struct perf_output_handle *handle) rcu_read_unlock(); } -static void perf_output_simple(struct perf_counter *counter, - int nmi, struct pt_regs *regs) +void perf_counter_output(struct perf_counter *counter, + int nmi, struct pt_regs *regs) { int ret; + u64 record_type = counter->hw_event.record_type; struct perf_output_handle handle; struct perf_event_header header; u64 ip; struct { u32 pid, tid; } tid_entry; + struct { + u64 event; + u64 counter; + } group_entry; struct perf_callchain_entry *callchain = NULL; int callchain_size = 0; - header.type = PERF_EVENT_OVERFLOW; + header.type = PERF_EVENT_COUNTER_OVERFLOW; header.size = sizeof(header); - ip = instruction_pointer(regs); - header.type |= __PERF_EVENT_IP; - header.size += sizeof(ip); + if (record_type & PERF_RECORD_IP) { + ip = instruction_pointer(regs); + header.type |= __PERF_EVENT_IP; + header.size += sizeof(ip); + } - if (counter->hw_event.include_tid) { + if (record_type & PERF_RECORD_TID) { /* namespace issues */ tid_entry.pid = current->group_leader->pid; tid_entry.tid = current->pid; @@ -1794,7 +1801,13 @@ static void perf_output_simple(struct perf_counter *counter, header.size += sizeof(tid_entry); } - if (counter->hw_event.callchain) { + if (record_type & PERF_RECORD_GROUP) { + header.type |= __PERF_EVENT_GROUP; + header.size += sizeof(u64) + + counter->nr_siblings * sizeof(group_entry); + } + + if (record_type & PERF_RECORD_CALLCHAIN) { callchain = perf_callchain(regs); if (callchain) { @@ -1810,69 +1823,35 @@ static void perf_output_simple(struct perf_counter *counter, return; perf_output_put(&handle, header); - perf_output_put(&handle, ip); - if (counter->hw_event.include_tid) - perf_output_put(&handle, tid_entry); + if (record_type & PERF_RECORD_IP) + perf_output_put(&handle, ip); - if (callchain) - perf_output_copy(&handle, callchain, callchain_size); - - perf_output_end(&handle); -} - -static void perf_output_group(struct perf_counter *counter, int nmi) -{ - struct perf_output_handle handle; - struct perf_event_header header; - struct perf_counter *leader, *sub; - unsigned int size; - struct { - u64 event; - u64 counter; - } entry; - int ret; - - size = sizeof(header) + counter->nr_siblings * sizeof(entry); + if (record_type & PERF_RECORD_TID) + perf_output_put(&handle, tid_entry); - ret = perf_output_begin(&handle, counter, size, nmi); - if (ret) - return; + if (record_type & PERF_RECORD_GROUP) { + struct perf_counter *leader, *sub; + u64 nr = counter->nr_siblings; - header.type = PERF_EVENT_GROUP; - header.size = size; + perf_output_put(&handle, nr); - perf_output_put(&handle, header); + leader = counter->group_leader; + list_for_each_entry(sub, &leader->sibling_list, list_entry) { + if (sub != counter) + sub->hw_ops->read(sub); - leader = counter->group_leader; - list_for_each_entry(sub, &leader->sibling_list, list_entry) { - if (sub != counter) - sub->hw_ops->read(sub); + group_entry.event = sub->hw_event.config; + group_entry.counter = atomic64_read(&sub->count); - entry.event = 
sub->hw_event.config; - entry.counter = atomic64_read(&sub->count); - - perf_output_put(&handle, entry); + perf_output_put(&handle, group_entry); + } } - perf_output_end(&handle); -} - -void perf_counter_output(struct perf_counter *counter, - int nmi, struct pt_regs *regs) -{ - switch (counter->hw_event.record_type) { - case PERF_RECORD_SIMPLE: - return; - - case PERF_RECORD_IRQ: - perf_output_simple(counter, nmi, regs); - break; + if (callchain) + perf_output_copy(&handle, callchain, callchain_size); - case PERF_RECORD_GROUP: - perf_output_group(counter, nmi); - break; - } + perf_output_end(&handle); } /* -- cgit v1.2.3 From c457810ab4a825161aec6ef71b581e1bc8febd1a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 2 Apr 2009 11:12:01 +0200 Subject: perf_counter: per event wakeups By request, provide a way to request a wakeup every 'n' events instead of every page of output. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Orig-LKML-Reference: <20090402091319.323309784@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 3 ++- kernel/perf_counter.c | 10 +++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 06a6fba9f53..5428ba120d7 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -145,7 +145,7 @@ struct perf_counter_hw_event { __reserved_1 : 53; __u32 extra_config_len; - __u32 __reserved_4; + __u32 wakeup_events; /* wakeup every n events */ __u64 __reserved_2; __u64 __reserved_3; @@ -321,6 +321,7 @@ struct perf_mmap_data { int nr_pages; atomic_t wakeup; atomic_t head; + atomic_t events; struct perf_counter_mmap_page *user_page; void *data_pages[0]; }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 995063df910..9bcab10e735 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1760,7 +1760,15 @@ static void perf_output_copy(struct perf_output_handle *handle, static void perf_output_end(struct perf_output_handle *handle) { - if (handle->wakeup) + int wakeup_events = handle->counter->hw_event.wakeup_events; + + if (wakeup_events) { + int events = atomic_inc_return(&handle->data->events); + if (events >= wakeup_events) { + atomic_sub(wakeup_events, &handle->data->events); + __perf_output_wakeup(handle); + } + } else if (handle->wakeup) __perf_output_wakeup(handle); rcu_read_unlock(); } -- cgit v1.2.3 From 3df70fd623bb109e0079e697c0276d220a4b7908 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 2 Apr 2009 11:12:02 +0200 Subject: perf_counter: kerneltop: update to new ABI Update to reflect the new record_type ABI changes. 
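As an illustrative sketch (not part of the patch; the event choice and period are arbitrary), a counter set up under the new ABI combines the record_type bits with the per-event wakeup limit introduced above:

	struct perf_counter_hw_event hw_event;

	memset(&hw_event, 0, sizeof(hw_event));
	hw_event.config		= PERF_COUNT_CYCLES;
	hw_event.irq_period	= 10000;
	hw_event.record_type	= PERF_RECORD_IP | PERF_RECORD_TID; /* was: include_tid = 1 */
	hw_event.nmi		= 1;
	hw_event.wakeup_events	= 64;	/* wake the reader every 64 records, not every page */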
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Orig-LKML-Reference: <20090402091319.407283141@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index 33b4fcf6e48..4f8d7917aba 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -442,7 +442,7 @@ static void create_perfstat_counter(int counter) memset(&hw_event, 0, sizeof(hw_event)); hw_event.config = event_id[counter]; - hw_event.record_type = PERF_RECORD_SIMPLE; + hw_event.record_type = 0; hw_event.nmi = 0; if (scale) hw_event.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | @@ -1277,8 +1277,8 @@ static void mmap_read(struct mmap_data *md) old += size; switch (event->header.type) { - case PERF_EVENT_OVERFLOW | __PERF_EVENT_IP: - case PERF_EVENT_OVERFLOW | __PERF_EVENT_IP | __PERF_EVENT_TID: + case PERF_EVENT_COUNTER_OVERFLOW | __PERF_EVENT_IP: + case PERF_EVENT_COUNTER_OVERFLOW | __PERF_EVENT_IP | __PERF_EVENT_TID: process_event(event->ip.ip, md->counter); break; @@ -1337,9 +1337,8 @@ int main(int argc, char *argv[]) memset(&hw_event, 0, sizeof(hw_event)); hw_event.config = event_id[counter]; hw_event.irq_period = event_count[counter]; - hw_event.record_type = PERF_RECORD_IRQ; + hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID; hw_event.nmi = nmi; - hw_event.include_tid = 1; hw_event.mmap = use_mmap; hw_event.munmap = use_munmap; -- cgit v1.2.3 From 5872bdb88a35fae7d224bd6b21e5f377e854ccfc Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 2 Apr 2009 11:12:03 +0200 Subject: perf_counter: add more context information Put in counts to tell which ips belong to what context. 
-----
| |  hv
| -- nr
| |  kernel
| --
| |  user
-----
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Orig-LKML-Reference: <20090402091319.493101305@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 9 +++++++++ include/linux/perf_counter.h | 4 ++-- kernel/perf_counter.c | 2 +- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 2a946a160ca..c74e20d593a 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -1088,6 +1088,7 @@ perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry) { unsigned long bp; char *stack; + int nr = entry->nr; callchain_store(entry, instruction_pointer(regs)); @@ -1099,6 +1100,8 @@ perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry) #endif dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry); + + entry->kernel = entry->nr - nr; } @@ -1128,6 +1131,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) { struct stack_frame frame; const void __user *fp; + int nr = entry->nr; regs = (struct pt_regs *)current->thread.sp0 - 1; fp = (void __user *)regs->bp; @@ -1147,6 +1151,8 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) callchain_store(entry, frame.return_address); fp = frame.next_fp; } + + entry->user = entry->nr - nr; } static void @@ -1182,6 +1188,9 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) entry = &__get_cpu_var(irq_entry); entry->nr = 0; + entry->hv = 0; + entry->kernel = 0; + entry->user = 0; perf_do_callchain(regs, entry); diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 5428ba120d7..90cce0c74a0 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -513,10 +513,10 @@ extern void perf_counter_mmap(unsigned long addr, unsigned long len, extern void perf_counter_munmap(unsigned long addr, unsigned long len, unsigned long pgoff, struct file *file); -#define MAX_STACK_DEPTH 255 +#define MAX_STACK_DEPTH 254 struct perf_callchain_entry { - u64 nr; + u32 nr, hv, kernel, user; u64 ip[MAX_STACK_DEPTH]; }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 9bcab10e735..f105a6e696c 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1819,7 +1819,7 @@ void perf_counter_output(struct perf_counter *counter, callchain = perf_callchain(regs); if (callchain) { - callchain_size = (1 + callchain->nr) * sizeof(u64); + callchain_size = (2 + callchain->nr) * sizeof(u64); header.type |= __PERF_EVENT_CALLCHAIN; header.size += callchain_size; -- cgit v1.2.3 From 92f22a3865abe87eea2609a6f8e5be5123f7ce4f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 2 Apr 2009 11:12:04 +0200 Subject: perf_counter: update mmap() counter read Paul noted that we don't need SMP barriers for the mmap() counter read because it's always on the same cpu (otherwise you can't access the hw counter anyway). So remove the SMP barriers and replace them with regular compiler barriers. Further, update the comment to include a race-free method of reading said hardware counter. The primary change is putting the pmc_read inside the seq-loop, otherwise we can still race and read rubbish. 
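Spelled out as a compilable sketch: pmc_read() stands in for the arch-specific counter read (e.g. rdpmc on x86) and is an assumption here; the barrier() placement anticipates the refinement in the next patch.

	#define barrier() __asm__ __volatile__("" ::: "memory")

	extern long long pmc_read(int idx);	/* assumption: arch helper */

	static long long read_self_counter(volatile struct perf_counter_mmap_page *pc)
	{
		unsigned int seq;
		long long count;

	again:
		seq = pc->lock;
		if (seq & 1)			/* write in progress */
			goto again;

		barrier();			/* read ->lock before ->index */
		if (!pc->index)			/* no hw counter mapped */
			return -1;		/* caller falls back to read() */

		count  = pmc_read(pc->index - 1);
		count += pc->offset;

		barrier();			/* compiler barrier: same cpu */
		if (pc->lock != seq)
			goto again;

		return count;
	}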
Noticed-by: Paul Mackerras Signed-off-by: Peter Zijlstra Cc: Corey Ashford Orig-LKML-Reference: <20090402091319.577951445@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 22 ++++++++++------------ kernel/perf_counter.c | 4 ++-- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 90cce0c74a0..f2b914de3f0 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -167,30 +167,28 @@ struct perf_counter_mmap_page { /* * Bits needed to read the hw counters in user-space. * - * The index and offset should be read atomically using the seqlock: - * - * __u32 seq, index; - * __s64 offset; + * u32 seq; + * s64 count; * * again: - * rmb(); * seq = pc->lock; - * * if (unlikely(seq & 1)) { * cpu_relax(); * goto again; * } * - * index = pc->index; - * offset = pc->offset; + * if (pc->index) { + * count = pmc_read(pc->index - 1); + * count += pc->offset; + * } else + * goto regular_read; * - * rmb(); + * barrier(); * if (pc->lock != seq) * goto again; * - * After this, index contains architecture specific counter index + 1, - * so that 0 means unavailable, offset contains the value to be added - * to the result of the raw timer read to obtain this counter's value. + * NOTE: for obvious reason this only works on self-monitoring + * processes. */ __u32 lock; /* seqlock for synchronization */ __u32 index; /* hardware counter identifier */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index f105a6e696c..2a5d4f52556 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1340,13 +1340,13 @@ void perf_counter_update_userpage(struct perf_counter *counter) */ preempt_disable(); ++userpg->lock; - smp_wmb(); + barrier(); userpg->index = counter->hw.idx; userpg->offset = atomic64_read(&counter->count); if (counter->state == PERF_COUNTER_STATE_ACTIVE) userpg->offset -= atomic64_read(&counter->hw.prev_count); - smp_wmb(); + barrier(); ++userpg->lock; preempt_enable(); unlock: -- cgit v1.2.3 From a2e87d06ddbe6e6fdb8d6d2e5e985efe4efb07dd Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Apr 2009 11:44:59 +0200 Subject: perf_counter: update mmap() counter read, take 2 Update the userspace read method. Paul noted that: - userspace cannot observe ->lock & 1 on the same cpu. - we need a barrier() between reading ->lock and ->index to ensure we read them in that particular order. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090406094517.368446033@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index f2b914de3f0..e22ab47a2f4 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -170,22 +170,18 @@ struct perf_counter_mmap_page { * u32 seq; * s64 count; * - * again: - * seq = pc->lock; - * if (unlikely(seq & 1)) { - * cpu_relax(); - * goto again; - * } + * do { + * seq = pc->lock; * - * if (pc->index) { - * count = pmc_read(pc->index - 1); - * count += pc->offset; - * } else - * goto regular_read; + * barrier() + * if (pc->index) { + * count = pmc_read(pc->index - 1); + * count += pc->offset; + * } else + * goto regular_read; * - * barrier(); - * if (pc->lock != seq) - * goto again; + * barrier(); + * } while (pc->lock != seq); * * NOTE: for obvious reason this only works on self-monitoring * processes. 
-- cgit v1.2.3 From 9c03d88e328d5f28f13191622c2ea1349c36b799 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Apr 2009 11:45:00 +0200 Subject: perf_counter: add more context information Change the callchain context entries to u16, so as to gain some space. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090406094517.457320003@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 4 ++-- kernel/perf_counter.c | 6 ++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index e22ab47a2f4..f9d5cf0bfbd 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -507,10 +507,10 @@ extern void perf_counter_mmap(unsigned long addr, unsigned long len, extern void perf_counter_munmap(unsigned long addr, unsigned long len, unsigned long pgoff, struct file *file); -#define MAX_STACK_DEPTH 254 +#define MAX_STACK_DEPTH 255 struct perf_callchain_entry { - u32 nr, hv, kernel, user; + u16 nr, hv, kernel, user; u64 ip[MAX_STACK_DEPTH]; }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 2a5d4f52556..727624db507 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1657,9 +1657,7 @@ void perf_counter_do_pending(void) * Callchain support -- arch specific */ -struct perf_callchain_entry * -__attribute__((weak)) -perf_callchain(struct pt_regs *regs) +__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) { return NULL; } @@ -1819,7 +1817,7 @@ void perf_counter_output(struct perf_counter *counter, callchain = perf_callchain(regs); if (callchain) { - callchain_size = (2 + callchain->nr) * sizeof(u64); + callchain_size = (1 + callchain->nr) * sizeof(u64); header.type |= __PERF_EVENT_CALLCHAIN; header.size += callchain_size; -- cgit v1.2.3 From 3c446b3d3b38f991f97e9d2df0ad26a60a94dcff Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Apr 2009 11:45:01 +0200 Subject: perf_counter: SIGIO support Provide support for fcntl() I/O availability signals. 
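Requesting the signal from user-space is the usual O_ASYNC idiom; a sketch (signal handler installation omitted):

	#include <fcntl.h>
	#include <unistd.h>

	static int request_sigio(int perf_fd)
	{
		if (fcntl(perf_fd, F_SETOWN, getpid()) < 0)	/* deliver SIGIO to us */
			return -1;

		return fcntl(perf_fd, F_SETFL,
			     fcntl(perf_fd, F_GETFL) | O_ASYNC);	/* arm fasync */
	}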
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090406094517.579788800@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 2 ++ kernel/perf_counter.c | 20 +++++++++++++++++++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index f9d5cf0bfbd..8d5d11b8d01 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -238,6 +238,7 @@ enum perf_event_type { #include #include #include +#include #include struct task_struct; @@ -398,6 +399,7 @@ struct perf_counter { /* poll related */ wait_queue_head_t waitq; + struct fasync_struct *fasync; /* optional: for NMIs */ struct perf_wakeup_entry wakeup; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 727624db507..c58cc64319e 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1526,6 +1526,22 @@ out: return ret; } +static int perf_fasync(int fd, struct file *filp, int on) +{ + struct perf_counter *counter = filp->private_data; + struct inode *inode = filp->f_path.dentry->d_inode; + int retval; + + mutex_lock(&inode->i_mutex); + retval = fasync_helper(fd, filp, on, &counter->fasync); + mutex_unlock(&inode->i_mutex); + + if (retval < 0) + return retval; + + return 0; +} + static const struct file_operations perf_fops = { .release = perf_release, .read = perf_read, @@ -1533,6 +1549,7 @@ static const struct file_operations perf_fops = { .unlocked_ioctl = perf_ioctl, .compat_ioctl = perf_ioctl, .mmap = perf_mmap, + .fasync = perf_fasync, }; /* @@ -1549,7 +1566,7 @@ void perf_counter_wakeup(struct perf_counter *counter) rcu_read_lock(); data = rcu_dereference(counter->data); if (data) { - (void)atomic_xchg(&data->wakeup, POLL_IN); + atomic_set(&data->wakeup, POLL_IN); /* * Ensure all data writes are issued before updating the * user-space data head information. The matching rmb() @@ -1561,6 +1578,7 @@ void perf_counter_wakeup(struct perf_counter *counter) rcu_read_unlock(); wake_up_all(&counter->waitq); + kill_fasync(&counter->fasync, SIGIO, POLL_IN); } /* -- cgit v1.2.3 From 671dec5daf3b3c43c5777be282f00120a44cf37f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Apr 2009 11:45:02 +0200 Subject: perf_counter: generalize pending infrastructure Prepare the pending infrastructure to do more than wakeups. 
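The heart of it stays the per-cpu lockless single-linked list; the entry merely grows a callback. A user-space analog of the push operation, with __sync_val_compare_and_swap standing in for the kernel's cmpxchg (a sketch, not the patch itself; entries start out with ->next == NULL):

	struct pending_entry {
		struct pending_entry *next;
		void (*func)(struct pending_entry *);
	};

	#define PENDING_TAIL ((struct pending_entry *)-1UL)

	static struct pending_entry *pending_head = PENDING_TAIL;

	static void pending_queue(struct pending_entry *entry,
				  void (*func)(struct pending_entry *))
	{
		struct pending_entry *old;

		/* ->next is NULL only while off-list: claim it or bail */
		if (__sync_val_compare_and_swap(&entry->next, NULL, PENDING_TAIL))
			return;

		entry->func = func;

		do {
			old = pending_head;
			entry->next = old;
		} while (__sync_val_compare_and_swap(&pending_head, old, entry) != old);
	}

The consumer xchg()s the head back to PENDING_TAIL and walks the list, calling entry->func(entry) for each node.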
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090406094517.634732847@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 7 +++--- kernel/perf_counter.c | 53 ++++++++++++++++++++++++++------------------ 2 files changed, 36 insertions(+), 24 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 8d5d11b8d01..977fb15a53f 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -321,8 +321,9 @@ struct perf_mmap_data { void *data_pages[0]; }; -struct perf_wakeup_entry { - struct perf_wakeup_entry *next; +struct perf_pending_entry { + struct perf_pending_entry *next; + void (*func)(struct perf_pending_entry *); }; /** @@ -401,7 +402,7 @@ struct perf_counter { wait_queue_head_t waitq; struct fasync_struct *fasync; /* optional: for NMIs */ - struct perf_wakeup_entry wakeup; + struct perf_pending_entry pending; void (*destroy)(struct perf_counter *); struct rcu_head rcu_head; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index c58cc64319e..0a2ade2e4f1 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1581,6 +1581,14 @@ void perf_counter_wakeup(struct perf_counter *counter) kill_fasync(&counter->fasync, SIGIO, POLL_IN); } +static void perf_pending_wakeup(struct perf_pending_entry *entry) +{ + struct perf_counter *counter = container_of(entry, + struct perf_counter, pending); + + perf_counter_wakeup(counter); +} + /* * Pending wakeups * @@ -1590,45 +1598,47 @@ void perf_counter_wakeup(struct perf_counter *counter) * single linked list and use cmpxchg() to add entries lockless. */ -#define PENDING_TAIL ((struct perf_wakeup_entry *)-1UL) +#define PENDING_TAIL ((struct perf_pending_entry *)-1UL) -static DEFINE_PER_CPU(struct perf_wakeup_entry *, perf_wakeup_head) = { +static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = { PENDING_TAIL, }; -static void perf_pending_queue(struct perf_counter *counter) +static void perf_pending_queue(struct perf_pending_entry *entry, + void (*func)(struct perf_pending_entry *)) { - struct perf_wakeup_entry **head; - struct perf_wakeup_entry *prev, *next; + struct perf_pending_entry **head; - if (cmpxchg(&counter->wakeup.next, NULL, PENDING_TAIL) != NULL) + if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL) return; - head = &get_cpu_var(perf_wakeup_head); + entry->func = func; + + head = &get_cpu_var(perf_pending_head); do { - prev = counter->wakeup.next = *head; - next = &counter->wakeup; - } while (cmpxchg(head, prev, next) != prev); + entry->next = *head; + } while (cmpxchg(head, entry->next, entry) != entry->next); set_perf_counter_pending(); - put_cpu_var(perf_wakeup_head); + put_cpu_var(perf_pending_head); } static int __perf_pending_run(void) { - struct perf_wakeup_entry *list; + struct perf_pending_entry *list; int nr = 0; - list = xchg(&__get_cpu_var(perf_wakeup_head), PENDING_TAIL); + list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL); while (list != PENDING_TAIL) { - struct perf_counter *counter = container_of(list, - struct perf_counter, wakeup); + void (*func)(struct perf_pending_entry *); + struct perf_pending_entry *entry = list; list = list->next; - counter->wakeup.next = NULL; + func = entry->func; + entry->next = NULL; /* * Ensure we observe the unqueue before we issue the wakeup, * so that we won't be waiting forever. 
@@ -1636,7 +1646,7 @@ static int __perf_pending_run(void) */ smp_wmb(); - perf_counter_wakeup(counter); + func(entry); nr++; } @@ -1658,7 +1668,7 @@ static inline int perf_not_pending(struct perf_counter *counter) * so that we do not miss the wakeup. -- see perf_pending_handle() */ smp_rmb(); - return counter->wakeup.next == NULL; + return counter->pending.next == NULL; } static void perf_pending_sync(struct perf_counter *counter) @@ -1695,9 +1705,10 @@ struct perf_output_handle { static inline void __perf_output_wakeup(struct perf_output_handle *handle) { - if (handle->nmi) - perf_pending_queue(handle->counter); - else + if (handle->nmi) { + perf_pending_queue(&handle->counter->pending, + perf_pending_wakeup); + } else perf_counter_wakeup(handle->counter); } -- cgit v1.2.3 From b6276f353bf490add62dcf7db0ebd75baa3e1a37 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Apr 2009 11:45:03 +0200 Subject: perf_counter: x86: self-IPI for pending work Implement set_perf_counter_pending() with a self-IPI so that it will run ASAP in a usable context. For now use a second IRQ vector, because the primary vector pokes the apic in funny ways that seem to confuse things. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090406094517.724626696@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/entry_arch.h | 1 + arch/x86/include/asm/hardirq.h | 1 + arch/x86/include/asm/hw_irq.h | 1 + arch/x86/include/asm/irq_vectors.h | 5 +++++ arch/x86/include/asm/perf_counter.h | 3 ++- arch/x86/kernel/cpu/perf_counter.c | 14 ++++++++++++++ arch/x86/kernel/entry_64.S | 2 ++ arch/x86/kernel/irq.c | 5 +++++ arch/x86/kernel/irqinit_32.c | 1 + arch/x86/kernel/irqinit_64.c | 1 + 10 files changed, 33 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h index c2e6bedaf25..fe24d280249 100644 --- a/arch/x86/include/asm/entry_arch.h +++ b/arch/x86/include/asm/entry_arch.h @@ -50,6 +50,7 @@ BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) #ifdef CONFIG_PERF_COUNTERS BUILD_INTERRUPT(perf_counter_interrupt, LOCAL_PERF_VECTOR) +BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR) #endif #ifdef CONFIG_X86_MCE_P4THERMAL diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index 25454427cee..f5ebe2aaca4 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -14,6 +14,7 @@ typedef struct { #endif unsigned int generic_irqs; /* arch dependent */ unsigned int apic_perf_irqs; + unsigned int apic_pending_irqs; #ifdef CONFIG_SMP unsigned int irq_resched_count; unsigned int irq_call_count; diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index ae80f64973e..7309c0ad690 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -30,6 +30,7 @@ extern void apic_timer_interrupt(void); extern void generic_interrupt(void); extern void error_interrupt(void); extern void perf_counter_interrupt(void); +extern void perf_pending_interrupt(void); extern void spurious_interrupt(void); extern void thermal_interrupt(void); diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 3cbd79bbb47..545bb811ccb 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -116,6 +116,11 @@ */ #define GENERIC_INTERRUPT_VECTOR 0xed +/* + * Performance monitoring pending work vector: + */ +#define LOCAL_PENDING_VECTOR 0xec + /* * First APIC vector available to drivers: (vectors 
0x30-0xee) we * start at 0x31(0x41) to spread out vectors evenly between priority diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h index e2b0e66b235..d08dd52cb8f 100644 --- a/arch/x86/include/asm/perf_counter.h +++ b/arch/x86/include/asm/perf_counter.h @@ -84,7 +84,8 @@ union cpuid10_edx { #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b #define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2) -#define set_perf_counter_pending() do { } while (0) +extern void set_perf_counter_pending(void); + #define clear_perf_counter_pending() do { } while (0) #define test_perf_counter_pending() (0) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index c74e20d593a..438415866fe 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -849,6 +849,20 @@ void smp_perf_counter_interrupt(struct pt_regs *regs) irq_exit(); } +void smp_perf_pending_interrupt(struct pt_regs *regs) +{ + irq_enter(); + ack_APIC_irq(); + inc_irq_stat(apic_pending_irqs); + perf_counter_do_pending(); + irq_exit(); +} + +void set_perf_counter_pending(void) +{ + apic->send_IPI_self(LOCAL_PENDING_VECTOR); +} + void perf_counters_lapic_init(int nmi) { u32 apic_val; diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 3f129d963a0..1d46cba56fd 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1028,6 +1028,8 @@ apicinterrupt SPURIOUS_APIC_VECTOR \ #ifdef CONFIG_PERF_COUNTERS apicinterrupt LOCAL_PERF_VECTOR \ perf_counter_interrupt smp_perf_counter_interrupt +apicinterrupt LOCAL_PENDING_VECTOR \ + perf_pending_interrupt smp_perf_pending_interrupt #endif /* diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 9c2754302ec..d465487da58 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -67,6 +67,10 @@ static int show_other_interrupts(struct seq_file *p, int prec) for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs); seq_printf(p, " Performance counter interrupts\n"); + seq_printf(p, "PND: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs); + seq_printf(p, " Performance pending work\n"); #endif if (generic_interrupt_extension) { seq_printf(p, "PLT: "); @@ -171,6 +175,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu) sum += irq_stats(cpu)->apic_timer_irqs; sum += irq_stats(cpu)->irq_spurious_count; sum += irq_stats(cpu)->apic_perf_irqs; + sum += irq_stats(cpu)->apic_pending_irqs; #endif if (generic_interrupt_extension) sum += irq_stats(cpu)->generic_irqs; diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index 925d87cfc55..3190a6b961e 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c @@ -166,6 +166,7 @@ static void __init apic_intr_init(void) alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); # ifdef CONFIG_PERF_COUNTERS alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt); + alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt); # endif # ifdef CONFIG_X86_MCE_P4THERMAL diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index 665e2ab48ab..53ceb26f80f 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c @@ -156,6 +156,7 @@ static void __init apic_intr_init(void) /* Performance monitoring interrupt: */ #ifdef CONFIG_PERF_COUNTERS alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt); + alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt); #endif } -- cgit v1.2.3 From 
f6c7d5fe58b4846ee0cb4b98b6042489705eced4 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Apr 2009 11:45:04 +0200 Subject: perf_counter: there's more to overflow than writing events Prepare for more generic overflow handling. The new perf_counter_overflow() method will handle the generic bits of the counter overflow, and can return a !0 return value, in which case the counter should be (soft) disabled, so that it won't count until it's properly disabled. XXX: do powerpc and swcounter Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090406094517.812109629@chello.nl> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 2 +- arch/x86/kernel/cpu/perf_counter.c | 3 ++- include/linux/perf_counter.h | 4 ++-- kernel/perf_counter.c | 29 +++++++++++++++++++++++------ 4 files changed, 28 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 0a4d14f279a..f88c35d0710 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -732,7 +732,7 @@ static void record_and_restart(struct perf_counter *counter, long val, * Finally record data if requested. */ if (record) - perf_counter_output(counter, 1, regs); + perf_counter_overflow(counter, 1, regs); } /* diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 438415866fe..1116a41bc7b 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -800,7 +800,8 @@ again: continue; perf_save_and_restart(counter); - perf_counter_output(counter, nmi, regs); + if (perf_counter_overflow(counter, nmi, regs)) + __pmc_generic_disable(counter, &counter->hw, bit); } hw_perf_ack_status(ack); diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 977fb15a53f..ca2d4df29e0 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -491,8 +491,8 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader, struct perf_counter_context *ctx, int cpu); extern void perf_counter_update_userpage(struct perf_counter *counter); -extern void perf_counter_output(struct perf_counter *counter, - int nmi, struct pt_regs *regs); +extern int perf_counter_overflow(struct perf_counter *counter, + int nmi, struct pt_regs *regs); /* * Return 1 for a software counter, 0 for a hardware counter */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 0a2ade2e4f1..195e976eb07 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1800,8 +1800,8 @@ static void perf_output_end(struct perf_output_handle *handle) rcu_read_unlock(); } -void perf_counter_output(struct perf_counter *counter, - int nmi, struct pt_regs *regs) +static void perf_counter_output(struct perf_counter *counter, + int nmi, struct pt_regs *regs) { int ret; u64 record_type = counter->hw_event.record_type; @@ -2033,6 +2033,17 @@ void perf_counter_munmap(unsigned long addr, unsigned long len, perf_counter_mmap_event(&mmap_event); } +/* + * Generic counter overflow handling. 
+ */ + +int perf_counter_overflow(struct perf_counter *counter, + int nmi, struct pt_regs *regs) +{ + perf_counter_output(counter, nmi, regs); + return 0; +} + /* * Generic software counter infrastructure */ @@ -2077,6 +2088,7 @@ static void perf_swcounter_set_period(struct perf_counter *counter) static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) { + enum hrtimer_restart ret = HRTIMER_RESTART; struct perf_counter *counter; struct pt_regs *regs; @@ -2092,12 +2104,14 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) !counter->hw_event.exclude_user) regs = task_pt_regs(current); - if (regs) - perf_counter_output(counter, 0, regs); + if (regs) { + if (perf_counter_overflow(counter, 0, regs)) + ret = HRTIMER_NORESTART; + } hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period)); - return HRTIMER_RESTART; + return ret; } static void perf_swcounter_overflow(struct perf_counter *counter, @@ -2105,7 +2119,10 @@ static void perf_swcounter_overflow(struct perf_counter *counter, { perf_swcounter_update(counter); perf_swcounter_set_period(counter); - perf_counter_output(counter, nmi, regs); + if (perf_counter_overflow(counter, nmi, regs)) + /* soft-disable the counter */ + ; + } static int perf_swcounter_match(struct perf_counter *counter, -- cgit v1.2.3 From ebb3c4c4cb81d64cc041356915ec015e2c57092a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Apr 2009 11:45:05 +0200 Subject: perf_counter: fix the mlock accounting Reading through the code I saw I forgot to finish the mlock accounting. Do so now. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090406094517.899767331@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 195e976eb07..c841563de04 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1461,13 +1461,14 @@ static void perf_mmap_close(struct vm_area_struct *vma) if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) { + vma->vm_mm->locked_vm -= counter->data->nr_pages + 1; perf_mmap_data_free(counter); mutex_unlock(&counter->mmap_mutex); } } static struct vm_operations_struct perf_mmap_vmops = { - .open = perf_mmap_open, + .open = perf_mmap_open, .close = perf_mmap_close, .fault = perf_mmap_fault, }; @@ -1499,24 +1500,32 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) if (vma->vm_pgoff != 0) return -EINVAL; - locked = vma_size >> PAGE_SHIFT; - locked += vma->vm_mm->locked_vm; + mutex_lock(&counter->mmap_mutex); + if (atomic_inc_not_zero(&counter->mmap_count)) { + if (nr_pages != counter->data->nr_pages) + ret = -EINVAL; + goto unlock; + } + + locked = vma->vm_mm->locked_vm; + locked += nr_pages + 1; lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; lock_limit >>= PAGE_SHIFT; - if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) - return -EPERM; - - mutex_lock(&counter->mmap_mutex); + if (atomic_inc_not_zero(&counter->mmap_count)) - goto out; + if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) { + ret = -EPERM; + goto unlock; + } WARN_ON(counter->data); ret = perf_mmap_data_alloc(counter, nr_pages); - if (!ret) - atomic_set(&counter->mmap_count, 1); -out: + if (ret) + goto unlock; + + atomic_set(&counter->mmap_count, 1); + vma->vm_mm->locked_vm += nr_pages + 1; +unlock: mutex_unlock(&counter->mmap_mutex); vma->vm_flags &= ~VM_MAYWRITE; -- cgit v1.2.3 
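To recap the accounting rule the patch above completes (a summary, not new code): mapping 2^n data pages pins 2^n + 1 pages, the extra one being the control page, and that total is what gets charged against RLIMIT_MEMLOCK and undone at close:

	locked     = vma->vm_mm->locked_vm + nr_pages + 1;	/* data + control page */
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;

	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		return -EPERM;		/* over the limit, not privileged */

	vma->vm_mm->locked_vm += nr_pages + 1;	/* perf_mmap_close() subtracts it */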
From 339f7c90b8a2f3aa2dd4267e79f797999e8a3c59 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Apr 2009 11:45:06 +0200 Subject: perf_counter: PERF_RECORD_TIME By popular request, provide means to log a timestamp along with the counter overflow event. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090406094518.024173282@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 2 ++ kernel/perf_counter.c | 14 ++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index ca2d4df29e0..928a7fae096 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -102,6 +102,7 @@ enum perf_counter_record_format { PERF_RECORD_TID = 1U << 1, PERF_RECORD_GROUP = 1U << 2, PERF_RECORD_CALLCHAIN = 1U << 3, + PERF_RECORD_TIME = 1U << 4, }; /* @@ -221,6 +222,7 @@ enum perf_event_type { __PERF_EVENT_TID = PERF_RECORD_TID, __PERF_EVENT_GROUP = PERF_RECORD_GROUP, __PERF_EVENT_CALLCHAIN = PERF_RECORD_CALLCHAIN, + __PERF_EVENT_TIME = PERF_RECORD_TIME, }; #ifdef __KERNEL__ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index c841563de04..19990d1f021 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1826,6 +1826,7 @@ static void perf_counter_output(struct perf_counter *counter, } group_entry; struct perf_callchain_entry *callchain = NULL; int callchain_size = 0; + u64 time; header.type = PERF_EVENT_COUNTER_OVERFLOW; header.size = sizeof(header); @@ -1862,6 +1863,16 @@ static void perf_counter_output(struct perf_counter *counter, } } + if (record_type & PERF_RECORD_TIME) { + /* + * Maybe do better on x86 and provide cpu_clock_nmi() + */ + time = sched_clock(); + + header.type |= __PERF_EVENT_TIME; + header.size += sizeof(u64); + } + ret = perf_output_begin(&handle, counter, header.size, nmi); if (ret) return; @@ -1895,6 +1906,9 @@ static void perf_counter_output(struct perf_counter *counter, if (callchain) perf_output_copy(&handle, callchain, callchain_size); + if (record_type & PERF_RECORD_TIME) + perf_output_put(&handle, time); + perf_output_end(&handle); } -- cgit v1.2.3 From 79f146415623fe74f39af67c0f6adc208939a410 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Apr 2009 11:45:07 +0200 Subject: perf_counter: counter overflow limit Provide means to auto-disable the counter after 'n' overflow events. Create the counter with hw_event.disabled = 1, and then issue an ioctl(fd, PERF_COUNTER_IOC_REFRESH, n); to set the limit and enable the counter. 
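A usage sketch (error handling omitted; the perf_counter_open() wrapper over the raw syscall is an assumption here, as is the choice of event and period):

	struct perf_counter_hw_event hw_event;
	int fd;

	memset(&hw_event, 0, sizeof(hw_event));
	hw_event.config     = PERF_COUNT_INSTRUCTIONS;
	hw_event.irq_period = 100000;
	hw_event.disabled   = 1;			/* created stopped */

	fd = perf_counter_open(&hw_event, 0 /* pid */, -1 /* cpu */,
			       -1 /* group_fd */, 0 /* flags */);

	/* run until 100 overflows have been delivered, then auto-disable: */
	ioctl(fd, PERF_COUNTER_IOC_REFRESH, 100);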
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090406094518.083139737@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 12 ++++++++--- kernel/perf_counter.c | 51 +++++++++++++++++++++++++++++++++++--------- 2 files changed, 50 insertions(+), 13 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 928a7fae096..ef4dcbff75a 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -155,8 +155,9 @@ struct perf_counter_hw_event { /* * Ioctls that can be done on a perf counter fd: */ -#define PERF_COUNTER_IOC_ENABLE _IO('$', 0) -#define PERF_COUNTER_IOC_DISABLE _IO('$', 1) +#define PERF_COUNTER_IOC_ENABLE _IO ('$', 0) +#define PERF_COUNTER_IOC_DISABLE _IO ('$', 1) +#define PERF_COUNTER_IOC_REFRESH _IOW('$', 2, u32) /* * Structure of the page that can be mapped via mmap @@ -403,9 +404,14 @@ struct perf_counter { /* poll related */ wait_queue_head_t waitq; struct fasync_struct *fasync; - /* optional: for NMIs */ + + /* delayed work for NMIs and such */ + int pending_wakeup; + int pending_disable; struct perf_pending_entry pending; + atomic_t event_limit; + void (*destroy)(struct perf_counter *); struct rcu_head rcu_head; #endif diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 19990d1f021..c05e10354bc 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -744,6 +744,12 @@ static void perf_counter_enable(struct perf_counter *counter) spin_unlock_irq(&ctx->lock); } +static void perf_counter_refresh(struct perf_counter *counter, int refresh) +{ + atomic_add(refresh, &counter->event_limit); + perf_counter_enable(counter); +} + /* * Enable a counter and all its children. */ @@ -1311,6 +1317,9 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case PERF_COUNTER_IOC_DISABLE: perf_counter_disable_family(counter); break; + case PERF_COUNTER_IOC_REFRESH: + perf_counter_refresh(counter, arg); + break; default: err = -ENOTTY; } @@ -1590,14 +1599,6 @@ void perf_counter_wakeup(struct perf_counter *counter) kill_fasync(&counter->fasync, SIGIO, POLL_IN); } -static void perf_pending_wakeup(struct perf_pending_entry *entry) -{ - struct perf_counter *counter = container_of(entry, - struct perf_counter, pending); - - perf_counter_wakeup(counter); -} - /* * Pending wakeups * @@ -1607,6 +1608,22 @@ static void perf_pending_wakeup(struct perf_pending_entry *entry) * single linked list and use cmpxchg() to add entries lockless. 
*/ +static void perf_pending_counter(struct perf_pending_entry *entry) +{ + struct perf_counter *counter = container_of(entry, + struct perf_counter, pending); + + if (counter->pending_disable) { + counter->pending_disable = 0; + perf_counter_disable(counter); + } + + if (counter->pending_wakeup) { + counter->pending_wakeup = 0; + perf_counter_wakeup(counter); + } +} + #define PENDING_TAIL ((struct perf_pending_entry *)-1UL) static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = { @@ -1715,8 +1732,9 @@ struct perf_output_handle { static inline void __perf_output_wakeup(struct perf_output_handle *handle) { if (handle->nmi) { + handle->counter->pending_wakeup = 1; perf_pending_queue(&handle->counter->pending, - perf_pending_wakeup); + perf_pending_counter); } else perf_counter_wakeup(handle->counter); } @@ -2063,8 +2081,21 @@ void perf_counter_munmap(unsigned long addr, unsigned long len, int perf_counter_overflow(struct perf_counter *counter, int nmi, struct pt_regs *regs) { + int events = atomic_read(&counter->event_limit); + int ret = 0; + + if (events && atomic_dec_and_test(&counter->event_limit)) { + ret = 1; + if (nmi) { + counter->pending_disable = 1; + perf_pending_queue(&counter->pending, + perf_pending_counter); + } else + perf_counter_disable(counter); + } + perf_counter_output(counter, nmi, regs); - return 0; + return ret; } /* -- cgit v1.2.3 From 0c593b3411341e3a05a61f5527df36ab02bd11e8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Apr 2009 11:45:08 +0200 Subject: perf_counter: comment the perf_event_type stuff Describe the event format. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090406094518.211174347@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index ef4dcbff75a..81220188d05 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -208,6 +208,20 @@ struct perf_event_header { enum perf_event_type { + /* + * The MMAP events record the PROT_EXEC mappings so that we can + * correlate userspace IPs to code. They have the following structure: + * + * struct { + * struct perf_event_header header; + * + * u32 pid, tid; + * u64 addr; + * u64 len; + * u64 pgoff; + * char filename[]; + * }; + */ PERF_EVENT_MMAP = 1, PERF_EVENT_MUNMAP = 2, @@ -217,6 +231,24 @@ enum perf_event_type { * * These events will have types of the form: * PERF_EVENT_COUNTER_OVERFLOW { | __PERF_EVENT_* } * + * + * struct { + * struct perf_event_header header; + * + * { u64 ip; } && __PERF_EVENT_IP + * { u32 pid, tid; } && __PERF_EVENT_TID + * + * { u64 nr; + * { u64 event, val; } cnt[nr]; } && __PERF_EVENT_GROUP + * + * { u16 nr, + * hv, + * kernel, + * user; + * u64 ips[nr]; } && __PERF_EVENT_CALLCHAIN + * + * { u64 time; } && __PERF_EVENT_TIME + * }; */ PERF_EVENT_COUNTER_OVERFLOW = 1UL << 31, __PERF_EVENT_IP = PERF_RECORD_IP, -- cgit v1.2.3 From 4c9e25428ff46b968a30f1dfafdba550cb6e4141 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Apr 2009 11:45:09 +0200 Subject: perf_counter: change event definition Currently the definition of an event is slightly ambiguous. We have wakeup events, for poll() and SIGIO, which are either generated when a record crosses a page boundary (hw_events.wakeup_events == 0), or every wakeup_events new records. 
Now a record can be either a counter overflow record, or a number of different things, like the mmap PROT_EXEC region notifications. Then there is the PERF_COUNTER_IOC_REFRESH event limit, which only considers counter overflows. This patch changes the wakeup_events and SIGIO notifications to consider only overflow events. Furthermore, it changes the SIGIO notification to report POLL_HUP (instead of POLL_IN) when the event limit is reached and the counter will be disabled. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090406094518.266679874@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 1 + kernel/perf_counter.c | 22 +++++++++++++++------- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 81220188d05..0f5a4005048 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -439,6 +439,7 @@ struct perf_counter { /* delayed work for NMIs and such */ int pending_wakeup; + int pending_kill; int pending_disable; struct perf_pending_entry pending; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index c05e10354bc..8c8eaf0625f 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1596,7 +1596,11 @@ void perf_counter_wakeup(struct perf_counter *counter) rcu_read_unlock(); wake_up_all(&counter->waitq); - kill_fasync(&counter->fasync, SIGIO, POLL_IN); + + if (counter->pending_kill) { + kill_fasync(&counter->fasync, SIGIO, counter->pending_kill); + counter->pending_kill = 0; + } } /* @@ -1727,6 +1731,7 @@ struct perf_output_handle { unsigned int head; int wakeup; int nmi; + int overflow; }; static inline void __perf_output_wakeup(struct perf_output_handle *handle) @@ -1741,7 +1746,7 @@ static inline void __perf_output_wakeup(struct perf_output_handle *handle) static int perf_output_begin(struct perf_output_handle *handle, struct perf_counter *counter, unsigned int size, - int nmi) + int nmi, int overflow) { struct perf_mmap_data *data; unsigned int offset, head; @@ -1751,8 +1756,9 @@ static int perf_output_begin(struct perf_output_handle *handle, if (!data) goto out; - handle->counter = counter; - handle->nmi = nmi; + handle->counter = counter; + handle->nmi = nmi; + handle->overflow = overflow; if (!data->nr_pages) goto fail; @@ -1816,7 +1822,7 @@ static void perf_output_end(struct perf_output_handle *handle) { int wakeup_events = handle->counter->hw_event.wakeup_events; - if (wakeup_events) { + if (handle->overflow && wakeup_events) { int events = atomic_inc_return(&handle->data->events); if (events >= wakeup_events) { atomic_sub(wakeup_events, &handle->data->events); @@ -1891,7 +1897,7 @@ static void perf_counter_output(struct perf_counter *counter, header.size += sizeof(u64); } - ret = perf_output_begin(&handle, counter, header.size, nmi); + ret = perf_output_begin(&handle, counter, header.size, nmi, 1); if (ret) return; @@ -1955,7 +1961,7 @@ static void perf_counter_mmap_output(struct perf_counter *counter, { struct perf_output_handle handle; int size = mmap_event->event.header.size; - int ret = perf_output_begin(&handle, counter, size, 0); + int ret = perf_output_begin(&handle, counter, size, 0, 0); if (ret) return; @@ -2084,8 +2090,10 @@ int perf_counter_overflow(struct perf_counter *counter, int events = atomic_read(&counter->event_limit); int ret = 0; + counter->pending_kill = POLL_IN; if (events && atomic_dec_and_test(&counter->event_limit)) { ret = 1; + counter->pending_kill = POLL_HUP; if (nmi) { counter->pending_disable = 1;
perf_pending_queue(&counter->pending, -- cgit v1.2.3 From 4af4998b8aa35600f4c4a4f3c3a23baca6081d02 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Apr 2009 11:45:10 +0200 Subject: perf_counter: rework context time Since perf_counter_context is switched along with tasks, we can maintain the context time without using the task runtime clock. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090406094518.353552838@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 10 ++---- kernel/perf_counter.c | 78 +++++++++++++++++++------------------------- 2 files changed, 37 insertions(+), 51 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 0f5a4005048..7f5d353d78a 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -477,14 +477,10 @@ struct perf_counter_context { struct task_struct *task; /* - * time_now is the current time in nanoseconds since an arbitrary - * point in the past. For per-task counters, this is based on the - * task clock, and for per-cpu counters it is based on the cpu clock. - * time_lost is an offset from the task/cpu clock, used to make it - * appear that time only passes while the context is scheduled in. + * Context clock, runs when context enabled. */ - u64 time_now; - u64 time_lost; + u64 time; + u64 timestamp; #endif }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 8c8eaf0625f..84d85ab4e16 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -117,7 +117,7 @@ counter_sched_out(struct perf_counter *counter, return; counter->state = PERF_COUNTER_STATE_INACTIVE; - counter->tstamp_stopped = ctx->time_now; + counter->tstamp_stopped = ctx->time; counter->hw_ops->disable(counter); counter->oncpu = -1; @@ -253,27 +253,20 @@ retry: spin_unlock_irq(&ctx->lock); } -/* - * Get the current time for this context. - * If this is a task context, we use the task's task clock, - * or for a per-cpu context, we use the cpu clock. - */ -static u64 get_context_time(struct perf_counter_context *ctx, int update) +static inline u64 perf_clock(void) { - struct task_struct *curr = ctx->task; - - if (!curr) - return cpu_clock(smp_processor_id()); - - return __task_delta_exec(curr, update) + curr->se.sum_exec_runtime; + return cpu_clock(smp_processor_id()); } /* * Update the record of the current time in a context. 
*/ -static void update_context_time(struct perf_counter_context *ctx, int update) +static void update_context_time(struct perf_counter_context *ctx) { - ctx->time_now = get_context_time(ctx, update) - ctx->time_lost; + u64 now = perf_clock(); + + ctx->time += now - ctx->timestamp; + ctx->timestamp = now; } /* @@ -284,15 +277,17 @@ static void update_counter_times(struct perf_counter *counter) struct perf_counter_context *ctx = counter->ctx; u64 run_end; - if (counter->state >= PERF_COUNTER_STATE_INACTIVE) { - counter->total_time_enabled = ctx->time_now - - counter->tstamp_enabled; - if (counter->state == PERF_COUNTER_STATE_INACTIVE) - run_end = counter->tstamp_stopped; - else - run_end = ctx->time_now; - counter->total_time_running = run_end - counter->tstamp_running; - } + if (counter->state < PERF_COUNTER_STATE_INACTIVE) + return; + + counter->total_time_enabled = ctx->time - counter->tstamp_enabled; + + if (counter->state == PERF_COUNTER_STATE_INACTIVE) + run_end = counter->tstamp_stopped; + else + run_end = ctx->time; + + counter->total_time_running = run_end - counter->tstamp_running; } /* @@ -332,7 +327,7 @@ static void __perf_counter_disable(void *info) * If it is in error state, leave it in error state. */ if (counter->state >= PERF_COUNTER_STATE_INACTIVE) { - update_context_time(ctx, 1); + update_context_time(ctx); update_counter_times(counter); if (counter == counter->group_leader) group_sched_out(counter, cpuctx, ctx); @@ -426,7 +421,7 @@ counter_sched_in(struct perf_counter *counter, return -EAGAIN; } - counter->tstamp_running += ctx->time_now - counter->tstamp_stopped; + counter->tstamp_running += ctx->time - counter->tstamp_stopped; if (!is_software_counter(counter)) cpuctx->active_oncpu++; @@ -493,9 +488,9 @@ static void add_counter_to_ctx(struct perf_counter *counter, list_add_counter(counter, ctx); ctx->nr_counters++; counter->prev_state = PERF_COUNTER_STATE_OFF; - counter->tstamp_enabled = ctx->time_now; - counter->tstamp_running = ctx->time_now; - counter->tstamp_stopped = ctx->time_now; + counter->tstamp_enabled = ctx->time; + counter->tstamp_running = ctx->time; + counter->tstamp_stopped = ctx->time; } /* @@ -522,7 +517,7 @@ static void __perf_install_in_context(void *info) curr_rq_lock_irq_save(&flags); spin_lock(&ctx->lock); - update_context_time(ctx, 1); + update_context_time(ctx); /* * Protect the list operation against NMI by disabling the @@ -648,13 +643,13 @@ static void __perf_counter_enable(void *info) curr_rq_lock_irq_save(&flags); spin_lock(&ctx->lock); - update_context_time(ctx, 1); + update_context_time(ctx); counter->prev_state = counter->state; if (counter->state >= PERF_COUNTER_STATE_INACTIVE) goto unlock; counter->state = PERF_COUNTER_STATE_INACTIVE; - counter->tstamp_enabled = ctx->time_now - counter->total_time_enabled; + counter->tstamp_enabled = ctx->time - counter->total_time_enabled; /* * If the counter is in a group and isn't the group leader, @@ -737,8 +732,8 @@ static void perf_counter_enable(struct perf_counter *counter) */ if (counter->state == PERF_COUNTER_STATE_OFF) { counter->state = PERF_COUNTER_STATE_INACTIVE; - counter->tstamp_enabled = ctx->time_now - - counter->total_time_enabled; + counter->tstamp_enabled = + ctx->time - counter->total_time_enabled; } out: spin_unlock_irq(&ctx->lock); @@ -778,7 +773,7 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx, ctx->is_active = 0; if (likely(!ctx->nr_counters)) goto out; - update_context_time(ctx, 0); + update_context_time(ctx); flags = hw_perf_save_disable(); if 
(ctx->nr_active) { @@ -883,12 +878,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, if (likely(!ctx->nr_counters)) goto out; - /* - * Add any time since the last sched_out to the lost time - * so it doesn't get included in the total_time_enabled and - * total_time_running measures for counters in the context. - */ - ctx->time_lost = get_context_time(ctx, 0) - ctx->time_now; + ctx->timestamp = perf_clock(); flags = hw_perf_save_disable(); @@ -1043,8 +1033,8 @@ int perf_counter_task_enable(void) if (counter->state > PERF_COUNTER_STATE_OFF) continue; counter->state = PERF_COUNTER_STATE_INACTIVE; - counter->tstamp_enabled = ctx->time_now - - counter->total_time_enabled; + counter->tstamp_enabled = + ctx->time - counter->total_time_enabled; counter->hw_event.disabled = 0; } hw_perf_restore(perf_flags); @@ -1113,7 +1103,7 @@ static void __read(void *info) curr_rq_lock_irq_save(&flags); if (ctx->is_active) - update_context_time(ctx, 1); + update_context_time(ctx); counter->hw_ops->read(counter); update_counter_times(counter); curr_rq_unlock_irq_restore(&flags); -- cgit v1.2.3 From a39d6f2556c4a19f58f538c6aa28bf8faca4fcb8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Apr 2009 11:45:11 +0200 Subject: perf_counter: rework the task clock software counter Rework the task clock software counter to use the context time instead of the task runtime clock, this removes the last such user. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090406094518.445450972@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 42 ++++++++++++------------------------------ 1 file changed, 12 insertions(+), 30 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 84d85ab4e16..56b7eb53d67 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -974,9 +974,6 @@ int perf_counter_task_disable(void) curr_rq_lock_irq_save(&flags); cpu = smp_processor_id(); - /* force the update of the task clock: */ - __task_delta_exec(curr, 1); - perf_counter_task_sched_out(curr, cpu); spin_lock(&ctx->lock); @@ -1017,9 +1014,6 @@ int perf_counter_task_enable(void) curr_rq_lock_irq_save(&flags); cpu = smp_processor_id(); - /* force the update of the task clock: */ - __task_delta_exec(curr, 1); - perf_counter_task_sched_out(curr, cpu); spin_lock(&ctx->lock); @@ -2347,38 +2341,28 @@ static const struct hw_perf_counter_ops perf_ops_cpu_clock = { * Software counter: task time clock */ -/* - * Called from within the scheduler: - */ -static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update) -{ - struct task_struct *curr = counter->task; - u64 delta; - - delta = __task_delta_exec(curr, update); - - return curr->se.sum_exec_runtime + delta; -} - -static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now) +static void task_clock_perf_counter_update(struct perf_counter *counter) { - u64 prev; + u64 prev, now; s64 delta; - prev = atomic64_read(&counter->hw.prev_count); - - atomic64_set(&counter->hw.prev_count, now); + update_context_time(counter->ctx); + now = counter->ctx->time; + prev = atomic64_xchg(&counter->hw.prev_count, now); delta = now - prev; - atomic64_add(delta, &counter->count); } static int task_clock_perf_counter_enable(struct perf_counter *counter) { struct hw_perf_counter *hwc = &counter->hw; + u64 now; + + update_context_time(counter->ctx); + now = counter->ctx->time; - atomic64_set(&hwc->prev_count, task_clock_perf_counter_val(counter, 0)); + atomic64_set(&hwc->prev_count, now); 
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hwc->hrtimer.function = perf_swcounter_hrtimer; if (hwc->irq_period) { @@ -2393,14 +2377,12 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter) static void task_clock_perf_counter_disable(struct perf_counter *counter) { hrtimer_cancel(&counter->hw.hrtimer); - task_clock_perf_counter_update(counter, - task_clock_perf_counter_val(counter, 0)); + task_clock_perf_counter_update(counter); } static void task_clock_perf_counter_read(struct perf_counter *counter) { - task_clock_perf_counter_update(counter, - task_clock_perf_counter_val(counter, 1)); + task_clock_perf_counter_update(counter); } static const struct hw_perf_counter_ops perf_ops_task_clock = { -- cgit v1.2.3 From 849691a6cd40270ff5f4a8846d5f6bf8df663ffc Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Apr 2009 11:45:12 +0200 Subject: perf_counter: remove rq->lock usage Now that all the task runtime clock users are gone, remove the ugly rq->lock usage from perf counters, which solves the nasty deadlock seen when a software task clock counter was read from an NMI overflow context. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090406094518.531137582@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/kernel_stat.h | 2 -- kernel/perf_counter.c | 42 ++++++++++++++++-------------------------- kernel/sched.c | 20 -------------------- 3 files changed, 16 insertions(+), 48 deletions(-) diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index b6d2887a5d8..080d1fd461d 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -85,8 +85,6 @@ static inline unsigned int kstat_irqs(unsigned int irq) /* * Lock/unlock the current runqueue - to extract task statistics: */ -extern void curr_rq_lock_irq_save(unsigned long *flags); -extern void curr_rq_unlock_irq_restore(unsigned long *flags); extern unsigned long long __task_delta_exec(struct task_struct *tsk, int update); extern unsigned long long task_delta_exec(struct task_struct *); diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 56b7eb53d67..f4f7596f784 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -172,8 +172,7 @@ static void __perf_counter_remove_from_context(void *info) if (ctx->task && cpuctx->task_ctx != ctx) return; - curr_rq_lock_irq_save(&flags); - spin_lock(&ctx->lock); + spin_lock_irqsave(&ctx->lock, flags); counter_sched_out(counter, cpuctx, ctx); @@ -198,8 +197,7 @@ static void __perf_counter_remove_from_context(void *info) perf_max_counters - perf_reserved_percpu); } - spin_unlock(&ctx->lock); - curr_rq_unlock_irq_restore(&flags); + spin_unlock_irqrestore(&ctx->lock, flags); } @@ -319,8 +317,7 @@ static void __perf_counter_disable(void *info) if (ctx->task && cpuctx->task_ctx != ctx) return; - curr_rq_lock_irq_save(&flags); - spin_lock(&ctx->lock); + spin_lock_irqsave(&ctx->lock, flags); /* * If the counter is on, turn it off. 
@@ -336,8 +333,7 @@ static void __perf_counter_disable(void *info) counter->state = PERF_COUNTER_STATE_OFF; } - spin_unlock(&ctx->lock); - curr_rq_unlock_irq_restore(&flags); + spin_unlock_irqrestore(&ctx->lock, flags); } /* @@ -515,8 +511,7 @@ static void __perf_install_in_context(void *info) if (ctx->task && cpuctx->task_ctx != ctx) return; - curr_rq_lock_irq_save(&flags); - spin_lock(&ctx->lock); + spin_lock_irqsave(&ctx->lock, flags); update_context_time(ctx); /* @@ -565,8 +560,7 @@ static void __perf_install_in_context(void *info) unlock: hw_perf_restore(perf_flags); - spin_unlock(&ctx->lock); - curr_rq_unlock_irq_restore(&flags); + spin_unlock_irqrestore(&ctx->lock, flags); } /* @@ -641,8 +635,7 @@ static void __perf_counter_enable(void *info) if (ctx->task && cpuctx->task_ctx != ctx) return; - curr_rq_lock_irq_save(&flags); - spin_lock(&ctx->lock); + spin_lock_irqsave(&ctx->lock, flags); update_context_time(ctx); counter->prev_state = counter->state; @@ -678,8 +671,7 @@ static void __perf_counter_enable(void *info) } unlock: - spin_unlock(&ctx->lock); - curr_rq_unlock_irq_restore(&flags); + spin_unlock_irqrestore(&ctx->lock, flags); } /* @@ -971,7 +963,7 @@ int perf_counter_task_disable(void) if (likely(!ctx->nr_counters)) return 0; - curr_rq_lock_irq_save(&flags); + local_irq_save(flags); cpu = smp_processor_id(); perf_counter_task_sched_out(curr, cpu); @@ -992,9 +984,7 @@ int perf_counter_task_disable(void) hw_perf_restore(perf_flags); - spin_unlock(&ctx->lock); - - curr_rq_unlock_irq_restore(&flags); + spin_unlock_irqrestore(&ctx->lock, flags); return 0; } @@ -1011,7 +1001,7 @@ int perf_counter_task_enable(void) if (likely(!ctx->nr_counters)) return 0; - curr_rq_lock_irq_save(&flags); + local_irq_save(flags); cpu = smp_processor_id(); perf_counter_task_sched_out(curr, cpu); @@ -1037,7 +1027,7 @@ int perf_counter_task_enable(void) perf_counter_task_sched_in(curr, cpu); - curr_rq_unlock_irq_restore(&flags); + local_irq_restore(flags); return 0; } @@ -1095,12 +1085,12 @@ static void __read(void *info) struct perf_counter_context *ctx = counter->ctx; unsigned long flags; - curr_rq_lock_irq_save(&flags); + local_irq_save(flags); if (ctx->is_active) update_context_time(ctx); counter->hw_ops->read(counter); update_counter_times(counter); - curr_rq_unlock_irq_restore(&flags); + local_irq_restore(flags); } static u64 perf_counter_read(struct perf_counter *counter) @@ -2890,7 +2880,7 @@ __perf_counter_exit_task(struct task_struct *child, * Be careful about zapping the list - IRQ/NMI context * could still be processing it: */ - curr_rq_lock_irq_save(&flags); + local_irq_save(flags); perf_flags = hw_perf_save_disable(); cpuctx = &__get_cpu_var(perf_cpu_context); @@ -2903,7 +2893,7 @@ __perf_counter_exit_task(struct task_struct *child, child_ctx->nr_counters--; hw_perf_restore(perf_flags); - curr_rq_unlock_irq_restore(&flags); + local_irq_restore(flags); } parent_counter = child_counter->parent; diff --git a/kernel/sched.c b/kernel/sched.c index f76e3c0188a..0de2f814fb1 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -997,26 +997,6 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) } } -void curr_rq_lock_irq_save(unsigned long *flags) - __acquires(rq->lock) -{ - struct rq *rq; - - local_irq_save(*flags); - rq = cpu_rq(smp_processor_id()); - spin_lock(&rq->lock); -} - -void curr_rq_unlock_irq_restore(unsigned long *flags) - __releases(rq->lock) -{ - struct rq *rq; - - rq = cpu_rq(smp_processor_id()); - spin_unlock(&rq->lock); - local_irq_restore(*flags); -} - 
void task_rq_unlock_wait(struct task_struct *p) { struct rq *rq = task_rq(p); -- cgit v1.2.3 From bce379bf358738ab8efc8cda2594a299ac685085 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Apr 2009 11:45:13 +0200 Subject: perf_counter: minimize context time updates Push the update_context_time() calls up the stack so that we get fewer invocations and thereby less noisy output: before: # ./perfstat -e 1:0 -e 1:1 -e 1:1 -e 1:1 -l ls > /dev/null Performance counter stats for 'ls': 10.163691 cpu clock ticks (msecs) (scaled from 98.94%) 10.215360 task clock ticks (msecs) (scaled from 98.18%) 10.185549 task clock ticks (msecs) (scaled from 98.53%) 10.183581 task clock ticks (msecs) (scaled from 98.71%) Wall-clock time elapsed: 11.912858 msecs after: # ./perfstat -e 1:0 -e 1:1 -e 1:1 -e 1:1 -l ls > /dev/null Performance counter stats for 'ls': 9.316630 cpu clock ticks (msecs) 9.280789 task clock ticks (msecs) 9.280789 task clock ticks (msecs) 9.280789 task clock ticks (msecs) Wall-clock time elapsed: 9.574872 msecs Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090406094518.618876874@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index f4f7596f784..863703b3158 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -319,6 +319,8 @@ static void __perf_counter_disable(void *info) spin_lock_irqsave(&ctx->lock, flags); + update_context_time(ctx); + /* * If the counter is on, turn it off. * If it is in error state, leave it in error state. @@ -797,6 +799,8 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu) if (likely(!cpuctx->task_ctx)) return; + update_context_time(ctx); + regs = task_pt_regs(task); perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs); __perf_counter_sched_out(ctx, cpuctx); @@ -2336,7 +2340,6 @@ static void task_clock_perf_counter_update(struct perf_counter *counter) u64 prev, now; s64 delta; - update_context_time(counter->ctx); now = counter->ctx->time; prev = atomic64_xchg(&counter->hw.prev_count, now); @@ -2349,7 +2352,6 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter) struct hw_perf_counter *hwc = &counter->hw; u64 now; - update_context_time(counter->ctx); now = counter->ctx->time; atomic64_set(&hwc->prev_count, now); @@ -2372,6 +2374,7 @@ static void task_clock_perf_counter_disable(struct perf_counter *counter) static void task_clock_perf_counter_read(struct perf_counter *counter) { + update_context_time(counter->ctx); task_clock_perf_counter_update(counter); } -- cgit v1.2.3 From 6278af660ff83fbafb18e53fc2747eb2ee6780fa Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Thu, 2 Apr 2009 10:40:28 +0200 Subject: perf_counter tools: kerneltop: display per function percentage along with event count ------------------------------------------------------------------------------ KernelTop: 90551 irqs/sec kernel:15.0% [NMI, 100000 CPU cycles], (all, 4 CPUs) ------------------------------------------------------------------------------ events pcnt RIP kernel function ______ ______ _____ ________________ _______________ 16871.00 - 19.1% - ffffffff80328e20 : clear_page_c 8810.00 - 9.9% - ffffffff8048ce80 : page_fault 4746.00 - 5.4% - ffffffff8048cae2 : _spin_lock 4428.00 - 5.0% - ffffffff80328e70 : copy_page_c 3340.00 - 3.8% - ffffffff80329090 : copy_user_generic_string!
2679.00 - 3.0% - ffffffff8028a16b : get_page_from_freelist 2254.00 - 2.5% - ffffffff80296f19 : unmap_vmas 2082.00 - 2.4% - ffffffff80297e19 : handle_mm_fault 1754.00 - 2.0% - ffffffff80288dc8 : __rmqueue_smallest 1553.00 - 1.8% - ffffffff8048ca58 : _spin_lock_irqsave 1400.00 - 1.6% - ffffffff8028cdc8 : release_pages 1337.00 - 1.5% - ffffffff80285400 : find_get_page 1335.00 - 1.5% - ffffffff80225a23 : do_page_fault 1299.00 - 1.5% - ffffffff802ba8e7 : __d_lookup 1174.00 - 1.3% - ffffffff802b38f3 : __link_path_walk 1155.00 - 1.3% - ffffffff802843e1 : perf_swcounter_ctx_event! 1137.00 - 1.3% - ffffffff8028d118 : ____pagevec_lru_add 963.00 - 1.1% - ffffffff802a670b : kmem_cache_alloc 885.00 - 1.0% - ffffffff8024bc61 : __wake_up_bit Display per function percentage along with event count. Signed-off-by: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras Signed-off-by: Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 45 +++++++++++++++++----------------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index 4f8d7917aba..15f3a5f9019 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -636,16 +636,20 @@ static void print_sym_table(void) int counter; float events_per_sec = events/delay_secs; float kevents_per_sec = (events-userspace_events)/delay_secs; + float sum_kevents = 0.0; events = userspace_events = 0; memcpy(tmp, sym_table, sizeof(sym_table[0])*sym_table_count); qsort(tmp, sym_table_count, sizeof(tmp[0]), compare); + for (i = 0; i < sym_table_count && tmp[i].count[0]; i++) + sum_kevents += tmp[i].count[0]; + write(1, CONSOLE_CLEAR, strlen(CONSOLE_CLEAR)); printf( "------------------------------------------------------------------------------\n"); - printf( " KernelTop:%8.0f irqs/sec kernel:%3.1f%% [%s, ", + printf( " KernelTop:%8.0f irqs/sec kernel:%4.1f%% [%s, ", events_per_sec, 100.0 - (100.0*((events_per_sec-kevents_per_sec)/events_per_sec)), nmi ? 
"NMI" : "IRQ"); @@ -679,34 +683,31 @@ static void print_sym_table(void) printf("------------------------------------------------------------------------------\n\n"); if (nr_counters == 1) - printf(" events"); + printf(" events pcnt"); else - printf(" weight events"); + printf(" weight events pcnt"); printf(" RIP kernel function\n" - " ______ ______ ________________ _______________\n\n" + " ______ ______ _____ ________________ _______________\n\n" ); - printed = 0; - for (i = 0; i < sym_table_count; i++) { + for (i = 0, printed = 0; i < sym_table_count; i++) { + float pcnt; int count; - if (nr_counters == 1) { - if (printed <= 18 && - tmp[i].count[0] >= count_filter) { - printf("%19.2f - %016llx : %s\n", - sym_weight(tmp + i), tmp[i].addr, tmp[i].sym); - printed++; - } - } else { - if (printed <= 18 && - tmp[i].count[0] >= count_filter) { - printf("%8.1f %10ld - %016llx : %s\n", - sym_weight(tmp + i), - tmp[i].count[0], - tmp[i].addr, tmp[i].sym); - printed++; - } + if (printed <= 18 && tmp[i].count[0] >= count_filter) { + pcnt = 100.0 - (100.0*((sum_kevents-tmp[i].count[0])/sum_kevents)); + + if (nr_counters == 1) + printf("%19.2f - %4.1f%% - %016llx : %s\n", + sym_weight(tmp + i), + pcnt, tmp[i].addr, tmp[i].sym); + else + printf("%8.1f %10ld - %4.1f%% - %016llx : %s\n", + sym_weight(tmp + i), + tmp[i].count[0], + pcnt, tmp[i].addr, tmp[i].sym); + printed++; } /* * Add decay to the counts: -- cgit v1.2.3 From 98c2aaf8be5baf7193be37fb28bce8e7327158bc Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 7 Apr 2009 11:30:17 +0200 Subject: x86, perfcounters: add atomic64_xchg() Complete atomic64_t support on the 32-bit side by adding atomic64_xch(). Cc: Peter Zijlstra LKML-Reference: <20090406094518.445450972@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/atomic_32.h | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h index 977250ed8b8..aff9f1fcdcd 100644 --- a/arch/x86/include/asm/atomic_32.h +++ b/arch/x86/include/asm/atomic_32.h @@ -291,19 +291,37 @@ atomic64_cmpxchg(atomic64_t *ptr, unsigned long long old_val, } /** - * atomic64_set - set atomic64 variable + * atomic64_xchg - xchg atomic64 variable * @ptr: pointer to type atomic64_t * @new_val: value to assign + * @old_val: old value that was there * - * Atomically sets the value of @ptr to @new_val. + * Atomically xchgs the value of @ptr to @new_val and returns + * the old value. */ -static inline void atomic64_set(atomic64_t *ptr, unsigned long long new_val) + +static inline unsigned long long +atomic64_xchg(atomic64_t *ptr, unsigned long long new_val) { unsigned long long old_val; do { old_val = atomic_read(ptr); } while (atomic64_cmpxchg(ptr, old_val, new_val) != old_val); + + return old_val; +} + +/** + * atomic64_set - set atomic64 variable + * @ptr: pointer to type atomic64_t + * @new_val: value to assign + * + * Atomically sets the value of @ptr to @new_val. + */ +static inline void atomic64_set(atomic64_t *ptr, unsigned long long new_val) +{ + atomic64_xchg(ptr, new_val); } /** -- cgit v1.2.3 From dc66270b51a62b1a6888d5309229e638a305c47b Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 8 Apr 2009 20:30:10 +1000 Subject: perf_counter: fix powerpc build Commit 4af4998b ("perf_counter: rework context time") changed struct perf_counter_context to have a 'time' field instead of a 'time_now' field, but neglected to fix the place in the powerpc perf_counter.c where the time_now field was accessed. 
This fixes it. Signed-off-by: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <18908.31922.411398.147810@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index f88c35d0710..0e5651385dd 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -457,8 +457,7 @@ static void counter_sched_in(struct perf_counter *counter, int cpu) { counter->state = PERF_COUNTER_STATE_ACTIVE; counter->oncpu = cpu; - counter->tstamp_running += counter->ctx->time_now - - counter->tstamp_stopped; + counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped; if (is_software_counter(counter)) counter->hw_ops->enable(counter); } -- cgit v1.2.3 From f708223d49ac39f5af1643985056206c98033f5b Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 8 Apr 2009 20:30:18 +1000 Subject: perf_counter: powerpc: set sample enable bit for marked instruction events Impact: enable access to hardware feature POWER processors have the ability to "mark" a subset of the instructions and provide more detailed information on what happens to the marked instructions as they flow through the pipeline. This marking is enabled by the "sample enable" bit in MMCRA, and there are synchronization requirements around setting and clearing the bit. This adds logic to the processor-specific back-ends so that they know which events relate to marked instructions and set the sampling enable bit if any event that we want to put on the PMU is a marked instruction event. It also adds logic to the generic powerpc code to do the necessary synchronization if that bit is set. Signed-off-by: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <18908.31930.1024.228867@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 28 +++++++-- arch/powerpc/kernel/power5+-pmu.c | 103 +++++++++++++++++++++++++++++- arch/powerpc/kernel/power5-pmu.c | 96 +++++++++++++++++++++++++++- arch/powerpc/kernel/power6-pmu.c | 126 ++++++++++++++++++++++++++++++++++++- arch/powerpc/kernel/ppc970-pmu.c | 72 ++++++++++++++++++++- 5 files changed, 413 insertions(+), 12 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 0e5651385dd..0697ade84dd 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -306,6 +306,15 @@ u64 hw_perf_save_disable(void) cpuhw->pmcs_enabled = 1; } + /* + * Disable instruction sampling if it was enabled + */ + if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { + mtspr(SPRN_MMCRA, + cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); + mb(); + } + /* * Set the 'freeze counters' bit. * The barrier is to make sure the mtspr has been @@ -347,12 +356,11 @@ void hw_perf_restore(u64 disable) * (possibly updated for removal of counters). */ if (!cpuhw->n_added) { - mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); + mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); - mtspr(SPRN_MMCR0, cpuhw->mmcr[0]); if (cpuhw->n_counters == 0) get_lppaca()->pmcregs_in_use = 0; - goto out; + goto out_enable; } /* @@ -385,7 +393,7 @@ void hw_perf_restore(u64 disable) * Then unfreeze the counters. 
*/ get_lppaca()->pmcregs_in_use = 1; - mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); + mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) | MMCR0_FC); @@ -421,10 +429,20 @@ void hw_perf_restore(u64 disable) write_pmc(counter->hw.idx, val); perf_counter_update_userpage(counter); } - mb(); cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; + + out_enable: + mb(); mtspr(SPRN_MMCR0, cpuhw->mmcr[0]); + /* + * Enable instruction sampling if necessary + */ + if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { + mb(); + mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); + } + out: local_irq_restore(flags); } diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c index cec21ea65b0..1222c8ea3c2 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c @@ -1,5 +1,5 @@ /* - * Performance counter support for POWER5 (not POWER5++) processors. + * Performance counter support for POWER5+/++ (not POWER5) processors. * * Copyright 2009 Paul Mackerras, IBM Corporation. * @@ -281,10 +281,107 @@ static int power5p_get_alternatives(unsigned int event, unsigned int alt[]) return nalt; } +/* + * Map of which direct events on which PMCs are marked instruction events. + * Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event. + * Bit 0 is set if it is marked for all PMCs. + * The 0x80 bit indicates a byte decode PMCSEL value. + */ +static unsigned char direct_event_is_marked[0x28] = { + 0, /* 00 */ + 0x1f, /* 01 PM_IOPS_CMPL */ + 0x2, /* 02 PM_MRK_GRP_DISP */ + 0xe, /* 03 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */ + 0, /* 04 */ + 0x1c, /* 05 PM_MRK_BRU_FIN, PM_MRK_INST_FIN, PM_MRK_CRU_FIN */ + 0x80, /* 06 */ + 0x80, /* 07 */ + 0, 0, 0,/* 08 - 0a */ + 0x18, /* 0b PM_THRESH_TIMEO, PM_MRK_GRP_TIMEO */ + 0, /* 0c */ + 0x80, /* 0d */ + 0x80, /* 0e */ + 0, /* 0f */ + 0, /* 10 */ + 0x14, /* 11 PM_MRK_GRP_BR_REDIR, PM_MRK_GRP_IC_MISS */ + 0, /* 12 */ + 0x10, /* 13 PM_MRK_GRP_CMPL */ + 0x1f, /* 14 PM_GRP_MRK, PM_MRK_{FXU,FPU,LSU}_FIN */ + 0x2, /* 15 PM_MRK_GRP_ISSUED */ + 0x80, /* 16 */ + 0x80, /* 17 */ + 0, 0, 0, 0, 0, + 0x80, /* 1d */ + 0x80, /* 1e */ + 0, /* 1f */ + 0x80, /* 20 */ + 0x80, /* 21 */ + 0x80, /* 22 */ + 0x80, /* 23 */ + 0x80, /* 24 */ + 0x80, /* 25 */ + 0x80, /* 26 */ + 0x80, /* 27 */ +}; + +/* + * Returns 1 if event counts things relating to marked instructions + * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. 
+ */ +static int power5p_marked_instr_event(unsigned int event) +{ + int pmc, psel; + int bit, byte, unit; + u32 mask; + + pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; + psel = event & PM_PMCSEL_MSK; + if (pmc >= 5) + return 0; + + bit = -1; + if (psel < sizeof(direct_event_is_marked)) { + if (direct_event_is_marked[psel] & (1 << pmc)) + return 1; + if (direct_event_is_marked[psel] & 0x80) + bit = 4; + else if (psel == 0x08) + bit = pmc - 1; + else if (psel == 0x10) + bit = 4 - pmc; + else if (psel == 0x1b && (pmc == 1 || pmc == 3)) + bit = 4; + } else if ((psel & 0x48) == 0x40) { + bit = psel & 7; + } else if (psel == 0x28) { + bit = pmc - 1; + } else if (pmc == 3 && (psel == 0x2e || psel == 0x2f)) { + bit = 4; + } + + if (!(event & PM_BUSEVENT_MSK) || bit == -1) + return 0; + + byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; + unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; + if (unit == PM_LSU0) { + /* byte 1 bits 0-7, byte 2 bits 0,2-4,6 */ + mask = 0x5dff00; + } else if (unit == PM_LSU1 && byte >= 4) { + byte -= 4; + /* byte 5 bits 6-7, byte 6 bits 0,4, byte 7 bits 0-4,6 */ + mask = 0x5f11c000; + } else + return 0; + + return (mask >> (byte * 8 + bit)) & 1; +} + static int power5p_compute_mmcr(unsigned int event[], int n_ev, unsigned int hwc[], u64 mmcr[]) { u64 mmcr1 = 0; + u64 mmcra = 0; unsigned int pmc, unit, byte, psel; unsigned int ttm; int i, isbus, bit, grsel; @@ -404,6 +501,8 @@ static int power5p_compute_mmcr(unsigned int event[], int n_ev, grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; mmcr1 |= (u64)grsel << grsel_shift[bit]; } + if (power5p_marked_instr_event(event[i])) + mmcra |= MMCRA_SAMPLE_ENABLE; if ((psel & 0x58) == 0x40 && (byte & 1) != ((pmc >> 1) & 1)) /* select alternate byte lane */ psel |= 0x10; @@ -419,7 +518,7 @@ static int power5p_compute_mmcr(unsigned int event[], int n_ev, if (pmc_inuse & 0x3e) mmcr[0] |= MMCR0_PMCjCE; mmcr[1] = mmcr1; - mmcr[2] = 0; + mmcr[2] = mmcra; return 0; } diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c index 379ed1087cc..116c4bb1809 100644 --- a/arch/powerpc/kernel/power5-pmu.c +++ b/arch/powerpc/kernel/power5-pmu.c @@ -290,10 +290,102 @@ static int power5_get_alternatives(unsigned int event, unsigned int alt[]) return nalt; } +/* + * Map of which direct events on which PMCs are marked instruction events. + * Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event. + * Bit 0 is set if it is marked for all PMCs. + * The 0x80 bit indicates a byte decode PMCSEL value. + */ +static unsigned char direct_event_is_marked[0x28] = { + 0, /* 00 */ + 0x1f, /* 01 PM_IOPS_CMPL */ + 0x2, /* 02 PM_MRK_GRP_DISP */ + 0xe, /* 03 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */ + 0, /* 04 */ + 0x1c, /* 05 PM_MRK_BRU_FIN, PM_MRK_INST_FIN, PM_MRK_CRU_FIN */ + 0x80, /* 06 */ + 0x80, /* 07 */ + 0, 0, 0,/* 08 - 0a */ + 0x18, /* 0b PM_THRESH_TIMEO, PM_MRK_GRP_TIMEO */ + 0, /* 0c */ + 0x80, /* 0d */ + 0x80, /* 0e */ + 0, /* 0f */ + 0, /* 10 */ + 0x14, /* 11 PM_MRK_GRP_BR_REDIR, PM_MRK_GRP_IC_MISS */ + 0, /* 12 */ + 0x10, /* 13 PM_MRK_GRP_CMPL */ + 0x1f, /* 14 PM_GRP_MRK, PM_MRK_{FXU,FPU,LSU}_FIN */ + 0x2, /* 15 PM_MRK_GRP_ISSUED */ + 0x80, /* 16 */ + 0x80, /* 17 */ + 0, 0, 0, 0, 0, + 0x80, /* 1d */ + 0x80, /* 1e */ + 0, /* 1f */ + 0x80, /* 20 */ + 0x80, /* 21 */ + 0x80, /* 22 */ + 0x80, /* 23 */ + 0x80, /* 24 */ + 0x80, /* 25 */ + 0x80, /* 26 */ + 0x80, /* 27 */ +}; + +/* + * Returns 1 if event counts things relating to marked instructions + * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. 
+ */ +static int power5_marked_instr_event(unsigned int event) +{ + int pmc, psel; + int bit, byte, unit; + u32 mask; + + pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; + psel = event & PM_PMCSEL_MSK; + if (pmc >= 5) + return 0; + + bit = -1; + if (psel < sizeof(direct_event_is_marked)) { + if (direct_event_is_marked[psel] & (1 << pmc)) + return 1; + if (direct_event_is_marked[psel] & 0x80) + bit = 4; + else if (psel == 0x08) + bit = pmc - 1; + else if (psel == 0x10) + bit = 4 - pmc; + else if (psel == 0x1b && (pmc == 1 || pmc == 3)) + bit = 4; + } else if ((psel & 0x58) == 0x40) + bit = psel & 7; + + if (!(event & PM_BUSEVENT_MSK)) + return 0; + + byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; + unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; + if (unit == PM_LSU0) { + /* byte 1 bits 0-7, byte 2 bits 0,2-4,6 */ + mask = 0x5dff00; + } else if (unit == PM_LSU1 && byte >= 4) { + byte -= 4; + /* byte 4 bits 1,3,5,7, byte 5 bits 6-7, byte 7 bits 0-4,6 */ + mask = 0x5f00c0aa; + } else + return 0; + + return (mask >> (byte * 8 + bit)) & 1; +} + static int power5_compute_mmcr(unsigned int event[], int n_ev, unsigned int hwc[], u64 mmcr[]) { u64 mmcr1 = 0; + u64 mmcra = 0; unsigned int pmc, unit, byte, psel; unsigned int ttm, grp; int i, isbus, bit, grsel; @@ -430,6 +522,8 @@ static int power5_compute_mmcr(unsigned int event[], int n_ev, grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; mmcr1 |= (u64)grsel << grsel_shift[bit]; } + if (power5_marked_instr_event(event[i])) + mmcra |= MMCRA_SAMPLE_ENABLE; if (pmc <= 3) mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc); hwc[i] = pmc; @@ -442,7 +536,7 @@ static int power5_compute_mmcr(unsigned int event[], int n_ev, if (pmc_inuse & 0x3e) mmcr[0] |= MMCR0_PMCjCE; mmcr[1] = mmcr1; - mmcr[2] = 0; + mmcr[2] = mmcra; return 0; } diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index b1f61f3c97b..fce1fc290a1 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c @@ -48,6 +48,127 @@ #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) #define MMCR1_PMCSEL_MSK 0xff +/* + * Map of which direct events on which PMCs are marked instruction events. + * Indexed by PMCSEL value >> 1. + * Bottom 4 bits are a map of which PMCs are interesting, + * top 4 bits say what sort of event: + * 0 = direct marked event, + * 1 = byte decode event, + * 4 = add/and event (PMC1 -> bits 0 & 4), + * 5 = add/and event (PMC1 -> bits 1 & 5), + * 6 = add/and event (PMC1 -> bits 2 & 6), + * 7 = add/and event (PMC1 -> bits 3 & 7). 
+ */ +static unsigned char direct_event_is_marked[0x60 >> 1] = { + 0, /* 00 */ + 0, /* 02 */ + 0, /* 04 */ + 0x07, /* 06 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */ + 0x04, /* 08 PM_MRK_DFU_FIN */ + 0x06, /* 0a PM_MRK_IFU_FIN, PM_MRK_INST_FIN */ + 0, /* 0c */ + 0, /* 0e */ + 0x02, /* 10 PM_MRK_INST_DISP */ + 0x08, /* 12 PM_MRK_LSU_DERAT_MISS */ + 0, /* 14 */ + 0, /* 16 */ + 0x0c, /* 18 PM_THRESH_TIMEO, PM_MRK_INST_FIN */ + 0x0f, /* 1a PM_MRK_INST_DISP, PM_MRK_{FXU,FPU,LSU}_FIN */ + 0x01, /* 1c PM_MRK_INST_ISSUED */ + 0, /* 1e */ + 0, /* 20 */ + 0, /* 22 */ + 0, /* 24 */ + 0, /* 26 */ + 0x15, /* 28 PM_MRK_DATA_FROM_L2MISS, PM_MRK_DATA_FROM_L3MISS */ + 0, /* 2a */ + 0, /* 2c */ + 0, /* 2e */ + 0x4f, /* 30 */ + 0x7f, /* 32 */ + 0x4f, /* 34 */ + 0x5f, /* 36 */ + 0x6f, /* 38 */ + 0x4f, /* 3a */ + 0, /* 3c */ + 0x08, /* 3e PM_MRK_INST_TIMEO */ + 0x1f, /* 40 */ + 0x1f, /* 42 */ + 0x1f, /* 44 */ + 0x1f, /* 46 */ + 0x1f, /* 48 */ + 0x1f, /* 4a */ + 0x1f, /* 4c */ + 0x1f, /* 4e */ + 0, /* 50 */ + 0x05, /* 52 PM_MRK_BR_TAKEN, PM_MRK_BR_MPRED */ + 0x1c, /* 54 PM_MRK_PTEG_FROM_L3MISS, PM_MRK_PTEG_FROM_L2MISS */ + 0x02, /* 56 PM_MRK_LD_MISS_L1 */ + 0, /* 58 */ + 0, /* 5a */ + 0, /* 5c */ + 0, /* 5e */ +}; + +/* + * Masks showing for each unit which bits are marked events. + * These masks are in LE order, i.e. 0x00000001 is byte 0, bit 0. + */ +static u32 marked_bus_events[16] = { + 0x01000000, /* direct events set 1: byte 3 bit 0 */ + 0x00010000, /* direct events set 2: byte 2 bit 0 */ + 0, 0, 0, 0, /* IDU, IFU, nest: nothing */ + 0x00000088, /* VMX set 1: byte 0 bits 3, 7 */ + 0x000000c0, /* VMX set 2: byte 0 bits 4-7 */ + 0x04010000, /* LSU set 1: byte 2 bit 0, byte 3 bit 2 */ + 0xff010000u, /* LSU set 2: byte 2 bit 0, all of byte 3 */ + 0, /* LSU set 3 */ + 0x00000010, /* VMX set 3: byte 0 bit 4 */ + 0, /* BFP set 1 */ + 0x00000022, /* BFP set 2: byte 0 bits 1, 5 */ + 0, 0 +}; + +/* + * Returns 1 if event counts things relating to marked instructions + * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. 
+ */ +static int power6_marked_instr_event(unsigned int event) +{ + int pmc, psel, ptype; + int bit, byte, unit; + u32 mask; + + pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; + psel = (event & PM_PMCSEL_MSK) >> 1; /* drop edge/level bit */ + if (pmc >= 5) + return 0; + + bit = -1; + if (psel < sizeof(direct_event_is_marked)) { + ptype = direct_event_is_marked[psel]; + if (pmc == 0 || !(ptype & (1 << (pmc - 1)))) + return 0; + ptype >>= 4; + if (ptype == 0) + return 1; + if (ptype == 1) + bit = 0; + else + bit = ptype ^ (pmc - 1); + } else if ((psel & 0x48) == 0x40) + bit = psel & 7; + + if (!(event & PM_BUSEVENT_MSK) || bit == -1) + return 0; + + byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; + unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; + mask = marked_bus_events[unit]; + return (mask >> (byte * 8 + bit)) & 1; +} + /* * Assign PMC numbers and compute MMCR1 value for a set of events */ @@ -55,6 +176,7 @@ static int p6_compute_mmcr(unsigned int event[], int n_ev, unsigned int hwc[], u64 mmcr[]) { u64 mmcr1 = 0; + u64 mmcra = 0; int i; unsigned int pmc, ev, b, u, s, psel; unsigned int ttmset = 0; @@ -116,6 +238,8 @@ static int p6_compute_mmcr(unsigned int event[], int n_ev, if (ev & PM_LLAV) mmcr1 |= MMCR1_PMC1_LLA_VALUE >> pmc; } + if (power6_marked_instr_event(event[i])) + mmcra |= MMCRA_SAMPLE_ENABLE; mmcr1 |= (u64)psel << MMCR1_PMCSEL_SH(pmc); } mmcr[0] = 0; @@ -124,7 +248,7 @@ static int p6_compute_mmcr(unsigned int event[], int n_ev, if (pmc_inuse & 0xe) mmcr[0] |= MMCR0_PMCjCE; mmcr[1] = mmcr1; - mmcr[2] = 0; + mmcr[2] = mmcra; return 0; } diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c index c3256580be1..aed8ccd7c07 100644 --- a/arch/powerpc/kernel/ppc970-pmu.c +++ b/arch/powerpc/kernel/ppc970-pmu.c @@ -19,6 +19,8 @@ #define PM_PMC_MSK 0xf #define PM_UNIT_SH 8 /* TTMMUX number and setting - unit select */ #define PM_UNIT_MSK 0xf +#define PM_SPCSEL_SH 6 +#define PM_SPCSEL_MSK 3 #define PM_BYTE_SH 4 /* Byte number of event bus to use */ #define PM_BYTE_MSK 3 #define PM_PMCSEL_MSK 0xf @@ -88,8 +90,11 @@ static short mmcr1_adder_bits[8] = { * Layout of constraint bits: * 6666555555555544444444443333333333222222222211111111110000000000 * 3210987654321098765432109876543210987654321098765432109876543210 - * <><>[ >[ >[ >< >< >< >< ><><><><><><><><> - * T0T1 UC PS1 PS2 B0 B1 B2 B3 P1P2P3P4P5P6P7P8 + * <><><>[ >[ >[ >< >< >< >< ><><><><><><><><> + * SPT0T1 UC PS1 PS2 B0 B1 B2 B3 P1P2P3P4P5P6P7P8 + * + * SP - SPCSEL constraint + * 48-49: SPCSEL value 0x3_0000_0000_0000 * * T0 - TTM0 constraint * 46-47: TTM0SEL value (0=FPU, 2=IFU, 3=VPU) 0xC000_0000_0000 @@ -126,6 +131,57 @@ static short mmcr1_adder_bits[8] = { * 0-13: Count of events needing PMC2..PMC8 */ +static unsigned char direct_marked_event[8] = { + (1<<2) | (1<<3), /* PMC1: PM_MRK_GRP_DISP, PM_MRK_ST_CMPL */ + (1<<3) | (1<<5), /* PMC2: PM_THRESH_TIMEO, PM_MRK_BRU_FIN */ + (1<<3) | (1<<5), /* PMC3: PM_MRK_ST_CMPL_INT, PM_MRK_VMX_FIN */ + (1<<4) | (1<<5), /* PMC4: PM_MRK_GRP_CMPL, PM_MRK_CRU_FIN */ + (1<<4) | (1<<5), /* PMC5: PM_GRP_MRK, PM_MRK_GRP_TIMEO */ + (1<<3) | (1<<4) | (1<<5), + /* PMC6: PM_MRK_ST_STS, PM_MRK_FXU_FIN, PM_MRK_GRP_ISSUED */ + (1<<4) | (1<<5), /* PMC7: PM_MRK_FPU_FIN, PM_MRK_INST_FIN */ + (1<<4) /* PMC8: PM_MRK_LSU_FIN */ +}; + +/* + * Returns 1 if event counts things relating to marked instructions + * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. 
+ */ +static int p970_marked_instr_event(unsigned int event) +{ + int pmc, psel, unit, byte, bit; + unsigned int mask; + + pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; + psel = event & PM_PMCSEL_MSK; + if (pmc) { + if (direct_marked_event[pmc - 1] & (1 << psel)) + return 1; + if (psel == 0) /* add events */ + bit = (pmc <= 4)? pmc - 1: 8 - pmc; + else if (psel == 7 || psel == 13) /* decode events */ + bit = 4; + else + return 0; + } else + bit = psel; + + byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; + unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; + mask = 0; + switch (unit) { + case PM_VPU: + mask = 0x4c; /* byte 0 bits 2,3,6 */ + case PM_LSU0: + /* byte 2 bits 0,2,3,4,6; all of byte 1 */ + mask = 0x085dff00; + case PM_LSU1L: + mask = 0x50 << 24; /* byte 3 bits 4,6 */ + break; + } + return (mask >> (byte * 8 + bit)) & 1; +} + /* Masks and values for using events from the various units */ static u64 unit_cons[PM_LASTUNIT+1][2] = { [PM_FPU] = { 0xc80000000000ull, 0x040000000000ull }, @@ -138,7 +194,7 @@ static u64 unit_cons[PM_LASTUNIT+1][2] = { static int p970_get_constraint(unsigned int event, u64 *maskp, u64 *valp) { - int pmc, byte, unit, sh; + int pmc, byte, unit, sh, spcsel; u64 mask = 0, value = 0; int grp = -1; @@ -177,6 +233,11 @@ static int p970_get_constraint(unsigned int event, u64 *maskp, u64 *valp) mask |= 0x800000000ull; value |= 0x100000000ull; } + spcsel = (event >> PM_SPCSEL_SH) & PM_SPCSEL_MSK; + if (spcsel) { + mask |= 3ull << 48; + value |= (u64)spcsel << 48; + } *maskp = mask; *valp = value; return 0; @@ -209,6 +270,7 @@ static int p970_compute_mmcr(unsigned int event[], int n_ev, unsigned char ttmuse[2]; unsigned char pmcsel[8]; int i; + int spcsel; if (n_ev > 8) return -1; @@ -316,6 +378,10 @@ static int p970_compute_mmcr(unsigned int event[], int n_ev, } pmcsel[pmc] = psel; hwc[i] = pmc; + spcsel = (event[i] >> PM_SPCSEL_SH) & PM_SPCSEL_MSK; + mmcr1 |= spcsel; + if (p970_marked_instr_event(event[i])) + mmcra |= MMCRA_SAMPLE_ENABLE; } for (pmc = 0; pmc < 2; ++pmc) mmcr0 |= pmcsel[pmc] << (MMCR0_PMC1SEL_SH - 7 * pmc); -- cgit v1.2.3 From 7333a8003cdc0470e8c0ae8b949cbc44f3165ff3 Mon Sep 17 00:00:00 2001 From: Hidetoshi Seto Date: Wed, 25 Mar 2009 10:50:34 +0900 Subject: x86: smarten /proc/interrupts output for new counters Now /proc/interrupts of tip tree has new counters: CNT: Performance counter interrupts Format change of output, as like that by commit: commit 7a81d9a7da03d2f27840d659f97ef140d032f609 x86: smarten /proc/interrupts output should be applied to these new counters too. 
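For reference, the "%*s" conversion used by the fix below takes its field width from an argument, which is what keeps the new counter label aligned with the other rows whatever width the column needs. A standalone sketch (illustration only, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		int prec = 4;	/* stand-in for the width computed at runtime */

		/* right-aligns the label in a 'prec'-wide field: " CNT: " */
		printf("%*s: \n", prec, "CNT");
		return 0;
	}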
Signed-off-by: Hidetoshi Seto Cc: Jan Beulich LKML-Reference: <49C98DEA.8060208@jp.fujitsu.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index d465487da58..dccaaa85578 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -63,7 +63,7 @@ static int show_other_interrupts(struct seq_file *p, int prec) for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count); seq_printf(p, " Spurious interrupts\n"); - seq_printf(p, "CNT: "); + seq_printf(p, "%*s: ", prec, "CNT"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs); seq_printf(p, " Performance counter interrupts\n"); -- cgit v1.2.3 From e30e08f65c7ef6c230424264f09c3d53f117f58b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 8 Apr 2009 15:01:25 +0200 Subject: perf_counter: fix NMI race in task clock We should not be updating ctx->time from NMI context, work around that. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090408130408.681326666@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 863703b3158..84a39081344 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -319,8 +319,6 @@ static void __perf_counter_disable(void *info) spin_lock_irqsave(&ctx->lock, flags); - update_context_time(ctx); - /* * If the counter is on, turn it off. * If it is in error state, leave it in error state. @@ -2335,13 +2333,11 @@ static const struct hw_perf_counter_ops perf_ops_cpu_clock = { * Software counter: task time clock */ -static void task_clock_perf_counter_update(struct perf_counter *counter) +static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now) { - u64 prev, now; + u64 prev; s64 delta; - now = counter->ctx->time; - prev = atomic64_xchg(&counter->hw.prev_count, now); delta = now - prev; atomic64_add(delta, &counter->count); @@ -2369,13 +2365,24 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter) static void task_clock_perf_counter_disable(struct perf_counter *counter) { hrtimer_cancel(&counter->hw.hrtimer); - task_clock_perf_counter_update(counter); + task_clock_perf_counter_update(counter, counter->ctx->time); + } static void task_clock_perf_counter_read(struct perf_counter *counter) { - update_context_time(counter->ctx); - task_clock_perf_counter_update(counter); + u64 time; + + if (!in_nmi()) { + update_context_time(counter->ctx); + time = counter->ctx->time; + } else { + u64 now = perf_clock(); + u64 delta = now - counter->ctx->timestamp; + time = counter->ctx->time + delta; + } + + task_clock_perf_counter_update(counter, time); } static const struct hw_perf_counter_ops perf_ops_task_clock = { -- cgit v1.2.3 From 6fab01927e8bdbbc77bafba2abb4810c5591ad52 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 8 Apr 2009 15:01:26 +0200 Subject: perf_counter: provide misc bits in the event header Limit the size of each record to 64k (or should we count in multiples of u64 and have a 512K limit?), this gives 16 bits of spare room in the header, which we can use for misc bits, so as to not have to grow the record with u64 every time we have a few bits to report.
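As a quick sanity check of that arithmetic, a standalone sketch; the struct mirrors the perf_event_header definition in the diff below (illustration only, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	/* mirrors the patched struct perf_event_header below */
	struct sketch_event_header {
		uint32_t type;
		uint16_t misc;	/* the freed-up misc bits */
		uint16_t size;	/* total record size in bytes */
	};

	int main(void)
	{
		/* a 16-bit size field caps each record at 64k - 1 bytes */
		printf("max record size: %u bytes\n", (1u << 16) - 1);
		printf("header is still %zu bytes\n",
		       sizeof(struct sketch_event_header));
		return 0;
	}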
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090408130408.769271806@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 6 +++++- kernel/perf_counter.c | 3 +++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 7f5d353d78a..5bd8817b12d 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -201,9 +201,13 @@ struct perf_counter_mmap_page { __u32 data_head; /* head in the data section */ }; +#define PERF_EVENT_MISC_KERNEL (1 << 0) +#define PERF_EVENT_MISC_USER (1 << 1) + struct perf_event_header { __u32 type; - __u32 size; + __u16 misc; + __u16 size; }; enum perf_event_type { diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 84a39081344..4af98f943d3 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1831,6 +1831,9 @@ static void perf_counter_output(struct perf_counter *counter, header.type = PERF_EVENT_COUNTER_OVERFLOW; header.size = sizeof(header); + header.misc = user_mode(regs) ? + PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL; + if (record_type & PERF_RECORD_IP) { ip = instruction_pointer(regs); header.type |= __PERF_EVENT_IP; -- cgit v1.2.3 From 6b6e5486b3a168f0328c82a8d4376caf901472b1 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 8 Apr 2009 15:01:27 +0200 Subject: perf_counter: use misc field to widen type Push the PERF_EVENT_COUNTER_OVERFLOW bit into the misc field so that we can have the full 32bit for PERF_RECORD_ bits. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090408130408.891867663@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 28 ++++++++++------------------ kernel/perf_counter.c | 15 ++++++++------- 2 files changed, 18 insertions(+), 25 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 5bd8817b12d..4809ae18a94 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -201,8 +201,9 @@ struct perf_counter_mmap_page { __u32 data_head; /* head in the data section */ }; -#define PERF_EVENT_MISC_KERNEL (1 << 0) -#define PERF_EVENT_MISC_USER (1 << 1) +#define PERF_EVENT_MISC_KERNEL (1 << 0) +#define PERF_EVENT_MISC_USER (1 << 1) +#define PERF_EVENT_MISC_OVERFLOW (1 << 2) struct perf_event_header { __u32 type; @@ -230,36 +231,27 @@ enum perf_event_type { PERF_EVENT_MUNMAP = 2, /* - * Half the event type space is reserved for the counter overflow - * bitfields, as found in hw_event.record_type. 
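In sketch form, a reader of the mmap event stream now keys off the misc bit instead of a high type bit (compare the kerneltop hunk below; the handle_*() helpers are hypothetical, and the PERF_* names are those defined earlier in this series):

	/* hypothetical helpers, declared only to keep the sketch self-contained */
	static void handle_sample(struct perf_event_header *header);
	static void handle_mmap(struct perf_event_header *header);

	static void dispatch_event(struct perf_event_header *header)
	{
		if (header->misc & PERF_EVENT_MISC_OVERFLOW) {
			/* header->type now carries PERF_RECORD_* bits */
			if (header->type & PERF_RECORD_IP)
				handle_sample(header);
		} else if (header->type == PERF_EVENT_MMAP ||
			   header->type == PERF_EVENT_MUNMAP) {
			handle_mmap(header);
		}
	}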
- * - * These events will have types of the form: - * PERF_EVENT_COUNTER_OVERFLOW { | __PERF_EVENT_* } * + * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field + * will be PERF_RECORD_* * * struct { * struct perf_event_header header; * - * { u64 ip; } && __PERF_EVENT_IP - * { u32 pid, tid; } && __PERF_EVENT_TID + * { u64 ip; } && PERF_RECORD_IP + * { u32 pid, tid; } && PERF_RECORD_TID * * { u64 nr; - * { u64 event, val; } cnt[nr]; } && __PERF_EVENT_GROUP + * { u64 event, val; } cnt[nr]; } && PERF_RECORD_GROUP * * { u16 nr, * hv, * kernel, * user; - * u64 ips[nr]; } && __PERF_EVENT_CALLCHAIN + * u64 ips[nr]; } && PERF_RECORD_CALLCHAIN * - * { u64 time; } && __PERF_EVENT_TIME + * { u64 time; } && PERF_RECORD_TIME * }; */ - PERF_EVENT_COUNTER_OVERFLOW = 1UL << 31, - __PERF_EVENT_IP = PERF_RECORD_IP, - __PERF_EVENT_TID = PERF_RECORD_TID, - __PERF_EVENT_GROUP = PERF_RECORD_GROUP, - __PERF_EVENT_CALLCHAIN = PERF_RECORD_CALLCHAIN, - __PERF_EVENT_TIME = PERF_RECORD_TIME, }; #ifdef __KERNEL__ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 4af98f943d3..bf12df6f353 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1828,15 +1828,16 @@ static void perf_counter_output(struct perf_counter *counter, int callchain_size = 0; u64 time; - header.type = PERF_EVENT_COUNTER_OVERFLOW; + header.type = 0; header.size = sizeof(header); - header.misc = user_mode(regs) ? + header.misc = PERF_EVENT_MISC_OVERFLOW; + header.misc |= user_mode(regs) ? PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL; if (record_type & PERF_RECORD_IP) { ip = instruction_pointer(regs); - header.type |= __PERF_EVENT_IP; + header.type |= PERF_RECORD_IP; header.size += sizeof(ip); } @@ -1845,12 +1846,12 @@ static void perf_counter_output(struct perf_counter *counter, tid_entry.pid = current->group_leader->pid; tid_entry.tid = current->pid; - header.type |= __PERF_EVENT_TID; + header.type |= PERF_RECORD_TID; header.size += sizeof(tid_entry); } if (record_type & PERF_RECORD_GROUP) { - header.type |= __PERF_EVENT_GROUP; + header.type |= PERF_RECORD_GROUP; header.size += sizeof(u64) + counter->nr_siblings * sizeof(group_entry); } @@ -1861,7 +1862,7 @@ static void perf_counter_output(struct perf_counter *counter, if (callchain) { callchain_size = (1 + callchain->nr) * sizeof(u64); - header.type |= __PERF_EVENT_CALLCHAIN; + header.type |= PERF_RECORD_CALLCHAIN; header.size += callchain_size; } } @@ -1872,7 +1873,7 @@ static void perf_counter_output(struct perf_counter *counter, */ time = sched_clock(); - header.type |= __PERF_EVENT_TIME; + header.type |= PERF_RECORD_TIME; header.size += sizeof(u64); } -- cgit v1.2.3 From 808382b33bb4c60df6379ec2db39f332cc56b82a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 8 Apr 2009 15:01:28 +0200 Subject: perf_counter: kerneltop: keep up with ABI changes Update kerneltop to use PERF_EVENT_MISC_OVERFLOW Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090408130408.947197470@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/kerneltop.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index 15f3a5f9019..042c1b83a87 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c @@ -1277,22 +1277,22 @@ static void mmap_read(struct mmap_data *md) old += size; - switch (event->header.type) { - case PERF_EVENT_COUNTER_OVERFLOW | __PERF_EVENT_IP: - 
case PERF_EVENT_COUNTER_OVERFLOW | __PERF_EVENT_IP | __PERF_EVENT_TID: - process_event(event->ip.ip, md->counter); - break; - - case PERF_EVENT_MMAP: - case PERF_EVENT_MUNMAP: - printf("%s: %Lu %Lu %Lu %s\n", - event->header.type == PERF_EVENT_MMAP - ? "mmap" : "munmap", - event->mmap.start, - event->mmap.len, - event->mmap.pgoff, - event->mmap.filename); - break; + if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) { + if (event->header.type & PERF_RECORD_IP) + process_event(event->ip.ip, md->counter); + } else { + switch (event->header.type) { + case PERF_EVENT_MMAP: + case PERF_EVENT_MUNMAP: + printf("%s: %Lu %Lu %Lu %s\n", + event->header.type == PERF_EVENT_MMAP + ? "mmap" : "munmap", + event->mmap.start, + event->mmap.len, + event->mmap.pgoff, + event->mmap.filename); + break; + } } } -- cgit v1.2.3 From 8740f9418c78dcad694b46ab25d1645d5aef1f5e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 8 Apr 2009 15:01:29 +0200 Subject: perf_counter: add some comments Add a few comments because I was forgetting what field what for what functionality. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090408130409.036984214@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 4809ae18a94..8bf764fc622 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -344,10 +344,12 @@ struct file; struct perf_mmap_data { struct rcu_head rcu_head; - int nr_pages; - atomic_t wakeup; - atomic_t head; - atomic_t events; + int nr_pages; /* nr of data pages */ + + atomic_t wakeup; /* POLL_ for wakeups */ + atomic_t head; /* write position */ + atomic_t events; /* event limit */ + struct perf_counter_mmap_page *user_page; void *data_pages[0]; }; -- cgit v1.2.3 From 8d1b2d9361b494bfc761700c348c65ebbe3deb5b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 8 Apr 2009 15:01:30 +0200 Subject: perf_counter: track task-comm data Similar to the mmap data stream, add one that tracks the task COMM field, so that the userspace reporting knows what to call a task. 
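Going by the layout comment the patch adds to the header (see below), the wire format of the new record can be sketched as follows; the struct name is hypothetical, since the kernel only documents this layout in a comment:

    struct perf_comm_record {
            struct perf_event_header header; /* header.type == PERF_EVENT_COMM */
            __u32   pid, tid;
            char    comm[];                  /* task comm, padded to a u64 boundary */
    };

Userspace keeps a pid-to-comm map keyed off these records, so samples can be reported under a task name rather than a bare pid.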
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090408130409.127422406@chello.nl> Signed-off-by: Ingo Molnar --- fs/exec.c | 1 + include/linux/perf_counter.h | 16 +++++++- kernel/perf_counter.c | 93 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 109 insertions(+), 1 deletion(-) diff --git a/fs/exec.c b/fs/exec.c index e015c0b5a08..bf47ed0278f 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -951,6 +951,7 @@ void set_task_comm(struct task_struct *tsk, char *buf) task_lock(tsk); strlcpy(tsk->comm, buf, sizeof(tsk->comm)); task_unlock(tsk); + perf_counter_comm(tsk); } int flush_old_exec(struct linux_binprm * bprm) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 8bf764fc622..a70a55f2759 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -142,8 +142,9 @@ struct perf_counter_hw_event { exclude_idle : 1, /* don't count when idle */ mmap : 1, /* include mmap data */ munmap : 1, /* include munmap data */ + comm : 1, /* include comm data */ - __reserved_1 : 53; + __reserved_1 : 52; __u32 extra_config_len; __u32 wakeup_events; /* wakeup every n events */ @@ -230,6 +231,16 @@ enum perf_event_type { PERF_EVENT_MMAP = 1, PERF_EVENT_MUNMAP = 2, + /* + * struct { + * struct perf_event_header header; + * + * u32 pid, tid; + * char comm[]; + * }; + */ + PERF_EVENT_COMM = 3, + /* * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field * will be PERF_RECORD_* @@ -545,6 +556,8 @@ extern void perf_counter_mmap(unsigned long addr, unsigned long len, extern void perf_counter_munmap(unsigned long addr, unsigned long len, unsigned long pgoff, struct file *file); +extern void perf_counter_comm(struct task_struct *tsk); + #define MAX_STACK_DEPTH 255 struct perf_callchain_entry { @@ -583,6 +596,7 @@ static inline void perf_counter_munmap(unsigned long addr, unsigned long len, unsigned long pgoff, struct file *file) { } +static inline void perf_counter_comm(struct task_struct *tsk) { } #endif #endif /* __KERNEL__ */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index bf12df6f353..2d4aebb2982 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1916,6 +1916,99 @@ static void perf_counter_output(struct perf_counter *counter, perf_output_end(&handle); } +/* + * comm tracking + */ + +struct perf_comm_event { + struct task_struct *task; + char *comm; + int comm_size; + + struct { + struct perf_event_header header; + + u32 pid; + u32 tid; + } event; +}; + +static void perf_counter_comm_output(struct perf_counter *counter, + struct perf_comm_event *comm_event) +{ + struct perf_output_handle handle; + int size = comm_event->event.header.size; + int ret = perf_output_begin(&handle, counter, size, 0, 0); + + if (ret) + return; + + perf_output_put(&handle, comm_event->event); + perf_output_copy(&handle, comm_event->comm, + comm_event->comm_size); + perf_output_end(&handle); +} + +static int perf_counter_comm_match(struct perf_counter *counter, + struct perf_comm_event *comm_event) +{ + if (counter->hw_event.comm && + comm_event->event.header.type == PERF_EVENT_COMM) + return 1; + + return 0; +} + +static void perf_counter_comm_ctx(struct perf_counter_context *ctx, + struct perf_comm_event *comm_event) +{ + struct perf_counter *counter; + + if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) + return; + + rcu_read_lock(); + list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { + if (perf_counter_comm_match(counter, comm_event)) + 
perf_counter_comm_output(counter, comm_event); + } + rcu_read_unlock(); +} + +static void perf_counter_comm_event(struct perf_comm_event *comm_event) +{ + struct perf_cpu_context *cpuctx; + unsigned int size; + char *comm = comm_event->task->comm; + + size = ALIGN(strlen(comm), sizeof(u64)); + + comm_event->comm = comm; + comm_event->comm_size = size; + + comm_event->event.header.size = sizeof(comm_event->event) + size; + + cpuctx = &get_cpu_var(perf_cpu_context); + perf_counter_comm_ctx(&cpuctx->ctx, comm_event); + put_cpu_var(perf_cpu_context); + + perf_counter_comm_ctx(¤t->perf_counter_ctx, comm_event); +} + +void perf_counter_comm(struct task_struct *task) +{ + struct perf_comm_event comm_event = { + .task = task, + .event = { + .header = { .type = PERF_EVENT_COMM, }, + .pid = task->group_leader->pid, + .tid = task->pid, + }, + }; + + perf_counter_comm_event(&comm_event); +} + /* * mmap tracking */ -- cgit v1.2.3 From de9ac07bbf8f51e0ce40e5428c3a8f627bd237c2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 8 Apr 2009 15:01:31 +0200 Subject: perf_counter: some simple userspace profiling # perf-record make -j4 kernel/ # perf-report | tail -15 0.39 cc1 [kernel] lock_acquired 0.42 cc1 [kernel] lock_acquire 0.51 cc1 [ user ] /lib64/libc-2.8.90.so: _int_free 0.51 as [kernel] clear_page_c 0.53 cc1 [ user ] /lib64/libc-2.8.90.so: memcpy 0.56 cc1 [ user ] /lib64/libc-2.8.90.so: _IO_vfprintf 0.63 cc1 [kernel] lock_release 0.67 cc1 [ user ] /lib64/libc-2.8.90.so: strlen 0.68 cc1 [kernel] debug_smp_processor_id 1.38 cc1 [ user ] /lib64/libc-2.8.90.so: _int_malloc 1.55 cc1 [ user ] /lib64/libc-2.8.90.so: memset 1.77 cc1 [kernel] __lock_acquire 1.88 cc1 [kernel] clear_page_c 3.61 as [ user ] /usr/bin/as: 59.16 cc1 [ user ] /usr/libexec/gcc/x86_64-redhat-linux/4.3.2/cc1: Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo LKML-Reference: <20090408130409.220518450@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 8 +- Documentation/perf_counter/perf-record.c | 530 ++++++++++++++++++++++++++++++ Documentation/perf_counter/perf-report.cc | 472 ++++++++++++++++++++++++++ 3 files changed, 1009 insertions(+), 1 deletion(-) create mode 100644 Documentation/perf_counter/perf-record.c create mode 100644 Documentation/perf_counter/perf-report.cc diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 194b6621558..1dd37ee7dbd 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -1,10 +1,16 @@ -BINS = kerneltop perfstat +BINS = kerneltop perfstat perf-record perf-report all: $(BINS) kerneltop: kerneltop.c ../../include/linux/perf_counter.h cc -O6 -Wall -lrt -o $@ $< +perf-record: perf-record.c ../../include/linux/perf_counter.h + cc -O6 -Wall -lrt -o $@ $< + +perf-report: perf-report.cc ../../include/linux/perf_counter.h + g++ -O6 -Wall -lrt -o $@ $< + perfstat: kerneltop ln -sf kerneltop perfstat diff --git a/Documentation/perf_counter/perf-record.c b/Documentation/perf_counter/perf-record.c new file mode 100644 index 00000000000..614de7c468b --- /dev/null +++ b/Documentation/perf_counter/perf-record.c @@ -0,0 +1,530 @@ + + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "../../include/linux/perf_counter.h" + 
+ +/* + * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all + * counters in the current task. + */ +#define PR_TASK_PERF_COUNTERS_DISABLE 31 +#define PR_TASK_PERF_COUNTERS_ENABLE 32 + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +#define rdclock() \ +({ \ + struct timespec ts; \ + \ + clock_gettime(CLOCK_MONOTONIC, &ts); \ + ts.tv_sec * 1000000000ULL + ts.tv_nsec; \ +}) + +/* + * Pick up some kernel type conventions: + */ +#define __user +#define asmlinkage + +#ifdef __x86_64__ +#define __NR_perf_counter_open 295 +#define rmb() asm volatile("lfence" ::: "memory") +#define cpu_relax() asm volatile("rep; nop" ::: "memory"); +#endif + +#ifdef __i386__ +#define __NR_perf_counter_open 333 +#define rmb() asm volatile("lfence" ::: "memory") +#define cpu_relax() asm volatile("rep; nop" ::: "memory"); +#endif + +#ifdef __powerpc__ +#define __NR_perf_counter_open 319 +#define rmb() asm volatile ("sync" ::: "memory") +#define cpu_relax() asm volatile ("" ::: "memory"); +#endif + +#define unlikely(x) __builtin_expect(!!(x), 0) +#define min(x, y) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? _min1 : _min2; }) + +asmlinkage int sys_perf_counter_open( + struct perf_counter_hw_event *hw_event_uptr __user, + pid_t pid, + int cpu, + int group_fd, + unsigned long flags) +{ + return syscall( + __NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags); +} + +#define MAX_COUNTERS 64 +#define MAX_NR_CPUS 256 + +#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id)) + +static int nr_counters = 0; +static __u64 event_id[MAX_COUNTERS] = { }; +static int default_interval = 100000; +static int event_count[MAX_COUNTERS]; +static int fd[MAX_NR_CPUS][MAX_COUNTERS]; +static int nr_cpus = 0; +static unsigned int page_size; +static unsigned int mmap_pages = 16; +static int output; +static char *output_name = "output.perf"; +static int group = 0; +static unsigned int realtime_prio = 0; + +const unsigned int default_count[] = { + 1000000, + 1000000, + 10000, + 10000, + 1000000, + 10000, +}; + +static char *hw_event_names[] = { + "CPU cycles", + "instructions", + "cache references", + "cache misses", + "branches", + "branch misses", + "bus cycles", +}; + +static char *sw_event_names[] = { + "cpu clock ticks", + "task clock ticks", + "pagefaults", + "context switches", + "CPU migrations", + "minor faults", + "major faults", +}; + +struct event_symbol { + __u64 event; + char *symbol; +}; + +static struct event_symbol event_symbols[] = { + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cpu-cycles", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cycles", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), "instructions", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), "cache-references", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), "cache-misses", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branch-instructions", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branches", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES), "branch-misses", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES), "bus-cycles", }, + + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK), "cpu-clock", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), "task-clock", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "page-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN), 
"minor-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ), "major-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "context-switches", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "cs", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "cpu-migrations", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "migrations", }, +}; + +/* + * Each event can have multiple symbolic names. + * Symbolic names are (almost) exactly matched. + */ +static __u64 match_event_symbols(char *str) +{ + __u64 config, id; + int type; + unsigned int i; + + if (sscanf(str, "r%llx", &config) == 1) + return config | PERF_COUNTER_RAW_MASK; + + if (sscanf(str, "%d:%llu", &type, &id) == 2) + return EID(type, id); + + for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { + if (!strncmp(str, event_symbols[i].symbol, + strlen(event_symbols[i].symbol))) + return event_symbols[i].event; + } + + return ~0ULL; +} + +static int parse_events(char *str) +{ + __u64 config; + +again: + if (nr_counters == MAX_COUNTERS) + return -1; + + config = match_event_symbols(str); + if (config == ~0ULL) + return -1; + + event_id[nr_counters] = config; + nr_counters++; + + str = strstr(str, ","); + if (str) { + str++; + goto again; + } + + return 0; +} + +#define __PERF_COUNTER_FIELD(config, name) \ + ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) + +#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) +#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) +#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) +#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) + +static void display_events_help(void) +{ + unsigned int i; + __u64 e; + + printf( + " -e EVENT --event=EVENT # symbolic-name abbreviations"); + + for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { + int type, id; + + e = event_symbols[i].event; + type = PERF_COUNTER_TYPE(e); + id = PERF_COUNTER_ID(e); + + printf("\n %d:%d: %-20s", + type, id, event_symbols[i].symbol); + } + + printf("\n" + " rNNN: raw PMU events (eventsel+umask)\n\n"); +} + +static void display_help(void) +{ + printf( + "Usage: perf-record []\n" + "perf-record Options (up to %d event types can be specified at once):\n\n", + MAX_COUNTERS); + + display_events_help(); + + printf( + " -c CNT --count=CNT # event period to sample\n" + " -m pages --mmap_pages= # number of mmap data pages\n" + " -o file --output= # output file\n" + " -r prio --realtime= # use RT prio\n" + ); + + exit(0); +} + +static void process_options(int argc, char *argv[]) +{ + int error = 0, counter; + + for (;;) { + int option_index = 0; + /** Options for getopt */ + static struct option long_options[] = { + {"count", required_argument, NULL, 'c'}, + {"event", required_argument, NULL, 'e'}, + {"mmap_pages", required_argument, NULL, 'm'}, + {"output", required_argument, NULL, 'o'}, + {"realtime", required_argument, NULL, 'r'}, + {NULL, 0, NULL, 0 } + }; + int c = getopt_long(argc, argv, "+:c:e:m:o:r:", + long_options, &option_index); + if (c == -1) + break; + + switch (c) { + case 'c': default_interval = atoi(optarg); break; + case 'e': error = parse_events(optarg); break; + case 'm': mmap_pages = atoi(optarg); break; + case 'o': output_name = strdup(optarg); break; + case 'r': realtime_prio = atoi(optarg); break; + default: error = 1; break; + } + } + if (error) + display_help(); + + if (!nr_counters) { + nr_counters = 1; + event_id[0] = 0; + } + + for (counter = 0; counter < nr_counters; counter++) { + if 
(event_count[counter]) + continue; + + event_count[counter] = default_interval; + } +} + +struct mmap_data { + int counter; + void *base; + unsigned int mask; + unsigned int prev; +}; + +static unsigned int mmap_read_head(struct mmap_data *md) +{ + struct perf_counter_mmap_page *pc = md->base; + int head; + + head = pc->data_head; + rmb(); + + return head; +} + +static long events; +static struct timeval last_read, this_read; + +static void mmap_read(struct mmap_data *md) +{ + unsigned int head = mmap_read_head(md); + unsigned int old = md->prev; + unsigned char *data = md->base + page_size; + unsigned long size; + void *buf; + int diff; + + gettimeofday(&this_read, NULL); + + /* + * If we're further behind than half the buffer, there's a chance + * the writer will bite our tail and screw up the events under us. + * + * If we somehow ended up ahead of the head, we got messed up. + * + * In either case, truncate and restart at head. + */ + diff = head - old; + if (diff > md->mask / 2 || diff < 0) { + struct timeval iv; + unsigned long msecs; + + timersub(&this_read, &last_read, &iv); + msecs = iv.tv_sec*1000 + iv.tv_usec/1000; + + fprintf(stderr, "WARNING: failed to keep up with mmap data." + " Last read %lu msecs ago.\n", msecs); + + /* + * head points to a known good entry, start there. + */ + old = head; + } + + last_read = this_read; + + if (old != head) + events++; + + size = head - old; + + if ((old & md->mask) + size != (head & md->mask)) { + buf = &data[old & md->mask]; + size = md->mask + 1 - (old & md->mask); + old += size; + while (size) { + int ret = write(output, buf, size); + if (ret < 0) { + perror("failed to write"); + exit(-1); + } + size -= ret; + buf += ret; + } + } + + buf = &data[old & md->mask]; + size = head - old; + old += size; + while (size) { + int ret = write(output, buf, size); + if (ret < 0) { + perror("failed to write"); + exit(-1); + } + size -= ret; + buf += ret; + } + + md->prev = old; +} + +static volatile int done = 0; + +static void sigchld_handler(int sig) +{ + if (sig == SIGCHLD) + done = 1; +} + +int main(int argc, char *argv[]) +{ + struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; + struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; + struct perf_counter_hw_event hw_event; + int i, counter, group_fd, nr_poll = 0; + pid_t pid; + int ret; + + page_size = sysconf(_SC_PAGE_SIZE); + + process_options(argc, argv); + + nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); + assert(nr_cpus <= MAX_NR_CPUS); + assert(nr_cpus >= 0); + + output = open(output_name, O_CREAT|O_RDWR, S_IRWXU); + if (output < 0) { + perror("failed to create output file"); + exit(-1); + } + + argc -= optind; + argv += optind; + + for (i = 0; i < nr_cpus; i++) { + group_fd = -1; + for (counter = 0; counter < nr_counters; counter++) { + + memset(&hw_event, 0, sizeof(hw_event)); + hw_event.config = event_id[counter]; + hw_event.irq_period = event_count[counter]; + hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID; + hw_event.nmi = 1; + hw_event.mmap = 1; + hw_event.comm = 1; + + fd[i][counter] = sys_perf_counter_open(&hw_event, -1, i, group_fd, 0); + if (fd[i][counter] < 0) { + int err = errno; + printf("kerneltop error: syscall returned with %d (%s)\n", + fd[i][counter], strerror(err)); + if (err == EPERM) + printf("Are you root?\n"); + exit(-1); + } + assert(fd[i][counter] >= 0); + fcntl(fd[i][counter], F_SETFL, O_NONBLOCK); + + /* + * First counter acts as the group leader: + */ + if (group && group_fd == -1) + group_fd = fd[i][counter]; + + event_array[nr_poll].fd = 
fd[i][counter]; + event_array[nr_poll].events = POLLIN; + nr_poll++; + + mmap_array[i][counter].counter = counter; + mmap_array[i][counter].prev = 0; + mmap_array[i][counter].mask = mmap_pages*page_size - 1; + mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size, + PROT_READ, MAP_SHARED, fd[i][counter], 0); + if (mmap_array[i][counter].base == MAP_FAILED) { + printf("kerneltop error: failed to mmap with %d (%s)\n", + errno, strerror(errno)); + exit(-1); + } + } + } + + signal(SIGCHLD, sigchld_handler); + + pid = fork(); + if (pid < 0) + perror("failed to fork"); + + if (!pid) { + if (execvp(argv[0], argv)) { + perror(argv[0]); + exit(-1); + } + } + + if (realtime_prio) { + struct sched_param param; + + param.sched_priority = realtime_prio; + if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { + printf("Could not set realtime priority.\n"); + exit(-1); + } + } + + /* + * TODO: store the current /proc/$/maps information somewhere + */ + + while (!done) { + int hits = events; + + for (i = 0; i < nr_cpus; i++) { + for (counter = 0; counter < nr_counters; counter++) + mmap_read(&mmap_array[i][counter]); + } + + if (hits == events) + ret = poll(event_array, nr_poll, 100); + } + + return 0; +} diff --git a/Documentation/perf_counter/perf-report.cc b/Documentation/perf_counter/perf-report.cc new file mode 100644 index 00000000000..09da0ba482c --- /dev/null +++ b/Documentation/perf_counter/perf-report.cc @@ -0,0 +1,472 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "../../include/linux/perf_counter.h" + +#include +#include +#include + + +static char const *input_name = "output.perf"; +static int input; + +static unsigned long page_size; +static unsigned long mmap_window = 32; + +struct ip_event { + struct perf_event_header header; + __u64 ip; + __u32 pid, tid; +}; +struct mmap_event { + struct perf_event_header header; + __u32 pid, tid; + __u64 start; + __u64 len; + __u64 pgoff; + char filename[PATH_MAX]; +}; +struct comm_event { + struct perf_event_header header; + __u32 pid,tid; + char comm[16]; +}; + +typedef union event_union { + struct perf_event_header header; + struct ip_event ip; + struct mmap_event mmap; + struct comm_event comm; +} event_t; + +struct section { + uint64_t start; + uint64_t end; + + uint64_t offset; + + std::string name; + + section() { }; + + section(uint64_t stab) : end(stab) { }; + + section(uint64_t start, uint64_t size, uint64_t offset, std::string name) : + start(start), end(start + size), offset(offset), name(name) + { }; + + bool operator < (const struct section &s) const { + return end < s.end; + }; +}; + +typedef std::set sections_t; + +struct symbol { + uint64_t start; + uint64_t end; + + std::string name; + + symbol() { }; + + symbol(uint64_t ip) : start(ip) { } + + symbol(uint64_t start, uint64_t len, std::string name) : + start(start), end(start + len), name(name) + { }; + + bool operator < (const struct symbol &s) const { + return start < s.start; + }; +}; + +typedef std::set symbols_t; + +struct dso { + sections_t sections; + symbols_t syms; +}; + +static std::map dsos; + +static void load_dso_sections(std::string dso_name) +{ + struct dso &dso = dsos[dso_name]; + + std::string cmd = "readelf -DSW " + dso_name; + + FILE *file = popen(cmd.c_str(), "r"); + if (!file) { + perror("failed to open pipe"); + exit(-1); + } + + char *line = 
NULL; + size_t n = 0; + + while (!feof(file)) { + uint64_t addr, off, size; + char name[32]; + + if (getline(&line, &n, file) < 0) + break; + if (!line) + break; + + if (sscanf(line, " [%*2d] %16s %*14s %Lx %Lx %Lx", + name, &addr, &off, &size) == 4) { + + dso.sections.insert(section(addr, size, addr - off, name)); + } +#if 0 + /* + * for reading readelf symbols (-s), however these don't seem + * to include nearly everything, so use nm for that. + */ + if (sscanf(line, " %*4d %*3d: %Lx %5Lu %*7s %*6s %*7s %3d %s", + &start, &size, §ion, sym) == 4) { + + start -= dso.section_offsets[section]; + + dso.syms.insert(symbol(start, size, std::string(sym))); + } +#endif + } + pclose(file); +} + +static void load_dso_symbols(std::string dso_name, std::string args) +{ + struct dso &dso = dsos[dso_name]; + + std::string cmd = "nm -nSC " + args + " " + dso_name; + + FILE *file = popen(cmd.c_str(), "r"); + if (!file) { + perror("failed to open pipe"); + exit(-1); + } + + char *line = NULL; + size_t n = 0; + + while (!feof(file)) { + uint64_t start, size; + char c; + char sym[1024]; + + if (getline(&line, &n, file) < 0) + break; + if (!line) + break; + + + if (sscanf(line, "%Lx %Lx %c %s", &start, &size, &c, sym) == 4) { + sections_t::const_iterator si = + dso.sections.upper_bound(section(start)); + if (si == dso.sections.end()) { + printf("symbol in unknown section: %s\n", sym); + continue; + } + + start -= si->offset; + + dso.syms.insert(symbol(start, size, sym)); + } + } + pclose(file); +} + +static void load_dso(std::string dso_name) +{ + load_dso_sections(dso_name); + load_dso_symbols(dso_name, "-D"); /* dynamic symbols */ + load_dso_symbols(dso_name, ""); /* regular ones */ +} + +void load_kallsyms(void) +{ + struct dso &dso = dsos["[kernel]"]; + + FILE *file = fopen("/proc/kallsyms", "r"); + if (!file) { + perror("failed to open kallsyms"); + exit(-1); + } + + char *line; + size_t n; + + while (!feof(file)) { + uint64_t start; + char c; + char sym[1024]; + + if (getline(&line, &n, file) < 0) + break; + if (!line) + break; + + if (sscanf(line, "%Lx %c %s", &start, &c, sym) == 3) + dso.syms.insert(symbol(start, 0x1000000, std::string(sym))); + } + fclose(file); +} + +struct map { + uint64_t start; + uint64_t end; + uint64_t pgoff; + + std::string dso; + + map() { }; + + map(uint64_t ip) : end(ip) { } + + map(mmap_event *mmap) { + start = mmap->start; + end = mmap->start + mmap->len; + pgoff = mmap->pgoff; + + dso = std::string(mmap->filename); + + if (dsos.find(dso) == dsos.end()) + load_dso(dso); + }; + + bool operator < (const struct map &m) const { + return end < m.end; + }; +}; + +typedef std::set maps_t; + +static std::map maps; + +static std::map comms; + +static std::map hist; +static std::multimap rev_hist; + +static std::string resolve_comm(int pid) +{ + std::string comm = ""; + std::map::const_iterator ci = comms.find(pid); + if (ci != comms.end()) + comm = ci->second; + + return comm; +} + +static std::string resolve_user_symbol(int pid, uint64_t ip) +{ + std::string sym = ""; + + maps_t &m = maps[pid]; + maps_t::const_iterator mi = m.upper_bound(map(ip)); + if (mi == m.end()) + return sym; + + ip -= mi->start + mi->pgoff; + + symbols_t &s = dsos[mi->dso].syms; + symbols_t::const_iterator si = s.upper_bound(symbol(ip)); + + sym = mi->dso + ": "; + + if (si == s.begin()) + return sym; + si--; + + if (si->start <= ip && ip < si->end) + sym = mi->dso + ": " + si->name; +#if 0 + else if (si->start <= ip) + sym = mi->dso + ": ?" 
+ si->name; +#endif + + return sym; +} + +static std::string resolve_kernel_symbol(uint64_t ip) +{ + std::string sym = ""; + + symbols_t &s = dsos["[kernel]"].syms; + symbols_t::const_iterator si = s.upper_bound(symbol(ip)); + + if (si == s.begin()) + return sym; + si--; + + if (si->start <= ip && ip < si->end) + sym = si->name; + + return sym; +} + +static void display_help(void) +{ + printf( + "Usage: perf-report []\n" + " -i file --input= # input file\n" + ); + + exit(0); +} + +static void process_options(int argc, char *argv[]) +{ + int error = 0; + + for (;;) { + int option_index = 0; + /** Options for getopt */ + static struct option long_options[] = { + {"input", required_argument, NULL, 'i'}, + {NULL, 0, NULL, 0 } + }; + int c = getopt_long(argc, argv, "+:i:", + long_options, &option_index); + if (c == -1) + break; + + switch (c) { + case 'i': input_name = strdup(optarg); break; + default: error = 1; break; + } + } + + if (error) + display_help(); +} + +int main(int argc, char *argv[]) +{ + unsigned long offset = 0; + unsigned long head = 0; + struct stat stat; + char *buf; + event_t *event; + int ret; + unsigned long total = 0; + + page_size = getpagesize(); + + process_options(argc, argv); + + input = open(input_name, O_RDONLY); + if (input < 0) { + perror("failed to open file"); + exit(-1); + } + + ret = fstat(input, &stat); + if (ret < 0) { + perror("failed to stat file"); + exit(-1); + } + + load_kallsyms(); + +remap: + buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, + MAP_SHARED, input, offset); + if (buf == MAP_FAILED) { + perror("failed to mmap file"); + exit(-1); + } + +more: + event = (event_t *)(buf + head); + + if (head + event->header.size >= page_size * mmap_window) { + unsigned long shift = page_size * (head / page_size); + + munmap(buf, page_size * mmap_window); + offset += shift; + head -= shift; + goto remap; + } + head += event->header.size; + + if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) { + std::string comm, sym, level; + char output[1024]; + + if (event->header.misc & PERF_EVENT_MISC_KERNEL) { + level = "[kernel]"; + sym = resolve_kernel_symbol(event->ip.ip); + } else if (event->header.misc & PERF_EVENT_MISC_USER) { + level = "[ user ]"; + sym = resolve_user_symbol(event->ip.pid, event->ip.ip); + } else { + level = "[ hv ]"; + } + comm = resolve_comm(event->ip.pid); + + snprintf(output, sizeof(output), "%16s %s %s", + comm.c_str(), level.c_str(), sym.c_str()); + hist[output]++; + + total++; + + } else switch (event->header.type) { + case PERF_EVENT_MMAP: + maps[event->mmap.pid].insert(map(&event->mmap)); + break; + + case PERF_EVENT_COMM: + comms[event->comm.pid] = std::string(event->comm.comm); + break; + } + + if (offset + head < stat.st_size) + goto more; + + close(input); + + std::map::iterator hi = hist.begin(); + + while (hi != hist.end()) { + rev_hist.insert(std::pair(hi->second, hi->first)); + hist.erase(hi++); + } + + std::multimap::const_iterator ri = rev_hist.begin(); + + while (ri != rev_hist.end()) { + printf(" %5.2f %s\n", (100.0 * ri->first)/total, ri->second.c_str()); + ri++; + } + + return 0; +} + -- cgit v1.2.3 From 4d855457d84b819fefcd1cd1b0a2a0a0ec475c07 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 8 Apr 2009 15:01:32 +0200 Subject: perf_counter: move PERF_RECORD_TIME Move PERF_RECORD_TIME so that all the fixed length items come before the variable length ones. 
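The point is cheap decoding: with every fixed-size item in front, a reader can walk the record with a plain cursor and only then hand the variable-length tail to specialised code. An illustrative consumer fragment (not code from this series; event is assumed to point at a complete overflow record):

    __u64 *p = (__u64 *)(event + 1);        /* payload starts after the header */

    if (event->header.type & PERF_RECORD_IP)
            ip = *p++;
    if (event->header.type & PERF_RECORD_TID)
            tid_pid = *p++;                 /* u32 pid, tid packed into one u64 */
    if (event->header.type & PERF_RECORD_TIME)
            time = *p++;
    /* variable-length PERF_RECORD_GROUP / _CALLCHAIN payloads follow here */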
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090408130409.307926436@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 9 ++++----- kernel/perf_counter.c | 26 +++++++++++++------------- 2 files changed, 17 insertions(+), 18 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index a70a55f2759..8bd1be58c93 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -100,9 +100,9 @@ enum sw_event_ids { enum perf_counter_record_format { PERF_RECORD_IP = 1U << 0, PERF_RECORD_TID = 1U << 1, - PERF_RECORD_GROUP = 1U << 2, - PERF_RECORD_CALLCHAIN = 1U << 3, - PERF_RECORD_TIME = 1U << 4, + PERF_RECORD_TIME = 1U << 2, + PERF_RECORD_GROUP = 1U << 3, + PERF_RECORD_CALLCHAIN = 1U << 4, }; /* @@ -250,6 +250,7 @@ enum perf_event_type { * * { u64 ip; } && PERF_RECORD_IP * { u32 pid, tid; } && PERF_RECORD_TID + * { u64 time; } && PERF_RECORD_TIME * * { u64 nr; * { u64 event, val; } cnt[nr]; } && PERF_RECORD_GROUP @@ -259,8 +260,6 @@ enum perf_event_type { * kernel, * user; * u64 ips[nr]; } && PERF_RECORD_CALLCHAIN - * - * { u64 time; } && PERF_RECORD_TIME * }; */ }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 2d4aebb2982..4dc8600d282 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1850,6 +1850,16 @@ static void perf_counter_output(struct perf_counter *counter, header.size += sizeof(tid_entry); } + if (record_type & PERF_RECORD_TIME) { + /* + * Maybe do better on x86 and provide cpu_clock_nmi() + */ + time = sched_clock(); + + header.type |= PERF_RECORD_TIME; + header.size += sizeof(u64); + } + if (record_type & PERF_RECORD_GROUP) { header.type |= PERF_RECORD_GROUP; header.size += sizeof(u64) + @@ -1867,16 +1877,6 @@ static void perf_counter_output(struct perf_counter *counter, } } - if (record_type & PERF_RECORD_TIME) { - /* - * Maybe do better on x86 and provide cpu_clock_nmi() - */ - time = sched_clock(); - - header.type |= PERF_RECORD_TIME; - header.size += sizeof(u64); - } - ret = perf_output_begin(&handle, counter, header.size, nmi, 1); if (ret) return; @@ -1889,6 +1889,9 @@ static void perf_counter_output(struct perf_counter *counter, if (record_type & PERF_RECORD_TID) perf_output_put(&handle, tid_entry); + if (record_type & PERF_RECORD_TIME) + perf_output_put(&handle, time); + if (record_type & PERF_RECORD_GROUP) { struct perf_counter *leader, *sub; u64 nr = counter->nr_siblings; @@ -1910,9 +1913,6 @@ static void perf_counter_output(struct perf_counter *counter, if (callchain) perf_output_copy(&handle, callchain, callchain_size); - if (record_type & PERF_RECORD_TIME) - perf_output_put(&handle, time); - perf_output_end(&handle); } -- cgit v1.2.3 From 78f13e9525ba777da25c4ddab89f28e9366a8b7c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 8 Apr 2009 15:01:33 +0200 Subject: perf_counter: allow for data addresses to be recorded Paul suggested we allow for data addresses to be recorded along with the traditional IPs as power can provide these. For now, only the software pagefault events provide data addresses, but in the future power might as well for some events. x86 doesn't seem capable of providing this atm. 
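Concretely, the fault handlers below now pass the faulting address down the chain, while paths that have no meaningful data address pass 0; both calls are taken from the hunks that follow:

    /* powerpc and x86 do_page_fault() report the faulting address: */
    perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs, address);

    /* the hardware overflow path has no data address yet, so it passes 0: */
    perf_counter_overflow(counter, nmi, regs, 0);

A counter whose record_type includes the new PERF_RECORD_ADDR bit then gets a { u64 addr; } item in each overflow record, per the updated layout comment.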
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090408130409.394816925@chello.nl> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 2 +- arch/powerpc/mm/fault.c | 8 ++++--- arch/x86/kernel/cpu/perf_counter.c | 2 +- arch/x86/mm/fault.c | 8 ++++--- include/linux/perf_counter.h | 14 +++++++----- kernel/perf_counter.c | 46 ++++++++++++++++++++++++-------------- 6 files changed, 49 insertions(+), 31 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 0697ade84dd..c9d019f1907 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -749,7 +749,7 @@ static void record_and_restart(struct perf_counter *counter, long val, * Finally record data if requested. */ if (record) - perf_counter_overflow(counter, 1, regs); + perf_counter_overflow(counter, 1, regs, 0); } /* diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 17bbf6f91fb..ac0e112031b 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -171,7 +171,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, die("Weird page fault", regs, SIGSEGV); } - perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs); + perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs, address); /* When running in the kernel we expect faults to occur only to * addresses in user space. All other faults represent errors in the @@ -312,7 +312,8 @@ good_area: } if (ret & VM_FAULT_MAJOR) { current->maj_flt++; - perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MAJ, 1, 0, regs); + perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MAJ, 1, 0, + regs, address); #ifdef CONFIG_PPC_SMLPAR if (firmware_has_feature(FW_FEATURE_CMO)) { preempt_disable(); @@ -322,7 +323,8 @@ good_area: #endif } else { current->min_flt++; - perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MIN, 1, 0, regs); + perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MIN, 1, 0, + regs, address); } up_read(&mm->mmap_sem); return 0; diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 1116a41bc7b..0fcbaab83f9 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -800,7 +800,7 @@ again: continue; perf_save_and_restart(counter); - if (perf_counter_overflow(counter, nmi, regs)) + if (perf_counter_overflow(counter, nmi, regs, 0)) __pmc_generic_disable(counter, &counter->hw, bit); } diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index f2d3324d921..6f9df2babe4 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1045,7 +1045,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) if (unlikely(error_code & PF_RSVD)) pgtable_bad(regs, error_code, address); - perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs); + perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs, address); /* * If we're in an interrupt, have no user context or are running @@ -1142,10 +1142,12 @@ good_area: if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MAJ, 1, 0, regs); + perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MAJ, 1, 0, + regs, address); } else { tsk->min_flt++; - perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MIN, 1, 0, regs); + perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MIN, 1, 0, + regs, address); } check_v8086_mode(regs, address, tsk); diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 8bd1be58c93..c22363a4f74 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ 
-101,8 +101,9 @@ enum perf_counter_record_format { PERF_RECORD_IP = 1U << 0, PERF_RECORD_TID = 1U << 1, PERF_RECORD_TIME = 1U << 2, - PERF_RECORD_GROUP = 1U << 3, - PERF_RECORD_CALLCHAIN = 1U << 4, + PERF_RECORD_ADDR = 1U << 3, + PERF_RECORD_GROUP = 1U << 4, + PERF_RECORD_CALLCHAIN = 1U << 5, }; /* @@ -251,6 +252,7 @@ enum perf_event_type { * { u64 ip; } && PERF_RECORD_IP * { u32 pid, tid; } && PERF_RECORD_TID * { u64 time; } && PERF_RECORD_TIME + * { u64 addr; } && PERF_RECORD_ADDR * * { u64 nr; * { u64 event, val; } cnt[nr]; } && PERF_RECORD_GROUP @@ -537,7 +539,7 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader, extern void perf_counter_update_userpage(struct perf_counter *counter); extern int perf_counter_overflow(struct perf_counter *counter, - int nmi, struct pt_regs *regs); + int nmi, struct pt_regs *regs, u64 addr); /* * Return 1 for a software counter, 0 for a hardware counter */ @@ -547,7 +549,7 @@ static inline int is_software_counter(struct perf_counter *counter) perf_event_type(&counter->hw_event) != PERF_TYPE_HARDWARE; } -extern void perf_swcounter_event(u32, u64, int, struct pt_regs *); +extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64); extern void perf_counter_mmap(unsigned long addr, unsigned long len, unsigned long pgoff, struct file *file); @@ -584,8 +586,8 @@ static inline int perf_counter_task_disable(void) { return -EINVAL; } static inline int perf_counter_task_enable(void) { return -EINVAL; } static inline void -perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs) { } - +perf_swcounter_event(u32 event, u64 nr, int nmi, + struct pt_regs *regs, u64 addr) { } static inline void perf_counter_mmap(unsigned long addr, unsigned long len, diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 4dc8600d282..321c57e3556 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -800,7 +800,7 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu) update_context_time(ctx); regs = task_pt_regs(task); - perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs); + perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0); __perf_counter_sched_out(ctx, cpuctx); cpuctx->task_ctx = NULL; @@ -1810,7 +1810,7 @@ static void perf_output_end(struct perf_output_handle *handle) } static void perf_counter_output(struct perf_counter *counter, - int nmi, struct pt_regs *regs) + int nmi, struct pt_regs *regs, u64 addr) { int ret; u64 record_type = counter->hw_event.record_type; @@ -1860,6 +1860,11 @@ static void perf_counter_output(struct perf_counter *counter, header.size += sizeof(u64); } + if (record_type & PERF_RECORD_ADDR) { + header.type |= PERF_RECORD_ADDR; + header.size += sizeof(u64); + } + if (record_type & PERF_RECORD_GROUP) { header.type |= PERF_RECORD_GROUP; header.size += sizeof(u64) + @@ -1892,6 +1897,9 @@ static void perf_counter_output(struct perf_counter *counter, if (record_type & PERF_RECORD_TIME) perf_output_put(&handle, time); + if (record_type & PERF_RECORD_ADDR) + perf_output_put(&handle, addr); + if (record_type & PERF_RECORD_GROUP) { struct perf_counter *leader, *sub; u64 nr = counter->nr_siblings; @@ -2158,7 +2166,7 @@ void perf_counter_munmap(unsigned long addr, unsigned long len, */ int perf_counter_overflow(struct perf_counter *counter, - int nmi, struct pt_regs *regs) + int nmi, struct pt_regs *regs, u64 addr) { int events = atomic_read(&counter->event_limit); int ret = 0; @@ -2175,7 +2183,7 @@ int perf_counter_overflow(struct perf_counter *counter, 
perf_counter_disable(counter); } - perf_counter_output(counter, nmi, regs); + perf_counter_output(counter, nmi, regs, addr); return ret; } @@ -2240,7 +2248,7 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) regs = task_pt_regs(current); if (regs) { - if (perf_counter_overflow(counter, 0, regs)) + if (perf_counter_overflow(counter, 0, regs, 0)) ret = HRTIMER_NORESTART; } @@ -2250,11 +2258,11 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) } static void perf_swcounter_overflow(struct perf_counter *counter, - int nmi, struct pt_regs *regs) + int nmi, struct pt_regs *regs, u64 addr) { perf_swcounter_update(counter); perf_swcounter_set_period(counter); - if (perf_counter_overflow(counter, nmi, regs)) + if (perf_counter_overflow(counter, nmi, regs, addr)) /* soft-disable the counter */ ; @@ -2286,16 +2294,17 @@ static int perf_swcounter_match(struct perf_counter *counter, } static void perf_swcounter_add(struct perf_counter *counter, u64 nr, - int nmi, struct pt_regs *regs) + int nmi, struct pt_regs *regs, u64 addr) { int neg = atomic64_add_negative(nr, &counter->hw.count); if (counter->hw.irq_period && !neg) - perf_swcounter_overflow(counter, nmi, regs); + perf_swcounter_overflow(counter, nmi, regs, addr); } static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, enum perf_event_types type, u32 event, - u64 nr, int nmi, struct pt_regs *regs) + u64 nr, int nmi, struct pt_regs *regs, + u64 addr) { struct perf_counter *counter; @@ -2305,7 +2314,7 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, rcu_read_lock(); list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { if (perf_swcounter_match(counter, type, event, regs)) - perf_swcounter_add(counter, nr, nmi, regs); + perf_swcounter_add(counter, nr, nmi, regs, addr); } rcu_read_unlock(); } @@ -2325,7 +2334,8 @@ static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx) } static void __perf_swcounter_event(enum perf_event_types type, u32 event, - u64 nr, int nmi, struct pt_regs *regs) + u64 nr, int nmi, struct pt_regs *regs, + u64 addr) { struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); int *recursion = perf_swcounter_recursion_context(cpuctx); @@ -2336,10 +2346,11 @@ static void __perf_swcounter_event(enum perf_event_types type, u32 event, (*recursion)++; barrier(); - perf_swcounter_ctx_event(&cpuctx->ctx, type, event, nr, nmi, regs); + perf_swcounter_ctx_event(&cpuctx->ctx, type, event, + nr, nmi, regs, addr); if (cpuctx->task_ctx) { perf_swcounter_ctx_event(cpuctx->task_ctx, type, event, - nr, nmi, regs); + nr, nmi, regs, addr); } barrier(); @@ -2349,9 +2360,10 @@ out: put_cpu_var(perf_cpu_context); } -void perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs) +void +perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr) { - __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs); + __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr); } static void perf_swcounter_read(struct perf_counter *counter) @@ -2548,7 +2560,7 @@ void perf_tpcounter_event(int event_id) if (!regs) regs = task_pt_regs(current); - __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs); + __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0); } extern int ftrace_profile_enable(int); -- cgit v1.2.3 From 6c0b324435ff49fb3c68fe808a93853d81c7fb97 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 9 Apr 2009 09:27:37 +1000 Subject: 
perf_counter: add MAINTAINERS entry This adds an entry in MAINTAINERS for the perf_counter subsystem. Signed-off-by: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <18909.13033.345975.434902@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- MAINTAINERS | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index c3b215970f7..16fb45adb53 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3483,6 +3483,16 @@ M: balbir@linux.vnet.ibm.com L: linux-kernel@vger.kernel.org S: Maintained +PERFORMANCE COUNTER SUBSYSTEM +P: Peter Zijlstra +M: a.p.zijlstra@chello.nl +P: Paul Mackerras +M: paulus@samba.org +P: Ingo Molnar +M: mingo@elte.hu +L: linux-kernel@vger.kernel.org +S: Supported + PERSONALITY HANDLING P: Christoph Hellwig M: hch@infradead.org -- cgit v1.2.3 From ca8f2d7f019a8547f39ddb9ed0144932f12807f2 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 9 Apr 2009 14:42:56 +1000 Subject: perf_counter: powerpc: add nmi_enter/nmi_exit calls Impact: fix potential deadlocks on powerpc Now that the core is using in_nmi() (added in e30e08f6, "perf_counter: fix NMI race in task clock"), we need the powerpc perf_counter_interrupt to call nmi_enter() and nmi_exit() in those cases where the interrupt happens when interrupts are soft-disabled. If interrupts were soft-enabled, we can treat it as a regular interrupt and do irq_enter/irq_exit around the whole routine. This lets us get rid of the test_perf_counter_pending() call at the end of perf_counter_interrupt, thus simplifying things a little. Signed-off-by: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <18909.31952.873098.336615@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index c9d019f1907..bd76d0fa2c3 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -714,7 +714,7 @@ hw_perf_counter_init(struct perf_counter *counter) * here so there is no possibility of being interrupted. */ static void record_and_restart(struct perf_counter *counter, long val, - struct pt_regs *regs) + struct pt_regs *regs, int nmi) { s64 prev, delta, left; int record = 0; @@ -749,7 +749,7 @@ static void record_and_restart(struct perf_counter *counter, long val, * Finally record data if requested. */ if (record) - perf_counter_overflow(counter, 1, regs, 0); + perf_counter_overflow(counter, nmi, regs, 0); } /* @@ -762,6 +762,17 @@ static void perf_counter_interrupt(struct pt_regs *regs) struct perf_counter *counter; long val; int found = 0; + int nmi; + + /* + * If interrupts were soft-disabled when this PMU interrupt + * occurred, treat it as an NMI. + */ + nmi = !regs->softe; + if (nmi) + nmi_enter(); + else + irq_enter(); for (i = 0; i < cpuhw->n_counters; ++i) { counter = cpuhw->counter[i]; @@ -769,7 +780,7 @@ static void perf_counter_interrupt(struct pt_regs *regs) if ((int)val < 0) { /* counter has overflowed */ found = 1; - record_and_restart(counter, val, regs); + record_and_restart(counter, val, regs, nmi); } } @@ -796,18 +807,10 @@ static void perf_counter_interrupt(struct pt_regs *regs) */ mtspr(SPRN_MMCR0, cpuhw->mmcr[0]); - /* - * If we need a wakeup, check whether interrupts were soft-enabled - * when we took the interrupt. If they were, we can wake stuff up - * immediately; otherwise we'll have do the wakeup when interrupts - * get soft-enabled. 
- */ - if (test_perf_counter_pending() && regs->softe) { - irq_enter(); - clear_perf_counter_pending(); - perf_counter_do_pending(); + if (nmi) + nmi_exit(); + else irq_exit(); - } } void hw_perf_counter_setup(int cpu) -- cgit v1.2.3 From 888fcee066a2f4abd0d0bc9418c0535f9b01e6e5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 9 Apr 2009 09:48:22 +0200 Subject: perf_counter: fix off task->comm by one strlen() does not include the \0. Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 321c57e3556..b07195bbd22 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1989,7 +1989,7 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event) unsigned int size; char *comm = comm_event->task->comm; - size = ALIGN(strlen(comm), sizeof(u64)); + size = ALIGN(strlen(comm)+1, sizeof(u64)); comm_event->comm = comm; comm_event->comm_size = size; @@ -2109,7 +2109,7 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event) } got_name: - size = ALIGN(strlen(name), sizeof(u64)); + size = ALIGN(strlen(name)+1, sizeof(u64)); mmap_event->file_name = name; mmap_event->file_size = size; -- cgit v1.2.3 From b3828ebb3901adfe989d8d4157ed28247aeec132 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 9 Apr 2009 09:50:04 +0200 Subject: perf_counter tools: include PID in perf-report output, tweak user/kernel printut It's handier than an entry. Also replace the kernel/user column with a more compact version: 0.52 cc1 [k] page_fault 0.57 :0 [k] _spin_lock 0.59 :7506 [.] 0.69 as [.] /usr/bin/as: 0.76 cc1 [.] /lib64/libc-2.8.so: _int_free 0.92 cc1 [k] clear_page_c 1.00 :7465 [.] 1.43 cc1 [.] /lib64/libc-2.8.so: memset 1.86 cc1 [.] /lib64/libc-2.8.so: _int_malloc 70.33 cc1 [.] /usr/libexec/gcc/x86_64-redhat-linux/4.3.2/cc1: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/perf-report.cc | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/Documentation/perf_counter/perf-report.cc b/Documentation/perf_counter/perf-report.cc index 09da0ba482c..1727317352b 100644 --- a/Documentation/perf_counter/perf-report.cc +++ b/Documentation/perf_counter/perf-report.cc @@ -277,10 +277,17 @@ static std::multimap rev_hist; static std::string resolve_comm(int pid) { - std::string comm = ""; + std::string comm; + std::map::const_iterator ci = comms.find(pid); - if (ci != comms.end()) + if (ci != comms.end()) { comm = ci->second; + } else { + char pid_str[30]; + + sprintf(pid_str, ":%d", pid); + comm = pid_str; + } return comm; } @@ -422,13 +429,13 @@ more: char output[1024]; if (event->header.misc & PERF_EVENT_MISC_KERNEL) { - level = "[kernel]"; + level = " [k] "; sym = resolve_kernel_symbol(event->ip.ip); } else if (event->header.misc & PERF_EVENT_MISC_USER) { - level = "[ user ]"; + level = " [.] "; sym = resolve_user_symbol(event->ip.pid, event->ip.ip); } else { - level = "[ hv ]"; + level = " [H] "; } comm = resolve_comm(event->ip.pid); -- cgit v1.2.3 From 9ee318a7825929bc3734110b83ae8e20e53d9de3 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 9 Apr 2009 10:53:44 +0200 Subject: perf_counter: optimize mmap/comm tracking Impact: performance optimization The mmap/comm tracking code does quite a lot of work before it discovers there's no interest in it, avoid that by keeping a counter. 
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090409085524.427173196@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 39 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 3 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index b07195bbd22..76376ecb23b 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -38,6 +38,10 @@ int perf_max_counters __read_mostly = 1; static int perf_reserved_percpu __read_mostly; static int perf_overcommit __read_mostly = 1; +static atomic_t nr_mmap_tracking __read_mostly; +static atomic_t nr_munmap_tracking __read_mostly; +static atomic_t nr_comm_tracking __read_mostly; + /* * Mutex for (sysadmin-configurable) counter reservations: */ @@ -1186,6 +1190,13 @@ static void free_counter(struct perf_counter *counter) { perf_pending_sync(counter); + if (counter->hw_event.mmap) + atomic_dec(&nr_mmap_tracking); + if (counter->hw_event.munmap) + atomic_dec(&nr_munmap_tracking); + if (counter->hw_event.comm) + atomic_dec(&nr_comm_tracking); + if (counter->destroy) counter->destroy(counter); @@ -2005,7 +2016,12 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event) void perf_counter_comm(struct task_struct *task) { - struct perf_comm_event comm_event = { + struct perf_comm_event comm_event; + + if (!atomic_read(&nr_comm_tracking)) + return; + + comm_event = (struct perf_comm_event){ .task = task, .event = { .header = { .type = PERF_EVENT_COMM, }, @@ -2128,7 +2144,12 @@ got_name: void perf_counter_mmap(unsigned long addr, unsigned long len, unsigned long pgoff, struct file *file) { - struct perf_mmap_event mmap_event = { + struct perf_mmap_event mmap_event; + + if (!atomic_read(&nr_mmap_tracking)) + return; + + mmap_event = (struct perf_mmap_event){ .file = file, .event = { .header = { .type = PERF_EVENT_MMAP, }, @@ -2146,7 +2167,12 @@ void perf_counter_mmap(unsigned long addr, unsigned long len, void perf_counter_munmap(unsigned long addr, unsigned long len, unsigned long pgoff, struct file *file) { - struct perf_mmap_event mmap_event = { + struct perf_mmap_event mmap_event; + + if (!atomic_read(&nr_munmap_tracking)) + return; + + mmap_event = (struct perf_mmap_event){ .file = file, .event = { .header = { .type = PERF_EVENT_MUNMAP, }, @@ -2725,6 +2751,13 @@ done: counter->hw_ops = hw_ops; + if (counter->hw_event.mmap) + atomic_inc(&nr_mmap_tracking); + if (counter->hw_event.munmap) + atomic_inc(&nr_munmap_tracking); + if (counter->hw_event.comm) + atomic_inc(&nr_comm_tracking); + return counter; } -- cgit v1.2.3 From 1ccd15497869f3ed83b5225d410df53a96e52757 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 9 Apr 2009 10:53:45 +0200 Subject: perf_counter: sysctl for system wide perf counters Impact: add sysctl for paranoid/relaxed perfcounters policy Allow the use of system wide perf counters to everybody, but provide a sysctl to disable it for the paranoid security minded. 
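With this, the root-only restriction on CPU-wide counters becomes opt-in: find_get_context() only demands CAP_SYS_ADMIN when the sysctl is set, as the check from the patch shows:

    /* Must be root to operate on a CPU counter -- but only if asked to be: */
    if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
            return ERR_PTR(-EACCES);

Assuming the table entry lands under the kernel sysctl directory as the hunk suggests, an administrator would restore the strict behaviour with echo 1 > /proc/sys/kernel/perf_counter_privileged.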
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090409085524.514046352@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 2 ++ kernel/perf_counter.c | 4 +++- kernel/sysctl.c | 11 +++++++++++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index c22363a4f74..98143288530 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -568,6 +568,8 @@ struct perf_callchain_entry { extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); +extern int sysctl_perf_counter_priv; + #else static inline void perf_counter_task_sched_in(struct task_struct *task, int cpu) { } diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 76376ecb23b..7efb7ebaaae 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -42,6 +42,8 @@ static atomic_t nr_mmap_tracking __read_mostly; static atomic_t nr_munmap_tracking __read_mostly; static atomic_t nr_comm_tracking __read_mostly; +int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */ + /* * Mutex for (sysadmin-configurable) counter reservations: */ @@ -1132,7 +1134,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu) */ if (cpu != -1) { /* Must be root to operate on a CPU counter: */ - if (!capable(CAP_SYS_ADMIN)) + if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN)) return ERR_PTR(-EACCES); if (cpu < 0 || cpu > num_possible_cpus()) diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 4286b62b34a..8ba457838d9 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include @@ -920,6 +921,16 @@ static struct ctl_table kern_table[] = { .child = slow_work_sysctls, }, #endif +#ifdef CONFIG_PERF_COUNTERS + { + .ctl_name = CTL_UNNUMBERED, + .procname = "perf_counter_privileged", + .data = &sysctl_perf_counter_priv, + .maxlen = sizeof(sysctl_perf_counter_priv), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, +#endif /* * NOTE: do not add new entries to this table unless you have read * Documentation/sysctl/ctl_unnumbered.txt -- cgit v1.2.3 From d3d21c412d8525eb2e208d990ab5eee5fb0fe03d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 9 Apr 2009 10:53:46 +0200 Subject: perf_counter: log full path names Impact: fix perf-report output for /home mounted binaries, etc. dentry_path() only provides path-names up to the mount root, which is unsuited for our purpose; use d_path() instead. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090409085524.601794134@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 7efb7ebaaae..7f9521c3c01 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2116,7 +2116,7 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event) name = strncpy(tmp, "//enomem", sizeof(tmp)); goto got_name; } - name = dentry_path(file->f_dentry, buf, PATH_MAX); + name = d_path(&file->f_path, buf, PATH_MAX); if (IS_ERR(name)) { name = strncpy(tmp, "//toolong", sizeof(tmp)); goto got_name; -- cgit v1.2.3 From 0f3fd87ce43727d6b8573191ce89e874533b1429 Mon Sep 17 00:00:00 2001 From: Luis Henriques Date: Mon, 13 Apr 2009 20:24:50 +0100 Subject: perf_counter: fix alignment in /proc/interrupts Trivial fix of column alignment in the /proc/interrupts file.
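The fix uses a printf-style dynamic field width: "%*s" takes its width from a preceding int argument (here prec, the label width handed in by the caller), so the "PND" label now gets the same padding as every other row. A minimal, standalone illustration (prec = 4 is an arbitrary example value):

	#include <stdio.h>

	int main(void)
	{
		int prec = 4;				/* stand-in for the computed width */

		printf("%*s: 12345\n", prec, "NMI");	/* " NMI: 12345" - aligned */
		printf("PND: 12345\n");			/* the old, misaligned form */
		printf("%*s: 12345\n", prec, "PND");	/* " PND: 12345" - aligned */
		return 0;
	}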
Signed-off-by: Luis Henriques Cc: Peter Zijlstra LKML-Reference: <20090413192449.GA3920@hades.domain.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index dccaaa85578..849cfabb1fd 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -67,7 +67,7 @@ static int show_other_interrupts(struct seq_file *p, int prec) for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs); seq_printf(p, " Performance counter interrupts\n"); - seq_printf(p, "PND: "); + seq_printf(p, "%*s: ", prec, "PND"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs); seq_printf(p, " Performance pending work\n"); -- cgit v1.2.3 From ff7b1b4f000cea84f071c1b6aa2918b2119d66f1 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Wed, 15 Apr 2009 16:55:05 +0100 Subject: perfcounters: export perf_tpcounter_event Needed for modular tracepoint support. Signed-off-by: Steven Whitehouse Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 7f9521c3c01..09396098dd0 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2590,6 +2590,7 @@ void perf_tpcounter_event(int event_id) __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0); } +EXPORT_SYMBOL_GPL(perf_tpcounter_event); extern int ftrace_profile_enable(int); extern void ftrace_profile_disable(int); -- cgit v1.2.3 From d24e473e5b2ca86d1288b9416227ccc603313d0f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 20 Apr 2009 13:32:07 +0200 Subject: perf_counter: copy in Git's top Makefile We'd like the perfcounter tools to have a user-space structure similar to Git's - so copy in Git's toplevel makefile as-is. We'll strip it down in subsequent commits to make it fit the perfcounters code. The Git version used: 66996ec: Sync with 1.6.2.4 Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 1735 ++++++++++++++++++++++++++++++++++- 1 file changed, 1724 insertions(+), 11 deletions(-) diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 1dd37ee7dbd..6e0838b03ad 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -1,18 +1,1731 @@ -BINS = kerneltop perfstat perf-record perf-report +# The default target of this Makefile is... +all:: -all: $(BINS) +# Define V=1 to have a more verbose compile. +# +# Define SNPRINTF_RETURNS_BOGUS if your are on a system which snprintf() +# or vsnprintf() return -1 instead of number of characters which would +# have been written to the final string if enough space had been available. +# +# Define FREAD_READS_DIRECTORIES if your are on a system which succeeds +# when attempting to read from an fopen'ed directory. +# +# Define NO_OPENSSL environment variable if you do not have OpenSSL. +# This also implies MOZILLA_SHA1. +# +# Define NO_CURL if you do not have libcurl installed. git-http-pull and +# git-http-push are not built, and you cannot use http:// and https:// +# transports. +# +# Define CURLDIR=/foo/bar if your curl header and library files are in +# /foo/bar/include and /foo/bar/lib directories. +# +# Define NO_EXPAT if you do not have expat installed. git-http-push is +# not built, and you cannot push using http:// and https:// transports.
+# +# Define EXPATDIR=/foo/bar if your expat header and library files are in +# /foo/bar/include and /foo/bar/lib directories. +# +# Define NO_D_INO_IN_DIRENT if you don't have d_ino in your struct dirent. +# +# Define NO_D_TYPE_IN_DIRENT if your platform defines DT_UNKNOWN but lacks +# d_type in struct dirent (latest Cygwin -- will be fixed soonish). +# +# Define NO_C99_FORMAT if your formatted IO functions (printf/scanf et.al.) +# do not support the 'size specifiers' introduced by C99, namely ll, hh, +# j, z, t. (representing long long int, char, intmax_t, size_t, ptrdiff_t). +# some C compilers supported these specifiers prior to C99 as an extension. +# +# Define NO_STRCASESTR if you don't have strcasestr. +# +# Define NO_MEMMEM if you don't have memmem. +# +# Define NO_STRLCPY if you don't have strlcpy. +# +# Define NO_STRTOUMAX if you don't have strtoumax in the C library. +# If your compiler also does not support long long or does not have +# strtoull, define NO_STRTOULL. +# +# Define NO_SETENV if you don't have setenv in the C library. +# +# Define NO_UNSETENV if you don't have unsetenv in the C library. +# +# Define NO_MKDTEMP if you don't have mkdtemp in the C library. +# +# Define NO_SYS_SELECT_H if you don't have sys/select.h. +# +# Define NO_SYMLINK_HEAD if you never want .git/HEAD to be a symbolic link. +# Enable it on Windows. By default, symrefs are still used. +# +# Define NO_SVN_TESTS if you want to skip time-consuming SVN interoperability +# tests. These tests take up a significant amount of the total test time +# but are not needed unless you plan to talk to SVN repos. +# +# Define NO_FINK if you are building on Darwin/Mac OS X, have Fink +# installed in /sw, but don't want GIT to link against any libraries +# installed there. If defined you may specify your own (or Fink's) +# include directories and library directories by defining CFLAGS +# and LDFLAGS appropriately. +# +# Define NO_DARWIN_PORTS if you are building on Darwin/Mac OS X, +# have DarwinPorts installed in /opt/local, but don't want GIT to +# link against any libraries installed there. If defined you may +# specify your own (or DarwinPort's) include directories and +# library directories by defining CFLAGS and LDFLAGS appropriately. +# +# Define PPC_SHA1 environment variable when running make to make use of +# a bundled SHA1 routine optimized for PowerPC. +# +# Define ARM_SHA1 environment variable when running make to make use of +# a bundled SHA1 routine optimized for ARM. +# +# Define MOZILLA_SHA1 environment variable when running make to make use of +# a bundled SHA1 routine coming from Mozilla. It is GPL'd and should be fast +# on non-x86 architectures (e.g. PowerPC), while the OpenSSL version (default +# choice) has very fast version optimized for i586. +# +# Define NEEDS_SSL_WITH_CRYPTO if you need -lcrypto with -lssl (Darwin). +# +# Define NEEDS_LIBICONV if linking with libc is not enough (Darwin). +# +# Define NEEDS_SOCKET if linking with libc is not enough (SunOS, +# Patrick Mauritz). +# +# Define NO_MMAP if you want to avoid mmap. +# +# Define NO_PTHREADS if you do not have or do not want to use Pthreads. +# +# Define NO_PREAD if you have a problem with pread() system call (e.g. +# cygwin.dll before v1.5.22). +# +# Define NO_FAST_WORKING_DIRECTORY if accessing objects in pack files is +# generally faster on your platform than accessing the working directory. +# +# Define NO_TRUSTABLE_FILEMODE if your filesystem may claim to support +# the executable mode bit, but doesn't really do so. 
+# +# Define NO_IPV6 if you lack IPv6 support and getaddrinfo(). +# +# Define NO_SOCKADDR_STORAGE if your platform does not have struct +# sockaddr_storage. +# +# Define NO_ICONV if your libc does not properly support iconv. +# +# Define OLD_ICONV if your library has an old iconv(), where the second +# (input buffer pointer) parameter is declared with type (const char **). +# +# Define NO_DEFLATE_BOUND if your zlib does not have deflateBound. +# +# Define NO_R_TO_GCC_LINKER if your gcc does not like "-R/path/lib" +# that tells runtime paths to dynamic libraries; +# "-Wl,-rpath=/path/lib" is used instead. +# +# Define USE_NSEC below if you want git to care about sub-second file mtimes +# and ctimes. Note that you need recent glibc (at least 2.2.4) for this, and +# it will BREAK YOUR LOCAL DIFFS! show-diff and anything using it will likely +# randomly break unless your underlying filesystem supports those sub-second +# times (my ext3 doesn't). +# +# Define USE_ST_TIMESPEC if your "struct stat" uses "st_ctimespec" instead of +# "st_ctim" +# +# Define NO_NSEC if your "struct stat" does not have "st_ctim.tv_nsec" +# available. This automatically turns USE_NSEC off. +# +# Define USE_STDEV below if you want git to care about the underlying device +# change being considered an inode change from the update-index perspective. +# +# Define NO_ST_BLOCKS_IN_STRUCT_STAT if your platform does not have st_blocks +# field that counts the on-disk footprint in 512-byte blocks. +# +# Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8 +# +# Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72. +# +# Define NO_PERL_MAKEMAKER if you cannot use Makefiles generated by perl's +# MakeMaker (e.g. using ActiveState under Cygwin). +# +# Define NO_PERL if you do not want Perl scripts or libraries at all. +# +# Define NO_TCLTK if you do not want Tcl/Tk GUI. +# +# The TCL_PATH variable governs the location of the Tcl interpreter +# used to optimize git-gui for your system. Only used if NO_TCLTK +# is not set. Defaults to the bare 'tclsh'. +# +# The TCLTK_PATH variable governs the location of the Tcl/Tk interpreter. +# If not set it defaults to the bare 'wish'. If it is set to the empty +# string then NO_TCLTK will be forced (this is used by configure script). +# +# Define THREADED_DELTA_SEARCH if you have pthreads and wish to exploit +# parallel delta searching when packing objects. +# +# Define INTERNAL_QSORT to use Git's implementation of qsort(), which +# is a simplified version of the merge sort used in glibc. This is +# recommended if Git triggers O(n^2) behavior in your platform's qsort(). +# +# Define NO_EXTERNAL_GREP if you don't want "git grep" to ever call +# your external grep (e.g., if your system lacks grep, if its grep is +# broken, or spawning external process is slower than built-in grep git has). 
-kerneltop: kerneltop.c ../../include/linux/perf_counter.h - cc -O6 -Wall -lrt -o $@ $< +GIT-VERSION-FILE: .FORCE-GIT-VERSION-FILE + @$(SHELL_PATH) ./GIT-VERSION-GEN +-include GIT-VERSION-FILE -perf-record: perf-record.c ../../include/linux/perf_counter.h - cc -O6 -Wall -lrt -o $@ $< +uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not') +uname_M := $(shell sh -c 'uname -m 2>/dev/null || echo not') +uname_O := $(shell sh -c 'uname -o 2>/dev/null || echo not') +uname_R := $(shell sh -c 'uname -r 2>/dev/null || echo not') +uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not') +uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') -perf-report: perf-report.cc ../../include/linux/perf_counter.h - g++ -O6 -Wall -lrt -o $@ $< +# CFLAGS and LDFLAGS are for the users to override from the command line. -perfstat: kerneltop - ln -sf kerneltop perfstat +CFLAGS = -g -O2 -Wall +LDFLAGS = +ALL_CFLAGS = $(CFLAGS) +ALL_LDFLAGS = $(LDFLAGS) +STRIP ?= strip + +# Among the variables below, these: +# gitexecdir +# template_dir +# mandir +# infodir +# htmldir +# ETC_GITCONFIG (but not sysconfdir) +# can be specified as a relative path some/where/else; +# this is interpreted as relative to $(prefix) and "git" at +# runtime figures out where they are based on the path to the executable. +# This can help installing the suite in a relocatable way. + +prefix = $(HOME) +bindir_relative = bin +bindir = $(prefix)/$(bindir_relative) +mandir = share/man +infodir = share/info +gitexecdir = libexec/git-core +sharedir = $(prefix)/share +template_dir = share/git-core/templates +htmldir = share/doc/git-doc +ifeq ($(prefix),/usr) +sysconfdir = /etc +ETC_GITCONFIG = $(sysconfdir)/gitconfig +else +sysconfdir = $(prefix)/etc +ETC_GITCONFIG = etc/gitconfig +endif +lib = lib +# DESTDIR= + +# default configuration for gitweb +GITWEB_CONFIG = gitweb_config.perl +GITWEB_CONFIG_SYSTEM = /etc/gitweb.conf +GITWEB_HOME_LINK_STR = projects +GITWEB_SITENAME = +GITWEB_PROJECTROOT = /pub/git +GITWEB_PROJECT_MAXDEPTH = 2007 +GITWEB_EXPORT_OK = +GITWEB_STRICT_EXPORT = +GITWEB_BASE_URL = +GITWEB_LIST = +GITWEB_HOMETEXT = indextext.html +GITWEB_CSS = gitweb.css +GITWEB_LOGO = git-logo.png +GITWEB_FAVICON = git-favicon.png +GITWEB_SITE_HEADER = +GITWEB_SITE_FOOTER = + +export prefix bindir sharedir sysconfdir + +CC = gcc +AR = ar +RM = rm -f +TAR = tar +FIND = find +INSTALL = install +RPMBUILD = rpmbuild +TCL_PATH = tclsh +TCLTK_PATH = wish +PTHREAD_LIBS = -lpthread + +export TCL_PATH TCLTK_PATH + +# sparse is architecture-neutral, which means that we need to tell it +# explicitly what architecture to check for. Fix this up for yours.. +SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ + + + +### --- END CONFIGURATION SECTION --- + +# Those must not be GNU-specific; they are shared with perl/ which may +# be built by a different compiler. (Note that this is an artifact now +# but it still might be nice to keep that distinction.) 
+BASIC_CFLAGS = +BASIC_LDFLAGS = + +# Guard against environment variables +BUILTIN_OBJS = +BUILT_INS = +COMPAT_CFLAGS = +COMPAT_OBJS = +LIB_H = +LIB_OBJS = +PROGRAMS = +SCRIPT_PERL = +SCRIPT_SH = +TEST_PROGRAMS = + +SCRIPT_SH += git-am.sh +SCRIPT_SH += git-bisect.sh +SCRIPT_SH += git-difftool--helper.sh +SCRIPT_SH += git-filter-branch.sh +SCRIPT_SH += git-lost-found.sh +SCRIPT_SH += git-merge-octopus.sh +SCRIPT_SH += git-merge-one-file.sh +SCRIPT_SH += git-merge-resolve.sh +SCRIPT_SH += git-mergetool.sh +SCRIPT_SH += git-mergetool--lib.sh +SCRIPT_SH += git-parse-remote.sh +SCRIPT_SH += git-pull.sh +SCRIPT_SH += git-quiltimport.sh +SCRIPT_SH += git-rebase--interactive.sh +SCRIPT_SH += git-rebase.sh +SCRIPT_SH += git-repack.sh +SCRIPT_SH += git-request-pull.sh +SCRIPT_SH += git-sh-setup.sh +SCRIPT_SH += git-stash.sh +SCRIPT_SH += git-submodule.sh +SCRIPT_SH += git-web--browse.sh + +SCRIPT_PERL += git-add--interactive.perl +SCRIPT_PERL += git-difftool.perl +SCRIPT_PERL += git-archimport.perl +SCRIPT_PERL += git-cvsexportcommit.perl +SCRIPT_PERL += git-cvsimport.perl +SCRIPT_PERL += git-cvsserver.perl +SCRIPT_PERL += git-relink.perl +SCRIPT_PERL += git-send-email.perl +SCRIPT_PERL += git-svn.perl + +SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) \ + $(patsubst %.perl,%,$(SCRIPT_PERL)) \ + git-instaweb + +# Empty... +EXTRA_PROGRAMS = + +# ... and all the rest that could be moved out of bindir to gitexecdir +PROGRAMS += $(EXTRA_PROGRAMS) +PROGRAMS += git-fast-import$X +PROGRAMS += git-hash-object$X +PROGRAMS += git-index-pack$X +PROGRAMS += git-merge-index$X +PROGRAMS += git-merge-tree$X +PROGRAMS += git-mktag$X +PROGRAMS += git-mktree$X +PROGRAMS += git-pack-redundant$X +PROGRAMS += git-patch-id$X +PROGRAMS += git-shell$X +PROGRAMS += git-show-index$X +PROGRAMS += git-unpack-file$X +PROGRAMS += git-update-server-info$X +PROGRAMS += git-upload-pack$X +PROGRAMS += git-var$X + +# List built-in command $C whose implementation cmd_$C() is not in +# builtin-$C.o but is linked in as part of some other command. +BUILT_INS += $(patsubst builtin-%.o,git-%$X,$(BUILTIN_OBJS)) + +BUILT_INS += git-cherry$X +BUILT_INS += git-cherry-pick$X +BUILT_INS += git-format-patch$X +BUILT_INS += git-fsck-objects$X +BUILT_INS += git-get-tar-commit-id$X +BUILT_INS += git-init$X +BUILT_INS += git-merge-subtree$X +BUILT_INS += git-peek-remote$X +BUILT_INS += git-repo-config$X +BUILT_INS += git-show$X +BUILT_INS += git-stage$X +BUILT_INS += git-status$X +BUILT_INS += git-whatchanged$X + +# what 'all' will build and 'install' will install, in gitexecdir +ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS) + +# what 'all' will build but not install in gitexecdir +OTHER_PROGRAMS = git$X +ifndef NO_PERL +OTHER_PROGRAMS += gitweb/gitweb.cgi +endif + +# Set paths to tools early so that they can be used for version tests. 
+ifndef SHELL_PATH + SHELL_PATH = /bin/sh +endif +ifndef PERL_PATH + PERL_PATH = /usr/bin/perl +endif + +export PERL_PATH + +LIB_FILE=libgit.a +XDIFF_LIB=xdiff/lib.a + +LIB_H += archive.h +LIB_H += attr.h +LIB_H += blob.h +LIB_H += builtin.h +LIB_H += cache.h +LIB_H += cache-tree.h +LIB_H += commit.h +LIB_H += compat/cygwin.h +LIB_H += compat/mingw.h +LIB_H += csum-file.h +LIB_H += decorate.h +LIB_H += delta.h +LIB_H += diffcore.h +LIB_H += diff.h +LIB_H += dir.h +LIB_H += fsck.h +LIB_H += git-compat-util.h +LIB_H += graph.h +LIB_H += grep.h +LIB_H += hash.h +LIB_H += help.h +LIB_H += levenshtein.h +LIB_H += list-objects.h +LIB_H += ll-merge.h +LIB_H += log-tree.h +LIB_H += mailmap.h +LIB_H += merge-recursive.h +LIB_H += object.h +LIB_H += pack.h +LIB_H += pack-refs.h +LIB_H += pack-revindex.h +LIB_H += parse-options.h +LIB_H += patch-ids.h +LIB_H += pkt-line.h +LIB_H += progress.h +LIB_H += quote.h +LIB_H += reflog-walk.h +LIB_H += refs.h +LIB_H += remote.h +LIB_H += rerere.h +LIB_H += revision.h +LIB_H += run-command.h +LIB_H += sha1-lookup.h +LIB_H += sideband.h +LIB_H += sigchain.h +LIB_H += strbuf.h +LIB_H += string-list.h +LIB_H += tag.h +LIB_H += transport.h +LIB_H += tree.h +LIB_H += tree-walk.h +LIB_H += unpack-trees.h +LIB_H += userdiff.h +LIB_H += utf8.h +LIB_H += wt-status.h + +LIB_OBJS += abspath.o +LIB_OBJS += alias.o +LIB_OBJS += alloc.o +LIB_OBJS += archive.o +LIB_OBJS += archive-tar.o +LIB_OBJS += archive-zip.o +LIB_OBJS += attr.o +LIB_OBJS += base85.o +LIB_OBJS += bisect.o +LIB_OBJS += blob.o +LIB_OBJS += branch.o +LIB_OBJS += bundle.o +LIB_OBJS += cache-tree.o +LIB_OBJS += color.o +LIB_OBJS += combine-diff.o +LIB_OBJS += commit.o +LIB_OBJS += config.o +LIB_OBJS += connect.o +LIB_OBJS += convert.o +LIB_OBJS += copy.o +LIB_OBJS += csum-file.o +LIB_OBJS += ctype.o +LIB_OBJS += date.o +LIB_OBJS += decorate.o +LIB_OBJS += diffcore-break.o +LIB_OBJS += diffcore-delta.o +LIB_OBJS += diffcore-order.o +LIB_OBJS += diffcore-pickaxe.o +LIB_OBJS += diffcore-rename.o +LIB_OBJS += diff-delta.o +LIB_OBJS += diff-lib.o +LIB_OBJS += diff-no-index.o +LIB_OBJS += diff.o +LIB_OBJS += dir.o +LIB_OBJS += editor.o +LIB_OBJS += entry.o +LIB_OBJS += environment.o +LIB_OBJS += exec_cmd.o +LIB_OBJS += fsck.o +LIB_OBJS += graph.o +LIB_OBJS += grep.o +LIB_OBJS += hash.o +LIB_OBJS += help.o +LIB_OBJS += ident.o +LIB_OBJS += levenshtein.o +LIB_OBJS += list-objects.o +LIB_OBJS += ll-merge.o +LIB_OBJS += lockfile.o +LIB_OBJS += log-tree.o +LIB_OBJS += mailmap.o +LIB_OBJS += match-trees.o +LIB_OBJS += merge-file.o +LIB_OBJS += merge-recursive.o +LIB_OBJS += name-hash.o +LIB_OBJS += object.o +LIB_OBJS += pack-check.o +LIB_OBJS += pack-refs.o +LIB_OBJS += pack-revindex.o +LIB_OBJS += pack-write.o +LIB_OBJS += pager.o +LIB_OBJS += parse-options.o +LIB_OBJS += patch-delta.o +LIB_OBJS += patch-ids.o +LIB_OBJS += path.o +LIB_OBJS += pkt-line.o +LIB_OBJS += preload-index.o +LIB_OBJS += pretty.o +LIB_OBJS += progress.o +LIB_OBJS += quote.o +LIB_OBJS += reachable.o +LIB_OBJS += read-cache.o +LIB_OBJS += reflog-walk.o +LIB_OBJS += refs.o +LIB_OBJS += remote.o +LIB_OBJS += rerere.o +LIB_OBJS += revision.o +LIB_OBJS += run-command.o +LIB_OBJS += server-info.o +LIB_OBJS += setup.o +LIB_OBJS += sha1-lookup.o +LIB_OBJS += sha1_file.o +LIB_OBJS += sha1_name.o +LIB_OBJS += shallow.o +LIB_OBJS += sideband.o +LIB_OBJS += sigchain.o +LIB_OBJS += strbuf.o +LIB_OBJS += string-list.o +LIB_OBJS += symlinks.o +LIB_OBJS += tag.o +LIB_OBJS += trace.o +LIB_OBJS += transport.o +LIB_OBJS += tree-diff.o +LIB_OBJS += tree.o +LIB_OBJS 
+= tree-walk.o +LIB_OBJS += unpack-trees.o +LIB_OBJS += usage.o +LIB_OBJS += userdiff.o +LIB_OBJS += utf8.o +LIB_OBJS += walker.o +LIB_OBJS += wrapper.o +LIB_OBJS += write_or_die.o +LIB_OBJS += ws.o +LIB_OBJS += wt-status.o +LIB_OBJS += xdiff-interface.o + +BUILTIN_OBJS += builtin-add.o +BUILTIN_OBJS += builtin-annotate.o +BUILTIN_OBJS += builtin-apply.o +BUILTIN_OBJS += builtin-archive.o +BUILTIN_OBJS += builtin-bisect--helper.o +BUILTIN_OBJS += builtin-blame.o +BUILTIN_OBJS += builtin-branch.o +BUILTIN_OBJS += builtin-bundle.o +BUILTIN_OBJS += builtin-cat-file.o +BUILTIN_OBJS += builtin-check-attr.o +BUILTIN_OBJS += builtin-check-ref-format.o +BUILTIN_OBJS += builtin-checkout-index.o +BUILTIN_OBJS += builtin-checkout.o +BUILTIN_OBJS += builtin-clean.o +BUILTIN_OBJS += builtin-clone.o +BUILTIN_OBJS += builtin-commit-tree.o +BUILTIN_OBJS += builtin-commit.o +BUILTIN_OBJS += builtin-config.o +BUILTIN_OBJS += builtin-count-objects.o +BUILTIN_OBJS += builtin-describe.o +BUILTIN_OBJS += builtin-diff-files.o +BUILTIN_OBJS += builtin-diff-index.o +BUILTIN_OBJS += builtin-diff-tree.o +BUILTIN_OBJS += builtin-diff.o +BUILTIN_OBJS += builtin-fast-export.o +BUILTIN_OBJS += builtin-fetch--tool.o +BUILTIN_OBJS += builtin-fetch-pack.o +BUILTIN_OBJS += builtin-fetch.o +BUILTIN_OBJS += builtin-fmt-merge-msg.o +BUILTIN_OBJS += builtin-for-each-ref.o +BUILTIN_OBJS += builtin-fsck.o +BUILTIN_OBJS += builtin-gc.o +BUILTIN_OBJS += builtin-grep.o +BUILTIN_OBJS += builtin-help.o +BUILTIN_OBJS += builtin-init-db.o +BUILTIN_OBJS += builtin-log.o +BUILTIN_OBJS += builtin-ls-files.o +BUILTIN_OBJS += builtin-ls-remote.o +BUILTIN_OBJS += builtin-ls-tree.o +BUILTIN_OBJS += builtin-mailinfo.o +BUILTIN_OBJS += builtin-mailsplit.o +BUILTIN_OBJS += builtin-merge.o +BUILTIN_OBJS += builtin-merge-base.o +BUILTIN_OBJS += builtin-merge-file.o +BUILTIN_OBJS += builtin-merge-ours.o +BUILTIN_OBJS += builtin-merge-recursive.o +BUILTIN_OBJS += builtin-mv.o +BUILTIN_OBJS += builtin-name-rev.o +BUILTIN_OBJS += builtin-pack-objects.o +BUILTIN_OBJS += builtin-pack-refs.o +BUILTIN_OBJS += builtin-prune-packed.o +BUILTIN_OBJS += builtin-prune.o +BUILTIN_OBJS += builtin-push.o +BUILTIN_OBJS += builtin-read-tree.o +BUILTIN_OBJS += builtin-receive-pack.o +BUILTIN_OBJS += builtin-reflog.o +BUILTIN_OBJS += builtin-remote.o +BUILTIN_OBJS += builtin-rerere.o +BUILTIN_OBJS += builtin-reset.o +BUILTIN_OBJS += builtin-rev-list.o +BUILTIN_OBJS += builtin-rev-parse.o +BUILTIN_OBJS += builtin-revert.o +BUILTIN_OBJS += builtin-rm.o +BUILTIN_OBJS += builtin-send-pack.o +BUILTIN_OBJS += builtin-shortlog.o +BUILTIN_OBJS += builtin-show-branch.o +BUILTIN_OBJS += builtin-show-ref.o +BUILTIN_OBJS += builtin-stripspace.o +BUILTIN_OBJS += builtin-symbolic-ref.o +BUILTIN_OBJS += builtin-tag.o +BUILTIN_OBJS += builtin-tar-tree.o +BUILTIN_OBJS += builtin-unpack-objects.o +BUILTIN_OBJS += builtin-update-index.o +BUILTIN_OBJS += builtin-update-ref.o +BUILTIN_OBJS += builtin-upload-archive.o +BUILTIN_OBJS += builtin-verify-pack.o +BUILTIN_OBJS += builtin-verify-tag.o +BUILTIN_OBJS += builtin-write-tree.o + +GITLIBS = $(LIB_FILE) $(XDIFF_LIB) +EXTLIBS = + +# +# Platform specific tweaks +# + +# We choose to avoid "if .. else if .. else .. endif endif" +# because maintaining the nesting to match is a pain. If +# we had "elif" things would have been much nicer... 
+ +ifeq ($(uname_S),Linux) + NO_STRLCPY = YesPlease + THREADED_DELTA_SEARCH = YesPlease +endif +ifeq ($(uname_S),GNU/kFreeBSD) + NO_STRLCPY = YesPlease + THREADED_DELTA_SEARCH = YesPlease +endif +ifeq ($(uname_S),UnixWare) + CC = cc + NEEDS_SOCKET = YesPlease + NEEDS_NSL = YesPlease + NEEDS_SSL_WITH_CRYPTO = YesPlease + NEEDS_LIBICONV = YesPlease + SHELL_PATH = /usr/local/bin/bash + NO_IPV6 = YesPlease + NO_HSTRERROR = YesPlease + BASIC_CFLAGS += -Kthread + BASIC_CFLAGS += -I/usr/local/include + BASIC_LDFLAGS += -L/usr/local/lib + INSTALL = ginstall + TAR = gtar + NO_STRCASESTR = YesPlease + NO_MEMMEM = YesPlease +endif +ifeq ($(uname_S),SCO_SV) + ifeq ($(uname_R),3.2) + CFLAGS = -O2 + endif + ifeq ($(uname_R),5) + CC = cc + BASIC_CFLAGS += -Kthread + endif + NEEDS_SOCKET = YesPlease + NEEDS_NSL = YesPlease + NEEDS_SSL_WITH_CRYPTO = YesPlease + NEEDS_LIBICONV = YesPlease + SHELL_PATH = /usr/bin/bash + NO_IPV6 = YesPlease + NO_HSTRERROR = YesPlease + BASIC_CFLAGS += -I/usr/local/include + BASIC_LDFLAGS += -L/usr/local/lib + NO_STRCASESTR = YesPlease + NO_MEMMEM = YesPlease + INSTALL = ginstall + TAR = gtar +endif +ifeq ($(uname_S),Darwin) + NEEDS_SSL_WITH_CRYPTO = YesPlease + NEEDS_LIBICONV = YesPlease + ifeq ($(shell expr "$(uname_R)" : '[15678]\.'),2) + OLD_ICONV = UnfortunatelyYes + endif + ifeq ($(shell expr "$(uname_R)" : '[15]\.'),2) + NO_STRLCPY = YesPlease + endif + NO_MEMMEM = YesPlease + THREADED_DELTA_SEARCH = YesPlease + USE_ST_TIMESPEC = YesPlease +endif +ifeq ($(uname_S),SunOS) + NEEDS_SOCKET = YesPlease + NEEDS_NSL = YesPlease + SHELL_PATH = /bin/bash + NO_STRCASESTR = YesPlease + NO_MEMMEM = YesPlease + NO_HSTRERROR = YesPlease + NO_MKDTEMP = YesPlease + OLD_ICONV = UnfortunatelyYes + ifeq ($(uname_R),5.8) + NO_UNSETENV = YesPlease + NO_SETENV = YesPlease + NO_C99_FORMAT = YesPlease + NO_STRTOUMAX = YesPlease + endif + ifeq ($(uname_R),5.9) + NO_UNSETENV = YesPlease + NO_SETENV = YesPlease + NO_C99_FORMAT = YesPlease + NO_STRTOUMAX = YesPlease + endif + INSTALL = ginstall + TAR = gtar + BASIC_CFLAGS += -D__EXTENSIONS__ +endif +ifeq ($(uname_O),Cygwin) + NO_D_TYPE_IN_DIRENT = YesPlease + NO_D_INO_IN_DIRENT = YesPlease + NO_STRCASESTR = YesPlease + NO_MEMMEM = YesPlease + NO_SYMLINK_HEAD = YesPlease + NEEDS_LIBICONV = YesPlease + NO_FAST_WORKING_DIRECTORY = UnfortunatelyYes + NO_TRUSTABLE_FILEMODE = UnfortunatelyYes + OLD_ICONV = UnfortunatelyYes + # There are conflicting reports about this. + # On some boxes NO_MMAP is needed, and not so elsewhere. 
+ # Try commenting this out if you suspect MMAP is more efficient + NO_MMAP = YesPlease + NO_IPV6 = YesPlease + X = .exe +endif +ifeq ($(uname_S),FreeBSD) + NEEDS_LIBICONV = YesPlease + NO_MEMMEM = YesPlease + BASIC_CFLAGS += -I/usr/local/include + BASIC_LDFLAGS += -L/usr/local/lib + DIR_HAS_BSD_GROUP_SEMANTICS = YesPlease + USE_ST_TIMESPEC = YesPlease + THREADED_DELTA_SEARCH = YesPlease + ifeq ($(shell expr "$(uname_R)" : '4\.'),2) + PTHREAD_LIBS = -pthread + NO_UINTMAX_T = YesPlease + NO_STRTOUMAX = YesPlease + endif +endif +ifeq ($(uname_S),OpenBSD) + NO_STRCASESTR = YesPlease + NO_MEMMEM = YesPlease + NEEDS_LIBICONV = YesPlease + BASIC_CFLAGS += -I/usr/local/include + BASIC_LDFLAGS += -L/usr/local/lib + THREADED_DELTA_SEARCH = YesPlease +endif +ifeq ($(uname_S),NetBSD) + ifeq ($(shell expr "$(uname_R)" : '[01]\.'),2) + NEEDS_LIBICONV = YesPlease + endif + BASIC_CFLAGS += -I/usr/pkg/include + BASIC_LDFLAGS += -L/usr/pkg/lib $(CC_LD_DYNPATH)/usr/pkg/lib + THREADED_DELTA_SEARCH = YesPlease +endif +ifeq ($(uname_S),AIX) + NO_STRCASESTR=YesPlease + NO_MEMMEM = YesPlease + NO_MKDTEMP = YesPlease + NO_STRLCPY = YesPlease + NO_NSEC = YesPlease + FREAD_READS_DIRECTORIES = UnfortunatelyYes + INTERNAL_QSORT = UnfortunatelyYes + NEEDS_LIBICONV=YesPlease + BASIC_CFLAGS += -D_LARGE_FILES + ifneq ($(shell expr "$(uname_V)" : '[1234]'),1) + THREADED_DELTA_SEARCH = YesPlease + else + NO_PTHREADS = YesPlease + endif +endif +ifeq ($(uname_S),GNU) + # GNU/Hurd + NO_STRLCPY=YesPlease +endif +ifeq ($(uname_S),IRIX64) + NO_IPV6=YesPlease + NO_SETENV=YesPlease + NO_STRCASESTR=YesPlease + NO_MEMMEM = YesPlease + NO_STRLCPY = YesPlease + NO_SOCKADDR_STORAGE=YesPlease + SHELL_PATH=/usr/gnu/bin/bash + BASIC_CFLAGS += -DPATH_MAX=1024 + # for now, build 32-bit version + BASIC_LDFLAGS += -L/usr/lib32 +endif +ifeq ($(uname_S),HP-UX) + NO_IPV6=YesPlease + NO_SETENV=YesPlease + NO_STRCASESTR=YesPlease + NO_MEMMEM = YesPlease + NO_STRLCPY = YesPlease + NO_MKDTEMP = YesPlease + NO_UNSETENV = YesPlease + NO_HSTRERROR = YesPlease + NO_SYS_SELECT_H = YesPlease + SNPRINTF_RETURNS_BOGUS = YesPlease +endif +ifneq (,$(findstring CYGWIN,$(uname_S))) + COMPAT_OBJS += compat/cygwin.o +endif +ifneq (,$(findstring MINGW,$(uname_S))) + NO_PREAD = YesPlease + NO_OPENSSL = YesPlease + NO_CURL = YesPlease + NO_SYMLINK_HEAD = YesPlease + NO_IPV6 = YesPlease + NO_SETENV = YesPlease + NO_UNSETENV = YesPlease + NO_STRCASESTR = YesPlease + NO_STRLCPY = YesPlease + NO_MEMMEM = YesPlease + NO_PTHREADS = YesPlease + NEEDS_LIBICONV = YesPlease + OLD_ICONV = YesPlease + NO_C99_FORMAT = YesPlease + NO_STRTOUMAX = YesPlease + NO_MKDTEMP = YesPlease + SNPRINTF_RETURNS_BOGUS = YesPlease + NO_SVN_TESTS = YesPlease + NO_PERL_MAKEMAKER = YesPlease + RUNTIME_PREFIX = YesPlease + NO_POSIX_ONLY_PROGRAMS = YesPlease + NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease + NO_NSEC = YesPlease + USE_WIN32_MMAP = YesPlease + COMPAT_CFLAGS += -D__USE_MINGW_ACCESS -DNOGDI -Icompat -Icompat/regex -Icompat/fnmatch + COMPAT_CFLAGS += -DSNPRINTF_SIZE_CORR=1 + COMPAT_CFLAGS += -DSTRIP_EXTENSION=\".exe\" + COMPAT_OBJS += compat/mingw.o compat/fnmatch/fnmatch.o compat/regex/regex.o compat/winansi.o + EXTLIBS += -lws2_32 + X = .exe +endif +ifneq (,$(findstring arm,$(uname_M))) + ARM_SHA1 = YesPlease +endif + +-include config.mak.autogen +-include config.mak + +ifeq ($(uname_S),Darwin) + ifndef NO_FINK + ifeq ($(shell test -d /sw/lib && echo y),y) + BASIC_CFLAGS += -I/sw/include + BASIC_LDFLAGS += -L/sw/lib + endif + endif + ifndef NO_DARWIN_PORTS + ifeq ($(shell test -d 
/opt/local/lib && echo y),y) + BASIC_CFLAGS += -I/opt/local/include + BASIC_LDFLAGS += -L/opt/local/lib + endif + endif + PTHREAD_LIBS = +endif + +ifndef CC_LD_DYNPATH + ifdef NO_R_TO_GCC_LINKER + # Some gcc does not accept and pass -R to the linker to specify + # the runtime dynamic library path. + CC_LD_DYNPATH = -Wl,-rpath, + else + CC_LD_DYNPATH = -R + endif +endif + +ifdef NO_CURL + BASIC_CFLAGS += -DNO_CURL +else + ifdef CURLDIR + # Try "-Wl,-rpath=$(CURLDIR)/$(lib)" in such a case. + BASIC_CFLAGS += -I$(CURLDIR)/include + CURL_LIBCURL = -L$(CURLDIR)/$(lib) $(CC_LD_DYNPATH)$(CURLDIR)/$(lib) -lcurl + else + CURL_LIBCURL = -lcurl + endif + BUILTIN_OBJS += builtin-http-fetch.o + EXTLIBS += $(CURL_LIBCURL) + LIB_OBJS += http.o http-walker.o + curl_check := $(shell (echo 070908; curl-config --vernum) | sort -r | sed -ne 2p) + ifeq "$(curl_check)" "070908" + ifndef NO_EXPAT + PROGRAMS += git-http-push$X + endif + endif + ifndef NO_EXPAT + ifdef EXPATDIR + BASIC_CFLAGS += -I$(EXPATDIR)/include + EXPAT_LIBEXPAT = -L$(EXPATDIR)/$(lib) $(CC_LD_DYNPATH)$(EXPATDIR)/$(lib) -lexpat + else + EXPAT_LIBEXPAT = -lexpat + endif + endif +endif + +ifdef ZLIB_PATH + BASIC_CFLAGS += -I$(ZLIB_PATH)/include + EXTLIBS += -L$(ZLIB_PATH)/$(lib) $(CC_LD_DYNPATH)$(ZLIB_PATH)/$(lib) +endif +EXTLIBS += -lz + +ifndef NO_POSIX_ONLY_PROGRAMS + PROGRAMS += git-daemon$X + PROGRAMS += git-imap-send$X +endif +ifndef NO_OPENSSL + OPENSSL_LIBSSL = -lssl + ifdef OPENSSLDIR + BASIC_CFLAGS += -I$(OPENSSLDIR)/include + OPENSSL_LINK = -L$(OPENSSLDIR)/$(lib) $(CC_LD_DYNPATH)$(OPENSSLDIR)/$(lib) + else + OPENSSL_LINK = + endif +else + BASIC_CFLAGS += -DNO_OPENSSL + MOZILLA_SHA1 = 1 + OPENSSL_LIBSSL = +endif +ifdef NEEDS_SSL_WITH_CRYPTO + LIB_4_CRYPTO = $(OPENSSL_LINK) -lcrypto -lssl +else + LIB_4_CRYPTO = $(OPENSSL_LINK) -lcrypto +endif +ifdef NEEDS_LIBICONV + ifdef ICONVDIR + BASIC_CFLAGS += -I$(ICONVDIR)/include + ICONV_LINK = -L$(ICONVDIR)/$(lib) $(CC_LD_DYNPATH)$(ICONVDIR)/$(lib) + else + ICONV_LINK = + endif + EXTLIBS += $(ICONV_LINK) -liconv +endif +ifdef NEEDS_SOCKET + EXTLIBS += -lsocket +endif +ifdef NEEDS_NSL + EXTLIBS += -lnsl +endif +ifdef NO_D_TYPE_IN_DIRENT + BASIC_CFLAGS += -DNO_D_TYPE_IN_DIRENT +endif +ifdef NO_D_INO_IN_DIRENT + BASIC_CFLAGS += -DNO_D_INO_IN_DIRENT +endif +ifdef NO_ST_BLOCKS_IN_STRUCT_STAT + BASIC_CFLAGS += -DNO_ST_BLOCKS_IN_STRUCT_STAT +endif +ifdef USE_NSEC + BASIC_CFLAGS += -DUSE_NSEC +endif +ifdef USE_ST_TIMESPEC + BASIC_CFLAGS += -DUSE_ST_TIMESPEC +endif +ifdef NO_NSEC + BASIC_CFLAGS += -DNO_NSEC +endif +ifdef NO_C99_FORMAT + BASIC_CFLAGS += -DNO_C99_FORMAT +endif +ifdef SNPRINTF_RETURNS_BOGUS + COMPAT_CFLAGS += -DSNPRINTF_RETURNS_BOGUS + COMPAT_OBJS += compat/snprintf.o +endif +ifdef FREAD_READS_DIRECTORIES + COMPAT_CFLAGS += -DFREAD_READS_DIRECTORIES + COMPAT_OBJS += compat/fopen.o +endif +ifdef NO_SYMLINK_HEAD + BASIC_CFLAGS += -DNO_SYMLINK_HEAD +endif +ifdef NO_STRCASESTR + COMPAT_CFLAGS += -DNO_STRCASESTR + COMPAT_OBJS += compat/strcasestr.o +endif +ifdef NO_STRLCPY + COMPAT_CFLAGS += -DNO_STRLCPY + COMPAT_OBJS += compat/strlcpy.o +endif +ifdef NO_STRTOUMAX + COMPAT_CFLAGS += -DNO_STRTOUMAX + COMPAT_OBJS += compat/strtoumax.o +endif +ifdef NO_STRTOULL + COMPAT_CFLAGS += -DNO_STRTOULL +endif +ifdef NO_SETENV + COMPAT_CFLAGS += -DNO_SETENV + COMPAT_OBJS += compat/setenv.o +endif +ifdef NO_MKDTEMP + COMPAT_CFLAGS += -DNO_MKDTEMP + COMPAT_OBJS += compat/mkdtemp.o +endif +ifdef NO_UNSETENV + COMPAT_CFLAGS += -DNO_UNSETENV + COMPAT_OBJS += compat/unsetenv.o +endif +ifdef NO_SYS_SELECT_H + 
BASIC_CFLAGS += -DNO_SYS_SELECT_H +endif +ifdef NO_MMAP + COMPAT_CFLAGS += -DNO_MMAP + COMPAT_OBJS += compat/mmap.o +else + ifdef USE_WIN32_MMAP + COMPAT_CFLAGS += -DUSE_WIN32_MMAP + COMPAT_OBJS += compat/win32mmap.o + endif +endif +ifdef NO_PREAD + COMPAT_CFLAGS += -DNO_PREAD + COMPAT_OBJS += compat/pread.o +endif +ifdef NO_FAST_WORKING_DIRECTORY + BASIC_CFLAGS += -DNO_FAST_WORKING_DIRECTORY +endif +ifdef NO_TRUSTABLE_FILEMODE + BASIC_CFLAGS += -DNO_TRUSTABLE_FILEMODE +endif +ifdef NO_IPV6 + BASIC_CFLAGS += -DNO_IPV6 +endif +ifdef NO_UINTMAX_T + BASIC_CFLAGS += -Duintmax_t=uint32_t +endif +ifdef NO_SOCKADDR_STORAGE +ifdef NO_IPV6 + BASIC_CFLAGS += -Dsockaddr_storage=sockaddr_in +else + BASIC_CFLAGS += -Dsockaddr_storage=sockaddr_in6 +endif +endif +ifdef NO_INET_NTOP + LIB_OBJS += compat/inet_ntop.o +endif +ifdef NO_INET_PTON + LIB_OBJS += compat/inet_pton.o +endif + +ifdef NO_ICONV + BASIC_CFLAGS += -DNO_ICONV +endif + +ifdef OLD_ICONV + BASIC_CFLAGS += -DOLD_ICONV +endif + +ifdef NO_DEFLATE_BOUND + BASIC_CFLAGS += -DNO_DEFLATE_BOUND +endif + +ifdef PPC_SHA1 + SHA1_HEADER = "ppc/sha1.h" + LIB_OBJS += ppc/sha1.o ppc/sha1ppc.o +else +ifdef ARM_SHA1 + SHA1_HEADER = "arm/sha1.h" + LIB_OBJS += arm/sha1.o arm/sha1_arm.o +else +ifdef MOZILLA_SHA1 + SHA1_HEADER = "mozilla-sha1/sha1.h" + LIB_OBJS += mozilla-sha1/sha1.o +else + SHA1_HEADER = + EXTLIBS += $(LIB_4_CRYPTO) +endif +endif +endif +ifdef NO_PERL_MAKEMAKER + export NO_PERL_MAKEMAKER +endif +ifdef NO_HSTRERROR + COMPAT_CFLAGS += -DNO_HSTRERROR + COMPAT_OBJS += compat/hstrerror.o +endif +ifdef NO_MEMMEM + COMPAT_CFLAGS += -DNO_MEMMEM + COMPAT_OBJS += compat/memmem.o +endif +ifdef INTERNAL_QSORT + COMPAT_CFLAGS += -DINTERNAL_QSORT + COMPAT_OBJS += compat/qsort.o +endif +ifdef RUNTIME_PREFIX + COMPAT_CFLAGS += -DRUNTIME_PREFIX +endif + +ifdef NO_PTHREADS + THREADED_DELTA_SEARCH = + BASIC_CFLAGS += -DNO_PTHREADS +else + EXTLIBS += $(PTHREAD_LIBS) +endif + +ifdef THREADED_DELTA_SEARCH + BASIC_CFLAGS += -DTHREADED_DELTA_SEARCH + LIB_OBJS += thread-utils.o +endif +ifdef DIR_HAS_BSD_GROUP_SEMANTICS + COMPAT_CFLAGS += -DDIR_HAS_BSD_GROUP_SEMANTICS +endif +ifdef NO_EXTERNAL_GREP + BASIC_CFLAGS += -DNO_EXTERNAL_GREP +endif + +ifeq ($(TCLTK_PATH),) +NO_TCLTK=NoThanks +endif + +ifeq ($(PERL_PATH),) +NO_PERL=NoThanks +endif + +QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir +QUIET_SUBDIR1 = + +ifneq ($(findstring $(MAKEFLAGS),w),w) +PRINT_DIR = --no-print-directory +else # "make -w" +NO_SUBDIR = : +endif + +ifneq ($(findstring $(MAKEFLAGS),s),s) +ifndef V + QUIET_CC = @echo ' ' CC $@; + QUIET_AR = @echo ' ' AR $@; + QUIET_LINK = @echo ' ' LINK $@; + QUIET_BUILT_IN = @echo ' ' BUILTIN $@; + QUIET_GEN = @echo ' ' GEN $@; + QUIET_SUBDIR0 = +@subdir= + QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \ + $(MAKE) $(PRINT_DIR) -C $$subdir + export V + export QUIET_GEN + export QUIET_BUILT_IN +endif +endif + +ifdef ASCIIDOC8 + export ASCIIDOC8 +endif + +# Shell quote (do not use $(call) to accommodate ancient setups); + +SHA1_HEADER_SQ = $(subst ','\'',$(SHA1_HEADER)) +ETC_GITCONFIG_SQ = $(subst ','\'',$(ETC_GITCONFIG)) + +DESTDIR_SQ = $(subst ','\'',$(DESTDIR)) +bindir_SQ = $(subst ','\'',$(bindir)) +bindir_relative_SQ = $(subst ','\'',$(bindir_relative)) +mandir_SQ = $(subst ','\'',$(mandir)) +infodir_SQ = $(subst ','\'',$(infodir)) +gitexecdir_SQ = $(subst ','\'',$(gitexecdir)) +template_dir_SQ = $(subst ','\'',$(template_dir)) +htmldir_SQ = $(subst ','\'',$(htmldir)) +prefix_SQ = $(subst ','\'',$(prefix)) + +SHELL_PATH_SQ = $(subst 
','\'',$(SHELL_PATH)) +PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH)) +TCLTK_PATH_SQ = $(subst ','\'',$(TCLTK_PATH)) + +LIBS = $(GITLIBS) $(EXTLIBS) + +BASIC_CFLAGS += -DSHA1_HEADER='$(SHA1_HEADER_SQ)' \ + $(COMPAT_CFLAGS) +LIB_OBJS += $(COMPAT_OBJS) + +ALL_CFLAGS += $(BASIC_CFLAGS) +ALL_LDFLAGS += $(BASIC_LDFLAGS) + +export TAR INSTALL DESTDIR SHELL_PATH + + +### Build rules + +SHELL = $(SHELL_PATH) + +all:: shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) GIT-BUILD-OPTIONS +ifneq (,$X) + $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) git$X)), test '$p' -ef '$p$X' || $(RM) '$p';) +endif + +all:: +ifndef NO_TCLTK + $(QUIET_SUBDIR0)git-gui $(QUIET_SUBDIR1) gitexecdir='$(gitexec_instdir_SQ)' all + $(QUIET_SUBDIR0)gitk-git $(QUIET_SUBDIR1) all +endif +ifndef NO_PERL + $(QUIET_SUBDIR0)perl $(QUIET_SUBDIR1) PERL_PATH='$(PERL_PATH_SQ)' prefix='$(prefix_SQ)' all +endif + $(QUIET_SUBDIR0)templates $(QUIET_SUBDIR1) + +please_set_SHELL_PATH_to_a_more_modern_shell: + @$$(:) + +shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell + +strip: $(PROGRAMS) git$X + $(STRIP) $(STRIP_OPTS) $(PROGRAMS) git$X + +git.o: git.c common-cmds.h GIT-CFLAGS + $(QUIET_CC)$(CC) -DGIT_VERSION='"$(GIT_VERSION)"' \ + '-DGIT_HTML_PATH="$(htmldir_SQ)"' \ + $(ALL_CFLAGS) -c $(filter %.c,$^) + +git$X: git.o $(BUILTIN_OBJS) $(GITLIBS) + $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ git.o \ + $(BUILTIN_OBJS) $(ALL_LDFLAGS) $(LIBS) + +builtin-help.o: builtin-help.c common-cmds.h GIT-CFLAGS + $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \ + '-DGIT_HTML_PATH="$(htmldir_SQ)"' \ + '-DGIT_MAN_PATH="$(mandir_SQ)"' \ + '-DGIT_INFO_PATH="$(infodir_SQ)"' $< + +$(BUILT_INS): git$X + $(QUIET_BUILT_IN)$(RM) $@ && \ + ln git$X $@ 2>/dev/null || \ + ln -s git$X $@ 2>/dev/null || \ + cp git$X $@ + +common-cmds.h: ./generate-cmdlist.sh command-list.txt + +common-cmds.h: $(wildcard Documentation/git-*.txt) + $(QUIET_GEN)./generate-cmdlist.sh > $@+ && mv $@+ $@ + +$(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh + $(QUIET_GEN)$(RM) $@ $@+ && \ + sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \ + -e 's|@SHELL_PATH@|$(SHELL_PATH_SQ)|' \ + -e 's|@@PERL@@|$(PERL_PATH_SQ)|g' \ + -e 's/@@GIT_VERSION@@/$(GIT_VERSION)/g' \ + -e 's/@@NO_CURL@@/$(NO_CURL)/g' \ + $@.sh >$@+ && \ + chmod +x $@+ && \ + mv $@+ $@ + +ifndef NO_PERL +$(patsubst %.perl,%,$(SCRIPT_PERL)): perl/perl.mak + +perl/perl.mak: GIT-CFLAGS perl/Makefile perl/Makefile.PL + $(QUIET_SUBDIR0)perl $(QUIET_SUBDIR1) PERL_PATH='$(PERL_PATH_SQ)' prefix='$(prefix_SQ)' $(@F) + +$(patsubst %.perl,%,$(SCRIPT_PERL)): % : %.perl + $(QUIET_GEN)$(RM) $@ $@+ && \ + INSTLIBDIR=`MAKEFLAGS= $(MAKE) -C perl -s --no-print-directory instlibdir` && \ + sed -e '1{' \ + -e ' s|#!.*perl|#!$(PERL_PATH_SQ)|' \ + -e ' h' \ + -e ' s=.*=use lib (split(/:/, $$ENV{GITPERLLIB} || "@@INSTLIBDIR@@"));=' \ + -e ' H' \ + -e ' x' \ + -e '}' \ + -e 's|@@INSTLIBDIR@@|'"$$INSTLIBDIR"'|g' \ + -e 's/@@GIT_VERSION@@/$(GIT_VERSION)/g' \ + $@.perl >$@+ && \ + chmod +x $@+ && \ + mv $@+ $@ + +gitweb/gitweb.cgi: gitweb/gitweb.perl + $(QUIET_GEN)$(RM) $@ $@+ && \ + sed -e '1s|#!.*perl|#!$(PERL_PATH_SQ)|' \ + -e 's|++GIT_VERSION++|$(GIT_VERSION)|g' \ + -e 's|++GIT_BINDIR++|$(bindir)|g' \ + -e 's|++GITWEB_CONFIG++|$(GITWEB_CONFIG)|g' \ + -e 's|++GITWEB_CONFIG_SYSTEM++|$(GITWEB_CONFIG_SYSTEM)|g' \ + -e 's|++GITWEB_HOME_LINK_STR++|$(GITWEB_HOME_LINK_STR)|g' \ + -e 's|++GITWEB_SITENAME++|$(GITWEB_SITENAME)|g' \ + -e 's|++GITWEB_PROJECTROOT++|$(GITWEB_PROJECTROOT)|g' \ + -e 
's|"++GITWEB_PROJECT_MAXDEPTH++"|$(GITWEB_PROJECT_MAXDEPTH)|g' \ + -e 's|++GITWEB_EXPORT_OK++|$(GITWEB_EXPORT_OK)|g' \ + -e 's|++GITWEB_STRICT_EXPORT++|$(GITWEB_STRICT_EXPORT)|g' \ + -e 's|++GITWEB_BASE_URL++|$(GITWEB_BASE_URL)|g' \ + -e 's|++GITWEB_LIST++|$(GITWEB_LIST)|g' \ + -e 's|++GITWEB_HOMETEXT++|$(GITWEB_HOMETEXT)|g' \ + -e 's|++GITWEB_CSS++|$(GITWEB_CSS)|g' \ + -e 's|++GITWEB_LOGO++|$(GITWEB_LOGO)|g' \ + -e 's|++GITWEB_FAVICON++|$(GITWEB_FAVICON)|g' \ + -e 's|++GITWEB_SITE_HEADER++|$(GITWEB_SITE_HEADER)|g' \ + -e 's|++GITWEB_SITE_FOOTER++|$(GITWEB_SITE_FOOTER)|g' \ + $< >$@+ && \ + chmod +x $@+ && \ + mv $@+ $@ + +git-instaweb: git-instaweb.sh gitweb/gitweb.cgi gitweb/gitweb.css + $(QUIET_GEN)$(RM) $@ $@+ && \ + sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \ + -e 's/@@GIT_VERSION@@/$(GIT_VERSION)/g' \ + -e 's/@@NO_CURL@@/$(NO_CURL)/g' \ + -e '/@@GITWEB_CGI@@/r gitweb/gitweb.cgi' \ + -e '/@@GITWEB_CGI@@/d' \ + -e '/@@GITWEB_CSS@@/r gitweb/gitweb.css' \ + -e '/@@GITWEB_CSS@@/d' \ + -e 's|@@PERL@@|$(PERL_PATH_SQ)|g' \ + $@.sh > $@+ && \ + chmod +x $@+ && \ + mv $@+ $@ +else # NO_PERL +$(patsubst %.perl,%,$(SCRIPT_PERL)) git-instaweb: % : unimplemented.sh + $(QUIET_GEN)$(RM) $@ $@+ && \ + sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \ + -e 's|@@REASON@@|NO_PERL=$(NO_PERL)|g' \ + unimplemented.sh >$@+ && \ + chmod +x $@+ && \ + mv $@+ $@ +endif # NO_PERL + +configure: configure.ac + $(QUIET_GEN)$(RM) $@ $<+ && \ + sed -e 's/@@GIT_VERSION@@/$(GIT_VERSION)/g' \ + $< > $<+ && \ + autoconf -o $@ $<+ && \ + $(RM) $<+ + +# These can record GIT_VERSION +git.o git.spec \ + $(patsubst %.sh,%,$(SCRIPT_SH)) \ + $(patsubst %.perl,%,$(SCRIPT_PERL)) \ + : GIT-VERSION-FILE + +%.o: %.c GIT-CFLAGS + $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) $< +%.s: %.c GIT-CFLAGS + $(QUIET_CC)$(CC) -S $(ALL_CFLAGS) $< +%.o: %.S + $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) $< + +exec_cmd.o: exec_cmd.c GIT-CFLAGS + $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \ + '-DGIT_EXEC_PATH="$(gitexecdir_SQ)"' \ + '-DBINDIR="$(bindir_relative_SQ)"' \ + '-DPREFIX="$(prefix_SQ)"' \ + $< + +builtin-init-db.o: builtin-init-db.c GIT-CFLAGS + $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DDEFAULT_GIT_TEMPLATE_DIR='"$(template_dir_SQ)"' $< + +config.o: config.c GIT-CFLAGS + $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DETC_GITCONFIG='"$(ETC_GITCONFIG_SQ)"' $< + +http.o: http.c GIT-CFLAGS + $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DGIT_USER_AGENT='"git/$(GIT_VERSION)"' $< + +ifdef NO_EXPAT +http-walker.o: http-walker.c http.h GIT-CFLAGS + $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DNO_EXPAT $< +endif + +git-%$X: %.o $(GITLIBS) + $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) + +git-imap-send$X: imap-send.o $(GITLIBS) + $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \ + $(LIBS) $(OPENSSL_LINK) $(OPENSSL_LIBSSL) + +http.o http-walker.o http-push.o transport.o: http.h + +git-http-push$X: revision.o http.o http-push.o $(GITLIBS) + $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \ + $(LIBS) $(CURL_LIBCURL) $(EXPAT_LIBEXPAT) + +$(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H) +$(patsubst git-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h) +builtin-revert.o wt-status.o: wt-status.h + +$(LIB_FILE): $(LIB_OBJS) + $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS) + +XDIFF_OBJS=xdiff/xdiffi.o xdiff/xprepare.o xdiff/xutils.o xdiff/xemit.o \ + xdiff/xmerge.o xdiff/xpatience.o +$(XDIFF_OBJS): xdiff/xinclude.h xdiff/xmacros.h xdiff/xdiff.h xdiff/xtypes.h \ + xdiff/xutils.h xdiff/xprepare.h xdiff/xdiffi.h 
xdiff/xemit.h + +$(XDIFF_LIB): $(XDIFF_OBJS) + $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(XDIFF_OBJS) + + +doc: + $(MAKE) -C Documentation all + +man: + $(MAKE) -C Documentation man + +html: + $(MAKE) -C Documentation html + +info: + $(MAKE) -C Documentation info + +pdf: + $(MAKE) -C Documentation pdf + +TAGS: + $(RM) TAGS + $(FIND) . -name '*.[hcS]' -print | xargs etags -a + +tags: + $(RM) tags + $(FIND) . -name '*.[hcS]' -print | xargs ctags -a + +cscope: + $(RM) cscope* + $(FIND) . -name '*.[hcS]' -print | xargs cscope -b + +### Detect prefix changes +TRACK_CFLAGS = $(subst ','\'',$(ALL_CFLAGS)):\ + $(bindir_SQ):$(gitexecdir_SQ):$(template_dir_SQ):$(prefix_SQ) + +GIT-CFLAGS: .FORCE-GIT-CFLAGS + @FLAGS='$(TRACK_CFLAGS)'; \ + if test x"$$FLAGS" != x"`cat GIT-CFLAGS 2>/dev/null`" ; then \ + echo 1>&2 " * new build flags or prefix"; \ + echo "$$FLAGS" >GIT-CFLAGS; \ + fi + +# We need to apply sq twice, once to protect from the shell +# that runs GIT-BUILD-OPTIONS, and then again to protect it +# and the first level quoting from the shell that runs "echo". +GIT-BUILD-OPTIONS: .FORCE-GIT-BUILD-OPTIONS + @echo SHELL_PATH=\''$(subst ','\'',$(SHELL_PATH_SQ))'\' >$@ + @echo TAR=\''$(subst ','\'',$(subst ','\'',$(TAR)))'\' >>$@ + @echo NO_CURL=\''$(subst ','\'',$(subst ','\'',$(NO_CURL)))'\' >>$@ + @echo NO_PERL=\''$(subst ','\'',$(subst ','\'',$(NO_PERL)))'\' >>$@ + +### Detect Tck/Tk interpreter path changes +ifndef NO_TCLTK +TRACK_VARS = $(subst ','\'',-DTCLTK_PATH='$(TCLTK_PATH_SQ)') + +GIT-GUI-VARS: .FORCE-GIT-GUI-VARS + @VARS='$(TRACK_VARS)'; \ + if test x"$$VARS" != x"`cat $@ 2>/dev/null`" ; then \ + echo 1>&2 " * new Tcl/Tk interpreter location"; \ + echo "$$VARS" >$@; \ + fi + +.PHONY: .FORCE-GIT-GUI-VARS +endif + +### Testing rules + +TEST_PROGRAMS += test-chmtime$X +TEST_PROGRAMS += test-ctype$X +TEST_PROGRAMS += test-date$X +TEST_PROGRAMS += test-delta$X +TEST_PROGRAMS += test-dump-cache-tree$X +TEST_PROGRAMS += test-genrandom$X +TEST_PROGRAMS += test-match-trees$X +TEST_PROGRAMS += test-parse-options$X +TEST_PROGRAMS += test-path-utils$X +TEST_PROGRAMS += test-sha1$X +TEST_PROGRAMS += test-sigchain$X + +all:: $(TEST_PROGRAMS) + +# GNU make supports exporting all variables by "export" without parameters. +# However, the environment gets quite big, and some programs have problems +# with that. 
+ +export NO_SVN_TESTS + +test: all + $(MAKE) -C t/ all + +test-ctype$X: ctype.o + +test-date$X: date.o ctype.o + +test-delta$X: diff-delta.o patch-delta.o + +test-parse-options$X: parse-options.o + +.PRECIOUS: $(patsubst test-%$X,test-%.o,$(TEST_PROGRAMS)) + +test-%$X: test-%.o $(GITLIBS) + $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) + +check-sha1:: test-sha1$X + ./test-sha1.sh + +check: common-cmds.h + if sparse; \ + then \ + for i in *.c; \ + do \ + sparse $(ALL_CFLAGS) $(SPARSE_FLAGS) $$i || exit; \ + done; \ + else \ + echo 2>&1 "Did you mean 'make test'?"; \ + exit 1; \ + fi + +remove-dashes: + ./fixup-builtins $(BUILT_INS) $(PROGRAMS) $(SCRIPTS) + +### Installation rules + +ifneq ($(filter /%,$(firstword $(template_dir))),) +template_instdir = $(template_dir) +else +template_instdir = $(prefix)/$(template_dir) +endif +export template_instdir + +ifneq ($(filter /%,$(firstword $(gitexecdir))),) +gitexec_instdir = $(gitexecdir) +else +gitexec_instdir = $(prefix)/$(gitexecdir) +endif +gitexec_instdir_SQ = $(subst ','\'',$(gitexec_instdir)) +export gitexec_instdir + +install: all + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' + $(INSTALL) $(ALL_PROGRAMS) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' + $(INSTALL) git$X git-upload-pack$X git-receive-pack$X git-upload-archive$X git-shell$X git-cvsserver '$(DESTDIR_SQ)$(bindir_SQ)' + $(MAKE) -C templates DESTDIR='$(DESTDIR_SQ)' install + $(MAKE) -C perl prefix='$(prefix_SQ)' DESTDIR='$(DESTDIR_SQ)' install +ifndef NO_TCLTK + $(MAKE) -C gitk-git install + $(MAKE) -C git-gui gitexecdir='$(gitexec_instdir_SQ)' install +endif +ifneq (,$X) + $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) git$X)), $(RM) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)/$p';) +endif + bindir=$$(cd '$(DESTDIR_SQ)$(bindir_SQ)' && pwd) && \ + execdir=$$(cd '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' && pwd) && \ + { $(RM) "$$execdir/git-add$X" && \ + ln "$$bindir/git$X" "$$execdir/git-add$X" 2>/dev/null || \ + cp "$$bindir/git$X" "$$execdir/git-add$X"; } && \ + { for p in $(filter-out git-add$X,$(BUILT_INS)); do \ + $(RM) "$$execdir/$$p" && \ + ln "$$execdir/git-add$X" "$$execdir/$$p" 2>/dev/null || \ + ln -s "git-add$X" "$$execdir/$$p" 2>/dev/null || \ + cp "$$execdir/git-add$X" "$$execdir/$$p" || exit; \ + done } && \ + ./check_bindir "z$$bindir" "z$$execdir" "$$bindir/git-add$X" + +install-doc: + $(MAKE) -C Documentation install + +install-man: + $(MAKE) -C Documentation install-man + +install-html: + $(MAKE) -C Documentation install-html + +install-info: + $(MAKE) -C Documentation install-info + +install-pdf: + $(MAKE) -C Documentation install-pdf + +quick-install-doc: + $(MAKE) -C Documentation quick-install + +quick-install-man: + $(MAKE) -C Documentation quick-install-man + +quick-install-html: + $(MAKE) -C Documentation quick-install-html + + + +### Maintainer's dist rules + +git.spec: git.spec.in + sed -e 's/@@VERSION@@/$(GIT_VERSION)/g' < $< > $@+ + mv $@+ $@ + +GIT_TARNAME=git-$(GIT_VERSION) +dist: git.spec git-archive$(X) configure + ./git-archive --format=tar \ + --prefix=$(GIT_TARNAME)/ HEAD^{tree} > $(GIT_TARNAME).tar + @mkdir -p $(GIT_TARNAME) + @cp git.spec configure $(GIT_TARNAME) + @echo $(GIT_VERSION) > $(GIT_TARNAME)/version + @$(MAKE) -C git-gui TARDIR=../$(GIT_TARNAME)/git-gui dist-version + $(TAR) rf $(GIT_TARNAME).tar \ + $(GIT_TARNAME)/git.spec \ + $(GIT_TARNAME)/configure \ + $(GIT_TARNAME)/version \ + $(GIT_TARNAME)/git-gui/version + @$(RM) -r 
$(GIT_TARNAME) + gzip -f -9 $(GIT_TARNAME).tar + +rpm: dist + $(RPMBUILD) -ta $(GIT_TARNAME).tar.gz + +htmldocs = git-htmldocs-$(GIT_VERSION) +manpages = git-manpages-$(GIT_VERSION) +dist-doc: + $(RM) -r .doc-tmp-dir + mkdir .doc-tmp-dir + $(MAKE) -C Documentation WEBDOC_DEST=../.doc-tmp-dir install-webdoc + cd .doc-tmp-dir && $(TAR) cf ../$(htmldocs).tar . + gzip -n -9 -f $(htmldocs).tar + : + $(RM) -r .doc-tmp-dir + mkdir -p .doc-tmp-dir/man1 .doc-tmp-dir/man5 .doc-tmp-dir/man7 + $(MAKE) -C Documentation DESTDIR=./ \ + man1dir=../.doc-tmp-dir/man1 \ + man5dir=../.doc-tmp-dir/man5 \ + man7dir=../.doc-tmp-dir/man7 \ + install + cd .doc-tmp-dir && $(TAR) cf ../$(manpages).tar . + gzip -n -9 -f $(manpages).tar + $(RM) -r .doc-tmp-dir + +### Cleaning rules + +distclean: clean + $(RM) configure clean: - rm $(BINS) + $(RM) *.o mozilla-sha1/*.o arm/*.o ppc/*.o compat/*.o xdiff/*.o \ + $(LIB_FILE) $(XDIFF_LIB) + $(RM) $(ALL_PROGRAMS) $(BUILT_INS) git$X + $(RM) $(TEST_PROGRAMS) + $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h TAGS tags cscope* + $(RM) -r autom4te.cache + $(RM) config.log config.mak.autogen config.mak.append config.status config.cache + $(RM) -r $(GIT_TARNAME) .doc-tmp-dir + $(RM) $(GIT_TARNAME).tar.gz git-core_$(GIT_VERSION)-*.tar.gz + $(RM) $(htmldocs).tar.gz $(manpages).tar.gz + $(MAKE) -C Documentation/ clean +ifndef NO_PERL + $(RM) gitweb/gitweb.cgi + $(MAKE) -C perl clean +endif + $(MAKE) -C templates/ clean + $(MAKE) -C t/ clean +ifndef NO_TCLTK + $(MAKE) -C gitk-git clean + $(MAKE) -C git-gui clean +endif + $(RM) GIT-VERSION-FILE GIT-CFLAGS GIT-GUI-VARS GIT-BUILD-OPTIONS + +.PHONY: all install clean strip +.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell +.PHONY: .FORCE-GIT-VERSION-FILE TAGS tags cscope .FORCE-GIT-CFLAGS +.PHONY: .FORCE-GIT-BUILD-OPTIONS + +### Check documentation +# +check-docs:: + @(for v in $(ALL_PROGRAMS) $(BUILT_INS) git gitk; \ + do \ + case "$$v" in \ + git-merge-octopus | git-merge-ours | git-merge-recursive | \ + git-merge-resolve | git-merge-subtree | \ + git-fsck-objects | git-init-db | \ + git-?*--?* ) continue ;; \ + esac ; \ + test -f "Documentation/$$v.txt" || \ + echo "no doc: $$v"; \ + sed -e '/^#/d' command-list.txt | \ + grep -q "^$$v[ ]" || \ + case "$$v" in \ + git) ;; \ + *) echo "no link: $$v";; \ + esac ; \ + done; \ + ( \ + sed -e '/^#/d' \ + -e 's/[ ].*//' \ + -e 's/^/listed /' command-list.txt; \ + ls -1 Documentation/git*txt | \ + sed -e 's|Documentation/|documented |' \ + -e 's/\.txt//'; \ + ) | while read how cmd; \ + do \ + case "$$how,$$cmd" in \ + *,git-citool | \ + *,git-gui | \ + *,git-help | \ + documented,gitattributes | \ + documented,gitignore | \ + documented,gitmodules | \ + documented,gitcli | \ + documented,git-tools | \ + documented,gitcore-tutorial | \ + documented,gitcvs-migration | \ + documented,gitdiffcore | \ + documented,gitglossary | \ + documented,githooks | \ + documented,gitrepository-layout | \ + documented,gittutorial | \ + documented,gittutorial-2 | \ + sentinel,not,matching,is,ok ) continue ;; \ + esac; \ + case " $(ALL_PROGRAMS) $(BUILT_INS) git gitk " in \ + *" $$cmd "*) ;; \ + *) echo "removed but $$how: $$cmd" ;; \ + esac; \ + done ) | sort + +### Make sure built-ins do not have dups and listed in git.c +# +check-builtins:: + ./check-builtins.sh + +### Test suite coverage testing +# +.PHONY: coverage coverage-clean coverage-build coverage-report + +coverage: + $(MAKE) coverage-build + $(MAKE) coverage-report + +coverage-clean: + rm -f *.gcda *.gcno + 
+COVERAGE_CFLAGS = $(CFLAGS) -O0 -ftest-coverage -fprofile-arcs +COVERAGE_LDFLAGS = $(CFLAGS) -O0 -lgcov + +coverage-build: coverage-clean + $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" all + $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" \ + -j1 test + +coverage-report: + gcov -b *.c + grep '^function.*called 0 ' *.c.gcov \ + | sed -e 's/\([^:]*\)\.gcov: *function \([^ ]*\) called.*/\1: \2/' \ + | tee coverage-untested-functions -- cgit v1.2.3 From 0780060124011b94af55830939c86cc0916be0f5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 20 Apr 2009 15:00:56 +0200 Subject: perf_counter tools: add in basic glue from Git First very raw version at having a central 'perf' command and a list of subcommands: perf top perf stat perf record perf report ... This is done by picking up Git's collection of utility functions, and hacking them to build fine in this new environment. Signed-off-by: Ingo Molnar --- Documentation/perf_counter/.gitignore | 179 +++ Documentation/perf_counter/Makefile | 1037 +++-------------- Documentation/perf_counter/PERF-BUILD-OPTIONS | 4 + Documentation/perf_counter/PERF-CFLAGS | 1 + Documentation/perf_counter/PERF-VERSION-FILE | 1 + Documentation/perf_counter/PERF-VERSION-GEN | 42 + Documentation/perf_counter/abspath.c | 117 ++ Documentation/perf_counter/alias.c | 77 ++ Documentation/perf_counter/builtin-help.c | 463 ++++++++ Documentation/perf_counter/builtin-top.c | 1411 ++++++++++++++++++++++++ Documentation/perf_counter/builtin.h | 18 + Documentation/perf_counter/cache.h | 97 ++ Documentation/perf_counter/command-list.txt | 4 + Documentation/perf_counter/config.c | 966 ++++++++++++++++ Documentation/perf_counter/ctype.c | 26 + Documentation/perf_counter/exec_cmd.c | 165 +++ Documentation/perf_counter/exec_cmd.h | 13 + Documentation/perf_counter/generate-cmdlist.sh | 24 + Documentation/perf_counter/help.c | 366 ++++++ Documentation/perf_counter/help.h | 29 + Documentation/perf_counter/levenshtein.c | 84 ++ Documentation/perf_counter/levenshtein.h | 8 + Documentation/perf_counter/parse-options.c | 495 +++++++++ Documentation/perf_counter/parse-options.h | 172 +++ Documentation/perf_counter/path.c | 392 +++++++ Documentation/perf_counter/perf.c | 411 +++++++ Documentation/perf_counter/quote.c | 478 ++++++++ Documentation/perf_counter/quote.h | 68 ++ Documentation/perf_counter/run-command.c | 395 +++++++ Documentation/perf_counter/run-command.h | 93 ++ Documentation/perf_counter/strbuf.c | 359 ++++++ Documentation/perf_counter/strbuf.h | 137 +++ Documentation/perf_counter/usage.c | 80 ++ Documentation/perf_counter/util.h | 394 +++++++ Documentation/perf_counter/wrapper.c | 206 ++++ 35 files changed, 7953 insertions(+), 859 deletions(-) create mode 100644 Documentation/perf_counter/.gitignore create mode 100644 Documentation/perf_counter/PERF-BUILD-OPTIONS create mode 100644 Documentation/perf_counter/PERF-CFLAGS create mode 100644 Documentation/perf_counter/PERF-VERSION-FILE create mode 100755 Documentation/perf_counter/PERF-VERSION-GEN create mode 100644 Documentation/perf_counter/abspath.c create mode 100644 Documentation/perf_counter/alias.c create mode 100644 Documentation/perf_counter/builtin-help.c create mode 100644 Documentation/perf_counter/builtin-top.c create mode 100644 Documentation/perf_counter/builtin.h create mode 100644 Documentation/perf_counter/cache.h create mode 100644 Documentation/perf_counter/command-list.txt create mode 100644 Documentation/perf_counter/config.c create mode 100644 
Documentation/perf_counter/ctype.c create mode 100644 Documentation/perf_counter/exec_cmd.c create mode 100644 Documentation/perf_counter/exec_cmd.h create mode 100755 Documentation/perf_counter/generate-cmdlist.sh create mode 100644 Documentation/perf_counter/help.c create mode 100644 Documentation/perf_counter/help.h create mode 100644 Documentation/perf_counter/levenshtein.c create mode 100644 Documentation/perf_counter/levenshtein.h create mode 100644 Documentation/perf_counter/parse-options.c create mode 100644 Documentation/perf_counter/parse-options.h create mode 100644 Documentation/perf_counter/path.c create mode 100644 Documentation/perf_counter/perf.c create mode 100644 Documentation/perf_counter/quote.c create mode 100644 Documentation/perf_counter/quote.h create mode 100644 Documentation/perf_counter/run-command.c create mode 100644 Documentation/perf_counter/run-command.h create mode 100644 Documentation/perf_counter/strbuf.c create mode 100644 Documentation/perf_counter/strbuf.h create mode 100644 Documentation/perf_counter/usage.c create mode 100644 Documentation/perf_counter/util.h create mode 100644 Documentation/perf_counter/wrapper.c diff --git a/Documentation/perf_counter/.gitignore b/Documentation/perf_counter/.gitignore new file mode 100644 index 00000000000..41c0b20a76c --- /dev/null +++ b/Documentation/perf_counter/.gitignore @@ -0,0 +1,179 @@ +GIT-BUILD-OPTIONS +GIT-CFLAGS +GIT-GUI-VARS +GIT-VERSION-FILE +git +git-add +git-add--interactive +git-am +git-annotate +git-apply +git-archimport +git-archive +git-bisect +git-bisect--helper +git-blame +git-branch +git-bundle +git-cat-file +git-check-attr +git-check-ref-format +git-checkout +git-checkout-index +git-cherry +git-cherry-pick +git-clean +git-clone +git-commit +git-commit-tree +git-config +git-count-objects +git-cvsexportcommit +git-cvsimport +git-cvsserver +git-daemon +git-diff +git-diff-files +git-diff-index +git-diff-tree +git-difftool +git-difftool--helper +git-describe +git-fast-export +git-fast-import +git-fetch +git-fetch--tool +git-fetch-pack +git-filter-branch +git-fmt-merge-msg +git-for-each-ref +git-format-patch +git-fsck +git-fsck-objects +git-gc +git-get-tar-commit-id +git-grep +git-hash-object +git-help +git-http-fetch +git-http-push +git-imap-send +git-index-pack +git-init +git-init-db +git-instaweb +git-log +git-lost-found +git-ls-files +git-ls-remote +git-ls-tree +git-mailinfo +git-mailsplit +git-merge +git-merge-base +git-merge-index +git-merge-file +git-merge-tree +git-merge-octopus +git-merge-one-file +git-merge-ours +git-merge-recursive +git-merge-resolve +git-merge-subtree +git-mergetool +git-mergetool--lib +git-mktag +git-mktree +git-name-rev +git-mv +git-pack-redundant +git-pack-objects +git-pack-refs +git-parse-remote +git-patch-id +git-peek-remote +git-prune +git-prune-packed +git-pull +git-push +git-quiltimport +git-read-tree +git-rebase +git-rebase--interactive +git-receive-pack +git-reflog +git-relink +git-remote +git-repack +git-repo-config +git-request-pull +git-rerere +git-reset +git-rev-list +git-rev-parse +git-revert +git-rm +git-send-email +git-send-pack +git-sh-setup +git-shell +git-shortlog +git-show +git-show-branch +git-show-index +git-show-ref +git-stage +git-stash +git-status +git-stripspace +git-submodule +git-svn +git-symbolic-ref +git-tag +git-tar-tree +git-unpack-file +git-unpack-objects +git-update-index +git-update-ref +git-update-server-info +git-upload-archive +git-upload-pack +git-var +git-verify-pack +git-verify-tag +git-web--browse +git-whatchanged 
+git-write-tree +git-core-*/?* +gitk-wish +gitweb/gitweb.cgi +test-chmtime +test-ctype +test-date +test-delta +test-dump-cache-tree +test-genrandom +test-match-trees +test-parse-options +test-path-utils +test-sha1 +test-sigchain +common-cmds.h +*.tar.gz +*.dsc +*.deb +git.spec +*.exe +*.[aos] +*.py[co] +config.mak +autom4te.cache +config.cache +config.log +config.status +config.mak.autogen +config.mak.append +configure +tags +TAGS +cscope* diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 6e0838b03ad..11809b943fc 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -13,16 +13,9 @@ all:: # Define NO_OPENSSL environment variable if you do not have OpenSSL. # This also implies MOZILLA_SHA1. # -# Define NO_CURL if you do not have libcurl installed. git-http-pull and -# git-http-push are not built, and you cannot use http:// and https:// -# transports. -# # Define CURLDIR=/foo/bar if your curl header and library files are in # /foo/bar/include and /foo/bar/lib directories. # -# Define NO_EXPAT if you do not have expat installed. git-http-push is -# not built, and you cannot push using http:// and https:// transports. -# # Define EXPATDIR=/foo/bar if your expat header and library files are in # /foo/bar/include and /foo/bar/lib directories. # @@ -40,8 +33,6 @@ all:: # # Define NO_MEMMEM if you don't have memmem. # -# Define NO_STRLCPY if you don't have strlcpy. -# # Define NO_STRTOUMAX if you don't have strtoumax in the C library. # If your compiler also does not support long long or does not have # strtoull, define NO_STRTOULL. @@ -54,7 +45,7 @@ all:: # # Define NO_SYS_SELECT_H if you don't have sys/select.h. # -# Define NO_SYMLINK_HEAD if you never want .git/HEAD to be a symbolic link. +# Define NO_SYMLINK_HEAD if you never want .perf/HEAD to be a symbolic link. # Enable it on Windows. By default, symrefs are still used. # # Define NO_SVN_TESTS if you want to skip time-consuming SVN interoperability @@ -62,13 +53,13 @@ all:: # but are not needed unless you plan to talk to SVN repos. # # Define NO_FINK if you are building on Darwin/Mac OS X, have Fink -# installed in /sw, but don't want GIT to link against any libraries +# installed in /sw, but don't want PERF to link against any libraries # installed there. If defined you may specify your own (or Fink's) # include directories and library directories by defining CFLAGS # and LDFLAGS appropriately. # # Define NO_DARWIN_PORTS if you are building on Darwin/Mac OS X, -# have DarwinPorts installed in /opt/local, but don't want GIT to +# have DarwinPorts installed in /opt/local, but don't want PERF to # link against any libraries installed there. If defined you may # specify your own (or DarwinPort's) include directories and # library directories by defining CFLAGS and LDFLAGS appropriately. @@ -120,7 +111,7 @@ all:: # that tells runtime paths to dynamic libraries; # "-Wl,-rpath=/path/lib" is used instead. # -# Define USE_NSEC below if you want git to care about sub-second file mtimes +# Define USE_NSEC below if you want perf to care about sub-second file mtimes # and ctimes. Note that you need recent glibc (at least 2.2.4) for this, and # it will BREAK YOUR LOCAL DIFFS! show-diff and anything using it will likely # randomly break unless your underlying filesystem supports those sub-second @@ -132,7 +123,7 @@ all:: # Define NO_NSEC if your "struct stat" does not have "st_ctim.tv_nsec" # available. This automatically turns USE_NSEC off. 
# -# Define USE_STDEV below if you want git to care about the underlying device +# Define USE_STDEV below if you want perf to care about the underlying device # change being considered an inode change from the update-index perspective. # # Define NO_ST_BLOCKS_IN_STRUCT_STAT if your platform does not have st_blocks @@ -150,27 +141,24 @@ all:: # Define NO_TCLTK if you do not want Tcl/Tk GUI. # # The TCL_PATH variable governs the location of the Tcl interpreter -# used to optimize git-gui for your system. Only used if NO_TCLTK +# used to optimize perf-gui for your system. Only used if NO_TCLTK # is not set. Defaults to the bare 'tclsh'. # # The TCLTK_PATH variable governs the location of the Tcl/Tk interpreter. # If not set it defaults to the bare 'wish'. If it is set to the empty # string then NO_TCLTK will be forced (this is used by configure script). # -# Define THREADED_DELTA_SEARCH if you have pthreads and wish to exploit -# parallel delta searching when packing objects. -# # Define INTERNAL_QSORT to use Git's implementation of qsort(), which # is a simplified version of the merge sort used in glibc. This is # recommended if Git triggers O(n^2) behavior in your platform's qsort(). # -# Define NO_EXTERNAL_GREP if you don't want "git grep" to ever call +# Define NO_EXTERNAL_GREP if you don't want "perf grep" to ever call # your external grep (e.g., if your system lacks grep, if its grep is -# broken, or spawning external process is slower than built-in grep git has). +# broken, or spawning external process is slower than built-in grep perf has). -GIT-VERSION-FILE: .FORCE-GIT-VERSION-FILE - @$(SHELL_PATH) ./GIT-VERSION-GEN --include GIT-VERSION-FILE +PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE + @$(SHELL_PATH) ./PERF-VERSION-GEN +-include PERF-VERSION-FILE uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not') uname_M := $(shell sh -c 'uname -m 2>/dev/null || echo not') @@ -182,20 +170,20 @@ uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') # CFLAGS and LDFLAGS are for the users to override from the command line. CFLAGS = -g -O2 -Wall -LDFLAGS = +LDFLAGS = -lpthread -lrt ALL_CFLAGS = $(CFLAGS) ALL_LDFLAGS = $(LDFLAGS) STRIP ?= strip # Among the variables below, these: -# gitexecdir +# perfexecdir # template_dir # mandir # infodir # htmldir -# ETC_GITCONFIG (but not sysconfdir) +# ETC_PERFCONFIG (but not sysconfdir) # can be specified as a relative path some/where/else; -# this is interpreted as relative to $(prefix) and "git" at +# this is interpreted as relative to $(prefix) and "perf" at # runtime figures out where they are based on the path to the executable. # This can help installing the suite in a relocatable way. 
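The relocatable-install comment above is the interesting part of this hunk: perfexecdir and the other directories may be given relative to $(prefix), and the perf binary is then expected to locate them from its own path at run time. A minimal sketch of that idea, assuming a Linux-only /proc/self/exe and a hypothetical resolve_exec_path() helper (the patch's real logic lives in the imported exec_cmd.c, which does more than this):

/*
 * Hedged sketch, not the exec_cmd.c from this patch: how a binary
 * installed as $(prefix)/bin/perf can derive a relative perfexecdir
 * ("libexec/perf-core" here) from its own location at run time.
 * Linux-specific: relies on /proc/self/exe.
 */
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static const char *resolve_exec_path(char *buf, size_t len)
{
	ssize_t n = readlink("/proc/self/exe", buf, len - 1);
	char *slash;

	if (n < 0)
		return NULL;
	buf[n] = '\0';
	slash = strrchr(buf, '/');	/* strip the "perf" component */
	if (slash)
		*slash = '\0';
	slash = strrchr(buf, '/');	/* strip the trailing "/bin" */
	if (slash)
		*slash = '\0';
	strncat(buf, "/libexec/perf-core", len - strlen(buf) - 1);
	return buf;
}

int main(void)
{
	char path[PATH_MAX];

	if (resolve_exec_path(path, sizeof(path)))
		printf("exec dir: %s\n", path);
	return 0;
}

The point of the indirection is that a tree installed under /usr keeps working unchanged when the whole tree is copied under, say, /opt/perf.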
@@ -204,38 +192,20 @@ bindir_relative = bin bindir = $(prefix)/$(bindir_relative) mandir = share/man infodir = share/info -gitexecdir = libexec/git-core +perfexecdir = libexec/perf-core sharedir = $(prefix)/share -template_dir = share/git-core/templates -htmldir = share/doc/git-doc +template_dir = share/perf-core/templates +htmldir = share/doc/perf-doc ifeq ($(prefix),/usr) sysconfdir = /etc -ETC_GITCONFIG = $(sysconfdir)/gitconfig +ETC_PERFCONFIG = $(sysconfdir)/perfconfig else sysconfdir = $(prefix)/etc -ETC_GITCONFIG = etc/gitconfig +ETC_PERFCONFIG = etc/perfconfig endif lib = lib # DESTDIR= -# default configuration for gitweb -GITWEB_CONFIG = gitweb_config.perl -GITWEB_CONFIG_SYSTEM = /etc/gitweb.conf -GITWEB_HOME_LINK_STR = projects -GITWEB_SITENAME = -GITWEB_PROJECTROOT = /pub/git -GITWEB_PROJECT_MAXDEPTH = 2007 -GITWEB_EXPORT_OK = -GITWEB_STRICT_EXPORT = -GITWEB_BASE_URL = -GITWEB_LIST = -GITWEB_HOMETEXT = indextext.html -GITWEB_CSS = gitweb.css -GITWEB_LOGO = git-logo.png -GITWEB_FAVICON = git-favicon.png -GITWEB_SITE_HEADER = -GITWEB_SITE_FOOTER = - export prefix bindir sharedir sysconfdir CC = gcc @@ -277,89 +247,46 @@ SCRIPT_PERL = SCRIPT_SH = TEST_PROGRAMS = -SCRIPT_SH += git-am.sh -SCRIPT_SH += git-bisect.sh -SCRIPT_SH += git-difftool--helper.sh -SCRIPT_SH += git-filter-branch.sh -SCRIPT_SH += git-lost-found.sh -SCRIPT_SH += git-merge-octopus.sh -SCRIPT_SH += git-merge-one-file.sh -SCRIPT_SH += git-merge-resolve.sh -SCRIPT_SH += git-mergetool.sh -SCRIPT_SH += git-mergetool--lib.sh -SCRIPT_SH += git-parse-remote.sh -SCRIPT_SH += git-pull.sh -SCRIPT_SH += git-quiltimport.sh -SCRIPT_SH += git-rebase--interactive.sh -SCRIPT_SH += git-rebase.sh -SCRIPT_SH += git-repack.sh -SCRIPT_SH += git-request-pull.sh -SCRIPT_SH += git-sh-setup.sh -SCRIPT_SH += git-stash.sh -SCRIPT_SH += git-submodule.sh -SCRIPT_SH += git-web--browse.sh - -SCRIPT_PERL += git-add--interactive.perl -SCRIPT_PERL += git-difftool.perl -SCRIPT_PERL += git-archimport.perl -SCRIPT_PERL += git-cvsexportcommit.perl -SCRIPT_PERL += git-cvsimport.perl -SCRIPT_PERL += git-cvsserver.perl -SCRIPT_PERL += git-relink.perl -SCRIPT_PERL += git-send-email.perl -SCRIPT_PERL += git-svn.perl +# +# No scripts right now: +# + +# SCRIPT_SH += perf-am.sh + +# +# No Perl scripts right now: +# + +# SCRIPT_PERL += perf-add--interactive.perl SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) \ - $(patsubst %.perl,%,$(SCRIPT_PERL)) \ - git-instaweb + $(patsubst %.perl,%,$(SCRIPT_PERL)) # Empty... EXTRA_PROGRAMS = -# ... and all the rest that could be moved out of bindir to gitexecdir +# ... and all the rest that could be moved out of bindir to perfexecdir PROGRAMS += $(EXTRA_PROGRAMS) -PROGRAMS += git-fast-import$X -PROGRAMS += git-hash-object$X -PROGRAMS += git-index-pack$X -PROGRAMS += git-merge-index$X -PROGRAMS += git-merge-tree$X -PROGRAMS += git-mktag$X -PROGRAMS += git-mktree$X -PROGRAMS += git-pack-redundant$X -PROGRAMS += git-patch-id$X -PROGRAMS += git-shell$X -PROGRAMS += git-show-index$X -PROGRAMS += git-unpack-file$X -PROGRAMS += git-update-server-info$X -PROGRAMS += git-upload-pack$X -PROGRAMS += git-var$X + +# +# None right now: +# +# PROGRAMS += perf-fast-import$X # List built-in command $C whose implementation cmd_$C() is not in # builtin-$C.o but is linked in as part of some other command. 
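The comment just above, together with the BUILT_INS patsubst in the next lines, describes how every builtin-%.o becomes a perf-% command name while its cmd_%() implementation stays linked into the single perf binary. A minimal sketch of that dispatch pattern, with hypothetical names (the actual table and its fall-through behaviour are in the perf.c this patch adds):

/*
 * Simplified sketch of the built-in dispatch pattern; illustrative
 * only, not the verbatim perf.c from this patch.
 */
#include <stdio.h>
#include <string.h>

struct cmd_struct {
	const char *cmd;
	int (*fn)(int argc, const char **argv);
};

static int cmd_help(int argc, const char **argv)
{
	(void)argc; (void)argv;
	printf("usage: perf [--version] [--help] COMMAND [ARGS]\n");
	return 0;
}

static struct cmd_struct commands[] = {
	{ "help", cmd_help },
	/* { "top", cmd_top }, { "stat", cmd_stat }, ... */
};

static int run_builtin(const char *name, int argc, const char **argv)
{
	size_t i;

	for (i = 0; i < sizeof(commands) / sizeof(commands[0]); i++)
		if (!strcmp(name, commands[i].cmd))
			return commands[i].fn(argc, argv);
	fprintf(stderr, "perf: '%s' is not a perf-command\n", name);
	return 1;
}

int main(int argc, const char **argv)
{
	if (argc < 2)
		return run_builtin("help", 0, NULL);
	return run_builtin(argv[1], argc - 2, argv + 2);
}

The install rules later in this Makefile hard-link each perf-<cmd> back to the perf binary, so 'perf help' and 'perf-help' both land on the same table entry.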
-BUILT_INS += $(patsubst builtin-%.o,git-%$X,$(BUILTIN_OBJS)) - -BUILT_INS += git-cherry$X -BUILT_INS += git-cherry-pick$X -BUILT_INS += git-format-patch$X -BUILT_INS += git-fsck-objects$X -BUILT_INS += git-get-tar-commit-id$X -BUILT_INS += git-init$X -BUILT_INS += git-merge-subtree$X -BUILT_INS += git-peek-remote$X -BUILT_INS += git-repo-config$X -BUILT_INS += git-show$X -BUILT_INS += git-stage$X -BUILT_INS += git-status$X -BUILT_INS += git-whatchanged$X - -# what 'all' will build and 'install' will install, in gitexecdir +BUILT_INS += $(patsubst builtin-%.o,perf-%$X,$(BUILTIN_OBJS)) + +# +# None right now: +# +# BUILT_INS += perf-init $X + +# what 'all' will build and 'install' will install, in perfexecdir ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS) -# what 'all' will build but not install in gitexecdir -OTHER_PROGRAMS = git$X -ifndef NO_PERL -OTHER_PROGRAMS += gitweb/gitweb.cgi -endif +# what 'all' will build but not install in perfexecdir +OTHER_PROGRAMS = perf$X # Set paths to tools early so that they can be used for version tests. ifndef SHELL_PATH @@ -371,250 +298,34 @@ endif export PERL_PATH -LIB_FILE=libgit.a -XDIFF_LIB=xdiff/lib.a - -LIB_H += archive.h -LIB_H += attr.h -LIB_H += blob.h -LIB_H += builtin.h -LIB_H += cache.h -LIB_H += cache-tree.h -LIB_H += commit.h -LIB_H += compat/cygwin.h -LIB_H += compat/mingw.h -LIB_H += csum-file.h -LIB_H += decorate.h -LIB_H += delta.h -LIB_H += diffcore.h -LIB_H += diff.h -LIB_H += dir.h -LIB_H += fsck.h -LIB_H += git-compat-util.h -LIB_H += graph.h -LIB_H += grep.h -LIB_H += hash.h -LIB_H += help.h +LIB_FILE=libperf.a + +LIB_H += ../../include/linux/perf_counter.h LIB_H += levenshtein.h -LIB_H += list-objects.h -LIB_H += ll-merge.h -LIB_H += log-tree.h -LIB_H += mailmap.h -LIB_H += merge-recursive.h -LIB_H += object.h -LIB_H += pack.h -LIB_H += pack-refs.h -LIB_H += pack-revindex.h LIB_H += parse-options.h -LIB_H += patch-ids.h -LIB_H += pkt-line.h -LIB_H += progress.h LIB_H += quote.h -LIB_H += reflog-walk.h -LIB_H += refs.h -LIB_H += remote.h -LIB_H += rerere.h -LIB_H += revision.h -LIB_H += run-command.h -LIB_H += sha1-lookup.h -LIB_H += sideband.h -LIB_H += sigchain.h LIB_H += strbuf.h -LIB_H += string-list.h -LIB_H += tag.h -LIB_H += transport.h -LIB_H += tree.h -LIB_H += tree-walk.h -LIB_H += unpack-trees.h -LIB_H += userdiff.h -LIB_H += utf8.h -LIB_H += wt-status.h +LIB_H += run-command.h LIB_OBJS += abspath.o LIB_OBJS += alias.o -LIB_OBJS += alloc.o -LIB_OBJS += archive.o -LIB_OBJS += archive-tar.o -LIB_OBJS += archive-zip.o -LIB_OBJS += attr.o -LIB_OBJS += base85.o -LIB_OBJS += bisect.o -LIB_OBJS += blob.o -LIB_OBJS += branch.o -LIB_OBJS += bundle.o -LIB_OBJS += cache-tree.o -LIB_OBJS += color.o -LIB_OBJS += combine-diff.o -LIB_OBJS += commit.o LIB_OBJS += config.o -LIB_OBJS += connect.o -LIB_OBJS += convert.o -LIB_OBJS += copy.o -LIB_OBJS += csum-file.o LIB_OBJS += ctype.o -LIB_OBJS += date.o -LIB_OBJS += decorate.o -LIB_OBJS += diffcore-break.o -LIB_OBJS += diffcore-delta.o -LIB_OBJS += diffcore-order.o -LIB_OBJS += diffcore-pickaxe.o -LIB_OBJS += diffcore-rename.o -LIB_OBJS += diff-delta.o -LIB_OBJS += diff-lib.o -LIB_OBJS += diff-no-index.o -LIB_OBJS += diff.o -LIB_OBJS += dir.o -LIB_OBJS += editor.o -LIB_OBJS += entry.o -LIB_OBJS += environment.o LIB_OBJS += exec_cmd.o -LIB_OBJS += fsck.o -LIB_OBJS += graph.o -LIB_OBJS += grep.o -LIB_OBJS += hash.o LIB_OBJS += help.o -LIB_OBJS += ident.o LIB_OBJS += levenshtein.o -LIB_OBJS += list-objects.o -LIB_OBJS += ll-merge.o -LIB_OBJS += lockfile.o -LIB_OBJS += log-tree.o -LIB_OBJS += 
mailmap.o -LIB_OBJS += match-trees.o -LIB_OBJS += merge-file.o -LIB_OBJS += merge-recursive.o -LIB_OBJS += name-hash.o -LIB_OBJS += object.o -LIB_OBJS += pack-check.o -LIB_OBJS += pack-refs.o -LIB_OBJS += pack-revindex.o -LIB_OBJS += pack-write.o -LIB_OBJS += pager.o LIB_OBJS += parse-options.o -LIB_OBJS += patch-delta.o -LIB_OBJS += patch-ids.o LIB_OBJS += path.o -LIB_OBJS += pkt-line.o -LIB_OBJS += preload-index.o -LIB_OBJS += pretty.o -LIB_OBJS += progress.o -LIB_OBJS += quote.o -LIB_OBJS += reachable.o -LIB_OBJS += read-cache.o -LIB_OBJS += reflog-walk.o -LIB_OBJS += refs.o -LIB_OBJS += remote.o -LIB_OBJS += rerere.o -LIB_OBJS += revision.o LIB_OBJS += run-command.o -LIB_OBJS += server-info.o -LIB_OBJS += setup.o -LIB_OBJS += sha1-lookup.o -LIB_OBJS += sha1_file.o -LIB_OBJS += sha1_name.o -LIB_OBJS += shallow.o -LIB_OBJS += sideband.o -LIB_OBJS += sigchain.o +LIB_OBJS += quote.o LIB_OBJS += strbuf.o -LIB_OBJS += string-list.o -LIB_OBJS += symlinks.o -LIB_OBJS += tag.o -LIB_OBJS += trace.o -LIB_OBJS += transport.o -LIB_OBJS += tree-diff.o -LIB_OBJS += tree.o -LIB_OBJS += tree-walk.o -LIB_OBJS += unpack-trees.o LIB_OBJS += usage.o -LIB_OBJS += userdiff.o -LIB_OBJS += utf8.o -LIB_OBJS += walker.o LIB_OBJS += wrapper.o -LIB_OBJS += write_or_die.o -LIB_OBJS += ws.o -LIB_OBJS += wt-status.o -LIB_OBJS += xdiff-interface.o - -BUILTIN_OBJS += builtin-add.o -BUILTIN_OBJS += builtin-annotate.o -BUILTIN_OBJS += builtin-apply.o -BUILTIN_OBJS += builtin-archive.o -BUILTIN_OBJS += builtin-bisect--helper.o -BUILTIN_OBJS += builtin-blame.o -BUILTIN_OBJS += builtin-branch.o -BUILTIN_OBJS += builtin-bundle.o -BUILTIN_OBJS += builtin-cat-file.o -BUILTIN_OBJS += builtin-check-attr.o -BUILTIN_OBJS += builtin-check-ref-format.o -BUILTIN_OBJS += builtin-checkout-index.o -BUILTIN_OBJS += builtin-checkout.o -BUILTIN_OBJS += builtin-clean.o -BUILTIN_OBJS += builtin-clone.o -BUILTIN_OBJS += builtin-commit-tree.o -BUILTIN_OBJS += builtin-commit.o -BUILTIN_OBJS += builtin-config.o -BUILTIN_OBJS += builtin-count-objects.o -BUILTIN_OBJS += builtin-describe.o -BUILTIN_OBJS += builtin-diff-files.o -BUILTIN_OBJS += builtin-diff-index.o -BUILTIN_OBJS += builtin-diff-tree.o -BUILTIN_OBJS += builtin-diff.o -BUILTIN_OBJS += builtin-fast-export.o -BUILTIN_OBJS += builtin-fetch--tool.o -BUILTIN_OBJS += builtin-fetch-pack.o -BUILTIN_OBJS += builtin-fetch.o -BUILTIN_OBJS += builtin-fmt-merge-msg.o -BUILTIN_OBJS += builtin-for-each-ref.o -BUILTIN_OBJS += builtin-fsck.o -BUILTIN_OBJS += builtin-gc.o -BUILTIN_OBJS += builtin-grep.o + BUILTIN_OBJS += builtin-help.o -BUILTIN_OBJS += builtin-init-db.o -BUILTIN_OBJS += builtin-log.o -BUILTIN_OBJS += builtin-ls-files.o -BUILTIN_OBJS += builtin-ls-remote.o -BUILTIN_OBJS += builtin-ls-tree.o -BUILTIN_OBJS += builtin-mailinfo.o -BUILTIN_OBJS += builtin-mailsplit.o -BUILTIN_OBJS += builtin-merge.o -BUILTIN_OBJS += builtin-merge-base.o -BUILTIN_OBJS += builtin-merge-file.o -BUILTIN_OBJS += builtin-merge-ours.o -BUILTIN_OBJS += builtin-merge-recursive.o -BUILTIN_OBJS += builtin-mv.o -BUILTIN_OBJS += builtin-name-rev.o -BUILTIN_OBJS += builtin-pack-objects.o -BUILTIN_OBJS += builtin-pack-refs.o -BUILTIN_OBJS += builtin-prune-packed.o -BUILTIN_OBJS += builtin-prune.o -BUILTIN_OBJS += builtin-push.o -BUILTIN_OBJS += builtin-read-tree.o -BUILTIN_OBJS += builtin-receive-pack.o -BUILTIN_OBJS += builtin-reflog.o -BUILTIN_OBJS += builtin-remote.o -BUILTIN_OBJS += builtin-rerere.o -BUILTIN_OBJS += builtin-reset.o -BUILTIN_OBJS += builtin-rev-list.o -BUILTIN_OBJS += builtin-rev-parse.o -BUILTIN_OBJS 
+= builtin-revert.o -BUILTIN_OBJS += builtin-rm.o -BUILTIN_OBJS += builtin-send-pack.o -BUILTIN_OBJS += builtin-shortlog.o -BUILTIN_OBJS += builtin-show-branch.o -BUILTIN_OBJS += builtin-show-ref.o -BUILTIN_OBJS += builtin-stripspace.o -BUILTIN_OBJS += builtin-symbolic-ref.o -BUILTIN_OBJS += builtin-tag.o -BUILTIN_OBJS += builtin-tar-tree.o -BUILTIN_OBJS += builtin-unpack-objects.o -BUILTIN_OBJS += builtin-update-index.o -BUILTIN_OBJS += builtin-update-ref.o -BUILTIN_OBJS += builtin-upload-archive.o -BUILTIN_OBJS += builtin-verify-pack.o -BUILTIN_OBJS += builtin-verify-tag.o -BUILTIN_OBJS += builtin-write-tree.o - -GITLIBS = $(LIB_FILE) $(XDIFF_LIB) +BUILTIN_OBJS += builtin-top.o + +PERFLIBS = $(LIB_FILE) EXTLIBS = # @@ -625,221 +336,6 @@ EXTLIBS = # because maintaining the nesting to match is a pain. If # we had "elif" things would have been much nicer... -ifeq ($(uname_S),Linux) - NO_STRLCPY = YesPlease - THREADED_DELTA_SEARCH = YesPlease -endif -ifeq ($(uname_S),GNU/kFreeBSD) - NO_STRLCPY = YesPlease - THREADED_DELTA_SEARCH = YesPlease -endif -ifeq ($(uname_S),UnixWare) - CC = cc - NEEDS_SOCKET = YesPlease - NEEDS_NSL = YesPlease - NEEDS_SSL_WITH_CRYPTO = YesPlease - NEEDS_LIBICONV = YesPlease - SHELL_PATH = /usr/local/bin/bash - NO_IPV6 = YesPlease - NO_HSTRERROR = YesPlease - BASIC_CFLAGS += -Kthread - BASIC_CFLAGS += -I/usr/local/include - BASIC_LDFLAGS += -L/usr/local/lib - INSTALL = ginstall - TAR = gtar - NO_STRCASESTR = YesPlease - NO_MEMMEM = YesPlease -endif -ifeq ($(uname_S),SCO_SV) - ifeq ($(uname_R),3.2) - CFLAGS = -O2 - endif - ifeq ($(uname_R),5) - CC = cc - BASIC_CFLAGS += -Kthread - endif - NEEDS_SOCKET = YesPlease - NEEDS_NSL = YesPlease - NEEDS_SSL_WITH_CRYPTO = YesPlease - NEEDS_LIBICONV = YesPlease - SHELL_PATH = /usr/bin/bash - NO_IPV6 = YesPlease - NO_HSTRERROR = YesPlease - BASIC_CFLAGS += -I/usr/local/include - BASIC_LDFLAGS += -L/usr/local/lib - NO_STRCASESTR = YesPlease - NO_MEMMEM = YesPlease - INSTALL = ginstall - TAR = gtar -endif -ifeq ($(uname_S),Darwin) - NEEDS_SSL_WITH_CRYPTO = YesPlease - NEEDS_LIBICONV = YesPlease - ifeq ($(shell expr "$(uname_R)" : '[15678]\.'),2) - OLD_ICONV = UnfortunatelyYes - endif - ifeq ($(shell expr "$(uname_R)" : '[15]\.'),2) - NO_STRLCPY = YesPlease - endif - NO_MEMMEM = YesPlease - THREADED_DELTA_SEARCH = YesPlease - USE_ST_TIMESPEC = YesPlease -endif -ifeq ($(uname_S),SunOS) - NEEDS_SOCKET = YesPlease - NEEDS_NSL = YesPlease - SHELL_PATH = /bin/bash - NO_STRCASESTR = YesPlease - NO_MEMMEM = YesPlease - NO_HSTRERROR = YesPlease - NO_MKDTEMP = YesPlease - OLD_ICONV = UnfortunatelyYes - ifeq ($(uname_R),5.8) - NO_UNSETENV = YesPlease - NO_SETENV = YesPlease - NO_C99_FORMAT = YesPlease - NO_STRTOUMAX = YesPlease - endif - ifeq ($(uname_R),5.9) - NO_UNSETENV = YesPlease - NO_SETENV = YesPlease - NO_C99_FORMAT = YesPlease - NO_STRTOUMAX = YesPlease - endif - INSTALL = ginstall - TAR = gtar - BASIC_CFLAGS += -D__EXTENSIONS__ -endif -ifeq ($(uname_O),Cygwin) - NO_D_TYPE_IN_DIRENT = YesPlease - NO_D_INO_IN_DIRENT = YesPlease - NO_STRCASESTR = YesPlease - NO_MEMMEM = YesPlease - NO_SYMLINK_HEAD = YesPlease - NEEDS_LIBICONV = YesPlease - NO_FAST_WORKING_DIRECTORY = UnfortunatelyYes - NO_TRUSTABLE_FILEMODE = UnfortunatelyYes - OLD_ICONV = UnfortunatelyYes - # There are conflicting reports about this. - # On some boxes NO_MMAP is needed, and not so elsewhere. 
- # Try commenting this out if you suspect MMAP is more efficient - NO_MMAP = YesPlease - NO_IPV6 = YesPlease - X = .exe -endif -ifeq ($(uname_S),FreeBSD) - NEEDS_LIBICONV = YesPlease - NO_MEMMEM = YesPlease - BASIC_CFLAGS += -I/usr/local/include - BASIC_LDFLAGS += -L/usr/local/lib - DIR_HAS_BSD_GROUP_SEMANTICS = YesPlease - USE_ST_TIMESPEC = YesPlease - THREADED_DELTA_SEARCH = YesPlease - ifeq ($(shell expr "$(uname_R)" : '4\.'),2) - PTHREAD_LIBS = -pthread - NO_UINTMAX_T = YesPlease - NO_STRTOUMAX = YesPlease - endif -endif -ifeq ($(uname_S),OpenBSD) - NO_STRCASESTR = YesPlease - NO_MEMMEM = YesPlease - NEEDS_LIBICONV = YesPlease - BASIC_CFLAGS += -I/usr/local/include - BASIC_LDFLAGS += -L/usr/local/lib - THREADED_DELTA_SEARCH = YesPlease -endif -ifeq ($(uname_S),NetBSD) - ifeq ($(shell expr "$(uname_R)" : '[01]\.'),2) - NEEDS_LIBICONV = YesPlease - endif - BASIC_CFLAGS += -I/usr/pkg/include - BASIC_LDFLAGS += -L/usr/pkg/lib $(CC_LD_DYNPATH)/usr/pkg/lib - THREADED_DELTA_SEARCH = YesPlease -endif -ifeq ($(uname_S),AIX) - NO_STRCASESTR=YesPlease - NO_MEMMEM = YesPlease - NO_MKDTEMP = YesPlease - NO_STRLCPY = YesPlease - NO_NSEC = YesPlease - FREAD_READS_DIRECTORIES = UnfortunatelyYes - INTERNAL_QSORT = UnfortunatelyYes - NEEDS_LIBICONV=YesPlease - BASIC_CFLAGS += -D_LARGE_FILES - ifneq ($(shell expr "$(uname_V)" : '[1234]'),1) - THREADED_DELTA_SEARCH = YesPlease - else - NO_PTHREADS = YesPlease - endif -endif -ifeq ($(uname_S),GNU) - # GNU/Hurd - NO_STRLCPY=YesPlease -endif -ifeq ($(uname_S),IRIX64) - NO_IPV6=YesPlease - NO_SETENV=YesPlease - NO_STRCASESTR=YesPlease - NO_MEMMEM = YesPlease - NO_STRLCPY = YesPlease - NO_SOCKADDR_STORAGE=YesPlease - SHELL_PATH=/usr/gnu/bin/bash - BASIC_CFLAGS += -DPATH_MAX=1024 - # for now, build 32-bit version - BASIC_LDFLAGS += -L/usr/lib32 -endif -ifeq ($(uname_S),HP-UX) - NO_IPV6=YesPlease - NO_SETENV=YesPlease - NO_STRCASESTR=YesPlease - NO_MEMMEM = YesPlease - NO_STRLCPY = YesPlease - NO_MKDTEMP = YesPlease - NO_UNSETENV = YesPlease - NO_HSTRERROR = YesPlease - NO_SYS_SELECT_H = YesPlease - SNPRINTF_RETURNS_BOGUS = YesPlease -endif -ifneq (,$(findstring CYGWIN,$(uname_S))) - COMPAT_OBJS += compat/cygwin.o -endif -ifneq (,$(findstring MINGW,$(uname_S))) - NO_PREAD = YesPlease - NO_OPENSSL = YesPlease - NO_CURL = YesPlease - NO_SYMLINK_HEAD = YesPlease - NO_IPV6 = YesPlease - NO_SETENV = YesPlease - NO_UNSETENV = YesPlease - NO_STRCASESTR = YesPlease - NO_STRLCPY = YesPlease - NO_MEMMEM = YesPlease - NO_PTHREADS = YesPlease - NEEDS_LIBICONV = YesPlease - OLD_ICONV = YesPlease - NO_C99_FORMAT = YesPlease - NO_STRTOUMAX = YesPlease - NO_MKDTEMP = YesPlease - SNPRINTF_RETURNS_BOGUS = YesPlease - NO_SVN_TESTS = YesPlease - NO_PERL_MAKEMAKER = YesPlease - RUNTIME_PREFIX = YesPlease - NO_POSIX_ONLY_PROGRAMS = YesPlease - NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease - NO_NSEC = YesPlease - USE_WIN32_MMAP = YesPlease - COMPAT_CFLAGS += -D__USE_MINGW_ACCESS -DNOGDI -Icompat -Icompat/regex -Icompat/fnmatch - COMPAT_CFLAGS += -DSNPRINTF_SIZE_CORR=1 - COMPAT_CFLAGS += -DSTRIP_EXTENSION=\".exe\" - COMPAT_OBJS += compat/mingw.o compat/fnmatch/fnmatch.o compat/regex/regex.o compat/winansi.o - EXTLIBS += -lws2_32 - X = .exe -endif -ifneq (,$(findstring arm,$(uname_M))) - ARM_SHA1 = YesPlease -endif - -include config.mak.autogen -include config.mak @@ -869,72 +365,12 @@ ifndef CC_LD_DYNPATH endif endif -ifdef NO_CURL - BASIC_CFLAGS += -DNO_CURL -else - ifdef CURLDIR - # Try "-Wl,-rpath=$(CURLDIR)/$(lib)" in such a case. 
- BASIC_CFLAGS += -I$(CURLDIR)/include - CURL_LIBCURL = -L$(CURLDIR)/$(lib) $(CC_LD_DYNPATH)$(CURLDIR)/$(lib) -lcurl - else - CURL_LIBCURL = -lcurl - endif - BUILTIN_OBJS += builtin-http-fetch.o - EXTLIBS += $(CURL_LIBCURL) - LIB_OBJS += http.o http-walker.o - curl_check := $(shell (echo 070908; curl-config --vernum) | sort -r | sed -ne 2p) - ifeq "$(curl_check)" "070908" - ifndef NO_EXPAT - PROGRAMS += git-http-push$X - endif - endif - ifndef NO_EXPAT - ifdef EXPATDIR - BASIC_CFLAGS += -I$(EXPATDIR)/include - EXPAT_LIBEXPAT = -L$(EXPATDIR)/$(lib) $(CC_LD_DYNPATH)$(EXPATDIR)/$(lib) -lexpat - else - EXPAT_LIBEXPAT = -lexpat - endif - endif -endif - ifdef ZLIB_PATH BASIC_CFLAGS += -I$(ZLIB_PATH)/include EXTLIBS += -L$(ZLIB_PATH)/$(lib) $(CC_LD_DYNPATH)$(ZLIB_PATH)/$(lib) endif EXTLIBS += -lz -ifndef NO_POSIX_ONLY_PROGRAMS - PROGRAMS += git-daemon$X - PROGRAMS += git-imap-send$X -endif -ifndef NO_OPENSSL - OPENSSL_LIBSSL = -lssl - ifdef OPENSSLDIR - BASIC_CFLAGS += -I$(OPENSSLDIR)/include - OPENSSL_LINK = -L$(OPENSSLDIR)/$(lib) $(CC_LD_DYNPATH)$(OPENSSLDIR)/$(lib) - else - OPENSSL_LINK = - endif -else - BASIC_CFLAGS += -DNO_OPENSSL - MOZILLA_SHA1 = 1 - OPENSSL_LIBSSL = -endif -ifdef NEEDS_SSL_WITH_CRYPTO - LIB_4_CRYPTO = $(OPENSSL_LINK) -lcrypto -lssl -else - LIB_4_CRYPTO = $(OPENSSL_LINK) -lcrypto -endif -ifdef NEEDS_LIBICONV - ifdef ICONVDIR - BASIC_CFLAGS += -I$(ICONVDIR)/include - ICONV_LINK = -L$(ICONVDIR)/$(lib) $(CC_LD_DYNPATH)$(ICONVDIR)/$(lib) - else - ICONV_LINK = - endif - EXTLIBS += $(ICONV_LINK) -liconv -endif ifdef NEEDS_SOCKET EXTLIBS += -lsocket endif @@ -977,10 +413,6 @@ ifdef NO_STRCASESTR COMPAT_CFLAGS += -DNO_STRCASESTR COMPAT_OBJS += compat/strcasestr.o endif -ifdef NO_STRLCPY - COMPAT_CFLAGS += -DNO_STRLCPY - COMPAT_OBJS += compat/strlcpy.o -endif ifdef NO_STRTOUMAX COMPAT_CFLAGS += -DNO_STRTOUMAX COMPAT_OBJS += compat/strtoumax.o @@ -1090,17 +522,6 @@ ifdef RUNTIME_PREFIX COMPAT_CFLAGS += -DRUNTIME_PREFIX endif -ifdef NO_PTHREADS - THREADED_DELTA_SEARCH = - BASIC_CFLAGS += -DNO_PTHREADS -else - EXTLIBS += $(PTHREAD_LIBS) -endif - -ifdef THREADED_DELTA_SEARCH - BASIC_CFLAGS += -DTHREADED_DELTA_SEARCH - LIB_OBJS += thread-utils.o -endif ifdef DIR_HAS_BSD_GROUP_SEMANTICS COMPAT_CFLAGS += -DDIR_HAS_BSD_GROUP_SEMANTICS endif @@ -1148,14 +569,14 @@ endif # Shell quote (do not use $(call) to accommodate ancient setups); SHA1_HEADER_SQ = $(subst ','\'',$(SHA1_HEADER)) -ETC_GITCONFIG_SQ = $(subst ','\'',$(ETC_GITCONFIG)) +ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG)) DESTDIR_SQ = $(subst ','\'',$(DESTDIR)) bindir_SQ = $(subst ','\'',$(bindir)) bindir_relative_SQ = $(subst ','\'',$(bindir_relative)) mandir_SQ = $(subst ','\'',$(mandir)) infodir_SQ = $(subst ','\'',$(infodir)) -gitexecdir_SQ = $(subst ','\'',$(gitexecdir)) +perfexecdir_SQ = $(subst ','\'',$(perfexecdir)) template_dir_SQ = $(subst ','\'',$(template_dir)) htmldir_SQ = $(subst ','\'',$(htmldir)) prefix_SQ = $(subst ','\'',$(prefix)) @@ -1164,7 +585,7 @@ SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH)) PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH)) TCLTK_PATH_SQ = $(subst ','\'',$(TCLTK_PATH)) -LIBS = $(GITLIBS) $(EXTLIBS) +LIBS = $(PERFLIBS) $(EXTLIBS) BASIC_CFLAGS += -DSHA1_HEADER='$(SHA1_HEADER_SQ)' \ $(COMPAT_CFLAGS) @@ -1180,15 +601,15 @@ export TAR INSTALL DESTDIR SHELL_PATH SHELL = $(SHELL_PATH) -all:: shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) GIT-BUILD-OPTIONS +all:: shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) PERF-BUILD-OPTIONS ifneq (,$X) - 
$(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) git$X)), test '$p' -ef '$p$X' || $(RM) '$p';) + $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), test '$p' -ef '$p$X' || $(RM) '$p';) endif all:: ifndef NO_TCLTK - $(QUIET_SUBDIR0)git-gui $(QUIET_SUBDIR1) gitexecdir='$(gitexec_instdir_SQ)' all - $(QUIET_SUBDIR0)gitk-git $(QUIET_SUBDIR1) all + $(QUIET_SUBDIR0)perf-gui $(QUIET_SUBDIR1) perfexecdir='$(perfexec_instdir_SQ)' all + $(QUIET_SUBDIR0)perfk-perf $(QUIET_SUBDIR1) all endif ifndef NO_PERL $(QUIET_SUBDIR0)perl $(QUIET_SUBDIR1) PERL_PATH='$(PERL_PATH_SQ)' prefix='$(prefix_SQ)' all @@ -1200,33 +621,33 @@ please_set_SHELL_PATH_to_a_more_modern_shell: shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell -strip: $(PROGRAMS) git$X - $(STRIP) $(STRIP_OPTS) $(PROGRAMS) git$X +strip: $(PROGRAMS) perf$X + $(STRIP) $(STRIP_OPTS) $(PROGRAMS) perf$X -git.o: git.c common-cmds.h GIT-CFLAGS - $(QUIET_CC)$(CC) -DGIT_VERSION='"$(GIT_VERSION)"' \ - '-DGIT_HTML_PATH="$(htmldir_SQ)"' \ +perf.o: perf.c common-cmds.h PERF-CFLAGS + $(QUIET_CC)$(CC) -DPERF_VERSION='"$(PERF_VERSION)"' \ + '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ $(ALL_CFLAGS) -c $(filter %.c,$^) -git$X: git.o $(BUILTIN_OBJS) $(GITLIBS) - $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ git.o \ +perf$X: perf.o $(BUILTIN_OBJS) $(PERFLIBS) + $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ perf.o \ $(BUILTIN_OBJS) $(ALL_LDFLAGS) $(LIBS) -builtin-help.o: builtin-help.c common-cmds.h GIT-CFLAGS +builtin-help.o: builtin-help.c common-cmds.h PERF-CFLAGS $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \ - '-DGIT_HTML_PATH="$(htmldir_SQ)"' \ - '-DGIT_MAN_PATH="$(mandir_SQ)"' \ - '-DGIT_INFO_PATH="$(infodir_SQ)"' $< + '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ + '-DPERF_MAN_PATH="$(mandir_SQ)"' \ + '-DPERF_INFO_PATH="$(infodir_SQ)"' $< -$(BUILT_INS): git$X +$(BUILT_INS): perf$X $(QUIET_BUILT_IN)$(RM) $@ && \ - ln git$X $@ 2>/dev/null || \ - ln -s git$X $@ 2>/dev/null || \ - cp git$X $@ + ln perf$X $@ 2>/dev/null || \ + ln -s perf$X $@ 2>/dev/null || \ + cp perf$X $@ common-cmds.h: ./generate-cmdlist.sh command-list.txt -common-cmds.h: $(wildcard Documentation/git-*.txt) +common-cmds.h: $(wildcard Documentation/perf-*.txt) $(QUIET_GEN)./generate-cmdlist.sh > $@+ && mv $@+ $@ $(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh @@ -1234,152 +655,55 @@ $(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \ -e 's|@SHELL_PATH@|$(SHELL_PATH_SQ)|' \ -e 's|@@PERL@@|$(PERL_PATH_SQ)|g' \ - -e 's/@@GIT_VERSION@@/$(GIT_VERSION)/g' \ + -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \ -e 's/@@NO_CURL@@/$(NO_CURL)/g' \ $@.sh >$@+ && \ chmod +x $@+ && \ mv $@+ $@ -ifndef NO_PERL -$(patsubst %.perl,%,$(SCRIPT_PERL)): perl/perl.mak - -perl/perl.mak: GIT-CFLAGS perl/Makefile perl/Makefile.PL - $(QUIET_SUBDIR0)perl $(QUIET_SUBDIR1) PERL_PATH='$(PERL_PATH_SQ)' prefix='$(prefix_SQ)' $(@F) - -$(patsubst %.perl,%,$(SCRIPT_PERL)): % : %.perl - $(QUIET_GEN)$(RM) $@ $@+ && \ - INSTLIBDIR=`MAKEFLAGS= $(MAKE) -C perl -s --no-print-directory instlibdir` && \ - sed -e '1{' \ - -e ' s|#!.*perl|#!$(PERL_PATH_SQ)|' \ - -e ' h' \ - -e ' s=.*=use lib (split(/:/, $$ENV{GITPERLLIB} || "@@INSTLIBDIR@@"));=' \ - -e ' H' \ - -e ' x' \ - -e '}' \ - -e 's|@@INSTLIBDIR@@|'"$$INSTLIBDIR"'|g' \ - -e 's/@@GIT_VERSION@@/$(GIT_VERSION)/g' \ - $@.perl >$@+ && \ - chmod +x $@+ && \ - mv $@+ $@ - -gitweb/gitweb.cgi: gitweb/gitweb.perl - $(QUIET_GEN)$(RM) $@ $@+ && \ - sed -e '1s|#!.*perl|#!$(PERL_PATH_SQ)|' \ - -e 's|++GIT_VERSION++|$(GIT_VERSION)|g' \ - -e 
's|++GIT_BINDIR++|$(bindir)|g' \ - -e 's|++GITWEB_CONFIG++|$(GITWEB_CONFIG)|g' \ - -e 's|++GITWEB_CONFIG_SYSTEM++|$(GITWEB_CONFIG_SYSTEM)|g' \ - -e 's|++GITWEB_HOME_LINK_STR++|$(GITWEB_HOME_LINK_STR)|g' \ - -e 's|++GITWEB_SITENAME++|$(GITWEB_SITENAME)|g' \ - -e 's|++GITWEB_PROJECTROOT++|$(GITWEB_PROJECTROOT)|g' \ - -e 's|"++GITWEB_PROJECT_MAXDEPTH++"|$(GITWEB_PROJECT_MAXDEPTH)|g' \ - -e 's|++GITWEB_EXPORT_OK++|$(GITWEB_EXPORT_OK)|g' \ - -e 's|++GITWEB_STRICT_EXPORT++|$(GITWEB_STRICT_EXPORT)|g' \ - -e 's|++GITWEB_BASE_URL++|$(GITWEB_BASE_URL)|g' \ - -e 's|++GITWEB_LIST++|$(GITWEB_LIST)|g' \ - -e 's|++GITWEB_HOMETEXT++|$(GITWEB_HOMETEXT)|g' \ - -e 's|++GITWEB_CSS++|$(GITWEB_CSS)|g' \ - -e 's|++GITWEB_LOGO++|$(GITWEB_LOGO)|g' \ - -e 's|++GITWEB_FAVICON++|$(GITWEB_FAVICON)|g' \ - -e 's|++GITWEB_SITE_HEADER++|$(GITWEB_SITE_HEADER)|g' \ - -e 's|++GITWEB_SITE_FOOTER++|$(GITWEB_SITE_FOOTER)|g' \ - $< >$@+ && \ - chmod +x $@+ && \ - mv $@+ $@ - -git-instaweb: git-instaweb.sh gitweb/gitweb.cgi gitweb/gitweb.css - $(QUIET_GEN)$(RM) $@ $@+ && \ - sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \ - -e 's/@@GIT_VERSION@@/$(GIT_VERSION)/g' \ - -e 's/@@NO_CURL@@/$(NO_CURL)/g' \ - -e '/@@GITWEB_CGI@@/r gitweb/gitweb.cgi' \ - -e '/@@GITWEB_CGI@@/d' \ - -e '/@@GITWEB_CSS@@/r gitweb/gitweb.css' \ - -e '/@@GITWEB_CSS@@/d' \ - -e 's|@@PERL@@|$(PERL_PATH_SQ)|g' \ - $@.sh > $@+ && \ - chmod +x $@+ && \ - mv $@+ $@ -else # NO_PERL -$(patsubst %.perl,%,$(SCRIPT_PERL)) git-instaweb: % : unimplemented.sh - $(QUIET_GEN)$(RM) $@ $@+ && \ - sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \ - -e 's|@@REASON@@|NO_PERL=$(NO_PERL)|g' \ - unimplemented.sh >$@+ && \ - chmod +x $@+ && \ - mv $@+ $@ -endif # NO_PERL - configure: configure.ac $(QUIET_GEN)$(RM) $@ $<+ && \ - sed -e 's/@@GIT_VERSION@@/$(GIT_VERSION)/g' \ + sed -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \ $< > $<+ && \ autoconf -o $@ $<+ && \ $(RM) $<+ -# These can record GIT_VERSION -git.o git.spec \ +# These can record PERF_VERSION +perf.o perf.spec \ $(patsubst %.sh,%,$(SCRIPT_SH)) \ $(patsubst %.perl,%,$(SCRIPT_PERL)) \ - : GIT-VERSION-FILE + : PERF-VERSION-FILE -%.o: %.c GIT-CFLAGS +%.o: %.c PERF-CFLAGS $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) $< -%.s: %.c GIT-CFLAGS +%.s: %.c PERF-CFLAGS $(QUIET_CC)$(CC) -S $(ALL_CFLAGS) $< %.o: %.S $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) $< -exec_cmd.o: exec_cmd.c GIT-CFLAGS +exec_cmd.o: exec_cmd.c PERF-CFLAGS $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \ - '-DGIT_EXEC_PATH="$(gitexecdir_SQ)"' \ + '-DPERF_EXEC_PATH="$(perfexecdir_SQ)"' \ '-DBINDIR="$(bindir_relative_SQ)"' \ '-DPREFIX="$(prefix_SQ)"' \ $< -builtin-init-db.o: builtin-init-db.c GIT-CFLAGS - $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DDEFAULT_GIT_TEMPLATE_DIR='"$(template_dir_SQ)"' $< - -config.o: config.c GIT-CFLAGS - $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DETC_GITCONFIG='"$(ETC_GITCONFIG_SQ)"' $< +builtin-init-db.o: builtin-init-db.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DDEFAULT_PERF_TEMPLATE_DIR='"$(template_dir_SQ)"' $< -http.o: http.c GIT-CFLAGS - $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DGIT_USER_AGENT='"git/$(GIT_VERSION)"' $< +config.o: config.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< -ifdef NO_EXPAT -http-walker.o: http-walker.c http.h GIT-CFLAGS - $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DNO_EXPAT $< -endif - -git-%$X: %.o $(GITLIBS) +perf-%$X: %.o $(PERFLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) -git-imap-send$X: imap-send.o $(GITLIBS) - 
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \ - $(LIBS) $(OPENSSL_LINK) $(OPENSSL_LIBSSL) - -http.o http-walker.o http-push.o transport.o: http.h - -git-http-push$X: revision.o http.o http-push.o $(GITLIBS) - $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \ - $(LIBS) $(CURL_LIBCURL) $(EXPAT_LIBEXPAT) - $(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H) -$(patsubst git-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h) +$(patsubst perf-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h) builtin-revert.o wt-status.o: wt-status.h $(LIB_FILE): $(LIB_OBJS) $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS) -XDIFF_OBJS=xdiff/xdiffi.o xdiff/xprepare.o xdiff/xutils.o xdiff/xemit.o \ - xdiff/xmerge.o xdiff/xpatience.o -$(XDIFF_OBJS): xdiff/xinclude.h xdiff/xmacros.h xdiff/xdiff.h xdiff/xtypes.h \ - xdiff/xutils.h xdiff/xprepare.h xdiff/xdiffi.h xdiff/xemit.h - -$(XDIFF_LIB): $(XDIFF_OBJS) - $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(XDIFF_OBJS) - - doc: $(MAKE) -C Documentation all @@ -1409,19 +733,19 @@ cscope: ### Detect prefix changes TRACK_CFLAGS = $(subst ','\'',$(ALL_CFLAGS)):\ - $(bindir_SQ):$(gitexecdir_SQ):$(template_dir_SQ):$(prefix_SQ) + $(bindir_SQ):$(perfexecdir_SQ):$(template_dir_SQ):$(prefix_SQ) -GIT-CFLAGS: .FORCE-GIT-CFLAGS +PERF-CFLAGS: .FORCE-PERF-CFLAGS @FLAGS='$(TRACK_CFLAGS)'; \ - if test x"$$FLAGS" != x"`cat GIT-CFLAGS 2>/dev/null`" ; then \ + if test x"$$FLAGS" != x"`cat PERF-CFLAGS 2>/dev/null`" ; then \ echo 1>&2 " * new build flags or prefix"; \ - echo "$$FLAGS" >GIT-CFLAGS; \ + echo "$$FLAGS" >PERF-CFLAGS; \ fi # We need to apply sq twice, once to protect from the shell -# that runs GIT-BUILD-OPTIONS, and then again to protect it +# that runs PERF-BUILD-OPTIONS, and then again to protect it # and the first level quoting from the shell that runs "echo". 
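The sq comment above is easy to misread: a value is single-quoted once so it survives the shell that executes the PERF-BUILD-OPTIONS recipe, and a second time so it also survives the shell that evaluates the echoed assignment. The escaping rule itself is tiny; here is a sketch in C of what $(subst ','\'',...) does per level (a hypothetical helper for illustration; the quote.c imported by this patch provides the real C-side equivalents):

/*
 * Single-quote a value for one round of shell evaluation: wrap it in
 * quotes and turn each embedded ' into '\'' (close, escaped quote,
 * reopen). Applying this twice survives two shells.
 */
#include <stdio.h>

static void sq_quote(const char *s, FILE *out)
{
	fputc('\'', out);
	for (; *s; s++) {
		if (*s == '\'')
			fputs("'\\''", out);
		else
			fputc(*s, out);
	}
	fputc('\'', out);
}

int main(void)
{
	/* Prints 'it'\''s', which a shell reassembles into: it's */
	sq_quote("it's", stdout);
	fputc('\n', stdout);
	return 0;
}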
-GIT-BUILD-OPTIONS: .FORCE-GIT-BUILD-OPTIONS +PERF-BUILD-OPTIONS: .FORCE-PERF-BUILD-OPTIONS @echo SHELL_PATH=\''$(subst ','\'',$(SHELL_PATH_SQ))'\' >$@ @echo TAR=\''$(subst ','\'',$(subst ','\'',$(TAR)))'\' >>$@ @echo NO_CURL=\''$(subst ','\'',$(subst ','\'',$(NO_CURL)))'\' >>$@ @@ -1431,14 +755,14 @@ GIT-BUILD-OPTIONS: .FORCE-GIT-BUILD-OPTIONS ifndef NO_TCLTK TRACK_VARS = $(subst ','\'',-DTCLTK_PATH='$(TCLTK_PATH_SQ)') -GIT-GUI-VARS: .FORCE-GIT-GUI-VARS +PERF-GUI-VARS: .FORCE-PERF-GUI-VARS @VARS='$(TRACK_VARS)'; \ if test x"$$VARS" != x"`cat $@ 2>/dev/null`" ; then \ echo 1>&2 " * new Tcl/Tk interpreter location"; \ echo "$$VARS" >$@; \ fi -.PHONY: .FORCE-GIT-GUI-VARS +.PHONY: .FORCE-PERF-GUI-VARS endif ### Testing rules @@ -1476,7 +800,7 @@ test-parse-options$X: parse-options.o .PRECIOUS: $(patsubst test-%$X,test-%.o,$(TEST_PROGRAMS)) -test-%$X: test-%.o $(GITLIBS) +test-%$X: test-%.o $(PERFLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) check-sha1:: test-sha1$X @@ -1506,40 +830,40 @@ template_instdir = $(prefix)/$(template_dir) endif export template_instdir -ifneq ($(filter /%,$(firstword $(gitexecdir))),) -gitexec_instdir = $(gitexecdir) +ifneq ($(filter /%,$(firstword $(perfexecdir))),) +perfexec_instdir = $(perfexecdir) else -gitexec_instdir = $(prefix)/$(gitexecdir) +perfexec_instdir = $(prefix)/$(perfexecdir) endif -gitexec_instdir_SQ = $(subst ','\'',$(gitexec_instdir)) -export gitexec_instdir +perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir)) +export perfexec_instdir install: all $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' - $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' - $(INSTALL) $(ALL_PROGRAMS) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' - $(INSTALL) git$X git-upload-pack$X git-receive-pack$X git-upload-archive$X git-shell$X git-cvsserver '$(DESTDIR_SQ)$(bindir_SQ)' + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' + $(INSTALL) $(ALL_PROGRAMS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' + $(INSTALL) perf$X perf-upload-pack$X perf-receive-pack$X perf-upload-archive$X perf-shell$X perf-cvsserver '$(DESTDIR_SQ)$(bindir_SQ)' $(MAKE) -C templates DESTDIR='$(DESTDIR_SQ)' install $(MAKE) -C perl prefix='$(prefix_SQ)' DESTDIR='$(DESTDIR_SQ)' install ifndef NO_TCLTK - $(MAKE) -C gitk-git install - $(MAKE) -C git-gui gitexecdir='$(gitexec_instdir_SQ)' install + $(MAKE) -C perfk-perf install + $(MAKE) -C perf-gui perfexecdir='$(perfexec_instdir_SQ)' install endif ifneq (,$X) - $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) git$X)), $(RM) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)/$p';) + $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), $(RM) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$p';) endif bindir=$$(cd '$(DESTDIR_SQ)$(bindir_SQ)' && pwd) && \ - execdir=$$(cd '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' && pwd) && \ - { $(RM) "$$execdir/git-add$X" && \ - ln "$$bindir/git$X" "$$execdir/git-add$X" 2>/dev/null || \ - cp "$$bindir/git$X" "$$execdir/git-add$X"; } && \ - { for p in $(filter-out git-add$X,$(BUILT_INS)); do \ + execdir=$$(cd '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' && pwd) && \ + { $(RM) "$$execdir/perf-add$X" && \ + ln "$$bindir/perf$X" "$$execdir/perf-add$X" 2>/dev/null || \ + cp "$$bindir/perf$X" "$$execdir/perf-add$X"; } && \ + { for p in $(filter-out perf-add$X,$(BUILT_INS)); do \ $(RM) "$$execdir/$$p" && \ - ln "$$execdir/git-add$X" "$$execdir/$$p" 2>/dev/null || \ - ln -s "git-add$X" "$$execdir/$$p" 2>/dev/null || \ - cp "$$execdir/git-add$X" "$$execdir/$$p" || exit; \ + ln 
"$$execdir/perf-add$X" "$$execdir/$$p" 2>/dev/null || \ + ln -s "perf-add$X" "$$execdir/$$p" 2>/dev/null || \ + cp "$$execdir/perf-add$X" "$$execdir/$$p" || exit; \ done } && \ - ./check_bindir "z$$bindir" "z$$execdir" "$$bindir/git-add$X" + ./check_bindir "z$$bindir" "z$$execdir" "$$bindir/perf-add$X" install-doc: $(MAKE) -C Documentation install @@ -1569,31 +893,31 @@ quick-install-html: ### Maintainer's dist rules -git.spec: git.spec.in - sed -e 's/@@VERSION@@/$(GIT_VERSION)/g' < $< > $@+ +perf.spec: perf.spec.in + sed -e 's/@@VERSION@@/$(PERF_VERSION)/g' < $< > $@+ mv $@+ $@ -GIT_TARNAME=git-$(GIT_VERSION) -dist: git.spec git-archive$(X) configure - ./git-archive --format=tar \ - --prefix=$(GIT_TARNAME)/ HEAD^{tree} > $(GIT_TARNAME).tar - @mkdir -p $(GIT_TARNAME) - @cp git.spec configure $(GIT_TARNAME) - @echo $(GIT_VERSION) > $(GIT_TARNAME)/version - @$(MAKE) -C git-gui TARDIR=../$(GIT_TARNAME)/git-gui dist-version - $(TAR) rf $(GIT_TARNAME).tar \ - $(GIT_TARNAME)/git.spec \ - $(GIT_TARNAME)/configure \ - $(GIT_TARNAME)/version \ - $(GIT_TARNAME)/git-gui/version - @$(RM) -r $(GIT_TARNAME) - gzip -f -9 $(GIT_TARNAME).tar +PERF_TARNAME=perf-$(PERF_VERSION) +dist: perf.spec perf-archive$(X) configure + ./perf-archive --format=tar \ + --prefix=$(PERF_TARNAME)/ HEAD^{tree} > $(PERF_TARNAME).tar + @mkdir -p $(PERF_TARNAME) + @cp perf.spec configure $(PERF_TARNAME) + @echo $(PERF_VERSION) > $(PERF_TARNAME)/version + @$(MAKE) -C perf-gui TARDIR=../$(PERF_TARNAME)/perf-gui dist-version + $(TAR) rf $(PERF_TARNAME).tar \ + $(PERF_TARNAME)/perf.spec \ + $(PERF_TARNAME)/configure \ + $(PERF_TARNAME)/version \ + $(PERF_TARNAME)/perf-gui/version + @$(RM) -r $(PERF_TARNAME) + gzip -f -9 $(PERF_TARNAME).tar rpm: dist - $(RPMBUILD) -ta $(GIT_TARNAME).tar.gz + $(RPMBUILD) -ta $(PERF_TARNAME).tar.gz -htmldocs = git-htmldocs-$(GIT_VERSION) -manpages = git-manpages-$(GIT_VERSION) +htmldocs = perf-htmldocs-$(PERF_VERSION) +manpages = perf-manpages-$(PERF_VERSION) dist-doc: $(RM) -r .doc-tmp-dir mkdir .doc-tmp-dir @@ -1618,51 +942,46 @@ distclean: clean $(RM) configure clean: - $(RM) *.o mozilla-sha1/*.o arm/*.o ppc/*.o compat/*.o xdiff/*.o \ - $(LIB_FILE) $(XDIFF_LIB) - $(RM) $(ALL_PROGRAMS) $(BUILT_INS) git$X + $(RM) *.o $(LIB_FILE) + $(RM) $(ALL_PROGRAMS) $(BUILT_INS) perf$X $(RM) $(TEST_PROGRAMS) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h TAGS tags cscope* $(RM) -r autom4te.cache $(RM) config.log config.mak.autogen config.mak.append config.status config.cache - $(RM) -r $(GIT_TARNAME) .doc-tmp-dir - $(RM) $(GIT_TARNAME).tar.gz git-core_$(GIT_VERSION)-*.tar.gz + $(RM) -r $(PERF_TARNAME) .doc-tmp-dir + $(RM) $(PERF_TARNAME).tar.gz perf-core_$(PERF_VERSION)-*.tar.gz $(RM) $(htmldocs).tar.gz $(manpages).tar.gz $(MAKE) -C Documentation/ clean -ifndef NO_PERL - $(RM) gitweb/gitweb.cgi - $(MAKE) -C perl clean -endif $(MAKE) -C templates/ clean $(MAKE) -C t/ clean ifndef NO_TCLTK - $(MAKE) -C gitk-git clean - $(MAKE) -C git-gui clean + $(MAKE) -C perfk-perf clean + $(MAKE) -C perf-gui clean endif - $(RM) GIT-VERSION-FILE GIT-CFLAGS GIT-GUI-VARS GIT-BUILD-OPTIONS + $(RM) PERF-VERSION-FILE PERF-CFLAGS PERF-GUI-VARS PERF-BUILD-OPTIONS .PHONY: all install clean strip .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell -.PHONY: .FORCE-GIT-VERSION-FILE TAGS tags cscope .FORCE-GIT-CFLAGS -.PHONY: .FORCE-GIT-BUILD-OPTIONS +.PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS +.PHONY: .FORCE-PERF-BUILD-OPTIONS ### Check documentation # check-docs:: - @(for v in 
$(ALL_PROGRAMS) $(BUILT_INS) git gitk; \ + @(for v in $(ALL_PROGRAMS) $(BUILT_INS) perf perfk; \ do \ case "$$v" in \ - git-merge-octopus | git-merge-ours | git-merge-recursive | \ - git-merge-resolve | git-merge-subtree | \ - git-fsck-objects | git-init-db | \ - git-?*--?* ) continue ;; \ + perf-merge-octopus | perf-merge-ours | perf-merge-recursive | \ + perf-merge-resolve | perf-merge-subtree | \ + perf-fsck-objects | perf-init-db | \ + perf-?*--?* ) continue ;; \ esac ; \ test -f "Documentation/$$v.txt" || \ echo "no doc: $$v"; \ sed -e '/^#/d' command-list.txt | \ grep -q "^$$v[ ]" || \ case "$$v" in \ - git) ;; \ + perf) ;; \ *) echo "no link: $$v";; \ esac ; \ done; \ @@ -1670,37 +989,37 @@ check-docs:: sed -e '/^#/d' \ -e 's/[ ].*//' \ -e 's/^/listed /' command-list.txt; \ - ls -1 Documentation/git*txt | \ + ls -1 Documentation/perf*txt | \ sed -e 's|Documentation/|documented |' \ -e 's/\.txt//'; \ ) | while read how cmd; \ do \ case "$$how,$$cmd" in \ - *,git-citool | \ - *,git-gui | \ - *,git-help | \ - documented,gitattributes | \ - documented,gitignore | \ - documented,gitmodules | \ - documented,gitcli | \ - documented,git-tools | \ - documented,gitcore-tutorial | \ - documented,gitcvs-migration | \ - documented,gitdiffcore | \ - documented,gitglossary | \ - documented,githooks | \ - documented,gitrepository-layout | \ - documented,gittutorial | \ - documented,gittutorial-2 | \ + *,perf-citool | \ + *,perf-gui | \ + *,perf-help | \ + documented,perfattributes | \ + documented,perfignore | \ + documented,perfmodules | \ + documented,perfcli | \ + documented,perf-tools | \ + documented,perfcore-tutorial | \ + documented,perfcvs-migration | \ + documented,perfdiffcore | \ + documented,perfglossary | \ + documented,perfhooks | \ + documented,perfrepository-layout | \ + documented,perftutorial | \ + documented,perftutorial-2 | \ sentinel,not,matching,is,ok ) continue ;; \ esac; \ - case " $(ALL_PROGRAMS) $(BUILT_INS) git gitk " in \ + case " $(ALL_PROGRAMS) $(BUILT_INS) perf perfk " in \ *" $$cmd "*) ;; \ *) echo "removed but $$how: $$cmd" ;; \ esac; \ done ) | sort -### Make sure built-ins do not have dups and listed in git.c +### Make sure built-ins do not have dups and listed in perf.c # check-builtins:: ./check-builtins.sh diff --git a/Documentation/perf_counter/PERF-BUILD-OPTIONS b/Documentation/perf_counter/PERF-BUILD-OPTIONS new file mode 100644 index 00000000000..46d8d6ceb2f --- /dev/null +++ b/Documentation/perf_counter/PERF-BUILD-OPTIONS @@ -0,0 +1,4 @@ +SHELL_PATH='/bin/sh' +TAR='tar' +NO_CURL='' +NO_PERL='' diff --git a/Documentation/perf_counter/PERF-CFLAGS b/Documentation/perf_counter/PERF-CFLAGS new file mode 100644 index 00000000000..f24906ca688 --- /dev/null +++ b/Documentation/perf_counter/PERF-CFLAGS @@ -0,0 +1 @@ +-g -O2 -Wall -DSHA1_HEADER='' : /home/mingo/bin:libexec/perf-core:share/perf-core/templates:/home/mingo diff --git a/Documentation/perf_counter/PERF-VERSION-FILE b/Documentation/perf_counter/PERF-VERSION-FILE new file mode 100644 index 00000000000..328e244c0c8 --- /dev/null +++ b/Documentation/perf_counter/PERF-VERSION-FILE @@ -0,0 +1 @@ +PERF_VERSION = 0.0.1.PERF diff --git a/Documentation/perf_counter/PERF-VERSION-GEN b/Documentation/perf_counter/PERF-VERSION-GEN new file mode 100755 index 00000000000..c561d1538c0 --- /dev/null +++ b/Documentation/perf_counter/PERF-VERSION-GEN @@ -0,0 +1,42 @@ +#!/bin/sh + +GVF=PERF-VERSION-FILE +DEF_VER=v0.0.1.PERF + +LF=' +' + +# First see if there is a version file (included in release tarballs), +# then try 
git-describe, then default. +if test -f version +then + VN=$(cat version) || VN="$DEF_VER" +elif test -d .git -o -f .git && + VN=$(git describe --abbrev=4 HEAD 2>/dev/null) && + case "$VN" in + *$LF*) (exit 1) ;; + v[0-9]*) + git update-index -q --refresh + test -z "$(git diff-index --name-only HEAD --)" || + VN="$VN-dirty" ;; + esac +then + VN=$(echo "$VN" | sed -e 's/-/./g'); +else + VN="$DEF_VER" +fi + +VN=$(expr "$VN" : v*'\(.*\)') + +if test -r $GVF +then + VC=$(sed -e 's/^PERF_VERSION = //' <$GVF) +else + VC=unset +fi +test "$VN" = "$VC" || { + echo >&2 "PERF_VERSION = $VN" + echo "PERF_VERSION = $VN" >$GVF +} + + diff --git a/Documentation/perf_counter/abspath.c b/Documentation/perf_counter/abspath.c new file mode 100644 index 00000000000..649f34f8336 --- /dev/null +++ b/Documentation/perf_counter/abspath.c @@ -0,0 +1,117 @@ +#include "cache.h" + +/* + * Do not use this for inspecting *tracked* content. When path is a + * symlink to a directory, we do not want to say it is a directory when + * dealing with tracked content in the working tree. + */ +int is_directory(const char *path) +{ + struct stat st; + return (!stat(path, &st) && S_ISDIR(st.st_mode)); +} + +/* We allow "recursive" symbolic links. Only within reason, though. */ +#define MAXDEPTH 5 + +const char *make_absolute_path(const char *path) +{ + static char bufs[2][PATH_MAX + 1], *buf = bufs[0], *next_buf = bufs[1]; + char cwd[1024] = ""; + int buf_index = 1, len; + + int depth = MAXDEPTH; + char *last_elem = NULL; + struct stat st; + + if (strlcpy(buf, path, PATH_MAX) >= PATH_MAX) + die ("Too long path: %.*s", 60, path); + + while (depth--) { + if (!is_directory(buf)) { + char *last_slash = strrchr(buf, '/'); + if (last_slash) { + *last_slash = '\0'; + last_elem = xstrdup(last_slash + 1); + } else { + last_elem = xstrdup(buf); + *buf = '\0'; + } + } + + if (*buf) { + if (!*cwd && !getcwd(cwd, sizeof(cwd))) + die ("Could not get current working directory"); + + if (chdir(buf)) + die ("Could not switch to '%s'", buf); + } + if (!getcwd(buf, PATH_MAX)) + die ("Could not get current working directory"); + + if (last_elem) { + int len = strlen(buf); + if (len + strlen(last_elem) + 2 > PATH_MAX) + die ("Too long path name: '%s/%s'", + buf, last_elem); + buf[len] = '/'; + strcpy(buf + len + 1, last_elem); + free(last_elem); + last_elem = NULL; + } + + if (!lstat(buf, &st) && S_ISLNK(st.st_mode)) { + len = readlink(buf, next_buf, PATH_MAX); + if (len < 0) + die ("Invalid symlink: %s", buf); + if (PATH_MAX <= len) + die("symbolic link too long: %s", buf); + next_buf[len] = '\0'; + buf = next_buf; + buf_index = 1 - buf_index; + next_buf = bufs[buf_index]; + } else + break; + } + + if (*cwd && chdir(cwd)) + die ("Could not change back to '%s'", cwd); + + return buf; +} + +static const char *get_pwd_cwd(void) +{ + static char cwd[PATH_MAX + 1]; + char *pwd; + struct stat cwd_stat, pwd_stat; + if (getcwd(cwd, PATH_MAX) == NULL) + return NULL; + pwd = getenv("PWD"); + if (pwd && strcmp(pwd, cwd)) { + stat(cwd, &cwd_stat); + if (!stat(pwd, &pwd_stat) && + pwd_stat.st_dev == cwd_stat.st_dev && + pwd_stat.st_ino == cwd_stat.st_ino) { + strlcpy(cwd, pwd, PATH_MAX); + } + } + return cwd; +} + +const char *make_nonrelative_path(const char *path) +{ + static char buf[PATH_MAX + 1]; + + if (is_absolute_path(path)) { + if (strlcpy(buf, path, PATH_MAX) >= PATH_MAX) + die("Too long path: %.*s", 60, path); + } else { + const char *cwd = get_pwd_cwd(); + if (!cwd) + die("Cannot determine the current working directory"); + if (snprintf(buf, 
PATH_MAX, "%s/%s", cwd, path) >= PATH_MAX) + die("Too long path: %.*s", 60, path); + } + return buf; +} diff --git a/Documentation/perf_counter/alias.c b/Documentation/perf_counter/alias.c new file mode 100644 index 00000000000..9b3dd2b428d --- /dev/null +++ b/Documentation/perf_counter/alias.c @@ -0,0 +1,77 @@ +#include "cache.h" + +static const char *alias_key; +static char *alias_val; + +static int alias_lookup_cb(const char *k, const char *v, void *cb) +{ + if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) { + if (!v) + return config_error_nonbool(k); + alias_val = strdup(v); + return 0; + } + return 0; +} + +char *alias_lookup(const char *alias) +{ + alias_key = alias; + alias_val = NULL; + perf_config(alias_lookup_cb, NULL); + return alias_val; +} + +int split_cmdline(char *cmdline, const char ***argv) +{ + int src, dst, count = 0, size = 16; + char quoted = 0; + + *argv = malloc(sizeof(char*) * size); + + /* split alias_string */ + (*argv)[count++] = cmdline; + for (src = dst = 0; cmdline[src];) { + char c = cmdline[src]; + if (!quoted && isspace(c)) { + cmdline[dst++] = 0; + while (cmdline[++src] + && isspace(cmdline[src])) + ; /* skip */ + if (count >= size) { + size += 16; + *argv = realloc(*argv, sizeof(char*) * size); + } + (*argv)[count++] = cmdline + dst; + } else if (!quoted && (c == '\'' || c == '"')) { + quoted = c; + src++; + } else if (c == quoted) { + quoted = 0; + src++; + } else { + if (c == '\\' && quoted != '\'') { + src++; + c = cmdline[src]; + if (!c) { + free(*argv); + *argv = NULL; + return error("cmdline ends with \\"); + } + } + cmdline[dst++] = c; + src++; + } + } + + cmdline[dst] = 0; + + if (quoted) { + free(*argv); + *argv = NULL; + return error("unclosed quote"); + } + + return count; +} + diff --git a/Documentation/perf_counter/builtin-help.c b/Documentation/perf_counter/builtin-help.c new file mode 100644 index 00000000000..125fcc2f490 --- /dev/null +++ b/Documentation/perf_counter/builtin-help.c @@ -0,0 +1,463 @@ +/* + * builtin-help.c + * + * Builtin help command + */ +#include "cache.h" +#include "builtin.h" +#include "exec_cmd.h" +#include "common-cmds.h" +#include "parse-options.h" +#include "run-command.h" +#include "help.h" + +static struct man_viewer_list { + struct man_viewer_list *next; + char name[FLEX_ARRAY]; +} *man_viewer_list; + +static struct man_viewer_info_list { + struct man_viewer_info_list *next; + const char *info; + char name[FLEX_ARRAY]; +} *man_viewer_info_list; + +enum help_format { + HELP_FORMAT_MAN, + HELP_FORMAT_INFO, + HELP_FORMAT_WEB, +}; + +static int show_all = 0; +static enum help_format help_format = HELP_FORMAT_MAN; +static struct option builtin_help_options[] = { + OPT_BOOLEAN('a', "all", &show_all, "print all available commands"), + OPT_SET_INT('m', "man", &help_format, "show man page", HELP_FORMAT_MAN), + OPT_SET_INT('w', "web", &help_format, "show manual in web browser", + HELP_FORMAT_WEB), + OPT_SET_INT('i', "info", &help_format, "show info page", + HELP_FORMAT_INFO), + OPT_END(), +}; + +static const char * const builtin_help_usage[] = { + "perf help [--all] [--man|--web|--info] [command]", + NULL +}; + +static enum help_format parse_help_format(const char *format) +{ + if (!strcmp(format, "man")) + return HELP_FORMAT_MAN; + if (!strcmp(format, "info")) + return HELP_FORMAT_INFO; + if (!strcmp(format, "web") || !strcmp(format, "html")) + return HELP_FORMAT_WEB; + die("unrecognized help format '%s'", format); +} + +static const char *get_man_viewer_info(const char *name) +{ + struct man_viewer_info_list 
*viewer; + + for (viewer = man_viewer_info_list; viewer; viewer = viewer->next) + { + if (!strcasecmp(name, viewer->name)) + return viewer->info; + } + return NULL; +} + +static int check_emacsclient_version(void) +{ + struct strbuf buffer = STRBUF_INIT; + struct child_process ec_process; + const char *argv_ec[] = { "emacsclient", "--version", NULL }; + int version; + + /* emacsclient prints its version number on stderr */ + memset(&ec_process, 0, sizeof(ec_process)); + ec_process.argv = argv_ec; + ec_process.err = -1; + ec_process.stdout_to_stderr = 1; + if (start_command(&ec_process)) { + fprintf(stderr, "Failed to start emacsclient.\n"); + return -1; + } + strbuf_read(&buffer, ec_process.err, 20); + close(ec_process.err); + + /* + * Don't bother checking return value, because "emacsclient --version" + * seems to always exit with code 1. + */ + finish_command(&ec_process); + + if (prefixcmp(buffer.buf, "emacsclient")) { + fprintf(stderr, "Failed to parse emacsclient version.\n"); + strbuf_release(&buffer); + return -1; + } + + strbuf_remove(&buffer, 0, strlen("emacsclient")); + version = atoi(buffer.buf); + + if (version < 22) { + fprintf(stderr, + "emacsclient version '%d' too old (< 22).\n", + version); + strbuf_release(&buffer); + return -1; + } + + strbuf_release(&buffer); + return 0; +} + +static void exec_woman_emacs(const char* path, const char *page) +{ + if (!check_emacsclient_version()) { + /* This works only with emacsclient version >= 22. */ + struct strbuf man_page = STRBUF_INIT; + + if (!path) + path = "emacsclient"; + strbuf_addf(&man_page, "(woman \"%s\")", page); + execlp(path, "emacsclient", "-e", man_page.buf, NULL); + warning("failed to exec '%s': %s", path, strerror(errno)); + } +} + +static void exec_man_konqueror(const char* path, const char *page) +{ + const char *display = getenv("DISPLAY"); + if (display && *display) { + struct strbuf man_page = STRBUF_INIT; + const char *filename = "kfmclient"; + + /* It's simpler to launch konqueror using kfmclient. 
*/ + if (path) { + const char *file = strrchr(path, '/'); + if (file && !strcmp(file + 1, "konqueror")) { + char *new = strdup(path); + char *dest = strrchr(new, '/'); + + /* strlen("konqueror") == strlen("kfmclient") */ + strcpy(dest + 1, "kfmclient"); + path = new; + } + if (file) + filename = file; + } else + path = "kfmclient"; + strbuf_addf(&man_page, "man:%s(1)", page); + execlp(path, filename, "newTab", man_page.buf, NULL); + warning("failed to exec '%s': %s", path, strerror(errno)); + } +} + +static void exec_man_man(const char* path, const char *page) +{ + if (!path) + path = "man"; + execlp(path, "man", page, NULL); + warning("failed to exec '%s': %s", path, strerror(errno)); +} + +static void exec_man_cmd(const char *cmd, const char *page) +{ + struct strbuf shell_cmd = STRBUF_INIT; + strbuf_addf(&shell_cmd, "%s %s", cmd, page); + execl("/bin/sh", "sh", "-c", shell_cmd.buf, NULL); + warning("failed to exec '%s': %s", cmd, strerror(errno)); +} + +static void add_man_viewer(const char *name) +{ + struct man_viewer_list **p = &man_viewer_list; + size_t len = strlen(name); + + while (*p) + p = &((*p)->next); + *p = calloc(1, (sizeof(**p) + len + 1)); + strncpy((*p)->name, name, len); +} + +static int supported_man_viewer(const char *name, size_t len) +{ + return (!strncasecmp("man", name, len) || + !strncasecmp("woman", name, len) || + !strncasecmp("konqueror", name, len)); +} + +static void do_add_man_viewer_info(const char *name, + size_t len, + const char *value) +{ + struct man_viewer_info_list *new = calloc(1, sizeof(*new) + len + 1); + + strncpy(new->name, name, len); + new->info = strdup(value); + new->next = man_viewer_info_list; + man_viewer_info_list = new; +} + +static int add_man_viewer_path(const char *name, + size_t len, + const char *value) +{ + if (supported_man_viewer(name, len)) + do_add_man_viewer_info(name, len, value); + else + warning("'%s': path for unsupported man viewer.\n" + "Please consider using 'man.<tool>.cmd' instead.", + name); + + return 0; +} + +static int add_man_viewer_cmd(const char *name, + size_t len, + const char *value) +{ + if (supported_man_viewer(name, len)) + warning("'%s': cmd for supported man viewer.\n" + "Please consider using 'man.<tool>.path' instead.", + name); + else + do_add_man_viewer_info(name, len, value); + + return 0; +} + +static int add_man_viewer_info(const char *var, const char *value) +{ + const char *name = var + 4; + const char *subkey = strrchr(name, '.'); + + if (!subkey) + return error("Config with no key for man viewer: %s", name); + + if (!strcmp(subkey, ".path")) { + if (!value) + return config_error_nonbool(var); + return add_man_viewer_path(name, subkey - name, value); + } + if (!strcmp(subkey, ".cmd")) { + if (!value) + return config_error_nonbool(var); + return add_man_viewer_cmd(name, subkey - name, value); + } + + warning("'%s': unsupported man viewer sub key.", subkey); + return 0; +} + +static int perf_help_config(const char *var, const char *value, void *cb) +{ + if (!strcmp(var, "help.format")) { + if (!value) + return config_error_nonbool(var); + help_format = parse_help_format(value); + return 0; + } + if (!strcmp(var, "man.viewer")) { + if (!value) + return config_error_nonbool(var); + add_man_viewer(value); + return 0; + } + if (!prefixcmp(var, "man.")) + return add_man_viewer_info(var, value); + + return perf_default_config(var, value, cb); +} + +static struct cmdnames main_cmds, other_cmds; + +void list_common_cmds_help(void) +{ + int i, longest = 0; + + for (i = 0; i < ARRAY_SIZE(common_cmds); i++) { + if 
(longest < strlen(common_cmds[i].name)) + longest = strlen(common_cmds[i].name); + } + + puts("The most commonly used perf commands are:"); + for (i = 0; i < ARRAY_SIZE(common_cmds); i++) { + printf(" %s ", common_cmds[i].name); + mput_char(' ', longest - strlen(common_cmds[i].name)); + puts(common_cmds[i].help); + } +} + +static int is_perf_command(const char *s) +{ + return is_in_cmdlist(&main_cmds, s) || + is_in_cmdlist(&other_cmds, s); +} + +static const char *prepend(const char *prefix, const char *cmd) +{ + size_t pre_len = strlen(prefix); + size_t cmd_len = strlen(cmd); + char *p = malloc(pre_len + cmd_len + 1); + memcpy(p, prefix, pre_len); + strcpy(p + pre_len, cmd); + return p; +} + +static const char *cmd_to_page(const char *perf_cmd) +{ + if (!perf_cmd) + return "perf"; + else if (!prefixcmp(perf_cmd, "perf")) + return perf_cmd; + else if (is_perf_command(perf_cmd)) + return prepend("perf-", perf_cmd); + else + return prepend("perf", perf_cmd); +} + +static void setup_man_path(void) +{ + struct strbuf new_path = STRBUF_INIT; + const char *old_path = getenv("MANPATH"); + + /* We should always put ':' after our path. If there is no + * old_path, the ':' at the end will let 'man' to try + * system-wide paths after ours to find the manual page. If + * there is old_path, we need ':' as delimiter. */ + strbuf_addstr(&new_path, system_path(PERF_MAN_PATH)); + strbuf_addch(&new_path, ':'); + if (old_path) + strbuf_addstr(&new_path, old_path); + + setenv("MANPATH", new_path.buf, 1); + + strbuf_release(&new_path); +} + +static void exec_viewer(const char *name, const char *page) +{ + const char *info = get_man_viewer_info(name); + + if (!strcasecmp(name, "man")) + exec_man_man(info, page); + else if (!strcasecmp(name, "woman")) + exec_woman_emacs(info, page); + else if (!strcasecmp(name, "konqueror")) + exec_man_konqueror(info, page); + else if (info) + exec_man_cmd(info, page); + else + warning("'%s': unknown man viewer.", name); +} + +static void show_man_page(const char *perf_cmd) +{ + struct man_viewer_list *viewer; + const char *page = cmd_to_page(perf_cmd); + const char *fallback = getenv("PERF_MAN_VIEWER"); + + setup_man_path(); + for (viewer = man_viewer_list; viewer; viewer = viewer->next) + { + exec_viewer(viewer->name, page); /* will return when unable */ + } + if (fallback) + exec_viewer(fallback, page); + exec_viewer("man", page); + die("no man viewer handled the request"); +} + +static void show_info_page(const char *perf_cmd) +{ + const char *page = cmd_to_page(perf_cmd); + setenv("INFOPATH", system_path(PERF_INFO_PATH), 1); + execlp("info", "info", "perfman", page, NULL); +} + +static void get_html_page_path(struct strbuf *page_path, const char *page) +{ + struct stat st; + const char *html_path = system_path(PERF_HTML_PATH); + + /* Check that we have a perf documentation directory. */ + if (stat(mkpath("%s/perf.html", html_path), &st) + || !S_ISREG(st.st_mode)) + die("'%s': not a documentation directory.", html_path); + + strbuf_init(page_path, 0); + strbuf_addf(page_path, "%s/%s.html", html_path, page); +} + +/* + * If open_html is not defined in a platform-specific way (see for + * example compat/mingw.h), we use the script web--browse to display + * HTML. 
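+ * A port can supply its own definition (e.g. as a macro in its compat + * header), in which case this fallback is compiled out; otherwise the + * page path is handed to "perf web--browse", which honours the + * help.browser configuration variable.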
+ */ +#ifndef open_html +void open_html(const char *path) +{ + execl_perf_cmd("web--browse", "-c", "help.browser", path, NULL); +} +#endif + +static void show_html_page(const char *perf_cmd) +{ + const char *page = cmd_to_page(perf_cmd); + struct strbuf page_path; /* it leaks but we exec bellow */ + + get_html_page_path(&page_path, page); + + open_html(page_path.buf); +} + +int cmd_help(int argc, const char **argv, const char *prefix) +{ + int nonperf; + const char *alias; + load_command_list("perf-", &main_cmds, &other_cmds); + + /* setup_perf_directory_gently(&nonperf); */ + perf_config(perf_help_config, NULL); + + argc = parse_options(argc, argv, builtin_help_options, + builtin_help_usage, 0); + + if (show_all) { + printf("usage: %s\n\n", perf_usage_string); + list_commands("perf commands", &main_cmds, &other_cmds); + printf("%s\n", perf_more_info_string); + return 0; + } + + if (!argv[0]) { + printf("usage: %s\n\n", perf_usage_string); + list_common_cmds_help(); + printf("\n%s\n", perf_more_info_string); + return 0; + } + + alias = alias_lookup(argv[0]); + if (alias && !is_perf_command(argv[0])) { + printf("`perf %s' is aliased to `%s'\n", argv[0], alias); + return 0; + } + + switch (help_format) { + case HELP_FORMAT_MAN: + show_man_page(argv[0]); + break; + case HELP_FORMAT_INFO: + show_info_page(argv[0]); + break; + case HELP_FORMAT_WEB: + show_html_page(argv[0]); + break; + } + + return 0; +} diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c new file mode 100644 index 00000000000..9d2c769e5f8 --- /dev/null +++ b/Documentation/perf_counter/builtin-top.c @@ -0,0 +1,1411 @@ +/* + * kerneltop.c: show top kernel functions - performance counters showcase + + Build with: + + cc -O6 -Wall -c -o kerneltop.o kerneltop.c -lrt + + Sample output: + +------------------------------------------------------------------------------ + KernelTop: 2669 irqs/sec [NMI, cache-misses/cache-refs], (all, cpu: 2) +------------------------------------------------------------------------------ + + weight RIP kernel function + ______ ________________ _______________ + + 35.20 - ffffffff804ce74b : skb_copy_and_csum_dev + 33.00 - ffffffff804cb740 : sock_alloc_send_skb + 31.26 - ffffffff804ce808 : skb_push + 22.43 - ffffffff80510004 : tcp_established_options + 19.00 - ffffffff8027d250 : find_get_page + 15.76 - ffffffff804e4fc9 : eth_type_trans + 15.20 - ffffffff804d8baa : dst_release + 14.86 - ffffffff804cf5d8 : skb_release_head_state + 14.00 - ffffffff802217d5 : read_hpet + 12.00 - ffffffff804ffb7f : __ip_local_out + 11.97 - ffffffff804fc0c8 : ip_local_deliver_finish + 8.54 - ffffffff805001a3 : ip_queue_xmit + */ + +/* + * perfstat: /usr/bin/time -alike performance counter statistics utility + + It summarizes the counter events of all tasks (and child tasks), + covering all CPUs that the command (or workload) executes on. + It only counts the per-task events of the workload started, + independent of how many other tasks run on those CPUs. + + Sample output: + + $ ./perfstat -e 1 -e 3 -e 5 ls -lR /usr/include/ >/dev/null + + Performance counter stats for 'ls': + + 163516953 instructions + 2295 cache-misses + 2855182 branch-misses + */ + + /* + * Copyright (C) 2008, Red Hat Inc, Ingo Molnar + * + * Improvements and fixes by: + * + * Arjan van de Ven + * Yanmin Zhang + * Wu Fengguang + * Mike Galbraith + * Paul Mackerras + * + * Released under the GPL v2. 
(and only v2, not any later version) + */ + +#include "util.h" + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "../../include/linux/perf_counter.h" + + +/* + * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all + * counters in the current task. + */ +#define PR_TASK_PERF_COUNTERS_DISABLE 31 +#define PR_TASK_PERF_COUNTERS_ENABLE 32 + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +#define rdclock() \ +({ \ + struct timespec ts; \ + \ + clock_gettime(CLOCK_MONOTONIC, &ts); \ + ts.tv_sec * 1000000000ULL + ts.tv_nsec; \ +}) + +/* + * Pick up some kernel type conventions: + */ +#define __user +#define asmlinkage + +#ifdef __x86_64__ +#define __NR_perf_counter_open 295 +#define rmb() asm volatile("lfence" ::: "memory") +#define cpu_relax() asm volatile("rep; nop" ::: "memory"); +#endif + +#ifdef __i386__ +#define __NR_perf_counter_open 333 +#define rmb() asm volatile("lfence" ::: "memory") +#define cpu_relax() asm volatile("rep; nop" ::: "memory"); +#endif + +#ifdef __powerpc__ +#define __NR_perf_counter_open 319 +#define rmb() asm volatile ("sync" ::: "memory") +#define cpu_relax() asm volatile ("" ::: "memory"); +#endif + +#define unlikely(x) __builtin_expect(!!(x), 0) +#define min(x, y) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? _min1 : _min2; }) + +asmlinkage int sys_perf_counter_open( + struct perf_counter_hw_event *hw_event_uptr __user, + pid_t pid, + int cpu, + int group_fd, + unsigned long flags) +{ + return syscall( + __NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags); +} + +#define MAX_COUNTERS 64 +#define MAX_NR_CPUS 256 + +#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id)) + +static int run_perfstat = 0; +static int system_wide = 0; + +static int nr_counters = 0; +static __u64 event_id[MAX_COUNTERS] = { + EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), + EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), + EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), + EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), + + EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), + EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), + EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), + EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), +}; +static int default_interval = 100000; +static int event_count[MAX_COUNTERS]; +static int fd[MAX_NR_CPUS][MAX_COUNTERS]; + +static __u64 count_filter = 100; + +static int tid = -1; +static int profile_cpu = -1; +static int nr_cpus = 0; +static int nmi = 1; +static unsigned int realtime_prio = 0; +static int group = 0; +static unsigned int page_size; +static unsigned int mmap_pages = 16; +static int use_mmap = 0; +static int use_munmap = 0; + +static char *vmlinux; + +static char *sym_filter; +static unsigned long filter_start; +static unsigned long filter_end; + +static int delay_secs = 2; +static int zero; +static int dump_symtab; + +static int scale; + +struct source_line { + uint64_t EIP; + unsigned long count; + char *line; + struct source_line *next; +}; + +static struct source_line *lines; +static struct source_line **lines_tail; + +const unsigned int default_count[] = { + 1000000, + 1000000, + 10000, + 10000, + 1000000, + 10000, +}; + +static char *hw_event_names[] = { + "CPU cycles", + 
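+	/* + * Keep these strings in PERF_COUNT_* id order: event_name() + * indexes this table (and sw_event_names below) directly with + * PERF_COUNTER_ID(config). + */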
"instructions", + "cache references", + "cache misses", + "branches", + "branch misses", + "bus cycles", +}; + +static char *sw_event_names[] = { + "cpu clock ticks", + "task clock ticks", + "pagefaults", + "context switches", + "CPU migrations", + "minor faults", + "major faults", +}; + +struct event_symbol { + __u64 event; + char *symbol; +}; + +static struct event_symbol event_symbols[] = { + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cpu-cycles", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cycles", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), "instructions", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), "cache-references", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), "cache-misses", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branch-instructions", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branches", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES), "branch-misses", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES), "bus-cycles", }, + + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK), "cpu-clock", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), "task-clock", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "page-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN), "minor-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ), "major-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "context-switches", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "cs", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "cpu-migrations", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "migrations", }, +}; + +#define __PERF_COUNTER_FIELD(config, name) \ + ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) + +#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) +#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) +#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) +#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) + +static void display_events_help(void) +{ + unsigned int i; + __u64 e; + + printf( + " -e EVENT --event=EVENT # symbolic-name abbreviations"); + + for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { + int type, id; + + e = event_symbols[i].event; + type = PERF_COUNTER_TYPE(e); + id = PERF_COUNTER_ID(e); + + printf("\n %d:%d: %-20s", + type, id, event_symbols[i].symbol); + } + + printf("\n" + " rNNN: raw PMU events (eventsel+umask)\n\n"); +} + +static void display_perfstat_help(void) +{ + printf( + "Usage: perfstat [] \n\n" + "PerfStat Options (up to %d event types can be specified):\n\n", + MAX_COUNTERS); + + display_events_help(); + + printf( + " -l # scale counter values\n" + " -a # system-wide collection\n"); + exit(0); +} + +static void display_help(void) +{ + if (run_perfstat) + return display_perfstat_help(); + + printf( + "Usage: kerneltop []\n" + " Or: kerneltop -S [] COMMAND [ARGS]\n\n" + "KernelTop Options (up to %d event types can be specified at once):\n\n", + MAX_COUNTERS); + + display_events_help(); + + printf( + " -S --stat # perfstat COMMAND\n" + " -a # system-wide collection (for perfstat)\n\n" + " -c CNT --count=CNT # event period to sample\n\n" + " -C CPU --cpu=CPU # CPU (-1 for all) [default: -1]\n" + " -p PID --pid=PID # PID of sampled task (-1 for all) [default: -1]\n\n" + " -l # show scale factor for RR events\n" + " -d delay 
--delay= # sampling/display delay [default: 2]\n" + " -f CNT --filter=CNT # min-event-count filter [default: 100]\n\n" + " -r prio --realtime= # event acquisition runs with SCHED_FIFO policy\n" + " -s symbol --symbol= # function to be showed annotated one-shot\n" + " -x path --vmlinux= # the vmlinux binary, required for -s use\n" + " -z --zero # zero counts after display\n" + " -D --dump_symtab # dump symbol table to stderr on startup\n" + " -m pages --mmap_pages= # number of mmap data pages\n" + " -M --mmap_info # print mmap info stream\n" + " -U --munmap_info # print munmap info stream\n" + ); + + exit(0); +} + +static char *event_name(int ctr) +{ + __u64 config = event_id[ctr]; + int type = PERF_COUNTER_TYPE(config); + int id = PERF_COUNTER_ID(config); + static char buf[32]; + + if (PERF_COUNTER_RAW(config)) { + sprintf(buf, "raw 0x%llx", PERF_COUNTER_CONFIG(config)); + return buf; + } + + switch (type) { + case PERF_TYPE_HARDWARE: + if (id < PERF_HW_EVENTS_MAX) + return hw_event_names[id]; + return "unknown-hardware"; + + case PERF_TYPE_SOFTWARE: + if (id < PERF_SW_EVENTS_MAX) + return sw_event_names[id]; + return "unknown-software"; + + default: + break; + } + + return "unknown"; +} + +/* + * Each event can have multiple symbolic names. + * Symbolic names are (almost) exactly matched. + */ +static __u64 match_event_symbols(char *str) +{ + __u64 config, id; + int type; + unsigned int i; + + if (sscanf(str, "r%llx", &config) == 1) + return config | PERF_COUNTER_RAW_MASK; + + if (sscanf(str, "%d:%llu", &type, &id) == 2) + return EID(type, id); + + for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { + if (!strncmp(str, event_symbols[i].symbol, + strlen(event_symbols[i].symbol))) + return event_symbols[i].event; + } + + return ~0ULL; +} + +static int parse_events(char *str) +{ + __u64 config; + +again: + if (nr_counters == MAX_COUNTERS) + return -1; + + config = match_event_symbols(str); + if (config == ~0ULL) + return -1; + + event_id[nr_counters] = config; + nr_counters++; + + str = strstr(str, ","); + if (str) { + str++; + goto again; + } + + return 0; +} + + +/* + * perfstat + */ + +char fault_here[1000000]; + +static void create_perfstat_counter(int counter) +{ + struct perf_counter_hw_event hw_event; + + memset(&hw_event, 0, sizeof(hw_event)); + hw_event.config = event_id[counter]; + hw_event.record_type = 0; + hw_event.nmi = 0; + if (scale) + hw_event.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | + PERF_FORMAT_TOTAL_TIME_RUNNING; + + if (system_wide) { + int cpu; + for (cpu = 0; cpu < nr_cpus; cpu ++) { + fd[cpu][counter] = sys_perf_counter_open(&hw_event, -1, cpu, -1, 0); + if (fd[cpu][counter] < 0) { + printf("perfstat error: syscall returned with %d (%s)\n", + fd[cpu][counter], strerror(errno)); + exit(-1); + } + } + } else { + hw_event.inherit = 1; + hw_event.disabled = 1; + + fd[0][counter] = sys_perf_counter_open(&hw_event, 0, -1, -1, 0); + if (fd[0][counter] < 0) { + printf("perfstat error: syscall returned with %d (%s)\n", + fd[0][counter], strerror(errno)); + exit(-1); + } + } +} + +int do_perfstat(int argc, char *argv[]) +{ + unsigned long long t0, t1; + int counter; + ssize_t res; + int status; + int pid; + + if (!system_wide) + nr_cpus = 1; + + for (counter = 0; counter < nr_counters; counter++) + create_perfstat_counter(counter); + + argc -= optind; + argv += optind; + + if (!argc) + display_help(); + + /* + * Enable counters and exec the command: + */ + t0 = rdclock(); + prctl(PR_TASK_PERF_COUNTERS_ENABLE); + + if ((pid = fork()) < 0) + perror("failed to fork"); + if 
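+	/* + * The counters were opened disabled with .inherit set, so the + * prctl() above turns them on just before the fork(), the child + * keeps counting across exec(), and the parent switches them off + * again once wait() returns. With -l each read() also yields + * time_enabled and time_running, and the raw count is scaled by + * time_enabled/time_running below: e.g. a raw count of 3000 with + * count[1] = 1000000 and count[2] = 500000 is reported as 6000, + * "(scaled from 50.00%)". + */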
(!pid) { + if (execvp(argv[0], argv)) { + perror(argv[0]); + exit(-1); + } + } + while (wait(&status) >= 0) + ; + prctl(PR_TASK_PERF_COUNTERS_DISABLE); + t1 = rdclock(); + + fflush(stdout); + + fprintf(stderr, "\n"); + fprintf(stderr, " Performance counter stats for \'%s\':\n", + argv[0]); + fprintf(stderr, "\n"); + + for (counter = 0; counter < nr_counters; counter++) { + int cpu, nv; + __u64 count[3], single_count[3]; + int scaled; + + count[0] = count[1] = count[2] = 0; + nv = scale ? 3 : 1; + for (cpu = 0; cpu < nr_cpus; cpu ++) { + res = read(fd[cpu][counter], + single_count, nv * sizeof(__u64)); + assert(res == nv * sizeof(__u64)); + + count[0] += single_count[0]; + if (scale) { + count[1] += single_count[1]; + count[2] += single_count[2]; + } + } + + scaled = 0; + if (scale) { + if (count[2] == 0) { + fprintf(stderr, " %14s %-20s\n", + "", event_name(counter)); + continue; + } + if (count[2] < count[1]) { + scaled = 1; + count[0] = (unsigned long long) + ((double)count[0] * count[1] / count[2] + 0.5); + } + } + + if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK) || + event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK)) { + + double msecs = (double)count[0] / 1000000; + + fprintf(stderr, " %14.6f %-20s (msecs)", + msecs, event_name(counter)); + } else { + fprintf(stderr, " %14Ld %-20s (events)", + count[0], event_name(counter)); + } + if (scaled) + fprintf(stderr, " (scaled from %.2f%%)", + (double) count[2] / count[1] * 100); + fprintf(stderr, "\n"); + } + fprintf(stderr, "\n"); + fprintf(stderr, " Wall-clock time elapsed: %12.6f msecs\n", + (double)(t1-t0)/1e6); + fprintf(stderr, "\n"); + + return 0; +} + +/* + * Symbols + */ + +static uint64_t min_ip; +static uint64_t max_ip = -1ll; + +struct sym_entry { + unsigned long long addr; + char *sym; + unsigned long count[MAX_COUNTERS]; + int skip; + struct source_line *source; +}; + +#define MAX_SYMS 100000 + +static int sym_table_count; + +struct sym_entry *sym_filter_entry; + +static struct sym_entry sym_table[MAX_SYMS]; + +static void show_details(struct sym_entry *sym); + +/* + * Ordering weight: count-1 * count-2 * ... / count-n + */ +static double sym_weight(const struct sym_entry *sym) +{ + double weight; + int counter; + + weight = sym->count[0]; + + for (counter = 1; counter < nr_counters-1; counter++) + weight *= sym->count[counter]; + + weight /= (sym->count[counter] + 1); + + return weight; +} + +static int compare(const void *__sym1, const void *__sym2) +{ + const struct sym_entry *sym1 = __sym1, *sym2 = __sym2; + + return sym_weight(sym1) < sym_weight(sym2); +} + +static long events; +static long userspace_events; +static const char CONSOLE_CLEAR[] = ""; + +static struct sym_entry tmp[MAX_SYMS]; + +static void print_sym_table(void) +{ + int i, printed; + int counter; + float events_per_sec = events/delay_secs; + float kevents_per_sec = (events-userspace_events)/delay_secs; + float sum_kevents = 0.0; + + events = userspace_events = 0; + memcpy(tmp, sym_table, sizeof(sym_table[0])*sym_table_count); + qsort(tmp, sym_table_count, sizeof(tmp[0]), compare); + + for (i = 0; i < sym_table_count && tmp[i].count[0]; i++) + sum_kevents += tmp[i].count[0]; + + write(1, CONSOLE_CLEAR, strlen(CONSOLE_CLEAR)); + + printf( +"------------------------------------------------------------------------------\n"); + printf( " KernelTop:%8.0f irqs/sec kernel:%4.1f%% [%s, ", + events_per_sec, + 100.0 - (100.0*((events_per_sec-kevents_per_sec)/events_per_sec)), + nmi ? 
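+		/* + * The kernel share above is just + * 100.0 * kevents_per_sec / events_per_sec, + * written as a subtraction from 100. + */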
"NMI" : "IRQ"); + + if (nr_counters == 1) + printf("%d ", event_count[0]); + + for (counter = 0; counter < nr_counters; counter++) { + if (counter) + printf("/"); + + printf("%s", event_name(counter)); + } + + printf( "], "); + + if (tid != -1) + printf(" (tid: %d", tid); + else + printf(" (all"); + + if (profile_cpu != -1) + printf(", cpu: %d)\n", profile_cpu); + else { + if (tid != -1) + printf(")\n"); + else + printf(", %d CPUs)\n", nr_cpus); + } + + printf("------------------------------------------------------------------------------\n\n"); + + if (nr_counters == 1) + printf(" events pcnt"); + else + printf(" weight events pcnt"); + + printf(" RIP kernel function\n" + " ______ ______ _____ ________________ _______________\n\n" + ); + + for (i = 0, printed = 0; i < sym_table_count; i++) { + float pcnt; + int count; + + if (printed <= 18 && tmp[i].count[0] >= count_filter) { + pcnt = 100.0 - (100.0*((sum_kevents-tmp[i].count[0])/sum_kevents)); + + if (nr_counters == 1) + printf("%19.2f - %4.1f%% - %016llx : %s\n", + sym_weight(tmp + i), + pcnt, tmp[i].addr, tmp[i].sym); + else + printf("%8.1f %10ld - %4.1f%% - %016llx : %s\n", + sym_weight(tmp + i), + tmp[i].count[0], + pcnt, tmp[i].addr, tmp[i].sym); + printed++; + } + /* + * Add decay to the counts: + */ + for (count = 0; count < nr_counters; count++) + sym_table[i].count[count] = zero ? 0 : sym_table[i].count[count] * 7 / 8; + } + + if (sym_filter_entry) + show_details(sym_filter_entry); + + { + struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; + + if (poll(&stdin_poll, 1, 0) == 1) { + printf("key pressed - exiting.\n"); + exit(0); + } + } +} + +static void *display_thread(void *arg) +{ + printf("KernelTop refresh period: %d seconds\n", delay_secs); + + while (!sleep(delay_secs)) + print_sym_table(); + + return NULL; +} + +static int read_symbol(FILE *in, struct sym_entry *s) +{ + static int filter_match = 0; + char *sym, stype; + char str[500]; + int rc, pos; + + rc = fscanf(in, "%llx %c %499s", &s->addr, &stype, str); + if (rc == EOF) + return -1; + + assert(rc == 3); + + /* skip until end of line: */ + pos = strlen(str); + do { + rc = fgetc(in); + if (rc == '\n' || rc == EOF || pos >= 499) + break; + str[pos] = rc; + pos++; + } while (1); + str[pos] = 0; + + sym = str; + + /* Filter out known duplicates and non-text symbols. */ + if (!strcmp(sym, "_text")) + return 1; + if (!min_ip && !strcmp(sym, "_stext")) + return 1; + if (!strcmp(sym, "_etext") || !strcmp(sym, "_sinittext")) + return 1; + if (stype != 'T' && stype != 't') + return 1; + if (!strncmp("init_module", sym, 11) || !strncmp("cleanup_module", sym, 14)) + return 1; + if (strstr(sym, "_text_start") || strstr(sym, "_text_end")) + return 1; + + s->sym = malloc(strlen(str)); + assert(s->sym); + + strcpy((char *)s->sym, str); + s->skip = 0; + + /* Tag events to be skipped. 
*/ + if (!strcmp("default_idle", s->sym) || !strcmp("cpu_idle", s->sym)) + s->skip = 1; + else if (!strcmp("enter_idle", s->sym) || !strcmp("exit_idle", s->sym)) + s->skip = 1; + else if (!strcmp("mwait_idle", s->sym)) + s->skip = 1; + + if (filter_match == 1) { + filter_end = s->addr; + filter_match = -1; + if (filter_end - filter_start > 10000) { + printf("hm, too large filter symbol <%s> - skipping.\n", + sym_filter); + printf("symbol filter start: %016lx\n", filter_start); + printf(" end: %016lx\n", filter_end); + filter_end = filter_start = 0; + sym_filter = NULL; + sleep(1); + } + } + if (filter_match == 0 && sym_filter && !strcmp(s->sym, sym_filter)) { + filter_match = 1; + filter_start = s->addr; + } + + return 0; +} + +int compare_addr(const void *__sym1, const void *__sym2) +{ + const struct sym_entry *sym1 = __sym1, *sym2 = __sym2; + + return sym1->addr > sym2->addr; +} + +static void sort_symbol_table(void) +{ + int i, dups; + + do { + qsort(sym_table, sym_table_count, sizeof(sym_table[0]), compare_addr); + for (i = 0, dups = 0; i < sym_table_count; i++) { + if (sym_table[i].addr == sym_table[i+1].addr) { + sym_table[i+1].addr = -1ll; + dups++; + } + } + sym_table_count -= dups; + } while(dups); +} + +static void parse_symbols(void) +{ + struct sym_entry *last; + + FILE *kallsyms = fopen("/proc/kallsyms", "r"); + + if (!kallsyms) { + printf("Could not open /proc/kallsyms - no CONFIG_KALLSYMS_ALL=y?\n"); + exit(-1); + } + + while (!feof(kallsyms)) { + if (read_symbol(kallsyms, &sym_table[sym_table_count]) == 0) { + sym_table_count++; + assert(sym_table_count <= MAX_SYMS); + } + } + + sort_symbol_table(); + min_ip = sym_table[0].addr; + max_ip = sym_table[sym_table_count-1].addr; + last = sym_table + sym_table_count++; + + last->addr = -1ll; + last->sym = ""; + + if (filter_end) { + int count; + for (count=0; count < sym_table_count; count ++) { + if (!strcmp(sym_table[count].sym, sym_filter)) { + sym_filter_entry = &sym_table[count]; + break; + } + } + } + if (dump_symtab) { + int i; + + for (i = 0; i < sym_table_count; i++) + fprintf(stderr, "%llx %s\n", + sym_table[i].addr, sym_table[i].sym); + } +} + +/* + * Source lines + */ + +static void parse_vmlinux(char *filename) +{ + FILE *file; + char command[PATH_MAX*2]; + if (!filename) + return; + + sprintf(command, "objdump --start-address=0x%016lx --stop-address=0x%016lx -dS %s", filter_start, filter_end, filename); + + file = popen(command, "r"); + if (!file) + return; + + lines_tail = &lines; + while (!feof(file)) { + struct source_line *src; + size_t dummy = 0; + char *c; + + src = malloc(sizeof(struct source_line)); + assert(src != NULL); + memset(src, 0, sizeof(struct source_line)); + + if (getline(&src->line, &dummy, file) < 0) + break; + if (!src->line) + break; + + c = strchr(src->line, '\n'); + if (c) + *c = 0; + + src->next = NULL; + *lines_tail = src; + lines_tail = &src->next; + + if (strlen(src->line)>8 && src->line[8] == ':') + src->EIP = strtoull(src->line, NULL, 16); + if (strlen(src->line)>8 && src->line[16] == ':') + src->EIP = strtoull(src->line, NULL, 16); + } + pclose(file); +} + +static void record_precise_ip(uint64_t ip) +{ + struct source_line *line; + + for (line = lines; line; line = line->next) { + if (line->EIP == ip) + line->count++; + if (line->EIP > ip) + break; + } +} + +static void lookup_sym_in_vmlinux(struct sym_entry *sym) +{ + struct source_line *line; + char pattern[PATH_MAX]; + sprintf(pattern, "<%s>:", sym->sym); + + for (line = lines; line; line = line->next) { + if (strstr(line->line, 
pattern)) { + sym->source = line; + break; + } + } +} + +static void show_lines(struct source_line *line_queue, int line_queue_count) +{ + int i; + struct source_line *line; + + line = line_queue; + for (i = 0; i < line_queue_count; i++) { + printf("%8li\t%s\n", line->count, line->line); + line = line->next; + } +} + +#define TRACE_COUNT 3 + +static void show_details(struct sym_entry *sym) +{ + struct source_line *line; + struct source_line *line_queue = NULL; + int displayed = 0; + int line_queue_count = 0; + + if (!sym->source) + lookup_sym_in_vmlinux(sym); + if (!sym->source) + return; + + printf("Showing details for %s\n", sym->sym); + + line = sym->source; + while (line) { + if (displayed && strstr(line->line, ">:")) + break; + + if (!line_queue_count) + line_queue = line; + line_queue_count ++; + + if (line->count >= count_filter) { + show_lines(line_queue, line_queue_count); + line_queue_count = 0; + line_queue = NULL; + } else if (line_queue_count > TRACE_COUNT) { + line_queue = line_queue->next; + line_queue_count --; + } + + line->count = 0; + displayed++; + if (displayed > 300) + break; + line = line->next; + } +} + +/* + * Binary search in the histogram table and record the hit: + */ +static void record_ip(uint64_t ip, int counter) +{ + int left_idx, middle_idx, right_idx, idx; + unsigned long left, middle, right; + + record_precise_ip(ip); + + left_idx = 0; + right_idx = sym_table_count-1; + assert(ip <= max_ip && ip >= min_ip); + + while (left_idx + 1 < right_idx) { + middle_idx = (left_idx + right_idx) / 2; + + left = sym_table[ left_idx].addr; + middle = sym_table[middle_idx].addr; + right = sym_table[ right_idx].addr; + + if (!(left <= middle && middle <= right)) { + printf("%016lx...\n%016lx...\n%016lx\n", left, middle, right); + printf("%d %d %d\n", left_idx, middle_idx, right_idx); + } + assert(left <= middle && middle <= right); + if (!(left <= ip && ip <= right)) { + printf(" left: %016lx\n", left); + printf(" ip: %016lx\n", (unsigned long)ip); + printf("right: %016lx\n", right); + } + assert(left <= ip && ip <= right); + /* + * [ left .... target .... middle .... right ] + * => right := middle + */ + if (ip < middle) { + right_idx = middle_idx; + continue; + } + /* + * [ left .... middle ... target ... 
right ] + * => left := middle + */ + left_idx = middle_idx; + } + + idx = left_idx; + + if (!sym_table[idx].skip) + sym_table[idx].count[counter]++; + else events--; +} + +static void process_event(uint64_t ip, int counter) +{ + events++; + + if (ip < min_ip || ip > max_ip) { + userspace_events++; + return; + } + + record_ip(ip, counter); +} + +static void process_options(int argc, char *argv[]) +{ + int error = 0, counter; + + if (strstr(argv[0], "perfstat")) + run_perfstat = 1; + + for (;;) { + int option_index = 0; + /** Options for getopt */ + static struct option long_options[] = { + {"count", required_argument, NULL, 'c'}, + {"cpu", required_argument, NULL, 'C'}, + {"delay", required_argument, NULL, 'd'}, + {"dump_symtab", no_argument, NULL, 'D'}, + {"event", required_argument, NULL, 'e'}, + {"filter", required_argument, NULL, 'f'}, + {"group", required_argument, NULL, 'g'}, + {"help", no_argument, NULL, 'h'}, + {"nmi", required_argument, NULL, 'n'}, + {"mmap_info", no_argument, NULL, 'M'}, + {"mmap_pages", required_argument, NULL, 'm'}, + {"munmap_info", no_argument, NULL, 'U'}, + {"pid", required_argument, NULL, 'p'}, + {"realtime", required_argument, NULL, 'r'}, + {"scale", no_argument, NULL, 'l'}, + {"symbol", required_argument, NULL, 's'}, + {"stat", no_argument, NULL, 'S'}, + {"vmlinux", required_argument, NULL, 'x'}, + {"zero", no_argument, NULL, 'z'}, + {NULL, 0, NULL, 0 } + }; + int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hln:m:p:r:s:Sx:zMU", + long_options, &option_index); + if (c == -1) + break; + + switch (c) { + case 'a': system_wide = 1; break; + case 'c': default_interval = atoi(optarg); break; + case 'C': + /* CPU and PID are mutually exclusive */ + if (tid != -1) { + printf("WARNING: CPU switch overriding PID\n"); + sleep(1); + tid = -1; + } + profile_cpu = atoi(optarg); break; + case 'd': delay_secs = atoi(optarg); break; + case 'D': dump_symtab = 1; break; + + case 'e': error = parse_events(optarg); break; + + case 'f': count_filter = atoi(optarg); break; + case 'g': group = atoi(optarg); break; + case 'h': display_help(); break; + case 'l': scale = 1; break; + case 'n': nmi = atoi(optarg); break; + case 'p': + /* CPU and PID are mutually exclusive */ + if (profile_cpu != -1) { + printf("WARNING: PID switch overriding CPU\n"); + sleep(1); + profile_cpu = -1; + } + tid = atoi(optarg); break; + case 'r': realtime_prio = atoi(optarg); break; + case 's': sym_filter = strdup(optarg); break; + case 'S': run_perfstat = 1; break; + case 'x': vmlinux = strdup(optarg); break; + case 'z': zero = 1; break; + case 'm': mmap_pages = atoi(optarg); break; + case 'M': use_mmap = 1; break; + case 'U': use_munmap = 1; break; + default: error = 1; break; + } + } + if (error) + display_help(); + + if (!nr_counters) { + if (run_perfstat) + nr_counters = 8; + else { + nr_counters = 1; + event_id[0] = 0; + } + } + + for (counter = 0; counter < nr_counters; counter++) { + if (event_count[counter]) + continue; + + event_count[counter] = default_interval; + } +} + +struct mmap_data { + int counter; + void *base; + unsigned int mask; + unsigned int prev; +}; + +static unsigned int mmap_read_head(struct mmap_data *md) +{ + struct perf_counter_mmap_page *pc = md->base; + int head; + + head = pc->data_head; + rmb(); + + return head; +} + +struct timeval last_read, this_read; + +static void mmap_read(struct mmap_data *md) +{ + unsigned int head = mmap_read_head(md); + unsigned int old = md->prev; + unsigned char *data = md->base + page_size; + int diff; + + gettimeofday(&this_read, NULL); + 
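+	/* + * md->mask is mmap_pages * page_size - 1, i.e. 0xffff for the + * default 16 data pages and (assuming) 4K pages, so (old & md->mask) + * wraps the read cursor inside the data area that starts one page + * past the control page at md->base. + */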
+ /* + * If we're further behind than half the buffer, there's a chance + * the writer will bite our tail and screw up the events under us. + * + * If we somehow ended up ahead of the head, we got messed up. + * + * In either case, truncate and restart at head. + */ + diff = head - old; + if (diff > md->mask / 2 || diff < 0) { + struct timeval iv; + unsigned long msecs; + + timersub(&this_read, &last_read, &iv); + msecs = iv.tv_sec*1000 + iv.tv_usec/1000; + + fprintf(stderr, "WARNING: failed to keep up with mmap data." + " Last read %lu msecs ago.\n", msecs); + + /* + * head points to a known good entry, start there. + */ + old = head; + } + + last_read = this_read; + + for (; old != head;) { + struct ip_event { + struct perf_event_header header; + __u64 ip; + __u32 pid, tid; + }; + struct mmap_event { + struct perf_event_header header; + __u32 pid, tid; + __u64 start; + __u64 len; + __u64 pgoff; + char filename[PATH_MAX]; + }; + + typedef union event_union { + struct perf_event_header header; + struct ip_event ip; + struct mmap_event mmap; + } event_t; + + event_t *event = (event_t *)&data[old & md->mask]; + + event_t event_copy; + + unsigned int size = event->header.size; + + /* + * Event straddles the mmap boundary -- header should always + * be inside due to u64 alignment of output. + */ + if ((old & md->mask) + size != ((old + size) & md->mask)) { + unsigned int offset = old; + unsigned int len = min(sizeof(*event), size), cpy; + void *dst = &event_copy; + + do { + cpy = min(md->mask + 1 - (offset & md->mask), len); + memcpy(dst, &data[offset & md->mask], cpy); + offset += cpy; + dst += cpy; + len -= cpy; + } while (len); + + event = &event_copy; + } + + old += size; + + if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) { + if (event->header.type & PERF_RECORD_IP) + process_event(event->ip.ip, md->counter); + } else { + switch (event->header.type) { + case PERF_EVENT_MMAP: + case PERF_EVENT_MUNMAP: + printf("%s: %Lu %Lu %Lu %s\n", + event->header.type == PERF_EVENT_MMAP + ? 
"mmap" : "munmap", + event->mmap.start, + event->mmap.len, + event->mmap.pgoff, + event->mmap.filename); + break; + } + } + } + + md->prev = old; +} + +int cmd_top(int argc, const char **argv, const char *prefix) +{ + struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; + struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; + struct perf_counter_hw_event hw_event; + pthread_t thread; + int i, counter, group_fd, nr_poll = 0; + unsigned int cpu; + int ret; + + page_size = sysconf(_SC_PAGE_SIZE); + + process_options(argc, argv); + + nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); + assert(nr_cpus <= MAX_NR_CPUS); + assert(nr_cpus >= 0); + + if (run_perfstat) + return do_perfstat(argc, argv); + + if (tid != -1 || profile_cpu != -1) + nr_cpus = 1; + + parse_symbols(); + if (vmlinux && sym_filter_entry) + parse_vmlinux(vmlinux); + + for (i = 0; i < nr_cpus; i++) { + group_fd = -1; + for (counter = 0; counter < nr_counters; counter++) { + + cpu = profile_cpu; + if (tid == -1 && profile_cpu == -1) + cpu = i; + + memset(&hw_event, 0, sizeof(hw_event)); + hw_event.config = event_id[counter]; + hw_event.irq_period = event_count[counter]; + hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID; + hw_event.nmi = nmi; + hw_event.mmap = use_mmap; + hw_event.munmap = use_munmap; + + fd[i][counter] = sys_perf_counter_open(&hw_event, tid, cpu, group_fd, 0); + if (fd[i][counter] < 0) { + int err = errno; + printf("kerneltop error: syscall returned with %d (%s)\n", + fd[i][counter], strerror(err)); + if (err == EPERM) + printf("Are you root?\n"); + exit(-1); + } + assert(fd[i][counter] >= 0); + fcntl(fd[i][counter], F_SETFL, O_NONBLOCK); + + /* + * First counter acts as the group leader: + */ + if (group && group_fd == -1) + group_fd = fd[i][counter]; + + event_array[nr_poll].fd = fd[i][counter]; + event_array[nr_poll].events = POLLIN; + nr_poll++; + + mmap_array[i][counter].counter = counter; + mmap_array[i][counter].prev = 0; + mmap_array[i][counter].mask = mmap_pages*page_size - 1; + mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size, + PROT_READ, MAP_SHARED, fd[i][counter], 0); + if (mmap_array[i][counter].base == MAP_FAILED) { + printf("kerneltop error: failed to mmap with %d (%s)\n", + errno, strerror(errno)); + exit(-1); + } + } + } + + if (pthread_create(&thread, NULL, display_thread, NULL)) { + printf("Could not create display thread.\n"); + exit(-1); + } + + if (realtime_prio) { + struct sched_param param; + + param.sched_priority = realtime_prio; + if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { + printf("Could not set realtime priority.\n"); + exit(-1); + } + } + + while (1) { + int hits = events; + + for (i = 0; i < nr_cpus; i++) { + for (counter = 0; counter < nr_counters; counter++) + mmap_read(&mmap_array[i][counter]); + } + + if (hits == events) + ret = poll(event_array, nr_poll, 100); + } + + return 0; +} diff --git a/Documentation/perf_counter/builtin.h b/Documentation/perf_counter/builtin.h new file mode 100644 index 00000000000..41637444ce2 --- /dev/null +++ b/Documentation/perf_counter/builtin.h @@ -0,0 +1,18 @@ +#ifndef BUILTIN_H +#define BUILTIN_H + +#include "util.h" +#include "strbuf.h" + +extern const char perf_version_string[]; +extern const char perf_usage_string[]; +extern const char perf_more_info_string[]; + +extern void list_common_cmds_help(void); +extern const char *help_unknown_cmd(const char *cmd); +extern void prune_packed_objects(int); +extern int read_line_with_nul(char *buf, int size, FILE *file); +extern int check_pager_config(const char *cmd); + +extern 
int cmd_top(int argc, const char **argv, const char *prefix); +#endif diff --git a/Documentation/perf_counter/cache.h b/Documentation/perf_counter/cache.h new file mode 100644 index 00000000000..dc085640a57 --- /dev/null +++ b/Documentation/perf_counter/cache.h @@ -0,0 +1,97 @@ +#ifndef CACHE_H +#define CACHE_H + +#include "util.h" +#include "strbuf.h" + +#define PERF_DIR_ENVIRONMENT "PERF_DIR" +#define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE" +#define DEFAULT_PERF_DIR_ENVIRONMENT ".perf" +#define DB_ENVIRONMENT "PERF_OBJECT_DIRECTORY" +#define INDEX_ENVIRONMENT "PERF_INDEX_FILE" +#define GRAFT_ENVIRONMENT "PERF_GRAFT_FILE" +#define TEMPLATE_DIR_ENVIRONMENT "PERF_TEMPLATE_DIR" +#define CONFIG_ENVIRONMENT "PERF_CONFIG" +#define EXEC_PATH_ENVIRONMENT "PERF_EXEC_PATH" +#define CEILING_DIRECTORIES_ENVIRONMENT "PERF_CEILING_DIRECTORIES" +#define PERFATTRIBUTES_FILE ".perfattributes" +#define INFOATTRIBUTES_FILE "info/attributes" +#define ATTRIBUTE_MACRO_PREFIX "[attr]" + +typedef int (*config_fn_t)(const char *, const char *, void *); +extern int perf_default_config(const char *, const char *, void *); +extern int perf_config_from_file(config_fn_t fn, const char *, void *); +extern int perf_config(config_fn_t fn, void *); +extern int perf_parse_ulong(const char *, unsigned long *); +extern int perf_config_int(const char *, const char *); +extern unsigned long perf_config_ulong(const char *, const char *); +extern int perf_config_bool_or_int(const char *, const char *, int *); +extern int perf_config_bool(const char *, const char *); +extern int perf_config_string(const char **, const char *, const char *); +extern int perf_config_set(const char *, const char *); +extern int perf_config_set_multivar(const char *, const char *, const char *, int); +extern int perf_config_rename_section(const char *, const char *); +extern const char *perf_etc_perfconfig(void); +extern int check_repository_format_version(const char *var, const char *value, void *cb); +extern int perf_config_system(void); +extern int perf_config_global(void); +extern int config_error_nonbool(const char *); +extern const char *config_exclusive_filename; + +#define MAX_PERFNAME (1000) +extern char perf_default_email[MAX_PERFNAME]; +extern char perf_default_name[MAX_PERFNAME]; +extern int user_ident_explicitly_given; + +extern const char *perf_log_output_encoding; +extern const char *perf_mailmap_file; + +/* IO helper functions */ +extern void maybe_flush_or_die(FILE *, const char *); +extern int copy_fd(int ifd, int ofd); +extern int copy_file(const char *dst, const char *src, int mode); +extern ssize_t read_in_full(int fd, void *buf, size_t count); +extern ssize_t write_in_full(int fd, const void *buf, size_t count); +extern void write_or_die(int fd, const void *buf, size_t count); +extern int write_or_whine(int fd, const void *buf, size_t count, const char *msg); +extern int write_or_whine_pipe(int fd, const void *buf, size_t count, const char *msg); +extern void fsync_or_die(int fd, const char *); + +/* pager.c */ +extern void setup_pager(void); +extern const char *pager_program; +extern int pager_in_use(void); +extern int pager_use_color; + +extern const char *editor_program; +extern const char *excludes_file; + +char *alias_lookup(const char *alias); +int split_cmdline(char *cmdline, const char ***argv); + +#define alloc_nr(x) (((x)+16)*3/2) + +/* + * Realloc the buffer pointed at by variable 'x' so that it can hold + * at least 'nr' entries; the number of entries currently allocated + * is 'alloc', using the standard growing 
factor alloc_nr() macro. + * + * DO NOT USE any expression with side-effect for 'x' or 'alloc'. + */ +#define ALLOC_GROW(x, nr, alloc) \ + do { \ + if ((nr) > alloc) { \ + if (alloc_nr(alloc) < (nr)) \ + alloc = (nr); \ + else \ + alloc = alloc_nr(alloc); \ + x = xrealloc((x), alloc * sizeof(*(x))); \ + } \ + } while(0) + + +static inline int is_absolute_path(const char *path) +{ + return path[0] == '/'; +} +#endif /* CACHE_H */ diff --git a/Documentation/perf_counter/command-list.txt b/Documentation/perf_counter/command-list.txt new file mode 100644 index 00000000000..1eab3659b20 --- /dev/null +++ b/Documentation/perf_counter/command-list.txt @@ -0,0 +1,4 @@ +# List of known perf commands. +# command name category [deprecated] [common] +perf-top mainporcelain common + diff --git a/Documentation/perf_counter/config.c b/Documentation/perf_counter/config.c new file mode 100644 index 00000000000..672d5395933 --- /dev/null +++ b/Documentation/perf_counter/config.c @@ -0,0 +1,966 @@ +/* + * GIT - The information manager from hell + * + * Copyright (C) Linus Torvalds, 2005 + * Copyright (C) Johannes Schindelin, 2005 + * + */ +#include "util.h" +#include "cache.h" +#include "exec_cmd.h" + +#define MAXNAME (256) + +static FILE *config_file; +static const char *config_file_name; +static int config_linenr; +static int config_file_eof; +static int zlib_compression_seen; + +const char *config_exclusive_filename = NULL; + +static int get_next_char(void) +{ + int c; + FILE *f; + + c = '\n'; + if ((f = config_file) != NULL) { + c = fgetc(f); + if (c == '\r') { + /* DOS like systems */ + c = fgetc(f); + if (c != '\n') { + ungetc(c, f); + c = '\r'; + } + } + if (c == '\n') + config_linenr++; + if (c == EOF) { + config_file_eof = 1; + c = '\n'; + } + } + return c; +} + +static char *parse_value(void) +{ + static char value[1024]; + int quote = 0, comment = 0, len = 0, space = 0; + + for (;;) { + int c = get_next_char(); + if (len >= sizeof(value) - 1) + return NULL; + if (c == '\n') { + if (quote) + return NULL; + value[len] = 0; + return value; + } + if (comment) + continue; + if (isspace(c) && !quote) { + space = 1; + continue; + } + if (!quote) { + if (c == ';' || c == '#') { + comment = 1; + continue; + } + } + if (space) { + if (len) + value[len++] = ' '; + space = 0; + } + if (c == '\\') { + c = get_next_char(); + switch (c) { + case '\n': + continue; + case 't': + c = '\t'; + break; + case 'b': + c = '\b'; + break; + case 'n': + c = '\n'; + break; + /* Some characters escape as themselves */ + case '\\': case '"': + break; + /* Reject unknown escape sequences */ + default: + return NULL; + } + value[len++] = c; + continue; + } + if (c == '"') { + quote = 1-quote; + continue; + } + value[len++] = c; + } +} + +static inline int iskeychar(int c) +{ + return isalnum(c) || c == '-'; +} + +static int get_value(config_fn_t fn, void *data, char *name, unsigned int len) +{ + int c; + char *value; + + /* Get the full name */ + for (;;) { + c = get_next_char(); + if (config_file_eof) + break; + if (!iskeychar(c)) + break; + name[len++] = tolower(c); + if (len >= MAXNAME) + return -1; + } + name[len] = 0; + while (c == ' ' || c == '\t') + c = get_next_char(); + + value = NULL; + if (c != '\n') { + if (c != '=') + return -1; + value = parse_value(); + if (!value) + return -1; + } + return fn(name, value, data); +} + +static int get_extended_base_var(char *name, int baselen, int c) +{ + do { + if (c == '\n') + return -1; + c = get_next_char(); + } while (isspace(c)); + + /* We require the format to be '[base 
"extension"]' */ + if (c != '"') + return -1; + name[baselen++] = '.'; + + for (;;) { + int c = get_next_char(); + if (c == '\n') + return -1; + if (c == '"') + break; + if (c == '\\') { + c = get_next_char(); + if (c == '\n') + return -1; + } + name[baselen++] = c; + if (baselen > MAXNAME / 2) + return -1; + } + + /* Final ']' */ + if (get_next_char() != ']') + return -1; + return baselen; +} + +static int get_base_var(char *name) +{ + int baselen = 0; + + for (;;) { + int c = get_next_char(); + if (config_file_eof) + return -1; + if (c == ']') + return baselen; + if (isspace(c)) + return get_extended_base_var(name, baselen, c); + if (!iskeychar(c) && c != '.') + return -1; + if (baselen > MAXNAME / 2) + return -1; + name[baselen++] = tolower(c); + } +} + +static int perf_parse_file(config_fn_t fn, void *data) +{ + int comment = 0; + int baselen = 0; + static char var[MAXNAME]; + + /* U+FEFF Byte Order Mark in UTF8 */ + static const unsigned char *utf8_bom = (unsigned char *) "\xef\xbb\xbf"; + const unsigned char *bomptr = utf8_bom; + + for (;;) { + int c = get_next_char(); + if (bomptr && *bomptr) { + /* We are at the file beginning; skip UTF8-encoded BOM + * if present. Sane editors won't put this in on their + * own, but e.g. Windows Notepad will do it happily. */ + if ((unsigned char) c == *bomptr) { + bomptr++; + continue; + } else { + /* Do not tolerate partial BOM. */ + if (bomptr != utf8_bom) + break; + /* No BOM at file beginning. Cool. */ + bomptr = NULL; + } + } + if (c == '\n') { + if (config_file_eof) + return 0; + comment = 0; + continue; + } + if (comment || isspace(c)) + continue; + if (c == '#' || c == ';') { + comment = 1; + continue; + } + if (c == '[') { + baselen = get_base_var(var); + if (baselen <= 0) + break; + var[baselen++] = '.'; + var[baselen] = 0; + continue; + } + if (!isalpha(c)) + break; + var[baselen] = tolower(c); + if (get_value(fn, data, var, baselen+1) < 0) + break; + } + die("bad config file line %d in %s", config_linenr, config_file_name); +} + +static int parse_unit_factor(const char *end, unsigned long *val) +{ + if (!*end) + return 1; + else if (!strcasecmp(end, "k")) { + *val *= 1024; + return 1; + } + else if (!strcasecmp(end, "m")) { + *val *= 1024 * 1024; + return 1; + } + else if (!strcasecmp(end, "g")) { + *val *= 1024 * 1024 * 1024; + return 1; + } + return 0; +} + +static int perf_parse_long(const char *value, long *ret) +{ + if (value && *value) { + char *end; + long val = strtol(value, &end, 0); + unsigned long factor = 1; + if (!parse_unit_factor(end, &factor)) + return 0; + *ret = val * factor; + return 1; + } + return 0; +} + +int perf_parse_ulong(const char *value, unsigned long *ret) +{ + if (value && *value) { + char *end; + unsigned long val = strtoul(value, &end, 0); + if (!parse_unit_factor(end, &val)) + return 0; + *ret = val; + return 1; + } + return 0; +} + +static void die_bad_config(const char *name) +{ + if (config_file_name) + die("bad config value for '%s' in %s", name, config_file_name); + die("bad config value for '%s'", name); +} + +int perf_config_int(const char *name, const char *value) +{ + long ret = 0; + if (!perf_parse_long(value, &ret)) + die_bad_config(name); + return ret; +} + +unsigned long perf_config_ulong(const char *name, const char *value) +{ + unsigned long ret; + if (!perf_parse_ulong(value, &ret)) + die_bad_config(name); + return ret; +} + +int perf_config_bool_or_int(const char *name, const char *value, int *is_bool) +{ + *is_bool = 1; + if (!value) + return 1; + if (!*value) + return 0; + if 
(!strcasecmp(value, "true") || !strcasecmp(value, "yes") || !strcasecmp(value, "on")) + return 1; + if (!strcasecmp(value, "false") || !strcasecmp(value, "no") || !strcasecmp(value, "off")) + return 0; + *is_bool = 0; + return perf_config_int(name, value); +} + +int perf_config_bool(const char *name, const char *value) +{ + int discard; + return !!perf_config_bool_or_int(name, value, &discard); +} + +int perf_config_string(const char **dest, const char *var, const char *value) +{ + if (!value) + return config_error_nonbool(var); + *dest = strdup(value); + return 0; +} + +static int perf_default_core_config(const char *var, const char *value) +{ + /* Add other config variables here and to Documentation/config.txt. */ + return 0; +} + +int perf_default_config(const char *var, const char *value, void *dummy) +{ + if (!prefixcmp(var, "core.")) + return perf_default_core_config(var, value); + + /* Add other config variables here and to Documentation/config.txt. */ + return 0; +} + +int perf_config_from_file(config_fn_t fn, const char *filename, void *data) +{ + int ret; + FILE *f = fopen(filename, "r"); + + ret = -1; + if (f) { + config_file = f; + config_file_name = filename; + config_linenr = 1; + config_file_eof = 0; + ret = perf_parse_file(fn, data); + fclose(f); + config_file_name = NULL; + } + return ret; +} + +const char *perf_etc_perfconfig(void) +{ + static const char *system_wide; + if (!system_wide) + system_wide = system_path(ETC_PERFCONFIG); + return system_wide; +} + +static int perf_env_bool(const char *k, int def) +{ + const char *v = getenv(k); + return v ? perf_config_bool(k, v) : def; +} + +int perf_config_system(void) +{ + return !perf_env_bool("PERF_CONFIG_NOSYSTEM", 0); +} + +int perf_config_global(void) +{ + return !perf_env_bool("PERF_CONFIG_NOGLOBAL", 0); +} + +int perf_config(config_fn_t fn, void *data) +{ + int ret = 0, found = 0; + char *repo_config = NULL; + const char *home = NULL; + + /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */ + if (config_exclusive_filename) + return perf_config_from_file(fn, config_exclusive_filename, data); + if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK)) { + ret += perf_config_from_file(fn, perf_etc_perfconfig(), + data); + found += 1; + } + + home = getenv("HOME"); + if (perf_config_global() && home) { + char *user_config = strdup(mkpath("%s/.perfconfig", home)); + if (!access(user_config, R_OK)) { + ret += perf_config_from_file(fn, user_config, data); + found += 1; + } + free(user_config); + } + + repo_config = perf_pathdup("config"); + if (!access(repo_config, R_OK)) { + ret += perf_config_from_file(fn, repo_config, data); + found += 1; + } + free(repo_config); + if (found == 0) + return -1; + return ret; +} + +/* + * Find all the stuff for perf_config_set() below. 
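+ * + * store_aux() below is run over the existing file as a config + * callback: store.state tracks whether the scan is before the wanted + * section, inside it, or past a match, and store.offset[] collects + * the ftell() positions where the replacement has to be spliced in.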
+ */ + +#define MAX_MATCHES 512 + +static struct { + int baselen; + char* key; + int do_not_match; + regex_t* value_regex; + int multi_replace; + size_t offset[MAX_MATCHES]; + enum { START, SECTION_SEEN, SECTION_END_SEEN, KEY_SEEN } state; + int seen; +} store; + +static int matches(const char* key, const char* value) +{ + return !strcmp(key, store.key) && + (store.value_regex == NULL || + (store.do_not_match ^ + !regexec(store.value_regex, value, 0, NULL, 0))); +} + +static int store_aux(const char* key, const char* value, void *cb) +{ + const char *ep; + size_t section_len; + + switch (store.state) { + case KEY_SEEN: + if (matches(key, value)) { + if (store.seen == 1 && store.multi_replace == 0) { + warning("%s has multiple values", key); + } else if (store.seen >= MAX_MATCHES) { + error("too many matches for %s", key); + return 1; + } + + store.offset[store.seen] = ftell(config_file); + store.seen++; + } + break; + case SECTION_SEEN: + /* + * What we are looking for is in store.key (both + * section and var), and its section part is baselen + * long. We found key (again, both section and var). + * We would want to know if this key is in the same + * section as what we are looking for. We already + * know we are in the same section as what should + * hold store.key. + */ + ep = strrchr(key, '.'); + section_len = ep - key; + + if ((section_len != store.baselen) || + memcmp(key, store.key, section_len+1)) { + store.state = SECTION_END_SEEN; + break; + } + + /* + * Do not increment matches: this is no match, but we + * just made sure we are in the desired section. + */ + store.offset[store.seen] = ftell(config_file); + /* fallthru */ + case SECTION_END_SEEN: + case START: + if (matches(key, value)) { + store.offset[store.seen] = ftell(config_file); + store.state = KEY_SEEN; + store.seen++; + } else { + if (strrchr(key, '.') - key == store.baselen && + !strncmp(key, store.key, store.baselen)) { + store.state = SECTION_SEEN; + store.offset[store.seen] = ftell(config_file); + } + } + } + return 0; +} + +static int write_error(const char *filename) +{ + error("failed to write new configuration file %s", filename); + + /* Same error code as "failed to rename". */ + return 4; +} + +static int store_write_section(int fd, const char* key) +{ + const char *dot; + int i, success; + struct strbuf sb = STRBUF_INIT; + + dot = memchr(key, '.', store.baselen); + if (dot) { + strbuf_addf(&sb, "[%.*s \"", (int)(dot - key), key); + for (i = dot - key + 1; i < store.baselen; i++) { + if (key[i] == '"' || key[i] == '\\') + strbuf_addch(&sb, '\\'); + strbuf_addch(&sb, key[i]); + } + strbuf_addstr(&sb, "\"]\n"); + } else { + strbuf_addf(&sb, "[%.*s]\n", store.baselen, key); + } + + success = write_in_full(fd, sb.buf, sb.len) == sb.len; + strbuf_release(&sb); + + return success; +} + +static int store_write_pair(int fd, const char* key, const char* value) +{ + int i, success; + int length = strlen(key + store.baselen + 1); + const char *quote = ""; + struct strbuf sb = STRBUF_INIT; + + /* + * Check to see if the value needs to be surrounded with a dq pair. + * Note that problematic characters are always backslash-quoted; this + * check is about not losing leading or trailing SP and strings that + * follow beginning-of-comment characters (i.e. ';' and '#') by the + * configuration parser. 
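+ *
+ * For example, the value " foo;bar" is emitted as
+ *
+ *	key = " foo;bar"
+ *
+ * so the leading SP survives a round-trip and the ';' is not taken as
+ * the start of a comment by the parser.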
+ */ + if (value[0] == ' ') + quote = "\""; + for (i = 0; value[i]; i++) + if (value[i] == ';' || value[i] == '#') + quote = "\""; + if (i && value[i - 1] == ' ') + quote = "\""; + + strbuf_addf(&sb, "\t%.*s = %s", + length, key + store.baselen + 1, quote); + + for (i = 0; value[i]; i++) + switch (value[i]) { + case '\n': + strbuf_addstr(&sb, "\\n"); + break; + case '\t': + strbuf_addstr(&sb, "\\t"); + break; + case '"': + case '\\': + strbuf_addch(&sb, '\\'); + default: + strbuf_addch(&sb, value[i]); + break; + } + strbuf_addf(&sb, "%s\n", quote); + + success = write_in_full(fd, sb.buf, sb.len) == sb.len; + strbuf_release(&sb); + + return success; +} + +static ssize_t find_beginning_of_line(const char* contents, size_t size, + size_t offset_, int* found_bracket) +{ + size_t equal_offset = size, bracket_offset = size; + ssize_t offset; + +contline: + for (offset = offset_-2; offset > 0 + && contents[offset] != '\n'; offset--) + switch (contents[offset]) { + case '=': equal_offset = offset; break; + case ']': bracket_offset = offset; break; + } + if (offset > 0 && contents[offset-1] == '\\') { + offset_ = offset; + goto contline; + } + if (bracket_offset < equal_offset) { + *found_bracket = 1; + offset = bracket_offset+1; + } else + offset++; + + return offset; +} + +int perf_config_set(const char* key, const char* value) +{ + return perf_config_set_multivar(key, value, NULL, 0); +} + +/* + * If value==NULL, unset in (remove from) config, + * if value_regex!=NULL, disregard key/value pairs where value does not match. + * if multi_replace==0, nothing, or only one matching key/value is replaced, + * else all matching key/values (regardless how many) are removed, + * before the new pair is written. + * + * Returns 0 on success. + * + * This function does this: + * + * - it locks the config file by creating ".perf/config.lock" + * + * - it then parses the config using store_aux() as validator to find + * the position on the key/value pair to replace. If it is to be unset, + * it must be found exactly once. + * + * - the config file is mmap()ed and the part before the match (if any) is + * written to the lock file, then the changed part and the rest. + * + * - the config file is removed and the lock file rename()d to it. + * + */ +int perf_config_set_multivar(const char* key, const char* value, + const char* value_regex, int multi_replace) +{ + int i, dot; + int fd = -1, in_fd; + int ret; + char* config_filename; + const char* last_dot = strrchr(key, '.'); + + if (config_exclusive_filename) + config_filename = strdup(config_exclusive_filename); + else + config_filename = perf_pathdup("config"); + + /* + * Since "key" actually contains the section name and the real + * key name separated by a dot, we have to know where the dot is. + */ + + if (last_dot == NULL) { + error("key does not contain a section: %s", key); + ret = 2; + goto out_free; + } + store.baselen = last_dot - key; + + store.multi_replace = multi_replace; + + /* + * Validate the key and while at it, lower case it for matching. + */ + store.key = malloc(strlen(key) + 1); + dot = 0; + for (i = 0; key[i]; i++) { + unsigned char c = key[i]; + if (c == '.') + dot = 1; + /* Leave the extended basename untouched.. 
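+		 * e.g. "branch.Foo.Remote" becomes "branch.Foo.remote":
+		 * the section and variable names are lowercased, while the
+		 * subsection "Foo" is matched case-sensitively and kept
+		 * as-is.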
*/ + if (!dot || i > store.baselen) { + if (!iskeychar(c) || (i == store.baselen+1 && !isalpha(c))) { + error("invalid key: %s", key); + free(store.key); + ret = 1; + goto out_free; + } + c = tolower(c); + } else if (c == '\n') { + error("invalid key (newline): %s", key); + free(store.key); + ret = 1; + goto out_free; + } + store.key[i] = c; + } + store.key[i] = 0; + + /* + * If .perf/config does not exist yet, write a minimal version. + */ + in_fd = open(config_filename, O_RDONLY); + if ( in_fd < 0 ) { + free(store.key); + + if ( ENOENT != errno ) { + error("opening %s: %s", config_filename, + strerror(errno)); + ret = 3; /* same as "invalid config file" */ + goto out_free; + } + /* if nothing to unset, error out */ + if (value == NULL) { + ret = 5; + goto out_free; + } + + store.key = (char*)key; + if (!store_write_section(fd, key) || + !store_write_pair(fd, key, value)) + goto write_err_out; + } else { + struct stat st; + char* contents; + size_t contents_sz, copy_begin, copy_end; + int i, new_line = 0; + + if (value_regex == NULL) + store.value_regex = NULL; + else { + if (value_regex[0] == '!') { + store.do_not_match = 1; + value_regex++; + } else + store.do_not_match = 0; + + store.value_regex = (regex_t*)malloc(sizeof(regex_t)); + if (regcomp(store.value_regex, value_regex, + REG_EXTENDED)) { + error("invalid pattern: %s", value_regex); + free(store.value_regex); + ret = 6; + goto out_free; + } + } + + store.offset[0] = 0; + store.state = START; + store.seen = 0; + + /* + * After this, store.offset will contain the *end* offset + * of the last match, or remain at 0 if no match was found. + * As a side effect, we make sure to transform only a valid + * existing config file. + */ + if (perf_config_from_file(store_aux, config_filename, NULL)) { + error("invalid config file %s", config_filename); + free(store.key); + if (store.value_regex != NULL) { + regfree(store.value_regex); + free(store.value_regex); + } + ret = 3; + goto out_free; + } + + free(store.key); + if (store.value_regex != NULL) { + regfree(store.value_regex); + free(store.value_regex); + } + + /* if nothing to unset, or too many matches, error out */ + if ((store.seen == 0 && value == NULL) || + (store.seen > 1 && multi_replace == 0)) { + ret = 5; + goto out_free; + } + + fstat(in_fd, &st); + contents_sz = xsize_t(st.st_size); + contents = mmap(NULL, contents_sz, PROT_READ, + MAP_PRIVATE, in_fd, 0); + close(in_fd); + + if (store.seen == 0) + store.seen = 1; + + for (i = 0, copy_begin = 0; i < store.seen; i++) { + if (store.offset[i] == 0) { + store.offset[i] = copy_end = contents_sz; + } else if (store.state != KEY_SEEN) { + copy_end = store.offset[i]; + } else + copy_end = find_beginning_of_line( + contents, contents_sz, + store.offset[i]-2, &new_line); + + if (copy_end > 0 && contents[copy_end-1] != '\n') + new_line = 1; + + /* write the first part of the config */ + if (copy_end > copy_begin) { + if (write_in_full(fd, contents + copy_begin, + copy_end - copy_begin) < + copy_end - copy_begin) + goto write_err_out; + if (new_line && + write_in_full(fd, "\n", 1) != 1) + goto write_err_out; + } + copy_begin = store.offset[i]; + } + + /* write the pair (value == NULL means unset) */ + if (value != NULL) { + if (store.state == START) { + if (!store_write_section(fd, key)) + goto write_err_out; + } + if (!store_write_pair(fd, key, value)) + goto write_err_out; + } + + /* write the rest of the config */ + if (copy_begin < contents_sz) + if (write_in_full(fd, contents + copy_begin, + contents_sz - copy_begin) < + contents_sz - 
copy_begin)
+				goto write_err_out;
+
+		munmap(contents, contents_sz);
+	}
+
+	ret = 0;
+
+out_free:
+	free(config_filename);
+	return ret;
+
+write_err_out:
+	ret = write_error(config_filename);
+	goto out_free;
+
+}
+
+static int section_name_match (const char *buf, const char *name)
+{
+	int i = 0, j = 0, dot = 0;
+	for (; buf[i] && buf[i] != ']'; i++) {
+		if (!dot && isspace(buf[i])) {
+			dot = 1;
+			if (name[j++] != '.')
+				break;
+			for (i++; isspace(buf[i]); i++)
+				; /* do nothing */
+			if (buf[i] != '"')
+				break;
+			continue;
+		}
+		if (buf[i] == '\\' && dot)
+			i++;
+		else if (buf[i] == '"' && dot) {
+			for (i++; isspace(buf[i]); i++)
+				; /* do nothing */
+			break;
+		}
+		if (buf[i] != name[j++])
+			break;
+	}
+	return (buf[i] == ']' && name[j] == 0);
+}
+
+/* if new_name == NULL, the section is removed instead */
+int perf_config_rename_section(const char *old_name, const char *new_name)
+{
+	int ret = 0, remove = 0;
+	char *config_filename;
+	int out_fd = -1;	/* FIXME: the lock-file machinery was not
+				 * carried over from git's config.c, so no
+				 * output fd is ever opened and we always
+				 * bail out below. */
+	char buf[1024];
+
+	if (config_exclusive_filename)
+		config_filename = strdup(config_exclusive_filename);
+	else
+		config_filename = perf_pathdup("config");
+	if (out_fd < 0) {
+		ret = error("could not lock config file %s", config_filename);
+		goto out;
+	}
+
+	if (!(config_file = fopen(config_filename, "rb"))) {
+		/* no config file means nothing to rename, no error */
+		goto unlock_and_out;
+	}
+
+	while (fgets(buf, sizeof(buf), config_file)) {
+		int i;
+		int length;
+		for (i = 0; buf[i] && isspace(buf[i]); i++)
+			; /* do nothing */
+		if (buf[i] == '[') {
+			/* it's a section */
+			if (section_name_match (&buf[i+1], old_name)) {
+				ret++;
+				if (new_name == NULL) {
+					remove = 1;
+					continue;
+				}
+				store.baselen = strlen(new_name);
+				if (!store_write_section(out_fd, new_name)) {
+					goto out;
+				}
+				continue;
+			}
+			remove = 0;
+		}
+		if (remove)
+			continue;
+		length = strlen(buf);
+		if (write_in_full(out_fd, buf, length) != length) {
+			goto out;
+		}
+	}
+	fclose(config_file);
+ unlock_and_out:
+ out:
+	free(config_filename);
+	return ret;
+}
+
+/*
+ * Call this to report error for your variable that should not
+ * get a boolean value (i.e. "[my] var" means "true").
+ */
+int config_error_nonbool(const char *var)
+{
+	return error("Missing value for '%s'", var);
+}
diff --git a/Documentation/perf_counter/ctype.c b/Documentation/perf_counter/ctype.c
new file mode 100644
index 00000000000..b90ec004f29
--- /dev/null
+++ b/Documentation/perf_counter/ctype.c
@@ -0,0 +1,26 @@
+/*
+ * Sane locale-independent, ASCII ctype.
+ *
+ * No surprises, and works with signed and unsigned chars.
+ */
+#include "cache.h"
+
+enum {
+	S = GIT_SPACE,
+	A = GIT_ALPHA,
+	D = GIT_DIGIT,
+	G = GIT_GLOB_SPECIAL,	/* *, ?, [, \\ */
+	R = GIT_REGEX_SPECIAL,	/* $, (, ), +, ., ^, {, | */
+};
+
+unsigned char sane_ctype[256] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0, S, S, 0, 0, S, 0, 0,		/*   0.. 15 */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,		/*  16.. 31 */
+	S, 0, 0, 0, R, 0, 0, 0, R, R, G, R, 0, 0, R, 0,		/*  32.. 47 */
+	D, D, D, D, D, D, D, D, D, D, 0, 0, 0, 0, 0, G,		/*  48.. 63 */
+	0, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A,		/*  64.. 79 */
+	A, A, A, A, A, A, A, A, A, A, A, G, G, 0, R, 0,		/*  80.. 95 */
+	0, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A,		/*  96..111 */
+	A, A, A, A, A, A, A, A, A, A, A, R, R, 0, 0, 0,		/* 112..127 */
+	/* Nothing in the 128..
range */ +}; diff --git a/Documentation/perf_counter/exec_cmd.c b/Documentation/perf_counter/exec_cmd.c new file mode 100644 index 00000000000..d3929226315 --- /dev/null +++ b/Documentation/perf_counter/exec_cmd.c @@ -0,0 +1,165 @@ +#include "cache.h" +#include "exec_cmd.h" +#include "quote.h" +#define MAX_ARGS 32 + +extern char **environ; +static const char *argv_exec_path; +static const char *argv0_path; + +const char *system_path(const char *path) +{ +#ifdef RUNTIME_PREFIX + static const char *prefix; +#else + static const char *prefix = PREFIX; +#endif + struct strbuf d = STRBUF_INIT; + + if (is_absolute_path(path)) + return path; + +#ifdef RUNTIME_PREFIX + assert(argv0_path); + assert(is_absolute_path(argv0_path)); + + if (!prefix && + !(prefix = strip_path_suffix(argv0_path, PERF_EXEC_PATH)) && + !(prefix = strip_path_suffix(argv0_path, BINDIR)) && + !(prefix = strip_path_suffix(argv0_path, "perf"))) { + prefix = PREFIX; + fprintf(stderr, "RUNTIME_PREFIX requested, " + "but prefix computation failed. " + "Using static fallback '%s'.\n", prefix); + } +#endif + + strbuf_addf(&d, "%s/%s", prefix, path); + path = strbuf_detach(&d, NULL); + return path; +} + +const char *perf_extract_argv0_path(const char *argv0) +{ + const char *slash; + + if (!argv0 || !*argv0) + return NULL; + slash = argv0 + strlen(argv0); + + while (argv0 <= slash && !is_dir_sep(*slash)) + slash--; + + if (slash >= argv0) { + argv0_path = strndup(argv0, slash - argv0); + return slash + 1; + } + + return argv0; +} + +void perf_set_argv_exec_path(const char *exec_path) +{ + argv_exec_path = exec_path; + /* + * Propagate this setting to external programs. + */ + setenv(EXEC_PATH_ENVIRONMENT, exec_path, 1); +} + + +/* Returns the highest-priority, location to look for perf programs. */ +const char *perf_exec_path(void) +{ + const char *env; + + if (argv_exec_path) + return argv_exec_path; + + env = getenv(EXEC_PATH_ENVIRONMENT); + if (env && *env) { + return env; + } + + return system_path(PERF_EXEC_PATH); +} + +static void add_path(struct strbuf *out, const char *path) +{ + if (path && *path) { + if (is_absolute_path(path)) + strbuf_addstr(out, path); + else + strbuf_addstr(out, make_nonrelative_path(path)); + + strbuf_addch(out, PATH_SEP); + } +} + +void setup_path(void) +{ + const char *old_path = getenv("PATH"); + struct strbuf new_path = STRBUF_INIT; + + add_path(&new_path, perf_exec_path()); + add_path(&new_path, argv0_path); + + if (old_path) + strbuf_addstr(&new_path, old_path); + else + strbuf_addstr(&new_path, "/usr/local/bin:/usr/bin:/bin"); + + setenv("PATH", new_path.buf, 1); + + strbuf_release(&new_path); +} + +const char **prepare_perf_cmd(const char **argv) +{ + int argc; + const char **nargv; + + for (argc = 0; argv[argc]; argc++) + ; /* just counting */ + nargv = malloc(sizeof(*nargv) * (argc + 2)); + + nargv[0] = "perf"; + for (argc = 0; argv[argc]; argc++) + nargv[argc + 1] = argv[argc]; + nargv[argc + 1] = NULL; + return nargv; +} + +int execv_perf_cmd(const char **argv) { + const char **nargv = prepare_perf_cmd(argv); + + /* execvp() can only ever return if it fails */ + execvp("perf", (char **)nargv); + + free(nargv); + return -1; +} + + +int execl_perf_cmd(const char *cmd,...) 
+{
+	int argc;
+	const char *argv[MAX_ARGS + 1];
+	const char *arg;
+	va_list param;
+
+	va_start(param, cmd);
+	argv[0] = cmd;
+	argc = 1;
+	while (argc < MAX_ARGS) {
+		arg = argv[argc++] = va_arg(param, char *);
+		if (!arg)
+			break;
+	}
+	va_end(param);
+	if (MAX_ARGS <= argc)
+		return error("too many args to run %s", cmd);
+
+	argv[argc] = NULL;
+	return execv_perf_cmd(argv);
+}
diff --git a/Documentation/perf_counter/exec_cmd.h b/Documentation/perf_counter/exec_cmd.h
new file mode 100644
index 00000000000..effe25eb154
--- /dev/null
+++ b/Documentation/perf_counter/exec_cmd.h
@@ -0,0 +1,13 @@
+#ifndef PERF_EXEC_CMD_H
+#define PERF_EXEC_CMD_H
+
+extern void perf_set_argv_exec_path(const char *exec_path);
+extern const char *perf_extract_argv0_path(const char *path);
+extern const char *perf_exec_path(void);
+extern void setup_path(void);
+extern const char **prepare_perf_cmd(const char **argv);
+extern int execv_perf_cmd(const char **argv); /* NULL terminated */
+extern int execl_perf_cmd(const char *cmd, ...);
+extern const char *system_path(const char *path);
+
+#endif /* PERF_EXEC_CMD_H */
diff --git a/Documentation/perf_counter/generate-cmdlist.sh b/Documentation/perf_counter/generate-cmdlist.sh
new file mode 100755
index 00000000000..75c68d948fd
--- /dev/null
+++ b/Documentation/perf_counter/generate-cmdlist.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+echo "/* Automatically generated by $0 */
+struct cmdname_help
+{
+    char name[16];
+    char help[80];
+};
+
+static struct cmdname_help common_cmds[] = {"
+
+sed -n -e 's/^perf-\([^ ]*\)[ ].* common.*/\1/p' command-list.txt |
+sort |
+while read cmd
+do
+     sed -n '
+     /^NAME/,/perf-'"$cmd"'/H
+     ${
+            x
+            s/.*perf-'"$cmd"' - \(.*\)/  {"'"$cmd"'", "\1"},/
+            p
+     }' "Documentation/perf-$cmd.txt"
+done
+echo "};"
diff --git a/Documentation/perf_counter/help.c b/Documentation/perf_counter/help.c
new file mode 100644
index 00000000000..ec011672166
--- /dev/null
+++ b/Documentation/perf_counter/help.c
@@ -0,0 +1,366 @@
+#include "cache.h"
+#include "builtin.h"
+#include "exec_cmd.h"
+#include "levenshtein.h"
+#include "help.h"
+
+/* most GUI terminals set COLUMNS (although some don't export it) */
+static int term_columns(void)
+{
+	char *col_string = getenv("COLUMNS");
+	int n_cols;
+
+	if (col_string && (n_cols = atoi(col_string)) > 0)
+		return n_cols;
+
+#ifdef TIOCGWINSZ
+	{
+		struct winsize ws;
+		if (!ioctl(1, TIOCGWINSZ, &ws)) {
+			if (ws.ws_col)
+				return ws.ws_col;
+		}
+	}
+#endif
+
+	return 80;
+}
+
+void add_cmdname(struct cmdnames *cmds, const char *name, int len)
+{
+	struct cmdname *ent = malloc(sizeof(*ent) + len + 1);
+
+	ent->len = len;
+	memcpy(ent->name, name, len);
+	ent->name[len] = 0;
+
+	ALLOC_GROW(cmds->names, cmds->cnt + 1, cmds->alloc);
+	cmds->names[cmds->cnt++] = ent;
+}
+
+static void clean_cmdnames(struct cmdnames *cmds)
+{
+	int i;
+	for (i = 0; i < cmds->cnt; ++i)
+		free(cmds->names[i]);
+	free(cmds->names);
+	cmds->cnt = 0;
+	cmds->alloc = 0;
+}
+
+static int cmdname_compare(const void *a_, const void *b_)
+{
+	struct cmdname *a = *(struct cmdname **)a_;
+	struct cmdname *b = *(struct cmdname **)b_;
+	return strcmp(a->name, b->name);
+}
+
+static void uniq(struct cmdnames *cmds)
+{
+	int i, j;
+
+	if (!cmds->cnt)
+		return;
+
+	for (i = j = 1; i < cmds->cnt; i++)
+		if (strcmp(cmds->names[i]->name, cmds->names[i-1]->name))
+			cmds->names[j++] = cmds->names[i];
+
+	cmds->cnt = j;
+}
+
+void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
+{
+	int ci, cj, ei;
+	int cmp;
+
+	ci = cj = ei = 0;
+	while (ci < cmds->cnt && ei <
excludes->cnt) { + cmp = strcmp(cmds->names[ci]->name, excludes->names[ei]->name); + if (cmp < 0) + cmds->names[cj++] = cmds->names[ci++]; + else if (cmp == 0) + ci++, ei++; + else if (cmp > 0) + ei++; + } + + while (ci < cmds->cnt) + cmds->names[cj++] = cmds->names[ci++]; + + cmds->cnt = cj; +} + +static void pretty_print_string_list(struct cmdnames *cmds, int longest) +{ + int cols = 1, rows; + int space = longest + 1; /* min 1 SP between words */ + int max_cols = term_columns() - 1; /* don't print *on* the edge */ + int i, j; + + if (space < max_cols) + cols = max_cols / space; + rows = (cmds->cnt + cols - 1) / cols; + + for (i = 0; i < rows; i++) { + printf(" "); + + for (j = 0; j < cols; j++) { + int n = j * rows + i; + int size = space; + if (n >= cmds->cnt) + break; + if (j == cols-1 || n + rows >= cmds->cnt) + size = 1; + printf("%-*s", size, cmds->names[n]->name); + } + putchar('\n'); + } +} + +static int is_executable(const char *name) +{ + struct stat st; + + if (stat(name, &st) || /* stat, not lstat */ + !S_ISREG(st.st_mode)) + return 0; + +#ifdef __MINGW32__ + /* cannot trust the executable bit, peek into the file instead */ + char buf[3] = { 0 }; + int n; + int fd = open(name, O_RDONLY); + st.st_mode &= ~S_IXUSR; + if (fd >= 0) { + n = read(fd, buf, 2); + if (n == 2) + /* DOS executables start with "MZ" */ + if (!strcmp(buf, "#!") || !strcmp(buf, "MZ")) + st.st_mode |= S_IXUSR; + close(fd); + } +#endif + return st.st_mode & S_IXUSR; +} + +static void list_commands_in_dir(struct cmdnames *cmds, + const char *path, + const char *prefix) +{ + int prefix_len; + DIR *dir = opendir(path); + struct dirent *de; + struct strbuf buf = STRBUF_INIT; + int len; + + if (!dir) + return; + if (!prefix) + prefix = "perf-"; + prefix_len = strlen(prefix); + + strbuf_addf(&buf, "%s/", path); + len = buf.len; + + while ((de = readdir(dir)) != NULL) { + int entlen; + + if (prefixcmp(de->d_name, prefix)) + continue; + + strbuf_setlen(&buf, len); + strbuf_addstr(&buf, de->d_name); + if (!is_executable(buf.buf)) + continue; + + entlen = strlen(de->d_name) - prefix_len; + if (has_extension(de->d_name, ".exe")) + entlen -= 4; + + add_cmdname(cmds, de->d_name + prefix_len, entlen); + } + closedir(dir); + strbuf_release(&buf); +} + +void load_command_list(const char *prefix, + struct cmdnames *main_cmds, + struct cmdnames *other_cmds) +{ + const char *env_path = getenv("PATH"); + const char *exec_path = perf_exec_path(); + + if (exec_path) { + list_commands_in_dir(main_cmds, exec_path, prefix); + qsort(main_cmds->names, main_cmds->cnt, + sizeof(*main_cmds->names), cmdname_compare); + uniq(main_cmds); + } + + if (env_path) { + char *paths, *path, *colon; + path = paths = strdup(env_path); + while (1) { + if ((colon = strchr(path, PATH_SEP))) + *colon = 0; + if (!exec_path || strcmp(path, exec_path)) + list_commands_in_dir(other_cmds, path, prefix); + + if (!colon) + break; + path = colon + 1; + } + free(paths); + + qsort(other_cmds->names, other_cmds->cnt, + sizeof(*other_cmds->names), cmdname_compare); + uniq(other_cmds); + } + exclude_cmds(other_cmds, main_cmds); +} + +void list_commands(const char *title, struct cmdnames *main_cmds, + struct cmdnames *other_cmds) +{ + int i, longest = 0; + + for (i = 0; i < main_cmds->cnt; i++) + if (longest < main_cmds->names[i]->len) + longest = main_cmds->names[i]->len; + for (i = 0; i < other_cmds->cnt; i++) + if (longest < other_cmds->names[i]->len) + longest = other_cmds->names[i]->len; + + if (main_cmds->cnt) { + const char *exec_path = perf_exec_path(); + 
printf("available %s in '%s'\n", title, exec_path);
+		printf("----------------");
+		mput_char('-', strlen(title) + strlen(exec_path));
+		putchar('\n');
+		pretty_print_string_list(main_cmds, longest);
+		putchar('\n');
+	}
+
+	if (other_cmds->cnt) {
+		printf("%s available from elsewhere on your $PATH\n", title);
+		printf("---------------------------------------");
+		mput_char('-', strlen(title));
+		putchar('\n');
+		pretty_print_string_list(other_cmds, longest);
+		putchar('\n');
+	}
+}
+
+int is_in_cmdlist(struct cmdnames *c, const char *s)
+{
+	int i;
+	for (i = 0; i < c->cnt; i++)
+		if (!strcmp(s, c->names[i]->name))
+			return 1;
+	return 0;
+}
+
+static int autocorrect;
+static struct cmdnames aliases;
+
+static int perf_unknown_cmd_config(const char *var, const char *value, void *cb)
+{
+	if (!strcmp(var, "help.autocorrect"))
+		autocorrect = perf_config_int(var, value);
+	/* Also use aliases for command lookup */
+	if (!prefixcmp(var, "alias."))
+		add_cmdname(&aliases, var + 6, strlen(var + 6));
+
+	return perf_default_config(var, value, cb);
+}
+
+static int levenshtein_compare(const void *p1, const void *p2)
+{
+	const struct cmdname *const *c1 = p1, *const *c2 = p2;
+	const char *s1 = (*c1)->name, *s2 = (*c2)->name;
+	int l1 = (*c1)->len;
+	int l2 = (*c2)->len;
+	return l1 != l2 ? l1 - l2 : strcmp(s1, s2);
+}
+
+static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)
+{
+	int i;
+	ALLOC_GROW(cmds->names, cmds->cnt + old->cnt, cmds->alloc);
+
+	for (i = 0; i < old->cnt; i++)
+		cmds->names[cmds->cnt++] = old->names[i];
+	free(old->names);
+	old->cnt = 0;
+	old->names = NULL;
+}
+
+const char *help_unknown_cmd(const char *cmd)
+{
+	int i, n, best_similarity = 0;
+	struct cmdnames main_cmds, other_cmds;
+
+	memset(&main_cmds, 0, sizeof(main_cmds));
+	memset(&other_cmds, 0, sizeof(other_cmds));
+	memset(&aliases, 0, sizeof(aliases));
+
+	perf_config(perf_unknown_cmd_config, NULL);
+
+	load_command_list("perf-", &main_cmds, &other_cmds);
+
+	add_cmd_list(&main_cmds, &aliases);
+	add_cmd_list(&main_cmds, &other_cmds);
+	qsort(main_cmds.names, main_cmds.cnt,
+	      sizeof(main_cmds.names), cmdname_compare);
+	uniq(&main_cmds);
+
+	/* This reuses cmdname->len for similarity index */
+	for (i = 0; i < main_cmds.cnt; ++i)
+		main_cmds.names[i]->len =
+			levenshtein(cmd, main_cmds.names[i]->name, 0, 2, 1, 4);
+
+	qsort(main_cmds.names, main_cmds.cnt,
+	      sizeof(*main_cmds.names), levenshtein_compare);
+
+	if (!main_cmds.cnt)
+		die("Uh oh. Your system reports no perf commands at all.");
+
+	best_similarity = main_cmds.names[0]->len;
+	n = 1;
+	while (n < main_cmds.cnt && best_similarity == main_cmds.names[n]->len)
+		++n;
+	if (autocorrect && n == 1) {
+		const char *assumed = main_cmds.names[0]->name;
+		main_cmds.names[0] = NULL;
+		clean_cmdnames(&main_cmds);
+		fprintf(stderr, "WARNING: You called a perf program named '%s', "
+			"which does not exist.\n"
+			"Continuing under the assumption that you meant '%s'\n",
+			cmd, assumed);
+		if (autocorrect > 0) {
+			fprintf(stderr, "in %0.1f seconds automatically...\n",
+				(float)autocorrect/10.0);
+			poll(NULL, 0, autocorrect * 100);
+		}
+		return assumed;
+	}
+
+	fprintf(stderr, "perf: '%s' is not a perf-command. See 'perf --help'.\n", cmd);
+
+	if (best_similarity < 6) {
+		fprintf(stderr, "\nDid you mean %s?\n",
+			n < 2 ?
"this": "one of these"); + + for (i = 0; i < n; i++) + fprintf(stderr, "\t%s\n", main_cmds.names[i]->name); + } + + exit(1); +} + +int cmd_version(int argc, const char **argv, const char *prefix) +{ + printf("perf version %s\n", perf_version_string); + return 0; +} diff --git a/Documentation/perf_counter/help.h b/Documentation/perf_counter/help.h new file mode 100644 index 00000000000..56bc15406ff --- /dev/null +++ b/Documentation/perf_counter/help.h @@ -0,0 +1,29 @@ +#ifndef HELP_H +#define HELP_H + +struct cmdnames { + int alloc; + int cnt; + struct cmdname { + size_t len; /* also used for similarity index in help.c */ + char name[FLEX_ARRAY]; + } **names; +}; + +static inline void mput_char(char c, unsigned int num) +{ + while(num--) + putchar(c); +} + +void load_command_list(const char *prefix, + struct cmdnames *main_cmds, + struct cmdnames *other_cmds); +void add_cmdname(struct cmdnames *cmds, const char *name, int len); +/* Here we require that excludes is a sorted list. */ +void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes); +int is_in_cmdlist(struct cmdnames *c, const char *s); +void list_commands(const char *title, struct cmdnames *main_cmds, + struct cmdnames *other_cmds); + +#endif /* HELP_H */ diff --git a/Documentation/perf_counter/levenshtein.c b/Documentation/perf_counter/levenshtein.c new file mode 100644 index 00000000000..e521d1516df --- /dev/null +++ b/Documentation/perf_counter/levenshtein.c @@ -0,0 +1,84 @@ +#include "cache.h" +#include "levenshtein.h" + +/* + * This function implements the Damerau-Levenshtein algorithm to + * calculate a distance between strings. + * + * Basically, it says how many letters need to be swapped, substituted, + * deleted from, or added to string1, at least, to get string2. + * + * The idea is to build a distance matrix for the substrings of both + * strings. To avoid a large space complexity, only the last three rows + * are kept in memory (if swaps had the same or higher cost as one deletion + * plus one insertion, only two rows would be needed). + * + * At any stage, "i + 1" denotes the length of the current substring of + * string1 that the distance is calculated for. + * + * row2 holds the current row, row1 the previous row (i.e. for the substring + * of string1 of length "i"), and row0 the row before that. + * + * In other words, at the start of the big loop, row2[j + 1] contains the + * Damerau-Levenshtein distance between the substring of string1 of length + * "i" and the substring of string2 of length "j + 1". + * + * All the big loop does is determine the partial minimum-cost paths. + * + * It does so by calculating the costs of the path ending in characters + * i (in string1) and j (in string2), respectively, given that the last + * operation is a substition, a swap, a deletion, or an insertion. + * + * This implementation allows the costs to be weighted: + * + * - w (as in "sWap") + * - s (as in "Substitution") + * - a (for insertion, AKA "Add") + * - d (as in "Deletion") + * + * Note that this algorithm calculates a distance _iff_ d == a. 
+ */ +int levenshtein(const char *string1, const char *string2, + int w, int s, int a, int d) +{ + int len1 = strlen(string1), len2 = strlen(string2); + int *row0 = malloc(sizeof(int) * (len2 + 1)); + int *row1 = malloc(sizeof(int) * (len2 + 1)); + int *row2 = malloc(sizeof(int) * (len2 + 1)); + int i, j; + + for (j = 0; j <= len2; j++) + row1[j] = j * a; + for (i = 0; i < len1; i++) { + int *dummy; + + row2[0] = (i + 1) * d; + for (j = 0; j < len2; j++) { + /* substitution */ + row2[j + 1] = row1[j] + s * (string1[i] != string2[j]); + /* swap */ + if (i > 0 && j > 0 && string1[i - 1] == string2[j] && + string1[i] == string2[j - 1] && + row2[j + 1] > row0[j - 1] + w) + row2[j + 1] = row0[j - 1] + w; + /* deletion */ + if (row2[j + 1] > row1[j + 1] + d) + row2[j + 1] = row1[j + 1] + d; + /* insertion */ + if (row2[j + 1] > row2[j] + a) + row2[j + 1] = row2[j] + a; + } + + dummy = row0; + row0 = row1; + row1 = row2; + row2 = dummy; + } + + i = row1[len2]; + free(row0); + free(row1); + free(row2); + + return i; +} diff --git a/Documentation/perf_counter/levenshtein.h b/Documentation/perf_counter/levenshtein.h new file mode 100644 index 00000000000..0173abeef52 --- /dev/null +++ b/Documentation/perf_counter/levenshtein.h @@ -0,0 +1,8 @@ +#ifndef LEVENSHTEIN_H +#define LEVENSHTEIN_H + +int levenshtein(const char *string1, const char *string2, + int swap_penalty, int substition_penalty, + int insertion_penalty, int deletion_penalty); + +#endif diff --git a/Documentation/perf_counter/parse-options.c b/Documentation/perf_counter/parse-options.c new file mode 100644 index 00000000000..7464f34e540 --- /dev/null +++ b/Documentation/perf_counter/parse-options.c @@ -0,0 +1,495 @@ +#include "util.h" +#include "parse-options.h" +#include "cache.h" + +#define OPT_SHORT 1 +#define OPT_UNSET 2 + +static int opterror(const struct option *opt, const char *reason, int flags) +{ + if (flags & OPT_SHORT) + return error("switch `%c' %s", opt->short_name, reason); + if (flags & OPT_UNSET) + return error("option `no-%s' %s", opt->long_name, reason); + return error("option `%s' %s", opt->long_name, reason); +} + +static int get_arg(struct parse_opt_ctx_t *p, const struct option *opt, + int flags, const char **arg) +{ + if (p->opt) { + *arg = p->opt; + p->opt = NULL; + } else if (p->argc == 1 && (opt->flags & PARSE_OPT_LASTARG_DEFAULT)) { + *arg = (const char *)opt->defval; + } else if (p->argc > 1) { + p->argc--; + *arg = *++p->argv; + } else + return opterror(opt, "requires a value", flags); + return 0; +} + +static int get_value(struct parse_opt_ctx_t *p, + const struct option *opt, int flags) +{ + const char *s, *arg; + const int unset = flags & OPT_UNSET; + + if (unset && p->opt) + return opterror(opt, "takes no value", flags); + if (unset && (opt->flags & PARSE_OPT_NONEG)) + return opterror(opt, "isn't available", flags); + + if (!(flags & OPT_SHORT) && p->opt) { + switch (opt->type) { + case OPTION_CALLBACK: + if (!(opt->flags & PARSE_OPT_NOARG)) + break; + /* FALLTHROUGH */ + case OPTION_BOOLEAN: + case OPTION_BIT: + case OPTION_SET_INT: + case OPTION_SET_PTR: + return opterror(opt, "takes no value", flags); + default: + break; + } + } + + switch (opt->type) { + case OPTION_BIT: + if (unset) + *(int *)opt->value &= ~opt->defval; + else + *(int *)opt->value |= opt->defval; + return 0; + + case OPTION_BOOLEAN: + *(int *)opt->value = unset ? 0 : *(int *)opt->value + 1; + return 0; + + case OPTION_SET_INT: + *(int *)opt->value = unset ? 
0 : opt->defval; + return 0; + + case OPTION_SET_PTR: + *(void **)opt->value = unset ? NULL : (void *)opt->defval; + return 0; + + case OPTION_STRING: + if (unset) + *(const char **)opt->value = NULL; + else if (opt->flags & PARSE_OPT_OPTARG && !p->opt) + *(const char **)opt->value = (const char *)opt->defval; + else + return get_arg(p, opt, flags, (const char **)opt->value); + return 0; + + case OPTION_CALLBACK: + if (unset) + return (*opt->callback)(opt, NULL, 1) ? (-1) : 0; + if (opt->flags & PARSE_OPT_NOARG) + return (*opt->callback)(opt, NULL, 0) ? (-1) : 0; + if (opt->flags & PARSE_OPT_OPTARG && !p->opt) + return (*opt->callback)(opt, NULL, 0) ? (-1) : 0; + if (get_arg(p, opt, flags, &arg)) + return -1; + return (*opt->callback)(opt, arg, 0) ? (-1) : 0; + + case OPTION_INTEGER: + if (unset) { + *(int *)opt->value = 0; + return 0; + } + if (opt->flags & PARSE_OPT_OPTARG && !p->opt) { + *(int *)opt->value = opt->defval; + return 0; + } + if (get_arg(p, opt, flags, &arg)) + return -1; + *(int *)opt->value = strtol(arg, (char **)&s, 10); + if (*s) + return opterror(opt, "expects a numerical value", flags); + return 0; + + default: + die("should not happen, someone must be hit on the forehead"); + } +} + +static int parse_short_opt(struct parse_opt_ctx_t *p, const struct option *options) +{ + for (; options->type != OPTION_END; options++) { + if (options->short_name == *p->opt) { + p->opt = p->opt[1] ? p->opt + 1 : NULL; + return get_value(p, options, OPT_SHORT); + } + } + return -2; +} + +static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg, + const struct option *options) +{ + const char *arg_end = strchr(arg, '='); + const struct option *abbrev_option = NULL, *ambiguous_option = NULL; + int abbrev_flags = 0, ambiguous_flags = 0; + + if (!arg_end) + arg_end = arg + strlen(arg); + + for (; options->type != OPTION_END; options++) { + const char *rest; + int flags = 0; + + if (!options->long_name) + continue; + + rest = skip_prefix(arg, options->long_name); + if (options->type == OPTION_ARGUMENT) { + if (!rest) + continue; + if (*rest == '=') + return opterror(options, "takes no value", flags); + if (*rest) + continue; + p->out[p->cpidx++] = arg - 2; + return 0; + } + if (!rest) { + /* abbreviated? */ + if (!strncmp(options->long_name, arg, arg_end - arg)) { +is_abbreviated: + if (abbrev_option) { + /* + * If this is abbreviated, it is + * ambiguous. So when there is no + * exact match later, we need to + * error out. + */ + ambiguous_option = abbrev_option; + ambiguous_flags = abbrev_flags; + } + if (!(flags & OPT_UNSET) && *arg_end) + p->opt = arg_end + 1; + abbrev_option = options; + abbrev_flags = flags; + continue; + } + /* negated and abbreviated very much? */ + if (!prefixcmp("no-", arg)) { + flags |= OPT_UNSET; + goto is_abbreviated; + } + /* negated? */ + if (strncmp(arg, "no-", 3)) + continue; + flags |= OPT_UNSET; + rest = skip_prefix(arg + 3, options->long_name); + /* abbreviated and negated? */ + if (!rest && !prefixcmp(options->long_name, arg + 3)) + goto is_abbreviated; + if (!rest) + continue; + } + if (*rest) { + if (*rest != '=') + continue; + p->opt = rest + 1; + } + return get_value(p, options, flags); + } + + if (ambiguous_option) + return error("Ambiguous option: %s " + "(could be --%s%s or --%s%s)", + arg, + (ambiguous_flags & OPT_UNSET) ? "no-" : "", + ambiguous_option->long_name, + (abbrev_flags & OPT_UNSET) ? 
"no-" : "", + abbrev_option->long_name); + if (abbrev_option) + return get_value(p, abbrev_option, abbrev_flags); + return -2; +} + +static void check_typos(const char *arg, const struct option *options) +{ + if (strlen(arg) < 3) + return; + + if (!prefixcmp(arg, "no-")) { + error ("did you mean `--%s` (with two dashes ?)", arg); + exit(129); + } + + for (; options->type != OPTION_END; options++) { + if (!options->long_name) + continue; + if (!prefixcmp(options->long_name, arg)) { + error ("did you mean `--%s` (with two dashes ?)", arg); + exit(129); + } + } +} + +void parse_options_start(struct parse_opt_ctx_t *ctx, + int argc, const char **argv, int flags) +{ + memset(ctx, 0, sizeof(*ctx)); + ctx->argc = argc - 1; + ctx->argv = argv + 1; + ctx->out = argv; + ctx->cpidx = ((flags & PARSE_OPT_KEEP_ARGV0) != 0); + ctx->flags = flags; + if ((flags & PARSE_OPT_KEEP_UNKNOWN) && + (flags & PARSE_OPT_STOP_AT_NON_OPTION)) + die("STOP_AT_NON_OPTION and KEEP_UNKNOWN don't go together"); +} + +static int usage_with_options_internal(const char * const *, + const struct option *, int); + +int parse_options_step(struct parse_opt_ctx_t *ctx, + const struct option *options, + const char * const usagestr[]) +{ + int internal_help = !(ctx->flags & PARSE_OPT_NO_INTERNAL_HELP); + + /* we must reset ->opt, unknown short option leave it dangling */ + ctx->opt = NULL; + + for (; ctx->argc; ctx->argc--, ctx->argv++) { + const char *arg = ctx->argv[0]; + + if (*arg != '-' || !arg[1]) { + if (ctx->flags & PARSE_OPT_STOP_AT_NON_OPTION) + break; + ctx->out[ctx->cpidx++] = ctx->argv[0]; + continue; + } + + if (arg[1] != '-') { + ctx->opt = arg + 1; + if (internal_help && *ctx->opt == 'h') + return parse_options_usage(usagestr, options); + switch (parse_short_opt(ctx, options)) { + case -1: + return parse_options_usage(usagestr, options); + case -2: + goto unknown; + } + if (ctx->opt) + check_typos(arg + 1, options); + while (ctx->opt) { + if (internal_help && *ctx->opt == 'h') + return parse_options_usage(usagestr, options); + switch (parse_short_opt(ctx, options)) { + case -1: + return parse_options_usage(usagestr, options); + case -2: + /* fake a short option thing to hide the fact that we may have + * started to parse aggregated stuff + * + * This is leaky, too bad. 
+ */ + ctx->argv[0] = strdup(ctx->opt - 1); + *(char *)ctx->argv[0] = '-'; + goto unknown; + } + } + continue; + } + + if (!arg[2]) { /* "--" */ + if (!(ctx->flags & PARSE_OPT_KEEP_DASHDASH)) { + ctx->argc--; + ctx->argv++; + } + break; + } + + if (internal_help && !strcmp(arg + 2, "help-all")) + return usage_with_options_internal(usagestr, options, 1); + if (internal_help && !strcmp(arg + 2, "help")) + return parse_options_usage(usagestr, options); + switch (parse_long_opt(ctx, arg + 2, options)) { + case -1: + return parse_options_usage(usagestr, options); + case -2: + goto unknown; + } + continue; +unknown: + if (!(ctx->flags & PARSE_OPT_KEEP_UNKNOWN)) + return PARSE_OPT_UNKNOWN; + ctx->out[ctx->cpidx++] = ctx->argv[0]; + ctx->opt = NULL; + } + return PARSE_OPT_DONE; +} + +int parse_options_end(struct parse_opt_ctx_t *ctx) +{ + memmove(ctx->out + ctx->cpidx, ctx->argv, ctx->argc * sizeof(*ctx->out)); + ctx->out[ctx->cpidx + ctx->argc] = NULL; + return ctx->cpidx + ctx->argc; +} + +int parse_options(int argc, const char **argv, const struct option *options, + const char * const usagestr[], int flags) +{ + struct parse_opt_ctx_t ctx; + + parse_options_start(&ctx, argc, argv, flags); + switch (parse_options_step(&ctx, options, usagestr)) { + case PARSE_OPT_HELP: + exit(129); + case PARSE_OPT_DONE: + break; + default: /* PARSE_OPT_UNKNOWN */ + if (ctx.argv[0][1] == '-') { + error("unknown option `%s'", ctx.argv[0] + 2); + } else { + error("unknown switch `%c'", *ctx.opt); + } + usage_with_options(usagestr, options); + } + + return parse_options_end(&ctx); +} + +#define USAGE_OPTS_WIDTH 24 +#define USAGE_GAP 2 + +int usage_with_options_internal(const char * const *usagestr, + const struct option *opts, int full) +{ + if (!usagestr) + return PARSE_OPT_HELP; + + fprintf(stderr, "usage: %s\n", *usagestr++); + while (*usagestr && **usagestr) + fprintf(stderr, " or: %s\n", *usagestr++); + while (*usagestr) { + fprintf(stderr, "%s%s\n", + **usagestr ? 
" " : "", + *usagestr); + usagestr++; + } + + if (opts->type != OPTION_GROUP) + fputc('\n', stderr); + + for (; opts->type != OPTION_END; opts++) { + size_t pos; + int pad; + + if (opts->type == OPTION_GROUP) { + fputc('\n', stderr); + if (*opts->help) + fprintf(stderr, "%s\n", opts->help); + continue; + } + if (!full && (opts->flags & PARSE_OPT_HIDDEN)) + continue; + + pos = fprintf(stderr, " "); + if (opts->short_name) + pos += fprintf(stderr, "-%c", opts->short_name); + if (opts->long_name && opts->short_name) + pos += fprintf(stderr, ", "); + if (opts->long_name) + pos += fprintf(stderr, "--%s", opts->long_name); + + switch (opts->type) { + case OPTION_ARGUMENT: + break; + case OPTION_INTEGER: + if (opts->flags & PARSE_OPT_OPTARG) + if (opts->long_name) + pos += fprintf(stderr, "[=]"); + else + pos += fprintf(stderr, "[]"); + else + pos += fprintf(stderr, " "); + break; + case OPTION_CALLBACK: + if (opts->flags & PARSE_OPT_NOARG) + break; + /* FALLTHROUGH */ + case OPTION_STRING: + if (opts->argh) { + if (opts->flags & PARSE_OPT_OPTARG) + if (opts->long_name) + pos += fprintf(stderr, "[=<%s>]", opts->argh); + else + pos += fprintf(stderr, "[<%s>]", opts->argh); + else + pos += fprintf(stderr, " <%s>", opts->argh); + } else { + if (opts->flags & PARSE_OPT_OPTARG) + if (opts->long_name) + pos += fprintf(stderr, "[=...]"); + else + pos += fprintf(stderr, "[...]"); + else + pos += fprintf(stderr, " ..."); + } + break; + default: /* OPTION_{BIT,BOOLEAN,SET_INT,SET_PTR} */ + break; + } + + if (pos <= USAGE_OPTS_WIDTH) + pad = USAGE_OPTS_WIDTH - pos; + else { + fputc('\n', stderr); + pad = USAGE_OPTS_WIDTH; + } + fprintf(stderr, "%*s%s\n", pad + USAGE_GAP, "", opts->help); + } + fputc('\n', stderr); + + return PARSE_OPT_HELP; +} + +void usage_with_options(const char * const *usagestr, + const struct option *opts) +{ + usage_with_options_internal(usagestr, opts, 0); + exit(129); +} + +int parse_options_usage(const char * const *usagestr, + const struct option *opts) +{ + return usage_with_options_internal(usagestr, opts, 0); +} + + +/*----- some often used options -----*/ +#include "cache.h" + +int parse_opt_verbosity_cb(const struct option *opt, const char *arg, + int unset) +{ + int *target = opt->value; + + if (unset) + /* --no-quiet, --no-verbose */ + *target = 0; + else if (opt->short_name == 'v') { + if (*target >= 0) + (*target)++; + else + *target = 1; + } else { + if (*target <= 0) + (*target)--; + else + *target = -1; + } + return 0; +} diff --git a/Documentation/perf_counter/parse-options.h b/Documentation/perf_counter/parse-options.h new file mode 100644 index 00000000000..a81c7faff68 --- /dev/null +++ b/Documentation/perf_counter/parse-options.h @@ -0,0 +1,172 @@ +#ifndef PARSE_OPTIONS_H +#define PARSE_OPTIONS_H + +enum parse_opt_type { + /* special types */ + OPTION_END, + OPTION_ARGUMENT, + OPTION_GROUP, + /* options with no arguments */ + OPTION_BIT, + OPTION_BOOLEAN, /* _INCR would have been a better name */ + OPTION_SET_INT, + OPTION_SET_PTR, + /* options with arguments (usually) */ + OPTION_STRING, + OPTION_INTEGER, + OPTION_CALLBACK, +}; + +enum parse_opt_flags { + PARSE_OPT_KEEP_DASHDASH = 1, + PARSE_OPT_STOP_AT_NON_OPTION = 2, + PARSE_OPT_KEEP_ARGV0 = 4, + PARSE_OPT_KEEP_UNKNOWN = 8, + PARSE_OPT_NO_INTERNAL_HELP = 16, +}; + +enum parse_opt_option_flags { + PARSE_OPT_OPTARG = 1, + PARSE_OPT_NOARG = 2, + PARSE_OPT_NONEG = 4, + PARSE_OPT_HIDDEN = 8, + PARSE_OPT_LASTARG_DEFAULT = 16, +}; + +struct option; +typedef int parse_opt_cb(const struct option *, const char *arg, int 
			 unset);
+
+/*
+ * `type`::
+ *   holds the type of the option; you must have an OPTION_END last in your
+ *   array.
+ *
+ * `short_name`::
+ *   the character to use as a short option name, '\0' if none.
+ *
+ * `long_name`::
+ *   the long option name, without the leading dashes, NULL if none.
+ *
+ * `value`::
+ *   stores pointers to the values to be filled.
+ *
+ * `argh`::
+ *   token to explain the kind of argument this option wants. Keep it
+ *   homogeneous across the repository.
+ *
+ * `help`::
+ *   the short help associated with what the option does.
+ *   Must never be NULL (except for OPTION_END).
+ *   OPTION_GROUP uses this pointer to store the group header.
+ *
+ * `flags`::
+ *   mask of parse_opt_option_flags.
+ *   PARSE_OPT_OPTARG: says that the argument is optional (not for BOOLEANs)
+ *   PARSE_OPT_NOARG: says that this option takes no argument, for CALLBACKs
+ *   PARSE_OPT_NONEG: says that this option cannot be negated
+ *   PARSE_OPT_HIDDEN: this option is skipped in the default usage, and is
+ *                     only shown in the full (--help-all) one.
+ *
+ * `callback`::
+ *   pointer to the callback to use for OPTION_CALLBACK.
+ *
+ * `defval`::
+ *   default value to fill (*->value) with for PARSE_OPT_OPTARG.
+ *   OPTION_{BIT,SET_INT,SET_PTR} store the {mask,integer,pointer} to put in
+ *   the value when met.
+ *   CALLBACKS can use it as they want.
+ */
+struct option {
+	enum parse_opt_type type;
+	int short_name;
+	const char *long_name;
+	void *value;
+	const char *argh;
+	const char *help;
+
+	int flags;
+	parse_opt_cb *callback;
+	intptr_t defval;
+};
+
+#define OPT_END()                   { OPTION_END }
+#define OPT_ARGUMENT(l, h)          { OPTION_ARGUMENT, 0, (l), NULL, NULL, (h) }
+#define OPT_GROUP(h)                { OPTION_GROUP, 0, NULL, NULL, NULL, (h) }
+#define OPT_BIT(s, l, v, h, b)      { OPTION_BIT, (s), (l), (v), NULL, (h), 0, NULL, (b) }
+#define OPT_BOOLEAN(s, l, v, h)     { OPTION_BOOLEAN, (s), (l), (v), NULL, (h) }
+#define OPT_SET_INT(s, l, v, h, i)  { OPTION_SET_INT, (s), (l), (v), NULL, (h), 0, NULL, (i) }
+#define OPT_SET_PTR(s, l, v, h, p)  { OPTION_SET_PTR, (s), (l), (v), NULL, (h), 0, NULL, (p) }
+#define OPT_INTEGER(s, l, v, h)     { OPTION_INTEGER, (s), (l), (v), NULL, (h) }
+#define OPT_STRING(s, l, v, a, h)   { OPTION_STRING, (s), (l), (v), (a), (h) }
+#define OPT_DATE(s, l, v, h) \
+	{ OPTION_CALLBACK, (s), (l), (v), "time", (h), 0, \
+	  parse_opt_approxidate_cb }
+#define OPT_CALLBACK(s, l, v, a, h, f) \
+	{ OPTION_CALLBACK, (s), (l), (v), (a), (h), 0, (f) }
+
+/* parse_options() will filter out the processed options and leave the
+ * non-option arguments in argv[].
+ * Returns the number of arguments left in argv[].
+ */
+extern int parse_options(int argc, const char **argv,
+                         const struct option *options,
+                         const char * const usagestr[], int flags);
+
+extern NORETURN void usage_with_options(const char * const *usagestr,
+                                        const struct option *options);
+
+/*----- incremental advanced APIs -----*/
+
+enum {
+	PARSE_OPT_HELP = -1,
+	PARSE_OPT_DONE,
+	PARSE_OPT_UNKNOWN,
+};
+
+/*
+ * It's okay for the caller to consume argv/argc in the usual way.
+ * Other fields of that structure are private to parse-options and should not
+ * be modified in any way.
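+ *
+ * A sketch of the step-wise use (this is essentially what the plain
+ * parse_options() wrapper in parse-options.c does):
+ *
+ *	struct parse_opt_ctx_t ctx;
+ *
+ *	parse_options_start(&ctx, argc, argv, flags);
+ *	switch (parse_options_step(&ctx, options, usagestr)) {
+ *	case PARSE_OPT_DONE:
+ *		break;
+ *	default:
+ *		... handle PARSE_OPT_HELP / PARSE_OPT_UNKNOWN ...
+ *	}
+ *	argc = parse_options_end(&ctx);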
+ */ +struct parse_opt_ctx_t { + const char **argv; + const char **out; + int argc, cpidx; + const char *opt; + int flags; +}; + +extern int parse_options_usage(const char * const *usagestr, + const struct option *opts); + +extern void parse_options_start(struct parse_opt_ctx_t *ctx, + int argc, const char **argv, int flags); + +extern int parse_options_step(struct parse_opt_ctx_t *ctx, + const struct option *options, + const char * const usagestr[]); + +extern int parse_options_end(struct parse_opt_ctx_t *ctx); + + +/*----- some often used options -----*/ +extern int parse_opt_abbrev_cb(const struct option *, const char *, int); +extern int parse_opt_approxidate_cb(const struct option *, const char *, int); +extern int parse_opt_verbosity_cb(const struct option *, const char *, int); + +#define OPT__VERBOSE(var) OPT_BOOLEAN('v', "verbose", (var), "be verbose") +#define OPT__QUIET(var) OPT_BOOLEAN('q', "quiet", (var), "be quiet") +#define OPT__VERBOSITY(var) \ + { OPTION_CALLBACK, 'v', "verbose", (var), NULL, "be more verbose", \ + PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 }, \ + { OPTION_CALLBACK, 'q', "quiet", (var), NULL, "be more quiet", \ + PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 } +#define OPT__DRY_RUN(var) OPT_BOOLEAN('n', "dry-run", (var), "dry run") +#define OPT__ABBREV(var) \ + { OPTION_CALLBACK, 0, "abbrev", (var), "n", \ + "use digits to display SHA-1s", \ + PARSE_OPT_OPTARG, &parse_opt_abbrev_cb, 0 } + +extern const char *parse_options_fix_filename(const char *prefix, const char *file); + +#endif diff --git a/Documentation/perf_counter/path.c b/Documentation/perf_counter/path.c new file mode 100644 index 00000000000..891b612ec1a --- /dev/null +++ b/Documentation/perf_counter/path.c @@ -0,0 +1,392 @@ +/* + * I'm tired of doing "vsnprintf()" etc just to open a + * file, so here's a "return static buffer with printf" + * interface for paths. + * + * It's obviously not thread-safe. Sue me. But it's quite + * useful for doing things like + * + * f = open(mkpath("%s/%s.perf", base, name), O_RDONLY); + * + * which is what it's designed for. + */ +#include "cache.h" + +static char bad_path[] = "/bad-path/"; +/* + * Two hacks: + */ + +static char *get_perf_dir(void) +{ + return "."; +} + +size_t strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + + +static char *get_pathname(void) +{ + static char pathname_array[4][PATH_MAX]; + static int index; + return pathname_array[3 & ++index]; +} + +static char *cleanup_path(char *path) +{ + /* Clean it up */ + if (!memcmp(path, "./", 2)) { + path += 2; + while (*path == '/') + path++; + } + return path; +} + +char *mksnpath(char *buf, size_t n, const char *fmt, ...) +{ + va_list args; + unsigned len; + + va_start(args, fmt); + len = vsnprintf(buf, n, fmt, args); + va_end(args); + if (len >= n) { + strlcpy(buf, bad_path, n); + return buf; + } + return cleanup_path(buf); +} + +static char *perf_vsnpath(char *buf, size_t n, const char *fmt, va_list args) +{ + const char *perf_dir = get_perf_dir(); + size_t len; + + len = strlen(perf_dir); + if (n < len + 1) + goto bad; + memcpy(buf, perf_dir, len); + if (len && !is_dir_sep(perf_dir[len-1])) + buf[len++] = '/'; + len += vsnprintf(buf + len, n - len, fmt, args); + if (len >= n) + goto bad; + return cleanup_path(buf); +bad: + strlcpy(buf, bad_path, n); + return buf; +} + +char *perf_snpath(char *buf, size_t n, const char *fmt, ...) 
+{ + va_list args; + va_start(args, fmt); + (void)perf_vsnpath(buf, n, fmt, args); + va_end(args); + return buf; +} + +char *perf_pathdup(const char *fmt, ...) +{ + char path[PATH_MAX]; + va_list args; + va_start(args, fmt); + (void)perf_vsnpath(path, sizeof(path), fmt, args); + va_end(args); + return xstrdup(path); +} + +char *mkpath(const char *fmt, ...) +{ + va_list args; + unsigned len; + char *pathname = get_pathname(); + + va_start(args, fmt); + len = vsnprintf(pathname, PATH_MAX, fmt, args); + va_end(args); + if (len >= PATH_MAX) + return bad_path; + return cleanup_path(pathname); +} + +char *perf_path(const char *fmt, ...) +{ + const char *perf_dir = get_perf_dir(); + char *pathname = get_pathname(); + va_list args; + unsigned len; + + len = strlen(perf_dir); + if (len > PATH_MAX-100) + return bad_path; + memcpy(pathname, perf_dir, len); + if (len && perf_dir[len-1] != '/') + pathname[len++] = '/'; + va_start(args, fmt); + len += vsnprintf(pathname + len, PATH_MAX - len, fmt, args); + va_end(args); + if (len >= PATH_MAX) + return bad_path; + return cleanup_path(pathname); +} + + +/* perf_mkstemp() - create tmp file honoring TMPDIR variable */ +int perf_mkstemp(char *path, size_t len, const char *template) +{ + const char *tmp; + size_t n; + + tmp = getenv("TMPDIR"); + if (!tmp) + tmp = "/tmp"; + n = snprintf(path, len, "%s/%s", tmp, template); + if (len <= n) { + errno = ENAMETOOLONG; + return -1; + } + return mkstemp(path); +} + + +static char *user_path(char *buf, char *path, int sz) +{ + struct passwd *pw; + char *slash; + int len, baselen; + + if (!path || path[0] != '~') + return NULL; + path++; + slash = strchr(path, '/'); + if (path[0] == '/' || !path[0]) { + pw = getpwuid(getuid()); + } + else { + if (slash) { + *slash = 0; + pw = getpwnam(path); + *slash = '/'; + } + else + pw = getpwnam(path); + } + if (!pw || !pw->pw_dir || sz <= strlen(pw->pw_dir)) + return NULL; + baselen = strlen(pw->pw_dir); + memcpy(buf, pw->pw_dir, baselen); + while ((1 < baselen) && (buf[baselen-1] == '/')) { + buf[baselen-1] = 0; + baselen--; + } + if (slash && slash[1]) { + len = strlen(slash); + if (sz <= baselen + len) + return NULL; + memcpy(buf + baselen, slash, len + 1); + } + return buf; +} + +const char *make_relative_path(const char *abs, const char *base) +{ + static char buf[PATH_MAX + 1]; + int baselen; + if (!base) + return abs; + baselen = strlen(base); + if (prefixcmp(abs, base)) + return abs; + if (abs[baselen] == '/') + baselen++; + else if (base[baselen - 1] != '/') + return abs; + strcpy(buf, abs + baselen); + return buf; +} + +/* + * It is okay if dst == src, but they should not overlap otherwise. + * + * Performs the following normalizations on src, storing the result in dst: + * - Ensures that components are separated by '/' (Windows only) + * - Squashes sequences of '/'. + * - Removes "." components. + * - Removes ".." components, and the components the precede them. + * Returns failure (non-zero) if a ".." component appears as first path + * component anytime during the normalization. Otherwise, returns success (0). + * + * Note that this function is purely textual. It does not follow symlinks, + * verify the existence of the path, or make any system calls. 
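+ *
+ * Example: normalize_path_copy(buf, "a//b/./c/../d") stores "a/b/d" in
+ * buf and returns 0, while normalize_path_copy(buf, "../x") fails with
+ * non-zero because the ".." has no component left to strip.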
+ */ +int normalize_path_copy(char *dst, const char *src) +{ + char *dst0; + + if (has_dos_drive_prefix(src)) { + *dst++ = *src++; + *dst++ = *src++; + } + dst0 = dst; + + if (is_dir_sep(*src)) { + *dst++ = '/'; + while (is_dir_sep(*src)) + src++; + } + + for (;;) { + char c = *src; + + /* + * A path component that begins with . could be + * special: + * (1) "." and ends -- ignore and terminate. + * (2) "./" -- ignore them, eat slash and continue. + * (3) ".." and ends -- strip one and terminate. + * (4) "../" -- strip one, eat slash and continue. + */ + if (c == '.') { + if (!src[1]) { + /* (1) */ + src++; + } else if (is_dir_sep(src[1])) { + /* (2) */ + src += 2; + while (is_dir_sep(*src)) + src++; + continue; + } else if (src[1] == '.') { + if (!src[2]) { + /* (3) */ + src += 2; + goto up_one; + } else if (is_dir_sep(src[2])) { + /* (4) */ + src += 3; + while (is_dir_sep(*src)) + src++; + goto up_one; + } + } + } + + /* copy up to the next '/', and eat all '/' */ + while ((c = *src++) != '\0' && !is_dir_sep(c)) + *dst++ = c; + if (is_dir_sep(c)) { + *dst++ = '/'; + while (is_dir_sep(c)) + c = *src++; + src--; + } else if (!c) + break; + continue; + + up_one: + /* + * dst0..dst is prefix portion, and dst[-1] is '/'; + * go up one level. + */ + dst--; /* go to trailing '/' */ + if (dst <= dst0) + return -1; + /* Windows: dst[-1] cannot be backslash anymore */ + while (dst0 < dst && dst[-1] != '/') + dst--; + } + *dst = '\0'; + return 0; +} + +/* + * path = Canonical absolute path + * prefix_list = Colon-separated list of absolute paths + * + * Determines, for each path in prefix_list, whether the "prefix" really + * is an ancestor directory of path. Returns the length of the longest + * ancestor directory, excluding any trailing slashes, or -1 if no prefix + * is an ancestor. (Note that this means 0 is returned if prefix_list is + * "/".) "/foo" is not considered an ancestor of "/foobar". Directories + * are not considered to be their own ancestors. path must be in a + * canonical form: empty components, or "." or ".." components are not + * allowed. prefix_list may be null, which is like "". + */ +int longest_ancestor_length(const char *path, const char *prefix_list) +{ + char buf[PATH_MAX+1]; + const char *ceil, *colon; + int len, max_len = -1; + + if (prefix_list == NULL || !strcmp(path, "/")) + return -1; + + for (colon = ceil = prefix_list; *colon; ceil = colon+1) { + for (colon = ceil; *colon && *colon != PATH_SEP; colon++); + len = colon - ceil; + if (len == 0 || len > PATH_MAX || !is_absolute_path(ceil)) + continue; + strlcpy(buf, ceil, len+1); + if (normalize_path_copy(buf, buf) < 0) + continue; + len = strlen(buf); + if (len > 0 && buf[len-1] == '/') + buf[--len] = '\0'; + + if (!strncmp(path, buf, len) && + path[len] == '/' && + len > max_len) { + max_len = len; + } + } + + return max_len; +} + +/* strip arbitrary amount of directory separators at end of path */ +static inline int chomp_trailing_dir_sep(const char *path, int len) +{ + while (len && is_dir_sep(path[len - 1])) + len--; + return len; +} + +/* + * If path ends with suffix (complete path components), returns the + * part before suffix (sans trailing directory separators). + * Otherwise returns NULL. 
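+ *
+ * Example: strip_path_suffix("/usr/local/bin", "bin") returns
+ * "/usr/local", while strip_path_suffix("/usr/local/bin", "al/bin")
+ * returns NULL, since "al" does not match the complete "local"
+ * component.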
+ */ +char *strip_path_suffix(const char *path, const char *suffix) +{ + int path_len = strlen(path), suffix_len = strlen(suffix); + + while (suffix_len) { + if (!path_len) + return NULL; + + if (is_dir_sep(path[path_len - 1])) { + if (!is_dir_sep(suffix[suffix_len - 1])) + return NULL; + path_len = chomp_trailing_dir_sep(path, path_len); + suffix_len = chomp_trailing_dir_sep(suffix, suffix_len); + } + else if (path[--path_len] != suffix[--suffix_len]) + return NULL; + } + + if (path_len && !is_dir_sep(path[path_len - 1])) + return NULL; + return xstrndup(path, chomp_trailing_dir_sep(path, path_len)); +} diff --git a/Documentation/perf_counter/perf.c b/Documentation/perf_counter/perf.c new file mode 100644 index 00000000000..9256f6a1644 --- /dev/null +++ b/Documentation/perf_counter/perf.c @@ -0,0 +1,411 @@ +#include "builtin.h" +#include "exec_cmd.h" +#include "cache.h" +//#include "quote.h" +#include "run-command.h" + +const char perf_usage_string[] = + "perf [--version] [--exec-path[=PERF_EXEC_PATH]] [--html-path] [-p|--paginate|--no-pager] [--bare] [--perf-dir=PERF_DIR] [--work-tree=PERF_WORK_TREE] [--help] COMMAND [ARGS]"; + +const char perf_more_info_string[] = + "See 'perf help COMMAND' for more information on a specific command."; + +static int use_pager = -1; +struct pager_config { + const char *cmd; + int val; +}; + +static int pager_command_config(const char *var, const char *value, void *data) +{ + struct pager_config *c = data; + if (!prefixcmp(var, "pager.") && !strcmp(var + 6, c->cmd)) + c->val = perf_config_bool(var, value); + return 0; +} + +/* returns 0 for "no pager", 1 for "use pager", and -1 for "not specified" */ +int check_pager_config(const char *cmd) +{ + struct pager_config c; + c.cmd = cmd; + c.val = -1; + perf_config(pager_command_config, &c); + return c.val; +} + +static void commit_pager_choice(void) { + switch (use_pager) { + case 0: + setenv("PERF_PAGER", "cat", 1); + break; + case 1: + /* setup_pager(); */ + break; + default: + break; + } +} + +static int handle_options(const char*** argv, int* argc, int* envchanged) +{ + int handled = 0; + + while (*argc > 0) { + const char *cmd = (*argv)[0]; + if (cmd[0] != '-') + break; + + /* + * For legacy reasons, the "version" and "help" + * commands can be written with "--" prepended + * to make them look like flags. + */ + if (!strcmp(cmd, "--help") || !strcmp(cmd, "--version")) + break; + + /* + * Check remaining flags. 
+		 */
+		if (!prefixcmp(cmd, "--exec-path")) {
+			cmd += 11;
+			if (*cmd == '=')
+				perf_set_argv_exec_path(cmd + 1);
+			else {
+				puts(perf_exec_path());
+				exit(0);
+			}
+		} else if (!strcmp(cmd, "--html-path")) {
+			puts(system_path(PERF_HTML_PATH));
+			exit(0);
+		} else if (!strcmp(cmd, "-p") || !strcmp(cmd, "--paginate")) {
+			use_pager = 1;
+		} else if (!strcmp(cmd, "--no-pager")) {
+			use_pager = 0;
+			if (envchanged)
+				*envchanged = 1;
+		} else if (!strcmp(cmd, "--perf-dir")) {
+			if (*argc < 2) {
+				fprintf(stderr, "No directory given for --perf-dir.\n");
+				usage(perf_usage_string);
+			}
+			setenv(PERF_DIR_ENVIRONMENT, (*argv)[1], 1);
+			if (envchanged)
+				*envchanged = 1;
+			(*argv)++;
+			(*argc)--;
+			handled++;
+		} else if (!prefixcmp(cmd, "--perf-dir=")) {
+			/* "--perf-dir=" is 11 characters; skip past the '=' */
+			setenv(PERF_DIR_ENVIRONMENT, cmd + 11, 1);
+			if (envchanged)
+				*envchanged = 1;
+		} else if (!strcmp(cmd, "--work-tree")) {
+			if (*argc < 2) {
+				fprintf(stderr, "No directory given for --work-tree.\n");
+				usage(perf_usage_string);
+			}
+			setenv(PERF_WORK_TREE_ENVIRONMENT, (*argv)[1], 1);
+			if (envchanged)
+				*envchanged = 1;
+			(*argv)++;
+			(*argc)--;
+			handled++;
+		} else if (!prefixcmp(cmd, "--work-tree=")) {
+			setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + 12, 1);
+			if (envchanged)
+				*envchanged = 1;
+		} else {
+			fprintf(stderr, "Unknown option: %s\n", cmd);
+			usage(perf_usage_string);
+		}
+
+		(*argv)++;
+		(*argc)--;
+		handled++;
+	}
+	return handled;
+}
+
+static int handle_alias(int *argcp, const char ***argv)
+{
+	int envchanged = 0, ret = 0, saved_errno = errno;
+	int count, option_count;
+	const char **new_argv;
+	const char *alias_command;
+	char *alias_string;
+
+	alias_command = (*argv)[0];
+	alias_string = alias_lookup(alias_command);
+	if (alias_string) {
+		if (alias_string[0] == '!') {
+			if (*argcp > 1) {
+				struct strbuf buf;
+
+				strbuf_init(&buf, PATH_MAX);
+				strbuf_addstr(&buf, alias_string);
+				sq_quote_argv(&buf, (*argv) + 1, PATH_MAX);
+				free(alias_string);
+				alias_string = buf.buf;
+			}
+			ret = system(alias_string + 1);
+			if (ret >= 0 && WIFEXITED(ret) &&
+			    WEXITSTATUS(ret) != 127)
+				exit(WEXITSTATUS(ret));
+			die("Failed to run '%s' when expanding alias '%s'",
+			    alias_string + 1, alias_command);
+		}
+		count = split_cmdline(alias_string, &new_argv);
+		if (count < 0)
+			die("Bad alias.%s string", alias_command);
+		option_count = handle_options(&new_argv, &count, &envchanged);
+		if (envchanged)
+			die("alias '%s' changes environment variables\n"
+			    "You can use '!perf' in the alias to do this.",
+			    alias_command);
+		memmove(new_argv - option_count, new_argv,
+			count * sizeof(char *));
+		new_argv -= option_count;
+
+		if (count < 1)
+			die("empty alias for %s", alias_command);
+
+		if (!strcmp(alias_command, new_argv[0]))
+			die("recursive alias: %s", alias_command);
+
+		new_argv = realloc(new_argv, sizeof(char *) *
+				   (count + *argcp + 1));
+		/* insert after command name */
+		memcpy(new_argv + count, *argv + 1, sizeof(char *) * *argcp);
+		new_argv[count + *argcp] = NULL;
+
+		*argv = new_argv;
+		*argcp += count - 1;
+
+		ret = 1;
+	}
+
+	errno = saved_errno;
+
+	return ret;
+}
+
+const char perf_version_string[] = PERF_VERSION;
+
+#define RUN_SETUP	(1<<0)
+#define USE_PAGER	(1<<1)
+/*
+ * require working tree to be present -- anything using this needs
+ * RUN_SETUP for reading from the configuration file.
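+ *
+ * A hypothetical commands[] entry combining these flags (cmd_report is
+ * made up here; only "top" exists in this patch) would look like:
+ *
+ *	{ "report", cmd_report, RUN_SETUP | USE_PAGER | NEED_WORK_TREE }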
+ */ +#define NEED_WORK_TREE (1<<2) + +struct cmd_struct { + const char *cmd; + int (*fn)(int, const char **, const char *); + int option; +}; + +static int run_builtin(struct cmd_struct *p, int argc, const char **argv) +{ + int status; + struct stat st; + const char *prefix; + + prefix = NULL; + if (p->option & RUN_SETUP) + prefix = NULL; /* setup_perf_directory(); */ + + if (use_pager == -1 && p->option & RUN_SETUP) + use_pager = check_pager_config(p->cmd); + if (use_pager == -1 && p->option & USE_PAGER) + use_pager = 1; + commit_pager_choice(); + + if (p->option & NEED_WORK_TREE) + /* setup_work_tree() */; + + status = p->fn(argc, argv, prefix); + if (status) + return status & 0xff; + + /* Somebody closed stdout? */ + if (fstat(fileno(stdout), &st)) + return 0; + /* Ignore write errors for pipes and sockets.. */ + if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) + return 0; + + /* Check for ENOSPC and EIO errors.. */ + if (fflush(stdout)) + die("write failure on standard output: %s", strerror(errno)); + if (ferror(stdout)) + die("unknown write failure on standard output"); + if (fclose(stdout)) + die("close failed on standard output: %s", strerror(errno)); + return 0; +} + +static void handle_internal_command(int argc, const char **argv) +{ + const char *cmd = argv[0]; + static struct cmd_struct commands[] = { + { "top", cmd_top, 0 }, + }; + int i; + static const char ext[] = STRIP_EXTENSION; + + if (sizeof(ext) > 1) { + i = strlen(argv[0]) - strlen(ext); + if (i > 0 && !strcmp(argv[0] + i, ext)) { + char *argv0 = strdup(argv[0]); + argv[0] = cmd = argv0; + argv0[i] = '\0'; + } + } + + /* Turn "perf cmd --help" into "perf help cmd" */ + if (argc > 1 && !strcmp(argv[1], "--help")) { + argv[1] = argv[0]; + argv[0] = cmd = "help"; + } + + for (i = 0; i < ARRAY_SIZE(commands); i++) { + struct cmd_struct *p = commands+i; + if (strcmp(p->cmd, cmd)) + continue; + exit(run_builtin(p, argc, argv)); + } +} + +static void execv_dashed_external(const char **argv) +{ + struct strbuf cmd = STRBUF_INIT; + const char *tmp; + int status; + + strbuf_addf(&cmd, "perf-%s", argv[0]); + + /* + * argv[0] must be the perf command, but the argv array + * belongs to the caller, and may be reused in + * subsequent loop iterations. Save argv[0] and + * restore it on error. + */ + tmp = argv[0]; + argv[0] = cmd.buf; + + /* + * if we fail because the command is not found, it is + * OK to return. Otherwise, we just pass along the status code. + */ + status = run_command_v_opt(argv, 0); + if (status != -ERR_RUN_COMMAND_EXEC) { + if (IS_RUN_COMMAND_ERR(status)) + die("unable to run '%s'", argv[0]); + exit(-status); + } + errno = ENOENT; /* as if we called execvp */ + + argv[0] = tmp; + + strbuf_release(&cmd); +} + +static int run_argv(int *argcp, const char ***argv) +{ + int done_alias = 0; + + while (1) { + /* See if it's an internal command */ + handle_internal_command(*argcp, *argv); + + /* .. then try the external ones */ + execv_dashed_external(*argv); + + /* It could be an alias -- this works around the insanity + * of overriding "perf log" with "perf show" by having + * alias.log = show + */ + if (done_alias || !handle_alias(argcp, argv)) + break; + done_alias = 1; + } + + return done_alias; +} + + +int main(int argc, const char **argv) +{ + const char *cmd; + + cmd = perf_extract_argv0_path(argv[0]); + if (!cmd) + cmd = "perf-help"; + + /* + * "perf-xxxx" is the same as "perf xxxx", but we obviously: + * + * - cannot take flags in between the "perf" and the "xxxx". 
+	 * - cannot execute it externally (since it would just do
+	 *   the same thing over again)
+	 *
+	 * So we just directly call the internal command handler, and
+	 * die if that one cannot handle it.
+	 */
+	if (!prefixcmp(cmd, "perf-")) {
+		cmd += 5; /* skip the "perf-" prefix, which is five characters */
+		argv[0] = cmd;
+		handle_internal_command(argc, argv);
+		die("cannot handle %s internally", cmd);
+	}
+
+	/* Look for flags.. */
+	argv++;
+	argc--;
+	handle_options(&argv, &argc, NULL);
+	commit_pager_choice();
+	if (argc > 0) {
+		if (!prefixcmp(argv[0], "--"))
+			argv[0] += 2;
+	} else {
+		/* The user didn't specify a command; give them help */
+		printf("usage: %s\n\n", perf_usage_string);
+		list_common_cmds_help();
+		printf("\n%s\n", perf_more_info_string);
+		exit(1);
+	}
+	cmd = argv[0];
+
+	/*
+	 * We use PATH to find perf commands, but we prepend some higher
+	 * precedence paths: the "--exec-path" option, the PERF_EXEC_PATH
+	 * environment, and the $(perfexecdir) from the Makefile at build
+	 * time.
+	 */
+	setup_path();
+
+	while (1) {
+		static int done_help = 0;
+		static int was_alias = 0;
+
+		was_alias = run_argv(&argc, &argv);
+		if (errno != ENOENT)
+			break;
+		if (was_alias) {
+			fprintf(stderr, "Expansion of alias '%s' failed; "
+				"'%s' is not a perf-command\n",
+				cmd, argv[0]);
+			exit(1);
+		}
+		if (!done_help) {
+			cmd = argv[0] = help_unknown_cmd(cmd);
+			done_help = 1;
+		} else
+			break;
+	}
+
+	fprintf(stderr, "Failed to run command '%s': %s\n",
+		cmd, strerror(errno));
+
+	return 1;
+}
diff --git a/Documentation/perf_counter/quote.c b/Documentation/perf_counter/quote.c
new file mode 100644
index 00000000000..7a49fcf6967
--- /dev/null
+++ b/Documentation/perf_counter/quote.c
@@ -0,0 +1,478 @@
+#include "cache.h"
+#include "quote.h"
+
+int quote_path_fully = 1;
+
+/* Help to copy the thing properly quoted for shell safety: any single
+ * quote is replaced with '\'', any exclamation point is replaced with
+ * '\!', and the whole thing is enclosed in a single quote pair.
+ *
+ * E.g.
+ *  original     sq_quote        result
+ *  name     ==> name        ==> 'name'
+ *  a b      ==> a b         ==> 'a b'
+ *  a'b      ==> a'\''b      ==> 'a'\''b'
+ *  a!b      ==> a'\!'b      ==> 'a'\!'b'
+ */
+static inline int need_bs_quote(char c)
+{
+	return (c == '\'' || c == '!');
+}
+
+void sq_quote_buf(struct strbuf *dst, const char *src)
+{
+	char *to_free = NULL;
+
+	if (dst->buf == src)
+		to_free = strbuf_detach(dst, NULL);
+
+	strbuf_addch(dst, '\'');
+	while (*src) {
+		size_t len = strcspn(src, "'!");
+		strbuf_add(dst, src, len);
+		src += len;
+		while (need_bs_quote(*src)) {
+			strbuf_addstr(dst, "'\\");
+			strbuf_addch(dst, *src++);
+			strbuf_addch(dst, '\'');
+		}
+	}
+	strbuf_addch(dst, '\'');
+	free(to_free);
+}
+
+void sq_quote_print(FILE *stream, const char *src)
+{
+	char c;
+
+	fputc('\'', stream);
+	while ((c = *src++)) {
+		if (need_bs_quote(c)) {
+			fputs("'\\", stream);
+			fputc(c, stream);
+			fputc('\'', stream);
+		} else {
+			fputc(c, stream);
+		}
+	}
+	fputc('\'', stream);
+}
+
+void sq_quote_argv(struct strbuf *dst, const char **argv, size_t maxlen)
+{
+	int i;
+
+	/* Copy into destination buffer.
*/ + strbuf_grow(dst, 255); + for (i = 0; argv[i]; ++i) { + strbuf_addch(dst, ' '); + sq_quote_buf(dst, argv[i]); + if (maxlen && dst->len > maxlen) + die("Too many or long arguments"); + } +} + +char *sq_dequote_step(char *arg, char **next) +{ + char *dst = arg; + char *src = arg; + char c; + + if (*src != '\'') + return NULL; + for (;;) { + c = *++src; + if (!c) + return NULL; + if (c != '\'') { + *dst++ = c; + continue; + } + /* We stepped out of sq */ + switch (*++src) { + case '\0': + *dst = 0; + if (next) + *next = NULL; + return arg; + case '\\': + c = *++src; + if (need_bs_quote(c) && *++src == '\'') { + *dst++ = c; + continue; + } + /* Fallthrough */ + default: + if (!next || !isspace(*src)) + return NULL; + do { + c = *++src; + } while (isspace(c)); + *dst = 0; + *next = src; + return arg; + } + } +} + +char *sq_dequote(char *arg) +{ + return sq_dequote_step(arg, NULL); +} + +int sq_dequote_to_argv(char *arg, const char ***argv, int *nr, int *alloc) +{ + char *next = arg; + + if (!*arg) + return 0; + do { + char *dequoted = sq_dequote_step(next, &next); + if (!dequoted) + return -1; + ALLOC_GROW(*argv, *nr + 1, *alloc); + (*argv)[(*nr)++] = dequoted; + } while (next); + + return 0; +} + +/* 1 means: quote as octal + * 0 means: quote as octal if (quote_path_fully) + * -1 means: never quote + * c: quote as "\\c" + */ +#define X8(x) x, x, x, x, x, x, x, x +#define X16(x) X8(x), X8(x) +static signed char const sq_lookup[256] = { + /* 0 1 2 3 4 5 6 7 */ + /* 0x00 */ 1, 1, 1, 1, 1, 1, 1, 'a', + /* 0x08 */ 'b', 't', 'n', 'v', 'f', 'r', 1, 1, + /* 0x10 */ X16(1), + /* 0x20 */ -1, -1, '"', -1, -1, -1, -1, -1, + /* 0x28 */ X16(-1), X16(-1), X16(-1), + /* 0x58 */ -1, -1, -1, -1,'\\', -1, -1, -1, + /* 0x60 */ X16(-1), X8(-1), + /* 0x78 */ -1, -1, -1, -1, -1, -1, -1, 1, + /* 0x80 */ /* set to 0 */ +}; + +static inline int sq_must_quote(char c) +{ + return sq_lookup[(unsigned char)c] + quote_path_fully > 0; +} + +/* returns the longest prefix not needing a quote up to maxlen if positive. + This stops at the first \0 because it's marked as a character needing an + escape */ +static size_t next_quote_pos(const char *s, ssize_t maxlen) +{ + size_t len; + if (maxlen < 0) { + for (len = 0; !sq_must_quote(s[len]); len++); + } else { + for (len = 0; len < maxlen && !sq_must_quote(s[len]); len++); + } + return len; +} + +/* + * C-style name quoting. + * + * (1) if sb and fp are both NULL, inspect the input name and counts the + * number of bytes that are needed to hold c_style quoted version of name, + * counting the double quotes around it but not terminating NUL, and + * returns it. + * However, if name does not need c_style quoting, it returns 0. + * + * (2) if sb or fp are not NULL, it emits the c_style quoted version + * of name, enclosed with double quotes if asked and needed only. + * Return value is the same as in (1). 
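+ *
+ * For example, quote_c_style("a\"b", NULL, NULL, 0) counts the quoted
+ * form "a\"b" and returns 6, while a name that needs no quoting makes
+ * it return 0.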
+ */ +static size_t quote_c_style_counted(const char *name, ssize_t maxlen, + struct strbuf *sb, FILE *fp, int no_dq) +{ +#undef EMIT +#define EMIT(c) \ + do { \ + if (sb) strbuf_addch(sb, (c)); \ + if (fp) fputc((c), fp); \ + count++; \ + } while (0) +#define EMITBUF(s, l) \ + do { \ + if (sb) strbuf_add(sb, (s), (l)); \ + if (fp) fwrite((s), (l), 1, fp); \ + count += (l); \ + } while (0) + + size_t len, count = 0; + const char *p = name; + + for (;;) { + int ch; + + len = next_quote_pos(p, maxlen); + if (len == maxlen || !p[len]) + break; + + if (!no_dq && p == name) + EMIT('"'); + + EMITBUF(p, len); + EMIT('\\'); + p += len; + ch = (unsigned char)*p++; + if (sq_lookup[ch] >= ' ') { + EMIT(sq_lookup[ch]); + } else { + EMIT(((ch >> 6) & 03) + '0'); + EMIT(((ch >> 3) & 07) + '0'); + EMIT(((ch >> 0) & 07) + '0'); + } + } + + EMITBUF(p, len); + if (p == name) /* no ending quote needed */ + return 0; + + if (!no_dq) + EMIT('"'); + return count; +} + +size_t quote_c_style(const char *name, struct strbuf *sb, FILE *fp, int nodq) +{ + return quote_c_style_counted(name, -1, sb, fp, nodq); +} + +void quote_two_c_style(struct strbuf *sb, const char *prefix, const char *path, int nodq) +{ + if (quote_c_style(prefix, NULL, NULL, 0) || + quote_c_style(path, NULL, NULL, 0)) { + if (!nodq) + strbuf_addch(sb, '"'); + quote_c_style(prefix, sb, NULL, 1); + quote_c_style(path, sb, NULL, 1); + if (!nodq) + strbuf_addch(sb, '"'); + } else { + strbuf_addstr(sb, prefix); + strbuf_addstr(sb, path); + } +} + +void write_name_quoted(const char *name, FILE *fp, int terminator) +{ + if (terminator) { + quote_c_style(name, NULL, fp, 0); + } else { + fputs(name, fp); + } + fputc(terminator, fp); +} + +extern void write_name_quotedpfx(const char *pfx, size_t pfxlen, + const char *name, FILE *fp, int terminator) +{ + int needquote = 0; + + if (terminator) { + needquote = next_quote_pos(pfx, pfxlen) < pfxlen + || name[next_quote_pos(name, -1)]; + } + if (needquote) { + fputc('"', fp); + quote_c_style_counted(pfx, pfxlen, NULL, fp, 1); + quote_c_style(name, NULL, fp, 1); + fputc('"', fp); + } else { + fwrite(pfx, pfxlen, 1, fp); + fputs(name, fp); + } + fputc(terminator, fp); +} + +/* quote path as relative to the given prefix */ +char *quote_path_relative(const char *in, int len, + struct strbuf *out, const char *prefix) +{ + int needquote; + + if (len < 0) + len = strlen(in); + + /* "../" prefix itself does not need quoting, but "in" might. */ + needquote = next_quote_pos(in, len) < len; + strbuf_setlen(out, 0); + strbuf_grow(out, len); + + if (needquote) + strbuf_addch(out, '"'); + if (prefix) { + int off = 0; + while (prefix[off] && off < len && prefix[off] == in[off]) + if (prefix[off] == '/') { + prefix += off + 1; + in += off + 1; + len -= off + 1; + off = 0; + } else + off++; + + for (; *prefix; prefix++) + if (*prefix == '/') + strbuf_addstr(out, "../"); + } + + quote_c_style_counted (in, len, out, NULL, 1); + + if (needquote) + strbuf_addch(out, '"'); + if (!out->len) + strbuf_addstr(out, "./"); + + return out->buf; +} + +/* + * C-style name unquoting. + * + * Quoted should point at the opening double quote. + * + Returns 0 if it was able to unquote the string properly, and appends the + * result in the strbuf `sb'. + * + Returns -1 in case of error, and doesn't touch the strbuf. Though note + * that this function will allocate memory in the strbuf, so calling + * strbuf_release is mandatory whichever result unquote_c_style returns. 
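+ *
+ * For example, unquote_c_style(&sb, "\"a\\tb\"", &ep) appends the three
+ * bytes 'a', TAB, 'b' to sb and returns 0.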
+ * + * Updates endp pointer to point at one past the ending double quote if given. + */ +int unquote_c_style(struct strbuf *sb, const char *quoted, const char **endp) +{ + size_t oldlen = sb->len, len; + int ch, ac; + + if (*quoted++ != '"') + return -1; + + for (;;) { + len = strcspn(quoted, "\"\\"); + strbuf_add(sb, quoted, len); + quoted += len; + + switch (*quoted++) { + case '"': + if (endp) + *endp = quoted; + return 0; + case '\\': + break; + default: + goto error; + } + + switch ((ch = *quoted++)) { + case 'a': ch = '\a'; break; + case 'b': ch = '\b'; break; + case 'f': ch = '\f'; break; + case 'n': ch = '\n'; break; + case 'r': ch = '\r'; break; + case 't': ch = '\t'; break; + case 'v': ch = '\v'; break; + + case '\\': case '"': + break; /* verbatim */ + + /* octal values with first digit over 4 overflow */ + case '0': case '1': case '2': case '3': + ac = ((ch - '0') << 6); + if ((ch = *quoted++) < '0' || '7' < ch) + goto error; + ac |= ((ch - '0') << 3); + if ((ch = *quoted++) < '0' || '7' < ch) + goto error; + ac |= (ch - '0'); + ch = ac; + break; + default: + goto error; + } + strbuf_addch(sb, ch); + } + + error: + strbuf_setlen(sb, oldlen); + return -1; +} + +/* quoting as a string literal for other languages */ + +void perl_quote_print(FILE *stream, const char *src) +{ + const char sq = '\''; + const char bq = '\\'; + char c; + + fputc(sq, stream); + while ((c = *src++)) { + if (c == sq || c == bq) + fputc(bq, stream); + fputc(c, stream); + } + fputc(sq, stream); +} + +void python_quote_print(FILE *stream, const char *src) +{ + const char sq = '\''; + const char bq = '\\'; + const char nl = '\n'; + char c; + + fputc(sq, stream); + while ((c = *src++)) { + if (c == nl) { + fputc(bq, stream); + fputc('n', stream); + continue; + } + if (c == sq || c == bq) + fputc(bq, stream); + fputc(c, stream); + } + fputc(sq, stream); +} + +void tcl_quote_print(FILE *stream, const char *src) +{ + char c; + + fputc('"', stream); + while ((c = *src++)) { + switch (c) { + case '[': case ']': + case '{': case '}': + case '$': case '\\': case '"': + fputc('\\', stream); + default: + fputc(c, stream); + break; + case '\f': + fputs("\\f", stream); + break; + case '\r': + fputs("\\r", stream); + break; + case '\n': + fputs("\\n", stream); + break; + case '\t': + fputs("\\t", stream); + break; + case '\v': + fputs("\\v", stream); + break; + } + } + fputc('"', stream); +} diff --git a/Documentation/perf_counter/quote.h b/Documentation/perf_counter/quote.h new file mode 100644 index 00000000000..66730f2bff3 --- /dev/null +++ b/Documentation/perf_counter/quote.h @@ -0,0 +1,68 @@ +#ifndef QUOTE_H +#define QUOTE_H + +#include +#include + +/* Help to copy the thing properly quoted for the shell safety. + * any single quote is replaced with '\'', any exclamation point + * is replaced with '\!', and the whole thing is enclosed in a + * single quote pair. + * + * For example, if you are passing the result to system() as an + * argument: + * + * sprintf(cmd, "foobar %s %s", sq_quote(arg0), sq_quote(arg1)) + * + * would be appropriate. If the system() is going to call ssh to + * run the command on the other side: + * + * sprintf(cmd, "git-diff-tree %s %s", sq_quote(arg0), sq_quote(arg1)); + * sprintf(rcmd, "ssh %s %s", sq_quote(host), sq_quote(cmd)); + * + * Note that the above examples leak memory! Remember to free result from + * sq_quote() in a real application. 
+ * + * sq_quote_buf() writes to an existing buffer of specified size; it + * will return the number of characters that would have been written + * excluding the final null regardless of the buffer size. + */ + +extern void sq_quote_print(FILE *stream, const char *src); + +extern void sq_quote_buf(struct strbuf *, const char *src); +extern void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen); + +/* This unwraps what sq_quote() produces in place, but returns + * NULL if the input does not look like what sq_quote would have + * produced. + */ +extern char *sq_dequote(char *); + +/* + * Same as the above, but can be used to unwrap many arguments in the + * same string separated by space. "next" is changed to point to the + * next argument that should be passed as first parameter. When there + * is no more argument to be dequoted, "next" is updated to point to NULL. + */ +extern char *sq_dequote_step(char *arg, char **next); +extern int sq_dequote_to_argv(char *arg, const char ***argv, int *nr, int *alloc); + +extern int unquote_c_style(struct strbuf *, const char *quoted, const char **endp); +extern size_t quote_c_style(const char *name, struct strbuf *, FILE *, int no_dq); +extern void quote_two_c_style(struct strbuf *, const char *, const char *, int); + +extern void write_name_quoted(const char *name, FILE *, int terminator); +extern void write_name_quotedpfx(const char *pfx, size_t pfxlen, + const char *name, FILE *, int terminator); + +/* quote path as relative to the given prefix */ +char *quote_path_relative(const char *in, int len, + struct strbuf *out, const char *prefix); + +/* quoting as a string literal for other languages */ +extern void perl_quote_print(FILE *stream, const char *src); +extern void python_quote_print(FILE *stream, const char *src); +extern void tcl_quote_print(FILE *stream, const char *src); + +#endif diff --git a/Documentation/perf_counter/run-command.c b/Documentation/perf_counter/run-command.c new file mode 100644 index 00000000000..b2f5e854f40 --- /dev/null +++ b/Documentation/perf_counter/run-command.c @@ -0,0 +1,395 @@ +#include "cache.h" +#include "run-command.h" +#include "exec_cmd.h" + +static inline void close_pair(int fd[2]) +{ + close(fd[0]); + close(fd[1]); +} + +static inline void dup_devnull(int to) +{ + int fd = open("/dev/null", O_RDWR); + dup2(fd, to); + close(fd); +} + +int start_command(struct child_process *cmd) +{ + int need_in, need_out, need_err; + int fdin[2], fdout[2], fderr[2]; + + /* + * In case of errors we must keep the promise to close FDs + * that have been passed in via ->in and ->out. 
+ */ + + need_in = !cmd->no_stdin && cmd->in < 0; + if (need_in) { + if (pipe(fdin) < 0) { + if (cmd->out > 0) + close(cmd->out); + return -ERR_RUN_COMMAND_PIPE; + } + cmd->in = fdin[1]; + } + + need_out = !cmd->no_stdout + && !cmd->stdout_to_stderr + && cmd->out < 0; + if (need_out) { + if (pipe(fdout) < 0) { + if (need_in) + close_pair(fdin); + else if (cmd->in) + close(cmd->in); + return -ERR_RUN_COMMAND_PIPE; + } + cmd->out = fdout[0]; + } + + need_err = !cmd->no_stderr && cmd->err < 0; + if (need_err) { + if (pipe(fderr) < 0) { + if (need_in) + close_pair(fdin); + else if (cmd->in) + close(cmd->in); + if (need_out) + close_pair(fdout); + else if (cmd->out) + close(cmd->out); + return -ERR_RUN_COMMAND_PIPE; + } + cmd->err = fderr[0]; + } + +#ifndef __MINGW32__ + fflush(NULL); + cmd->pid = fork(); + if (!cmd->pid) { + if (cmd->no_stdin) + dup_devnull(0); + else if (need_in) { + dup2(fdin[0], 0); + close_pair(fdin); + } else if (cmd->in) { + dup2(cmd->in, 0); + close(cmd->in); + } + + if (cmd->no_stderr) + dup_devnull(2); + else if (need_err) { + dup2(fderr[1], 2); + close_pair(fderr); + } + + if (cmd->no_stdout) + dup_devnull(1); + else if (cmd->stdout_to_stderr) + dup2(2, 1); + else if (need_out) { + dup2(fdout[1], 1); + close_pair(fdout); + } else if (cmd->out > 1) { + dup2(cmd->out, 1); + close(cmd->out); + } + + if (cmd->dir && chdir(cmd->dir)) + die("exec %s: cd to %s failed (%s)", cmd->argv[0], + cmd->dir, strerror(errno)); + if (cmd->env) { + for (; *cmd->env; cmd->env++) { + if (strchr(*cmd->env, '=')) + putenv((char*)*cmd->env); + else + unsetenv(*cmd->env); + } + } + if (cmd->preexec_cb) + cmd->preexec_cb(); + if (cmd->perf_cmd) { + execv_perf_cmd(cmd->argv); + } else { + execvp(cmd->argv[0], (char *const*) cmd->argv); + } + exit(127); + } +#else + int s0 = -1, s1 = -1, s2 = -1; /* backups of stdin, stdout, stderr */ + const char **sargv = cmd->argv; + char **env = environ; + + if (cmd->no_stdin) { + s0 = dup(0); + dup_devnull(0); + } else if (need_in) { + s0 = dup(0); + dup2(fdin[0], 0); + } else if (cmd->in) { + s0 = dup(0); + dup2(cmd->in, 0); + } + + if (cmd->no_stderr) { + s2 = dup(2); + dup_devnull(2); + } else if (need_err) { + s2 = dup(2); + dup2(fderr[1], 2); + } + + if (cmd->no_stdout) { + s1 = dup(1); + dup_devnull(1); + } else if (cmd->stdout_to_stderr) { + s1 = dup(1); + dup2(2, 1); + } else if (need_out) { + s1 = dup(1); + dup2(fdout[1], 1); + } else if (cmd->out > 1) { + s1 = dup(1); + dup2(cmd->out, 1); + } + + if (cmd->dir) + die("chdir in start_command() not implemented"); + if (cmd->env) { + env = copy_environ(); + for (; *cmd->env; cmd->env++) + env = env_setenv(env, *cmd->env); + } + + if (cmd->perf_cmd) { + cmd->argv = prepare_perf_cmd(cmd->argv); + } + + cmd->pid = mingw_spawnvpe(cmd->argv[0], cmd->argv, env); + + if (cmd->env) + free_environ(env); + if (cmd->perf_cmd) + free(cmd->argv); + + cmd->argv = sargv; + if (s0 >= 0) + dup2(s0, 0), close(s0); + if (s1 >= 0) + dup2(s1, 1), close(s1); + if (s2 >= 0) + dup2(s2, 2), close(s2); +#endif + + if (cmd->pid < 0) { + int err = errno; + if (need_in) + close_pair(fdin); + else if (cmd->in) + close(cmd->in); + if (need_out) + close_pair(fdout); + else if (cmd->out) + close(cmd->out); + if (need_err) + close_pair(fderr); + return err == ENOENT ? 
+ -ERR_RUN_COMMAND_EXEC : + -ERR_RUN_COMMAND_FORK; + } + + if (need_in) + close(fdin[0]); + else if (cmd->in) + close(cmd->in); + + if (need_out) + close(fdout[1]); + else if (cmd->out) + close(cmd->out); + + if (need_err) + close(fderr[1]); + + return 0; +} + +static int wait_or_whine(pid_t pid) +{ + for (;;) { + int status, code; + pid_t waiting = waitpid(pid, &status, 0); + + if (waiting < 0) { + if (errno == EINTR) + continue; + error("waitpid failed (%s)", strerror(errno)); + return -ERR_RUN_COMMAND_WAITPID; + } + if (waiting != pid) + return -ERR_RUN_COMMAND_WAITPID_WRONG_PID; + if (WIFSIGNALED(status)) + return -ERR_RUN_COMMAND_WAITPID_SIGNAL; + + if (!WIFEXITED(status)) + return -ERR_RUN_COMMAND_WAITPID_NOEXIT; + code = WEXITSTATUS(status); + switch (code) { + case 127: + return -ERR_RUN_COMMAND_EXEC; + case 0: + return 0; + default: + return -code; + } + } +} + +int finish_command(struct child_process *cmd) +{ + return wait_or_whine(cmd->pid); +} + +int run_command(struct child_process *cmd) +{ + int code = start_command(cmd); + if (code) + return code; + return finish_command(cmd); +} + +static void prepare_run_command_v_opt(struct child_process *cmd, + const char **argv, + int opt) +{ + memset(cmd, 0, sizeof(*cmd)); + cmd->argv = argv; + cmd->no_stdin = opt & RUN_COMMAND_NO_STDIN ? 1 : 0; + cmd->perf_cmd = opt & RUN_PERF_CMD ? 1 : 0; + cmd->stdout_to_stderr = opt & RUN_COMMAND_STDOUT_TO_STDERR ? 1 : 0; +} + +int run_command_v_opt(const char **argv, int opt) +{ + struct child_process cmd; + prepare_run_command_v_opt(&cmd, argv, opt); + return run_command(&cmd); +} + +int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env) +{ + struct child_process cmd; + prepare_run_command_v_opt(&cmd, argv, opt); + cmd.dir = dir; + cmd.env = env; + return run_command(&cmd); +} + +#ifdef __MINGW32__ +static __stdcall unsigned run_thread(void *data) +{ + struct async *async = data; + return async->proc(async->fd_for_proc, async->data); +} +#endif + +int start_async(struct async *async) +{ + int pipe_out[2]; + + if (pipe(pipe_out) < 0) + return error("cannot create pipe: %s", strerror(errno)); + async->out = pipe_out[0]; + +#ifndef __MINGW32__ + /* Flush stdio before fork() to avoid cloning buffers */ + fflush(NULL); + + async->pid = fork(); + if (async->pid < 0) { + error("fork (async) failed: %s", strerror(errno)); + close_pair(pipe_out); + return -1; + } + if (!async->pid) { + close(pipe_out[0]); + exit(!!async->proc(pipe_out[1], async->data)); + } + close(pipe_out[1]); +#else + async->fd_for_proc = pipe_out[1]; + async->tid = (HANDLE) _beginthreadex(NULL, 0, run_thread, async, 0, NULL); + if (!async->tid) { + error("cannot create thread: %s", strerror(errno)); + close_pair(pipe_out); + return -1; + } +#endif + return 0; +} + +int finish_async(struct async *async) +{ +#ifndef __MINGW32__ + int ret = 0; + + if (wait_or_whine(async->pid)) + ret = error("waitpid (async) failed"); +#else + DWORD ret = 0; + if (WaitForSingleObject(async->tid, INFINITE) != WAIT_OBJECT_0) + ret = error("waiting for thread failed: %lu", GetLastError()); + else if (!GetExitCodeThread(async->tid, &ret)) + ret = error("cannot get thread exit code: %lu", GetLastError()); + CloseHandle(async->tid); +#endif + return ret; +} + +int run_hook(const char *index_file, const char *name, ...) 
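+/*
+ * Illustrative note: the variadic arguments are additional argv entries
+ * for the hook and must be terminated with NULL, e.g. a made-up call:
+ *
+ *	run_hook(NULL, "record", "some-arg", NULL);
+ */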
+{
+	struct child_process hook;
+	const char **argv = NULL, *env[2];
+	char index[PATH_MAX];
+	va_list args;
+	int ret;
+	size_t i = 0, alloc = 0;
+
+	if (access(perf_path("hooks/%s", name), X_OK) < 0)
+		return 0;
+
+	va_start(args, name);
+	ALLOC_GROW(argv, i + 1, alloc);
+	argv[i++] = perf_path("hooks/%s", name);
+	while (argv[i-1]) {
+		ALLOC_GROW(argv, i + 1, alloc);
+		argv[i++] = va_arg(args, const char *);
+	}
+	va_end(args);
+
+	memset(&hook, 0, sizeof(hook));
+	hook.argv = argv;
+	hook.no_stdin = 1;
+	hook.stdout_to_stderr = 1;
+	if (index_file) {
+		snprintf(index, sizeof(index), "PERF_INDEX_FILE=%s", index_file);
+		env[0] = index;
+		env[1] = NULL;
+		hook.env = env;
+	}
+
+	ret = start_command(&hook);
+	if (ret) {
+		warning("Could not spawn %s", argv[0]);
+		free(argv);
+		return ret;
+	}
+	ret = finish_command(&hook);
+	if (ret == -ERR_RUN_COMMAND_WAITPID_SIGNAL)
+		warning("%s exited due to uncaught signal", argv[0]);
+
+	/* free argv only after its last use in the warnings above */
+	free(argv);
+	return ret;
+}
diff --git a/Documentation/perf_counter/run-command.h b/Documentation/perf_counter/run-command.h
new file mode 100644
index 00000000000..328289f2366
--- /dev/null
+++ b/Documentation/perf_counter/run-command.h
@@ -0,0 +1,93 @@
+#ifndef RUN_COMMAND_H
+#define RUN_COMMAND_H
+
+enum {
+	ERR_RUN_COMMAND_FORK = 10000,
+	ERR_RUN_COMMAND_EXEC,
+	ERR_RUN_COMMAND_PIPE,
+	ERR_RUN_COMMAND_WAITPID,
+	ERR_RUN_COMMAND_WAITPID_WRONG_PID,
+	ERR_RUN_COMMAND_WAITPID_SIGNAL,
+	ERR_RUN_COMMAND_WAITPID_NOEXIT,
+};
+#define IS_RUN_COMMAND_ERR(x) (-(x) >= ERR_RUN_COMMAND_FORK)
+
+struct child_process {
+	const char **argv;
+	pid_t pid;
+	/*
+	 * Using .in, .out, .err:
+	 * - Specify 0 for no redirections (child inherits stdin, stdout,
+	 *   stderr from parent).
+	 * - Specify -1 to have a pipe allocated as follows:
+	 *     .in: returns the writable pipe end; parent writes to it,
+	 *          the readable pipe end becomes child's stdin
+	 *     .out, .err: returns the readable pipe end; parent reads from
+	 *          it, the writable pipe end becomes child's stdout/stderr
+	 *   The caller of start_command() must close the returned FDs
+	 *   after it has completed reading from/writing to them!
+	 * - Specify > 0 to set a channel to a particular FD as follows:
+	 *     .in: a readable FD, becomes child's stdin
+	 *     .out: a writable FD, becomes child's stdout
+	 *     .err > 0 not supported
+	 *   The specified FD is closed by start_command(), even in case
+	 *   of errors!
+	 */
+	int in;
+	int out;
+	int err;
+	const char *dir;
+	const char *const *env;
+	unsigned no_stdin:1;
+	unsigned no_stdout:1;
+	unsigned no_stderr:1;
+	unsigned perf_cmd:1; /* if this is to be a perf sub-command */
+	unsigned stdout_to_stderr:1;
+	void (*preexec_cb)(void);
+};
+
+int start_command(struct child_process *);
+int finish_command(struct child_process *);
+int run_command(struct child_process *);
+
+extern int run_hook(const char *index_file, const char *name, ...);
+
+#define RUN_COMMAND_NO_STDIN		1
+#define RUN_PERF_CMD			2 /* if this is to be a perf sub-command */
+#define RUN_COMMAND_STDOUT_TO_STDERR	4
+int run_command_v_opt(const char **argv, int opt);
+
+/*
+ * env (the environment) is to be formatted like environ: "VAR=VALUE".
+ * To unset an environment variable use just "VAR".
+ */
+int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env);
+
+/*
+ * The purpose of the following functions is to feed a pipe by running
+ * a function asynchronously and providing output that the caller reads.
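+ *
+ * A usage sketch (illustrative; produce_stuff is a made-up callback that
+ * writes to the fd it is given, closes it, and returns 0):
+ *
+ *	struct async as;
+ *	memset(&as, 0, sizeof(as));
+ *	as.proc = produce_stuff;
+ *	as.data = &my_state;
+ *	if (start_async(&as))
+ *		die("cannot start producer");
+ *	... read from as.out until EOF, then close(as.out) ...
+ *	if (finish_async(&as))
+ *		die("producer failed");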
+ * + * It is expected that no synchronization and mutual exclusion between + * the caller and the feed function is necessary so that the function + * can run in a thread without interfering with the caller. + */ +struct async { + /* + * proc writes to fd and closes it; + * returns 0 on success, non-zero on failure + */ + int (*proc)(int fd, void *data); + void *data; + int out; /* caller reads from here and closes it */ +#ifndef __MINGW32__ + pid_t pid; +#else + HANDLE tid; + int fd_for_proc; +#endif +}; + +int start_async(struct async *async); +int finish_async(struct async *async); + +#endif diff --git a/Documentation/perf_counter/strbuf.c b/Documentation/perf_counter/strbuf.c new file mode 100644 index 00000000000..eaba0930680 --- /dev/null +++ b/Documentation/perf_counter/strbuf.c @@ -0,0 +1,359 @@ +#include "cache.h" + +int prefixcmp(const char *str, const char *prefix) +{ + for (; ; str++, prefix++) + if (!*prefix) + return 0; + else if (*str != *prefix) + return (unsigned char)*prefix - (unsigned char)*str; +} + +/* + * Used as the default ->buf value, so that people can always assume + * buf is non NULL and ->buf is NUL terminated even for a freshly + * initialized strbuf. + */ +char strbuf_slopbuf[1]; + +void strbuf_init(struct strbuf *sb, size_t hint) +{ + sb->alloc = sb->len = 0; + sb->buf = strbuf_slopbuf; + if (hint) + strbuf_grow(sb, hint); +} + +void strbuf_release(struct strbuf *sb) +{ + if (sb->alloc) { + free(sb->buf); + strbuf_init(sb, 0); + } +} + +char *strbuf_detach(struct strbuf *sb, size_t *sz) +{ + char *res = sb->alloc ? sb->buf : NULL; + if (sz) + *sz = sb->len; + strbuf_init(sb, 0); + return res; +} + +void strbuf_attach(struct strbuf *sb, void *buf, size_t len, size_t alloc) +{ + strbuf_release(sb); + sb->buf = buf; + sb->len = len; + sb->alloc = alloc; + strbuf_grow(sb, 0); + sb->buf[sb->len] = '\0'; +} + +void strbuf_grow(struct strbuf *sb, size_t extra) +{ + if (sb->len + extra + 1 <= sb->len) + die("you want to use way too much memory"); + if (!sb->alloc) + sb->buf = NULL; + ALLOC_GROW(sb->buf, sb->len + extra + 1, sb->alloc); +} + +void strbuf_trim(struct strbuf *sb) +{ + char *b = sb->buf; + while (sb->len > 0 && isspace((unsigned char)sb->buf[sb->len - 1])) + sb->len--; + while (sb->len > 0 && isspace(*b)) { + b++; + sb->len--; + } + memmove(sb->buf, b, sb->len); + sb->buf[sb->len] = '\0'; +} +void strbuf_rtrim(struct strbuf *sb) +{ + while (sb->len > 0 && isspace((unsigned char)sb->buf[sb->len - 1])) + sb->len--; + sb->buf[sb->len] = '\0'; +} + +void strbuf_ltrim(struct strbuf *sb) +{ + char *b = sb->buf; + while (sb->len > 0 && isspace(*b)) { + b++; + sb->len--; + } + memmove(sb->buf, b, sb->len); + sb->buf[sb->len] = '\0'; +} + +void strbuf_tolower(struct strbuf *sb) +{ + int i; + for (i = 0; i < sb->len; i++) + sb->buf[i] = tolower(sb->buf[i]); +} + +struct strbuf **strbuf_split(const struct strbuf *sb, int delim) +{ + int alloc = 2, pos = 0; + char *n, *p; + struct strbuf **ret; + struct strbuf *t; + + ret = calloc(alloc, sizeof(struct strbuf *)); + p = n = sb->buf; + while (n < sb->buf + sb->len) { + int len; + n = memchr(n, delim, sb->len - (n - sb->buf)); + if (pos + 1 >= alloc) { + alloc = alloc * 2; + ret = realloc(ret, sizeof(struct strbuf *) * alloc); + } + if (!n) + n = sb->buf + sb->len - 1; + len = n - p + 1; + t = malloc(sizeof(struct strbuf)); + strbuf_init(t, len); + strbuf_add(t, p, len); + ret[pos] = t; + ret[++pos] = NULL; + p = ++n; + } + return ret; +} + +void strbuf_list_free(struct strbuf **sbs) +{ + struct strbuf **s = sbs; + + 
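+	/* release each strbuf (as produced by strbuf_split()), then the list itself */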
+	while (*s) {
+		strbuf_release(*s);
+		free(*s++);
+	}
+	free(sbs);
+}
+
+int strbuf_cmp(const struct strbuf *a, const struct strbuf *b)
+{
+	int len = a->len < b->len ? a->len : b->len;
+	int cmp = memcmp(a->buf, b->buf, len);
+	if (cmp)
+		return cmp;
+	return a->len < b->len ? -1 : a->len != b->len;
+}
+
+void strbuf_splice(struct strbuf *sb, size_t pos, size_t len,
+		   const void *data, size_t dlen)
+{
+	if (pos + len < pos)
+		die("you want to use way too much memory");
+	if (pos > sb->len)
+		die("`pos' is too far after the end of the buffer");
+	if (pos + len > sb->len)
+		die("`pos + len' is too far after the end of the buffer");
+
+	if (dlen >= len)
+		strbuf_grow(sb, dlen - len);
+	memmove(sb->buf + pos + dlen,
+		sb->buf + pos + len,
+		sb->len - pos - len);
+	memcpy(sb->buf + pos, data, dlen);
+	strbuf_setlen(sb, sb->len + dlen - len);
+}
+
+void strbuf_insert(struct strbuf *sb, size_t pos, const void *data, size_t len)
+{
+	strbuf_splice(sb, pos, 0, data, len);
+}
+
+void strbuf_remove(struct strbuf *sb, size_t pos, size_t len)
+{
+	strbuf_splice(sb, pos, len, NULL, 0);
+}
+
+void strbuf_add(struct strbuf *sb, const void *data, size_t len)
+{
+	strbuf_grow(sb, len);
+	memcpy(sb->buf + sb->len, data, len);
+	strbuf_setlen(sb, sb->len + len);
+}
+
+void strbuf_adddup(struct strbuf *sb, size_t pos, size_t len)
+{
+	strbuf_grow(sb, len);
+	memcpy(sb->buf + sb->len, sb->buf + pos, len);
+	strbuf_setlen(sb, sb->len + len);
+}
+
+void strbuf_addf(struct strbuf *sb, const char *fmt, ...)
+{
+	int len;
+	va_list ap;
+
+	if (!strbuf_avail(sb))
+		strbuf_grow(sb, 64);
+	va_start(ap, fmt);
+	len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
+	va_end(ap);
+	if (len < 0)
+		die("your vsnprintf is broken");
+	if (len > strbuf_avail(sb)) {
+		strbuf_grow(sb, len);
+		va_start(ap, fmt);
+		len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
+		va_end(ap);
+		if (len > strbuf_avail(sb)) {
+			die("this should not happen, your snprintf is broken");
+		}
+	}
+	strbuf_setlen(sb, sb->len + len);
+}
+
+void strbuf_expand(struct strbuf *sb, const char *format, expand_fn_t fn,
+		   void *context)
+{
+	for (;;) {
+		const char *percent;
+		size_t consumed;
+
+		percent = strchrnul(format, '%');
+		strbuf_add(sb, format, percent - format);
+		if (!*percent)
+			break;
+		format = percent + 1;
+
+		consumed = fn(sb, format, context);
+		if (consumed)
+			format += consumed;
+		else
+			strbuf_addch(sb, '%');
+	}
+}
+
+size_t strbuf_expand_dict_cb(struct strbuf *sb, const char *placeholder,
+			     void *context)
+{
+	struct strbuf_expand_dict_entry *e = context;
+	size_t len;
+
+	for (; e->placeholder && (len = strlen(e->placeholder)); e++) {
+		if (!strncmp(placeholder, e->placeholder, len)) {
+			if (e->value)
+				strbuf_addstr(sb, e->value);
+			return len;
+		}
+	}
+	return 0;
+}
+
+size_t strbuf_fread(struct strbuf *sb, size_t size, FILE *f)
+{
+	size_t res;
+	size_t oldalloc = sb->alloc;
+
+	strbuf_grow(sb, size);
+	res = fread(sb->buf + sb->len, 1, size, f);
+	if (res > 0)
+		strbuf_setlen(sb, sb->len + res);
+	else if (oldalloc == 0)	/* res is a size_t and can never be negative */
+		strbuf_release(sb);
+	return res;
+}
+
+ssize_t strbuf_read(struct strbuf *sb, int fd, size_t hint)
+{
+	size_t oldlen = sb->len;
+	size_t oldalloc = sb->alloc;
+
+	strbuf_grow(sb, hint ? hint : 8192);
+	for (;;) {
+		ssize_t cnt;
+
+		cnt = read(fd, sb->buf + sb->len, sb->alloc - sb->len - 1);
+		if (cnt < 0) {
+			if (oldalloc == 0)
+				strbuf_release(sb);
+			else
+				strbuf_setlen(sb, oldlen);
+			return -1;
+		}
+		if (!cnt)
+			break;
+		sb->len += cnt;
+		strbuf_grow(sb, 8192);
+	}
+
+	sb->buf[sb->len] = '\0';
+	return sb->len - oldlen;
+}
+
+#define STRBUF_MAXLINK (2*PATH_MAX)
+
+int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint)
+{
+	size_t oldalloc = sb->alloc;
+
+	if (hint < 32)
+		hint = 32;
+
+	while (hint < STRBUF_MAXLINK) {
+		int len;
+
+		strbuf_grow(sb, hint);
+		len = readlink(path, sb->buf, hint);
+		if (len < 0) {
+			if (errno != ERANGE)
+				break;
+		} else if (len < hint) {
+			strbuf_setlen(sb, len);
+			return 0;
+		}
+
+		/* .. the buffer was too small - try again */
+		hint *= 2;
+	}
+	if (oldalloc == 0)
+		strbuf_release(sb);
+	return -1;
+}
+
+int strbuf_getline(struct strbuf *sb, FILE *fp, int term)
+{
+	int ch;
+
+	strbuf_grow(sb, 0);
+	if (feof(fp))
+		return EOF;
+
+	strbuf_reset(sb);
+	while ((ch = fgetc(fp)) != EOF) {
+		if (ch == term)
+			break;
+		strbuf_grow(sb, 1);
+		sb->buf[sb->len++] = ch;
+	}
+	if (ch == EOF && sb->len == 0)
+		return EOF;
+
+	sb->buf[sb->len] = '\0';
+	return 0;
+}
+
+int strbuf_read_file(struct strbuf *sb, const char *path, size_t hint)
+{
+	int fd, len;
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0)
+		return -1;
+	len = strbuf_read(sb, fd, hint);
+	close(fd);
+	if (len < 0)
+		return -1;
+
+	return len;
+}
diff --git a/Documentation/perf_counter/strbuf.h b/Documentation/perf_counter/strbuf.h
new file mode 100644
index 00000000000..9ee908a3ec5
--- /dev/null
+++ b/Documentation/perf_counter/strbuf.h
@@ -0,0 +1,137 @@
+#ifndef STRBUF_H
+#define STRBUF_H
+
+/*
+ * Strbufs can be used in many ways: as a byte array, or to store
+ * arbitrarily long, overflow-safe strings.
+ *
+ * Strbufs have some invariants that are very important to keep in mind:
+ *
+ * 1. the ->buf member is always malloc-ed, hence strbufs can be used to
+ *    build complex strings/buffers whose final size isn't easily known.
+ *
+ *    It is NOT legal to copy the ->buf pointer away.
+ *    `strbuf_detach' is the operation that detaches a buffer from its shell
+ *    while keeping the shell valid wrt its invariants.
+ *
+ * 2. the ->buf member is a byte array that has at least ->len + 1 bytes
+ *    allocated. The extra byte is used to store a '\0', allowing the ->buf
+ *    member to be a valid C-string. Every strbuf function ensures this
+ *    invariant is preserved.
+ *
+ *    Note that it is OK to "play" with the buffer directly if you work it
+ *    that way:
+ *
+ *    strbuf_grow(sb, SOME_SIZE);
+ *       ... Here, the memory array starting at sb->buf, and of length
+ *       ... strbuf_avail(sb) is all yours, and you are sure that
+ *       ... strbuf_avail(sb) is at least SOME_SIZE.
+ *    strbuf_setlen(sb, sb->len + SOME_OTHER_SIZE);
+ *
+ *    Of course, SOME_OTHER_SIZE must be smaller than or equal to
+ *    strbuf_avail(sb).
+ *
+ *    Doing so is safe, though if it has to be done in many places, adding
+ *    the missing API to the strbuf module is the way to go.
+ *
+ * XXX: do _not_ assume that the area that is yours is of size ->alloc - 1
+ * even if it's true in the current implementation. Alloc is somehow a
+ * "private" member that should not be messed with.
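+ *
+ * A minimal usage sketch (illustrative):
+ *
+ *	struct strbuf sb = STRBUF_INIT;
+ *	strbuf_addf(&sb, "perf-%s", cmd);
+ *	... use sb.buf ...
+ *	strbuf_release(&sb);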
+ */ + +#include + +extern char strbuf_slopbuf[]; +struct strbuf { + size_t alloc; + size_t len; + char *buf; +}; + +#define STRBUF_INIT { 0, 0, strbuf_slopbuf } + +/*----- strbuf life cycle -----*/ +extern void strbuf_init(struct strbuf *, size_t); +extern void strbuf_release(struct strbuf *); +extern char *strbuf_detach(struct strbuf *, size_t *); +extern void strbuf_attach(struct strbuf *, void *, size_t, size_t); +static inline void strbuf_swap(struct strbuf *a, struct strbuf *b) { + struct strbuf tmp = *a; + *a = *b; + *b = tmp; +} + +/*----- strbuf size related -----*/ +static inline size_t strbuf_avail(const struct strbuf *sb) { + return sb->alloc ? sb->alloc - sb->len - 1 : 0; +} + +extern void strbuf_grow(struct strbuf *, size_t); + +static inline void strbuf_setlen(struct strbuf *sb, size_t len) { + if (!sb->alloc) + strbuf_grow(sb, 0); + assert(len < sb->alloc); + sb->len = len; + sb->buf[len] = '\0'; +} +#define strbuf_reset(sb) strbuf_setlen(sb, 0) + +/*----- content related -----*/ +extern void strbuf_trim(struct strbuf *); +extern void strbuf_rtrim(struct strbuf *); +extern void strbuf_ltrim(struct strbuf *); +extern int strbuf_cmp(const struct strbuf *, const struct strbuf *); +extern void strbuf_tolower(struct strbuf *); + +extern struct strbuf **strbuf_split(const struct strbuf *, int delim); +extern void strbuf_list_free(struct strbuf **); + +/*----- add data in your buffer -----*/ +static inline void strbuf_addch(struct strbuf *sb, int c) { + strbuf_grow(sb, 1); + sb->buf[sb->len++] = c; + sb->buf[sb->len] = '\0'; +} + +extern void strbuf_insert(struct strbuf *, size_t pos, const void *, size_t); +extern void strbuf_remove(struct strbuf *, size_t pos, size_t len); + +/* splice pos..pos+len with given data */ +extern void strbuf_splice(struct strbuf *, size_t pos, size_t len, + const void *, size_t); + +extern void strbuf_add(struct strbuf *, const void *, size_t); +static inline void strbuf_addstr(struct strbuf *sb, const char *s) { + strbuf_add(sb, s, strlen(s)); +} +static inline void strbuf_addbuf(struct strbuf *sb, const struct strbuf *sb2) { + strbuf_add(sb, sb2->buf, sb2->len); +} +extern void strbuf_adddup(struct strbuf *sb, size_t pos, size_t len); + +typedef size_t (*expand_fn_t) (struct strbuf *sb, const char *placeholder, void *context); +extern void strbuf_expand(struct strbuf *sb, const char *format, expand_fn_t fn, void *context); +struct strbuf_expand_dict_entry { + const char *placeholder; + const char *value; +}; +extern size_t strbuf_expand_dict_cb(struct strbuf *sb, const char *placeholder, void *context); + +__attribute__((format(printf,2,3))) +extern void strbuf_addf(struct strbuf *sb, const char *fmt, ...); + +extern size_t strbuf_fread(struct strbuf *, size_t, FILE *); +/* XXX: if read fails, any partial read is undone */ +extern ssize_t strbuf_read(struct strbuf *, int fd, size_t hint); +extern int strbuf_read_file(struct strbuf *sb, const char *path, size_t hint); +extern int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint); + +extern int strbuf_getline(struct strbuf *, FILE *, int); + +extern void stripspace(struct strbuf *buf, int skip_comments); +extern int launch_editor(const char *path, struct strbuf *buffer, const char *const *env); + +extern int strbuf_branchname(struct strbuf *sb, const char *name); +extern int strbuf_check_branch_ref(struct strbuf *sb, const char *name); + +#endif /* STRBUF_H */ diff --git a/Documentation/perf_counter/usage.c b/Documentation/perf_counter/usage.c new file mode 100644 index 
00000000000..7a10421fe6b --- /dev/null +++ b/Documentation/perf_counter/usage.c @@ -0,0 +1,80 @@ +/* + * GIT - The information manager from hell + * + * Copyright (C) Linus Torvalds, 2005 + */ +#include "util.h" + +static void report(const char *prefix, const char *err, va_list params) +{ + char msg[1024]; + vsnprintf(msg, sizeof(msg), err, params); + fprintf(stderr, "%s%s\n", prefix, msg); +} + +static NORETURN void usage_builtin(const char *err) +{ + fprintf(stderr, "usage: %s\n", err); + exit(129); +} + +static NORETURN void die_builtin(const char *err, va_list params) +{ + report("fatal: ", err, params); + exit(128); +} + +static void error_builtin(const char *err, va_list params) +{ + report("error: ", err, params); +} + +static void warn_builtin(const char *warn, va_list params) +{ + report("warning: ", warn, params); +} + +/* If we are in a dlopen()ed .so write to a global variable would segfault + * (ugh), so keep things static. */ +static void (*usage_routine)(const char *err) NORETURN = usage_builtin; +static void (*die_routine)(const char *err, va_list params) NORETURN = die_builtin; +static void (*error_routine)(const char *err, va_list params) = error_builtin; +static void (*warn_routine)(const char *err, va_list params) = warn_builtin; + +void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN) +{ + die_routine = routine; +} + +void usage(const char *err) +{ + usage_routine(err); +} + +void die(const char *err, ...) +{ + va_list params; + + va_start(params, err); + die_routine(err, params); + va_end(params); +} + +int error(const char *err, ...) +{ + va_list params; + + va_start(params, err); + error_routine(err, params); + va_end(params); + return -1; +} + +void warning(const char *warn, ...) +{ + va_list params; + + va_start(params, warn); + warn_routine(warn, params); + va_end(params); +} diff --git a/Documentation/perf_counter/util.h b/Documentation/perf_counter/util.h new file mode 100644 index 00000000000..13f8bdce760 --- /dev/null +++ b/Documentation/perf_counter/util.h @@ -0,0 +1,394 @@ +#ifndef GIT_COMPAT_UTIL_H +#define GIT_COMPAT_UTIL_H + +#define _FILE_OFFSET_BITS 64 + +#ifndef FLEX_ARRAY +/* + * See if our compiler is known to support flexible array members. + */ +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) +# define FLEX_ARRAY /* empty */ +#elif defined(__GNUC__) +# if (__GNUC__ >= 3) +# define FLEX_ARRAY /* empty */ +# else +# define FLEX_ARRAY 0 /* older GNU extension */ +# endif +#endif + +/* + * Otherwise, default to safer but a bit wasteful traditional style + */ +#ifndef FLEX_ARRAY +# define FLEX_ARRAY 1 +#endif +#endif + +#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) + +#ifdef __GNUC__ +#define TYPEOF(x) (__typeof__(x)) +#else +#define TYPEOF(x) +#endif + +#define MSB(x, bits) ((x) & TYPEOF(x)(~0ULL << (sizeof(x) * 8 - (bits)))) +#define HAS_MULTI_BITS(i) ((i) & ((i) - 1)) /* checks if an integer has more than 1 bit set */ + +/* Approximation of the length of the decimal representation of this type. 
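+ * For a 4-byte int, for instance, this yields 11 -- enough for
+ * "-2147483648" (the terminating NUL is not counted).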
*/ +#define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1) + +#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__USLC__) && !defined(_M_UNIX) +#define _XOPEN_SOURCE 600 /* glibc2 and AIX 5.3L need 500, OpenBSD needs 600 for S_ISLNK() */ +#define _XOPEN_SOURCE_EXTENDED 1 /* AIX 5.3L needs this */ +#endif +#define _ALL_SOURCE 1 +#define _GNU_SOURCE 1 +#define _BSD_SOURCE 1 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef __MINGW32__ +#include +#include +#include +#include +#ifndef NO_SYS_SELECT_H +#include +#endif +#include +#include +#include +#include +#include +#include +#if defined(__CYGWIN__) +#undef _XOPEN_SOURCE +#include +#define _XOPEN_SOURCE 600 +#include "compat/cygwin.h" +#else +#undef _ALL_SOURCE /* AIX 5.3L defines a struct list with _ALL_SOURCE. */ +#include +#define _ALL_SOURCE 1 +#endif +#else /* __MINGW32__ */ +/* pull in Windows compatibility stuff */ +#include "compat/mingw.h" +#endif /* __MINGW32__ */ + +#ifndef NO_ICONV +#include +#endif + +#ifndef NO_OPENSSL +#include +#include +#endif + +/* On most systems would have given us this, but + * not on some systems (e.g. GNU/Hurd). + */ +#ifndef PATH_MAX +#define PATH_MAX 4096 +#endif + +#ifndef PRIuMAX +#define PRIuMAX "llu" +#endif + +#ifndef PRIu32 +#define PRIu32 "u" +#endif + +#ifndef PRIx32 +#define PRIx32 "x" +#endif + +#ifndef PATH_SEP +#define PATH_SEP ':' +#endif + +#ifndef STRIP_EXTENSION +#define STRIP_EXTENSION "" +#endif + +#ifndef has_dos_drive_prefix +#define has_dos_drive_prefix(path) 0 +#endif + +#ifndef is_dir_sep +#define is_dir_sep(c) ((c) == '/') +#endif + +#ifdef __GNUC__ +#define NORETURN __attribute__((__noreturn__)) +#else +#define NORETURN +#ifndef __attribute__ +#define __attribute__(x) +#endif +#endif + +/* General helper functions */ +extern void usage(const char *err) NORETURN; +extern void die(const char *err, ...) NORETURN __attribute__((format (printf, 1, 2))); +extern int error(const char *err, ...) __attribute__((format (printf, 1, 2))); +extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2))); + +extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN); + +extern int prefixcmp(const char *str, const char *prefix); +extern time_t tm_to_time_t(const struct tm *tm); + +static inline const char *skip_prefix(const char *str, const char *prefix) +{ + size_t len = strlen(prefix); + return strncmp(str, prefix, len) ? NULL : str + len; +} + +#if defined(NO_MMAP) || defined(USE_WIN32_MMAP) + +#ifndef PROT_READ +#define PROT_READ 1 +#define PROT_WRITE 2 +#define MAP_PRIVATE 1 +#define MAP_FAILED ((void*)-1) +#endif + +#define mmap git_mmap +#define munmap git_munmap +extern void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset); +extern int git_munmap(void *start, size_t length); + +#else /* NO_MMAP || USE_WIN32_MMAP */ + +#include + +#endif /* NO_MMAP || USE_WIN32_MMAP */ + +#ifdef NO_MMAP + +/* This value must be multiple of (pagesize * 2) */ +#define DEFAULT_PACKED_GIT_WINDOW_SIZE (1 * 1024 * 1024) + +#else /* NO_MMAP */ + +/* This value must be multiple of (pagesize * 2) */ +#define DEFAULT_PACKED_GIT_WINDOW_SIZE \ + (sizeof(void*) >= 8 \ + ? 
1 * 1024 * 1024 * 1024 \ + : 32 * 1024 * 1024) + +#endif /* NO_MMAP */ + +#ifdef NO_ST_BLOCKS_IN_STRUCT_STAT +#define on_disk_bytes(st) ((st).st_size) +#else +#define on_disk_bytes(st) ((st).st_blocks * 512) +#endif + +#define DEFAULT_PACKED_GIT_LIMIT \ + ((1024L * 1024L) * (sizeof(void*) >= 8 ? 8192 : 256)) + +#ifdef NO_PREAD +#define pread git_pread +extern ssize_t git_pread(int fd, void *buf, size_t count, off_t offset); +#endif +/* + * Forward decl that will remind us if its twin in cache.h changes. + * This function is used in compat/pread.c. But we can't include + * cache.h there. + */ +extern ssize_t read_in_full(int fd, void *buf, size_t count); + +#ifdef NO_SETENV +#define setenv gitsetenv +extern int gitsetenv(const char *, const char *, int); +#endif + +#ifdef NO_MKDTEMP +#define mkdtemp gitmkdtemp +extern char *gitmkdtemp(char *); +#endif + +#ifdef NO_UNSETENV +#define unsetenv gitunsetenv +extern void gitunsetenv(const char *); +#endif + +#ifdef NO_STRCASESTR +#define strcasestr gitstrcasestr +extern char *gitstrcasestr(const char *haystack, const char *needle); +#endif + +#ifdef NO_STRLCPY +#define strlcpy gitstrlcpy +extern size_t gitstrlcpy(char *, const char *, size_t); +#endif + +#ifdef NO_STRTOUMAX +#define strtoumax gitstrtoumax +extern uintmax_t gitstrtoumax(const char *, char **, int); +#endif + +#ifdef NO_HSTRERROR +#define hstrerror githstrerror +extern const char *githstrerror(int herror); +#endif + +#ifdef NO_MEMMEM +#define memmem gitmemmem +void *gitmemmem(const void *haystack, size_t haystacklen, + const void *needle, size_t needlelen); +#endif + +#ifdef FREAD_READS_DIRECTORIES +#ifdef fopen +#undef fopen +#endif +#define fopen(a,b) git_fopen(a,b) +extern FILE *git_fopen(const char*, const char*); +#endif + +#ifdef SNPRINTF_RETURNS_BOGUS +#define snprintf git_snprintf +extern int git_snprintf(char *str, size_t maxsize, + const char *format, ...); +#define vsnprintf git_vsnprintf +extern int git_vsnprintf(char *str, size_t maxsize, + const char *format, va_list ap); +#endif + +#ifdef __GLIBC_PREREQ +#if __GLIBC_PREREQ(2, 1) +#define HAVE_STRCHRNUL +#endif +#endif + +#ifndef HAVE_STRCHRNUL +#define strchrnul gitstrchrnul +static inline char *gitstrchrnul(const char *s, int c) +{ + while (*s && *s != c) + s++; + return (char *)s; +} +#endif + +static inline size_t xsize_t(off_t len) +{ + return (size_t)len; +} + +static inline int has_extension(const char *filename, const char *ext) +{ + size_t len = strlen(filename); + size_t extlen = strlen(ext); + return len > extlen && !memcmp(filename + len - extlen, ext, extlen); +} + +/* Sane ctype - no locale, and works with signed chars */ +#undef isascii +#undef isspace +#undef isdigit +#undef isalpha +#undef isalnum +#undef tolower +#undef toupper +extern unsigned char sane_ctype[256]; +#define GIT_SPACE 0x01 +#define GIT_DIGIT 0x02 +#define GIT_ALPHA 0x04 +#define GIT_GLOB_SPECIAL 0x08 +#define GIT_REGEX_SPECIAL 0x10 +#define sane_istest(x,mask) ((sane_ctype[(unsigned char)(x)] & (mask)) != 0) +#define isascii(x) (((x) & ~0x7f) == 0) +#define isspace(x) sane_istest(x,GIT_SPACE) +#define isdigit(x) sane_istest(x,GIT_DIGIT) +#define isalpha(x) sane_istest(x,GIT_ALPHA) +#define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT) +#define is_glob_special(x) sane_istest(x,GIT_GLOB_SPECIAL) +#define is_regex_special(x) sane_istest(x,GIT_GLOB_SPECIAL | GIT_REGEX_SPECIAL) +#define tolower(x) sane_case((unsigned char)(x), 0x20) +#define toupper(x) sane_case((unsigned char)(x), 0) + +static inline int sane_case(int x, int high) +{ + 
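+	/* ASCII letters differ in case only by bit 0x20; 'high' selects which case */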
if (sane_istest(x, GIT_ALPHA)) + x = (x & ~0x20) | high; + return x; +} + +static inline int strtoul_ui(char const *s, int base, unsigned int *result) +{ + unsigned long ul; + char *p; + + errno = 0; + ul = strtoul(s, &p, base); + if (errno || *p || p == s || (unsigned int) ul != ul) + return -1; + *result = ul; + return 0; +} + +static inline int strtol_i(char const *s, int base, int *result) +{ + long ul; + char *p; + + errno = 0; + ul = strtol(s, &p, base); + if (errno || *p || p == s || (int) ul != ul) + return -1; + *result = ul; + return 0; +} + +#ifdef INTERNAL_QSORT +void git_qsort(void *base, size_t nmemb, size_t size, + int(*compar)(const void *, const void *)); +#define qsort git_qsort +#endif + +#ifndef DIR_HAS_BSD_GROUP_SEMANTICS +# define FORCE_DIR_SET_GID S_ISGID +#else +# define FORCE_DIR_SET_GID 0 +#endif + +#ifdef NO_NSEC +#undef USE_NSEC +#define ST_CTIME_NSEC(st) 0 +#define ST_MTIME_NSEC(st) 0 +#else +#ifdef USE_ST_TIMESPEC +#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctimespec.tv_nsec)) +#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtimespec.tv_nsec)) +#else +#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctim.tv_nsec)) +#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtim.tv_nsec)) +#endif +#endif + +#endif diff --git a/Documentation/perf_counter/wrapper.c b/Documentation/perf_counter/wrapper.c new file mode 100644 index 00000000000..6350d65f6d9 --- /dev/null +++ b/Documentation/perf_counter/wrapper.c @@ -0,0 +1,206 @@ +/* + * Various trivial helper wrappers around standard functions + */ +#include "cache.h" + +/* + * There's no pack memory to release - but stay close to the Git + * version so wrap this away: + */ +static inline void release_pack_memory(size_t size, int flag) +{ +} + +char *xstrdup(const char *str) +{ + char *ret = strdup(str); + if (!ret) { + release_pack_memory(strlen(str) + 1, -1); + ret = strdup(str); + if (!ret) + die("Out of memory, strdup failed"); + } + return ret; +} + +void *xmalloc(size_t size) +{ + void *ret = malloc(size); + if (!ret && !size) + ret = malloc(1); + if (!ret) { + release_pack_memory(size, -1); + ret = malloc(size); + if (!ret && !size) + ret = malloc(1); + if (!ret) + die("Out of memory, malloc failed"); + } +#ifdef XMALLOC_POISON + memset(ret, 0xA5, size); +#endif + return ret; +} + +/* + * xmemdupz() allocates (len + 1) bytes of memory, duplicates "len" bytes of + * "data" to the allocated memory, zero terminates the allocated memory, + * and returns a pointer to the allocated memory. If the allocation fails, + * the program dies. + */ +void *xmemdupz(const void *data, size_t len) +{ + char *p = xmalloc(len + 1); + memcpy(p, data, len); + p[len] = '\0'; + return p; +} + +char *xstrndup(const char *str, size_t len) +{ + char *p = memchr(str, '\0', len); + return xmemdupz(str, p ? 
p - str : len); +} + +void *xrealloc(void *ptr, size_t size) +{ + void *ret = realloc(ptr, size); + if (!ret && !size) + ret = realloc(ptr, 1); + if (!ret) { + release_pack_memory(size, -1); + ret = realloc(ptr, size); + if (!ret && !size) + ret = realloc(ptr, 1); + if (!ret) + die("Out of memory, realloc failed"); + } + return ret; +} + +void *xcalloc(size_t nmemb, size_t size) +{ + void *ret = calloc(nmemb, size); + if (!ret && (!nmemb || !size)) + ret = calloc(1, 1); + if (!ret) { + release_pack_memory(nmemb * size, -1); + ret = calloc(nmemb, size); + if (!ret && (!nmemb || !size)) + ret = calloc(1, 1); + if (!ret) + die("Out of memory, calloc failed"); + } + return ret; +} + +void *xmmap(void *start, size_t length, + int prot, int flags, int fd, off_t offset) +{ + void *ret = mmap(start, length, prot, flags, fd, offset); + if (ret == MAP_FAILED) { + if (!length) + return NULL; + release_pack_memory(length, fd); + ret = mmap(start, length, prot, flags, fd, offset); + if (ret == MAP_FAILED) + die("Out of memory? mmap failed: %s", strerror(errno)); + } + return ret; +} + +/* + * xread() is the same as read(), but it automatically restarts read() + * operations with a recoverable error (EAGAIN and EINTR). xread() + * DOES NOT GUARANTEE that "len" bytes are read even if the data is available. + */ +ssize_t xread(int fd, void *buf, size_t len) +{ + ssize_t nr; + while (1) { + nr = read(fd, buf, len); + if ((nr < 0) && (errno == EAGAIN || errno == EINTR)) + continue; + return nr; + } +} + +/* + * xwrite() is the same as write(), but it automatically restarts write() + * operations with a recoverable error (EAGAIN and EINTR). xwrite() DOES NOT + * GUARANTEE that "len" bytes are written even if the operation is successful. + */ +ssize_t xwrite(int fd, const void *buf, size_t len) +{ + ssize_t nr; + while (1) { + nr = write(fd, buf, len); + if ((nr < 0) && (errno == EAGAIN || errno == EINTR)) + continue; + return nr; + } +} + +ssize_t read_in_full(int fd, void *buf, size_t count) +{ + char *p = buf; + ssize_t total = 0; + + while (count > 0) { + ssize_t loaded = xread(fd, p, count); + if (loaded <= 0) + return total ? total : loaded; + count -= loaded; + p += loaded; + total += loaded; + } + + return total; +} + +ssize_t write_in_full(int fd, const void *buf, size_t count) +{ + const char *p = buf; + ssize_t total = 0; + + while (count > 0) { + ssize_t written = xwrite(fd, p, count); + if (written < 0) + return -1; + if (!written) { + errno = ENOSPC; + return -1; + } + count -= written; + p += written; + total += written; + } + + return total; +} + +int xdup(int fd) +{ + int ret = dup(fd); + if (ret < 0) + die("dup failed: %s", strerror(errno)); + return ret; +} + +FILE *xfdopen(int fd, const char *mode) +{ + FILE *stream = fdopen(fd, mode); + if (stream == NULL) + die("Out of memory? 
fdopen failed: %s", strerror(errno)); + return stream; +} + +int xmkstemp(char *template) +{ + int fd; + + fd = mkstemp(template); + if (fd < 0) + die("Unable to create temporary file: %s", strerror(errno)); + return fd; +} -- cgit v1.2.3 From 6f06ccbc86f8a02aa32271263249657ce484eb25 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 20 Apr 2009 15:22:22 +0200 Subject: perf_counter tools: clean up after introduction of the Git command framework Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 207 +----------------------------- Documentation/perf_counter/builtin-help.c | 2 - Documentation/perf_counter/builtin-top.c | 17 +-- Documentation/perf_counter/cache.h | 20 +++ Documentation/perf_counter/config.c | 95 +------------- Documentation/perf_counter/path.c | 39 ------ Documentation/perf_counter/perf.c | 3 +- Documentation/perf_counter/util.h | 14 ++ 8 files changed, 45 insertions(+), 352 deletions(-) diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 11809b943fc..1b602655554 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -138,16 +138,6 @@ all:: # # Define NO_PERL if you do not want Perl scripts or libraries at all. # -# Define NO_TCLTK if you do not want Tcl/Tk GUI. -# -# The TCL_PATH variable governs the location of the Tcl interpreter -# used to optimize perf-gui for your system. Only used if NO_TCLTK -# is not set. Defaults to the bare 'tclsh'. -# -# The TCLTK_PATH variable governs the location of the Tcl/Tk interpreter. -# If not set it defaults to the bare 'wish'. If it is set to the empty -# string then NO_TCLTK will be forced (this is used by configure script). -# # Define INTERNAL_QSORT to use Git's implementation of qsort(), which # is a simplified version of the merge sort used in glibc. This is # recommended if Git triggers O(n^2) behavior in your platform's qsort(). @@ -215,12 +205,8 @@ TAR = tar FIND = find INSTALL = install RPMBUILD = rpmbuild -TCL_PATH = tclsh -TCLTK_PATH = wish PTHREAD_LIBS = -lpthread -export TCL_PATH TCLTK_PATH - # sparse is architecture-neutral, which means that we need to tell it # explicitly what architecture to check for. Fix this up for yours.. SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ @@ -529,10 +515,6 @@ ifdef NO_EXTERNAL_GREP BASIC_CFLAGS += -DNO_EXTERNAL_GREP endif -ifeq ($(TCLTK_PATH),) -NO_TCLTK=NoThanks -endif - ifeq ($(PERL_PATH),) NO_PERL=NoThanks endif @@ -583,7 +565,6 @@ prefix_SQ = $(subst ','\'',$(prefix)) SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH)) PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH)) -TCLTK_PATH_SQ = $(subst ','\'',$(TCLTK_PATH)) LIBS = $(PERFLIBS) $(EXTLIBS) @@ -607,14 +588,6 @@ ifneq (,$X) endif all:: -ifndef NO_TCLTK - $(QUIET_SUBDIR0)perf-gui $(QUIET_SUBDIR1) perfexecdir='$(perfexec_instdir_SQ)' all - $(QUIET_SUBDIR0)perfk-perf $(QUIET_SUBDIR1) all -endif -ifndef NO_PERL - $(QUIET_SUBDIR0)perl $(QUIET_SUBDIR1) PERL_PATH='$(PERL_PATH_SQ)' prefix='$(prefix_SQ)' all -endif - $(QUIET_SUBDIR0)templates $(QUIET_SUBDIR1) please_set_SHELL_PATH_to_a_more_modern_shell: @$$(:) @@ -704,21 +677,6 @@ builtin-revert.o wt-status.o: wt-status.h $(LIB_FILE): $(LIB_OBJS) $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS) -doc: - $(MAKE) -C Documentation all - -man: - $(MAKE) -C Documentation man - -html: - $(MAKE) -C Documentation html - -info: - $(MAKE) -C Documentation info - -pdf: - $(MAKE) -C Documentation pdf - TAGS: $(RM) TAGS $(FIND) . 
-name '*.[hcS]' -print | xargs etags -a @@ -751,33 +709,12 @@ PERF-BUILD-OPTIONS: .FORCE-PERF-BUILD-OPTIONS @echo NO_CURL=\''$(subst ','\'',$(subst ','\'',$(NO_CURL)))'\' >>$@ @echo NO_PERL=\''$(subst ','\'',$(subst ','\'',$(NO_PERL)))'\' >>$@ -### Detect Tck/Tk interpreter path changes -ifndef NO_TCLTK -TRACK_VARS = $(subst ','\'',-DTCLTK_PATH='$(TCLTK_PATH_SQ)') - -PERF-GUI-VARS: .FORCE-PERF-GUI-VARS - @VARS='$(TRACK_VARS)'; \ - if test x"$$VARS" != x"`cat $@ 2>/dev/null`" ; then \ - echo 1>&2 " * new Tcl/Tk interpreter location"; \ - echo "$$VARS" >$@; \ - fi - -.PHONY: .FORCE-PERF-GUI-VARS -endif - ### Testing rules -TEST_PROGRAMS += test-chmtime$X -TEST_PROGRAMS += test-ctype$X -TEST_PROGRAMS += test-date$X -TEST_PROGRAMS += test-delta$X -TEST_PROGRAMS += test-dump-cache-tree$X -TEST_PROGRAMS += test-genrandom$X -TEST_PROGRAMS += test-match-trees$X -TEST_PROGRAMS += test-parse-options$X -TEST_PROGRAMS += test-path-utils$X -TEST_PROGRAMS += test-sha1$X -TEST_PROGRAMS += test-sigchain$X +# +# None right now: +# +# TEST_PROGRAMS += test-something$X all:: $(TEST_PROGRAMS) @@ -787,25 +724,6 @@ all:: $(TEST_PROGRAMS) export NO_SVN_TESTS -test: all - $(MAKE) -C t/ all - -test-ctype$X: ctype.o - -test-date$X: date.o ctype.o - -test-delta$X: diff-delta.o patch-delta.o - -test-parse-options$X: parse-options.o - -.PRECIOUS: $(patsubst test-%$X,test-%.o,$(TEST_PROGRAMS)) - -test-%$X: test-%.o $(PERFLIBS) - $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) - -check-sha1:: test-sha1$X - ./test-sha1.sh - check: common-cmds.h if sparse; \ then \ @@ -845,10 +763,6 @@ install: all $(INSTALL) perf$X perf-upload-pack$X perf-receive-pack$X perf-upload-archive$X perf-shell$X perf-cvsserver '$(DESTDIR_SQ)$(bindir_SQ)' $(MAKE) -C templates DESTDIR='$(DESTDIR_SQ)' install $(MAKE) -C perl prefix='$(prefix_SQ)' DESTDIR='$(DESTDIR_SQ)' install -ifndef NO_TCLTK - $(MAKE) -C perfk-perf install - $(MAKE) -C perf-gui perfexecdir='$(perfexec_instdir_SQ)' install -endif ifneq (,$X) $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), $(RM) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$p';) endif @@ -865,32 +779,6 @@ endif done } && \ ./check_bindir "z$$bindir" "z$$execdir" "$$bindir/perf-add$X" -install-doc: - $(MAKE) -C Documentation install - -install-man: - $(MAKE) -C Documentation install-man - -install-html: - $(MAKE) -C Documentation install-html - -install-info: - $(MAKE) -C Documentation install-info - -install-pdf: - $(MAKE) -C Documentation install-pdf - -quick-install-doc: - $(MAKE) -C Documentation quick-install - -quick-install-man: - $(MAKE) -C Documentation quick-install-man - -quick-install-html: - $(MAKE) -C Documentation quick-install-html - - - ### Maintainer's dist rules perf.spec: perf.spec.in @@ -904,38 +792,16 @@ dist: perf.spec perf-archive$(X) configure @mkdir -p $(PERF_TARNAME) @cp perf.spec configure $(PERF_TARNAME) @echo $(PERF_VERSION) > $(PERF_TARNAME)/version - @$(MAKE) -C perf-gui TARDIR=../$(PERF_TARNAME)/perf-gui dist-version $(TAR) rf $(PERF_TARNAME).tar \ $(PERF_TARNAME)/perf.spec \ $(PERF_TARNAME)/configure \ - $(PERF_TARNAME)/version \ - $(PERF_TARNAME)/perf-gui/version + $(PERF_TARNAME)/version @$(RM) -r $(PERF_TARNAME) gzip -f -9 $(PERF_TARNAME).tar rpm: dist $(RPMBUILD) -ta $(PERF_TARNAME).tar.gz -htmldocs = perf-htmldocs-$(PERF_VERSION) -manpages = perf-manpages-$(PERF_VERSION) -dist-doc: - $(RM) -r .doc-tmp-dir - mkdir .doc-tmp-dir - $(MAKE) -C Documentation WEBDOC_DEST=../.doc-tmp-dir install-webdoc - cd .doc-tmp-dir && $(TAR) 
cf ../$(htmldocs).tar . - gzip -n -9 -f $(htmldocs).tar - : - $(RM) -r .doc-tmp-dir - mkdir -p .doc-tmp-dir/man1 .doc-tmp-dir/man5 .doc-tmp-dir/man7 - $(MAKE) -C Documentation DESTDIR=./ \ - man1dir=../.doc-tmp-dir/man1 \ - man5dir=../.doc-tmp-dir/man5 \ - man7dir=../.doc-tmp-dir/man7 \ - install - cd .doc-tmp-dir && $(TAR) cf ../$(manpages).tar . - gzip -n -9 -f $(manpages).tar - $(RM) -r .doc-tmp-dir - ### Cleaning rules distclean: clean @@ -951,74 +817,13 @@ clean: $(RM) -r $(PERF_TARNAME) .doc-tmp-dir $(RM) $(PERF_TARNAME).tar.gz perf-core_$(PERF_VERSION)-*.tar.gz $(RM) $(htmldocs).tar.gz $(manpages).tar.gz - $(MAKE) -C Documentation/ clean - $(MAKE) -C templates/ clean - $(MAKE) -C t/ clean -ifndef NO_TCLTK - $(MAKE) -C perfk-perf clean - $(MAKE) -C perf-gui clean -endif - $(RM) PERF-VERSION-FILE PERF-CFLAGS PERF-GUI-VARS PERF-BUILD-OPTIONS + $(RM) PERF-VERSION-FILE PERF-CFLAGS PERF-BUILD-OPTIONS .PHONY: all install clean strip .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell .PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS .PHONY: .FORCE-PERF-BUILD-OPTIONS -### Check documentation -# -check-docs:: - @(for v in $(ALL_PROGRAMS) $(BUILT_INS) perf perfk; \ - do \ - case "$$v" in \ - perf-merge-octopus | perf-merge-ours | perf-merge-recursive | \ - perf-merge-resolve | perf-merge-subtree | \ - perf-fsck-objects | perf-init-db | \ - perf-?*--?* ) continue ;; \ - esac ; \ - test -f "Documentation/$$v.txt" || \ - echo "no doc: $$v"; \ - sed -e '/^#/d' command-list.txt | \ - grep -q "^$$v[ ]" || \ - case "$$v" in \ - perf) ;; \ - *) echo "no link: $$v";; \ - esac ; \ - done; \ - ( \ - sed -e '/^#/d' \ - -e 's/[ ].*//' \ - -e 's/^/listed /' command-list.txt; \ - ls -1 Documentation/perf*txt | \ - sed -e 's|Documentation/|documented |' \ - -e 's/\.txt//'; \ - ) | while read how cmd; \ - do \ - case "$$how,$$cmd" in \ - *,perf-citool | \ - *,perf-gui | \ - *,perf-help | \ - documented,perfattributes | \ - documented,perfignore | \ - documented,perfmodules | \ - documented,perfcli | \ - documented,perf-tools | \ - documented,perfcore-tutorial | \ - documented,perfcvs-migration | \ - documented,perfdiffcore | \ - documented,perfglossary | \ - documented,perfhooks | \ - documented,perfrepository-layout | \ - documented,perftutorial | \ - documented,perftutorial-2 | \ - sentinel,not,matching,is,ok ) continue ;; \ - esac; \ - case " $(ALL_PROGRAMS) $(BUILT_INS) perf perfk " in \ - *" $$cmd "*) ;; \ - *) echo "removed but $$how: $$cmd" ;; \ - esac; \ - done ) | sort - ### Make sure built-ins do not have dups and listed in perf.c # check-builtins:: diff --git a/Documentation/perf_counter/builtin-help.c b/Documentation/perf_counter/builtin-help.c index 125fcc2f490..a136d619db3 100644 --- a/Documentation/perf_counter/builtin-help.c +++ b/Documentation/perf_counter/builtin-help.c @@ -417,11 +417,9 @@ static void show_html_page(const char *perf_cmd) int cmd_help(int argc, const char **argv, const char *prefix) { - int nonperf; const char *alias; load_command_list("perf-", &main_cmds, &other_cmds); - /* setup_perf_directory_gently(&nonperf); */ perf_config(perf_help_config, NULL); argc = parse_options(argc, argv, builtin_help_options, diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 9d2c769e5f8..601bddbc30d 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -63,15 +63,6 @@ #include "util.h" -#define _GNU_SOURCE -#include -#include -#include -#include 
-#include -#include -#include -#include #include #include #include @@ -103,8 +94,6 @@ #define PR_TASK_PERF_COUNTERS_DISABLE 31 #define PR_TASK_PERF_COUNTERS_ENABLE 32 -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) - #define rdclock() \ ({ \ struct timespec ts; \ @@ -1077,7 +1066,7 @@ static void process_event(uint64_t ip, int counter) record_ip(ip, counter); } -static void process_options(int argc, char *argv[]) +static void process_options(int argc, char **argv) { int error = 0, counter; @@ -1255,7 +1244,7 @@ static void mmap_read(struct mmap_data *md) event_t event_copy; - unsigned int size = event->header.size; + size_t size = event->header.size; /* * Event straddles the mmap boundary -- header should always @@ -1301,7 +1290,7 @@ static void mmap_read(struct mmap_data *md) md->prev = old; } -int cmd_top(int argc, const char **argv, const char *prefix) +int cmd_top(int argc, char **argv, const char *prefix) { struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; diff --git a/Documentation/perf_counter/cache.h b/Documentation/perf_counter/cache.h index dc085640a57..71080512fa8 100644 --- a/Documentation/perf_counter/cache.h +++ b/Documentation/perf_counter/cache.h @@ -94,4 +94,24 @@ static inline int is_absolute_path(const char *path) { return path[0] == '/'; } + +const char *make_absolute_path(const char *path); +const char *make_nonrelative_path(const char *path); +const char *make_relative_path(const char *abs, const char *base); +int normalize_path_copy(char *dst, const char *src); +int longest_ancestor_length(const char *path, const char *prefix_list); +char *strip_path_suffix(const char *path, const char *suffix); + +extern char *mkpath(const char *fmt, ...) __attribute__((format (printf, 1, 2))); +extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2))); + +extern char *mksnpath(char *buf, size_t n, const char *fmt, ...) + __attribute__((format (printf, 3, 4))); +extern char *perf_snpath(char *buf, size_t n, const char *fmt, ...) + __attribute__((format (printf, 3, 4))); +extern char *perf_pathdup(const char *fmt, ...) + __attribute__((format (printf, 1, 2))); + +extern size_t strlcpy(char *dest, const char *src, size_t size); + #endif /* CACHE_H */ diff --git a/Documentation/perf_counter/config.c b/Documentation/perf_counter/config.c index 672d5395933..3dd13faa6a2 100644 --- a/Documentation/perf_counter/config.c +++ b/Documentation/perf_counter/config.c @@ -15,7 +15,6 @@ static FILE *config_file; static const char *config_file_name; static int config_linenr; static int config_file_eof; -static int zlib_compression_seen; const char *config_exclusive_filename = NULL; @@ -533,14 +532,6 @@ static int store_aux(const char* key, const char* value, void *cb) return 0; } -static int write_error(const char *filename) -{ - error("failed to write new configuration file %s", filename); - - /* Same error code as "failed to rename". 
*/ - return 4; -} - static int store_write_section(int fd, const char* key) { const char *dot; @@ -673,7 +664,7 @@ int perf_config_set_multivar(const char* key, const char* value, { int i, dot; int fd = -1, in_fd; - int ret; + int ret = 0; char* config_filename; const char* last_dot = strrchr(key, '.'); @@ -872,90 +863,6 @@ write_err_out: } -static int section_name_match (const char *buf, const char *name) -{ - int i = 0, j = 0, dot = 0; - for (; buf[i] && buf[i] != ']'; i++) { - if (!dot && isspace(buf[i])) { - dot = 1; - if (name[j++] != '.') - break; - for (i++; isspace(buf[i]); i++) - ; /* do nothing */ - if (buf[i] != '"') - break; - continue; - } - if (buf[i] == '\\' && dot) - i++; - else if (buf[i] == '"' && dot) { - for (i++; isspace(buf[i]); i++) - ; /* do_nothing */ - break; - } - if (buf[i] != name[j++]) - break; - } - return (buf[i] == ']' && name[j] == 0); -} - -/* if new_name == NULL, the section is removed instead */ -int perf_config_rename_section(const char *old_name, const char *new_name) -{ - int ret = 0, remove = 0; - char *config_filename; - int out_fd; - char buf[1024]; - - if (config_exclusive_filename) - config_filename = strdup(config_exclusive_filename); - else - config_filename = perf_pathdup("config"); - if (out_fd < 0) { - ret = error("could not lock config file %s", config_filename); - goto out; - } - - if (!(config_file = fopen(config_filename, "rb"))) { - /* no config file means nothing to rename, no error */ - goto unlock_and_out; - } - - while (fgets(buf, sizeof(buf), config_file)) { - int i; - int length; - for (i = 0; buf[i] && isspace(buf[i]); i++) - ; /* do nothing */ - if (buf[i] == '[') { - /* it's a section */ - if (section_name_match (&buf[i+1], old_name)) { - ret++; - if (new_name == NULL) { - remove = 1; - continue; - } - store.baselen = strlen(new_name); - if (!store_write_section(out_fd, new_name)) { - goto out; - } - continue; - } - remove = 0; - } - if (remove) - continue; - length = strlen(buf); - if (write_in_full(out_fd, buf, length) != length) { - goto out; - } - } - fclose(config_file); - unlock_and_out: - out: - free(config_filename); - return ret; -} - /* * Call this to report error for your variable that should not * get a boolean value (i.e. "[my] var" means "true"). 
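The config plumbing retained above follows Git's callback model: perf_config() walks every key/value pair it finds in the config file and hands each one to a caller-supplied function, exactly as builtin-help.c does with perf_config(perf_help_config, NULL). A minimal sketch of such a callback, matching the (key, value, cb) signature visible in store_aux() above; the "top.delay" key and its mapping onto delay_secs are illustrative assumptions, not part of this patch:

    static int top_config(const char *var, const char *value, void *cb)
    {
        /* Assumed key: react only to settings this command owns. */
        if (!strcmp(var, "top.delay") && value)
            delay_secs = atoi(value);
        /* Returning 0 tells the parser to keep going. */
        return 0;
    }

    /* Typically called once during command startup: */
    perf_config(top_config, NULL);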
diff --git a/Documentation/perf_counter/path.c b/Documentation/perf_counter/path.c index 891b612ec1a..a501a40dd2c 100644 --- a/Documentation/perf_counter/path.c +++ b/Documentation/perf_counter/path.c @@ -161,45 +161,6 @@ int perf_mkstemp(char *path, size_t len, const char *template) } -static char *user_path(char *buf, char *path, int sz) -{ - struct passwd *pw; - char *slash; - int len, baselen; - - if (!path || path[0] != '~') - return NULL; - path++; - slash = strchr(path, '/'); - if (path[0] == '/' || !path[0]) { - pw = getpwuid(getuid()); - } - else { - if (slash) { - *slash = 0; - pw = getpwnam(path); - *slash = '/'; - } - else - pw = getpwnam(path); - } - if (!pw || !pw->pw_dir || sz <= strlen(pw->pw_dir)) - return NULL; - baselen = strlen(pw->pw_dir); - memcpy(buf, pw->pw_dir, baselen); - while ((1 < baselen) && (buf[baselen-1] == '/')) { - buf[baselen-1] = 0; - baselen--; - } - if (slash && slash[1]) { - len = strlen(slash); - if (sz <= baselen + len) - return NULL; - memcpy(buf + baselen, slash, len + 1); - } - return buf; -} - const char *make_relative_path(const char *abs, const char *base) { static char buf[PATH_MAX + 1]; diff --git a/Documentation/perf_counter/perf.c b/Documentation/perf_counter/perf.c index 9256f6a1644..63f8a892c0d 100644 --- a/Documentation/perf_counter/perf.c +++ b/Documentation/perf_counter/perf.c @@ -1,7 +1,7 @@ #include "builtin.h" #include "exec_cmd.h" #include "cache.h" -//#include "quote.h" +#include "quote.h" #include "run-command.h" const char perf_usage_string[] = @@ -132,7 +132,6 @@ static int handle_alias(int *argcp, const char ***argv) const char** new_argv; const char *alias_command; char *alias_string; - int unused_nonperf; alias_command = (*argv)[0]; alias_string = alias_lookup(alias_command); diff --git a/Documentation/perf_counter/util.h b/Documentation/perf_counter/util.h index 13f8bdce760..36e40c38e09 100644 --- a/Documentation/perf_counter/util.h +++ b/Documentation/perf_counter/util.h @@ -295,6 +295,20 @@ static inline char *gitstrchrnul(const char *s, int c) } #endif +/* + * Wrappers: + */ +extern char *xstrdup(const char *str); +extern void *xmalloc(size_t size); +extern void *xmemdupz(const void *data, size_t len); +extern char *xstrndup(const char *str, size_t len); +extern void *xrealloc(void *ptr, size_t size); +extern void *xcalloc(size_t nmemb, size_t size); +extern void *xmmap(void *start, size_t length, int prot, int flags, int fd, off_t offset); +extern ssize_t xread(int fd, void *buf, size_t len); +extern ssize_t xwrite(int fd, const void *buf, size_t len); +extern int xdup(int fd); +extern FILE *xfdopen(int fd, const char *mode); static inline size_t xsize_t(off_t len) { return (size_t)len; -- cgit v1.2.3 From ddcacfa0febff6454dba6cea1931f3020a9f6c24 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 20 Apr 2009 15:37:32 +0200 Subject: perf_counter tools: separate kerneltop into 'perf top' and 'perf stat' Let's use the Git framework of built-in commands. 
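Under this framework a new built-in touches three places: a cmd_*() entry point (declared in builtin.h), a row in perf.c's command table, and a line in command-list.txt. A sketch of the perf.c side; the cmd_struct layout follows the Git convention this code was lifted from and is assumed here rather than shown in the patch:

    struct cmd_struct {
        const char *cmd;                        /* user-visible name  */
        int (*fn)(int, char **, const char *);  /* cmd_top, cmd_stat  */
        int option;                             /* setup flags        */
    };

    static struct cmd_struct commands[] = {
        { "top",  cmd_top,  0 },
        { "stat", cmd_stat, 0 },  /* the built-in split out below */
    };

Dispatch then walks this table and invokes fn(argc, argv, prefix), which is why cmd_stat() below takes the same (argc, argv, prefix) triple as cmd_top().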
Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 1 + Documentation/perf_counter/builtin-stat.c | 592 +++++++++++ Documentation/perf_counter/builtin-top.c | 204 +--- Documentation/perf_counter/builtin.h | 3 +- Documentation/perf_counter/command-list.txt | 1 + Documentation/perf_counter/kerneltop.c | 1409 --------------------------- Documentation/perf_counter/perf.c | 1 + 7 files changed, 601 insertions(+), 1610 deletions(-) create mode 100644 Documentation/perf_counter/builtin-stat.c delete mode 100644 Documentation/perf_counter/kerneltop.c diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 1b602655554..fb8b71744e5 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -309,6 +309,7 @@ LIB_OBJS += usage.o LIB_OBJS += wrapper.o BUILTIN_OBJS += builtin-help.o +BUILTIN_OBJS += builtin-stat.o BUILTIN_OBJS += builtin-top.o PERFLIBS = $(LIB_FILE) diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c new file mode 100644 index 00000000000..169a2d1783f --- /dev/null +++ b/Documentation/perf_counter/builtin-stat.c @@ -0,0 +1,592 @@ +/* + * kerneltop.c: show top kernel functions - performance counters showcase + + Build with: + + cc -O6 -Wall -c -o kerneltop.o kerneltop.c -lrt + + Sample output: + +------------------------------------------------------------------------------ + KernelTop: 2669 irqs/sec [NMI, cache-misses/cache-refs], (all, cpu: 2) +------------------------------------------------------------------------------ + + weight RIP kernel function + ______ ________________ _______________ + + 35.20 - ffffffff804ce74b : skb_copy_and_csum_dev + 33.00 - ffffffff804cb740 : sock_alloc_send_skb + 31.26 - ffffffff804ce808 : skb_push + 22.43 - ffffffff80510004 : tcp_established_options + 19.00 - ffffffff8027d250 : find_get_page + 15.76 - ffffffff804e4fc9 : eth_type_trans + 15.20 - ffffffff804d8baa : dst_release + 14.86 - ffffffff804cf5d8 : skb_release_head_state + 14.00 - ffffffff802217d5 : read_hpet + 12.00 - ffffffff804ffb7f : __ip_local_out + 11.97 - ffffffff804fc0c8 : ip_local_deliver_finish + 8.54 - ffffffff805001a3 : ip_queue_xmit + */ + +/* + * perfstat: /usr/bin/time -alike performance counter statistics utility + + It summarizes the counter events of all tasks (and child tasks), + covering all CPUs that the command (or workload) executes on. + It only counts the per-task events of the workload started, + independent of how many other tasks run on those CPUs. + + Sample output: + + $ ./perfstat -e 1 -e 3 -e 5 ls -lR /usr/include/ >/dev/null + + Performance counter stats for 'ls': + + 163516953 instructions + 2295 cache-misses + 2855182 branch-misses + */ + + /* + * Copyright (C) 2008, Red Hat Inc, Ingo Molnar + * + * Improvements and fixes by: + * + * Arjan van de Ven + * Yanmin Zhang + * Wu Fengguang + * Mike Galbraith + * Paul Mackerras + * + * Released under the GPL v2. (and only v2, not any later version) + */ + +#include "util.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "../../include/linux/perf_counter.h" + + +/* + * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all + * counters in the current task. 
+ */ +#define PR_TASK_PERF_COUNTERS_DISABLE 31 +#define PR_TASK_PERF_COUNTERS_ENABLE 32 + +#define rdclock() \ +({ \ + struct timespec ts; \ + \ + clock_gettime(CLOCK_MONOTONIC, &ts); \ + ts.tv_sec * 1000000000ULL + ts.tv_nsec; \ +}) + +/* + * Pick up some kernel type conventions: + */ +#define __user +#define asmlinkage + +#ifdef __x86_64__ +#define __NR_perf_counter_open 295 +#define rmb() asm volatile("lfence" ::: "memory") +#define cpu_relax() asm volatile("rep; nop" ::: "memory"); +#endif + +#ifdef __i386__ +#define __NR_perf_counter_open 333 +#define rmb() asm volatile("lfence" ::: "memory") +#define cpu_relax() asm volatile("rep; nop" ::: "memory"); +#endif + +#ifdef __powerpc__ +#define __NR_perf_counter_open 319 +#define rmb() asm volatile ("sync" ::: "memory") +#define cpu_relax() asm volatile ("" ::: "memory"); +#endif + +#define unlikely(x) __builtin_expect(!!(x), 0) +#define min(x, y) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? _min1 : _min2; }) + +extern asmlinkage int sys_perf_counter_open( + struct perf_counter_hw_event *hw_event_uptr __user, + pid_t pid, + int cpu, + int group_fd, + unsigned long flags); + +#define MAX_COUNTERS 64 +#define MAX_NR_CPUS 256 + +#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id)) + +static int system_wide = 0; + +static int nr_counters = 0; +static __u64 event_id[MAX_COUNTERS] = { + EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), + EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), + EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), + EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), + + EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), + EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), + EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), + EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), +}; +static int default_interval = 100000; +static int event_count[MAX_COUNTERS]; +static int fd[MAX_NR_CPUS][MAX_COUNTERS]; + +static int tid = -1; +static int profile_cpu = -1; +static int nr_cpus = 0; +static int nmi = 1; +static int group = 0; +static unsigned int page_size; + +static int zero; + +static int scale; + +static const unsigned int default_count[] = { + 1000000, + 1000000, + 10000, + 10000, + 1000000, + 10000, +}; + +static char *hw_event_names[] = { + "CPU cycles", + "instructions", + "cache references", + "cache misses", + "branches", + "branch misses", + "bus cycles", +}; + +static char *sw_event_names[] = { + "cpu clock ticks", + "task clock ticks", + "pagefaults", + "context switches", + "CPU migrations", + "minor faults", + "major faults", +}; + +struct event_symbol { + __u64 event; + char *symbol; +}; + +static struct event_symbol event_symbols[] = { + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cpu-cycles", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cycles", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), "instructions", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), "cache-references", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), "cache-misses", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branch-instructions", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branches", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES), "branch-misses", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES), "bus-cycles", }, + + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK), "cpu-clock", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), "task-clock", }, + 
{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "page-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN), "minor-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ), "major-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "context-switches", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "cs", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "cpu-migrations", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "migrations", }, +}; + +#define __PERF_COUNTER_FIELD(config, name) \ + ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) + +#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) +#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) +#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) +#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) + +static void display_events_help(void) +{ + unsigned int i; + __u64 e; + + printf( + " -e EVENT --event=EVENT # symbolic-name abbreviations"); + + for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { + int type, id; + + e = event_symbols[i].event; + type = PERF_COUNTER_TYPE(e); + id = PERF_COUNTER_ID(e); + + printf("\n %d:%d: %-20s", + type, id, event_symbols[i].symbol); + } + + printf("\n" + " rNNN: raw PMU events (eventsel+umask)\n\n"); +} + +static void display_help(void) +{ + printf( + "Usage: perfstat [] \n\n" + "PerfStat Options (up to %d event types can be specified):\n\n", + MAX_COUNTERS); + + display_events_help(); + + printf( + " -l # scale counter values\n" + " -a # system-wide collection\n"); + exit(0); +} + +static char *event_name(int ctr) +{ + __u64 config = event_id[ctr]; + int type = PERF_COUNTER_TYPE(config); + int id = PERF_COUNTER_ID(config); + static char buf[32]; + + if (PERF_COUNTER_RAW(config)) { + sprintf(buf, "raw 0x%llx", PERF_COUNTER_CONFIG(config)); + return buf; + } + + switch (type) { + case PERF_TYPE_HARDWARE: + if (id < PERF_HW_EVENTS_MAX) + return hw_event_names[id]; + return "unknown-hardware"; + + case PERF_TYPE_SOFTWARE: + if (id < PERF_SW_EVENTS_MAX) + return sw_event_names[id]; + return "unknown-software"; + + default: + break; + } + + return "unknown"; +} + +/* + * Each event can have multiple symbolic names. + * Symbolic names are (almost) exactly matched. 
+ */ +static __u64 match_event_symbols(char *str) +{ + __u64 config, id; + int type; + unsigned int i; + + if (sscanf(str, "r%llx", &config) == 1) + return config | PERF_COUNTER_RAW_MASK; + + if (sscanf(str, "%d:%llu", &type, &id) == 2) + return EID(type, id); + + for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { + if (!strncmp(str, event_symbols[i].symbol, + strlen(event_symbols[i].symbol))) + return event_symbols[i].event; + } + + return ~0ULL; +} + +static int parse_events(char *str) +{ + __u64 config; + +again: + if (nr_counters == MAX_COUNTERS) + return -1; + + config = match_event_symbols(str); + if (config == ~0ULL) + return -1; + + event_id[nr_counters] = config; + nr_counters++; + + str = strstr(str, ","); + if (str) { + str++; + goto again; + } + + return 0; +} + + +/* + * perfstat + */ + +char fault_here[1000000]; + +static void create_perfstat_counter(int counter) +{ + struct perf_counter_hw_event hw_event; + + memset(&hw_event, 0, sizeof(hw_event)); + hw_event.config = event_id[counter]; + hw_event.record_type = 0; + hw_event.nmi = 0; + if (scale) + hw_event.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | + PERF_FORMAT_TOTAL_TIME_RUNNING; + + if (system_wide) { + int cpu; + for (cpu = 0; cpu < nr_cpus; cpu ++) { + fd[cpu][counter] = sys_perf_counter_open(&hw_event, -1, cpu, -1, 0); + if (fd[cpu][counter] < 0) { + printf("perfstat error: syscall returned with %d (%s)\n", + fd[cpu][counter], strerror(errno)); + exit(-1); + } + } + } else { + hw_event.inherit = 1; + hw_event.disabled = 1; + + fd[0][counter] = sys_perf_counter_open(&hw_event, 0, -1, -1, 0); + if (fd[0][counter] < 0) { + printf("perfstat error: syscall returned with %d (%s)\n", + fd[0][counter], strerror(errno)); + exit(-1); + } + } +} + +int do_perfstat(int argc, char *argv[]) +{ + unsigned long long t0, t1; + int counter; + ssize_t res; + int status; + int pid; + + if (!system_wide) + nr_cpus = 1; + + for (counter = 0; counter < nr_counters; counter++) + create_perfstat_counter(counter); + + argc -= optind; + argv += optind; + + if (!argc) + display_help(); + + /* + * Enable counters and exec the command: + */ + t0 = rdclock(); + prctl(PR_TASK_PERF_COUNTERS_ENABLE); + + if ((pid = fork()) < 0) + perror("failed to fork"); + if (!pid) { + if (execvp(argv[0], argv)) { + perror(argv[0]); + exit(-1); + } + } + while (wait(&status) >= 0) + ; + prctl(PR_TASK_PERF_COUNTERS_DISABLE); + t1 = rdclock(); + + fflush(stdout); + + fprintf(stderr, "\n"); + fprintf(stderr, " Performance counter stats for \'%s\':\n", + argv[0]); + fprintf(stderr, "\n"); + + for (counter = 0; counter < nr_counters; counter++) { + int cpu, nv; + __u64 count[3], single_count[3]; + int scaled; + + count[0] = count[1] = count[2] = 0; + nv = scale ? 
3 : 1; + for (cpu = 0; cpu < nr_cpus; cpu ++) { + res = read(fd[cpu][counter], + single_count, nv * sizeof(__u64)); + assert(res == nv * sizeof(__u64)); + + count[0] += single_count[0]; + if (scale) { + count[1] += single_count[1]; + count[2] += single_count[2]; + } + } + + scaled = 0; + if (scale) { + if (count[2] == 0) { + fprintf(stderr, " %14s %-20s\n", + "", event_name(counter)); + continue; + } + if (count[2] < count[1]) { + scaled = 1; + count[0] = (unsigned long long) + ((double)count[0] * count[1] / count[2] + 0.5); + } + } + + if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK) || + event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK)) { + + double msecs = (double)count[0] / 1000000; + + fprintf(stderr, " %14.6f %-20s (msecs)", + msecs, event_name(counter)); + } else { + fprintf(stderr, " %14Ld %-20s (events)", + count[0], event_name(counter)); + } + if (scaled) + fprintf(stderr, " (scaled from %.2f%%)", + (double) count[2] / count[1] * 100); + fprintf(stderr, "\n"); + } + fprintf(stderr, "\n"); + fprintf(stderr, " Wall-clock time elapsed: %12.6f msecs\n", + (double)(t1-t0)/1e6); + fprintf(stderr, "\n"); + + return 0; +} + +static void process_options(int argc, char **argv) +{ + int error = 0, counter; + + for (;;) { + int option_index = 0; + /** Options for getopt */ + static struct option long_options[] = { + {"count", required_argument, NULL, 'c'}, + {"cpu", required_argument, NULL, 'C'}, + {"delay", required_argument, NULL, 'd'}, + {"dump_symtab", no_argument, NULL, 'D'}, + {"event", required_argument, NULL, 'e'}, + {"filter", required_argument, NULL, 'f'}, + {"group", required_argument, NULL, 'g'}, + {"help", no_argument, NULL, 'h'}, + {"nmi", required_argument, NULL, 'n'}, + {"munmap_info", no_argument, NULL, 'U'}, + {"pid", required_argument, NULL, 'p'}, + {"realtime", required_argument, NULL, 'r'}, + {"scale", no_argument, NULL, 'l'}, + {"symbol", required_argument, NULL, 's'}, + {"stat", no_argument, NULL, 'S'}, + {"vmlinux", required_argument, NULL, 'x'}, + {"zero", no_argument, NULL, 'z'}, + {NULL, 0, NULL, 0 } + }; + int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hln:m:p:r:s:Sx:zMU", + long_options, &option_index); + if (c == -1) + break; + + switch (c) { + case 'a': system_wide = 1; break; + case 'c': default_interval = atoi(optarg); break; + case 'C': + /* CPU and PID are mutually exclusive */ + if (tid != -1) { + printf("WARNING: CPU switch overriding PID\n"); + sleep(1); + tid = -1; + } + profile_cpu = atoi(optarg); break; + + case 'e': error = parse_events(optarg); break; + + case 'g': group = atoi(optarg); break; + case 'h': display_help(); break; + case 'l': scale = 1; break; + case 'n': nmi = atoi(optarg); break; + case 'p': + /* CPU and PID are mutually exclusive */ + if (profile_cpu != -1) { + printf("WARNING: PID switch overriding CPU\n"); + sleep(1); + profile_cpu = -1; + } + tid = atoi(optarg); break; + case 'z': zero = 1; break; + default: error = 1; break; + } + } + if (error) + display_help(); + + if (!nr_counters) { + nr_counters = 8; + } + + for (counter = 0; counter < nr_counters; counter++) { + if (event_count[counter]) + continue; + + event_count[counter] = default_interval; + } +} + +int cmd_stat(int argc, char **argv, const char *prefix) +{ + page_size = sysconf(_SC_PAGE_SIZE); + + process_options(argc, argv); + + nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); + assert(nr_cpus <= MAX_NR_CPUS); + assert(nr_cpus >= 0); + + return do_perfstat(argc, argv); +} diff --git a/Documentation/perf_counter/builtin-top.c 
b/Documentation/perf_counter/builtin-top.c index 601bddbc30d..98e8690b6bc 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -28,25 +28,6 @@ 8.54 - ffffffff805001a3 : ip_queue_xmit */ -/* - * perfstat: /usr/bin/time -alike performance counter statistics utility - - It summarizes the counter events of all tasks (and child tasks), - covering all CPUs that the command (or workload) executes on. - It only counts the per-task events of the workload started, - independent of how many other tasks run on those CPUs. - - Sample output: - - $ ./perfstat -e 1 -e 3 -e 5 ls -lR /usr/include/ >/dev/null - - Performance counter stats for 'ls': - - 163516953 instructions - 2295 cache-misses - 2855182 branch-misses - */ - /* * Copyright (C) 2008, Red Hat Inc, Ingo Molnar * @@ -149,7 +130,6 @@ asmlinkage int sys_perf_counter_open( #define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id)) -static int run_perfstat = 0; static int system_wide = 0; static int nr_counters = 0; @@ -203,7 +183,7 @@ struct source_line { static struct source_line *lines; static struct source_line **lines_tail; -const unsigned int default_count[] = { +static const unsigned int default_count[] = { 1000000, 1000000, 10000, @@ -291,26 +271,8 @@ static void display_events_help(void) " rNNN: raw PMU events (eventsel+umask)\n\n"); } -static void display_perfstat_help(void) -{ - printf( - "Usage: perfstat [] \n\n" - "PerfStat Options (up to %d event types can be specified):\n\n", - MAX_COUNTERS); - - display_events_help(); - - printf( - " -l # scale counter values\n" - " -a # system-wide collection\n"); - exit(0); -} - static void display_help(void) { - if (run_perfstat) - return display_perfstat_help(); - printf( "Usage: kerneltop []\n" " Or: kerneltop -S [] COMMAND [ARGS]\n\n" @@ -320,8 +282,6 @@ static void display_help(void) display_events_help(); printf( - " -S --stat # perfstat COMMAND\n" - " -a # system-wide collection (for perfstat)\n\n" " -c CNT --count=CNT # event period to sample\n\n" " -C CPU --cpu=CPU # CPU (-1 for all) [default: -1]\n" " -p PID --pid=PID # PID of sampled task (-1 for all) [default: -1]\n\n" @@ -420,151 +380,6 @@ again: return 0; } - -/* - * perfstat - */ - -char fault_here[1000000]; - -static void create_perfstat_counter(int counter) -{ - struct perf_counter_hw_event hw_event; - - memset(&hw_event, 0, sizeof(hw_event)); - hw_event.config = event_id[counter]; - hw_event.record_type = 0; - hw_event.nmi = 0; - if (scale) - hw_event.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | - PERF_FORMAT_TOTAL_TIME_RUNNING; - - if (system_wide) { - int cpu; - for (cpu = 0; cpu < nr_cpus; cpu ++) { - fd[cpu][counter] = sys_perf_counter_open(&hw_event, -1, cpu, -1, 0); - if (fd[cpu][counter] < 0) { - printf("perfstat error: syscall returned with %d (%s)\n", - fd[cpu][counter], strerror(errno)); - exit(-1); - } - } - } else { - hw_event.inherit = 1; - hw_event.disabled = 1; - - fd[0][counter] = sys_perf_counter_open(&hw_event, 0, -1, -1, 0); - if (fd[0][counter] < 0) { - printf("perfstat error: syscall returned with %d (%s)\n", - fd[0][counter], strerror(errno)); - exit(-1); - } - } -} - -int do_perfstat(int argc, char *argv[]) -{ - unsigned long long t0, t1; - int counter; - ssize_t res; - int status; - int pid; - - if (!system_wide) - nr_cpus = 1; - - for (counter = 0; counter < nr_counters; counter++) - create_perfstat_counter(counter); - - argc -= optind; - argv += optind; - - if (!argc) - display_help(); - - /* - * Enable counters and exec the command: - 
*/ - t0 = rdclock(); - prctl(PR_TASK_PERF_COUNTERS_ENABLE); - - if ((pid = fork()) < 0) - perror("failed to fork"); - if (!pid) { - if (execvp(argv[0], argv)) { - perror(argv[0]); - exit(-1); - } - } - while (wait(&status) >= 0) - ; - prctl(PR_TASK_PERF_COUNTERS_DISABLE); - t1 = rdclock(); - - fflush(stdout); - - fprintf(stderr, "\n"); - fprintf(stderr, " Performance counter stats for \'%s\':\n", - argv[0]); - fprintf(stderr, "\n"); - - for (counter = 0; counter < nr_counters; counter++) { - int cpu, nv; - __u64 count[3], single_count[3]; - int scaled; - - count[0] = count[1] = count[2] = 0; - nv = scale ? 3 : 1; - for (cpu = 0; cpu < nr_cpus; cpu ++) { - res = read(fd[cpu][counter], - single_count, nv * sizeof(__u64)); - assert(res == nv * sizeof(__u64)); - - count[0] += single_count[0]; - if (scale) { - count[1] += single_count[1]; - count[2] += single_count[2]; - } - } - - scaled = 0; - if (scale) { - if (count[2] == 0) { - fprintf(stderr, " %14s %-20s\n", - "", event_name(counter)); - continue; - } - if (count[2] < count[1]) { - scaled = 1; - count[0] = (unsigned long long) - ((double)count[0] * count[1] / count[2] + 0.5); - } - } - - if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK) || - event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK)) { - - double msecs = (double)count[0] / 1000000; - - fprintf(stderr, " %14.6f %-20s (msecs)", - msecs, event_name(counter)); - } else { - fprintf(stderr, " %14Ld %-20s (events)", - count[0], event_name(counter)); - } - if (scaled) - fprintf(stderr, " (scaled from %.2f%%)", - (double) count[2] / count[1] * 100); - fprintf(stderr, "\n"); - } - fprintf(stderr, "\n"); - fprintf(stderr, " Wall-clock time elapsed: %12.6f msecs\n", - (double)(t1-t0)/1e6); - fprintf(stderr, "\n"); - - return 0; -} - /* * Symbols */ @@ -805,7 +620,7 @@ static int read_symbol(FILE *in, struct sym_entry *s) return 0; } -int compare_addr(const void *__sym1, const void *__sym2) +static int compare_addr(const void *__sym1, const void *__sym2) { const struct sym_entry *sym1 = __sym1, *sym2 = __sym2; @@ -1070,9 +885,6 @@ static void process_options(int argc, char **argv) { int error = 0, counter; - if (strstr(argv[0], "perfstat")) - run_perfstat = 1; - for (;;) { int option_index = 0; /** Options for getopt */ @@ -1134,7 +946,6 @@ static void process_options(int argc, char **argv) tid = atoi(optarg); break; case 'r': realtime_prio = atoi(optarg); break; case 's': sym_filter = strdup(optarg); break; - case 'S': run_perfstat = 1; break; case 'x': vmlinux = strdup(optarg); break; case 'z': zero = 1; break; case 'm': mmap_pages = atoi(optarg); break; @@ -1147,12 +958,8 @@ static void process_options(int argc, char **argv) display_help(); if (!nr_counters) { - if (run_perfstat) - nr_counters = 8; - else { - nr_counters = 1; - event_id[0] = 0; - } + nr_counters = 1; + event_id[0] = 0; } for (counter = 0; counter < nr_counters; counter++) { @@ -1308,9 +1115,6 @@ int cmd_top(int argc, char **argv, const char *prefix) assert(nr_cpus <= MAX_NR_CPUS); assert(nr_cpus >= 0); - if (run_perfstat) - return do_perfstat(argc, argv); - if (tid != -1 || profile_cpu != -1) nr_cpus = 1; diff --git a/Documentation/perf_counter/builtin.h b/Documentation/perf_counter/builtin.h index 41637444ce2..a3bb6cd6bed 100644 --- a/Documentation/perf_counter/builtin.h +++ b/Documentation/perf_counter/builtin.h @@ -14,5 +14,6 @@ extern void prune_packed_objects(int); extern int read_line_with_nul(char *buf, int size, FILE *file); extern int check_pager_config(const char *cmd); 
-extern int cmd_top(int argc, const char **argv, const char *prefix); +extern int cmd_top(int argc, char **argv, const char *prefix); +extern int cmd_stat(int argc, char **argv, const char *prefix); #endif diff --git a/Documentation/perf_counter/command-list.txt b/Documentation/perf_counter/command-list.txt index 1eab3659b20..52455d46bfb 100644 --- a/Documentation/perf_counter/command-list.txt +++ b/Documentation/perf_counter/command-list.txt @@ -1,4 +1,5 @@ # List of known perf commands. # command name category [deprecated] [common] perf-top mainporcelain common +perf-stat mainporcelain common diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c deleted file mode 100644 index 042c1b83a87..00000000000 --- a/Documentation/perf_counter/kerneltop.c +++ /dev/null @@ -1,1409 +0,0 @@ -/* - * kerneltop.c: show top kernel functions - performance counters showcase - - Build with: - - cc -O6 -Wall -c -o kerneltop.o kerneltop.c -lrt - - Sample output: - ------------------------------------------------------------------------------- - KernelTop: 2669 irqs/sec [NMI, cache-misses/cache-refs], (all, cpu: 2) ------------------------------------------------------------------------------- - - weight RIP kernel function - ______ ________________ _______________ - - 35.20 - ffffffff804ce74b : skb_copy_and_csum_dev - 33.00 - ffffffff804cb740 : sock_alloc_send_skb - 31.26 - ffffffff804ce808 : skb_push - 22.43 - ffffffff80510004 : tcp_established_options - 19.00 - ffffffff8027d250 : find_get_page - 15.76 - ffffffff804e4fc9 : eth_type_trans - 15.20 - ffffffff804d8baa : dst_release - 14.86 - ffffffff804cf5d8 : skb_release_head_state - 14.00 - ffffffff802217d5 : read_hpet - 12.00 - ffffffff804ffb7f : __ip_local_out - 11.97 - ffffffff804fc0c8 : ip_local_deliver_finish - 8.54 - ffffffff805001a3 : ip_queue_xmit - */ - -/* - * perfstat: /usr/bin/time -alike performance counter statistics utility - - It summarizes the counter events of all tasks (and child tasks), - covering all CPUs that the command (or workload) executes on. - It only counts the per-task events of the workload started, - independent of how many other tasks run on those CPUs. - - Sample output: - - $ ./perfstat -e 1 -e 3 -e 5 ls -lR /usr/include/ >/dev/null - - Performance counter stats for 'ls': - - 163516953 instructions - 2295 cache-misses - 2855182 branch-misses - */ - - /* - * Copyright (C) 2008, Red Hat Inc, Ingo Molnar - * - * Improvements and fixes by: - * - * Arjan van de Ven - * Yanmin Zhang - * Wu Fengguang - * Mike Galbraith - * Paul Mackerras - * - * Released under the GPL v2. (and only v2, not any later version) - */ - -#define _GNU_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "../../include/linux/perf_counter.h" - - -/* - * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all - * counters in the current task. 
- */ -#define PR_TASK_PERF_COUNTERS_DISABLE 31 -#define PR_TASK_PERF_COUNTERS_ENABLE 32 - -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) - -#define rdclock() \ -({ \ - struct timespec ts; \ - \ - clock_gettime(CLOCK_MONOTONIC, &ts); \ - ts.tv_sec * 1000000000ULL + ts.tv_nsec; \ -}) - -/* - * Pick up some kernel type conventions: - */ -#define __user -#define asmlinkage - -#ifdef __x86_64__ -#define __NR_perf_counter_open 295 -#define rmb() asm volatile("lfence" ::: "memory") -#define cpu_relax() asm volatile("rep; nop" ::: "memory"); -#endif - -#ifdef __i386__ -#define __NR_perf_counter_open 333 -#define rmb() asm volatile("lfence" ::: "memory") -#define cpu_relax() asm volatile("rep; nop" ::: "memory"); -#endif - -#ifdef __powerpc__ -#define __NR_perf_counter_open 319 -#define rmb() asm volatile ("sync" ::: "memory") -#define cpu_relax() asm volatile ("" ::: "memory"); -#endif - -#define unlikely(x) __builtin_expect(!!(x), 0) -#define min(x, y) ({ \ - typeof(x) _min1 = (x); \ - typeof(y) _min2 = (y); \ - (void) (&_min1 == &_min2); \ - _min1 < _min2 ? _min1 : _min2; }) - -asmlinkage int sys_perf_counter_open( - struct perf_counter_hw_event *hw_event_uptr __user, - pid_t pid, - int cpu, - int group_fd, - unsigned long flags) -{ - return syscall( - __NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags); -} - -#define MAX_COUNTERS 64 -#define MAX_NR_CPUS 256 - -#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id)) - -static int run_perfstat = 0; -static int system_wide = 0; - -static int nr_counters = 0; -static __u64 event_id[MAX_COUNTERS] = { - EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), - EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), - EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), - EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), - - EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), - EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), - EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), - EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), -}; -static int default_interval = 100000; -static int event_count[MAX_COUNTERS]; -static int fd[MAX_NR_CPUS][MAX_COUNTERS]; - -static __u64 count_filter = 100; - -static int tid = -1; -static int profile_cpu = -1; -static int nr_cpus = 0; -static int nmi = 1; -static unsigned int realtime_prio = 0; -static int group = 0; -static unsigned int page_size; -static unsigned int mmap_pages = 16; -static int use_mmap = 0; -static int use_munmap = 0; - -static char *vmlinux; - -static char *sym_filter; -static unsigned long filter_start; -static unsigned long filter_end; - -static int delay_secs = 2; -static int zero; -static int dump_symtab; - -static int scale; - -struct source_line { - uint64_t EIP; - unsigned long count; - char *line; - struct source_line *next; -}; - -static struct source_line *lines; -static struct source_line **lines_tail; - -const unsigned int default_count[] = { - 1000000, - 1000000, - 10000, - 10000, - 1000000, - 10000, -}; - -static char *hw_event_names[] = { - "CPU cycles", - "instructions", - "cache references", - "cache misses", - "branches", - "branch misses", - "bus cycles", -}; - -static char *sw_event_names[] = { - "cpu clock ticks", - "task clock ticks", - "pagefaults", - "context switches", - "CPU migrations", - "minor faults", - "major faults", -}; - -struct event_symbol { - __u64 event; - char *symbol; -}; - -static struct event_symbol event_symbols[] = { - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cpu-cycles", }, - {EID(PERF_TYPE_HARDWARE, 
PERF_COUNT_CPU_CYCLES), "cycles", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), "instructions", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), "cache-references", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), "cache-misses", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branch-instructions", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branches", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES), "branch-misses", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES), "bus-cycles", }, - - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK), "cpu-clock", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), "task-clock", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "page-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN), "minor-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ), "major-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "context-switches", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "cs", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "cpu-migrations", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "migrations", }, -}; - -#define __PERF_COUNTER_FIELD(config, name) \ - ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) - -#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) -#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) -#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) -#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) - -static void display_events_help(void) -{ - unsigned int i; - __u64 e; - - printf( - " -e EVENT --event=EVENT # symbolic-name abbreviations"); - - for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { - int type, id; - - e = event_symbols[i].event; - type = PERF_COUNTER_TYPE(e); - id = PERF_COUNTER_ID(e); - - printf("\n %d:%d: %-20s", - type, id, event_symbols[i].symbol); - } - - printf("\n" - " rNNN: raw PMU events (eventsel+umask)\n\n"); -} - -static void display_perfstat_help(void) -{ - printf( - "Usage: perfstat [] \n\n" - "PerfStat Options (up to %d event types can be specified):\n\n", - MAX_COUNTERS); - - display_events_help(); - - printf( - " -l # scale counter values\n" - " -a # system-wide collection\n"); - exit(0); -} - -static void display_help(void) -{ - if (run_perfstat) - return display_perfstat_help(); - - printf( - "Usage: kerneltop []\n" - " Or: kerneltop -S [] COMMAND [ARGS]\n\n" - "KernelTop Options (up to %d event types can be specified at once):\n\n", - MAX_COUNTERS); - - display_events_help(); - - printf( - " -S --stat # perfstat COMMAND\n" - " -a # system-wide collection (for perfstat)\n\n" - " -c CNT --count=CNT # event period to sample\n\n" - " -C CPU --cpu=CPU # CPU (-1 for all) [default: -1]\n" - " -p PID --pid=PID # PID of sampled task (-1 for all) [default: -1]\n\n" - " -l # show scale factor for RR events\n" - " -d delay --delay= # sampling/display delay [default: 2]\n" - " -f CNT --filter=CNT # min-event-count filter [default: 100]\n\n" - " -r prio --realtime= # event acquisition runs with SCHED_FIFO policy\n" - " -s symbol --symbol= # function to be showed annotated one-shot\n" - " -x path --vmlinux= # the vmlinux binary, required for -s use\n" - " -z --zero # zero counts after display\n" - " -D --dump_symtab # dump symbol table to stderr on startup\n" - " -m pages --mmap_pages= # number of mmap data 
pages\n" - " -M --mmap_info # print mmap info stream\n" - " -U --munmap_info # print munmap info stream\n" - ); - - exit(0); -} - -static char *event_name(int ctr) -{ - __u64 config = event_id[ctr]; - int type = PERF_COUNTER_TYPE(config); - int id = PERF_COUNTER_ID(config); - static char buf[32]; - - if (PERF_COUNTER_RAW(config)) { - sprintf(buf, "raw 0x%llx", PERF_COUNTER_CONFIG(config)); - return buf; - } - - switch (type) { - case PERF_TYPE_HARDWARE: - if (id < PERF_HW_EVENTS_MAX) - return hw_event_names[id]; - return "unknown-hardware"; - - case PERF_TYPE_SOFTWARE: - if (id < PERF_SW_EVENTS_MAX) - return sw_event_names[id]; - return "unknown-software"; - - default: - break; - } - - return "unknown"; -} - -/* - * Each event can have multiple symbolic names. - * Symbolic names are (almost) exactly matched. - */ -static __u64 match_event_symbols(char *str) -{ - __u64 config, id; - int type; - unsigned int i; - - if (sscanf(str, "r%llx", &config) == 1) - return config | PERF_COUNTER_RAW_MASK; - - if (sscanf(str, "%d:%llu", &type, &id) == 2) - return EID(type, id); - - for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { - if (!strncmp(str, event_symbols[i].symbol, - strlen(event_symbols[i].symbol))) - return event_symbols[i].event; - } - - return ~0ULL; -} - -static int parse_events(char *str) -{ - __u64 config; - -again: - if (nr_counters == MAX_COUNTERS) - return -1; - - config = match_event_symbols(str); - if (config == ~0ULL) - return -1; - - event_id[nr_counters] = config; - nr_counters++; - - str = strstr(str, ","); - if (str) { - str++; - goto again; - } - - return 0; -} - - -/* - * perfstat - */ - -char fault_here[1000000]; - -static void create_perfstat_counter(int counter) -{ - struct perf_counter_hw_event hw_event; - - memset(&hw_event, 0, sizeof(hw_event)); - hw_event.config = event_id[counter]; - hw_event.record_type = 0; - hw_event.nmi = 0; - if (scale) - hw_event.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | - PERF_FORMAT_TOTAL_TIME_RUNNING; - - if (system_wide) { - int cpu; - for (cpu = 0; cpu < nr_cpus; cpu ++) { - fd[cpu][counter] = sys_perf_counter_open(&hw_event, -1, cpu, -1, 0); - if (fd[cpu][counter] < 0) { - printf("perfstat error: syscall returned with %d (%s)\n", - fd[cpu][counter], strerror(errno)); - exit(-1); - } - } - } else { - hw_event.inherit = 1; - hw_event.disabled = 1; - - fd[0][counter] = sys_perf_counter_open(&hw_event, 0, -1, -1, 0); - if (fd[0][counter] < 0) { - printf("perfstat error: syscall returned with %d (%s)\n", - fd[0][counter], strerror(errno)); - exit(-1); - } - } -} - -int do_perfstat(int argc, char *argv[]) -{ - unsigned long long t0, t1; - int counter; - ssize_t res; - int status; - int pid; - - if (!system_wide) - nr_cpus = 1; - - for (counter = 0; counter < nr_counters; counter++) - create_perfstat_counter(counter); - - argc -= optind; - argv += optind; - - if (!argc) - display_help(); - - /* - * Enable counters and exec the command: - */ - t0 = rdclock(); - prctl(PR_TASK_PERF_COUNTERS_ENABLE); - - if ((pid = fork()) < 0) - perror("failed to fork"); - if (!pid) { - if (execvp(argv[0], argv)) { - perror(argv[0]); - exit(-1); - } - } - while (wait(&status) >= 0) - ; - prctl(PR_TASK_PERF_COUNTERS_DISABLE); - t1 = rdclock(); - - fflush(stdout); - - fprintf(stderr, "\n"); - fprintf(stderr, " Performance counter stats for \'%s\':\n", - argv[0]); - fprintf(stderr, "\n"); - - for (counter = 0; counter < nr_counters; counter++) { - int cpu, nv; - __u64 count[3], single_count[3]; - int scaled; - - count[0] = count[1] = count[2] = 0; - nv = scale ? 
3 : 1; - for (cpu = 0; cpu < nr_cpus; cpu ++) { - res = read(fd[cpu][counter], - single_count, nv * sizeof(__u64)); - assert(res == nv * sizeof(__u64)); - - count[0] += single_count[0]; - if (scale) { - count[1] += single_count[1]; - count[2] += single_count[2]; - } - } - - scaled = 0; - if (scale) { - if (count[2] == 0) { - fprintf(stderr, " %14s %-20s\n", - "", event_name(counter)); - continue; - } - if (count[2] < count[1]) { - scaled = 1; - count[0] = (unsigned long long) - ((double)count[0] * count[1] / count[2] + 0.5); - } - } - - if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK) || - event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK)) { - - double msecs = (double)count[0] / 1000000; - - fprintf(stderr, " %14.6f %-20s (msecs)", - msecs, event_name(counter)); - } else { - fprintf(stderr, " %14Ld %-20s (events)", - count[0], event_name(counter)); - } - if (scaled) - fprintf(stderr, " (scaled from %.2f%%)", - (double) count[2] / count[1] * 100); - fprintf(stderr, "\n"); - } - fprintf(stderr, "\n"); - fprintf(stderr, " Wall-clock time elapsed: %12.6f msecs\n", - (double)(t1-t0)/1e6); - fprintf(stderr, "\n"); - - return 0; -} - -/* - * Symbols - */ - -static uint64_t min_ip; -static uint64_t max_ip = -1ll; - -struct sym_entry { - unsigned long long addr; - char *sym; - unsigned long count[MAX_COUNTERS]; - int skip; - struct source_line *source; -}; - -#define MAX_SYMS 100000 - -static int sym_table_count; - -struct sym_entry *sym_filter_entry; - -static struct sym_entry sym_table[MAX_SYMS]; - -static void show_details(struct sym_entry *sym); - -/* - * Ordering weight: count-1 * count-2 * ... / count-n - */ -static double sym_weight(const struct sym_entry *sym) -{ - double weight; - int counter; - - weight = sym->count[0]; - - for (counter = 1; counter < nr_counters-1; counter++) - weight *= sym->count[counter]; - - weight /= (sym->count[counter] + 1); - - return weight; -} - -static int compare(const void *__sym1, const void *__sym2) -{ - const struct sym_entry *sym1 = __sym1, *sym2 = __sym2; - - return sym_weight(sym1) < sym_weight(sym2); -} - -static long events; -static long userspace_events; -static const char CONSOLE_CLEAR[] = ""; - -static struct sym_entry tmp[MAX_SYMS]; - -static void print_sym_table(void) -{ - int i, printed; - int counter; - float events_per_sec = events/delay_secs; - float kevents_per_sec = (events-userspace_events)/delay_secs; - float sum_kevents = 0.0; - - events = userspace_events = 0; - memcpy(tmp, sym_table, sizeof(sym_table[0])*sym_table_count); - qsort(tmp, sym_table_count, sizeof(tmp[0]), compare); - - for (i = 0; i < sym_table_count && tmp[i].count[0]; i++) - sum_kevents += tmp[i].count[0]; - - write(1, CONSOLE_CLEAR, strlen(CONSOLE_CLEAR)); - - printf( -"------------------------------------------------------------------------------\n"); - printf( " KernelTop:%8.0f irqs/sec kernel:%4.1f%% [%s, ", - events_per_sec, - 100.0 - (100.0*((events_per_sec-kevents_per_sec)/events_per_sec)), - nmi ? 
"NMI" : "IRQ"); - - if (nr_counters == 1) - printf("%d ", event_count[0]); - - for (counter = 0; counter < nr_counters; counter++) { - if (counter) - printf("/"); - - printf("%s", event_name(counter)); - } - - printf( "], "); - - if (tid != -1) - printf(" (tid: %d", tid); - else - printf(" (all"); - - if (profile_cpu != -1) - printf(", cpu: %d)\n", profile_cpu); - else { - if (tid != -1) - printf(")\n"); - else - printf(", %d CPUs)\n", nr_cpus); - } - - printf("------------------------------------------------------------------------------\n\n"); - - if (nr_counters == 1) - printf(" events pcnt"); - else - printf(" weight events pcnt"); - - printf(" RIP kernel function\n" - " ______ ______ _____ ________________ _______________\n\n" - ); - - for (i = 0, printed = 0; i < sym_table_count; i++) { - float pcnt; - int count; - - if (printed <= 18 && tmp[i].count[0] >= count_filter) { - pcnt = 100.0 - (100.0*((sum_kevents-tmp[i].count[0])/sum_kevents)); - - if (nr_counters == 1) - printf("%19.2f - %4.1f%% - %016llx : %s\n", - sym_weight(tmp + i), - pcnt, tmp[i].addr, tmp[i].sym); - else - printf("%8.1f %10ld - %4.1f%% - %016llx : %s\n", - sym_weight(tmp + i), - tmp[i].count[0], - pcnt, tmp[i].addr, tmp[i].sym); - printed++; - } - /* - * Add decay to the counts: - */ - for (count = 0; count < nr_counters; count++) - sym_table[i].count[count] = zero ? 0 : sym_table[i].count[count] * 7 / 8; - } - - if (sym_filter_entry) - show_details(sym_filter_entry); - - { - struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; - - if (poll(&stdin_poll, 1, 0) == 1) { - printf("key pressed - exiting.\n"); - exit(0); - } - } -} - -static void *display_thread(void *arg) -{ - printf("KernelTop refresh period: %d seconds\n", delay_secs); - - while (!sleep(delay_secs)) - print_sym_table(); - - return NULL; -} - -static int read_symbol(FILE *in, struct sym_entry *s) -{ - static int filter_match = 0; - char *sym, stype; - char str[500]; - int rc, pos; - - rc = fscanf(in, "%llx %c %499s", &s->addr, &stype, str); - if (rc == EOF) - return -1; - - assert(rc == 3); - - /* skip until end of line: */ - pos = strlen(str); - do { - rc = fgetc(in); - if (rc == '\n' || rc == EOF || pos >= 499) - break; - str[pos] = rc; - pos++; - } while (1); - str[pos] = 0; - - sym = str; - - /* Filter out known duplicates and non-text symbols. */ - if (!strcmp(sym, "_text")) - return 1; - if (!min_ip && !strcmp(sym, "_stext")) - return 1; - if (!strcmp(sym, "_etext") || !strcmp(sym, "_sinittext")) - return 1; - if (stype != 'T' && stype != 't') - return 1; - if (!strncmp("init_module", sym, 11) || !strncmp("cleanup_module", sym, 14)) - return 1; - if (strstr(sym, "_text_start") || strstr(sym, "_text_end")) - return 1; - - s->sym = malloc(strlen(str)); - assert(s->sym); - - strcpy((char *)s->sym, str); - s->skip = 0; - - /* Tag events to be skipped. 
*/ - if (!strcmp("default_idle", s->sym) || !strcmp("cpu_idle", s->sym)) - s->skip = 1; - else if (!strcmp("enter_idle", s->sym) || !strcmp("exit_idle", s->sym)) - s->skip = 1; - else if (!strcmp("mwait_idle", s->sym)) - s->skip = 1; - - if (filter_match == 1) { - filter_end = s->addr; - filter_match = -1; - if (filter_end - filter_start > 10000) { - printf("hm, too large filter symbol <%s> - skipping.\n", - sym_filter); - printf("symbol filter start: %016lx\n", filter_start); - printf(" end: %016lx\n", filter_end); - filter_end = filter_start = 0; - sym_filter = NULL; - sleep(1); - } - } - if (filter_match == 0 && sym_filter && !strcmp(s->sym, sym_filter)) { - filter_match = 1; - filter_start = s->addr; - } - - return 0; -} - -int compare_addr(const void *__sym1, const void *__sym2) -{ - const struct sym_entry *sym1 = __sym1, *sym2 = __sym2; - - return sym1->addr > sym2->addr; -} - -static void sort_symbol_table(void) -{ - int i, dups; - - do { - qsort(sym_table, sym_table_count, sizeof(sym_table[0]), compare_addr); - for (i = 0, dups = 0; i < sym_table_count; i++) { - if (sym_table[i].addr == sym_table[i+1].addr) { - sym_table[i+1].addr = -1ll; - dups++; - } - } - sym_table_count -= dups; - } while(dups); -} - -static void parse_symbols(void) -{ - struct sym_entry *last; - - FILE *kallsyms = fopen("/proc/kallsyms", "r"); - - if (!kallsyms) { - printf("Could not open /proc/kallsyms - no CONFIG_KALLSYMS_ALL=y?\n"); - exit(-1); - } - - while (!feof(kallsyms)) { - if (read_symbol(kallsyms, &sym_table[sym_table_count]) == 0) { - sym_table_count++; - assert(sym_table_count <= MAX_SYMS); - } - } - - sort_symbol_table(); - min_ip = sym_table[0].addr; - max_ip = sym_table[sym_table_count-1].addr; - last = sym_table + sym_table_count++; - - last->addr = -1ll; - last->sym = ""; - - if (filter_end) { - int count; - for (count=0; count < sym_table_count; count ++) { - if (!strcmp(sym_table[count].sym, sym_filter)) { - sym_filter_entry = &sym_table[count]; - break; - } - } - } - if (dump_symtab) { - int i; - - for (i = 0; i < sym_table_count; i++) - fprintf(stderr, "%llx %s\n", - sym_table[i].addr, sym_table[i].sym); - } -} - -/* - * Source lines - */ - -static void parse_vmlinux(char *filename) -{ - FILE *file; - char command[PATH_MAX*2]; - if (!filename) - return; - - sprintf(command, "objdump --start-address=0x%016lx --stop-address=0x%016lx -dS %s", filter_start, filter_end, filename); - - file = popen(command, "r"); - if (!file) - return; - - lines_tail = &lines; - while (!feof(file)) { - struct source_line *src; - size_t dummy = 0; - char *c; - - src = malloc(sizeof(struct source_line)); - assert(src != NULL); - memset(src, 0, sizeof(struct source_line)); - - if (getline(&src->line, &dummy, file) < 0) - break; - if (!src->line) - break; - - c = strchr(src->line, '\n'); - if (c) - *c = 0; - - src->next = NULL; - *lines_tail = src; - lines_tail = &src->next; - - if (strlen(src->line)>8 && src->line[8] == ':') - src->EIP = strtoull(src->line, NULL, 16); - if (strlen(src->line)>8 && src->line[16] == ':') - src->EIP = strtoull(src->line, NULL, 16); - } - pclose(file); -} - -static void record_precise_ip(uint64_t ip) -{ - struct source_line *line; - - for (line = lines; line; line = line->next) { - if (line->EIP == ip) - line->count++; - if (line->EIP > ip) - break; - } -} - -static void lookup_sym_in_vmlinux(struct sym_entry *sym) -{ - struct source_line *line; - char pattern[PATH_MAX]; - sprintf(pattern, "<%s>:", sym->sym); - - for (line = lines; line; line = line->next) { - if (strstr(line->line, 
pattern)) { - sym->source = line; - break; - } - } -} - -static void show_lines(struct source_line *line_queue, int line_queue_count) -{ - int i; - struct source_line *line; - - line = line_queue; - for (i = 0; i < line_queue_count; i++) { - printf("%8li\t%s\n", line->count, line->line); - line = line->next; - } -} - -#define TRACE_COUNT 3 - -static void show_details(struct sym_entry *sym) -{ - struct source_line *line; - struct source_line *line_queue = NULL; - int displayed = 0; - int line_queue_count = 0; - - if (!sym->source) - lookup_sym_in_vmlinux(sym); - if (!sym->source) - return; - - printf("Showing details for %s\n", sym->sym); - - line = sym->source; - while (line) { - if (displayed && strstr(line->line, ">:")) - break; - - if (!line_queue_count) - line_queue = line; - line_queue_count ++; - - if (line->count >= count_filter) { - show_lines(line_queue, line_queue_count); - line_queue_count = 0; - line_queue = NULL; - } else if (line_queue_count > TRACE_COUNT) { - line_queue = line_queue->next; - line_queue_count --; - } - - line->count = 0; - displayed++; - if (displayed > 300) - break; - line = line->next; - } -} - -/* - * Binary search in the histogram table and record the hit: - */ -static void record_ip(uint64_t ip, int counter) -{ - int left_idx, middle_idx, right_idx, idx; - unsigned long left, middle, right; - - record_precise_ip(ip); - - left_idx = 0; - right_idx = sym_table_count-1; - assert(ip <= max_ip && ip >= min_ip); - - while (left_idx + 1 < right_idx) { - middle_idx = (left_idx + right_idx) / 2; - - left = sym_table[ left_idx].addr; - middle = sym_table[middle_idx].addr; - right = sym_table[ right_idx].addr; - - if (!(left <= middle && middle <= right)) { - printf("%016lx...\n%016lx...\n%016lx\n", left, middle, right); - printf("%d %d %d\n", left_idx, middle_idx, right_idx); - } - assert(left <= middle && middle <= right); - if (!(left <= ip && ip <= right)) { - printf(" left: %016lx\n", left); - printf(" ip: %016lx\n", (unsigned long)ip); - printf("right: %016lx\n", right); - } - assert(left <= ip && ip <= right); - /* - * [ left .... target .... middle .... right ] - * => right := middle - */ - if (ip < middle) { - right_idx = middle_idx; - continue; - } - /* - * [ left .... middle ... target ... 
right ] - * => left := middle - */ - left_idx = middle_idx; - } - - idx = left_idx; - - if (!sym_table[idx].skip) - sym_table[idx].count[counter]++; - else events--; -} - -static void process_event(uint64_t ip, int counter) -{ - events++; - - if (ip < min_ip || ip > max_ip) { - userspace_events++; - return; - } - - record_ip(ip, counter); -} - -static void process_options(int argc, char *argv[]) -{ - int error = 0, counter; - - if (strstr(argv[0], "perfstat")) - run_perfstat = 1; - - for (;;) { - int option_index = 0; - /** Options for getopt */ - static struct option long_options[] = { - {"count", required_argument, NULL, 'c'}, - {"cpu", required_argument, NULL, 'C'}, - {"delay", required_argument, NULL, 'd'}, - {"dump_symtab", no_argument, NULL, 'D'}, - {"event", required_argument, NULL, 'e'}, - {"filter", required_argument, NULL, 'f'}, - {"group", required_argument, NULL, 'g'}, - {"help", no_argument, NULL, 'h'}, - {"nmi", required_argument, NULL, 'n'}, - {"mmap_info", no_argument, NULL, 'M'}, - {"mmap_pages", required_argument, NULL, 'm'}, - {"munmap_info", no_argument, NULL, 'U'}, - {"pid", required_argument, NULL, 'p'}, - {"realtime", required_argument, NULL, 'r'}, - {"scale", no_argument, NULL, 'l'}, - {"symbol", required_argument, NULL, 's'}, - {"stat", no_argument, NULL, 'S'}, - {"vmlinux", required_argument, NULL, 'x'}, - {"zero", no_argument, NULL, 'z'}, - {NULL, 0, NULL, 0 } - }; - int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hln:m:p:r:s:Sx:zMU", - long_options, &option_index); - if (c == -1) - break; - - switch (c) { - case 'a': system_wide = 1; break; - case 'c': default_interval = atoi(optarg); break; - case 'C': - /* CPU and PID are mutually exclusive */ - if (tid != -1) { - printf("WARNING: CPU switch overriding PID\n"); - sleep(1); - tid = -1; - } - profile_cpu = atoi(optarg); break; - case 'd': delay_secs = atoi(optarg); break; - case 'D': dump_symtab = 1; break; - - case 'e': error = parse_events(optarg); break; - - case 'f': count_filter = atoi(optarg); break; - case 'g': group = atoi(optarg); break; - case 'h': display_help(); break; - case 'l': scale = 1; break; - case 'n': nmi = atoi(optarg); break; - case 'p': - /* CPU and PID are mutually exclusive */ - if (profile_cpu != -1) { - printf("WARNING: PID switch overriding CPU\n"); - sleep(1); - profile_cpu = -1; - } - tid = atoi(optarg); break; - case 'r': realtime_prio = atoi(optarg); break; - case 's': sym_filter = strdup(optarg); break; - case 'S': run_perfstat = 1; break; - case 'x': vmlinux = strdup(optarg); break; - case 'z': zero = 1; break; - case 'm': mmap_pages = atoi(optarg); break; - case 'M': use_mmap = 1; break; - case 'U': use_munmap = 1; break; - default: error = 1; break; - } - } - if (error) - display_help(); - - if (!nr_counters) { - if (run_perfstat) - nr_counters = 8; - else { - nr_counters = 1; - event_id[0] = 0; - } - } - - for (counter = 0; counter < nr_counters; counter++) { - if (event_count[counter]) - continue; - - event_count[counter] = default_interval; - } -} - -struct mmap_data { - int counter; - void *base; - unsigned int mask; - unsigned int prev; -}; - -static unsigned int mmap_read_head(struct mmap_data *md) -{ - struct perf_counter_mmap_page *pc = md->base; - int head; - - head = pc->data_head; - rmb(); - - return head; -} - -struct timeval last_read, this_read; - -static void mmap_read(struct mmap_data *md) -{ - unsigned int head = mmap_read_head(md); - unsigned int old = md->prev; - unsigned char *data = md->base + page_size; - int diff; - - gettimeofday(&this_read, NULL); - 
- /* - * If we're further behind than half the buffer, there's a chance - * the writer will bite our tail and screw up the events under us. - * - * If we somehow ended up ahead of the head, we got messed up. - * - * In either case, truncate and restart at head. - */ - diff = head - old; - if (diff > md->mask / 2 || diff < 0) { - struct timeval iv; - unsigned long msecs; - - timersub(&this_read, &last_read, &iv); - msecs = iv.tv_sec*1000 + iv.tv_usec/1000; - - fprintf(stderr, "WARNING: failed to keep up with mmap data." - " Last read %lu msecs ago.\n", msecs); - - /* - * head points to a known good entry, start there. - */ - old = head; - } - - last_read = this_read; - - for (; old != head;) { - struct ip_event { - struct perf_event_header header; - __u64 ip; - __u32 pid, tid; - }; - struct mmap_event { - struct perf_event_header header; - __u32 pid, tid; - __u64 start; - __u64 len; - __u64 pgoff; - char filename[PATH_MAX]; - }; - - typedef union event_union { - struct perf_event_header header; - struct ip_event ip; - struct mmap_event mmap; - } event_t; - - event_t *event = (event_t *)&data[old & md->mask]; - - event_t event_copy; - - unsigned int size = event->header.size; - - /* - * Event straddles the mmap boundary -- header should always - * be inside due to u64 alignment of output. - */ - if ((old & md->mask) + size != ((old + size) & md->mask)) { - unsigned int offset = old; - unsigned int len = min(sizeof(*event), size), cpy; - void *dst = &event_copy; - - do { - cpy = min(md->mask + 1 - (offset & md->mask), len); - memcpy(dst, &data[offset & md->mask], cpy); - offset += cpy; - dst += cpy; - len -= cpy; - } while (len); - - event = &event_copy; - } - - old += size; - - if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) { - if (event->header.type & PERF_RECORD_IP) - process_event(event->ip.ip, md->counter); - } else { - switch (event->header.type) { - case PERF_EVENT_MMAP: - case PERF_EVENT_MUNMAP: - printf("%s: %Lu %Lu %Lu %s\n", - event->header.type == PERF_EVENT_MMAP - ? 
"mmap" : "munmap", - event->mmap.start, - event->mmap.len, - event->mmap.pgoff, - event->mmap.filename); - break; - } - } - } - - md->prev = old; -} - -int main(int argc, char *argv[]) -{ - struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; - struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; - struct perf_counter_hw_event hw_event; - pthread_t thread; - int i, counter, group_fd, nr_poll = 0; - unsigned int cpu; - int ret; - - page_size = sysconf(_SC_PAGE_SIZE); - - process_options(argc, argv); - - nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); - assert(nr_cpus <= MAX_NR_CPUS); - assert(nr_cpus >= 0); - - if (run_perfstat) - return do_perfstat(argc, argv); - - if (tid != -1 || profile_cpu != -1) - nr_cpus = 1; - - parse_symbols(); - if (vmlinux && sym_filter_entry) - parse_vmlinux(vmlinux); - - for (i = 0; i < nr_cpus; i++) { - group_fd = -1; - for (counter = 0; counter < nr_counters; counter++) { - - cpu = profile_cpu; - if (tid == -1 && profile_cpu == -1) - cpu = i; - - memset(&hw_event, 0, sizeof(hw_event)); - hw_event.config = event_id[counter]; - hw_event.irq_period = event_count[counter]; - hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID; - hw_event.nmi = nmi; - hw_event.mmap = use_mmap; - hw_event.munmap = use_munmap; - - fd[i][counter] = sys_perf_counter_open(&hw_event, tid, cpu, group_fd, 0); - if (fd[i][counter] < 0) { - int err = errno; - printf("kerneltop error: syscall returned with %d (%s)\n", - fd[i][counter], strerror(err)); - if (err == EPERM) - printf("Are you root?\n"); - exit(-1); - } - assert(fd[i][counter] >= 0); - fcntl(fd[i][counter], F_SETFL, O_NONBLOCK); - - /* - * First counter acts as the group leader: - */ - if (group && group_fd == -1) - group_fd = fd[i][counter]; - - event_array[nr_poll].fd = fd[i][counter]; - event_array[nr_poll].events = POLLIN; - nr_poll++; - - mmap_array[i][counter].counter = counter; - mmap_array[i][counter].prev = 0; - mmap_array[i][counter].mask = mmap_pages*page_size - 1; - mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size, - PROT_READ, MAP_SHARED, fd[i][counter], 0); - if (mmap_array[i][counter].base == MAP_FAILED) { - printf("kerneltop error: failed to mmap with %d (%s)\n", - errno, strerror(errno)); - exit(-1); - } - } - } - - if (pthread_create(&thread, NULL, display_thread, NULL)) { - printf("Could not create display thread.\n"); - exit(-1); - } - - if (realtime_prio) { - struct sched_param param; - - param.sched_priority = realtime_prio; - if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { - printf("Could not set realtime priority.\n"); - exit(-1); - } - } - - while (1) { - int hits = events; - - for (i = 0; i < nr_cpus; i++) { - for (counter = 0; counter < nr_counters; counter++) - mmap_read(&mmap_array[i][counter]); - } - - if (hits == events) - ret = poll(event_array, nr_poll, 100); - } - - return 0; -} diff --git a/Documentation/perf_counter/perf.c b/Documentation/perf_counter/perf.c index 63f8a892c0d..ff8658f2a2f 100644 --- a/Documentation/perf_counter/perf.c +++ b/Documentation/perf_counter/perf.c @@ -249,6 +249,7 @@ static void handle_internal_command(int argc, const char **argv) const char *cmd = argv[0]; static struct cmd_struct commands[] = { { "top", cmd_top, 0 }, + { "stat", cmd_stat, 0 }, }; int i; static const char ext[] = STRIP_EXTENSION; -- cgit v1.2.3 From 1d8c8b209e9351a7de1307d7b9b6df4222b8d742 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 20 Apr 2009 15:52:29 +0200 Subject: perf_counter tools: add help texts Add Documentation/perf-stat.txt and Documentation/perf-top.txt. 
The template that was used for it: Documentation/git-add.txt from Git. Fix up small bugs to make these help texts show up both in the 'perf' common-command summary output screen, and on the individual help screens. Signed-off-by: Ingo Molnar --- .../perf_counter/Documentation/perf-stat.txt | 76 ++++++++++++++++++++++ .../perf_counter/Documentation/perf-top.txt | 61 +++++++++++++++++ Documentation/perf_counter/builtin.h | 4 +- Documentation/perf_counter/generate-cmdlist.sh | 8 +-- 4 files changed, 143 insertions(+), 6 deletions(-) create mode 100644 Documentation/perf_counter/Documentation/perf-stat.txt create mode 100644 Documentation/perf_counter/Documentation/perf-top.txt diff --git a/Documentation/perf_counter/Documentation/perf-stat.txt b/Documentation/perf_counter/Documentation/perf-stat.txt new file mode 100644 index 00000000000..7fcab271e57 --- /dev/null +++ b/Documentation/perf_counter/Documentation/perf-stat.txt @@ -0,0 +1,76 @@ +perf-stat(1) +========== + +NAME +---- +perf-stat - Run a command and gather performance counter statistics + +SYNOPSIS +-------- +[verse] +'perf stat' [-e | --event=EVENT] [-l] [-a] + +DESCRIPTION +----------- +This command runs a command and gathers performance counter statistics +from it. + + +OPTIONS +------- +...:: + Any command you can specify in a shell. + +-e:: +--event=:: + 0:0: cpu-cycles + 0:0: cycles + 0:1: instructions + 0:2: cache-references + 0:3: cache-misses + 0:4: branch-instructions + 0:4: branches + 0:5: branch-misses + 0:6: bus-cycles + 1:0: cpu-clock + 1:1: task-clock + 1:2: page-faults + 1:2: faults + 1:5: minor-faults + 1:6: major-faults + 1:3: context-switches + 1:3: cs + 1:4: cpu-migrations + 1:4: migrations + rNNN: raw PMU events (eventsel+umask) + +-a:: + system-wide collection + +-l:: + scale counter values + +Configuration +------------- + +EXAMPLES +-------- + +$ perf stat sleep 1 + + Performance counter stats for 'sleep': + + 0.678356 task clock ticks (msecs) + 7 context switches (events) + 4 CPU migrations (events) + 232 pagefaults (events) + 1810403 CPU cycles (events) + 946759 instructions (events) + 18952 cache references (events) + 4885 cache misses (events) + + Wall-clock time elapsed: 1001.252894 msecs + +SEE ALSO +-------- +linkperf:git-tops[1] diff --git a/Documentation/perf_counter/Documentation/perf-top.txt b/Documentation/perf_counter/Documentation/perf-top.txt new file mode 100644 index 00000000000..057333b7253 --- /dev/null +++ b/Documentation/perf_counter/Documentation/perf-top.txt @@ -0,0 +1,61 @@ +perf-top(1) +========== + +NAME +---- +perf-top - Run a command and profile it + +SYNOPSIS +-------- +[verse] +'perf top' [-e | --event=EVENT] [-l] [-a] + +DESCRIPTION +----------- +This command runs a command and gathers a performance counter profile +from it. + + +OPTIONS +------- +...:: + Any command you can specify in a shell. 
+ +-e:: +--event=:: + 0:0: cpu-cycles + 0:0: cycles + 0:1: instructions + 0:2: cache-references + 0:3: cache-misses + 0:4: branch-instructions + 0:4: branches + 0:5: branch-misses + 0:6: bus-cycles + 1:0: cpu-clock + 1:1: task-clock + 1:2: page-faults + 1:2: faults + 1:5: minor-faults + 1:6: major-faults + 1:3: context-switches + 1:3: cs + 1:4: cpu-migrations + 1:4: migrations + rNNN: raw PMU events (eventsel+umask) + +-a:: + system-wide collection + +-l:: + scale counter values + +Configuration +------------- + +EXAMPLES +-------- + +SEE ALSO +-------- +linkperf:git-stat[1] diff --git a/Documentation/perf_counter/builtin.h b/Documentation/perf_counter/builtin.h index a3bb6cd6bed..605323c691f 100644 --- a/Documentation/perf_counter/builtin.h +++ b/Documentation/perf_counter/builtin.h @@ -14,6 +14,6 @@ extern void prune_packed_objects(int); extern int read_line_with_nul(char *buf, int size, FILE *file); extern int check_pager_config(const char *cmd); -extern int cmd_top(int argc, char **argv, const char *prefix); -extern int cmd_stat(int argc, char **argv, const char *prefix); +extern int cmd_top(int argc, const char **argv, const char *prefix); +extern int cmd_stat(int argc, const char **argv, const char *prefix); #endif diff --git a/Documentation/perf_counter/generate-cmdlist.sh b/Documentation/perf_counter/generate-cmdlist.sh index 75c68d948fd..f06f6fd148f 100755 --- a/Documentation/perf_counter/generate-cmdlist.sh +++ b/Documentation/perf_counter/generate-cmdlist.sh @@ -9,16 +9,16 @@ struct cmdname_help static struct cmdname_help common_cmds[] = {" -sed -n -e 's/^git-\([^ ]*\)[ ].* common.*/\1/p' command-list.txt | +sed -n -e 's/^perf-\([^ ]*\)[ ].* common.*/\1/p' command-list.txt | sort | while read cmd do sed -n ' - /^NAME/,/git-'"$cmd"'/H + /^NAME/,/perf-'"$cmd"'/H ${ x - s/.*git-'"$cmd"' - \(.*\)/ {"'"$cmd"'", "\1"},/ + s/.*perf-'"$cmd"' - \(.*\)/ {"'"$cmd"'", "\1"},/ p - }' "Documentation/git-$cmd.txt" + }' "Documentation/perf-$cmd.txt" done echo "};" -- cgit v1.2.3 From e33e0a43736307512422e41aee6e24d5a8c39181 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 20 Apr 2009 15:58:01 +0200 Subject: perf_counter tools: add 'perf record' command Move perf-record.c into the perf suite of commands. Signed-off-by: Ingo Molnar --- .../perf_counter/Documentation/perf-record.txt | 63 +++ Documentation/perf_counter/Makefile | 1 + Documentation/perf_counter/builtin-record.c | 506 ++++++++++++++++++++ Documentation/perf_counter/builtin.h | 3 +- Documentation/perf_counter/command-list.txt | 3 +- Documentation/perf_counter/perf-record.c | 530 --------------------- Documentation/perf_counter/perf.c | 3 +- 7 files changed, 576 insertions(+), 533 deletions(-) create mode 100644 Documentation/perf_counter/Documentation/perf-record.txt create mode 100644 Documentation/perf_counter/builtin-record.c delete mode 100644 Documentation/perf_counter/perf-record.c diff --git a/Documentation/perf_counter/Documentation/perf-record.txt b/Documentation/perf_counter/Documentation/perf-record.txt new file mode 100644 index 00000000000..d07700e35eb --- /dev/null +++ b/Documentation/perf_counter/Documentation/perf-record.txt @@ -0,0 +1,63 @@ +perf-record(1) +========== + +NAME +---- +perf-record - Run a command and record its profile into output.perf + +SYNOPSIS +-------- +[verse] +'perf record' [-e | --event=EVENT] [-l] [-a] + +DESCRIPTION +----------- +This command runs a command and gathers a performance counter profile +from it, into output.perf - without displaying anything. 
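The file is a raw stream of sampled records. As a sketch of what one sample carries (layout quoted from the ip_event record used by the reader code elsewhere in this series; an illustration, not a formal file-format definition):

    struct ip_event {
        struct perf_event_header header;
        __u64 ip;
        __u32 pid, tid;
    };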
+ +This file can then be inspected later on, using 'perf report'. + + +OPTIONS +------- +...:: + Any command you can specify in a shell. + +-e:: +--event=:: + 0:0: cpu-cycles + 0:0: cycles + 0:1: instructions + 0:2: cache-references + 0:3: cache-misses + 0:4: branch-instructions + 0:4: branches + 0:5: branch-misses + 0:6: bus-cycles + 1:0: cpu-clock + 1:1: task-clock + 1:2: page-faults + 1:2: faults + 1:5: minor-faults + 1:6: major-faults + 1:3: context-switches + 1:3: cs + 1:4: cpu-migrations + 1:4: migrations + rNNN: raw PMU events (eventsel+umask) + +-a:: + system-wide collection + +-l:: + scale counter values + +Configuration +------------- + +EXAMPLES +-------- + +SEE ALSO +-------- +linkperf:git-stat[1] diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index fb8b71744e5..b6c665eb22e 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -309,6 +309,7 @@ LIB_OBJS += usage.o LIB_OBJS += wrapper.o BUILTIN_OBJS += builtin-help.o +BUILTIN_OBJS += builtin-record.o BUILTIN_OBJS += builtin-stat.o BUILTIN_OBJS += builtin-top.o diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c new file mode 100644 index 00000000000..4a50abf843e --- /dev/null +++ b/Documentation/perf_counter/builtin-record.c @@ -0,0 +1,506 @@ + + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "../../include/linux/perf_counter.h" + + +/* + * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all + * counters in the current task. + */ +#define PR_TASK_PERF_COUNTERS_DISABLE 31 +#define PR_TASK_PERF_COUNTERS_ENABLE 32 + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +#define rdclock() \ +({ \ + struct timespec ts; \ + \ + clock_gettime(CLOCK_MONOTONIC, &ts); \ + ts.tv_sec * 1000000000ULL + ts.tv_nsec; \ +}) + +/* + * Pick up some kernel type conventions: + */ +#define __user +#define asmlinkage + +#ifdef __x86_64__ +#define __NR_perf_counter_open 295 +#define rmb() asm volatile("lfence" ::: "memory") +#define cpu_relax() asm volatile("rep; nop" ::: "memory"); +#endif + +#ifdef __i386__ +#define __NR_perf_counter_open 333 +#define rmb() asm volatile("lfence" ::: "memory") +#define cpu_relax() asm volatile("rep; nop" ::: "memory"); +#endif + +#ifdef __powerpc__ +#define __NR_perf_counter_open 319 +#define rmb() asm volatile ("sync" ::: "memory") +#define cpu_relax() asm volatile ("" ::: "memory"); +#endif + +#define unlikely(x) __builtin_expect(!!(x), 0) +#define min(x, y) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? 
_min1 : _min2; }) + +extern asmlinkage int sys_perf_counter_open( + struct perf_counter_hw_event *hw_event_uptr __user, + pid_t pid, + int cpu, + int group_fd, + unsigned long flags); + +#define MAX_COUNTERS 64 +#define MAX_NR_CPUS 256 + +#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id)) + +static int nr_counters = 0; +static __u64 event_id[MAX_COUNTERS] = { }; +static int default_interval = 100000; +static int event_count[MAX_COUNTERS]; +static int fd[MAX_NR_CPUS][MAX_COUNTERS]; +static int nr_cpus = 0; +static unsigned int page_size; +static unsigned int mmap_pages = 16; +static int output; +static char *output_name = "output.perf"; +static int group = 0; +static unsigned int realtime_prio = 0; + +const unsigned int default_count[] = { + 1000000, + 1000000, + 10000, + 10000, + 1000000, + 10000, +}; + +struct event_symbol { + __u64 event; + char *symbol; +}; + +static struct event_symbol event_symbols[] = { + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cpu-cycles", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cycles", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), "instructions", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), "cache-references", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), "cache-misses", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branch-instructions", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branches", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES), "branch-misses", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES), "bus-cycles", }, + + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK), "cpu-clock", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), "task-clock", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "page-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN), "minor-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ), "major-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "context-switches", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "cs", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "cpu-migrations", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "migrations", }, +}; + +/* + * Each event can have multiple symbolic names. + * Symbolic names are (almost) exactly matched. 
+ */ +static __u64 match_event_symbols(char *str) +{ + __u64 config, id; + int type; + unsigned int i; + + if (sscanf(str, "r%llx", &config) == 1) + return config | PERF_COUNTER_RAW_MASK; + + if (sscanf(str, "%d:%llu", &type, &id) == 2) + return EID(type, id); + + for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { + if (!strncmp(str, event_symbols[i].symbol, + strlen(event_symbols[i].symbol))) + return event_symbols[i].event; + } + + return ~0ULL; +} + +static int parse_events(char *str) +{ + __u64 config; + +again: + if (nr_counters == MAX_COUNTERS) + return -1; + + config = match_event_symbols(str); + if (config == ~0ULL) + return -1; + + event_id[nr_counters] = config; + nr_counters++; + + str = strstr(str, ","); + if (str) { + str++; + goto again; + } + + return 0; +} + +#define __PERF_COUNTER_FIELD(config, name) \ + ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) + +#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) +#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) +#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) +#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) + +static void display_events_help(void) +{ + unsigned int i; + __u64 e; + + printf( + " -e EVENT --event=EVENT # symbolic-name abbreviations"); + + for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { + int type, id; + + e = event_symbols[i].event; + type = PERF_COUNTER_TYPE(e); + id = PERF_COUNTER_ID(e); + + printf("\n %d:%d: %-20s", + type, id, event_symbols[i].symbol); + } + + printf("\n" + " rNNN: raw PMU events (eventsel+umask)\n\n"); +} + +static void display_help(void) +{ + printf( + "Usage: perf-record []\n" + "perf-record Options (up to %d event types can be specified at once):\n\n", + MAX_COUNTERS); + + display_events_help(); + + printf( + " -c CNT --count=CNT # event period to sample\n" + " -m pages --mmap_pages= # number of mmap data pages\n" + " -o file --output= # output file\n" + " -r prio --realtime= # use RT prio\n" + ); + + exit(0); +} + +static void process_options(int argc, char *argv[]) +{ + int error = 0, counter; + + for (;;) { + int option_index = 0; + /** Options for getopt */ + static struct option long_options[] = { + {"count", required_argument, NULL, 'c'}, + {"event", required_argument, NULL, 'e'}, + {"mmap_pages", required_argument, NULL, 'm'}, + {"output", required_argument, NULL, 'o'}, + {"realtime", required_argument, NULL, 'r'}, + {NULL, 0, NULL, 0 } + }; + int c = getopt_long(argc, argv, "+:c:e:m:o:r:", + long_options, &option_index); + if (c == -1) + break; + + switch (c) { + case 'c': default_interval = atoi(optarg); break; + case 'e': error = parse_events(optarg); break; + case 'm': mmap_pages = atoi(optarg); break; + case 'o': output_name = strdup(optarg); break; + case 'r': realtime_prio = atoi(optarg); break; + default: error = 1; break; + } + } + if (error) + display_help(); + + if (!nr_counters) { + nr_counters = 1; + event_id[0] = 0; + } + + for (counter = 0; counter < nr_counters; counter++) { + if (event_count[counter]) + continue; + + event_count[counter] = default_interval; + } +} + +struct mmap_data { + int counter; + void *base; + unsigned int mask; + unsigned int prev; +}; + +static unsigned int mmap_read_head(struct mmap_data *md) +{ + struct perf_counter_mmap_page *pc = md->base; + int head; + + head = pc->data_head; + rmb(); + + return head; +} + +static long events; +static struct timeval last_read, this_read; + +static void mmap_read(struct mmap_data *md) +{ + unsigned int 
head = mmap_read_head(md); + unsigned int old = md->prev; + unsigned char *data = md->base + page_size; + unsigned long size; + void *buf; + int diff; + + gettimeofday(&this_read, NULL); + + /* + * If we're further behind than half the buffer, there's a chance + * the writer will bite our tail and screw up the events under us. + * + * If we somehow ended up ahead of the head, we got messed up. + * + * In either case, truncate and restart at head. + */ + diff = head - old; + if (diff > md->mask / 2 || diff < 0) { + struct timeval iv; + unsigned long msecs; + + timersub(&this_read, &last_read, &iv); + msecs = iv.tv_sec*1000 + iv.tv_usec/1000; + + fprintf(stderr, "WARNING: failed to keep up with mmap data." + " Last read %lu msecs ago.\n", msecs); + + /* + * head points to a known good entry, start there. + */ + old = head; + } + + last_read = this_read; + + if (old != head) + events++; + + size = head - old; + + if ((old & md->mask) + size != (head & md->mask)) { + buf = &data[old & md->mask]; + size = md->mask + 1 - (old & md->mask); + old += size; + while (size) { + int ret = write(output, buf, size); + if (ret < 0) { + perror("failed to write"); + exit(-1); + } + size -= ret; + buf += ret; + } + } + + buf = &data[old & md->mask]; + size = head - old; + old += size; + while (size) { + int ret = write(output, buf, size); + if (ret < 0) { + perror("failed to write"); + exit(-1); + } + size -= ret; + buf += ret; + } + + md->prev = old; +} + +static volatile int done = 0; + +static void sigchld_handler(int sig) +{ + if (sig == SIGCHLD) + done = 1; +} + +int cmd_record(int argc, const char **argv) +{ + struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; + struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; + struct perf_counter_hw_event hw_event; + int i, counter, group_fd, nr_poll = 0; + pid_t pid; + int ret; + + page_size = sysconf(_SC_PAGE_SIZE); + + process_options(argc, argv); + + nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); + assert(nr_cpus <= MAX_NR_CPUS); + assert(nr_cpus >= 0); + + output = open(output_name, O_CREAT|O_RDWR, S_IRWXU); + if (output < 0) { + perror("failed to create output file"); + exit(-1); + } + + argc -= optind; + argv += optind; + + for (i = 0; i < nr_cpus; i++) { + group_fd = -1; + for (counter = 0; counter < nr_counters; counter++) { + + memset(&hw_event, 0, sizeof(hw_event)); + hw_event.config = event_id[counter]; + hw_event.irq_period = event_count[counter]; + hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID; + hw_event.nmi = 1; + hw_event.mmap = 1; + hw_event.comm = 1; + + fd[i][counter] = sys_perf_counter_open(&hw_event, -1, i, group_fd, 0); + if (fd[i][counter] < 0) { + int err = errno; + printf("kerneltop error: syscall returned with %d (%s)\n", + fd[i][counter], strerror(err)); + if (err == EPERM) + printf("Are you root?\n"); + exit(-1); + } + assert(fd[i][counter] >= 0); + fcntl(fd[i][counter], F_SETFL, O_NONBLOCK); + + /* + * First counter acts as the group leader: + */ + if (group && group_fd == -1) + group_fd = fd[i][counter]; + + event_array[nr_poll].fd = fd[i][counter]; + event_array[nr_poll].events = POLLIN; + nr_poll++; + + mmap_array[i][counter].counter = counter; + mmap_array[i][counter].prev = 0; + mmap_array[i][counter].mask = mmap_pages*page_size - 1; + mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size, + PROT_READ, MAP_SHARED, fd[i][counter], 0); + if (mmap_array[i][counter].base == MAP_FAILED) { + printf("kerneltop error: failed to mmap with %d (%s)\n", + errno, strerror(errno)); + exit(-1); + } + } + } + + 
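+
+	/*
+	 * All counter fds are now created and their ring buffers
+	 * mmap()ed; what remains is wiring up the workload itself:
+	 * SIGCHLD sets 'done' so the poll/mmap_read loop below can
+	 * exit once the child terminates.
+	 */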
signal(SIGCHLD, sigchld_handler); + + pid = fork(); + if (pid < 0) + perror("failed to fork"); + + if (!pid) { + if (execvp(argv[0], argv)) { + perror(argv[0]); + exit(-1); + } + } + + if (realtime_prio) { + struct sched_param param; + + param.sched_priority = realtime_prio; + if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { + printf("Could not set realtime priority.\n"); + exit(-1); + } + } + + /* + * TODO: store the current /proc/$/maps information somewhere + */ + + while (!done) { + int hits = events; + + for (i = 0; i < nr_cpus; i++) { + for (counter = 0; counter < nr_counters; counter++) + mmap_read(&mmap_array[i][counter]); + } + + if (hits == events) + ret = poll(event_array, nr_poll, 100); + } + + return 0; +} diff --git a/Documentation/perf_counter/builtin.h b/Documentation/perf_counter/builtin.h index 605323c691f..5854b1715f5 100644 --- a/Documentation/perf_counter/builtin.h +++ b/Documentation/perf_counter/builtin.h @@ -14,6 +14,7 @@ extern void prune_packed_objects(int); extern int read_line_with_nul(char *buf, int size, FILE *file); extern int check_pager_config(const char *cmd); -extern int cmd_top(int argc, const char **argv, const char *prefix); +extern int cmd_record(int argc, const char **argv, const char *prefix); extern int cmd_stat(int argc, const char **argv, const char *prefix); +extern int cmd_top(int argc, const char **argv, const char *prefix); #endif diff --git a/Documentation/perf_counter/command-list.txt b/Documentation/perf_counter/command-list.txt index 52455d46bfb..d15210aa0ca 100644 --- a/Documentation/perf_counter/command-list.txt +++ b/Documentation/perf_counter/command-list.txt @@ -1,5 +1,6 @@ # List of known perf commands. # command name category [deprecated] [common] -perf-top mainporcelain common +perf-record mainporcelain common perf-stat mainporcelain common +perf-top mainporcelain common diff --git a/Documentation/perf_counter/perf-record.c b/Documentation/perf_counter/perf-record.c deleted file mode 100644 index 614de7c468b..00000000000 --- a/Documentation/perf_counter/perf-record.c +++ /dev/null @@ -1,530 +0,0 @@ - - -#define _GNU_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "../../include/linux/perf_counter.h" - - -/* - * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all - * counters in the current task. 
- */ -#define PR_TASK_PERF_COUNTERS_DISABLE 31 -#define PR_TASK_PERF_COUNTERS_ENABLE 32 - -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) - -#define rdclock() \ -({ \ - struct timespec ts; \ - \ - clock_gettime(CLOCK_MONOTONIC, &ts); \ - ts.tv_sec * 1000000000ULL + ts.tv_nsec; \ -}) - -/* - * Pick up some kernel type conventions: - */ -#define __user -#define asmlinkage - -#ifdef __x86_64__ -#define __NR_perf_counter_open 295 -#define rmb() asm volatile("lfence" ::: "memory") -#define cpu_relax() asm volatile("rep; nop" ::: "memory"); -#endif - -#ifdef __i386__ -#define __NR_perf_counter_open 333 -#define rmb() asm volatile("lfence" ::: "memory") -#define cpu_relax() asm volatile("rep; nop" ::: "memory"); -#endif - -#ifdef __powerpc__ -#define __NR_perf_counter_open 319 -#define rmb() asm volatile ("sync" ::: "memory") -#define cpu_relax() asm volatile ("" ::: "memory"); -#endif - -#define unlikely(x) __builtin_expect(!!(x), 0) -#define min(x, y) ({ \ - typeof(x) _min1 = (x); \ - typeof(y) _min2 = (y); \ - (void) (&_min1 == &_min2); \ - _min1 < _min2 ? _min1 : _min2; }) - -asmlinkage int sys_perf_counter_open( - struct perf_counter_hw_event *hw_event_uptr __user, - pid_t pid, - int cpu, - int group_fd, - unsigned long flags) -{ - return syscall( - __NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags); -} - -#define MAX_COUNTERS 64 -#define MAX_NR_CPUS 256 - -#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id)) - -static int nr_counters = 0; -static __u64 event_id[MAX_COUNTERS] = { }; -static int default_interval = 100000; -static int event_count[MAX_COUNTERS]; -static int fd[MAX_NR_CPUS][MAX_COUNTERS]; -static int nr_cpus = 0; -static unsigned int page_size; -static unsigned int mmap_pages = 16; -static int output; -static char *output_name = "output.perf"; -static int group = 0; -static unsigned int realtime_prio = 0; - -const unsigned int default_count[] = { - 1000000, - 1000000, - 10000, - 10000, - 1000000, - 10000, -}; - -static char *hw_event_names[] = { - "CPU cycles", - "instructions", - "cache references", - "cache misses", - "branches", - "branch misses", - "bus cycles", -}; - -static char *sw_event_names[] = { - "cpu clock ticks", - "task clock ticks", - "pagefaults", - "context switches", - "CPU migrations", - "minor faults", - "major faults", -}; - -struct event_symbol { - __u64 event; - char *symbol; -}; - -static struct event_symbol event_symbols[] = { - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cpu-cycles", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cycles", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), "instructions", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), "cache-references", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), "cache-misses", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branch-instructions", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branches", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES), "branch-misses", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES), "bus-cycles", }, - - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK), "cpu-clock", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), "task-clock", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "page-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN), "minor-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ), "major-faults", }, - {EID(PERF_TYPE_SOFTWARE, 
PERF_COUNT_CONTEXT_SWITCHES), "context-switches", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "cs", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "cpu-migrations", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "migrations", }, -}; - -/* - * Each event can have multiple symbolic names. - * Symbolic names are (almost) exactly matched. - */ -static __u64 match_event_symbols(char *str) -{ - __u64 config, id; - int type; - unsigned int i; - - if (sscanf(str, "r%llx", &config) == 1) - return config | PERF_COUNTER_RAW_MASK; - - if (sscanf(str, "%d:%llu", &type, &id) == 2) - return EID(type, id); - - for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { - if (!strncmp(str, event_symbols[i].symbol, - strlen(event_symbols[i].symbol))) - return event_symbols[i].event; - } - - return ~0ULL; -} - -static int parse_events(char *str) -{ - __u64 config; - -again: - if (nr_counters == MAX_COUNTERS) - return -1; - - config = match_event_symbols(str); - if (config == ~0ULL) - return -1; - - event_id[nr_counters] = config; - nr_counters++; - - str = strstr(str, ","); - if (str) { - str++; - goto again; - } - - return 0; -} - -#define __PERF_COUNTER_FIELD(config, name) \ - ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) - -#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) -#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) -#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) -#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) - -static void display_events_help(void) -{ - unsigned int i; - __u64 e; - - printf( - " -e EVENT --event=EVENT # symbolic-name abbreviations"); - - for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { - int type, id; - - e = event_symbols[i].event; - type = PERF_COUNTER_TYPE(e); - id = PERF_COUNTER_ID(e); - - printf("\n %d:%d: %-20s", - type, id, event_symbols[i].symbol); - } - - printf("\n" - " rNNN: raw PMU events (eventsel+umask)\n\n"); -} - -static void display_help(void) -{ - printf( - "Usage: perf-record []\n" - "perf-record Options (up to %d event types can be specified at once):\n\n", - MAX_COUNTERS); - - display_events_help(); - - printf( - " -c CNT --count=CNT # event period to sample\n" - " -m pages --mmap_pages= # number of mmap data pages\n" - " -o file --output= # output file\n" - " -r prio --realtime= # use RT prio\n" - ); - - exit(0); -} - -static void process_options(int argc, char *argv[]) -{ - int error = 0, counter; - - for (;;) { - int option_index = 0; - /** Options for getopt */ - static struct option long_options[] = { - {"count", required_argument, NULL, 'c'}, - {"event", required_argument, NULL, 'e'}, - {"mmap_pages", required_argument, NULL, 'm'}, - {"output", required_argument, NULL, 'o'}, - {"realtime", required_argument, NULL, 'r'}, - {NULL, 0, NULL, 0 } - }; - int c = getopt_long(argc, argv, "+:c:e:m:o:r:", - long_options, &option_index); - if (c == -1) - break; - - switch (c) { - case 'c': default_interval = atoi(optarg); break; - case 'e': error = parse_events(optarg); break; - case 'm': mmap_pages = atoi(optarg); break; - case 'o': output_name = strdup(optarg); break; - case 'r': realtime_prio = atoi(optarg); break; - default: error = 1; break; - } - } - if (error) - display_help(); - - if (!nr_counters) { - nr_counters = 1; - event_id[0] = 0; - } - - for (counter = 0; counter < nr_counters; counter++) { - if (event_count[counter]) - continue; - - event_count[counter] = default_interval; - } -} - -struct mmap_data { - int 
counter; - void *base; - unsigned int mask; - unsigned int prev; -}; - -static unsigned int mmap_read_head(struct mmap_data *md) -{ - struct perf_counter_mmap_page *pc = md->base; - int head; - - head = pc->data_head; - rmb(); - - return head; -} - -static long events; -static struct timeval last_read, this_read; - -static void mmap_read(struct mmap_data *md) -{ - unsigned int head = mmap_read_head(md); - unsigned int old = md->prev; - unsigned char *data = md->base + page_size; - unsigned long size; - void *buf; - int diff; - - gettimeofday(&this_read, NULL); - - /* - * If we're further behind than half the buffer, there's a chance - * the writer will bite our tail and screw up the events under us. - * - * If we somehow ended up ahead of the head, we got messed up. - * - * In either case, truncate and restart at head. - */ - diff = head - old; - if (diff > md->mask / 2 || diff < 0) { - struct timeval iv; - unsigned long msecs; - - timersub(&this_read, &last_read, &iv); - msecs = iv.tv_sec*1000 + iv.tv_usec/1000; - - fprintf(stderr, "WARNING: failed to keep up with mmap data." - " Last read %lu msecs ago.\n", msecs); - - /* - * head points to a known good entry, start there. - */ - old = head; - } - - last_read = this_read; - - if (old != head) - events++; - - size = head - old; - - if ((old & md->mask) + size != (head & md->mask)) { - buf = &data[old & md->mask]; - size = md->mask + 1 - (old & md->mask); - old += size; - while (size) { - int ret = write(output, buf, size); - if (ret < 0) { - perror("failed to write"); - exit(-1); - } - size -= ret; - buf += ret; - } - } - - buf = &data[old & md->mask]; - size = head - old; - old += size; - while (size) { - int ret = write(output, buf, size); - if (ret < 0) { - perror("failed to write"); - exit(-1); - } - size -= ret; - buf += ret; - } - - md->prev = old; -} - -static volatile int done = 0; - -static void sigchld_handler(int sig) -{ - if (sig == SIGCHLD) - done = 1; -} - -int main(int argc, char *argv[]) -{ - struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; - struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; - struct perf_counter_hw_event hw_event; - int i, counter, group_fd, nr_poll = 0; - pid_t pid; - int ret; - - page_size = sysconf(_SC_PAGE_SIZE); - - process_options(argc, argv); - - nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); - assert(nr_cpus <= MAX_NR_CPUS); - assert(nr_cpus >= 0); - - output = open(output_name, O_CREAT|O_RDWR, S_IRWXU); - if (output < 0) { - perror("failed to create output file"); - exit(-1); - } - - argc -= optind; - argv += optind; - - for (i = 0; i < nr_cpus; i++) { - group_fd = -1; - for (counter = 0; counter < nr_counters; counter++) { - - memset(&hw_event, 0, sizeof(hw_event)); - hw_event.config = event_id[counter]; - hw_event.irq_period = event_count[counter]; - hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID; - hw_event.nmi = 1; - hw_event.mmap = 1; - hw_event.comm = 1; - - fd[i][counter] = sys_perf_counter_open(&hw_event, -1, i, group_fd, 0); - if (fd[i][counter] < 0) { - int err = errno; - printf("kerneltop error: syscall returned with %d (%s)\n", - fd[i][counter], strerror(err)); - if (err == EPERM) - printf("Are you root?\n"); - exit(-1); - } - assert(fd[i][counter] >= 0); - fcntl(fd[i][counter], F_SETFL, O_NONBLOCK); - - /* - * First counter acts as the group leader: - */ - if (group && group_fd == -1) - group_fd = fd[i][counter]; - - event_array[nr_poll].fd = fd[i][counter]; - event_array[nr_poll].events = POLLIN; - nr_poll++; - - mmap_array[i][counter].counter = counter; - 
mmap_array[i][counter].prev = 0; - mmap_array[i][counter].mask = mmap_pages*page_size - 1; - mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size, - PROT_READ, MAP_SHARED, fd[i][counter], 0); - if (mmap_array[i][counter].base == MAP_FAILED) { - printf("kerneltop error: failed to mmap with %d (%s)\n", - errno, strerror(errno)); - exit(-1); - } - } - } - - signal(SIGCHLD, sigchld_handler); - - pid = fork(); - if (pid < 0) - perror("failed to fork"); - - if (!pid) { - if (execvp(argv[0], argv)) { - perror(argv[0]); - exit(-1); - } - } - - if (realtime_prio) { - struct sched_param param; - - param.sched_priority = realtime_prio; - if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { - printf("Could not set realtime priority.\n"); - exit(-1); - } - } - - /* - * TODO: store the current /proc/$/maps information somewhere - */ - - while (!done) { - int hits = events; - - for (i = 0; i < nr_cpus; i++) { - for (counter = 0; counter < nr_counters; counter++) - mmap_read(&mmap_array[i][counter]); - } - - if (hits == events) - ret = poll(event_array, nr_poll, 100); - } - - return 0; -} diff --git a/Documentation/perf_counter/perf.c b/Documentation/perf_counter/perf.c index ff8658f2a2f..e849dd66b5e 100644 --- a/Documentation/perf_counter/perf.c +++ b/Documentation/perf_counter/perf.c @@ -248,8 +248,9 @@ static void handle_internal_command(int argc, const char **argv) { const char *cmd = argv[0]; static struct cmd_struct commands[] = { - { "top", cmd_top, 0 }, + { "record", cmd_record, 0 }, { "stat", cmd_stat, 0 }, + { "top", cmd_top, 0 }, }; int i; static const char ext[] = STRIP_EXTENSION; -- cgit v1.2.3 From cc13a5913797365b683212f5fca5fb04bb3582eb Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 20 Apr 2009 16:01:30 +0200 Subject: perf_counter tools: fix --version Hook up the 'perf version' built-in command. 
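For context, handle_internal_command() resolves argv[0] against the commands[] table; the lookup is essentially the loop below (a sketch pieced together from the fragments visible in these diffs - the exact perf.c body may differ):

	const char *cmd = argv[0];
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(commands); i++) {
		struct cmd_struct *p = commands + i;
		if (strcmp(p->cmd, cmd))
			continue;
		exit(p->fn(argc, argv, NULL));
	}

With the { "version", cmd_version, 0 } entry in place, 'perf version' resolves to cmd_version() instead of falling through as an unknown command.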
Signed-off-by: Ingo Molnar
---
 Documentation/perf_counter/builtin.h | 1 +
 Documentation/perf_counter/perf.c    | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/Documentation/perf_counter/builtin.h b/Documentation/perf_counter/builtin.h
index 5854b1715f5..aec5ae388e6 100644
--- a/Documentation/perf_counter/builtin.h
+++ b/Documentation/perf_counter/builtin.h
@@ -17,4 +17,5 @@ extern int check_pager_config(const char *cmd);
 extern int cmd_record(int argc, const char **argv, const char *prefix);
 extern int cmd_stat(int argc, const char **argv, const char *prefix);
 extern int cmd_top(int argc, const char **argv, const char *prefix);
+extern int cmd_version(int argc, const char **argv, const char *prefix);
 #endif

diff --git a/Documentation/perf_counter/perf.c b/Documentation/perf_counter/perf.c
index e849dd66b5e..20d508c7729 100644
--- a/Documentation/perf_counter/perf.c
+++ b/Documentation/perf_counter/perf.c
@@ -5,7 +5,7 @@
 #include "run-command.h"
 
 const char perf_usage_string[] =
-	"perf [--version] [--exec-path[=PERF_EXEC_PATH]] [--html-path] [-p|--paginate|--no-pager] [--bare] [--perf-dir=PERF_DIR] [--work-tree=PERF_WORK_TREE] [--help] COMMAND [ARGS]";
+	"perf [--version] [--help] COMMAND [ARGS]";
 
 const char perf_more_info_string[] =
 	"See 'perf help COMMAND' for more information on a specific command.";
@@ -251,6 +251,7 @@ static void handle_internal_command(int argc, const char **argv)
 		{ "record", cmd_record, 0 },
 		{ "stat", cmd_stat, 0 },
 		{ "top", cmd_top, 0 },
+		{ "version", cmd_version, 0 },
 	};
 	int i;
 	static const char ext[] = STRIP_EXTENSION;
--
cgit v1.2.3


From 6142fdd968c76a0f2ee753c39bd5be8d1bb4ef04 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 20 Apr 2009 16:05:55 +0200
Subject: perf_counter tools: add 'perf help'

Signed-off-by: Ingo Molnar
---
 .../perf_counter/Documentation/perf-help.txt | 38 ++++++++++++++++++++++
 Documentation/perf_counter/builtin.h         |  1 +
 Documentation/perf_counter/perf.c            |  1 +
 3 files changed, 40 insertions(+)
 create mode 100644 Documentation/perf_counter/Documentation/perf-help.txt

diff --git a/Documentation/perf_counter/Documentation/perf-help.txt b/Documentation/perf_counter/Documentation/perf-help.txt
new file mode 100644
index 00000000000..f85fed5a7ed
--- /dev/null
+++ b/Documentation/perf_counter/Documentation/perf-help.txt
@@ -0,0 +1,38 @@
+perf-help(1)
+============
+
+NAME
+----
+perf-help - display help information about perf
+
+SYNOPSIS
+--------
+'perf help' [-a|--all] [COMMAND]
+
+DESCRIPTION
+-----------
+
+With no options and no COMMAND given, the synopsis of the 'perf'
+command and a list of the most commonly used perf commands are printed
+on the standard output.
+
+If the option '--all' or '-a' is given, then all available commands are
+printed on the standard output.
+
+If a perf command is named, a manual page for that command is brought
+up. The 'man' program is used by default for this purpose, but this
+can be overridden by other options or configuration variables.
+
+Note that `perf --help ...` is identical to `perf help ...` because the
+former is internally converted into the latter.
+
+OPTIONS
+-------
+-a::
+--all::
+	Prints all the available commands on the standard output. This
+	option supersedes any other option.
+ +PERF +---- +Part of the linkperf:perf[1] suite diff --git a/Documentation/perf_counter/builtin.h b/Documentation/perf_counter/builtin.h index aec5ae388e6..800f86c1d44 100644 --- a/Documentation/perf_counter/builtin.h +++ b/Documentation/perf_counter/builtin.h @@ -14,6 +14,7 @@ extern void prune_packed_objects(int); extern int read_line_with_nul(char *buf, int size, FILE *file); extern int check_pager_config(const char *cmd); +extern int cmd_help(int argc, const char **argv, const char *prefix); extern int cmd_record(int argc, const char **argv, const char *prefix); extern int cmd_stat(int argc, const char **argv, const char *prefix); extern int cmd_top(int argc, const char **argv, const char *prefix); diff --git a/Documentation/perf_counter/perf.c b/Documentation/perf_counter/perf.c index 20d508c7729..8d6faecdc15 100644 --- a/Documentation/perf_counter/perf.c +++ b/Documentation/perf_counter/perf.c @@ -248,6 +248,7 @@ static void handle_internal_command(int argc, const char **argv) { const char *cmd = argv[0]; static struct cmd_struct commands[] = { + { "help", cmd_help, 0 }, { "record", cmd_record, 0 }, { "stat", cmd_stat, 0 }, { "top", cmd_top, 0 }, -- cgit v1.2.3 From 125e702b09a28a502e145fb434678ee27720fc48 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 20 Apr 2009 16:13:46 +0200 Subject: perf_counter tools: fix 'make install' Remove Git leftovers from this area. Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index b6c665eb22e..690045e4969 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -255,9 +255,9 @@ EXTRA_PROGRAMS = PROGRAMS += $(EXTRA_PROGRAMS) # -# None right now: +# Single 'perf' binary right now: # -# PROGRAMS += perf-fast-import$X +PROGRAMS += perf # List built-in command $C whose implementation cmd_$C() is not in # builtin-$C.o but is linked in as part of some other command. 
@@ -762,24 +762,9 @@ install: all
 	$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
 	$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
 	$(INSTALL) $(ALL_PROGRAMS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
-	$(INSTALL) perf$X perf-upload-pack$X perf-receive-pack$X perf-upload-archive$X perf-shell$X perf-cvsserver '$(DESTDIR_SQ)$(bindir_SQ)'
-	$(MAKE) -C templates DESTDIR='$(DESTDIR_SQ)' install
-	$(MAKE) -C perl prefix='$(prefix_SQ)' DESTDIR='$(DESTDIR_SQ)' install
 ifneq (,$X)
 	$(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), $(RM) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$p';)
 endif
-	bindir=$$(cd '$(DESTDIR_SQ)$(bindir_SQ)' && pwd) && \
-	execdir=$$(cd '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' && pwd) && \
-	{ $(RM) "$$execdir/perf-add$X" && \
-		ln "$$bindir/perf$X" "$$execdir/perf-add$X" 2>/dev/null || \
-		cp "$$bindir/perf$X" "$$execdir/perf-add$X"; } && \
-	{ for p in $(filter-out perf-add$X,$(BUILT_INS)); do \
-		$(RM) "$$execdir/$$p" && \
-		ln "$$execdir/perf-add$X" "$$execdir/$$p" 2>/dev/null || \
-		ln -s "perf-add$X" "$$execdir/$$p" 2>/dev/null || \
-		cp "$$execdir/perf-add$X" "$$execdir/$$p" || exit; \
-	done } && \
-	./check_bindir "z$$bindir" "z$$execdir" "$$bindir/perf-add$X"
 
 ### Maintainer's dist rules
--
cgit v1.2.3


From f1f9b3b1795da8625e0e6096813c9d18d4a344ce Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 20 Apr 2009 20:38:21 +0200
Subject: perfcounters, sched: remove __task_delta_exec()

This function was orphaned by the latest round of sw-counter cleanups.

[ Impact: remove unused kernel function ]

Signed-off-by: Ingo Molnar
---
 include/linux/kernel_stat.h |  1 -
 kernel/sched.c              | 23 -----------------------
 2 files changed, 24 deletions(-)

diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 080d1fd461d..a77c6007dc9 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -85,7 +85,6 @@ static inline unsigned int kstat_irqs(unsigned int irq)
 /*
  * Lock/unlock the current runqueue - to extract task statistics:
  */
-extern unsigned long long __task_delta_exec(struct task_struct *tsk, int update);
 extern unsigned long long task_delta_exec(struct task_struct *);
 
 extern void account_user_time(struct task_struct *, cputime_t, cputime_t);

diff --git a/kernel/sched.c b/kernel/sched.c
index b66a08c2480..a69278eef42 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4546,29 +4546,6 @@ DEFINE_PER_CPU(struct kernel_stat, kstat);
 
 EXPORT_PER_CPU_SYMBOL(kstat);
 
-/*
- * Return any ns on the sched_clock that have not yet been banked in
- * @p in case that task is currently running.
- */
-unsigned long long __task_delta_exec(struct task_struct *p, int update)
-{
-	s64 delta_exec;
-	struct rq *rq;
-
-	rq = task_rq(p);
-	WARN_ON_ONCE(!runqueue_is_locked());
-	WARN_ON_ONCE(!task_current(rq, p));
-
-	if (update)
-		update_rq_clock(rq);
-
-	delta_exec = rq->clock - p->se.exec_start;
-
-	WARN_ON_ONCE(delta_exec < 0);
-
-	return delta_exec;
-}
-
 /*
  * Return any ns on the sched_clock that have not yet been banked in
  * @p in case that task is currently running.
--
cgit v1.2.3


From 148be2c15d4a866fbc7a8f55342e4fd4de73be61 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 27 Apr 2009 08:02:14 +0200
Subject: perf_counter tools: move helper library to util/*

Clean up the top level directory a bit by moving all the helper
libraries to util/*.[ch].
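
With the helpers under util/, the built-ins pick them up via the util/
path prefix, for example (illustrative, mirroring the include updates
in the diff below):

	#include "util/util.h"
	#include "util/cache.h"
	#include "util/parse-options.h"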
Signed-off-by: Ingo Molnar
---
 Documentation/perf_counter/Makefile              |  60 +-
 Documentation/perf_counter/PERF-VERSION-GEN      |  42 -
 Documentation/perf_counter/abspath.c             | 117 ---
 Documentation/perf_counter/alias.c               |  77 --
 Documentation/perf_counter/builtin-help.c        |  10 +-
 Documentation/perf_counter/builtin-stat.c        |   2 +-
 Documentation/perf_counter/builtin-top.c         |   2 +-
 Documentation/perf_counter/builtin.h             |   4 +-
 Documentation/perf_counter/cache.h               | 117 ---
 Documentation/perf_counter/config.c              | 873 ---------------------
 Documentation/perf_counter/ctype.c               |  26 -
 Documentation/perf_counter/exec_cmd.c            | 165 ----
 Documentation/perf_counter/exec_cmd.h            |  13 -
 Documentation/perf_counter/generate-cmdlist.sh   |  24 -
 Documentation/perf_counter/help.c                | 366 ---------
 Documentation/perf_counter/help.h                |  29 -
 Documentation/perf_counter/levenshtein.c         |  84 --
 Documentation/perf_counter/levenshtein.h         |   8 -
 Documentation/perf_counter/parse-options.c       | 495 ------------
 Documentation/perf_counter/parse-options.h       | 172 ----
 Documentation/perf_counter/path.c                | 353 ---------
 Documentation/perf_counter/perf.c                |   8 +-
 Documentation/perf_counter/quote.c               | 478 -----------
 Documentation/perf_counter/quote.h               |  68 --
 Documentation/perf_counter/run-command.c         | 395 ----------
 Documentation/perf_counter/run-command.h         |  93 ---
 Documentation/perf_counter/strbuf.c              | 359 ---------
 Documentation/perf_counter/strbuf.h              | 137 ----
 Documentation/perf_counter/usage.c               |  80 --
 Documentation/perf_counter/util.h                | 408 ----------
 Documentation/perf_counter/util/PERF-VERSION-GEN |  42 +
 Documentation/perf_counter/util/abspath.c        | 117 +++
 Documentation/perf_counter/util/alias.c          |  77 ++
 Documentation/perf_counter/util/cache.h          | 117 +++
 Documentation/perf_counter/util/config.c         | 873 +++++++++++++++++++++
 Documentation/perf_counter/util/ctype.c          |  26 +
 Documentation/perf_counter/util/exec_cmd.c       | 165 ++++
 Documentation/perf_counter/util/exec_cmd.h       |  13 +
 .../perf_counter/util/generate-cmdlist.sh        |  24 +
 Documentation/perf_counter/util/help.c           | 366 +++
 Documentation/perf_counter/util/help.h           |  29 +
 Documentation/perf_counter/util/levenshtein.c    |  84 ++
 Documentation/perf_counter/util/levenshtein.h    |   8 +
 Documentation/perf_counter/util/parse-options.c  | 492 ++++++
 Documentation/perf_counter/util/parse-options.h  | 172 ++++
 Documentation/perf_counter/util/path.c           | 353 +++
 Documentation/perf_counter/util/quote.c          | 478 +++++
 Documentation/perf_counter/util/quote.h          |  68 ++
 Documentation/perf_counter/util/run-command.c    | 395 ++++
 Documentation/perf_counter/util/run-command.h    |  93 +++
 Documentation/perf_counter/util/strbuf.c         | 359 +++
 Documentation/perf_counter/util/strbuf.h         | 137 ++++
 Documentation/perf_counter/util/usage.c          |  80 ++
 Documentation/perf_counter/util/util.h           | 408 ++++
 Documentation/perf_counter/util/wrapper.c        | 206 +++
 Documentation/perf_counter/wrapper.c             | 206 -----
 56 files changed, 5226 insertions(+), 5227 deletions(-)
 delete mode 100755 Documentation/perf_counter/PERF-VERSION-GEN
 delete mode 100644 Documentation/perf_counter/abspath.c
 delete mode 100644 Documentation/perf_counter/alias.c
 delete mode 100644 Documentation/perf_counter/cache.h
 delete mode 100644 Documentation/perf_counter/config.c
 delete mode 100644 Documentation/perf_counter/ctype.c
 delete mode 100644 Documentation/perf_counter/exec_cmd.c
 delete mode 100644 Documentation/perf_counter/exec_cmd.h
 delete mode 100755 Documentation/perf_counter/generate-cmdlist.sh
 delete mode 100644 Documentation/perf_counter/help.c
 delete mode 100644 Documentation/perf_counter/help.h
 delete mode 100644 Documentation/perf_counter/levenshtein.c
 delete mode 100644 Documentation/perf_counter/levenshtein.h
 delete mode 100644 Documentation/perf_counter/parse-options.c
 delete mode 100644 Documentation/perf_counter/parse-options.h
 delete mode 100644 Documentation/perf_counter/path.c
 delete mode 100644 Documentation/perf_counter/quote.c
 delete mode 100644 Documentation/perf_counter/quote.h
 delete mode 100644 Documentation/perf_counter/run-command.c
 delete mode 100644 Documentation/perf_counter/run-command.h
 delete mode 100644 Documentation/perf_counter/strbuf.c
 delete mode 100644 Documentation/perf_counter/strbuf.h
 delete mode 100644 Documentation/perf_counter/usage.c
 delete mode 100644 Documentation/perf_counter/util.h
 create mode 100755 Documentation/perf_counter/util/PERF-VERSION-GEN
 create mode 100644 Documentation/perf_counter/util/abspath.c
 create mode 100644 Documentation/perf_counter/util/alias.c
 create mode 100644 Documentation/perf_counter/util/cache.h
 create mode 100644 Documentation/perf_counter/util/config.c
 create mode 100644 Documentation/perf_counter/util/ctype.c
 create mode 100644 Documentation/perf_counter/util/exec_cmd.c
 create mode 100644 Documentation/perf_counter/util/exec_cmd.h
 create mode 100755 Documentation/perf_counter/util/generate-cmdlist.sh
 create mode 100644 Documentation/perf_counter/util/help.c
 create mode 100644 Documentation/perf_counter/util/help.h
 create mode 100644 Documentation/perf_counter/util/levenshtein.c
 create mode 100644 Documentation/perf_counter/util/levenshtein.h
 create mode 100644 Documentation/perf_counter/util/parse-options.c
 create mode 100644 Documentation/perf_counter/util/parse-options.h
 create mode 100644 Documentation/perf_counter/util/path.c
 create mode 100644 Documentation/perf_counter/util/quote.c
 create mode 100644 Documentation/perf_counter/util/quote.h
 create mode 100644 Documentation/perf_counter/util/run-command.c
 create mode 100644 Documentation/perf_counter/util/run-command.h
 create mode 100644 Documentation/perf_counter/util/strbuf.c
 create mode 100644 Documentation/perf_counter/util/strbuf.h
 create mode 100644 Documentation/perf_counter/util/usage.c
 create mode 100644 Documentation/perf_counter/util/util.h
 create mode 100644 Documentation/perf_counter/util/wrapper.c
 delete mode 100644 Documentation/perf_counter/wrapper.c

diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile
index 690045e4969..543ccf28ac4 100644
--- a/Documentation/perf_counter/Makefile
+++ b/Documentation/perf_counter/Makefile
@@ -147,7 +147,7 @@ all::
 # broken, or spawning external process is slower than built-in grep perf has).
PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE - @$(SHELL_PATH) ./PERF-VERSION-GEN + @$(SHELL_PATH) util/PERF-VERSION-GEN -include PERF-VERSION-FILE uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not') @@ -287,26 +287,28 @@ export PERL_PATH LIB_FILE=libperf.a LIB_H += ../../include/linux/perf_counter.h -LIB_H += levenshtein.h -LIB_H += parse-options.h -LIB_H += quote.h -LIB_H += strbuf.h -LIB_H += run-command.h - -LIB_OBJS += abspath.o -LIB_OBJS += alias.o -LIB_OBJS += config.o -LIB_OBJS += ctype.o -LIB_OBJS += exec_cmd.o -LIB_OBJS += help.o -LIB_OBJS += levenshtein.o -LIB_OBJS += parse-options.o -LIB_OBJS += path.o -LIB_OBJS += run-command.o -LIB_OBJS += quote.o -LIB_OBJS += strbuf.o -LIB_OBJS += usage.o -LIB_OBJS += wrapper.o +LIB_H += util/levenshtein.h +LIB_H += util/parse-options.h +LIB_H += util/quote.h +LIB_H += util/util.h +LIB_H += util/help.h +LIB_H += util/strbuf.h +LIB_H += util/run-command.h + +LIB_OBJS += util/abspath.o +LIB_OBJS += util/alias.o +LIB_OBJS += util/config.o +LIB_OBJS += util/ctype.o +LIB_OBJS += util/exec_cmd.o +LIB_OBJS += util/help.o +LIB_OBJS += util/levenshtein.o +LIB_OBJS += util/parse-options.o +LIB_OBJS += util/path.o +LIB_OBJS += util/run-command.o +LIB_OBJS += util/quote.o +LIB_OBJS += util/strbuf.o +LIB_OBJS += util/usage.o +LIB_OBJS += util/wrapper.o BUILTIN_OBJS += builtin-help.o BUILTIN_OBJS += builtin-record.o @@ -620,10 +622,10 @@ $(BUILT_INS): perf$X ln -s perf$X $@ 2>/dev/null || \ cp perf$X $@ -common-cmds.h: ./generate-cmdlist.sh command-list.txt +common-cmds.h: util/generate-cmdlist.sh command-list.txt common-cmds.h: $(wildcard Documentation/perf-*.txt) - $(QUIET_GEN)./generate-cmdlist.sh > $@+ && mv $@+ $@ + $(QUIET_GEN)util/generate-cmdlist.sh > $@+ && mv $@+ $@ $(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh $(QUIET_GEN)$(RM) $@ $@+ && \ @@ -656,7 +658,7 @@ perf.o perf.spec \ %.o: %.S $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) $< -exec_cmd.o: exec_cmd.c PERF-CFLAGS +util/exec_cmd.o: util/exec_cmd.c PERF-CFLAGS $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \ '-DPERF_EXEC_PATH="$(perfexecdir_SQ)"' \ '-DBINDIR="$(bindir_relative_SQ)"' \ @@ -666,7 +668,7 @@ exec_cmd.o: exec_cmd.c PERF-CFLAGS builtin-init-db.o: builtin-init-db.c PERF-CFLAGS $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DDEFAULT_PERF_TEMPLATE_DIR='"$(template_dir_SQ)"' $< -config.o: config.c PERF-CFLAGS +util/config.o: util/config.c PERF-CFLAGS $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< perf-%$X: %.o $(PERFLIBS) @@ -729,7 +731,7 @@ export NO_SVN_TESTS check: common-cmds.h if sparse; \ then \ - for i in *.c; \ + for i in *.c */*.c; \ do \ sparse $(ALL_CFLAGS) $(SPARSE_FLAGS) $$i || exit; \ done; \ @@ -795,7 +797,7 @@ distclean: clean $(RM) configure clean: - $(RM) *.o $(LIB_FILE) + $(RM) *.o */*.o $(LIB_FILE) $(RM) $(ALL_PROGRAMS) $(BUILT_INS) perf$X $(RM) $(TEST_PROGRAMS) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h TAGS tags cscope* @@ -836,7 +838,7 @@ coverage-build: coverage-clean -j1 test coverage-report: - gcov -b *.c - grep '^function.*called 0 ' *.c.gcov \ + gcov -b *.c */*.c + grep '^function.*called 0 ' *.c.gcov */*.c.gcov \ | sed -e 's/\([^:]*\)\.gcov: *function \([^ ]*\) called.*/\1: \2/' \ | tee coverage-untested-functions diff --git a/Documentation/perf_counter/PERF-VERSION-GEN b/Documentation/perf_counter/PERF-VERSION-GEN deleted file mode 100755 index c561d1538c0..00000000000 --- a/Documentation/perf_counter/PERF-VERSION-GEN +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/sh - -GVF=PERF-VERSION-FILE -DEF_VER=v0.0.1.PERF - 
-LF=' -' - -# First see if there is a version file (included in release tarballs), -# then try git-describe, then default. -if test -f version -then - VN=$(cat version) || VN="$DEF_VER" -elif test -d .git -o -f .git && - VN=$(git describe --abbrev=4 HEAD 2>/dev/null) && - case "$VN" in - *$LF*) (exit 1) ;; - v[0-9]*) - git update-index -q --refresh - test -z "$(git diff-index --name-only HEAD --)" || - VN="$VN-dirty" ;; - esac -then - VN=$(echo "$VN" | sed -e 's/-/./g'); -else - VN="$DEF_VER" -fi - -VN=$(expr "$VN" : v*'\(.*\)') - -if test -r $GVF -then - VC=$(sed -e 's/^PERF_VERSION = //' <$GVF) -else - VC=unset -fi -test "$VN" = "$VC" || { - echo >&2 "PERF_VERSION = $VN" - echo "PERF_VERSION = $VN" >$GVF -} - - diff --git a/Documentation/perf_counter/abspath.c b/Documentation/perf_counter/abspath.c deleted file mode 100644 index 649f34f8336..00000000000 --- a/Documentation/perf_counter/abspath.c +++ /dev/null @@ -1,117 +0,0 @@ -#include "cache.h" - -/* - * Do not use this for inspecting *tracked* content. When path is a - * symlink to a directory, we do not want to say it is a directory when - * dealing with tracked content in the working tree. - */ -int is_directory(const char *path) -{ - struct stat st; - return (!stat(path, &st) && S_ISDIR(st.st_mode)); -} - -/* We allow "recursive" symbolic links. Only within reason, though. */ -#define MAXDEPTH 5 - -const char *make_absolute_path(const char *path) -{ - static char bufs[2][PATH_MAX + 1], *buf = bufs[0], *next_buf = bufs[1]; - char cwd[1024] = ""; - int buf_index = 1, len; - - int depth = MAXDEPTH; - char *last_elem = NULL; - struct stat st; - - if (strlcpy(buf, path, PATH_MAX) >= PATH_MAX) - die ("Too long path: %.*s", 60, path); - - while (depth--) { - if (!is_directory(buf)) { - char *last_slash = strrchr(buf, '/'); - if (last_slash) { - *last_slash = '\0'; - last_elem = xstrdup(last_slash + 1); - } else { - last_elem = xstrdup(buf); - *buf = '\0'; - } - } - - if (*buf) { - if (!*cwd && !getcwd(cwd, sizeof(cwd))) - die ("Could not get current working directory"); - - if (chdir(buf)) - die ("Could not switch to '%s'", buf); - } - if (!getcwd(buf, PATH_MAX)) - die ("Could not get current working directory"); - - if (last_elem) { - int len = strlen(buf); - if (len + strlen(last_elem) + 2 > PATH_MAX) - die ("Too long path name: '%s/%s'", - buf, last_elem); - buf[len] = '/'; - strcpy(buf + len + 1, last_elem); - free(last_elem); - last_elem = NULL; - } - - if (!lstat(buf, &st) && S_ISLNK(st.st_mode)) { - len = readlink(buf, next_buf, PATH_MAX); - if (len < 0) - die ("Invalid symlink: %s", buf); - if (PATH_MAX <= len) - die("symbolic link too long: %s", buf); - next_buf[len] = '\0'; - buf = next_buf; - buf_index = 1 - buf_index; - next_buf = bufs[buf_index]; - } else - break; - } - - if (*cwd && chdir(cwd)) - die ("Could not change back to '%s'", cwd); - - return buf; -} - -static const char *get_pwd_cwd(void) -{ - static char cwd[PATH_MAX + 1]; - char *pwd; - struct stat cwd_stat, pwd_stat; - if (getcwd(cwd, PATH_MAX) == NULL) - return NULL; - pwd = getenv("PWD"); - if (pwd && strcmp(pwd, cwd)) { - stat(cwd, &cwd_stat); - if (!stat(pwd, &pwd_stat) && - pwd_stat.st_dev == cwd_stat.st_dev && - pwd_stat.st_ino == cwd_stat.st_ino) { - strlcpy(cwd, pwd, PATH_MAX); - } - } - return cwd; -} - -const char *make_nonrelative_path(const char *path) -{ - static char buf[PATH_MAX + 1]; - - if (is_absolute_path(path)) { - if (strlcpy(buf, path, PATH_MAX) >= PATH_MAX) - die("Too long path: %.*s", 60, path); - } else { - const char *cwd = 
get_pwd_cwd(); - if (!cwd) - die("Cannot determine the current working directory"); - if (snprintf(buf, PATH_MAX, "%s/%s", cwd, path) >= PATH_MAX) - die("Too long path: %.*s", 60, path); - } - return buf; -} diff --git a/Documentation/perf_counter/alias.c b/Documentation/perf_counter/alias.c deleted file mode 100644 index 9b3dd2b428d..00000000000 --- a/Documentation/perf_counter/alias.c +++ /dev/null @@ -1,77 +0,0 @@ -#include "cache.h" - -static const char *alias_key; -static char *alias_val; - -static int alias_lookup_cb(const char *k, const char *v, void *cb) -{ - if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) { - if (!v) - return config_error_nonbool(k); - alias_val = strdup(v); - return 0; - } - return 0; -} - -char *alias_lookup(const char *alias) -{ - alias_key = alias; - alias_val = NULL; - perf_config(alias_lookup_cb, NULL); - return alias_val; -} - -int split_cmdline(char *cmdline, const char ***argv) -{ - int src, dst, count = 0, size = 16; - char quoted = 0; - - *argv = malloc(sizeof(char*) * size); - - /* split alias_string */ - (*argv)[count++] = cmdline; - for (src = dst = 0; cmdline[src];) { - char c = cmdline[src]; - if (!quoted && isspace(c)) { - cmdline[dst++] = 0; - while (cmdline[++src] - && isspace(cmdline[src])) - ; /* skip */ - if (count >= size) { - size += 16; - *argv = realloc(*argv, sizeof(char*) * size); - } - (*argv)[count++] = cmdline + dst; - } else if (!quoted && (c == '\'' || c == '"')) { - quoted = c; - src++; - } else if (c == quoted) { - quoted = 0; - src++; - } else { - if (c == '\\' && quoted != '\'') { - src++; - c = cmdline[src]; - if (!c) { - free(*argv); - *argv = NULL; - return error("cmdline ends with \\"); - } - } - cmdline[dst++] = c; - src++; - } - } - - cmdline[dst] = 0; - - if (quoted) { - free(*argv); - *argv = NULL; - return error("unclosed quote"); - } - - return count; -} - diff --git a/Documentation/perf_counter/builtin-help.c b/Documentation/perf_counter/builtin-help.c index a136d619db3..6616de0ef05 100644 --- a/Documentation/perf_counter/builtin-help.c +++ b/Documentation/perf_counter/builtin-help.c @@ -3,13 +3,13 @@ * * Builtin help command */ -#include "cache.h" +#include "util/cache.h" #include "builtin.h" -#include "exec_cmd.h" +#include "util/exec_cmd.h" #include "common-cmds.h" -#include "parse-options.h" -#include "run-command.h" -#include "help.h" +#include "util/parse-options.h" +#include "util/run-command.h" +#include "util/help.h" static struct man_viewer_list { struct man_viewer_list *next; diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index 169a2d1783f..d7ace631fc4 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -61,7 +61,7 @@ * Released under the GPL v2. (and only v2, not any later version) */ -#include "util.h" +#include "util/util.h" #include #include diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 98e8690b6bc..dea016fa41e 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -42,7 +42,7 @@ * Released under the GPL v2. 
(and only v2, not any later version) */ -#include "util.h" +#include "util/util.h" #include #include diff --git a/Documentation/perf_counter/builtin.h b/Documentation/perf_counter/builtin.h index 800f86c1d44..d32318aed8c 100644 --- a/Documentation/perf_counter/builtin.h +++ b/Documentation/perf_counter/builtin.h @@ -1,8 +1,8 @@ #ifndef BUILTIN_H #define BUILTIN_H -#include "util.h" -#include "strbuf.h" +#include "util/util.h" +#include "util/strbuf.h" extern const char perf_version_string[]; extern const char perf_usage_string[]; diff --git a/Documentation/perf_counter/cache.h b/Documentation/perf_counter/cache.h deleted file mode 100644 index 71080512fa8..00000000000 --- a/Documentation/perf_counter/cache.h +++ /dev/null @@ -1,117 +0,0 @@ -#ifndef CACHE_H -#define CACHE_H - -#include "util.h" -#include "strbuf.h" - -#define PERF_DIR_ENVIRONMENT "PERF_DIR" -#define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE" -#define DEFAULT_PERF_DIR_ENVIRONMENT ".perf" -#define DB_ENVIRONMENT "PERF_OBJECT_DIRECTORY" -#define INDEX_ENVIRONMENT "PERF_INDEX_FILE" -#define GRAFT_ENVIRONMENT "PERF_GRAFT_FILE" -#define TEMPLATE_DIR_ENVIRONMENT "PERF_TEMPLATE_DIR" -#define CONFIG_ENVIRONMENT "PERF_CONFIG" -#define EXEC_PATH_ENVIRONMENT "PERF_EXEC_PATH" -#define CEILING_DIRECTORIES_ENVIRONMENT "PERF_CEILING_DIRECTORIES" -#define PERFATTRIBUTES_FILE ".perfattributes" -#define INFOATTRIBUTES_FILE "info/attributes" -#define ATTRIBUTE_MACRO_PREFIX "[attr]" - -typedef int (*config_fn_t)(const char *, const char *, void *); -extern int perf_default_config(const char *, const char *, void *); -extern int perf_config_from_file(config_fn_t fn, const char *, void *); -extern int perf_config(config_fn_t fn, void *); -extern int perf_parse_ulong(const char *, unsigned long *); -extern int perf_config_int(const char *, const char *); -extern unsigned long perf_config_ulong(const char *, const char *); -extern int perf_config_bool_or_int(const char *, const char *, int *); -extern int perf_config_bool(const char *, const char *); -extern int perf_config_string(const char **, const char *, const char *); -extern int perf_config_set(const char *, const char *); -extern int perf_config_set_multivar(const char *, const char *, const char *, int); -extern int perf_config_rename_section(const char *, const char *); -extern const char *perf_etc_perfconfig(void); -extern int check_repository_format_version(const char *var, const char *value, void *cb); -extern int perf_config_system(void); -extern int perf_config_global(void); -extern int config_error_nonbool(const char *); -extern const char *config_exclusive_filename; - -#define MAX_PERFNAME (1000) -extern char perf_default_email[MAX_PERFNAME]; -extern char perf_default_name[MAX_PERFNAME]; -extern int user_ident_explicitly_given; - -extern const char *perf_log_output_encoding; -extern const char *perf_mailmap_file; - -/* IO helper functions */ -extern void maybe_flush_or_die(FILE *, const char *); -extern int copy_fd(int ifd, int ofd); -extern int copy_file(const char *dst, const char *src, int mode); -extern ssize_t read_in_full(int fd, void *buf, size_t count); -extern ssize_t write_in_full(int fd, const void *buf, size_t count); -extern void write_or_die(int fd, const void *buf, size_t count); -extern int write_or_whine(int fd, const void *buf, size_t count, const char *msg); -extern int write_or_whine_pipe(int fd, const void *buf, size_t count, const char *msg); -extern void fsync_or_die(int fd, const char *); - -/* pager.c */ -extern void setup_pager(void); -extern const char 
*pager_program; -extern int pager_in_use(void); -extern int pager_use_color; - -extern const char *editor_program; -extern const char *excludes_file; - -char *alias_lookup(const char *alias); -int split_cmdline(char *cmdline, const char ***argv); - -#define alloc_nr(x) (((x)+16)*3/2) - -/* - * Realloc the buffer pointed at by variable 'x' so that it can hold - * at least 'nr' entries; the number of entries currently allocated - * is 'alloc', using the standard growing factor alloc_nr() macro. - * - * DO NOT USE any expression with side-effect for 'x' or 'alloc'. - */ -#define ALLOC_GROW(x, nr, alloc) \ - do { \ - if ((nr) > alloc) { \ - if (alloc_nr(alloc) < (nr)) \ - alloc = (nr); \ - else \ - alloc = alloc_nr(alloc); \ - x = xrealloc((x), alloc * sizeof(*(x))); \ - } \ - } while(0) - - -static inline int is_absolute_path(const char *path) -{ - return path[0] == '/'; -} - -const char *make_absolute_path(const char *path); -const char *make_nonrelative_path(const char *path); -const char *make_relative_path(const char *abs, const char *base); -int normalize_path_copy(char *dst, const char *src); -int longest_ancestor_length(const char *path, const char *prefix_list); -char *strip_path_suffix(const char *path, const char *suffix); - -extern char *mkpath(const char *fmt, ...) __attribute__((format (printf, 1, 2))); -extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2))); - -extern char *mksnpath(char *buf, size_t n, const char *fmt, ...) - __attribute__((format (printf, 3, 4))); -extern char *perf_snpath(char *buf, size_t n, const char *fmt, ...) - __attribute__((format (printf, 3, 4))); -extern char *perf_pathdup(const char *fmt, ...) - __attribute__((format (printf, 1, 2))); - -extern size_t strlcpy(char *dest, const char *src, size_t size); - -#endif /* CACHE_H */ diff --git a/Documentation/perf_counter/config.c b/Documentation/perf_counter/config.c deleted file mode 100644 index 3dd13faa6a2..00000000000 --- a/Documentation/perf_counter/config.c +++ /dev/null @@ -1,873 +0,0 @@ -/* - * GIT - The information manager from hell - * - * Copyright (C) Linus Torvalds, 2005 - * Copyright (C) Johannes Schindelin, 2005 - * - */ -#include "util.h" -#include "cache.h" -#include "exec_cmd.h" - -#define MAXNAME (256) - -static FILE *config_file; -static const char *config_file_name; -static int config_linenr; -static int config_file_eof; - -const char *config_exclusive_filename = NULL; - -static int get_next_char(void) -{ - int c; - FILE *f; - - c = '\n'; - if ((f = config_file) != NULL) { - c = fgetc(f); - if (c == '\r') { - /* DOS like systems */ - c = fgetc(f); - if (c != '\n') { - ungetc(c, f); - c = '\r'; - } - } - if (c == '\n') - config_linenr++; - if (c == EOF) { - config_file_eof = 1; - c = '\n'; - } - } - return c; -} - -static char *parse_value(void) -{ - static char value[1024]; - int quote = 0, comment = 0, len = 0, space = 0; - - for (;;) { - int c = get_next_char(); - if (len >= sizeof(value) - 1) - return NULL; - if (c == '\n') { - if (quote) - return NULL; - value[len] = 0; - return value; - } - if (comment) - continue; - if (isspace(c) && !quote) { - space = 1; - continue; - } - if (!quote) { - if (c == ';' || c == '#') { - comment = 1; - continue; - } - } - if (space) { - if (len) - value[len++] = ' '; - space = 0; - } - if (c == '\\') { - c = get_next_char(); - switch (c) { - case '\n': - continue; - case 't': - c = '\t'; - break; - case 'b': - c = '\b'; - break; - case 'n': - c = '\n'; - break; - /* Some characters escape as themselves */ - case '\\': 
case '"': - break; - /* Reject unknown escape sequences */ - default: - return NULL; - } - value[len++] = c; - continue; - } - if (c == '"') { - quote = 1-quote; - continue; - } - value[len++] = c; - } -} - -static inline int iskeychar(int c) -{ - return isalnum(c) || c == '-'; -} - -static int get_value(config_fn_t fn, void *data, char *name, unsigned int len) -{ - int c; - char *value; - - /* Get the full name */ - for (;;) { - c = get_next_char(); - if (config_file_eof) - break; - if (!iskeychar(c)) - break; - name[len++] = tolower(c); - if (len >= MAXNAME) - return -1; - } - name[len] = 0; - while (c == ' ' || c == '\t') - c = get_next_char(); - - value = NULL; - if (c != '\n') { - if (c != '=') - return -1; - value = parse_value(); - if (!value) - return -1; - } - return fn(name, value, data); -} - -static int get_extended_base_var(char *name, int baselen, int c) -{ - do { - if (c == '\n') - return -1; - c = get_next_char(); - } while (isspace(c)); - - /* We require the format to be '[base "extension"]' */ - if (c != '"') - return -1; - name[baselen++] = '.'; - - for (;;) { - int c = get_next_char(); - if (c == '\n') - return -1; - if (c == '"') - break; - if (c == '\\') { - c = get_next_char(); - if (c == '\n') - return -1; - } - name[baselen++] = c; - if (baselen > MAXNAME / 2) - return -1; - } - - /* Final ']' */ - if (get_next_char() != ']') - return -1; - return baselen; -} - -static int get_base_var(char *name) -{ - int baselen = 0; - - for (;;) { - int c = get_next_char(); - if (config_file_eof) - return -1; - if (c == ']') - return baselen; - if (isspace(c)) - return get_extended_base_var(name, baselen, c); - if (!iskeychar(c) && c != '.') - return -1; - if (baselen > MAXNAME / 2) - return -1; - name[baselen++] = tolower(c); - } -} - -static int perf_parse_file(config_fn_t fn, void *data) -{ - int comment = 0; - int baselen = 0; - static char var[MAXNAME]; - - /* U+FEFF Byte Order Mark in UTF8 */ - static const unsigned char *utf8_bom = (unsigned char *) "\xef\xbb\xbf"; - const unsigned char *bomptr = utf8_bom; - - for (;;) { - int c = get_next_char(); - if (bomptr && *bomptr) { - /* We are at the file beginning; skip UTF8-encoded BOM - * if present. Sane editors won't put this in on their - * own, but e.g. Windows Notepad will do it happily. */ - if ((unsigned char) c == *bomptr) { - bomptr++; - continue; - } else { - /* Do not tolerate partial BOM. */ - if (bomptr != utf8_bom) - break; - /* No BOM at file beginning. Cool. 
*/ - bomptr = NULL; - } - } - if (c == '\n') { - if (config_file_eof) - return 0; - comment = 0; - continue; - } - if (comment || isspace(c)) - continue; - if (c == '#' || c == ';') { - comment = 1; - continue; - } - if (c == '[') { - baselen = get_base_var(var); - if (baselen <= 0) - break; - var[baselen++] = '.'; - var[baselen] = 0; - continue; - } - if (!isalpha(c)) - break; - var[baselen] = tolower(c); - if (get_value(fn, data, var, baselen+1) < 0) - break; - } - die("bad config file line %d in %s", config_linenr, config_file_name); -} - -static int parse_unit_factor(const char *end, unsigned long *val) -{ - if (!*end) - return 1; - else if (!strcasecmp(end, "k")) { - *val *= 1024; - return 1; - } - else if (!strcasecmp(end, "m")) { - *val *= 1024 * 1024; - return 1; - } - else if (!strcasecmp(end, "g")) { - *val *= 1024 * 1024 * 1024; - return 1; - } - return 0; -} - -static int perf_parse_long(const char *value, long *ret) -{ - if (value && *value) { - char *end; - long val = strtol(value, &end, 0); - unsigned long factor = 1; - if (!parse_unit_factor(end, &factor)) - return 0; - *ret = val * factor; - return 1; - } - return 0; -} - -int perf_parse_ulong(const char *value, unsigned long *ret) -{ - if (value && *value) { - char *end; - unsigned long val = strtoul(value, &end, 0); - if (!parse_unit_factor(end, &val)) - return 0; - *ret = val; - return 1; - } - return 0; -} - -static void die_bad_config(const char *name) -{ - if (config_file_name) - die("bad config value for '%s' in %s", name, config_file_name); - die("bad config value for '%s'", name); -} - -int perf_config_int(const char *name, const char *value) -{ - long ret = 0; - if (!perf_parse_long(value, &ret)) - die_bad_config(name); - return ret; -} - -unsigned long perf_config_ulong(const char *name, const char *value) -{ - unsigned long ret; - if (!perf_parse_ulong(value, &ret)) - die_bad_config(name); - return ret; -} - -int perf_config_bool_or_int(const char *name, const char *value, int *is_bool) -{ - *is_bool = 1; - if (!value) - return 1; - if (!*value) - return 0; - if (!strcasecmp(value, "true") || !strcasecmp(value, "yes") || !strcasecmp(value, "on")) - return 1; - if (!strcasecmp(value, "false") || !strcasecmp(value, "no") || !strcasecmp(value, "off")) - return 0; - *is_bool = 0; - return perf_config_int(name, value); -} - -int perf_config_bool(const char *name, const char *value) -{ - int discard; - return !!perf_config_bool_or_int(name, value, &discard); -} - -int perf_config_string(const char **dest, const char *var, const char *value) -{ - if (!value) - return config_error_nonbool(var); - *dest = strdup(value); - return 0; -} - -static int perf_default_core_config(const char *var, const char *value) -{ - /* Add other config variables here and to Documentation/config.txt. */ - return 0; -} - -int perf_default_config(const char *var, const char *value, void *dummy) -{ - if (!prefixcmp(var, "core.")) - return perf_default_core_config(var, value); - - /* Add other config variables here and to Documentation/config.txt. 
*/ - return 0; -} - -int perf_config_from_file(config_fn_t fn, const char *filename, void *data) -{ - int ret; - FILE *f = fopen(filename, "r"); - - ret = -1; - if (f) { - config_file = f; - config_file_name = filename; - config_linenr = 1; - config_file_eof = 0; - ret = perf_parse_file(fn, data); - fclose(f); - config_file_name = NULL; - } - return ret; -} - -const char *perf_etc_perfconfig(void) -{ - static const char *system_wide; - if (!system_wide) - system_wide = system_path(ETC_PERFCONFIG); - return system_wide; -} - -static int perf_env_bool(const char *k, int def) -{ - const char *v = getenv(k); - return v ? perf_config_bool(k, v) : def; -} - -int perf_config_system(void) -{ - return !perf_env_bool("PERF_CONFIG_NOSYSTEM", 0); -} - -int perf_config_global(void) -{ - return !perf_env_bool("PERF_CONFIG_NOGLOBAL", 0); -} - -int perf_config(config_fn_t fn, void *data) -{ - int ret = 0, found = 0; - char *repo_config = NULL; - const char *home = NULL; - - /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */ - if (config_exclusive_filename) - return perf_config_from_file(fn, config_exclusive_filename, data); - if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK)) { - ret += perf_config_from_file(fn, perf_etc_perfconfig(), - data); - found += 1; - } - - home = getenv("HOME"); - if (perf_config_global() && home) { - char *user_config = strdup(mkpath("%s/.perfconfig", home)); - if (!access(user_config, R_OK)) { - ret += perf_config_from_file(fn, user_config, data); - found += 1; - } - free(user_config); - } - - repo_config = perf_pathdup("config"); - if (!access(repo_config, R_OK)) { - ret += perf_config_from_file(fn, repo_config, data); - found += 1; - } - free(repo_config); - if (found == 0) - return -1; - return ret; -} - -/* - * Find all the stuff for perf_config_set() below. - */ - -#define MAX_MATCHES 512 - -static struct { - int baselen; - char* key; - int do_not_match; - regex_t* value_regex; - int multi_replace; - size_t offset[MAX_MATCHES]; - enum { START, SECTION_SEEN, SECTION_END_SEEN, KEY_SEEN } state; - int seen; -} store; - -static int matches(const char* key, const char* value) -{ - return !strcmp(key, store.key) && - (store.value_regex == NULL || - (store.do_not_match ^ - !regexec(store.value_regex, value, 0, NULL, 0))); -} - -static int store_aux(const char* key, const char* value, void *cb) -{ - const char *ep; - size_t section_len; - - switch (store.state) { - case KEY_SEEN: - if (matches(key, value)) { - if (store.seen == 1 && store.multi_replace == 0) { - warning("%s has multiple values", key); - } else if (store.seen >= MAX_MATCHES) { - error("too many matches for %s", key); - return 1; - } - - store.offset[store.seen] = ftell(config_file); - store.seen++; - } - break; - case SECTION_SEEN: - /* - * What we are looking for is in store.key (both - * section and var), and its section part is baselen - * long. We found key (again, both section and var). - * We would want to know if this key is in the same - * section as what we are looking for. We already - * know we are in the same section as what should - * hold store.key. - */ - ep = strrchr(key, '.'); - section_len = ep - key; - - if ((section_len != store.baselen) || - memcmp(key, store.key, section_len+1)) { - store.state = SECTION_END_SEEN; - break; - } - - /* - * Do not increment matches: this is no match, but we - * just made sure we are in the desired section. 
- */ - store.offset[store.seen] = ftell(config_file); - /* fallthru */ - case SECTION_END_SEEN: - case START: - if (matches(key, value)) { - store.offset[store.seen] = ftell(config_file); - store.state = KEY_SEEN; - store.seen++; - } else { - if (strrchr(key, '.') - key == store.baselen && - !strncmp(key, store.key, store.baselen)) { - store.state = SECTION_SEEN; - store.offset[store.seen] = ftell(config_file); - } - } - } - return 0; -} - -static int store_write_section(int fd, const char* key) -{ - const char *dot; - int i, success; - struct strbuf sb = STRBUF_INIT; - - dot = memchr(key, '.', store.baselen); - if (dot) { - strbuf_addf(&sb, "[%.*s \"", (int)(dot - key), key); - for (i = dot - key + 1; i < store.baselen; i++) { - if (key[i] == '"' || key[i] == '\\') - strbuf_addch(&sb, '\\'); - strbuf_addch(&sb, key[i]); - } - strbuf_addstr(&sb, "\"]\n"); - } else { - strbuf_addf(&sb, "[%.*s]\n", store.baselen, key); - } - - success = write_in_full(fd, sb.buf, sb.len) == sb.len; - strbuf_release(&sb); - - return success; -} - -static int store_write_pair(int fd, const char* key, const char* value) -{ - int i, success; - int length = strlen(key + store.baselen + 1); - const char *quote = ""; - struct strbuf sb = STRBUF_INIT; - - /* - * Check to see if the value needs to be surrounded with a dq pair. - * Note that problematic characters are always backslash-quoted; this - * check is about not losing leading or trailing SP and strings that - * follow beginning-of-comment characters (i.e. ';' and '#') by the - * configuration parser. - */ - if (value[0] == ' ') - quote = "\""; - for (i = 0; value[i]; i++) - if (value[i] == ';' || value[i] == '#') - quote = "\""; - if (i && value[i - 1] == ' ') - quote = "\""; - - strbuf_addf(&sb, "\t%.*s = %s", - length, key + store.baselen + 1, quote); - - for (i = 0; value[i]; i++) - switch (value[i]) { - case '\n': - strbuf_addstr(&sb, "\\n"); - break; - case '\t': - strbuf_addstr(&sb, "\\t"); - break; - case '"': - case '\\': - strbuf_addch(&sb, '\\'); - default: - strbuf_addch(&sb, value[i]); - break; - } - strbuf_addf(&sb, "%s\n", quote); - - success = write_in_full(fd, sb.buf, sb.len) == sb.len; - strbuf_release(&sb); - - return success; -} - -static ssize_t find_beginning_of_line(const char* contents, size_t size, - size_t offset_, int* found_bracket) -{ - size_t equal_offset = size, bracket_offset = size; - ssize_t offset; - -contline: - for (offset = offset_-2; offset > 0 - && contents[offset] != '\n'; offset--) - switch (contents[offset]) { - case '=': equal_offset = offset; break; - case ']': bracket_offset = offset; break; - } - if (offset > 0 && contents[offset-1] == '\\') { - offset_ = offset; - goto contline; - } - if (bracket_offset < equal_offset) { - *found_bracket = 1; - offset = bracket_offset+1; - } else - offset++; - - return offset; -} - -int perf_config_set(const char* key, const char* value) -{ - return perf_config_set_multivar(key, value, NULL, 0); -} - -/* - * If value==NULL, unset in (remove from) config, - * if value_regex!=NULL, disregard key/value pairs where value does not match. - * if multi_replace==0, nothing, or only one matching key/value is replaced, - * else all matching key/values (regardless how many) are removed, - * before the new pair is written. - * - * Returns 0 on success. - * - * This function does this: - * - * - it locks the config file by creating ".perf/config.lock" - * - * - it then parses the config using store_aux() as validator to find - * the position on the key/value pair to replace. 
If it is to be unset, - * it must be found exactly once. - * - * - the config file is mmap()ed and the part before the match (if any) is - * written to the lock file, then the changed part and the rest. - * - * - the config file is removed and the lock file rename()d to it. - * - */ -int perf_config_set_multivar(const char* key, const char* value, - const char* value_regex, int multi_replace) -{ - int i, dot; - int fd = -1, in_fd; - int ret = 0; - char* config_filename; - const char* last_dot = strrchr(key, '.'); - - if (config_exclusive_filename) - config_filename = strdup(config_exclusive_filename); - else - config_filename = perf_pathdup("config"); - - /* - * Since "key" actually contains the section name and the real - * key name separated by a dot, we have to know where the dot is. - */ - - if (last_dot == NULL) { - error("key does not contain a section: %s", key); - ret = 2; - goto out_free; - } - store.baselen = last_dot - key; - - store.multi_replace = multi_replace; - - /* - * Validate the key and while at it, lower case it for matching. - */ - store.key = malloc(strlen(key) + 1); - dot = 0; - for (i = 0; key[i]; i++) { - unsigned char c = key[i]; - if (c == '.') - dot = 1; - /* Leave the extended basename untouched.. */ - if (!dot || i > store.baselen) { - if (!iskeychar(c) || (i == store.baselen+1 && !isalpha(c))) { - error("invalid key: %s", key); - free(store.key); - ret = 1; - goto out_free; - } - c = tolower(c); - } else if (c == '\n') { - error("invalid key (newline): %s", key); - free(store.key); - ret = 1; - goto out_free; - } - store.key[i] = c; - } - store.key[i] = 0; - - /* - * If .perf/config does not exist yet, write a minimal version. - */ - in_fd = open(config_filename, O_RDONLY); - if ( in_fd < 0 ) { - free(store.key); - - if ( ENOENT != errno ) { - error("opening %s: %s", config_filename, - strerror(errno)); - ret = 3; /* same as "invalid config file" */ - goto out_free; - } - /* if nothing to unset, error out */ - if (value == NULL) { - ret = 5; - goto out_free; - } - - store.key = (char*)key; - if (!store_write_section(fd, key) || - !store_write_pair(fd, key, value)) - goto write_err_out; - } else { - struct stat st; - char* contents; - size_t contents_sz, copy_begin, copy_end; - int i, new_line = 0; - - if (value_regex == NULL) - store.value_regex = NULL; - else { - if (value_regex[0] == '!') { - store.do_not_match = 1; - value_regex++; - } else - store.do_not_match = 0; - - store.value_regex = (regex_t*)malloc(sizeof(regex_t)); - if (regcomp(store.value_regex, value_regex, - REG_EXTENDED)) { - error("invalid pattern: %s", value_regex); - free(store.value_regex); - ret = 6; - goto out_free; - } - } - - store.offset[0] = 0; - store.state = START; - store.seen = 0; - - /* - * After this, store.offset will contain the *end* offset - * of the last match, or remain at 0 if no match was found. - * As a side effect, we make sure to transform only a valid - * existing config file. 
- */ - if (perf_config_from_file(store_aux, config_filename, NULL)) { - error("invalid config file %s", config_filename); - free(store.key); - if (store.value_regex != NULL) { - regfree(store.value_regex); - free(store.value_regex); - } - ret = 3; - goto out_free; - } - - free(store.key); - if (store.value_regex != NULL) { - regfree(store.value_regex); - free(store.value_regex); - } - - /* if nothing to unset, or too many matches, error out */ - if ((store.seen == 0 && value == NULL) || - (store.seen > 1 && multi_replace == 0)) { - ret = 5; - goto out_free; - } - - fstat(in_fd, &st); - contents_sz = xsize_t(st.st_size); - contents = mmap(NULL, contents_sz, PROT_READ, - MAP_PRIVATE, in_fd, 0); - close(in_fd); - - if (store.seen == 0) - store.seen = 1; - - for (i = 0, copy_begin = 0; i < store.seen; i++) { - if (store.offset[i] == 0) { - store.offset[i] = copy_end = contents_sz; - } else if (store.state != KEY_SEEN) { - copy_end = store.offset[i]; - } else - copy_end = find_beginning_of_line( - contents, contents_sz, - store.offset[i]-2, &new_line); - - if (copy_end > 0 && contents[copy_end-1] != '\n') - new_line = 1; - - /* write the first part of the config */ - if (copy_end > copy_begin) { - if (write_in_full(fd, contents + copy_begin, - copy_end - copy_begin) < - copy_end - copy_begin) - goto write_err_out; - if (new_line && - write_in_full(fd, "\n", 1) != 1) - goto write_err_out; - } - copy_begin = store.offset[i]; - } - - /* write the pair (value == NULL means unset) */ - if (value != NULL) { - if (store.state == START) { - if (!store_write_section(fd, key)) - goto write_err_out; - } - if (!store_write_pair(fd, key, value)) - goto write_err_out; - } - - /* write the rest of the config */ - if (copy_begin < contents_sz) - if (write_in_full(fd, contents + copy_begin, - contents_sz - copy_begin) < - contents_sz - copy_begin) - goto write_err_out; - - munmap(contents, contents_sz); - } - - ret = 0; - -out_free: - free(config_filename); - return ret; - -write_err_out: - goto out_free; - -} - -/* - * Call this to report error for your variable that should not - * get a boolean value (i.e. "[my] var" means "true"). - */ -int config_error_nonbool(const char *var) -{ - return error("Missing value for '%s'", var); -} diff --git a/Documentation/perf_counter/ctype.c b/Documentation/perf_counter/ctype.c deleted file mode 100644 index b90ec004f29..00000000000 --- a/Documentation/perf_counter/ctype.c +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Sane locale-independent, ASCII ctype. - * - * No surprises, and works with signed and unsigned chars. - */ -#include "cache.h" - -enum { - S = GIT_SPACE, - A = GIT_ALPHA, - D = GIT_DIGIT, - G = GIT_GLOB_SPECIAL, /* *, ?, [, \\ */ - R = GIT_REGEX_SPECIAL, /* $, (, ), +, ., ^, {, | * */ -}; - -unsigned char sane_ctype[256] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, S, S, 0, 0, S, 0, 0, /* 0.. 15 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 16.. 31 */ - S, 0, 0, 0, R, 0, 0, 0, R, R, G, R, 0, 0, R, 0, /* 32.. 47 */ - D, D, D, D, D, D, D, D, D, D, 0, 0, 0, 0, 0, G, /* 48.. 63 */ - 0, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 64.. 79 */ - A, A, A, A, A, A, A, A, A, A, A, G, G, 0, R, 0, /* 80.. 95 */ - 0, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 96..111 */ - A, A, A, A, A, A, A, A, A, A, A, R, R, 0, 0, 0, /* 112..127 */ - /* Nothing in the 128.. 
range */ -}; diff --git a/Documentation/perf_counter/exec_cmd.c b/Documentation/perf_counter/exec_cmd.c deleted file mode 100644 index d3929226315..00000000000 --- a/Documentation/perf_counter/exec_cmd.c +++ /dev/null @@ -1,165 +0,0 @@ -#include "cache.h" -#include "exec_cmd.h" -#include "quote.h" -#define MAX_ARGS 32 - -extern char **environ; -static const char *argv_exec_path; -static const char *argv0_path; - -const char *system_path(const char *path) -{ -#ifdef RUNTIME_PREFIX - static const char *prefix; -#else - static const char *prefix = PREFIX; -#endif - struct strbuf d = STRBUF_INIT; - - if (is_absolute_path(path)) - return path; - -#ifdef RUNTIME_PREFIX - assert(argv0_path); - assert(is_absolute_path(argv0_path)); - - if (!prefix && - !(prefix = strip_path_suffix(argv0_path, PERF_EXEC_PATH)) && - !(prefix = strip_path_suffix(argv0_path, BINDIR)) && - !(prefix = strip_path_suffix(argv0_path, "perf"))) { - prefix = PREFIX; - fprintf(stderr, "RUNTIME_PREFIX requested, " - "but prefix computation failed. " - "Using static fallback '%s'.\n", prefix); - } -#endif - - strbuf_addf(&d, "%s/%s", prefix, path); - path = strbuf_detach(&d, NULL); - return path; -} - -const char *perf_extract_argv0_path(const char *argv0) -{ - const char *slash; - - if (!argv0 || !*argv0) - return NULL; - slash = argv0 + strlen(argv0); - - while (argv0 <= slash && !is_dir_sep(*slash)) - slash--; - - if (slash >= argv0) { - argv0_path = strndup(argv0, slash - argv0); - return slash + 1; - } - - return argv0; -} - -void perf_set_argv_exec_path(const char *exec_path) -{ - argv_exec_path = exec_path; - /* - * Propagate this setting to external programs. - */ - setenv(EXEC_PATH_ENVIRONMENT, exec_path, 1); -} - - -/* Returns the highest-priority, location to look for perf programs. */ -const char *perf_exec_path(void) -{ - const char *env; - - if (argv_exec_path) - return argv_exec_path; - - env = getenv(EXEC_PATH_ENVIRONMENT); - if (env && *env) { - return env; - } - - return system_path(PERF_EXEC_PATH); -} - -static void add_path(struct strbuf *out, const char *path) -{ - if (path && *path) { - if (is_absolute_path(path)) - strbuf_addstr(out, path); - else - strbuf_addstr(out, make_nonrelative_path(path)); - - strbuf_addch(out, PATH_SEP); - } -} - -void setup_path(void) -{ - const char *old_path = getenv("PATH"); - struct strbuf new_path = STRBUF_INIT; - - add_path(&new_path, perf_exec_path()); - add_path(&new_path, argv0_path); - - if (old_path) - strbuf_addstr(&new_path, old_path); - else - strbuf_addstr(&new_path, "/usr/local/bin:/usr/bin:/bin"); - - setenv("PATH", new_path.buf, 1); - - strbuf_release(&new_path); -} - -const char **prepare_perf_cmd(const char **argv) -{ - int argc; - const char **nargv; - - for (argc = 0; argv[argc]; argc++) - ; /* just counting */ - nargv = malloc(sizeof(*nargv) * (argc + 2)); - - nargv[0] = "perf"; - for (argc = 0; argv[argc]; argc++) - nargv[argc + 1] = argv[argc]; - nargv[argc + 1] = NULL; - return nargv; -} - -int execv_perf_cmd(const char **argv) { - const char **nargv = prepare_perf_cmd(argv); - - /* execvp() can only ever return if it fails */ - execvp("perf", (char **)nargv); - - free(nargv); - return -1; -} - - -int execl_perf_cmd(const char *cmd,...) 
-{ - int argc; - const char *argv[MAX_ARGS + 1]; - const char *arg; - va_list param; - - va_start(param, cmd); - argv[0] = cmd; - argc = 1; - while (argc < MAX_ARGS) { - arg = argv[argc++] = va_arg(param, char *); - if (!arg) - break; - } - va_end(param); - if (MAX_ARGS <= argc) - return error("too many args to run %s", cmd); - - argv[argc] = NULL; - return execv_perf_cmd(argv); -} diff --git a/Documentation/perf_counter/exec_cmd.h b/Documentation/perf_counter/exec_cmd.h deleted file mode 100644 index effe25eb154..00000000000 --- a/Documentation/perf_counter/exec_cmd.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef PERF_EXEC_CMD_H -#define PERF_EXEC_CMD_H - -extern void perf_set_argv_exec_path(const char *exec_path); -extern const char *perf_extract_argv0_path(const char *path); -extern const char *perf_exec_path(void); -extern void setup_path(void); -extern const char **prepare_perf_cmd(const char **argv); -extern int execv_perf_cmd(const char **argv); /* NULL terminated */ -extern int execl_perf_cmd(const char *cmd, ...); -extern const char *system_path(const char *path); - -#endif /* PERF_EXEC_CMD_H */ diff --git a/Documentation/perf_counter/generate-cmdlist.sh b/Documentation/perf_counter/generate-cmdlist.sh deleted file mode 100755 index f06f6fd148f..00000000000 --- a/Documentation/perf_counter/generate-cmdlist.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/sh - -echo "/* Automatically generated by $0 */ -struct cmdname_help -{ - char name[16]; - char help[80]; -}; - -static struct cmdname_help common_cmds[] = {" - -sed -n -e 's/^perf-\([^ ]*\)[ ].* common.*/\1/p' command-list.txt | -sort | -while read cmd -do - sed -n ' - /^NAME/,/perf-'"$cmd"'/H - ${ - x - s/.*perf-'"$cmd"' - \(.*\)/ {"'"$cmd"'", "\1"},/ - p - }' "Documentation/perf-$cmd.txt" -done -echo "};" diff --git a/Documentation/perf_counter/help.c b/Documentation/perf_counter/help.c deleted file mode 100644 index ec011672166..00000000000 --- a/Documentation/perf_counter/help.c +++ /dev/null @@ -1,366 +0,0 @@ -#include "cache.h" -#include "builtin.h" -#include "exec_cmd.h" -#include "levenshtein.h" -#include "help.h" - -/* most GUI terminals set COLUMNS (although some don't export it) */ -static int term_columns(void) -{ - char *col_string = getenv("COLUMNS"); - int n_cols; - - if (col_string && (n_cols = atoi(col_string)) > 0) - return n_cols; - -#ifdef TIOCGWINSZ - { - struct winsize ws; - if (!ioctl(1, TIOCGWINSZ, &ws)) { - if (ws.ws_col) - return ws.ws_col; - } - } -#endif - - return 80; -} - -void add_cmdname(struct cmdnames *cmds, const char *name, int len) -{ - struct cmdname *ent = malloc(sizeof(*ent) + len + 1); - - ent->len = len; - memcpy(ent->name, name, len); - ent->name[len] = 0; - - ALLOC_GROW(cmds->names, cmds->cnt + 1, cmds->alloc); - cmds->names[cmds->cnt++] = ent; -} - -static void clean_cmdnames(struct cmdnames *cmds) -{ - int i; - for (i = 0; i < cmds->cnt; ++i) - free(cmds->names[i]); - free(cmds->names); - cmds->cnt = 0; - cmds->alloc = 0; -} - -static int cmdname_compare(const void *a_, const void *b_) -{ - struct cmdname *a = *(struct cmdname **)a_; - struct cmdname *b = *(struct cmdname **)b_; - return strcmp(a->name, b->name); -} - -static void uniq(struct cmdnames *cmds) -{ - int i, j; - - if (!cmds->cnt) - return; - - for (i = j = 1; i < cmds->cnt; i++) - if (strcmp(cmds->names[i]->name, cmds->names[i-1]->name)) - cmds->names[j++] = cmds->names[i]; - - cmds->cnt = j; -} - -void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes) -{ - int ci, cj, ei; - int cmp; - - ci = cj = ei = 0; - while (ci < 
cmds->cnt && ei < excludes->cnt) { - cmp = strcmp(cmds->names[ci]->name, excludes->names[ei]->name); - if (cmp < 0) - cmds->names[cj++] = cmds->names[ci++]; - else if (cmp == 0) - ci++, ei++; - else if (cmp > 0) - ei++; - } - - while (ci < cmds->cnt) - cmds->names[cj++] = cmds->names[ci++]; - - cmds->cnt = cj; -} - -static void pretty_print_string_list(struct cmdnames *cmds, int longest) -{ - int cols = 1, rows; - int space = longest + 1; /* min 1 SP between words */ - int max_cols = term_columns() - 1; /* don't print *on* the edge */ - int i, j; - - if (space < max_cols) - cols = max_cols / space; - rows = (cmds->cnt + cols - 1) / cols; - - for (i = 0; i < rows; i++) { - printf(" "); - - for (j = 0; j < cols; j++) { - int n = j * rows + i; - int size = space; - if (n >= cmds->cnt) - break; - if (j == cols-1 || n + rows >= cmds->cnt) - size = 1; - printf("%-*s", size, cmds->names[n]->name); - } - putchar('\n'); - } -} - -static int is_executable(const char *name) -{ - struct stat st; - - if (stat(name, &st) || /* stat, not lstat */ - !S_ISREG(st.st_mode)) - return 0; - -#ifdef __MINGW32__ - /* cannot trust the executable bit, peek into the file instead */ - char buf[3] = { 0 }; - int n; - int fd = open(name, O_RDONLY); - st.st_mode &= ~S_IXUSR; - if (fd >= 0) { - n = read(fd, buf, 2); - if (n == 2) - /* DOS executables start with "MZ" */ - if (!strcmp(buf, "#!") || !strcmp(buf, "MZ")) - st.st_mode |= S_IXUSR; - close(fd); - } -#endif - return st.st_mode & S_IXUSR; -} - -static void list_commands_in_dir(struct cmdnames *cmds, - const char *path, - const char *prefix) -{ - int prefix_len; - DIR *dir = opendir(path); - struct dirent *de; - struct strbuf buf = STRBUF_INIT; - int len; - - if (!dir) - return; - if (!prefix) - prefix = "perf-"; - prefix_len = strlen(prefix); - - strbuf_addf(&buf, "%s/", path); - len = buf.len; - - while ((de = readdir(dir)) != NULL) { - int entlen; - - if (prefixcmp(de->d_name, prefix)) - continue; - - strbuf_setlen(&buf, len); - strbuf_addstr(&buf, de->d_name); - if (!is_executable(buf.buf)) - continue; - - entlen = strlen(de->d_name) - prefix_len; - if (has_extension(de->d_name, ".exe")) - entlen -= 4; - - add_cmdname(cmds, de->d_name + prefix_len, entlen); - } - closedir(dir); - strbuf_release(&buf); -} - -void load_command_list(const char *prefix, - struct cmdnames *main_cmds, - struct cmdnames *other_cmds) -{ - const char *env_path = getenv("PATH"); - const char *exec_path = perf_exec_path(); - - if (exec_path) { - list_commands_in_dir(main_cmds, exec_path, prefix); - qsort(main_cmds->names, main_cmds->cnt, - sizeof(*main_cmds->names), cmdname_compare); - uniq(main_cmds); - } - - if (env_path) { - char *paths, *path, *colon; - path = paths = strdup(env_path); - while (1) { - if ((colon = strchr(path, PATH_SEP))) - *colon = 0; - if (!exec_path || strcmp(path, exec_path)) - list_commands_in_dir(other_cmds, path, prefix); - - if (!colon) - break; - path = colon + 1; - } - free(paths); - - qsort(other_cmds->names, other_cmds->cnt, - sizeof(*other_cmds->names), cmdname_compare); - uniq(other_cmds); - } - exclude_cmds(other_cmds, main_cmds); -} - -void list_commands(const char *title, struct cmdnames *main_cmds, - struct cmdnames *other_cmds) -{ - int i, longest = 0; - - for (i = 0; i < main_cmds->cnt; i++) - if (longest < main_cmds->names[i]->len) - longest = main_cmds->names[i]->len; - for (i = 0; i < other_cmds->cnt; i++) - if (longest < other_cmds->names[i]->len) - longest = other_cmds->names[i]->len; - - if (main_cmds->cnt) { - const char *exec_path = 
perf_exec_path(); - printf("available %s in '%s'\n", title, exec_path); - printf("----------------"); - mput_char('-', strlen(title) + strlen(exec_path)); - putchar('\n'); - pretty_print_string_list(main_cmds, longest); - putchar('\n'); - } - - if (other_cmds->cnt) { - printf("%s available from elsewhere on your $PATH\n", title); - printf("---------------------------------------"); - mput_char('-', strlen(title)); - putchar('\n'); - pretty_print_string_list(other_cmds, longest); - putchar('\n'); - } -} - -int is_in_cmdlist(struct cmdnames *c, const char *s) -{ - int i; - for (i = 0; i < c->cnt; i++) - if (!strcmp(s, c->names[i]->name)) - return 1; - return 0; -} - -static int autocorrect; -static struct cmdnames aliases; - -static int perf_unknown_cmd_config(const char *var, const char *value, void *cb) -{ - if (!strcmp(var, "help.autocorrect")) - autocorrect = perf_config_int(var, value); - /* Also use aliases for command lookup */ - if (!prefixcmp(var, "alias.")) - add_cmdname(&aliases, var + 6, strlen(var + 6)); - - return perf_default_config(var, value, cb); -} - -static int levenshtein_compare(const void *p1, const void *p2) -{ - const struct cmdname *const *c1 = p1, *const *c2 = p2; - const char *s1 = (*c1)->name, *s2 = (*c2)->name; - int l1 = (*c1)->len; - int l2 = (*c2)->len; - return l1 != l2 ? l1 - l2 : strcmp(s1, s2); -} - -static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old) -{ - int i; - ALLOC_GROW(cmds->names, cmds->cnt + old->cnt, cmds->alloc); - - for (i = 0; i < old->cnt; i++) - cmds->names[cmds->cnt++] = old->names[i]; - free(old->names); - old->cnt = 0; - old->names = NULL; -} - -const char *help_unknown_cmd(const char *cmd) -{ - int i, n, best_similarity = 0; - struct cmdnames main_cmds, other_cmds; - - memset(&main_cmds, 0, sizeof(main_cmds)); - memset(&other_cmds, 0, sizeof(other_cmds)); - memset(&aliases, 0, sizeof(aliases)); - - perf_config(perf_unknown_cmd_config, NULL); - - load_command_list("perf-", &main_cmds, &other_cmds); - - add_cmd_list(&main_cmds, &aliases); - add_cmd_list(&main_cmds, &other_cmds); - qsort(main_cmds.names, main_cmds.cnt, - sizeof(*main_cmds.names), cmdname_compare); - uniq(&main_cmds); - - /* This reuses cmdname->len for similarity index */ - for (i = 0; i < main_cmds.cnt; ++i) - main_cmds.names[i]->len = - levenshtein(cmd, main_cmds.names[i]->name, 0, 2, 1, 4); - - qsort(main_cmds.names, main_cmds.cnt, - sizeof(*main_cmds.names), levenshtein_compare); - - if (!main_cmds.cnt) - die("Uh oh. Your system reports no perf commands at all."); - - best_similarity = main_cmds.names[0]->len; - n = 1; - while (n < main_cmds.cnt && best_similarity == main_cmds.names[n]->len) - ++n; - if (autocorrect && n == 1) { - const char *assumed = main_cmds.names[0]->name; - main_cmds.names[0] = NULL; - clean_cmdnames(&main_cmds); - fprintf(stderr, "WARNING: You called a perf program named '%s', " - "which does not exist.\n" - "Continuing under the assumption that you meant '%s'\n", - cmd, assumed); - if (autocorrect > 0) { - fprintf(stderr, "in %0.1f seconds automatically...\n", - (float)autocorrect/10.0); - poll(NULL, 0, autocorrect * 100); - } - return assumed; - } - - fprintf(stderr, "perf: '%s' is not a perf-command. See 'perf --help'.\n", cmd); - - if (best_similarity < 6) { - fprintf(stderr, "\nDid you mean %s?\n", - n < 2 ? 
"this": "one of these"); - - for (i = 0; i < n; i++) - fprintf(stderr, "\t%s\n", main_cmds.names[i]->name); - } - - exit(1); -} - -int cmd_version(int argc, const char **argv, const char *prefix) -{ - printf("perf version %s\n", perf_version_string); - return 0; -} diff --git a/Documentation/perf_counter/help.h b/Documentation/perf_counter/help.h deleted file mode 100644 index 56bc15406ff..00000000000 --- a/Documentation/perf_counter/help.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef HELP_H -#define HELP_H - -struct cmdnames { - int alloc; - int cnt; - struct cmdname { - size_t len; /* also used for similarity index in help.c */ - char name[FLEX_ARRAY]; - } **names; -}; - -static inline void mput_char(char c, unsigned int num) -{ - while(num--) - putchar(c); -} - -void load_command_list(const char *prefix, - struct cmdnames *main_cmds, - struct cmdnames *other_cmds); -void add_cmdname(struct cmdnames *cmds, const char *name, int len); -/* Here we require that excludes is a sorted list. */ -void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes); -int is_in_cmdlist(struct cmdnames *c, const char *s); -void list_commands(const char *title, struct cmdnames *main_cmds, - struct cmdnames *other_cmds); - -#endif /* HELP_H */ diff --git a/Documentation/perf_counter/levenshtein.c b/Documentation/perf_counter/levenshtein.c deleted file mode 100644 index e521d1516df..00000000000 --- a/Documentation/perf_counter/levenshtein.c +++ /dev/null @@ -1,84 +0,0 @@ -#include "cache.h" -#include "levenshtein.h" - -/* - * This function implements the Damerau-Levenshtein algorithm to - * calculate a distance between strings. - * - * Basically, it says how many letters need to be swapped, substituted, - * deleted from, or added to string1, at least, to get string2. - * - * The idea is to build a distance matrix for the substrings of both - * strings. To avoid a large space complexity, only the last three rows - * are kept in memory (if swaps had the same or higher cost as one deletion - * plus one insertion, only two rows would be needed). - * - * At any stage, "i + 1" denotes the length of the current substring of - * string1 that the distance is calculated for. - * - * row2 holds the current row, row1 the previous row (i.e. for the substring - * of string1 of length "i"), and row0 the row before that. - * - * In other words, at the start of the big loop, row2[j + 1] contains the - * Damerau-Levenshtein distance between the substring of string1 of length - * "i" and the substring of string2 of length "j + 1". - * - * All the big loop does is determine the partial minimum-cost paths. - * - * It does so by calculating the costs of the path ending in characters - * i (in string1) and j (in string2), respectively, given that the last - * operation is a substition, a swap, a deletion, or an insertion. - * - * This implementation allows the costs to be weighted: - * - * - w (as in "sWap") - * - s (as in "Substitution") - * - a (for insertion, AKA "Add") - * - d (as in "Deletion") - * - * Note that this algorithm calculates a distance _iff_ d == a. 
- */ -int levenshtein(const char *string1, const char *string2, - int w, int s, int a, int d) -{ - int len1 = strlen(string1), len2 = strlen(string2); - int *row0 = malloc(sizeof(int) * (len2 + 1)); - int *row1 = malloc(sizeof(int) * (len2 + 1)); - int *row2 = malloc(sizeof(int) * (len2 + 1)); - int i, j; - - for (j = 0; j <= len2; j++) - row1[j] = j * a; - for (i = 0; i < len1; i++) { - int *dummy; - - row2[0] = (i + 1) * d; - for (j = 0; j < len2; j++) { - /* substitution */ - row2[j + 1] = row1[j] + s * (string1[i] != string2[j]); - /* swap */ - if (i > 0 && j > 0 && string1[i - 1] == string2[j] && - string1[i] == string2[j - 1] && - row2[j + 1] > row0[j - 1] + w) - row2[j + 1] = row0[j - 1] + w; - /* deletion */ - if (row2[j + 1] > row1[j + 1] + d) - row2[j + 1] = row1[j + 1] + d; - /* insertion */ - if (row2[j + 1] > row2[j] + a) - row2[j + 1] = row2[j] + a; - } - - dummy = row0; - row0 = row1; - row1 = row2; - row2 = dummy; - } - - i = row1[len2]; - free(row0); - free(row1); - free(row2); - - return i; -} diff --git a/Documentation/perf_counter/levenshtein.h b/Documentation/perf_counter/levenshtein.h deleted file mode 100644 index 0173abeef52..00000000000 --- a/Documentation/perf_counter/levenshtein.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef LEVENSHTEIN_H -#define LEVENSHTEIN_H - -int levenshtein(const char *string1, const char *string2, - int swap_penalty, int substition_penalty, - int insertion_penalty, int deletion_penalty); - -#endif diff --git a/Documentation/perf_counter/parse-options.c b/Documentation/perf_counter/parse-options.c deleted file mode 100644 index 7464f34e540..00000000000 --- a/Documentation/perf_counter/parse-options.c +++ /dev/null @@ -1,495 +0,0 @@ -#include "util.h" -#include "parse-options.h" -#include "cache.h" - -#define OPT_SHORT 1 -#define OPT_UNSET 2 - -static int opterror(const struct option *opt, const char *reason, int flags) -{ - if (flags & OPT_SHORT) - return error("switch `%c' %s", opt->short_name, reason); - if (flags & OPT_UNSET) - return error("option `no-%s' %s", opt->long_name, reason); - return error("option `%s' %s", opt->long_name, reason); -} - -static int get_arg(struct parse_opt_ctx_t *p, const struct option *opt, - int flags, const char **arg) -{ - if (p->opt) { - *arg = p->opt; - p->opt = NULL; - } else if (p->argc == 1 && (opt->flags & PARSE_OPT_LASTARG_DEFAULT)) { - *arg = (const char *)opt->defval; - } else if (p->argc > 1) { - p->argc--; - *arg = *++p->argv; - } else - return opterror(opt, "requires a value", flags); - return 0; -} - -static int get_value(struct parse_opt_ctx_t *p, - const struct option *opt, int flags) -{ - const char *s, *arg; - const int unset = flags & OPT_UNSET; - - if (unset && p->opt) - return opterror(opt, "takes no value", flags); - if (unset && (opt->flags & PARSE_OPT_NONEG)) - return opterror(opt, "isn't available", flags); - - if (!(flags & OPT_SHORT) && p->opt) { - switch (opt->type) { - case OPTION_CALLBACK: - if (!(opt->flags & PARSE_OPT_NOARG)) - break; - /* FALLTHROUGH */ - case OPTION_BOOLEAN: - case OPTION_BIT: - case OPTION_SET_INT: - case OPTION_SET_PTR: - return opterror(opt, "takes no value", flags); - default: - break; - } - } - - switch (opt->type) { - case OPTION_BIT: - if (unset) - *(int *)opt->value &= ~opt->defval; - else - *(int *)opt->value |= opt->defval; - return 0; - - case OPTION_BOOLEAN: - *(int *)opt->value = unset ? 0 : *(int *)opt->value + 1; - return 0; - - case OPTION_SET_INT: - *(int *)opt->value = unset ? 
0 : opt->defval; - return 0; - - case OPTION_SET_PTR: - *(void **)opt->value = unset ? NULL : (void *)opt->defval; - return 0; - - case OPTION_STRING: - if (unset) - *(const char **)opt->value = NULL; - else if (opt->flags & PARSE_OPT_OPTARG && !p->opt) - *(const char **)opt->value = (const char *)opt->defval; - else - return get_arg(p, opt, flags, (const char **)opt->value); - return 0; - - case OPTION_CALLBACK: - if (unset) - return (*opt->callback)(opt, NULL, 1) ? (-1) : 0; - if (opt->flags & PARSE_OPT_NOARG) - return (*opt->callback)(opt, NULL, 0) ? (-1) : 0; - if (opt->flags & PARSE_OPT_OPTARG && !p->opt) - return (*opt->callback)(opt, NULL, 0) ? (-1) : 0; - if (get_arg(p, opt, flags, &arg)) - return -1; - return (*opt->callback)(opt, arg, 0) ? (-1) : 0; - - case OPTION_INTEGER: - if (unset) { - *(int *)opt->value = 0; - return 0; - } - if (opt->flags & PARSE_OPT_OPTARG && !p->opt) { - *(int *)opt->value = opt->defval; - return 0; - } - if (get_arg(p, opt, flags, &arg)) - return -1; - *(int *)opt->value = strtol(arg, (char **)&s, 10); - if (*s) - return opterror(opt, "expects a numerical value", flags); - return 0; - - default: - die("should not happen, someone must be hit on the forehead"); - } -} - -static int parse_short_opt(struct parse_opt_ctx_t *p, const struct option *options) -{ - for (; options->type != OPTION_END; options++) { - if (options->short_name == *p->opt) { - p->opt = p->opt[1] ? p->opt + 1 : NULL; - return get_value(p, options, OPT_SHORT); - } - } - return -2; -} - -static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg, - const struct option *options) -{ - const char *arg_end = strchr(arg, '='); - const struct option *abbrev_option = NULL, *ambiguous_option = NULL; - int abbrev_flags = 0, ambiguous_flags = 0; - - if (!arg_end) - arg_end = arg + strlen(arg); - - for (; options->type != OPTION_END; options++) { - const char *rest; - int flags = 0; - - if (!options->long_name) - continue; - - rest = skip_prefix(arg, options->long_name); - if (options->type == OPTION_ARGUMENT) { - if (!rest) - continue; - if (*rest == '=') - return opterror(options, "takes no value", flags); - if (*rest) - continue; - p->out[p->cpidx++] = arg - 2; - return 0; - } - if (!rest) { - /* abbreviated? */ - if (!strncmp(options->long_name, arg, arg_end - arg)) { -is_abbreviated: - if (abbrev_option) { - /* - * If this is abbreviated, it is - * ambiguous. So when there is no - * exact match later, we need to - * error out. - */ - ambiguous_option = abbrev_option; - ambiguous_flags = abbrev_flags; - } - if (!(flags & OPT_UNSET) && *arg_end) - p->opt = arg_end + 1; - abbrev_option = options; - abbrev_flags = flags; - continue; - } - /* negated and abbreviated very much? */ - if (!prefixcmp("no-", arg)) { - flags |= OPT_UNSET; - goto is_abbreviated; - } - /* negated? */ - if (strncmp(arg, "no-", 3)) - continue; - flags |= OPT_UNSET; - rest = skip_prefix(arg + 3, options->long_name); - /* abbreviated and negated? */ - if (!rest && !prefixcmp(options->long_name, arg + 3)) - goto is_abbreviated; - if (!rest) - continue; - } - if (*rest) { - if (*rest != '=') - continue; - p->opt = rest + 1; - } - return get_value(p, options, flags); - } - - if (ambiguous_option) - return error("Ambiguous option: %s " - "(could be --%s%s or --%s%s)", - arg, - (ambiguous_flags & OPT_UNSET) ? "no-" : "", - ambiguous_option->long_name, - (abbrev_flags & OPT_UNSET) ? 
"no-" : "", - abbrev_option->long_name); - if (abbrev_option) - return get_value(p, abbrev_option, abbrev_flags); - return -2; -} - -static void check_typos(const char *arg, const struct option *options) -{ - if (strlen(arg) < 3) - return; - - if (!prefixcmp(arg, "no-")) { - error ("did you mean `--%s` (with two dashes ?)", arg); - exit(129); - } - - for (; options->type != OPTION_END; options++) { - if (!options->long_name) - continue; - if (!prefixcmp(options->long_name, arg)) { - error ("did you mean `--%s` (with two dashes ?)", arg); - exit(129); - } - } -} - -void parse_options_start(struct parse_opt_ctx_t *ctx, - int argc, const char **argv, int flags) -{ - memset(ctx, 0, sizeof(*ctx)); - ctx->argc = argc - 1; - ctx->argv = argv + 1; - ctx->out = argv; - ctx->cpidx = ((flags & PARSE_OPT_KEEP_ARGV0) != 0); - ctx->flags = flags; - if ((flags & PARSE_OPT_KEEP_UNKNOWN) && - (flags & PARSE_OPT_STOP_AT_NON_OPTION)) - die("STOP_AT_NON_OPTION and KEEP_UNKNOWN don't go together"); -} - -static int usage_with_options_internal(const char * const *, - const struct option *, int); - -int parse_options_step(struct parse_opt_ctx_t *ctx, - const struct option *options, - const char * const usagestr[]) -{ - int internal_help = !(ctx->flags & PARSE_OPT_NO_INTERNAL_HELP); - - /* we must reset ->opt, unknown short option leave it dangling */ - ctx->opt = NULL; - - for (; ctx->argc; ctx->argc--, ctx->argv++) { - const char *arg = ctx->argv[0]; - - if (*arg != '-' || !arg[1]) { - if (ctx->flags & PARSE_OPT_STOP_AT_NON_OPTION) - break; - ctx->out[ctx->cpidx++] = ctx->argv[0]; - continue; - } - - if (arg[1] != '-') { - ctx->opt = arg + 1; - if (internal_help && *ctx->opt == 'h') - return parse_options_usage(usagestr, options); - switch (parse_short_opt(ctx, options)) { - case -1: - return parse_options_usage(usagestr, options); - case -2: - goto unknown; - } - if (ctx->opt) - check_typos(arg + 1, options); - while (ctx->opt) { - if (internal_help && *ctx->opt == 'h') - return parse_options_usage(usagestr, options); - switch (parse_short_opt(ctx, options)) { - case -1: - return parse_options_usage(usagestr, options); - case -2: - /* fake a short option thing to hide the fact that we may have - * started to parse aggregated stuff - * - * This is leaky, too bad. 
- */ - ctx->argv[0] = strdup(ctx->opt - 1); - *(char *)ctx->argv[0] = '-'; - goto unknown; - } - } - continue; - } - - if (!arg[2]) { /* "--" */ - if (!(ctx->flags & PARSE_OPT_KEEP_DASHDASH)) { - ctx->argc--; - ctx->argv++; - } - break; - } - - if (internal_help && !strcmp(arg + 2, "help-all")) - return usage_with_options_internal(usagestr, options, 1); - if (internal_help && !strcmp(arg + 2, "help")) - return parse_options_usage(usagestr, options); - switch (parse_long_opt(ctx, arg + 2, options)) { - case -1: - return parse_options_usage(usagestr, options); - case -2: - goto unknown; - } - continue; -unknown: - if (!(ctx->flags & PARSE_OPT_KEEP_UNKNOWN)) - return PARSE_OPT_UNKNOWN; - ctx->out[ctx->cpidx++] = ctx->argv[0]; - ctx->opt = NULL; - } - return PARSE_OPT_DONE; -} - -int parse_options_end(struct parse_opt_ctx_t *ctx) -{ - memmove(ctx->out + ctx->cpidx, ctx->argv, ctx->argc * sizeof(*ctx->out)); - ctx->out[ctx->cpidx + ctx->argc] = NULL; - return ctx->cpidx + ctx->argc; -} - -int parse_options(int argc, const char **argv, const struct option *options, - const char * const usagestr[], int flags) -{ - struct parse_opt_ctx_t ctx; - - parse_options_start(&ctx, argc, argv, flags); - switch (parse_options_step(&ctx, options, usagestr)) { - case PARSE_OPT_HELP: - exit(129); - case PARSE_OPT_DONE: - break; - default: /* PARSE_OPT_UNKNOWN */ - if (ctx.argv[0][1] == '-') { - error("unknown option `%s'", ctx.argv[0] + 2); - } else { - error("unknown switch `%c'", *ctx.opt); - } - usage_with_options(usagestr, options); - } - - return parse_options_end(&ctx); -} - -#define USAGE_OPTS_WIDTH 24 -#define USAGE_GAP 2 - -int usage_with_options_internal(const char * const *usagestr, - const struct option *opts, int full) -{ - if (!usagestr) - return PARSE_OPT_HELP; - - fprintf(stderr, "usage: %s\n", *usagestr++); - while (*usagestr && **usagestr) - fprintf(stderr, " or: %s\n", *usagestr++); - while (*usagestr) { - fprintf(stderr, "%s%s\n", - **usagestr ? 
" " : "", - *usagestr); - usagestr++; - } - - if (opts->type != OPTION_GROUP) - fputc('\n', stderr); - - for (; opts->type != OPTION_END; opts++) { - size_t pos; - int pad; - - if (opts->type == OPTION_GROUP) { - fputc('\n', stderr); - if (*opts->help) - fprintf(stderr, "%s\n", opts->help); - continue; - } - if (!full && (opts->flags & PARSE_OPT_HIDDEN)) - continue; - - pos = fprintf(stderr, " "); - if (opts->short_name) - pos += fprintf(stderr, "-%c", opts->short_name); - if (opts->long_name && opts->short_name) - pos += fprintf(stderr, ", "); - if (opts->long_name) - pos += fprintf(stderr, "--%s", opts->long_name); - - switch (opts->type) { - case OPTION_ARGUMENT: - break; - case OPTION_INTEGER: - if (opts->flags & PARSE_OPT_OPTARG) - if (opts->long_name) - pos += fprintf(stderr, "[=]"); - else - pos += fprintf(stderr, "[]"); - else - pos += fprintf(stderr, " "); - break; - case OPTION_CALLBACK: - if (opts->flags & PARSE_OPT_NOARG) - break; - /* FALLTHROUGH */ - case OPTION_STRING: - if (opts->argh) { - if (opts->flags & PARSE_OPT_OPTARG) - if (opts->long_name) - pos += fprintf(stderr, "[=<%s>]", opts->argh); - else - pos += fprintf(stderr, "[<%s>]", opts->argh); - else - pos += fprintf(stderr, " <%s>", opts->argh); - } else { - if (opts->flags & PARSE_OPT_OPTARG) - if (opts->long_name) - pos += fprintf(stderr, "[=...]"); - else - pos += fprintf(stderr, "[...]"); - else - pos += fprintf(stderr, " ..."); - } - break; - default: /* OPTION_{BIT,BOOLEAN,SET_INT,SET_PTR} */ - break; - } - - if (pos <= USAGE_OPTS_WIDTH) - pad = USAGE_OPTS_WIDTH - pos; - else { - fputc('\n', stderr); - pad = USAGE_OPTS_WIDTH; - } - fprintf(stderr, "%*s%s\n", pad + USAGE_GAP, "", opts->help); - } - fputc('\n', stderr); - - return PARSE_OPT_HELP; -} - -void usage_with_options(const char * const *usagestr, - const struct option *opts) -{ - usage_with_options_internal(usagestr, opts, 0); - exit(129); -} - -int parse_options_usage(const char * const *usagestr, - const struct option *opts) -{ - return usage_with_options_internal(usagestr, opts, 0); -} - - -/*----- some often used options -----*/ -#include "cache.h" - -int parse_opt_verbosity_cb(const struct option *opt, const char *arg, - int unset) -{ - int *target = opt->value; - - if (unset) - /* --no-quiet, --no-verbose */ - *target = 0; - else if (opt->short_name == 'v') { - if (*target >= 0) - (*target)++; - else - *target = 1; - } else { - if (*target <= 0) - (*target)--; - else - *target = -1; - } - return 0; -} diff --git a/Documentation/perf_counter/parse-options.h b/Documentation/perf_counter/parse-options.h deleted file mode 100644 index a81c7faff68..00000000000 --- a/Documentation/perf_counter/parse-options.h +++ /dev/null @@ -1,172 +0,0 @@ -#ifndef PARSE_OPTIONS_H -#define PARSE_OPTIONS_H - -enum parse_opt_type { - /* special types */ - OPTION_END, - OPTION_ARGUMENT, - OPTION_GROUP, - /* options with no arguments */ - OPTION_BIT, - OPTION_BOOLEAN, /* _INCR would have been a better name */ - OPTION_SET_INT, - OPTION_SET_PTR, - /* options with arguments (usually) */ - OPTION_STRING, - OPTION_INTEGER, - OPTION_CALLBACK, -}; - -enum parse_opt_flags { - PARSE_OPT_KEEP_DASHDASH = 1, - PARSE_OPT_STOP_AT_NON_OPTION = 2, - PARSE_OPT_KEEP_ARGV0 = 4, - PARSE_OPT_KEEP_UNKNOWN = 8, - PARSE_OPT_NO_INTERNAL_HELP = 16, -}; - -enum parse_opt_option_flags { - PARSE_OPT_OPTARG = 1, - PARSE_OPT_NOARG = 2, - PARSE_OPT_NONEG = 4, - PARSE_OPT_HIDDEN = 8, - PARSE_OPT_LASTARG_DEFAULT = 16, -}; - -struct option; -typedef int parse_opt_cb(const struct option *, const char *arg, 
int unset); -/* - * `type`:: - * holds the type of the option; you must have an OPTION_END last in your - * array. - * - * `short_name`:: - * the character to use as a short option name, '\0' if none. - * - * `long_name`:: - * the long option name, without the leading dashes, NULL if none. - * - * `value`:: - * stores pointers to the values to be filled. - * - * `argh`:: - * token to explain the kind of argument this option wants. Keep it - * homogeneous across the repository. - * - * `help`:: - * the short help associated with what the option does. - * Must never be NULL (except for OPTION_END). - * OPTION_GROUP uses this pointer to store the group header. - * - * `flags`:: - * mask of parse_opt_option_flags. - * PARSE_OPT_OPTARG: says that the argument is optional (not for BOOLEANs) - * PARSE_OPT_NOARG: says that this option takes no argument, for CALLBACKs - * PARSE_OPT_NONEG: says that this option cannot be negated - * PARSE_OPT_HIDDEN: this option is skipped in the default usage, shown in - * the long one. - * - * `callback`:: - * pointer to the callback to use for OPTION_CALLBACK. - * - * `defval`:: - * default value to fill (*->value) with for PARSE_OPT_OPTARG. - * OPTION_{BIT,SET_INT,SET_PTR} store the {mask,integer,pointer} to put in - * the value when met. - * CALLBACKS can use it like they want. - */ -struct option { - enum parse_opt_type type; - int short_name; - const char *long_name; - void *value; - const char *argh; - const char *help; - - int flags; - parse_opt_cb *callback; - intptr_t defval; -}; - -#define OPT_END() { OPTION_END } -#define OPT_ARGUMENT(l, h) { OPTION_ARGUMENT, 0, (l), NULL, NULL, (h) } -#define OPT_GROUP(h) { OPTION_GROUP, 0, NULL, NULL, NULL, (h) } -#define OPT_BIT(s, l, v, h, b) { OPTION_BIT, (s), (l), (v), NULL, (h), 0, NULL, (b) } -#define OPT_BOOLEAN(s, l, v, h) { OPTION_BOOLEAN, (s), (l), (v), NULL, (h) } -#define OPT_SET_INT(s, l, v, h, i) { OPTION_SET_INT, (s), (l), (v), NULL, (h), 0, NULL, (i) } -#define OPT_SET_PTR(s, l, v, h, p) { OPTION_SET_PTR, (s), (l), (v), NULL, (h), 0, NULL, (p) } -#define OPT_INTEGER(s, l, v, h) { OPTION_INTEGER, (s), (l), (v), NULL, (h) } -#define OPT_STRING(s, l, v, a, h) { OPTION_STRING, (s), (l), (v), (a), (h) } -#define OPT_DATE(s, l, v, h) \ - { OPTION_CALLBACK, (s), (l), (v), "time", (h), 0, \ - parse_opt_approxidate_cb } -#define OPT_CALLBACK(s, l, v, a, h, f) \ - { OPTION_CALLBACK, (s), (l), (v), (a), (h), 0, (f) } - -/* parse_options() will filter out the processed options and leave the - * non-option arguments in argv[]. - * Returns the number of arguments left in argv[]. - */ -extern int parse_options(int argc, const char **argv, - const struct option *options, - const char * const usagestr[], int flags); - -extern NORETURN void usage_with_options(const char * const *usagestr, - const struct option *options); - -/*----- incremental advanced APIs -----*/ - -enum { - PARSE_OPT_HELP = -1, - PARSE_OPT_DONE, - PARSE_OPT_UNKNOWN, -}; - -/* - * It's okay for the caller to consume argv/argc in the usual way. - * Other fields of that structure are private to parse-options and should not - * be modified in any way. 
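- *
- * Minimal sketch of the incremental API (added for illustration; 'options'
- * and 'usagestr' are the caller's own tables, not names from this file):
- *
- *	struct parse_opt_ctx_t ctx;
- *
- *	parse_options_start(&ctx, argc, argv, 0);
- *	switch (parse_options_step(&ctx, options, usagestr)) {
- *	case PARSE_OPT_HELP:
- *		exit(129);
- *	}
- *	argc = parse_options_end(&ctx);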
- */ -struct parse_opt_ctx_t { - const char **argv; - const char **out; - int argc, cpidx; - const char *opt; - int flags; -}; - -extern int parse_options_usage(const char * const *usagestr, - const struct option *opts); - -extern void parse_options_start(struct parse_opt_ctx_t *ctx, - int argc, const char **argv, int flags); - -extern int parse_options_step(struct parse_opt_ctx_t *ctx, - const struct option *options, - const char * const usagestr[]); - -extern int parse_options_end(struct parse_opt_ctx_t *ctx); - - -/*----- some often used options -----*/ -extern int parse_opt_abbrev_cb(const struct option *, const char *, int); -extern int parse_opt_approxidate_cb(const struct option *, const char *, int); -extern int parse_opt_verbosity_cb(const struct option *, const char *, int); - -#define OPT__VERBOSE(var) OPT_BOOLEAN('v', "verbose", (var), "be verbose") -#define OPT__QUIET(var) OPT_BOOLEAN('q', "quiet", (var), "be quiet") -#define OPT__VERBOSITY(var) \ - { OPTION_CALLBACK, 'v', "verbose", (var), NULL, "be more verbose", \ - PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 }, \ - { OPTION_CALLBACK, 'q', "quiet", (var), NULL, "be more quiet", \ - PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 } -#define OPT__DRY_RUN(var) OPT_BOOLEAN('n', "dry-run", (var), "dry run") -#define OPT__ABBREV(var) \ - { OPTION_CALLBACK, 0, "abbrev", (var), "n", \ - "use digits to display SHA-1s", \ - PARSE_OPT_OPTARG, &parse_opt_abbrev_cb, 0 } - -extern const char *parse_options_fix_filename(const char *prefix, const char *file); - -#endif diff --git a/Documentation/perf_counter/path.c b/Documentation/perf_counter/path.c deleted file mode 100644 index a501a40dd2c..00000000000 --- a/Documentation/perf_counter/path.c +++ /dev/null @@ -1,353 +0,0 @@ -/* - * I'm tired of doing "vsnprintf()" etc just to open a - * file, so here's a "return static buffer with printf" - * interface for paths. - * - * It's obviously not thread-safe. Sue me. But it's quite - * useful for doing things like - * - * f = open(mkpath("%s/%s.perf", base, name), O_RDONLY); - * - * which is what it's designed for. - */ -#include "cache.h" - -static char bad_path[] = "/bad-path/"; -/* - * Two hacks: - */ - -static char *get_perf_dir(void) -{ - return "."; -} - -size_t strlcpy(char *dest, const char *src, size_t size) -{ - size_t ret = strlen(src); - - if (size) { - size_t len = (ret >= size) ? size - 1 : ret; - memcpy(dest, src, len); - dest[len] = '\0'; - } - return ret; -} - - -static char *get_pathname(void) -{ - static char pathname_array[4][PATH_MAX]; - static int index; - return pathname_array[3 & ++index]; -} - -static char *cleanup_path(char *path) -{ - /* Clean it up */ - if (!memcmp(path, "./", 2)) { - path += 2; - while (*path == '/') - path++; - } - return path; -} - -char *mksnpath(char *buf, size_t n, const char *fmt, ...) -{ - va_list args; - unsigned len; - - va_start(args, fmt); - len = vsnprintf(buf, n, fmt, args); - va_end(args); - if (len >= n) { - strlcpy(buf, bad_path, n); - return buf; - } - return cleanup_path(buf); -} - -static char *perf_vsnpath(char *buf, size_t n, const char *fmt, va_list args) -{ - const char *perf_dir = get_perf_dir(); - size_t len; - - len = strlen(perf_dir); - if (n < len + 1) - goto bad; - memcpy(buf, perf_dir, len); - if (len && !is_dir_sep(perf_dir[len-1])) - buf[len++] = '/'; - len += vsnprintf(buf + len, n - len, fmt, args); - if (len >= n) - goto bad; - return cleanup_path(buf); -bad: - strlcpy(buf, bad_path, n); - return buf; -} - -char *perf_snpath(char *buf, size_t n, const char *fmt, ...) 
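-/*
- * Illustration (added; not in the original): with get_perf_dir()
- * hardcoded to ".", perf_snpath(buf, sizeof(buf), "config") builds
- * "./config", which cleanup_path() trims to "config"; on overflow
- * the buffer is filled with bad_path instead.
- */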
-{ - va_list args; - va_start(args, fmt); - (void)perf_vsnpath(buf, n, fmt, args); - va_end(args); - return buf; -} - -char *perf_pathdup(const char *fmt, ...) -{ - char path[PATH_MAX]; - va_list args; - va_start(args, fmt); - (void)perf_vsnpath(path, sizeof(path), fmt, args); - va_end(args); - return xstrdup(path); -} - -char *mkpath(const char *fmt, ...) -{ - va_list args; - unsigned len; - char *pathname = get_pathname(); - - va_start(args, fmt); - len = vsnprintf(pathname, PATH_MAX, fmt, args); - va_end(args); - if (len >= PATH_MAX) - return bad_path; - return cleanup_path(pathname); -} - -char *perf_path(const char *fmt, ...) -{ - const char *perf_dir = get_perf_dir(); - char *pathname = get_pathname(); - va_list args; - unsigned len; - - len = strlen(perf_dir); - if (len > PATH_MAX-100) - return bad_path; - memcpy(pathname, perf_dir, len); - if (len && perf_dir[len-1] != '/') - pathname[len++] = '/'; - va_start(args, fmt); - len += vsnprintf(pathname + len, PATH_MAX - len, fmt, args); - va_end(args); - if (len >= PATH_MAX) - return bad_path; - return cleanup_path(pathname); -} - - -/* perf_mkstemp() - create tmp file honoring TMPDIR variable */ -int perf_mkstemp(char *path, size_t len, const char *template) -{ - const char *tmp; - size_t n; - - tmp = getenv("TMPDIR"); - if (!tmp) - tmp = "/tmp"; - n = snprintf(path, len, "%s/%s", tmp, template); - if (len <= n) { - errno = ENAMETOOLONG; - return -1; - } - return mkstemp(path); -} - - -const char *make_relative_path(const char *abs, const char *base) -{ - static char buf[PATH_MAX + 1]; - int baselen; - if (!base) - return abs; - baselen = strlen(base); - if (prefixcmp(abs, base)) - return abs; - if (abs[baselen] == '/') - baselen++; - else if (base[baselen - 1] != '/') - return abs; - strcpy(buf, abs + baselen); - return buf; -} - -/* - * It is okay if dst == src, but they should not overlap otherwise. - * - * Performs the following normalizations on src, storing the result in dst: - * - Ensures that components are separated by '/' (Windows only) - * - Squashes sequences of '/'. - * - Removes "." components. - * - Removes ".." components, and the components the precede them. - * Returns failure (non-zero) if a ".." component appears as first path - * component anytime during the normalization. Otherwise, returns success (0). - * - * Note that this function is purely textual. It does not follow symlinks, - * verify the existence of the path, or make any system calls. - */ -int normalize_path_copy(char *dst, const char *src) -{ - char *dst0; - - if (has_dos_drive_prefix(src)) { - *dst++ = *src++; - *dst++ = *src++; - } - dst0 = dst; - - if (is_dir_sep(*src)) { - *dst++ = '/'; - while (is_dir_sep(*src)) - src++; - } - - for (;;) { - char c = *src; - - /* - * A path component that begins with . could be - * special: - * (1) "." and ends -- ignore and terminate. - * (2) "./" -- ignore them, eat slash and continue. - * (3) ".." and ends -- strip one and terminate. - * (4) "../" -- strip one, eat slash and continue. 
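- *
- * Examples (added for illustration; not in the original):
- *	"a/./b"    -> "a/b"	(case 2)
- *	"a/b/../c" -> "a/c"	(case 4, via up_one)
- *	"../a"     -> failure: the ".." has nothing to strip.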
- */ - if (c == '.') { - if (!src[1]) { - /* (1) */ - src++; - } else if (is_dir_sep(src[1])) { - /* (2) */ - src += 2; - while (is_dir_sep(*src)) - src++; - continue; - } else if (src[1] == '.') { - if (!src[2]) { - /* (3) */ - src += 2; - goto up_one; - } else if (is_dir_sep(src[2])) { - /* (4) */ - src += 3; - while (is_dir_sep(*src)) - src++; - goto up_one; - } - } - } - - /* copy up to the next '/', and eat all '/' */ - while ((c = *src++) != '\0' && !is_dir_sep(c)) - *dst++ = c; - if (is_dir_sep(c)) { - *dst++ = '/'; - while (is_dir_sep(c)) - c = *src++; - src--; - } else if (!c) - break; - continue; - - up_one: - /* - * dst0..dst is prefix portion, and dst[-1] is '/'; - * go up one level. - */ - dst--; /* go to trailing '/' */ - if (dst <= dst0) - return -1; - /* Windows: dst[-1] cannot be backslash anymore */ - while (dst0 < dst && dst[-1] != '/') - dst--; - } - *dst = '\0'; - return 0; -} - -/* - * path = Canonical absolute path - * prefix_list = Colon-separated list of absolute paths - * - * Determines, for each path in prefix_list, whether the "prefix" really - * is an ancestor directory of path. Returns the length of the longest - * ancestor directory, excluding any trailing slashes, or -1 if no prefix - * is an ancestor. (Note that this means 0 is returned if prefix_list is - * "/".) "/foo" is not considered an ancestor of "/foobar". Directories - * are not considered to be their own ancestors. path must be in a - * canonical form: empty components, or "." or ".." components are not - * allowed. prefix_list may be null, which is like "". - */ -int longest_ancestor_length(const char *path, const char *prefix_list) -{ - char buf[PATH_MAX+1]; - const char *ceil, *colon; - int len, max_len = -1; - - if (prefix_list == NULL || !strcmp(path, "/")) - return -1; - - for (colon = ceil = prefix_list; *colon; ceil = colon+1) { - for (colon = ceil; *colon && *colon != PATH_SEP; colon++); - len = colon - ceil; - if (len == 0 || len > PATH_MAX || !is_absolute_path(ceil)) - continue; - strlcpy(buf, ceil, len+1); - if (normalize_path_copy(buf, buf) < 0) - continue; - len = strlen(buf); - if (len > 0 && buf[len-1] == '/') - buf[--len] = '\0'; - - if (!strncmp(path, buf, len) && - path[len] == '/' && - len > max_len) { - max_len = len; - } - } - - return max_len; -} - -/* strip arbitrary amount of directory separators at end of path */ -static inline int chomp_trailing_dir_sep(const char *path, int len) -{ - while (len && is_dir_sep(path[len - 1])) - len--; - return len; -} - -/* - * If path ends with suffix (complete path components), returns the - * part before suffix (sans trailing directory separators). - * Otherwise returns NULL. 
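- *
- * For instance (added; not in the original):
- *	strip_path_suffix("/usr/libexec/perf-core", "perf-core")
- * yields "/usr/libexec", while asking for the suffix "core" returns
- * NULL because "core" is not a complete trailing component.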
- */ -char *strip_path_suffix(const char *path, const char *suffix) -{ - int path_len = strlen(path), suffix_len = strlen(suffix); - - while (suffix_len) { - if (!path_len) - return NULL; - - if (is_dir_sep(path[path_len - 1])) { - if (!is_dir_sep(suffix[suffix_len - 1])) - return NULL; - path_len = chomp_trailing_dir_sep(path, path_len); - suffix_len = chomp_trailing_dir_sep(suffix, suffix_len); - } - else if (path[--path_len] != suffix[--suffix_len]) - return NULL; - } - - if (path_len && !is_dir_sep(path[path_len - 1])) - return NULL; - return xstrndup(path, chomp_trailing_dir_sep(path, path_len)); -} diff --git a/Documentation/perf_counter/perf.c b/Documentation/perf_counter/perf.c index 8d6faecdc15..594d270be39 100644 --- a/Documentation/perf_counter/perf.c +++ b/Documentation/perf_counter/perf.c @@ -1,8 +1,8 @@ #include "builtin.h" -#include "exec_cmd.h" -#include "cache.h" -#include "quote.h" -#include "run-command.h" +#include "util/exec_cmd.h" +#include "util/cache.h" +#include "util/quote.h" +#include "util/run-command.h" const char perf_usage_string[] = "perf [--version] [--help] COMMAND [ARGS]"; diff --git a/Documentation/perf_counter/quote.c b/Documentation/perf_counter/quote.c deleted file mode 100644 index 7a49fcf6967..00000000000 --- a/Documentation/perf_counter/quote.c +++ /dev/null @@ -1,478 +0,0 @@ -#include "cache.h" -#include "quote.h" - -int quote_path_fully = 1; - -/* Help to copy the thing properly quoted for the shell safety. - * any single quote is replaced with '\'', any exclamation point - * is replaced with '\!', and the whole thing is enclosed in a - * - * E.g. - * original sq_quote result - * name ==> name ==> 'name' - * a b ==> a b ==> 'a b' - * a'b ==> a'\''b ==> 'a'\''b' - * a!b ==> a'\!'b ==> 'a'\!'b' - */ -static inline int need_bs_quote(char c) -{ - return (c == '\'' || c == '!'); -} - -void sq_quote_buf(struct strbuf *dst, const char *src) -{ - char *to_free = NULL; - - if (dst->buf == src) - to_free = strbuf_detach(dst, NULL); - - strbuf_addch(dst, '\''); - while (*src) { - size_t len = strcspn(src, "'!"); - strbuf_add(dst, src, len); - src += len; - while (need_bs_quote(*src)) { - strbuf_addstr(dst, "'\\"); - strbuf_addch(dst, *src++); - strbuf_addch(dst, '\''); - } - } - strbuf_addch(dst, '\''); - free(to_free); -} - -void sq_quote_print(FILE *stream, const char *src) -{ - char c; - - fputc('\'', stream); - while ((c = *src++)) { - if (need_bs_quote(c)) { - fputs("'\\", stream); - fputc(c, stream); - fputc('\'', stream); - } else { - fputc(c, stream); - } - } - fputc('\'', stream); -} - -void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen) -{ - int i; - - /* Copy into destination buffer. 
*/ - strbuf_grow(dst, 255); - for (i = 0; argv[i]; ++i) { - strbuf_addch(dst, ' '); - sq_quote_buf(dst, argv[i]); - if (maxlen && dst->len > maxlen) - die("Too many or long arguments"); - } -} - -char *sq_dequote_step(char *arg, char **next) -{ - char *dst = arg; - char *src = arg; - char c; - - if (*src != '\'') - return NULL; - for (;;) { - c = *++src; - if (!c) - return NULL; - if (c != '\'') { - *dst++ = c; - continue; - } - /* We stepped out of sq */ - switch (*++src) { - case '\0': - *dst = 0; - if (next) - *next = NULL; - return arg; - case '\\': - c = *++src; - if (need_bs_quote(c) && *++src == '\'') { - *dst++ = c; - continue; - } - /* Fallthrough */ - default: - if (!next || !isspace(*src)) - return NULL; - do { - c = *++src; - } while (isspace(c)); - *dst = 0; - *next = src; - return arg; - } - } -} - -char *sq_dequote(char *arg) -{ - return sq_dequote_step(arg, NULL); -} - -int sq_dequote_to_argv(char *arg, const char ***argv, int *nr, int *alloc) -{ - char *next = arg; - - if (!*arg) - return 0; - do { - char *dequoted = sq_dequote_step(next, &next); - if (!dequoted) - return -1; - ALLOC_GROW(*argv, *nr + 1, *alloc); - (*argv)[(*nr)++] = dequoted; - } while (next); - - return 0; -} - -/* 1 means: quote as octal - * 0 means: quote as octal if (quote_path_fully) - * -1 means: never quote - * c: quote as "\\c" - */ -#define X8(x) x, x, x, x, x, x, x, x -#define X16(x) X8(x), X8(x) -static signed char const sq_lookup[256] = { - /* 0 1 2 3 4 5 6 7 */ - /* 0x00 */ 1, 1, 1, 1, 1, 1, 1, 'a', - /* 0x08 */ 'b', 't', 'n', 'v', 'f', 'r', 1, 1, - /* 0x10 */ X16(1), - /* 0x20 */ -1, -1, '"', -1, -1, -1, -1, -1, - /* 0x28 */ X16(-1), X16(-1), X16(-1), - /* 0x58 */ -1, -1, -1, -1,'\\', -1, -1, -1, - /* 0x60 */ X16(-1), X8(-1), - /* 0x78 */ -1, -1, -1, -1, -1, -1, -1, 1, - /* 0x80 */ /* set to 0 */ -}; - -static inline int sq_must_quote(char c) -{ - return sq_lookup[(unsigned char)c] + quote_path_fully > 0; -} - -/* returns the longest prefix not needing a quote up to maxlen if positive. - This stops at the first \0 because it's marked as a character needing an - escape */ -static size_t next_quote_pos(const char *s, ssize_t maxlen) -{ - size_t len; - if (maxlen < 0) { - for (len = 0; !sq_must_quote(s[len]); len++); - } else { - for (len = 0; len < maxlen && !sq_must_quote(s[len]); len++); - } - return len; -} - -/* - * C-style name quoting. - * - * (1) if sb and fp are both NULL, inspect the input name and counts the - * number of bytes that are needed to hold c_style quoted version of name, - * counting the double quotes around it but not terminating NUL, and - * returns it. - * However, if name does not need c_style quoting, it returns 0. - * - * (2) if sb or fp are not NULL, it emits the c_style quoted version - * of name, enclosed with double quotes if asked and needed only. - * Return value is the same as in (1). 
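- *
- * E.g. (added; not in the original) a name containing a tab, "a<TAB>b",
- * is emitted as "a\tb" inside double quotes and the count is 6; a plain
- * "name" needs no quoting, so it is passed through unquoted and 0 is
- * returned.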
- */ -static size_t quote_c_style_counted(const char *name, ssize_t maxlen, - struct strbuf *sb, FILE *fp, int no_dq) -{ -#undef EMIT -#define EMIT(c) \ - do { \ - if (sb) strbuf_addch(sb, (c)); \ - if (fp) fputc((c), fp); \ - count++; \ - } while (0) -#define EMITBUF(s, l) \ - do { \ - if (sb) strbuf_add(sb, (s), (l)); \ - if (fp) fwrite((s), (l), 1, fp); \ - count += (l); \ - } while (0) - - size_t len, count = 0; - const char *p = name; - - for (;;) { - int ch; - - len = next_quote_pos(p, maxlen); - if (len == maxlen || !p[len]) - break; - - if (!no_dq && p == name) - EMIT('"'); - - EMITBUF(p, len); - EMIT('\\'); - p += len; - ch = (unsigned char)*p++; - if (sq_lookup[ch] >= ' ') { - EMIT(sq_lookup[ch]); - } else { - EMIT(((ch >> 6) & 03) + '0'); - EMIT(((ch >> 3) & 07) + '0'); - EMIT(((ch >> 0) & 07) + '0'); - } - } - - EMITBUF(p, len); - if (p == name) /* no ending quote needed */ - return 0; - - if (!no_dq) - EMIT('"'); - return count; -} - -size_t quote_c_style(const char *name, struct strbuf *sb, FILE *fp, int nodq) -{ - return quote_c_style_counted(name, -1, sb, fp, nodq); -} - -void quote_two_c_style(struct strbuf *sb, const char *prefix, const char *path, int nodq) -{ - if (quote_c_style(prefix, NULL, NULL, 0) || - quote_c_style(path, NULL, NULL, 0)) { - if (!nodq) - strbuf_addch(sb, '"'); - quote_c_style(prefix, sb, NULL, 1); - quote_c_style(path, sb, NULL, 1); - if (!nodq) - strbuf_addch(sb, '"'); - } else { - strbuf_addstr(sb, prefix); - strbuf_addstr(sb, path); - } -} - -void write_name_quoted(const char *name, FILE *fp, int terminator) -{ - if (terminator) { - quote_c_style(name, NULL, fp, 0); - } else { - fputs(name, fp); - } - fputc(terminator, fp); -} - -extern void write_name_quotedpfx(const char *pfx, size_t pfxlen, - const char *name, FILE *fp, int terminator) -{ - int needquote = 0; - - if (terminator) { - needquote = next_quote_pos(pfx, pfxlen) < pfxlen - || name[next_quote_pos(name, -1)]; - } - if (needquote) { - fputc('"', fp); - quote_c_style_counted(pfx, pfxlen, NULL, fp, 1); - quote_c_style(name, NULL, fp, 1); - fputc('"', fp); - } else { - fwrite(pfx, pfxlen, 1, fp); - fputs(name, fp); - } - fputc(terminator, fp); -} - -/* quote path as relative to the given prefix */ -char *quote_path_relative(const char *in, int len, - struct strbuf *out, const char *prefix) -{ - int needquote; - - if (len < 0) - len = strlen(in); - - /* "../" prefix itself does not need quoting, but "in" might. */ - needquote = next_quote_pos(in, len) < len; - strbuf_setlen(out, 0); - strbuf_grow(out, len); - - if (needquote) - strbuf_addch(out, '"'); - if (prefix) { - int off = 0; - while (prefix[off] && off < len && prefix[off] == in[off]) - if (prefix[off] == '/') { - prefix += off + 1; - in += off + 1; - len -= off + 1; - off = 0; - } else - off++; - - for (; *prefix; prefix++) - if (*prefix == '/') - strbuf_addstr(out, "../"); - } - - quote_c_style_counted (in, len, out, NULL, 1); - - if (needquote) - strbuf_addch(out, '"'); - if (!out->len) - strbuf_addstr(out, "./"); - - return out->buf; -} - -/* - * C-style name unquoting. - * - * Quoted should point at the opening double quote. - * + Returns 0 if it was able to unquote the string properly, and appends the - * result in the strbuf `sb'. - * + Returns -1 in case of error, and doesn't touch the strbuf. Though note - * that this function will allocate memory in the strbuf, so calling - * strbuf_release is mandatory whichever result unquote_c_style returns. 
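- *
- * E.g. (added; not in the original) unquoting "a\101b" appends "aAb"
- * to sb: the backslash escape's three octal digits, 101, decode to 'A'.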
- * - * Updates endp pointer to point at one past the ending double quote if given. - */ -int unquote_c_style(struct strbuf *sb, const char *quoted, const char **endp) -{ - size_t oldlen = sb->len, len; - int ch, ac; - - if (*quoted++ != '"') - return -1; - - for (;;) { - len = strcspn(quoted, "\"\\"); - strbuf_add(sb, quoted, len); - quoted += len; - - switch (*quoted++) { - case '"': - if (endp) - *endp = quoted; - return 0; - case '\\': - break; - default: - goto error; - } - - switch ((ch = *quoted++)) { - case 'a': ch = '\a'; break; - case 'b': ch = '\b'; break; - case 'f': ch = '\f'; break; - case 'n': ch = '\n'; break; - case 'r': ch = '\r'; break; - case 't': ch = '\t'; break; - case 'v': ch = '\v'; break; - - case '\\': case '"': - break; /* verbatim */ - - /* octal values with first digit over 4 overflow */ - case '0': case '1': case '2': case '3': - ac = ((ch - '0') << 6); - if ((ch = *quoted++) < '0' || '7' < ch) - goto error; - ac |= ((ch - '0') << 3); - if ((ch = *quoted++) < '0' || '7' < ch) - goto error; - ac |= (ch - '0'); - ch = ac; - break; - default: - goto error; - } - strbuf_addch(sb, ch); - } - - error: - strbuf_setlen(sb, oldlen); - return -1; -} - -/* quoting as a string literal for other languages */ - -void perl_quote_print(FILE *stream, const char *src) -{ - const char sq = '\''; - const char bq = '\\'; - char c; - - fputc(sq, stream); - while ((c = *src++)) { - if (c == sq || c == bq) - fputc(bq, stream); - fputc(c, stream); - } - fputc(sq, stream); -} - -void python_quote_print(FILE *stream, const char *src) -{ - const char sq = '\''; - const char bq = '\\'; - const char nl = '\n'; - char c; - - fputc(sq, stream); - while ((c = *src++)) { - if (c == nl) { - fputc(bq, stream); - fputc('n', stream); - continue; - } - if (c == sq || c == bq) - fputc(bq, stream); - fputc(c, stream); - } - fputc(sq, stream); -} - -void tcl_quote_print(FILE *stream, const char *src) -{ - char c; - - fputc('"', stream); - while ((c = *src++)) { - switch (c) { - case '[': case ']': - case '{': case '}': - case '$': case '\\': case '"': - fputc('\\', stream); - default: - fputc(c, stream); - break; - case '\f': - fputs("\\f", stream); - break; - case '\r': - fputs("\\r", stream); - break; - case '\n': - fputs("\\n", stream); - break; - case '\t': - fputs("\\t", stream); - break; - case '\v': - fputs("\\v", stream); - break; - } - } - fputc('"', stream); -} diff --git a/Documentation/perf_counter/quote.h b/Documentation/perf_counter/quote.h deleted file mode 100644 index 66730f2bff3..00000000000 --- a/Documentation/perf_counter/quote.h +++ /dev/null @@ -1,68 +0,0 @@ -#ifndef QUOTE_H -#define QUOTE_H - -#include -#include - -/* Help to copy the thing properly quoted for the shell safety. - * any single quote is replaced with '\'', any exclamation point - * is replaced with '\!', and the whole thing is enclosed in a - * single quote pair. - * - * For example, if you are passing the result to system() as an - * argument: - * - * sprintf(cmd, "foobar %s %s", sq_quote(arg0), sq_quote(arg1)) - * - * would be appropriate. If the system() is going to call ssh to - * run the command on the other side: - * - * sprintf(cmd, "git-diff-tree %s %s", sq_quote(arg0), sq_quote(arg1)); - * sprintf(rcmd, "ssh %s %s", sq_quote(host), sq_quote(cmd)); - * - * Note that the above examples leak memory! Remember to free result from - * sq_quote() in a real application. 
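- *
- * A round trip (added for illustration; not in the original):
- * sq_quote of  a'b  gives  'a'\''b' , and sq_dequote() maps that
- * back to  a'b .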
- * - * sq_quote_buf() writes to an existing buffer of specified size; it - * will return the number of characters that would have been written - * excluding the final null regardless of the buffer size. - */ - -extern void sq_quote_print(FILE *stream, const char *src); - -extern void sq_quote_buf(struct strbuf *, const char *src); -extern void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen); - -/* This unwraps what sq_quote() produces in place, but returns - * NULL if the input does not look like what sq_quote would have - * produced. - */ -extern char *sq_dequote(char *); - -/* - * Same as the above, but can be used to unwrap many arguments in the - * same string separated by space. "next" is changed to point to the - * next argument that should be passed as first parameter. When there - * is no more argument to be dequoted, "next" is updated to point to NULL. - */ -extern char *sq_dequote_step(char *arg, char **next); -extern int sq_dequote_to_argv(char *arg, const char ***argv, int *nr, int *alloc); - -extern int unquote_c_style(struct strbuf *, const char *quoted, const char **endp); -extern size_t quote_c_style(const char *name, struct strbuf *, FILE *, int no_dq); -extern void quote_two_c_style(struct strbuf *, const char *, const char *, int); - -extern void write_name_quoted(const char *name, FILE *, int terminator); -extern void write_name_quotedpfx(const char *pfx, size_t pfxlen, - const char *name, FILE *, int terminator); - -/* quote path as relative to the given prefix */ -char *quote_path_relative(const char *in, int len, - struct strbuf *out, const char *prefix); - -/* quoting as a string literal for other languages */ -extern void perl_quote_print(FILE *stream, const char *src); -extern void python_quote_print(FILE *stream, const char *src); -extern void tcl_quote_print(FILE *stream, const char *src); - -#endif diff --git a/Documentation/perf_counter/run-command.c b/Documentation/perf_counter/run-command.c deleted file mode 100644 index b2f5e854f40..00000000000 --- a/Documentation/perf_counter/run-command.c +++ /dev/null @@ -1,395 +0,0 @@ -#include "cache.h" -#include "run-command.h" -#include "exec_cmd.h" - -static inline void close_pair(int fd[2]) -{ - close(fd[0]); - close(fd[1]); -} - -static inline void dup_devnull(int to) -{ - int fd = open("/dev/null", O_RDWR); - dup2(fd, to); - close(fd); -} - -int start_command(struct child_process *cmd) -{ - int need_in, need_out, need_err; - int fdin[2], fdout[2], fderr[2]; - - /* - * In case of errors we must keep the promise to close FDs - * that have been passed in via ->in and ->out. 
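- *
- * Typical use (added; not in the original): a caller that sets
- * .out = -1 gets the readable end of the child's stdout pipe back
- * in cmd->out and must read from it and close it itself; the full
- * .in/.out/.err contract is spelled out in run-command.h.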
- */ - - need_in = !cmd->no_stdin && cmd->in < 0; - if (need_in) { - if (pipe(fdin) < 0) { - if (cmd->out > 0) - close(cmd->out); - return -ERR_RUN_COMMAND_PIPE; - } - cmd->in = fdin[1]; - } - - need_out = !cmd->no_stdout - && !cmd->stdout_to_stderr - && cmd->out < 0; - if (need_out) { - if (pipe(fdout) < 0) { - if (need_in) - close_pair(fdin); - else if (cmd->in) - close(cmd->in); - return -ERR_RUN_COMMAND_PIPE; - } - cmd->out = fdout[0]; - } - - need_err = !cmd->no_stderr && cmd->err < 0; - if (need_err) { - if (pipe(fderr) < 0) { - if (need_in) - close_pair(fdin); - else if (cmd->in) - close(cmd->in); - if (need_out) - close_pair(fdout); - else if (cmd->out) - close(cmd->out); - return -ERR_RUN_COMMAND_PIPE; - } - cmd->err = fderr[0]; - } - -#ifndef __MINGW32__ - fflush(NULL); - cmd->pid = fork(); - if (!cmd->pid) { - if (cmd->no_stdin) - dup_devnull(0); - else if (need_in) { - dup2(fdin[0], 0); - close_pair(fdin); - } else if (cmd->in) { - dup2(cmd->in, 0); - close(cmd->in); - } - - if (cmd->no_stderr) - dup_devnull(2); - else if (need_err) { - dup2(fderr[1], 2); - close_pair(fderr); - } - - if (cmd->no_stdout) - dup_devnull(1); - else if (cmd->stdout_to_stderr) - dup2(2, 1); - else if (need_out) { - dup2(fdout[1], 1); - close_pair(fdout); - } else if (cmd->out > 1) { - dup2(cmd->out, 1); - close(cmd->out); - } - - if (cmd->dir && chdir(cmd->dir)) - die("exec %s: cd to %s failed (%s)", cmd->argv[0], - cmd->dir, strerror(errno)); - if (cmd->env) { - for (; *cmd->env; cmd->env++) { - if (strchr(*cmd->env, '=')) - putenv((char*)*cmd->env); - else - unsetenv(*cmd->env); - } - } - if (cmd->preexec_cb) - cmd->preexec_cb(); - if (cmd->perf_cmd) { - execv_perf_cmd(cmd->argv); - } else { - execvp(cmd->argv[0], (char *const*) cmd->argv); - } - exit(127); - } -#else - int s0 = -1, s1 = -1, s2 = -1; /* backups of stdin, stdout, stderr */ - const char **sargv = cmd->argv; - char **env = environ; - - if (cmd->no_stdin) { - s0 = dup(0); - dup_devnull(0); - } else if (need_in) { - s0 = dup(0); - dup2(fdin[0], 0); - } else if (cmd->in) { - s0 = dup(0); - dup2(cmd->in, 0); - } - - if (cmd->no_stderr) { - s2 = dup(2); - dup_devnull(2); - } else if (need_err) { - s2 = dup(2); - dup2(fderr[1], 2); - } - - if (cmd->no_stdout) { - s1 = dup(1); - dup_devnull(1); - } else if (cmd->stdout_to_stderr) { - s1 = dup(1); - dup2(2, 1); - } else if (need_out) { - s1 = dup(1); - dup2(fdout[1], 1); - } else if (cmd->out > 1) { - s1 = dup(1); - dup2(cmd->out, 1); - } - - if (cmd->dir) - die("chdir in start_command() not implemented"); - if (cmd->env) { - env = copy_environ(); - for (; *cmd->env; cmd->env++) - env = env_setenv(env, *cmd->env); - } - - if (cmd->perf_cmd) { - cmd->argv = prepare_perf_cmd(cmd->argv); - } - - cmd->pid = mingw_spawnvpe(cmd->argv[0], cmd->argv, env); - - if (cmd->env) - free_environ(env); - if (cmd->perf_cmd) - free(cmd->argv); - - cmd->argv = sargv; - if (s0 >= 0) - dup2(s0, 0), close(s0); - if (s1 >= 0) - dup2(s1, 1), close(s1); - if (s2 >= 0) - dup2(s2, 2), close(s2); -#endif - - if (cmd->pid < 0) { - int err = errno; - if (need_in) - close_pair(fdin); - else if (cmd->in) - close(cmd->in); - if (need_out) - close_pair(fdout); - else if (cmd->out) - close(cmd->out); - if (need_err) - close_pair(fderr); - return err == ENOENT ? 
- -ERR_RUN_COMMAND_EXEC : - -ERR_RUN_COMMAND_FORK; - } - - if (need_in) - close(fdin[0]); - else if (cmd->in) - close(cmd->in); - - if (need_out) - close(fdout[1]); - else if (cmd->out) - close(cmd->out); - - if (need_err) - close(fderr[1]); - - return 0; -} - -static int wait_or_whine(pid_t pid) -{ - for (;;) { - int status, code; - pid_t waiting = waitpid(pid, &status, 0); - - if (waiting < 0) { - if (errno == EINTR) - continue; - error("waitpid failed (%s)", strerror(errno)); - return -ERR_RUN_COMMAND_WAITPID; - } - if (waiting != pid) - return -ERR_RUN_COMMAND_WAITPID_WRONG_PID; - if (WIFSIGNALED(status)) - return -ERR_RUN_COMMAND_WAITPID_SIGNAL; - - if (!WIFEXITED(status)) - return -ERR_RUN_COMMAND_WAITPID_NOEXIT; - code = WEXITSTATUS(status); - switch (code) { - case 127: - return -ERR_RUN_COMMAND_EXEC; - case 0: - return 0; - default: - return -code; - } - } -} - -int finish_command(struct child_process *cmd) -{ - return wait_or_whine(cmd->pid); -} - -int run_command(struct child_process *cmd) -{ - int code = start_command(cmd); - if (code) - return code; - return finish_command(cmd); -} - -static void prepare_run_command_v_opt(struct child_process *cmd, - const char **argv, - int opt) -{ - memset(cmd, 0, sizeof(*cmd)); - cmd->argv = argv; - cmd->no_stdin = opt & RUN_COMMAND_NO_STDIN ? 1 : 0; - cmd->perf_cmd = opt & RUN_PERF_CMD ? 1 : 0; - cmd->stdout_to_stderr = opt & RUN_COMMAND_STDOUT_TO_STDERR ? 1 : 0; -} - -int run_command_v_opt(const char **argv, int opt) -{ - struct child_process cmd; - prepare_run_command_v_opt(&cmd, argv, opt); - return run_command(&cmd); -} - -int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env) -{ - struct child_process cmd; - prepare_run_command_v_opt(&cmd, argv, opt); - cmd.dir = dir; - cmd.env = env; - return run_command(&cmd); -} - -#ifdef __MINGW32__ -static __stdcall unsigned run_thread(void *data) -{ - struct async *async = data; - return async->proc(async->fd_for_proc, async->data); -} -#endif - -int start_async(struct async *async) -{ - int pipe_out[2]; - - if (pipe(pipe_out) < 0) - return error("cannot create pipe: %s", strerror(errno)); - async->out = pipe_out[0]; - -#ifndef __MINGW32__ - /* Flush stdio before fork() to avoid cloning buffers */ - fflush(NULL); - - async->pid = fork(); - if (async->pid < 0) { - error("fork (async) failed: %s", strerror(errno)); - close_pair(pipe_out); - return -1; - } - if (!async->pid) { - close(pipe_out[0]); - exit(!!async->proc(pipe_out[1], async->data)); - } - close(pipe_out[1]); -#else - async->fd_for_proc = pipe_out[1]; - async->tid = (HANDLE) _beginthreadex(NULL, 0, run_thread, async, 0, NULL); - if (!async->tid) { - error("cannot create thread: %s", strerror(errno)); - close_pair(pipe_out); - return -1; - } -#endif - return 0; -} - -int finish_async(struct async *async) -{ -#ifndef __MINGW32__ - int ret = 0; - - if (wait_or_whine(async->pid)) - ret = error("waitpid (async) failed"); -#else - DWORD ret = 0; - if (WaitForSingleObject(async->tid, INFINITE) != WAIT_OBJECT_0) - ret = error("waiting for thread failed: %lu", GetLastError()); - else if (!GetExitCodeThread(async->tid, &ret)) - ret = error("cannot get thread exit code: %lu", GetLastError()); - CloseHandle(async->tid); -#endif - return ret; -} - -int run_hook(const char *index_file, const char *name, ...) 
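-/*
- * Illustration (added; not in the original -- "pre-record" is a made-up
- * hook name): run_hook(NULL, "pre-record", "some-arg", NULL) resolves
- * hooks/pre-record via perf_path(), runs it only if it is executable,
- * and exports PERF_INDEX_FILE only when index_file is non-NULL.
- */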
-{ - struct child_process hook; - const char **argv = NULL, *env[2]; - char index[PATH_MAX]; - va_list args; - int ret; - size_t i = 0, alloc = 0; - - if (access(perf_path("hooks/%s", name), X_OK) < 0) - return 0; - - va_start(args, name); - ALLOC_GROW(argv, i + 1, alloc); - argv[i++] = perf_path("hooks/%s", name); - while (argv[i-1]) { - ALLOC_GROW(argv, i + 1, alloc); - argv[i++] = va_arg(args, const char *); - } - va_end(args); - - memset(&hook, 0, sizeof(hook)); - hook.argv = argv; - hook.no_stdin = 1; - hook.stdout_to_stderr = 1; - if (index_file) { - snprintf(index, sizeof(index), "PERF_INDEX_FILE=%s", index_file); - env[0] = index; - env[1] = NULL; - hook.env = env; - } - - ret = start_command(&hook); - free(argv); - if (ret) { - warning("Could not spawn %s", argv[0]); - return ret; - } - ret = finish_command(&hook); - if (ret == -ERR_RUN_COMMAND_WAITPID_SIGNAL) - warning("%s exited due to uncaught signal", argv[0]); - - return ret; -} diff --git a/Documentation/perf_counter/run-command.h b/Documentation/perf_counter/run-command.h deleted file mode 100644 index 328289f2366..00000000000 --- a/Documentation/perf_counter/run-command.h +++ /dev/null @@ -1,93 +0,0 @@ -#ifndef RUN_COMMAND_H -#define RUN_COMMAND_H - -enum { - ERR_RUN_COMMAND_FORK = 10000, - ERR_RUN_COMMAND_EXEC, - ERR_RUN_COMMAND_PIPE, - ERR_RUN_COMMAND_WAITPID, - ERR_RUN_COMMAND_WAITPID_WRONG_PID, - ERR_RUN_COMMAND_WAITPID_SIGNAL, - ERR_RUN_COMMAND_WAITPID_NOEXIT, -}; -#define IS_RUN_COMMAND_ERR(x) (-(x) >= ERR_RUN_COMMAND_FORK) - -struct child_process { - const char **argv; - pid_t pid; - /* - * Using .in, .out, .err: - * - Specify 0 for no redirections (child inherits stdin, stdout, - * stderr from parent). - * - Specify -1 to have a pipe allocated as follows: - * .in: returns the writable pipe end; parent writes to it, - * the readable pipe end becomes child's stdin - * .out, .err: returns the readable pipe end; parent reads from - * it, the writable pipe end becomes child's stdout/stderr - * The caller of start_command() must close the returned FDs - * after it has completed reading from/writing to it! - * - Specify > 0 to set a channel to a particular FD as follows: - * .in: a readable FD, becomes child's stdin - * .out: a writable FD, becomes child's stdout/stderr - * .err > 0 not supported - * The specified FD is closed by start_command(), even in case - * of errors! - */ - int in; - int out; - int err; - const char *dir; - const char *const *env; - unsigned no_stdin:1; - unsigned no_stdout:1; - unsigned no_stderr:1; - unsigned perf_cmd:1; /* if this is to be perf sub-command */ - unsigned stdout_to_stderr:1; - void (*preexec_cb)(void); -}; - -int start_command(struct child_process *); -int finish_command(struct child_process *); -int run_command(struct child_process *); - -extern int run_hook(const char *index_file, const char *name, ...); - -#define RUN_COMMAND_NO_STDIN 1 -#define RUN_PERF_CMD 2 /*If this is to be perf sub-command */ -#define RUN_COMMAND_STDOUT_TO_STDERR 4 -int run_command_v_opt(const char **argv, int opt); - -/* - * env (the environment) is to be formatted like environ: "VAR=VALUE". - * To unset an environment variable use just "VAR". - */ -int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env); - -/* - * The purpose of the following functions is to feed a pipe by running - * a function asynchronously and providing output that the caller reads. 
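 *
 * An illustrative producer (the name "produce" is made up):
 *
 *	static int produce(int fd, void *data)
 *	{
 *		int err = write_in_full(fd, "hello\n", 6) != 6;
 *		close(fd);
 *		return err;
 *	}
 *
 *	struct async as = { .proc = produce };
 *	start_async(&as);
 *	... read from as.out, then close it ...
 *	finish_async(&as);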
- * - * It is expected that no synchronization and mutual exclusion between - * the caller and the feed function is necessary so that the function - * can run in a thread without interfering with the caller. - */ -struct async { - /* - * proc writes to fd and closes it; - * returns 0 on success, non-zero on failure - */ - int (*proc)(int fd, void *data); - void *data; - int out; /* caller reads from here and closes it */ -#ifndef __MINGW32__ - pid_t pid; -#else - HANDLE tid; - int fd_for_proc; -#endif -}; - -int start_async(struct async *async); -int finish_async(struct async *async); - -#endif diff --git a/Documentation/perf_counter/strbuf.c b/Documentation/perf_counter/strbuf.c deleted file mode 100644 index eaba0930680..00000000000 --- a/Documentation/perf_counter/strbuf.c +++ /dev/null @@ -1,359 +0,0 @@ -#include "cache.h" - -int prefixcmp(const char *str, const char *prefix) -{ - for (; ; str++, prefix++) - if (!*prefix) - return 0; - else if (*str != *prefix) - return (unsigned char)*prefix - (unsigned char)*str; -} - -/* - * Used as the default ->buf value, so that people can always assume - * buf is non NULL and ->buf is NUL terminated even for a freshly - * initialized strbuf. - */ -char strbuf_slopbuf[1]; - -void strbuf_init(struct strbuf *sb, size_t hint) -{ - sb->alloc = sb->len = 0; - sb->buf = strbuf_slopbuf; - if (hint) - strbuf_grow(sb, hint); -} - -void strbuf_release(struct strbuf *sb) -{ - if (sb->alloc) { - free(sb->buf); - strbuf_init(sb, 0); - } -} - -char *strbuf_detach(struct strbuf *sb, size_t *sz) -{ - char *res = sb->alloc ? sb->buf : NULL; - if (sz) - *sz = sb->len; - strbuf_init(sb, 0); - return res; -} - -void strbuf_attach(struct strbuf *sb, void *buf, size_t len, size_t alloc) -{ - strbuf_release(sb); - sb->buf = buf; - sb->len = len; - sb->alloc = alloc; - strbuf_grow(sb, 0); - sb->buf[sb->len] = '\0'; -} - -void strbuf_grow(struct strbuf *sb, size_t extra) -{ - if (sb->len + extra + 1 <= sb->len) - die("you want to use way too much memory"); - if (!sb->alloc) - sb->buf = NULL; - ALLOC_GROW(sb->buf, sb->len + extra + 1, sb->alloc); -} - -void strbuf_trim(struct strbuf *sb) -{ - char *b = sb->buf; - while (sb->len > 0 && isspace((unsigned char)sb->buf[sb->len - 1])) - sb->len--; - while (sb->len > 0 && isspace(*b)) { - b++; - sb->len--; - } - memmove(sb->buf, b, sb->len); - sb->buf[sb->len] = '\0'; -} -void strbuf_rtrim(struct strbuf *sb) -{ - while (sb->len > 0 && isspace((unsigned char)sb->buf[sb->len - 1])) - sb->len--; - sb->buf[sb->len] = '\0'; -} - -void strbuf_ltrim(struct strbuf *sb) -{ - char *b = sb->buf; - while (sb->len > 0 && isspace(*b)) { - b++; - sb->len--; - } - memmove(sb->buf, b, sb->len); - sb->buf[sb->len] = '\0'; -} - -void strbuf_tolower(struct strbuf *sb) -{ - int i; - for (i = 0; i < sb->len; i++) - sb->buf[i] = tolower(sb->buf[i]); -} - -struct strbuf **strbuf_split(const struct strbuf *sb, int delim) -{ - int alloc = 2, pos = 0; - char *n, *p; - struct strbuf **ret; - struct strbuf *t; - - ret = calloc(alloc, sizeof(struct strbuf *)); - p = n = sb->buf; - while (n < sb->buf + sb->len) { - int len; - n = memchr(n, delim, sb->len - (n - sb->buf)); - if (pos + 1 >= alloc) { - alloc = alloc * 2; - ret = realloc(ret, sizeof(struct strbuf *) * alloc); - } - if (!n) - n = sb->buf + sb->len - 1; - len = n - p + 1; - t = malloc(sizeof(struct strbuf)); - strbuf_init(t, len); - strbuf_add(t, p, len); - ret[pos] = t; - ret[++pos] = NULL; - p = ++n; - } - return ret; -} - -void strbuf_list_free(struct strbuf **sbs) -{ - struct strbuf **s = sbs; 
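	/*
	 * (The array being freed here is typically what strbuf_split()
	 *  above returned; note that each piece keeps its trailing
	 *  delimiter, so splitting "a,b,c" on ',' yields "a,", "b,"
	 *  and "c".)
	 */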
- - while (*s) { - strbuf_release(*s); - free(*s++); - } - free(sbs); -} - -int strbuf_cmp(const struct strbuf *a, const struct strbuf *b) -{ - int len = a->len < b->len ? a->len: b->len; - int cmp = memcmp(a->buf, b->buf, len); - if (cmp) - return cmp; - return a->len < b->len ? -1: a->len != b->len; -} - -void strbuf_splice(struct strbuf *sb, size_t pos, size_t len, - const void *data, size_t dlen) -{ - if (pos + len < pos) - die("you want to use way too much memory"); - if (pos > sb->len) - die("`pos' is too far after the end of the buffer"); - if (pos + len > sb->len) - die("`pos + len' is too far after the end of the buffer"); - - if (dlen >= len) - strbuf_grow(sb, dlen - len); - memmove(sb->buf + pos + dlen, - sb->buf + pos + len, - sb->len - pos - len); - memcpy(sb->buf + pos, data, dlen); - strbuf_setlen(sb, sb->len + dlen - len); -} - -void strbuf_insert(struct strbuf *sb, size_t pos, const void *data, size_t len) -{ - strbuf_splice(sb, pos, 0, data, len); -} - -void strbuf_remove(struct strbuf *sb, size_t pos, size_t len) -{ - strbuf_splice(sb, pos, len, NULL, 0); -} - -void strbuf_add(struct strbuf *sb, const void *data, size_t len) -{ - strbuf_grow(sb, len); - memcpy(sb->buf + sb->len, data, len); - strbuf_setlen(sb, sb->len + len); -} - -void strbuf_adddup(struct strbuf *sb, size_t pos, size_t len) -{ - strbuf_grow(sb, len); - memcpy(sb->buf + sb->len, sb->buf + pos, len); - strbuf_setlen(sb, sb->len + len); -} - -void strbuf_addf(struct strbuf *sb, const char *fmt, ...) -{ - int len; - va_list ap; - - if (!strbuf_avail(sb)) - strbuf_grow(sb, 64); - va_start(ap, fmt); - len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); - va_end(ap); - if (len < 0) - die("your vsnprintf is broken"); - if (len > strbuf_avail(sb)) { - strbuf_grow(sb, len); - va_start(ap, fmt); - len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); - va_end(ap); - if (len > strbuf_avail(sb)) { - die("this should not happen, your snprintf is broken"); - } - } - strbuf_setlen(sb, sb->len + len); -} - -void strbuf_expand(struct strbuf *sb, const char *format, expand_fn_t fn, - void *context) -{ - for (;;) { - const char *percent; - size_t consumed; - - percent = strchrnul(format, '%'); - strbuf_add(sb, format, percent - format); - if (!*percent) - break; - format = percent + 1; - - consumed = fn(sb, format, context); - if (consumed) - format += consumed; - else - strbuf_addch(sb, '%'); - } -} - -size_t strbuf_expand_dict_cb(struct strbuf *sb, const char *placeholder, - void *context) -{ - struct strbuf_expand_dict_entry *e = context; - size_t len; - - for (; e->placeholder && (len = strlen(e->placeholder)); e++) { - if (!strncmp(placeholder, e->placeholder, len)) { - if (e->value) - strbuf_addstr(sb, e->value); - return len; - } - } - return 0; -} - -size_t strbuf_fread(struct strbuf *sb, size_t size, FILE *f) -{ - size_t res; - size_t oldalloc = sb->alloc; - - strbuf_grow(sb, size); - res = fread(sb->buf + sb->len, 1, size, f); - if (res > 0) - strbuf_setlen(sb, sb->len + res); - else if (res < 0 && oldalloc == 0) - strbuf_release(sb); - return res; -} - -ssize_t strbuf_read(struct strbuf *sb, int fd, size_t hint) -{ - size_t oldlen = sb->len; - size_t oldalloc = sb->alloc; - - strbuf_grow(sb, hint ? 
hint : 8192); - for (;;) { - ssize_t cnt; - - cnt = read(fd, sb->buf + sb->len, sb->alloc - sb->len - 1); - if (cnt < 0) { - if (oldalloc == 0) - strbuf_release(sb); - else - strbuf_setlen(sb, oldlen); - return -1; - } - if (!cnt) - break; - sb->len += cnt; - strbuf_grow(sb, 8192); - } - - sb->buf[sb->len] = '\0'; - return sb->len - oldlen; -} - -#define STRBUF_MAXLINK (2*PATH_MAX) - -int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint) -{ - size_t oldalloc = sb->alloc; - - if (hint < 32) - hint = 32; - - while (hint < STRBUF_MAXLINK) { - int len; - - strbuf_grow(sb, hint); - len = readlink(path, sb->buf, hint); - if (len < 0) { - if (errno != ERANGE) - break; - } else if (len < hint) { - strbuf_setlen(sb, len); - return 0; - } - - /* .. the buffer was too small - try again */ - hint *= 2; - } - if (oldalloc == 0) - strbuf_release(sb); - return -1; -} - -int strbuf_getline(struct strbuf *sb, FILE *fp, int term) -{ - int ch; - - strbuf_grow(sb, 0); - if (feof(fp)) - return EOF; - - strbuf_reset(sb); - while ((ch = fgetc(fp)) != EOF) { - if (ch == term) - break; - strbuf_grow(sb, 1); - sb->buf[sb->len++] = ch; - } - if (ch == EOF && sb->len == 0) - return EOF; - - sb->buf[sb->len] = '\0'; - return 0; -} - -int strbuf_read_file(struct strbuf *sb, const char *path, size_t hint) -{ - int fd, len; - - fd = open(path, O_RDONLY); - if (fd < 0) - return -1; - len = strbuf_read(sb, fd, hint); - close(fd); - if (len < 0) - return -1; - - return len; -} diff --git a/Documentation/perf_counter/strbuf.h b/Documentation/perf_counter/strbuf.h deleted file mode 100644 index 9ee908a3ec5..00000000000 --- a/Documentation/perf_counter/strbuf.h +++ /dev/null @@ -1,137 +0,0 @@ -#ifndef STRBUF_H -#define STRBUF_H - -/* - * Strbuf's can be use in many ways: as a byte array, or to store arbitrary - * long, overflow safe strings. - * - * Strbufs has some invariants that are very important to keep in mind: - * - * 1. the ->buf member is always malloc-ed, hence strbuf's can be used to - * build complex strings/buffers whose final size isn't easily known. - * - * It is NOT legal to copy the ->buf pointer away. - * `strbuf_detach' is the operation that detachs a buffer from its shell - * while keeping the shell valid wrt its invariants. - * - * 2. the ->buf member is a byte array that has at least ->len + 1 bytes - * allocated. The extra byte is used to store a '\0', allowing the ->buf - * member to be a valid C-string. Every strbuf function ensure this - * invariant is preserved. - * - * Note that it is OK to "play" with the buffer directly if you work it - * that way: - * - * strbuf_grow(sb, SOME_SIZE); - * ... Here, the memory array starting at sb->buf, and of length - * ... strbuf_avail(sb) is all yours, and you are sure that - * ... strbuf_avail(sb) is at least SOME_SIZE. - * strbuf_setlen(sb, sb->len + SOME_OTHER_SIZE); - * - * Of course, SOME_OTHER_SIZE must be smaller or equal to strbuf_avail(sb). - * - * Doing so is safe, though if it has to be done in many places, adding the - * missing API to the strbuf module is the way to go. - * - * XXX: do _not_ assume that the area that is yours is of size ->alloc - 1 - * even if it's true in the current implementation. Alloc is somehow a - * "private" member that should not be messed with. 
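 *
 * A minimal, illustrative use of the API:
 *
 *	struct strbuf sb = STRBUF_INIT;
 *
 *	strbuf_addstr(&sb, "hello");
 *	strbuf_addf(&sb, ", %s", "world");
 *	printf("%s\n", sb.buf);		(->buf is always NUL terminated)
 *	strbuf_release(&sb);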
- */ - -#include - -extern char strbuf_slopbuf[]; -struct strbuf { - size_t alloc; - size_t len; - char *buf; -}; - -#define STRBUF_INIT { 0, 0, strbuf_slopbuf } - -/*----- strbuf life cycle -----*/ -extern void strbuf_init(struct strbuf *, size_t); -extern void strbuf_release(struct strbuf *); -extern char *strbuf_detach(struct strbuf *, size_t *); -extern void strbuf_attach(struct strbuf *, void *, size_t, size_t); -static inline void strbuf_swap(struct strbuf *a, struct strbuf *b) { - struct strbuf tmp = *a; - *a = *b; - *b = tmp; -} - -/*----- strbuf size related -----*/ -static inline size_t strbuf_avail(const struct strbuf *sb) { - return sb->alloc ? sb->alloc - sb->len - 1 : 0; -} - -extern void strbuf_grow(struct strbuf *, size_t); - -static inline void strbuf_setlen(struct strbuf *sb, size_t len) { - if (!sb->alloc) - strbuf_grow(sb, 0); - assert(len < sb->alloc); - sb->len = len; - sb->buf[len] = '\0'; -} -#define strbuf_reset(sb) strbuf_setlen(sb, 0) - -/*----- content related -----*/ -extern void strbuf_trim(struct strbuf *); -extern void strbuf_rtrim(struct strbuf *); -extern void strbuf_ltrim(struct strbuf *); -extern int strbuf_cmp(const struct strbuf *, const struct strbuf *); -extern void strbuf_tolower(struct strbuf *); - -extern struct strbuf **strbuf_split(const struct strbuf *, int delim); -extern void strbuf_list_free(struct strbuf **); - -/*----- add data in your buffer -----*/ -static inline void strbuf_addch(struct strbuf *sb, int c) { - strbuf_grow(sb, 1); - sb->buf[sb->len++] = c; - sb->buf[sb->len] = '\0'; -} - -extern void strbuf_insert(struct strbuf *, size_t pos, const void *, size_t); -extern void strbuf_remove(struct strbuf *, size_t pos, size_t len); - -/* splice pos..pos+len with given data */ -extern void strbuf_splice(struct strbuf *, size_t pos, size_t len, - const void *, size_t); - -extern void strbuf_add(struct strbuf *, const void *, size_t); -static inline void strbuf_addstr(struct strbuf *sb, const char *s) { - strbuf_add(sb, s, strlen(s)); -} -static inline void strbuf_addbuf(struct strbuf *sb, const struct strbuf *sb2) { - strbuf_add(sb, sb2->buf, sb2->len); -} -extern void strbuf_adddup(struct strbuf *sb, size_t pos, size_t len); - -typedef size_t (*expand_fn_t) (struct strbuf *sb, const char *placeholder, void *context); -extern void strbuf_expand(struct strbuf *sb, const char *format, expand_fn_t fn, void *context); -struct strbuf_expand_dict_entry { - const char *placeholder; - const char *value; -}; -extern size_t strbuf_expand_dict_cb(struct strbuf *sb, const char *placeholder, void *context); - -__attribute__((format(printf,2,3))) -extern void strbuf_addf(struct strbuf *sb, const char *fmt, ...); - -extern size_t strbuf_fread(struct strbuf *, size_t, FILE *); -/* XXX: if read fails, any partial read is undone */ -extern ssize_t strbuf_read(struct strbuf *, int fd, size_t hint); -extern int strbuf_read_file(struct strbuf *sb, const char *path, size_t hint); -extern int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint); - -extern int strbuf_getline(struct strbuf *, FILE *, int); - -extern void stripspace(struct strbuf *buf, int skip_comments); -extern int launch_editor(const char *path, struct strbuf *buffer, const char *const *env); - -extern int strbuf_branchname(struct strbuf *sb, const char *name); -extern int strbuf_check_branch_ref(struct strbuf *sb, const char *name); - -#endif /* STRBUF_H */ diff --git a/Documentation/perf_counter/usage.c b/Documentation/perf_counter/usage.c deleted file mode 100644 index 
7a10421fe6b..00000000000 --- a/Documentation/perf_counter/usage.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * GIT - The information manager from hell - * - * Copyright (C) Linus Torvalds, 2005 - */ -#include "util.h" - -static void report(const char *prefix, const char *err, va_list params) -{ - char msg[1024]; - vsnprintf(msg, sizeof(msg), err, params); - fprintf(stderr, "%s%s\n", prefix, msg); -} - -static NORETURN void usage_builtin(const char *err) -{ - fprintf(stderr, "usage: %s\n", err); - exit(129); -} - -static NORETURN void die_builtin(const char *err, va_list params) -{ - report("fatal: ", err, params); - exit(128); -} - -static void error_builtin(const char *err, va_list params) -{ - report("error: ", err, params); -} - -static void warn_builtin(const char *warn, va_list params) -{ - report("warning: ", warn, params); -} - -/* If we are in a dlopen()ed .so write to a global variable would segfault - * (ugh), so keep things static. */ -static void (*usage_routine)(const char *err) NORETURN = usage_builtin; -static void (*die_routine)(const char *err, va_list params) NORETURN = die_builtin; -static void (*error_routine)(const char *err, va_list params) = error_builtin; -static void (*warn_routine)(const char *err, va_list params) = warn_builtin; - -void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN) -{ - die_routine = routine; -} - -void usage(const char *err) -{ - usage_routine(err); -} - -void die(const char *err, ...) -{ - va_list params; - - va_start(params, err); - die_routine(err, params); - va_end(params); -} - -int error(const char *err, ...) -{ - va_list params; - - va_start(params, err); - error_routine(err, params); - va_end(params); - return -1; -} - -void warning(const char *warn, ...) -{ - va_list params; - - va_start(params, warn); - warn_routine(warn, params); - va_end(params); -} diff --git a/Documentation/perf_counter/util.h b/Documentation/perf_counter/util.h deleted file mode 100644 index 36e40c38e09..00000000000 --- a/Documentation/perf_counter/util.h +++ /dev/null @@ -1,408 +0,0 @@ -#ifndef GIT_COMPAT_UTIL_H -#define GIT_COMPAT_UTIL_H - -#define _FILE_OFFSET_BITS 64 - -#ifndef FLEX_ARRAY -/* - * See if our compiler is known to support flexible array members. - */ -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) -# define FLEX_ARRAY /* empty */ -#elif defined(__GNUC__) -# if (__GNUC__ >= 3) -# define FLEX_ARRAY /* empty */ -# else -# define FLEX_ARRAY 0 /* older GNU extension */ -# endif -#endif - -/* - * Otherwise, default to safer but a bit wasteful traditional style - */ -#ifndef FLEX_ARRAY -# define FLEX_ARRAY 1 -#endif -#endif - -#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) - -#ifdef __GNUC__ -#define TYPEOF(x) (__typeof__(x)) -#else -#define TYPEOF(x) -#endif - -#define MSB(x, bits) ((x) & TYPEOF(x)(~0ULL << (sizeof(x) * 8 - (bits)))) -#define HAS_MULTI_BITS(i) ((i) & ((i) - 1)) /* checks if an integer has more than 1 bit set */ - -/* Approximation of the length of the decimal representation of this type. 
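 * (Each byte contributes log10(256) ~= 2.41 decimal digits, so
 * sizeof(x) * 2.56, rounded, is a safe overestimate; the final +1
 * leaves room for a sign.)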
*/ -#define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1) - -#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__USLC__) && !defined(_M_UNIX) -#define _XOPEN_SOURCE 600 /* glibc2 and AIX 5.3L need 500, OpenBSD needs 600 for S_ISLNK() */ -#define _XOPEN_SOURCE_EXTENDED 1 /* AIX 5.3L needs this */ -#endif -#define _ALL_SOURCE 1 -#define _GNU_SOURCE 1 -#define _BSD_SOURCE 1 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifndef __MINGW32__ -#include -#include -#include -#include -#ifndef NO_SYS_SELECT_H -#include -#endif -#include -#include -#include -#include -#include -#include -#if defined(__CYGWIN__) -#undef _XOPEN_SOURCE -#include -#define _XOPEN_SOURCE 600 -#include "compat/cygwin.h" -#else -#undef _ALL_SOURCE /* AIX 5.3L defines a struct list with _ALL_SOURCE. */ -#include -#define _ALL_SOURCE 1 -#endif -#else /* __MINGW32__ */ -/* pull in Windows compatibility stuff */ -#include "compat/mingw.h" -#endif /* __MINGW32__ */ - -#ifndef NO_ICONV -#include -#endif - -#ifndef NO_OPENSSL -#include -#include -#endif - -/* On most systems would have given us this, but - * not on some systems (e.g. GNU/Hurd). - */ -#ifndef PATH_MAX -#define PATH_MAX 4096 -#endif - -#ifndef PRIuMAX -#define PRIuMAX "llu" -#endif - -#ifndef PRIu32 -#define PRIu32 "u" -#endif - -#ifndef PRIx32 -#define PRIx32 "x" -#endif - -#ifndef PATH_SEP -#define PATH_SEP ':' -#endif - -#ifndef STRIP_EXTENSION -#define STRIP_EXTENSION "" -#endif - -#ifndef has_dos_drive_prefix -#define has_dos_drive_prefix(path) 0 -#endif - -#ifndef is_dir_sep -#define is_dir_sep(c) ((c) == '/') -#endif - -#ifdef __GNUC__ -#define NORETURN __attribute__((__noreturn__)) -#else -#define NORETURN -#ifndef __attribute__ -#define __attribute__(x) -#endif -#endif - -/* General helper functions */ -extern void usage(const char *err) NORETURN; -extern void die(const char *err, ...) NORETURN __attribute__((format (printf, 1, 2))); -extern int error(const char *err, ...) __attribute__((format (printf, 1, 2))); -extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2))); - -extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN); - -extern int prefixcmp(const char *str, const char *prefix); -extern time_t tm_to_time_t(const struct tm *tm); - -static inline const char *skip_prefix(const char *str, const char *prefix) -{ - size_t len = strlen(prefix); - return strncmp(str, prefix, len) ? NULL : str + len; -} - -#if defined(NO_MMAP) || defined(USE_WIN32_MMAP) - -#ifndef PROT_READ -#define PROT_READ 1 -#define PROT_WRITE 2 -#define MAP_PRIVATE 1 -#define MAP_FAILED ((void*)-1) -#endif - -#define mmap git_mmap -#define munmap git_munmap -extern void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset); -extern int git_munmap(void *start, size_t length); - -#else /* NO_MMAP || USE_WIN32_MMAP */ - -#include - -#endif /* NO_MMAP || USE_WIN32_MMAP */ - -#ifdef NO_MMAP - -/* This value must be multiple of (pagesize * 2) */ -#define DEFAULT_PACKED_GIT_WINDOW_SIZE (1 * 1024 * 1024) - -#else /* NO_MMAP */ - -/* This value must be multiple of (pagesize * 2) */ -#define DEFAULT_PACKED_GIT_WINDOW_SIZE \ - (sizeof(void*) >= 8 \ - ? 
1 * 1024 * 1024 * 1024 \ - : 32 * 1024 * 1024) - -#endif /* NO_MMAP */ - -#ifdef NO_ST_BLOCKS_IN_STRUCT_STAT -#define on_disk_bytes(st) ((st).st_size) -#else -#define on_disk_bytes(st) ((st).st_blocks * 512) -#endif - -#define DEFAULT_PACKED_GIT_LIMIT \ - ((1024L * 1024L) * (sizeof(void*) >= 8 ? 8192 : 256)) - -#ifdef NO_PREAD -#define pread git_pread -extern ssize_t git_pread(int fd, void *buf, size_t count, off_t offset); -#endif -/* - * Forward decl that will remind us if its twin in cache.h changes. - * This function is used in compat/pread.c. But we can't include - * cache.h there. - */ -extern ssize_t read_in_full(int fd, void *buf, size_t count); - -#ifdef NO_SETENV -#define setenv gitsetenv -extern int gitsetenv(const char *, const char *, int); -#endif - -#ifdef NO_MKDTEMP -#define mkdtemp gitmkdtemp -extern char *gitmkdtemp(char *); -#endif - -#ifdef NO_UNSETENV -#define unsetenv gitunsetenv -extern void gitunsetenv(const char *); -#endif - -#ifdef NO_STRCASESTR -#define strcasestr gitstrcasestr -extern char *gitstrcasestr(const char *haystack, const char *needle); -#endif - -#ifdef NO_STRLCPY -#define strlcpy gitstrlcpy -extern size_t gitstrlcpy(char *, const char *, size_t); -#endif - -#ifdef NO_STRTOUMAX -#define strtoumax gitstrtoumax -extern uintmax_t gitstrtoumax(const char *, char **, int); -#endif - -#ifdef NO_HSTRERROR -#define hstrerror githstrerror -extern const char *githstrerror(int herror); -#endif - -#ifdef NO_MEMMEM -#define memmem gitmemmem -void *gitmemmem(const void *haystack, size_t haystacklen, - const void *needle, size_t needlelen); -#endif - -#ifdef FREAD_READS_DIRECTORIES -#ifdef fopen -#undef fopen -#endif -#define fopen(a,b) git_fopen(a,b) -extern FILE *git_fopen(const char*, const char*); -#endif - -#ifdef SNPRINTF_RETURNS_BOGUS -#define snprintf git_snprintf -extern int git_snprintf(char *str, size_t maxsize, - const char *format, ...); -#define vsnprintf git_vsnprintf -extern int git_vsnprintf(char *str, size_t maxsize, - const char *format, va_list ap); -#endif - -#ifdef __GLIBC_PREREQ -#if __GLIBC_PREREQ(2, 1) -#define HAVE_STRCHRNUL -#endif -#endif - -#ifndef HAVE_STRCHRNUL -#define strchrnul gitstrchrnul -static inline char *gitstrchrnul(const char *s, int c) -{ - while (*s && *s != c) - s++; - return (char *)s; -} -#endif - -/* - * Wrappers: - */ -extern char *xstrdup(const char *str); -extern void *xmalloc(size_t size); -extern void *xmemdupz(const void *data, size_t len); -extern char *xstrndup(const char *str, size_t len); -extern void *xrealloc(void *ptr, size_t size); -extern void *xcalloc(size_t nmemb, size_t size); -extern void *xmmap(void *start, size_t length, int prot, int flags, int fd, off_t offset); -extern ssize_t xread(int fd, void *buf, size_t len); -extern ssize_t xwrite(int fd, const void *buf, size_t len); -extern int xdup(int fd); -extern FILE *xfdopen(int fd, const char *mode); -static inline size_t xsize_t(off_t len) -{ - return (size_t)len; -} - -static inline int has_extension(const char *filename, const char *ext) -{ - size_t len = strlen(filename); - size_t extlen = strlen(ext); - return len > extlen && !memcmp(filename + len - extlen, ext, extlen); -} - -/* Sane ctype - no locale, and works with signed chars */ -#undef isascii -#undef isspace -#undef isdigit -#undef isalpha -#undef isalnum -#undef tolower -#undef toupper -extern unsigned char sane_ctype[256]; -#define GIT_SPACE 0x01 -#define GIT_DIGIT 0x02 -#define GIT_ALPHA 0x04 -#define GIT_GLOB_SPECIAL 0x08 -#define GIT_REGEX_SPECIAL 0x10 -#define 
sane_istest(x,mask) ((sane_ctype[(unsigned char)(x)] & (mask)) != 0) -#define isascii(x) (((x) & ~0x7f) == 0) -#define isspace(x) sane_istest(x,GIT_SPACE) -#define isdigit(x) sane_istest(x,GIT_DIGIT) -#define isalpha(x) sane_istest(x,GIT_ALPHA) -#define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT) -#define is_glob_special(x) sane_istest(x,GIT_GLOB_SPECIAL) -#define is_regex_special(x) sane_istest(x,GIT_GLOB_SPECIAL | GIT_REGEX_SPECIAL) -#define tolower(x) sane_case((unsigned char)(x), 0x20) -#define toupper(x) sane_case((unsigned char)(x), 0) - -static inline int sane_case(int x, int high) -{ - if (sane_istest(x, GIT_ALPHA)) - x = (x & ~0x20) | high; - return x; -} - -static inline int strtoul_ui(char const *s, int base, unsigned int *result) -{ - unsigned long ul; - char *p; - - errno = 0; - ul = strtoul(s, &p, base); - if (errno || *p || p == s || (unsigned int) ul != ul) - return -1; - *result = ul; - return 0; -} - -static inline int strtol_i(char const *s, int base, int *result) -{ - long ul; - char *p; - - errno = 0; - ul = strtol(s, &p, base); - if (errno || *p || p == s || (int) ul != ul) - return -1; - *result = ul; - return 0; -} - -#ifdef INTERNAL_QSORT -void git_qsort(void *base, size_t nmemb, size_t size, - int(*compar)(const void *, const void *)); -#define qsort git_qsort -#endif - -#ifndef DIR_HAS_BSD_GROUP_SEMANTICS -# define FORCE_DIR_SET_GID S_ISGID -#else -# define FORCE_DIR_SET_GID 0 -#endif - -#ifdef NO_NSEC -#undef USE_NSEC -#define ST_CTIME_NSEC(st) 0 -#define ST_MTIME_NSEC(st) 0 -#else -#ifdef USE_ST_TIMESPEC -#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctimespec.tv_nsec)) -#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtimespec.tv_nsec)) -#else -#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctim.tv_nsec)) -#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtim.tv_nsec)) -#endif -#endif - -#endif diff --git a/Documentation/perf_counter/util/PERF-VERSION-GEN b/Documentation/perf_counter/util/PERF-VERSION-GEN new file mode 100755 index 00000000000..c561d1538c0 --- /dev/null +++ b/Documentation/perf_counter/util/PERF-VERSION-GEN @@ -0,0 +1,42 @@ +#!/bin/sh + +GVF=PERF-VERSION-FILE +DEF_VER=v0.0.1.PERF + +LF=' +' + +# First see if there is a version file (included in release tarballs), +# then try git-describe, then default. +if test -f version +then + VN=$(cat version) || VN="$DEF_VER" +elif test -d .git -o -f .git && + VN=$(git describe --abbrev=4 HEAD 2>/dev/null) && + case "$VN" in + *$LF*) (exit 1) ;; + v[0-9]*) + git update-index -q --refresh + test -z "$(git diff-index --name-only HEAD --)" || + VN="$VN-dirty" ;; + esac +then + VN=$(echo "$VN" | sed -e 's/-/./g'); +else + VN="$DEF_VER" +fi + +VN=$(expr "$VN" : v*'\(.*\)') + +if test -r $GVF +then + VC=$(sed -e 's/^PERF_VERSION = //' <$GVF) +else + VC=unset +fi +test "$VN" = "$VC" || { + echo >&2 "PERF_VERSION = $VN" + echo "PERF_VERSION = $VN" >$GVF +} + + diff --git a/Documentation/perf_counter/util/abspath.c b/Documentation/perf_counter/util/abspath.c new file mode 100644 index 00000000000..649f34f8336 --- /dev/null +++ b/Documentation/perf_counter/util/abspath.c @@ -0,0 +1,117 @@ +#include "cache.h" + +/* + * Do not use this for inspecting *tracked* content. When path is a + * symlink to a directory, we do not want to say it is a directory when + * dealing with tracked content in the working tree. + */ +int is_directory(const char *path) +{ + struct stat st; + return (!stat(path, &st) && S_ISDIR(st.st_mode)); +} + +/* We allow "recursive" symbolic links. Only within reason, though. 
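 * ("Within reason" means the trailing path component may be a chain
 *  of at most MAXDEPTH symlinks; deeper chains are simply left
 *  unresolved.)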
*/ +#define MAXDEPTH 5 + +const char *make_absolute_path(const char *path) +{ + static char bufs[2][PATH_MAX + 1], *buf = bufs[0], *next_buf = bufs[1]; + char cwd[1024] = ""; + int buf_index = 1, len; + + int depth = MAXDEPTH; + char *last_elem = NULL; + struct stat st; + + if (strlcpy(buf, path, PATH_MAX) >= PATH_MAX) + die ("Too long path: %.*s", 60, path); + + while (depth--) { + if (!is_directory(buf)) { + char *last_slash = strrchr(buf, '/'); + if (last_slash) { + *last_slash = '\0'; + last_elem = xstrdup(last_slash + 1); + } else { + last_elem = xstrdup(buf); + *buf = '\0'; + } + } + + if (*buf) { + if (!*cwd && !getcwd(cwd, sizeof(cwd))) + die ("Could not get current working directory"); + + if (chdir(buf)) + die ("Could not switch to '%s'", buf); + } + if (!getcwd(buf, PATH_MAX)) + die ("Could not get current working directory"); + + if (last_elem) { + int len = strlen(buf); + if (len + strlen(last_elem) + 2 > PATH_MAX) + die ("Too long path name: '%s/%s'", + buf, last_elem); + buf[len] = '/'; + strcpy(buf + len + 1, last_elem); + free(last_elem); + last_elem = NULL; + } + + if (!lstat(buf, &st) && S_ISLNK(st.st_mode)) { + len = readlink(buf, next_buf, PATH_MAX); + if (len < 0) + die ("Invalid symlink: %s", buf); + if (PATH_MAX <= len) + die("symbolic link too long: %s", buf); + next_buf[len] = '\0'; + buf = next_buf; + buf_index = 1 - buf_index; + next_buf = bufs[buf_index]; + } else + break; + } + + if (*cwd && chdir(cwd)) + die ("Could not change back to '%s'", cwd); + + return buf; +} + +static const char *get_pwd_cwd(void) +{ + static char cwd[PATH_MAX + 1]; + char *pwd; + struct stat cwd_stat, pwd_stat; + if (getcwd(cwd, PATH_MAX) == NULL) + return NULL; + pwd = getenv("PWD"); + if (pwd && strcmp(pwd, cwd)) { + stat(cwd, &cwd_stat); + if (!stat(pwd, &pwd_stat) && + pwd_stat.st_dev == cwd_stat.st_dev && + pwd_stat.st_ino == cwd_stat.st_ino) { + strlcpy(cwd, pwd, PATH_MAX); + } + } + return cwd; +} + +const char *make_nonrelative_path(const char *path) +{ + static char buf[PATH_MAX + 1]; + + if (is_absolute_path(path)) { + if (strlcpy(buf, path, PATH_MAX) >= PATH_MAX) + die("Too long path: %.*s", 60, path); + } else { + const char *cwd = get_pwd_cwd(); + if (!cwd) + die("Cannot determine the current working directory"); + if (snprintf(buf, PATH_MAX, "%s/%s", cwd, path) >= PATH_MAX) + die("Too long path: %.*s", 60, path); + } + return buf; +} diff --git a/Documentation/perf_counter/util/alias.c b/Documentation/perf_counter/util/alias.c new file mode 100644 index 00000000000..9b3dd2b428d --- /dev/null +++ b/Documentation/perf_counter/util/alias.c @@ -0,0 +1,77 @@ +#include "cache.h" + +static const char *alias_key; +static char *alias_val; + +static int alias_lookup_cb(const char *k, const char *v, void *cb) +{ + if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) { + if (!v) + return config_error_nonbool(k); + alias_val = strdup(v); + return 0; + } + return 0; +} + +char *alias_lookup(const char *alias) +{ + alias_key = alias; + alias_val = NULL; + perf_config(alias_lookup_cb, NULL); + return alias_val; +} + +int split_cmdline(char *cmdline, const char ***argv) +{ + int src, dst, count = 0, size = 16; + char quoted = 0; + + *argv = malloc(sizeof(char*) * size); + + /* split alias_string */ + (*argv)[count++] = cmdline; + for (src = dst = 0; cmdline[src];) { + char c = cmdline[src]; + if (!quoted && isspace(c)) { + cmdline[dst++] = 0; + while (cmdline[++src] + && isspace(cmdline[src])) + ; /* skip */ + if (count >= size) { + size += 16; + *argv = realloc(*argv, sizeof(char*) 
* size); + } + (*argv)[count++] = cmdline + dst; + } else if (!quoted && (c == '\'' || c == '"')) { + quoted = c; + src++; + } else if (c == quoted) { + quoted = 0; + src++; + } else { + if (c == '\\' && quoted != '\'') { + src++; + c = cmdline[src]; + if (!c) { + free(*argv); + *argv = NULL; + return error("cmdline ends with \\"); + } + } + cmdline[dst++] = c; + src++; + } + } + + cmdline[dst] = 0; + + if (quoted) { + free(*argv); + *argv = NULL; + return error("unclosed quote"); + } + + return count; +} + diff --git a/Documentation/perf_counter/util/cache.h b/Documentation/perf_counter/util/cache.h new file mode 100644 index 00000000000..71080512fa8 --- /dev/null +++ b/Documentation/perf_counter/util/cache.h @@ -0,0 +1,117 @@ +#ifndef CACHE_H +#define CACHE_H + +#include "util.h" +#include "strbuf.h" + +#define PERF_DIR_ENVIRONMENT "PERF_DIR" +#define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE" +#define DEFAULT_PERF_DIR_ENVIRONMENT ".perf" +#define DB_ENVIRONMENT "PERF_OBJECT_DIRECTORY" +#define INDEX_ENVIRONMENT "PERF_INDEX_FILE" +#define GRAFT_ENVIRONMENT "PERF_GRAFT_FILE" +#define TEMPLATE_DIR_ENVIRONMENT "PERF_TEMPLATE_DIR" +#define CONFIG_ENVIRONMENT "PERF_CONFIG" +#define EXEC_PATH_ENVIRONMENT "PERF_EXEC_PATH" +#define CEILING_DIRECTORIES_ENVIRONMENT "PERF_CEILING_DIRECTORIES" +#define PERFATTRIBUTES_FILE ".perfattributes" +#define INFOATTRIBUTES_FILE "info/attributes" +#define ATTRIBUTE_MACRO_PREFIX "[attr]" + +typedef int (*config_fn_t)(const char *, const char *, void *); +extern int perf_default_config(const char *, const char *, void *); +extern int perf_config_from_file(config_fn_t fn, const char *, void *); +extern int perf_config(config_fn_t fn, void *); +extern int perf_parse_ulong(const char *, unsigned long *); +extern int perf_config_int(const char *, const char *); +extern unsigned long perf_config_ulong(const char *, const char *); +extern int perf_config_bool_or_int(const char *, const char *, int *); +extern int perf_config_bool(const char *, const char *); +extern int perf_config_string(const char **, const char *, const char *); +extern int perf_config_set(const char *, const char *); +extern int perf_config_set_multivar(const char *, const char *, const char *, int); +extern int perf_config_rename_section(const char *, const char *); +extern const char *perf_etc_perfconfig(void); +extern int check_repository_format_version(const char *var, const char *value, void *cb); +extern int perf_config_system(void); +extern int perf_config_global(void); +extern int config_error_nonbool(const char *); +extern const char *config_exclusive_filename; + +#define MAX_PERFNAME (1000) +extern char perf_default_email[MAX_PERFNAME]; +extern char perf_default_name[MAX_PERFNAME]; +extern int user_ident_explicitly_given; + +extern const char *perf_log_output_encoding; +extern const char *perf_mailmap_file; + +/* IO helper functions */ +extern void maybe_flush_or_die(FILE *, const char *); +extern int copy_fd(int ifd, int ofd); +extern int copy_file(const char *dst, const char *src, int mode); +extern ssize_t read_in_full(int fd, void *buf, size_t count); +extern ssize_t write_in_full(int fd, const void *buf, size_t count); +extern void write_or_die(int fd, const void *buf, size_t count); +extern int write_or_whine(int fd, const void *buf, size_t count, const char *msg); +extern int write_or_whine_pipe(int fd, const void *buf, size_t count, const char *msg); +extern void fsync_or_die(int fd, const char *); + +/* pager.c */ +extern void setup_pager(void); +extern const char 
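/*
 * An illustrative config_fn_t callback for the perf_config() machinery
 * declared above (the name "dump_one" is made up; a NULL value denotes
 * a valueless boolean key):
 *
 *	static int dump_one(const char *var, const char *value, void *cb)
 *	{
 *		printf("%s=%s\n", var, value ? value : "true");
 *		return 0;
 *	}
 *
 *	perf_config(dump_one, NULL);
 */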
*pager_program; +extern int pager_in_use(void); +extern int pager_use_color; + +extern const char *editor_program; +extern const char *excludes_file; + +char *alias_lookup(const char *alias); +int split_cmdline(char *cmdline, const char ***argv); + +#define alloc_nr(x) (((x)+16)*3/2) + +/* + * Realloc the buffer pointed at by variable 'x' so that it can hold + * at least 'nr' entries; the number of entries currently allocated + * is 'alloc', using the standard growing factor alloc_nr() macro. + * + * DO NOT USE any expression with side-effect for 'x' or 'alloc'. + */ +#define ALLOC_GROW(x, nr, alloc) \ + do { \ + if ((nr) > alloc) { \ + if (alloc_nr(alloc) < (nr)) \ + alloc = (nr); \ + else \ + alloc = alloc_nr(alloc); \ + x = xrealloc((x), alloc * sizeof(*(x))); \ + } \ + } while(0) + + +static inline int is_absolute_path(const char *path) +{ + return path[0] == '/'; +} + +const char *make_absolute_path(const char *path); +const char *make_nonrelative_path(const char *path); +const char *make_relative_path(const char *abs, const char *base); +int normalize_path_copy(char *dst, const char *src); +int longest_ancestor_length(const char *path, const char *prefix_list); +char *strip_path_suffix(const char *path, const char *suffix); + +extern char *mkpath(const char *fmt, ...) __attribute__((format (printf, 1, 2))); +extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2))); + +extern char *mksnpath(char *buf, size_t n, const char *fmt, ...) + __attribute__((format (printf, 3, 4))); +extern char *perf_snpath(char *buf, size_t n, const char *fmt, ...) + __attribute__((format (printf, 3, 4))); +extern char *perf_pathdup(const char *fmt, ...) + __attribute__((format (printf, 1, 2))); + +extern size_t strlcpy(char *dest, const char *src, size_t size); + +#endif /* CACHE_H */ diff --git a/Documentation/perf_counter/util/config.c b/Documentation/perf_counter/util/config.c new file mode 100644 index 00000000000..3dd13faa6a2 --- /dev/null +++ b/Documentation/perf_counter/util/config.c @@ -0,0 +1,873 @@ +/* + * GIT - The information manager from hell + * + * Copyright (C) Linus Torvalds, 2005 + * Copyright (C) Johannes Schindelin, 2005 + * + */ +#include "util.h" +#include "cache.h" +#include "exec_cmd.h" + +#define MAXNAME (256) + +static FILE *config_file; +static const char *config_file_name; +static int config_linenr; +static int config_file_eof; + +const char *config_exclusive_filename = NULL; + +static int get_next_char(void) +{ + int c; + FILE *f; + + c = '\n'; + if ((f = config_file) != NULL) { + c = fgetc(f); + if (c == '\r') { + /* DOS like systems */ + c = fgetc(f); + if (c != '\n') { + ungetc(c, f); + c = '\r'; + } + } + if (c == '\n') + config_linenr++; + if (c == EOF) { + config_file_eof = 1; + c = '\n'; + } + } + return c; +} + +static char *parse_value(void) +{ + static char value[1024]; + int quote = 0, comment = 0, len = 0, space = 0; + + for (;;) { + int c = get_next_char(); + if (len >= sizeof(value) - 1) + return NULL; + if (c == '\n') { + if (quote) + return NULL; + value[len] = 0; + return value; + } + if (comment) + continue; + if (isspace(c) && !quote) { + space = 1; + continue; + } + if (!quote) { + if (c == ';' || c == '#') { + comment = 1; + continue; + } + } + if (space) { + if (len) + value[len++] = ' '; + space = 0; + } + if (c == '\\') { + c = get_next_char(); + switch (c) { + case '\n': + continue; + case 't': + c = '\t'; + break; + case 'b': + c = '\b'; + break; + case 'n': + c = '\n'; + break; + /* Some characters escape as themselves */ + 
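			/*
			 * (So "\t", "\b" and "\n" become control
			 *  characters, a backslash before a newline
			 *  continues the value on the next line, and
			 *  any other escape makes the value invalid.)
			 */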
case '\\': case '"': + break; + /* Reject unknown escape sequences */ + default: + return NULL; + } + value[len++] = c; + continue; + } + if (c == '"') { + quote = 1-quote; + continue; + } + value[len++] = c; + } +} + +static inline int iskeychar(int c) +{ + return isalnum(c) || c == '-'; +} + +static int get_value(config_fn_t fn, void *data, char *name, unsigned int len) +{ + int c; + char *value; + + /* Get the full name */ + for (;;) { + c = get_next_char(); + if (config_file_eof) + break; + if (!iskeychar(c)) + break; + name[len++] = tolower(c); + if (len >= MAXNAME) + return -1; + } + name[len] = 0; + while (c == ' ' || c == '\t') + c = get_next_char(); + + value = NULL; + if (c != '\n') { + if (c != '=') + return -1; + value = parse_value(); + if (!value) + return -1; + } + return fn(name, value, data); +} + +static int get_extended_base_var(char *name, int baselen, int c) +{ + do { + if (c == '\n') + return -1; + c = get_next_char(); + } while (isspace(c)); + + /* We require the format to be '[base "extension"]' */ + if (c != '"') + return -1; + name[baselen++] = '.'; + + for (;;) { + int c = get_next_char(); + if (c == '\n') + return -1; + if (c == '"') + break; + if (c == '\\') { + c = get_next_char(); + if (c == '\n') + return -1; + } + name[baselen++] = c; + if (baselen > MAXNAME / 2) + return -1; + } + + /* Final ']' */ + if (get_next_char() != ']') + return -1; + return baselen; +} + +static int get_base_var(char *name) +{ + int baselen = 0; + + for (;;) { + int c = get_next_char(); + if (config_file_eof) + return -1; + if (c == ']') + return baselen; + if (isspace(c)) + return get_extended_base_var(name, baselen, c); + if (!iskeychar(c) && c != '.') + return -1; + if (baselen > MAXNAME / 2) + return -1; + name[baselen++] = tolower(c); + } +} + +static int perf_parse_file(config_fn_t fn, void *data) +{ + int comment = 0; + int baselen = 0; + static char var[MAXNAME]; + + /* U+FEFF Byte Order Mark in UTF8 */ + static const unsigned char *utf8_bom = (unsigned char *) "\xef\xbb\xbf"; + const unsigned char *bomptr = utf8_bom; + + for (;;) { + int c = get_next_char(); + if (bomptr && *bomptr) { + /* We are at the file beginning; skip UTF8-encoded BOM + * if present. Sane editors won't put this in on their + * own, but e.g. Windows Notepad will do it happily. */ + if ((unsigned char) c == *bomptr) { + bomptr++; + continue; + } else { + /* Do not tolerate partial BOM. */ + if (bomptr != utf8_bom) + break; + /* No BOM at file beginning. Cool. 
*/ + bomptr = NULL; + } + } + if (c == '\n') { + if (config_file_eof) + return 0; + comment = 0; + continue; + } + if (comment || isspace(c)) + continue; + if (c == '#' || c == ';') { + comment = 1; + continue; + } + if (c == '[') { + baselen = get_base_var(var); + if (baselen <= 0) + break; + var[baselen++] = '.'; + var[baselen] = 0; + continue; + } + if (!isalpha(c)) + break; + var[baselen] = tolower(c); + if (get_value(fn, data, var, baselen+1) < 0) + break; + } + die("bad config file line %d in %s", config_linenr, config_file_name); +} + +static int parse_unit_factor(const char *end, unsigned long *val) +{ + if (!*end) + return 1; + else if (!strcasecmp(end, "k")) { + *val *= 1024; + return 1; + } + else if (!strcasecmp(end, "m")) { + *val *= 1024 * 1024; + return 1; + } + else if (!strcasecmp(end, "g")) { + *val *= 1024 * 1024 * 1024; + return 1; + } + return 0; +} + +static int perf_parse_long(const char *value, long *ret) +{ + if (value && *value) { + char *end; + long val = strtol(value, &end, 0); + unsigned long factor = 1; + if (!parse_unit_factor(end, &factor)) + return 0; + *ret = val * factor; + return 1; + } + return 0; +} + +int perf_parse_ulong(const char *value, unsigned long *ret) +{ + if (value && *value) { + char *end; + unsigned long val = strtoul(value, &end, 0); + if (!parse_unit_factor(end, &val)) + return 0; + *ret = val; + return 1; + } + return 0; +} + +static void die_bad_config(const char *name) +{ + if (config_file_name) + die("bad config value for '%s' in %s", name, config_file_name); + die("bad config value for '%s'", name); +} + +int perf_config_int(const char *name, const char *value) +{ + long ret = 0; + if (!perf_parse_long(value, &ret)) + die_bad_config(name); + return ret; +} + +unsigned long perf_config_ulong(const char *name, const char *value) +{ + unsigned long ret; + if (!perf_parse_ulong(value, &ret)) + die_bad_config(name); + return ret; +} + +int perf_config_bool_or_int(const char *name, const char *value, int *is_bool) +{ + *is_bool = 1; + if (!value) + return 1; + if (!*value) + return 0; + if (!strcasecmp(value, "true") || !strcasecmp(value, "yes") || !strcasecmp(value, "on")) + return 1; + if (!strcasecmp(value, "false") || !strcasecmp(value, "no") || !strcasecmp(value, "off")) + return 0; + *is_bool = 0; + return perf_config_int(name, value); +} + +int perf_config_bool(const char *name, const char *value) +{ + int discard; + return !!perf_config_bool_or_int(name, value, &discard); +} + +int perf_config_string(const char **dest, const char *var, const char *value) +{ + if (!value) + return config_error_nonbool(var); + *dest = strdup(value); + return 0; +} + +static int perf_default_core_config(const char *var, const char *value) +{ + /* Add other config variables here and to Documentation/config.txt. */ + return 0; +} + +int perf_default_config(const char *var, const char *value, void *dummy) +{ + if (!prefixcmp(var, "core.")) + return perf_default_core_config(var, value); + + /* Add other config variables here and to Documentation/config.txt. 
*/ + return 0; +} + +int perf_config_from_file(config_fn_t fn, const char *filename, void *data) +{ + int ret; + FILE *f = fopen(filename, "r"); + + ret = -1; + if (f) { + config_file = f; + config_file_name = filename; + config_linenr = 1; + config_file_eof = 0; + ret = perf_parse_file(fn, data); + fclose(f); + config_file_name = NULL; + } + return ret; +} + +const char *perf_etc_perfconfig(void) +{ + static const char *system_wide; + if (!system_wide) + system_wide = system_path(ETC_PERFCONFIG); + return system_wide; +} + +static int perf_env_bool(const char *k, int def) +{ + const char *v = getenv(k); + return v ? perf_config_bool(k, v) : def; +} + +int perf_config_system(void) +{ + return !perf_env_bool("PERF_CONFIG_NOSYSTEM", 0); +} + +int perf_config_global(void) +{ + return !perf_env_bool("PERF_CONFIG_NOGLOBAL", 0); +} + +int perf_config(config_fn_t fn, void *data) +{ + int ret = 0, found = 0; + char *repo_config = NULL; + const char *home = NULL; + + /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */ + if (config_exclusive_filename) + return perf_config_from_file(fn, config_exclusive_filename, data); + if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK)) { + ret += perf_config_from_file(fn, perf_etc_perfconfig(), + data); + found += 1; + } + + home = getenv("HOME"); + if (perf_config_global() && home) { + char *user_config = strdup(mkpath("%s/.perfconfig", home)); + if (!access(user_config, R_OK)) { + ret += perf_config_from_file(fn, user_config, data); + found += 1; + } + free(user_config); + } + + repo_config = perf_pathdup("config"); + if (!access(repo_config, R_OK)) { + ret += perf_config_from_file(fn, repo_config, data); + found += 1; + } + free(repo_config); + if (found == 0) + return -1; + return ret; +} + +/* + * Find all the stuff for perf_config_set() below. + */ + +#define MAX_MATCHES 512 + +static struct { + int baselen; + char* key; + int do_not_match; + regex_t* value_regex; + int multi_replace; + size_t offset[MAX_MATCHES]; + enum { START, SECTION_SEEN, SECTION_END_SEEN, KEY_SEEN } state; + int seen; +} store; + +static int matches(const char* key, const char* value) +{ + return !strcmp(key, store.key) && + (store.value_regex == NULL || + (store.do_not_match ^ + !regexec(store.value_regex, value, 0, NULL, 0))); +} + +static int store_aux(const char* key, const char* value, void *cb) +{ + const char *ep; + size_t section_len; + + switch (store.state) { + case KEY_SEEN: + if (matches(key, value)) { + if (store.seen == 1 && store.multi_replace == 0) { + warning("%s has multiple values", key); + } else if (store.seen >= MAX_MATCHES) { + error("too many matches for %s", key); + return 1; + } + + store.offset[store.seen] = ftell(config_file); + store.seen++; + } + break; + case SECTION_SEEN: + /* + * What we are looking for is in store.key (both + * section and var), and its section part is baselen + * long. We found key (again, both section and var). + * We would want to know if this key is in the same + * section as what we are looking for. We already + * know we are in the same section as what should + * hold store.key. + */ + ep = strrchr(key, '.'); + section_len = ep - key; + + if ((section_len != store.baselen) || + memcmp(key, store.key, section_len+1)) { + store.state = SECTION_END_SEEN; + break; + } + + /* + * Do not increment matches: this is no match, but we + * just made sure we are in the desired section. 
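		 * (We do record the current offset below, though: if the
		 *  key is never found, this is where the new pair gets
		 *  spliced in, i.e. at the end of the matching section.)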
+ */ + store.offset[store.seen] = ftell(config_file); + /* fallthru */ + case SECTION_END_SEEN: + case START: + if (matches(key, value)) { + store.offset[store.seen] = ftell(config_file); + store.state = KEY_SEEN; + store.seen++; + } else { + if (strrchr(key, '.') - key == store.baselen && + !strncmp(key, store.key, store.baselen)) { + store.state = SECTION_SEEN; + store.offset[store.seen] = ftell(config_file); + } + } + } + return 0; +} + +static int store_write_section(int fd, const char* key) +{ + const char *dot; + int i, success; + struct strbuf sb = STRBUF_INIT; + + dot = memchr(key, '.', store.baselen); + if (dot) { + strbuf_addf(&sb, "[%.*s \"", (int)(dot - key), key); + for (i = dot - key + 1; i < store.baselen; i++) { + if (key[i] == '"' || key[i] == '\\') + strbuf_addch(&sb, '\\'); + strbuf_addch(&sb, key[i]); + } + strbuf_addstr(&sb, "\"]\n"); + } else { + strbuf_addf(&sb, "[%.*s]\n", store.baselen, key); + } + + success = write_in_full(fd, sb.buf, sb.len) == sb.len; + strbuf_release(&sb); + + return success; +} + +static int store_write_pair(int fd, const char* key, const char* value) +{ + int i, success; + int length = strlen(key + store.baselen + 1); + const char *quote = ""; + struct strbuf sb = STRBUF_INIT; + + /* + * Check to see if the value needs to be surrounded with a dq pair. + * Note that problematic characters are always backslash-quoted; this + * check is about not losing leading or trailing SP and strings that + * follow beginning-of-comment characters (i.e. ';' and '#') by the + * configuration parser. + */ + if (value[0] == ' ') + quote = "\""; + for (i = 0; value[i]; i++) + if (value[i] == ';' || value[i] == '#') + quote = "\""; + if (i && value[i - 1] == ' ') + quote = "\""; + + strbuf_addf(&sb, "\t%.*s = %s", + length, key + store.baselen + 1, quote); + + for (i = 0; value[i]; i++) + switch (value[i]) { + case '\n': + strbuf_addstr(&sb, "\\n"); + break; + case '\t': + strbuf_addstr(&sb, "\\t"); + break; + case '"': + case '\\': + strbuf_addch(&sb, '\\'); + default: + strbuf_addch(&sb, value[i]); + break; + } + strbuf_addf(&sb, "%s\n", quote); + + success = write_in_full(fd, sb.buf, sb.len) == sb.len; + strbuf_release(&sb); + + return success; +} + +static ssize_t find_beginning_of_line(const char* contents, size_t size, + size_t offset_, int* found_bracket) +{ + size_t equal_offset = size, bracket_offset = size; + ssize_t offset; + +contline: + for (offset = offset_-2; offset > 0 + && contents[offset] != '\n'; offset--) + switch (contents[offset]) { + case '=': equal_offset = offset; break; + case ']': bracket_offset = offset; break; + } + if (offset > 0 && contents[offset-1] == '\\') { + offset_ = offset; + goto contline; + } + if (bracket_offset < equal_offset) { + *found_bracket = 1; + offset = bracket_offset+1; + } else + offset++; + + return offset; +} + +int perf_config_set(const char* key, const char* value) +{ + return perf_config_set_multivar(key, value, NULL, 0); +} + +/* + * If value==NULL, unset in (remove from) config, + * if value_regex!=NULL, disregard key/value pairs where value does not match. + * if multi_replace==0, nothing, or only one matching key/value is replaced, + * else all matching key/values (regardless how many) are removed, + * before the new pair is written. + * + * Returns 0 on success. + * + * This function does this: + * + * - it locks the config file by creating ".perf/config.lock" + * + * - it then parses the config using store_aux() as validator to find + * the position on the key/value pair to replace. 
If it is to be unset, + * it must be found exactly once. + * + * - the config file is mmap()ed and the part before the match (if any) is + * written to the lock file, then the changed part and the rest. + * + * - the config file is removed and the lock file rename()d to it. + * + */ +int perf_config_set_multivar(const char* key, const char* value, + const char* value_regex, int multi_replace) +{ + int i, dot; + int fd = -1, in_fd; + int ret = 0; + char* config_filename; + const char* last_dot = strrchr(key, '.'); + + if (config_exclusive_filename) + config_filename = strdup(config_exclusive_filename); + else + config_filename = perf_pathdup("config"); + + /* + * Since "key" actually contains the section name and the real + * key name separated by a dot, we have to know where the dot is. + */ + + if (last_dot == NULL) { + error("key does not contain a section: %s", key); + ret = 2; + goto out_free; + } + store.baselen = last_dot - key; + + store.multi_replace = multi_replace; + + /* + * Validate the key and while at it, lower case it for matching. + */ + store.key = malloc(strlen(key) + 1); + dot = 0; + for (i = 0; key[i]; i++) { + unsigned char c = key[i]; + if (c == '.') + dot = 1; + /* Leave the extended basename untouched.. */ + if (!dot || i > store.baselen) { + if (!iskeychar(c) || (i == store.baselen+1 && !isalpha(c))) { + error("invalid key: %s", key); + free(store.key); + ret = 1; + goto out_free; + } + c = tolower(c); + } else if (c == '\n') { + error("invalid key (newline): %s", key); + free(store.key); + ret = 1; + goto out_free; + } + store.key[i] = c; + } + store.key[i] = 0; + + /* + * If .perf/config does not exist yet, write a minimal version. + */ + in_fd = open(config_filename, O_RDONLY); + if ( in_fd < 0 ) { + free(store.key); + + if ( ENOENT != errno ) { + error("opening %s: %s", config_filename, + strerror(errno)); + ret = 3; /* same as "invalid config file" */ + goto out_free; + } + /* if nothing to unset, error out */ + if (value == NULL) { + ret = 5; + goto out_free; + } + + store.key = (char*)key; + if (!store_write_section(fd, key) || + !store_write_pair(fd, key, value)) + goto write_err_out; + } else { + struct stat st; + char* contents; + size_t contents_sz, copy_begin, copy_end; + int i, new_line = 0; + + if (value_regex == NULL) + store.value_regex = NULL; + else { + if (value_regex[0] == '!') { + store.do_not_match = 1; + value_regex++; + } else + store.do_not_match = 0; + + store.value_regex = (regex_t*)malloc(sizeof(regex_t)); + if (regcomp(store.value_regex, value_regex, + REG_EXTENDED)) { + error("invalid pattern: %s", value_regex); + free(store.value_regex); + ret = 6; + goto out_free; + } + } + + store.offset[0] = 0; + store.state = START; + store.seen = 0; + + /* + * After this, store.offset will contain the *end* offset + * of the last match, or remain at 0 if no match was found. + * As a side effect, we make sure to transform only a valid + * existing config file. 
+ */ + if (perf_config_from_file(store_aux, config_filename, NULL)) { + error("invalid config file %s", config_filename); + free(store.key); + if (store.value_regex != NULL) { + regfree(store.value_regex); + free(store.value_regex); + } + ret = 3; + goto out_free; + } + + free(store.key); + if (store.value_regex != NULL) { + regfree(store.value_regex); + free(store.value_regex); + } + + /* if nothing to unset, or too many matches, error out */ + if ((store.seen == 0 && value == NULL) || + (store.seen > 1 && multi_replace == 0)) { + ret = 5; + goto out_free; + } + + fstat(in_fd, &st); + contents_sz = xsize_t(st.st_size); + contents = mmap(NULL, contents_sz, PROT_READ, + MAP_PRIVATE, in_fd, 0); + close(in_fd); + + if (store.seen == 0) + store.seen = 1; + + for (i = 0, copy_begin = 0; i < store.seen; i++) { + if (store.offset[i] == 0) { + store.offset[i] = copy_end = contents_sz; + } else if (store.state != KEY_SEEN) { + copy_end = store.offset[i]; + } else + copy_end = find_beginning_of_line( + contents, contents_sz, + store.offset[i]-2, &new_line); + + if (copy_end > 0 && contents[copy_end-1] != '\n') + new_line = 1; + + /* write the first part of the config */ + if (copy_end > copy_begin) { + if (write_in_full(fd, contents + copy_begin, + copy_end - copy_begin) < + copy_end - copy_begin) + goto write_err_out; + if (new_line && + write_in_full(fd, "\n", 1) != 1) + goto write_err_out; + } + copy_begin = store.offset[i]; + } + + /* write the pair (value == NULL means unset) */ + if (value != NULL) { + if (store.state == START) { + if (!store_write_section(fd, key)) + goto write_err_out; + } + if (!store_write_pair(fd, key, value)) + goto write_err_out; + } + + /* write the rest of the config */ + if (copy_begin < contents_sz) + if (write_in_full(fd, contents + copy_begin, + contents_sz - copy_begin) < + contents_sz - copy_begin) + goto write_err_out; + + munmap(contents, contents_sz); + } + + ret = 0; + +out_free: + free(config_filename); + return ret; + +write_err_out: + goto out_free; + +} + +/* + * Call this to report error for your variable that should not + * get a boolean value (i.e. "[my] var" means "true"). + */ +int config_error_nonbool(const char *var) +{ + return error("Missing value for '%s'", var); +} diff --git a/Documentation/perf_counter/util/ctype.c b/Documentation/perf_counter/util/ctype.c new file mode 100644 index 00000000000..b90ec004f29 --- /dev/null +++ b/Documentation/perf_counter/util/ctype.c @@ -0,0 +1,26 @@ +/* + * Sane locale-independent, ASCII ctype. + * + * No surprises, and works with signed and unsigned chars. + */ +#include "cache.h" + +enum { + S = GIT_SPACE, + A = GIT_ALPHA, + D = GIT_DIGIT, + G = GIT_GLOB_SPECIAL, /* *, ?, [, \\ */ + R = GIT_REGEX_SPECIAL, /* $, (, ), +, ., ^, {, | * */ +}; + +unsigned char sane_ctype[256] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, S, S, 0, 0, S, 0, 0, /* 0.. 15 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 16.. 31 */ + S, 0, 0, 0, R, 0, 0, 0, R, R, G, R, 0, 0, R, 0, /* 32.. 47 */ + D, D, D, D, D, D, D, D, D, D, 0, 0, 0, 0, 0, G, /* 48.. 63 */ + 0, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 64.. 79 */ + A, A, A, A, A, A, A, A, A, A, A, G, G, 0, R, 0, /* 80.. 95 */ + 0, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 96..111 */ + A, A, A, A, A, A, A, A, A, A, A, R, R, 0, 0, 0, /* 112..127 */ + /* Nothing in the 128.. 
range */ +}; diff --git a/Documentation/perf_counter/util/exec_cmd.c b/Documentation/perf_counter/util/exec_cmd.c new file mode 100644 index 00000000000..d3929226315 --- /dev/null +++ b/Documentation/perf_counter/util/exec_cmd.c @@ -0,0 +1,165 @@ +#include "cache.h" +#include "exec_cmd.h" +#include "quote.h" +#define MAX_ARGS 32 + +extern char **environ; +static const char *argv_exec_path; +static const char *argv0_path; + +const char *system_path(const char *path) +{ +#ifdef RUNTIME_PREFIX + static const char *prefix; +#else + static const char *prefix = PREFIX; +#endif + struct strbuf d = STRBUF_INIT; + + if (is_absolute_path(path)) + return path; + +#ifdef RUNTIME_PREFIX + assert(argv0_path); + assert(is_absolute_path(argv0_path)); + + if (!prefix && + !(prefix = strip_path_suffix(argv0_path, PERF_EXEC_PATH)) && + !(prefix = strip_path_suffix(argv0_path, BINDIR)) && + !(prefix = strip_path_suffix(argv0_path, "perf"))) { + prefix = PREFIX; + fprintf(stderr, "RUNTIME_PREFIX requested, " + "but prefix computation failed. " + "Using static fallback '%s'.\n", prefix); + } +#endif + + strbuf_addf(&d, "%s/%s", prefix, path); + path = strbuf_detach(&d, NULL); + return path; +} + +const char *perf_extract_argv0_path(const char *argv0) +{ + const char *slash; + + if (!argv0 || !*argv0) + return NULL; + slash = argv0 + strlen(argv0); + + while (argv0 <= slash && !is_dir_sep(*slash)) + slash--; + + if (slash >= argv0) { + argv0_path = strndup(argv0, slash - argv0); + return slash + 1; + } + + return argv0; +} + +void perf_set_argv_exec_path(const char *exec_path) +{ + argv_exec_path = exec_path; + /* + * Propagate this setting to external programs. + */ + setenv(EXEC_PATH_ENVIRONMENT, exec_path, 1); +} + + +/* Returns the highest-priority, location to look for perf programs. */ +const char *perf_exec_path(void) +{ + const char *env; + + if (argv_exec_path) + return argv_exec_path; + + env = getenv(EXEC_PATH_ENVIRONMENT); + if (env && *env) { + return env; + } + + return system_path(PERF_EXEC_PATH); +} + +static void add_path(struct strbuf *out, const char *path) +{ + if (path && *path) { + if (is_absolute_path(path)) + strbuf_addstr(out, path); + else + strbuf_addstr(out, make_nonrelative_path(path)); + + strbuf_addch(out, PATH_SEP); + } +} + +void setup_path(void) +{ + const char *old_path = getenv("PATH"); + struct strbuf new_path = STRBUF_INIT; + + add_path(&new_path, perf_exec_path()); + add_path(&new_path, argv0_path); + + if (old_path) + strbuf_addstr(&new_path, old_path); + else + strbuf_addstr(&new_path, "/usr/local/bin:/usr/bin:/bin"); + + setenv("PATH", new_path.buf, 1); + + strbuf_release(&new_path); +} + +const char **prepare_perf_cmd(const char **argv) +{ + int argc; + const char **nargv; + + for (argc = 0; argv[argc]; argc++) + ; /* just counting */ + nargv = malloc(sizeof(*nargv) * (argc + 2)); + + nargv[0] = "perf"; + for (argc = 0; argv[argc]; argc++) + nargv[argc + 1] = argv[argc]; + nargv[argc + 1] = NULL; + return nargv; +} + +int execv_perf_cmd(const char **argv) { + const char **nargv = prepare_perf_cmd(argv); + + /* execvp() can only ever return if it fails */ + execvp("perf", (char **)nargv); + + free(nargv); + return -1; +} + + +int execl_perf_cmd(const char *cmd,...) 
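+/*
+ * Varargs convenience wrapper; e.g.
+ *
+ *	execl_perf_cmd("help", "record", NULL);
+ *
+ * collects { "help", "record" } and hands it to execv_perf_cmd(),
+ * which prepends "perf" and so ends up running "perf help record".
+ */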
+{ + int argc; + const char *argv[MAX_ARGS + 1]; + const char *arg; + va_list param; + + va_start(param, cmd); + argv[0] = cmd; + argc = 1; + while (argc < MAX_ARGS) { + arg = argv[argc++] = va_arg(param, char *); + if (!arg) + break; + } + va_end(param); + if (MAX_ARGS <= argc) + return error("too many args to run %s", cmd); + + argv[argc] = NULL; + return execv_perf_cmd(argv); +} diff --git a/Documentation/perf_counter/util/exec_cmd.h b/Documentation/perf_counter/util/exec_cmd.h new file mode 100644 index 00000000000..effe25eb154 --- /dev/null +++ b/Documentation/perf_counter/util/exec_cmd.h @@ -0,0 +1,13 @@ +#ifndef PERF_EXEC_CMD_H +#define PERF_EXEC_CMD_H + +extern void perf_set_argv_exec_path(const char *exec_path); +extern const char *perf_extract_argv0_path(const char *path); +extern const char *perf_exec_path(void); +extern void setup_path(void); +extern const char **prepare_perf_cmd(const char **argv); +extern int execv_perf_cmd(const char **argv); /* NULL terminated */ +extern int execl_perf_cmd(const char *cmd, ...); +extern const char *system_path(const char *path); + +#endif /* PERF_EXEC_CMD_H */ diff --git a/Documentation/perf_counter/util/generate-cmdlist.sh b/Documentation/perf_counter/util/generate-cmdlist.sh new file mode 100755 index 00000000000..f06f6fd148f --- /dev/null +++ b/Documentation/perf_counter/util/generate-cmdlist.sh @@ -0,0 +1,24 @@ +#!/bin/sh + +echo "/* Automatically generated by $0 */ +struct cmdname_help +{ + char name[16]; + char help[80]; +}; + +static struct cmdname_help common_cmds[] = {" + +sed -n -e 's/^perf-\([^ ]*\)[ ].* common.*/\1/p' command-list.txt | +sort | +while read cmd +do + sed -n ' + /^NAME/,/perf-'"$cmd"'/H + ${ + x + s/.*perf-'"$cmd"' - \(.*\)/ {"'"$cmd"'", "\1"},/ + p + }' "Documentation/perf-$cmd.txt" +done +echo "};" diff --git a/Documentation/perf_counter/util/help.c b/Documentation/perf_counter/util/help.c new file mode 100644 index 00000000000..edde541d238 --- /dev/null +++ b/Documentation/perf_counter/util/help.c @@ -0,0 +1,366 @@ +#include "cache.h" +#include "../builtin.h" +#include "exec_cmd.h" +#include "levenshtein.h" +#include "help.h" + +/* most GUI terminals set COLUMNS (although some don't export it) */ +static int term_columns(void) +{ + char *col_string = getenv("COLUMNS"); + int n_cols; + + if (col_string && (n_cols = atoi(col_string)) > 0) + return n_cols; + +#ifdef TIOCGWINSZ + { + struct winsize ws; + if (!ioctl(1, TIOCGWINSZ, &ws)) { + if (ws.ws_col) + return ws.ws_col; + } + } +#endif + + return 80; +} + +void add_cmdname(struct cmdnames *cmds, const char *name, int len) +{ + struct cmdname *ent = malloc(sizeof(*ent) + len + 1); + + ent->len = len; + memcpy(ent->name, name, len); + ent->name[len] = 0; + + ALLOC_GROW(cmds->names, cmds->cnt + 1, cmds->alloc); + cmds->names[cmds->cnt++] = ent; +} + +static void clean_cmdnames(struct cmdnames *cmds) +{ + int i; + for (i = 0; i < cmds->cnt; ++i) + free(cmds->names[i]); + free(cmds->names); + cmds->cnt = 0; + cmds->alloc = 0; +} + +static int cmdname_compare(const void *a_, const void *b_) +{ + struct cmdname *a = *(struct cmdname **)a_; + struct cmdname *b = *(struct cmdname **)b_; + return strcmp(a->name, b->name); +} + +static void uniq(struct cmdnames *cmds) +{ + int i, j; + + if (!cmds->cnt) + return; + + for (i = j = 1; i < cmds->cnt; i++) + if (strcmp(cmds->names[i]->name, cmds->names[i-1]->name)) + cmds->names[j++] = cmds->names[i]; + + cmds->cnt = j; +} + +void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes) +{ + int ci, cj, ei; + int cmp; + 
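+	/*
+	 * Both lists are sorted (load_command_list() qsorts them), so a
+	 * single merge-style walk is enough; e.g. commands found both in
+	 * the exec path and in $PATH are dropped from the $PATH list.
+	 */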
+ ci = cj = ei = 0; + while (ci < cmds->cnt && ei < excludes->cnt) { + cmp = strcmp(cmds->names[ci]->name, excludes->names[ei]->name); + if (cmp < 0) + cmds->names[cj++] = cmds->names[ci++]; + else if (cmp == 0) + ci++, ei++; + else if (cmp > 0) + ei++; + } + + while (ci < cmds->cnt) + cmds->names[cj++] = cmds->names[ci++]; + + cmds->cnt = cj; +} + +static void pretty_print_string_list(struct cmdnames *cmds, int longest) +{ + int cols = 1, rows; + int space = longest + 1; /* min 1 SP between words */ + int max_cols = term_columns() - 1; /* don't print *on* the edge */ + int i, j; + + if (space < max_cols) + cols = max_cols / space; + rows = (cmds->cnt + cols - 1) / cols; + + for (i = 0; i < rows; i++) { + printf(" "); + + for (j = 0; j < cols; j++) { + int n = j * rows + i; + int size = space; + if (n >= cmds->cnt) + break; + if (j == cols-1 || n + rows >= cmds->cnt) + size = 1; + printf("%-*s", size, cmds->names[n]->name); + } + putchar('\n'); + } +} + +static int is_executable(const char *name) +{ + struct stat st; + + if (stat(name, &st) || /* stat, not lstat */ + !S_ISREG(st.st_mode)) + return 0; + +#ifdef __MINGW32__ + /* cannot trust the executable bit, peek into the file instead */ + char buf[3] = { 0 }; + int n; + int fd = open(name, O_RDONLY); + st.st_mode &= ~S_IXUSR; + if (fd >= 0) { + n = read(fd, buf, 2); + if (n == 2) + /* DOS executables start with "MZ" */ + if (!strcmp(buf, "#!") || !strcmp(buf, "MZ")) + st.st_mode |= S_IXUSR; + close(fd); + } +#endif + return st.st_mode & S_IXUSR; +} + +static void list_commands_in_dir(struct cmdnames *cmds, + const char *path, + const char *prefix) +{ + int prefix_len; + DIR *dir = opendir(path); + struct dirent *de; + struct strbuf buf = STRBUF_INIT; + int len; + + if (!dir) + return; + if (!prefix) + prefix = "perf-"; + prefix_len = strlen(prefix); + + strbuf_addf(&buf, "%s/", path); + len = buf.len; + + while ((de = readdir(dir)) != NULL) { + int entlen; + + if (prefixcmp(de->d_name, prefix)) + continue; + + strbuf_setlen(&buf, len); + strbuf_addstr(&buf, de->d_name); + if (!is_executable(buf.buf)) + continue; + + entlen = strlen(de->d_name) - prefix_len; + if (has_extension(de->d_name, ".exe")) + entlen -= 4; + + add_cmdname(cmds, de->d_name + prefix_len, entlen); + } + closedir(dir); + strbuf_release(&buf); +} + +void load_command_list(const char *prefix, + struct cmdnames *main_cmds, + struct cmdnames *other_cmds) +{ + const char *env_path = getenv("PATH"); + const char *exec_path = perf_exec_path(); + + if (exec_path) { + list_commands_in_dir(main_cmds, exec_path, prefix); + qsort(main_cmds->names, main_cmds->cnt, + sizeof(*main_cmds->names), cmdname_compare); + uniq(main_cmds); + } + + if (env_path) { + char *paths, *path, *colon; + path = paths = strdup(env_path); + while (1) { + if ((colon = strchr(path, PATH_SEP))) + *colon = 0; + if (!exec_path || strcmp(path, exec_path)) + list_commands_in_dir(other_cmds, path, prefix); + + if (!colon) + break; + path = colon + 1; + } + free(paths); + + qsort(other_cmds->names, other_cmds->cnt, + sizeof(*other_cmds->names), cmdname_compare); + uniq(other_cmds); + } + exclude_cmds(other_cmds, main_cmds); +} + +void list_commands(const char *title, struct cmdnames *main_cmds, + struct cmdnames *other_cmds) +{ + int i, longest = 0; + + for (i = 0; i < main_cmds->cnt; i++) + if (longest < main_cmds->names[i]->len) + longest = main_cmds->names[i]->len; + for (i = 0; i < other_cmds->cnt; i++) + if (longest < other_cmds->names[i]->len) + longest = other_cmds->names[i]->len; + + if (main_cmds->cnt) { 
+ const char *exec_path = perf_exec_path(); + printf("available %s in '%s'\n", title, exec_path); + printf("----------------"); + mput_char('-', strlen(title) + strlen(exec_path)); + putchar('\n'); + pretty_print_string_list(main_cmds, longest); + putchar('\n'); + } + + if (other_cmds->cnt) { + printf("%s available from elsewhere on your $PATH\n", title); + printf("---------------------------------------"); + mput_char('-', strlen(title)); + putchar('\n'); + pretty_print_string_list(other_cmds, longest); + putchar('\n'); + } +} + +int is_in_cmdlist(struct cmdnames *c, const char *s) +{ + int i; + for (i = 0; i < c->cnt; i++) + if (!strcmp(s, c->names[i]->name)) + return 1; + return 0; +} + +static int autocorrect; +static struct cmdnames aliases; + +static int perf_unknown_cmd_config(const char *var, const char *value, void *cb) +{ + if (!strcmp(var, "help.autocorrect")) + autocorrect = perf_config_int(var,value); + /* Also use aliases for command lookup */ + if (!prefixcmp(var, "alias.")) + add_cmdname(&aliases, var + 6, strlen(var + 6)); + + return perf_default_config(var, value, cb); +} + +static int levenshtein_compare(const void *p1, const void *p2) +{ + const struct cmdname *const *c1 = p1, *const *c2 = p2; + const char *s1 = (*c1)->name, *s2 = (*c2)->name; + int l1 = (*c1)->len; + int l2 = (*c2)->len; + return l1 != l2 ? l1 - l2 : strcmp(s1, s2); +} + +static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old) +{ + int i; + ALLOC_GROW(cmds->names, cmds->cnt + old->cnt, cmds->alloc); + + for (i = 0; i < old->cnt; i++) + cmds->names[cmds->cnt++] = old->names[i]; + free(old->names); + old->cnt = 0; + old->names = NULL; +} + +const char *help_unknown_cmd(const char *cmd) +{ + int i, n, best_similarity = 0; + struct cmdnames main_cmds, other_cmds; + + memset(&main_cmds, 0, sizeof(main_cmds)); + memset(&other_cmds, 0, sizeof(main_cmds)); + memset(&aliases, 0, sizeof(aliases)); + + perf_config(perf_unknown_cmd_config, NULL); + + load_command_list("perf-", &main_cmds, &other_cmds); + + add_cmd_list(&main_cmds, &aliases); + add_cmd_list(&main_cmds, &other_cmds); + qsort(main_cmds.names, main_cmds.cnt, + sizeof(main_cmds.names), cmdname_compare); + uniq(&main_cmds); + + /* This reuses cmdname->len for similarity index */ + for (i = 0; i < main_cmds.cnt; ++i) + main_cmds.names[i]->len = + levenshtein(cmd, main_cmds.names[i]->name, 0, 2, 1, 4); + + qsort(main_cmds.names, main_cmds.cnt, + sizeof(*main_cmds.names), levenshtein_compare); + + if (!main_cmds.cnt) + die ("Uh oh. Your system reports no Git commands at all."); + + best_similarity = main_cmds.names[0]->len; + n = 1; + while (n < main_cmds.cnt && best_similarity == main_cmds.names[n]->len) + ++n; + if (autocorrect && n == 1) { + const char *assumed = main_cmds.names[0]->name; + main_cmds.names[0] = NULL; + clean_cmdnames(&main_cmds); + fprintf(stderr, "WARNING: You called a Git program named '%s', " + "which does not exist.\n" + "Continuing under the assumption that you meant '%s'\n", + cmd, assumed); + if (autocorrect > 0) { + fprintf(stderr, "in %0.1f seconds automatically...\n", + (float)autocorrect/10.0); + poll(NULL, 0, autocorrect * 100); + } + return assumed; + } + + fprintf(stderr, "perf: '%s' is not a perf-command. See 'perf --help'.\n", cmd); + + if (best_similarity < 6) { + fprintf(stderr, "\nDid you mean %s?\n", + n < 2 ? 
"this": "one of these"); + + for (i = 0; i < n; i++) + fprintf(stderr, "\t%s\n", main_cmds.names[i]->name); + } + + exit(1); +} + +int cmd_version(int argc, const char **argv, const char *prefix) +{ + printf("perf version %s\n", perf_version_string); + return 0; +} diff --git a/Documentation/perf_counter/util/help.h b/Documentation/perf_counter/util/help.h new file mode 100644 index 00000000000..56bc15406ff --- /dev/null +++ b/Documentation/perf_counter/util/help.h @@ -0,0 +1,29 @@ +#ifndef HELP_H +#define HELP_H + +struct cmdnames { + int alloc; + int cnt; + struct cmdname { + size_t len; /* also used for similarity index in help.c */ + char name[FLEX_ARRAY]; + } **names; +}; + +static inline void mput_char(char c, unsigned int num) +{ + while(num--) + putchar(c); +} + +void load_command_list(const char *prefix, + struct cmdnames *main_cmds, + struct cmdnames *other_cmds); +void add_cmdname(struct cmdnames *cmds, const char *name, int len); +/* Here we require that excludes is a sorted list. */ +void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes); +int is_in_cmdlist(struct cmdnames *c, const char *s); +void list_commands(const char *title, struct cmdnames *main_cmds, + struct cmdnames *other_cmds); + +#endif /* HELP_H */ diff --git a/Documentation/perf_counter/util/levenshtein.c b/Documentation/perf_counter/util/levenshtein.c new file mode 100644 index 00000000000..e521d1516df --- /dev/null +++ b/Documentation/perf_counter/util/levenshtein.c @@ -0,0 +1,84 @@ +#include "cache.h" +#include "levenshtein.h" + +/* + * This function implements the Damerau-Levenshtein algorithm to + * calculate a distance between strings. + * + * Basically, it says how many letters need to be swapped, substituted, + * deleted from, or added to string1, at least, to get string2. + * + * The idea is to build a distance matrix for the substrings of both + * strings. To avoid a large space complexity, only the last three rows + * are kept in memory (if swaps had the same or higher cost as one deletion + * plus one insertion, only two rows would be needed). + * + * At any stage, "i + 1" denotes the length of the current substring of + * string1 that the distance is calculated for. + * + * row2 holds the current row, row1 the previous row (i.e. for the substring + * of string1 of length "i"), and row0 the row before that. + * + * In other words, at the start of the big loop, row2[j + 1] contains the + * Damerau-Levenshtein distance between the substring of string1 of length + * "i" and the substring of string2 of length "j + 1". + * + * All the big loop does is determine the partial minimum-cost paths. + * + * It does so by calculating the costs of the path ending in characters + * i (in string1) and j (in string2), respectively, given that the last + * operation is a substition, a swap, a deletion, or an insertion. + * + * This implementation allows the costs to be weighted: + * + * - w (as in "sWap") + * - s (as in "Substitution") + * - a (for insertion, AKA "Add") + * - d (as in "Deletion") + * + * Note that this algorithm calculates a distance _iff_ d == a. 
+ */ +int levenshtein(const char *string1, const char *string2, + int w, int s, int a, int d) +{ + int len1 = strlen(string1), len2 = strlen(string2); + int *row0 = malloc(sizeof(int) * (len2 + 1)); + int *row1 = malloc(sizeof(int) * (len2 + 1)); + int *row2 = malloc(sizeof(int) * (len2 + 1)); + int i, j; + + for (j = 0; j <= len2; j++) + row1[j] = j * a; + for (i = 0; i < len1; i++) { + int *dummy; + + row2[0] = (i + 1) * d; + for (j = 0; j < len2; j++) { + /* substitution */ + row2[j + 1] = row1[j] + s * (string1[i] != string2[j]); + /* swap */ + if (i > 0 && j > 0 && string1[i - 1] == string2[j] && + string1[i] == string2[j - 1] && + row2[j + 1] > row0[j - 1] + w) + row2[j + 1] = row0[j - 1] + w; + /* deletion */ + if (row2[j + 1] > row1[j + 1] + d) + row2[j + 1] = row1[j + 1] + d; + /* insertion */ + if (row2[j + 1] > row2[j] + a) + row2[j + 1] = row2[j] + a; + } + + dummy = row0; + row0 = row1; + row1 = row2; + row2 = dummy; + } + + i = row1[len2]; + free(row0); + free(row1); + free(row2); + + return i; +} diff --git a/Documentation/perf_counter/util/levenshtein.h b/Documentation/perf_counter/util/levenshtein.h new file mode 100644 index 00000000000..0173abeef52 --- /dev/null +++ b/Documentation/perf_counter/util/levenshtein.h @@ -0,0 +1,8 @@ +#ifndef LEVENSHTEIN_H +#define LEVENSHTEIN_H + +int levenshtein(const char *string1, const char *string2, + int swap_penalty, int substition_penalty, + int insertion_penalty, int deletion_penalty); + +#endif diff --git a/Documentation/perf_counter/util/parse-options.c b/Documentation/perf_counter/util/parse-options.c new file mode 100644 index 00000000000..28b34c1c29c --- /dev/null +++ b/Documentation/perf_counter/util/parse-options.c @@ -0,0 +1,492 @@ +#include "util.h" +#include "parse-options.h" +#include "cache.h" + +#define OPT_SHORT 1 +#define OPT_UNSET 2 + +static int opterror(const struct option *opt, const char *reason, int flags) +{ + if (flags & OPT_SHORT) + return error("switch `%c' %s", opt->short_name, reason); + if (flags & OPT_UNSET) + return error("option `no-%s' %s", opt->long_name, reason); + return error("option `%s' %s", opt->long_name, reason); +} + +static int get_arg(struct parse_opt_ctx_t *p, const struct option *opt, + int flags, const char **arg) +{ + if (p->opt) { + *arg = p->opt; + p->opt = NULL; + } else if (p->argc == 1 && (opt->flags & PARSE_OPT_LASTARG_DEFAULT)) { + *arg = (const char *)opt->defval; + } else if (p->argc > 1) { + p->argc--; + *arg = *++p->argv; + } else + return opterror(opt, "requires a value", flags); + return 0; +} + +static int get_value(struct parse_opt_ctx_t *p, + const struct option *opt, int flags) +{ + const char *s, *arg; + const int unset = flags & OPT_UNSET; + + if (unset && p->opt) + return opterror(opt, "takes no value", flags); + if (unset && (opt->flags & PARSE_OPT_NONEG)) + return opterror(opt, "isn't available", flags); + + if (!(flags & OPT_SHORT) && p->opt) { + switch (opt->type) { + case OPTION_CALLBACK: + if (!(opt->flags & PARSE_OPT_NOARG)) + break; + /* FALLTHROUGH */ + case OPTION_BOOLEAN: + case OPTION_BIT: + case OPTION_SET_INT: + case OPTION_SET_PTR: + return opterror(opt, "takes no value", flags); + default: + break; + } + } + + switch (opt->type) { + case OPTION_BIT: + if (unset) + *(int *)opt->value &= ~opt->defval; + else + *(int *)opt->value |= opt->defval; + return 0; + + case OPTION_BOOLEAN: + *(int *)opt->value = unset ? 0 : *(int *)opt->value + 1; + return 0; + + case OPTION_SET_INT: + *(int *)opt->value = unset ? 
0 : opt->defval; + return 0; + + case OPTION_SET_PTR: + *(void **)opt->value = unset ? NULL : (void *)opt->defval; + return 0; + + case OPTION_STRING: + if (unset) + *(const char **)opt->value = NULL; + else if (opt->flags & PARSE_OPT_OPTARG && !p->opt) + *(const char **)opt->value = (const char *)opt->defval; + else + return get_arg(p, opt, flags, (const char **)opt->value); + return 0; + + case OPTION_CALLBACK: + if (unset) + return (*opt->callback)(opt, NULL, 1) ? (-1) : 0; + if (opt->flags & PARSE_OPT_NOARG) + return (*opt->callback)(opt, NULL, 0) ? (-1) : 0; + if (opt->flags & PARSE_OPT_OPTARG && !p->opt) + return (*opt->callback)(opt, NULL, 0) ? (-1) : 0; + if (get_arg(p, opt, flags, &arg)) + return -1; + return (*opt->callback)(opt, arg, 0) ? (-1) : 0; + + case OPTION_INTEGER: + if (unset) { + *(int *)opt->value = 0; + return 0; + } + if (opt->flags & PARSE_OPT_OPTARG && !p->opt) { + *(int *)opt->value = opt->defval; + return 0; + } + if (get_arg(p, opt, flags, &arg)) + return -1; + *(int *)opt->value = strtol(arg, (char **)&s, 10); + if (*s) + return opterror(opt, "expects a numerical value", flags); + return 0; + + default: + die("should not happen, someone must be hit on the forehead"); + } +} + +static int parse_short_opt(struct parse_opt_ctx_t *p, const struct option *options) +{ + for (; options->type != OPTION_END; options++) { + if (options->short_name == *p->opt) { + p->opt = p->opt[1] ? p->opt + 1 : NULL; + return get_value(p, options, OPT_SHORT); + } + } + return -2; +} + +static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg, + const struct option *options) +{ + const char *arg_end = strchr(arg, '='); + const struct option *abbrev_option = NULL, *ambiguous_option = NULL; + int abbrev_flags = 0, ambiguous_flags = 0; + + if (!arg_end) + arg_end = arg + strlen(arg); + + for (; options->type != OPTION_END; options++) { + const char *rest; + int flags = 0; + + if (!options->long_name) + continue; + + rest = skip_prefix(arg, options->long_name); + if (options->type == OPTION_ARGUMENT) { + if (!rest) + continue; + if (*rest == '=') + return opterror(options, "takes no value", flags); + if (*rest) + continue; + p->out[p->cpidx++] = arg - 2; + return 0; + } + if (!rest) { + /* abbreviated? */ + if (!strncmp(options->long_name, arg, arg_end - arg)) { +is_abbreviated: + if (abbrev_option) { + /* + * If this is abbreviated, it is + * ambiguous. So when there is no + * exact match later, we need to + * error out. + */ + ambiguous_option = abbrev_option; + ambiguous_flags = abbrev_flags; + } + if (!(flags & OPT_UNSET) && *arg_end) + p->opt = arg_end + 1; + abbrev_option = options; + abbrev_flags = flags; + continue; + } + /* negated and abbreviated very much? */ + if (!prefixcmp("no-", arg)) { + flags |= OPT_UNSET; + goto is_abbreviated; + } + /* negated? */ + if (strncmp(arg, "no-", 3)) + continue; + flags |= OPT_UNSET; + rest = skip_prefix(arg + 3, options->long_name); + /* abbreviated and negated? */ + if (!rest && !prefixcmp(options->long_name, arg + 3)) + goto is_abbreviated; + if (!rest) + continue; + } + if (*rest) { + if (*rest != '=') + continue; + p->opt = rest + 1; + } + return get_value(p, options, flags); + } + + if (ambiguous_option) + return error("Ambiguous option: %s " + "(could be --%s%s or --%s%s)", + arg, + (ambiguous_flags & OPT_UNSET) ? "no-" : "", + ambiguous_option->long_name, + (abbrev_flags & OPT_UNSET) ? 
"no-" : "", + abbrev_option->long_name); + if (abbrev_option) + return get_value(p, abbrev_option, abbrev_flags); + return -2; +} + +static void check_typos(const char *arg, const struct option *options) +{ + if (strlen(arg) < 3) + return; + + if (!prefixcmp(arg, "no-")) { + error ("did you mean `--%s` (with two dashes ?)", arg); + exit(129); + } + + for (; options->type != OPTION_END; options++) { + if (!options->long_name) + continue; + if (!prefixcmp(options->long_name, arg)) { + error ("did you mean `--%s` (with two dashes ?)", arg); + exit(129); + } + } +} + +void parse_options_start(struct parse_opt_ctx_t *ctx, + int argc, const char **argv, int flags) +{ + memset(ctx, 0, sizeof(*ctx)); + ctx->argc = argc - 1; + ctx->argv = argv + 1; + ctx->out = argv; + ctx->cpidx = ((flags & PARSE_OPT_KEEP_ARGV0) != 0); + ctx->flags = flags; + if ((flags & PARSE_OPT_KEEP_UNKNOWN) && + (flags & PARSE_OPT_STOP_AT_NON_OPTION)) + die("STOP_AT_NON_OPTION and KEEP_UNKNOWN don't go together"); +} + +static int usage_with_options_internal(const char * const *, + const struct option *, int); + +int parse_options_step(struct parse_opt_ctx_t *ctx, + const struct option *options, + const char * const usagestr[]) +{ + int internal_help = !(ctx->flags & PARSE_OPT_NO_INTERNAL_HELP); + + /* we must reset ->opt, unknown short option leave it dangling */ + ctx->opt = NULL; + + for (; ctx->argc; ctx->argc--, ctx->argv++) { + const char *arg = ctx->argv[0]; + + if (*arg != '-' || !arg[1]) { + if (ctx->flags & PARSE_OPT_STOP_AT_NON_OPTION) + break; + ctx->out[ctx->cpidx++] = ctx->argv[0]; + continue; + } + + if (arg[1] != '-') { + ctx->opt = arg + 1; + if (internal_help && *ctx->opt == 'h') + return parse_options_usage(usagestr, options); + switch (parse_short_opt(ctx, options)) { + case -1: + return parse_options_usage(usagestr, options); + case -2: + goto unknown; + } + if (ctx->opt) + check_typos(arg + 1, options); + while (ctx->opt) { + if (internal_help && *ctx->opt == 'h') + return parse_options_usage(usagestr, options); + switch (parse_short_opt(ctx, options)) { + case -1: + return parse_options_usage(usagestr, options); + case -2: + /* fake a short option thing to hide the fact that we may have + * started to parse aggregated stuff + * + * This is leaky, too bad. 
+ */ + ctx->argv[0] = strdup(ctx->opt - 1); + *(char *)ctx->argv[0] = '-'; + goto unknown; + } + } + continue; + } + + if (!arg[2]) { /* "--" */ + if (!(ctx->flags & PARSE_OPT_KEEP_DASHDASH)) { + ctx->argc--; + ctx->argv++; + } + break; + } + + if (internal_help && !strcmp(arg + 2, "help-all")) + return usage_with_options_internal(usagestr, options, 1); + if (internal_help && !strcmp(arg + 2, "help")) + return parse_options_usage(usagestr, options); + switch (parse_long_opt(ctx, arg + 2, options)) { + case -1: + return parse_options_usage(usagestr, options); + case -2: + goto unknown; + } + continue; +unknown: + if (!(ctx->flags & PARSE_OPT_KEEP_UNKNOWN)) + return PARSE_OPT_UNKNOWN; + ctx->out[ctx->cpidx++] = ctx->argv[0]; + ctx->opt = NULL; + } + return PARSE_OPT_DONE; +} + +int parse_options_end(struct parse_opt_ctx_t *ctx) +{ + memmove(ctx->out + ctx->cpidx, ctx->argv, ctx->argc * sizeof(*ctx->out)); + ctx->out[ctx->cpidx + ctx->argc] = NULL; + return ctx->cpidx + ctx->argc; +} + +int parse_options(int argc, const char **argv, const struct option *options, + const char * const usagestr[], int flags) +{ + struct parse_opt_ctx_t ctx; + + parse_options_start(&ctx, argc, argv, flags); + switch (parse_options_step(&ctx, options, usagestr)) { + case PARSE_OPT_HELP: + exit(129); + case PARSE_OPT_DONE: + break; + default: /* PARSE_OPT_UNKNOWN */ + if (ctx.argv[0][1] == '-') { + error("unknown option `%s'", ctx.argv[0] + 2); + } else { + error("unknown switch `%c'", *ctx.opt); + } + usage_with_options(usagestr, options); + } + + return parse_options_end(&ctx); +} + +#define USAGE_OPTS_WIDTH 24 +#define USAGE_GAP 2 + +int usage_with_options_internal(const char * const *usagestr, + const struct option *opts, int full) +{ + if (!usagestr) + return PARSE_OPT_HELP; + + fprintf(stderr, "usage: %s\n", *usagestr++); + while (*usagestr && **usagestr) + fprintf(stderr, " or: %s\n", *usagestr++); + while (*usagestr) { + fprintf(stderr, "%s%s\n", + **usagestr ? 
" " : "", + *usagestr); + usagestr++; + } + + if (opts->type != OPTION_GROUP) + fputc('\n', stderr); + + for (; opts->type != OPTION_END; opts++) { + size_t pos; + int pad; + + if (opts->type == OPTION_GROUP) { + fputc('\n', stderr); + if (*opts->help) + fprintf(stderr, "%s\n", opts->help); + continue; + } + if (!full && (opts->flags & PARSE_OPT_HIDDEN)) + continue; + + pos = fprintf(stderr, " "); + if (opts->short_name) + pos += fprintf(stderr, "-%c", opts->short_name); + if (opts->long_name && opts->short_name) + pos += fprintf(stderr, ", "); + if (opts->long_name) + pos += fprintf(stderr, "--%s", opts->long_name); + + switch (opts->type) { + case OPTION_ARGUMENT: + break; + case OPTION_INTEGER: + if (opts->flags & PARSE_OPT_OPTARG) + if (opts->long_name) + pos += fprintf(stderr, "[=]"); + else + pos += fprintf(stderr, "[]"); + else + pos += fprintf(stderr, " "); + break; + case OPTION_CALLBACK: + if (opts->flags & PARSE_OPT_NOARG) + break; + /* FALLTHROUGH */ + case OPTION_STRING: + if (opts->argh) { + if (opts->flags & PARSE_OPT_OPTARG) + if (opts->long_name) + pos += fprintf(stderr, "[=<%s>]", opts->argh); + else + pos += fprintf(stderr, "[<%s>]", opts->argh); + else + pos += fprintf(stderr, " <%s>", opts->argh); + } else { + if (opts->flags & PARSE_OPT_OPTARG) + if (opts->long_name) + pos += fprintf(stderr, "[=...]"); + else + pos += fprintf(stderr, "[...]"); + else + pos += fprintf(stderr, " ..."); + } + break; + default: /* OPTION_{BIT,BOOLEAN,SET_INT,SET_PTR} */ + break; + } + + if (pos <= USAGE_OPTS_WIDTH) + pad = USAGE_OPTS_WIDTH - pos; + else { + fputc('\n', stderr); + pad = USAGE_OPTS_WIDTH; + } + fprintf(stderr, "%*s%s\n", pad + USAGE_GAP, "", opts->help); + } + fputc('\n', stderr); + + return PARSE_OPT_HELP; +} + +void usage_with_options(const char * const *usagestr, + const struct option *opts) +{ + usage_with_options_internal(usagestr, opts, 0); + exit(129); +} + +int parse_options_usage(const char * const *usagestr, + const struct option *opts) +{ + return usage_with_options_internal(usagestr, opts, 0); +} + + +int parse_opt_verbosity_cb(const struct option *opt, const char *arg, + int unset) +{ + int *target = opt->value; + + if (unset) + /* --no-quiet, --no-verbose */ + *target = 0; + else if (opt->short_name == 'v') { + if (*target >= 0) + (*target)++; + else + *target = 1; + } else { + if (*target <= 0) + (*target)--; + else + *target = -1; + } + return 0; +} diff --git a/Documentation/perf_counter/util/parse-options.h b/Documentation/perf_counter/util/parse-options.h new file mode 100644 index 00000000000..a81c7faff68 --- /dev/null +++ b/Documentation/perf_counter/util/parse-options.h @@ -0,0 +1,172 @@ +#ifndef PARSE_OPTIONS_H +#define PARSE_OPTIONS_H + +enum parse_opt_type { + /* special types */ + OPTION_END, + OPTION_ARGUMENT, + OPTION_GROUP, + /* options with no arguments */ + OPTION_BIT, + OPTION_BOOLEAN, /* _INCR would have been a better name */ + OPTION_SET_INT, + OPTION_SET_PTR, + /* options with arguments (usually) */ + OPTION_STRING, + OPTION_INTEGER, + OPTION_CALLBACK, +}; + +enum parse_opt_flags { + PARSE_OPT_KEEP_DASHDASH = 1, + PARSE_OPT_STOP_AT_NON_OPTION = 2, + PARSE_OPT_KEEP_ARGV0 = 4, + PARSE_OPT_KEEP_UNKNOWN = 8, + PARSE_OPT_NO_INTERNAL_HELP = 16, +}; + +enum parse_opt_option_flags { + PARSE_OPT_OPTARG = 1, + PARSE_OPT_NOARG = 2, + PARSE_OPT_NONEG = 4, + PARSE_OPT_HIDDEN = 8, + PARSE_OPT_LASTARG_DEFAULT = 16, +}; + +struct option; +typedef int parse_opt_cb(const struct option *, const char *arg, int unset); + +/* + * `type`:: + * holds the type of 
the option; you must have an OPTION_END last in your
+ *   array.
+ *
+ * `short_name`::
+ *   the character to use as a short option name, '\0' if none.
+ *
+ * `long_name`::
+ *   the long option name, without the leading dashes, NULL if none.
+ *
+ * `value`::
+ *   stores pointers to the values to be filled.
+ *
+ * `argh`::
+ *   token to explain the kind of argument this option wants. Keep it
+ *   homogeneous across the repository.
+ *
+ * `help`::
+ *   the short help associated with what the option does.
+ *   Must never be NULL (except for OPTION_END).
+ *   OPTION_GROUP uses this pointer to store the group header.
+ *
+ * `flags`::
+ *   mask of parse_opt_option_flags.
+ *   PARSE_OPT_OPTARG: says that the argument is optional (not for BOOLEANs)
+ *   PARSE_OPT_NOARG: says that this option takes no argument, for CALLBACKs
+ *   PARSE_OPT_NONEG: says that this option cannot be negated
+ *   PARSE_OPT_HIDDEN: this option is skipped in the default usage, shown in
+ *                     the long one.
+ *
+ * `callback`::
+ *   pointer to the callback to use for OPTION_CALLBACK.
+ *
+ * `defval`::
+ *   default value to fill (*->value) with for PARSE_OPT_OPTARG.
+ *   OPTION_{BIT,SET_INT,SET_PTR} store the {mask,integer,pointer} to put in
+ *   the value when met.
+ *   CALLBACKS can use it like they want.
+ */
+struct option {
+	enum parse_opt_type type;
+	int short_name;
+	const char *long_name;
+	void *value;
+	const char *argh;
+	const char *help;
+
+	int flags;
+	parse_opt_cb *callback;
+	intptr_t defval;
+};
+
+#define OPT_END()                   { OPTION_END }
+#define OPT_ARGUMENT(l, h)          { OPTION_ARGUMENT, 0, (l), NULL, NULL, (h) }
+#define OPT_GROUP(h)                { OPTION_GROUP, 0, NULL, NULL, NULL, (h) }
+#define OPT_BIT(s, l, v, h, b)      { OPTION_BIT, (s), (l), (v), NULL, (h), 0, NULL, (b) }
+#define OPT_BOOLEAN(s, l, v, h)     { OPTION_BOOLEAN, (s), (l), (v), NULL, (h) }
+#define OPT_SET_INT(s, l, v, h, i)  { OPTION_SET_INT, (s), (l), (v), NULL, (h), 0, NULL, (i) }
+#define OPT_SET_PTR(s, l, v, h, p)  { OPTION_SET_PTR, (s), (l), (v), NULL, (h), 0, NULL, (p) }
+#define OPT_INTEGER(s, l, v, h)     { OPTION_INTEGER, (s), (l), (v), NULL, (h) }
+#define OPT_STRING(s, l, v, a, h)   { OPTION_STRING, (s), (l), (v), (a), (h) }
+#define OPT_DATE(s, l, v, h) \
+	{ OPTION_CALLBACK, (s), (l), (v), "time", (h), 0, \
+	  parse_opt_approxidate_cb }
+#define OPT_CALLBACK(s, l, v, a, h, f) \
+	{ OPTION_CALLBACK, (s), (l), (v), (a), (h), 0, (f) }
+
+/* parse_options() will filter out the processed options and leave the
+ * non-option arguments in argv[].
+ * Returns the number of arguments left in argv[].
+ */
+extern int parse_options(int argc, const char **argv,
+                         const struct option *options,
+                         const char * const usagestr[], int flags);
+
+extern NORETURN void usage_with_options(const char * const *usagestr,
+                                        const struct option *options);
+
+/*----- incremental advanced APIs -----*/
+
+enum {
+	PARSE_OPT_HELP = -1,
+	PARSE_OPT_DONE,
+	PARSE_OPT_UNKNOWN,
+};
+
+/*
+ * It's okay for the caller to consume argv/argc in the usual way.
+ * Other fields of that structure are private to parse-options and should not
+ * be modified in any way.
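+ *
+ * A sketch of the step-wise use this enables, mirroring what
+ * parse_options() itself does; handle_unknown() stands in for
+ * whatever the caller wants to do with an unparsed argument:
+ *
+ *	struct parse_opt_ctx_t ctx;
+ *
+ *	parse_options_start(&ctx, argc, argv, flags);
+ *	switch (parse_options_step(&ctx, options, usagestr)) {
+ *	case PARSE_OPT_HELP:
+ *		exit(129);
+ *	case PARSE_OPT_DONE:
+ *		break;
+ *	default:
+ *		handle_unknown(ctx.argv[0]);
+ *		break;
+ *	}
+ *	argc = parse_options_end(&ctx);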
+ */ +struct parse_opt_ctx_t { + const char **argv; + const char **out; + int argc, cpidx; + const char *opt; + int flags; +}; + +extern int parse_options_usage(const char * const *usagestr, + const struct option *opts); + +extern void parse_options_start(struct parse_opt_ctx_t *ctx, + int argc, const char **argv, int flags); + +extern int parse_options_step(struct parse_opt_ctx_t *ctx, + const struct option *options, + const char * const usagestr[]); + +extern int parse_options_end(struct parse_opt_ctx_t *ctx); + + +/*----- some often used options -----*/ +extern int parse_opt_abbrev_cb(const struct option *, const char *, int); +extern int parse_opt_approxidate_cb(const struct option *, const char *, int); +extern int parse_opt_verbosity_cb(const struct option *, const char *, int); + +#define OPT__VERBOSE(var) OPT_BOOLEAN('v', "verbose", (var), "be verbose") +#define OPT__QUIET(var) OPT_BOOLEAN('q', "quiet", (var), "be quiet") +#define OPT__VERBOSITY(var) \ + { OPTION_CALLBACK, 'v', "verbose", (var), NULL, "be more verbose", \ + PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 }, \ + { OPTION_CALLBACK, 'q', "quiet", (var), NULL, "be more quiet", \ + PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 } +#define OPT__DRY_RUN(var) OPT_BOOLEAN('n', "dry-run", (var), "dry run") +#define OPT__ABBREV(var) \ + { OPTION_CALLBACK, 0, "abbrev", (var), "n", \ + "use digits to display SHA-1s", \ + PARSE_OPT_OPTARG, &parse_opt_abbrev_cb, 0 } + +extern const char *parse_options_fix_filename(const char *prefix, const char *file); + +#endif diff --git a/Documentation/perf_counter/util/path.c b/Documentation/perf_counter/util/path.c new file mode 100644 index 00000000000..a501a40dd2c --- /dev/null +++ b/Documentation/perf_counter/util/path.c @@ -0,0 +1,353 @@ +/* + * I'm tired of doing "vsnprintf()" etc just to open a + * file, so here's a "return static buffer with printf" + * interface for paths. + * + * It's obviously not thread-safe. Sue me. But it's quite + * useful for doing things like + * + * f = open(mkpath("%s/%s.perf", base, name), O_RDONLY); + * + * which is what it's designed for. + */ +#include "cache.h" + +static char bad_path[] = "/bad-path/"; +/* + * Two hacks: + */ + +static char *get_perf_dir(void) +{ + return "."; +} + +size_t strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + + +static char *get_pathname(void) +{ + static char pathname_array[4][PATH_MAX]; + static int index; + return pathname_array[3 & ++index]; +} + +static char *cleanup_path(char *path) +{ + /* Clean it up */ + if (!memcmp(path, "./", 2)) { + path += 2; + while (*path == '/') + path++; + } + return path; +} + +char *mksnpath(char *buf, size_t n, const char *fmt, ...) 
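+/*
+ * e.g. mksnpath(buf, sizeof(buf), "%s/%s.perf", base, name); if the
+ * result would not fit, the caller gets the "/bad-path/" sentinel
+ * back instead of a silently truncated path.
+ */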
+{ + va_list args; + unsigned len; + + va_start(args, fmt); + len = vsnprintf(buf, n, fmt, args); + va_end(args); + if (len >= n) { + strlcpy(buf, bad_path, n); + return buf; + } + return cleanup_path(buf); +} + +static char *perf_vsnpath(char *buf, size_t n, const char *fmt, va_list args) +{ + const char *perf_dir = get_perf_dir(); + size_t len; + + len = strlen(perf_dir); + if (n < len + 1) + goto bad; + memcpy(buf, perf_dir, len); + if (len && !is_dir_sep(perf_dir[len-1])) + buf[len++] = '/'; + len += vsnprintf(buf + len, n - len, fmt, args); + if (len >= n) + goto bad; + return cleanup_path(buf); +bad: + strlcpy(buf, bad_path, n); + return buf; +} + +char *perf_snpath(char *buf, size_t n, const char *fmt, ...) +{ + va_list args; + va_start(args, fmt); + (void)perf_vsnpath(buf, n, fmt, args); + va_end(args); + return buf; +} + +char *perf_pathdup(const char *fmt, ...) +{ + char path[PATH_MAX]; + va_list args; + va_start(args, fmt); + (void)perf_vsnpath(path, sizeof(path), fmt, args); + va_end(args); + return xstrdup(path); +} + +char *mkpath(const char *fmt, ...) +{ + va_list args; + unsigned len; + char *pathname = get_pathname(); + + va_start(args, fmt); + len = vsnprintf(pathname, PATH_MAX, fmt, args); + va_end(args); + if (len >= PATH_MAX) + return bad_path; + return cleanup_path(pathname); +} + +char *perf_path(const char *fmt, ...) +{ + const char *perf_dir = get_perf_dir(); + char *pathname = get_pathname(); + va_list args; + unsigned len; + + len = strlen(perf_dir); + if (len > PATH_MAX-100) + return bad_path; + memcpy(pathname, perf_dir, len); + if (len && perf_dir[len-1] != '/') + pathname[len++] = '/'; + va_start(args, fmt); + len += vsnprintf(pathname + len, PATH_MAX - len, fmt, args); + va_end(args); + if (len >= PATH_MAX) + return bad_path; + return cleanup_path(pathname); +} + + +/* perf_mkstemp() - create tmp file honoring TMPDIR variable */ +int perf_mkstemp(char *path, size_t len, const char *template) +{ + const char *tmp; + size_t n; + + tmp = getenv("TMPDIR"); + if (!tmp) + tmp = "/tmp"; + n = snprintf(path, len, "%s/%s", tmp, template); + if (len <= n) { + errno = ENAMETOOLONG; + return -1; + } + return mkstemp(path); +} + + +const char *make_relative_path(const char *abs, const char *base) +{ + static char buf[PATH_MAX + 1]; + int baselen; + if (!base) + return abs; + baselen = strlen(base); + if (prefixcmp(abs, base)) + return abs; + if (abs[baselen] == '/') + baselen++; + else if (base[baselen - 1] != '/') + return abs; + strcpy(buf, abs + baselen); + return buf; +} + +/* + * It is okay if dst == src, but they should not overlap otherwise. + * + * Performs the following normalizations on src, storing the result in dst: + * - Ensures that components are separated by '/' (Windows only) + * - Squashes sequences of '/'. + * - Removes "." components. + * - Removes ".." components, and the components the precede them. + * Returns failure (non-zero) if a ".." component appears as first path + * component anytime during the normalization. Otherwise, returns success (0). + * + * Note that this function is purely textual. It does not follow symlinks, + * verify the existence of the path, or make any system calls. + */ +int normalize_path_copy(char *dst, const char *src) +{ + char *dst0; + + if (has_dos_drive_prefix(src)) { + *dst++ = *src++; + *dst++ = *src++; + } + dst0 = dst; + + if (is_dir_sep(*src)) { + *dst++ = '/'; + while (is_dir_sep(*src)) + src++; + } + + for (;;) { + char c = *src; + + /* + * A path component that begins with . could be + * special: + * (1) "." 
and ends -- ignore and terminate. + * (2) "./" -- ignore them, eat slash and continue. + * (3) ".." and ends -- strip one and terminate. + * (4) "../" -- strip one, eat slash and continue. + */ + if (c == '.') { + if (!src[1]) { + /* (1) */ + src++; + } else if (is_dir_sep(src[1])) { + /* (2) */ + src += 2; + while (is_dir_sep(*src)) + src++; + continue; + } else if (src[1] == '.') { + if (!src[2]) { + /* (3) */ + src += 2; + goto up_one; + } else if (is_dir_sep(src[2])) { + /* (4) */ + src += 3; + while (is_dir_sep(*src)) + src++; + goto up_one; + } + } + } + + /* copy up to the next '/', and eat all '/' */ + while ((c = *src++) != '\0' && !is_dir_sep(c)) + *dst++ = c; + if (is_dir_sep(c)) { + *dst++ = '/'; + while (is_dir_sep(c)) + c = *src++; + src--; + } else if (!c) + break; + continue; + + up_one: + /* + * dst0..dst is prefix portion, and dst[-1] is '/'; + * go up one level. + */ + dst--; /* go to trailing '/' */ + if (dst <= dst0) + return -1; + /* Windows: dst[-1] cannot be backslash anymore */ + while (dst0 < dst && dst[-1] != '/') + dst--; + } + *dst = '\0'; + return 0; +} + +/* + * path = Canonical absolute path + * prefix_list = Colon-separated list of absolute paths + * + * Determines, for each path in prefix_list, whether the "prefix" really + * is an ancestor directory of path. Returns the length of the longest + * ancestor directory, excluding any trailing slashes, or -1 if no prefix + * is an ancestor. (Note that this means 0 is returned if prefix_list is + * "/".) "/foo" is not considered an ancestor of "/foobar". Directories + * are not considered to be their own ancestors. path must be in a + * canonical form: empty components, or "." or ".." components are not + * allowed. prefix_list may be null, which is like "". + */ +int longest_ancestor_length(const char *path, const char *prefix_list) +{ + char buf[PATH_MAX+1]; + const char *ceil, *colon; + int len, max_len = -1; + + if (prefix_list == NULL || !strcmp(path, "/")) + return -1; + + for (colon = ceil = prefix_list; *colon; ceil = colon+1) { + for (colon = ceil; *colon && *colon != PATH_SEP; colon++); + len = colon - ceil; + if (len == 0 || len > PATH_MAX || !is_absolute_path(ceil)) + continue; + strlcpy(buf, ceil, len+1); + if (normalize_path_copy(buf, buf) < 0) + continue; + len = strlen(buf); + if (len > 0 && buf[len-1] == '/') + buf[--len] = '\0'; + + if (!strncmp(path, buf, len) && + path[len] == '/' && + len > max_len) { + max_len = len; + } + } + + return max_len; +} + +/* strip arbitrary amount of directory separators at end of path */ +static inline int chomp_trailing_dir_sep(const char *path, int len) +{ + while (len && is_dir_sep(path[len - 1])) + len--; + return len; +} + +/* + * If path ends with suffix (complete path components), returns the + * part before suffix (sans trailing directory separators). + * Otherwise returns NULL. 
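+ *
+ * e.g. strip_path_suffix("/usr/libexec/perf-core", "libexec/perf-core")
+ * yields "/usr", while strip_path_suffix("/usr/local", "cal") yields
+ * NULL, because "cal" is not a complete component of "local".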
+ */
+char *strip_path_suffix(const char *path, const char *suffix)
+{
+	int path_len = strlen(path), suffix_len = strlen(suffix);
+
+	while (suffix_len) {
+		if (!path_len)
+			return NULL;
+
+		if (is_dir_sep(path[path_len - 1])) {
+			if (!is_dir_sep(suffix[suffix_len - 1]))
+				return NULL;
+			path_len = chomp_trailing_dir_sep(path, path_len);
+			suffix_len = chomp_trailing_dir_sep(suffix, suffix_len);
+		}
+		else if (path[--path_len] != suffix[--suffix_len])
+			return NULL;
+	}
+
+	if (path_len && !is_dir_sep(path[path_len - 1]))
+		return NULL;
+	return xstrndup(path, chomp_trailing_dir_sep(path, path_len));
+}
diff --git a/Documentation/perf_counter/util/quote.c b/Documentation/perf_counter/util/quote.c
new file mode 100644
index 00000000000..7a49fcf6967
--- /dev/null
+++ b/Documentation/perf_counter/util/quote.c
@@ -0,0 +1,478 @@
+#include "cache.h"
+#include "quote.h"
+
+int quote_path_fully = 1;
+
+/* Help to copy the thing properly quoted for shell safety: any single
+ * quote is replaced with '\'', any exclamation point is replaced with
+ * '\!', and the whole thing is enclosed in a single quote pair.
+ *
+ * E.g.
+ *  original     sq_quote     result
+ *  name     ==> name      ==> 'name'
+ *  a b      ==> a b       ==> 'a b'
+ *  a'b      ==> a'\''b    ==> 'a'\''b'
+ *  a!b      ==> a'\!'b    ==> 'a'\!'b'
+ */
+static inline int need_bs_quote(char c)
+{
+	return (c == '\'' || c == '!');
+}
+
+void sq_quote_buf(struct strbuf *dst, const char *src)
+{
+	char *to_free = NULL;
+
+	if (dst->buf == src)
+		to_free = strbuf_detach(dst, NULL);
+
+	strbuf_addch(dst, '\'');
+	while (*src) {
+		size_t len = strcspn(src, "'!");
+		strbuf_add(dst, src, len);
+		src += len;
+		while (need_bs_quote(*src)) {
+			strbuf_addstr(dst, "'\\");
+			strbuf_addch(dst, *src++);
+			strbuf_addch(dst, '\'');
+		}
+	}
+	strbuf_addch(dst, '\'');
+	free(to_free);
+}
+
+void sq_quote_print(FILE *stream, const char *src)
+{
+	char c;
+
+	fputc('\'', stream);
+	while ((c = *src++)) {
+		if (need_bs_quote(c)) {
+			fputs("'\\", stream);
+			fputc(c, stream);
+			fputc('\'', stream);
+		} else {
+			fputc(c, stream);
+		}
+	}
+	fputc('\'', stream);
+}
+
+void sq_quote_argv(struct strbuf *dst, const char **argv, size_t maxlen)
+{
+	int i;
+
+	/* Copy into destination buffer.
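+	 * Each argument is preceded by a single space and sq-quoted,
+	 * e.g. argv = { "echo", "a b", NULL } appends " 'echo' 'a b'".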
*/ + strbuf_grow(dst, 255); + for (i = 0; argv[i]; ++i) { + strbuf_addch(dst, ' '); + sq_quote_buf(dst, argv[i]); + if (maxlen && dst->len > maxlen) + die("Too many or long arguments"); + } +} + +char *sq_dequote_step(char *arg, char **next) +{ + char *dst = arg; + char *src = arg; + char c; + + if (*src != '\'') + return NULL; + for (;;) { + c = *++src; + if (!c) + return NULL; + if (c != '\'') { + *dst++ = c; + continue; + } + /* We stepped out of sq */ + switch (*++src) { + case '\0': + *dst = 0; + if (next) + *next = NULL; + return arg; + case '\\': + c = *++src; + if (need_bs_quote(c) && *++src == '\'') { + *dst++ = c; + continue; + } + /* Fallthrough */ + default: + if (!next || !isspace(*src)) + return NULL; + do { + c = *++src; + } while (isspace(c)); + *dst = 0; + *next = src; + return arg; + } + } +} + +char *sq_dequote(char *arg) +{ + return sq_dequote_step(arg, NULL); +} + +int sq_dequote_to_argv(char *arg, const char ***argv, int *nr, int *alloc) +{ + char *next = arg; + + if (!*arg) + return 0; + do { + char *dequoted = sq_dequote_step(next, &next); + if (!dequoted) + return -1; + ALLOC_GROW(*argv, *nr + 1, *alloc); + (*argv)[(*nr)++] = dequoted; + } while (next); + + return 0; +} + +/* 1 means: quote as octal + * 0 means: quote as octal if (quote_path_fully) + * -1 means: never quote + * c: quote as "\\c" + */ +#define X8(x) x, x, x, x, x, x, x, x +#define X16(x) X8(x), X8(x) +static signed char const sq_lookup[256] = { + /* 0 1 2 3 4 5 6 7 */ + /* 0x00 */ 1, 1, 1, 1, 1, 1, 1, 'a', + /* 0x08 */ 'b', 't', 'n', 'v', 'f', 'r', 1, 1, + /* 0x10 */ X16(1), + /* 0x20 */ -1, -1, '"', -1, -1, -1, -1, -1, + /* 0x28 */ X16(-1), X16(-1), X16(-1), + /* 0x58 */ -1, -1, -1, -1,'\\', -1, -1, -1, + /* 0x60 */ X16(-1), X8(-1), + /* 0x78 */ -1, -1, -1, -1, -1, -1, -1, 1, + /* 0x80 */ /* set to 0 */ +}; + +static inline int sq_must_quote(char c) +{ + return sq_lookup[(unsigned char)c] + quote_path_fully > 0; +} + +/* returns the longest prefix not needing a quote up to maxlen if positive. + This stops at the first \0 because it's marked as a character needing an + escape */ +static size_t next_quote_pos(const char *s, ssize_t maxlen) +{ + size_t len; + if (maxlen < 0) { + for (len = 0; !sq_must_quote(s[len]); len++); + } else { + for (len = 0; len < maxlen && !sq_must_quote(s[len]); len++); + } + return len; +} + +/* + * C-style name quoting. + * + * (1) if sb and fp are both NULL, inspect the input name and counts the + * number of bytes that are needed to hold c_style quoted version of name, + * counting the double quotes around it but not terminating NUL, and + * returns it. + * However, if name does not need c_style quoting, it returns 0. + * + * (2) if sb or fp are not NULL, it emits the c_style quoted version + * of name, enclosed with double quotes if asked and needed only. + * Return value is the same as in (1). 
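+ *
+ * e.g. the name a"b is emitted as "a\"b" and 6 is returned, while a
+ * name that needs no quoting at all is emitted verbatim and the
+ * function returns 0.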
+ */ +static size_t quote_c_style_counted(const char *name, ssize_t maxlen, + struct strbuf *sb, FILE *fp, int no_dq) +{ +#undef EMIT +#define EMIT(c) \ + do { \ + if (sb) strbuf_addch(sb, (c)); \ + if (fp) fputc((c), fp); \ + count++; \ + } while (0) +#define EMITBUF(s, l) \ + do { \ + if (sb) strbuf_add(sb, (s), (l)); \ + if (fp) fwrite((s), (l), 1, fp); \ + count += (l); \ + } while (0) + + size_t len, count = 0; + const char *p = name; + + for (;;) { + int ch; + + len = next_quote_pos(p, maxlen); + if (len == maxlen || !p[len]) + break; + + if (!no_dq && p == name) + EMIT('"'); + + EMITBUF(p, len); + EMIT('\\'); + p += len; + ch = (unsigned char)*p++; + if (sq_lookup[ch] >= ' ') { + EMIT(sq_lookup[ch]); + } else { + EMIT(((ch >> 6) & 03) + '0'); + EMIT(((ch >> 3) & 07) + '0'); + EMIT(((ch >> 0) & 07) + '0'); + } + } + + EMITBUF(p, len); + if (p == name) /* no ending quote needed */ + return 0; + + if (!no_dq) + EMIT('"'); + return count; +} + +size_t quote_c_style(const char *name, struct strbuf *sb, FILE *fp, int nodq) +{ + return quote_c_style_counted(name, -1, sb, fp, nodq); +} + +void quote_two_c_style(struct strbuf *sb, const char *prefix, const char *path, int nodq) +{ + if (quote_c_style(prefix, NULL, NULL, 0) || + quote_c_style(path, NULL, NULL, 0)) { + if (!nodq) + strbuf_addch(sb, '"'); + quote_c_style(prefix, sb, NULL, 1); + quote_c_style(path, sb, NULL, 1); + if (!nodq) + strbuf_addch(sb, '"'); + } else { + strbuf_addstr(sb, prefix); + strbuf_addstr(sb, path); + } +} + +void write_name_quoted(const char *name, FILE *fp, int terminator) +{ + if (terminator) { + quote_c_style(name, NULL, fp, 0); + } else { + fputs(name, fp); + } + fputc(terminator, fp); +} + +extern void write_name_quotedpfx(const char *pfx, size_t pfxlen, + const char *name, FILE *fp, int terminator) +{ + int needquote = 0; + + if (terminator) { + needquote = next_quote_pos(pfx, pfxlen) < pfxlen + || name[next_quote_pos(name, -1)]; + } + if (needquote) { + fputc('"', fp); + quote_c_style_counted(pfx, pfxlen, NULL, fp, 1); + quote_c_style(name, NULL, fp, 1); + fputc('"', fp); + } else { + fwrite(pfx, pfxlen, 1, fp); + fputs(name, fp); + } + fputc(terminator, fp); +} + +/* quote path as relative to the given prefix */ +char *quote_path_relative(const char *in, int len, + struct strbuf *out, const char *prefix) +{ + int needquote; + + if (len < 0) + len = strlen(in); + + /* "../" prefix itself does not need quoting, but "in" might. */ + needquote = next_quote_pos(in, len) < len; + strbuf_setlen(out, 0); + strbuf_grow(out, len); + + if (needquote) + strbuf_addch(out, '"'); + if (prefix) { + int off = 0; + while (prefix[off] && off < len && prefix[off] == in[off]) + if (prefix[off] == '/') { + prefix += off + 1; + in += off + 1; + len -= off + 1; + off = 0; + } else + off++; + + for (; *prefix; prefix++) + if (*prefix == '/') + strbuf_addstr(out, "../"); + } + + quote_c_style_counted (in, len, out, NULL, 1); + + if (needquote) + strbuf_addch(out, '"'); + if (!out->len) + strbuf_addstr(out, "./"); + + return out->buf; +} + +/* + * C-style name unquoting. + * + * Quoted should point at the opening double quote. + * + Returns 0 if it was able to unquote the string properly, and appends the + * result in the strbuf `sb'. + * + Returns -1 in case of error, and doesn't touch the strbuf. Though note + * that this function will allocate memory in the strbuf, so calling + * strbuf_release is mandatory whichever result unquote_c_style returns. 
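+ * e.g. unquoting the six-byte literal "a\tb" appends the three bytes
+ * 'a', TAB and 'b' to sb and returns 0.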
+ * + * Updates endp pointer to point at one past the ending double quote if given. + */ +int unquote_c_style(struct strbuf *sb, const char *quoted, const char **endp) +{ + size_t oldlen = sb->len, len; + int ch, ac; + + if (*quoted++ != '"') + return -1; + + for (;;) { + len = strcspn(quoted, "\"\\"); + strbuf_add(sb, quoted, len); + quoted += len; + + switch (*quoted++) { + case '"': + if (endp) + *endp = quoted; + return 0; + case '\\': + break; + default: + goto error; + } + + switch ((ch = *quoted++)) { + case 'a': ch = '\a'; break; + case 'b': ch = '\b'; break; + case 'f': ch = '\f'; break; + case 'n': ch = '\n'; break; + case 'r': ch = '\r'; break; + case 't': ch = '\t'; break; + case 'v': ch = '\v'; break; + + case '\\': case '"': + break; /* verbatim */ + + /* octal values with first digit over 4 overflow */ + case '0': case '1': case '2': case '3': + ac = ((ch - '0') << 6); + if ((ch = *quoted++) < '0' || '7' < ch) + goto error; + ac |= ((ch - '0') << 3); + if ((ch = *quoted++) < '0' || '7' < ch) + goto error; + ac |= (ch - '0'); + ch = ac; + break; + default: + goto error; + } + strbuf_addch(sb, ch); + } + + error: + strbuf_setlen(sb, oldlen); + return -1; +} + +/* quoting as a string literal for other languages */ + +void perl_quote_print(FILE *stream, const char *src) +{ + const char sq = '\''; + const char bq = '\\'; + char c; + + fputc(sq, stream); + while ((c = *src++)) { + if (c == sq || c == bq) + fputc(bq, stream); + fputc(c, stream); + } + fputc(sq, stream); +} + +void python_quote_print(FILE *stream, const char *src) +{ + const char sq = '\''; + const char bq = '\\'; + const char nl = '\n'; + char c; + + fputc(sq, stream); + while ((c = *src++)) { + if (c == nl) { + fputc(bq, stream); + fputc('n', stream); + continue; + } + if (c == sq || c == bq) + fputc(bq, stream); + fputc(c, stream); + } + fputc(sq, stream); +} + +void tcl_quote_print(FILE *stream, const char *src) +{ + char c; + + fputc('"', stream); + while ((c = *src++)) { + switch (c) { + case '[': case ']': + case '{': case '}': + case '$': case '\\': case '"': + fputc('\\', stream); + default: + fputc(c, stream); + break; + case '\f': + fputs("\\f", stream); + break; + case '\r': + fputs("\\r", stream); + break; + case '\n': + fputs("\\n", stream); + break; + case '\t': + fputs("\\t", stream); + break; + case '\v': + fputs("\\v", stream); + break; + } + } + fputc('"', stream); +} diff --git a/Documentation/perf_counter/util/quote.h b/Documentation/perf_counter/util/quote.h new file mode 100644 index 00000000000..5dfad89816d --- /dev/null +++ b/Documentation/perf_counter/util/quote.h @@ -0,0 +1,68 @@ +#ifndef QUOTE_H +#define QUOTE_H + +#include +#include + +/* Help to copy the thing properly quoted for the shell safety. + * any single quote is replaced with '\'', any exclamation point + * is replaced with '\!', and the whole thing is enclosed in a + * single quote pair. + * + * For example, if you are passing the result to system() as an + * argument: + * + * sprintf(cmd, "foobar %s %s", sq_quote(arg0), sq_quote(arg1)) + * + * would be appropriate. If the system() is going to call ssh to + * run the command on the other side: + * + * sprintf(cmd, "git-diff-tree %s %s", sq_quote(arg0), sq_quote(arg1)); + * sprintf(rcmd, "ssh %s %s", sq_util/quote.host), sq_quote(cmd)); + * + * Note that the above examples leak memory! Remember to free result from + * sq_quote() in a real application. 
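+ *
+ * e.g. a!b quotes to 'a'\!'b'; sq_dequote() below performs the
+ * inverse transformation in place.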
+ * + * sq_quote_buf() writes to an existing buffer of specified size; it + * will return the number of characters that would have been written + * excluding the final null regardless of the buffer size. + */ + +extern void sq_quote_print(FILE *stream, const char *src); + +extern void sq_quote_buf(struct strbuf *, const char *src); +extern void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen); + +/* This unwraps what sq_quote() produces in place, but returns + * NULL if the input does not look like what sq_quote would have + * produced. + */ +extern char *sq_dequote(char *); + +/* + * Same as the above, but can be used to unwrap many arguments in the + * same string separated by space. "next" is changed to point to the + * next argument that should be passed as first parameter. When there + * is no more argument to be dequoted, "next" is updated to point to NULL. + */ +extern char *sq_dequote_step(char *arg, char **next); +extern int sq_dequote_to_argv(char *arg, const char ***argv, int *nr, int *alloc); + +extern int unquote_c_style(struct strbuf *, const char *quoted, const char **endp); +extern size_t quote_c_style(const char *name, struct strbuf *, FILE *, int no_dq); +extern void quote_two_c_style(struct strbuf *, const char *, const char *, int); + +extern void write_name_quoted(const char *name, FILE *, int terminator); +extern void write_name_quotedpfx(const char *pfx, size_t pfxlen, + const char *name, FILE *, int terminator); + +/* quote path as relative to the given prefix */ +char *quote_path_relative(const char *in, int len, + struct strbuf *out, const char *prefix); + +/* quoting as a string literal for other languages */ +extern void perl_quote_print(FILE *stream, const char *src); +extern void python_quote_print(FILE *stream, const char *src); +extern void tcl_quote_print(FILE *stream, const char *src); + +#endif diff --git a/Documentation/perf_counter/util/run-command.c b/Documentation/perf_counter/util/run-command.c new file mode 100644 index 00000000000..b2f5e854f40 --- /dev/null +++ b/Documentation/perf_counter/util/run-command.c @@ -0,0 +1,395 @@ +#include "cache.h" +#include "run-command.h" +#include "exec_cmd.h" + +static inline void close_pair(int fd[2]) +{ + close(fd[0]); + close(fd[1]); +} + +static inline void dup_devnull(int to) +{ + int fd = open("/dev/null", O_RDWR); + dup2(fd, to); + close(fd); +} + +int start_command(struct child_process *cmd) +{ + int need_in, need_out, need_err; + int fdin[2], fdout[2], fderr[2]; + + /* + * In case of errors we must keep the promise to close FDs + * that have been passed in via ->in and ->out. 
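+	 *
+	 * e.g. a caller that sets .out = -1 asks for a pipe: cmd->out is
+	 * replaced with its read end, while the child's stdout is wired
+	 * to the write end.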
+ */ + + need_in = !cmd->no_stdin && cmd->in < 0; + if (need_in) { + if (pipe(fdin) < 0) { + if (cmd->out > 0) + close(cmd->out); + return -ERR_RUN_COMMAND_PIPE; + } + cmd->in = fdin[1]; + } + + need_out = !cmd->no_stdout + && !cmd->stdout_to_stderr + && cmd->out < 0; + if (need_out) { + if (pipe(fdout) < 0) { + if (need_in) + close_pair(fdin); + else if (cmd->in) + close(cmd->in); + return -ERR_RUN_COMMAND_PIPE; + } + cmd->out = fdout[0]; + } + + need_err = !cmd->no_stderr && cmd->err < 0; + if (need_err) { + if (pipe(fderr) < 0) { + if (need_in) + close_pair(fdin); + else if (cmd->in) + close(cmd->in); + if (need_out) + close_pair(fdout); + else if (cmd->out) + close(cmd->out); + return -ERR_RUN_COMMAND_PIPE; + } + cmd->err = fderr[0]; + } + +#ifndef __MINGW32__ + fflush(NULL); + cmd->pid = fork(); + if (!cmd->pid) { + if (cmd->no_stdin) + dup_devnull(0); + else if (need_in) { + dup2(fdin[0], 0); + close_pair(fdin); + } else if (cmd->in) { + dup2(cmd->in, 0); + close(cmd->in); + } + + if (cmd->no_stderr) + dup_devnull(2); + else if (need_err) { + dup2(fderr[1], 2); + close_pair(fderr); + } + + if (cmd->no_stdout) + dup_devnull(1); + else if (cmd->stdout_to_stderr) + dup2(2, 1); + else if (need_out) { + dup2(fdout[1], 1); + close_pair(fdout); + } else if (cmd->out > 1) { + dup2(cmd->out, 1); + close(cmd->out); + } + + if (cmd->dir && chdir(cmd->dir)) + die("exec %s: cd to %s failed (%s)", cmd->argv[0], + cmd->dir, strerror(errno)); + if (cmd->env) { + for (; *cmd->env; cmd->env++) { + if (strchr(*cmd->env, '=')) + putenv((char*)*cmd->env); + else + unsetenv(*cmd->env); + } + } + if (cmd->preexec_cb) + cmd->preexec_cb(); + if (cmd->perf_cmd) { + execv_perf_cmd(cmd->argv); + } else { + execvp(cmd->argv[0], (char *const*) cmd->argv); + } + exit(127); + } +#else + int s0 = -1, s1 = -1, s2 = -1; /* backups of stdin, stdout, stderr */ + const char **sargv = cmd->argv; + char **env = environ; + + if (cmd->no_stdin) { + s0 = dup(0); + dup_devnull(0); + } else if (need_in) { + s0 = dup(0); + dup2(fdin[0], 0); + } else if (cmd->in) { + s0 = dup(0); + dup2(cmd->in, 0); + } + + if (cmd->no_stderr) { + s2 = dup(2); + dup_devnull(2); + } else if (need_err) { + s2 = dup(2); + dup2(fderr[1], 2); + } + + if (cmd->no_stdout) { + s1 = dup(1); + dup_devnull(1); + } else if (cmd->stdout_to_stderr) { + s1 = dup(1); + dup2(2, 1); + } else if (need_out) { + s1 = dup(1); + dup2(fdout[1], 1); + } else if (cmd->out > 1) { + s1 = dup(1); + dup2(cmd->out, 1); + } + + if (cmd->dir) + die("chdir in start_command() not implemented"); + if (cmd->env) { + env = copy_environ(); + for (; *cmd->env; cmd->env++) + env = env_setenv(env, *cmd->env); + } + + if (cmd->perf_cmd) { + cmd->argv = prepare_perf_cmd(cmd->argv); + } + + cmd->pid = mingw_spawnvpe(cmd->argv[0], cmd->argv, env); + + if (cmd->env) + free_environ(env); + if (cmd->perf_cmd) + free(cmd->argv); + + cmd->argv = sargv; + if (s0 >= 0) + dup2(s0, 0), close(s0); + if (s1 >= 0) + dup2(s1, 1), close(s1); + if (s2 >= 0) + dup2(s2, 2), close(s2); +#endif + + if (cmd->pid < 0) { + int err = errno; + if (need_in) + close_pair(fdin); + else if (cmd->in) + close(cmd->in); + if (need_out) + close_pair(fdout); + else if (cmd->out) + close(cmd->out); + if (need_err) + close_pair(fderr); + return err == ENOENT ? 
+ -ERR_RUN_COMMAND_EXEC : + -ERR_RUN_COMMAND_FORK; + } + + if (need_in) + close(fdin[0]); + else if (cmd->in) + close(cmd->in); + + if (need_out) + close(fdout[1]); + else if (cmd->out) + close(cmd->out); + + if (need_err) + close(fderr[1]); + + return 0; +} + +static int wait_or_whine(pid_t pid) +{ + for (;;) { + int status, code; + pid_t waiting = waitpid(pid, &status, 0); + + if (waiting < 0) { + if (errno == EINTR) + continue; + error("waitpid failed (%s)", strerror(errno)); + return -ERR_RUN_COMMAND_WAITPID; + } + if (waiting != pid) + return -ERR_RUN_COMMAND_WAITPID_WRONG_PID; + if (WIFSIGNALED(status)) + return -ERR_RUN_COMMAND_WAITPID_SIGNAL; + + if (!WIFEXITED(status)) + return -ERR_RUN_COMMAND_WAITPID_NOEXIT; + code = WEXITSTATUS(status); + switch (code) { + case 127: + return -ERR_RUN_COMMAND_EXEC; + case 0: + return 0; + default: + return -code; + } + } +} + +int finish_command(struct child_process *cmd) +{ + return wait_or_whine(cmd->pid); +} + +int run_command(struct child_process *cmd) +{ + int code = start_command(cmd); + if (code) + return code; + return finish_command(cmd); +} + +static void prepare_run_command_v_opt(struct child_process *cmd, + const char **argv, + int opt) +{ + memset(cmd, 0, sizeof(*cmd)); + cmd->argv = argv; + cmd->no_stdin = opt & RUN_COMMAND_NO_STDIN ? 1 : 0; + cmd->perf_cmd = opt & RUN_PERF_CMD ? 1 : 0; + cmd->stdout_to_stderr = opt & RUN_COMMAND_STDOUT_TO_STDERR ? 1 : 0; +} + +int run_command_v_opt(const char **argv, int opt) +{ + struct child_process cmd; + prepare_run_command_v_opt(&cmd, argv, opt); + return run_command(&cmd); +} + +int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env) +{ + struct child_process cmd; + prepare_run_command_v_opt(&cmd, argv, opt); + cmd.dir = dir; + cmd.env = env; + return run_command(&cmd); +} + +#ifdef __MINGW32__ +static __stdcall unsigned run_thread(void *data) +{ + struct async *async = data; + return async->proc(async->fd_for_proc, async->data); +} +#endif + +int start_async(struct async *async) +{ + int pipe_out[2]; + + if (pipe(pipe_out) < 0) + return error("cannot create pipe: %s", strerror(errno)); + async->out = pipe_out[0]; + +#ifndef __MINGW32__ + /* Flush stdio before fork() to avoid cloning buffers */ + fflush(NULL); + + async->pid = fork(); + if (async->pid < 0) { + error("fork (async) failed: %s", strerror(errno)); + close_pair(pipe_out); + return -1; + } + if (!async->pid) { + close(pipe_out[0]); + exit(!!async->proc(pipe_out[1], async->data)); + } + close(pipe_out[1]); +#else + async->fd_for_proc = pipe_out[1]; + async->tid = (HANDLE) _beginthreadex(NULL, 0, run_thread, async, 0, NULL); + if (!async->tid) { + error("cannot create thread: %s", strerror(errno)); + close_pair(pipe_out); + return -1; + } +#endif + return 0; +} + +int finish_async(struct async *async) +{ +#ifndef __MINGW32__ + int ret = 0; + + if (wait_or_whine(async->pid)) + ret = error("waitpid (async) failed"); +#else + DWORD ret = 0; + if (WaitForSingleObject(async->tid, INFINITE) != WAIT_OBJECT_0) + ret = error("waiting for thread failed: %lu", GetLastError()); + else if (!GetExitCodeThread(async->tid, &ret)) + ret = error("cannot get thread exit code: %lu", GetLastError()); + CloseHandle(async->tid); +#endif + return ret; +} + +int run_hook(const char *index_file, const char *name, ...) 
+{
+	struct child_process hook;
+	const char **argv = NULL, *env[2];
+	char index[PATH_MAX];
+	va_list args;
+	int ret;
+	size_t i = 0, alloc = 0;
+
+	if (access(perf_path("hooks/%s", name), X_OK) < 0)
+		return 0;
+
+	va_start(args, name);
+	ALLOC_GROW(argv, i + 1, alloc);
+	argv[i++] = perf_path("hooks/%s", name);
+	while (argv[i-1]) {
+		ALLOC_GROW(argv, i + 1, alloc);
+		argv[i++] = va_arg(args, const char *);
+	}
+	va_end(args);
+
+	memset(&hook, 0, sizeof(hook));
+	hook.argv = argv;
+	hook.no_stdin = 1;
+	hook.stdout_to_stderr = 1;
+	if (index_file) {
+		snprintf(index, sizeof(index), "PERF_INDEX_FILE=%s", index_file);
+		env[0] = index;
+		env[1] = NULL;
+		hook.env = env;
+	}
+
+	/*
+	 * Keep argv alive until after the warnings below; freeing it
+	 * before dereferencing argv[0] would be a use-after-free.
+	 */
+	ret = start_command(&hook);
+	if (ret) {
+		warning("Could not spawn %s", argv[0]);
+		free(argv);
+		return ret;
+	}
+	ret = finish_command(&hook);
+	if (ret == -ERR_RUN_COMMAND_WAITPID_SIGNAL)
+		warning("%s exited due to uncaught signal", argv[0]);
+	free(argv);
+
+	return ret;
+}
diff --git a/Documentation/perf_counter/util/run-command.h b/Documentation/perf_counter/util/run-command.h
new file mode 100644
index 00000000000..328289f2366
--- /dev/null
+++ b/Documentation/perf_counter/util/run-command.h
@@ -0,0 +1,93 @@
+#ifndef RUN_COMMAND_H
+#define RUN_COMMAND_H
+
+enum {
+	ERR_RUN_COMMAND_FORK = 10000,
+	ERR_RUN_COMMAND_EXEC,
+	ERR_RUN_COMMAND_PIPE,
+	ERR_RUN_COMMAND_WAITPID,
+	ERR_RUN_COMMAND_WAITPID_WRONG_PID,
+	ERR_RUN_COMMAND_WAITPID_SIGNAL,
+	ERR_RUN_COMMAND_WAITPID_NOEXIT,
+};
+#define IS_RUN_COMMAND_ERR(x) (-(x) >= ERR_RUN_COMMAND_FORK)
+
+struct child_process {
+	const char **argv;
+	pid_t pid;
+	/*
+	 * Using .in, .out, .err:
+	 * - Specify 0 for no redirections (child inherits stdin, stdout,
+	 *   stderr from parent).
+	 * - Specify -1 to have a pipe allocated as follows:
+	 *     .in: returns the writable pipe end; parent writes to it,
+	 *          the readable pipe end becomes child's stdin
+	 *     .out, .err: returns the readable pipe end; parent reads from
+	 *          it, the writable pipe end becomes child's stdout/stderr
+	 *   The caller of start_command() must close the returned FDs
+	 *   after it has completed reading from/writing to it!
+	 * - Specify > 0 to set a channel to a particular FD as follows:
+	 *     .in: a readable FD, becomes child's stdin
+	 *     .out: a writable FD, becomes child's stdout/stderr
+	 *     .err > 0 not supported
+	 *   The specified FD is closed by start_command(), even in case
+	 *   of errors!
+	 */
+	int in;
+	int out;
+	int err;
+	const char *dir;
+	const char *const *env;
+	unsigned no_stdin:1;
+	unsigned no_stdout:1;
+	unsigned no_stderr:1;
+	unsigned perf_cmd:1; /* if this is to be perf sub-command */
+	unsigned stdout_to_stderr:1;
+	void (*preexec_cb)(void);
+};
+
+int start_command(struct child_process *);
+int finish_command(struct child_process *);
+int run_command(struct child_process *);
+
+extern int run_hook(const char *index_file, const char *name, ...);
+
+#define RUN_COMMAND_NO_STDIN 1
+#define RUN_PERF_CMD 2	/* if this is to be perf sub-command */
+#define RUN_COMMAND_STDOUT_TO_STDERR 4
+int run_command_v_opt(const char **argv, int opt);
+
+/*
+ * env (the environment) is to be formatted like environ: "VAR=VALUE".
+ * To unset an environment variable use just "VAR".
+ */
+int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env);
+
+/*
+ * The purpose of the following functions is to feed a pipe by running
+ * a function asynchronously and providing output that the caller reads.
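+ *
+ * A minimal usage sketch (feed() is a hypothetical callback; error
+ * handling omitted).  Per the contract below, proc writes to the fd
+ * it is handed and closes it, while the caller reads from .out and
+ * closes it:
+ *
+ *	static int feed(int fd, void *data)
+ *	{
+ *		write_in_full(fd, "hello\n", 6);
+ *		close(fd);
+ *		return 0;
+ *	}
+ *
+ *	struct async as;
+ *	memset(&as, 0, sizeof(as));
+ *	as.proc = feed;
+ *	if (!start_async(&as)) {
+ *		... read from as.out, then close(as.out) ...
+ *		finish_async(&as);
+ *	}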
+ * + * It is expected that no synchronization and mutual exclusion between + * the caller and the feed function is necessary so that the function + * can run in a thread without interfering with the caller. + */ +struct async { + /* + * proc writes to fd and closes it; + * returns 0 on success, non-zero on failure + */ + int (*proc)(int fd, void *data); + void *data; + int out; /* caller reads from here and closes it */ +#ifndef __MINGW32__ + pid_t pid; +#else + HANDLE tid; + int fd_for_proc; +#endif +}; + +int start_async(struct async *async); +int finish_async(struct async *async); + +#endif diff --git a/Documentation/perf_counter/util/strbuf.c b/Documentation/perf_counter/util/strbuf.c new file mode 100644 index 00000000000..eaba0930680 --- /dev/null +++ b/Documentation/perf_counter/util/strbuf.c @@ -0,0 +1,359 @@ +#include "cache.h" + +int prefixcmp(const char *str, const char *prefix) +{ + for (; ; str++, prefix++) + if (!*prefix) + return 0; + else if (*str != *prefix) + return (unsigned char)*prefix - (unsigned char)*str; +} + +/* + * Used as the default ->buf value, so that people can always assume + * buf is non NULL and ->buf is NUL terminated even for a freshly + * initialized strbuf. + */ +char strbuf_slopbuf[1]; + +void strbuf_init(struct strbuf *sb, size_t hint) +{ + sb->alloc = sb->len = 0; + sb->buf = strbuf_slopbuf; + if (hint) + strbuf_grow(sb, hint); +} + +void strbuf_release(struct strbuf *sb) +{ + if (sb->alloc) { + free(sb->buf); + strbuf_init(sb, 0); + } +} + +char *strbuf_detach(struct strbuf *sb, size_t *sz) +{ + char *res = sb->alloc ? sb->buf : NULL; + if (sz) + *sz = sb->len; + strbuf_init(sb, 0); + return res; +} + +void strbuf_attach(struct strbuf *sb, void *buf, size_t len, size_t alloc) +{ + strbuf_release(sb); + sb->buf = buf; + sb->len = len; + sb->alloc = alloc; + strbuf_grow(sb, 0); + sb->buf[sb->len] = '\0'; +} + +void strbuf_grow(struct strbuf *sb, size_t extra) +{ + if (sb->len + extra + 1 <= sb->len) + die("you want to use way too much memory"); + if (!sb->alloc) + sb->buf = NULL; + ALLOC_GROW(sb->buf, sb->len + extra + 1, sb->alloc); +} + +void strbuf_trim(struct strbuf *sb) +{ + char *b = sb->buf; + while (sb->len > 0 && isspace((unsigned char)sb->buf[sb->len - 1])) + sb->len--; + while (sb->len > 0 && isspace(*b)) { + b++; + sb->len--; + } + memmove(sb->buf, b, sb->len); + sb->buf[sb->len] = '\0'; +} +void strbuf_rtrim(struct strbuf *sb) +{ + while (sb->len > 0 && isspace((unsigned char)sb->buf[sb->len - 1])) + sb->len--; + sb->buf[sb->len] = '\0'; +} + +void strbuf_ltrim(struct strbuf *sb) +{ + char *b = sb->buf; + while (sb->len > 0 && isspace(*b)) { + b++; + sb->len--; + } + memmove(sb->buf, b, sb->len); + sb->buf[sb->len] = '\0'; +} + +void strbuf_tolower(struct strbuf *sb) +{ + int i; + for (i = 0; i < sb->len; i++) + sb->buf[i] = tolower(sb->buf[i]); +} + +struct strbuf **strbuf_split(const struct strbuf *sb, int delim) +{ + int alloc = 2, pos = 0; + char *n, *p; + struct strbuf **ret; + struct strbuf *t; + + ret = calloc(alloc, sizeof(struct strbuf *)); + p = n = sb->buf; + while (n < sb->buf + sb->len) { + int len; + n = memchr(n, delim, sb->len - (n - sb->buf)); + if (pos + 1 >= alloc) { + alloc = alloc * 2; + ret = realloc(ret, sizeof(struct strbuf *) * alloc); + } + if (!n) + n = sb->buf + sb->len - 1; + len = n - p + 1; + t = malloc(sizeof(struct strbuf)); + strbuf_init(t, len); + strbuf_add(t, p, len); + ret[pos] = t; + ret[++pos] = NULL; + p = ++n; + } + return ret; +} + +void strbuf_list_free(struct strbuf **sbs) +{ + struct strbuf 
**s = sbs; + + while (*s) { + strbuf_release(*s); + free(*s++); + } + free(sbs); +} + +int strbuf_cmp(const struct strbuf *a, const struct strbuf *b) +{ + int len = a->len < b->len ? a->len: b->len; + int cmp = memcmp(a->buf, b->buf, len); + if (cmp) + return cmp; + return a->len < b->len ? -1: a->len != b->len; +} + +void strbuf_splice(struct strbuf *sb, size_t pos, size_t len, + const void *data, size_t dlen) +{ + if (pos + len < pos) + die("you want to use way too much memory"); + if (pos > sb->len) + die("`pos' is too far after the end of the buffer"); + if (pos + len > sb->len) + die("`pos + len' is too far after the end of the buffer"); + + if (dlen >= len) + strbuf_grow(sb, dlen - len); + memmove(sb->buf + pos + dlen, + sb->buf + pos + len, + sb->len - pos - len); + memcpy(sb->buf + pos, data, dlen); + strbuf_setlen(sb, sb->len + dlen - len); +} + +void strbuf_insert(struct strbuf *sb, size_t pos, const void *data, size_t len) +{ + strbuf_splice(sb, pos, 0, data, len); +} + +void strbuf_remove(struct strbuf *sb, size_t pos, size_t len) +{ + strbuf_splice(sb, pos, len, NULL, 0); +} + +void strbuf_add(struct strbuf *sb, const void *data, size_t len) +{ + strbuf_grow(sb, len); + memcpy(sb->buf + sb->len, data, len); + strbuf_setlen(sb, sb->len + len); +} + +void strbuf_adddup(struct strbuf *sb, size_t pos, size_t len) +{ + strbuf_grow(sb, len); + memcpy(sb->buf + sb->len, sb->buf + pos, len); + strbuf_setlen(sb, sb->len + len); +} + +void strbuf_addf(struct strbuf *sb, const char *fmt, ...) +{ + int len; + va_list ap; + + if (!strbuf_avail(sb)) + strbuf_grow(sb, 64); + va_start(ap, fmt); + len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); + va_end(ap); + if (len < 0) + die("your vsnprintf is broken"); + if (len > strbuf_avail(sb)) { + strbuf_grow(sb, len); + va_start(ap, fmt); + len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); + va_end(ap); + if (len > strbuf_avail(sb)) { + die("this should not happen, your snprintf is broken"); + } + } + strbuf_setlen(sb, sb->len + len); +} + +void strbuf_expand(struct strbuf *sb, const char *format, expand_fn_t fn, + void *context) +{ + for (;;) { + const char *percent; + size_t consumed; + + percent = strchrnul(format, '%'); + strbuf_add(sb, format, percent - format); + if (!*percent) + break; + format = percent + 1; + + consumed = fn(sb, format, context); + if (consumed) + format += consumed; + else + strbuf_addch(sb, '%'); + } +} + +size_t strbuf_expand_dict_cb(struct strbuf *sb, const char *placeholder, + void *context) +{ + struct strbuf_expand_dict_entry *e = context; + size_t len; + + for (; e->placeholder && (len = strlen(e->placeholder)); e++) { + if (!strncmp(placeholder, e->placeholder, len)) { + if (e->value) + strbuf_addstr(sb, e->value); + return len; + } + } + return 0; +} + +size_t strbuf_fread(struct strbuf *sb, size_t size, FILE *f) +{ + size_t res; + size_t oldalloc = sb->alloc; + + strbuf_grow(sb, size); + res = fread(sb->buf + sb->len, 1, size, f); + if (res > 0) + strbuf_setlen(sb, sb->len + res); + else if (res < 0 && oldalloc == 0) + strbuf_release(sb); + return res; +} + +ssize_t strbuf_read(struct strbuf *sb, int fd, size_t hint) +{ + size_t oldlen = sb->len; + size_t oldalloc = sb->alloc; + + strbuf_grow(sb, hint ? 
hint : 8192);
+	for (;;) {
+		ssize_t cnt;
+
+		cnt = read(fd, sb->buf + sb->len, sb->alloc - sb->len - 1);
+		if (cnt < 0) {
+			if (oldalloc == 0)
+				strbuf_release(sb);
+			else
+				strbuf_setlen(sb, oldlen);
+			return -1;
+		}
+		if (!cnt)
+			break;
+		sb->len += cnt;
+		strbuf_grow(sb, 8192);
+	}
+
+	sb->buf[sb->len] = '\0';
+	return sb->len - oldlen;
+}
+
+#define STRBUF_MAXLINK (2*PATH_MAX)
+
+int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint)
+{
+	size_t oldalloc = sb->alloc;
+
+	if (hint < 32)
+		hint = 32;
+
+	while (hint < STRBUF_MAXLINK) {
+		int len;
+
+		strbuf_grow(sb, hint);
+		len = readlink(path, sb->buf, hint);
+		if (len < 0) {
+			if (errno != ERANGE)
+				break;
+		} else if (len < hint) {
+			strbuf_setlen(sb, len);
+			return 0;
+		}
+
+		/* .. the buffer was too small - try again */
+		hint *= 2;
+	}
+	if (oldalloc == 0)
+		strbuf_release(sb);
+	return -1;
+}
+
+int strbuf_getline(struct strbuf *sb, FILE *fp, int term)
+{
+	int ch;
+
+	strbuf_grow(sb, 0);
+	if (feof(fp))
+		return EOF;
+
+	strbuf_reset(sb);
+	while ((ch = fgetc(fp)) != EOF) {
+		if (ch == term)
+			break;
+		strbuf_grow(sb, 1);
+		sb->buf[sb->len++] = ch;
+	}
+	if (ch == EOF && sb->len == 0)
+		return EOF;
+
+	sb->buf[sb->len] = '\0';
+	return 0;
+}
+
+int strbuf_read_file(struct strbuf *sb, const char *path, size_t hint)
+{
+	int fd, len;
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0)
+		return -1;
+	len = strbuf_read(sb, fd, hint);
+	close(fd);
+	if (len < 0)
+		return -1;
+
+	return len;
+}
diff --git a/Documentation/perf_counter/util/strbuf.h b/Documentation/perf_counter/util/strbuf.h
new file mode 100644
index 00000000000..9ee908a3ec5
--- /dev/null
+++ b/Documentation/perf_counter/util/strbuf.h
@@ -0,0 +1,137 @@
+#ifndef STRBUF_H
+#define STRBUF_H
+
+/*
+ * Strbufs can be used in many ways: as a byte array, or to store
+ * arbitrarily long, overflow-safe strings.
+ *
+ * Strbufs have some invariants that are very important to keep in mind:
+ *
+ * 1. the ->buf member is always malloc-ed, hence strbufs can be used to
+ *    build complex strings/buffers whose final size isn't easily known.
+ *
+ *    It is NOT legal to copy the ->buf pointer away.
+ *    `strbuf_detach' is the operation that detaches a buffer from its shell
+ *    while keeping the shell valid wrt its invariants.
+ *
+ * 2. the ->buf member is a byte array that has at least ->len + 1 bytes
+ *    allocated. The extra byte is used to store a '\0', allowing the ->buf
+ *    member to be a valid C-string. Every strbuf function ensures this
+ *    invariant is preserved.
+ *
+ *    Note that it is OK to "play" with the buffer directly if you work it
+ *    that way:
+ *
+ *    strbuf_grow(sb, SOME_SIZE);
+ *    ... Here, the memory array starting at sb->buf, and of length
+ *    ... strbuf_avail(sb) is all yours, and you are sure that
+ *    ... strbuf_avail(sb) is at least SOME_SIZE.
+ *    strbuf_setlen(sb, sb->len + SOME_OTHER_SIZE);
+ *
+ *    Of course, SOME_OTHER_SIZE must be smaller than or equal to
+ *    strbuf_avail(sb).
+ *
+ *    Doing so is safe, though if it has to be done in many places, adding the
+ *    missing API to the strbuf module is the way to go.
+ *
+ *    XXX: do _not_ assume that the area that is yours is of size ->alloc - 1
+ *    even if it's true in the current implementation. Alloc is somewhat of a
+ *    "private" member that should not be messed with.
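+ *
+ * A short end-to-end sketch (dir and file are hypothetical inputs;
+ * error handling omitted):
+ *
+ *    struct strbuf sb = STRBUF_INIT;
+ *    strbuf_addf(&sb, "%s/%s", dir, file);
+ *    ... sb.buf is a NUL-terminated C string here ...
+ *    strbuf_release(&sb);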
+ */ + +#include + +extern char strbuf_slopbuf[]; +struct strbuf { + size_t alloc; + size_t len; + char *buf; +}; + +#define STRBUF_INIT { 0, 0, strbuf_slopbuf } + +/*----- strbuf life cycle -----*/ +extern void strbuf_init(struct strbuf *, size_t); +extern void strbuf_release(struct strbuf *); +extern char *strbuf_detach(struct strbuf *, size_t *); +extern void strbuf_attach(struct strbuf *, void *, size_t, size_t); +static inline void strbuf_swap(struct strbuf *a, struct strbuf *b) { + struct strbuf tmp = *a; + *a = *b; + *b = tmp; +} + +/*----- strbuf size related -----*/ +static inline size_t strbuf_avail(const struct strbuf *sb) { + return sb->alloc ? sb->alloc - sb->len - 1 : 0; +} + +extern void strbuf_grow(struct strbuf *, size_t); + +static inline void strbuf_setlen(struct strbuf *sb, size_t len) { + if (!sb->alloc) + strbuf_grow(sb, 0); + assert(len < sb->alloc); + sb->len = len; + sb->buf[len] = '\0'; +} +#define strbuf_reset(sb) strbuf_setlen(sb, 0) + +/*----- content related -----*/ +extern void strbuf_trim(struct strbuf *); +extern void strbuf_rtrim(struct strbuf *); +extern void strbuf_ltrim(struct strbuf *); +extern int strbuf_cmp(const struct strbuf *, const struct strbuf *); +extern void strbuf_tolower(struct strbuf *); + +extern struct strbuf **strbuf_split(const struct strbuf *, int delim); +extern void strbuf_list_free(struct strbuf **); + +/*----- add data in your buffer -----*/ +static inline void strbuf_addch(struct strbuf *sb, int c) { + strbuf_grow(sb, 1); + sb->buf[sb->len++] = c; + sb->buf[sb->len] = '\0'; +} + +extern void strbuf_insert(struct strbuf *, size_t pos, const void *, size_t); +extern void strbuf_remove(struct strbuf *, size_t pos, size_t len); + +/* splice pos..pos+len with given data */ +extern void strbuf_splice(struct strbuf *, size_t pos, size_t len, + const void *, size_t); + +extern void strbuf_add(struct strbuf *, const void *, size_t); +static inline void strbuf_addstr(struct strbuf *sb, const char *s) { + strbuf_add(sb, s, strlen(s)); +} +static inline void strbuf_addbuf(struct strbuf *sb, const struct strbuf *sb2) { + strbuf_add(sb, sb2->buf, sb2->len); +} +extern void strbuf_adddup(struct strbuf *sb, size_t pos, size_t len); + +typedef size_t (*expand_fn_t) (struct strbuf *sb, const char *placeholder, void *context); +extern void strbuf_expand(struct strbuf *sb, const char *format, expand_fn_t fn, void *context); +struct strbuf_expand_dict_entry { + const char *placeholder; + const char *value; +}; +extern size_t strbuf_expand_dict_cb(struct strbuf *sb, const char *placeholder, void *context); + +__attribute__((format(printf,2,3))) +extern void strbuf_addf(struct strbuf *sb, const char *fmt, ...); + +extern size_t strbuf_fread(struct strbuf *, size_t, FILE *); +/* XXX: if read fails, any partial read is undone */ +extern ssize_t strbuf_read(struct strbuf *, int fd, size_t hint); +extern int strbuf_read_file(struct strbuf *sb, const char *path, size_t hint); +extern int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint); + +extern int strbuf_getline(struct strbuf *, FILE *, int); + +extern void stripspace(struct strbuf *buf, int skip_comments); +extern int launch_editor(const char *path, struct strbuf *buffer, const char *const *env); + +extern int strbuf_branchname(struct strbuf *sb, const char *name); +extern int strbuf_check_branch_ref(struct strbuf *sb, const char *name); + +#endif /* STRBUF_H */ diff --git a/Documentation/perf_counter/util/usage.c b/Documentation/perf_counter/util/usage.c new file mode 100644 index 
00000000000..7a10421fe6b --- /dev/null +++ b/Documentation/perf_counter/util/usage.c @@ -0,0 +1,80 @@ +/* + * GIT - The information manager from hell + * + * Copyright (C) Linus Torvalds, 2005 + */ +#include "util.h" + +static void report(const char *prefix, const char *err, va_list params) +{ + char msg[1024]; + vsnprintf(msg, sizeof(msg), err, params); + fprintf(stderr, "%s%s\n", prefix, msg); +} + +static NORETURN void usage_builtin(const char *err) +{ + fprintf(stderr, "usage: %s\n", err); + exit(129); +} + +static NORETURN void die_builtin(const char *err, va_list params) +{ + report("fatal: ", err, params); + exit(128); +} + +static void error_builtin(const char *err, va_list params) +{ + report("error: ", err, params); +} + +static void warn_builtin(const char *warn, va_list params) +{ + report("warning: ", warn, params); +} + +/* If we are in a dlopen()ed .so write to a global variable would segfault + * (ugh), so keep things static. */ +static void (*usage_routine)(const char *err) NORETURN = usage_builtin; +static void (*die_routine)(const char *err, va_list params) NORETURN = die_builtin; +static void (*error_routine)(const char *err, va_list params) = error_builtin; +static void (*warn_routine)(const char *err, va_list params) = warn_builtin; + +void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN) +{ + die_routine = routine; +} + +void usage(const char *err) +{ + usage_routine(err); +} + +void die(const char *err, ...) +{ + va_list params; + + va_start(params, err); + die_routine(err, params); + va_end(params); +} + +int error(const char *err, ...) +{ + va_list params; + + va_start(params, err); + error_routine(err, params); + va_end(params); + return -1; +} + +void warning(const char *warn, ...) +{ + va_list params; + + va_start(params, warn); + warn_routine(warn, params); + va_end(params); +} diff --git a/Documentation/perf_counter/util/util.h b/Documentation/perf_counter/util/util.h new file mode 100644 index 00000000000..36e40c38e09 --- /dev/null +++ b/Documentation/perf_counter/util/util.h @@ -0,0 +1,408 @@ +#ifndef GIT_COMPAT_UTIL_H +#define GIT_COMPAT_UTIL_H + +#define _FILE_OFFSET_BITS 64 + +#ifndef FLEX_ARRAY +/* + * See if our compiler is known to support flexible array members. + */ +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) +# define FLEX_ARRAY /* empty */ +#elif defined(__GNUC__) +# if (__GNUC__ >= 3) +# define FLEX_ARRAY /* empty */ +# else +# define FLEX_ARRAY 0 /* older GNU extension */ +# endif +#endif + +/* + * Otherwise, default to safer but a bit wasteful traditional style + */ +#ifndef FLEX_ARRAY +# define FLEX_ARRAY 1 +#endif +#endif + +#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) + +#ifdef __GNUC__ +#define TYPEOF(x) (__typeof__(x)) +#else +#define TYPEOF(x) +#endif + +#define MSB(x, bits) ((x) & TYPEOF(x)(~0ULL << (sizeof(x) * 8 - (bits)))) +#define HAS_MULTI_BITS(i) ((i) & ((i) - 1)) /* checks if an integer has more than 1 bit set */ + +/* Approximation of the length of the decimal representation of this type. 
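+ * (Each byte contributes at most log10(256) ~= 2.41 decimal digits;
+ * the 2.56 factor rounds that up safely, and the trailing +1 leaves
+ * room for a minus sign.)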
*/ +#define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1) + +#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__USLC__) && !defined(_M_UNIX) +#define _XOPEN_SOURCE 600 /* glibc2 and AIX 5.3L need 500, OpenBSD needs 600 for S_ISLNK() */ +#define _XOPEN_SOURCE_EXTENDED 1 /* AIX 5.3L needs this */ +#endif +#define _ALL_SOURCE 1 +#define _GNU_SOURCE 1 +#define _BSD_SOURCE 1 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef __MINGW32__ +#include +#include +#include +#include +#ifndef NO_SYS_SELECT_H +#include +#endif +#include +#include +#include +#include +#include +#include +#if defined(__CYGWIN__) +#undef _XOPEN_SOURCE +#include +#define _XOPEN_SOURCE 600 +#include "compat/cygwin.h" +#else +#undef _ALL_SOURCE /* AIX 5.3L defines a struct list with _ALL_SOURCE. */ +#include +#define _ALL_SOURCE 1 +#endif +#else /* __MINGW32__ */ +/* pull in Windows compatibility stuff */ +#include "compat/mingw.h" +#endif /* __MINGW32__ */ + +#ifndef NO_ICONV +#include +#endif + +#ifndef NO_OPENSSL +#include +#include +#endif + +/* On most systems would have given us this, but + * not on some systems (e.g. GNU/Hurd). + */ +#ifndef PATH_MAX +#define PATH_MAX 4096 +#endif + +#ifndef PRIuMAX +#define PRIuMAX "llu" +#endif + +#ifndef PRIu32 +#define PRIu32 "u" +#endif + +#ifndef PRIx32 +#define PRIx32 "x" +#endif + +#ifndef PATH_SEP +#define PATH_SEP ':' +#endif + +#ifndef STRIP_EXTENSION +#define STRIP_EXTENSION "" +#endif + +#ifndef has_dos_drive_prefix +#define has_dos_drive_prefix(path) 0 +#endif + +#ifndef is_dir_sep +#define is_dir_sep(c) ((c) == '/') +#endif + +#ifdef __GNUC__ +#define NORETURN __attribute__((__noreturn__)) +#else +#define NORETURN +#ifndef __attribute__ +#define __attribute__(x) +#endif +#endif + +/* General helper functions */ +extern void usage(const char *err) NORETURN; +extern void die(const char *err, ...) NORETURN __attribute__((format (printf, 1, 2))); +extern int error(const char *err, ...) __attribute__((format (printf, 1, 2))); +extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2))); + +extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN); + +extern int prefixcmp(const char *str, const char *prefix); +extern time_t tm_to_time_t(const struct tm *tm); + +static inline const char *skip_prefix(const char *str, const char *prefix) +{ + size_t len = strlen(prefix); + return strncmp(str, prefix, len) ? NULL : str + len; +} + +#if defined(NO_MMAP) || defined(USE_WIN32_MMAP) + +#ifndef PROT_READ +#define PROT_READ 1 +#define PROT_WRITE 2 +#define MAP_PRIVATE 1 +#define MAP_FAILED ((void*)-1) +#endif + +#define mmap git_mmap +#define munmap git_munmap +extern void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset); +extern int git_munmap(void *start, size_t length); + +#else /* NO_MMAP || USE_WIN32_MMAP */ + +#include + +#endif /* NO_MMAP || USE_WIN32_MMAP */ + +#ifdef NO_MMAP + +/* This value must be multiple of (pagesize * 2) */ +#define DEFAULT_PACKED_GIT_WINDOW_SIZE (1 * 1024 * 1024) + +#else /* NO_MMAP */ + +/* This value must be multiple of (pagesize * 2) */ +#define DEFAULT_PACKED_GIT_WINDOW_SIZE \ + (sizeof(void*) >= 8 \ + ? 
1 * 1024 * 1024 * 1024 \ + : 32 * 1024 * 1024) + +#endif /* NO_MMAP */ + +#ifdef NO_ST_BLOCKS_IN_STRUCT_STAT +#define on_disk_bytes(st) ((st).st_size) +#else +#define on_disk_bytes(st) ((st).st_blocks * 512) +#endif + +#define DEFAULT_PACKED_GIT_LIMIT \ + ((1024L * 1024L) * (sizeof(void*) >= 8 ? 8192 : 256)) + +#ifdef NO_PREAD +#define pread git_pread +extern ssize_t git_pread(int fd, void *buf, size_t count, off_t offset); +#endif +/* + * Forward decl that will remind us if its twin in cache.h changes. + * This function is used in compat/pread.c. But we can't include + * cache.h there. + */ +extern ssize_t read_in_full(int fd, void *buf, size_t count); + +#ifdef NO_SETENV +#define setenv gitsetenv +extern int gitsetenv(const char *, const char *, int); +#endif + +#ifdef NO_MKDTEMP +#define mkdtemp gitmkdtemp +extern char *gitmkdtemp(char *); +#endif + +#ifdef NO_UNSETENV +#define unsetenv gitunsetenv +extern void gitunsetenv(const char *); +#endif + +#ifdef NO_STRCASESTR +#define strcasestr gitstrcasestr +extern char *gitstrcasestr(const char *haystack, const char *needle); +#endif + +#ifdef NO_STRLCPY +#define strlcpy gitstrlcpy +extern size_t gitstrlcpy(char *, const char *, size_t); +#endif + +#ifdef NO_STRTOUMAX +#define strtoumax gitstrtoumax +extern uintmax_t gitstrtoumax(const char *, char **, int); +#endif + +#ifdef NO_HSTRERROR +#define hstrerror githstrerror +extern const char *githstrerror(int herror); +#endif + +#ifdef NO_MEMMEM +#define memmem gitmemmem +void *gitmemmem(const void *haystack, size_t haystacklen, + const void *needle, size_t needlelen); +#endif + +#ifdef FREAD_READS_DIRECTORIES +#ifdef fopen +#undef fopen +#endif +#define fopen(a,b) git_fopen(a,b) +extern FILE *git_fopen(const char*, const char*); +#endif + +#ifdef SNPRINTF_RETURNS_BOGUS +#define snprintf git_snprintf +extern int git_snprintf(char *str, size_t maxsize, + const char *format, ...); +#define vsnprintf git_vsnprintf +extern int git_vsnprintf(char *str, size_t maxsize, + const char *format, va_list ap); +#endif + +#ifdef __GLIBC_PREREQ +#if __GLIBC_PREREQ(2, 1) +#define HAVE_STRCHRNUL +#endif +#endif + +#ifndef HAVE_STRCHRNUL +#define strchrnul gitstrchrnul +static inline char *gitstrchrnul(const char *s, int c) +{ + while (*s && *s != c) + s++; + return (char *)s; +} +#endif + +/* + * Wrappers: + */ +extern char *xstrdup(const char *str); +extern void *xmalloc(size_t size); +extern void *xmemdupz(const void *data, size_t len); +extern char *xstrndup(const char *str, size_t len); +extern void *xrealloc(void *ptr, size_t size); +extern void *xcalloc(size_t nmemb, size_t size); +extern void *xmmap(void *start, size_t length, int prot, int flags, int fd, off_t offset); +extern ssize_t xread(int fd, void *buf, size_t len); +extern ssize_t xwrite(int fd, const void *buf, size_t len); +extern int xdup(int fd); +extern FILE *xfdopen(int fd, const char *mode); +static inline size_t xsize_t(off_t len) +{ + return (size_t)len; +} + +static inline int has_extension(const char *filename, const char *ext) +{ + size_t len = strlen(filename); + size_t extlen = strlen(ext); + return len > extlen && !memcmp(filename + len - extlen, ext, extlen); +} + +/* Sane ctype - no locale, and works with signed chars */ +#undef isascii +#undef isspace +#undef isdigit +#undef isalpha +#undef isalnum +#undef tolower +#undef toupper +extern unsigned char sane_ctype[256]; +#define GIT_SPACE 0x01 +#define GIT_DIGIT 0x02 +#define GIT_ALPHA 0x04 +#define GIT_GLOB_SPECIAL 0x08 +#define GIT_REGEX_SPECIAL 0x10 +#define 
sane_istest(x,mask) ((sane_ctype[(unsigned char)(x)] & (mask)) != 0) +#define isascii(x) (((x) & ~0x7f) == 0) +#define isspace(x) sane_istest(x,GIT_SPACE) +#define isdigit(x) sane_istest(x,GIT_DIGIT) +#define isalpha(x) sane_istest(x,GIT_ALPHA) +#define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT) +#define is_glob_special(x) sane_istest(x,GIT_GLOB_SPECIAL) +#define is_regex_special(x) sane_istest(x,GIT_GLOB_SPECIAL | GIT_REGEX_SPECIAL) +#define tolower(x) sane_case((unsigned char)(x), 0x20) +#define toupper(x) sane_case((unsigned char)(x), 0) + +static inline int sane_case(int x, int high) +{ + if (sane_istest(x, GIT_ALPHA)) + x = (x & ~0x20) | high; + return x; +} + +static inline int strtoul_ui(char const *s, int base, unsigned int *result) +{ + unsigned long ul; + char *p; + + errno = 0; + ul = strtoul(s, &p, base); + if (errno || *p || p == s || (unsigned int) ul != ul) + return -1; + *result = ul; + return 0; +} + +static inline int strtol_i(char const *s, int base, int *result) +{ + long ul; + char *p; + + errno = 0; + ul = strtol(s, &p, base); + if (errno || *p || p == s || (int) ul != ul) + return -1; + *result = ul; + return 0; +} + +#ifdef INTERNAL_QSORT +void git_qsort(void *base, size_t nmemb, size_t size, + int(*compar)(const void *, const void *)); +#define qsort git_qsort +#endif + +#ifndef DIR_HAS_BSD_GROUP_SEMANTICS +# define FORCE_DIR_SET_GID S_ISGID +#else +# define FORCE_DIR_SET_GID 0 +#endif + +#ifdef NO_NSEC +#undef USE_NSEC +#define ST_CTIME_NSEC(st) 0 +#define ST_MTIME_NSEC(st) 0 +#else +#ifdef USE_ST_TIMESPEC +#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctimespec.tv_nsec)) +#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtimespec.tv_nsec)) +#else +#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctim.tv_nsec)) +#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtim.tv_nsec)) +#endif +#endif + +#endif diff --git a/Documentation/perf_counter/util/wrapper.c b/Documentation/perf_counter/util/wrapper.c new file mode 100644 index 00000000000..6350d65f6d9 --- /dev/null +++ b/Documentation/perf_counter/util/wrapper.c @@ -0,0 +1,206 @@ +/* + * Various trivial helper wrappers around standard functions + */ +#include "cache.h" + +/* + * There's no pack memory to release - but stay close to the Git + * version so wrap this away: + */ +static inline void release_pack_memory(size_t size, int flag) +{ +} + +char *xstrdup(const char *str) +{ + char *ret = strdup(str); + if (!ret) { + release_pack_memory(strlen(str) + 1, -1); + ret = strdup(str); + if (!ret) + die("Out of memory, strdup failed"); + } + return ret; +} + +void *xmalloc(size_t size) +{ + void *ret = malloc(size); + if (!ret && !size) + ret = malloc(1); + if (!ret) { + release_pack_memory(size, -1); + ret = malloc(size); + if (!ret && !size) + ret = malloc(1); + if (!ret) + die("Out of memory, malloc failed"); + } +#ifdef XMALLOC_POISON + memset(ret, 0xA5, size); +#endif + return ret; +} + +/* + * xmemdupz() allocates (len + 1) bytes of memory, duplicates "len" bytes of + * "data" to the allocated memory, zero terminates the allocated memory, + * and returns a pointer to the allocated memory. If the allocation fails, + * the program dies. + */ +void *xmemdupz(const void *data, size_t len) +{ + char *p = xmalloc(len + 1); + memcpy(p, data, len); + p[len] = '\0'; + return p; +} + +char *xstrndup(const char *str, size_t len) +{ + char *p = memchr(str, '\0', len); + return xmemdupz(str, p ? 
p - str : len); +} + +void *xrealloc(void *ptr, size_t size) +{ + void *ret = realloc(ptr, size); + if (!ret && !size) + ret = realloc(ptr, 1); + if (!ret) { + release_pack_memory(size, -1); + ret = realloc(ptr, size); + if (!ret && !size) + ret = realloc(ptr, 1); + if (!ret) + die("Out of memory, realloc failed"); + } + return ret; +} + +void *xcalloc(size_t nmemb, size_t size) +{ + void *ret = calloc(nmemb, size); + if (!ret && (!nmemb || !size)) + ret = calloc(1, 1); + if (!ret) { + release_pack_memory(nmemb * size, -1); + ret = calloc(nmemb, size); + if (!ret && (!nmemb || !size)) + ret = calloc(1, 1); + if (!ret) + die("Out of memory, calloc failed"); + } + return ret; +} + +void *xmmap(void *start, size_t length, + int prot, int flags, int fd, off_t offset) +{ + void *ret = mmap(start, length, prot, flags, fd, offset); + if (ret == MAP_FAILED) { + if (!length) + return NULL; + release_pack_memory(length, fd); + ret = mmap(start, length, prot, flags, fd, offset); + if (ret == MAP_FAILED) + die("Out of memory? mmap failed: %s", strerror(errno)); + } + return ret; +} + +/* + * xread() is the same a read(), but it automatically restarts read() + * operations with a recoverable error (EAGAIN and EINTR). xread() + * DOES NOT GUARANTEE that "len" bytes is read even if the data is available. + */ +ssize_t xread(int fd, void *buf, size_t len) +{ + ssize_t nr; + while (1) { + nr = read(fd, buf, len); + if ((nr < 0) && (errno == EAGAIN || errno == EINTR)) + continue; + return nr; + } +} + +/* + * xwrite() is the same a write(), but it automatically restarts write() + * operations with a recoverable error (EAGAIN and EINTR). xwrite() DOES NOT + * GUARANTEE that "len" bytes is written even if the operation is successful. + */ +ssize_t xwrite(int fd, const void *buf, size_t len) +{ + ssize_t nr; + while (1) { + nr = write(fd, buf, len); + if ((nr < 0) && (errno == EAGAIN || errno == EINTR)) + continue; + return nr; + } +} + +ssize_t read_in_full(int fd, void *buf, size_t count) +{ + char *p = buf; + ssize_t total = 0; + + while (count > 0) { + ssize_t loaded = xread(fd, p, count); + if (loaded <= 0) + return total ? total : loaded; + count -= loaded; + p += loaded; + total += loaded; + } + + return total; +} + +ssize_t write_in_full(int fd, const void *buf, size_t count) +{ + const char *p = buf; + ssize_t total = 0; + + while (count > 0) { + ssize_t written = xwrite(fd, p, count); + if (written < 0) + return -1; + if (!written) { + errno = ENOSPC; + return -1; + } + count -= written; + p += written; + total += written; + } + + return total; +} + +int xdup(int fd) +{ + int ret = dup(fd); + if (ret < 0) + die("dup failed: %s", strerror(errno)); + return ret; +} + +FILE *xfdopen(int fd, const char *mode) +{ + FILE *stream = fdopen(fd, mode); + if (stream == NULL) + die("Out of memory? 
fdopen failed: %s", strerror(errno)); + return stream; +} + +int xmkstemp(char *template) +{ + int fd; + + fd = mkstemp(template); + if (fd < 0) + die("Unable to create temporary file: %s", strerror(errno)); + return fd; +} diff --git a/Documentation/perf_counter/wrapper.c b/Documentation/perf_counter/wrapper.c deleted file mode 100644 index 6350d65f6d9..00000000000 --- a/Documentation/perf_counter/wrapper.c +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Various trivial helper wrappers around standard functions - */ -#include "cache.h" - -/* - * There's no pack memory to release - but stay close to the Git - * version so wrap this away: - */ -static inline void release_pack_memory(size_t size, int flag) -{ -} - -char *xstrdup(const char *str) -{ - char *ret = strdup(str); - if (!ret) { - release_pack_memory(strlen(str) + 1, -1); - ret = strdup(str); - if (!ret) - die("Out of memory, strdup failed"); - } - return ret; -} - -void *xmalloc(size_t size) -{ - void *ret = malloc(size); - if (!ret && !size) - ret = malloc(1); - if (!ret) { - release_pack_memory(size, -1); - ret = malloc(size); - if (!ret && !size) - ret = malloc(1); - if (!ret) - die("Out of memory, malloc failed"); - } -#ifdef XMALLOC_POISON - memset(ret, 0xA5, size); -#endif - return ret; -} - -/* - * xmemdupz() allocates (len + 1) bytes of memory, duplicates "len" bytes of - * "data" to the allocated memory, zero terminates the allocated memory, - * and returns a pointer to the allocated memory. If the allocation fails, - * the program dies. - */ -void *xmemdupz(const void *data, size_t len) -{ - char *p = xmalloc(len + 1); - memcpy(p, data, len); - p[len] = '\0'; - return p; -} - -char *xstrndup(const char *str, size_t len) -{ - char *p = memchr(str, '\0', len); - return xmemdupz(str, p ? p - str : len); -} - -void *xrealloc(void *ptr, size_t size) -{ - void *ret = realloc(ptr, size); - if (!ret && !size) - ret = realloc(ptr, 1); - if (!ret) { - release_pack_memory(size, -1); - ret = realloc(ptr, size); - if (!ret && !size) - ret = realloc(ptr, 1); - if (!ret) - die("Out of memory, realloc failed"); - } - return ret; -} - -void *xcalloc(size_t nmemb, size_t size) -{ - void *ret = calloc(nmemb, size); - if (!ret && (!nmemb || !size)) - ret = calloc(1, 1); - if (!ret) { - release_pack_memory(nmemb * size, -1); - ret = calloc(nmemb, size); - if (!ret && (!nmemb || !size)) - ret = calloc(1, 1); - if (!ret) - die("Out of memory, calloc failed"); - } - return ret; -} - -void *xmmap(void *start, size_t length, - int prot, int flags, int fd, off_t offset) -{ - void *ret = mmap(start, length, prot, flags, fd, offset); - if (ret == MAP_FAILED) { - if (!length) - return NULL; - release_pack_memory(length, fd); - ret = mmap(start, length, prot, flags, fd, offset); - if (ret == MAP_FAILED) - die("Out of memory? mmap failed: %s", strerror(errno)); - } - return ret; -} - -/* - * xread() is the same a read(), but it automatically restarts read() - * operations with a recoverable error (EAGAIN and EINTR). xread() - * DOES NOT GUARANTEE that "len" bytes is read even if the data is available. - */ -ssize_t xread(int fd, void *buf, size_t len) -{ - ssize_t nr; - while (1) { - nr = read(fd, buf, len); - if ((nr < 0) && (errno == EAGAIN || errno == EINTR)) - continue; - return nr; - } -} - -/* - * xwrite() is the same a write(), but it automatically restarts write() - * operations with a recoverable error (EAGAIN and EINTR). xwrite() DOES NOT - * GUARANTEE that "len" bytes is written even if the operation is successful. 
- */ -ssize_t xwrite(int fd, const void *buf, size_t len) -{ - ssize_t nr; - while (1) { - nr = write(fd, buf, len); - if ((nr < 0) && (errno == EAGAIN || errno == EINTR)) - continue; - return nr; - } -} - -ssize_t read_in_full(int fd, void *buf, size_t count) -{ - char *p = buf; - ssize_t total = 0; - - while (count > 0) { - ssize_t loaded = xread(fd, p, count); - if (loaded <= 0) - return total ? total : loaded; - count -= loaded; - p += loaded; - total += loaded; - } - - return total; -} - -ssize_t write_in_full(int fd, const void *buf, size_t count) -{ - const char *p = buf; - ssize_t total = 0; - - while (count > 0) { - ssize_t written = xwrite(fd, p, count); - if (written < 0) - return -1; - if (!written) { - errno = ENOSPC; - return -1; - } - count -= written; - p += written; - total += written; - } - - return total; -} - -int xdup(int fd) -{ - int ret = dup(fd); - if (ret < 0) - die("dup failed: %s", strerror(errno)); - return ret; -} - -FILE *xfdopen(int fd, const char *mode) -{ - FILE *stream = fdopen(fd, mode); - if (stream == NULL) - die("Out of memory? fdopen failed: %s", strerror(errno)); - return stream; -} - -int xmkstemp(char *template) -{ - int fd; - - fd = mkstemp(template); - if (fd < 0) - die("Unable to create temporary file: %s", strerror(errno)); - return fd; -} -- cgit v1.2.3 From 1130b0296184bc21806225fd06d533515a99d2db Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 28 Apr 2009 14:56:18 +0200 Subject: perf_counter tools: fix Documentation/perf_counter build error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mike Galbraith reported: > marge:..Documentation/perf_counter # make > CC builtin-stat.o > In file included from builtin-stat.c:71: > /usr/include/ctype.h:102: error: expected expression before ‘]’ token Remove the ctype.h include. Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-stat.c | 1 - Documentation/perf_counter/builtin-top.c | 1 - 2 files changed, 2 deletions(-) diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index d7ace631fc4..112b94ed329 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -68,7 +68,6 @@ #include #include #include -#include #include #include #include diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index dea016fa41e..6a276d2b2bb 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -49,7 +49,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From da1a776be1ac7f78bb30ececbec4c1383163b079 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:46:58 +0200 Subject: perf_counter, x86: remove X86_FEATURE_ARCH_PERFMON flag for AMD cpus X86_FEATURE_ARCH_PERFMON is an Intel hardware feature that does not work on AMD CPUs. The flag is now only used in Intel specific code (especially initialization). 
[ Impact: refactor code ] Signed-off-by: Robert Richter Acked-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1241002046-8832-2-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/amd.c | 4 ---- arch/x86/kernel/cpu/perf_counter.c | 6 +++--- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index fd69c514ca2..7e4a459daa6 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -420,10 +420,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) if (c->x86 >= 6) set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK); - /* Enable Performance counter for K7 and later */ - if (c->x86 > 6 && c->x86 <= 0x11) - set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); - if (!c->x86_model_id[0]) { switch (c->x86) { case 0xf: diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 0fcbaab83f9..7d0f81dcb52 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -949,6 +949,9 @@ static struct pmc_x86_ops *pmc_intel_init(void) unsigned int unused; unsigned int ebx; + if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) + return NULL; + /* * Check whether the Architectural PerfMon supports * Branch Misses Retired Event or not. @@ -987,9 +990,6 @@ static struct pmc_x86_ops *pmc_amd_init(void) void __init init_hw_perf_counters(void) { - if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) - return; - switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: pmc_ops = pmc_intel_init(); -- cgit v1.2.3 From 829b42dd395c5801f6ae87da87ecbdcfd5ef1a6c Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:46:59 +0200 Subject: perf_counter, x86: declare perf_max_counters only for CONFIG_PERF_COUNTERS This is only needed for CONFIG_PERF_COUNTERS enabled. [ Impact: cleanup ] Signed-off-by: Robert Richter Acked-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1241002046-8832-3-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 98143288530..be10b3ffe32 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -512,12 +512,13 @@ struct perf_cpu_context { int recursion[4]; }; +#ifdef CONFIG_PERF_COUNTERS + /* * Set by architecture code: */ extern int perf_max_counters; -#ifdef CONFIG_PERF_COUNTERS extern const struct hw_perf_counter_ops * hw_perf_counter_init(struct perf_counter *counter); -- cgit v1.2.3 From 4138960a9251a265002b5cf07e671a49f8495381 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:00 +0200 Subject: perf_counter, x86: add default path to cpu detection This quits hw counter initialization immediately if no cpu is detected. 
[ Impact: cleanup ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-4-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 7d0f81dcb52..d6d6529349d 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -997,6 +997,8 @@ void __init init_hw_perf_counters(void) case X86_VENDOR_AMD: pmc_ops = pmc_amd_init(); break; + default: + return; } if (!pmc_ops) return; -- cgit v1.2.3 From 4295ee62660b13ddb87d41539f49b239e6e7d56f Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:01 +0200 Subject: perf_counter, x86: rework pmc_amd_save_disable_all() and pmc_amd_restore_all() MSR reads and writes are expensive. This patch adds checks to avoid its usage where possible. [ Impact: micro-optimization on AMD CPUs ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-5-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index d6d6529349d..75a090394b6 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -334,11 +334,13 @@ static u64 pmc_amd_save_disable_all(void) for (idx = 0; idx < nr_counters_generic; idx++) { u64 val; + if (!test_bit(idx, cpuc->active_mask)) + continue; rdmsrl(MSR_K7_EVNTSEL0 + idx, val); - if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) { - val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; - wrmsrl(MSR_K7_EVNTSEL0 + idx, val); - } + if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE)) + continue; + val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; + wrmsrl(MSR_K7_EVNTSEL0 + idx, val); } return enabled; @@ -372,13 +374,15 @@ static void pmc_amd_restore_all(u64 ctrl) return; for (idx = 0; idx < nr_counters_generic; idx++) { - if (test_bit(idx, cpuc->active_mask)) { - u64 val; + u64 val; - rdmsrl(MSR_K7_EVNTSEL0 + idx, val); - val |= ARCH_PERFMON_EVENTSEL0_ENABLE; - wrmsrl(MSR_K7_EVNTSEL0 + idx, val); - } + if (!test_bit(idx, cpuc->active_mask)) + continue; + rdmsrl(MSR_K7_EVNTSEL0 + idx, val); + if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) + continue; + val |= ARCH_PERFMON_EVENTSEL0_ENABLE; + wrmsrl(MSR_K7_EVNTSEL0 + idx, val); } } -- cgit v1.2.3 From 527e26af3741a2168986d8b82653ffe173891324 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:02 +0200 Subject: perf_counter, x86: protect per-cpu variables with compile barriers only Per-cpu variables needn't to be protected with cpu barriers (smp_wmb()). Protection is only needed for preemption on the same cpu (rescheduling or the nmi handler). This can be done using a compiler barrier only. 
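To illustrate the distinction with a sketch (not part of the patch; the pattern matches the hunks below): smp_wmb() orders the stores for readers on other CPUs, while barrier() only keeps the compiler from reordering them, which is all that a same-CPU NMI handler or reschedule can observe:

	cpuc->counters[idx] = counter;
	/* compile-time ordering suffices: the only racing
	   reader is the NMI handler on this same CPU */
	barrier();
	__hw_perf_counter_set_period(counter, hwc, idx);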
[ Impact: micro-optimization ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-6-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 75a090394b6..ad663d5ad2d 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -673,7 +673,7 @@ try_generic: /* * Make it visible before enabling the hw: */ - smp_wmb(); + barrier(); __hw_perf_counter_set_period(counter, hwc, idx); __pmc_generic_enable(counter, hwc, idx); @@ -745,7 +745,7 @@ static void pmc_generic_disable(struct perf_counter *counter) * Make sure the cleared pointer becomes visible before we * (potentially) free the counter: */ - smp_wmb(); + barrier(); /* * Drain the remaining delta count out of a counter -- cgit v1.2.3 From 4aeb0b4239bb3b67ed402cb9cef3e000c892cadf Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:03 +0200 Subject: perfcounters: rename struct hw_perf_counter_ops into struct pmu This patch renames struct hw_perf_counter_ops into struct pmu. It introduces a structure to describe a cpu specific pmu (performance monitoring unit). It may contain ops and data. The new name of the structure fits better, is shorter, and thus better to handle. Where it was appropriate, names of function and variable have been changed too. [ Impact: cleanup ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-7-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 25 +++++++------- arch/x86/kernel/cpu/perf_counter.c | 37 ++++++++++----------- include/linux/perf_counter.h | 9 +++-- kernel/perf_counter.c | 68 ++++++++++++++++++-------------------- 4 files changed, 66 insertions(+), 73 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index bd76d0fa2c3..d9bbe5efc64 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -256,7 +256,7 @@ static int check_excludes(struct perf_counter **ctrs, int n_prev, int n_new) return 0; } -static void power_perf_read(struct perf_counter *counter) +static void power_pmu_read(struct perf_counter *counter) { long val, delta, prev; @@ -405,7 +405,7 @@ void hw_perf_restore(u64 disable) for (i = 0; i < cpuhw->n_counters; ++i) { counter = cpuhw->counter[i]; if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) { - power_perf_read(counter); + power_pmu_read(counter); write_pmc(counter->hw.idx, 0); counter->hw.idx = 0; } @@ -477,7 +477,7 @@ static void counter_sched_in(struct perf_counter *counter, int cpu) counter->oncpu = cpu; counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped; if (is_software_counter(counter)) - counter->hw_ops->enable(counter); + counter->pmu->enable(counter); } /* @@ -533,7 +533,7 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader, * re-enable the PMU in order to get hw_perf_restore to do the * actual work of reconfiguring the PMU. */ -static int power_perf_enable(struct perf_counter *counter) +static int power_pmu_enable(struct perf_counter *counter) { struct cpu_hw_counters *cpuhw; unsigned long flags; @@ -573,7 +573,7 @@ static int power_perf_enable(struct perf_counter *counter) /* * Remove a counter from the PMU. 
*/ -static void power_perf_disable(struct perf_counter *counter) +static void power_pmu_disable(struct perf_counter *counter) { struct cpu_hw_counters *cpuhw; long i; @@ -583,7 +583,7 @@ static void power_perf_disable(struct perf_counter *counter) local_irq_save(flags); pmudis = hw_perf_save_disable(); - power_perf_read(counter); + power_pmu_read(counter); cpuhw = &__get_cpu_var(cpu_hw_counters); for (i = 0; i < cpuhw->n_counters; ++i) { @@ -607,10 +607,10 @@ static void power_perf_disable(struct perf_counter *counter) local_irq_restore(flags); } -struct hw_perf_counter_ops power_perf_ops = { - .enable = power_perf_enable, - .disable = power_perf_disable, - .read = power_perf_read +struct pmu power_pmu = { + .enable = power_pmu_enable, + .disable = power_pmu_disable, + .read = power_pmu_read, }; /* Number of perf_counters counting hardware events */ @@ -631,8 +631,7 @@ static void hw_perf_counter_destroy(struct perf_counter *counter) } } -const struct hw_perf_counter_ops * -hw_perf_counter_init(struct perf_counter *counter) +const struct pmu *hw_perf_counter_init(struct perf_counter *counter) { unsigned long ev; struct perf_counter *ctrs[MAX_HWCOUNTERS]; @@ -705,7 +704,7 @@ hw_perf_counter_init(struct perf_counter *counter) if (err) return ERR_PTR(err); - return &power_perf_ops; + return &power_pmu; } /* diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index ad663d5ad2d..95de980c74a 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -515,8 +515,8 @@ __pmc_fixed_disable(struct perf_counter *counter, } static inline void -__pmc_generic_disable(struct perf_counter *counter, - struct hw_perf_counter *hwc, unsigned int idx) +__x86_pmu_disable(struct perf_counter *counter, + struct hw_perf_counter *hwc, unsigned int idx) { if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) __pmc_fixed_disable(counter, hwc, idx); @@ -591,8 +591,8 @@ __pmc_fixed_enable(struct perf_counter *counter, } static void -__pmc_generic_enable(struct perf_counter *counter, - struct hw_perf_counter *hwc, int idx) +__x86_pmu_enable(struct perf_counter *counter, + struct hw_perf_counter *hwc, int idx) { if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) __pmc_fixed_enable(counter, hwc, idx); @@ -626,7 +626,7 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) /* * Find a PMC slot for the freshly enabled / scheduled in counter: */ -static int pmc_generic_enable(struct perf_counter *counter) +static int x86_pmu_enable(struct perf_counter *counter) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); struct hw_perf_counter *hwc = &counter->hw; @@ -667,7 +667,7 @@ try_generic: perf_counters_lapic_init(hwc->nmi); - __pmc_generic_disable(counter, hwc, idx); + __x86_pmu_disable(counter, hwc, idx); cpuc->counters[idx] = counter; /* @@ -676,7 +676,7 @@ try_generic: barrier(); __hw_perf_counter_set_period(counter, hwc, idx); - __pmc_generic_enable(counter, hwc, idx); + __x86_pmu_enable(counter, hwc, idx); return 0; } @@ -731,13 +731,13 @@ void perf_counter_print_debug(void) local_irq_enable(); } -static void pmc_generic_disable(struct perf_counter *counter) +static void x86_pmu_disable(struct perf_counter *counter) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); struct hw_perf_counter *hwc = &counter->hw; unsigned int idx = hwc->idx; - __pmc_generic_disable(counter, hwc, idx); + __x86_pmu_disable(counter, hwc, idx); clear_bit(idx, cpuc->used); cpuc->counters[idx] = NULL; @@ 
-767,7 +767,7 @@ static void perf_save_and_restart(struct perf_counter *counter) __hw_perf_counter_set_period(counter, hwc, idx); if (counter->state == PERF_COUNTER_STATE_ACTIVE) - __pmc_generic_enable(counter, hwc, idx); + __x86_pmu_enable(counter, hwc, idx); } /* @@ -805,7 +805,7 @@ again: perf_save_and_restart(counter); if (perf_counter_overflow(counter, nmi, regs, 0)) - __pmc_generic_disable(counter, &counter->hw, bit); + __x86_pmu_disable(counter, &counter->hw, bit); } hw_perf_ack_status(ack); @@ -1034,19 +1034,18 @@ void __init init_hw_perf_counters(void) register_die_notifier(&perf_counter_nmi_notifier); } -static void pmc_generic_read(struct perf_counter *counter) +static void x86_pmu_read(struct perf_counter *counter) { x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); } -static const struct hw_perf_counter_ops x86_perf_counter_ops = { - .enable = pmc_generic_enable, - .disable = pmc_generic_disable, - .read = pmc_generic_read, +static const struct pmu pmu = { + .enable = x86_pmu_enable, + .disable = x86_pmu_disable, + .read = x86_pmu_read, }; -const struct hw_perf_counter_ops * -hw_perf_counter_init(struct perf_counter *counter) +const struct pmu *hw_perf_counter_init(struct perf_counter *counter) { int err; @@ -1054,7 +1053,7 @@ hw_perf_counter_init(struct perf_counter *counter) if (err) return ERR_PTR(err); - return &x86_perf_counter_ops; + return &pmu; } /* diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index be10b3ffe32..c3db52dc876 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -334,9 +334,9 @@ struct hw_perf_counter { struct perf_counter; /** - * struct hw_perf_counter_ops - performance counter hw ops + * struct pmu - generic performance monitoring unit */ -struct hw_perf_counter_ops { +struct pmu { int (*enable) (struct perf_counter *counter); void (*disable) (struct perf_counter *counter); void (*read) (struct perf_counter *counter); @@ -381,7 +381,7 @@ struct perf_counter { struct list_head sibling_list; int nr_siblings; struct perf_counter *group_leader; - const struct hw_perf_counter_ops *hw_ops; + const struct pmu *pmu; enum perf_counter_active_state state; enum perf_counter_active_state prev_state; @@ -519,8 +519,7 @@ struct perf_cpu_context { */ extern int perf_max_counters; -extern const struct hw_perf_counter_ops * -hw_perf_counter_init(struct perf_counter *counter); +extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter); extern void perf_counter_task_sched_in(struct task_struct *task, int cpu); extern void perf_counter_task_sched_out(struct task_struct *task, int cpu); diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 09396098dd0..582108addef 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -52,8 +52,7 @@ static DEFINE_MUTEX(perf_resource_mutex); /* * Architecture provided APIs - weak aliases: */ -extern __weak const struct hw_perf_counter_ops * -hw_perf_counter_init(struct perf_counter *counter) +extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter) { return NULL; } @@ -124,7 +123,7 @@ counter_sched_out(struct perf_counter *counter, counter->state = PERF_COUNTER_STATE_INACTIVE; counter->tstamp_stopped = ctx->time; - counter->hw_ops->disable(counter); + counter->pmu->disable(counter); counter->oncpu = -1; if (!is_software_counter(counter)) @@ -417,7 +416,7 @@ counter_sched_in(struct perf_counter *counter, */ smp_wmb(); - if (counter->hw_ops->enable(counter)) { + if (counter->pmu->enable(counter)) { 
counter->state = PERF_COUNTER_STATE_INACTIVE; counter->oncpu = -1; return -EAGAIN; @@ -1096,7 +1095,7 @@ static void __read(void *info) local_irq_save(flags); if (ctx->is_active) update_context_time(ctx); - counter->hw_ops->read(counter); + counter->pmu->read(counter); update_counter_times(counter); local_irq_restore(flags); } @@ -1922,7 +1921,7 @@ static void perf_counter_output(struct perf_counter *counter, leader = counter->group_leader; list_for_each_entry(sub, &leader->sibling_list, list_entry) { if (sub != counter) - sub->hw_ops->read(sub); + sub->pmu->read(sub); group_entry.event = sub->hw_event.config; group_entry.counter = atomic64_read(&sub->count); @@ -2264,7 +2263,7 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) struct pt_regs *regs; counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); - counter->hw_ops->read(counter); + counter->pmu->read(counter); regs = get_irq_regs(); /* @@ -2410,7 +2409,7 @@ static void perf_swcounter_disable(struct perf_counter *counter) perf_swcounter_update(counter); } -static const struct hw_perf_counter_ops perf_ops_generic = { +static const struct pmu perf_ops_generic = { .enable = perf_swcounter_enable, .disable = perf_swcounter_disable, .read = perf_swcounter_read, @@ -2460,7 +2459,7 @@ static void cpu_clock_perf_counter_read(struct perf_counter *counter) cpu_clock_perf_counter_update(counter); } -static const struct hw_perf_counter_ops perf_ops_cpu_clock = { +static const struct pmu perf_ops_cpu_clock = { .enable = cpu_clock_perf_counter_enable, .disable = cpu_clock_perf_counter_disable, .read = cpu_clock_perf_counter_read, @@ -2522,7 +2521,7 @@ static void task_clock_perf_counter_read(struct perf_counter *counter) task_clock_perf_counter_update(counter, time); } -static const struct hw_perf_counter_ops perf_ops_task_clock = { +static const struct pmu perf_ops_task_clock = { .enable = task_clock_perf_counter_enable, .disable = task_clock_perf_counter_disable, .read = task_clock_perf_counter_read, @@ -2574,7 +2573,7 @@ static void cpu_migrations_perf_counter_disable(struct perf_counter *counter) cpu_migrations_perf_counter_update(counter); } -static const struct hw_perf_counter_ops perf_ops_cpu_migrations = { +static const struct pmu perf_ops_cpu_migrations = { .enable = cpu_migrations_perf_counter_enable, .disable = cpu_migrations_perf_counter_disable, .read = cpu_migrations_perf_counter_read, @@ -2600,8 +2599,7 @@ static void tp_perf_counter_destroy(struct perf_counter *counter) ftrace_profile_disable(perf_event_id(&counter->hw_event)); } -static const struct hw_perf_counter_ops * -tp_perf_counter_init(struct perf_counter *counter) +static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) { int event_id = perf_event_id(&counter->hw_event); int ret; @@ -2616,18 +2614,16 @@ tp_perf_counter_init(struct perf_counter *counter) return &perf_ops_generic; } #else -static const struct hw_perf_counter_ops * -tp_perf_counter_init(struct perf_counter *counter) +static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) { return NULL; } #endif -static const struct hw_perf_counter_ops * -sw_perf_counter_init(struct perf_counter *counter) +static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) { struct perf_counter_hw_event *hw_event = &counter->hw_event; - const struct hw_perf_counter_ops *hw_ops = NULL; + const struct pmu *pmu = NULL; struct hw_perf_counter *hwc = &counter->hw; /* @@ -2639,7 +2635,7 @@ sw_perf_counter_init(struct perf_counter *counter) */ 
switch (perf_event_id(&counter->hw_event)) { case PERF_COUNT_CPU_CLOCK: - hw_ops = &perf_ops_cpu_clock; + pmu = &perf_ops_cpu_clock; if (hw_event->irq_period && hw_event->irq_period < 10000) hw_event->irq_period = 10000; @@ -2650,9 +2646,9 @@ sw_perf_counter_init(struct perf_counter *counter) * use the cpu_clock counter instead. */ if (counter->ctx->task) - hw_ops = &perf_ops_task_clock; + pmu = &perf_ops_task_clock; else - hw_ops = &perf_ops_cpu_clock; + pmu = &perf_ops_cpu_clock; if (hw_event->irq_period && hw_event->irq_period < 10000) hw_event->irq_period = 10000; @@ -2661,18 +2657,18 @@ case PERF_COUNT_PAGE_FAULTS: case PERF_COUNT_PAGE_FAULTS_MIN: case PERF_COUNT_PAGE_FAULTS_MAJ: case PERF_COUNT_CONTEXT_SWITCHES: - hw_ops = &perf_ops_generic; + pmu = &perf_ops_generic; break; case PERF_COUNT_CPU_MIGRATIONS: if (!counter->hw_event.exclude_kernel) - hw_ops = &perf_ops_cpu_migrations; + pmu = &perf_ops_cpu_migrations; break; } - if (hw_ops) + if (pmu) hwc->irq_period = hw_event->irq_period; - return hw_ops; + return pmu; } /* @@ -2685,7 +2681,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, struct perf_counter *group_leader, gfp_t gfpflags) { - const struct hw_perf_counter_ops *hw_ops; + const struct pmu *pmu; struct perf_counter *counter; long err; @@ -2713,46 +2709,46 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, counter->cpu = cpu; counter->hw_event = *hw_event; counter->group_leader = group_leader; - counter->hw_ops = NULL; + counter->pmu = NULL; counter->ctx = ctx; counter->state = PERF_COUNTER_STATE_INACTIVE; if (hw_event->disabled) counter->state = PERF_COUNTER_STATE_OFF; - hw_ops = NULL; + pmu = NULL; if (perf_event_raw(hw_event)) { - hw_ops = hw_perf_counter_init(counter); + pmu = hw_perf_counter_init(counter); goto done; } switch (perf_event_type(hw_event)) { case PERF_TYPE_HARDWARE: - hw_ops = hw_perf_counter_init(counter); + pmu = hw_perf_counter_init(counter); break; case PERF_TYPE_SOFTWARE: - hw_ops = sw_perf_counter_init(counter); + pmu = sw_perf_counter_init(counter); break; case PERF_TYPE_TRACEPOINT: - hw_ops = tp_perf_counter_init(counter); + pmu = tp_perf_counter_init(counter); break; } done: err = 0; - if (!hw_ops) + if (!pmu) err = -EINVAL; - else if (IS_ERR(hw_ops)) - err = PTR_ERR(hw_ops); + else if (IS_ERR(pmu)) + err = PTR_ERR(pmu); if (err) { kfree(counter); return ERR_PTR(err); } - counter->hw_ops = hw_ops; + counter->pmu = pmu; if (counter->hw_event.mmap) atomic_inc(&nr_mmap_tracking); -- cgit v1.2.3 From 5f4ec28ffe77c840354cce1820a3436106e9e0f1 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:04 +0200 Subject: perf_counter, x86: rename struct pmc_x86_ops into struct x86_pmu This patch renames struct pmc_x86_ops to struct x86_pmu. It introduces a structure to describe an x86 model-specific pmu (performance monitoring unit). It may contain ops and data. The new name of the structure fits better, is shorter, and thus easier to handle. Where appropriate, the names of functions and variables have been changed as well.
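For illustration, a minimal standalone sketch of the pattern this rename establishes: one descriptor that bundles model-specific ops and data, consulted through a single handle. All names here (struct pmu_desc, demo_event_map) are hypothetical; the real struct x86_pmu layout is in the diff below.

#include <stdio.h>

/* Hypothetical stand-in for struct x86_pmu: function pointers plus data. */
struct pmu_desc {
        const char *name;
        long (*event_map)(int event);   /* op: generic event id -> raw hw code */
        int max_events;                 /* data: size of the mapping table */
};

static long demo_event_map(int event)
{
        static const long map[] = { 0x3c, 0xc0 };       /* made-up raw codes */
        return map[event];
}

static struct pmu_desc demo_pmu = {
        .name           = "demo",
        .event_map      = demo_event_map,
        .max_events     = 2,
};

int main(void)
{
        /* Callers go through one descriptor instead of per-vendor branches. */
        printf("%s: event 1 maps to %#lx\n", demo_pmu.name, demo_pmu.event_map(1));
        return 0;
}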
[ Impact: cleanup ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-8-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 135 +++++++++++++++++++------------------ 1 file changed, 68 insertions(+), 67 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 95de980c74a..808a1a11346 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -44,9 +44,9 @@ struct cpu_hw_counters { }; /* - * struct pmc_x86_ops - performance counter x86 ops + * struct x86_pmu - generic x86 pmu */ -struct pmc_x86_ops { +struct x86_pmu { u64 (*save_disable_all)(void); void (*restore_all)(u64); u64 (*get_status)(u64); @@ -60,7 +60,7 @@ struct pmc_x86_ops { int max_events; }; -static struct pmc_x86_ops *pmc_ops __read_mostly; +static struct x86_pmu *x86_pmu __read_mostly; static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, @@ -82,12 +82,12 @@ static const u64 intel_perfmon_event_map[] = [PERF_COUNT_BUS_CYCLES] = 0x013c, }; -static u64 pmc_intel_event_map(int event) +static u64 intel_pmu_event_map(int event) { return intel_perfmon_event_map[event]; } -static u64 pmc_intel_raw_event(u64 event) +static u64 intel_pmu_raw_event(u64 event) { #define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL @@ -114,12 +114,12 @@ static const u64 amd_perfmon_event_map[] = [PERF_COUNT_BRANCH_MISSES] = 0x00c5, }; -static u64 pmc_amd_event_map(int event) +static u64 amd_pmu_event_map(int event) { return amd_perfmon_event_map[event]; } -static u64 pmc_amd_raw_event(u64 event) +static u64 amd_pmu_raw_event(u64 event) { #define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL #define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL @@ -184,12 +184,12 @@ static bool reserve_pmc_hardware(void) disable_lapic_nmi_watchdog(); for (i = 0; i < nr_counters_generic; i++) { - if (!reserve_perfctr_nmi(pmc_ops->perfctr + i)) + if (!reserve_perfctr_nmi(x86_pmu->perfctr + i)) goto perfctr_fail; } for (i = 0; i < nr_counters_generic; i++) { - if (!reserve_evntsel_nmi(pmc_ops->eventsel + i)) + if (!reserve_evntsel_nmi(x86_pmu->eventsel + i)) goto eventsel_fail; } @@ -197,13 +197,13 @@ static bool reserve_pmc_hardware(void) eventsel_fail: for (i--; i >= 0; i--) - release_evntsel_nmi(pmc_ops->eventsel + i); + release_evntsel_nmi(x86_pmu->eventsel + i); i = nr_counters_generic; perfctr_fail: for (i--; i >= 0; i--) - release_perfctr_nmi(pmc_ops->perfctr + i); + release_perfctr_nmi(x86_pmu->perfctr + i); if (nmi_watchdog == NMI_LOCAL_APIC) enable_lapic_nmi_watchdog(); @@ -216,8 +216,8 @@ static void release_pmc_hardware(void) int i; for (i = 0; i < nr_counters_generic; i++) { - release_perfctr_nmi(pmc_ops->perfctr + i); - release_evntsel_nmi(pmc_ops->eventsel + i); + release_perfctr_nmi(x86_pmu->perfctr + i); + release_evntsel_nmi(x86_pmu->eventsel + i); } if (nmi_watchdog == NMI_LOCAL_APIC) @@ -293,14 +293,14 @@ static int __hw_perf_counter_init(struct perf_counter *counter) * Raw event type provide the config in the event structure */ if (perf_event_raw(hw_event)) { - hwc->config |= pmc_ops->raw_event(perf_event_config(hw_event)); + hwc->config |= x86_pmu->raw_event(perf_event_config(hw_event)); } else { - if (perf_event_id(hw_event) >= pmc_ops->max_events) + if (perf_event_id(hw_event) >= x86_pmu->max_events) return -EINVAL; /* * The generic map: */ - hwc->config |= pmc_ops->event_map(perf_event_id(hw_event)); + hwc->config |= 
x86_pmu->event_map(perf_event_id(hw_event)); } counter->destroy = hw_perf_counter_destroy; @@ -308,7 +308,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter) return 0; } -static u64 pmc_intel_save_disable_all(void) +static u64 intel_pmu_save_disable_all(void) { u64 ctrl; @@ -318,7 +318,7 @@ static u64 pmc_intel_save_disable_all(void) return ctrl; } -static u64 pmc_amd_save_disable_all(void) +static u64 amd_pmu_save_disable_all(void) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); int enabled, idx; @@ -327,7 +327,8 @@ static u64 pmc_amd_save_disable_all(void) cpuc->enabled = 0; /* * ensure we write the disable before we start disabling the - * counters proper, so that pcm_amd_enable() does the right thing. + * counters proper, so that amd_pmu_enable_counter() does the + * right thing. */ barrier(); @@ -351,19 +352,19 @@ u64 hw_perf_save_disable(void) if (unlikely(!perf_counters_initialized)) return 0; - return pmc_ops->save_disable_all(); + return x86_pmu->save_disable_all(); } /* * Exported because of ACPI idle */ EXPORT_SYMBOL_GPL(hw_perf_save_disable); -static void pmc_intel_restore_all(u64 ctrl) +static void intel_pmu_restore_all(u64 ctrl) { wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); } -static void pmc_amd_restore_all(u64 ctrl) +static void amd_pmu_restore_all(u64 ctrl) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); int idx; @@ -391,14 +392,14 @@ void hw_perf_restore(u64 ctrl) if (unlikely(!perf_counters_initialized)) return; - pmc_ops->restore_all(ctrl); + x86_pmu->restore_all(ctrl); } /* * Exported because of ACPI idle */ EXPORT_SYMBOL_GPL(hw_perf_restore); -static u64 pmc_intel_get_status(u64 mask) +static u64 intel_pmu_get_status(u64 mask) { u64 status; @@ -407,7 +408,7 @@ static u64 pmc_intel_get_status(u64 mask) return status; } -static u64 pmc_amd_get_status(u64 mask) +static u64 amd_pmu_get_status(u64 mask) { u64 status = 0; int idx; @@ -432,15 +433,15 @@ static u64 hw_perf_get_status(u64 mask) if (unlikely(!perf_counters_initialized)) return 0; - return pmc_ops->get_status(mask); + return x86_pmu->get_status(mask); } -static void pmc_intel_ack_status(u64 ack) +static void intel_pmu_ack_status(u64 ack) { wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); } -static void pmc_amd_ack_status(u64 ack) +static void amd_pmu_ack_status(u64 ack) { } @@ -449,16 +450,16 @@ static void hw_perf_ack_status(u64 ack) if (unlikely(!perf_counters_initialized)) return; - pmc_ops->ack_status(ack); + x86_pmu->ack_status(ack); } -static void pmc_intel_enable(int idx, u64 config) +static void intel_pmu_enable_counter(int idx, u64 config) { wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config | ARCH_PERFMON_EVENTSEL0_ENABLE); } -static void pmc_amd_enable(int idx, u64 config) +static void amd_pmu_enable_counter(int idx, u64 config) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); @@ -474,15 +475,15 @@ static void hw_perf_enable(int idx, u64 config) if (unlikely(!perf_counters_initialized)) return; - pmc_ops->enable(idx, config); + x86_pmu->enable(idx, config); } -static void pmc_intel_disable(int idx, u64 config) +static void intel_pmu_disable_counter(int idx, u64 config) { wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config); } -static void pmc_amd_disable(int idx, u64 config) +static void amd_pmu_disable_counter(int idx, u64 config) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); @@ -496,7 +497,7 @@ static void hw_perf_disable(int idx, u64 config) if (unlikely(!perf_counters_initialized)) return; - pmc_ops->disable(idx, config); + 
x86_pmu->disable(idx, config); } static inline void @@ -613,11 +614,11 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) event = hwc->config & ARCH_PERFMON_EVENT_MASK; - if (unlikely(event == pmc_ops->event_map(PERF_COUNT_INSTRUCTIONS))) + if (unlikely(event == x86_pmu->event_map(PERF_COUNT_INSTRUCTIONS))) return X86_PMC_IDX_FIXED_INSTRUCTIONS; - if (unlikely(event == pmc_ops->event_map(PERF_COUNT_CPU_CYCLES))) + if (unlikely(event == x86_pmu->event_map(PERF_COUNT_CPU_CYCLES))) return X86_PMC_IDX_FIXED_CPU_CYCLES; - if (unlikely(event == pmc_ops->event_map(PERF_COUNT_BUS_CYCLES))) + if (unlikely(event == x86_pmu->event_map(PERF_COUNT_BUS_CYCLES))) return X86_PMC_IDX_FIXED_BUS_CYCLES; return -1; @@ -661,8 +662,8 @@ try_generic: set_bit(idx, cpuc->used); hwc->idx = idx; } - hwc->config_base = pmc_ops->eventsel; - hwc->counter_base = pmc_ops->perfctr; + hwc->config_base = x86_pmu->eventsel; + hwc->counter_base = x86_pmu->perfctr; } perf_counters_lapic_init(hwc->nmi); @@ -710,8 +711,8 @@ void perf_counter_print_debug(void) pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used); for (idx = 0; idx < nr_counters_generic; idx++) { - rdmsrl(pmc_ops->eventsel + idx, pmc_ctrl); - rdmsrl(pmc_ops->perfctr + idx, pmc_count); + rdmsrl(x86_pmu->eventsel + idx, pmc_ctrl); + rdmsrl(x86_pmu->perfctr + idx, pmc_count); prev_left = per_cpu(prev_left[idx], cpu); @@ -918,35 +919,35 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = { .priority = 1 }; -static struct pmc_x86_ops pmc_intel_ops = { - .save_disable_all = pmc_intel_save_disable_all, - .restore_all = pmc_intel_restore_all, - .get_status = pmc_intel_get_status, - .ack_status = pmc_intel_ack_status, - .enable = pmc_intel_enable, - .disable = pmc_intel_disable, +static struct x86_pmu intel_pmu = { + .save_disable_all = intel_pmu_save_disable_all, + .restore_all = intel_pmu_restore_all, + .get_status = intel_pmu_get_status, + .ack_status = intel_pmu_ack_status, + .enable = intel_pmu_enable_counter, + .disable = intel_pmu_disable_counter, .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, .perfctr = MSR_ARCH_PERFMON_PERFCTR0, - .event_map = pmc_intel_event_map, - .raw_event = pmc_intel_raw_event, + .event_map = intel_pmu_event_map, + .raw_event = intel_pmu_raw_event, .max_events = ARRAY_SIZE(intel_perfmon_event_map), }; -static struct pmc_x86_ops pmc_amd_ops = { - .save_disable_all = pmc_amd_save_disable_all, - .restore_all = pmc_amd_restore_all, - .get_status = pmc_amd_get_status, - .ack_status = pmc_amd_ack_status, - .enable = pmc_amd_enable, - .disable = pmc_amd_disable, +static struct x86_pmu amd_pmu = { + .save_disable_all = amd_pmu_save_disable_all, + .restore_all = amd_pmu_restore_all, + .get_status = amd_pmu_get_status, + .ack_status = amd_pmu_ack_status, + .enable = amd_pmu_enable_counter, + .disable = amd_pmu_disable_counter, .eventsel = MSR_K7_EVNTSEL0, .perfctr = MSR_K7_PERFCTR0, - .event_map = pmc_amd_event_map, - .raw_event = pmc_amd_raw_event, + .event_map = amd_pmu_event_map, + .raw_event = amd_pmu_raw_event, .max_events = ARRAY_SIZE(amd_perfmon_event_map), }; -static struct pmc_x86_ops *pmc_intel_init(void) +static struct x86_pmu *intel_pmu_init(void) { union cpuid10_edx edx; union cpuid10_eax eax; @@ -977,10 +978,10 @@ static struct pmc_x86_ops *pmc_intel_init(void) nr_counters_fixed = edx.split.num_counters_fixed; counter_value_mask = (1ULL << eax.split.bit_width) - 1; - return &pmc_intel_ops; + return &intel_pmu; } -static struct pmc_x86_ops *pmc_amd_init(void) +static struct x86_pmu 
*amd_pmu_init(void) { nr_counters_generic = 4; nr_counters_fixed = 0; @@ -989,22 +990,22 @@ static struct pmc_x86_ops *pmc_amd_init(void) pr_info("AMD Performance Monitoring support detected.\n"); - return &pmc_amd_ops; + return &amd_pmu; } void __init init_hw_perf_counters(void) { switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: - pmc_ops = pmc_intel_init(); + x86_pmu = intel_pmu_init(); break; case X86_VENDOR_AMD: - pmc_ops = pmc_amd_init(); + x86_pmu = amd_pmu_init(); break; default: return; } - if (!pmc_ops) + if (!x86_pmu) return; pr_info("... num counters: %d\n", nr_counters_generic); -- cgit v1.2.3 From 39d81eab2374d71b2d9c82f66258a1a4f57ddd2e Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:05 +0200 Subject: perf_counter, x86: make interrupt handler model specific This separates the perfcounter interrupt handler for AMD and Intel cpus. The AMD interrupt handler implementation is a follow-on patch. [ Impact: refactor and clean up code ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-9-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 808a1a11346..9d90de0bd0b 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -4,6 +4,7 @@ * Copyright(C) 2008 Thomas Gleixner * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar * Copyright(C) 2009 Jaswinder Singh Rajput + * Copyright(C) 2009 Advanced Micro Devices, Inc., Robert Richter * * For licencing details see kernel-base/COPYING */ @@ -47,6 +48,7 @@ struct cpu_hw_counters { * struct x86_pmu - generic x86 pmu */ struct x86_pmu { + int (*handle_irq)(struct pt_regs *, int); u64 (*save_disable_all)(void); void (*restore_all)(u64); u64 (*get_status)(u64); @@ -241,6 +243,10 @@ static int __hw_perf_counter_init(struct perf_counter *counter) struct hw_perf_counter *hwc = &counter->hw; int err; + /* disable temporarily */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + return -ENOSYS; + if (unlikely(!perf_counters_initialized)) return -EINVAL; @@ -780,7 +786,7 @@ static void perf_save_and_restart(struct perf_counter *counter) * This handler is triggered by the local APIC, so the APIC IRQ handling * rules apply: */ -static int __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi) +static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi) { int bit, cpu = smp_processor_id(); u64 ack, status; @@ -827,6 +833,8 @@ out: return ret; } +static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) { return 0; } + void perf_counter_unthrottle(void) { struct cpu_hw_counters *cpuc; @@ -851,7 +859,7 @@ void smp_perf_counter_interrupt(struct pt_regs *regs) irq_enter(); apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR); ack_APIC_irq(); - __smp_perf_counter_interrupt(regs, 0); + x86_pmu->handle_irq(regs, 0); irq_exit(); } @@ -908,7 +916,7 @@ perf_counter_nmi_handler(struct notifier_block *self, regs = args->regs; apic_write(APIC_LVTPC, APIC_DM_NMI); - ret = __smp_perf_counter_interrupt(regs, 1); + ret = x86_pmu->handle_irq(regs, 1); return ret ? 
NOTIFY_STOP : NOTIFY_OK; } @@ -920,6 +928,7 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = { }; static struct x86_pmu intel_pmu = { + .handle_irq = intel_pmu_handle_irq, .save_disable_all = intel_pmu_save_disable_all, .restore_all = intel_pmu_restore_all, .get_status = intel_pmu_get_status, @@ -934,6 +943,7 @@ static struct x86_pmu intel_pmu = { }; static struct x86_pmu amd_pmu = { + .handle_irq = amd_pmu_handle_irq, .save_disable_all = amd_pmu_save_disable_all, .restore_all = amd_pmu_restore_all, .get_status = amd_pmu_get_status, -- cgit v1.2.3 From b7f8859a8ed1937e2139c17b84878f1d413fa659 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:06 +0200 Subject: perf_counter, x86: remove get_status() from struct x86_pmu This function is Intel only and not necessary for AMD cpus. [ Impact: simplify code ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-10-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 39 +++++--------------------------------- 1 file changed, 5 insertions(+), 34 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 9d90de0bd0b..d0bb02919c6 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -51,7 +51,6 @@ struct x86_pmu { int (*handle_irq)(struct pt_regs *, int); u64 (*save_disable_all)(void); void (*restore_all)(u64); - u64 (*get_status)(u64); void (*ack_status)(u64); void (*enable)(int, u64); void (*disable)(int, u64); @@ -405,41 +404,15 @@ void hw_perf_restore(u64 ctrl) */ EXPORT_SYMBOL_GPL(hw_perf_restore); -static u64 intel_pmu_get_status(u64 mask) +static inline u64 intel_pmu_get_status(u64 mask) { u64 status; - rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); - - return status; -} - -static u64 amd_pmu_get_status(u64 mask) -{ - u64 status = 0; - int idx; - - for (idx = 0; idx < nr_counters_generic; idx++) { - s64 val; - - if (!(mask & (1 << idx))) - continue; - - rdmsrl(MSR_K7_PERFCTR0 + idx, val); - val <<= (64 - counter_value_bits); - if (val >= 0) - status |= (1 << idx); - } - - return status; -} - -static u64 hw_perf_get_status(u64 mask) -{ if (unlikely(!perf_counters_initialized)) return 0; + rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); - return x86_pmu->get_status(mask); + return status; } static void intel_pmu_ack_status(u64 ack) @@ -795,7 +768,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi) cpuc->throttle_ctrl = hw_perf_save_disable(); - status = hw_perf_get_status(cpuc->throttle_ctrl); + status = intel_pmu_get_status(cpuc->throttle_ctrl); if (!status) goto out; @@ -820,7 +793,7 @@ again: /* * Repeat if there is more work to be done: */ - status = hw_perf_get_status(cpuc->throttle_ctrl); + status = intel_pmu_get_status(cpuc->throttle_ctrl); if (status) goto again; out: @@ -931,7 +904,6 @@ static struct x86_pmu intel_pmu = { .handle_irq = intel_pmu_handle_irq, .save_disable_all = intel_pmu_save_disable_all, .restore_all = intel_pmu_restore_all, - .get_status = intel_pmu_get_status, .ack_status = intel_pmu_ack_status, .enable = intel_pmu_enable_counter, .disable = intel_pmu_disable_counter, @@ -946,7 +918,6 @@ static struct x86_pmu amd_pmu = { .handle_irq = amd_pmu_handle_irq, .save_disable_all = amd_pmu_save_disable_all, .restore_all = amd_pmu_restore_all, - .get_status = amd_pmu_get_status, .ack_status = amd_pmu_ack_status, .enable = amd_pmu_enable_counter, .disable = amd_pmu_disable_counter, 
-- cgit v1.2.3 From dee5d9067ca78b317538fd67930be4e09a83dbc5 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:07 +0200 Subject: perf_counter, x86: remove ack_status() from struct x86_pmu This function is Intel only and not necessary for AMD cpus. [ Impact: simplify code ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-11-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index d0bb02919c6..6bbdc16cc69 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -51,7 +51,6 @@ struct x86_pmu { int (*handle_irq)(struct pt_regs *, int); u64 (*save_disable_all)(void); void (*restore_all)(u64); - void (*ack_status)(u64); void (*enable)(int, u64); void (*disable)(int, u64); unsigned eventsel; @@ -415,23 +414,11 @@ static inline u64 intel_pmu_get_status(u64 mask) return status; } -static void intel_pmu_ack_status(u64 ack) +static inline void intel_pmu_ack_status(u64 ack) { wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); } -static void amd_pmu_ack_status(u64 ack) -{ -} - -static void hw_perf_ack_status(u64 ack) -{ - if (unlikely(!perf_counters_initialized)) - return; - - x86_pmu->ack_status(ack); -} - static void intel_pmu_enable_counter(int idx, u64 config) { wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, @@ -788,7 +775,7 @@ again: __x86_pmu_disable(counter, &counter->hw, bit); } - hw_perf_ack_status(ack); + intel_pmu_ack_status(ack); /* * Repeat if there is more work to be done: @@ -904,7 +891,6 @@ static struct x86_pmu intel_pmu = { .handle_irq = intel_pmu_handle_irq, .save_disable_all = intel_pmu_save_disable_all, .restore_all = intel_pmu_restore_all, - .ack_status = intel_pmu_ack_status, .enable = intel_pmu_enable_counter, .disable = intel_pmu_disable_counter, .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, @@ -918,7 +904,6 @@ static struct x86_pmu amd_pmu = { .handle_irq = amd_pmu_handle_irq, .save_disable_all = amd_pmu_save_disable_all, .restore_all = amd_pmu_restore_all, - .ack_status = amd_pmu_ack_status, .enable = amd_pmu_enable_counter, .disable = amd_pmu_disable_counter, .eventsel = MSR_K7_EVNTSEL0, -- cgit v1.2.3 From 26816c287e13eedc67bc4ed0cd40c138314b7c7d Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:08 +0200 Subject: perf_counter, x86: rename __hw_perf_counter_set_period into x86_perf_counter_set_period [ Impact: cleanup ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-12-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 6bbdc16cc69..fa6541d781b 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -498,7 +498,7 @@ static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]); * To be called with the counter disabled in hw: */ static void -__hw_perf_counter_set_period(struct perf_counter *counter, +x86_perf_counter_set_period(struct perf_counter *counter, struct hw_perf_counter *hwc, int idx) { s64 left = atomic64_read(&hwc->period_left); @@ -642,7 +642,7 @@ try_generic: */ barrier(); - __hw_perf_counter_set_period(counter, hwc, idx); + 
x86_perf_counter_set_period(counter, hwc, idx); __x86_pmu_enable(counter, hwc, idx); return 0; @@ -731,7 +731,7 @@ static void perf_save_and_restart(struct perf_counter *counter) int idx = hwc->idx; x86_perf_counter_update(counter, hwc, idx); - __hw_perf_counter_set_period(counter, hwc, idx); + x86_perf_counter_set_period(counter, hwc, idx); if (counter->state == PERF_COUNTER_STATE_ACTIVE) __x86_pmu_enable(counter, hwc, idx); -- cgit v1.2.3 From 55de0f2e57994b525324bf0d04d242d9358a2417 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:09 +0200 Subject: perf_counter, x86: rename intel only functions [ Impact: cleanup ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-13-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index fa6541d781b..5a52d73ccfa 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -725,7 +725,7 @@ static void x86_pmu_disable(struct perf_counter *counter) * Save and restart an expired counter. Called by NMI contexts, * so it has to be careful about preempting normal counter ops: */ -static void perf_save_and_restart(struct perf_counter *counter) +static void intel_pmu_save_and_restart(struct perf_counter *counter) { struct hw_perf_counter *hwc = &counter->hw; int idx = hwc->idx; @@ -753,7 +753,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi) struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu); int ret = 0; - cpuc->throttle_ctrl = hw_perf_save_disable(); + cpuc->throttle_ctrl = intel_pmu_save_disable_all(); status = intel_pmu_get_status(cpuc->throttle_ctrl); if (!status) @@ -770,7 +770,7 @@ again: if (!counter) continue; - perf_save_and_restart(counter); + intel_pmu_save_and_restart(counter); if (perf_counter_overflow(counter, nmi, regs, 0)) __x86_pmu_disable(counter, &counter->hw, bit); } @@ -788,7 +788,7 @@ out: * Restore - do not reenable when global enable is off or throttled: */ if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS) - hw_perf_restore(cpuc->throttle_ctrl); + intel_pmu_restore_all(cpuc->throttle_ctrl); return ret; } -- cgit v1.2.3 From 72eae04d3a3075c26d39e1e685acfc8e8c29db64 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:10 +0200 Subject: perf_counter, x86: modify initialization of struct x86_pmu This patch adds an error handler and changes initialization of struct x86_pmu. No functional changes. Needed for follow-on patches. 
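For illustration, a minimal sketch (hypothetical names, userspace C) of the init convention this patch introduces: each vendor init installs the descriptor itself and reports 0 or a negative errno, so the caller only has to check an error code.

#include <errno.h>
#include <stdio.h>

struct pmu_desc { const char *name; };

static struct pmu_desc intel_desc = { "intel" };
static struct pmu_desc *active_pmu;     /* installed by the init routine */

static int demo_intel_init(int has_arch_perfmon)
{
        if (!has_arch_perfmon)
                return -ENODEV;         /* error path: nothing installed */
        active_pmu = &intel_desc;       /* success path: install descriptor */
        return 0;
}

int main(void)
{
        if (demo_intel_init(1) != 0)
                return 1;
        printf("using %s pmu\n", active_pmu->name);
        return 0;
}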
[ Impact: cleanup ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-14-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 5a52d73ccfa..7c72a942363 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -913,7 +913,7 @@ static struct x86_pmu amd_pmu = { .max_events = ARRAY_SIZE(amd_perfmon_event_map), }; -static struct x86_pmu *intel_pmu_init(void) +static int intel_pmu_init(void) { union cpuid10_edx edx; union cpuid10_eax eax; @@ -921,7 +921,7 @@ static struct x86_pmu *intel_pmu_init(void) unsigned int ebx; if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) - return NULL; + return -ENODEV; /* * Check whether the Architectural PerfMon supports * @@ -929,49 +929,54 @@ static struct x86_pmu *intel_pmu_init(void) */ cpuid(10, &eax.full, &ebx, &unused, &edx.full); if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) - return NULL; + return -ENODEV; intel_perfmon_version = eax.split.version_id; if (intel_perfmon_version < 2) - return NULL; + return -ENODEV; pr_info("Intel Performance Monitoring support detected.\n"); pr_info("... version: %d\n", intel_perfmon_version); pr_info("... bit width: %d\n", eax.split.bit_width); pr_info("... mask length: %d\n", eax.split.mask_length); + x86_pmu = &intel_pmu; + nr_counters_generic = eax.split.num_counters; nr_counters_fixed = edx.split.num_counters_fixed; counter_value_mask = (1ULL << eax.split.bit_width) - 1; - return &intel_pmu; + return 0; } -static struct x86_pmu *amd_pmu_init(void) +static int amd_pmu_init(void) { + x86_pmu = &amd_pmu; + nr_counters_generic = 4; nr_counters_fixed = 0; counter_value_mask = 0x0000FFFFFFFFFFFFULL; counter_value_bits = 48; pr_info("AMD Performance Monitoring support detected.\n"); - - return &amd_pmu; + return 0; } void __init init_hw_perf_counters(void) { + int err; + switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: - x86_pmu = intel_pmu_init(); + err = intel_pmu_init(); break; case X86_VENDOR_AMD: - x86_pmu = amd_pmu_init(); + err = amd_pmu_init(); break; default: return; } - if (!x86_pmu) + if (err != 0) return; pr_info("... num counters: %d\n", nr_counters_generic); -- cgit v1.2.3 From 4a06bd8508f65ad1dd5cd2046b85694813fa36a2 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:11 +0200 Subject: perf_counter, x86: make x86_pmu data a static struct Instead of using a pointer to reference the x86 pmu, we now have a single data structure that is initialized once at init time. This saves a pointer dereference each time the structure is accessed.
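For illustration, a minimal sketch (hypothetical names) of the before/after shape of this change: the vendor descriptor is copied by value into one static instance at init time, so hot-path reads access the fields directly instead of chasing a pointer.

struct pmu_desc { int num_counters; };

static struct pmu_desc x86_desc;        /* was: static struct pmu_desc *x86_desc */
static const struct pmu_desc amd_desc = { .num_counters = 4 };

static void demo_init(void)
{
        x86_desc = amd_desc;            /* struct copy; was: x86_desc = &amd_desc */
}

static int demo_num_counters(void)
{
        return x86_desc.num_counters;   /* direct access, no pointer dereference */
}

int main(void)
{
        demo_init();
        return demo_num_counters() == 4 ? 0 : 1;
}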
[ Impact: micro-optimization ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-15-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 50 +++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 7c72a942363..68597d76338 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -60,7 +60,7 @@ struct x86_pmu { int max_events; }; -static struct x86_pmu *x86_pmu __read_mostly; +static struct x86_pmu x86_pmu __read_mostly; static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, @@ -184,12 +184,12 @@ static bool reserve_pmc_hardware(void) disable_lapic_nmi_watchdog(); for (i = 0; i < nr_counters_generic; i++) { - if (!reserve_perfctr_nmi(x86_pmu->perfctr + i)) + if (!reserve_perfctr_nmi(x86_pmu.perfctr + i)) goto perfctr_fail; } for (i = 0; i < nr_counters_generic; i++) { - if (!reserve_evntsel_nmi(x86_pmu->eventsel + i)) + if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) goto eventsel_fail; } @@ -197,13 +197,13 @@ static bool reserve_pmc_hardware(void) eventsel_fail: for (i--; i >= 0; i--) - release_evntsel_nmi(x86_pmu->eventsel + i); + release_evntsel_nmi(x86_pmu.eventsel + i); i = nr_counters_generic; perfctr_fail: for (i--; i >= 0; i--) - release_perfctr_nmi(x86_pmu->perfctr + i); + release_perfctr_nmi(x86_pmu.perfctr + i); if (nmi_watchdog == NMI_LOCAL_APIC) enable_lapic_nmi_watchdog(); @@ -216,8 +216,8 @@ static void release_pmc_hardware(void) int i; for (i = 0; i < nr_counters_generic; i++) { - release_perfctr_nmi(x86_pmu->perfctr + i); - release_evntsel_nmi(x86_pmu->eventsel + i); + release_perfctr_nmi(x86_pmu.perfctr + i); + release_evntsel_nmi(x86_pmu.eventsel + i); } if (nmi_watchdog == NMI_LOCAL_APIC) @@ -297,14 +297,14 @@ static int __hw_perf_counter_init(struct perf_counter *counter) * Raw event type provide the config in the event structure */ if (perf_event_raw(hw_event)) { - hwc->config |= x86_pmu->raw_event(perf_event_config(hw_event)); + hwc->config |= x86_pmu.raw_event(perf_event_config(hw_event)); } else { - if (perf_event_id(hw_event) >= x86_pmu->max_events) + if (perf_event_id(hw_event) >= x86_pmu.max_events) return -EINVAL; /* * The generic map: */ - hwc->config |= x86_pmu->event_map(perf_event_id(hw_event)); + hwc->config |= x86_pmu.event_map(perf_event_id(hw_event)); } counter->destroy = hw_perf_counter_destroy; @@ -356,7 +356,7 @@ u64 hw_perf_save_disable(void) if (unlikely(!perf_counters_initialized)) return 0; - return x86_pmu->save_disable_all(); + return x86_pmu.save_disable_all(); } /* * Exported because of ACPI idle @@ -396,7 +396,7 @@ void hw_perf_restore(u64 ctrl) if (unlikely(!perf_counters_initialized)) return; - x86_pmu->restore_all(ctrl); + x86_pmu.restore_all(ctrl); } /* * Exported because of ACPI idle @@ -441,7 +441,7 @@ static void hw_perf_enable(int idx, u64 config) if (unlikely(!perf_counters_initialized)) return; - x86_pmu->enable(idx, config); + x86_pmu.enable(idx, config); } static void intel_pmu_disable_counter(int idx, u64 config) @@ -463,7 +463,7 @@ static void hw_perf_disable(int idx, u64 config) if (unlikely(!perf_counters_initialized)) return; - x86_pmu->disable(idx, config); + x86_pmu.disable(idx, config); } static inline void @@ -580,11 +580,11 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) event = hwc->config & 
ARCH_PERFMON_EVENT_MASK; - if (unlikely(event == x86_pmu->event_map(PERF_COUNT_INSTRUCTIONS))) + if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS))) return X86_PMC_IDX_FIXED_INSTRUCTIONS; - if (unlikely(event == x86_pmu->event_map(PERF_COUNT_CPU_CYCLES))) + if (unlikely(event == x86_pmu.event_map(PERF_COUNT_CPU_CYCLES))) return X86_PMC_IDX_FIXED_CPU_CYCLES; - if (unlikely(event == x86_pmu->event_map(PERF_COUNT_BUS_CYCLES))) + if (unlikely(event == x86_pmu.event_map(PERF_COUNT_BUS_CYCLES))) return X86_PMC_IDX_FIXED_BUS_CYCLES; return -1; @@ -628,8 +628,8 @@ try_generic: set_bit(idx, cpuc->used); hwc->idx = idx; } - hwc->config_base = x86_pmu->eventsel; - hwc->counter_base = x86_pmu->perfctr; + hwc->config_base = x86_pmu.eventsel; + hwc->counter_base = x86_pmu.perfctr; } perf_counters_lapic_init(hwc->nmi); @@ -677,8 +677,8 @@ void perf_counter_print_debug(void) pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used); for (idx = 0; idx < nr_counters_generic; idx++) { - rdmsrl(x86_pmu->eventsel + idx, pmc_ctrl); - rdmsrl(x86_pmu->perfctr + idx, pmc_count); + rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); + rdmsrl(x86_pmu.perfctr + idx, pmc_count); prev_left = per_cpu(prev_left[idx], cpu); @@ -819,7 +819,7 @@ void smp_perf_counter_interrupt(struct pt_regs *regs) irq_enter(); apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR); ack_APIC_irq(); - x86_pmu->handle_irq(regs, 0); + x86_pmu.handle_irq(regs, 0); irq_exit(); } @@ -876,7 +876,7 @@ perf_counter_nmi_handler(struct notifier_block *self, regs = args->regs; apic_write(APIC_LVTPC, APIC_DM_NMI); - ret = x86_pmu->handle_irq(regs, 1); + ret = x86_pmu.handle_irq(regs, 1); return ret ? NOTIFY_STOP : NOTIFY_OK; } @@ -940,7 +940,7 @@ static int intel_pmu_init(void) pr_info("... bit width: %d\n", eax.split.bit_width); pr_info("... 
mask length: %d\n", eax.split.mask_length); - x86_pmu = &intel_pmu; + x86_pmu = intel_pmu; nr_counters_generic = eax.split.num_counters; nr_counters_fixed = edx.split.num_counters_fixed; @@ -951,7 +951,7 @@ static int intel_pmu_init(void) static int amd_pmu_init(void) { - x86_pmu = &amd_pmu; + x86_pmu = amd_pmu; nr_counters_generic = 4; nr_counters_fixed = 0; -- cgit v1.2.3 From 0933e5c6a680ba8d8d786a6f7fa377b7ec0d1e49 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:12 +0200 Subject: perf_counter, x86: move counter parameters to struct x86_pmu [ Impact: refactor and generalize code ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-16-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 80 ++++++++++++++++++-------------------- 1 file changed, 37 insertions(+), 43 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 68597d76338..75dbb1f0900 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -24,16 +24,7 @@ #include static bool perf_counters_initialized __read_mostly; - -/* - * Number of (generic) HW counters: - */ -static int nr_counters_generic __read_mostly; static u64 perf_counter_mask __read_mostly; -static u64 counter_value_mask __read_mostly; -static int counter_value_bits __read_mostly; - -static int nr_counters_fixed __read_mostly; struct cpu_hw_counters { struct perf_counter *counters[X86_PMC_IDX_MAX]; @@ -58,6 +49,10 @@ struct x86_pmu { u64 (*event_map)(int); u64 (*raw_event)(u64); int max_events; + int num_counters; + int num_counters_fixed; + int counter_bits; + u64 counter_mask; }; static struct x86_pmu x86_pmu __read_mostly; @@ -183,12 +178,12 @@ static bool reserve_pmc_hardware(void) if (nmi_watchdog == NMI_LOCAL_APIC) disable_lapic_nmi_watchdog(); - for (i = 0; i < nr_counters_generic; i++) { + for (i = 0; i < x86_pmu.num_counters; i++) { if (!reserve_perfctr_nmi(x86_pmu.perfctr + i)) goto perfctr_fail; } - for (i = 0; i < nr_counters_generic; i++) { + for (i = 0; i < x86_pmu.num_counters; i++) { if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) goto eventsel_fail; } @@ -199,7 +194,7 @@ eventsel_fail: for (i--; i >= 0; i--) release_evntsel_nmi(x86_pmu.eventsel + i); - i = nr_counters_generic; + i = x86_pmu.num_counters; perfctr_fail: for (i--; i >= 0; i--) @@ -215,7 +210,7 @@ static void release_pmc_hardware(void) { int i; - for (i = 0; i < nr_counters_generic; i++) { + for (i = 0; i < x86_pmu.num_counters; i++) { release_perfctr_nmi(x86_pmu.perfctr + i); release_evntsel_nmi(x86_pmu.eventsel + i); } @@ -336,7 +331,7 @@ static u64 amd_pmu_save_disable_all(void) */ barrier(); - for (idx = 0; idx < nr_counters_generic; idx++) { + for (idx = 0; idx < x86_pmu.num_counters; idx++) { u64 val; if (!test_bit(idx, cpuc->active_mask)) @@ -378,7 +373,7 @@ static void amd_pmu_restore_all(u64 ctrl) if (!ctrl) return; - for (idx = 0; idx < nr_counters_generic; idx++) { + for (idx = 0; idx < x86_pmu.num_counters; idx++) { u64 val; if (!test_bit(idx, cpuc->active_mask)) @@ -527,7 +522,7 @@ x86_perf_counter_set_period(struct perf_counter *counter, atomic64_set(&hwc->prev_count, (u64)-left); err = checking_wrmsrl(hwc->counter_base + idx, - (u64)(-left) & counter_value_mask); + (u64)(-left) & x86_pmu.counter_mask); } static inline void @@ -621,8 +616,9 @@ static int x86_pmu_enable(struct perf_counter *counter) /* Try to get the previous generic counter again */ if 
(test_and_set_bit(idx, cpuc->used)) { try_generic: - idx = find_first_zero_bit(cpuc->used, nr_counters_generic); - if (idx == nr_counters_generic) + idx = find_first_zero_bit(cpuc->used, + x86_pmu.num_counters); + if (idx == x86_pmu.num_counters) return -EAGAIN; set_bit(idx, cpuc->used); @@ -654,7 +650,7 @@ void perf_counter_print_debug(void) struct cpu_hw_counters *cpuc; int cpu, idx; - if (!nr_counters_generic) + if (!x86_pmu.num_counters) return; local_irq_disable(); @@ -676,7 +672,7 @@ void perf_counter_print_debug(void) } pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used); - for (idx = 0; idx < nr_counters_generic; idx++) { + for (idx = 0; idx < x86_pmu.num_counters; idx++) { rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); rdmsrl(x86_pmu.perfctr + idx, pmc_count); @@ -689,7 +685,7 @@ void perf_counter_print_debug(void) pr_info("CPU#%d: gen-PMC%d left: %016llx\n", cpu, idx, prev_left); } - for (idx = 0; idx < nr_counters_fixed; idx++) { + for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count); pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", @@ -911,6 +907,9 @@ static struct x86_pmu amd_pmu = { .event_map = amd_pmu_event_map, .raw_event = amd_pmu_raw_event, .max_events = ARRAY_SIZE(amd_perfmon_event_map), + .num_counters = 4, + .counter_bits = 48, + .counter_mask = (1ULL << 48) - 1, }; static int intel_pmu_init(void) @@ -941,10 +940,10 @@ static int intel_pmu_init(void) pr_info("... mask length: %d\n", eax.split.mask_length); x86_pmu = intel_pmu; - - nr_counters_generic = eax.split.num_counters; - nr_counters_fixed = edx.split.num_counters_fixed; - counter_value_mask = (1ULL << eax.split.bit_width) - 1; + x86_pmu.num_counters = eax.split.num_counters; + x86_pmu.num_counters_fixed = edx.split.num_counters_fixed; + x86_pmu.counter_bits = eax.split.bit_width; + x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1; return 0; } @@ -952,12 +951,6 @@ static int intel_pmu_init(void) static int amd_pmu_init(void) { x86_pmu = amd_pmu; - - nr_counters_generic = 4; - nr_counters_fixed = 0; - counter_value_mask = 0x0000FFFFFFFFFFFFULL; - counter_value_bits = 48; - pr_info("AMD Performance Monitoring support detected.\n"); return 0; } @@ -979,25 +972,26 @@ void __init init_hw_perf_counters(void) if (err != 0) return; - pr_info("... num counters: %d\n", nr_counters_generic); - if (nr_counters_generic > X86_PMC_MAX_GENERIC) { - nr_counters_generic = X86_PMC_MAX_GENERIC; + pr_info("... num counters: %d\n", x86_pmu.num_counters); + if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { + x86_pmu.num_counters = X86_PMC_MAX_GENERIC; WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", - nr_counters_generic, X86_PMC_MAX_GENERIC); + x86_pmu.num_counters, X86_PMC_MAX_GENERIC); } - perf_counter_mask = (1 << nr_counters_generic) - 1; - perf_max_counters = nr_counters_generic; + perf_counter_mask = (1 << x86_pmu.num_counters) - 1; + perf_max_counters = x86_pmu.num_counters; - pr_info("... value mask: %016Lx\n", counter_value_mask); + pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask); - if (nr_counters_fixed > X86_PMC_MAX_FIXED) { - nr_counters_fixed = X86_PMC_MAX_FIXED; + if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) { + x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED; WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!", - nr_counters_fixed, X86_PMC_MAX_FIXED); + x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED); } - pr_info("... fixed counters: %d\n", nr_counters_fixed); + pr_info("... 
fixed counters: %d\n", x86_pmu.num_counters_fixed); - perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED; + perf_counter_mask |= + ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; pr_info("... counter mask: %016Lx\n", perf_counter_mask); perf_counters_initialized = true; -- cgit v1.2.3 From faa28ae018ed004a22aa4a7704e04ccdde4a941e Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:13 +0200 Subject: perf_counter, x86: make pmu version generic This makes the use of the version variable generic. Also, some debug messages have been generalized. [ Impact: refactor and generalize code ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-17-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 75dbb1f0900..15d2c03e16f 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -39,6 +39,8 @@ struct cpu_hw_counters { * struct x86_pmu - generic x86 pmu */ struct x86_pmu { + const char *name; + int version; int (*handle_irq)(struct pt_regs *, int); u64 (*save_disable_all)(void); void (*restore_all)(u64); @@ -61,8 +63,6 @@ static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, }; -static __read_mostly int intel_perfmon_version; - /* * Intel PerfMon v3. Used on Core2 and later. */ @@ -658,7 +658,7 @@ void perf_counter_print_debug(void) cpu = smp_processor_id(); cpuc = &per_cpu(cpu_hw_counters, cpu); - if (intel_perfmon_version >= 2) { + if (x86_pmu.version >= 2) { rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); @@ -884,6 +884,7 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = { }; static struct x86_pmu intel_pmu = { + .name = "Intel", .handle_irq = intel_pmu_handle_irq, .save_disable_all = intel_pmu_save_disable_all, .restore_all = intel_pmu_restore_all, @@ -897,6 +898,7 @@ static struct x86_pmu intel_pmu = { }; static struct x86_pmu amd_pmu = { + .name = "AMD", .handle_irq = amd_pmu_handle_irq, .save_disable_all = amd_pmu_save_disable_all, .restore_all = amd_pmu_restore_all, @@ -918,6 +920,7 @@ static int intel_pmu_init(void) union cpuid10_eax eax; unsigned int unused; unsigned int ebx; + int version; if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) return -ENODEV; @@ -930,16 +933,12 @@ static int intel_pmu_init(void) if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) return -ENODEV; - intel_perfmon_version = eax.split.version_id; - if (intel_perfmon_version < 2) + version = eax.split.version_id; + if (version < 2) return -ENODEV; - pr_info("Intel Performance Monitoring support detected.\n"); - pr_info("... version: %d\n", intel_perfmon_version); - pr_info("... bit width: %d\n", eax.split.bit_width); - pr_info("... 
mask length: %d\n", eax.split.mask_length); - x86_pmu = intel_pmu; + x86_pmu.version = version; x86_pmu.num_counters = eax.split.num_counters; x86_pmu.num_counters_fixed = edx.split.num_counters_fixed; x86_pmu.counter_bits = eax.split.bit_width; @@ -951,7 +950,6 @@ static int intel_pmu_init(void) static int amd_pmu_init(void) { x86_pmu = amd_pmu; - pr_info("AMD Performance Monitoring support detected.\n"); return 0; } @@ -972,6 +970,10 @@ void __init init_hw_perf_counters(void) if (err != 0) return; + pr_info("%s Performance Monitoring support detected.\n", x86_pmu.name); + pr_info("... version: %d\n", x86_pmu.version); + pr_info("... bit width: %d\n", x86_pmu.counter_bits); + pr_info("... num counters: %d\n", x86_pmu.num_counters); if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { x86_pmu.num_counters = X86_PMC_MAX_GENERIC; -- cgit v1.2.3 From bb775fc2d1dcd1aa6eafde37a8289ba2d80783aa Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:14 +0200 Subject: perf_counter, x86: make x86_pmu_read() static inline [ Impact: micro-optimization ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-18-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 15d2c03e16f..3f3ae477a7d 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -1002,7 +1002,7 @@ void __init init_hw_perf_counters(void) register_die_notifier(&perf_counter_nmi_notifier); } -static void x86_pmu_read(struct perf_counter *counter) +static inline void x86_pmu_read(struct perf_counter *counter) { x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); } -- cgit v1.2.3 From 93904966934193204ad08e951f806d5631c29eb3 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:15 +0200 Subject: perf_counter, x86: rename cpuc->active_mask This is to have a consistent naming scheme with cpuc->used. 
[ Impact: cleanup ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-19-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 3f3ae477a7d..9ec51a662db 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -29,9 +29,9 @@ static u64 perf_counter_mask __read_mostly; struct cpu_hw_counters { struct perf_counter *counters[X86_PMC_IDX_MAX]; unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + unsigned long active[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; unsigned long interrupts; u64 throttle_ctrl; - unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; int enabled; }; @@ -334,7 +334,7 @@ static u64 amd_pmu_save_disable_all(void) for (idx = 0; idx < x86_pmu.num_counters; idx++) { u64 val; - if (!test_bit(idx, cpuc->active_mask)) + if (!test_bit(idx, cpuc->active)) continue; rdmsrl(MSR_K7_EVNTSEL0 + idx, val); if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE)) @@ -376,7 +376,7 @@ static void amd_pmu_restore_all(u64 ctrl) for (idx = 0; idx < x86_pmu.num_counters; idx++) { u64 val; - if (!test_bit(idx, cpuc->active_mask)) + if (!test_bit(idx, cpuc->active)) continue; rdmsrl(MSR_K7_EVNTSEL0 + idx, val); if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) @@ -424,7 +424,7 @@ static void amd_pmu_enable_counter(int idx, u64 config) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - set_bit(idx, cpuc->active_mask); + set_bit(idx, cpuc->active); if (cpuc->enabled) config |= ARCH_PERFMON_EVENTSEL0_ENABLE; @@ -448,7 +448,7 @@ static void amd_pmu_disable_counter(int idx, u64 config) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - clear_bit(idx, cpuc->active_mask); + clear_bit(idx, cpuc->active); wrmsrl(MSR_K7_EVNTSEL0 + idx, config); } -- cgit v1.2.3 From 095342389e2ed8deed07b3076f990260ce3c7c9f Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:16 +0200 Subject: perf_counter, x86: generic use of cpuc->active cpuc->active will now be used to indicate an enabled counter, which also implies valid pointers in cpuc->counters[]. In contrast, cpuc->used only locks the counter; it can still be uninitialized.
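For illustration, a simplified sketch of the resulting two-bitmap invariant: `used' only reserves a slot, while `active' additionally guarantees that counters[idx] is valid, so interrupt-time code tests `active'. The real code must also order these steps against NMI handlers (see the ordering comments in the diff below); that is omitted in this hypothetical sketch.

struct slots {
        unsigned long used;     /* slot reserved; may still be uninitialized */
        unsigned long active;   /* slot enabled; counters[idx] is valid */
        void *counters[64];
};

static void slot_enable(struct slots *s, int idx, void *counter)
{
        s->used |= 1UL << idx;          /* 1. reserve the slot */
        s->counters[idx] = counter;     /* 2. initialize it */
        s->active |= 1UL << idx;        /* 3. publish: handlers may use it now */
}

static void slot_disable(struct slots *s, int idx)
{
        s->active &= ~(1UL << idx);     /* 1. handlers must back off first */
        s->counters[idx] = 0;           /* 2. tear down */
        s->used &= ~(1UL << idx);       /* 3. slot may be reused */
}

int main(void)
{
        struct slots s = { 0 };
        int dummy;

        slot_enable(&s, 3, &dummy);
        slot_disable(&s, 3);
        return 0;
}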
[ Impact: refactor and generalize code ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-20-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 9ec51a662db..f7fd4a35515 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -424,7 +424,6 @@ static void amd_pmu_enable_counter(int idx, u64 config) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - set_bit(idx, cpuc->active); if (cpuc->enabled) config |= ARCH_PERFMON_EVENTSEL0_ENABLE; @@ -446,9 +445,6 @@ static void intel_pmu_disable_counter(int idx, u64 config) static void amd_pmu_disable_counter(int idx, u64 config) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - - clear_bit(idx, cpuc->active); wrmsrl(MSR_K7_EVNTSEL0 + idx, config); } @@ -633,10 +629,7 @@ try_generic: __x86_pmu_disable(counter, hwc, idx); cpuc->counters[idx] = counter; - /* - * Make it visible before enabling the hw: - */ - barrier(); + set_bit(idx, cpuc->active); x86_perf_counter_set_period(counter, hwc, idx); __x86_pmu_enable(counter, hwc, idx); @@ -700,10 +693,13 @@ static void x86_pmu_disable(struct perf_counter *counter) struct hw_perf_counter *hwc = &counter->hw; unsigned int idx = hwc->idx; + /* + * Must be done before we disable, otherwise the nmi handler + * could reenable again: + */ + clear_bit(idx, cpuc->active); __x86_pmu_disable(counter, hwc, idx); - clear_bit(idx, cpuc->used); - cpuc->counters[idx] = NULL; /* * Make sure the cleared pointer becomes visible before we * (potentially) free the counter: @@ -715,6 +711,8 @@ static void x86_pmu_disable(struct perf_counter *counter) * that we are disabling: */ x86_perf_counter_update(counter, hwc, idx); + cpuc->counters[idx] = NULL; + clear_bit(idx, cpuc->used); } /* @@ -763,7 +761,7 @@ again: struct perf_counter *counter = cpuc->counters[bit]; clear_bit(bit, (unsigned long *) &status); - if (!counter) + if (!test_bit(bit, cpuc->active)) continue; intel_pmu_save_and_restart(counter); -- cgit v1.2.3 From 6f00cada07bb5da7f751929d3173494dcc5446cc Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:17 +0200 Subject: perf_counter, x86: consistent use of type int for counter index The counter index is sometimes typed as unsigned int. This patch changes it to use int consistently.
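One reason a signed index fits here, sketched with hypothetical code: the slot-allocation helpers use a negative value as a "no slot" result (the real fixed_mode_idx() returns -1), which an unsigned index type could not carry.

static int pick_free_idx(unsigned long used_mask, int num_counters)
{
        int idx;

        for (idx = 0; idx < num_counters; idx++) {
                if (!(used_mask & (1UL << idx)))
                        return idx;
        }
        return -1;      /* no free slot: needs a signed type */
}

int main(void)
{
        return pick_free_idx(~0UL, 4) == -1 ? 0 : 1;
}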
[ Impact: cleanup ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-21-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 8 ++++---- include/linux/perf_counter.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index f7fd4a35515..d8beebeb270 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -459,7 +459,7 @@ static void hw_perf_disable(int idx, u64 config) static inline void __pmc_fixed_disable(struct perf_counter *counter, - struct hw_perf_counter *hwc, unsigned int __idx) + struct hw_perf_counter *hwc, int __idx) { int idx = __idx - X86_PMC_IDX_FIXED; u64 ctrl_val, mask; @@ -474,7 +474,7 @@ __pmc_fixed_disable(struct perf_counter *counter, static inline void __x86_pmu_disable(struct perf_counter *counter, - struct hw_perf_counter *hwc, unsigned int idx) + struct hw_perf_counter *hwc, int idx) { if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) __pmc_fixed_disable(counter, hwc, idx); @@ -523,7 +523,7 @@ x86_perf_counter_set_period(struct perf_counter *counter, static inline void __pmc_fixed_enable(struct perf_counter *counter, - struct hw_perf_counter *hwc, unsigned int __idx) + struct hw_perf_counter *hwc, int __idx) { int idx = __idx - X86_PMC_IDX_FIXED; u64 ctrl_val, bits, mask; @@ -691,7 +691,7 @@ static void x86_pmu_disable(struct perf_counter *counter) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); struct hw_perf_counter *hwc = &counter->hw; - unsigned int idx = hwc->idx; + int idx = hwc->idx; /* * Must be done before we disable, otherwise the nmi handler diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index c3db52dc876..41aed427005 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -318,7 +318,7 @@ struct hw_perf_counter { unsigned long config_base; unsigned long counter_base; int nmi; - unsigned int idx; + int idx; }; union { /* software */ atomic64_t count; -- cgit v1.2.3 From 7c90cc45f89af4dd4617f97d452740ad95b800d5 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:18 +0200 Subject: perf_counter, x86: rework counter enable functions There is vendor-specific code in generic x86 code, and there is vendor-specific code that could be generic. This patch introduces x86_pmu_enable_counter() for x86 generic code. Fixed counter code for Intel is moved to Intel-only functions. In the end, checks and calls via function pointers were reduced to the necessary minimum. Also, the internal function interface changed.
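Condensed, the enable path the diff below arrives at looks like this (a simplified sketch; the return value of checking_wrmsrl() is ignored here, as in the patch):

	/* generic, shared by both vendors: */
	static inline void
	x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
	{
		checking_wrmsrl(hwc->config_base + idx,
				hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
	}

	/* vendor entry point behind x86_pmu.enable: */
	static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
	{
		if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
			intel_pmu_enable_fixed(hwc, idx);	/* Intel-only fixed PMCs */
		else
			x86_pmu_enable_counter(hwc, idx);
	}

The AMD variant calls x86_pmu_enable_counter() only while cpuc->enabled is set, and writes the disabled config otherwise.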
[ Impact: refactor and generalize code ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-22-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 52 ++++++++++++++++++-------------------- 1 file changed, 24 insertions(+), 28 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index d8beebeb270..ae55933ce79 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -44,7 +44,7 @@ struct x86_pmu { int (*handle_irq)(struct pt_regs *, int); u64 (*save_disable_all)(void); void (*restore_all)(u64); - void (*enable)(int, u64); + void (*enable)(struct hw_perf_counter *, int); void (*disable)(int, u64); unsigned eventsel; unsigned perfctr; @@ -414,28 +414,15 @@ static inline void intel_pmu_ack_status(u64 ack) wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); } -static void intel_pmu_enable_counter(int idx, u64 config) +static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) { - wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, - config | ARCH_PERFMON_EVENTSEL0_ENABLE); -} - -static void amd_pmu_enable_counter(int idx, u64 config) -{ - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - - if (cpuc->enabled) - config |= ARCH_PERFMON_EVENTSEL0_ENABLE; - - wrmsrl(MSR_K7_EVNTSEL0 + idx, config); -} + int err; -static void hw_perf_enable(int idx, u64 config) -{ if (unlikely(!perf_counters_initialized)) return; - x86_pmu.enable(idx, config); + err = checking_wrmsrl(hwc->config_base + idx, + hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE); } static void intel_pmu_disable_counter(int idx, u64 config) @@ -522,8 +509,7 @@ x86_perf_counter_set_period(struct perf_counter *counter, } static inline void -__pmc_fixed_enable(struct perf_counter *counter, - struct hw_perf_counter *hwc, int __idx) +intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx) { int idx = __idx - X86_PMC_IDX_FIXED; u64 ctrl_val, bits, mask; @@ -548,14 +534,24 @@ __pmc_fixed_enable(struct perf_counter *counter, err = checking_wrmsrl(hwc->config_base, ctrl_val); } -static void -__x86_pmu_enable(struct perf_counter *counter, - struct hw_perf_counter *hwc, int idx) +static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) { - if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) - __pmc_fixed_enable(counter, hwc, idx); + if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { + intel_pmu_enable_fixed(hwc, idx); + return; + } + + x86_pmu_enable_counter(hwc, idx); +} + +static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) +{ + struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + + if (cpuc->enabled) + x86_pmu_enable_counter(hwc, idx); else - hw_perf_enable(idx, hwc->config); + amd_pmu_disable_counter(idx, hwc->config); } static int @@ -632,7 +628,7 @@ try_generic: set_bit(idx, cpuc->active); x86_perf_counter_set_period(counter, hwc, idx); - __x86_pmu_enable(counter, hwc, idx); + x86_pmu.enable(hwc, idx); return 0; } @@ -728,7 +724,7 @@ static void intel_pmu_save_and_restart(struct perf_counter *counter) x86_perf_counter_set_period(counter, hwc, idx); if (counter->state == PERF_COUNTER_STATE_ACTIVE) - __x86_pmu_enable(counter, hwc, idx); + intel_pmu_enable_counter(hwc, idx); } /* -- cgit v1.2.3 From d43698918bd46c71d494555fb92195fbea1fcb6c Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:19 +0200 Subject: perf_counter, x86: rework 
counter disable functions As for the enable function, this patch reworks the disable functions and introduces x86_pmu_disable_counter(). The internal function i/f in struct x86_pmu changed too. [ Impact: refactor and generalize code ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-23-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 48 ++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 25 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index ae55933ce79..df9012bbd21 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -45,7 +45,7 @@ struct x86_pmu { u64 (*save_disable_all)(void); void (*restore_all)(u64); void (*enable)(struct hw_perf_counter *, int); - void (*disable)(int, u64); + void (*disable)(struct hw_perf_counter *, int); unsigned eventsel; unsigned perfctr; u64 (*event_map)(int); @@ -425,28 +425,19 @@ static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE); } -static void intel_pmu_disable_counter(int idx, u64 config) +static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) { - wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config); -} - -static void amd_pmu_disable_counter(int idx, u64 config) -{ - wrmsrl(MSR_K7_EVNTSEL0 + idx, config); - -} + int err; -static void hw_perf_disable(int idx, u64 config) -{ if (unlikely(!perf_counters_initialized)) return; - x86_pmu.disable(idx, config); + err = checking_wrmsrl(hwc->config_base + idx, + hwc->config); } static inline void -__pmc_fixed_disable(struct perf_counter *counter, - struct hw_perf_counter *hwc, int __idx) +intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx) { int idx = __idx - X86_PMC_IDX_FIXED; u64 ctrl_val, mask; @@ -460,13 +451,20 @@ __pmc_fixed_disable(struct perf_counter *counter, } static inline void -__x86_pmu_disable(struct perf_counter *counter, - struct hw_perf_counter *hwc, int idx) +intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) { - if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) - __pmc_fixed_disable(counter, hwc, idx); - else - hw_perf_disable(idx, hwc->config); + if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { + intel_pmu_disable_fixed(hwc, idx); + return; + } + + x86_pmu_disable_counter(hwc, idx); +} + +static inline void +amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) +{ + x86_pmu_disable_counter(hwc, idx); } static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]); @@ -551,7 +549,7 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) if (cpuc->enabled) x86_pmu_enable_counter(hwc, idx); else - amd_pmu_disable_counter(idx, hwc->config); + x86_pmu_disable_counter(hwc, idx); } static int @@ -622,7 +620,7 @@ try_generic: perf_counters_lapic_init(hwc->nmi); - __x86_pmu_disable(counter, hwc, idx); + x86_pmu.disable(hwc, idx); cpuc->counters[idx] = counter; set_bit(idx, cpuc->active); @@ -694,7 +692,7 @@ static void x86_pmu_disable(struct perf_counter *counter) * could reenable again: */ clear_bit(idx, cpuc->active); - __x86_pmu_disable(counter, hwc, idx); + x86_pmu.disable(hwc, idx); /* * Make sure the cleared pointer becomes visible before we @@ -762,7 +760,7 @@ again: intel_pmu_save_and_restart(counter); if (perf_counter_overflow(counter, nmi, regs, 0)) - __x86_pmu_disable(counter, &counter->hw, bit); 
+ intel_pmu_disable_counter(&counter->hw, bit); } intel_pmu_ack_status(ack); -- cgit v1.2.3 From 85cf9dba92152bb4edec118b2f4f0be1ae7fdcab Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:20 +0200 Subject: perf_counter, x86: change and remove pmu initialization checks Some functions are only called if the pmu was properly initialized. Those initialization checks can be removed. The way to check initialization changed too. Now, the pointer to the interrupt handler is checked. If it exists, the pmu is initialized. This also removes a static variable and uses struct x86_pmu as the only data source for the check. [ Impact: simplify code ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-24-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 34 +++++++++++++--------------------- 1 file changed, 13 insertions(+), 21 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index df9012bbd21..2d3681bbb52 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -23,7 +23,6 @@ #include #include -static bool perf_counters_initialized __read_mostly; static u64 perf_counter_mask __read_mostly; struct cpu_hw_counters { @@ -227,6 +226,11 @@ static void hw_perf_counter_destroy(struct perf_counter *counter) } } +static inline int x86_pmu_initialized(void) +{ + return x86_pmu.handle_irq != NULL; +} + /* * Setup the hardware configuration for a given hw_event_type */ @@ -240,8 +244,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter) if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) return -ENOSYS; - if (unlikely(!perf_counters_initialized)) - return -EINVAL; + if (!x86_pmu_initialized()) + return -ENODEV; err = 0; if (atomic_inc_not_zero(&num_counters)) { @@ -348,9 +352,8 @@ static u64 amd_pmu_save_disable_all(void) u64 hw_perf_save_disable(void) { - if (unlikely(!perf_counters_initialized)) + if (!x86_pmu_initialized()) return 0; - return x86_pmu.save_disable_all(); } /* @@ -388,9 +391,8 @@ static void amd_pmu_restore_all(u64 ctrl) void hw_perf_restore(u64 ctrl) { - if (unlikely(!perf_counters_initialized)) + if (!x86_pmu_initialized()) return; - x86_pmu.restore_all(ctrl); } /* @@ -402,8 +404,6 @@ static inline u64 intel_pmu_get_status(u64 mask) { u64 status; - if (unlikely(!perf_counters_initialized)) - return 0; rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); return status; @@ -417,10 +417,6 @@ static inline void intel_pmu_ack_status(u64 ack) static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) { int err; - - if (unlikely(!perf_counters_initialized)) - return; - err = checking_wrmsrl(hwc->config_base + idx, hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE); } @@ -428,10 +424,6 @@ static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) { int err; - - if (unlikely(!perf_counters_initialized)) - return; - err = checking_wrmsrl(hwc->config_base + idx, hwc->config); } @@ -787,10 +779,10 @@ void perf_counter_unthrottle(void) { struct cpu_hw_counters *cpuc; - if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) + if (!x86_pmu_initialized()) return; - if (unlikely(!perf_counters_initialized)) + if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) return; cpuc = &__get_cpu_var(cpu_hw_counters); @@ -829,8 +821,9 @@ void perf_counters_lapic_init(int nmi) { u32 apic_val; -
if (!perf_counters_initialized) + if (!x86_pmu_initialized()) return; + /* * Enable the performance counter vector in the APIC LVT: */ @@ -988,7 +981,6 @@ void __init init_hw_perf_counters(void) ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; pr_info("... counter mask: %016Lx\n", perf_counter_mask); - perf_counters_initialized = true; perf_counters_lapic_init(0); register_die_notifier(&perf_counter_nmi_notifier); -- cgit v1.2.3 From a29aa8a7ff93e4196d558036928597e68337dd8d Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:21 +0200 Subject: perf_counter, x86: implement the interrupt handler for AMD cpus This patch implements the interrupt handler for AMD performance counters. In contrast to the Intel pmu, there is no single status register, and there are no fixed counters either. This makes the handler very different, so it is useful to make the handler vendor-specific. To check whether a counter has overflowed, the upper bit of the counter is checked. Only counters where the active bit is set are checked. With this patch, throttling is enabled for AMD performance counters. This patch also reenables Linux performance counters on AMD cpus. [ Impact: re-enable perfcounters on AMD CPUs ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-25-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 45 +++++++++++++++++++++++++++++++------- 1 file changed, 37 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 2d3681bbb52..f4d59d4cf3f 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -240,10 +240,6 @@ static int __hw_perf_counter_init(struct perf_counter *counter) struct hw_perf_counter *hwc = &counter->hw; int err; - /* disable temporarily */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) - return -ENOSYS; - if (!x86_pmu_initialized()) return -ENODEV; @@ -773,7 +769,43 @@ out: return ret; } -static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) { return 0; } +static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) +{ + int cpu = smp_processor_id(); + struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu); + u64 val; + int handled = 0; + struct perf_counter *counter; + struct hw_perf_counter *hwc; + int idx; + + ++cpuc->interrupts; + for (idx = 0; idx < x86_pmu.num_counters; idx++) { + if (!test_bit(idx, cpuc->active)) + continue; + counter = cpuc->counters[idx]; + hwc = &counter->hw; + x86_perf_counter_update(counter, hwc, idx); + val = atomic64_read(&hwc->prev_count); + if (val & (1ULL << (x86_pmu.counter_bits - 1))) + continue; + /* counter overflow */ + x86_perf_counter_set_period(counter, hwc, idx); + handled = 1; + inc_irq_stat(apic_perf_irqs); + if (perf_counter_overflow(counter, nmi, regs, 0)) + amd_pmu_disable_counter(hwc, idx); + else if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) + /* + * do not reenable when throttled, but reload + * the register + */ + amd_pmu_disable_counter(hwc, idx); + else if (counter->state == PERF_COUNTER_STATE_ACTIVE) + amd_pmu_enable_counter(hwc, idx); + } + return handled; +} void perf_counter_unthrottle(void) { @@ -782,9 +814,6 @@ void perf_counter_unthrottle(void) if (!x86_pmu_initialized()) return; - if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) - return; - cpuc = &__get_cpu_var(cpu_hw_counters); if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) { if (printk_ratelimit()) -- cgit
v1.2.3 From 4b7bfd0d276da3a006d37e85d3cf900d7a14ae2a Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:22 +0200 Subject: perf_counter, x86: return raw count with x86_perf_counter_update() To check on AMD cpus whether a counter has overflowed, the upper bit of the raw counter value must be checked. This value is already internally available in x86_perf_counter_update(). Now, the value is returned so that it can be used directly to check for overflows. [ Impact: micro-optimization ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-26-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index f4d59d4cf3f..a8a53abd706 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -132,7 +132,7 @@ static u64 amd_pmu_raw_event(u64 event) * Can only be executed on the CPU where the counter is active. * Returns the delta events processed. */ -static void +static u64 x86_perf_counter_update(struct perf_counter *counter, struct hw_perf_counter *hwc, int idx) { @@ -165,6 +165,8 @@ again: atomic64_add(delta, &counter->count); atomic64_sub(delta, &hwc->period_left); + + return new_raw_count; } static atomic_t num_counters; @@ -785,8 +787,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) continue; counter = cpuc->counters[idx]; hwc = &counter->hw; - x86_perf_counter_update(counter, hwc, idx); - val = atomic64_read(&hwc->prev_count); + val = x86_perf_counter_update(counter, hwc, idx); if (val & (1ULL << (x86_pmu.counter_bits - 1))) continue; /* counter overflow */ -- cgit v1.2.3 From c619b8ffb1cec6a431687a35695dc6fd292a79e6 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:23 +0200 Subject: perf_counter, x86: introduce max_period variable The counter period that can be programmed differs between x86 pmus. This introduces a max_period value and lets the generic implementation check the maximum period for all models.
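Condensed from the diff below, the generic clamp and the per-model limits:

	/* clamp the requested sampling period to what the model can program: */
	if ((s64)hwc->irq_period <= 0 || hwc->irq_period > x86_pmu.max_period)
		hwc->irq_period = x86_pmu.max_period;

	/*
	 * Intel: PMCs cannot be written sanely above 32 bit width
	 *        -> .max_period = (1ULL << 31) - 1
	 * AMD:   48 bit counters, highest bit used for overflow detection
	 *        -> .max_period = (1ULL << 47) - 1
	 */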
[ Impact: generalize code ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-27-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index a8a53abd706..4b8715b34f8 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -54,6 +54,7 @@ struct x86_pmu { int num_counters_fixed; int counter_bits; u64 counter_mask; + u64 max_period; }; static struct x86_pmu x86_pmu __read_mostly; @@ -279,14 +280,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter) hwc->nmi = 1; hwc->irq_period = hw_event->irq_period; - /* - * Intel PMCs cannot be accessed sanely above 32 bit width, - * so we install an artificial 1<<31 period regardless of - * the generic counter period: - */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) - if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF) - hwc->irq_period = 0x7FFFFFFF; + if ((s64)hwc->irq_period <= 0 || hwc->irq_period > x86_pmu.max_period) + hwc->irq_period = x86_pmu.max_period; atomic64_set(&hwc->period_left, hwc->irq_period); @@ -910,6 +905,12 @@ static struct x86_pmu intel_pmu = { .event_map = intel_pmu_event_map, .raw_event = intel_pmu_raw_event, .max_events = ARRAY_SIZE(intel_perfmon_event_map), + /* + * Intel PMCs cannot be accessed sanely above 32 bit width, + * so we install an artificial 1<<31 period regardless of + * the generic counter period: + */ + .max_period = (1ULL << 31) - 1, }; static struct x86_pmu amd_pmu = { @@ -927,6 +928,8 @@ static struct x86_pmu amd_pmu = { .num_counters = 4, .counter_bits = 48, .counter_mask = (1ULL << 48) - 1, + /* use highest bit to detect overflow */ + .max_period = (1ULL << 47) - 1, }; static int intel_pmu_init(void) @@ -999,6 +1002,7 @@ void __init init_hw_perf_counters(void) perf_max_counters = x86_pmu.num_counters; pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask); + pr_info("... max period: %016Lx\n", x86_pmu.max_period); if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) { x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED; -- cgit v1.2.3 From ef7b3e09ffdcd5200aea9523f6b56d331d1c4fc0 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:24 +0200 Subject: perf_counter, x86: remove vendor check in fixed_mode_idx() The function fixed_mode_idx() is used generically. Now it checks the num_counters_fixed value instead of the vendor to decide if fixed counters are present. 
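The before/after shape, side by side (condensed from the one-line diff below): the vendor test only encodes today's CPUs, while the capability test covers any pmu, present or future, that reports no fixed counters:

	/* before: vendor check */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return -1;

	/* after: capability check (num_counters_fixed stays 0 on AMD) */
	if (!x86_pmu.num_counters_fixed)
		return -1;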
[ Impact: generalize code ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-28-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 4b8715b34f8..d1c8036dcbd 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -542,7 +542,7 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) { unsigned int event; - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + if (!x86_pmu.num_counters_fixed) return -1; if (unlikely(hwc->nmi)) -- cgit v1.2.3 From 19d84dab55a383d75c885b5c1a618f5ead96f2f6 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:25 +0200 Subject: perf_counter, x86: remove unused function argument in intel_pmu_get_status() The mask argument is unused and thus can be removed. [ Impact: cleanup ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-29-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index d1c8036dcbd..856b0b85219 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -393,7 +393,7 @@ void hw_perf_restore(u64 ctrl) */ EXPORT_SYMBOL_GPL(hw_perf_restore); -static inline u64 intel_pmu_get_status(u64 mask) +static inline u64 intel_pmu_get_status(void) { u64 status; @@ -728,7 +728,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi) cpuc->throttle_ctrl = intel_pmu_save_disable_all(); - status = intel_pmu_get_status(cpuc->throttle_ctrl); + status = intel_pmu_get_status(); if (!status) goto out; @@ -753,7 +753,7 @@ again: /* * Repeat if there is more work to be done: */ - status = intel_pmu_get_status(cpuc->throttle_ctrl); + status = intel_pmu_get_status(); if (status) goto again; out: -- cgit v1.2.3 From 38105f0234d4795c77c7c6845916caf3a395c451 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 12:47:26 +0200 Subject: perf_counter: update 'perf top' documentation The documentation about the perf-top build became outdated when perfstat was implemented. This updates it.
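For reference, the updated build instruction shown in the diff below is simply:

	make -C Documentation/perf_counter/

replacing the stale per-file cc invocation that predates the perfstat work.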
[ Impact: update documentation ] Signed-off-by: Robert Richter Cc: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <1241002046-8832-30-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-top.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 6a276d2b2bb..8d28864a20c 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -3,7 +3,7 @@ Build with: - cc -O6 -Wall -c -o kerneltop.o kerneltop.c -lrt + make -C Documentation/perf_counter/ Sample output: -- cgit v1.2.3 From 98144511427c192e4249ff66a3f9debc55c59411 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 29 Apr 2009 14:52:50 +0200 Subject: perf_counter: add/update copyrights Acked-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 9 +++++---- kernel/perf_counter.c | 6 +++--- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 856b0b85219..47e563bfd4c 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -1,10 +1,11 @@ /* * Performance counter x86 architecture code * - * Copyright(C) 2008 Thomas Gleixner - * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar - * Copyright(C) 2009 Jaswinder Singh Rajput - * Copyright(C) 2009 Advanced Micro Devices, Inc., Robert Richter + * Copyright (C) 2008 Thomas Gleixner + * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2009 Jaswinder Singh Rajput + * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra * * For licencing details see kernel-base/COPYING */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 582108addef..a95a171e608 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1,9 +1,9 @@ /* * Performance counter core code * - * Copyright(C) 2008 Thomas Gleixner - * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar - * + * Copyright (C) 2008 Thomas Gleixner + * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra * * For licensing details see kernel-base/COPYING */ -- cgit v1.2.3 From ab7ef2e50a557af92f4f90689f51fadadafc16b2 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 29 Apr 2009 22:38:51 +1000 Subject: perf_counter: powerpc: allow use of limited-function counters POWER5+ and POWER6 have two hardware counters with limited functionality: PMC5 counts instructions completed in run state and PMC6 counts cycles in run state. (Run state is the state when a hardware RUN bit is 1; the idle task clears RUN while waiting for work to do and sets it when there is work to do.) These counters can't be written to by the kernel, can't generate interrupts, and don't obey the freeze conditions. That means we can only use them for per-task counters (where we know we'll always be in run state; we can't put a per-task counter on an idle task), and only if we don't want interrupts and we do want to count in all processor modes. 
Obviously some counters can't go on a limited hardware counter, but there are also situations where we can only put a counter on a limited hardware counter - if there are already counters on that exclude some processor modes and we want to put on a per-task cycle or instruction counter that doesn't exclude any processor mode, it could go on if it can use a limited hardware counter. To keep track of these constraints, this adds a flags argument to the processor-specific get_alternatives() functions, with three bits defined: one to say that we can accept alternative event codes that go on limited counters, one to say we only want alternatives on limited counters, and one to say that this is a per-task counter and therefore events that are gated by run state are equivalent to those that aren't (e.g. a "cycles" event is equivalent to a "cycles in run state" event). These flags are computed for each counter and stored in the counter->hw.counter_base field (slightly wonky name for what it does, but it was an existing unused field). Since the limited counters don't freeze when we freeze the other counters, we need some special handling to avoid getting skew between things counted on the limited counters and those counted on normal counters. To minimize this skew, if we are using any limited counters, we read PMC5 and PMC6 immediately after setting and clearing the freeze bit. This is done in a single asm in the new write_mmcr0() function. The code here is specific to PMC5 and PMC6 being the limited hardware counters. Being more general (e.g. having a bitmap of limited hardware counter numbers) would have meant more complex code to read the limited counters when freezing and unfreezing the normal counters, with conditional branches, which would have increased the skew. Since it isn't necessary for the code to be more general at this stage, it isn't. This also extends the back-ends for POWER5+ and POWER6 to be able to handle up to 6 counters rather than the 4 they previously handled. 
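Condensed from the patch below: the three flag bits and the core eligibility test (may_use_limited_pmc() is a hypothetical name for the first half of the patch's can_go_on_limited_pmc()):

	#define PPMU_LIMITED_PMC_OK	1 /* can put this on a limited PMC */
	#define PPMU_LIMITED_PMC_REQD	2 /* have to put this on a limited PMC */
	#define PPMU_ONLY_COUNT_RUN	4 /* only counting in run state */

	/* a counter may sit on PMC5/6 only if it needs no interrupts
	 * and excludes no processor mode: */
	static int may_use_limited_pmc(struct perf_counter *counter)
	{
		return !counter->hw_event.exclude_user &&
		       !counter->hw_event.exclude_kernel &&
		       !counter->hw_event.exclude_hv &&
		       !counter->hw_event.irq_period;
	}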
Signed-off-by: Paul Mackerras Acked-by: Peter Zijlstra Cc: Robert Richter LKML-Reference: <18936.19035.163066.892208@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/perf_counter.h | 13 +- arch/powerpc/kernel/perf_counter.c | 297 ++++++++++++++++++++++++++++---- arch/powerpc/kernel/power4-pmu.c | 3 +- arch/powerpc/kernel/power5+-pmu.c | 117 +++++++++++-- arch/powerpc/kernel/power5-pmu.c | 3 +- arch/powerpc/kernel/power6-pmu.c | 119 +++++++++++-- arch/powerpc/kernel/ppc970-pmu.c | 3 +- 7 files changed, 479 insertions(+), 76 deletions(-) diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index 9d7ff6d7fb5..56d66c38143 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -12,6 +12,7 @@ #define MAX_HWCOUNTERS 8 #define MAX_EVENT_ALTERNATIVES 8 +#define MAX_LIMITED_HWCOUNTERS 2 /* * This struct provides the constants and functions needed to @@ -25,14 +26,24 @@ struct power_pmu { int (*compute_mmcr)(unsigned int events[], int n_ev, unsigned int hwc[], u64 mmcr[]); int (*get_constraint)(unsigned int event, u64 *mskp, u64 *valp); - int (*get_alternatives)(unsigned int event, unsigned int alt[]); + int (*get_alternatives)(unsigned int event, unsigned int flags, + unsigned int alt[]); void (*disable_pmc)(unsigned int pmc, u64 mmcr[]); + int (*limited_pmc_event)(unsigned int event); + int limited_pmc5_6; /* PMC5 and PMC6 have limited function */ int n_generic; int *generic_events; }; extern struct power_pmu *ppmu; +/* + * Values for flags to get_alternatives() + */ +#define PPMU_LIMITED_PMC_OK 1 /* can put this on a limited PMC */ +#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */ +#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */ + /* * The power_pmu.get_constraint function returns a 64-bit value and * a 64-bit mask that express the constraints between this event and diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index d9bbe5efc64..15cdc8e6722 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -23,10 +23,14 @@ struct cpu_hw_counters { int n_percpu; int disabled; int n_added; + int n_limited; + u8 pmcs_enabled; struct perf_counter *counter[MAX_HWCOUNTERS]; unsigned int events[MAX_HWCOUNTERS]; + unsigned int flags[MAX_HWCOUNTERS]; u64 mmcr[3]; - u8 pmcs_enabled; + struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS]; + u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS]; }; DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters); @@ -127,7 +131,8 @@ static void write_pmc(int idx, unsigned long val) * and see if any combination of alternative codes is feasible. * The feasible set is returned in event[]. 
*/ -static int power_check_constraints(unsigned int event[], int n_ev) +static int power_check_constraints(unsigned int event[], unsigned int cflags[], + int n_ev) { u64 mask, value, nv; unsigned int alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; @@ -144,11 +149,15 @@ static int power_check_constraints(unsigned int event[], int n_ev) /* First see if the events will go on as-is */ for (i = 0; i < n_ev; ++i) { - alternatives[i][0] = event[i]; + if ((cflags[i] & PPMU_LIMITED_PMC_REQD) + && !ppmu->limited_pmc_event(event[i])) { + ppmu->get_alternatives(event[i], cflags[i], + alternatives[i]); + event[i] = alternatives[i][0]; + } if (ppmu->get_constraint(event[i], &amasks[i][0], &avalues[i][0])) return -1; - choice[i] = 0; } value = mask = 0; for (i = 0; i < n_ev; ++i) { @@ -166,7 +175,9 @@ static int power_check_constraints(unsigned int event[], int n_ev) if (!ppmu->get_alternatives) return -1; for (i = 0; i < n_ev; ++i) { - n_alt[i] = ppmu->get_alternatives(event[i], alternatives[i]); + choice[i] = 0; + n_alt[i] = ppmu->get_alternatives(event[i], cflags[i], + alternatives[i]); for (j = 1; j < n_alt[i]; ++j) ppmu->get_constraint(alternatives[i][j], &amasks[i][j], &avalues[i][j]); @@ -231,28 +242,41 @@ static int power_check_constraints(unsigned int event[], int n_ev) * exclude_{user,kernel,hv} with each other and any previously * added counters. */ -static int check_excludes(struct perf_counter **ctrs, int n_prev, int n_new) +static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[], + int n_prev, int n_new) { - int eu, ek, eh; - int i, n; + int eu = 0, ek = 0, eh = 0; + int i, n, first; struct perf_counter *counter; n = n_prev + n_new; if (n <= 1) return 0; - eu = ctrs[0]->hw_event.exclude_user; - ek = ctrs[0]->hw_event.exclude_kernel; - eh = ctrs[0]->hw_event.exclude_hv; - if (n_prev == 0) - n_prev = 1; - for (i = n_prev; i < n; ++i) { + first = 1; + for (i = 0; i < n; ++i) { + if (cflags[i] & PPMU_LIMITED_PMC_OK) { + cflags[i] &= ~PPMU_LIMITED_PMC_REQD; + continue; + } counter = ctrs[i]; - if (counter->hw_event.exclude_user != eu || - counter->hw_event.exclude_kernel != ek || - counter->hw_event.exclude_hv != eh) + if (first) { + eu = counter->hw_event.exclude_user; + ek = counter->hw_event.exclude_kernel; + eh = counter->hw_event.exclude_hv; + first = 0; + } else if (counter->hw_event.exclude_user != eu || + counter->hw_event.exclude_kernel != ek || + counter->hw_event.exclude_hv != eh) { return -EAGAIN; + } } + + if (eu || ek || eh) + for (i = 0; i < n; ++i) + if (cflags[i] & PPMU_LIMITED_PMC_OK) + cflags[i] |= PPMU_LIMITED_PMC_REQD; + return 0; } @@ -279,6 +303,85 @@ static void power_pmu_read(struct perf_counter *counter) atomic64_sub(delta, &counter->hw.period_left); } +/* + * On some machines, PMC5 and PMC6 can't be written, don't respect + * the freeze conditions, and don't generate interrupts. This tells + * us if `counter' is using such a PMC. + */ +static int is_limited_pmc(int pmcnum) +{ + return ppmu->limited_pmc5_6 && (pmcnum == 5 || pmcnum == 6); +} + +static void freeze_limited_counters(struct cpu_hw_counters *cpuhw, + unsigned long pmc5, unsigned long pmc6) +{ + struct perf_counter *counter; + u64 val, prev, delta; + int i; + + for (i = 0; i < cpuhw->n_limited; ++i) { + counter = cpuhw->limited_counter[i]; + if (!counter->hw.idx) + continue; + val = (counter->hw.idx == 5) ? 
pmc5 : pmc6; + prev = atomic64_read(&counter->hw.prev_count); + counter->hw.idx = 0; + delta = (val - prev) & 0xfffffffful; + atomic64_add(delta, &counter->count); + } +} + +static void thaw_limited_counters(struct cpu_hw_counters *cpuhw, + unsigned long pmc5, unsigned long pmc6) +{ + struct perf_counter *counter; + u64 val; + int i; + + for (i = 0; i < cpuhw->n_limited; ++i) { + counter = cpuhw->limited_counter[i]; + counter->hw.idx = cpuhw->limited_hwidx[i]; + val = (counter->hw.idx == 5) ? pmc5 : pmc6; + atomic64_set(&counter->hw.prev_count, val); + perf_counter_update_userpage(counter); + } +} + +/* + * Since limited counters don't respect the freeze conditions, we + * have to read them immediately after freezing or unfreezing the + * other counters. We try to keep the values from the limited + * counters as consistent as possible by keeping the delay (in + * cycles and instructions) between freezing/unfreezing and reading + * the limited counters as small and consistent as possible. + * Therefore, if any limited counters are in use, we read them + * both, and always in the same order, to minimize variability, + * and do it inside the same asm that writes MMCR0. + */ +static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0) +{ + unsigned long pmc5, pmc6; + + if (!cpuhw->n_limited) { + mtspr(SPRN_MMCR0, mmcr0); + return; + } + + /* + * Write MMCR0, then read PMC5 and PMC6 immediately. + */ + asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5" + : "=&r" (pmc5), "=&r" (pmc6) + : "r" (mmcr0), "i" (SPRN_MMCR0), + "i" (SPRN_PMC5), "i" (SPRN_PMC6)); + + if (mmcr0 & MMCR0_FC) + freeze_limited_counters(cpuhw, pmc5, pmc6); + else + thaw_limited_counters(cpuhw, pmc5, pmc6); +} + /* * Disable all counters to prevent PMU interrupts and to allow * counters to be added or removed. @@ -321,7 +424,7 @@ u64 hw_perf_save_disable(void) * executed and the PMU has frozen the counters * before we return. */ - mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC); + write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC); mb(); } local_irq_restore(flags); @@ -342,6 +445,8 @@ void hw_perf_restore(u64 disable) unsigned long val; s64 left; unsigned int hwc_index[MAX_HWCOUNTERS]; + int n_lim; + int idx; if (disable) return; @@ -414,10 +519,18 @@ void hw_perf_restore(u64 disable) /* * Initialize the PMCs for all the new and moved counters. 
*/ + cpuhw->n_limited = n_lim = 0; for (i = 0; i < cpuhw->n_counters; ++i) { counter = cpuhw->counter[i]; if (counter->hw.idx) continue; + idx = hwc_index[i] + 1; + if (is_limited_pmc(idx)) { + cpuhw->limited_counter[n_lim] = counter; + cpuhw->limited_hwidx[n_lim] = idx; + ++n_lim; + continue; + } val = 0; if (counter->hw_event.irq_period) { left = atomic64_read(&counter->hw.period_left); @@ -425,15 +538,16 @@ void hw_perf_restore(u64 disable) val = 0x80000000L - left; } atomic64_set(&counter->hw.prev_count, val); - counter->hw.idx = hwc_index[i] + 1; - write_pmc(counter->hw.idx, val); + counter->hw.idx = idx; + write_pmc(idx, val); perf_counter_update_userpage(counter); } + cpuhw->n_limited = n_lim; cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; out_enable: mb(); - mtspr(SPRN_MMCR0, cpuhw->mmcr[0]); + write_mmcr0(cpuhw, cpuhw->mmcr[0]); /* * Enable instruction sampling if necessary @@ -448,7 +562,8 @@ void hw_perf_restore(u64 disable) } static int collect_events(struct perf_counter *group, int max_count, - struct perf_counter *ctrs[], unsigned int *events) + struct perf_counter *ctrs[], unsigned int *events, + unsigned int *flags) { int n = 0; struct perf_counter *counter; @@ -457,6 +572,7 @@ static int collect_events(struct perf_counter *group, int max_count, if (n >= max_count) return -1; ctrs[n] = group; + flags[n] = group->hw.counter_base; events[n++] = group->hw.config; } list_for_each_entry(counter, &group->sibling_list, list_entry) { @@ -465,6 +581,7 @@ static int collect_events(struct perf_counter *group, int max_count, if (n >= max_count) return -1; ctrs[n] = counter; + flags[n] = counter->hw.counter_base; events[n++] = counter->hw.config; } } @@ -497,12 +614,14 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader, cpuhw = &__get_cpu_var(cpu_hw_counters); n0 = cpuhw->n_counters; n = collect_events(group_leader, ppmu->n_counter - n0, - &cpuhw->counter[n0], &cpuhw->events[n0]); + &cpuhw->counter[n0], &cpuhw->events[n0], + &cpuhw->flags[n0]); if (n < 0) return -EAGAIN; - if (check_excludes(cpuhw->counter, n0, n)) + if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n)) return -EAGAIN; - if (power_check_constraints(cpuhw->events, n + n0)) + i = power_check_constraints(cpuhw->events, cpuhw->flags, n + n0); + if (i < 0) return -EAGAIN; cpuhw->n_counters = n0 + n; cpuhw->n_added += n; @@ -554,9 +673,10 @@ static int power_pmu_enable(struct perf_counter *counter) goto out; cpuhw->counter[n0] = counter; cpuhw->events[n0] = counter->hw.config; - if (check_excludes(cpuhw->counter, n0, 1)) + cpuhw->flags[n0] = counter->hw.counter_base; + if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1)) goto out; - if (power_check_constraints(cpuhw->events, n0 + 1)) + if (power_check_constraints(cpuhw->events, cpuhw->flags, n0 + 1)) goto out; counter->hw.config = cpuhw->events[n0]; @@ -592,12 +712,24 @@ static void power_pmu_disable(struct perf_counter *counter) cpuhw->counter[i-1] = cpuhw->counter[i]; --cpuhw->n_counters; ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr); - write_pmc(counter->hw.idx, 0); - counter->hw.idx = 0; + if (counter->hw.idx) { + write_pmc(counter->hw.idx, 0); + counter->hw.idx = 0; + } perf_counter_update_userpage(counter); break; } } + for (i = 0; i < cpuhw->n_limited; ++i) + if (counter == cpuhw->limited_counter[i]) + break; + if (i < cpuhw->n_limited) { + while (++i < cpuhw->n_limited) { + cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i]; + cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i]; + } + --cpuhw->n_limited; + } if (cpuhw->n_counters == 0) { /* 
disable exceptions if no counters are running */ cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); @@ -613,6 +745,61 @@ struct pmu power_pmu = { .read = power_pmu_read, }; +/* + * Return 1 if we might be able to put counter on a limited PMC, + * or 0 if not. + * A counter can only go on a limited PMC if it counts something + * that a limited PMC can count, doesn't require interrupts, and + * doesn't exclude any processor mode. + */ +static int can_go_on_limited_pmc(struct perf_counter *counter, unsigned int ev, + unsigned int flags) +{ + int n; + unsigned int alt[MAX_EVENT_ALTERNATIVES]; + + if (counter->hw_event.exclude_user + || counter->hw_event.exclude_kernel + || counter->hw_event.exclude_hv + || counter->hw_event.irq_period) + return 0; + + if (ppmu->limited_pmc_event(ev)) + return 1; + + /* + * The requested event isn't on a limited PMC already; + * see if any alternative code goes on a limited PMC. + */ + if (!ppmu->get_alternatives) + return 0; + + flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD; + n = ppmu->get_alternatives(ev, flags, alt); + if (n) + return alt[0]; + + return 0; +} + +/* + * Find an alternative event that goes on a normal PMC, if possible, + * and return the event code, or 0 if there is no such alternative. + * (Note: event code 0 is "don't count" on all machines.) + */ +static unsigned long normal_pmc_alternative(unsigned long ev, + unsigned long flags) +{ + unsigned int alt[MAX_EVENT_ALTERNATIVES]; + int n; + + flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD); + n = ppmu->get_alternatives(ev, flags, alt); + if (!n) + return 0; + return alt[0]; +} + /* Number of perf_counters counting hardware events */ static atomic_t num_counters; /* Used to avoid races in calling reserve/release_pmc_hardware */ @@ -633,9 +820,10 @@ static void hw_perf_counter_destroy(struct perf_counter *counter) const struct pmu *hw_perf_counter_init(struct perf_counter *counter) { - unsigned long ev; + unsigned long ev, flags; struct perf_counter *ctrs[MAX_HWCOUNTERS]; unsigned int events[MAX_HWCOUNTERS]; + unsigned int cflags[MAX_HWCOUNTERS]; int n; int err; @@ -661,7 +849,36 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter) */ if (!firmware_has_feature(FW_FEATURE_LPAR)) counter->hw_event.exclude_hv = 0; - + + /* + * If this is a per-task counter, then we can use + * PM_RUN_* events interchangeably with their non RUN_* + * equivalents, e.g. PM_RUN_CYC instead of PM_CYC. + * XXX we should check if the task is an idle task. + */ + flags = 0; + if (counter->ctx->task) + flags |= PPMU_ONLY_COUNT_RUN; + + /* + * If this machine has limited counters, check whether this + * event could go on a limited counter. + */ + if (ppmu->limited_pmc5_6) { + if (can_go_on_limited_pmc(counter, ev, flags)) { + flags |= PPMU_LIMITED_PMC_OK; + } else if (ppmu->limited_pmc_event(ev)) { + /* + * The requested event is on a limited PMC, + * but we can't use a limited PMC; see if any + * alternative goes on a normal PMC. + */ + ev = normal_pmc_alternative(ev, flags); + if (!ev) + return ERR_PTR(-EINVAL); + } + } + /* * If this is in a group, check if it can go on with all the * other hardware counters in the group. 
We assume the counter @@ -670,18 +887,20 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter) n = 0; if (counter->group_leader != counter) { n = collect_events(counter->group_leader, ppmu->n_counter - 1, - ctrs, events); + ctrs, events, cflags); if (n < 0) return ERR_PTR(-EINVAL); } events[n] = ev; ctrs[n] = counter; - if (check_excludes(ctrs, n, 1)) + cflags[n] = flags; + if (check_excludes(ctrs, cflags, n, 1)) return ERR_PTR(-EINVAL); - if (power_check_constraints(events, n + 1)) + if (power_check_constraints(events, cflags, n + 1)) return ERR_PTR(-EINVAL); counter->hw.config = events[n]; + counter->hw.counter_base = cflags[n]; atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period); /* @@ -763,6 +982,10 @@ static void perf_counter_interrupt(struct pt_regs *regs) int found = 0; int nmi; + if (cpuhw->n_limited) + freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5), + mfspr(SPRN_PMC6)); + /* * If interrupts were soft-disabled when this PMU interrupt * occurred, treat it as an NMI. @@ -775,6 +998,8 @@ static void perf_counter_interrupt(struct pt_regs *regs) for (i = 0; i < cpuhw->n_counters; ++i) { counter = cpuhw->counter[i]; + if (is_limited_pmc(counter->hw.idx)) + continue; val = read_pmc(counter->hw.idx); if ((int)val < 0) { /* counter has overflowed */ @@ -791,6 +1016,8 @@ static void perf_counter_interrupt(struct pt_regs *regs) */ if (!found) { for (i = 0; i < ppmu->n_counter; ++i) { + if (is_limited_pmc(i + 1)) + continue; val = read_pmc(i + 1); if ((int)val < 0) write_pmc(i + 1, 0); @@ -804,7 +1031,7 @@ static void perf_counter_interrupt(struct pt_regs *regs) * XXX might want to use MSR.PM to keep the counters frozen until * we get back out of this interrupt. */ - mtspr(SPRN_MMCR0, cpuhw->mmcr[0]); + write_mmcr0(cpuhw, cpuhw->mmcr[0]); if (nmi) nmi_exit(); diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c index 1407b19ab61..744a2756958 100644 --- a/arch/powerpc/kernel/power4-pmu.c +++ b/arch/powerpc/kernel/power4-pmu.c @@ -320,7 +320,8 @@ static unsigned int ppc_inst_cmpl[] = { 0x1001, 0x4001, 0x6001, 0x7001, 0x8001 }; -static int p4_get_alternatives(unsigned int event, unsigned int alt[]) +static int p4_get_alternatives(unsigned int event, unsigned int flags, + unsigned int alt[]) { int i, j, na; diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c index 1222c8ea3c2..8154eaa2404 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c @@ -78,8 +78,8 @@ * Layout of constraint bits: * 6666555555555544444444443333333333222222222211111111110000000000 * 3210987654321098765432109876543210987654321098765432109876543210 - * [ ><><>< ><> <><>[ > < >< >< >< ><><><><> - * NC G0G1G2 G3 T0T1 UC B0 B1 B2 B3 P4P3P2P1 + * [ ><><>< ><> <><>[ > < >< >< >< ><><><><><><> + * NC G0G1G2 G3 T0T1 UC B0 B1 B2 B3 P6P5P4P3P2P1 * * NC - number of counters * 51: NC error 0x0008_0000_0000_0000 @@ -105,18 +105,18 @@ * 30: IDU|GRS events needed 0x00_4000_0000 * * B0 - * 20-23: Byte 0 event source 0x00f0_0000 + * 24-27: Byte 0 event source 0x0f00_0000 * Encoding as for the event code * * B1, B2, B3 - * 16-19, 12-15, 8-11: Byte 1, 2, 3 event sources + * 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources * - * P4 - * 7: P1 error 0x80 - * 6-7: Count of events needing PMC4 + * P6 + * 11: P6 error 0x800 + * 10-11: Count of events needing PMC6 * - * P1..P3 - * 0-6: Count of events needing PMC1..PMC3 + * P1..P5 + * 0-9: Count of events needing PMC1..PMC5 */ static const int grsel_shift[8] = { @@ -143,11 
+143,13 @@ static int power5p_get_constraint(unsigned int event, u64 *maskp, u64 *valp) pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; if (pmc) { - if (pmc > 4) + if (pmc > 6) return -1; sh = (pmc - 1) * 2; mask |= 2 << sh; value |= 1 << sh; + if (pmc >= 5 && !(event == 0x500009 || event == 0x600005)) + return -1; } if (event & PM_BUSEVENT_MSK) { unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; @@ -173,16 +175,26 @@ static int power5p_get_constraint(unsigned int event, u64 *maskp, u64 *valp) value |= (u64)((event >> PM_GRS_SH) & fmask) << sh; } /* Set byte lane select field */ - mask |= 0xfULL << (20 - 4 * byte); - value |= (u64)unit << (20 - 4 * byte); + mask |= 0xfULL << (24 - 4 * byte); + value |= (u64)unit << (24 - 4 * byte); + } + if (pmc < 5) { + /* need a counter from PMC1-4 set */ + mask |= 0x8000000000000ull; + value |= 0x1000000000000ull; } - mask |= 0x8000000000000ull; - value |= 0x1000000000000ull; *maskp = mask; *valp = value; return 0; } +static int power5p_limited_pmc_event(unsigned int event) +{ + int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; + + return pmc == 5 || pmc == 6; +} + #define MAX_ALT 3 /* at most 3 alternatives for any event */ static const unsigned int event_alternatives[][MAX_ALT] = { @@ -193,6 +205,7 @@ static const unsigned int event_alternatives[][MAX_ALT] = { { 0x410c7, 0x441084 }, /* PM_THRD_L2MISS_BOTH_CYC */ { 0x800c4, 0xc20e0 }, /* PM_DTLB_MISS */ { 0xc50c6, 0xc60e0 }, /* PM_MRK_DTLB_MISS */ + { 0x100005, 0x600005 }, /* PM_RUN_CYC */ { 0x100009, 0x200009 }, /* PM_INST_CMPL */ { 0x200015, 0x300015 }, /* PM_LSU_LMQ_SRQ_EMPTY_CYC */ { 0x300009, 0x400009 }, /* PM_INST_DISP */ @@ -260,24 +273,85 @@ static int find_alternative_bdecode(unsigned int event) return -1; } -static int power5p_get_alternatives(unsigned int event, unsigned int alt[]) +static int power5p_get_alternatives(unsigned int event, unsigned int flags, + unsigned int alt[]) { int i, j, ae, nalt = 1; + int nlim; alt[0] = event; nalt = 1; + nlim = power5p_limited_pmc_event(event); i = find_alternative(event); if (i >= 0) { for (j = 0; j < MAX_ALT; ++j) { ae = event_alternatives[i][j]; if (ae && ae != event) alt[nalt++] = ae; + nlim += power5p_limited_pmc_event(ae); } } else { ae = find_alternative_bdecode(event); if (ae > 0) alt[nalt++] = ae; } + + if (flags & PPMU_ONLY_COUNT_RUN) { + /* + * We're only counting in RUN state, + * so PM_CYC is equivalent to PM_RUN_CYC + * and PM_INST_CMPL === PM_RUN_INST_CMPL. + * This doesn't include alternatives that don't provide + * any extra flexibility in assigning PMCs (e.g. + * 0x100005 for PM_RUN_CYC vs. 0xf for PM_CYC). + * Note that even with these additional alternatives + * we never end up with more than 3 alternatives for any event. 
+ */ + j = nalt; + for (i = 0; i < nalt; ++i) { + switch (alt[i]) { + case 0xf: /* PM_CYC */ + alt[j++] = 0x600005; /* PM_RUN_CYC */ + ++nlim; + break; + case 0x600005: /* PM_RUN_CYC */ + alt[j++] = 0xf; + break; + case 0x100009: /* PM_INST_CMPL */ + alt[j++] = 0x500009; /* PM_RUN_INST_CMPL */ + ++nlim; + break; + case 0x500009: /* PM_RUN_INST_CMPL */ + alt[j++] = 0x100009; /* PM_INST_CMPL */ + alt[j++] = 0x200009; + break; + } + } + nalt = j; + } + + if (!(flags & PPMU_LIMITED_PMC_OK) && nlim) { + /* remove the limited PMC events */ + j = 0; + for (i = 0; i < nalt; ++i) { + if (!power5p_limited_pmc_event(alt[i])) { + alt[j] = alt[i]; + ++j; + } + } + nalt = j; + } else if ((flags & PPMU_LIMITED_PMC_REQD) && nlim < nalt) { + /* remove all but the limited PMC events */ + j = 0; + for (i = 0; i < nalt; ++i) { + if (power5p_limited_pmc_event(alt[i])) { + alt[j] = alt[i]; + ++j; + } + } + nalt = j; + } + return nalt; } @@ -390,7 +464,7 @@ static int power5p_compute_mmcr(unsigned int event[], int n_ev, unsigned char unituse[16]; int ttmuse; - if (n_ev > 4) + if (n_ev > 6) return -1; /* First pass to count resource use */ @@ -399,7 +473,7 @@ static int power5p_compute_mmcr(unsigned int event[], int n_ev, for (i = 0; i < n_ev; ++i) { pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; if (pmc) { - if (pmc > 4) + if (pmc > 6) return -1; if (pmc_inuse & (1 << (pmc - 1))) return -1; @@ -488,13 +562,16 @@ static int power5p_compute_mmcr(unsigned int event[], int n_ev, if (pmc >= 4) return -1; pmc_inuse |= 1 << pmc; - } else { + } else if (pmc <= 4) { /* Direct event */ --pmc; if (isbus && (byte & 2) && (psel == 8 || psel == 0x10 || psel == 0x28)) /* add events on higher-numbered bus */ mmcr1 |= 1ull << (MMCR1_PMC1_ADDER_SEL_SH - pmc); + } else { + /* Instructions or run cycles on PMC5/6 */ + --pmc; } if (isbus && unit == PM_GRS) { bit = psel & 7; @@ -538,7 +615,7 @@ static int power5p_generic_events[] = { }; struct power_pmu power5p_pmu = { - .n_counter = 4, + .n_counter = 6, .max_alternatives = MAX_ALT, .add_fields = 0x7000000000055ull, .test_adder = 0x3000040000000ull, @@ -548,4 +625,6 @@ struct power_pmu power5p_pmu = { .disable_pmc = power5p_disable_pmc, .n_generic = ARRAY_SIZE(power5p_generic_events), .generic_events = power5p_generic_events, + .limited_pmc5_6 = 1, + .limited_pmc_event = power5p_limited_pmc_event, }; diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c index 116c4bb1809..6e667dc8647 100644 --- a/arch/powerpc/kernel/power5-pmu.c +++ b/arch/powerpc/kernel/power5-pmu.c @@ -269,7 +269,8 @@ static int find_alternative_bdecode(unsigned int event) return -1; } -static int power5_get_alternatives(unsigned int event, unsigned int alt[]) +static int power5_get_alternatives(unsigned int event, unsigned int flags, + unsigned int alt[]) { int i, j, ae, nalt = 1; diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index fce1fc290a1..d44049f0ae2 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c @@ -182,7 +182,7 @@ static int p6_compute_mmcr(unsigned int event[], int n_ev, unsigned int ttmset = 0; unsigned int pmc_inuse = 0; - if (n_ev > 4) + if (n_ev > 6) return -1; for (i = 0; i < n_ev; ++i) { pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; @@ -202,6 +202,8 @@ static int p6_compute_mmcr(unsigned int event[], int n_ev, for (pmc = 0; pmc < 4; ++pmc) if (!(pmc_inuse & (1 << pmc))) break; + if (pmc >= 4) + return -1; pmc_inuse |= 1 << pmc; } hwc[i] = pmc; @@ -240,7 +242,8 @@ static int p6_compute_mmcr(unsigned int 
event[], int n_ev, } if (power6_marked_instr_event(event[i])) mmcra |= MMCRA_SAMPLE_ENABLE; - mmcr1 |= (u64)psel << MMCR1_PMCSEL_SH(pmc); + if (pmc < 4) + mmcr1 |= (u64)psel << MMCR1_PMCSEL_SH(pmc); } mmcr[0] = 0; if (pmc_inuse & 1) @@ -256,19 +259,20 @@ static int p6_compute_mmcr(unsigned int event[], int n_ev, * Layout of constraint bits: * * 0-1 add field: number of uses of PMC1 (max 1) - * 2-3, 4-5, 6-7: ditto for PMC2, 3, 4 - * 8-10 select field: nest (subunit) event selector + * 2-3, 4-5, 6-7, 8-9, 10-11: ditto for PMC2, 3, 4, 5, 6 + * 12-15 add field: number of uses of PMC1-4 (max 4) * 16-19 select field: unit on byte 0 of event bus * 20-23, 24-27, 28-31 ditto for bytes 1, 2, 3 + * 32-34 select field: nest (subunit) event selector */ static int p6_get_constraint(unsigned int event, u64 *maskp, u64 *valp) { - int pmc, byte, sh; - unsigned int mask = 0, value = 0; + int pmc, byte, sh, subunit; + u64 mask = 0, value = 0; pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; if (pmc) { - if (pmc > 4) + if (pmc > 4 && !(event == 0x500009 || event == 0x600005)) return -1; sh = (pmc - 1) * 2; mask |= 2 << sh; @@ -276,26 +280,38 @@ static int p6_get_constraint(unsigned int event, u64 *maskp, u64 *valp) } if (event & PM_BUSEVENT_MSK) { byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; - sh = byte * 4; + sh = byte * 4 + (16 - PM_UNIT_SH); mask |= PM_UNIT_MSKS << sh; - value |= (event & PM_UNIT_MSKS) << sh; + value |= (u64)(event & PM_UNIT_MSKS) << sh; if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) { - mask |= PM_SUBUNIT_MSKS; - value |= event & PM_SUBUNIT_MSKS; + subunit = (event >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK; + mask |= (u64)PM_SUBUNIT_MSK << 32; + value |= (u64)subunit << 32; } } + if (pmc <= 4) { + mask |= 0x8000; /* add field for count of PMC1-4 uses */ + value |= 0x1000; + } *maskp = mask; *valp = value; return 0; } +static int p6_limited_pmc_event(unsigned int event) +{ + int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; + + return pmc == 5 || pmc == 6; +} + #define MAX_ALT 4 /* at most 4 alternatives for any event */ static const unsigned int event_alternatives[][MAX_ALT] = { { 0x0130e8, 0x2000f6, 0x3000fc }, /* PM_PTEG_RELOAD_VALID */ { 0x080080, 0x10000d, 0x30000c, 0x4000f0 }, /* PM_LD_MISS_L1 */ { 0x080088, 0x200054, 0x3000f0 }, /* PM_ST_MISS_L1 */ - { 0x10000a, 0x2000f4 }, /* PM_RUN_CYC */ + { 0x10000a, 0x2000f4, 0x600005 }, /* PM_RUN_CYC */ { 0x10000b, 0x2000f5 }, /* PM_RUN_COUNT */ { 0x10000e, 0x400010 }, /* PM_PURR */ { 0x100010, 0x4000f8 }, /* PM_FLUSH */ @@ -340,13 +356,15 @@ static int find_alternatives_list(unsigned int event) return -1; } -static int p6_get_alternatives(unsigned int event, unsigned int alt[]) +static int p6_get_alternatives(unsigned int event, unsigned int flags, + unsigned int alt[]) { - int i, j; + int i, j, nlim; unsigned int aevent, psel, pmc; unsigned int nalt = 1; alt[0] = event; + nlim = p6_limited_pmc_event(event); /* check the alternatives table */ i = find_alternatives_list(event); @@ -358,6 +376,7 @@ static int p6_get_alternatives(unsigned int event, unsigned int alt[]) break; if (aevent != event) alt[nalt++] = aevent; + nlim += p6_limited_pmc_event(aevent); } } else { @@ -375,13 +394,75 @@ static int p6_get_alternatives(unsigned int event, unsigned int alt[]) ((pmc > 2? pmc - 2: pmc + 2) << PM_PMC_SH); } + if (flags & PPMU_ONLY_COUNT_RUN) { + /* + * We're only counting in RUN state, + * so PM_CYC is equivalent to PM_RUN_CYC, + * PM_INST_CMPL === PM_RUN_INST_CMPL, PM_PURR === PM_RUN_PURR. 
+ * This doesn't include alternatives that don't provide + * any extra flexibility in assigning PMCs (e.g. + * 0x10000a for PM_RUN_CYC vs. 0x1e for PM_CYC). + * Note that even with these additional alternatives + * we never end up with more than 4 alternatives for any event. + */ + j = nalt; + for (i = 0; i < nalt; ++i) { + switch (alt[i]) { + case 0x1e: /* PM_CYC */ + alt[j++] = 0x600005; /* PM_RUN_CYC */ + ++nlim; + break; + case 0x10000a: /* PM_RUN_CYC */ + alt[j++] = 0x1e; /* PM_CYC */ + break; + case 2: /* PM_INST_CMPL */ + alt[j++] = 0x500009; /* PM_RUN_INST_CMPL */ + ++nlim; + break; + case 0x500009: /* PM_RUN_INST_CMPL */ + alt[j++] = 2; /* PM_INST_CMPL */ + break; + case 0x10000e: /* PM_PURR */ + alt[j++] = 0x4000f4; /* PM_RUN_PURR */ + break; + case 0x4000f4: /* PM_RUN_PURR */ + alt[j++] = 0x10000e; /* PM_PURR */ + break; + } + } + nalt = j; + } + + if (!(flags & PPMU_LIMITED_PMC_OK) && nlim) { + /* remove the limited PMC events */ + j = 0; + for (i = 0; i < nalt; ++i) { + if (!p6_limited_pmc_event(alt[i])) { + alt[j] = alt[i]; + ++j; + } + } + nalt = j; + } else if ((flags & PPMU_LIMITED_PMC_REQD) && nlim < nalt) { + /* remove all but the limited PMC events */ + j = 0; + for (i = 0; i < nalt; ++i) { + if (p6_limited_pmc_event(alt[i])) { + alt[j] = alt[i]; + ++j; + } + } + nalt = j; + } + return nalt; } static void p6_disable_pmc(unsigned int pmc, u64 mmcr[]) { /* Set PMCxSEL to 0 to disable PMCx */ - mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc)); + if (pmc <= 3) + mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc)); } static int power6_generic_events[] = { @@ -394,14 +475,16 @@ static int power6_generic_events[] = { }; struct power_pmu power6_pmu = { - .n_counter = 4, + .n_counter = 6, .max_alternatives = MAX_ALT, - .add_fields = 0x55, - .test_adder = 0, + .add_fields = 0x1555, + .test_adder = 0x3000, .compute_mmcr = p6_compute_mmcr, .get_constraint = p6_get_constraint, .get_alternatives = p6_get_alternatives, .disable_pmc = p6_disable_pmc, .n_generic = ARRAY_SIZE(power6_generic_events), .generic_events = power6_generic_events, + .limited_pmc5_6 = 1, + .limited_pmc_event = p6_limited_pmc_event, }; diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c index aed8ccd7c07..af2d1884058 100644 --- a/arch/powerpc/kernel/ppc970-pmu.c +++ b/arch/powerpc/kernel/ppc970-pmu.c @@ -243,7 +243,8 @@ static int p970_get_constraint(unsigned int event, u64 *maskp, u64 *valp) return 0; } -static int p970_get_alternatives(unsigned int event, unsigned int alt[]) +static int p970_get_alternatives(unsigned int event, unsigned int flags, + unsigned int alt[]) { alt[0] = event; -- cgit v1.2.3 From 43f6201a22dbf1c5abe1cab96b49bd56fa9df8f4 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 29 Apr 2009 16:55:56 +0200 Subject: perf_counter, x86: rename bitmasks to ->used_mask and ->active_mask Standardize on explicitly mentioning '_mask' in fields that are not plain flags but masks. This avoids typos like: if (cpuc->used) (which could easily slip through review unnoticed), while if a typo looks like this: if (cpuc->used_mask) it might get noticed during review. 
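To see why the suffix helps, here is a minimal user-space sketch (hypothetical struct and bit helper, loosely modelled on cpu_hw_counters; not the kernel code itself). A truth-test of a field named '_mask' now looks wrong on sight, while test_bit() usage reads naturally:

#include <stdio.h>
#include <string.h>

#define X86_PMC_IDX_MAX  64
#define BITS_PER_LONG    (8 * (int)sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct cpu_hw_state {
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; /* bitmask */
	int enabled;                                             /* plain flag */
};

/* poor man's test_bit() */
static int test_bit(int nr, const unsigned long *addr)
{
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	struct cpu_hw_state s;

	memset(&s, 0, sizeof(s));
	s.used_mask[0] = 1UL << 3;		/* mark counter 3 in use */

	if (test_bit(3, s.used_mask))		/* correct: query one bit */
		printf("counter 3 in use\n");

	/* "if (s.used_mask)" would be the old-style typo: it truth-tests
	 * the array (always true) instead of a bit -- and the _mask name
	 * now makes that stand out in review. */
	return 0;
}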
[ Impact: cleanup ] Signed-off-by: Robert Richter Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1241016956-24648-1-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 47e563bfd4c..fc06f4d3264 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -28,8 +28,8 @@ static u64 perf_counter_mask __read_mostly; struct cpu_hw_counters { struct perf_counter *counters[X86_PMC_IDX_MAX]; - unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; - unsigned long active[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; unsigned long interrupts; u64 throttle_ctrl; int enabled; @@ -332,7 +332,7 @@ static u64 amd_pmu_save_disable_all(void) for (idx = 0; idx < x86_pmu.num_counters; idx++) { u64 val; - if (!test_bit(idx, cpuc->active)) + if (!test_bit(idx, cpuc->active_mask)) continue; rdmsrl(MSR_K7_EVNTSEL0 + idx, val); if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE)) @@ -373,7 +373,7 @@ static void amd_pmu_restore_all(u64 ctrl) for (idx = 0; idx < x86_pmu.num_counters; idx++) { u64 val; - if (!test_bit(idx, cpuc->active)) + if (!test_bit(idx, cpuc->active_mask)) continue; rdmsrl(MSR_K7_EVNTSEL0 + idx, val); if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) @@ -576,7 +576,7 @@ static int x86_pmu_enable(struct perf_counter *counter) * Try to get the fixed counter, if that is already taken * then try to get a generic counter: */ - if (test_and_set_bit(idx, cpuc->used)) + if (test_and_set_bit(idx, cpuc->used_mask)) goto try_generic; hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; @@ -590,14 +590,14 @@ static int x86_pmu_enable(struct perf_counter *counter) } else { idx = hwc->idx; /* Try to get the previous generic counter again */ - if (test_and_set_bit(idx, cpuc->used)) { + if (test_and_set_bit(idx, cpuc->used_mask)) { try_generic: - idx = find_first_zero_bit(cpuc->used, + idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_counters); if (idx == x86_pmu.num_counters) return -EAGAIN; - set_bit(idx, cpuc->used); + set_bit(idx, cpuc->used_mask); hwc->idx = idx; } hwc->config_base = x86_pmu.eventsel; @@ -609,7 +609,7 @@ try_generic: x86_pmu.disable(hwc, idx); cpuc->counters[idx] = counter; - set_bit(idx, cpuc->active); + set_bit(idx, cpuc->active_mask); x86_perf_counter_set_period(counter, hwc, idx); x86_pmu.enable(hwc, idx); @@ -643,7 +643,7 @@ void perf_counter_print_debug(void) pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow); pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed); } - pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used); + pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask); for (idx = 0; idx < x86_pmu.num_counters; idx++) { rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); @@ -677,7 +677,7 @@ static void x86_pmu_disable(struct perf_counter *counter) * Must be done before we disable, otherwise the nmi handler * could reenable again: */ - clear_bit(idx, cpuc->active); + clear_bit(idx, cpuc->active_mask); x86_pmu.disable(hwc, idx); /* @@ -692,7 +692,7 @@ static void x86_pmu_disable(struct perf_counter *counter) */ x86_perf_counter_update(counter, hwc, idx); cpuc->counters[idx] = NULL; - clear_bit(idx, cpuc->used); + clear_bit(idx, cpuc->used_mask); } /* @@ -741,7 +741,7 @@ again: struct perf_counter *counter = 
cpuc->counters[bit]; clear_bit(bit, (unsigned long *) &status); - if (!test_bit(bit, cpuc->active)) + if (!test_bit(bit, cpuc->active_mask)) continue; intel_pmu_save_and_restart(counter); @@ -779,7 +779,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) ++cpuc->interrupts; for (idx = 0; idx < x86_pmu.num_counters; idx++) { - if (!test_bit(idx, cpuc->active)) + if (!test_bit(idx, cpuc->active_mask)) continue; counter = cpuc->counters[idx]; hwc = &counter->hw; -- cgit v1.2.3 From c5dd016cdf0a040e1de0b691e274fbfe642b2cdc Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 30 Apr 2009 09:48:16 +1000 Subject: perf_counter: update copyright notice This adds my name to the list of copyright holders on the core perf_counter.c, since I have contributed a significant amount of the code in there. Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: Robert Richter LKML-Reference: <18936.59200.888049.746658@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index a95a171e608..75f2b6c8239 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -4,6 +4,7 @@ * Copyright (C) 2008 Thomas Gleixner * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra + * Copyright © 2009 Paul Mackerras, IBM Corp. * * For licensing details see kernel-base/COPYING */ -- cgit v1.2.3 From aac3f3c2c41ce49a6dbb98d9145265c00a964dc2 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 30 Apr 2009 13:52:19 +0200 Subject: perf_counter tools: add perf-report to the Makefile Build it explicitly until it's a proper builtin command. Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 543ccf28ac4..877cf5dedb5 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -228,7 +228,7 @@ COMPAT_CFLAGS = COMPAT_OBJS = LIB_H = LIB_OBJS = -PROGRAMS = +PROGRAMS = perf-report SCRIPT_PERL = SCRIPT_SH = TEST_PROGRAMS = @@ -808,6 +808,10 @@ clean: $(RM) $(htmldocs).tar.gz $(manpages).tar.gz $(RM) PERF-VERSION-FILE PERF-CFLAGS PERF-BUILD-OPTIONS +# temporary hack: +perf-report: perf-report.cc ../../include/linux/perf_counter.h Makefile + g++ -g -O2 -Wall -lrt -o $@ $< + .PHONY: all install clean strip .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell .PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS -- cgit v1.2.3 From 66cf782996f3d57d3cc199f0a2d47a54e2aa5991 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 30 Apr 2009 13:53:33 +0200 Subject: perf_counter tools: perf stat: make -l default-on Turn on scaling display by default - this is less confusing. 
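For background, 'scaling' extrapolates the count of a counter that was time-multiplexed off the PMU for part of the run. Assuming the PERF_FORMAT_TOTAL_TIME_ENABLED/RUNNING read format documented later in this series, the arithmetic looks like this (a sketch, not builtin-stat's exact code):

#include <stdio.h>
#include <stdint.h>

/* The counter only counted for time_running ns out of time_enabled ns;
 * extrapolate the raw value over the whole enabled window. */
static uint64_t scale_count(uint64_t raw,
			    uint64_t time_enabled, uint64_t time_running)
{
	if (!time_running)
		return 0;	/* counter never got onto the PMU */
	return (uint64_t)((double)raw * time_enabled / time_running);
}

int main(void)
{
	/* counted 1,000,000 events while on the PMU for 0.25s of a 1s run */
	printf("%llu\n", (unsigned long long)
	       scale_count(1000000, 1000000000ULL, 250000000ULL));
	return 0;
}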
Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-stat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index 112b94ed329..1fde12762ca 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -171,7 +171,7 @@ static unsigned int page_size; static int zero; -static int scale; +static int scale = 1; static const unsigned int default_count[] = { 1000000, -- cgit v1.2.3 From bad760089c1ef7fe525c0f268a4078b9cb483903 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 30 Apr 2009 14:14:37 +0200 Subject: perf_counter tools: fix infinite loop in perf-report on zeroed event records Bail out early if a record has zero size - we have no chance to make reliable progress in that case. Print out the offset where this happens, and print the number of bytes we missed out on. Signed-off-by: Ingo Molnar --- Documentation/perf_counter/perf-report.cc | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/Documentation/perf_counter/perf-report.cc b/Documentation/perf_counter/perf-report.cc index 1727317352b..933a0754453 100644 --- a/Documentation/perf_counter/perf-report.cc +++ b/Documentation/perf_counter/perf-report.cc @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -226,7 +227,7 @@ void load_kallsyms(void) while (!feof(file)) { uint64_t start; char c; - char sym[1024]; + char sym[1024000]; if (getline(&line, &n, file) < 0) break; @@ -416,12 +417,23 @@ more: if (head + event->header.size >= page_size * mmap_window) { unsigned long shift = page_size * (head / page_size); + int ret; + + ret = munmap(buf, page_size * mmap_window); + assert(ret == 0); - munmap(buf, page_size * mmap_window); offset += shift; head -= shift; goto remap; } + + + if (!event->header.size) { + fprintf(stderr, "zero-sized event at file offset %ld\n", offset + head); + fprintf(stderr, "skipping %ld bytes of events.\n", stat.st_size - offset - head); + goto done; + } + head += event->header.size; if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) { @@ -458,6 +470,8 @@ more: if (offset + head < stat.st_size) goto more; +done: + close(input); std::map::iterator hi = hist.begin(); -- cgit v1.2.3 From c33a0bc4e41ef169d6e807d8abb9502544b518e5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 1 May 2009 12:23:16 +0200 Subject: perf_counter: fix race in perf_output_* When two (or more) contexts output to the same buffer, it is possible to observe half written output. Suppose we have CPU0 doing perf_counter_mmap(), CPU1 doing perf_counter_overflow(). If CPU1 does a wakeup and exposes head to user-space, then CPU2 can observe the data CPU0 is still writing. 
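Schematically, the interleaving looks like this (a hypothetical timeline; 'CPU2' is simply whichever CPU the user-space reader happens to run on):

/*
 *   CPU0 (mmap event)                CPU1 (counter overflow)
 *   -----------------                -----------------------
 *   reserve [A, A+len0)
 *   start copying record ...         reserve [A+len0, A+len0+len1)
 *                                    finish copying record
 *                                    user_page->data_head = A+len0+len1;
 *                                      ^ the reader may now consume up to
 *                                        here, including CPU0's still
 *                                        half-written bytes in [A, A+len0)
 *   ... finish copying record
 *
 * The fix below tracks completed writes in wakeup_head and lets only the
 * outermost writer on a CPU publish data_head, so the published head can
 * never overtake an unfinished record.
 */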
[ Impact: fix occasionally corrupted profiling records ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090501102533.007821627@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 5 +- kernel/perf_counter.c | 130 +++++++++++++++++++++++++++++++++---------- 2 files changed, 105 insertions(+), 30 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 41aed427005..f776851f8c4 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -358,10 +358,13 @@ struct perf_mmap_data { struct rcu_head rcu_head; int nr_pages; /* nr of data pages */ - atomic_t wakeup; /* POLL_ for wakeups */ + atomic_t poll; /* POLL_ for wakeups */ atomic_t head; /* write position */ atomic_t events; /* event limit */ + atomic_t wakeup_head; /* completed head */ + atomic_t lock; /* concurrent writes */ + struct perf_counter_mmap_page *user_page; void *data_pages[0]; }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 75f2b6c8239..8660ae57953 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1279,14 +1279,12 @@ static unsigned int perf_poll(struct file *file, poll_table *wait) { struct perf_counter *counter = file->private_data; struct perf_mmap_data *data; - unsigned int events; + unsigned int events = POLL_HUP; rcu_read_lock(); data = rcu_dereference(counter->data); if (data) - events = atomic_xchg(&data->wakeup, 0); - else - events = POLL_HUP; + events = atomic_xchg(&data->poll, 0); rcu_read_unlock(); poll_wait(file, &counter->waitq, wait); @@ -1568,22 +1566,6 @@ static const struct file_operations perf_fops = { void perf_counter_wakeup(struct perf_counter *counter) { - struct perf_mmap_data *data; - - rcu_read_lock(); - data = rcu_dereference(counter->data); - if (data) { - atomic_set(&data->wakeup, POLL_IN); - /* - * Ensure all data writes are issued before updating the - * user-space data head information. The matching rmb() - * will be in userspace after reading this value. - */ - smp_wmb(); - data->user_page->data_head = atomic_read(&data->head); - } - rcu_read_unlock(); - wake_up_all(&counter->waitq); if (counter->pending_kill) { @@ -1721,10 +1703,14 @@ struct perf_output_handle { int wakeup; int nmi; int overflow; + int locked; + unsigned long flags; }; -static inline void __perf_output_wakeup(struct perf_output_handle *handle) +static void perf_output_wakeup(struct perf_output_handle *handle) { + atomic_set(&handle->data->poll, POLL_IN); + if (handle->nmi) { handle->counter->pending_wakeup = 1; perf_pending_queue(&handle->counter->pending, @@ -1733,6 +1719,86 @@ static inline void __perf_output_wakeup(struct perf_output_handle *handle) perf_counter_wakeup(handle->counter); } +/* + * Curious locking construct. + * + * We need to ensure a later event doesn't publish a head when a former + * event isn't done writing. However since we need to deal with NMIs we + * cannot fully serialize things. + * + * What we do is serialize between CPUs so we only have to deal with NMI + * nesting on a single CPU. + * + * We only publish the head (and generate a wakeup) when the outer-most + * event completes. 
+ */ +static void perf_output_lock(struct perf_output_handle *handle) +{ + struct perf_mmap_data *data = handle->data; + int cpu; + + handle->locked = 0; + + local_irq_save(handle->flags); + cpu = smp_processor_id(); + + if (in_nmi() && atomic_read(&data->lock) == cpu) + return; + + while (atomic_cmpxchg(&data->lock, 0, cpu) != 0) + cpu_relax(); + + handle->locked = 1; +} + +static void perf_output_unlock(struct perf_output_handle *handle) +{ + struct perf_mmap_data *data = handle->data; + int head, cpu; + + if (handle->wakeup) + data->wakeup_head = data->head; + + if (!handle->locked) + goto out; + +again: + /* + * The xchg implies a full barrier that ensures all writes are done + * before we publish the new head, matched by a rmb() in userspace when + * reading this position. + */ + while ((head = atomic_xchg(&data->wakeup_head, 0))) { + data->user_page->data_head = head; + handle->wakeup = 1; + } + + /* + * NMI can happen here, which means we can miss a wakeup_head update. + */ + + cpu = atomic_xchg(&data->lock, 0); + WARN_ON_ONCE(cpu != smp_processor_id()); + + /* + * Therefore we have to validate we did not indeed do so. + */ + if (unlikely(atomic_read(&data->wakeup_head))) { + /* + * Since we had it locked, we can lock it again. + */ + while (atomic_cmpxchg(&data->lock, 0, cpu) != 0) + cpu_relax(); + + goto again; + } + + if (handle->wakeup) + perf_output_wakeup(handle); +out: + local_irq_restore(handle->flags); +} + static int perf_output_begin(struct perf_output_handle *handle, struct perf_counter *counter, unsigned int size, int nmi, int overflow) @@ -1745,6 +1811,7 @@ static int perf_output_begin(struct perf_output_handle *handle, if (!data) goto out; + handle->data = data; handle->counter = counter; handle->nmi = nmi; handle->overflow = overflow; @@ -1752,12 +1819,13 @@ static int perf_output_begin(struct perf_output_handle *handle, if (!data->nr_pages) goto fail; + perf_output_lock(handle); + do { offset = head = atomic_read(&data->head); head += size; } while (atomic_cmpxchg(&data->head, offset, head) != offset); - handle->data = data; handle->offset = offset; handle->head = head; handle->wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT); @@ -1765,7 +1833,7 @@ static int perf_output_begin(struct perf_output_handle *handle, return 0; fail: - __perf_output_wakeup(handle); + perf_output_wakeup(handle); out: rcu_read_unlock(); @@ -1809,16 +1877,20 @@ static void perf_output_copy(struct perf_output_handle *handle, static void perf_output_end(struct perf_output_handle *handle) { - int wakeup_events = handle->counter->hw_event.wakeup_events; + struct perf_counter *counter = handle->counter; + struct perf_mmap_data *data = handle->data; + + int wakeup_events = counter->hw_event.wakeup_events; if (handle->overflow && wakeup_events) { - int events = atomic_inc_return(&handle->data->events); + int events = atomic_inc_return(&data->events); if (events >= wakeup_events) { - atomic_sub(wakeup_events, &handle->data->events); - __perf_output_wakeup(handle); + atomic_sub(wakeup_events, &data->events); + handle->wakeup = 1; } - } else if (handle->wakeup) - __perf_output_wakeup(handle); + } + + perf_output_unlock(handle); rcu_read_unlock(); } -- cgit v1.2.3 From 63a809a2dc53b91268dd915bbcbd425063893676 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 1 May 2009 12:23:17 +0200 Subject: perf_counter: fix nmi-watchdog interaction When we don't have any perf-counters active, don't act like we know what the NMI is for. 
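The shape of the fix, as a stand-alone sketch (the stub constants mirror <linux/notifier.h>; the real one-hunk change follows):

#include <stdio.h>

#define NOTIFY_DONE	0x0000	/* don't care: let other handlers see the NMI */
#define NOTIFY_STOP	0x8001	/* consumed: stop calling further handlers */

static int nmi_handler(int active_counters)
{
	/*
	 * With no active counters this NMI cannot be a counter overflow:
	 * pass it along so the NMI watchdog (nmi_watchdog=2) still gets
	 * to process its own NMIs instead of having them swallowed.
	 */
	if (!active_counters)
		return NOTIFY_DONE;

	/* ... otherwise check and handle counter overflow ... */
	return NOTIFY_STOP;
}

int main(void)
{
	printf("%#x %#x\n", nmi_handler(0), nmi_handler(2));
	return 0;
}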
[ Impact: fix hard hang with nmi_watchdog=2 ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090501102533.109867793@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index fc06f4d3264..d4c0cc9d326 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -871,6 +871,9 @@ perf_counter_nmi_handler(struct notifier_block *self, struct pt_regs *regs; int ret; + if (!atomic_read(&num_counters)) + return NOTIFY_DONE; + switch (cmd) { case DIE_NMI: case DIE_NMI_IPI: -- cgit v1.2.3 From 585e3374d9d29376c2c37d821c8b7637dd48ca95 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 1 May 2009 12:23:18 +0200 Subject: perf_counter: tool: handle 0-length data files Avoid perf-report barfing on 0-length data files. [ Impact: fix perf-report SIGBUS ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090501102533.196245693@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/perf-report.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Documentation/perf_counter/perf-report.cc b/Documentation/perf_counter/perf-report.cc index 933a0754453..911d7f3e7a6 100644 --- a/Documentation/perf_counter/perf-report.cc +++ b/Documentation/perf_counter/perf-report.cc @@ -402,6 +402,11 @@ int main(int argc, char *argv[]) exit(-1); } + if (!stat.st_size) { + fprintf(stderr, "zero-sized file, nothing to do!\n"); + exit(0); + } + load_kallsyms(); remap: -- cgit v1.2.3 From e5791a808ae91a9e7e1b65ea9b8de0f96a043d88 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 1 May 2009 12:23:19 +0200 Subject: perf_counter: documentation update Update the documentation to reflect the current state of affairs [ Impact: documentation update ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090501102533.296727903@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/design.txt | 274 +++++++++++++++++++++++++++------- 1 file changed, 220 insertions(+), 54 deletions(-) diff --git a/Documentation/perf_counter/design.txt b/Documentation/perf_counter/design.txt index aaf105c02fb..9930c4bddc6 100644 --- a/Documentation/perf_counter/design.txt +++ b/Documentation/perf_counter/design.txt @@ -34,41 +34,47 @@ can be poll()ed. When creating a new counter fd, 'perf_counter_hw_event' is: -/* - * Event to monitor via a performance monitoring counter: - */ struct perf_counter_hw_event { - __u64 event_config; - - __u64 irq_period; - __u64 record_type; - __u64 read_format; - - __u64 disabled : 1, /* off by default */ - nmi : 1, /* NMI sampling */ - inherit : 1, /* children inherit it */ - pinned : 1, /* must always be on PMU */ - exclusive : 1, /* only group on PMU */ - exclude_user : 1, /* don't count user */ - exclude_kernel : 1, /* ditto kernel */ - exclude_hv : 1, /* ditto hypervisor */ - exclude_idle : 1, /* don't count when idle */ - - __reserved_1 : 55; - - __u32 extra_config_len; - - __u32 __reserved_4; - __u64 __reserved_2; - __u64 __reserved_3; + /* + * The MSB of the config word signifies if the rest contains cpu + * specific (raw) counter configuration data, if unset, the next + * 7 bits are an event type and the rest of the bits are the event + * identifier. 
+ */ + __u64 config; + + __u64 irq_period; + __u32 record_type; + __u32 read_format; + + __u64 disabled : 1, /* off by default */ + nmi : 1, /* NMI sampling */ + inherit : 1, /* children inherit it */ + pinned : 1, /* must always be on PMU */ + exclusive : 1, /* only group on PMU */ + exclude_user : 1, /* don't count user */ + exclude_kernel : 1, /* ditto kernel */ + exclude_hv : 1, /* ditto hypervisor */ + exclude_idle : 1, /* don't count when idle */ + mmap : 1, /* include mmap data */ + munmap : 1, /* include munmap data */ + comm : 1, /* include comm data */ + + __reserved_1 : 52; + + __u32 extra_config_len; + __u32 wakeup_events; /* wakeup every n events */ + + __u64 __reserved_2; + __u64 __reserved_3; }; -The 'event_config' field specifies what the counter should count. It +The 'config' field specifies what the counter should count. It is divided into 3 bit-fields: -raw_type: 1 bit (most significant bit) 0x8000_0000_0000_0000 -type: 7 bits (next most significant) 0x7f00_0000_0000_0000 -event_id: 56 bits (least significant) 0x00ff_0000_0000_0000 +raw_type: 1 bit (most significant bit) 0x8000_0000_0000_0000 +type: 7 bits (next most significant) 0x7f00_0000_0000_0000 +event_id: 56 bits (least significant) 0x00ff_ffff_ffff_ffff If 'raw_type' is 1, then the counter will count a hardware event specified by the remaining 63 bits of event_config. The encoding is @@ -134,41 +140,56 @@ enum sw_event_ids { PERF_COUNT_PAGE_FAULTS_MAJ = 6, }; +Counters of the type PERF_TYPE_TRACEPOINT are available when the ftrace event +tracer is available, and event_id values can be obtained from +/debug/tracing/events/*/*/id + + Counters come in two flavours: counting counters and sampling counters. A "counting" counter is one that is used for counting the number of events that occur, and is characterised by having -irq_period = 0 and record_type = PERF_RECORD_SIMPLE. A read() on a -counting counter simply returns the current value of the counter as -an 8-byte number. +irq_period = 0. + + +A read() on a counter returns the current value of the counter and possible +additional values as specified by 'read_format', each value is a u64 (8 bytes) +in size. + +/* + * Bits that can be set in hw_event.read_format to request that + * reads on the counter should return the indicated quantities, + * in increasing order of bit value, after the counter value. + */ +enum perf_counter_read_format { + PERF_FORMAT_TOTAL_TIME_ENABLED = 1, + PERF_FORMAT_TOTAL_TIME_RUNNING = 2, +}; + +Using these additional values one can establish the overcommit ratio for a +particular counter allowing one to take the round-robin scheduling effect +into account. + A "sampling" counter is one that is set up to generate an interrupt every N events, where N is given by 'irq_period'. A sampling counter -has irq_period > 0 and record_type != PERF_RECORD_SIMPLE. The -record_type controls what data is recorded on each interrupt, and the -available values are currently: +has irq_period > 0. The record_type controls what data is recorded on each +interrupt: /* - * IRQ-notification data record type: + * Bits that can be set in hw_event.record_type to request information + * in the overflow packets. 
*/ -enum perf_counter_record_type { - PERF_RECORD_SIMPLE = 0, - PERF_RECORD_IRQ = 1, - PERF_RECORD_GROUP = 2, +enum perf_counter_record_format { + PERF_RECORD_IP = 1U << 0, + PERF_RECORD_TID = 1U << 1, + PERF_RECORD_TIME = 1U << 2, + PERF_RECORD_ADDR = 1U << 3, + PERF_RECORD_GROUP = 1U << 4, + PERF_RECORD_CALLCHAIN = 1U << 5, }; -A record_type value of PERF_RECORD_IRQ will record the instruction -pointer (IP) at which the interrupt occurred. A record_type value of -PERF_RECORD_GROUP will record the event_config and counter value of -all of the other counters in the group, and should only be used on a -group leader (see below). Currently these two values are mutually -exclusive, but record_type will become a bit-mask in future and -support other values. - -A sampling counter has an event queue, into which an event is placed -on each interrupt. A read() on a sampling counter will read the next -event from the event queue. If the queue is empty, the read() will -either block or return an EAGAIN error, depending on whether the fd -has been set to non-blocking mode or not. +Such (and other) events will be recorded in a ring-buffer, which is +available to user-space using mmap() (see below). The 'disabled' bit specifies whether the counter starts out disabled or enabled. If it is initially disabled, it can be enabled by ioctl @@ -206,6 +227,13 @@ The 'exclude_user', 'exclude_kernel' and 'exclude_hv' bits provide a way to request that counting of events be restricted to times when the CPU is in user, kernel and/or hypervisor mode. +The 'mmap' and 'munmap' bits allow recording of PROT_EXEC mmap/munmap +operations, these can be used to relate userspace IP addresses to actual +code, even after the mapping (or even the whole process) is gone, +these events are recorded in the ring-buffer (see below). + +The 'comm' bit allows tracking of process comm data on process creation. +This too is recorded in the ring-buffer (see below). The 'pid' parameter to the perf_counter_open() system call allows the counter to be specific to a task: @@ -250,6 +278,138 @@ can be meaningfully compared, added, divided (to get ratios), etc., with each other, since they have counted events for the same set of executed instructions. + +Like stated, asynchronous events, like counter overflow or PROT_EXEC mmap +tracking are logged into a ring-buffer. This ring-buffer is created and +accessed through mmap(). + +The mmap size should be 1+2^n pages, where the first page is a meta-data page +(struct perf_counter_mmap_page) that contains various bits of information such +as where the ring-buffer head is. + +/* + * Structure of the page that can be mapped via mmap + */ +struct perf_counter_mmap_page { + __u32 version; /* version number of this structure */ + __u32 compat_version; /* lowest version this is compat with */ + + /* + * Bits needed to read the hw counters in user-space. + * + * u32 seq; + * s64 count; + * + * do { + * seq = pc->lock; + * + * barrier() + * if (pc->index) { + * count = pmc_read(pc->index - 1); + * count += pc->offset; + * } else + * goto regular_read; + * + * barrier(); + * } while (pc->lock != seq); + * + * NOTE: for obvious reason this only works on self-monitoring + * processes. + */ + __u32 lock; /* seqlock for synchronization */ + __u32 index; /* hardware counter identifier */ + __s64 offset; /* add to hardware counter value */ + + /* + * Control data for the mmap() data buffer. 
+ * + * User-space reading this value should issue an rmb(), on SMP capable + * platforms, after reading this value -- see perf_counter_wakeup(). + */ + __u32 data_head; /* head in the data section */ +}; + +NOTE: the hw-counter userspace bits are arch specific and are currently only + implemented on powerpc. + +The following 2^n pages are the ring-buffer which contains events of the form: + +#define PERF_EVENT_MISC_KERNEL (1 << 0) +#define PERF_EVENT_MISC_USER (1 << 1) +#define PERF_EVENT_MISC_OVERFLOW (1 << 2) + +struct perf_event_header { + __u32 type; + __u16 misc; + __u16 size; +}; + +enum perf_event_type { + + /* + * The MMAP events record the PROT_EXEC mappings so that we can + * correlate userspace IPs to code. They have the following structure: + * + * struct { + * struct perf_event_header header; + * + * u32 pid, tid; + * u64 addr; + * u64 len; + * u64 pgoff; + * char filename[]; + * }; + */ + PERF_EVENT_MMAP = 1, + PERF_EVENT_MUNMAP = 2, + + /* + * struct { + * struct perf_event_header header; + * + * u32 pid, tid; + * char comm[]; + * }; + */ + PERF_EVENT_COMM = 3, + + /* + * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field + * will be PERF_RECORD_* + * + * struct { + * struct perf_event_header header; + * + * { u64 ip; } && PERF_RECORD_IP + * { u32 pid, tid; } && PERF_RECORD_TID + * { u64 time; } && PERF_RECORD_TIME + * { u64 addr; } && PERF_RECORD_ADDR + * + * { u64 nr; + * { u64 event, val; } cnt[nr]; } && PERF_RECORD_GROUP + * + * { u16 nr, + * hv, + * kernel, + * user; + * u64 ips[nr]; } && PERF_RECORD_CALLCHAIN + * }; + */ +}; + +NOTE: PERF_RECORD_CALLCHAIN is arch specific and currently only implemented + on x86. + +Notification of new events is possible through poll()/select()/epoll() and +fcntl() managing signals. + +Normally a notification is generated for every page filled, however one can +additionally set perf_counter_hw_event.wakeup_events to generate one every +so many counter overflow events. + +Future work will include a splice() interface to the ring-buffer. + + Counters can be enabled and disabled in two ways: via ioctl and via prctl. When a counter is disabled, it doesn't count or generate events but does continue to exist and maintain its count value. @@ -269,6 +429,12 @@ group other than the leader only affects that counter - disabling an non-leader stops that counter from counting but doesn't affect any other counter. +Additionally, non-inherited overflow counters can use + + ioctl(fd, PERF_COUNTER_IOC_REFRESH, nr); + +to enable a counter for 'nr' events, after which it gets disabled again. 
+ A process can enable or disable all the counter groups that are attached to it, using prctl: -- cgit v1.2.3 From e0202f56a82cd1170c6f1c520db669431cf26ddc Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 1 May 2009 16:51:44 +0200 Subject: perf_counter tools: fix x86 syscall numbers Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 4 ++-- Documentation/perf_counter/builtin-stat.c | 4 ++-- Documentation/perf_counter/builtin-top.c | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 4a50abf843e..9cff266fb61 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -57,13 +57,13 @@ #define asmlinkage #ifdef __x86_64__ -#define __NR_perf_counter_open 295 +#define __NR_perf_counter_open 298 #define rmb() asm volatile("lfence" ::: "memory") #define cpu_relax() asm volatile("rep; nop" ::: "memory"); #endif #ifdef __i386__ -#define __NR_perf_counter_open 333 +#define __NR_perf_counter_open 336 #define rmb() asm volatile("lfence" ::: "memory") #define cpu_relax() asm volatile("rep; nop" ::: "memory"); #endif diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index 1fde12762ca..9fbc66173de 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -108,13 +108,13 @@ #define asmlinkage #ifdef __x86_64__ -#define __NR_perf_counter_open 295 +#define __NR_perf_counter_open 298 #define rmb() asm volatile("lfence" ::: "memory") #define cpu_relax() asm volatile("rep; nop" ::: "memory"); #endif #ifdef __i386__ -#define __NR_perf_counter_open 333 +#define __NR_perf_counter_open 336 #define rmb() asm volatile("lfence" ::: "memory") #define cpu_relax() asm volatile("rep; nop" ::: "memory"); #endif diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 8d28864a20c..b6d989e7b19 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -89,13 +89,13 @@ #define asmlinkage #ifdef __x86_64__ -#define __NR_perf_counter_open 295 +#define __NR_perf_counter_open 298 #define rmb() asm volatile("lfence" ::: "memory") #define cpu_relax() asm volatile("rep; nop" ::: "memory"); #endif #ifdef __i386__ -#define __NR_perf_counter_open 333 +#define __NR_perf_counter_open 336 #define rmb() asm volatile("lfence" ::: "memory") #define cpu_relax() asm volatile("rep; nop" ::: "memory"); #endif -- cgit v1.2.3 From 3666932bf2212a8fa77e344c5d946e86787bdbbe Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 1 May 2009 17:37:51 +0200 Subject: perf_counter tools: remove build generated files These files are generated during the build process. No need to have them in the git repository. 
Signed-off-by: Thomas Gleixner --- Documentation/perf_counter/PERF-BUILD-OPTIONS | 4 ---- Documentation/perf_counter/PERF-CFLAGS | 1 - Documentation/perf_counter/PERF-VERSION-FILE | 1 - 3 files changed, 6 deletions(-) delete mode 100644 Documentation/perf_counter/PERF-BUILD-OPTIONS delete mode 100644 Documentation/perf_counter/PERF-CFLAGS delete mode 100644 Documentation/perf_counter/PERF-VERSION-FILE diff --git a/Documentation/perf_counter/PERF-BUILD-OPTIONS b/Documentation/perf_counter/PERF-BUILD-OPTIONS deleted file mode 100644 index 46d8d6ceb2f..00000000000 --- a/Documentation/perf_counter/PERF-BUILD-OPTIONS +++ /dev/null @@ -1,4 +0,0 @@ -SHELL_PATH='/bin/sh' -TAR='tar' -NO_CURL='' -NO_PERL='' diff --git a/Documentation/perf_counter/PERF-CFLAGS b/Documentation/perf_counter/PERF-CFLAGS deleted file mode 100644 index f24906ca688..00000000000 --- a/Documentation/perf_counter/PERF-CFLAGS +++ /dev/null @@ -1 +0,0 @@ --g -O2 -Wall -DSHA1_HEADER='' : /home/mingo/bin:libexec/perf-core:share/perf-core/templates:/home/mingo diff --git a/Documentation/perf_counter/PERF-VERSION-FILE b/Documentation/perf_counter/PERF-VERSION-FILE deleted file mode 100644 index 328e244c0c8..00000000000 --- a/Documentation/perf_counter/PERF-VERSION-FILE +++ /dev/null @@ -1 +0,0 @@ -PERF_VERSION = 0.0.1.PERF -- cgit v1.2.3 From 6eda5838bc5771578986429cde4a0870e1e5f5e1 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 1 May 2009 18:29:57 +0200 Subject: perfcounter tools: move common defines ... to local header file No change, move of duplicated stuff only. Signed-off-by: Thomas Gleixner --- Documentation/perf_counter/Makefile | 1 + Documentation/perf_counter/builtin-record.c | 64 ++--------------------------- Documentation/perf_counter/builtin-stat.c | 59 +------------------------- Documentation/perf_counter/builtin-top.c | 63 +--------------------------- Documentation/perf_counter/perf.h | 64 +++++++++++++++++++++++++++++ 5 files changed, 70 insertions(+), 181 deletions(-) create mode 100644 Documentation/perf_counter/perf.h diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 877cf5dedb5..481e4c26cd4 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -287,6 +287,7 @@ export PERL_PATH LIB_FILE=libperf.a LIB_H += ../../include/linux/perf_counter.h +LIB_H += perf.h LIB_H += util/levenshtein.h LIB_H += util/parse-options.h LIB_H += util/quote.h diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 9cff266fb61..59f1d87f41e 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -1,6 +1,7 @@ -#define _GNU_SOURCE +#include "util/util.h" + #include #include #include @@ -32,66 +33,7 @@ #include "../../include/linux/perf_counter.h" - -/* - * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all - * counters in the current task. 
- */ -#define PR_TASK_PERF_COUNTERS_DISABLE 31 -#define PR_TASK_PERF_COUNTERS_ENABLE 32 - -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) - -#define rdclock() \ -({ \ - struct timespec ts; \ - \ - clock_gettime(CLOCK_MONOTONIC, &ts); \ - ts.tv_sec * 1000000000ULL + ts.tv_nsec; \ -}) - -/* - * Pick up some kernel type conventions: - */ -#define __user -#define asmlinkage - -#ifdef __x86_64__ -#define __NR_perf_counter_open 298 -#define rmb() asm volatile("lfence" ::: "memory") -#define cpu_relax() asm volatile("rep; nop" ::: "memory"); -#endif - -#ifdef __i386__ -#define __NR_perf_counter_open 336 -#define rmb() asm volatile("lfence" ::: "memory") -#define cpu_relax() asm volatile("rep; nop" ::: "memory"); -#endif - -#ifdef __powerpc__ -#define __NR_perf_counter_open 319 -#define rmb() asm volatile ("sync" ::: "memory") -#define cpu_relax() asm volatile ("" ::: "memory"); -#endif - -#define unlikely(x) __builtin_expect(!!(x), 0) -#define min(x, y) ({ \ - typeof(x) _min1 = (x); \ - typeof(y) _min2 = (y); \ - (void) (&_min1 == &_min2); \ - _min1 < _min2 ? _min1 : _min2; }) - -extern asmlinkage int sys_perf_counter_open( - struct perf_counter_hw_event *hw_event_uptr __user, - pid_t pid, - int cpu, - int group_fd, - unsigned long flags); - -#define MAX_COUNTERS 64 -#define MAX_NR_CPUS 256 - -#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id)) +#include "perf.h" static int nr_counters = 0; static __u64 event_id[MAX_COUNTERS] = { }; diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index 9fbc66173de..6de38d25688 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -85,64 +85,7 @@ #include "../../include/linux/perf_counter.h" - -/* - * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all - * counters in the current task. - */ -#define PR_TASK_PERF_COUNTERS_DISABLE 31 -#define PR_TASK_PERF_COUNTERS_ENABLE 32 - -#define rdclock() \ -({ \ - struct timespec ts; \ - \ - clock_gettime(CLOCK_MONOTONIC, &ts); \ - ts.tv_sec * 1000000000ULL + ts.tv_nsec; \ -}) - -/* - * Pick up some kernel type conventions: - */ -#define __user -#define asmlinkage - -#ifdef __x86_64__ -#define __NR_perf_counter_open 298 -#define rmb() asm volatile("lfence" ::: "memory") -#define cpu_relax() asm volatile("rep; nop" ::: "memory"); -#endif - -#ifdef __i386__ -#define __NR_perf_counter_open 336 -#define rmb() asm volatile("lfence" ::: "memory") -#define cpu_relax() asm volatile("rep; nop" ::: "memory"); -#endif - -#ifdef __powerpc__ -#define __NR_perf_counter_open 319 -#define rmb() asm volatile ("sync" ::: "memory") -#define cpu_relax() asm volatile ("" ::: "memory"); -#endif - -#define unlikely(x) __builtin_expect(!!(x), 0) -#define min(x, y) ({ \ - typeof(x) _min1 = (x); \ - typeof(y) _min2 = (y); \ - (void) (&_min1 == &_min2); \ - _min1 < _min2 ? 
_min1 : _min2; }) - -extern asmlinkage int sys_perf_counter_open( - struct perf_counter_hw_event *hw_event_uptr __user, - pid_t pid, - int cpu, - int group_fd, - unsigned long flags); - -#define MAX_COUNTERS 64 -#define MAX_NR_CPUS 256 - -#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id)) +#include "perf.h" static int system_wide = 0; diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index b6d989e7b19..cd6f61d7341 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -66,68 +66,7 @@ #include "../../include/linux/perf_counter.h" - -/* - * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all - * counters in the current task. - */ -#define PR_TASK_PERF_COUNTERS_DISABLE 31 -#define PR_TASK_PERF_COUNTERS_ENABLE 32 - -#define rdclock() \ -({ \ - struct timespec ts; \ - \ - clock_gettime(CLOCK_MONOTONIC, &ts); \ - ts.tv_sec * 1000000000ULL + ts.tv_nsec; \ -}) - -/* - * Pick up some kernel type conventions: - */ -#define __user -#define asmlinkage - -#ifdef __x86_64__ -#define __NR_perf_counter_open 298 -#define rmb() asm volatile("lfence" ::: "memory") -#define cpu_relax() asm volatile("rep; nop" ::: "memory"); -#endif - -#ifdef __i386__ -#define __NR_perf_counter_open 336 -#define rmb() asm volatile("lfence" ::: "memory") -#define cpu_relax() asm volatile("rep; nop" ::: "memory"); -#endif - -#ifdef __powerpc__ -#define __NR_perf_counter_open 319 -#define rmb() asm volatile ("sync" ::: "memory") -#define cpu_relax() asm volatile ("" ::: "memory"); -#endif - -#define unlikely(x) __builtin_expect(!!(x), 0) -#define min(x, y) ({ \ - typeof(x) _min1 = (x); \ - typeof(y) _min2 = (y); \ - (void) (&_min1 == &_min2); \ - _min1 < _min2 ? _min1 : _min2; }) - -asmlinkage int sys_perf_counter_open( - struct perf_counter_hw_event *hw_event_uptr __user, - pid_t pid, - int cpu, - int group_fd, - unsigned long flags) -{ - return syscall( - __NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags); -} - -#define MAX_COUNTERS 64 -#define MAX_NR_CPUS 256 - -#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id)) +#include "perf.h" static int system_wide = 0; diff --git a/Documentation/perf_counter/perf.h b/Documentation/perf_counter/perf.h new file mode 100644 index 00000000000..391fcc73148 --- /dev/null +++ b/Documentation/perf_counter/perf.h @@ -0,0 +1,64 @@ +#ifndef _PERF_PERF_H +#define _PERF_PERF_H + +/* + * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all + * counters in the current task. 
+ */ +#define PR_TASK_PERF_COUNTERS_DISABLE 31 +#define PR_TASK_PERF_COUNTERS_ENABLE 32 + +#define rdclock() \ +({ \ + struct timespec ts; \ + \ + clock_gettime(CLOCK_MONOTONIC, &ts); \ + ts.tv_sec * 1000000000ULL + ts.tv_nsec; \ +}) + +/* + * Pick up some kernel type conventions: + */ +#define __user +#define asmlinkage + +#ifdef __x86_64__ +#define __NR_perf_counter_open 298 +#define rmb() asm volatile("lfence" ::: "memory") +#define cpu_relax() asm volatile("rep; nop" ::: "memory"); +#endif + +#ifdef __i386__ +#define __NR_perf_counter_open 336 +#define rmb() asm volatile("lfence" ::: "memory") +#define cpu_relax() asm volatile("rep; nop" ::: "memory"); +#endif + +#ifdef __powerpc__ +#define __NR_perf_counter_open 319 +#define rmb() asm volatile ("sync" ::: "memory") +#define cpu_relax() asm volatile ("" ::: "memory"); +#endif + +#define unlikely(x) __builtin_expect(!!(x), 0) +#define min(x, y) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? _min1 : _min2; }) + +static inline int +sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr, + pid_t pid, int cpu, int group_fd, + unsigned long flags) +{ + return syscall(__NR_perf_counter_open, hw_event_uptr, pid, cpu, + group_fd, flags); +} + +#define MAX_COUNTERS 64 +#define MAX_NR_CPUS 256 + +#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id)) + +#endif -- cgit v1.2.3 From a92e70237c8abbd1c3241133bf72f2cd07c90eae Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 1 May 2009 18:39:47 +0200 Subject: perfcounter tools: make rdclock an inline function Signed-off-by: Thomas Gleixner --- Documentation/perf_counter/perf.h | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/Documentation/perf_counter/perf.h b/Documentation/perf_counter/perf.h index 391fcc73148..fb142307228 100644 --- a/Documentation/perf_counter/perf.h +++ b/Documentation/perf_counter/perf.h @@ -8,13 +8,17 @@ #define PR_TASK_PERF_COUNTERS_DISABLE 31 #define PR_TASK_PERF_COUNTERS_ENABLE 32 -#define rdclock() \ -({ \ - struct timespec ts; \ - \ - clock_gettime(CLOCK_MONOTONIC, &ts); \ - ts.tv_sec * 1000000000ULL + ts.tv_nsec; \ -}) +#ifndef NSEC_PER_SEC +# define NSEC_PER_SEC 1000000000ULL +#endif + +static inline unsigned long long rdclock(void) +{ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); + return ts.tv_sec * 1000000000ULL + ts.tv_nsec; +} /* * Pick up some kernel type conventions: -- cgit v1.2.3 From 7bd5469cd938eec6a76b3135e6becd9b5e096e98 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 1 May 2009 18:42:47 +0200 Subject: perfcounter tools: fix pointer mismatch Neither process_options nor execvp take an const **char as argument. 
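For reference, the underlying C rule (sketch; run() is a hypothetical wrapper):

#include <unistd.h>

/*
 * POSIX declares: int execvp(const char *file, char *const argv[]);
 * i.e. the vector is 'char *const *' -- const pointers to mutable
 * chars. 'const char **' puts the const one level deeper, and C allows
 * no implicit conversion between the two layouts, hence the warning
 * this patch silences by using plain 'char **'.
 */
static int run(char **argv)
{
	return execvp(argv[0], argv);	/* char ** does convert to char *const * */
}

int main(int argc, char **argv)
{
	return argc > 1 ? run(argv + 1) : 0;
}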
Signed-off-by: Thomas Gleixner --- Documentation/perf_counter/builtin-record.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 59f1d87f41e..3a3deb3fbbc 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -332,7 +332,7 @@ static void sigchld_handler(int sig) done = 1; } -int cmd_record(int argc, const char **argv) +int cmd_record(int argc, char **argv) { struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; -- cgit v1.2.3 From 4ba67c1d48aeedcc31630bb40b6179fc7d360f90 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 1 May 2009 18:48:06 +0200 Subject: perfcounter tools: get the syscall number from arch/*/include/asm/unistd.h Avoid further confusion during development Signed-off-by: Thomas Gleixner --- Documentation/perf_counter/perf.h | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/Documentation/perf_counter/perf.h b/Documentation/perf_counter/perf.h index fb142307228..6fa3656399f 100644 --- a/Documentation/perf_counter/perf.h +++ b/Documentation/perf_counter/perf.h @@ -26,20 +26,14 @@ static inline unsigned long long rdclock(void) #define __user #define asmlinkage -#ifdef __x86_64__ -#define __NR_perf_counter_open 298 -#define rmb() asm volatile("lfence" ::: "memory") -#define cpu_relax() asm volatile("rep; nop" ::: "memory"); -#endif - -#ifdef __i386__ -#define __NR_perf_counter_open 336 +#if defined(__x86_64__) || defined(__i386__) +#include "../../arch/x86/include/asm/unistd.h" #define rmb() asm volatile("lfence" ::: "memory") #define cpu_relax() asm volatile("rep; nop" ::: "memory"); #endif #ifdef __powerpc__ -#define __NR_perf_counter_open 319 +#include "../../arch/powerpc/include/asm/unistd.h" #define rmb() asm volatile ("sync" ::: "memory") #define cpu_relax() asm volatile ("" ::: "memory"); #endif -- cgit v1.2.3 From dab6f6a3401f596fe934f41fc5da3f401adfdfb1 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sat, 2 May 2009 08:02:36 +0200 Subject: perf_counter tools: fix build error ctype.h crawled out of the bit bucket :) Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 1 - 1 file changed, 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 3a3deb3fbbc..ddfdcf86fb2 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From b82914ce33146186d554b0f5c41e4e13693614ce Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 4 May 2009 18:54:32 +0200 Subject: perf_counter: round-robin per-CPU counters too This used to be unstable when we had the rq->lock dependencies, but now that they are that of the past we can turn on percpu counter RR too. 
[ Impact: handle counter over-commit for per-CPU counters too ] LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 8660ae57953..b9679c36bcc 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1069,18 +1069,14 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) { struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); struct perf_counter_context *ctx = &curr->perf_counter_ctx; - const int rotate_percpu = 0; - if (rotate_percpu) - perf_counter_cpu_sched_out(cpuctx); + perf_counter_cpu_sched_out(cpuctx); perf_counter_task_sched_out(curr, cpu); - if (rotate_percpu) - rotate_ctx(&cpuctx->ctx); + rotate_ctx(&cpuctx->ctx); rotate_ctx(ctx); - if (rotate_percpu) - perf_counter_cpu_sched_in(cpuctx, cpu); + perf_counter_cpu_sched_in(cpuctx, cpu); perf_counter_task_sched_in(curr, cpu); } -- cgit v1.2.3 From ba77813a2a22d631fe5bc0bf1ec0d11350544b70 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 4 May 2009 18:47:44 +0200 Subject: perf_counter: x86: fixup nmi_watchdog vs perf_counter boo-boo Invert the atomic_inc_not_zero() test so that we will indeed detect the first activation. Also rename the global num_counters, since its easy to confuse with x86_pmu.num_counters. [ Impact: fix non-working perfcounters on AMD CPUs, cleanup ] Signed-off-by: Peter Zijlstra LKML-Reference: <1241455664.7620.4938.camel@twins> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index d4c0cc9d326..196b58f0444 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -171,7 +171,7 @@ again: return new_raw_count; } -static atomic_t num_counters; +static atomic_t active_counters; static DEFINE_MUTEX(pmc_reserve_mutex); static bool reserve_pmc_hardware(void) @@ -224,7 +224,7 @@ static void release_pmc_hardware(void) static void hw_perf_counter_destroy(struct perf_counter *counter) { - if (atomic_dec_and_mutex_lock(&num_counters, &pmc_reserve_mutex)) { + if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { release_pmc_hardware(); mutex_unlock(&pmc_reserve_mutex); } @@ -248,12 +248,12 @@ static int __hw_perf_counter_init(struct perf_counter *counter) return -ENODEV; err = 0; - if (atomic_inc_not_zero(&num_counters)) { + if (!atomic_inc_not_zero(&active_counters)) { mutex_lock(&pmc_reserve_mutex); - if (atomic_read(&num_counters) == 0 && !reserve_pmc_hardware()) + if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware()) err = -EBUSY; else - atomic_inc(&num_counters); + atomic_inc(&active_counters); mutex_unlock(&pmc_reserve_mutex); } if (err) @@ -280,7 +280,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter) if (capable(CAP_SYS_ADMIN) && hw_event->nmi) hwc->nmi = 1; - hwc->irq_period = hw_event->irq_period; + hwc->irq_period = hw_event->irq_period; if ((s64)hwc->irq_period <= 0 || hwc->irq_period > x86_pmu.max_period) hwc->irq_period = x86_pmu.max_period; @@ -871,7 +871,7 @@ perf_counter_nmi_handler(struct notifier_block *self, struct pt_regs *regs; int ret; - if (!atomic_read(&num_counters)) + if (!atomic_read(&active_counters)) return NOTIFY_DONE; switch (cmd) { -- cgit v1.2.3 From 0d905bca23aca5c86a10ee101bcd3b1abbd40b25 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 4 May 2009 19:13:30 
+0200 Subject: perf_counter: initialize the per-cpu context earlier percpu scheduling for perfcounters wants to take the context lock, but that lock first needs to be initialized. Currently it is an early_initcall() - but that is too late, the task tick runs much sooner than that. Call it explicitly from the scheduler init sequence instead. [ Impact: fix access-before-init crash ] LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 5 ++++- kernel/perf_counter.c | 5 +---- kernel/sched.c | 5 ++++- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index f776851f8c4..a356fa69796 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -573,6 +573,8 @@ extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); extern int sysctl_perf_counter_priv; +extern void perf_counter_init(void); + #else static inline void perf_counter_task_sched_in(struct task_struct *task, int cpu) { } @@ -600,9 +602,10 @@ perf_counter_mmap(unsigned long addr, unsigned long len, static inline void perf_counter_munmap(unsigned long addr, unsigned long len, - unsigned long pgoff, struct file *file) { } + unsigned long pgoff, struct file *file) { } static inline void perf_counter_comm(struct task_struct *tsk) { } +static inline void perf_counter_init(void) { } #endif #endif /* __KERNEL__ */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index b9679c36bcc..fcdafa234a5 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -3265,15 +3265,12 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = { .notifier_call = perf_cpu_notify, }; -static int __init perf_counter_init(void) +void __init perf_counter_init(void) { perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, (void *)(long)smp_processor_id()); register_cpu_notifier(&perf_cpu_nb); - - return 0; } -early_initcall(perf_counter_init); static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf) { diff --git a/kernel/sched.c b/kernel/sched.c index 2f600e30dcf..a728976a3a6 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -8996,7 +8997,7 @@ void __init sched_init(void) * 1024) and two child groups A0 and A1 (of weight 1024 each), * then A0's share of the cpu resource is: * - * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% + * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% * * We achieve this by letting init_task_group's tasks sit * directly in rq->cfs (i.e init_task_group->se[] = NULL). @@ -9097,6 +9098,8 @@ void __init sched_init(void) alloc_bootmem_cpumask_var(&cpu_isolated_map); #endif /* SMP */ + perf_counter_init(); + scheduler_running = 1; } -- cgit v1.2.3 From 1dce8d99b85aba6eddb8b8260baea944922e6fe7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 4 May 2009 19:23:18 +0200 Subject: perf_counter: convert perf_resource_mutex to a spinlock Now percpu counters can be initialized very early. But the init sequence uses mutex_lock(). Fortunately, perf_resource_mutex should be a spinlock anyway, so convert it. 
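The distinction, in brief (kernel-context sketch, not compilable stand-alone): mutex_lock() may call schedule() while waiting, and scheduling needs an initialized scheduler, which this init path now predates; spin_lock() just busy-waits, which is also cheaper for the short critical sections involved.

static DEFINE_MUTEX(perf_resource_mutex);	/* may sleep: unusable this
						 * early in sched_init() */
static DEFINE_SPINLOCK(perf_resource_lock);	/* busy-waits: safe before the
						 * scheduler is running */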
[ Impact: fix crash due to early init mutex use ] LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index fcdafa234a5..5f86a1156c9 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -46,9 +46,9 @@ static atomic_t nr_comm_tracking __read_mostly; int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */ /* - * Mutex for (sysadmin-configurable) counter reservations: + * Lock for (sysadmin-configurable) counter reservations: */ -static DEFINE_MUTEX(perf_resource_mutex); +static DEFINE_SPINLOCK(perf_resource_lock); /* * Architecture provided APIs - weak aliases: @@ -3207,9 +3207,9 @@ static void __cpuinit perf_counter_init_cpu(int cpu) cpuctx = &per_cpu(perf_cpu_context, cpu); __perf_counter_init_context(&cpuctx->ctx, NULL); - mutex_lock(&perf_resource_mutex); + spin_lock(&perf_resource_lock); cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu; - mutex_unlock(&perf_resource_mutex); + spin_unlock(&perf_resource_lock); hw_perf_counter_setup(cpu); } @@ -3292,7 +3292,7 @@ perf_set_reserve_percpu(struct sysdev_class *class, if (val > perf_max_counters) return -EINVAL; - mutex_lock(&perf_resource_mutex); + spin_lock(&perf_resource_lock); perf_reserved_percpu = val; for_each_online_cpu(cpu) { cpuctx = &per_cpu(perf_cpu_context, cpu); @@ -3302,7 +3302,7 @@ perf_set_reserve_percpu(struct sysdev_class *class, cpuctx->max_pertask = mpt; spin_unlock_irq(&cpuctx->ctx.lock); } - mutex_unlock(&perf_resource_mutex); + spin_unlock(&perf_resource_lock); return count; } @@ -3324,9 +3324,9 @@ perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count) if (val > 1) return -EINVAL; - mutex_lock(&perf_resource_mutex); + spin_lock(&perf_resource_lock); perf_overcommit = val; - mutex_unlock(&perf_resource_mutex); + spin_unlock(&perf_resource_lock); return count; } -- cgit v1.2.3 From 066d7dea32c9bffe6decc0abe465627656cdd84e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 4 May 2009 19:04:09 +0200 Subject: perf_counter: fix fixed-purpose counter support on v2 Intel-PERFMON Fixed-purpose counters stopped working in a simple 'perf stat ls' run: cache references cache misses Due to: ef7b3e0: perf_counter, x86: remove vendor check in fixed_mode_idx() Which made x86_pmu.num_counters_fixed matter: if it's nonzero, the fixed-purpose counters are utilized. But on v2 perfmon this field is not set (despite there being fixed-purpose PMCs). So add a quirk to set the number of fixed-purpose counters to at least three. 
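v2 perfmon hardware does have the three architectural fixed-purpose counters (instructions retired, core cycles, reference cycles); it just reports zero in CPUID leaf 0xA. The quirk is a one-line clamp (stand-alone sketch; the x86 hunk follows):

#include <stdio.h>

/* Clamp the CPUID-reported fixed-counter count to the architectural
 * minimum of 3 that v2 perfmon CPUs actually provide. */
static int num_fixed_counters(int cpuid_reported)
{
	return cpuid_reported > 3 ? cpuid_reported : 3;
}

int main(void)
{
	printf("%d\n", num_fixed_counters(0));	/* v2 reports 0 -> assume 3 */
	return 0;
}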
[ Impact: add quirk for three fixed-purpose counters on certain Intel CPUs ] Cc: Robert Richter Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1241002046-8832-28-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 196b58f0444..a6878b0798e 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -962,7 +962,13 @@ static int intel_pmu_init(void) x86_pmu = intel_pmu; x86_pmu.version = version; x86_pmu.num_counters = eax.split.num_counters; - x86_pmu.num_counters_fixed = edx.split.num_counters_fixed; + + /* + * Quirk: v2 perfmon does not report fixed-purpose counters, so + * assume at least 3 counters: + */ + x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); + x86_pmu.counter_bits = eax.split.bit_width; x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1; -- cgit v1.2.3 From c66de4a5be7913247bd83d79168f8e4420c9cfbc Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 5 May 2009 17:50:22 +0200 Subject: perf_counter: uncouple data_head updates from wakeups Keep data_head up-to-date irrespective of notifications. This fixes the case where you disable a counter and don't get a notification for the last few pending events, and it also allows polling usage. [ Impact: increase precision of perfcounter mmap-ed fields ] Suggested-by: Corey Ashford Signed-off-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20090505155436.925084300@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 4 +++- kernel/perf_counter.c | 20 +++++++++----------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index a356fa69796..17b63105f2a 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -362,9 +362,11 @@ struct perf_mmap_data { atomic_t head; /* write position */ atomic_t events; /* event limit */ - atomic_t wakeup_head; /* completed head */ + atomic_t done_head; /* completed head */ atomic_t lock; /* concurrent writes */ + atomic_t wakeup; /* needs a wakeup */ + struct perf_counter_mmap_page *user_page; void *data_pages[0]; }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 5f86a1156c9..ba5e921e1f3 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1696,7 +1696,6 @@ struct perf_output_handle { struct perf_mmap_data *data; unsigned int offset; unsigned int head; - int wakeup; int nmi; int overflow; int locked; @@ -1752,8 +1751,7 @@ static void perf_output_unlock(struct perf_output_handle *handle) struct perf_mmap_data *data = handle->data; int head, cpu; - if (handle->wakeup) - data->wakeup_head = data->head; + data->done_head = data->head; if (!handle->locked) goto out; @@ -1764,13 +1762,11 @@ again: * before we publish the new head, matched by a rmb() in userspace when * reading this position. */ - while ((head = atomic_xchg(&data->wakeup_head, 0))) { + while ((head = atomic_xchg(&data->done_head, 0))) data->user_page->data_head = head; - handle->wakeup = 1; - } /* - * NMI can happen here, which means we can miss a wakeup_head update. + * NMI can happen here, which means we can miss a done_head update. */ cpu = atomic_xchg(&data->lock, 0); @@ -1779,7 +1775,7 @@ again: /* * Therefore we have to validate we did not indeed do so. 
*/ - if (unlikely(atomic_read(&data->wakeup_head))) { + if (unlikely(atomic_read(&data->done_head))) { /* * Since we had it locked, we can lock it again. */ @@ -1789,7 +1785,7 @@ again: goto again; } - if (handle->wakeup) + if (atomic_xchg(&data->wakeup, 0)) perf_output_wakeup(handle); out: local_irq_restore(handle->flags); @@ -1824,7 +1820,9 @@ static int perf_output_begin(struct perf_output_handle *handle, handle->offset = offset; handle->head = head; - handle->wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT); + + if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT)) + atomic_set(&data->wakeup, 1); return 0; @@ -1882,7 +1880,7 @@ static void perf_output_end(struct perf_output_handle *handle) int events = atomic_inc_return(&data->events); if (events >= wakeup_events) { atomic_sub(wakeup_events, &data->events); - handle->wakeup = 1; + atomic_set(&data->wakeup, 1); } } -- cgit v1.2.3 From 6de6a7b95705b859b61430fa3afa1403034eb3e6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 5 May 2009 17:50:23 +0200 Subject: perf_counter: add ioctl(PERF_COUNTER_IOC_RESET) Provide a way to reset an existing counter - this eases PAPI libraries around perfcounters. Similar to read() it doesn't collapse pending child counters. [ Impact: new perfcounter fd ioctl method to reset counters ] Suggested-by: Corey Ashford Signed-off-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20090505155437.022272933@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 1 + kernel/perf_counter.c | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 17b63105f2a..0fcbf34a4f7 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -160,6 +160,7 @@ struct perf_counter_hw_event { #define PERF_COUNTER_IOC_ENABLE _IO ('$', 0) #define PERF_COUNTER_IOC_DISABLE _IO ('$', 1) #define PERF_COUNTER_IOC_REFRESH _IOW('$', 2, u32) +#define PERF_COUNTER_IOC_RESET _IO ('$', 3) /* * Structure of the page that can be mapped via mmap diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index ba5e921e1f3..6e6834e0587 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1288,6 +1288,11 @@ static unsigned int perf_poll(struct file *file, poll_table *wait) return events; } +static void perf_counter_reset(struct perf_counter *counter) +{ + atomic_set(&counter->count, 0); +} + static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct perf_counter *counter = file->private_data; @@ -1303,6 +1308,9 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case PERF_COUNTER_IOC_REFRESH: perf_counter_refresh(counter, arg); break; + case PERF_COUNTER_IOC_RESET: + perf_counter_reset(counter); + break; default: err = -ENOTTY; } -- cgit v1.2.3 From c5078f78b455fbf67ea71442c7e7ca8acf9ff095 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 5 May 2009 17:50:24 +0200 Subject: perf_counter: provide an mlock threshold Provide a threshold to relax the mlock accounting, increasing usability. Each counter gets perf_counter_mlock_kb for free. 
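In other words, only pages beyond that allowance are charged to the mlock budget. A simplified sketch of the accounting, using the names from the patch below:

    extra  = nr_pages;      /* the +1 user page is no longer charged */
    extra -= sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);  /* kb -> pages */
    if (extra < 0)
            extra = 0;      /* buffer fits entirely in the free allowance */

    locked = vma->vm_mm->locked_vm + extra;  /* checked against RLIMIT_MEMLOCK */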
[ Impact: allow more mmap buffering ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090505155437.112113632@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 2 ++ kernel/perf_counter.c | 15 +++++++++++---- kernel/sysctl.c | 8 ++++++++ 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 0fcbf34a4f7..00081d84169 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -358,6 +358,7 @@ struct file; struct perf_mmap_data { struct rcu_head rcu_head; int nr_pages; /* nr of data pages */ + int nr_locked; /* nr pages mlocked */ atomic_t poll; /* POLL_ for wakeups */ atomic_t head; /* write position */ @@ -575,6 +576,7 @@ struct perf_callchain_entry { extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); extern int sysctl_perf_counter_priv; +extern int sysctl_perf_counter_mlock; extern void perf_counter_init(void); diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 6e6834e0587..2d134273830 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -44,6 +44,7 @@ static atomic_t nr_munmap_tracking __read_mostly; static atomic_t nr_comm_tracking __read_mostly; int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */ +int sysctl_perf_counter_mlock __read_mostly = 128; /* 'free' kb per counter */ /* * Lock for (sysadmin-configurable) counter reservations: @@ -1461,7 +1462,7 @@ static void perf_mmap_close(struct vm_area_struct *vma) if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) { - vma->vm_mm->locked_vm -= counter->data->nr_pages + 1; + vma->vm_mm->locked_vm -= counter->data->nr_locked; perf_mmap_data_free(counter); mutex_unlock(&counter->mmap_mutex); } @@ -1480,6 +1481,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) unsigned long nr_pages; unsigned long locked, lock_limit; int ret = 0; + long extra; if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE)) return -EINVAL; @@ -1507,8 +1509,12 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) goto unlock; } - locked = vma->vm_mm->locked_vm; - locked += nr_pages + 1; + extra = nr_pages /* + 1 only account the data pages */; + extra -= sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10); + if (extra < 0) + extra = 0; + + locked = vma->vm_mm->locked_vm + extra; lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; lock_limit >>= PAGE_SHIFT; @@ -1524,7 +1530,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) goto unlock; atomic_set(&counter->mmap_count, 1); - vma->vm_mm->locked_vm += nr_pages + 1; + vma->vm_mm->locked_vm += extra; + counter->data->nr_locked = extra; unlock: mutex_unlock(&counter->mmap_mutex); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 8203d70928d..3b05c2b088d 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -920,6 +920,14 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "perf_counter_mlock_kb", + .data = &sysctl_perf_counter_mlock, + .maxlen = sizeof(sysctl_perf_counter_mlock), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, #endif /* * NOTE: do not add new entries to this table unless you have read -- cgit v1.2.3 From 22c1558e51c210787c6cf75d8905246fc91ec030 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 5 May 2009 17:50:25 +0200 Subject: perf_counter: fix the output lock Use -1 instead of 0 as 
unlocked, since 0 is a valid cpu number. ( This is not an issue right now but will be once we allow multiple counters to output to the same mmap area. ) [ Impact: prepare code for multi-counter profile output ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090505155437.232686598@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 2d134273830..c881afef997 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1409,6 +1409,7 @@ static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages) } data->nr_pages = nr_pages; + atomic_set(&data->lock, -1); rcu_assign_pointer(counter->data, data); @@ -1755,7 +1756,7 @@ static void perf_output_lock(struct perf_output_handle *handle) if (in_nmi() && atomic_read(&data->lock) == cpu) return; - while (atomic_cmpxchg(&data->lock, 0, cpu) != 0) + while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) cpu_relax(); handle->locked = 1; @@ -1784,7 +1785,7 @@ again: * NMI can happen here, which means we can miss a done_head update. */ - cpu = atomic_xchg(&data->lock, 0); + cpu = atomic_xchg(&data->lock, -1); WARN_ON_ONCE(cpu != smp_processor_id()); /* @@ -1794,7 +1795,7 @@ again: /* * Since we had it locked, we can lock it again. */ - while (atomic_cmpxchg(&data->lock, 0, cpu) != 0) + while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) cpu_relax(); goto again; -- cgit v1.2.3 From 2023b359214bbc5bad31571cf50d7fb83b535c0a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 5 May 2009 17:50:26 +0200 Subject: perf_counter: inheritable sample counters Redirect the output to the parent counter and put in some sanity checks. [ Impact: new perfcounter feature - inherited sampling counters ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090505155437.331556171@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 32 ++++++++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index c881afef997..60e55f0b48f 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -738,10 +738,18 @@ static void perf_counter_enable(struct perf_counter *counter) spin_unlock_irq(&ctx->lock); } -static void perf_counter_refresh(struct perf_counter *counter, int refresh) +static int perf_counter_refresh(struct perf_counter *counter, int refresh) { + /* + * not supported on inherited counters + */ + if (counter->hw_event.inherit) + return -EINVAL; + atomic_add(refresh, &counter->event_limit); perf_counter_enable(counter); + + return 0; } /* @@ -1307,7 +1315,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) perf_counter_disable_family(counter); break; case PERF_COUNTER_IOC_REFRESH: - perf_counter_refresh(counter, arg); + err = perf_counter_refresh(counter, arg); break; case PERF_COUNTER_IOC_RESET: perf_counter_reset(counter); @@ -1814,6 +1822,12 @@ static int perf_output_begin(struct perf_output_handle *handle, struct perf_mmap_data *data; unsigned int offset, head; + /* + * For inherited counters we send all the output towards the parent. 
+ */ + if (counter->parent) + counter = counter->parent; + rcu_read_lock(); data = rcu_dereference(counter->data); if (!data) @@ -1995,6 +2009,9 @@ static void perf_counter_output(struct perf_counter *counter, if (record_type & PERF_RECORD_ADDR) perf_output_put(&handle, addr); + /* + * XXX PERF_RECORD_GROUP vs inherited counters seems difficult. + */ if (record_type & PERF_RECORD_GROUP) { struct perf_counter *leader, *sub; u64 nr = counter->nr_siblings; @@ -2281,6 +2298,11 @@ int perf_counter_overflow(struct perf_counter *counter, int events = atomic_read(&counter->event_limit); int ret = 0; + /* + * XXX event_limit might not quite work as expected on inherited + * counters + */ + counter->pending_kill = POLL_IN; if (events && atomic_dec_and_test(&counter->event_limit)) { ret = 1; @@ -2801,6 +2823,12 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, pmu = NULL; + /* + * we currently do not support PERF_RECORD_GROUP on inherited counters + */ + if (hw_event->inherit && (hw_event->record_type & PERF_RECORD_GROUP)) + goto done; + if (perf_event_raw(hw_event)) { pmu = hw_perf_counter_init(counter); goto done; -- cgit v1.2.3 From 16c8a10932aef971292c9570eb5f60b5d4e83ed2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 5 May 2009 17:50:27 +0200 Subject: perf_counter: tools: update the tools to support process and inherited counters "perf record": - per task counter - inherit switch - nmi switch "perf report": - userspace/kernel filter "perf stat": - userspace/kernel filter Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090505155437.389163017@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 155 +++++++++++++++++----------- Documentation/perf_counter/builtin-stat.c | 24 ++++- Documentation/perf_counter/perf-report.cc | 27 ++++- 3 files changed, 140 insertions(+), 66 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index ddfdcf86fb2..5f5e6df0260 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -45,7 +45,10 @@ static unsigned int mmap_pages = 16; static int output; static char *output_name = "output.perf"; static int group = 0; -static unsigned int realtime_prio = 0; +static unsigned int realtime_prio = 0; +static int system_wide = 0; +static int inherit = 1; +static int nmi = 1; const unsigned int default_count[] = { 1000000, @@ -167,7 +170,7 @@ static void display_events_help(void) static void display_help(void) { printf( - "Usage: perf-record []\n" + "Usage: perf-record [] \n" "perf-record Options (up to %d event types can be specified at once):\n\n", MAX_COUNTERS); @@ -178,12 +181,13 @@ static void display_help(void) " -m pages --mmap_pages= # number of mmap data pages\n" " -o file --output= # output file\n" " -r prio --realtime= # use RT prio\n" + " -s --system # system wide profiling\n" ); exit(0); } -static void process_options(int argc, char *argv[]) +static void process_options(int argc, const char *argv[]) { int error = 0, counter; @@ -196,9 +200,12 @@ static void process_options(int argc, char *argv[]) {"mmap_pages", required_argument, NULL, 'm'}, {"output", required_argument, NULL, 'o'}, {"realtime", required_argument, NULL, 'r'}, + {"system", no_argument, NULL, 's'}, + {"inherit", no_argument, NULL, 'i'}, + {"nmi", no_argument, NULL, 'n'}, {NULL, 0, NULL, 0 } }; - int c = getopt_long(argc, argv, "+:c:e:m:o:r:", + int c = getopt_long(argc, argv, "+:c:e:m:o:r:sin", 
long_options, &option_index); if (c == -1) break; @@ -209,9 +216,16 @@ static void process_options(int argc, char *argv[]) case 'm': mmap_pages = atoi(optarg); break; case 'o': output_name = strdup(optarg); break; case 'r': realtime_prio = atoi(optarg); break; + case 's': system_wide ^= 1; break; + case 'i': inherit ^= 1; break; + case 'n': nmi ^= 1; break; default: error = 1; break; } } + + if (argc - optind == 0) + error = 1; + if (error) display_help(); @@ -325,18 +339,82 @@ static void mmap_read(struct mmap_data *md) static volatile int done = 0; -static void sigchld_handler(int sig) +static void sig_handler(int sig) { - if (sig == SIGCHLD) - done = 1; + done = 1; } -int cmd_record(int argc, char **argv) +static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; +static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; + +static int nr_poll; +static int nr_cpu; + +static void open_counters(int cpu) { - struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; - struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; struct perf_counter_hw_event hw_event; - int i, counter, group_fd, nr_poll = 0; + int counter, group_fd; + int track = 1; + pid_t pid = -1; + + if (cpu < 0) + pid = 0; + + group_fd = -1; + for (counter = 0; counter < nr_counters; counter++) { + + memset(&hw_event, 0, sizeof(hw_event)); + hw_event.config = event_id[counter]; + hw_event.irq_period = event_count[counter]; + hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID; + hw_event.nmi = nmi; + hw_event.mmap = track; + hw_event.comm = track; + hw_event.inherit = (cpu < 0) && inherit; + + track = 0; // only the first counter needs these + + fd[nr_cpu][counter] = + sys_perf_counter_open(&hw_event, pid, cpu, group_fd, 0); + + if (fd[nr_cpu][counter] < 0) { + int err = errno; + printf("kerneltop error: syscall returned with %d (%s)\n", + fd[nr_cpu][counter], strerror(err)); + if (err == EPERM) + printf("Are you root?\n"); + exit(-1); + } + assert(fd[nr_cpu][counter] >= 0); + fcntl(fd[nr_cpu][counter], F_SETFL, O_NONBLOCK); + + /* + * First counter acts as the group leader: + */ + if (group && group_fd == -1) + group_fd = fd[nr_cpu][counter]; + + event_array[nr_poll].fd = fd[nr_cpu][counter]; + event_array[nr_poll].events = POLLIN; + nr_poll++; + + mmap_array[nr_cpu][counter].counter = counter; + mmap_array[nr_cpu][counter].prev = 0; + mmap_array[nr_cpu][counter].mask = mmap_pages*page_size - 1; + mmap_array[nr_cpu][counter].base = mmap(NULL, (mmap_pages+1)*page_size, + PROT_READ, MAP_SHARED, fd[nr_cpu][counter], 0); + if (mmap_array[nr_cpu][counter].base == MAP_FAILED) { + printf("kerneltop error: failed to mmap with %d (%s)\n", + errno, strerror(errno)); + exit(-1); + } + } + nr_cpu++; +} + +int cmd_record(int argc, const char **argv) +{ + int i, counter; pid_t pid; int ret; @@ -357,54 +435,13 @@ int cmd_record(int argc, char **argv) argc -= optind; argv += optind; - for (i = 0; i < nr_cpus; i++) { - group_fd = -1; - for (counter = 0; counter < nr_counters; counter++) { - - memset(&hw_event, 0, sizeof(hw_event)); - hw_event.config = event_id[counter]; - hw_event.irq_period = event_count[counter]; - hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID; - hw_event.nmi = 1; - hw_event.mmap = 1; - hw_event.comm = 1; - - fd[i][counter] = sys_perf_counter_open(&hw_event, -1, i, group_fd, 0); - if (fd[i][counter] < 0) { - int err = errno; - printf("kerneltop error: syscall returned with %d (%s)\n", - fd[i][counter], strerror(err)); - if (err == EPERM) - printf("Are you root?\n"); - exit(-1); - } - 
assert(fd[i][counter] >= 0); - fcntl(fd[i][counter], F_SETFL, O_NONBLOCK); - - /* - * First counter acts as the group leader: - */ - if (group && group_fd == -1) - group_fd = fd[i][counter]; - - event_array[nr_poll].fd = fd[i][counter]; - event_array[nr_poll].events = POLLIN; - nr_poll++; - - mmap_array[i][counter].counter = counter; - mmap_array[i][counter].prev = 0; - mmap_array[i][counter].mask = mmap_pages*page_size - 1; - mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size, - PROT_READ, MAP_SHARED, fd[i][counter], 0); - if (mmap_array[i][counter].base == MAP_FAILED) { - printf("kerneltop error: failed to mmap with %d (%s)\n", - errno, strerror(errno)); - exit(-1); - } - } - } + if (!system_wide) + open_counters(-1); + else for (i = 0; i < nr_cpus; i++) + open_counters(i); - signal(SIGCHLD, sigchld_handler); + signal(SIGCHLD, sig_handler); + signal(SIGINT, sig_handler); pid = fork(); if (pid < 0) @@ -434,7 +471,7 @@ int cmd_record(int argc, char **argv) while (!done) { int hits = events; - for (i = 0; i < nr_cpus; i++) { + for (i = 0; i < nr_cpu; i++) { for (counter = 0; counter < nr_counters; counter++) mmap_read(&mmap_array[i][counter]); } diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index 6de38d25688..e2fa117eab5 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -87,6 +87,9 @@ #include "perf.h" +#define EVENT_MASK_KERNEL 1 +#define EVENT_MASK_USER 2 + static int system_wide = 0; static int nr_counters = 0; @@ -104,6 +107,7 @@ static __u64 event_id[MAX_COUNTERS] = { static int default_interval = 100000; static int event_count[MAX_COUNTERS]; static int fd[MAX_NR_CPUS][MAX_COUNTERS]; +static int event_mask[MAX_COUNTERS]; static int tid = -1; static int profile_cpu = -1; @@ -258,12 +262,23 @@ static __u64 match_event_symbols(char *str) __u64 config, id; int type; unsigned int i; + char mask_str[4]; if (sscanf(str, "r%llx", &config) == 1) return config | PERF_COUNTER_RAW_MASK; - if (sscanf(str, "%d:%llu", &type, &id) == 2) - return EID(type, id); + switch (sscanf(str, "%d:%llu:%2s", &type, &id, mask_str)) { + case 3: + if (strchr(mask_str, 'u')) + event_mask[nr_counters] |= EVENT_MASK_USER; + if (strchr(mask_str, 'k')) + event_mask[nr_counters] |= EVENT_MASK_KERNEL; + case 2: + return EID(type, id); + + default: + break; + } for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { if (!strncmp(str, event_symbols[i].symbol, @@ -313,6 +328,11 @@ static void create_perfstat_counter(int counter) hw_event.config = event_id[counter]; hw_event.record_type = 0; hw_event.nmi = 0; + hw_event.exclude_kernel = event_mask[counter] & EVENT_MASK_KERNEL; + hw_event.exclude_user = event_mask[counter] & EVENT_MASK_USER; + +printf("exclude: %d\n", event_mask[counter]); + if (scale) hw_event.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING; diff --git a/Documentation/perf_counter/perf-report.cc b/Documentation/perf_counter/perf-report.cc index 911d7f3e7a6..8855107fe6b 100644 --- a/Documentation/perf_counter/perf-report.cc +++ b/Documentation/perf_counter/perf-report.cc @@ -33,8 +33,13 @@ #include +#define SHOW_KERNEL 1 +#define SHOW_USER 2 +#define SHOW_HV 4 + static char const *input_name = "output.perf"; static int input; +static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; static unsigned long page_size; static unsigned long mmap_window = 32; @@ -359,15 +364,21 @@ static void process_options(int argc, char *argv[]) /** Options for getopt */ static struct 
option long_options[] = { {"input", required_argument, NULL, 'i'}, + {"no-user", no_argument, NULL, 'u'}, + {"no-kernel", no_argument, NULL, 'k'}, + {"no-hv", no_argument, NULL, 'h'}, {NULL, 0, NULL, 0 } }; - int c = getopt_long(argc, argv, "+:i:", + int c = getopt_long(argc, argv, "+:i:kuh", long_options, &option_index); if (c == -1) break; switch (c) { case 'i': input_name = strdup(optarg); break; + case 'k': show_mask &= ~SHOW_KERNEL; break; + case 'u': show_mask &= ~SHOW_USER; break; + case 'h': show_mask &= ~SHOW_HV; break; default: error = 1; break; } } @@ -443,22 +454,28 @@ more: if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) { std::string comm, sym, level; + int show = 0; char output[1024]; if (event->header.misc & PERF_EVENT_MISC_KERNEL) { + show |= SHOW_KERNEL; level = " [k] "; sym = resolve_kernel_symbol(event->ip.ip); } else if (event->header.misc & PERF_EVENT_MISC_USER) { + show |= SHOW_USER; level = " [.] "; sym = resolve_user_symbol(event->ip.pid, event->ip.ip); } else { + show |= SHOW_HV; level = " [H] "; } - comm = resolve_comm(event->ip.pid); - snprintf(output, sizeof(output), "%16s %s %s", - comm.c_str(), level.c_str(), sym.c_str()); - hist[output]++; + if (show & show_mask) { + comm = resolve_comm(event->ip.pid); + snprintf(output, sizeof(output), "%16s %s %s", + comm.c_str(), level.c_str(), sym.c_str()); + hist[output]++; + } total++; -- cgit v1.2.3 From 7fc23a5380797012e92a9633169440f2f4a21253 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 8 May 2009 18:52:21 +0200 Subject: perf_counter: optimize perf_counter_task_tick() perf_counter_task_tick() does way too much work to find out there's nothing to do. Provide an easy short-circuit for the normal case where there are no counters on the system. [ Impact: micro-optimization ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: <20090508170028.750619201@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 60e55f0b48f..fdb0d242127 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -39,6 +39,7 @@ int perf_max_counters __read_mostly = 1; static int perf_reserved_percpu __read_mostly; static int perf_overcommit __read_mostly = 1; +static atomic_t nr_counters __read_mostly; static atomic_t nr_mmap_tracking __read_mostly; static atomic_t nr_munmap_tracking __read_mostly; static atomic_t nr_comm_tracking __read_mostly; @@ -1076,8 +1077,14 @@ static void rotate_ctx(struct perf_counter_context *ctx) void perf_counter_task_tick(struct task_struct *curr, int cpu) { - struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); - struct perf_counter_context *ctx = &curr->perf_counter_ctx; + struct perf_cpu_context *cpuctx; + struct perf_counter_context *ctx; + + if (!atomic_read(&nr_counters)) + return; + + cpuctx = &per_cpu(perf_cpu_context, cpu); + ctx = &curr->perf_counter_ctx; perf_counter_cpu_sched_out(cpuctx); perf_counter_task_sched_out(curr, cpu); @@ -1197,6 +1204,7 @@ static void free_counter(struct perf_counter *counter) { perf_pending_sync(counter); + atomic_dec(&nr_counters); if (counter->hw_event.mmap) atomic_dec(&nr_mmap_tracking); if (counter->hw_event.munmap) @@ -2861,6 +2869,7 @@ done: counter->pmu = pmu; + atomic_inc(&nr_counters); if (counter->hw_event.mmap) atomic_inc(&nr_mmap_tracking); if (counter->hw_event.munmap) -- cgit v1.2.3 From 3df5edad87a998273aa5a9a8c728c05d855ad00e Mon Sep 17 00:00:00 2001 From: 
Peter Zijlstra Date: Fri, 8 May 2009 18:52:22 +0200 Subject: perf_counter: rework ioctl()s Corey noticed that ioctl()s on grouped counters didn't work on the whole group. This extends the ioctl() interface to take a second argument that is interpreted as a flags field. We then provide PERF_IOC_FLAG_GROUP to toggle the behaviour. Having this flag gives the greatest flexibility, allowing you to individually enable/disable/reset counters in a group, or all together. [ Impact: fix group counter enable/disable semantics ] Reported-by: Corey Ashford Signed-off-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20090508170028.837558214@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 10 +++-- kernel/perf_counter.c | 104 ++++++++++++++++++++++++------------------- 2 files changed, 65 insertions(+), 49 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 00081d84169..88f863ec274 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -157,10 +157,14 @@ struct perf_counter_hw_event { /* * Ioctls that can be done on a perf counter fd: */ -#define PERF_COUNTER_IOC_ENABLE _IO ('$', 0) -#define PERF_COUNTER_IOC_DISABLE _IO ('$', 1) +#define PERF_COUNTER_IOC_ENABLE _IOW('$', 0, u32) +#define PERF_COUNTER_IOC_DISABLE _IOW('$', 1, u32) #define PERF_COUNTER_IOC_REFRESH _IOW('$', 2, u32) -#define PERF_COUNTER_IOC_RESET _IO ('$', 3) +#define PERF_COUNTER_IOC_RESET _IOW('$', 3, u32) + +enum perf_counter_ioc_flags { + PERF_IOC_FLAG_GROUP = 1U << 0, +}; /* * Structure of the page that can be mapped via mmap diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index fdb0d242127..f4883f1f47e 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -82,7 +82,7 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) * add it straight to the context's counter list, or to the group * leader's sibling list: */ - if (counter->group_leader == counter) + if (group_leader == counter) list_add_tail(&counter->list_entry, &ctx->counter_list); else { list_add_tail(&counter->list_entry, &group_leader->sibling_list); @@ -385,24 +385,6 @@ static void perf_counter_disable(struct perf_counter *counter) spin_unlock_irq(&ctx->lock); } -/* - * Disable a counter and all its children. - */ -static void perf_counter_disable_family(struct perf_counter *counter) -{ - struct perf_counter *child; - - perf_counter_disable(counter); - - /* - * Lock the mutex to protect the list of children - */ - mutex_lock(&counter->mutex); - list_for_each_entry(child, &counter->child_list, child_list) - perf_counter_disable(child); - mutex_unlock(&counter->mutex); -} - static int counter_sched_in(struct perf_counter *counter, struct perf_cpu_context *cpuctx, @@ -753,24 +735,6 @@ static int perf_counter_refresh(struct perf_counter *counter, int refresh) return 0; } -/* - * Enable a counter and all its children. 
- */
-static void perf_counter_enable_family(struct perf_counter *counter)
-{
-	struct perf_counter *child;
-
-	perf_counter_enable(counter);
-
-	/*
-	 * Lock the mutex to protect the list of children
-	 */
-	mutex_lock(&counter->mutex);
-	list_for_each_entry(child, &counter->child_list, child_list)
-		perf_counter_enable(child);
-	mutex_unlock(&counter->mutex);
-}
-
 void __perf_counter_sched_out(struct perf_counter_context *ctx,
 			      struct perf_cpu_context *cpuctx)
 {
@@ -1307,31 +1271,79 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 
 static void perf_counter_reset(struct perf_counter *counter)
 {
+	(void)perf_counter_read(counter);
 	atomic_set(&counter->count, 0);
+	perf_counter_update_userpage(counter);
+}
+
+static void perf_counter_for_each_sibling(struct perf_counter *counter,
+					  void (*func)(struct perf_counter *))
+{
+	struct perf_counter_context *ctx = counter->ctx;
+	struct perf_counter *sibling;
+
+	spin_lock_irq(&ctx->lock);
+	counter = counter->group_leader;
+
+	func(counter);
+	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
+		func(sibling);
+	spin_unlock_irq(&ctx->lock);
+}
+
+static void perf_counter_for_each_child(struct perf_counter *counter,
+					void (*func)(struct perf_counter *))
+{
+	struct perf_counter *child;
+
+	mutex_lock(&counter->mutex);
+	func(counter);
+	list_for_each_entry(child, &counter->child_list, child_list)
+		func(child);
+	mutex_unlock(&counter->mutex);
+}
+
+static void perf_counter_for_each(struct perf_counter *counter,
+				  void (*func)(struct perf_counter *))
+{
+	struct perf_counter *child;
+
+	mutex_lock(&counter->mutex);
+	perf_counter_for_each_sibling(counter, func);
+	list_for_each_entry(child, &counter->child_list, child_list)
+		perf_counter_for_each_sibling(child, func);
+	mutex_unlock(&counter->mutex);
 }
 
 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct perf_counter *counter = file->private_data;
-	int err = 0;
+	void (*func)(struct perf_counter *);
+	u32 flags = arg;
 
 	switch (cmd) {
 	case PERF_COUNTER_IOC_ENABLE:
-		perf_counter_enable_family(counter);
+		func = perf_counter_enable;
 		break;
 	case PERF_COUNTER_IOC_DISABLE:
-		perf_counter_disable_family(counter);
-		break;
-	case PERF_COUNTER_IOC_REFRESH:
-		err = perf_counter_refresh(counter, arg);
+		func = perf_counter_disable;
 		break;
 	case PERF_COUNTER_IOC_RESET:
-		perf_counter_reset(counter);
+		func = perf_counter_reset;
 		break;
+
+	case PERF_COUNTER_IOC_REFRESH:
+		return perf_counter_refresh(counter, arg);
 	default:
-		err = -ENOTTY;
+		return -ENOTTY;
 	}
-	return err;
+
+	if (flags & PERF_IOC_FLAG_GROUP)
+		perf_counter_for_each(counter, func);
+	else
+		perf_counter_for_each_child(counter, func);
+
+	return 0;
 }
 
 /*
-- cgit v1.2.3

From a85f61abe11a46553c4562e74edb27ebc782aeb7 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 8 May 2009 18:52:23 +0200
Subject: perf_counter: add PERF_RECORD_CONFIG

Much like PERF_RECORD_GROUP records the hw_event.config to identify the
values, allow recording it for all counters.
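A hypothetical userspace sketch of requesting this, using the structure and constants from elsewhere in this series:

    struct perf_counter_hw_event hw_event;

    memset(&hw_event, 0, sizeof(hw_event));
    hw_event.config      = PERF_COUNT_INSTRUCTIONS;
    hw_event.irq_period  = 100000;
    hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID |
                           PERF_RECORD_CONFIG;      /* the new bit, below */

    /*
     * Each overflow record now carries a trailing u64 holding
     * hw_event.config, so a reader can tell which counter a sample
     * came from when several counters share one buffer.
     */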
[ Impact: extend perfcounter output record format ]

Signed-off-by: Peter Zijlstra
Cc: Paul Mackerras
Cc: Corey Ashford
LKML-Reference: <20090508170028.923228280@chello.nl>
Signed-off-by: Ingo Molnar
---
 include/linux/perf_counter.h | 2 ++
 kernel/perf_counter.c        | 8 ++++++++
 2 files changed, 10 insertions(+)

diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 88f863ec274..0e6303d36c6 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -104,6 +104,7 @@ enum perf_counter_record_format {
 	PERF_RECORD_ADDR	= 1U << 3,
 	PERF_RECORD_GROUP	= 1U << 4,
 	PERF_RECORD_CALLCHAIN	= 1U << 5,
+	PERF_RECORD_CONFIG	= 1U << 6,
 };
 
 /*
@@ -258,6 +259,7 @@ enum perf_event_type {
 	 * { u32 pid, tid; } && PERF_RECORD_TID
 	 * { u64 time; } && PERF_RECORD_TIME
 	 * { u64 addr; } && PERF_RECORD_ADDR
+	 * { u64 config; } && PERF_RECORD_CONFIG
 	 *
 	 * { u64 nr;
 	 *   { u64 event, val; } cnt[nr]; } && PERF_RECORD_GROUP
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index f4883f1f47e..c615f52aa40 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1994,6 +1994,11 @@ static void perf_counter_output(struct perf_counter *counter,
 		header.size += sizeof(u64);
 	}
 
+	if (record_type & PERF_RECORD_CONFIG) {
+		header.type |= PERF_RECORD_CONFIG;
+		header.size += sizeof(u64);
+	}
+
 	if (record_type & PERF_RECORD_GROUP) {
 		header.type |= PERF_RECORD_GROUP;
 		header.size += sizeof(u64) +
@@ -2029,6 +2034,9 @@ static void perf_counter_output(struct perf_counter *counter,
 	if (record_type & PERF_RECORD_ADDR)
 		perf_output_put(&handle, addr);
 
+	if (record_type & PERF_RECORD_CONFIG)
+		perf_output_put(&handle, counter->hw_event.config);
+
 	/*
 	 * XXX PERF_RECORD_GROUP vs inherited counters seems difficult.
 	 */
-- cgit v1.2.3

From f370e1e2f195ec1e6420e26fc83e0319595db578 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 8 May 2009 18:52:24 +0200
Subject: perf_counter: add PERF_RECORD_CPU

Allow recording the CPU number the event was generated on.

RFC: this leaves a u32 as reserved; should we fill in the node_id()
there, or leave it open for future extension, as userspace can already
easily do the cpu->node mapping if needed?
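A hypothetical reader-side sketch: with PERF_RECORD_CPU set, each record carries the pair below, 'res' being the reserved u32 discussed above:

    #include <stdio.h>
    #include <string.h>

    struct cpu_entry {
            unsigned int cpu, res;
    };

    /* hypothetical helper: the mmap-ed stream may be unaligned, so copy */
    static void print_sample_cpu(const unsigned char *record_cursor)
    {
            struct cpu_entry entry;

            memcpy(&entry, record_cursor, sizeof(entry));
            printf("sample taken on cpu %u\n", entry.cpu);
    }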
[ Impact: extend perfcounter output record format ]

Signed-off-by: Peter Zijlstra
Cc: Paul Mackerras
Cc: Corey Ashford
LKML-Reference: <20090508170029.008627711@chello.nl>
Signed-off-by: Ingo Molnar
---
 include/linux/perf_counter.h | 2 ++
 kernel/perf_counter.c        | 13 +++++++++++++
 2 files changed, 15 insertions(+)

diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 0e6303d36c6..614f921d616 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -105,6 +105,7 @@ enum perf_counter_record_format {
 	PERF_RECORD_GROUP	= 1U << 4,
 	PERF_RECORD_CALLCHAIN	= 1U << 5,
 	PERF_RECORD_CONFIG	= 1U << 6,
+	PERF_RECORD_CPU		= 1U << 7,
 };
 
 /*
@@ -260,6 +261,7 @@ enum perf_event_type {
 	 * { u64 time; } && PERF_RECORD_TIME
 	 * { u64 addr; } && PERF_RECORD_ADDR
 	 * { u64 config; } && PERF_RECORD_CONFIG
+	 * { u32 cpu, res; } && PERF_RECORD_CPU
 	 *
 	 * { u64 nr;
 	 *   { u64 event, val; } cnt[nr]; } && PERF_RECORD_GROUP
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index c615f52aa40..d850a1fb8d4 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1956,6 +1956,9 @@ static void perf_counter_output(struct perf_counter *counter,
 	struct perf_callchain_entry *callchain = NULL;
 	int callchain_size = 0;
 	u64 time;
+	struct {
+		u32 cpu, reserved;
+	} cpu_entry;
 
 	header.type = 0;
 	header.size = sizeof(header);
@@ -1999,6 +2002,13 @@ static void perf_counter_output(struct perf_counter *counter,
 		header.size += sizeof(u64);
 	}
 
+	if (record_type & PERF_RECORD_CPU) {
+		header.type |= PERF_RECORD_CPU;
+		header.size += sizeof(cpu_entry);
+
+		cpu_entry.cpu = raw_smp_processor_id();
+	}
+
 	if (record_type & PERF_RECORD_GROUP) {
 		header.type |= PERF_RECORD_GROUP;
 		header.size += sizeof(u64) +
@@ -2037,6 +2047,9 @@ static void perf_counter_output(struct perf_counter *counter,
 	if (record_type & PERF_RECORD_CONFIG)
 		perf_output_put(&handle, counter->hw_event.config);
 
+	if (record_type & PERF_RECORD_CPU)
+		perf_output_put(&handle, cpu_entry);
+
 	/*
 	 * XXX PERF_RECORD_GROUP vs inherited counters seems difficult.
 	 */
-- cgit v1.2.3

From f15b18d0755b3ee4b29991fc2fde535ee41df53c Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Sat, 9 May 2009 10:04:22 +0200
Subject: perf_counter tools: remove debug code from builtin-stat.c

Signed-off-by: Ingo Molnar
---
 Documentation/perf_counter/builtin-stat.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c
index e2fa117eab5..cf575c305a6 100644
--- a/Documentation/perf_counter/builtin-stat.c
+++ b/Documentation/perf_counter/builtin-stat.c
@@ -331,8 +331,6 @@ static void create_perfstat_counter(int counter)
 	hw_event.exclude_kernel = event_mask[counter] & EVENT_MASK_KERNEL;
 	hw_event.exclude_user   = event_mask[counter] & EVENT_MASK_USER;
 
-printf("exclude: %d\n", event_mask[counter]);
-
 	if (scale)
 		hw_event.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 				       PERF_FORMAT_TOTAL_TIME_RUNNING;
-- cgit v1.2.3

From 82afae6016b672acb90ceb8e773bba0bd977d2a3 Mon Sep 17 00:00:00 2001
From: Erdem Aktas
Date: Sun, 10 May 2009 02:13:19 -0400
Subject: perf_counter tools: fix buffer overwrite problem for perf top command

There is a buffer overwrite problem in builtin-top.c at line 526: running
the './perf top' command triggered memory corruption.
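The bug class in miniature (an illustrative helper, not the perf-top code itself): strcpy() writes strlen(str) characters plus a terminating NUL, so the destination must be one byte larger:

    #include <stdlib.h>
    #include <string.h>

    static char *dup_symbol(const char *str)        /* illustrative only */
    {
            char *sym = malloc(strlen(str) + 1);    /* strlen(str) alone is one short */

            if (sym)
                    strcpy(sym, str);       /* the NUL lands in the last byte */
            return sym;
    }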
[ Impact: fix 'perf top' crash ] LKML-Reference: <3fee128b0905092313x608e65e0l7b1116d86914114f@mail.gmail.com> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-top.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index cd6f61d7341..b1549dd2772 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -523,7 +523,7 @@ static int read_symbol(FILE *in, struct sym_entry *s) if (strstr(sym, "_text_start") || strstr(sym, "_text_end")) return 1; - s->sym = malloc(strlen(str)); + s->sym = malloc(strlen(str)+1); assert(s->sym); strcpy((char *)s->sym, str); -- cgit v1.2.3 From 8823392360dc4992f87bf4c623834d315f297493 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sun, 10 May 2009 10:53:05 +0200 Subject: perf_counter, x86: clean up throttling printk s/PERFMON/perfcounters for perfcounter interrupt throttling warning. 'perfmon' is the CPU feature name that is Intel-only, while we do throttling in a generic way. [ Impact: cleanup ] Signed-off-by: Mike Galbraith Cc: Robert Richter Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index a6878b0798e..da27419923a 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -814,7 +814,7 @@ void perf_counter_unthrottle(void) cpuc = &__get_cpu_var(cpu_hw_counters); if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) { if (printk_ratelimit()) - printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n"); + printk(KERN_WARNING "perfcounters: max interrupts exceeded!\n"); hw_perf_restore(cpuc->throttle_ctrl); } cpuc->interrupts = 0; -- cgit v1.2.3 From 6751b71ea2c7ab8c0d65f01973a3fc8ea16992f4 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 11 May 2009 12:08:02 +1000 Subject: perf_counter: Put whole group on when enabling group leader Currently, if you have a group where the leader is disabled and there are siblings that are enabled, and then you enable the leader, we only put the leader on the PMU, and not its enabled siblings. This is incorrect, since the enabled group members should be all on or all off at any given point. This fixes it by adding a call to group_sched_in in __perf_counter_enable in the case where we're enabling a group leader. To avoid the need for a forward declaration this also moves group_sched_in up before __perf_counter_enable. The actual content of group_sched_in is unchanged by this patch. 
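A hypothetical userspace view of the invariant being restored (assume 'leader_fd' is the fd of a group leader that was created disabled, with enabled siblings attached via group_fd):

    ioctl(leader_fd, PERF_COUNTER_IOC_ENABLE, 0);

    /*
     * After this fix, the enabled siblings go onto the PMU together
     * with the leader as soon as the ioctl() returns; before it,
     * only the leader itself was scheduled in.
     */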
[ Impact: fix bug in counter enable code ] Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: Corey Ashford LKML-Reference: <18951.34946.451546.691693@drongo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 99 ++++++++++++++++++++++++++------------------------- 1 file changed, 51 insertions(+), 48 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index d850a1fb8d4..a5bdc93ac47 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -419,6 +419,54 @@ counter_sched_in(struct perf_counter *counter, return 0; } +static int +group_sched_in(struct perf_counter *group_counter, + struct perf_cpu_context *cpuctx, + struct perf_counter_context *ctx, + int cpu) +{ + struct perf_counter *counter, *partial_group; + int ret; + + if (group_counter->state == PERF_COUNTER_STATE_OFF) + return 0; + + ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu); + if (ret) + return ret < 0 ? ret : 0; + + group_counter->prev_state = group_counter->state; + if (counter_sched_in(group_counter, cpuctx, ctx, cpu)) + return -EAGAIN; + + /* + * Schedule in siblings as one group (if any): + */ + list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { + counter->prev_state = counter->state; + if (counter_sched_in(counter, cpuctx, ctx, cpu)) { + partial_group = counter; + goto group_error; + } + } + + return 0; + +group_error: + /* + * Groups can be scheduled in as one unit only, so undo any + * partial group before returning: + */ + list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { + if (counter == partial_group) + break; + counter_sched_out(counter, cpuctx, ctx); + } + counter_sched_out(group_counter, cpuctx, ctx); + + return -EAGAIN; +} + /* * Return 1 for a group consisting entirely of software counters, * 0 if the group contains any hardware counters. @@ -643,6 +691,9 @@ static void __perf_counter_enable(void *info) if (!group_can_go_on(counter, cpuctx, 1)) err = -EEXIST; + else if (counter == leader) + err = group_sched_in(counter, cpuctx, ctx, + smp_processor_id()); else err = counter_sched_in(counter, cpuctx, ctx, smp_processor_id()); @@ -791,54 +842,6 @@ static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx) __perf_counter_sched_out(&cpuctx->ctx, cpuctx); } -static int -group_sched_in(struct perf_counter *group_counter, - struct perf_cpu_context *cpuctx, - struct perf_counter_context *ctx, - int cpu) -{ - struct perf_counter *counter, *partial_group; - int ret; - - if (group_counter->state == PERF_COUNTER_STATE_OFF) - return 0; - - ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu); - if (ret) - return ret < 0 ? 
ret : 0; - - group_counter->prev_state = group_counter->state; - if (counter_sched_in(group_counter, cpuctx, ctx, cpu)) - return -EAGAIN; - - /* - * Schedule in siblings as one group (if any): - */ - list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { - counter->prev_state = counter->state; - if (counter_sched_in(counter, cpuctx, ctx, cpu)) { - partial_group = counter; - goto group_error; - } - } - - return 0; - -group_error: - /* - * Groups can be scheduled in as one unit only, so undo any - * partial group before returning: - */ - list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { - if (counter == partial_group) - break; - counter_sched_out(counter, cpuctx, ctx); - } - counter_sched_out(group_counter, cpuctx, ctx); - - return -EAGAIN; -} - static void __perf_counter_sched_in(struct perf_counter_context *ctx, struct perf_cpu_context *cpuctx, int cpu) -- cgit v1.2.3 From a08b159fc243dbfe415250466d24cfc5010deee5 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 11 May 2009 15:46:10 +1000 Subject: perf_counter: don't count scheduler ticks as context switches The context-switch software counter gives inflated values at present because each scheduler tick and each process-wide counter enable/disable prctl gets counted as a context switch. This happens because perf_counter_task_tick, perf_counter_task_disable and perf_counter_task_enable all call perf_counter_task_sched_out, which calls perf_swcounter_event to record a context switch event. This fixes it by introducing a variant of perf_counter_task_sched_out with two underscores in front for internal use within the perf_counter code, and makes perf_counter_task_{tick,disable,enable} call it. This variant doesn't record a context switch event, and takes a struct perf_counter_context *. This adds the new variant rather than changing the behaviour or interface of perf_counter_task_sched_out because that is called from other code. 
[ Impact: fix inflated context-switch event counts ] Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: Corey Ashford LKML-Reference: <18951.48034.485580.498953@drongo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index a5bdc93ac47..7373b96bc36 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -837,6 +837,14 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu) cpuctx->task_ctx = NULL; } +static void __perf_counter_task_sched_out(struct perf_counter_context *ctx) +{ + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + + __perf_counter_sched_out(ctx, cpuctx); + cpuctx->task_ctx = NULL; +} + static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx) { __perf_counter_sched_out(&cpuctx->ctx, cpuctx); @@ -943,15 +951,13 @@ int perf_counter_task_disable(void) struct perf_counter *counter; unsigned long flags; u64 perf_flags; - int cpu; if (likely(!ctx->nr_counters)) return 0; local_irq_save(flags); - cpu = smp_processor_id(); - perf_counter_task_sched_out(curr, cpu); + __perf_counter_task_sched_out(ctx); spin_lock(&ctx->lock); @@ -989,7 +995,7 @@ int perf_counter_task_enable(void) local_irq_save(flags); cpu = smp_processor_id(); - perf_counter_task_sched_out(curr, cpu); + __perf_counter_task_sched_out(ctx); spin_lock(&ctx->lock); @@ -1054,7 +1060,7 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) ctx = &curr->perf_counter_ctx; perf_counter_cpu_sched_out(cpuctx); - perf_counter_task_sched_out(curr, cpu); + __perf_counter_task_sched_out(ctx); rotate_ctx(&cpuctx->ctx); rotate_ctx(ctx); -- cgit v1.2.3 From 615a3f1e055ac9b0ae74e1f935a12ea2cfe2a2ad Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 11 May 2009 15:50:21 +1000 Subject: perf_counter: call atomic64_set for counter->count A compile warning triggered because we are calling atomic_set(&counter->count). But since counter->count is an atomic64_t, we have to use atomic64_set. So the count can be set short, resulting in the reset ioctl only resetting the low word. [ Impact: clear counter properly during the reset ioctl ] Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: Corey Ashford LKML-Reference: <18951.48285.270311.981806@drongo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 7373b96bc36..5ea0240adab 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1281,7 +1281,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait) static void perf_counter_reset(struct perf_counter *counter) { (void)perf_counter_read(counter); - atomic_set(&counter->count, 0); + atomic64_set(&counter->count, 0); perf_counter_update_userpage(counter); } -- cgit v1.2.3 From e758a33d6fc5b9d6a3ae489863d04fcecad8120b Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 12 May 2009 21:59:01 +1000 Subject: perf_counter: call hw_perf_save_disable/restore around group_sched_in I noticed that when enabling a group via the PERF_COUNTER_IOC_ENABLE ioctl on the group leader, the counters weren't enabled and counting immediately on return from the ioctl, but did start counting a little while later (presumably after a context switch). 
The reason was that __perf_counter_enable calls group_sched_in which calls hw_perf_group_sched_in, which on powerpc assumes that the caller has called hw_perf_save_disable already. Until commit 46d686c6 ("perf_counter: put whole group on when enabling group leader") it was true that all callers of group_sched_in had called hw_perf_save_disable first, and the powerpc hw_perf_group_sched_in relies on that (there isn't an x86 version). This fixes the problem by putting calls to hw_perf_save_disable / hw_perf_restore around the calls to group_sched_in and counter_sched_in in __perf_counter_enable. Having the calls to hw_perf_save_disable/restore around the counter_sched_in call is harmless and makes this call consistent with the other call sites of counter_sched_in, which have all called hw_perf_save_disable first. [ Impact: more precise counter group disable/enable functionality ] Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: Corey Ashford LKML-Reference: <18953.25733.53359.147452@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 5ea0240adab..ff166c11b69 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -663,6 +663,7 @@ static void __perf_counter_enable(void *info) struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_counter_context *ctx = counter->ctx; struct perf_counter *leader = counter->group_leader; + unsigned long pmuflags; unsigned long flags; int err; @@ -689,14 +690,18 @@ static void __perf_counter_enable(void *info) if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE) goto unlock; - if (!group_can_go_on(counter, cpuctx, 1)) + if (!group_can_go_on(counter, cpuctx, 1)) { err = -EEXIST; - else if (counter == leader) - err = group_sched_in(counter, cpuctx, ctx, - smp_processor_id()); - else - err = counter_sched_in(counter, cpuctx, ctx, - smp_processor_id()); + } else { + pmuflags = hw_perf_save_disable(); + if (counter == leader) + err = group_sched_in(counter, cpuctx, ctx, + smp_processor_id()); + else + err = counter_sched_in(counter, cpuctx, ctx, + smp_processor_id()); + hw_perf_restore(pmuflags); + } if (err) { /* -- cgit v1.2.3 From 5bb9efe33ea4001a17ab98186a40a134a3061d67 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 13 May 2009 08:12:51 +0200 Subject: perf_counter: fix print debug irq disable inconsistent {IN-HARDIRQ-W} -> {HARDIRQ-ON-W} usage. bash/15802 [HC0[0]:SC0[0]:HE1:SE1] takes: (sysrq_key_table_lock){?.....}, Don't unconditionally enable interrupts in the perf_counter_print_debug() path. 
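For reference, the general pattern (a sketch, not the patch itself): save/restore preserves whatever IRQ state the caller had, instead of unconditionally turning interrupts back on:

    unsigned long flags;

    local_irq_save(flags);      /* IRQs off; previous state kept in 'flags' */
    /* ... dump the PMU registers ... */
    local_irq_restore(flags);   /* off stays off, on becomes on again */

    /*
     * A bare local_irq_disable()/local_irq_enable() pair would
     * re-enable IRQs even under sysrq_key_table_lock, which is
     * exactly the inconsistency lockdep complained about.
     */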
[ Impact: fix potential deadlock pointed out by lockdep ] LKML-Reference: Reported-by: Ingo Molnar Signed-off-by: Peter Zijlstra --- arch/x86/kernel/cpu/perf_counter.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index da27419923a..f7772ff7936 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -621,12 +621,13 @@ void perf_counter_print_debug(void) { u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; struct cpu_hw_counters *cpuc; + unsigned long flags; int cpu, idx; if (!x86_pmu.num_counters) return; - local_irq_disable(); + local_irq_save(flags); cpu = smp_processor_id(); cpuc = &per_cpu(cpu_hw_counters, cpu); @@ -664,7 +665,7 @@ void perf_counter_print_debug(void) pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", cpu, idx, pmc_count); } - local_irq_enable(); + local_irq_restore(flags); } static void x86_pmu_disable(struct perf_counter *counter) -- cgit v1.2.3 From 1a853e36871b533ccc3f3c5bdd5cd0d867043a00 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 14 May 2009 22:50:46 -0300 Subject: perf record: Allow specifying a pid to record Allow specifying a pid instead of always fork+exec'ing a command. Because the PERF_EVENT_COMM and PERF_EVENT_MMAP events happened before we connected, we must synthesize them so that 'perf report' can get what it needs. [ Impact: add new command line option ] Signed-off-by: Arnaldo Carvalho de Melo Cc: Clark Williams Cc: John Kacur Cc: Peter Zijlstra LKML-Reference: <20090515015046.GA13664@ghostprotocols.net> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 163 +++++++++++++++++++++++++--- 1 file changed, 146 insertions(+), 17 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 5f5e6df0260..efb87595f3c 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -34,6 +34,9 @@ #include "perf.h" +#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) +#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) + static int nr_counters = 0; static __u64 event_id[MAX_COUNTERS] = { }; static int default_interval = 100000; @@ -47,6 +50,7 @@ static char *output_name = "output.perf"; static int group = 0; static unsigned int realtime_prio = 0; static int system_wide = 0; +static pid_t target_pid = -1; static int inherit = 1; static int nmi = 1; @@ -180,6 +184,7 @@ static void display_help(void) " -c CNT --count=CNT # event period to sample\n" " -m pages --mmap_pages= # number of mmap data pages\n" " -o file --output= # output file\n" + " -p pid --pid= # record events on existing pid\n" " -r prio --realtime= # use RT prio\n" " -s --system # system wide profiling\n" ); @@ -199,13 +204,14 @@ static void process_options(int argc, const char *argv[]) {"event", required_argument, NULL, 'e'}, {"mmap_pages", required_argument, NULL, 'm'}, {"output", required_argument, NULL, 'o'}, + {"pid", required_argument, NULL, 'p'}, {"realtime", required_argument, NULL, 'r'}, {"system", no_argument, NULL, 's'}, {"inherit", no_argument, NULL, 'i'}, {"nmi", no_argument, NULL, 'n'}, {NULL, 0, NULL, 0 } }; - int c = getopt_long(argc, argv, "+:c:e:m:o:r:sin", + int c = getopt_long(argc, argv, "+:c:e:m:o:p:r:sin", long_options, &option_index); if (c == -1) break; @@ -215,6 +221,7 @@ static void process_options(int argc, const char *argv[]) case 'e': error = parse_events(optarg); break; case 
'm': mmap_pages = atoi(optarg); break; case 'o': output_name = strdup(optarg); break; + case 'p': target_pid = atoi(optarg); break; case 'r': realtime_prio = atoi(optarg); break; case 's': system_wide ^= 1; break; case 'i': inherit ^= 1; break; @@ -223,7 +230,7 @@ static void process_options(int argc, const char *argv[]) } } - if (argc - optind == 0) + if (argc - optind == 0 && target_pid == -1) error = 1; if (error) @@ -350,15 +357,135 @@ static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; static int nr_poll; static int nr_cpu; -static void open_counters(int cpu) +struct mmap_event { + struct perf_event_header header; + __u32 pid, tid; + __u64 start; + __u64 len; + __u64 pgoff; + char filename[PATH_MAX]; +}; +struct comm_event { + struct perf_event_header header; + __u32 pid,tid; + char comm[16]; +}; + +static pid_t pid_synthesize_comm_event(pid_t pid) +{ + char filename[PATH_MAX]; + char bf[BUFSIZ]; + struct comm_event comm_ev; + size_t size; + int fd; + + snprintf(filename, sizeof(filename), "/proc/%d/stat", pid); + + fd = open(filename, O_RDONLY); + if (fd < 0) { + fprintf(stderr, "couldn't open %s\n", filename); + exit(EXIT_FAILURE); + } + if (read(fd, bf, sizeof(bf)) < 0) { + fprintf(stderr, "couldn't read %s\n", filename); + exit(EXIT_FAILURE); + } + close(fd); + + pid_t spid, ppid; + char state; + char comm[18]; + + memset(&comm_ev, 0, sizeof(comm_ev)); + int nr = sscanf(bf, "%d %s %c %d %d ", + &spid, comm, &state, &ppid, &comm_ev.pid); + if (nr != 5) { + fprintf(stderr, "couldn't get COMM and pgid, malformed %s\n", + filename); + exit(EXIT_FAILURE); + } + comm_ev.header.type = PERF_EVENT_COMM; + comm_ev.tid = pid; + size = strlen(comm); + comm[--size] = '\0'; /* Remove the ')' at the end */ + --size; /* Remove the '(' at the begin */ + memcpy(comm_ev.comm, comm + 1, size); + size = ALIGN(size, sizeof(uint64_t)); + comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size); + int ret = write(output, &comm_ev, comm_ev.header.size); + if (ret < 0) { + perror("failed to write"); + exit(-1); + } + return comm_ev.pid; +} + +static void pid_synthesize_mmap_events(pid_t pid, pid_t pgid) +{ + char filename[PATH_MAX]; + FILE *fp; + + snprintf(filename, sizeof(filename), "/proc/%d/maps", pid); + + fp = fopen(filename, "r"); + if (fp == NULL) { + fprintf(stderr, "couldn't open %s\n", filename); + exit(EXIT_FAILURE); + } + while (1) { + char bf[BUFSIZ]; + unsigned char vm_read, vm_write, vm_exec, vm_mayshare; + struct mmap_event mmap_ev = { + .header.type = PERF_EVENT_MMAP, + }; + unsigned long ino; + int major, minor; + size_t size; + if (fgets(bf, sizeof(bf), fp) == NULL) + break; + + /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ + sscanf(bf, "%llx-%llx %c%c%c%c %llx %x:%x %lu", + &mmap_ev.start, &mmap_ev.len, + &vm_read, &vm_write, &vm_exec, &vm_mayshare, + &mmap_ev.pgoff, &major, &minor, &ino); + if (vm_exec == 'x') { + char *execname = strrchr(bf, ' '); + + if (execname == NULL || execname[1] != '/') + continue; + + execname += 1; + size = strlen(execname); + execname[size - 1] = '\0'; /* Remove \n */ + memcpy(mmap_ev.filename, execname, size); + size = ALIGN(size, sizeof(uint64_t)); + mmap_ev.len -= mmap_ev.start; + mmap_ev.header.size = (sizeof(mmap_ev) - + (sizeof(mmap_ev.filename) - size)); + mmap_ev.pid = pgid; + mmap_ev.tid = pid; + + if (write(output, &mmap_ev, mmap_ev.header.size) < 0) { + perror("failed to write"); + exit(-1); + } + } + } + + fclose(fp); +} + +static void open_counters(int cpu, pid_t pid) { struct perf_counter_hw_event 
hw_event; int counter, group_fd; int track = 1; - pid_t pid = -1; - if (cpu < 0) - pid = 0; + if (pid > 0) { + pid_t pgid = pid_synthesize_comm_event(pid); + pid_synthesize_mmap_events(pid, pgid); + } group_fd = -1; for (counter = 0; counter < nr_counters; counter++) { @@ -435,22 +562,24 @@ int cmd_record(int argc, const char **argv) argc -= optind; argv += optind; - if (!system_wide) - open_counters(-1); - else for (i = 0; i < nr_cpus; i++) - open_counters(i); + if (!system_wide) { + open_counters(-1, target_pid != -1 ? target_pid : 0); + } else for (i = 0; i < nr_cpus; i++) + open_counters(i, target_pid); signal(SIGCHLD, sig_handler); signal(SIGINT, sig_handler); - pid = fork(); - if (pid < 0) - perror("failed to fork"); + if (target_pid == -1) { + pid = fork(); + if (pid < 0) + perror("failed to fork"); - if (!pid) { - if (execvp(argv[0], argv)) { - perror(argv[0]); - exit(-1); + if (!pid) { + if (execvp(argv[0], argv)) { + perror(argv[0]); + exit(-1); + } } } -- cgit v1.2.3 From ec3232bdf8518bea8410f0027f870b24d3aa8753 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 13 May 2009 09:45:19 +0200 Subject: perf_counter: x86: More accurate counter update Take the counter width into account instead of assuming 32 bits. In particular Nehalem has 44-bit wide counters, and all arithmetic should happen on a 44-bit signed integer basis. [ Impact: fix rare event imprecision, warning message on Nehalem ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index f7772ff7936..3a92a2b2a80 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -138,7 +138,9 @@ static u64 x86_perf_counter_update(struct perf_counter *counter, struct hw_perf_counter *hwc, int idx) { - u64 prev_raw_count, new_raw_count, delta; + int shift = 64 - x86_pmu.counter_bits; + u64 prev_raw_count, new_raw_count; + s64 delta; /* * Careful: an NMI might modify the previous counter value. @@ -161,9 +163,10 @@ again: * (counter-)time and add that to the generic counter. * * Careful, not all hw sign-extends above the physical width - * of the count, so we do that by clipping the delta to 32 bits: + * of the count. */ - delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count); + delta = (new_raw_count << shift) - (prev_raw_count << shift); + delta >>= shift; atomic64_add(delta, &counter->count); atomic64_sub(delta, &hwc->period_left); -- cgit v1.2.3 From f5a5a2f6e69e88647ae12da39f0ff3a510bcf0a6 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 13 May 2009 12:54:01 +0200 Subject: perf_counter: x86: Fix throttling If counters are disabled globally when a perfcounter IRQ/NMI hits, and if we throttle in that case, we'll promote the '0' value to the next lapic IRQ and disable all perfcounters at that point, permanently ... Fix it.
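In condensed form, the failure mode and the guarded restore look like this (a sketch using the names from the hunk below, with the surrounding handler state elided):

    /*
     * Sketch of the bug: if the PMU was globally disabled when the
     * IRQ/NMI hit, throttle_ctrl was saved as 0, and "restoring" that
     * 0 on the next lapic interrupt leaves every counter disabled
     * permanently:
     */
    if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
            intel_pmu_restore_all(cpuc->throttle_ctrl);     /* may write back 0 */

    /* The fix: only restore when there is a non-zero state to restore: */
    if (cpuc->throttle_ctrl) {
            if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
                    intel_pmu_restore_all(cpuc->throttle_ctrl);
    }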
[ Impact: fix hung perfcounters under load ] Acked-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 3a92a2b2a80..88ae8cebf3c 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -765,8 +765,13 @@ out: /* * Restore - do not reenable when global enable is off or throttled: */ - if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS) - intel_pmu_restore_all(cpuc->throttle_ctrl); + if (cpuc->throttle_ctrl) { + if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS) { + intel_pmu_restore_all(cpuc->throttle_ctrl); + } else { + pr_info("CPU#%d: perfcounters: max interrupt rate exceeded! Throttle on.\n", smp_processor_id()); + } + } return ret; } @@ -817,11 +822,16 @@ void perf_counter_unthrottle(void) cpuc = &__get_cpu_var(cpu_hw_counters); if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) { - if (printk_ratelimit()) - printk(KERN_WARNING "perfcounters: max interrupts exceeded!\n"); + pr_info("CPU#%d: perfcounters: throttle off.\n", smp_processor_id()); + + /* + * Clear them before re-enabling irqs/NMIs again: + */ + cpuc->interrupts = 0; hw_perf_restore(cpuc->throttle_ctrl); + } else { + cpuc->interrupts = 0; } - cpuc->interrupts = 0; } void smp_perf_counter_interrupt(struct pt_regs *regs) -- cgit v1.2.3 From a026dfecc035f213c1cfa0bf6407ce3155f6a9df Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 13 May 2009 10:02:57 +0200 Subject: perf_counter: x86: Allow unprivileged use of NMIs Apply sysctl_perf_counter_priv to NMIs. Also, fail the counter creation instead of silently down-grading to regular interrupts. [ Impact: allow wider perf-counter usage ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 88ae8cebf3c..c19e927b697 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -280,8 +280,11 @@ static int __hw_perf_counter_init(struct perf_counter *counter) * If privileged enough, allow NMI events: */ hwc->nmi = 0; - if (capable(CAP_SYS_ADMIN) && hw_event->nmi) + if (hw_event->nmi) { + if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN)) + return -EACCES; hwc->nmi = 1; + } hwc->irq_period = hw_event->irq_period; if ((s64)hwc->irq_period <= 0 || hwc->irq_period > x86_pmu.max_period) -- cgit v1.2.3 From 53020fe81eecd0b7be295868ce5850ef8f41074e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 13 May 2009 21:26:19 +0200 Subject: perf_counter: Fix perf_output_copy() WARN to account for overflow The simple reservation test in perf_output_copy() failed to take unsigned int overflow into account, fix this.
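The wrap-safe form of the check, quoted from the hunk below, compares the signed distance between the two free-running positions instead of the positions themselves:

    /*
     * head and offset both advance monotonically through the output
     * buffer, so once more than 4GB has been copied the 32-bit values
     * wrap and a plain "offset > head" test fires spuriously. The
     * signed difference stays correct as long as the two positions
     * are less than 2^31 bytes apart:
     */
    WARN_ON_ONCE(((int)(handle->head - handle->offset)) < 0);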
[ Impact: fix false positive warning with more than 4GB of profiling data ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index ff166c11b69..985be0b662a 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1927,7 +1927,11 @@ static void perf_output_copy(struct perf_output_handle *handle, handle->offset = offset; - WARN_ON_ONCE(handle->offset > handle->head); + /* + * Check we didn't copy past our reservation window, taking the + * possible unsigned int wrap into account. + */ + WARN_ON_ONCE(((int)(handle->head - handle->offset)) < 0); } #define perf_output_put(handle, x) \ -- cgit v1.2.3 From 962bf7a66edca4d36a730a38ff8410a67f560e40 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 13 May 2009 13:21:36 +0200 Subject: perf_counter: x86: Fix up the amd NMI/INT throttle perf_counter_unthrottle() restores throttle_ctrl, but it's never set. Also, we fail to disable all counters when throttling. [ Impact: fix rare stuck perf-counters when they are throttled ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 38 ++++++++++++++++++++++++++------------ 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index c19e927b697..7601c014f8f 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -334,6 +334,8 @@ static u64 amd_pmu_save_disable_all(void) * right thing. */ barrier(); + if (!enabled) + goto out; for (idx = 0; idx < x86_pmu.num_counters; idx++) { u64 val; @@ -347,6 +349,7 @@ static u64 amd_pmu_save_disable_all(void) wrmsrl(MSR_K7_EVNTSEL0 + idx, val); } +out: return enabled; } @@ -787,32 +790,43 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) int handled = 0; struct perf_counter *counter; struct hw_perf_counter *hwc; - int idx; + int idx, throttle = 0; + + cpuc->throttle_ctrl = cpuc->enabled; + cpuc->enabled = 0; + barrier(); + + if (cpuc->throttle_ctrl) { + if (++cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) + throttle = 1; + } - ++cpuc->interrupts; for (idx = 0; idx < x86_pmu.num_counters; idx++) { + int disable = 0; + if (!test_bit(idx, cpuc->active_mask)) continue; + counter = cpuc->counters[idx]; hwc = &counter->hw; val = x86_perf_counter_update(counter, hwc, idx); if (val & (1ULL << (x86_pmu.counter_bits - 1))) - continue; + goto next; + /* counter overflow */ x86_perf_counter_set_period(counter, hwc, idx); handled = 1; inc_irq_stat(apic_perf_irqs); - if (perf_counter_overflow(counter, nmi, regs, 0)) - amd_pmu_disable_counter(hwc, idx); - else if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) - /* - * do not reenable when throttled, but reload - * the register - */ + disable = perf_counter_overflow(counter, nmi, regs, 0); + +next: + if (disable || throttle) amd_pmu_disable_counter(hwc, idx); - else if (counter->state == PERF_COUNTER_STATE_ACTIVE) - amd_pmu_enable_counter(hwc, idx); } + + if (cpuc->throttle_ctrl && !throttle) + cpuc->enabled = 1; + return handled; } -- cgit v1.2.3 From 9e35ad388bea89f7d6f375af4c0ae98803688666 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 13 May 2009 16:21:38 +0200 Subject: perf_counter: Rework the perf counter disable/enable The current disable/enable mechanism is: token =
hw_perf_save_disable(); ... /* do bits */ ... hw_perf_restore(token); This works well, provided that the use nests properly. Except we don't. x86 NMI/INT throttling has non-nested use of this, breaking things. Therefore provide a reference counter disable/enable interface, where the first disable disables the hardware, and the last enable enables the hardware again. [ Impact: refactor, simplify the PMU disable/enable logic ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 24 ++++---- arch/x86/kernel/cpu/perf_counter.c | 113 ++++++++++++++----------------------- drivers/acpi/processor_idle.c | 6 +- include/linux/perf_counter.h | 10 ++-- kernel/perf_counter.c | 76 +++++++++++++++---------- 5 files changed, 109 insertions(+), 120 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 15cdc8e6722..bb1b463c136 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -386,7 +386,7 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0) * Disable all counters to prevent PMU interrupts and to allow * counters to be added or removed. */ -u64 hw_perf_save_disable(void) +void hw_perf_disable(void) { struct cpu_hw_counters *cpuhw; unsigned long ret; @@ -428,7 +428,6 @@ u64 hw_perf_save_disable(void) mb(); } local_irq_restore(flags); - return ret; } /* @@ -436,7 +435,7 @@ u64 hw_perf_save_disable(void) * If we were previously disabled and counters were added, then * put the new config on the PMU. */ -void hw_perf_restore(u64 disable) +void hw_perf_enable(void) { struct perf_counter *counter; struct cpu_hw_counters *cpuhw; @@ -448,9 +447,12 @@ void hw_perf_restore(u64 disable) int n_lim; int idx; - if (disable) - return; local_irq_save(flags); + if (!cpuhw->disabled) { + local_irq_restore(flags); + return; + } + cpuhw = &__get_cpu_var(cpu_hw_counters); cpuhw->disabled = 0; @@ -649,19 +651,18 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader, /* * Add a counter to the PMU. * If all counters are not already frozen, then we disable and - * re-enable the PMU in order to get hw_perf_restore to do the + * re-enable the PMU in order to get hw_perf_enable to do the * actual work of reconfiguring the PMU. 
*/ static int power_pmu_enable(struct perf_counter *counter) { struct cpu_hw_counters *cpuhw; unsigned long flags; - u64 pmudis; int n0; int ret = -EAGAIN; local_irq_save(flags); - pmudis = hw_perf_save_disable(); + perf_disable(); /* * Add the counter to the list (if there is room) @@ -685,7 +686,7 @@ static int power_pmu_enable(struct perf_counter *counter) ret = 0; out: - hw_perf_restore(pmudis); + perf_enable(); local_irq_restore(flags); return ret; } @@ -697,11 +698,10 @@ static void power_pmu_disable(struct perf_counter *counter) { struct cpu_hw_counters *cpuhw; long i; - u64 pmudis; unsigned long flags; local_irq_save(flags); - pmudis = hw_perf_save_disable(); + perf_disable(); power_pmu_read(counter); @@ -735,7 +735,7 @@ static void power_pmu_disable(struct perf_counter *counter) cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); } - hw_perf_restore(pmudis); + perf_enable(); local_irq_restore(flags); } diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 7601c014f8f..313638cecbb 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -31,7 +31,6 @@ struct cpu_hw_counters { unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; unsigned long interrupts; - u64 throttle_ctrl; int enabled; }; @@ -42,8 +41,8 @@ struct x86_pmu { const char *name; int version; int (*handle_irq)(struct pt_regs *, int); - u64 (*save_disable_all)(void); - void (*restore_all)(u64); + void (*disable_all)(void); + void (*enable_all)(void); void (*enable)(struct hw_perf_counter *, int); void (*disable)(struct hw_perf_counter *, int); unsigned eventsel; @@ -56,6 +55,7 @@ struct x86_pmu { int counter_bits; u64 counter_mask; u64 max_period; + u64 intel_ctrl; }; static struct x86_pmu x86_pmu __read_mostly; @@ -311,22 +311,19 @@ static int __hw_perf_counter_init(struct perf_counter *counter) return 0; } -static u64 intel_pmu_save_disable_all(void) +static void intel_pmu_disable_all(void) { - u64 ctrl; - - rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); - - return ctrl; } -static u64 amd_pmu_save_disable_all(void) +static void amd_pmu_disable_all(void) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - int enabled, idx; + int idx; + + if (!cpuc->enabled) + return; - enabled = cpuc->enabled; cpuc->enabled = 0; /* * ensure we write the disable before we start disabling the @@ -334,8 +331,6 @@ static u64 amd_pmu_save_disable_all(void) * right thing. 
*/ barrier(); - if (!enabled) - goto out; for (idx = 0; idx < x86_pmu.num_counters; idx++) { u64 val; @@ -348,37 +343,31 @@ static u64 amd_pmu_save_disable_all(void) val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; wrmsrl(MSR_K7_EVNTSEL0 + idx, val); } - -out: - return enabled; } -u64 hw_perf_save_disable(void) +void hw_perf_disable(void) { if (!x86_pmu_initialized()) - return 0; - return x86_pmu.save_disable_all(); + return; + return x86_pmu.disable_all(); } -/* - * Exported because of ACPI idle - */ -EXPORT_SYMBOL_GPL(hw_perf_save_disable); -static void intel_pmu_restore_all(u64 ctrl) +static void intel_pmu_enable_all(void) { - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); } -static void amd_pmu_restore_all(u64 ctrl) +static void amd_pmu_enable_all(void) { struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); int idx; - cpuc->enabled = ctrl; - barrier(); - if (!ctrl) + if (cpuc->enabled) return; + cpuc->enabled = 1; + barrier(); + for (idx = 0; idx < x86_pmu.num_counters; idx++) { u64 val; @@ -392,16 +381,12 @@ static void amd_pmu_restore_all(u64 ctrl) } } -void hw_perf_restore(u64 ctrl) +void hw_perf_enable(void) { if (!x86_pmu_initialized()) return; - x86_pmu.restore_all(ctrl); + x86_pmu.enable_all(); } -/* - * Exported because of ACPI idle - */ -EXPORT_SYMBOL_GPL(hw_perf_restore); static inline u64 intel_pmu_get_status(void) { @@ -735,15 +720,14 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi) int bit, cpu = smp_processor_id(); u64 ack, status; struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu); - int ret = 0; - - cpuc->throttle_ctrl = intel_pmu_save_disable_all(); + perf_disable(); status = intel_pmu_get_status(); - if (!status) - goto out; + if (!status) { + perf_enable(); + return 0; + } - ret = 1; again: inc_irq_stat(apic_perf_irqs); ack = status; @@ -767,19 +751,11 @@ again: status = intel_pmu_get_status(); if (status) goto again; -out: - /* - * Restore - do not reenable when global enable is off or throttled: - */ - if (cpuc->throttle_ctrl) { - if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS) { - intel_pmu_restore_all(cpuc->throttle_ctrl); - } else { - pr_info("CPU#%d: perfcounters: max interrupt rate exceeded! 
Throttle on.\n", smp_processor_id()); - } - } - return ret; + if (++cpuc->interrupts != PERFMON_MAX_INTERRUPTS) + perf_enable(); + + return 1; } static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) @@ -792,13 +768,11 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) struct hw_perf_counter *hwc; int idx, throttle = 0; - cpuc->throttle_ctrl = cpuc->enabled; - cpuc->enabled = 0; - barrier(); - - if (cpuc->throttle_ctrl) { - if (++cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) - throttle = 1; + if (++cpuc->interrupts == PERFMON_MAX_INTERRUPTS) { + throttle = 1; + __perf_disable(); + cpuc->enabled = 0; + barrier(); } for (idx = 0; idx < x86_pmu.num_counters; idx++) { @@ -824,9 +798,6 @@ next: amd_pmu_disable_counter(hwc, idx); } - if (cpuc->throttle_ctrl && !throttle) - cpuc->enabled = 1; - return handled; } @@ -839,13 +810,11 @@ void perf_counter_unthrottle(void) cpuc = &__get_cpu_var(cpu_hw_counters); if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) { - pr_info("CPU#%d: perfcounters: throttle off.\n", smp_processor_id()); - /* * Clear them before re-enabling irqs/NMIs again: */ cpuc->interrupts = 0; - hw_perf_restore(cpuc->throttle_ctrl); + perf_enable(); } else { cpuc->interrupts = 0; } @@ -931,8 +900,8 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = { static struct x86_pmu intel_pmu = { .name = "Intel", .handle_irq = intel_pmu_handle_irq, - .save_disable_all = intel_pmu_save_disable_all, - .restore_all = intel_pmu_restore_all, + .disable_all = intel_pmu_disable_all, + .enable_all = intel_pmu_enable_all, .enable = intel_pmu_enable_counter, .disable = intel_pmu_disable_counter, .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, @@ -951,8 +920,8 @@ static struct x86_pmu intel_pmu = { static struct x86_pmu amd_pmu = { .name = "AMD", .handle_irq = amd_pmu_handle_irq, - .save_disable_all = amd_pmu_save_disable_all, - .restore_all = amd_pmu_restore_all, + .disable_all = amd_pmu_disable_all, + .enable_all = amd_pmu_enable_all, .enable = amd_pmu_enable_counter, .disable = amd_pmu_disable_counter, .eventsel = MSR_K7_EVNTSEL0, @@ -1003,6 +972,8 @@ static int intel_pmu_init(void) x86_pmu.counter_bits = eax.split.bit_width; x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1; + rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); + return 0; } diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index d2830f39d46..9645758c047 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -763,11 +763,9 @@ static int acpi_idle_bm_check(void) */ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) { - u64 perf_flags; - /* Don't trace irqs off for idle */ stop_critical_timings(); - perf_flags = hw_perf_save_disable(); + perf_disable(); if (cx->entry_method == ACPI_CSTATE_FFH) { /* Call into architectural FFH based C-state */ acpi_processor_ffh_cstate_enter(cx); @@ -782,7 +780,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) gets asserted in time to freeze execution properly. 
*/ unused = inl(acpi_gbl_FADT.xpm_timer_block.address); } - hw_perf_restore(perf_flags); + perf_enable(); start_critical_timings(); } diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 614f921d616..e543ecc129f 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -544,8 +544,10 @@ extern void perf_counter_exit_task(struct task_struct *child); extern void perf_counter_do_pending(void); extern void perf_counter_print_debug(void); extern void perf_counter_unthrottle(void); -extern u64 hw_perf_save_disable(void); -extern void hw_perf_restore(u64 ctrl); +extern void __perf_disable(void); +extern bool __perf_enable(void); +extern void perf_disable(void); +extern void perf_enable(void); extern int perf_counter_task_disable(void); extern int perf_counter_task_enable(void); extern int hw_perf_group_sched_in(struct perf_counter *group_leader, @@ -600,8 +602,8 @@ static inline void perf_counter_exit_task(struct task_struct *child) { } static inline void perf_counter_do_pending(void) { } static inline void perf_counter_print_debug(void) { } static inline void perf_counter_unthrottle(void) { } -static inline void hw_perf_restore(u64 ctrl) { } -static inline u64 hw_perf_save_disable(void) { return 0; } +static inline void perf_disable(void) { } +static inline void perf_enable(void) { } static inline int perf_counter_task_disable(void) { return -EINVAL; } static inline int perf_counter_task_enable(void) { return -EINVAL; } diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 985be0b662a..e814ff04d7c 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -60,8 +60,9 @@ extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counte return NULL; } -u64 __weak hw_perf_save_disable(void) { return 0; } -void __weak hw_perf_restore(u64 ctrl) { barrier(); } +void __weak hw_perf_disable(void) { barrier(); } +void __weak hw_perf_enable(void) { barrier(); } + void __weak hw_perf_counter_setup(int cpu) { barrier(); } int __weak hw_perf_group_sched_in(struct perf_counter *group_leader, struct perf_cpu_context *cpuctx, @@ -72,6 +73,32 @@ int __weak hw_perf_group_sched_in(struct perf_counter *group_leader, void __weak perf_counter_print_debug(void) { } +static DEFINE_PER_CPU(int, disable_count); + +void __perf_disable(void) +{ + __get_cpu_var(disable_count)++; +} + +bool __perf_enable(void) +{ + return !--__get_cpu_var(disable_count); +} + +void perf_disable(void) +{ + __perf_disable(); + hw_perf_disable(); +} +EXPORT_SYMBOL_GPL(perf_disable); /* ACPI idle */ + +void perf_enable(void) +{ + if (__perf_enable()) + hw_perf_enable(); +} +EXPORT_SYMBOL_GPL(perf_enable); /* ACPI idle */ + static void list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) { @@ -170,7 +197,6 @@ static void __perf_counter_remove_from_context(void *info) struct perf_counter *counter = info; struct perf_counter_context *ctx = counter->ctx; unsigned long flags; - u64 perf_flags; /* * If this is a task context, we need to check whether it is @@ -191,9 +217,9 @@ static void __perf_counter_remove_from_context(void *info) * Protect the list operation against NMI by disabling the * counters on a global level. NOP for non NMI based counters. 
*/ - perf_flags = hw_perf_save_disable(); + perf_disable(); list_del_counter(counter, ctx); - hw_perf_restore(perf_flags); + perf_enable(); if (!ctx->task) { /* @@ -538,7 +564,6 @@ static void __perf_install_in_context(void *info) struct perf_counter *leader = counter->group_leader; int cpu = smp_processor_id(); unsigned long flags; - u64 perf_flags; int err; /* @@ -556,7 +581,7 @@ static void __perf_install_in_context(void *info) * Protect the list operation against NMI by disabling the * counters on a global level. NOP for non NMI based counters. */ - perf_flags = hw_perf_save_disable(); + perf_disable(); add_counter_to_ctx(counter, ctx); @@ -596,7 +621,7 @@ static void __perf_install_in_context(void *info) cpuctx->max_pertask--; unlock: - hw_perf_restore(perf_flags); + perf_enable(); spin_unlock_irqrestore(&ctx->lock, flags); } @@ -663,7 +688,6 @@ static void __perf_counter_enable(void *info) struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_counter_context *ctx = counter->ctx; struct perf_counter *leader = counter->group_leader; - unsigned long pmuflags; unsigned long flags; int err; @@ -693,14 +717,14 @@ static void __perf_counter_enable(void *info) if (!group_can_go_on(counter, cpuctx, 1)) { err = -EEXIST; } else { - pmuflags = hw_perf_save_disable(); + perf_disable(); if (counter == leader) err = group_sched_in(counter, cpuctx, ctx, smp_processor_id()); else err = counter_sched_in(counter, cpuctx, ctx, smp_processor_id()); - hw_perf_restore(pmuflags); + perf_enable(); } if (err) { @@ -795,7 +819,6 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx, struct perf_cpu_context *cpuctx) { struct perf_counter *counter; - u64 flags; spin_lock(&ctx->lock); ctx->is_active = 0; @@ -803,12 +826,12 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx, goto out; update_context_time(ctx); - flags = hw_perf_save_disable(); + perf_disable(); if (ctx->nr_active) { list_for_each_entry(counter, &ctx->counter_list, list_entry) group_sched_out(counter, cpuctx, ctx); } - hw_perf_restore(flags); + perf_enable(); out: spin_unlock(&ctx->lock); } @@ -860,7 +883,6 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, struct perf_cpu_context *cpuctx, int cpu) { struct perf_counter *counter; - u64 flags; int can_add_hw = 1; spin_lock(&ctx->lock); @@ -870,7 +892,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, ctx->timestamp = perf_clock(); - flags = hw_perf_save_disable(); + perf_disable(); /* * First go through the list and put on any pinned groups @@ -917,7 +939,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, can_add_hw = 0; } } - hw_perf_restore(flags); + perf_enable(); out: spin_unlock(&ctx->lock); } @@ -955,7 +977,6 @@ int perf_counter_task_disable(void) struct perf_counter_context *ctx = &curr->perf_counter_ctx; struct perf_counter *counter; unsigned long flags; - u64 perf_flags; if (likely(!ctx->nr_counters)) return 0; @@ -969,7 +990,7 @@ int perf_counter_task_disable(void) /* * Disable all the counters: */ - perf_flags = hw_perf_save_disable(); + perf_disable(); list_for_each_entry(counter, &ctx->counter_list, list_entry) { if (counter->state != PERF_COUNTER_STATE_ERROR) { @@ -978,7 +999,7 @@ int perf_counter_task_disable(void) } } - hw_perf_restore(perf_flags); + perf_enable(); spin_unlock_irqrestore(&ctx->lock, flags); @@ -991,7 +1012,6 @@ int perf_counter_task_enable(void) struct perf_counter_context *ctx = &curr->perf_counter_ctx; struct perf_counter *counter; unsigned long flags; - u64 perf_flags; int cpu; 
if (likely(!ctx->nr_counters)) @@ -1007,7 +1027,7 @@ int perf_counter_task_enable(void) /* * Disable all the counters: */ - perf_flags = hw_perf_save_disable(); + perf_disable(); list_for_each_entry(counter, &ctx->counter_list, list_entry) { if (counter->state > PERF_COUNTER_STATE_OFF) @@ -1017,7 +1037,7 @@ int perf_counter_task_enable(void) ctx->time - counter->total_time_enabled; counter->hw_event.disabled = 0; } - hw_perf_restore(perf_flags); + perf_enable(); spin_unlock(&ctx->lock); @@ -1034,7 +1054,6 @@ int perf_counter_task_enable(void) static void rotate_ctx(struct perf_counter_context *ctx) { struct perf_counter *counter; - u64 perf_flags; if (!ctx->nr_counters) return; @@ -1043,12 +1062,12 @@ static void rotate_ctx(struct perf_counter_context *ctx) /* * Rotate the first entry last (works just fine for group counters too): */ - perf_flags = hw_perf_save_disable(); + perf_disable(); list_for_each_entry(counter, &ctx->counter_list, list_entry) { list_move_tail(&counter->list_entry, &ctx->counter_list); break; } - hw_perf_restore(perf_flags); + perf_enable(); spin_unlock(&ctx->lock); } @@ -3194,7 +3213,6 @@ __perf_counter_exit_task(struct task_struct *child, } else { struct perf_cpu_context *cpuctx; unsigned long flags; - u64 perf_flags; /* * Disable and unlink this counter. @@ -3203,7 +3221,7 @@ __perf_counter_exit_task(struct task_struct *child, * could still be processing it: */ local_irq_save(flags); - perf_flags = hw_perf_save_disable(); + perf_disable(); cpuctx = &__get_cpu_var(perf_cpu_context); @@ -3214,7 +3232,7 @@ __perf_counter_exit_task(struct task_struct *child, child_ctx->nr_counters--; - hw_perf_restore(perf_flags); + perf_enable(); local_irq_restore(flags); } -- cgit v1.2.3 From a4016a79fcbd139e7378944c0d86a39fdbc70ecc Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 14 May 2009 14:52:17 +0200 Subject: perf_counter: x86: Robustify interrupt handling Two consecutive NMIs could daze and confuse the machine when the first would handle the overflow of both counters. [ Impact: fix false-positive syslog messages under multi-session profiling ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 313638cecbb..1dcf67057f1 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -783,6 +783,10 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) counter = cpuc->counters[idx]; hwc = &counter->hw; + + if (counter->hw_event.nmi != nmi) + goto next; + val = x86_perf_counter_update(counter, hwc, idx); if (val & (1ULL << (x86_pmu.counter_bits - 1))) goto next; @@ -869,7 +873,6 @@ perf_counter_nmi_handler(struct notifier_block *self, { struct die_args *args = __args; struct pt_regs *regs; - int ret; if (!atomic_read(&active_counters)) return NOTIFY_DONE; @@ -886,9 +889,16 @@ perf_counter_nmi_handler(struct notifier_block *self, regs = args->regs; apic_write(APIC_LVTPC, APIC_DM_NMI); - ret = x86_pmu.handle_irq(regs, 1); + /* + * Can't rely on the handled return value to say it was our NMI, two + * counters could trigger 'simultaneously' raising two back-to-back NMIs. + * + * If the first NMI handles both, the latter will be empty and daze + * the CPU. + */ + x86_pmu.handle_irq(regs, 1); - return ret ? 
NOTIFY_STOP : NOTIFY_OK; + return NOTIFY_STOP; } static __read_mostly struct notifier_block perf_counter_nmi_notifier = { -- cgit v1.2.3 From 1c80f4b598d9b075a2a0be694e28be93a6702bcc Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 15 May 2009 08:25:22 +0200 Subject: perf_counter: x86: Disallow interval of 1 On certain CPUs I have observed a stuck PMU if interval was set to 1 and NMIs were used. The PMU had PMC0 set in MSR_CORE_PERF_GLOBAL_STATUS, but it was not possible to ack it via MSR_CORE_PERF_GLOBAL_OVF_CTRL, and the NMI loop got stuck infinitely. [ Impact: fix rare hangs during high perfcounter load ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 1dcf67057f1..46a82d1e4cb 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -473,6 +473,11 @@ x86_perf_counter_set_period(struct perf_counter *counter, left += period; atomic64_set(&hwc->period_left, left); } + /* + * Quirk: certain CPUs dont like it if just 1 event is left: + */ + if (unlikely(left < 2)) + left = 2; per_cpu(prev_left[idx], smp_processor_id()) = left; -- cgit v1.2.3 From 9029a5e3801f1cc7cdaab80169d82427acf928d8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 15 May 2009 08:26:20 +0200 Subject: perf_counter: x86: Protect against infinite loops in intel_pmu_handle_irq() intel_pmu_handle_irq() can lock up in an infinite loop if the hardware does not allow the acking of irqs. Alas, this happened in testing, so make this robust and emit a warning if it happens in the future. Also, clean up the IRQ handlers a bit.
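Condensed from the hunk below, the escape hatch bounds the ack-and-rescan loop and warns once instead of spinning in NMI context (a sketch; the actual ack/rescan body is elided):

    int loops = 0;
    again:
            if (++loops > 100) {
                    WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
                    return 1;       /* give up rather than hang on unackable status bits */
            }
            /* ... ack the pending status bits, then re-read status ... */
            if (status)
                    goto again;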
[ Impact: improve perfcounter irq/nmi handling robustness ] Acked-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 46a82d1e4cb..5a7f718eb1e 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -722,9 +722,13 @@ static void intel_pmu_save_and_restart(struct perf_counter *counter) */ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi) { - int bit, cpu = smp_processor_id(); + struct cpu_hw_counters *cpuc; + struct cpu_hw_counters; + int bit, cpu, loops; u64 ack, status; - struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu); + + cpu = smp_processor_id(); + cpuc = &per_cpu(cpu_hw_counters, cpu); perf_disable(); status = intel_pmu_get_status(); @@ -733,7 +737,13 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi) return 0; } + loops = 0; again: + if (++loops > 100) { + WARN_ONCE(1, "perfcounters: irq loop stuck!\n"); + return 1; + } + inc_irq_stat(apic_perf_irqs); ack = status; for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { @@ -765,13 +775,14 @@ again: static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) { - int cpu = smp_processor_id(); - struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu); - u64 val; - int handled = 0; + int cpu, idx, throttle = 0, handled = 0; + struct cpu_hw_counters *cpuc; struct perf_counter *counter; struct hw_perf_counter *hwc; - int idx, throttle = 0; + u64 val; + + cpu = smp_processor_id(); + cpuc = &per_cpu(cpu_hw_counters, cpu); if (++cpuc->interrupts == PERFMON_MAX_INTERRUPTS) { throttle = 1; -- cgit v1.2.3 From 251e8e3c7235f5944805a64f24c79fc4696793f1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 14 May 2009 05:16:59 +0200 Subject: perf_counter: Remove ACPI quirk We had a disable/enable around acpi_idle_do_entry() due to an erratum in an early prototype CPU I had access to. That erratum has been fixed in the BIOS, so remove the quirk. The quirk also kept us from profiling interrupts that hit the ACPI idle instruction - so this is an improvement as well, beyond a cleanup and a micro-optimization. [ Impact: improve profiling scope, cleanup, micro-optimization ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford LKML-Reference: Signed-off-by: Ingo Molnar --- drivers/acpi/processor_idle.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 9645758c047..f7ca8c55956 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -765,7 +765,6 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) { /* Don't trace irqs off for idle */ stop_critical_timings(); - perf_disable(); if (cx->entry_method == ACPI_CSTATE_FFH) { /* Call into architectural FFH based C-state */ acpi_processor_ffh_cstate_enter(cx); @@ -780,7 +779,6 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) gets asserted in time to freeze execution properly.
*/ unused = inl(acpi_gbl_FADT.xpm_timer_block.address); } - perf_enable(); start_critical_timings(); } -- cgit v1.2.3 From 58d7e993b16b62d30f8ef27757614056fe4def11 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 15 May 2009 11:03:23 +0200 Subject: perf stat: handle Ctrl-C Before this change, if a long-running perf stat workload was Ctrl-C-ed, the utility exited without displaying statistics. After the change, the Ctrl-C gets propagated into the workload (and causes its early exit there), but perf stat itself will still continue to run and will display counter results. This is useful for open-ended workloads: let them run for a while, then Ctrl-C them to get the stats. [ Impact: extend perf stat with new functionality ] Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-stat.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index cf575c305a6..03518d75d86 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -538,8 +538,14 @@ static void process_options(int argc, char **argv) } } +static void skip_signal(int signo) +{ +} + int cmd_stat(int argc, char **argv, const char *prefix) { + sigset_t blocked; + page_size = sysconf(_SC_PAGE_SIZE); process_options(argc, argv); @@ -548,5 +554,15 @@ int cmd_stat(int argc, char **argv, const char *prefix) assert(nr_cpus <= MAX_NR_CPUS); assert(nr_cpus >= 0); + /* + * We dont want to block the signals - that would cause + * child tasks to inherit that and Ctrl-C would not work. + * What we want is for Ctrl-C to work in the exec()-ed + * task, but being ignored by perf stat itself: + */ + signal(SIGINT, skip_signal); + signal(SIGALRM, skip_signal); + signal(SIGABRT, skip_signal); + return do_perfstat(argc, argv); } -- cgit v1.2.3 From 548e1ddf255b4ebfb4ef20c08936fd8d4deb3bd9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 15 May 2009 15:19:26 +0200 Subject: perf_counter: remove perf_disable/enable exports Now that ACPI idle doesn't use them anymore, remove the exports. [ Impact: remove dead code/data ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo LKML-Reference: <20090515132018.429826617@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index e814ff04d7c..0173738dd54 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -90,14 +90,12 @@ void perf_disable(void) __perf_disable(); hw_perf_disable(); } -EXPORT_SYMBOL_GPL(perf_disable); /* ACPI idle */ void perf_enable(void) { if (__perf_enable()) hw_perf_enable(); } -EXPORT_SYMBOL_GPL(perf_enable); /* ACPI idle */ static void list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) -- cgit v1.2.3 From 789f90fcf6b0b54e655740e9396c954378542c79 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 15 May 2009 15:19:27 +0200 Subject: perf_counter: per user mlock gift Instead of a per-process mlock gift for perf-counters, use a per-user gift so that there is less of a DoS potential.
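Condensed from the perf_mmap() hunk below: the pages are charged against the per-user gift first, and only the remainder spills into the task's RLIMIT_MEMLOCK accounting:

    user_extra = nr_pages + 1;
    user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
    user_locked = atomic_long_read(&user->locked_vm) + user_extra;

    extra = 0;
    if (user_locked > user_lock_limit)
            extra = user_locked - user_lock_limit;  /* counted against RLIMIT_MEMLOCK */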
[ Impact: allow less worst-case unprivileged memory consumption ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo LKML-Reference: <20090515132018.496182835@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/sched.h | 4 ++++ kernel/perf_counter.c | 22 +++++++++++++++------- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index d1857580a13..ff59d123151 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -674,6 +674,10 @@ struct user_struct { struct work_struct work; #endif #endif + +#ifdef CONFIG_PERF_COUNTERS + atomic_long_t locked_vm; +#endif }; extern int uids_sysfs_init(void); diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 0173738dd54..93f4a0e4b87 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -45,7 +45,7 @@ static atomic_t nr_munmap_tracking __read_mostly; static atomic_t nr_comm_tracking __read_mostly; int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */ -int sysctl_perf_counter_mlock __read_mostly = 128; /* 'free' kb per counter */ +int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */ /* * Lock for (sysadmin-configurable) counter reservations: @@ -1522,6 +1522,9 @@ static void perf_mmap_close(struct vm_area_struct *vma) if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) { + struct user_struct *user = current_user(); + + atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm); vma->vm_mm->locked_vm -= counter->data->nr_locked; perf_mmap_data_free(counter); mutex_unlock(&counter->mmap_mutex); @@ -1537,11 +1540,13 @@ static struct vm_operations_struct perf_mmap_vmops = { static int perf_mmap(struct file *file, struct vm_area_struct *vma) { struct perf_counter *counter = file->private_data; + struct user_struct *user = current_user(); unsigned long vma_size; unsigned long nr_pages; + unsigned long user_locked, user_lock_limit; unsigned long locked, lock_limit; + long user_extra, extra; int ret = 0; - long extra; if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE)) return -EINVAL; @@ -1569,15 +1574,17 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) goto unlock; } - extra = nr_pages /* + 1 only account the data pages */; - extra -= sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10); - if (extra < 0) - extra = 0; + user_extra = nr_pages + 1; + user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10); + user_locked = atomic_long_read(&user->locked_vm) + user_extra; - locked = vma->vm_mm->locked_vm + extra; + extra = 0; + if (user_locked > user_lock_limit) + extra = user_locked - user_lock_limit; lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; lock_limit >>= PAGE_SHIFT; + locked = vma->vm_mm->locked_vm + extra; if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) { ret = -EPERM; @@ -1590,6 +1597,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) goto unlock; atomic_set(&counter->mmap_count, 1); + atomic_long_add(user_extra, &user->locked_vm); vma->vm_mm->locked_vm += extra; counter->data->nr_locked = extra; unlock: -- cgit v1.2.3 From 60db5e09c13109b13830cc9dcae688003fd39e79 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 15 May 2009 15:19:28 +0200 Subject: perf_counter: frequency based adaptive irq_period Instead of specifying the irq_period for a counter, provide a target interrupt frequency and dynamically adapt the irq_period to match this frequency. 
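The core of the feedback loop lives in perf_adjust_freq() in the kernel/perf_counter.c hunk below: once per tick, estimate the observed event rate from the interrupts taken at the current period, then step the period halfway towards the value that would match the requested frequency:

    /*
     * interrupts * irq_period approximates the events seen during the
     * last tick; scaling by HZ gives events per second:
     */
    events = HZ * counter->hw.interrupts * counter->hw.irq_period;
    period = div64_u64(events, counter->hw_event.irq_freq);

    /* converge gradually: move half the distance to the target period */
    delta = (s64)(1 + period - counter->hw.irq_period);
    delta >>= 1;

The patch also clamps the resulting period to at least 1, so a counter can never end up with a zero period.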
[ Impact: new perf-counter attribute/feature ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo LKML-Reference: <20090515132018.646195868@chello.nl> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 13 ++++---- arch/x86/kernel/cpu/perf_counter.c | 9 ++---- include/linux/perf_counter.h | 10 ++++-- kernel/perf_counter.c | 63 ++++++++++++++++++++++++++++++-------- 4 files changed, 68 insertions(+), 27 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index bb1b463c136..db8d5cafc15 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -534,7 +534,7 @@ void hw_perf_enable(void) continue; } val = 0; - if (counter->hw_event.irq_period) { + if (counter->hw.irq_period) { left = atomic64_read(&counter->hw.period_left); if (left < 0x80000000L) val = 0x80000000L - left; @@ -829,8 +829,6 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter) if (!ppmu) return ERR_PTR(-ENXIO); - if ((s64)counter->hw_event.irq_period < 0) - return ERR_PTR(-EINVAL); if (!perf_event_raw(&counter->hw_event)) { ev = perf_event_id(&counter->hw_event); if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) @@ -901,7 +899,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter) counter->hw.config = events[n]; counter->hw.counter_base = cflags[n]; - atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period); + atomic64_set(&counter->hw.period_left, counter->hw.irq_period); /* * See if we need to reserve the PMU. @@ -934,6 +932,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter) static void record_and_restart(struct perf_counter *counter, long val, struct pt_regs *regs, int nmi) { + u64 period = counter->hw.irq_period; s64 prev, delta, left; int record = 0; @@ -948,11 +947,11 @@ static void record_and_restart(struct perf_counter *counter, long val, */ val = 0; left = atomic64_read(&counter->hw.period_left) - delta; - if (counter->hw_event.irq_period) { + if (period) { if (left <= 0) { - left += counter->hw_event.irq_period; + left += period; if (left <= 0) - left = counter->hw_event.irq_period; + left = period; record = 1; } if (left < 0x80000000L) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 5a7f718eb1e..886dcf334bc 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -286,11 +286,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter) hwc->nmi = 1; } - hwc->irq_period = hw_event->irq_period; - if ((s64)hwc->irq_period <= 0 || hwc->irq_period > x86_pmu.max_period) - hwc->irq_period = x86_pmu.max_period; - - atomic64_set(&hwc->period_left, hwc->irq_period); + atomic64_set(&hwc->period_left, + min(x86_pmu.max_period, hwc->irq_period)); /* * Raw event type provide the config in the event structure @@ -458,7 +455,7 @@ x86_perf_counter_set_period(struct perf_counter *counter, struct hw_perf_counter *hwc, int idx) { s64 left = atomic64_read(&hwc->period_left); - s64 period = hwc->irq_period; + s64 period = min(x86_pmu.max_period, hwc->irq_period); int err; /* diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index e543ecc129f..004b6e162b9 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -130,7 +130,11 @@ struct perf_counter_hw_event { */ __u64 config; - __u64 irq_period; + union { + __u64 irq_period; + __u64 irq_freq; + }; + __u32 record_type; __u32 read_format; @@ 
-146,8 +150,9 @@ struct perf_counter_hw_event { mmap : 1, /* include mmap data */ munmap : 1, /* include munmap data */ comm : 1, /* include comm data */ + freq : 1, /* use freq, not period */ - __reserved_1 : 52; + __reserved_1 : 51; __u32 extra_config_len; __u32 wakeup_events; /* wakeup every n events */ @@ -337,6 +342,7 @@ struct hw_perf_counter { atomic64_t prev_count; u64 irq_period; atomic64_t period_left; + u64 interrupts; #endif }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 93f4a0e4b87..0ad1db4f3d6 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1046,6 +1046,38 @@ int perf_counter_task_enable(void) return 0; } +void perf_adjust_freq(struct perf_counter_context *ctx) +{ + struct perf_counter *counter; + u64 irq_period; + u64 events, period; + s64 delta; + + spin_lock(&ctx->lock); + list_for_each_entry(counter, &ctx->counter_list, list_entry) { + if (counter->state != PERF_COUNTER_STATE_ACTIVE) + continue; + + if (!counter->hw_event.freq || !counter->hw_event.irq_freq) + continue; + + events = HZ * counter->hw.interrupts * counter->hw.irq_period; + period = div64_u64(events, counter->hw_event.irq_freq); + + delta = (s64)(1 + period - counter->hw.irq_period); + delta >>= 1; + + irq_period = counter->hw.irq_period + delta; + + if (!irq_period) + irq_period = 1; + + counter->hw.irq_period = irq_period; + counter->hw.interrupts = 0; + } + spin_unlock(&ctx->lock); +} + /* * Round-robin a context's counters: */ @@ -1081,6 +1113,9 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) cpuctx = &per_cpu(perf_cpu_context, cpu); ctx = &curr->perf_counter_ctx; + perf_adjust_freq(&cpuctx->ctx); + perf_adjust_freq(ctx); + perf_counter_cpu_sched_out(cpuctx); __perf_counter_task_sched_out(ctx); @@ -2382,6 +2417,8 @@ int perf_counter_overflow(struct perf_counter *counter, int events = atomic_read(&counter->event_limit); int ret = 0; + counter->hw.interrupts++; + /* * XXX event_limit might not quite work as expected on inherited * counters @@ -2450,6 +2487,7 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) enum hrtimer_restart ret = HRTIMER_RESTART; struct perf_counter *counter; struct pt_regs *regs; + u64 period; counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); counter->pmu->read(counter); @@ -2468,7 +2506,8 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) ret = HRTIMER_NORESTART; } - hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period)); + period = max_t(u64, 10000, counter->hw.irq_period); + hrtimer_forward_now(hrtimer, ns_to_ktime(period)); return ret; } @@ -2629,8 +2668,9 @@ static int cpu_clock_perf_counter_enable(struct perf_counter *counter) hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hwc->hrtimer.function = perf_swcounter_hrtimer; if (hwc->irq_period) { + u64 period = max_t(u64, 10000, hwc->irq_period); __hrtimer_start_range_ns(&hwc->hrtimer, - ns_to_ktime(hwc->irq_period), 0, + ns_to_ktime(period), 0, HRTIMER_MODE_REL, 0); } @@ -2679,8 +2719,9 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter) hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hwc->hrtimer.function = perf_swcounter_hrtimer; if (hwc->irq_period) { + u64 period = max_t(u64, 10000, hwc->irq_period); __hrtimer_start_range_ns(&hwc->hrtimer, - ns_to_ktime(hwc->irq_period), 0, + ns_to_ktime(period), 0, HRTIMER_MODE_REL, 0); } @@ -2811,9 +2852,7 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) static const 
struct pmu *sw_perf_counter_init(struct perf_counter *counter) { - struct perf_counter_hw_event *hw_event = &counter->hw_event; const struct pmu *pmu = NULL; - struct hw_perf_counter *hwc = &counter->hw; /* * Software counters (currently) can't in general distinguish @@ -2826,8 +2865,6 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) case PERF_COUNT_CPU_CLOCK: pmu = &perf_ops_cpu_clock; - if (hw_event->irq_period && hw_event->irq_period < 10000) - hw_event->irq_period = 10000; break; case PERF_COUNT_TASK_CLOCK: /* @@ -2839,8 +2876,6 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) else pmu = &perf_ops_cpu_clock; - if (hw_event->irq_period && hw_event->irq_period < 10000) - hw_event->irq_period = 10000; break; case PERF_COUNT_PAGE_FAULTS: case PERF_COUNT_PAGE_FAULTS_MIN: @@ -2854,9 +2889,6 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) break; } - if (pmu) - hwc->irq_period = hw_event->irq_period; - return pmu; } @@ -2872,6 +2904,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, { const struct pmu *pmu; struct perf_counter *counter; + struct hw_perf_counter *hwc; long err; counter = kzalloc(sizeof(*counter), gfpflags); @@ -2907,6 +2940,12 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, pmu = NULL; + hwc = &counter->hw; + if (hw_event->freq && hw_event->irq_freq) + hwc->irq_period = TICK_NSEC / hw_event->irq_freq; + else + hwc->irq_period = hw_event->irq_period; + /* * we currently do not support PERF_RECORD_GROUP on inherited counters */ -- cgit v1.2.3 From f5456a6b056b709282e87a68b4c1b81ac2e866fa Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 15 May 2009 15:19:29 +0200 Subject: perf top: update to use the new freq interface Provide perf top -F as an alternative to -c.
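For example (a hypothetical invocation; the flag spelling follows the option-table hunk below), this samples at a target rate of 1000 events per second instead of a fixed event period:

    $ perf top -F 1000

Internally, -F simply sets hw_event.freq = 1 and stores the requested rate in default_interval, so the kernel's adaptive-period code from the previous commits does the actual work.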
[ Impact: new 'perf top' feature ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo LKML-Reference: <20090515132018.707922166@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-top.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index b1549dd2772..814b2e4925e 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -98,6 +98,7 @@ static unsigned int page_size; static unsigned int mmap_pages = 16; static int use_mmap = 0; static int use_munmap = 0; +static int freq = 0; static char *vmlinux; @@ -846,9 +847,10 @@ static void process_options(int argc, char **argv) {"stat", no_argument, NULL, 'S'}, {"vmlinux", required_argument, NULL, 'x'}, {"zero", no_argument, NULL, 'z'}, + {"freq", required_argument, NULL, 'F'}, {NULL, 0, NULL, 0 } }; - int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hln:m:p:r:s:Sx:zMU", + int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hln:m:p:r:s:Sx:zMUF:", long_options, &option_index); if (c == -1) break; @@ -889,6 +891,7 @@ static void process_options(int argc, char **argv) case 'm': mmap_pages = atoi(optarg); break; case 'M': use_mmap = 1; break; case 'U': use_munmap = 1; break; + case 'F': freq = 1; default_interval = atoi(optarg); break; default: error = 1; break; } } @@ -1075,6 +1078,7 @@ int cmd_top(int argc, char **argv, const char *prefix) hw_event.nmi = nmi; hw_event.mmap = use_mmap; hw_event.munmap = use_munmap; + hw_event.freq = freq; fd[i][counter] = sys_perf_counter_open(&hw_event, tid, cpu, group_fd, 0); if (fd[i][counter] < 0) { -- cgit v1.2.3 From 2e569d36729c8105ae066a9b105068305442cc77 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 15 May 2009 15:37:47 +0200 Subject: perf_counter: frequency based adaptive irq_period, 32-bit fix fix: kernel/built-in.o: In function `perf_counter_alloc': perf_counter.c:(.text+0x7ddc7): undefined reference to `__udivdi3' [ Impact: build fix on 32-bit systems ] Reported-by: Ingo Molnar Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: <1242394667.6642.1887.camel@laptop> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 0ad1db4f3d6..728a595399b 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2942,7 +2942,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, hwc = &counter->hw; if (hw_event->freq && hw_event->irq_freq) - hwc->irq_period = TICK_NSEC / hw_event->irq_freq; + hwc->irq_period = div64_u64(TICK_NSEC, hw_event->irq_freq); else hwc->irq_period = hw_event->irq_period; -- cgit v1.2.3 From ef923214a4816c289e4af2d67a9ebb1a31e4ac61 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 14 May 2009 13:29:14 +1000 Subject: perf_counter: powerpc: use u64 for event codes internally Although the perf_counter API allows 63-bit raw event codes, internally in the powerpc back-end we had been using 32-bit event codes. This expands them to 64 bits so that we can add bits for specifying threshold start/stop events and instruction sampling modes later. This also corrects the return value of can_go_on_limited_pmc; we were returning an event code rather than just a 0/1 value in some circumstances. 
That didn't particularly matter while event codes were 32-bit, but now that event codes are 64-bit it might, so this fixes it. [ Impact: extend PowerPC perfcounter interfaces from u32 to u64 ] Signed-off-by: Paul Mackerras Acked-by: Peter Zijlstra Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo LKML-Reference: <18955.36874.472452.353104@drongo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/perf_counter.h | 10 +++++----- arch/powerpc/kernel/perf_counter.c | 26 ++++++++++++-------------- arch/powerpc/kernel/power4-pmu.c | 9 ++++----- arch/powerpc/kernel/power5+-pmu.c | 14 +++++++------- arch/powerpc/kernel/power5-pmu.c | 16 ++++++++-------- arch/powerpc/kernel/power6-pmu.c | 16 ++++++++-------- arch/powerpc/kernel/ppc970-pmu.c | 9 ++++----- 7 files changed, 48 insertions(+), 52 deletions(-) diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index 56d66c38143..ceea76a48e3 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -23,13 +23,13 @@ struct power_pmu { int max_alternatives; u64 add_fields; u64 test_adder; - int (*compute_mmcr)(unsigned int events[], int n_ev, + int (*compute_mmcr)(u64 events[], int n_ev, unsigned int hwc[], u64 mmcr[]); - int (*get_constraint)(unsigned int event, u64 *mskp, u64 *valp); - int (*get_alternatives)(unsigned int event, unsigned int flags, - unsigned int alt[]); + int (*get_constraint)(u64 event, u64 *mskp, u64 *valp); + int (*get_alternatives)(u64 event, unsigned int flags, + u64 alt[]); void (*disable_pmc)(unsigned int pmc, u64 mmcr[]); - int (*limited_pmc_event)(unsigned int event); + int (*limited_pmc_event)(u64 event); int limited_pmc5_6; /* PMC5 and PMC6 have limited function */ int n_generic; int *generic_events; diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index db8d5cafc15..8d4cafc84b8 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -26,7 +26,7 @@ struct cpu_hw_counters { int n_limited; u8 pmcs_enabled; struct perf_counter *counter[MAX_HWCOUNTERS]; - unsigned int events[MAX_HWCOUNTERS]; + u64 events[MAX_HWCOUNTERS]; unsigned int flags[MAX_HWCOUNTERS]; u64 mmcr[3]; struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS]; @@ -131,11 +131,11 @@ static void write_pmc(int idx, unsigned long val) * and see if any combination of alternative codes is feasible. * The feasible set is returned in event[]. */ -static int power_check_constraints(unsigned int event[], unsigned int cflags[], +static int power_check_constraints(u64 event[], unsigned int cflags[], int n_ev) { u64 mask, value, nv; - unsigned int alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; + u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS]; @@ -564,7 +564,7 @@ void hw_perf_enable(void) } static int collect_events(struct perf_counter *group, int max_count, - struct perf_counter *ctrs[], unsigned int *events, + struct perf_counter *ctrs[], u64 *events, unsigned int *flags) { int n = 0; @@ -752,11 +752,11 @@ struct pmu power_pmu = { * that a limited PMC can count, doesn't require interrupts, and * doesn't exclude any processor mode. 
*/ -static int can_go_on_limited_pmc(struct perf_counter *counter, unsigned int ev, +static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev, unsigned int flags) { int n; - unsigned int alt[MAX_EVENT_ALTERNATIVES]; + u64 alt[MAX_EVENT_ALTERNATIVES]; if (counter->hw_event.exclude_user || counter->hw_event.exclude_kernel @@ -776,10 +776,8 @@ static int can_go_on_limited_pmc(struct perf_counter *counter, unsigned int ev, flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD; n = ppmu->get_alternatives(ev, flags, alt); - if (n) - return alt[0]; - return 0; + return n > 0; } /* @@ -787,10 +785,9 @@ static int can_go_on_limited_pmc(struct perf_counter *counter, unsigned int ev, * and return the event code, or 0 if there is no such alternative. * (Note: event code 0 is "don't count" on all machines.) */ -static unsigned long normal_pmc_alternative(unsigned long ev, - unsigned long flags) +static u64 normal_pmc_alternative(u64 ev, unsigned long flags) { - unsigned int alt[MAX_EVENT_ALTERNATIVES]; + u64 alt[MAX_EVENT_ALTERNATIVES]; int n; flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD); @@ -820,9 +817,10 @@ static void hw_perf_counter_destroy(struct perf_counter *counter) const struct pmu *hw_perf_counter_init(struct perf_counter *counter) { - unsigned long ev, flags; + u64 ev; + unsigned long flags; struct perf_counter *ctrs[MAX_HWCOUNTERS]; - unsigned int events[MAX_HWCOUNTERS]; + u64 events[MAX_HWCOUNTERS]; unsigned int cflags[MAX_HWCOUNTERS]; int n; int err; diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c index 744a2756958..836fa118eb1 100644 --- a/arch/powerpc/kernel/power4-pmu.c +++ b/arch/powerpc/kernel/power4-pmu.c @@ -213,7 +213,7 @@ static unsigned char direct_marked_event[8] = { * Returns 1 if event counts things relating to marked instructions * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. 
*/ -static int p4_marked_instr_event(unsigned int event) +static int p4_marked_instr_event(u64 event) { int pmc, psel, unit, byte, bit; unsigned int mask; @@ -249,7 +249,7 @@ static int p4_marked_instr_event(unsigned int event) return (mask >> (byte * 8 + bit)) & 1; } -static int p4_get_constraint(unsigned int event, u64 *maskp, u64 *valp) +static int p4_get_constraint(u64 event, u64 *maskp, u64 *valp) { int pmc, byte, unit, lower, sh; u64 mask = 0, value = 0; @@ -320,8 +320,7 @@ static unsigned int ppc_inst_cmpl[] = { 0x1001, 0x4001, 0x6001, 0x7001, 0x8001 }; -static int p4_get_alternatives(unsigned int event, unsigned int flags, - unsigned int alt[]) +static int p4_get_alternatives(u64 event, unsigned int flags, u64 alt[]) { int i, j, na; @@ -353,7 +352,7 @@ static int p4_get_alternatives(unsigned int event, unsigned int flags, return na; } -static int p4_compute_mmcr(unsigned int event[], int n_ev, +static int p4_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], u64 mmcr[]) { u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0; diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c index 8154eaa2404..3ac0654372a 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c @@ -135,7 +135,7 @@ static u64 unit_cons[PM_LASTUNIT+1][2] = { [PM_GRS] = { 0x0e00000000ull, 0x0c40000000ull }, }; -static int power5p_get_constraint(unsigned int event, u64 *maskp, u64 *valp) +static int power5p_get_constraint(u64 event, u64 *maskp, u64 *valp) { int pmc, byte, unit, sh; int bit, fmask; @@ -188,7 +188,7 @@ static int power5p_get_constraint(unsigned int event, u64 *maskp, u64 *valp) return 0; } -static int power5p_limited_pmc_event(unsigned int event) +static int power5p_limited_pmc_event(u64 event) { int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; @@ -273,11 +273,11 @@ static int find_alternative_bdecode(unsigned int event) return -1; } -static int power5p_get_alternatives(unsigned int event, unsigned int flags, - unsigned int alt[]) +static int power5p_get_alternatives(u64 event, unsigned int flags, u64 alt[]) { - int i, j, ae, nalt = 1; + int i, j, nalt = 1; int nlim; + u64 ae; alt[0] = event; nalt = 1; @@ -402,7 +402,7 @@ static unsigned char direct_event_is_marked[0x28] = { * Returns 1 if event counts things relating to marked instructions * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. */ -static int power5p_marked_instr_event(unsigned int event) +static int power5p_marked_instr_event(u64 event) { int pmc, psel; int bit, byte, unit; @@ -451,7 +451,7 @@ static int power5p_marked_instr_event(unsigned int event) return (mask >> (byte * 8 + bit)) & 1; } -static int power5p_compute_mmcr(unsigned int event[], int n_ev, +static int power5p_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], u64 mmcr[]) { u64 mmcr1 = 0; diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c index 6e667dc8647..d5344968ee9 100644 --- a/arch/powerpc/kernel/power5-pmu.c +++ b/arch/powerpc/kernel/power5-pmu.c @@ -139,7 +139,7 @@ static u64 unit_cons[PM_LASTUNIT+1][2] = { [PM_GRS] = { 0x30002000000000ull, 0x30000400000000ull }, }; -static int power5_get_constraint(unsigned int event, u64 *maskp, u64 *valp) +static int power5_get_constraint(u64 event, u64 *maskp, u64 *valp) { int pmc, byte, unit, sh; int bit, fmask; @@ -224,7 +224,7 @@ static const unsigned int event_alternatives[][MAX_ALT] = { * Scan the alternatives table for a match and return the * index into the alternatives table if found, else -1. 
*/ -static int find_alternative(unsigned int event) +static int find_alternative(u64 event) { int i, j; @@ -250,7 +250,7 @@ static const unsigned char bytedecode_alternatives[4][4] = { * PMCSEL values on other counters. This returns the alternative * event code for those that do, or -1 otherwise. */ -static int find_alternative_bdecode(unsigned int event) +static u64 find_alternative_bdecode(u64 event) { int pmc, altpmc, pp, j; @@ -269,10 +269,10 @@ static int find_alternative_bdecode(unsigned int event) return -1; } -static int power5_get_alternatives(unsigned int event, unsigned int flags, - unsigned int alt[]) +static int power5_get_alternatives(u64 event, unsigned int flags, u64 alt[]) { - int i, j, ae, nalt = 1; + int i, j, nalt = 1; + u64 ae; alt[0] = event; nalt = 1; @@ -338,7 +338,7 @@ static unsigned char direct_event_is_marked[0x28] = { * Returns 1 if event counts things relating to marked instructions * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. */ -static int power5_marked_instr_event(unsigned int event) +static int power5_marked_instr_event(u64 event) { int pmc, psel; int bit, byte, unit; @@ -382,7 +382,7 @@ static int power5_marked_instr_event(unsigned int event) return (mask >> (byte * 8 + bit)) & 1; } -static int power5_compute_mmcr(unsigned int event[], int n_ev, +static int power5_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], u64 mmcr[]) { u64 mmcr1 = 0; diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index d44049f0ae2..ab7c615c458 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c @@ -134,7 +134,7 @@ static u32 marked_bus_events[16] = { * Returns 1 if event counts things relating to marked instructions * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. 
*/ -static int power6_marked_instr_event(unsigned int event) +static int power6_marked_instr_event(u64 event) { int pmc, psel, ptype; int bit, byte, unit; @@ -172,7 +172,7 @@ static int power6_marked_instr_event(unsigned int event) /* * Assign PMC numbers and compute MMCR1 value for a set of events */ -static int p6_compute_mmcr(unsigned int event[], int n_ev, +static int p6_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], u64 mmcr[]) { u64 mmcr1 = 0; @@ -265,7 +265,7 @@ static int p6_compute_mmcr(unsigned int event[], int n_ev, * 20-23, 24-27, 28-31 ditto for bytes 1, 2, 3 * 32-34 select field: nest (subunit) event selector */ -static int p6_get_constraint(unsigned int event, u64 *maskp, u64 *valp) +static int p6_get_constraint(u64 event, u64 *maskp, u64 *valp) { int pmc, byte, sh, subunit; u64 mask = 0, value = 0; @@ -298,7 +298,7 @@ static int p6_get_constraint(unsigned int event, u64 *maskp, u64 *valp) return 0; } -static int p6_limited_pmc_event(unsigned int event) +static int p6_limited_pmc_event(u64 event) { int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; @@ -337,7 +337,7 @@ static const unsigned int event_alternatives[][MAX_ALT] = { * This could be made more efficient with a binary search on * a presorted list, if necessary */ -static int find_alternatives_list(unsigned int event) +static int find_alternatives_list(u64 event) { int i, j; unsigned int alt; @@ -356,12 +356,12 @@ static int find_alternatives_list(unsigned int event) return -1; } -static int p6_get_alternatives(unsigned int event, unsigned int flags, - unsigned int alt[]) +static int p6_get_alternatives(u64 event, unsigned int flags, u64 alt[]) { int i, j, nlim; - unsigned int aevent, psel, pmc; + unsigned int psel, pmc; unsigned int nalt = 1; + u64 aevent; alt[0] = event; nlim = p6_limited_pmc_event(event); diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c index af2d1884058..eed47c4523f 100644 --- a/arch/powerpc/kernel/ppc970-pmu.c +++ b/arch/powerpc/kernel/ppc970-pmu.c @@ -147,7 +147,7 @@ static unsigned char direct_marked_event[8] = { * Returns 1 if event counts things relating to marked instructions * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. 
*/ -static int p970_marked_instr_event(unsigned int event) +static int p970_marked_instr_event(u64 event) { int pmc, psel, unit, byte, bit; unsigned int mask; @@ -192,7 +192,7 @@ static u64 unit_cons[PM_LASTUNIT+1][2] = { [PM_STS] = { 0x380000000000ull, 0x310000000000ull }, }; -static int p970_get_constraint(unsigned int event, u64 *maskp, u64 *valp) +static int p970_get_constraint(u64 event, u64 *maskp, u64 *valp) { int pmc, byte, unit, sh, spcsel; u64 mask = 0, value = 0; @@ -243,8 +243,7 @@ static int p970_get_constraint(unsigned int event, u64 *maskp, u64 *valp) return 0; } -static int p970_get_alternatives(unsigned int event, unsigned int flags, - unsigned int alt[]) +static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[]) { alt[0] = event; @@ -257,7 +256,7 @@ static int p970_get_alternatives(unsigned int event, unsigned int flags, return 1; } -static int p970_compute_mmcr(unsigned int event[], int n_ev, +static int p970_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], u64 mmcr[]) { u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0; -- cgit v1.2.3 From 9d23a90a67261e73b2fcac04d8ca963c6b496afb Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 14 May 2009 21:48:08 +1000 Subject: perf_counter: allow arch to supply event misc flags and instruction pointer At present the values we put in overflow events for the misc flags indicating processor mode and the instruction pointer are obtained using the standard user_mode() and instruction_pointer() functions. Those functions tell you where the performance monitor interrupt was taken, which might not be exactly where the counter overflow occurred, for example because interrupts were disabled at the point where the overflow occurred, or because the processor had many instructions in flight and chose to complete some more instructions beyond the one that caused the counter overflow. Some architectures (e.g. powerpc) can supply more precise information about where the counter overflow occurred and the processor mode at that point. This introduces new functions, perf_misc_flags() and perf_instruction_pointer(), which arch code can override to provide more precise information if available. They have default implementations which are identical to the existing code. This also adds a new misc flag value, PERF_EVENT_MISC_HYPERVISOR, for the case where a counter overflow occurred in the hypervisor. We encode the processor mode in the 2 bits previously used to indicate user or kernel mode; the values for user and kernel mode are unchanged and hypervisor mode is indicated by both bits being set. 
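Since user and kernel mode now share a 2-bit field instead of having one bit each, consumers of overflow records must mask before comparing: testing PERF_EVENT_MISC_USER as a lone bit would also match hypervisor mode. A minimal standalone C sketch, with the constants taken from this patch; the cpumode_str() helper and main() are illustrative only, not kernel or tooling API:

#include <stdio.h>
#include <stdint.h>

/* Values from this patch: the low 2 bits of header.misc encode cpu mode. */
#define PERF_EVENT_MISC_CPUMODE_MASK    (3 << 0)
#define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0)
#define PERF_EVENT_MISC_KERNEL          (1 << 0)
#define PERF_EVENT_MISC_USER            (2 << 0)
#define PERF_EVENT_MISC_HYPERVISOR      (3 << 0)
#define PERF_EVENT_MISC_OVERFLOW        (1 << 2)

/*
 * Illustrative decode helper: mask first, then compare. Testing
 * (misc & PERF_EVENT_MISC_USER) alone would also match HYPERVISOR.
 */
static const char *cpumode_str(uint16_t misc)
{
        switch (misc & PERF_EVENT_MISC_CPUMODE_MASK) {
        case PERF_EVENT_MISC_KERNEL:     return "kernel";
        case PERF_EVENT_MISC_USER:       return "user";
        case PERF_EVENT_MISC_HYPERVISOR: return "hypervisor";
        default:                         return "unknown";
        }
}

int main(void)
{
        uint16_t misc = PERF_EVENT_MISC_OVERFLOW | PERF_EVENT_MISC_HYPERVISOR;

        printf("mode: %s\n", cpumode_str(misc)); /* prints: mode: hypervisor */
        return 0;
}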
[ Impact: generalize perfcounter core facilities ] Signed-off-by: Paul Mackerras Acked-by: Peter Zijlstra Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo LKML-Reference: <18956.1272.818511.561835@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 11 ++++++++++- kernel/perf_counter.c | 5 ++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 004b6e162b9..c8c1dfc22c9 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -215,8 +215,11 @@ struct perf_counter_mmap_page { __u32 data_head; /* head in the data section */ }; +#define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0) +#define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0) #define PERF_EVENT_MISC_KERNEL (1 << 0) -#define PERF_EVENT_MISC_USER (1 << 1) +#define PERF_EVENT_MISC_USER (2 << 0) +#define PERF_EVENT_MISC_HYPERVISOR (3 << 0) #define PERF_EVENT_MISC_OVERFLOW (1 << 2) struct perf_event_header { @@ -596,6 +599,12 @@ extern int sysctl_perf_counter_mlock; extern void perf_counter_init(void); +#ifndef perf_misc_flags +#define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \ + PERF_EVENT_MISC_KERNEL) +#define perf_instruction_pointer(regs) instruction_pointer(regs) +#endif + #else static inline void perf_counter_task_sched_in(struct task_struct *task, int cpu) { } diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 728a595399b..57840a94b16 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2042,11 +2042,10 @@ static void perf_counter_output(struct perf_counter *counter, header.size = sizeof(header); header.misc = PERF_EVENT_MISC_OVERFLOW; - header.misc |= user_mode(regs) ? - PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL; + header.misc |= perf_misc_flags(regs); if (record_type & PERF_RECORD_IP) { - ip = instruction_pointer(regs); + ip = perf_instruction_pointer(regs); header.type |= PERF_RECORD_IP; header.size += sizeof(ip); } -- cgit v1.2.3 From 0bbd0d4be8d5d3676c126e06e3c75c16def00441 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 14 May 2009 13:31:48 +1000 Subject: perf_counter: powerpc: supply more precise information on counter overflow events This uses values from the MMCRA, SIAR and SDAR registers on powerpc to supply more precise information for overflow events, including a data address when PERF_RECORD_ADDR is specified. Since POWER6 uses different bit positions in MMCRA from earlier processors, this converts the struct power_pmu limited_pmc5_6 field, which only had 0/1 values, into a flags field and defines bit values for its previous use (PPMU_LIMITED_PMC5_6) and a new flag (PPMU_ALT_SIPR) to indicate that the processor uses the POWER6 bit positions rather than the earlier positions. It also adds definitions in reg.h for the new and old positions of the bit that indicates that the SIAR and SDAR values come from the same instruction. For the data address, the SDAR value is supplied if we are not doing instruction sampling. In that case there is no guarantee that the address given in the PERF_RECORD_ADDR subrecord will correspond to the instruction whose address is given in the PERF_RECORD_IP subrecord. If instruction sampling is enabled (e.g. because this counter is counting a marked instruction event), then we only supply the SDAR value for the PERF_RECORD_ADDR subrecord if it corresponds to the instruction whose address is in the PERF_RECORD_IP subrecord. Otherwise we supply 0. 
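A standalone C sketch of the data-address rule described above, with the MMCRA bit definitions taken from this patch; the mock_* variables stand in for mfspr() reads of MMCRA and SDAR and are purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* MMCRA bits from this patch; POWER6 puts SDSYNC in a different position. */
#define MMCRA_SAMPLE_ENABLE  0x00000001UL
#define MMCRA_SDSYNC         0x80000000UL
#define POWER6_MMCRA_SDSYNC  0x0000080000000000ULL
#define PPMU_ALT_SIPR        2

/* Hypothetical stand-ins for mfspr(SPRN_MMCRA) and mfspr(SPRN_SDAR). */
static uint64_t mock_mmcra = MMCRA_SAMPLE_ENABLE | MMCRA_SDSYNC;
static uint64_t mock_sdar  = 0xc0000000deadbeefULL;

/*
 * Sketch of the rule: report the sampled data address (SDAR) unless
 * instruction sampling is on and SDAR is not synced with SIAR.
 */
static uint64_t data_addr(uint64_t mmcra, uint32_t pmu_flags)
{
        uint64_t sdsync = (pmu_flags & PPMU_ALT_SIPR) ?
                          POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;

        if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
                return mock_sdar;       /* SDAR matches the reported IP */
        return 0;                       /* can't trust SDAR: report 0 */
}

int main(void)
{
        printf("addr: %#llx\n",
               (unsigned long long)data_addr(mock_mmcra, 0));
        return 0;
}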
[ Impact: support more PMU hardware features on PowerPC ] Signed-off-by: Paul Mackerras Acked-by: Peter Zijlstra Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo LKML-Reference: <18955.37028.48861.555309@drongo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/perf_counter.h | 14 +++++- arch/powerpc/include/asm/reg.h | 2 + arch/powerpc/kernel/perf_counter.c | 84 +++++++++++++++++++++++++++++++-- arch/powerpc/kernel/power5+-pmu.c | 2 +- arch/powerpc/kernel/power6-pmu.c | 2 +- 5 files changed, 97 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index ceea76a48e3..1c60f0ca792 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -30,13 +30,19 @@ struct power_pmu { u64 alt[]); void (*disable_pmc)(unsigned int pmc, u64 mmcr[]); int (*limited_pmc_event)(u64 event); - int limited_pmc5_6; /* PMC5 and PMC6 have limited function */ + u32 flags; int n_generic; int *generic_events; }; extern struct power_pmu *ppmu; +/* + * Values for power_pmu.flags + */ +#define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */ +#define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */ + /* * Values for flags to get_alternatives() */ @@ -44,6 +50,12 @@ extern struct power_pmu *ppmu; #define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */ #define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */ +struct pt_regs; +extern unsigned long perf_misc_flags(struct pt_regs *regs); +#define perf_misc_flags(regs) perf_misc_flags(regs) + +extern unsigned long perf_instruction_pointer(struct pt_regs *regs); + /* * The power_pmu.get_constraint function returns a 64-bit value and * a 64-bit mask that express the constraints between this event and diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index e8018d540e8..fb359b0a693 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -492,11 +492,13 @@ #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */ #define SPRN_MMCR1 798 #define SPRN_MMCRA 0x312 +#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ #define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */ #define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */ #define MMCRA_SLOT 0x07000000UL /* SLOT bits (37-39) */ #define MMCRA_SLOT_SHIFT 24 #define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */ +#define POWER6_MMCRA_SDSYNC 0x0000080000000000ULL /* SDAR/SIAR synced */ #define POWER6_MMCRA_SIHV 0x0000040000000000ULL #define POWER6_MMCRA_SIPR 0x0000020000000000ULL #define POWER6_MMCRA_THRM 0x00000020UL diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 8d4cafc84b8..6baae5a5c33 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -17,6 +17,7 @@ #include #include #include +#include struct cpu_hw_counters { int n_counters; @@ -310,7 +311,8 @@ static void power_pmu_read(struct perf_counter *counter) */ static int is_limited_pmc(int pmcnum) { - return ppmu->limited_pmc5_6 && (pmcnum == 5 || pmcnum == 6); + return (ppmu->flags & PPMU_LIMITED_PMC5_6) + && (pmcnum == 5 || pmcnum == 6); } static void freeze_limited_counters(struct cpu_hw_counters *cpuhw, @@ -860,7 +862,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter) * If this machine has limited counters, check whether this * event could go on a limited counter. 
*/ - if (ppmu->limited_pmc5_6) { + if (ppmu->flags & PPMU_LIMITED_PMC5_6) { if (can_go_on_limited_pmc(counter, ev, flags)) { flags |= PPMU_LIMITED_PMC_OK; } else if (ppmu->limited_pmc_event(ev)) { @@ -933,6 +935,7 @@ static void record_and_restart(struct perf_counter *counter, long val, u64 period = counter->hw.irq_period; s64 prev, delta, left; int record = 0; + u64 addr, mmcra, sdsync; /* we don't have to worry about interrupts here */ prev = atomic64_read(&counter->hw.prev_count); @@ -963,8 +966,76 @@ static void record_and_restart(struct perf_counter *counter, long val, /* * Finally record data if requested. */ - if (record) - perf_counter_overflow(counter, nmi, regs, 0); + if (record) { + addr = 0; + if (counter->hw_event.record_type & PERF_RECORD_ADDR) { + /* + * The user wants a data address recorded. + * If we're not doing instruction sampling, + * give them the SDAR (sampled data address). + * If we are doing instruction sampling, then only + * give them the SDAR if it corresponds to the + * instruction pointed to by SIAR; this is indicated + * by the [POWER6_]MMCRA_SDSYNC bit in MMCRA. + */ + mmcra = regs->dsisr; + sdsync = (ppmu->flags & PPMU_ALT_SIPR) ? + POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC; + if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync)) + addr = mfspr(SPRN_SDAR); + } + perf_counter_overflow(counter, nmi, regs, addr); + } +} + +/* + * Called from generic code to get the misc flags (i.e. processor mode) + * for an event. + */ +unsigned long perf_misc_flags(struct pt_regs *regs) +{ + unsigned long mmcra; + + if (TRAP(regs) != 0xf00) { + /* not a PMU interrupt */ + return user_mode(regs) ? PERF_EVENT_MISC_USER : + PERF_EVENT_MISC_KERNEL; + } + + mmcra = regs->dsisr; + if (ppmu->flags & PPMU_ALT_SIPR) { + if (mmcra & POWER6_MMCRA_SIHV) + return PERF_EVENT_MISC_HYPERVISOR; + return (mmcra & POWER6_MMCRA_SIPR) ? PERF_EVENT_MISC_USER : + PERF_EVENT_MISC_KERNEL; + } + if (mmcra & MMCRA_SIHV) + return PERF_EVENT_MISC_HYPERVISOR; + return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER : + PERF_EVENT_MISC_KERNEL; +} + +/* + * Called from generic code to get the instruction pointer + * for an event. + */ +unsigned long perf_instruction_pointer(struct pt_regs *regs) +{ + unsigned long mmcra; + unsigned long ip; + unsigned long slot; + + if (TRAP(regs) != 0xf00) + return regs->nip; /* not a PMU interrupt */ + + ip = mfspr(SPRN_SIAR); + mmcra = regs->dsisr; + if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) { + slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT; + if (slot > 1) + ip += 4 * (slot - 1); + } + return ip; } /* @@ -983,6 +1054,11 @@ static void perf_counter_interrupt(struct pt_regs *regs) freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5), mfspr(SPRN_PMC6)); + /* + * Overload regs->dsisr to store MMCRA so we only need to read it once. + */ + regs->dsisr = mfspr(SPRN_MMCRA); + /* * If interrupts were soft-disabled when this PMU interrupt * occurred, treat it as an NMI. 
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c index 3ac0654372a..c6cdfc165d6 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c @@ -625,6 +625,6 @@ struct power_pmu power5p_pmu = { .disable_pmc = power5p_disable_pmc, .n_generic = ARRAY_SIZE(power5p_generic_events), .generic_events = power5p_generic_events, - .limited_pmc5_6 = 1, + .flags = PPMU_LIMITED_PMC5_6, .limited_pmc_event = power5p_limited_pmc_event, }; diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index ab7c615c458..cd4fbe06c35 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c @@ -485,6 +485,6 @@ struct power_pmu power6_pmu = { .disable_pmc = p6_disable_pmc, .n_generic = ARRAY_SIZE(power6_generic_events), .generic_events = power6_generic_events, - .limited_pmc5_6 = 1, + .flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR, .limited_pmc_event = p6_limited_pmc_event, }; -- cgit v1.2.3 From 8bc2095951517e2c74b8aeeca4685ddd6b16ed4b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 15 May 2009 20:45:59 +0200 Subject: perf_counter: Fix inheritance cleanup code Clean up code that open-coded the list_{add,del}_counter() code in __perf_counter_exit_task() which consequently diverged. This could lead to software counter crashes. Also, fold the ctx->nr_counter inc/dec into those functions and clean up some of the related code. [ Impact: fix potential sw counter crash, cleanup ] Signed-off-by: Peter Zijlstra Cc: Srivatsa Vaddagiri Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: Marcelo Tosatti Signed-off-by: Ingo Molnar Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 57840a94b16..59a926d04ba 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -115,6 +115,7 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) } list_add_rcu(&counter->event_entry, &ctx->event_list); + ctx->nr_counters++; } static void @@ -122,6 +123,8 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) { struct perf_counter *sibling, *tmp; + ctx->nr_counters--; + list_del_init(&counter->list_entry); list_del_rcu(&counter->event_entry); @@ -209,7 +212,6 @@ static void __perf_counter_remove_from_context(void *info) counter_sched_out(counter, cpuctx, ctx); counter->task = NULL; - ctx->nr_counters--; /* * Protect the list operation against NMI by disabling the @@ -276,7 +278,6 @@ retry: * succeed. 
*/ if (!list_empty(&counter->list_entry)) { - ctx->nr_counters--; list_del_counter(counter, ctx); counter->task = NULL; } @@ -544,7 +545,6 @@ static void add_counter_to_ctx(struct perf_counter *counter, struct perf_counter_context *ctx) { list_add_counter(counter, ctx); - ctx->nr_counters++; counter->prev_state = PERF_COUNTER_STATE_OFF; counter->tstamp_enabled = ctx->time; counter->tstamp_running = ctx->time; @@ -3206,9 +3206,8 @@ static int inherit_group(struct perf_counter *parent_counter, static void sync_child_counter(struct perf_counter *child_counter, struct perf_counter *parent_counter) { - u64 parent_val, child_val; + u64 child_val; - parent_val = atomic64_read(&parent_counter->count); child_val = atomic64_read(&child_counter->count); /* @@ -3240,7 +3239,6 @@ __perf_counter_exit_task(struct task_struct *child, struct perf_counter_context *child_ctx) { struct perf_counter *parent_counter; - struct perf_counter *sub, *tmp; /* * If we do not self-reap then we have to wait for the @@ -3252,8 +3250,8 @@ __perf_counter_exit_task(struct task_struct *child, */ if (child != current) { wait_task_inactive(child, 0); - list_del_init(&child_counter->list_entry); update_counter_times(child_counter); + list_del_counter(child_counter, child_ctx); } else { struct perf_cpu_context *cpuctx; unsigned long flags; @@ -3272,9 +3270,7 @@ __perf_counter_exit_task(struct task_struct *child, group_sched_out(child_counter, cpuctx, child_ctx); update_counter_times(child_counter); - list_del_init(&child_counter->list_entry); - - child_ctx->nr_counters--; + list_del_counter(child_counter, child_ctx); perf_enable(); local_irq_restore(flags); @@ -3288,13 +3284,6 @@ __perf_counter_exit_task(struct task_struct *child, */ if (parent_counter) { sync_child_counter(child_counter, parent_counter); - list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list, - list_entry) { - if (sub->parent) { - sync_child_counter(sub, sub->parent); - free_counter(sub); - } - } free_counter(child_counter); } } @@ -3315,9 +3304,18 @@ void perf_counter_exit_task(struct task_struct *child) if (likely(!child_ctx->nr_counters)) return; +again: list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list, list_entry) __perf_counter_exit_task(child, child_counter, child_ctx); + + /* + * If the last counter was a group counter, it will have appended all + * its siblings to the list, but we obtained 'tmp' before that which + * will still point to the list head terminating the iteration. 
+ */ + if (!list_empty(&child_ctx->counter_list)) + goto again; } /* -- cgit v1.2.3 From 856d56b9e5de650a64a6c41c17aaed702b55d578 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 15 May 2009 20:45:59 +0200 Subject: perf_counter: Fix counter inheritance Srivatsa Vaddagiri reported that a Java workload triggers this warning in kernel/exit.c: WARN_ON_ONCE(!list_empty(&tsk->perf_counter_ctx.counter_list)); Add the inherited counter propagation on self-detach, this could cause counter leaks and incomplete stats in threaded code like the below: #include #include void *thread(void *arg) { sleep(5); return NULL; } void main(void) { pthread_t thr; pthread_create(&thr, NULL, thread, NULL); } Reported-by: Srivatsa Vaddagiri Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: Marcelo Tosatti Signed-off-by: Ingo Molnar --- kernel/exit.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kernel/exit.c b/kernel/exit.c index 4741376c8de..16d74f13a3e 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -128,6 +128,12 @@ static void __exit_signal(struct task_struct *tsk) sig = NULL; /* Marker for below. */ } + /* + * Flush inherited counters to the parent - before the parent + * gets woken up by child-exit notifications. + */ + perf_counter_exit_task(tsk); + __unhash_process(tsk); /* -- cgit v1.2.3 From 0203026b58b4299ba7281c0b4b417207c1f05d0e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 17 May 2009 11:24:08 +0200 Subject: perf_counter: fix threaded task exit Flushing counters in __exit_signal() with irqs disabled is not a good idea as perf_counter_exit_task() acquires mutexes. So flush it before acquiring the tasklist lock. (Note, we still need a fix for when the PID has been unhashed.) [ Impact: fix crash with inherited counters ] Cc: Peter Zijlstra Cc: Srivatsa Vaddagiri Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: Marcelo Tosatti Signed-off-by: Ingo Molnar --- kernel/exit.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/kernel/exit.c b/kernel/exit.c index 16d74f13a3e..73affd35e76 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -128,12 +128,6 @@ static void __exit_signal(struct task_struct *tsk) sig = NULL; /* Marker for below. */ } - /* - * Flush inherited counters to the parent - before the parent - * gets woken up by child-exit notifications. - */ - perf_counter_exit_task(tsk); - __unhash_process(tsk); /* @@ -183,6 +177,13 @@ repeat: atomic_dec(&__task_cred(p)->user->processes); proc_flush_task(p); + + /* + * Flush inherited counters to the parent - before the parent + * gets woken up by child-exit notifications. + */ + perf_counter_exit_task(p); + write_lock_irq(&tasklist_lock); tracehook_finish_release_task(p); __exit_signal(p); -- cgit v1.2.3 From d2517a49d55536b38c7a87e5289550cfedaa4dcc Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 17 May 2009 10:04:45 +0200 Subject: perf_counter, x86: fix zero irq_period counters The quirk to irq_period unearthed an unrobustness we had in the hw_counter initialization sequence: we left irq_period at 0, which was then quirked up to 2 ... which then generated a _lot_ of interrupts during 'perf stat' runs, slowed them down and skewed the counter results in general. Initialize irq_period to the maximum instead. 
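A minimal sketch of the initialization rule this patch establishes; X86_MAX_PERIOD is an assumed placeholder for x86_pmu.max_period, and initial_period() is illustrative rather than the kernel function:

#include <stdint.h>
#include <stdio.h>

#define X86_MAX_PERIOD (1ULL << 31)     /* assumed value, for illustration */

/*
 * An unset (zero) period no longer falls through to the minimum-period
 * quirk; it defaults to the PMU maximum, and the armed value is still
 * clamped to that maximum.
 */
static uint64_t initial_period(uint64_t requested, uint64_t max_period)
{
        if (!requested)
                requested = max_period; /* was: left at 0, quirked up to 2 */
        return requested < max_period ? requested : max_period;
}

int main(void)
{
        printf("period(0)    = %llu\n",
               (unsigned long long)initial_period(0, X86_MAX_PERIOD));
        printf("period(1000) = %llu\n",
               (unsigned long long)initial_period(1000, X86_MAX_PERIOD));
        return 0;
}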
[ Impact: fix perf stat results ] Cc: Peter Zijlstra Cc: Srivatsa Vaddagiri Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: Marcelo Tosatti Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 886dcf334bc..5bfd30ab392 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -286,6 +286,9 @@ static int __hw_perf_counter_init(struct perf_counter *counter) hwc->nmi = 1; } + if (!hwc->irq_period) + hwc->irq_period = x86_pmu.max_period; + atomic64_set(&hwc->period_left, min(x86_pmu.max_period, hwc->irq_period)); -- cgit v1.2.3 From c0daaf3f1f672defa3a45ca449b76d0e86c55892 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 18 May 2009 14:02:12 +1000 Subject: perf_counter: powerpc: initialize cpuhw pointer before use Commit 9e35ad38 ("perf_counter: Rework the perf counter disable/enable") added code to the powerpc hw_perf_enable (renamed from hw_perf_restore) to test cpuhw->disabled and return immediately if it is not set (i.e. if the PMU is already enabled). Unfortunately the test got added before cpuhw was initialized, resulting in an oops the first time hw_perf_enable got called. This fixes it by moving the initialization of cpuhw to before cpuhw->disabled is tested. [ Impact: fix oops-causing bug on powerpc ] Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: Corey Ashford LKML-Reference: <18960.56772.869734.304631@drongo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 6baae5a5c33..fe21b2440f2 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -450,12 +450,11 @@ void hw_perf_enable(void) int idx; local_irq_save(flags); + cpuhw = &__get_cpu_var(cpu_hw_counters); if (!cpuhw->disabled) { local_irq_restore(flags); return; } - - cpuhw = &__get_cpu_var(cpu_hw_counters); cpuhw->disabled = 0; /* -- cgit v1.2.3 From b68f1d2e7aa21029d73c7d453a8046e95d351740 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 17 May 2009 19:37:25 +0200 Subject: perf_counter, x86: speed up the scheduling fast-path We have to set up the LVT entry only at counter init time, not at every switch-in time. There's friction between NMI and non-NMI use here - we'll probably remove the per counter configurability of it - but until then, dont slow down things ... [ Impact: micro-optimization ] Cc: Peter Zijlstra Cc: Srivatsa Vaddagiri Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: Marcelo Tosatti LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 5bfd30ab392..c109819c2cb 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -285,6 +285,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter) return -EACCES; hwc->nmi = 1; } + perf_counters_lapic_init(hwc->nmi); if (!hwc->irq_period) hwc->irq_period = x86_pmu.max_period; @@ -603,8 +604,6 @@ try_generic: hwc->counter_base = x86_pmu.perfctr; } - perf_counters_lapic_init(hwc->nmi); - x86_pmu.disable(hwc, idx); cpuc->counters[idx] = counter; @@ -1054,7 +1053,7 @@ void __init init_hw_perf_counters(void) pr_info("... 
counter mask: %016Lx\n", perf_counter_mask); - perf_counters_lapic_init(0); + perf_counters_lapic_init(1); register_die_notifier(&perf_counter_nmi_notifier); } -- cgit v1.2.3 From 33b2fb303fe7f6b08bbb32f708e67b96eaa94a7a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 17 May 2009 11:08:41 +0200 Subject: perf_counter: fix counter freeing logic Fix counter lifetime bugs which explain the crashes reported by Marcelo Tosatti and Arnaldo Carvalho de Melo. The new rule is: flushing + freeing is only done for a task's own counters, never for other tasks. [ Impact: fix crashes/lockups with inherited counters ] Reported-by: Arnaldo Carvalho de Melo Reported-by: Marcelo Tosatti Acked-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Signed-off-by: Ingo Molnar --- kernel/exit.c | 19 +++++++------------ kernel/perf_counter.c | 2 ++ 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/kernel/exit.c b/kernel/exit.c index 73affd35e76..f9dfedd94af 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -178,12 +178,6 @@ repeat: proc_flush_task(p); - /* - * Flush inherited counters to the parent - before the parent - * gets woken up by child-exit notifications. - */ - perf_counter_exit_task(p); - write_lock_irq(&tasklist_lock); tracehook_finish_release_task(p); __exit_signal(p); @@ -985,6 +979,13 @@ NORET_TYPE void do_exit(long code) module_put(tsk->binfmt->module); proc_exit_connector(tsk); + + /* + * Flush inherited counters to the parent - before the parent + * gets woken up by child-exit notifications. + */ + perf_counter_exit_task(tsk); + exit_notify(tsk, group_dead); #ifdef CONFIG_NUMA mpol_put(tsk->mempolicy); @@ -1257,12 +1258,6 @@ static int wait_task_zombie(struct task_struct *p, int options, */ read_unlock(&tasklist_lock); - /* - * Flush inherited counters to the parent - before the parent - * gets woken up by child-exit notifications. - */ - perf_counter_exit_task(p); - retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; status = (p->signal->flags & SIGNAL_GROUP_EXIT) ? p->signal->group_exit_code : p->exit_code; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 59a926d04ba..7af16d1c480 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -3299,6 +3299,8 @@ void perf_counter_exit_task(struct task_struct *child) struct perf_counter *child_counter, *tmp; struct perf_counter_context *child_ctx; + WARN_ON_ONCE(child != current); + child_ctx = &child->perf_counter_ctx; if (likely(!child_ctx->nr_counters)) -- cgit v1.2.3 From c44d70a340554a33071339064a303ac0f1a31623 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 17 May 2009 11:24:08 +0200 Subject: perf_counter: fix counter inheritance race Context rotation should not occur when we are in the middle of walking the counter list when inheriting counters ... 
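The fix below guards rotation with a per-context rr_allowed flag. A single-threaded toy model of that guard, assuming simplified stand-ins for the context and list (the real code additionally uses barrier() and holds the context mutex across the walk):

#include <stdio.h>

/* Toy context: just enough state to show the guard. */
struct ctx {
        int rr_allowed;         /* 0 while an inheritance walk is in flight */
        int rotations;          /* stand-in for rotating the counter list */
};

static void rotate_ctx(struct ctx *ctx)
{
        if (!ctx->rr_allowed)
                return;         /* tick fires mid-walk: skip this rotation */
        ctx->rotations++;       /* pretend we rotated the counter list */
}

/* Inheritance path: forbid rotation for the duration of the list walk. */
static void inherit_counters(struct ctx *parent)
{
        parent->rr_allowed = 0;
        /* ... walk parent's counter list and clone counters ... */
        rotate_ctx(parent);     /* a concurrent tick would be a no-op here */
        parent->rr_allowed = 1;
}

int main(void)
{
        struct ctx ctx = { .rr_allowed = 1, .rotations = 0 };

        inherit_counters(&ctx);
        rotate_ctx(&ctx);       /* normal tick rotates again */
        printf("rotations = %d\n", ctx.rotations); /* 1: mid-walk skipped */
        return 0;
}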
[ Impact: fix occasionally incorrect perf stat results ] Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: Marcelo Tosatti Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 1 + kernel/perf_counter.c | 10 +++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index c8c1dfc22c9..13cb2fbbf33 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -508,6 +508,7 @@ struct perf_counter_context { int nr_counters; int nr_active; int is_active; + int rr_allowed; struct task_struct *task; /* diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 7af16d1c480..4d8f97375f3 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1120,7 +1120,8 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) __perf_counter_task_sched_out(ctx); rotate_ctx(&cpuctx->ctx); - rotate_ctx(ctx); + if (ctx->rr_allowed) + rotate_ctx(ctx); perf_counter_cpu_sched_in(cpuctx, cpu); perf_counter_task_sched_in(curr, cpu); @@ -3108,6 +3109,7 @@ __perf_counter_init_context(struct perf_counter_context *ctx, mutex_init(&ctx->mutex); INIT_LIST_HEAD(&ctx->counter_list); INIT_LIST_HEAD(&ctx->event_list); + ctx->rr_allowed = 1; ctx->task = task; } @@ -3348,6 +3350,9 @@ void perf_counter_init_task(struct task_struct *child) */ mutex_lock(&parent_ctx->mutex); + parent_ctx->rr_allowed = 0; + barrier(); /* irqs */ + /* * We dont have to disable NMIs - we are only looking at * the list, not manipulating it: @@ -3361,6 +3366,9 @@ void perf_counter_init_task(struct task_struct *child) break; } + barrier(); /* irqs */ + parent_ctx->rr_allowed = 1; + mutex_unlock(&parent_ctx->mutex); } -- cgit v1.2.3 From d7b629a34fc4134a43c730b5f0197855dc4948d0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 20 May 2009 12:21:19 +0200 Subject: perf_counter: Solve the rotate_ctx vs inherit race differently Instead of disabling RR scheduling of the counters, use a different list that does not get rotated to iterate the counters on inheritance. 
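A toy model of the two-list idea, assuming arrays in place of the kernel's linked lists: counter_list order changes under rotation while event_list order is stable, so the inheritance walk iterates the stable list and skips non-leaders (siblings are handled through their group leader):

#include <stdio.h>

#define NR 3

struct counter { int id; int leader_id; };

static struct counter counters[NR] = {
        { .id = 0, .leader_id = 0 },    /* group leader */
        { .id = 1, .leader_id = 0 },    /* sibling of 0 */
        { .id = 2, .leader_id = 2 },    /* independent leader */
};

static int counter_order[NR] = { 0, 1, 2 };     /* rotated on tick */
static int event_order[NR]   = { 0, 1, 2 };     /* never rotated */

static void rotate_ctx(void)
{
        int first = counter_order[0];

        for (int i = 0; i < NR - 1; i++)
                counter_order[i] = counter_order[i + 1];
        counter_order[NR - 1] = first;
}

int main(void)
{
        rotate_ctx();   /* may happen concurrently with inheritance */

        /* Inheritance walk: stable list, group leaders only. */
        for (int i = 0; i < NR; i++) {
                struct counter *c = &counters[event_order[i]];

                if (c->id != c->leader_id)
                        continue;       /* inherit_group() covers siblings */
                printf("inherit group led by counter %d\n", c->id);
        }
        return 0;
}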
[ Impact: cleanup, optimization ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: Marcelo Tosatti Cc: John Kacur LKML-Reference: <20090520102553.237504544@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 1 - kernel/perf_counter.c | 15 +++++---------- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 13cb2fbbf33..c8c1dfc22c9 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -508,7 +508,6 @@ struct perf_counter_context { int nr_counters; int nr_active; int is_active; - int rr_allowed; struct task_struct *task; /* diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 4d8f97375f3..64113e6d194 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1120,8 +1120,7 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) __perf_counter_task_sched_out(ctx); rotate_ctx(&cpuctx->ctx); - if (ctx->rr_allowed) - rotate_ctx(ctx); + rotate_ctx(ctx); perf_counter_cpu_sched_in(cpuctx, cpu); perf_counter_task_sched_in(curr, cpu); @@ -3109,7 +3108,6 @@ __perf_counter_init_context(struct perf_counter_context *ctx, mutex_init(&ctx->mutex); INIT_LIST_HEAD(&ctx->counter_list); INIT_LIST_HEAD(&ctx->event_list); - ctx->rr_allowed = 1; ctx->task = task; } @@ -3350,14 +3348,14 @@ void perf_counter_init_task(struct task_struct *child) */ mutex_lock(&parent_ctx->mutex); - parent_ctx->rr_allowed = 0; - barrier(); /* irqs */ - /* * We dont have to disable NMIs - we are only looking at * the list, not manipulating it: */ - list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) { + list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) { + if (counter != counter->group_leader) + continue; + if (!counter->hw_event.inherit) continue; @@ -3366,9 +3364,6 @@ void perf_counter_init_task(struct task_struct *child) break; } - barrier(); /* irqs */ - parent_ctx->rr_allowed = 1; - mutex_unlock(&parent_ctx->mutex); } -- cgit v1.2.3 From 26b119bc811a73bac6ecf95bdf284bf31c7955f0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 20 May 2009 12:21:20 +0200 Subject: perf_counter: Log irq_period changes For the dynamic irq_period code, log whenever we change the period so that analyzing code can normalize the event flow. 
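For reference, a hypothetical consumer of the record this patch emits; struct perf_event_period is an illustrative name for the PERF_EVENT_PERIOD record body (the field layout follows the freq_event struct below), and weighting each sample by the period in force is one plausible normalization scheme, not one prescribed by the patch:

#include <stdint.h>
#include <stdio.h>

struct perf_event_header {
        uint32_t type;          /* PERF_EVENT_PERIOD */
        uint16_t misc;
        uint16_t size;
};

struct perf_event_period {
        struct perf_event_header header;
        uint64_t time;          /* sched_clock() timestamp */
        uint64_t period;        /* new irq_period */
};

int main(void)
{
        /* Two phases of a run, each announced by a PERIOD record. */
        uint64_t samples[2] = { 100, 400 };     /* samples seen per phase */
        uint64_t period[2]  = { 10000, 2500 };  /* period in force */
        uint64_t events = 0;

        /* Each sample stands for roughly 'period' events. */
        for (int i = 0; i < 2; i++)
                events += samples[i] * period[i];

        printf("record body: %zu bytes, ~%llu events\n",
               sizeof(struct perf_event_period),
               (unsigned long long)events);
        return 0;
}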
[ Impact: add new feature to allow more precise profiling ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: Marcelo Tosatti Cc: John Kacur LKML-Reference: <20090520102553.298769743@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 8 ++++++++ kernel/perf_counter.c | 40 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 47 insertions(+), 1 deletion(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index c8c1dfc22c9..f612941ef46 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -257,6 +257,14 @@ enum perf_event_type { */ PERF_EVENT_COMM = 3, + /* + * struct { + * struct perf_event_header header; + * u64 irq_period; + * }; + */ + PERF_EVENT_PERIOD = 4, + /* * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field * will be PERF_RECORD_* diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 64113e6d194..db02eb16c77 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1046,7 +1046,9 @@ int perf_counter_task_enable(void) return 0; } -void perf_adjust_freq(struct perf_counter_context *ctx) +static void perf_log_period(struct perf_counter *counter, u64 period); + +static void perf_adjust_freq(struct perf_counter_context *ctx) { struct perf_counter *counter; u64 irq_period; @@ -1072,6 +1074,8 @@ void perf_adjust_freq(struct perf_counter_context *ctx) if (!irq_period) irq_period = 1; + perf_log_period(counter, irq_period); + counter->hw.irq_period = irq_period; counter->hw.interrupts = 0; } @@ -2406,6 +2410,40 @@ void perf_counter_munmap(unsigned long addr, unsigned long len, perf_counter_mmap_event(&mmap_event); } +/* + * + */ + +static void perf_log_period(struct perf_counter *counter, u64 period) +{ + struct perf_output_handle handle; + int ret; + + struct { + struct perf_event_header header; + u64 time; + u64 period; + } freq_event = { + .header = { + .type = PERF_EVENT_PERIOD, + .misc = 0, + .size = sizeof(freq_event), + }, + .time = sched_clock(), + .period = period, + }; + + if (counter->hw.irq_period == period) + return; + + ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0); + if (ret) + return; + + perf_output_put(&handle, freq_event); + perf_output_end(&handle); +} + /* * Generic counter overflow handling. */ -- cgit v1.2.3 From b986d7ec0f8b7ea3cc7366d80a137fbe839df227 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 20 May 2009 12:21:21 +0200 Subject: perf_counter: Optimize disable of time based sw counters Currently we call hrtimer_cancel() unconditionally on disable of time based software counters. Avoid when possible. 
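A toy sketch of the guard, assuming a counter whose timer is armed only when irq_period is non-zero; struct sw_counter and timer_cancel() are illustrative stand-ins for the kernel's hw.hrtimer handling:

#include <stdbool.h>
#include <stdio.h>

struct sw_counter {
        unsigned long long irq_period;
        bool timer_armed;       /* armed only when irq_period != 0 */
};

static void timer_cancel(struct sw_counter *c)
{
        /* In the kernel, cancellation may wait for a running callback. */
        c->timer_armed = false;
        puts("timer_cancel() called");
}

static void sw_counter_disable(struct sw_counter *c)
{
        /* The guard: no period means the timer was never started. */
        if (c->irq_period)
                timer_cancel(c);
        /* ... then update the count as before ... */
}

int main(void)
{
        struct sw_counter plain   = { .irq_period = 0 };
        struct sw_counter sampled = { .irq_period = 10000,
                                      .timer_armed = true };

        sw_counter_disable(&plain);     /* prints nothing: cancel skipped */
        sw_counter_disable(&sampled);   /* prints: timer_cancel() called */
        return 0;
}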
[ Impact: micro-optimize the code ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: Marcelo Tosatti Cc: John Kacur LKML-Reference: <20090520102553.388185031@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index db02eb16c77..473ed2cafbf 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2716,7 +2716,8 @@ static int cpu_clock_perf_counter_enable(struct perf_counter *counter) static void cpu_clock_perf_counter_disable(struct perf_counter *counter) { - hrtimer_cancel(&counter->hw.hrtimer); + if (counter->hw.irq_period) + hrtimer_cancel(&counter->hw.hrtimer); cpu_clock_perf_counter_update(counter); } @@ -2767,7 +2768,8 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter) static void task_clock_perf_counter_disable(struct perf_counter *counter) { - hrtimer_cancel(&counter->hw.hrtimer); + if (counter->hw.irq_period) + hrtimer_cancel(&counter->hw.hrtimer); task_clock_perf_counter_update(counter, counter->ctx->time); } -- cgit v1.2.3 From afedadf23a2c90f3ba0d963282cbe6a6be129494 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 20 May 2009 12:21:22 +0200 Subject: perf_counter: Optimize sched in/out of counters Avoid a function call for !group counters by directly calling the counter function. [ Impact: micro-optimize the code ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Marcelo Tosatti Cc: John Kacur LKML-Reference: <20090520102553.511933670@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 473ed2cafbf..69d4de81596 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -826,8 +826,12 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx, perf_disable(); if (ctx->nr_active) { - list_for_each_entry(counter, &ctx->counter_list, list_entry) - group_sched_out(counter, cpuctx, ctx); + list_for_each_entry(counter, &ctx->counter_list, list_entry) { + if (counter != counter->group_leader) + counter_sched_out(counter, cpuctx, ctx); + else + group_sched_out(counter, cpuctx, ctx); + } } perf_enable(); out: @@ -903,8 +907,12 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, if (counter->cpu != -1 && counter->cpu != cpu) continue; - if (group_can_go_on(counter, cpuctx, 1)) - group_sched_in(counter, cpuctx, ctx, cpu); + if (counter != counter->group_leader) + counter_sched_in(counter, cpuctx, ctx, cpu); + else { + if (group_can_go_on(counter, cpuctx, 1)) + group_sched_in(counter, cpuctx, ctx, cpu); + } /* * If this pinned group hasn't been scheduled, @@ -932,9 +940,14 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, if (counter->cpu != -1 && counter->cpu != cpu) continue; - if (group_can_go_on(counter, cpuctx, can_add_hw)) { - if (group_sched_in(counter, cpuctx, ctx, cpu)) + if (counter != counter->group_leader) { + if (counter_sched_in(counter, cpuctx, ctx, cpu)) can_add_hw = 0; + } else { + if (group_can_go_on(counter, cpuctx, can_add_hw)) { + if (group_sched_in(counter, cpuctx, ctx, cpu)) + can_add_hw = 0; + } } } perf_enable(); -- cgit v1.2.3 From 34adc8062227f41b04ade0ff3fbd1dbe3002669e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 20 May 2009 20:13:28 +0200 
Subject: perf_counter: Fix context removal deadlock Disable the PMU globally before removing a counter from a context. This fixes the following lockup: [22081.741922] ------------[ cut here ]------------ [22081.746668] WARNING: at arch/x86/kernel/cpu/perf_counter.c:803 intel_pmu_handle_irq+0x9b/0x24e() [22081.755624] Hardware name: X8DTN [22081.758903] perfcounters: irq loop stuck! [22081.762985] Modules linked in: [22081.766136] Pid: 11082, comm: perf Not tainted 2.6.30-rc6-tip #226 [22081.772432] Call Trace: [22081.774940] [] ? intel_pmu_handle_irq+0x9b/0x24e [22081.781993] [] ? intel_pmu_handle_irq+0x9b/0x24e [22081.788368] [] ? warn_slowpath_common+0x77/0xa3 [22081.794649] [] ? warn_slowpath_fmt+0x40/0x45 [22081.800696] [] ? intel_pmu_handle_irq+0x9b/0x24e [22081.807080] [] ? perf_counter_nmi_handler+0x3f/0x4a [22081.813751] [] ? notifier_call_chain+0x58/0x86 [22081.819951] [] ? notify_die+0x2d/0x32 [22081.825392] [] ? do_nmi+0x8e/0x242 [22081.830538] [] ? nmi+0x1a/0x20 [22081.835342] [] ? selinux_file_free_security+0x0/0x1a [22081.842105] [] ? x86_pmu_disable_counter+0x15/0x41 [22081.848673] <> [] ? x86_pmu_disable+0x86/0x103 [22081.855512] [] ? __perf_counter_remove_from_context+0x0/0xfe [22081.862926] [] ? counter_sched_out+0x30/0xce [22081.868909] [] ? __perf_counter_remove_from_context+0x59/0xfe [22081.876382] [] ? smp_call_function_single+0x6c/0xe6 [22081.882955] [] ? perf_release+0x86/0x14c [22081.888600] [] ? __fput+0xe7/0x195 [22081.893718] [] ? filp_close+0x5b/0x62 [22081.899107] [] ? put_files_struct+0x64/0xc2 [22081.905031] [] ? do_exit+0x1e2/0x6ef [22081.910360] [] ? _spin_lock_irqsave+0x9/0xe [22081.916292] [] ? do_group_exit+0x67/0x93 [22081.921953] [] ? sys_exit_group+0x12/0x16 [22081.927759] [] ? system_call_fastpath+0x16/0x1b [22081.934076] ---[ end trace 3a3936ce3e1b4505 ]--- And could potentially also fix the lockup reported by Marcelo Tosatti. Also, print more debug info in case of a detected lockup. [ Impact: fix lockup ] Reported-by: Marcelo Tosatti Acked-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 1 + kernel/perf_counter.c | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index c109819c2cb..6cc1660db8d 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -740,6 +740,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi) again: if (++loops > 100) { WARN_ONCE(1, "perfcounters: irq loop stuck!\n"); + perf_counter_print_debug(); return 1; } diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 69d4de81596..08584c16049 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -208,18 +208,17 @@ static void __perf_counter_remove_from_context(void *info) return; spin_lock_irqsave(&ctx->lock, flags); + /* + * Protect the list operation against NMI by disabling the + * counters on a global level. + */ + perf_disable(); counter_sched_out(counter, cpuctx, ctx); counter->task = NULL; - /* - * Protect the list operation against NMI by disabling the - * counters on a global level. NOP for non NMI based counters. 
- */ - perf_disable(); list_del_counter(counter, ctx); - perf_enable(); if (!ctx->task) { /* @@ -231,6 +230,7 @@ static void __perf_counter_remove_from_context(void *info) perf_max_counters - perf_reserved_percpu); } + perf_enable(); spin_unlock_irqrestore(&ctx->lock, flags); } -- cgit v1.2.3 From a63eaf34ae60bdb067a354cc8def2e8f4a01f5f4 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 22 May 2009 14:17:31 +1000 Subject: perf_counter: Dynamically allocate tasks' perf_counter_context struct This replaces the struct perf_counter_context in the task_struct with a pointer to a dynamically allocated perf_counter_context struct. The main reason for doing is this is to allow us to transfer a perf_counter_context from one task to another when we do lazy PMU switching in a later patch. This has a few side-benefits: the task_struct becomes a little smaller, we save some memory because only tasks that have perf_counters attached get a perf_counter_context allocated for them, and we can remove the inclusion of in sched.h, meaning that we don't end up recompiling nearly everything whenever perf_counter.h changes. The perf_counter_context structures are reference-counted and freed when the last reference is dropped. A context can have references from its task and the counters on its task. Counters can outlive the task so it is possible that a context will be freed well after its task has exited. Contexts are allocated on fork if the parent had a context, or otherwise the first time that a per-task counter is created on a task. In the latter case, we set the context pointer in the task struct locklessly using an atomic compare-and-exchange operation in case we raced with some other task in creating a context for the subject task. This also removes the task pointer from the perf_counter struct. The task pointer was not used anywhere and would make it harder to move a context from one task to another. Anything that needed to know which task a counter was attached to was already using counter->ctx->task. The __perf_counter_init_context function moves up in perf_counter.c so that it can be called from find_get_context, and now initializes the refcount, but is otherwise unchanged. We were potentially calling list_del_counter twice: once from __perf_counter_exit_task when the task exits and once from __perf_counter_remove_from_context when the counter's fd gets closed. This adds a check in list_del_counter so it doesn't do anything if the counter has already been removed from the lists. Since perf_counter_task_sched_in doesn't do anything if the task doesn't have a context, and leaves cpuctx->task_ctx = NULL, this adds code to __perf_install_in_context to set cpuctx->task_ctx if necessary, i.e. in the case where the current task adds the first counter to itself and thus creates a context for itself. This also adds similar code to __perf_counter_enable to handle a similar situation which can arise when the counters have been disabled using prctl; that also leaves cpuctx->task_ctx = NULL. 
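A userspace C11 sketch of the lifetime scheme described above: a refcounted context plus a lockless first-use install via compare-and-exchange. struct ctx, struct task and find_get_context() are simplified stand-ins for the kernel structures, with calloc/free in place of kmalloc/kfree:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
        atomic_int refcount;    /* held by the task and by its counters */
};

static void get_ctx(struct ctx *ctx)
{
        atomic_fetch_add(&ctx->refcount, 1);
}

static void put_ctx(struct ctx *ctx)
{
        if (atomic_fetch_sub(&ctx->refcount, 1) == 1) {
                puts("last reference dropped: freeing context");
                free(ctx);
        }
}

struct task { _Atomic(struct ctx *) perf_ctxp; };

static struct ctx *find_get_context(struct task *t)
{
        struct ctx *ctx = atomic_load(&t->perf_ctxp);

        if (!ctx) {
                ctx = calloc(1, sizeof(*ctx));
                atomic_init(&ctx->refcount, 1); /* the task's reference */
                struct ctx *expected = NULL;
                /* First-counter race: only one allocation may win. */
                if (!atomic_compare_exchange_strong(&t->perf_ctxp,
                                                    &expected, ctx)) {
                        free(ctx);      /* we lost the race */
                        ctx = expected; /* use the winner's context */
                }
        }
        get_ctx(ctx);   /* reference held by the new counter */
        return ctx;
}

int main(void)
{
        struct task t = { .perf_ctxp = NULL };
        struct ctx *ctx = find_get_context(&t);

        put_ctx(ctx);                           /* counter is closed */
        put_ctx(atomic_load(&t.perf_ctxp));     /* task exits */
        return 0;
}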
[ Impact: refactor counter context management to prepare for new feature ] Signed-off-by: Paul Mackerras Acked-by: Peter Zijlstra Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo LKML-Reference: <18966.10075.781053.231153@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/apic.c | 1 + include/linux/init_task.h | 13 --- include/linux/perf_counter.h | 4 +- include/linux/sched.h | 6 +- kernel/exit.c | 3 +- kernel/fork.c | 1 + kernel/perf_counter.c | 218 +++++++++++++++++++++++++++---------------- 7 files changed, 145 insertions(+), 101 deletions(-) diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index e9021a90802..b4f64402a82 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -14,6 +14,7 @@ * Mikael Pettersson : PM converted to driver model. */ +#include #include #include #include diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 503afaa0afa..d87247d2641 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -108,18 +108,6 @@ extern struct group_info init_groups; extern struct cred init_cred; -#ifdef CONFIG_PERF_COUNTERS -# define INIT_PERF_COUNTERS(tsk) \ - .perf_counter_ctx.counter_list = \ - LIST_HEAD_INIT(tsk.perf_counter_ctx.counter_list), \ - .perf_counter_ctx.event_list = \ - LIST_HEAD_INIT(tsk.perf_counter_ctx.event_list), \ - .perf_counter_ctx.lock = \ - __SPIN_LOCK_UNLOCKED(tsk.perf_counter_ctx.lock), -#else -# define INIT_PERF_COUNTERS(tsk) -#endif - /* * INIT_TASK is used to set up the first task table, touch at * your own risk!. Base=0, limit=0x1fffff (=2MB) @@ -183,7 +171,6 @@ extern struct cred init_cred; }, \ .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ INIT_IDS \ - INIT_PERF_COUNTERS(tsk) \ INIT_TRACE_IRQFLAGS \ INIT_LOCKDEP \ INIT_FTRACE_GRAPH \ diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index f612941ef46..07130900546 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -449,7 +449,6 @@ struct perf_counter { struct hw_perf_counter hw; struct perf_counter_context *ctx; - struct task_struct *task; struct file *filp; struct perf_counter *parent; @@ -498,7 +497,6 @@ struct perf_counter { * Used as a container for task counters and CPU counters as well: */ struct perf_counter_context { -#ifdef CONFIG_PERF_COUNTERS /* * Protect the states of the counters in the list, * nr_active, and the list: @@ -516,6 +514,7 @@ struct perf_counter_context { int nr_counters; int nr_active; int is_active; + atomic_t refcount; struct task_struct *task; /* @@ -523,7 +522,6 @@ struct perf_counter_context { */ u64 time; u64 timestamp; -#endif }; /** diff --git a/include/linux/sched.h b/include/linux/sched.h index ff59d123151..9714d450f41 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -71,7 +71,6 @@ struct sched_param { #include #include #include -#include #include #include #include @@ -99,6 +98,7 @@ struct robust_list_head; struct bio; struct bts_tracer; struct fs_struct; +struct perf_counter_context; /* * List of flags we want to share for kernel threads, @@ -1387,7 +1387,9 @@ struct task_struct { struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; #endif - struct perf_counter_context perf_counter_ctx; +#ifdef CONFIG_PERF_COUNTERS + struct perf_counter_context *perf_counter_ctxp; +#endif #ifdef CONFIG_NUMA struct mempolicy *mempolicy; short il_next; diff --git a/kernel/exit.c b/kernel/exit.c index f9dfedd94af..99ad4063ee4 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ 
-48,6 +48,7 @@ #include #include #include +#include #include #include @@ -159,7 +160,7 @@ static void delayed_put_task_struct(struct rcu_head *rhp) struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); #ifdef CONFIG_PERF_COUNTERS - WARN_ON_ONCE(!list_empty(&tsk->perf_counter_ctx.counter_list)); + WARN_ON_ONCE(tsk->perf_counter_ctxp); #endif trace_sched_process_free(tsk); put_task_struct(tsk); diff --git a/kernel/fork.c b/kernel/fork.c index d32fef4d38e..e72a09f5355 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -63,6 +63,7 @@ #include #include #include +#include #include #include diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 08584c16049..06ea3eae886 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -97,6 +97,17 @@ void perf_enable(void) hw_perf_enable(); } +static void get_ctx(struct perf_counter_context *ctx) +{ + atomic_inc(&ctx->refcount); +} + +static void put_ctx(struct perf_counter_context *ctx) +{ + if (atomic_dec_and_test(&ctx->refcount)) + kfree(ctx); +} + static void list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) { @@ -118,11 +129,17 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) ctx->nr_counters++; } +/* + * Remove a counter from the lists for its context. + * Must be called with counter->mutex and ctx->mutex held. + */ static void list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) { struct perf_counter *sibling, *tmp; + if (list_empty(&counter->list_entry)) + return; ctx->nr_counters--; list_del_init(&counter->list_entry); @@ -216,8 +233,6 @@ static void __perf_counter_remove_from_context(void *info) counter_sched_out(counter, cpuctx, ctx); - counter->task = NULL; - list_del_counter(counter, ctx); if (!ctx->task) { @@ -279,7 +294,6 @@ retry: */ if (!list_empty(&counter->list_entry)) { list_del_counter(counter, ctx); - counter->task = NULL; } spin_unlock_irq(&ctx->lock); } @@ -568,11 +582,17 @@ static void __perf_install_in_context(void *info) * If this is a task context, we need to check whether it is * the current task context of this cpu. If not it has been * scheduled out before the smp call arrived. + * Or possibly this is the right context but it isn't + * on this cpu because it had no counters. */ - if (ctx->task && cpuctx->task_ctx != ctx) - return; + if (ctx->task && cpuctx->task_ctx != ctx) { + if (cpuctx->task_ctx || ctx->task != current) + return; + cpuctx->task_ctx = ctx; + } spin_lock_irqsave(&ctx->lock, flags); + ctx->is_active = 1; update_context_time(ctx); /* @@ -653,7 +673,6 @@ perf_install_in_context(struct perf_counter_context *ctx, return; } - counter->task = task; retry: task_oncpu_function_call(task, __perf_install_in_context, counter); @@ -693,10 +712,14 @@ static void __perf_counter_enable(void *info) * If this is a per-task counter, need to check whether this * counter's task is the current task on this cpu. 
*/ - if (ctx->task && cpuctx->task_ctx != ctx) - return; + if (ctx->task && cpuctx->task_ctx != ctx) { + if (cpuctx->task_ctx || ctx->task != current) + return; + cpuctx->task_ctx = ctx; + } spin_lock_irqsave(&ctx->lock, flags); + ctx->is_active = 1; update_context_time(ctx); counter->prev_state = counter->state; @@ -852,10 +875,10 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx, void perf_counter_task_sched_out(struct task_struct *task, int cpu) { struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); - struct perf_counter_context *ctx = &task->perf_counter_ctx; + struct perf_counter_context *ctx = task->perf_counter_ctxp; struct pt_regs *regs; - if (likely(!cpuctx->task_ctx)) + if (likely(!ctx || !cpuctx->task_ctx)) return; update_context_time(ctx); @@ -871,6 +894,8 @@ static void __perf_counter_task_sched_out(struct perf_counter_context *ctx) { struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + if (!cpuctx->task_ctx) + return; __perf_counter_sched_out(ctx, cpuctx); cpuctx->task_ctx = NULL; } @@ -969,8 +994,10 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, void perf_counter_task_sched_in(struct task_struct *task, int cpu) { struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); - struct perf_counter_context *ctx = &task->perf_counter_ctx; + struct perf_counter_context *ctx = task->perf_counter_ctxp; + if (likely(!ctx)) + return; __perf_counter_sched_in(ctx, cpuctx, cpu); cpuctx->task_ctx = ctx; } @@ -985,11 +1012,11 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) int perf_counter_task_disable(void) { struct task_struct *curr = current; - struct perf_counter_context *ctx = &curr->perf_counter_ctx; + struct perf_counter_context *ctx = curr->perf_counter_ctxp; struct perf_counter *counter; unsigned long flags; - if (likely(!ctx->nr_counters)) + if (!ctx || !ctx->nr_counters) return 0; local_irq_save(flags); @@ -1020,12 +1047,12 @@ int perf_counter_task_disable(void) int perf_counter_task_enable(void) { struct task_struct *curr = current; - struct perf_counter_context *ctx = &curr->perf_counter_ctx; + struct perf_counter_context *ctx = curr->perf_counter_ctxp; struct perf_counter *counter; unsigned long flags; int cpu; - if (likely(!ctx->nr_counters)) + if (!ctx || !ctx->nr_counters) return 0; local_irq_save(flags); @@ -1128,19 +1155,23 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) return; cpuctx = &per_cpu(perf_cpu_context, cpu); - ctx = &curr->perf_counter_ctx; + ctx = curr->perf_counter_ctxp; perf_adjust_freq(&cpuctx->ctx); - perf_adjust_freq(ctx); + if (ctx) + perf_adjust_freq(ctx); perf_counter_cpu_sched_out(cpuctx); - __perf_counter_task_sched_out(ctx); + if (ctx) + __perf_counter_task_sched_out(ctx); rotate_ctx(&cpuctx->ctx); - rotate_ctx(ctx); + if (ctx) + rotate_ctx(ctx); perf_counter_cpu_sched_in(cpuctx, cpu); - perf_counter_task_sched_in(curr, cpu); + if (ctx) + perf_counter_task_sched_in(curr, cpu); } /* @@ -1176,6 +1207,22 @@ static u64 perf_counter_read(struct perf_counter *counter) return atomic64_read(&counter->count); } +/* + * Initialize the perf_counter context in a task_struct: + */ +static void +__perf_counter_init_context(struct perf_counter_context *ctx, + struct task_struct *task) +{ + memset(ctx, 0, sizeof(*ctx)); + spin_lock_init(&ctx->lock); + mutex_init(&ctx->mutex); + INIT_LIST_HEAD(&ctx->counter_list); + INIT_LIST_HEAD(&ctx->event_list); + atomic_set(&ctx->refcount, 1); + ctx->task = task; +} + static void put_context(struct 
perf_counter_context *ctx) { if (ctx->task) @@ -1186,6 +1233,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu) { struct perf_cpu_context *cpuctx; struct perf_counter_context *ctx; + struct perf_counter_context *tctx; struct task_struct *task; /* @@ -1225,15 +1273,36 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu) if (!task) return ERR_PTR(-ESRCH); - ctx = &task->perf_counter_ctx; - ctx->task = task; - /* Reuse ptrace permission checks for now. */ if (!ptrace_may_access(task, PTRACE_MODE_READ)) { - put_context(ctx); + put_task_struct(task); return ERR_PTR(-EACCES); } + ctx = task->perf_counter_ctxp; + if (!ctx) { + ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); + if (!ctx) { + put_task_struct(task); + return ERR_PTR(-ENOMEM); + } + __perf_counter_init_context(ctx, task); + /* + * Make sure other cpus see correct values for *ctx + * once task->perf_counter_ctxp is visible to them. + */ + smp_wmb(); + tctx = cmpxchg(&task->perf_counter_ctxp, NULL, ctx); + if (tctx) { + /* + * We raced with some other task; use + * the context they set. + */ + kfree(ctx); + ctx = tctx; + } + } + return ctx; } @@ -1242,6 +1311,7 @@ static void free_counter_rcu(struct rcu_head *head) struct perf_counter *counter; counter = container_of(head, struct perf_counter, rcu_head); + put_ctx(counter->ctx); kfree(counter); } @@ -2247,7 +2317,7 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event) perf_counter_comm_ctx(&cpuctx->ctx, comm_event); put_cpu_var(perf_cpu_context); - perf_counter_comm_ctx(¤t->perf_counter_ctx, comm_event); + perf_counter_comm_ctx(current->perf_counter_ctxp, comm_event); } void perf_counter_comm(struct task_struct *task) @@ -2256,7 +2326,9 @@ void perf_counter_comm(struct task_struct *task) if (!atomic_read(&nr_comm_tracking)) return; - + if (!current->perf_counter_ctxp) + return; + comm_event = (struct perf_comm_event){ .task = task, .event = { @@ -2372,7 +2444,7 @@ got_name: perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event); put_cpu_var(perf_cpu_context); - perf_counter_mmap_ctx(¤t->perf_counter_ctx, mmap_event); + perf_counter_mmap_ctx(current->perf_counter_ctxp, mmap_event); kfree(buf); } @@ -2384,6 +2456,8 @@ void perf_counter_mmap(unsigned long addr, unsigned long len, if (!atomic_read(&nr_mmap_tracking)) return; + if (!current->perf_counter_ctxp) + return; mmap_event = (struct perf_mmap_event){ .file = file, @@ -2985,6 +3059,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, counter->group_leader = group_leader; counter->pmu = NULL; counter->ctx = ctx; + get_ctx(ctx); counter->state = PERF_COUNTER_STATE_INACTIVE; if (hw_event->disabled) @@ -3149,21 +3224,6 @@ err_put_context: goto out_fput; } -/* - * Initialize the perf_counter context in a task_struct: - */ -static void -__perf_counter_init_context(struct perf_counter_context *ctx, - struct task_struct *task) -{ - memset(ctx, 0, sizeof(*ctx)); - spin_lock_init(&ctx->lock); - mutex_init(&ctx->mutex); - INIT_LIST_HEAD(&ctx->counter_list); - INIT_LIST_HEAD(&ctx->event_list); - ctx->task = task; -} - /* * inherit a counter from parent task to child task: */ @@ -3195,7 +3255,6 @@ inherit_counter(struct perf_counter *parent_counter, /* * Link it up in the child's context: */ - child_counter->task = child; add_counter_to_ctx(child_counter, child_ctx); child_counter->parent = parent_counter; @@ -3294,40 +3353,15 @@ __perf_counter_exit_task(struct task_struct *child, struct perf_counter *parent_counter; /* - * If we do not self-reap then we 
have to wait for the - * child task to unschedule (it will happen for sure), - * so that its counter is at its final count. (This - * condition triggers rarely - child tasks usually get - * off their CPU before the parent has a chance to - * get this far into the reaping action) + * Protect against concurrent operations on child_counter + * due its fd getting closed, etc. */ - if (child != current) { - wait_task_inactive(child, 0); - update_counter_times(child_counter); - list_del_counter(child_counter, child_ctx); - } else { - struct perf_cpu_context *cpuctx; - unsigned long flags; - - /* - * Disable and unlink this counter. - * - * Be careful about zapping the list - IRQ/NMI context - * could still be processing it: - */ - local_irq_save(flags); - perf_disable(); - - cpuctx = &__get_cpu_var(perf_cpu_context); + mutex_lock(&child_counter->mutex); - group_sched_out(child_counter, cpuctx, child_ctx); - update_counter_times(child_counter); + update_counter_times(child_counter); + list_del_counter(child_counter, child_ctx); - list_del_counter(child_counter, child_ctx); - - perf_enable(); - local_irq_restore(flags); - } + mutex_unlock(&child_counter->mutex); parent_counter = child_counter->parent; /* @@ -3346,19 +3380,29 @@ __perf_counter_exit_task(struct task_struct *child, * * Note: we may be running in child context, but the PID is not hashed * anymore so new counters will not be added. + * (XXX not sure that is true when we get called from flush_old_exec. + * -- paulus) */ void perf_counter_exit_task(struct task_struct *child) { struct perf_counter *child_counter, *tmp; struct perf_counter_context *child_ctx; + unsigned long flags; WARN_ON_ONCE(child != current); - child_ctx = &child->perf_counter_ctx; + child_ctx = child->perf_counter_ctxp; - if (likely(!child_ctx->nr_counters)) + if (likely(!child_ctx)) return; + local_irq_save(flags); + __perf_counter_task_sched_out(child_ctx); + child->perf_counter_ctxp = NULL; + local_irq_restore(flags); + + mutex_lock(&child_ctx->mutex); + again: list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list, list_entry) @@ -3371,6 +3415,10 @@ again: */ if (!list_empty(&child_ctx->counter_list)) goto again; + + mutex_unlock(&child_ctx->mutex); + + put_ctx(child_ctx); } /* @@ -3382,19 +3430,25 @@ void perf_counter_init_task(struct task_struct *child) struct perf_counter *counter; struct task_struct *parent = current; - child_ctx = &child->perf_counter_ctx; - parent_ctx = &parent->perf_counter_ctx; - - __perf_counter_init_context(child_ctx, child); + child->perf_counter_ctxp = NULL; /* * This is executed from the parent task context, so inherit - * counters that have been marked for cloning: + * counters that have been marked for cloning. + * First allocate and initialize a context for the child. */ - if (likely(!parent_ctx->nr_counters)) + child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); + if (!child_ctx) + return; + + parent_ctx = parent->perf_counter_ctxp; + if (likely(!parent_ctx || !parent_ctx->nr_counters)) return; + __perf_counter_init_context(child_ctx, child); + child->perf_counter_ctxp = child_ctx; + /* * Lock the parent list. No need to lock the child - not PID * hashed yet and not running, so nobody can access it. 
-- cgit v1.2.3 From 564c2b210add41df9a3a5aaa365c1d97cff6110d Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 22 May 2009 14:27:22 +1000 Subject: perf_counter: Optimize context switch between identical inherited contexts When monitoring a process and its descendants with a set of inherited counters, we can often get the situation in a context switch where both the old (outgoing) and new (incoming) process have the same set of counters, and their values are ultimately going to be added together. In that situation it doesn't matter which set of counters is used to count the activity for the new process, so there is really no need to go through the process of reading the hardware counters and updating the old task's counters and then setting up the PMU for the new task. This optimizes the context switch in this situation. Instead of scheduling out the perf_counter_context for the old task and scheduling in the new context, we simply transfer the old context to the new task and keep using it without interruption. The new context gets transferred to the old task. This means that both tasks still have a valid perf_counter_context, so no special case is introduced when the old task gets scheduled in again, either on this CPU or another CPU. The equivalence of contexts is detected by keeping a pointer in each cloned context pointing to the context it was cloned from. To cope with the situation where a context is changed by adding or removing counters after it has been cloned, we also keep a generation number on each context which is incremented every time a context is changed. When a context is cloned we take a copy of the parent's generation number, and two cloned contexts are equivalent only if they have the same parent and the same generation number. In order that the parent context pointer remains valid (and is not reused), we increment the parent context's reference count for each context cloned from it. Since we don't have individual fds for the counters in a cloned context, the only thing that can make two clones of a given parent different after they have been cloned is enabling or disabling all counters with prctl. To account for this, we keep a count of the number of enabled counters in each context. Two contexts must have the same number of enabled counters to be considered equivalent. Here are some measurements of the context switch time as measured with the lat_ctx benchmark from lmbench, comparing the times obtained with and without this patch series:

                  -----Unmodified-----    With this patch series
    Counters:     none    2 HW   4H+4S    none    2 HW   4H+4S

    2 processes:
    Average       3.44    6.45   11.24    3.12    3.39    3.60
    St dev        0.04    0.04    0.13    0.05    0.17    0.19

    8 processes:
    Average       6.45    8.79   14.00    5.57    6.23    7.57
    St dev        1.27    1.04    0.88    1.42    1.46    1.42

    32 processes:
    Average       5.56    8.43   13.78    5.28    5.55    7.15
    St dev        0.41    0.47    0.53    0.54    0.57    0.81

The numbers are the mean and standard deviation of 20 runs of lat_ctx. The "none" columns are lat_ctx run directly without any counters. The "2 HW" columns are with lat_ctx run under perfstat, counting cycles and instructions. The "4H+4S" columns are lat_ctx run under perfstat with 4 hardware counters and 4 software counters (cycles, instructions, cache references, cache misses, task clock, context switch, cpu migrations, and page faults).
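Distilled to its essentials, the optimization amounts to the following sketch (illustrative C only; the patch's real context_equiv() appears in the diff further below):

    struct task;

    struct ctx {
    	struct ctx *parent_ctx;		/* context we were cloned from */
    	unsigned int parent_gen;	/* parent's generation at clone time */
    	int nr_enabled;
    	struct task *task;
    };

    struct task { struct ctx *perf_ctx; };

    static int context_equiv(struct ctx *a, struct ctx *b)
    {
    	return a->parent_ctx && a->parent_ctx == b->parent_ctx &&
    	       a->parent_gen == b->parent_gen &&
    	       a->nr_enabled == b->nr_enabled;
    }

    /* On a context switch, equivalent contexts simply trade owners: */
    static void sched_out(struct task *prev, struct task *next)
    {
    	struct ctx *ctx = prev->perf_ctx;
    	struct ctx *next_ctx = next->perf_ctx;

    	if (ctx && next_ctx && context_equiv(ctx, next_ctx)) {
    		prev->perf_ctx = next_ctx;
    		next->perf_ctx = ctx;
    		ctx->task = next;
    		next_ctx->task = prev;
    		return;		/* the PMU keeps counting, untouched */
    	}
    	/* otherwise: full sched-out of ctx, sched-in of next_ctx */
    }

Both tasks end up owning a valid context either way, which is what keeps the sched-in path free of special cases.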
[ Impact: performance optimization of counter context-switches ] Signed-off-by: Paul Mackerras Acked-by: Peter Zijlstra Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo LKML-Reference: <18966.10666.517218.332164@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 12 ++++- kernel/perf_counter.c | 109 +++++++++++++++++++++++++++++++++++++------ kernel/sched.c | 2 +- 3 files changed, 107 insertions(+), 16 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 07130900546..4cae01a5045 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -513,6 +513,7 @@ struct perf_counter_context { struct list_head event_list; int nr_counters; int nr_active; + int nr_enabled; int is_active; atomic_t refcount; struct task_struct *task; @@ -522,6 +523,14 @@ struct perf_counter_context { */ u64 time; u64 timestamp; + + /* + * These fields let us detect when two contexts have both + * been cloned (inherited) from a common ancestor. + */ + struct perf_counter_context *parent_ctx; + u32 parent_gen; + u32 generation; }; /** @@ -552,7 +561,8 @@ extern int perf_max_counters; extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter); extern void perf_counter_task_sched_in(struct task_struct *task, int cpu); -extern void perf_counter_task_sched_out(struct task_struct *task, int cpu); +extern void perf_counter_task_sched_out(struct task_struct *task, + struct task_struct *next, int cpu); extern void perf_counter_task_tick(struct task_struct *task, int cpu); extern void perf_counter_init_task(struct task_struct *child); extern void perf_counter_exit_task(struct task_struct *child); diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 06ea3eae886..c10055416de 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -104,8 +104,11 @@ static void get_ctx(struct perf_counter_context *ctx) static void put_ctx(struct perf_counter_context *ctx) { - if (atomic_dec_and_test(&ctx->refcount)) + if (atomic_dec_and_test(&ctx->refcount)) { + if (ctx->parent_ctx) + put_ctx(ctx->parent_ctx); kfree(ctx); + } } static void @@ -127,6 +130,8 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) list_add_rcu(&counter->event_entry, &ctx->event_list); ctx->nr_counters++; + if (counter->state >= PERF_COUNTER_STATE_INACTIVE) + ctx->nr_enabled++; } /* @@ -141,6 +146,8 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) if (list_empty(&counter->list_entry)) return; ctx->nr_counters--; + if (counter->state >= PERF_COUNTER_STATE_INACTIVE) + ctx->nr_enabled--; list_del_init(&counter->list_entry); list_del_rcu(&counter->event_entry); @@ -203,6 +210,22 @@ group_sched_out(struct perf_counter *group_counter, cpuctx->exclusive = 0; } +/* + * Mark this context as not being a clone of another. + * Called when counters are added to or removed from this context. + * We also increment our generation number so that anything that + * was cloned from this context before this will not match anything + * cloned from this context after this. 
+ */ +static void unclone_ctx(struct perf_counter_context *ctx) +{ + ++ctx->generation; + if (!ctx->parent_ctx) + return; + put_ctx(ctx->parent_ctx); + ctx->parent_ctx = NULL; +} + /* * Cross CPU call to remove a performance counter * @@ -263,6 +286,7 @@ static void perf_counter_remove_from_context(struct perf_counter *counter) struct perf_counter_context *ctx = counter->ctx; struct task_struct *task = ctx->task; + unclone_ctx(ctx); if (!task) { /* * Per cpu counters are removed via an smp call and @@ -378,6 +402,7 @@ static void __perf_counter_disable(void *info) else counter_sched_out(counter, cpuctx, ctx); counter->state = PERF_COUNTER_STATE_OFF; + ctx->nr_enabled--; } spin_unlock_irqrestore(&ctx->lock, flags); @@ -419,6 +444,7 @@ static void perf_counter_disable(struct perf_counter *counter) if (counter->state == PERF_COUNTER_STATE_INACTIVE) { update_counter_times(counter); counter->state = PERF_COUNTER_STATE_OFF; + ctx->nr_enabled--; } spin_unlock_irq(&ctx->lock); @@ -727,6 +753,7 @@ static void __perf_counter_enable(void *info) goto unlock; counter->state = PERF_COUNTER_STATE_INACTIVE; counter->tstamp_enabled = ctx->time - counter->total_time_enabled; + ctx->nr_enabled++; /* * If the counter is in a group and isn't the group leader, @@ -817,6 +844,7 @@ static void perf_counter_enable(struct perf_counter *counter) counter->state = PERF_COUNTER_STATE_INACTIVE; counter->tstamp_enabled = ctx->time - counter->total_time_enabled; + ctx->nr_enabled++; } out: spin_unlock_irq(&ctx->lock); @@ -861,6 +889,25 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx, spin_unlock(&ctx->lock); } +/* + * Test whether two contexts are equivalent, i.e. whether they + * have both been cloned from the same version of the same context + * and they both have the same number of enabled counters. + * If the number of enabled counters is the same, then the set + * of enabled counters should be the same, because these are both + * inherited contexts, therefore we can't access individual counters + * in them directly with an fd; we can only enable/disable all + * counters via prctl, or enable/disable all counters in a family + * via ioctl, which will have the same effect on both contexts. + */ +static int context_equiv(struct perf_counter_context *ctx1, + struct perf_counter_context *ctx2) +{ + return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx + && ctx1->parent_gen == ctx2->parent_gen + && ctx1->nr_enabled == ctx2->nr_enabled; +} + /* * Called from scheduler to remove the counters of the current task, * with interrupts disabled. @@ -872,10 +919,12 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx, * accessing the counter control register. If a NMI hits, then it will * not restart the counter. 
*/ -void perf_counter_task_sched_out(struct task_struct *task, int cpu) +void perf_counter_task_sched_out(struct task_struct *task, + struct task_struct *next, int cpu) { struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); struct perf_counter_context *ctx = task->perf_counter_ctxp; + struct perf_counter_context *next_ctx; struct pt_regs *regs; if (likely(!ctx || !cpuctx->task_ctx)) @@ -885,6 +934,16 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu) regs = task_pt_regs(task); perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0); + + next_ctx = next->perf_counter_ctxp; + if (next_ctx && context_equiv(ctx, next_ctx)) { + task->perf_counter_ctxp = next_ctx; + next->perf_counter_ctxp = ctx; + ctx->task = next; + next_ctx->task = task; + return; + } + __perf_counter_sched_out(ctx, cpuctx); cpuctx->task_ctx = NULL; @@ -998,6 +1057,8 @@ void perf_counter_task_sched_in(struct task_struct *task, int cpu) if (likely(!ctx)) return; + if (cpuctx->task_ctx == ctx) + return; __perf_counter_sched_in(ctx, cpuctx, cpu); cpuctx->task_ctx = ctx; } @@ -3252,6 +3313,16 @@ inherit_counter(struct perf_counter *parent_counter, if (IS_ERR(child_counter)) return child_counter; + /* + * Make the child state follow the state of the parent counter, + * not its hw_event.disabled bit. We hold the parent's mutex, + * so we won't race with perf_counter_{en,dis}able_family. + */ + if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE) + child_counter->state = PERF_COUNTER_STATE_INACTIVE; + else + child_counter->state = PERF_COUNTER_STATE_OFF; + /* * Link it up in the child's context: */ @@ -3277,16 +3348,6 @@ inherit_counter(struct perf_counter *parent_counter, mutex_lock(&parent_counter->mutex); list_add_tail(&child_counter->child_list, &parent_counter->child_list); - /* - * Make the child state follow the state of the parent counter, - * not its hw_event.disabled bit. We hold the parent's mutex, - * so we won't race with perf_counter_{en,dis}able_family. - */ - if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE) - child_counter->state = PERF_COUNTER_STATE_INACTIVE; - else - child_counter->state = PERF_COUNTER_STATE_OFF; - mutex_unlock(&parent_counter->mutex); return child_counter; @@ -3429,6 +3490,7 @@ void perf_counter_init_task(struct task_struct *child) struct perf_counter_context *child_ctx, *parent_ctx; struct perf_counter *counter; struct task_struct *parent = current; + int inherited_all = 1; child->perf_counter_ctxp = NULL; @@ -3463,12 +3525,31 @@ void perf_counter_init_task(struct task_struct *child) if (counter != counter->group_leader) continue; - if (!counter->hw_event.inherit) + if (!counter->hw_event.inherit) { + inherited_all = 0; continue; + } if (inherit_group(counter, parent, - parent_ctx, child, child_ctx)) + parent_ctx, child, child_ctx)) { + inherited_all = 0; break; + } + } + + if (inherited_all) { + /* + * Mark the child context as a clone of the parent + * context, or of whatever the parent is a clone of. 
+ */ + if (parent_ctx->parent_ctx) { + child_ctx->parent_ctx = parent_ctx->parent_ctx; + child_ctx->parent_gen = parent_ctx->parent_gen; + } else { + child_ctx->parent_ctx = parent_ctx; + child_ctx->parent_gen = parent_ctx->generation; + } + get_ctx(child_ctx->parent_ctx); } mutex_unlock(&parent_ctx->mutex); diff --git a/kernel/sched.c b/kernel/sched.c index 419a39d0988..4c0d58bce6b 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -5091,7 +5091,7 @@ need_resched_nonpreemptible: if (likely(prev != next)) { sched_info_switch(prev, next); - perf_counter_task_sched_out(prev, cpu); + perf_counter_task_sched_out(prev, next, cpu); rq->nr_switches++; rq->curr = next; -- cgit v1.2.3 From 910431c7f2e963017d767b29c80ae706421e569f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 22 May 2009 12:32:15 +0200 Subject: perf_counter: fix !PERF_COUNTERS build failure Update the !CONFIG_PERF_COUNTERS prototype too, for perf_counter_task_sched_out(). [ Impact: build fix ] Signed-off-by: Paul Mackerras Acked-by: Peter Zijlstra Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo LKML-Reference: <18966.10666.517218.332164@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 4cae01a5045..2eedae8498d 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -625,7 +625,8 @@ extern void perf_counter_init(void); static inline void perf_counter_task_sched_in(struct task_struct *task, int cpu) { } static inline void -perf_counter_task_sched_out(struct task_struct *task, int cpu) { } +perf_counter_task_sched_out(struct task_struct *task, + struct task_struct *next, int cpu) { } static inline void perf_counter_task_tick(struct task_struct *task, int cpu) { } static inline void perf_counter_init_task(struct task_struct *child) { } -- cgit v1.2.3 From c6eb13847ba081552d2af644219bddeff7110caf Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 22 May 2009 18:18:28 +0200 Subject: perf_counter tools: increase limits I tried to run with 300 active counters and the tools bailed out because our limit was at 64. So increase the counter limit to 1024 and the CPU limit to 4096. Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/perf.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/perf_counter/perf.h b/Documentation/perf_counter/perf.h index 6fa3656399f..81a737444c8 100644 --- a/Documentation/perf_counter/perf.h +++ b/Documentation/perf_counter/perf.h @@ -54,8 +54,8 @@ sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr, group_fd, flags); } -#define MAX_COUNTERS 64 -#define MAX_NR_CPUS 256 +#define MAX_COUNTERS 1024 +#define MAX_NR_CPUS 4096 #define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id)) -- cgit v1.2.3 From e220d2dcb944c5c488b6855d15ec66d76900514f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 23 May 2009 18:28:55 +0200 Subject: perf_counter: Fix dynamic irq_period logging We call perf_adjust_freq() from perf_counter_task_tick() which is called under the rq->lock, causing lock recursion. However, it's no longer required to be called under the rq->lock, so remove it from under it. Also, fix up some related comments.
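The fix is the classic move-the-callback-out-of-the-critical-section pattern. In pthread terms (a loose userspace analogy, not the scheduler code itself):

    #include <pthread.h>

    static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;

    static void counter_tick(void)
    {
    	/*
    	 * May end up taking rq_lock itself (that is the recursion
    	 * being fixed), so it must run with the lock already dropped.
    	 */
    }

    static void scheduler_tick(void)
    {
    	pthread_mutex_lock(&rq_lock);
    	/* ... bookkeeping that genuinely needs the lock ... */
    	pthread_mutex_unlock(&rq_lock);

    	counter_tick();	/* moved outside the critical section */
    }

This is only safe because perf_adjust_freq() no longer depends on rq->lock for any state it touches.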
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090523163012.476197912@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 1 + kernel/perf_counter.c | 3 ++- kernel/sched.c | 3 ++- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 2eedae8498d..23ddd29730f 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -260,6 +260,7 @@ enum perf_event_type { /* * struct { * struct perf_event_header header; + * u64 time; * u64 irq_period; * }; */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index c10055416de..2f410ea2cb3 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2559,7 +2559,8 @@ void perf_counter_munmap(unsigned long addr, unsigned long len, } /* - * + * Log irq_period changes so that analyzing tools can re-normalize the + * event flow. */ static void perf_log_period(struct perf_counter *counter, u64 period) diff --git a/kernel/sched.c b/kernel/sched.c index 4c0d58bce6b..ad079f07c9c 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4875,9 +4875,10 @@ void scheduler_tick(void) update_rq_clock(rq); update_cpu_load(rq); curr->sched_class->task_tick(rq, curr, 0); - perf_counter_task_tick(curr, cpu); spin_unlock(&rq->lock); + perf_counter_task_tick(curr, cpu); + #ifdef CONFIG_SMP rq->idle_at_tick = idle_cpu(cpu); trigger_load_balance(rq, cpu); -- cgit v1.2.3 From fccc714b3148ab9741fafc1e90c3876d50df6093 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 23 May 2009 18:28:56 +0200 Subject: perf_counter: Sanitize counter->mutex s/counter->mutex/counter->child_mutex/ and make sure its only used to protect child_list. The usage in __perf_counter_exit_task() doesn't appear to be problematic since ctx->mutex also covers anything related to fd tear-down. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090523163012.533186528@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 7 +++---- kernel/perf_counter.c | 47 ++++++++++++++++++-------------------------- 2 files changed, 22 insertions(+), 32 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 23ddd29730f..4ab8050eb9e 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -452,9 +452,6 @@ struct perf_counter { struct perf_counter_context *ctx; struct file *filp; - struct perf_counter *parent; - struct list_head child_list; - /* * These accumulate total time (in nanoseconds) that children * counters have been enabled and running, respectively. @@ -465,7 +462,9 @@ struct perf_counter { /* * Protect attach/detach and child_list: */ - struct mutex mutex; + struct mutex child_mutex; + struct list_head child_list; + struct perf_counter *parent; int oncpu; int cpu; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 2f410ea2cb3..679c3b5bb7d 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -111,6 +111,10 @@ static void put_ctx(struct perf_counter_context *ctx) } } +/* + * Add a counter from the lists for its context. + * Must be called with ctx->mutex and ctx->lock held. + */ static void list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) { @@ -136,7 +140,7 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) /* * Remove a counter from the lists for its context. 
- * Must be called with counter->mutex and ctx->mutex held. + * Must be called with ctx->mutex and ctx->lock held. */ static void list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) @@ -276,7 +280,7 @@ static void __perf_counter_remove_from_context(void *info) /* * Remove the counter from a task's (or a CPU's) list of counters. * - * Must be called with counter->mutex and ctx->mutex held. + * Must be called with ctx->mutex held. * * CPU counters are removed with a smp call. For task counters we only * call when the task is on a CPU. @@ -1407,11 +1411,7 @@ static int perf_release(struct inode *inode, struct file *file) file->private_data = NULL; mutex_lock(&ctx->mutex); - mutex_lock(&counter->mutex); - perf_counter_remove_from_context(counter); - - mutex_unlock(&counter->mutex); mutex_unlock(&ctx->mutex); free_counter(counter); @@ -1437,7 +1437,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) if (counter->state == PERF_COUNTER_STATE_ERROR) return 0; - mutex_lock(&counter->mutex); + mutex_lock(&counter->child_mutex); values[0] = perf_counter_read(counter); n = 1; if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) @@ -1446,7 +1446,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) values[n++] = counter->total_time_running + atomic64_read(&counter->child_total_time_running); - mutex_unlock(&counter->mutex); + mutex_unlock(&counter->child_mutex); if (count < n * sizeof(u64)) return -EINVAL; @@ -1510,11 +1510,11 @@ static void perf_counter_for_each_child(struct perf_counter *counter, { struct perf_counter *child; - mutex_lock(&counter->mutex); + mutex_lock(&counter->child_mutex); func(counter); list_for_each_entry(child, &counter->child_list, child_list) func(child); - mutex_unlock(&counter->mutex); + mutex_unlock(&counter->child_mutex); } static void perf_counter_for_each(struct perf_counter *counter, @@ -1522,11 +1522,11 @@ static void perf_counter_for_each(struct perf_counter *counter, { struct perf_counter *child; - mutex_lock(&counter->mutex); + mutex_lock(&counter->child_mutex); perf_counter_for_each_sibling(counter, func); list_for_each_entry(child, &counter->child_list, child_list) perf_counter_for_each_sibling(child, func); - mutex_unlock(&counter->mutex); + mutex_unlock(&counter->child_mutex); } static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) @@ -3106,7 +3106,9 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, if (!group_leader) group_leader = counter; - mutex_init(&counter->mutex); + mutex_init(&counter->child_mutex); + INIT_LIST_HEAD(&counter->child_list); + INIT_LIST_HEAD(&counter->list_entry); INIT_LIST_HEAD(&counter->event_entry); INIT_LIST_HEAD(&counter->sibling_list); @@ -3114,8 +3116,6 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, mutex_init(&counter->mmap_mutex); - INIT_LIST_HEAD(&counter->child_list); - counter->cpu = cpu; counter->hw_event = *hw_event; counter->group_leader = group_leader; @@ -3346,10 +3346,9 @@ inherit_counter(struct perf_counter *parent_counter, /* * Link this into the parent counter's child list */ - mutex_lock(&parent_counter->mutex); + mutex_lock(&parent_counter->child_mutex); list_add_tail(&child_counter->child_list, &parent_counter->child_list); - - mutex_unlock(&parent_counter->mutex); + mutex_unlock(&parent_counter->child_mutex); return child_counter; } @@ -3396,9 +3395,9 @@ static void sync_child_counter(struct 
perf_counter *child_counter, /* * Remove this counter from the parent's list */ - mutex_lock(&parent_counter->mutex); + mutex_lock(&parent_counter->child_mutex); list_del_init(&child_counter->child_list); - mutex_unlock(&parent_counter->mutex); + mutex_unlock(&parent_counter->child_mutex); /* * Release the parent counter, if this was the last @@ -3414,17 +3413,9 @@ __perf_counter_exit_task(struct task_struct *child, { struct perf_counter *parent_counter; - /* - * Protect against concurrent operations on child_counter - * due its fd getting closed, etc. - */ - mutex_lock(&child_counter->mutex); - update_counter_times(child_counter); list_del_counter(child_counter, child_ctx); - mutex_unlock(&child_counter->mutex); - parent_counter = child_counter->parent; /* * It can happen that parent exits first, and has counters -- cgit v1.2.3 From 682076ae1de0aba9c2da509f7b19dc03e30a6e1f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 23 May 2009 18:28:57 +0200 Subject: perf_counter: Sanitize context locking Ensure we're consistent with the context locks. context->mutex context->lock list_{add,del}_counter(); so that either lock is sufficient to stabilize the context. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090523163012.618790733@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 679c3b5bb7d..d162d2f0b27 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -597,6 +597,8 @@ static void add_counter_to_ctx(struct perf_counter *counter, /* * Cross CPU call to install and enable a performance counter + * + * Must be called with ctx->mutex held */ static void __perf_install_in_context(void *info) { @@ -1496,13 +1498,13 @@ static void perf_counter_for_each_sibling(struct perf_counter *counter, struct perf_counter_context *ctx = counter->ctx; struct perf_counter *sibling; - spin_lock_irq(&ctx->lock); + mutex_lock(&ctx->mutex); counter = counter->group_leader; func(counter); list_for_each_entry(sibling, &counter->sibling_list, list_entry) func(sibling); - spin_unlock_irq(&ctx->lock); + mutex_unlock(&ctx->mutex); } static void perf_counter_for_each_child(struct perf_counter *counter, @@ -3414,7 +3416,10 @@ __perf_counter_exit_task(struct task_struct *child, struct perf_counter *parent_counter; update_counter_times(child_counter); + + spin_lock_irq(&child_ctx->lock); list_del_counter(child_counter, child_ctx); + spin_unlock_irq(&child_ctx->lock); parent_counter = child_counter->parent; /* -- cgit v1.2.3 From 1a482f38c5aafeb3576079a38a5b21b46619f3d2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 23 May 2009 18:28:58 +0200 Subject: perf_counter: Fix userspace build recent userspace (F11) seems to already include the linux/unistd.h bits which means we cannot include the version in the kernel sources due to the header guards being the same. Ensure we include the kernel version first. 
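The failure mode is worth spelling out: two headers that happen to share an include guard are mutually exclusive, and whichever is seen first wins. A minimal sketch (guard name illustrative):

    /* kernel copy, e.g. ../../include/linux/unistd.h */
    #ifndef _LINUX_UNISTD_H
    #define _LINUX_UNISTD_H
    #define FROM_KERNEL_COPY 1
    #endif

    /* system copy, /usr/include/linux/unistd.h, same guard */
    #ifndef _LINUX_UNISTD_H
    #define _LINUX_UNISTD_H
    #define FROM_SYSTEM_COPY 1
    #endif

After the preprocessor runs, only FROM_KERNEL_COPY is defined: the second body is skipped entirely because the guard macro already exists. Hence the reordering below, so that perf.h pulls in the kernel copy before any system header can claim the guard.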
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090523163012.739756497@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 3 +-- Documentation/perf_counter/builtin-stat.c | 5 +---- Documentation/perf_counter/builtin-top.c | 5 +---- Documentation/perf_counter/perf.h | 31 ++++++++++++++++++----------- 4 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index efb87595f3c..1b19f187d35 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -1,5 +1,6 @@ +#include "perf.h" #include "util/util.h" #include @@ -30,9 +31,7 @@ #include #include -#include "../../include/linux/perf_counter.h" -#include "perf.h" #define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) #define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index 03518d75d86..8ae01d51f29 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -61,6 +61,7 @@ * Released under the GPL v2. (and only v2, not any later version) */ +#include "perf.h" #include "util/util.h" #include @@ -83,10 +84,6 @@ #include #include -#include "../../include/linux/perf_counter.h" - -#include "perf.h" - #define EVENT_MASK_KERNEL 1 #define EVENT_MASK_USER 2 diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 814b2e4925e..a3216a6018c 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -42,6 +42,7 @@ * Released under the GPL v2. (and only v2, not any later version) */ +#include "perf.h" #include "util/util.h" #include @@ -64,10 +65,6 @@ #include #include -#include "../../include/linux/perf_counter.h" - -#include "perf.h" - static int system_wide = 0; static int nr_counters = 0; diff --git a/Documentation/perf_counter/perf.h b/Documentation/perf_counter/perf.h index 81a737444c8..a517683fc66 100644 --- a/Documentation/perf_counter/perf.h +++ b/Documentation/perf_counter/perf.h @@ -1,6 +1,25 @@ #ifndef _PERF_PERF_H #define _PERF_PERF_H +#if defined(__x86_64__) || defined(__i386__) +#include "../../arch/x86/include/asm/unistd.h" +#define rmb() asm volatile("lfence" ::: "memory") +#define cpu_relax() asm volatile("rep; nop" ::: "memory"); +#endif + +#ifdef __powerpc__ +#include "../../arch/powerpc/include/asm/unistd.h" +#define rmb() asm volatile ("sync" ::: "memory") +#define cpu_relax() asm volatile ("" ::: "memory"); +#endif + +#include +#include +#include +#include + +#include "../../include/linux/perf_counter.h" + /* * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all * counters in the current task. 
@@ -26,18 +45,6 @@ static inline unsigned long long rdclock(void) #define __user #define asmlinkage -#if defined(__x86_64__) || defined(__i386__) -#include "../../arch/x86/include/asm/unistd.h" -#define rmb() asm volatile("lfence" ::: "memory") -#define cpu_relax() asm volatile("rep; nop" ::: "memory"); -#endif - -#ifdef __powerpc__ -#include "../../arch/powerpc/include/asm/unistd.h" -#define rmb() asm volatile ("sync" ::: "memory") -#define cpu_relax() asm volatile ("" ::: "memory"); -#endif - #define unlikely(x) __builtin_expect(!!(x), 0) #define min(x, y) ({ \ typeof(x) _min1 = (x); \ -- cgit v1.2.3 From aa9c67f53d1969cf1db4c9c2db3a78c4ceb96469 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 23 May 2009 18:28:59 +0200 Subject: perf_counter: Simplify context cleanup Use perf_counter_remove_from_context() to remove counters from the context. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090523163012.796275849@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index d162d2f0b27..0e97f896133 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -3416,10 +3416,7 @@ __perf_counter_exit_task(struct task_struct *child, struct perf_counter *parent_counter; update_counter_times(child_counter); - - spin_lock_irq(&child_ctx->lock); - list_del_counter(child_counter, child_ctx); - spin_unlock_irq(&child_ctx->lock); + perf_counter_remove_from_context(child_counter); parent_counter = child_counter->parent; /* -- cgit v1.2.3 From 082ff5a2767a0679ee543f14883adbafb631ffbe Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 23 May 2009 18:29:00 +0200 Subject: perf_counter: Change pctrl() behaviour Instead of en/dis-abling all counters acting on a particular task, en/dis- able all counters we created. [ v2: fix crash on first counter enable ] Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090523163012.916937244@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/init_task.h | 10 +++++ include/linux/perf_counter.h | 3 ++ include/linux/sched.h | 2 + kernel/perf_counter.c | 87 ++++++++++++-------------------------------- 4 files changed, 39 insertions(+), 63 deletions(-) diff --git a/include/linux/init_task.h b/include/linux/init_task.h index d87247d2641..353c0ac7723 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -108,6 +108,15 @@ extern struct group_info init_groups; extern struct cred init_cred; +#ifdef CONFIG_PERF_COUNTERS +# define INIT_PERF_COUNTERS(tsk) \ + .perf_counter_mutex = \ + __MUTEX_INITIALIZER(tsk.perf_counter_mutex), \ + .perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list), +#else +# define INIT_PERF_COUNTERS(tsk) +#endif + /* * INIT_TASK is used to set up the first task table, touch at * your own risk!. 
Base=0, limit=0x1fffff (=2MB) @@ -171,6 +180,7 @@ extern struct cred init_cred; }, \ .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ INIT_IDS \ + INIT_PERF_COUNTERS(tsk) \ INIT_TRACE_IRQFLAGS \ INIT_LOCKDEP \ INIT_FTRACE_GRAPH \ diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 4ab8050eb9e..4159ee5940f 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -469,6 +469,9 @@ struct perf_counter { int oncpu; int cpu; + struct list_head owner_entry; + struct task_struct *owner; + /* mmap bits */ struct mutex mmap_mutex; atomic_t mmap_count; diff --git a/include/linux/sched.h b/include/linux/sched.h index 9714d450f41..bc9326dcdde 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1389,6 +1389,8 @@ struct task_struct { #endif #ifdef CONFIG_PERF_COUNTERS struct perf_counter_context *perf_counter_ctxp; + struct mutex perf_counter_mutex; + struct list_head perf_counter_list; #endif #ifdef CONFIG_NUMA struct mempolicy *mempolicy; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 0e97f896133..4c86a636976 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1076,79 +1076,26 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) __perf_counter_sched_in(ctx, cpuctx, cpu); } -int perf_counter_task_disable(void) +int perf_counter_task_enable(void) { - struct task_struct *curr = current; - struct perf_counter_context *ctx = curr->perf_counter_ctxp; struct perf_counter *counter; - unsigned long flags; - - if (!ctx || !ctx->nr_counters) - return 0; - - local_irq_save(flags); - __perf_counter_task_sched_out(ctx); - - spin_lock(&ctx->lock); - - /* - * Disable all the counters: - */ - perf_disable(); - - list_for_each_entry(counter, &ctx->counter_list, list_entry) { - if (counter->state != PERF_COUNTER_STATE_ERROR) { - update_group_times(counter); - counter->state = PERF_COUNTER_STATE_OFF; - } - } - - perf_enable(); - - spin_unlock_irqrestore(&ctx->lock, flags); + mutex_lock(¤t->perf_counter_mutex); + list_for_each_entry(counter, ¤t->perf_counter_list, owner_entry) + perf_counter_enable(counter); + mutex_unlock(¤t->perf_counter_mutex); return 0; } -int perf_counter_task_enable(void) +int perf_counter_task_disable(void) { - struct task_struct *curr = current; - struct perf_counter_context *ctx = curr->perf_counter_ctxp; struct perf_counter *counter; - unsigned long flags; - int cpu; - - if (!ctx || !ctx->nr_counters) - return 0; - - local_irq_save(flags); - cpu = smp_processor_id(); - - __perf_counter_task_sched_out(ctx); - - spin_lock(&ctx->lock); - /* - * Disable all the counters: - */ - perf_disable(); - - list_for_each_entry(counter, &ctx->counter_list, list_entry) { - if (counter->state > PERF_COUNTER_STATE_OFF) - continue; - counter->state = PERF_COUNTER_STATE_INACTIVE; - counter->tstamp_enabled = - ctx->time - counter->total_time_enabled; - counter->hw_event.disabled = 0; - } - perf_enable(); - - spin_unlock(&ctx->lock); - - perf_counter_task_sched_in(curr, cpu); - - local_irq_restore(flags); + mutex_lock(¤t->perf_counter_mutex); + list_for_each_entry(counter, ¤t->perf_counter_list, owner_entry) + perf_counter_disable(counter); + mutex_unlock(¤t->perf_counter_mutex); return 0; } @@ -1416,6 +1363,11 @@ static int perf_release(struct inode *inode, struct file *file) perf_counter_remove_from_context(counter); mutex_unlock(&ctx->mutex); + mutex_lock(&counter->owner->perf_counter_mutex); + list_del_init(&counter->owner_entry); + mutex_unlock(&counter->owner->perf_counter_mutex); + 
put_task_struct(counter->owner); + free_counter(counter); put_context(ctx); @@ -3272,6 +3224,12 @@ SYSCALL_DEFINE5(perf_counter_open, perf_install_in_context(ctx, counter, cpu); mutex_unlock(&ctx->mutex); + counter->owner = current; + get_task_struct(current); + mutex_lock(¤t->perf_counter_mutex); + list_add_tail(&counter->owner_entry, ¤t->perf_counter_list); + mutex_unlock(¤t->perf_counter_mutex); + fput_light(counter_file, fput_needed2); out_fput: @@ -3488,6 +3446,9 @@ void perf_counter_init_task(struct task_struct *child) child->perf_counter_ctxp = NULL; + mutex_init(&child->perf_counter_mutex); + INIT_LIST_HEAD(&child->perf_counter_list); + /* * This is executed from the parent task context, so inherit * counters that have been marked for cloning. -- cgit v1.2.3 From 475c55797323b67435083f6e2eb8ee670f6410ec Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 23 May 2009 18:29:01 +0200 Subject: perf_counter: Remove perf_counter_context::nr_enabled now that pctrl() no longer disables other people's counters, remove the PMU cache code that deals with that. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090523163013.032998331@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 1 - kernel/perf_counter.c | 11 +---------- 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 4159ee5940f..2ddf5e3c551 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -516,7 +516,6 @@ struct perf_counter_context { struct list_head event_list; int nr_counters; int nr_active; - int nr_enabled; int is_active; atomic_t refcount; struct task_struct *task; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 4c86a636976..cb4062559b4 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -134,8 +134,6 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) list_add_rcu(&counter->event_entry, &ctx->event_list); ctx->nr_counters++; - if (counter->state >= PERF_COUNTER_STATE_INACTIVE) - ctx->nr_enabled++; } /* @@ -150,8 +148,6 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) if (list_empty(&counter->list_entry)) return; ctx->nr_counters--; - if (counter->state >= PERF_COUNTER_STATE_INACTIVE) - ctx->nr_enabled--; list_del_init(&counter->list_entry); list_del_rcu(&counter->event_entry); @@ -406,7 +402,6 @@ static void __perf_counter_disable(void *info) else counter_sched_out(counter, cpuctx, ctx); counter->state = PERF_COUNTER_STATE_OFF; - ctx->nr_enabled--; } spin_unlock_irqrestore(&ctx->lock, flags); @@ -448,7 +443,6 @@ static void perf_counter_disable(struct perf_counter *counter) if (counter->state == PERF_COUNTER_STATE_INACTIVE) { update_counter_times(counter); counter->state = PERF_COUNTER_STATE_OFF; - ctx->nr_enabled--; } spin_unlock_irq(&ctx->lock); @@ -759,7 +753,6 @@ static void __perf_counter_enable(void *info) goto unlock; counter->state = PERF_COUNTER_STATE_INACTIVE; counter->tstamp_enabled = ctx->time - counter->total_time_enabled; - ctx->nr_enabled++; /* * If the counter is in a group and isn't the group leader, @@ -850,7 +843,6 @@ static void perf_counter_enable(struct perf_counter *counter) counter->state = PERF_COUNTER_STATE_INACTIVE; counter->tstamp_enabled = ctx->time - counter->total_time_enabled; - ctx->nr_enabled++; } out: spin_unlock_irq(&ctx->lock); @@ -910,8 +902,7 @@ static int context_equiv(struct 
perf_counter_context *ctx1, struct perf_counter_context *ctx2) { return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx - && ctx1->parent_gen == ctx2->parent_gen - && ctx1->nr_enabled == ctx2->nr_enabled; + && ctx1->parent_gen == ctx2->parent_gen; } /* -- cgit v1.2.3 From c2990a2a582d73562d4dcf2502c39892a19a691d Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sun, 24 May 2009 08:35:49 +0200 Subject: perf top: fix segfault c6eb13 increased stack usage such that perf-top now croaks on startup. Take event_array and mmap_array off the stack to prevent segfault on boxen with smallish ulimit -s setting. Signed-off-by: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-top.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index a3216a6018c..74021ac90f5 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -1035,10 +1035,11 @@ static void mmap_read(struct mmap_data *md) md->prev = old; } +static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; +static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; + int cmd_top(int argc, char **argv, const char *prefix) { - struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; - struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; struct perf_counter_hw_event hw_event; pthread_t thread; int i, counter, group_fd, nr_poll = 0; -- cgit v1.2.3 From a3862d3f814ce7dfca9eed56ac23d29db3aee8d5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 24 May 2009 09:02:37 +0200 Subject: perf_counter: Increase mmap limit In a default 'perf top' run the tool will create a counter for each online CPU. With enough CPUs this will eventually exhaust the default limit. So scale it up with the number of online CPUs. Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur Cc: Mike Galbraith LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index cb4062559b4..6cdf8248eda 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1704,6 +1704,12 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) user_extra = nr_pages + 1; user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10); + + /* + * Increase the limit linearly with more CPUs: + */ + user_lock_limit *= num_online_cpus(); + user_locked = atomic_long_read(&user->locked_vm) + user_extra; extra = 0; -- cgit v1.2.3 From 85a9f9200226ddffc2ea50dae6a8df04c033ecd4 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 25 May 2009 09:59:50 +0200 Subject: perf_counter tools: increase limits, fix NR_CPUS and NR_COUNTERS goes up quadratic ... 1024x4096 was far too ambitious upper limit - go for 256x256 which is still plenty. 
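To see why 1024x4096 was too ambitious: builtin-top.c keeps static per-cpu, per-counter arrays, so memory grows with the product of the two limits. A rough back-of-the-envelope calculation (struct layout simplified here; the real structs differ a little in size but scale the same way):

    #include <stdio.h>

    /* simplified stand-in for the mmap_data slots in builtin-top.c */
    struct mmap_data { void *base; unsigned int mask; unsigned int prev; };

    int main(void)
    {
    	printf("1024 counters x 4096 CPUs: %zu MB\n",
    	       sizeof(struct mmap_data) * 1024 * 4096 >> 20);
    	printf(" 256 counters x  256 CPUs: %zu KB\n",
    	       sizeof(struct mmap_data) * 256 * 256 >> 10);
    	return 0;
    }

With a 16-byte element that is 64 MB for the old limits versus 1 MB for the new ones, before even counting the pollfd array on top.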
[ Impact: reduce perf tool memory consumption ] Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur Cc: Mike Galbraith LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/perf.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/perf_counter/perf.h b/Documentation/perf_counter/perf.h index a517683fc66..5a2520bb7e5 100644 --- a/Documentation/perf_counter/perf.h +++ b/Documentation/perf_counter/perf.h @@ -61,8 +61,8 @@ sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr, group_fd, flags); } -#define MAX_COUNTERS 1024 -#define MAX_NR_CPUS 4096 +#define MAX_COUNTERS 256 +#define MAX_NR_CPUS 256 #define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id)) -- cgit v1.2.3 From d94b943054721c346b0881865d645f000cd19880 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Mon, 25 May 2009 09:57:56 +0200 Subject: perf top: Reduce display overhead Iterate over the symbol table once per display interval, and copy/sort/tally/decay only those symbols which are active. Before: top - 10:14:53 up 4:08, 17 users, load average: 1.17, 1.53, 1.49 Tasks: 273 total, 5 running, 268 sleeping, 0 stopped, 0 zombie Cpu(s): 6.9%us, 38.2%sy, 0.0%ni, 19.9%id, 0.0%wa, 0.0%hi, 35.0%si, 0.0%st PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND 28504 root 20 0 1044 260 164 S 58 0.0 0:04.19 2 netserver 28499 root 20 0 1040 412 316 R 51 0.0 0:04.15 0 netperf 28500 root 20 0 1040 408 316 R 50 0.0 0:04.14 1 netperf 28503 root 20 0 1044 260 164 S 50 0.0 0:04.01 1 netserver 28501 root 20 0 1044 260 164 S 49 0.0 0:03.99 0 netserver 28502 root 20 0 1040 412 316 S 43 0.0 0:03.96 2 netperf 28468 root 20 0 1892m 325m 972 S 16 10.8 0:10.50 3 perf 28467 root 20 0 1892m 325m 972 R 2 10.8 0:00.72 3 perf After: top - 10:16:30 up 4:10, 17 users, load average: 2.27, 1.88, 1.62 Tasks: 273 total, 6 running, 267 sleeping, 0 stopped, 0 zombie Cpu(s): 2.5%us, 39.7%sy, 0.0%ni, 24.6%id, 0.0%wa, 0.0%hi, 33.3%si, 0.0%st PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND 28590 root 20 0 1040 412 316 S 54 0.0 0:07.85 2 netperf 28589 root 20 0 1044 260 164 R 54 0.0 0:07.84 0 netserver 28588 root 20 0 1040 412 316 R 50 0.0 0:07.89 1 netperf 28591 root 20 0 1044 256 164 S 50 0.0 0:07.82 1 netserver 28587 root 20 0 1040 408 316 R 47 0.0 0:07.61 0 netperf 28592 root 20 0 1044 260 164 R 47 0.0 0:07.85 2 netserver 28378 root 20 0 8732 1300 860 R 2 0.0 0:01.81 3 top 28577 root 20 0 1892m 165m 972 R 2 5.5 0:00.48 3 perf 28578 root 20 0 1892m 165m 972 S 2 5.5 0:00.04 3 perf [ Impact: optimization ] Signed-off-by: Mike Galbraith Acked-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-top.c | 54 +++++++++++++++++--------------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 74021ac90f5..4bed265926d 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -374,18 +374,26 @@ static struct sym_entry tmp[MAX_SYMS]; static void print_sym_table(void) { - int i, printed; + int i, j, active_count, printed; int counter; float events_per_sec = events/delay_secs; float kevents_per_sec = (events-userspace_events)/delay_secs; float sum_kevents = 0.0; events = userspace_events = 0; - memcpy(tmp, sym_table, sizeof(sym_table[0])*sym_table_count); - qsort(tmp, sym_table_count, sizeof(tmp[0]), compare); - for (i = 0; i < sym_table_count && 
tmp[i].count[0]; i++) - sum_kevents += tmp[i].count[0]; + /* Iterate over symbol table and copy/tally/decay active symbols. */ + for (i = 0, active_count = 0; i < sym_table_count; i++) { + if (sym_table[i].count[0]) { + tmp[active_count++] = sym_table[i]; + sum_kevents += sym_table[i].count[0]; + + for (j = 0; j < nr_counters; j++) + sym_table[i].count[j] = zero ? 0 : sym_table[i].count[j] * 7 / 8; + } + } + + qsort(tmp, active_count + 1, sizeof(tmp[0]), compare); write(1, CONSOLE_CLEAR, strlen(CONSOLE_CLEAR)); @@ -433,29 +441,23 @@ static void print_sym_table(void) " ______ ______ _____ ________________ _______________\n\n" ); - for (i = 0, printed = 0; i < sym_table_count; i++) { + for (i = 0, printed = 0; i < active_count; i++) { float pcnt; - int count; - if (printed <= 18 && tmp[i].count[0] >= count_filter) { - pcnt = 100.0 - (100.0*((sum_kevents-tmp[i].count[0])/sum_kevents)); - - if (nr_counters == 1) - printf("%19.2f - %4.1f%% - %016llx : %s\n", - sym_weight(tmp + i), - pcnt, tmp[i].addr, tmp[i].sym); - else - printf("%8.1f %10ld - %4.1f%% - %016llx : %s\n", - sym_weight(tmp + i), - tmp[i].count[0], - pcnt, tmp[i].addr, tmp[i].sym); - printed++; - } - /* - * Add decay to the counts: - */ - for (count = 0; count < nr_counters; count++) - sym_table[i].count[count] = zero ? 0 : sym_table[i].count[count] * 7 / 8; + if (++printed > 18 || tmp[i].count[0] < count_filter) + break; + + pcnt = 100.0 - (100.0*((sum_kevents-tmp[i].count[0])/sum_kevents)); + + if (nr_counters == 1) + printf("%19.2f - %4.1f%% - %016llx : %s\n", + sym_weight(tmp + i), + pcnt, tmp[i].addr, tmp[i].sym); + else + printf("%8.1f %10ld - %4.1f%% - %016llx : %s\n", + sym_weight(tmp + i), + tmp[i].count[0], + pcnt, tmp[i].addr, tmp[i].sym); } if (sym_filter_entry) -- cgit v1.2.3 From e4cbb4e3ac8b09fdb11e39e5a5611bfab0a7cd1a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 19 May 2009 15:50:30 +0200 Subject: perf_counter: Move child perfcounter init to after scheduler init Initialize a task's perfcounters (inherit from parent, etc.) after the child task's scheduler fields have been initialized already. [ Impact: cleanup ] Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur Cc: Mike Galbraith LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/fork.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/fork.c b/kernel/fork.c index e72a09f5355..675e01e9072 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -984,7 +984,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, goto fork_out; rt_mutex_init_task(p); - perf_counter_init_task(p); #ifdef CONFIG_PROVE_LOCKING DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); @@ -1096,6 +1095,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, /* Perform scheduler related setup. Assign this task to a CPU. */ sched_fork(p, clone_flags); + perf_counter_init_task(p); if ((retval = audit_alloc(p))) goto bad_fork_cleanup_policy; -- cgit v1.2.3 From d3f4b3855ba87caff8f35e738c7e7e3bad0a6ab1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 25 May 2009 14:40:01 +0200 Subject: perf stat: flip around ':k' and ':u' flags This output: $ perf stat -e 0:1:k -e 0:1:u ./hello Performance counter stats for './hello': 140131 instructions (events) 1906968 instructions (events) Is quite confusing - as :k means "user instructions", :u means "kernel instructions". Flip them around - as the 'exclude' property is not intuitive in the flag naming. 
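A minimal sketch of the mapping after the flip (the EVENT_MASK_* bit values and the helper are illustrative, the names mirror builtin-stat.c):

#include <string.h>

#define EVENT_MASK_USER         (1 << 0)        /* bit values are assumed */
#define EVENT_MASK_KERNEL       (1 << 1)

static int event_mask[64];

/*
 * ':u' counts user space only, so it sets the exclude-kernel bit;
 * ':k' counts kernel space only, so it sets the exclude-user bit.
 */
static void apply_event_modifiers(const char *mask_str, int idx)
{
        if (strchr(mask_str, 'u'))
                event_mask[idx] |= EVENT_MASK_KERNEL;
        if (strchr(mask_str, 'k'))
                event_mask[idx] |= EVENT_MASK_USER;
}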
Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur Cc: Mike Galbraith LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-stat.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index 8ae01d51f29..88c70be9903 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -266,9 +266,9 @@ static __u64 match_event_symbols(char *str) switch (sscanf(str, "%d:%llu:%2s", &type, &id, mask_str)) { case 3: - if (strchr(mask_str, 'u')) - event_mask[nr_counters] |= EVENT_MASK_USER; if (strchr(mask_str, 'k')) + event_mask[nr_counters] |= EVENT_MASK_USER; + if (strchr(mask_str, 'u')) event_mask[nr_counters] |= EVENT_MASK_KERNEL; case 2: return EID(type, id); -- cgit v1.2.3 From 266dfb0b58bc4181b6158ee63a0069abaa9f3a98 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 25 May 2009 14:45:24 +0200 Subject: perf_counter: Fix perf-$cmd invocation Fix: $ perf-top fatal: cannot handle -top internally Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090525124559.995591577@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/perf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/perf_counter/perf.c b/Documentation/perf_counter/perf.c index 594d270be39..1d6d7aa575a 100644 --- a/Documentation/perf_counter/perf.c +++ b/Documentation/perf_counter/perf.c @@ -357,7 +357,7 @@ int main(int argc, const char **argv) * die if that one cannot handle it. */ if (!prefixcmp(cmd, "perf-")) { - cmd += 4; + cmd += 5; argv[0] = cmd; handle_internal_command(argc, argv); die("cannot handle %s internally", cmd); -- cgit v1.2.3 From e527ea312f31e88a7fa5472b71db71c565b0d44f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 25 May 2009 14:45:25 +0200 Subject: perf_counter: Remove unused ABI bits extra_config_len isn't used for anything, remove it. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090525124600.116035832@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 2ddf5e3c551..b1f2bac09f9 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -154,11 +154,11 @@ struct perf_counter_hw_event { __reserved_1 : 51; - __u32 extra_config_len; __u32 wakeup_events; /* wakeup every n events */ + __u32 __reserved_2; - __u64 __reserved_2; __u64 __reserved_3; + __u64 __reserved_4; }; /* -- cgit v1.2.3 From 771d7cde144d87f2d1fbee4da3c6234d61f7e42a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 25 May 2009 14:45:26 +0200 Subject: perf_counter: Make prctl() affect inherited counters too Paul noted that the new prctl() didn't work on child counters.
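For reference, a sketch of what such a child-walking helper can look like; the child_list name and the locking details are assumptions, the fix below only relies on perf_counter_for_each_child() applying a function to a counter and all counters inherited from it:

static void perf_counter_for_each_child(struct perf_counter *counter,
                                        void (*func)(struct perf_counter *))
{
        struct perf_counter *child;

        func(counter);
        mutex_lock(&counter->mutex);
        list_for_each_entry(child, &counter->child_list, child_list)
                func(child);
        mutex_unlock(&counter->mutex);
}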
Reported-by: Paul Mackerras Signed-off-by: Peter Zijlstra Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090525124600.203151469@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 6cdf8248eda..217dbcce2eb 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1067,30 +1067,6 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) __perf_counter_sched_in(ctx, cpuctx, cpu); } -int perf_counter_task_enable(void) -{ - struct perf_counter *counter; - - mutex_lock(¤t->perf_counter_mutex); - list_for_each_entry(counter, ¤t->perf_counter_list, owner_entry) - perf_counter_enable(counter); - mutex_unlock(¤t->perf_counter_mutex); - - return 0; -} - -int perf_counter_task_disable(void) -{ - struct perf_counter *counter; - - mutex_lock(¤t->perf_counter_mutex); - list_for_each_entry(counter, ¤t->perf_counter_list, owner_entry) - perf_counter_disable(counter); - mutex_unlock(¤t->perf_counter_mutex); - - return 0; -} - static void perf_log_period(struct perf_counter *counter, u64 period); static void perf_adjust_freq(struct perf_counter_context *ctx) @@ -1505,6 +1481,30 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return 0; } +int perf_counter_task_enable(void) +{ + struct perf_counter *counter; + + mutex_lock(¤t->perf_counter_mutex); + list_for_each_entry(counter, ¤t->perf_counter_list, owner_entry) + perf_counter_for_each_child(counter, perf_counter_enable); + mutex_unlock(¤t->perf_counter_mutex); + + return 0; +} + +int perf_counter_task_disable(void) +{ + struct perf_counter *counter; + + mutex_lock(¤t->perf_counter_mutex); + list_for_each_entry(counter, ¤t->perf_counter_list, owner_entry) + perf_counter_for_each_child(counter, perf_counter_disable); + mutex_unlock(¤t->perf_counter_mutex); + + return 0; +} + /* * Callers need to ensure there can be no nesting of this function, otherwise * the seqlock logic goes bad. We can not serialize this because the arch -- cgit v1.2.3 From 6ab423e0eaca827fbd201ca4ae7d4f8573a366b2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 25 May 2009 14:45:27 +0200 Subject: perf_counter: Propagate inheritance failures down the fork() path Fail fork() when we fail inheritance for some reason (-ENOMEM most likely). 
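Since perf_counter_init_task() now returns int, the !CONFIG_PERF_COUNTERS stub has to report success explicitly or every fork() would fail; a minimal sketch of the stub:

static inline int perf_counter_init_task(struct task_struct *child)
{
        return 0;       /* nothing to inherit, let fork() proceed */
}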
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090525124600.324656474@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 4 ++-- kernel/fork.c | 6 +++++- kernel/perf_counter.c | 20 ++++++++++++-------- 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index b1f2bac09f9..d3e85de9bf1 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -566,7 +566,7 @@ extern void perf_counter_task_sched_in(struct task_struct *task, int cpu); extern void perf_counter_task_sched_out(struct task_struct *task, struct task_struct *next, int cpu); extern void perf_counter_task_tick(struct task_struct *task, int cpu); -extern void perf_counter_init_task(struct task_struct *child); +extern int perf_counter_init_task(struct task_struct *child); extern void perf_counter_exit_task(struct task_struct *child); extern void perf_counter_do_pending(void); extern void perf_counter_print_debug(void); @@ -631,7 +631,7 @@ perf_counter_task_sched_out(struct task_struct *task, struct task_struct *next, int cpu) { } static inline void perf_counter_task_tick(struct task_struct *task, int cpu) { } -static inline void perf_counter_init_task(struct task_struct *child) { } +static inline int perf_counter_init_task(struct task_struct *child) { return 0; } static inline void perf_counter_exit_task(struct task_struct *child) { } static inline void perf_counter_do_pending(void) { } static inline void perf_counter_print_debug(void) { } diff --git a/kernel/fork.c b/kernel/fork.c index 675e01e9072..c07c3335cea 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1095,7 +1095,10 @@ static struct task_struct *copy_process(unsigned long clone_flags, /* Perform scheduler related setup. Assign this task to a CPU. */ sched_fork(p, clone_flags); - perf_counter_init_task(p); + + retval = perf_counter_init_task(p); + if (retval) + goto bad_fork_cleanup_policy; if ((retval = audit_alloc(p))) goto bad_fork_cleanup_policy; @@ -1295,6 +1298,7 @@ bad_fork_cleanup_semundo: bad_fork_cleanup_audit: audit_free(p); bad_fork_cleanup_policy: + perf_counter_exit_task(p); #ifdef CONFIG_NUMA mpol_put(p->mempolicy); bad_fork_cleanup_cgroup: diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 217dbcce2eb..7a7a144870e 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -3434,18 +3434,23 @@ again: /* * Initialize the perf_counter context in task_struct */ -void perf_counter_init_task(struct task_struct *child) +int perf_counter_init_task(struct task_struct *child) { struct perf_counter_context *child_ctx, *parent_ctx; struct perf_counter *counter; struct task_struct *parent = current; int inherited_all = 1; + int ret = 0; child->perf_counter_ctxp = NULL; mutex_init(&child->perf_counter_mutex); INIT_LIST_HEAD(&child->perf_counter_list); + parent_ctx = parent->perf_counter_ctxp; + if (likely(!parent_ctx || !parent_ctx->nr_counters)) + return 0; + /* * This is executed from the parent task context, so inherit * counters that have been marked for cloning.
@@ -3454,11 +3459,7 @@ void perf_counter_init_task(struct task_struct *child) child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); if (!child_ctx) - return; - - parent_ctx = parent->perf_counter_ctxp; - if (likely(!parent_ctx || !parent_ctx->nr_counters)) - return; + return -ENOMEM; __perf_counter_init_context(child_ctx, child); child->perf_counter_ctxp = child_ctx; @@ -3482,8 +3483,9 @@ void perf_counter_init_task(struct task_struct *child) continue; } - if (inherit_group(counter, parent, - parent_ctx, child, child_ctx)) { + ret = inherit_group(counter, parent, parent_ctx, + child, child_ctx); + if (ret) { inherited_all = 0; break; } @@ -3505,6 +3507,8 @@ void perf_counter_init_task(struct task_struct *child) } mutex_unlock(&parent_ctx->mutex); + + return ret; } static void __cpuinit perf_counter_init_cpu(int cpu) -- cgit v1.2.3 From 10989fb2451763fae6f42d85fa6106c8fd010cf5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 25 May 2009 14:45:28 +0200 Subject: perf_counter: Fix PERF_COUNTER_CONTEXT_SWITCHES for cpu counters Ingo noticed that cpu counters had 0 context switches, even though there was plenty scheduling on the cpu. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090525124600.419025548@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 7a7a144870e..14b1fe98483 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -924,14 +924,13 @@ void perf_counter_task_sched_out(struct task_struct *task, struct perf_counter_context *next_ctx; struct pt_regs *regs; + regs = task_pt_regs(task); + perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0); + if (likely(!ctx || !cpuctx->task_ctx)) return; update_context_time(ctx); - - regs = task_pt_regs(task); - perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0); - next_ctx = next->perf_counter_ctxp; if (next_ctx && context_equiv(ctx, next_ctx)) { task->perf_counter_ctxp = next_ctx; -- cgit v1.2.3 From ff99be573e02e9f7edc23b472c7f9a5ddba12795 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 25 May 2009 17:39:03 +0200 Subject: perf_counter: x86: Expose INV and EDGE bits Expose the INV and EDGE bits of the PMU to raw configs. 
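As an illustration, a raw config using these bits could be assembled like this (bit positions follow the CORE_EVNTSEL_* masks in the patch; the helper and its parameters are hypothetical):

#define EVNTSEL_EDGE    (1ULL << 18)    /* 0x00040000 */
#define EVNTSEL_INV     (1ULL << 23)    /* 0x00800000 */

/* build an rNNN-style raw event value from eventsel+umask */
static unsigned long long make_raw_config(unsigned int event,
                                          unsigned int umask,
                                          int edge, int inv)
{
        unsigned long long config;

        config = (event & 0xFFULL) | ((umask & 0xFFULL) << 8);
        if (edge)
                config |= EVNTSEL_EDGE;
        if (inv)
                config |= EVNTSEL_INV;

        return config;
}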
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090525153931.494709027@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 6cc1660db8d..c14437faf5d 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -87,11 +87,15 @@ static u64 intel_pmu_raw_event(u64 event) { #define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL +#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL +#define CORE_EVNTSEL_INV_MASK 0x00800000ULL #define CORE_EVNTSEL_COUNTER_MASK 0xFF000000ULL #define CORE_EVNTSEL_MASK \ (CORE_EVNTSEL_EVENT_MASK | \ CORE_EVNTSEL_UNIT_MASK | \ + CORE_EVNTSEL_EDGE_MASK | \ + CORE_EVNTSEL_INV_MASK | \ CORE_EVNTSEL_COUNTER_MASK) return event & CORE_EVNTSEL_MASK; @@ -119,11 +123,15 @@ static u64 amd_pmu_raw_event(u64 event) { #define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL #define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL +#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL +#define K7_EVNTSEL_INV_MASK 0x000800000ULL #define K7_EVNTSEL_COUNTER_MASK 0x0FF000000ULL #define K7_EVNTSEL_MASK \ (K7_EVNTSEL_EVENT_MASK | \ K7_EVNTSEL_UNIT_MASK | \ + K7_EVNTSEL_EDGE_MASK | \ + K7_EVNTSEL_INV_MASK | \ K7_EVNTSEL_COUNTER_MASK) return event & K7_EVNTSEL_MASK; -- cgit v1.2.3 From 48e22d56ecdeddd1ffb42a02fccba5c6ef42b133 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 25 May 2009 17:39:04 +0200 Subject: perf_counter: x86: Remove interrupt throttle remove the x86 specific interrupt throttle Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090525153931.616671838@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/apic.c | 2 -- arch/x86/kernel/cpu/perf_counter.c | 47 ++++---------------------------------- include/linux/perf_counter.h | 2 -- 3 files changed, 5 insertions(+), 46 deletions(-) diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index b4f64402a82..89b63b5fad3 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -763,8 +763,6 @@ static void local_apic_timer_interrupt(void) inc_irq_stat(apic_timer_irqs); evt->event_handler(evt); - - perf_counter_unthrottle(); } /* diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index c14437faf5d..8c8177f859f 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -718,11 +718,6 @@ static void intel_pmu_save_and_restart(struct perf_counter *counter) intel_pmu_enable_counter(hwc, idx); } -/* - * Maximum interrupt frequency of 100KHz per CPU - */ -#define PERFMON_MAX_INTERRUPTS (100000/HZ) - /* * This handler is triggered by the local APIC, so the APIC IRQ handling * rules apply: @@ -775,15 +770,14 @@ again: if (status) goto again; - if (++cpuc->interrupts != PERFMON_MAX_INTERRUPTS) - perf_enable(); + perf_enable(); return 1; } static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) { - int cpu, idx, throttle = 0, handled = 0; + int cpu, idx, handled = 0; struct cpu_hw_counters *cpuc; struct perf_counter *counter; struct hw_perf_counter *hwc; @@ -792,16 +786,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) cpu = smp_processor_id(); cpuc = &per_cpu(cpu_hw_counters, cpu); - if (++cpuc->interrupts == PERFMON_MAX_INTERRUPTS) { - throttle = 1; - 
__perf_disable(); - cpuc->enabled = 0; - barrier(); - } - for (idx = 0; idx < x86_pmu.num_counters; idx++) { - int disable = 0; - if (!test_bit(idx, cpuc->active_mask)) continue; @@ -809,45 +794,23 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) hwc = &counter->hw; if (counter->hw_event.nmi != nmi) - goto next; + continue; val = x86_perf_counter_update(counter, hwc, idx); if (val & (1ULL << (x86_pmu.counter_bits - 1))) - goto next; + continue; /* counter overflow */ x86_perf_counter_set_period(counter, hwc, idx); handled = 1; inc_irq_stat(apic_perf_irqs); - disable = perf_counter_overflow(counter, nmi, regs, 0); - -next: - if (disable || throttle) + if (perf_counter_overflow(counter, nmi, regs, 0)) amd_pmu_disable_counter(hwc, idx); } return handled; } -void perf_counter_unthrottle(void) -{ - struct cpu_hw_counters *cpuc; - - if (!x86_pmu_initialized()) - return; - - cpuc = &__get_cpu_var(cpu_hw_counters); - if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) { - /* - * Clear them before re-enabling irqs/NMIs again: - */ - cpuc->interrupts = 0; - perf_enable(); - } else { - cpuc->interrupts = 0; - } -} - void smp_perf_counter_interrupt(struct pt_regs *regs) { irq_enter(); diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index d3e85de9bf1..0c160be2078 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -570,7 +570,6 @@ extern int perf_counter_init_task(struct task_struct *child); extern void perf_counter_exit_task(struct task_struct *child); extern void perf_counter_do_pending(void); extern void perf_counter_print_debug(void); -extern void perf_counter_unthrottle(void); extern void __perf_disable(void); extern bool __perf_enable(void); extern void perf_disable(void); @@ -635,7 +634,6 @@ static inline int perf_counter_init_task(struct task_struct *child) { return 0; } static inline void perf_counter_exit_task(struct task_struct *child) { } static inline void perf_counter_do_pending(void) { } static inline void perf_counter_print_debug(void) { } -static inline void perf_counter_unthrottle(void) { } static inline void perf_disable(void) { } static inline void perf_enable(void) { } static inline int perf_counter_task_disable(void) { return -EINVAL; } -- cgit v1.2.3 From a78ac3258782f3e64cb40beb5990808e1febcc0c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 25 May 2009 17:39:05 +0200 Subject: perf_counter: Generic per counter interrupt throttle Introduce a generic per-counter interrupt throttle. This uses the perf_counter_overflow() quick disable to throttle a specific counter when it's going too fast, provided the pmu implements an unthrottle() method that can undo the quick disable. Power needs to implement both the quick disable and the unthrottle method.
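The throttle decision itself is a simple rate test; extracted as a standalone sketch (HZ and the sysctl default follow the patch, the helper is illustrative):

#define HZ              1000            /* assumed tick rate */
#define MAX_INTERRUPTS  (~0ULL)

static unsigned long long sysctl_perf_counter_limit = 100000;   /* per second */

/*
 * Returns 1 when the caller should quick-disable the counter and log a
 * PERF_EVENT_THROTTLE record; interrupts accumulate per tick, the limit
 * is per second, hence the HZ scaling.
 */
static int overflow_should_throttle(unsigned long long *interrupts)
{
        (*interrupts)++;
        if (HZ * *interrupts > sysctl_perf_counter_limit) {
                *interrupts = MAX_INTERRUPTS;   /* marks the counter throttled */
                return 1;
        }
        return 0;
}

The per-tick perf_adjust_freq() pass then spots counters whose interrupt count reads MAX_INTERRUPTS, logs the unthrottle event and calls pmu->unthrottle().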
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090525153931.703093461@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 13 +++++++++ include/linux/perf_counter.h | 11 +++++++ kernel/perf_counter.c | 59 +++++++++++++++++++++++++++++++++++--- kernel/sysctl.c | 8 ++++++ 4 files changed, 87 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 8c8177f859f..c4b543d1a86 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -623,6 +623,18 @@ try_generic: return 0; } +static void x86_pmu_unthrottle(struct perf_counter *counter) +{ + struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + struct hw_perf_counter *hwc = &counter->hw; + + if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX || + cpuc->counters[hwc->idx] != counter)) + return; + + x86_pmu.enable(hwc, hwc->idx); +} + void perf_counter_print_debug(void) { u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; @@ -1038,6 +1050,7 @@ static const struct pmu pmu = { .enable = x86_pmu_enable, .disable = x86_pmu_disable, .read = x86_pmu_read, + .unthrottle = x86_pmu_unthrottle, }; const struct pmu *hw_perf_counter_init(struct perf_counter *counter) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 0c160be2078..e3a7585d3e4 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -266,6 +266,15 @@ enum perf_event_type { */ PERF_EVENT_PERIOD = 4, + /* + * struct { + * struct perf_event_header header; + * u64 time; + * }; + */ + PERF_EVENT_THROTTLE = 5, + PERF_EVENT_UNTHROTTLE = 6, + /* * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field * will be PERF_RECORD_* @@ -367,6 +376,7 @@ struct pmu { int (*enable) (struct perf_counter *counter); void (*disable) (struct perf_counter *counter); void (*read) (struct perf_counter *counter); + void (*unthrottle) (struct perf_counter *counter); }; /** @@ -613,6 +623,7 @@ extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); extern int sysctl_perf_counter_priv; extern int sysctl_perf_counter_mlock; +extern int sysctl_perf_counter_limit; extern void perf_counter_init(void); diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 14b1fe98483..ec9c4007a7f 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -46,6 +46,7 @@ static atomic_t nr_comm_tracking __read_mostly; int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */ int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */ +int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */ /* * Lock for (sysadmin-configurable) counter reservations: @@ -1066,12 +1067,15 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) __perf_counter_sched_in(ctx, cpuctx, cpu); } +#define MAX_INTERRUPTS (~0ULL) + +static void perf_log_throttle(struct perf_counter *counter, int enable); static void perf_log_period(struct perf_counter *counter, u64 period); static void perf_adjust_freq(struct perf_counter_context *ctx) { struct perf_counter *counter; - u64 irq_period; + u64 interrupts, irq_period; u64 events, period; s64 delta; @@ -1080,10 +1084,19 @@ static void perf_adjust_freq(struct perf_counter_context *ctx) if (counter->state != PERF_COUNTER_STATE_ACTIVE) continue; + interrupts = counter->hw.interrupts; + counter->hw.interrupts = 0; + + if (interrupts 
== MAX_INTERRUPTS) { + perf_log_throttle(counter, 1); + counter->pmu->unthrottle(counter); + interrupts = 2*sysctl_perf_counter_limit/HZ; + } + if (!counter->hw_event.freq || !counter->hw_event.irq_freq) continue; - events = HZ * counter->hw.interrupts * counter->hw.irq_period; + events = HZ * interrupts * counter->hw.irq_period; period = div64_u64(events, counter->hw_event.irq_freq); delta = (s64)(1 + period - counter->hw.irq_period); @@ -1097,7 +1110,6 @@ static void perf_adjust_freq(struct perf_counter_context *ctx) perf_log_period(counter, irq_period); counter->hw.irq_period = irq_period; - counter->hw.interrupts = 0; } spin_unlock(&ctx->lock); } @@ -2543,6 +2555,35 @@ static void perf_log_period(struct perf_counter *counter, u64 period) perf_output_end(&handle); } +/* + * IRQ throttle logging + */ + +static void perf_log_throttle(struct perf_counter *counter, int enable) +{ + struct perf_output_handle handle; + int ret; + + struct { + struct perf_event_header header; + u64 time; + } throttle_event = { + .header = { + .type = PERF_EVENT_THROTTLE + 1, + .misc = 0, + .size = sizeof(throttle_event), + }, + .time = sched_clock(), + }; + + ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 0, 0); + if (ret) + return; + + perf_output_put(&handle, throttle_event); + perf_output_end(&handle); +} + /* * Generic counter overflow handling. */ @@ -2551,9 +2592,19 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi, struct pt_regs *regs, u64 addr) { int events = atomic_read(&counter->event_limit); + int throttle = counter->pmu->unthrottle != NULL; int ret = 0; - counter->hw.interrupts++; + if (!throttle) { + counter->hw.interrupts++; + } else if (counter->hw.interrupts != MAX_INTERRUPTS) { + counter->hw.interrupts++; + if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) { + counter->hw.interrupts = MAX_INTERRUPTS; + perf_log_throttle(counter, 0); + ret = 1; + } + } /* * XXX event_limit might not quite work as expected on inherited diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 3cb1849f598..0c4bf863afa 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -930,6 +930,14 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "perf_counter_int_limit", + .data = &sysctl_perf_counter_limit, + .maxlen = sizeof(sysctl_perf_counter_limit), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, #endif /* * NOTE: do not add new entries to this table unless you have read -- cgit v1.2.3 From 53b441a565bf4036ab49c8ea04c5ad06ace7dd6b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 25 May 2009 21:41:28 +0200 Subject: Revert "perf_counter, x86: speed up the scheduling fast-path" This reverts commit b68f1d2e7aa21029d73c7d453a8046e95d351740. It is causing problems (stuck/stuttering profiling) - when mixed NMI and non-NMI counters are used. 
Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090525153931.703093461@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index c4b543d1a86..189bf9d7cda 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -293,7 +293,6 @@ static int __hw_perf_counter_init(struct perf_counter *counter) return -EACCES; hwc->nmi = 1; } - perf_counters_lapic_init(hwc->nmi); if (!hwc->irq_period) hwc->irq_period = x86_pmu.max_period; @@ -612,6 +611,8 @@ try_generic: hwc->counter_base = x86_pmu.perfctr; } + perf_counters_lapic_init(hwc->nmi); + x86_pmu.disable(hwc, idx); cpuc->counters[idx] = counter; @@ -1037,7 +1038,7 @@ void __init init_hw_perf_counters(void) pr_info("... counter mask: %016Lx\n", perf_counter_mask); - perf_counters_lapic_init(1); + perf_counters_lapic_init(0); register_die_notifier(&perf_counter_nmi_notifier); } -- cgit v1.2.3 From 0127c3ea082ee9f1034789b978dfc7fd83254617 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 25 May 2009 22:03:26 +0200 Subject: perf_counter: fix warning & lockup - remove bogus warning - fix wakeup from NMI path lockup - also fix up whitespace noise in perf_counter.h Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090525153931.703093461@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 78 ++++++++++++++++++++++---------------------- kernel/perf_counter.c | 4 +-- 2 files changed, 40 insertions(+), 42 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index e3a7585d3e4..2b16ed37b74 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -73,7 +73,7 @@ enum sw_event_ids { PERF_SW_EVENTS_MAX = 7, }; -#define __PERF_COUNTER_MASK(name) \ +#define __PERF_COUNTER_MASK(name) \ (((1ULL << PERF_COUNTER_##name##_BITS) - 1) << \ PERF_COUNTER_##name##_SHIFT) @@ -98,14 +98,14 @@ enum sw_event_ids { * in the overflow packets. */ enum perf_counter_record_format { - PERF_RECORD_IP = 1U << 0, - PERF_RECORD_TID = 1U << 1, - PERF_RECORD_TIME = 1U << 2, - PERF_RECORD_ADDR = 1U << 3, - PERF_RECORD_GROUP = 1U << 4, - PERF_RECORD_CALLCHAIN = 1U << 5, - PERF_RECORD_CONFIG = 1U << 6, - PERF_RECORD_CPU = 1U << 7, + PERF_RECORD_IP = 1U << 0, + PERF_RECORD_TID = 1U << 1, + PERF_RECORD_TIME = 1U << 2, + PERF_RECORD_ADDR = 1U << 3, + PERF_RECORD_GROUP = 1U << 4, + PERF_RECORD_CALLCHAIN = 1U << 5, + PERF_RECORD_CONFIG = 1U << 6, + PERF_RECORD_CPU = 1U << 7, }; /* @@ -235,13 +235,13 @@ enum perf_event_type { * correlate userspace IPs to code. 
They have the following structure: * * struct { - * struct perf_event_header header; + * struct perf_event_header header; * - * u32 pid, tid; - * u64 addr; - * u64 len; - * u64 pgoff; - * char filename[]; + * u32 pid, tid; + * u64 addr; + * u64 len; + * u64 pgoff; + * char filename[]; * }; */ PERF_EVENT_MMAP = 1, @@ -249,27 +249,27 @@ enum perf_event_type { /* * struct { - * struct perf_event_header header; + * struct perf_event_header header; * - * u32 pid, tid; - * char comm[]; + * u32 pid, tid; + * char comm[]; * }; */ PERF_EVENT_COMM = 3, /* * struct { - * struct perf_event_header header; - * u64 time; - * u64 irq_period; + * struct perf_event_header header; + * u64 time; + * u64 irq_period; * }; */ PERF_EVENT_PERIOD = 4, /* * struct { - * struct perf_event_header header; - * u64 time; + * struct perf_event_header header; + * u64 time; * }; */ PERF_EVENT_THROTTLE = 5, @@ -280,23 +280,23 @@ enum perf_event_type { * will be PERF_RECORD_* * * struct { - * struct perf_event_header header; + * struct perf_event_header header; * - * { u64 ip; } && PERF_RECORD_IP - * { u32 pid, tid; } && PERF_RECORD_TID - * { u64 time; } && PERF_RECORD_TIME - * { u64 addr; } && PERF_RECORD_ADDR - * { u64 config; } && PERF_RECORD_CONFIG - * { u32 cpu, res; } && PERF_RECORD_CPU + * { u64 ip; } && PERF_RECORD_IP + * { u32 pid, tid; } && PERF_RECORD_TID + * { u64 time; } && PERF_RECORD_TIME + * { u64 addr; } && PERF_RECORD_ADDR + * { u64 config; } && PERF_RECORD_CONFIG + * { u32 cpu, res; } && PERF_RECORD_CPU * - * { u64 nr; - * { u64 event, val; } cnt[nr]; } && PERF_RECORD_GROUP + * { u64 nr; + * { u64 event, val; } cnt[nr]; } && PERF_RECORD_GROUP * - * { u16 nr, - * hv, - * kernel, - * user; - * u64 ips[nr]; } && PERF_RECORD_CALLCHAIN + * { u16 nr, + * hv, + * kernel, + * user; + * u64 ips[nr]; } && PERF_RECORD_CALLCHAIN * }; */ }; @@ -406,7 +406,7 @@ struct perf_mmap_data { atomic_t wakeup; /* needs a wakeup */ struct perf_counter_mmap_page *user_page; - void *data_pages[0]; + void *data_pages[0]; }; struct perf_pending_entry { @@ -422,7 +422,7 @@ struct perf_counter { struct list_head list_entry; struct list_head event_entry; struct list_head sibling_list; - int nr_siblings; + int nr_siblings; struct perf_counter *group_leader; const struct pmu *pmu; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index ec9c4007a7f..070f92d3232 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2576,7 +2576,7 @@ static void perf_log_throttle(struct perf_counter *counter, int enable) .time = sched_clock(), }; - ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 0, 0); + ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0); if (ret) return; @@ -3449,8 +3449,6 @@ void perf_counter_exit_task(struct task_struct *child) struct perf_counter_context *child_ctx; unsigned long flags; - WARN_ON_ONCE(child != current); - child_ctx = child->perf_counter_ctxp; if (likely(!child_ctx)) -- cgit v1.2.3 From 8a7b8cb91f26a671f22cedc7fd54508667f2d9b9 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 26 May 2009 16:27:59 +1000 Subject: perf_counter: powerpc: Implement interrupt throttling This implements interrupt throttling on powerpc. Since we don't have individual count enable/disable or interrupt enable/disable controls per counter, this simply sets the hardware counter to 0, meaning that it will not interrupt again until it has counted 2^31 counts, which will take at least 2^30 cycles assuming a maximum of 2 counts per cycle. 
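The arithmetic behind that bound, as a standalone sketch (the two-counts-per-cycle ceiling is the assumption stated above):

#include <stdio.h>

int main(void)
{
        /*
         * A 32-bit PMC raises its interrupt when bit 31 becomes set,
         * so starting the counter from 0 buys 2^31 counts.
         */
        unsigned long long counts_until_irq = 1ULL << 31;
        unsigned long long max_counts_per_cycle = 2;

        printf("at least %llu cycles until the next interrupt (2^30)\n",
               counts_until_irq / max_counts_per_cycle);

        return 0;
}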
Also, we set counter->hw.period_left to the maximum possible value (2^63 - 1), so we won't report overflows for this counter for the foreseeable future. The unthrottle operation restores counter->hw.period_left and the hardware counter so that we will once again report a counter overflow after counter->hw.irq_period counts. [ Impact: new perfcounters robustness feature on PowerPC ] Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: Corey Ashford LKML-Reference: <18971.35823.643362.446774@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 48 ++++++++++++++++++++++++++++++++++---- 1 file changed, 43 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index fe21b2440f2..f96d55f55bd 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -740,10 +740,37 @@ static void power_pmu_disable(struct perf_counter *counter) local_irq_restore(flags); } +/* + * Re-enable interrupts on a counter after they were throttled + * because they were coming too fast. + */ +static void power_pmu_unthrottle(struct perf_counter *counter) +{ + s64 val, left; + unsigned long flags; + + if (!counter->hw.idx || !counter->hw.irq_period) + return; + local_irq_save(flags); + perf_disable(); + power_pmu_read(counter); + left = counter->hw.irq_period; + val = 0; + if (left < 0x80000000L) + val = 0x80000000L - left; + write_pmc(counter->hw.idx, val); + atomic64_set(&counter->hw.prev_count, val); + atomic64_set(&counter->hw.period_left, left); + perf_counter_update_userpage(counter); + perf_enable(); + local_irq_restore(flags); +} + struct pmu power_pmu = { .enable = power_pmu_enable, .disable = power_pmu_disable, .read = power_pmu_read, + .unthrottle = power_pmu_unthrottle, }; /* @@ -957,10 +984,6 @@ static void record_and_restart(struct perf_counter *counter, long val, if (left < 0x80000000L) val = 0x80000000L - left; } - write_pmc(counter->hw.idx, val); - atomic64_set(&counter->hw.prev_count, val); - atomic64_set(&counter->hw.period_left, left); - perf_counter_update_userpage(counter); /* * Finally record data if requested. @@ -983,8 +1006,23 @@ static void record_and_restart(struct perf_counter *counter, long val, if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync)) addr = mfspr(SPRN_SDAR); } - perf_counter_overflow(counter, nmi, regs, addr); + if (perf_counter_overflow(counter, nmi, regs, addr)) { + /* + * Interrupts are coming too fast - throttle them + * by setting the counter to 0, so it will be + * at least 2^30 cycles until the next interrupt + * (assuming each counter counts at most 2 counts + * per cycle). + */ + val = 0; + left = ~0ULL >> 1; + } } + + write_pmc(counter->hw.idx, val); + atomic64_set(&counter->hw.prev_count, val); + atomic64_set(&counter->hw.period_left, left); + perf_counter_update_userpage(counter); } /* -- cgit v1.2.3 From 79202ba9ff8cf570a75596f42e011167734d1c4b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 May 2009 08:10:00 +0200 Subject: perf_counter, x86: Fix APIC NMI programming My Nehalem box locks up in certain situations (with an always-asserted NMI causing a lockup) if the PMU LVT entry is programmed between NMI and IRQ mode at high frequency. Standardize exclusively on NMIs instead.
[ Impact: fix lockup ] Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 189bf9d7cda..ece3813c7a3 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -285,14 +285,10 @@ static int __hw_perf_counter_init(struct perf_counter *counter) hwc->config |= ARCH_PERFMON_EVENTSEL_OS; /* - * If privileged enough, allow NMI events: + * Use NMI events all the time: */ - hwc->nmi = 0; - if (hw_event->nmi) { - if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN)) - return -EACCES; - hwc->nmi = 1; - } + hwc->nmi = 1; + hw_event->nmi = 1; if (!hwc->irq_period) hwc->irq_period = x86_pmu.max_period; @@ -553,9 +549,6 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) if (!x86_pmu.num_counters_fixed) return -1; - if (unlikely(hwc->nmi)) - return -1; - event = hwc->config & ARCH_PERFMON_EVENT_MASK; if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS))) @@ -806,9 +799,6 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) counter = cpuc->counters[idx]; hwc = &counter->hw; - if (counter->hw_event.nmi != nmi) - continue; - val = x86_perf_counter_update(counter, hwc, idx); if (val & (1ULL << (x86_pmu.counter_bits - 1))) continue; -- cgit v1.2.3 From aaba98018b8295dfa2119345d17f833d74448cd0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 May 2009 08:10:00 +0200 Subject: perf_counter, x86: Make NMI lockups more robust We have a debug check that detects stuck NMIs and returns with the PMU disabled in the global ctrl MSR - but I managed to trigger a situation where this was not enough to deassert the NMI. So clear/reset the full PMU and keep the disable count balanced when exiting from here. This way the box produces a debug warning but stays up and is more debuggable.
[ Impact: in case of PMU related bugs, recover more gracefully ] Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index ece3813c7a3..2eeaa99add1 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -724,6 +724,30 @@ static void intel_pmu_save_and_restart(struct perf_counter *counter) intel_pmu_enable_counter(hwc, idx); } +static void intel_pmu_reset(void) +{ + unsigned long flags; + int idx; + + if (!x86_pmu.num_counters) + return; + + local_irq_save(flags); + + printk("clearing PMU state on CPU#%d\n", smp_processor_id()); + + for (idx = 0; idx < x86_pmu.num_counters; idx++) { + checking_wrmsrl(x86_pmu.eventsel + idx, 0ull); + checking_wrmsrl(x86_pmu.perfctr + idx, 0ull); + } + for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { + checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); + } + + local_irq_restore(flags); +} + + /* * This handler is triggered by the local APIC, so the APIC IRQ handling * rules apply: @@ -750,6 +774,8 @@ again: if (++loops > 100) { WARN_ONCE(1, "perfcounters: irq loop stuck!\n"); perf_counter_print_debug(); + intel_pmu_reset(); + perf_enable(); return 1; } -- cgit v1.2.3 From 329d876d6fd326109f191ae0fb2798b8834fb70b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 May 2009 08:10:00 +0200 Subject: perf_counter: Initialize ->oncpu properly This shouldn't matter normally (and I have not seen any misbehavior), because active counters always have a proper ->oncpu value - but nevertheless initialize the field properly to -1.
[ Impact: cleanup ] Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 070f92d3232..367299f91aa 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -3122,6 +3122,8 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, counter->group_leader = group_leader; counter->pmu = NULL; counter->ctx = ctx; + counter->oncpu = -1; + get_ctx(ctx); counter->state = PERF_COUNTER_STATE_INACTIVE; -- cgit v1.2.3 From 69aa48ab82e17299efe2be6c21795945731a6c17 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 May 2009 09:02:27 +0200 Subject: perf record: Straighten out argv types [ Impact: cleanup ] Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 1b19f187d35..f225efaff9f 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -191,7 +191,7 @@ static void display_help(void) exit(0); } -static void process_options(int argc, const char *argv[]) +static void process_options(int argc, char * const argv[]) { int error = 0, counter; @@ -538,7 +538,7 @@ static void open_counters(int cpu, pid_t pid) nr_cpu++; } -int cmd_record(int argc, const char **argv) +int cmd_record(int argc, char * const argv[]) { int i, counter; pid_t pid; -- cgit v1.2.3 From 4e97ddf09ee3ce715fc334399bae4cc0c0a13057 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 May 2009 10:07:44 +0200 Subject: perf stat: Remove unused variable [ Impact: cleanup ] Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-stat.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index 88c70be9903..c1053d820c1 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -541,8 +541,6 @@ static void skip_signal(int signo) int cmd_stat(int argc, char **argv, const char *prefix) { - sigset_t blocked; - page_size = sysconf(_SC_PAGE_SIZE); process_options(argc, argv); -- cgit v1.2.3 From 0e9b20b8a1cab6c6ab4f98f917a2d98783103969 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 May 2009 09:17:18 +0200 Subject: perf record: Convert to Git option parsing Remove getopt usage and use Git's much more advanced and more compact command option library. Git's library (util/parse-options.[ch]) constructs help texts and error messages automatically, and has a number of other convenience features as well. 
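A minimal usage sketch in the style the converted builtin-record.c adopts below; the 'perf example' command and its options are hypothetical, while the OPT_* macros and entry points are the ones the library provides:

#include "util/parse-options.h"

static int verbose;
static const char *output_name = "output.perf";

static const char * const example_usage[] = {
        "perf example [<options>]",
        NULL
};

static const struct option example_options[] = {
        OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose"),
        OPT_STRING('o', "output", &output_name, "file", "output file name"),
        OPT_END()
};

int cmd_example(int argc, const char **argv, const char *prefix)
{
        /* consumes recognized options, prints auto-generated help on error */
        argc = parse_options(argc, argv, example_options, example_usage, 0);
        if (!argc)
                usage_with_options(example_usage, example_options);

        return 0;
}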
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 372 +++++++++++++--------------- Documentation/perf_counter/builtin-top.c | 3 + 2 files changed, 177 insertions(+), 198 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index f225efaff9f..f12a7822fcf 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -2,6 +2,8 @@ #include "perf.h" #include "util/util.h" +#include "util/parse-options.h" +#include "util/exec_cmd.h" #include #include @@ -11,7 +13,6 @@ #include #include #include -#include #include #include #include @@ -33,8 +34,8 @@ -#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) -#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) +#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1) +#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) static int nr_counters = 0; static __u64 event_id[MAX_COUNTERS] = { }; @@ -45,7 +46,7 @@ static int nr_cpus = 0; static unsigned int page_size; static unsigned int mmap_pages = 16; static int output; -static char *output_name = "output.perf"; +static const char *output_name = "output.perf"; static int group = 0; static unsigned int realtime_prio = 0; static int system_wide = 0; @@ -62,192 +63,6 @@ const unsigned int default_count[] = { 10000, }; -struct event_symbol { - __u64 event; - char *symbol; -}; - -static struct event_symbol event_symbols[] = { - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cpu-cycles", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cycles", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), "instructions", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), "cache-references", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), "cache-misses", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branch-instructions", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branches", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES), "branch-misses", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES), "bus-cycles", }, - - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK), "cpu-clock", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), "task-clock", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "page-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN), "minor-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ), "major-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "context-switches", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "cs", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "cpu-migrations", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "migrations", }, -}; - -/* - * Each event can have multiple symbolic names. - * Symbolic names are (almost) exactly matched. 
- */ -static __u64 match_event_symbols(char *str) -{ - __u64 config, id; - int type; - unsigned int i; - - if (sscanf(str, "r%llx", &config) == 1) - return config | PERF_COUNTER_RAW_MASK; - - if (sscanf(str, "%d:%llu", &type, &id) == 2) - return EID(type, id); - - for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { - if (!strncmp(str, event_symbols[i].symbol, - strlen(event_symbols[i].symbol))) - return event_symbols[i].event; - } - - return ~0ULL; -} - -static int parse_events(char *str) -{ - __u64 config; - -again: - if (nr_counters == MAX_COUNTERS) - return -1; - - config = match_event_symbols(str); - if (config == ~0ULL) - return -1; - - event_id[nr_counters] = config; - nr_counters++; - - str = strstr(str, ","); - if (str) { - str++; - goto again; - } - - return 0; -} - -#define __PERF_COUNTER_FIELD(config, name) \ - ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) - -#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) -#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) -#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) -#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) - -static void display_events_help(void) -{ - unsigned int i; - __u64 e; - - printf( - " -e EVENT --event=EVENT # symbolic-name abbreviations"); - - for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { - int type, id; - - e = event_symbols[i].event; - type = PERF_COUNTER_TYPE(e); - id = PERF_COUNTER_ID(e); - - printf("\n %d:%d: %-20s", - type, id, event_symbols[i].symbol); - } - - printf("\n" - " rNNN: raw PMU events (eventsel+umask)\n\n"); -} - -static void display_help(void) -{ - printf( - "Usage: perf-record [] \n" - "perf-record Options (up to %d event types can be specified at once):\n\n", - MAX_COUNTERS); - - display_events_help(); - - printf( - " -c CNT --count=CNT # event period to sample\n" - " -m pages --mmap_pages= # number of mmap data pages\n" - " -o file --output= # output file\n" - " -p pid --pid= # record events on existing pid\n" - " -r prio --realtime= # use RT prio\n" - " -s --system # system wide profiling\n" - ); - - exit(0); -} - -static void process_options(int argc, char * const argv[]) -{ - int error = 0, counter; - - for (;;) { - int option_index = 0; - /** Options for getopt */ - static struct option long_options[] = { - {"count", required_argument, NULL, 'c'}, - {"event", required_argument, NULL, 'e'}, - {"mmap_pages", required_argument, NULL, 'm'}, - {"output", required_argument, NULL, 'o'}, - {"pid", required_argument, NULL, 'p'}, - {"realtime", required_argument, NULL, 'r'}, - {"system", no_argument, NULL, 's'}, - {"inherit", no_argument, NULL, 'i'}, - {"nmi", no_argument, NULL, 'n'}, - {NULL, 0, NULL, 0 } - }; - int c = getopt_long(argc, argv, "+:c:e:m:o:p:r:sin", - long_options, &option_index); - if (c == -1) - break; - - switch (c) { - case 'c': default_interval = atoi(optarg); break; - case 'e': error = parse_events(optarg); break; - case 'm': mmap_pages = atoi(optarg); break; - case 'o': output_name = strdup(optarg); break; - case 'p': target_pid = atoi(optarg); break; - case 'r': realtime_prio = atoi(optarg); break; - case 's': system_wide ^= 1; break; - case 'i': inherit ^= 1; break; - case 'n': nmi ^= 1; break; - default: error = 1; break; - } - } - - if (argc - optind == 0 && target_pid == -1) - error = 1; - - if (error) - display_help(); - - if (!nr_counters) { - nr_counters = 1; - event_id[0] = 0; - } - - for (counter = 0; counter < nr_counters; counter++) { - if (event_count[counter]) - 
continue; - - event_count[counter] = default_interval; - } -} - struct mmap_data { int counter; void *base; @@ -538,16 +353,13 @@ static void open_counters(int cpu, pid_t pid) nr_cpu++; } -int cmd_record(int argc, char * const argv[]) +static int __cmd_record(int argc, const char **argv) { int i, counter; pid_t pid; int ret; page_size = sysconf(_SC_PAGE_SIZE); - - process_options(argc, argv); - nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); assert(nr_cpus <= MAX_NR_CPUS); assert(nr_cpus >= 0); @@ -558,9 +370,6 @@ int cmd_record(int argc, char * const argv[]) exit(-1); } - argc -= optind; - argv += optind; - if (!system_wide) { open_counters(-1, target_pid != -1 ? target_pid : 0); } else for (i = 0; i < nr_cpus; i++) @@ -575,7 +384,7 @@ int cmd_record(int argc, char * const argv[]) perror("failed to fork"); if (!pid) { - if (execvp(argv[0], argv)) { + if (execvp(argv[0], (char **)argv)) { perror(argv[0]); exit(-1); } @@ -610,3 +419,170 @@ int cmd_record(int argc, char * const argv[]) return 0; } + +struct event_symbol { + __u64 event; + char *symbol; +}; + +static struct event_symbol event_symbols[] = { + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cpu-cycles", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cycles", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), "instructions", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), "cache-references", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), "cache-misses", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branch-instructions", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branches", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES), "branch-misses", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES), "bus-cycles", }, + + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK), "cpu-clock", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), "task-clock", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "page-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN), "minor-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ), "major-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "context-switches", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "cs", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "cpu-migrations", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "migrations", }, +}; + +/* + * Each event can have multiple symbolic names. + * Symbolic names are (almost) exactly matched. 
+ */ +static __u64 match_event_symbols(const char *str) +{ + __u64 config, id; + int type; + unsigned int i; + + if (sscanf(str, "r%llx", &config) == 1) + return config | PERF_COUNTER_RAW_MASK; + + if (sscanf(str, "%d:%llu", &type, &id) == 2) + return EID(type, id); + + for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { + if (!strncmp(str, event_symbols[i].symbol, + strlen(event_symbols[i].symbol))) + return event_symbols[i].event; + } + + return ~0ULL; +} + +static int parse_events(const struct option *opt, const char *str, int unset) +{ + __u64 config; + +again: + if (nr_counters == MAX_COUNTERS) + return -1; + + config = match_event_symbols(str); + if (config == ~0ULL) + return -1; + + event_id[nr_counters] = config; + nr_counters++; + + str = strstr(str, ","); + if (str) { + str++; + goto again; + } + + return 0; +} + +static char events_help[100000]; + +#define __PERF_COUNTER_FIELD(config, name) \ + ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) + +#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) +#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) +#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) +#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) + + + +static void create_events_help(void) +{ + unsigned int i; + char *str; + __u64 e; + + str = events_help; + + str += sprintf(str, + "event name: ["); + + for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { + int type, id; + + e = event_symbols[i].event; + type = PERF_COUNTER_TYPE(e); + id = PERF_COUNTER_ID(e); + + if (i) + str += sprintf(str, "|"); + + str += sprintf(str, "%s", + event_symbols[i].symbol); + } + + str += sprintf(str, "|rNNN]"); +} + +static const char * const record_usage[] = { + "perf record [] ", + NULL +}; + +const struct option options[] = { + OPT_CALLBACK('e', "event", NULL, "event", + events_help, parse_events), + OPT_INTEGER('c', "count", &default_interval, + "event period to sample"), + OPT_INTEGER('m', "mmap-pages", &mmap_pages, + "number of mmap data pages"), + OPT_STRING('o', "output", &output_name, "file", + "output file name"), + OPT_BOOLEAN('i', "inherit", &inherit, + "child tasks inherit counters"), + OPT_INTEGER('p', "pid", &target_pid, + "record events on existing pid"), + OPT_INTEGER('r', "realtime", &realtime_prio, + "collect data with this RT SCHED_FIFO priority"), + OPT_BOOLEAN('a', "all-cpus", &system_wide, + "system-wide collection from all CPUs"), + OPT_END() +}; + +int cmd_record(int argc, const char **argv, const char *prefix) +{ + int counter; + + create_events_help(); + + argc = parse_options(argc, argv, options, record_usage, 0); + if (!argc) + usage_with_options(record_usage, options); + + if (!nr_counters) { + nr_counters = 1; + event_id[0] = 0; + } + + for (counter = 0; counter < nr_counters; counter++) { + if (event_count[counter]) + continue; + + event_count[counter] = default_interval; + } + + return __cmd_record(argc, argv); +} diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 4bed265926d..626b3207649 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -42,13 +42,16 @@ * Released under the GPL v2. 
(and only v2, not any later version) */ + #include "perf.h" #include "util/util.h" #include #include #include + #include + #include #include #include -- cgit v1.2.3 From 8ad8db3788fd9a449941fb2392ca85af4ee1cde1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 May 2009 11:10:09 +0200 Subject: perf_counter tools: Librarize event string parsing Extract the event string parser from builtin-record.c, and librarize it - to be reused in other commands. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 2 + Documentation/perf_counter/builtin-record.c | 154 +------------------------ Documentation/perf_counter/util/parse-events.c | 127 ++++++++++++++++++++ Documentation/perf_counter/util/parse-events.h | 10 ++ 4 files changed, 145 insertions(+), 148 deletions(-) create mode 100644 Documentation/perf_counter/util/parse-events.c create mode 100644 Documentation/perf_counter/util/parse-events.h diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 481e4c26cd4..45daa72facd 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -290,6 +290,7 @@ LIB_H += ../../include/linux/perf_counter.h LIB_H += perf.h LIB_H += util/levenshtein.h LIB_H += util/parse-options.h +LIB_H += util/parse-events.h LIB_H += util/quote.h LIB_H += util/util.h LIB_H += util/help.h @@ -304,6 +305,7 @@ LIB_OBJS += util/exec_cmd.o LIB_OBJS += util/help.o LIB_OBJS += util/levenshtein.o LIB_OBJS += util/parse-options.o +LIB_OBJS += util/parse-events.o LIB_OBJS += util/path.o LIB_OBJS += util/run-command.o LIB_OBJS += util/quote.o diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index f12a7822fcf..6fa6ed66495 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -3,44 +3,17 @@ #include "perf.h" #include "util/util.h" #include "util/parse-options.h" +#include "util/parse-events.h" #include "util/exec_cmd.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include - - #define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1) #define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) -static int nr_counters = 0; -static __u64 event_id[MAX_COUNTERS] = { }; static int default_interval = 100000; static int event_count[MAX_COUNTERS]; + static int fd[MAX_NR_CPUS][MAX_COUNTERS]; static int nr_cpus = 0; static unsigned int page_size; @@ -420,131 +393,16 @@ static int __cmd_record(int argc, const char **argv) return 0; } -struct event_symbol { - __u64 event; - char *symbol; -}; - -static struct event_symbol event_symbols[] = { - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cpu-cycles", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cycles", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), "instructions", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), "cache-references", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), "cache-misses", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branch-instructions", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branches", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES), "branch-misses", }, - 
{EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES), "bus-cycles", }, - - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK), "cpu-clock", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), "task-clock", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "page-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN), "minor-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ), "major-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "context-switches", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "cs", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "cpu-migrations", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "migrations", }, -}; - -/* - * Each event can have multiple symbolic names. - * Symbolic names are (almost) exactly matched. - */ -static __u64 match_event_symbols(const char *str) -{ - __u64 config, id; - int type; - unsigned int i; - - if (sscanf(str, "r%llx", &config) == 1) - return config | PERF_COUNTER_RAW_MASK; - - if (sscanf(str, "%d:%llu", &type, &id) == 2) - return EID(type, id); - - for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { - if (!strncmp(str, event_symbols[i].symbol, - strlen(event_symbols[i].symbol))) - return event_symbols[i].event; - } - - return ~0ULL; -} - -static int parse_events(const struct option *opt, const char *str, int unset) -{ - __u64 config; - -again: - if (nr_counters == MAX_COUNTERS) - return -1; - - config = match_event_symbols(str); - if (config == ~0ULL) - return -1; - - event_id[nr_counters] = config; - nr_counters++; - - str = strstr(str, ","); - if (str) { - str++; - goto again; - } - - return 0; -} - -static char events_help[100000]; - -#define __PERF_COUNTER_FIELD(config, name) \ - ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) - -#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) -#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) -#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) -#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) - - - -static void create_events_help(void) -{ - unsigned int i; - char *str; - __u64 e; - - str = events_help; - - str += sprintf(str, - "event name: ["); - - for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { - int type, id; - - e = event_symbols[i].event; - type = PERF_COUNTER_TYPE(e); - id = PERF_COUNTER_ID(e); - - if (i) - str += sprintf(str, "|"); - - str += sprintf(str, "%s", - event_symbols[i].symbol); - } - - str += sprintf(str, "|rNNN]"); -} - static const char * const record_usage[] = { "perf record [] ", NULL }; +static char events_help_msg[EVENTS_HELP_MAX]; + const struct option options[] = { OPT_CALLBACK('e', "event", NULL, "event", - events_help, parse_events), + events_help_msg, parse_events), OPT_INTEGER('c', "count", &default_interval, "event period to sample"), OPT_INTEGER('m', "mmap-pages", &mmap_pages, @@ -566,7 +424,7 @@ int cmd_record(int argc, const char **argv, const char *prefix) { int counter; - create_events_help(); + create_events_help(events_help_msg); argc = parse_options(argc, argv, options, record_usage, 0); if (!argc) diff --git a/Documentation/perf_counter/util/parse-events.c b/Documentation/perf_counter/util/parse-events.c new file mode 100644 index 00000000000..77d0917d55d --- /dev/null +++ b/Documentation/perf_counter/util/parse-events.c @@ -0,0 +1,127 @@ + +#include "../perf.h" +#include "util.h" +#include "parse-options.h" +#include 
"parse-events.h" +#include "exec_cmd.h" + +int nr_counters; + +__u64 event_id[MAX_COUNTERS] = { }; + +struct event_symbol { + __u64 event; + char *symbol; +}; + +static struct event_symbol event_symbols[] = { + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cpu-cycles", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cycles", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), "instructions", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), "cache-references", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), "cache-misses", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branch-instructions", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branches", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES), "branch-misses", }, + {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES), "bus-cycles", }, + + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK), "cpu-clock", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), "task-clock", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "page-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN), "minor-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ), "major-faults", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "context-switches", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "cs", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "cpu-migrations", }, + {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "migrations", }, +}; + +/* + * Each event can have multiple symbolic names. + * Symbolic names are (almost) exactly matched. + */ +static __u64 match_event_symbols(const char *str) +{ + __u64 config, id; + int type; + unsigned int i; + + if (sscanf(str, "r%llx", &config) == 1) + return config | PERF_COUNTER_RAW_MASK; + + if (sscanf(str, "%d:%llu", &type, &id) == 2) + return EID(type, id); + + for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { + if (!strncmp(str, event_symbols[i].symbol, + strlen(event_symbols[i].symbol))) + return event_symbols[i].event; + } + + return ~0ULL; +} + +int parse_events(const struct option *opt, const char *str, int unset) +{ + __u64 config; + +again: + if (nr_counters == MAX_COUNTERS) + return -1; + + config = match_event_symbols(str); + if (config == ~0ULL) + return -1; + + event_id[nr_counters] = config; + nr_counters++; + + str = strstr(str, ","); + if (str) { + str++; + goto again; + } + + return 0; +} + +#define __PERF_COUNTER_FIELD(config, name) \ + ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) + +#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) +#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) +#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) +#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) + +/* + * Create the help text for the event symbols: + */ +void create_events_help(char *events_help_msg) +{ + unsigned int i; + char *str; + __u64 e; + + str = events_help_msg; + + str += sprintf(str, + "event name: ["); + + for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { + int type, id; + + e = event_symbols[i].event; + type = PERF_COUNTER_TYPE(e); + id = PERF_COUNTER_ID(e); + + if (i) + str += sprintf(str, "|"); + + str += sprintf(str, "%s", + event_symbols[i].symbol); + } + + str += sprintf(str, "|rNNN]"); +} + diff --git a/Documentation/perf_counter/util/parse-events.h b/Documentation/perf_counter/util/parse-events.h 
new file mode 100644 index 00000000000..6e2ebe5ff7d --- /dev/null +++ b/Documentation/perf_counter/util/parse-events.h @@ -0,0 +1,10 @@ + +extern int nr_counters; +extern __u64 event_id[MAX_COUNTERS]; + +extern int parse_events(const struct option *opt, const char *str, int unset); + +#define EVENTS_HELP_MAX (128*1024) + +extern void create_events_help(char *help_msg); + -- cgit v1.2.3 From 5242519b0296d128425368fc6ab17f541d5fa775 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 May 2009 09:17:18 +0200 Subject: perf stat: Convert to Git option parsing Remove getopt usage and use Git's much more advanced and more compact command option library. Extend the event parser library with the extensions that were in perf-stat before. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 3 +- Documentation/perf_counter/builtin-stat.c | 414 ++++--------------------- Documentation/perf_counter/util/parse-events.c | 82 ++++- Documentation/perf_counter/util/parse-events.h | 10 + 4 files changed, 145 insertions(+), 364 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 6fa6ed66495..ec2b787b23b 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -4,7 +4,6 @@ #include "util/util.h" #include "util/parse-options.h" #include "util/parse-events.h" -#include "util/exec_cmd.h" #include @@ -400,7 +399,7 @@ static const char * const record_usage[] = { static char events_help_msg[EVENTS_HELP_MAX]; -const struct option options[] = { +static const struct option options[] = { OPT_CALLBACK('e', "event", NULL, "event", events_help_msg, parse_events), OPT_INTEGER('c', "count", &default_interval, diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index c1053d820c1..e7cb9412212 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -1,35 +1,5 @@ /* - * kerneltop.c: show top kernel functions - performance counters showcase - - Build with: - - cc -O6 -Wall -c -o kerneltop.o kerneltop.c -lrt - - Sample output: - ------------------------------------------------------------------------------- - KernelTop: 2669 irqs/sec [NMI, cache-misses/cache-refs], (all, cpu: 2) ------------------------------------------------------------------------------- - - weight RIP kernel function - ______ ________________ _______________ - - 35.20 - ffffffff804ce74b : skb_copy_and_csum_dev - 33.00 - ffffffff804cb740 : sock_alloc_send_skb - 31.26 - ffffffff804ce808 : skb_push - 22.43 - ffffffff80510004 : tcp_established_options - 19.00 - ffffffff8027d250 : find_get_page - 15.76 - ffffffff804e4fc9 : eth_type_trans - 15.20 - ffffffff804d8baa : dst_release - 14.86 - ffffffff804cf5d8 : skb_release_head_state - 14.00 - ffffffff802217d5 : read_hpet - 12.00 - ffffffff804ffb7f : __ip_local_out - 11.97 - ffffffff804fc0c8 : ip_local_deliver_finish - 8.54 - ffffffff805001a3 : ip_queue_xmit - */ - -/* - * perfstat: /usr/bin/time -alike performance counter statistics utility + * perf stat: /usr/bin/time -alike performance counter statistics utility It summarizes the counter events of all tasks (and child tasks), covering all CPUs that the command (or workload) executes on. 
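In code, that coverage comes from opening, per event, a disabled counter on perf stat's own task with the inherit bit set, so the exec'ed workload and all of its children tick the same counter. A minimal sketch, condensed from create_perfstat_counter() further down in this file (error handling elided):

    struct perf_counter_hw_event hw_event;

    memset(&hw_event, 0, sizeof(hw_event));
    hw_event.config   = event_id[counter];
    hw_event.inherit  = 1;    /* child tasks inherit the counter */
    hw_event.disabled = 1;    /* enabled only when the workload starts */

    /* pid 0 (self), any CPU, no group, no flags: */
    fd[0][counter] = sys_perf_counter_open(&hw_event, 0, -1, -1, 0);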
@@ -38,59 +8,38 @@ Sample output: - $ ./perfstat -e 1 -e 3 -e 5 ls -lR /usr/include/ >/dev/null + $ perf stat -e 1 -e 3 -e 5 ls -lR /usr/include/ >/dev/null Performance counter stats for 'ls': 163516953 instructions 2295 cache-misses 2855182 branch-misses + * + * Copyright (C) 2008, Red Hat Inc, Ingo Molnar + * + * Improvements and fixes by: + * + * Arjan van de Ven + * Yanmin Zhang + * Wu Fengguang + * Mike Galbraith + * Paul Mackerras + * + * Released under the GPL v2. (and only v2, not any later version) */ - /* - * Copyright (C) 2008, Red Hat Inc, Ingo Molnar - * - * Improvements and fixes by: - * - * Arjan van de Ven - * Yanmin Zhang - * Wu Fengguang - * Mike Galbraith - * Paul Mackerras - * - * Released under the GPL v2. (and only v2, not any later version) - */ - #include "perf.h" #include "util/util.h" +#include "util/parse-options.h" +#include "util/parse-events.h" -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include #include -#include -#include -#include - -#include -#include - -#define EVENT_MASK_KERNEL 1 -#define EVENT_MASK_USER 2 static int system_wide = 0; +static int inherit = 1; -static int nr_counters = 0; -static __u64 event_id[MAX_COUNTERS] = { +static __u64 default_event_id[MAX_COUNTERS] = { EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), @@ -101,20 +50,15 @@ static __u64 event_id[MAX_COUNTERS] = { EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), }; + static int default_interval = 100000; static int event_count[MAX_COUNTERS]; static int fd[MAX_NR_CPUS][MAX_COUNTERS]; -static int event_mask[MAX_COUNTERS]; -static int tid = -1; -static int profile_cpu = -1; +static int target_pid = -1; static int nr_cpus = 0; -static int nmi = 1; -static int group = 0; static unsigned int page_size; -static int zero; - static int scale = 1; static const unsigned int default_count[] = { @@ -126,197 +70,6 @@ static const unsigned int default_count[] = { 10000, }; -static char *hw_event_names[] = { - "CPU cycles", - "instructions", - "cache references", - "cache misses", - "branches", - "branch misses", - "bus cycles", -}; - -static char *sw_event_names[] = { - "cpu clock ticks", - "task clock ticks", - "pagefaults", - "context switches", - "CPU migrations", - "minor faults", - "major faults", -}; - -struct event_symbol { - __u64 event; - char *symbol; -}; - -static struct event_symbol event_symbols[] = { - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cpu-cycles", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cycles", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), "instructions", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), "cache-references", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), "cache-misses", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branch-instructions", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branches", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES), "branch-misses", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES), "bus-cycles", }, - - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK), "cpu-clock", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), "task-clock", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "page-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN), 
"minor-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ), "major-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "context-switches", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "cs", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "cpu-migrations", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "migrations", }, -}; - -#define __PERF_COUNTER_FIELD(config, name) \ - ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) - -#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) -#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) -#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) -#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) - -static void display_events_help(void) -{ - unsigned int i; - __u64 e; - - printf( - " -e EVENT --event=EVENT # symbolic-name abbreviations"); - - for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { - int type, id; - - e = event_symbols[i].event; - type = PERF_COUNTER_TYPE(e); - id = PERF_COUNTER_ID(e); - - printf("\n %d:%d: %-20s", - type, id, event_symbols[i].symbol); - } - - printf("\n" - " rNNN: raw PMU events (eventsel+umask)\n\n"); -} - -static void display_help(void) -{ - printf( - "Usage: perfstat [] \n\n" - "PerfStat Options (up to %d event types can be specified):\n\n", - MAX_COUNTERS); - - display_events_help(); - - printf( - " -l # scale counter values\n" - " -a # system-wide collection\n"); - exit(0); -} - -static char *event_name(int ctr) -{ - __u64 config = event_id[ctr]; - int type = PERF_COUNTER_TYPE(config); - int id = PERF_COUNTER_ID(config); - static char buf[32]; - - if (PERF_COUNTER_RAW(config)) { - sprintf(buf, "raw 0x%llx", PERF_COUNTER_CONFIG(config)); - return buf; - } - - switch (type) { - case PERF_TYPE_HARDWARE: - if (id < PERF_HW_EVENTS_MAX) - return hw_event_names[id]; - return "unknown-hardware"; - - case PERF_TYPE_SOFTWARE: - if (id < PERF_SW_EVENTS_MAX) - return sw_event_names[id]; - return "unknown-software"; - - default: - break; - } - - return "unknown"; -} - -/* - * Each event can have multiple symbolic names. - * Symbolic names are (almost) exactly matched. 
- */ -static __u64 match_event_symbols(char *str) -{ - __u64 config, id; - int type; - unsigned int i; - char mask_str[4]; - - if (sscanf(str, "r%llx", &config) == 1) - return config | PERF_COUNTER_RAW_MASK; - - switch (sscanf(str, "%d:%llu:%2s", &type, &id, mask_str)) { - case 3: - if (strchr(mask_str, 'k')) - event_mask[nr_counters] |= EVENT_MASK_USER; - if (strchr(mask_str, 'u')) - event_mask[nr_counters] |= EVENT_MASK_KERNEL; - case 2: - return EID(type, id); - - default: - break; - } - - for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { - if (!strncmp(str, event_symbols[i].symbol, - strlen(event_symbols[i].symbol))) - return event_symbols[i].event; - } - - return ~0ULL; -} - -static int parse_events(char *str) -{ - __u64 config; - -again: - if (nr_counters == MAX_COUNTERS) - return -1; - - config = match_event_symbols(str); - if (config == ~0ULL) - return -1; - - event_id[nr_counters] = config; - nr_counters++; - - str = strstr(str, ","); - if (str) { - str++; - goto again; - } - - return 0; -} - - -/* - * perfstat - */ - -char fault_here[1000000]; - static void create_perfstat_counter(int counter) { struct perf_counter_hw_event hw_event; @@ -324,7 +77,7 @@ static void create_perfstat_counter(int counter) memset(&hw_event, 0, sizeof(hw_event)); hw_event.config = event_id[counter]; hw_event.record_type = 0; - hw_event.nmi = 0; + hw_event.nmi = 1; hw_event.exclude_kernel = event_mask[counter] & EVENT_MASK_KERNEL; hw_event.exclude_user = event_mask[counter] & EVENT_MASK_USER; @@ -343,7 +96,7 @@ static void create_perfstat_counter(int counter) } } } else { - hw_event.inherit = 1; + hw_event.inherit = inherit; hw_event.disabled = 1; fd[0][counter] = sys_perf_counter_open(&hw_event, 0, -1, -1, 0); @@ -355,7 +108,7 @@ static void create_perfstat_counter(int counter) } } -int do_perfstat(int argc, char *argv[]) +int do_perfstat(int argc, const char **argv) { unsigned long long t0, t1; int counter; @@ -369,12 +122,6 @@ int do_perfstat(int argc, char *argv[]) for (counter = 0; counter < nr_counters; counter++) create_perfstat_counter(counter); - argc -= optind; - argv += optind; - - if (!argc) - display_help(); - /* * Enable counters and exec the command: */ @@ -384,7 +131,7 @@ int do_perfstat(int argc, char *argv[]) if ((pid = fork()) < 0) perror("failed to fork"); if (!pid) { - if (execvp(argv[0], argv)) { + if (execvp(argv[0], (char **)argv)) { perror(argv[0]); exit(-1); } @@ -458,70 +205,45 @@ int do_perfstat(int argc, char *argv[]) return 0; } -static void process_options(int argc, char **argv) +static void skip_signal(int signo) { - int error = 0, counter; - - for (;;) { - int option_index = 0; - /** Options for getopt */ - static struct option long_options[] = { - {"count", required_argument, NULL, 'c'}, - {"cpu", required_argument, NULL, 'C'}, - {"delay", required_argument, NULL, 'd'}, - {"dump_symtab", no_argument, NULL, 'D'}, - {"event", required_argument, NULL, 'e'}, - {"filter", required_argument, NULL, 'f'}, - {"group", required_argument, NULL, 'g'}, - {"help", no_argument, NULL, 'h'}, - {"nmi", required_argument, NULL, 'n'}, - {"munmap_info", no_argument, NULL, 'U'}, - {"pid", required_argument, NULL, 'p'}, - {"realtime", required_argument, NULL, 'r'}, - {"scale", no_argument, NULL, 'l'}, - {"symbol", required_argument, NULL, 's'}, - {"stat", no_argument, NULL, 'S'}, - {"vmlinux", required_argument, NULL, 'x'}, - {"zero", no_argument, NULL, 'z'}, - {NULL, 0, NULL, 0 } - }; - int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hln:m:p:r:s:Sx:zMU", - long_options, &option_index); - if 
(c == -1) - break; - - switch (c) { - case 'a': system_wide = 1; break; - case 'c': default_interval = atoi(optarg); break; - case 'C': - /* CPU and PID are mutually exclusive */ - if (tid != -1) { - printf("WARNING: CPU switch overriding PID\n"); - sleep(1); - tid = -1; - } - profile_cpu = atoi(optarg); break; - - case 'e': error = parse_events(optarg); break; - - case 'g': group = atoi(optarg); break; - case 'h': display_help(); break; - case 'l': scale = 1; break; - case 'n': nmi = atoi(optarg); break; - case 'p': - /* CPU and PID are mutually exclusive */ - if (profile_cpu != -1) { - printf("WARNING: PID switch overriding CPU\n"); - sleep(1); - profile_cpu = -1; - } - tid = atoi(optarg); break; - case 'z': zero = 1; break; - default: error = 1; break; - } - } - if (error) - display_help(); +} + +static const char * const stat_usage[] = { + "perf stat [] ", + NULL +}; + +static char events_help_msg[EVENTS_HELP_MAX]; + +static const struct option options[] = { + OPT_CALLBACK('e', "event", NULL, "event", + events_help_msg, parse_events), + OPT_INTEGER('c', "count", &default_interval, + "event period to sample"), + OPT_BOOLEAN('i', "inherit", &inherit, + "child tasks inherit counters"), + OPT_INTEGER('p', "pid", &target_pid, + "stat events on existing pid"), + OPT_BOOLEAN('a', "all-cpus", &system_wide, + "system-wide collection from all CPUs"), + OPT_BOOLEAN('l', "scale", &scale, + "scale/normalize counters"), + OPT_END() +}; + +int cmd_stat(int argc, const char **argv, const char *prefix) +{ + int counter; + + page_size = sysconf(_SC_PAGE_SIZE); + + create_events_help(events_help_msg); + memcpy(event_id, default_event_id, sizeof(default_event_id)); + + argc = parse_options(argc, argv, options, stat_usage, 0); + if (!argc) + usage_with_options(stat_usage, options); if (!nr_counters) { nr_counters = 8; @@ -533,18 +255,6 @@ static void process_options(int argc, char **argv) event_count[counter] = default_interval; } -} - -static void skip_signal(int signo) -{ -} - -int cmd_stat(int argc, char **argv, const char *prefix) -{ - page_size = sysconf(_SC_PAGE_SIZE); - - process_options(argc, argv); - nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); assert(nr_cpus <= MAX_NR_CPUS); assert(nr_cpus >= 0); diff --git a/Documentation/perf_counter/util/parse-events.c b/Documentation/perf_counter/util/parse-events.c index 77d0917d55d..88c903eb260 100644 --- a/Documentation/perf_counter/util/parse-events.c +++ b/Documentation/perf_counter/util/parse-events.c @@ -8,6 +8,7 @@ int nr_counters; __u64 event_id[MAX_COUNTERS] = { }; +int event_mask[MAX_COUNTERS]; struct event_symbol { __u64 event; @@ -37,6 +38,64 @@ static struct event_symbol event_symbols[] = { {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "migrations", }, }; +#define __PERF_COUNTER_FIELD(config, name) \ + ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) + +#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) +#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) +#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) +#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) + +static char *hw_event_names[] = { + "CPU cycles", + "instructions", + "cache references", + "cache misses", + "branches", + "branch misses", + "bus cycles", +}; + +static char *sw_event_names[] = { + "cpu clock ticks", + "task clock ticks", + "pagefaults", + "context switches", + "CPU migrations", + "minor faults", + "major faults", +}; + +char *event_name(int ctr) +{ + __u64 config = 
event_id[ctr]; + int type = PERF_COUNTER_TYPE(config); + int id = PERF_COUNTER_ID(config); + static char buf[32]; + + if (PERF_COUNTER_RAW(config)) { + sprintf(buf, "raw 0x%llx", PERF_COUNTER_CONFIG(config)); + return buf; + } + + switch (type) { + case PERF_TYPE_HARDWARE: + if (id < PERF_HW_EVENTS_MAX) + return hw_event_names[id]; + return "unknown-hardware"; + + case PERF_TYPE_SOFTWARE: + if (id < PERF_SW_EVENTS_MAX) + return sw_event_names[id]; + return "unknown-software"; + + default: + break; + } + + return "unknown"; +} + /* * Each event can have multiple symbolic names. * Symbolic names are (almost) exactly matched. @@ -46,12 +105,23 @@ static __u64 match_event_symbols(const char *str) __u64 config, id; int type; unsigned int i; + char mask_str[4]; if (sscanf(str, "r%llx", &config) == 1) return config | PERF_COUNTER_RAW_MASK; - if (sscanf(str, "%d:%llu", &type, &id) == 2) - return EID(type, id); + switch (sscanf(str, "%d:%llu:%2s", &type, &id, mask_str)) { + case 3: + if (strchr(mask_str, 'k')) + event_mask[nr_counters] |= EVENT_MASK_USER; + if (strchr(mask_str, 'u')) + event_mask[nr_counters] |= EVENT_MASK_KERNEL; + case 2: + return EID(type, id); + + default: + break; + } for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { if (!strncmp(str, event_symbols[i].symbol, @@ -86,14 +156,6 @@ again: return 0; } -#define __PERF_COUNTER_FIELD(config, name) \ - ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) - -#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) -#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) -#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) -#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) - /* * Create the help text for the event symbols: */ diff --git a/Documentation/perf_counter/util/parse-events.h b/Documentation/perf_counter/util/parse-events.h index 6e2ebe5ff7d..0da306bb902 100644 --- a/Documentation/perf_counter/util/parse-events.h +++ b/Documentation/perf_counter/util/parse-events.h @@ -1,6 +1,16 @@ +/* + * Parse symbolic events/counts passed in as options: + */ + extern int nr_counters; extern __u64 event_id[MAX_COUNTERS]; +extern int event_mask[MAX_COUNTERS]; + +#define EVENT_MASK_KERNEL 1 +#define EVENT_MASK_USER 2 + +extern char *event_name(int ctr); extern int parse_events(const struct option *opt, const char *str, int unset); -- cgit v1.2.3 From b456bae0ff4f3cf91639dd32b2bfc49b1c30b4b0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 May 2009 09:17:18 +0200 Subject: perf top: Convert to Git option parsing Remove getopt usage and use Git's much more advanced and more compact command option library. 
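The shape of the conversion is the same as for record and stat: a static table of option descriptors plus a single parse_options() call. A minimal sketch of the pattern, using two of the perf top options for illustration (the real table and __cmd_top() are in the diff below):

    #include "util/parse-options.h"

    static int target_pid = -1;
    static int dump_symtab;

    static const char * const top_usage[] = {
        "perf top [<options>]",
        NULL
    };

    static const struct option options[] = {
        OPT_INTEGER('p', "pid", &target_pid,
                    "profile events on existing pid"),
        OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
                    "dump the symbol table used for profiling"),
        OPT_END()
    };

    int cmd_top(int argc, const char **argv, const char *prefix)
    {
        /* consumes recognized options, returns the leftover argc: */
        argc = parse_options(argc, argv, options, top_usage, 0);
        if (argc)
            usage_with_options(top_usage, options);

        return __cmd_top();
    }

The event list itself (-e) keeps going through the shared parse_events() callback from util/parse-events.c, so the event symbol table and its help text stay in one place.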
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-top.c | 559 ++++++------------------------- 1 file changed, 105 insertions(+), 454 deletions(-) diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 626b3207649..87b925c8f8e 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -45,8 +45,10 @@ #include "perf.h" #include "util/util.h" +#include "util/util.h" +#include "util/parse-options.h" +#include "util/parse-events.h" -#include #include #include @@ -70,8 +72,7 @@ static int system_wide = 0; -static int nr_counters = 0; -static __u64 event_id[MAX_COUNTERS] = { +static __u64 default_event_id[MAX_COUNTERS] = { EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), @@ -88,7 +89,7 @@ static int fd[MAX_NR_CPUS][MAX_COUNTERS]; static __u64 count_filter = 100; -static int tid = -1; +static int target_pid = -1; static int profile_cpu = -1; static int nr_cpus = 0; static int nmi = 1; @@ -100,8 +101,6 @@ static int use_mmap = 0; static int use_munmap = 0; static int freq = 0; -static char *vmlinux; - static char *sym_filter; static unsigned long filter_start; static unsigned long filter_end; @@ -110,18 +109,6 @@ static int delay_secs = 2; static int zero; static int dump_symtab; -static int scale; - -struct source_line { - uint64_t EIP; - unsigned long count; - char *line; - struct source_line *next; -}; - -static struct source_line *lines; -static struct source_line **lines_tail; - static const unsigned int default_count[] = { 1000000, 1000000, @@ -131,194 +118,6 @@ static const unsigned int default_count[] = { 10000, }; -static char *hw_event_names[] = { - "CPU cycles", - "instructions", - "cache references", - "cache misses", - "branches", - "branch misses", - "bus cycles", -}; - -static char *sw_event_names[] = { - "cpu clock ticks", - "task clock ticks", - "pagefaults", - "context switches", - "CPU migrations", - "minor faults", - "major faults", -}; - -struct event_symbol { - __u64 event; - char *symbol; -}; - -static struct event_symbol event_symbols[] = { - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cpu-cycles", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cycles", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), "instructions", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), "cache-references", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), "cache-misses", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branch-instructions", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branches", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES), "branch-misses", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES), "bus-cycles", }, - - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK), "cpu-clock", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), "task-clock", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "page-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN), "minor-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ), "major-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "context-switches", }, - {EID(PERF_TYPE_SOFTWARE, 
PERF_COUNT_CONTEXT_SWITCHES), "cs", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "cpu-migrations", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "migrations", }, -}; - -#define __PERF_COUNTER_FIELD(config, name) \ - ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) - -#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) -#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) -#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) -#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) - -static void display_events_help(void) -{ - unsigned int i; - __u64 e; - - printf( - " -e EVENT --event=EVENT # symbolic-name abbreviations"); - - for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { - int type, id; - - e = event_symbols[i].event; - type = PERF_COUNTER_TYPE(e); - id = PERF_COUNTER_ID(e); - - printf("\n %d:%d: %-20s", - type, id, event_symbols[i].symbol); - } - - printf("\n" - " rNNN: raw PMU events (eventsel+umask)\n\n"); -} - -static void display_help(void) -{ - printf( - "Usage: kerneltop []\n" - " Or: kerneltop -S [] COMMAND [ARGS]\n\n" - "KernelTop Options (up to %d event types can be specified at once):\n\n", - MAX_COUNTERS); - - display_events_help(); - - printf( - " -c CNT --count=CNT # event period to sample\n\n" - " -C CPU --cpu=CPU # CPU (-1 for all) [default: -1]\n" - " -p PID --pid=PID # PID of sampled task (-1 for all) [default: -1]\n\n" - " -l # show scale factor for RR events\n" - " -d delay --delay= # sampling/display delay [default: 2]\n" - " -f CNT --filter=CNT # min-event-count filter [default: 100]\n\n" - " -r prio --realtime= # event acquisition runs with SCHED_FIFO policy\n" - " -s symbol --symbol= # function to be showed annotated one-shot\n" - " -x path --vmlinux= # the vmlinux binary, required for -s use\n" - " -z --zero # zero counts after display\n" - " -D --dump_symtab # dump symbol table to stderr on startup\n" - " -m pages --mmap_pages= # number of mmap data pages\n" - " -M --mmap_info # print mmap info stream\n" - " -U --munmap_info # print munmap info stream\n" - ); - - exit(0); -} - -static char *event_name(int ctr) -{ - __u64 config = event_id[ctr]; - int type = PERF_COUNTER_TYPE(config); - int id = PERF_COUNTER_ID(config); - static char buf[32]; - - if (PERF_COUNTER_RAW(config)) { - sprintf(buf, "raw 0x%llx", PERF_COUNTER_CONFIG(config)); - return buf; - } - - switch (type) { - case PERF_TYPE_HARDWARE: - if (id < PERF_HW_EVENTS_MAX) - return hw_event_names[id]; - return "unknown-hardware"; - - case PERF_TYPE_SOFTWARE: - if (id < PERF_SW_EVENTS_MAX) - return sw_event_names[id]; - return "unknown-software"; - - default: - break; - } - - return "unknown"; -} - -/* - * Each event can have multiple symbolic names. - * Symbolic names are (almost) exactly matched. 
- */ -static __u64 match_event_symbols(char *str) -{ - __u64 config, id; - int type; - unsigned int i; - - if (sscanf(str, "r%llx", &config) == 1) - return config | PERF_COUNTER_RAW_MASK; - - if (sscanf(str, "%d:%llu", &type, &id) == 2) - return EID(type, id); - - for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { - if (!strncmp(str, event_symbols[i].symbol, - strlen(event_symbols[i].symbol))) - return event_symbols[i].event; - } - - return ~0ULL; -} - -static int parse_events(char *str) -{ - __u64 config; - -again: - if (nr_counters == MAX_COUNTERS) - return -1; - - config = match_event_symbols(str); - if (config == ~0ULL) - return -1; - - event_id[nr_counters] = config; - nr_counters++; - - str = strstr(str, ","); - if (str) { - str++; - goto again; - } - - return 0; -} - /* * Symbols */ @@ -331,7 +130,6 @@ struct sym_entry { char *sym; unsigned long count[MAX_COUNTERS]; int skip; - struct source_line *source; }; #define MAX_SYMS 100000 @@ -342,8 +140,6 @@ struct sym_entry *sym_filter_entry; static struct sym_entry sym_table[MAX_SYMS]; -static void show_details(struct sym_entry *sym); - /* * Ordering weight: count-1 * count-2 * ... / count-n */ @@ -419,15 +215,15 @@ static void print_sym_table(void) printf( "], "); - if (tid != -1) - printf(" (tid: %d", tid); + if (target_pid != -1) + printf(" (target_pid: %d", target_pid); else printf(" (all"); if (profile_cpu != -1) printf(", cpu: %d)\n", profile_cpu); else { - if (tid != -1) + if (target_pid != -1) printf(")\n"); else printf(", %d CPUs)\n", nr_cpus); @@ -463,9 +259,6 @@ static void print_sym_table(void) pcnt, tmp[i].addr, tmp[i].sym); } - if (sym_filter_entry) - show_details(sym_filter_entry); - { struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; @@ -628,134 +421,8 @@ static void parse_symbols(void) } } -/* - * Source lines - */ - -static void parse_vmlinux(char *filename) -{ - FILE *file; - char command[PATH_MAX*2]; - if (!filename) - return; - - sprintf(command, "objdump --start-address=0x%016lx --stop-address=0x%016lx -dS %s", filter_start, filter_end, filename); - - file = popen(command, "r"); - if (!file) - return; - - lines_tail = &lines; - while (!feof(file)) { - struct source_line *src; - size_t dummy = 0; - char *c; - - src = malloc(sizeof(struct source_line)); - assert(src != NULL); - memset(src, 0, sizeof(struct source_line)); - - if (getline(&src->line, &dummy, file) < 0) - break; - if (!src->line) - break; - - c = strchr(src->line, '\n'); - if (c) - *c = 0; - - src->next = NULL; - *lines_tail = src; - lines_tail = &src->next; - - if (strlen(src->line)>8 && src->line[8] == ':') - src->EIP = strtoull(src->line, NULL, 16); - if (strlen(src->line)>8 && src->line[16] == ':') - src->EIP = strtoull(src->line, NULL, 16); - } - pclose(file); -} - -static void record_precise_ip(uint64_t ip) -{ - struct source_line *line; - - for (line = lines; line; line = line->next) { - if (line->EIP == ip) - line->count++; - if (line->EIP > ip) - break; - } -} - -static void lookup_sym_in_vmlinux(struct sym_entry *sym) -{ - struct source_line *line; - char pattern[PATH_MAX]; - sprintf(pattern, "<%s>:", sym->sym); - - for (line = lines; line; line = line->next) { - if (strstr(line->line, pattern)) { - sym->source = line; - break; - } - } -} - -static void show_lines(struct source_line *line_queue, int line_queue_count) -{ - int i; - struct source_line *line; - - line = line_queue; - for (i = 0; i < line_queue_count; i++) { - printf("%8li\t%s\n", line->count, line->line); - line = line->next; - } -} - #define TRACE_COUNT 3 -static void 
show_details(struct sym_entry *sym) -{ - struct source_line *line; - struct source_line *line_queue = NULL; - int displayed = 0; - int line_queue_count = 0; - - if (!sym->source) - lookup_sym_in_vmlinux(sym); - if (!sym->source) - return; - - printf("Showing details for %s\n", sym->sym); - - line = sym->source; - while (line) { - if (displayed && strstr(line->line, ">:")) - break; - - if (!line_queue_count) - line_queue = line; - line_queue_count ++; - - if (line->count >= count_filter) { - show_lines(line_queue, line_queue_count); - line_queue_count = 0; - line_queue = NULL; - } else if (line_queue_count > TRACE_COUNT) { - line_queue = line_queue->next; - line_queue_count --; - } - - line->count = 0; - displayed++; - if (displayed > 300) - break; - line = line->next; - } -} - /* * Binary search in the histogram table and record the hit: */ @@ -764,8 +431,6 @@ static void record_ip(uint64_t ip, int counter) int left_idx, middle_idx, right_idx, idx; unsigned long left, middle, right; - record_precise_ip(ip); - left_idx = 0; right_idx = sym_table_count-1; assert(ip <= max_ip && ip >= min_ip); @@ -822,97 +487,6 @@ static void process_event(uint64_t ip, int counter) record_ip(ip, counter); } -static void process_options(int argc, char **argv) -{ - int error = 0, counter; - - for (;;) { - int option_index = 0; - /** Options for getopt */ - static struct option long_options[] = { - {"count", required_argument, NULL, 'c'}, - {"cpu", required_argument, NULL, 'C'}, - {"delay", required_argument, NULL, 'd'}, - {"dump_symtab", no_argument, NULL, 'D'}, - {"event", required_argument, NULL, 'e'}, - {"filter", required_argument, NULL, 'f'}, - {"group", required_argument, NULL, 'g'}, - {"help", no_argument, NULL, 'h'}, - {"nmi", required_argument, NULL, 'n'}, - {"mmap_info", no_argument, NULL, 'M'}, - {"mmap_pages", required_argument, NULL, 'm'}, - {"munmap_info", no_argument, NULL, 'U'}, - {"pid", required_argument, NULL, 'p'}, - {"realtime", required_argument, NULL, 'r'}, - {"scale", no_argument, NULL, 'l'}, - {"symbol", required_argument, NULL, 's'}, - {"stat", no_argument, NULL, 'S'}, - {"vmlinux", required_argument, NULL, 'x'}, - {"zero", no_argument, NULL, 'z'}, - {"freq", required_argument, NULL, 'F'}, - {NULL, 0, NULL, 0 } - }; - int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hln:m:p:r:s:Sx:zMUF:", - long_options, &option_index); - if (c == -1) - break; - - switch (c) { - case 'a': system_wide = 1; break; - case 'c': default_interval = atoi(optarg); break; - case 'C': - /* CPU and PID are mutually exclusive */ - if (tid != -1) { - printf("WARNING: CPU switch overriding PID\n"); - sleep(1); - tid = -1; - } - profile_cpu = atoi(optarg); break; - case 'd': delay_secs = atoi(optarg); break; - case 'D': dump_symtab = 1; break; - - case 'e': error = parse_events(optarg); break; - - case 'f': count_filter = atoi(optarg); break; - case 'g': group = atoi(optarg); break; - case 'h': display_help(); break; - case 'l': scale = 1; break; - case 'n': nmi = atoi(optarg); break; - case 'p': - /* CPU and PID are mutually exclusive */ - if (profile_cpu != -1) { - printf("WARNING: PID switch overriding CPU\n"); - sleep(1); - profile_cpu = -1; - } - tid = atoi(optarg); break; - case 'r': realtime_prio = atoi(optarg); break; - case 's': sym_filter = strdup(optarg); break; - case 'x': vmlinux = strdup(optarg); break; - case 'z': zero = 1; break; - case 'm': mmap_pages = atoi(optarg); break; - case 'M': use_mmap = 1; break; - case 'U': use_munmap = 1; break; - case 'F': freq = 1; default_interval = atoi(optarg); 
break; - default: error = 1; break; - } - } - if (error) - display_help(); - - if (!nr_counters) { - nr_counters = 1; - event_id[0] = 0; - } - - for (counter = 0; counter < nr_counters; counter++) { - if (event_count[counter]) - continue; - - event_count[counter] = default_interval; - } -} - struct mmap_data { int counter; void *base; @@ -973,11 +547,11 @@ static void mmap_read(struct mmap_data *md) struct ip_event { struct perf_event_header header; __u64 ip; - __u32 pid, tid; + __u32 pid, target_pid; }; struct mmap_event { struct perf_event_header header; - __u32 pid, tid; + __u32 pid, target_pid; __u64 start; __u64 len; __u64 pgoff; @@ -1043,7 +617,7 @@ static void mmap_read(struct mmap_data *md) static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; -int cmd_top(int argc, char **argv, const char *prefix) +static int __cmd_top(void) { struct perf_counter_hw_event hw_event; pthread_t thread; @@ -1051,27 +625,12 @@ int cmd_top(int argc, char **argv, const char *prefix) unsigned int cpu; int ret; - page_size = sysconf(_SC_PAGE_SIZE); - - process_options(argc, argv); - - nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); - assert(nr_cpus <= MAX_NR_CPUS); - assert(nr_cpus >= 0); - - if (tid != -1 || profile_cpu != -1) - nr_cpus = 1; - - parse_symbols(); - if (vmlinux && sym_filter_entry) - parse_vmlinux(vmlinux); - for (i = 0; i < nr_cpus; i++) { group_fd = -1; for (counter = 0; counter < nr_counters; counter++) { cpu = profile_cpu; - if (tid == -1 && profile_cpu == -1) + if (target_pid == -1 && profile_cpu == -1) cpu = i; memset(&hw_event, 0, sizeof(hw_event)); @@ -1083,7 +642,7 @@ int cmd_top(int argc, char **argv, const char *prefix) hw_event.munmap = use_munmap; hw_event.freq = freq; - fd[i][counter] = sys_perf_counter_open(&hw_event, tid, cpu, group_fd, 0); + fd[i][counter] = sys_perf_counter_open(&hw_event, target_pid, cpu, group_fd, 0); if (fd[i][counter] < 0) { int err = errno; printf("kerneltop error: syscall returned with %d (%s)\n", @@ -1147,3 +706,95 @@ int cmd_top(int argc, char **argv, const char *prefix) return 0; } + +static const char * const top_usage[] = { + "perf top []", + NULL +}; + +static char events_help_msg[EVENTS_HELP_MAX]; + +static const struct option options[] = { + OPT_CALLBACK('e', "event", NULL, "event", + events_help_msg, parse_events), + OPT_INTEGER('c', "count", &default_interval, + "event period to sample"), + OPT_INTEGER('p', "pid", &target_pid, + "profile events on existing pid"), + OPT_BOOLEAN('a', "all-cpus", &system_wide, + "system-wide collection from all CPUs"), + OPT_INTEGER('C', "CPU", &profile_cpu, + "CPU to profile on"), + OPT_INTEGER('m', "mmap-pages", &mmap_pages, + "number of mmap data pages"), + OPT_INTEGER('r', "realtime", &realtime_prio, + "collect data with this RT SCHED_FIFO priority"), + OPT_INTEGER('d', "delay", &realtime_prio, + "number of seconds to delay between refreshes"), + OPT_BOOLEAN('D', "dump-symtab", &dump_symtab, + "dump the symbol table used for profiling"), + OPT_INTEGER('f', "--count-filter", &count_filter, + "only display functions with more events than this"), + OPT_BOOLEAN('g', "group", &group, + "put the counters into a counter group"), + OPT_STRING('s', "sym-filter", &sym_filter, "pattern", + "only display symbols matchig this pattern"), + OPT_BOOLEAN('z', "zero", &group, + "zero history across updates"), + OPT_BOOLEAN('M', "use-mmap", &use_mmap, + "track mmap events"), + OPT_BOOLEAN('U', "use-munmap", &use_munmap, + "track munmap events"), + 
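/*
+	 * -F takes a sampling frequency rather than an event period:
+	 * cmd_top() moves the value into default_interval and reduces
+	 * freq to a boolean that is passed on as hw_event.freq.
+	 */
+	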
OPT_INTEGER('F', "--freq", &freq, + "profile at this frequency"), + OPT_END() +}; + +int cmd_top(int argc, const char **argv, const char *prefix) +{ + int counter; + + page_size = sysconf(_SC_PAGE_SIZE); + + create_events_help(events_help_msg); + memcpy(event_id, default_event_id, sizeof(default_event_id)); + + argc = parse_options(argc, argv, options, top_usage, 0); + if (argc) + usage_with_options(top_usage, options); + + if (freq) { + default_interval = freq; + freq = 1; + } + + /* CPU and PID are mutually exclusive */ + if (target_pid != -1 && profile_cpu != -1) { + printf("WARNING: PID switch overriding CPU\n"); + sleep(1); + profile_cpu = -1; + } + + if (!nr_counters) { + nr_counters = 1; + event_id[0] = 0; + } + + for (counter = 0; counter < nr_counters; counter++) { + if (event_count[counter]) + continue; + + event_count[counter] = default_interval; + } + + nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); + assert(nr_cpus <= MAX_NR_CPUS); + assert(nr_cpus >= 0); + + if (target_pid != -1 || profile_cpu != -1) + nr_cpus = 1; + + parse_symbols(); + + return __cmd_top(); +} -- cgit v1.2.3 From 8fa66bdcc81dd4fc2c91228074d0a4698120c5a2 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 18 May 2009 12:45:42 -0300 Subject: perf_counter: First part of 'perf report' conversion to C + elfutils Integrate perf-report into 'perf', as builtin-report.c. Signed-off-by: Arnaldo Carvalho de Melo Acked-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 6 +- Documentation/perf_counter/builtin-report.c | 751 ++++++++++++++++++++++++++++ Documentation/perf_counter/builtin.h | 1 + Documentation/perf_counter/command-list.txt | 1 + Documentation/perf_counter/perf.c | 1 + 5 files changed, 755 insertions(+), 5 deletions(-) create mode 100644 Documentation/perf_counter/builtin-report.c diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 45daa72facd..49c601e1069 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -228,7 +228,6 @@ COMPAT_CFLAGS = COMPAT_OBJS = LIB_H = LIB_OBJS = -PROGRAMS = perf-report SCRIPT_PERL = SCRIPT_SH = TEST_PROGRAMS = @@ -315,6 +314,7 @@ LIB_OBJS += util/wrapper.o BUILTIN_OBJS += builtin-help.o BUILTIN_OBJS += builtin-record.o +BUILTIN_OBJS += builtin-report.o BUILTIN_OBJS += builtin-stat.o BUILTIN_OBJS += builtin-top.o @@ -811,10 +811,6 @@ clean: $(RM) $(htmldocs).tar.gz $(manpages).tar.gz $(RM) PERF-VERSION-FILE PERF-CFLAGS PERF-BUILD-OPTIONS -# temporary hack: -perf-report: perf-report.cc ../../include/linux/perf_counter.h Makefile - g++ -g -O2 -Wall -lrt -o $@ $< - .PHONY: all install clean strip .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell .PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c new file mode 100644 index 00000000000..864f68f06a9 --- /dev/null +++ b/Documentation/perf_counter/builtin-report.c @@ -0,0 +1,751 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "../../include/linux/perf_counter.h" +#include "list.h" + +#define SHOW_KERNEL 1 +#define SHOW_USER 2 
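+/* hypervisor-context samples, reported at level 'H' by cmd_report(): */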
+#define SHOW_HV 4 + +static char const *input_name = "output.perf"; +static int input; +static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; + +static unsigned long page_size; +static unsigned long mmap_window = 32; + +static const char *perf_event_names[] = { + [PERF_EVENT_MMAP] = " PERF_EVENT_MMAP", + [PERF_EVENT_MUNMAP] = " PERF_EVENT_MUNMAP", + [PERF_EVENT_COMM] = " PERF_EVENT_COMM", +}; + +struct ip_event { + struct perf_event_header header; + __u64 ip; + __u32 pid, tid; +}; +struct mmap_event { + struct perf_event_header header; + __u32 pid, tid; + __u64 start; + __u64 len; + __u64 pgoff; + char filename[PATH_MAX]; +}; +struct comm_event { + struct perf_event_header header; + __u32 pid,tid; + char comm[16]; +}; + +typedef union event_union { + struct perf_event_header header; + struct ip_event ip; + struct mmap_event mmap; + struct comm_event comm; +} event_t; + +struct section { + struct list_head node; + uint64_t start; + uint64_t end; + uint64_t offset; + char name[0]; +}; + +static struct section *section__new(uint64_t start, uint64_t size, + uint64_t offset, char *name) +{ + struct section *self = malloc(sizeof(*self) + strlen(name) + 1); + + if (self != NULL) { + self->start = start; + self->end = start + size; + self->offset = offset; + strcpy(self->name, name); + } + + return self; +} + +static void section__delete(struct section *self) +{ + free(self); +} + +struct symbol { + struct list_head node; + uint64_t start; + uint64_t end; + char name[0]; +}; + +static struct symbol *symbol__new(uint64_t start, uint64_t len, const char *name) +{ + struct symbol *self = malloc(sizeof(*self) + strlen(name) + 1); + + if (self != NULL) { + self->start = start; + self->end = start + len; + strcpy(self->name, name); + } + + return self; +} + +static void symbol__delete(struct symbol *self) +{ + free(self); +} + +static size_t symbol__fprintf(struct symbol *self, FILE *fp) +{ + return fprintf(fp, " %lx-%lx %s\n", + self->start, self->end, self->name); +} + +struct dso { + struct list_head node; + struct list_head sections; + struct list_head syms; + char name[0]; +}; + +static struct dso *dso__new(const char *name) +{ + struct dso *self = malloc(sizeof(*self) + strlen(name) + 1); + + if (self != NULL) { + strcpy(self->name, name); + INIT_LIST_HEAD(&self->sections); + INIT_LIST_HEAD(&self->syms); + } + + return self; +} + +static void dso__delete_sections(struct dso *self) +{ + struct section *pos, *n; + + list_for_each_entry_safe(pos, n, &self->sections, node) + section__delete(pos); +} + +static void dso__delete_symbols(struct dso *self) +{ + struct symbol *pos, *n; + + list_for_each_entry_safe(pos, n, &self->syms, node) + symbol__delete(pos); +} + +static void dso__delete(struct dso *self) +{ + dso__delete_sections(self); + dso__delete_symbols(self); + free(self); +} + +static void dso__insert_symbol(struct dso *self, struct symbol *sym) +{ + list_add_tail(&sym->node, &self->syms); +} + +static struct symbol *dso__find_symbol(struct dso *self, uint64_t ip) +{ + if (self == NULL) + return NULL; + + struct symbol *pos; + + list_for_each_entry(pos, &self->syms, node) + if (ip >= pos->start && ip <= pos->end) + return pos; + + return NULL; +} + +static int dso__load(struct dso *self) +{ + /* FIXME */ + return 0; +} + +static size_t dso__fprintf(struct dso *self, FILE *fp) +{ + struct symbol *pos; + size_t ret = fprintf(fp, "dso: %s\n", self->name); + + list_for_each_entry(pos, &self->syms, node) + ret += symbol__fprintf(pos, fp); + + return ret; +} + +static LIST_HEAD(dsos); +static 
struct dso *kernel_dso; + +static void dsos__add(struct dso *dso) +{ + list_add_tail(&dso->node, &dsos); +} + +static struct dso *dsos__find(const char *name) +{ + struct dso *pos; + + list_for_each_entry(pos, &dsos, node) + if (strcmp(pos->name, name) == 0) + return pos; + return NULL; +} + +static struct dso *dsos__findnew(const char *name) +{ + struct dso *dso = dsos__find(name); + + if (dso == NULL) { + dso = dso__new(name); + if (dso != NULL && dso__load(dso) < 0) + goto out_delete_dso; + + dsos__add(dso); + } + + return dso; + +out_delete_dso: + dso__delete(dso); + return NULL; +} + +static void dsos__fprintf(FILE *fp) +{ + struct dso *pos; + + list_for_each_entry(pos, &dsos, node) + dso__fprintf(pos, fp); +} + +static int load_kallsyms(void) +{ + kernel_dso = dso__new("[kernel]"); + if (kernel_dso == NULL) + return -1; + + FILE *file = fopen("/proc/kallsyms", "r"); + + if (file == NULL) + goto out_delete_dso; + + char *line = NULL; + size_t n; + + while (!feof(file)) { + unsigned long long start; + char c, symbf[4096]; + + if (getline(&line, &n, file) < 0) + break; + + if (!line) + goto out_delete_dso; + + if (sscanf(line, "%llx %c %s", &start, &c, symbf) == 3) { + struct symbol *sym = symbol__new(start, 0x1000000, symbf); + + if (sym == NULL) + goto out_delete_dso; + + dso__insert_symbol(kernel_dso, sym); + } + } + + dsos__add(kernel_dso); + free(line); + fclose(file); + return 0; + +out_delete_dso: + dso__delete(kernel_dso); + return -1; +} + +struct map { + struct list_head node; + uint64_t start; + uint64_t end; + uint64_t pgoff; + struct dso *dso; +}; + +static struct map *map__new(struct mmap_event *event) +{ + struct map *self = malloc(sizeof(*self)); + + if (self != NULL) { + self->start = event->start; + self->end = event->start + event->len; + self->pgoff = event->pgoff; + + self->dso = dsos__findnew(event->filename); + if (self->dso == NULL) + goto out_delete; + } + return self; +out_delete: + free(self); + return NULL; +} + +static size_t map__fprintf(struct map *self, FILE *fp) +{ + return fprintf(fp, " %lx-%lx %lx %s\n", + self->start, self->end, self->pgoff, self->dso->name); +} + +struct symhist { + struct list_head node; + struct dso *dso; + struct symbol *sym; + uint32_t count; + char level; +}; + +static struct symhist *symhist__new(struct symbol *sym, struct dso *dso, + char level) +{ + struct symhist *self = malloc(sizeof(*self)); + + if (self != NULL) { + self->sym = sym; + self->dso = dso; + self->level = level; + self->count = 0; + } + + return self; +} + +static void symhist__delete(struct symhist *self) +{ + free(self); +} + +static bool symhist__equal(struct symhist *self, struct symbol *sym, + struct dso *dso, char level) +{ + return self->level == level && self->sym == sym && self->dso == dso; +} + +static void symhist__inc(struct symhist *self) +{ + ++self->count; +} + +static size_t symhist__fprintf(struct symhist *self, FILE *fp) +{ + size_t ret = fprintf(fp, "[%c] ", self->level); + + if (self->level != '.') + ret += fprintf(fp, "%s", self->sym->name); + else + ret += fprintf(fp, "%s: %s", + self->dso ? self->dso->name : "sym ? 
self->sym->name : ""); + return ret + fprintf(fp, ": %u\n", self->count); +} + +struct thread { + struct list_head node; + struct list_head maps; + struct list_head symhists; + pid_t pid; + char *comm; +}; + +static struct thread *thread__new(pid_t pid) +{ + struct thread *self = malloc(sizeof(*self)); + + if (self != NULL) { + self->pid = pid; + self->comm = NULL; + INIT_LIST_HEAD(&self->maps); + INIT_LIST_HEAD(&self->symhists); + } + + return self; +} + +static void thread__insert_symhist(struct thread *self, + struct symhist *symhist) +{ + list_add_tail(&symhist->node, &self->symhists); +} + +static struct symhist *thread__symhists_find(struct thread *self, + struct symbol *sym, + struct dso *dso, char level) +{ + struct symhist *pos; + + list_for_each_entry(pos, &self->symhists, node) + if (symhist__equal(pos, sym, dso, level)) + return pos; + + return NULL; +} + +static int thread__symbol_incnew(struct thread *self, struct symbol *sym, + struct dso *dso, char level) +{ + struct symhist *symhist = thread__symhists_find(self, sym, dso, level); + + if (symhist == NULL) { + symhist = symhist__new(sym, dso, level); + if (symhist == NULL) + goto out_error; + thread__insert_symhist(self, symhist); + } + + symhist__inc(symhist); + return 0; +out_error: + return -ENOMEM; +} + +static int thread__set_comm(struct thread *self, const char *comm) +{ + self->comm = strdup(comm); + return self->comm ? 0 : -ENOMEM; +} + +static size_t thread__maps_fprintf(struct thread *self, FILE *fp) +{ + struct map *pos; + size_t ret = 0; + + list_for_each_entry(pos, &self->maps, node) + ret += map__fprintf(pos, fp); + + return ret; +} + +static size_t thread__fprintf(struct thread *self, FILE *fp) +{ + struct symhist *pos; + int ret = fprintf(fp, "thread: %d %s\n", self->pid, self->comm); + + list_for_each_entry(pos, &self->symhists, node) + ret += symhist__fprintf(pos, fp); + + return ret; +} + +static LIST_HEAD(threads); + +static void threads__add(struct thread *thread) +{ + list_add_tail(&thread->node, &threads); +} + +static struct thread *threads__find(pid_t pid) +{ + struct thread *pos; + + list_for_each_entry(pos, &threads, node) + if (pos->pid == pid) + return pos; + return NULL; +} + +static struct thread *threads__findnew(pid_t pid) +{ + struct thread *thread = threads__find(pid); + + if (thread == NULL) { + thread = thread__new(pid); + if (thread != NULL) + threads__add(thread); + } + + return thread; +} + +static void thread__insert_map(struct thread *self, struct map *map) +{ + list_add_tail(&map->node, &self->maps); +} + +static struct map *thread__find_map(struct thread *self, uint64_t ip) +{ + if (self == NULL) + return NULL; + + struct map *pos; + + list_for_each_entry(pos, &self->maps, node) + if (ip >= pos->start && ip <= pos->end) + return pos; + + return NULL; +} + +static void threads__fprintf(FILE *fp) +{ + struct thread *pos; + + list_for_each_entry(pos, &threads, node) + thread__fprintf(pos, fp); +} + +#if 0 +static std::string resolve_user_symbol(int pid, uint64_t ip) +{ + std::string sym = ""; + + maps_t &m = maps[pid]; + maps_t::const_iterator mi = m.upper_bound(map(ip)); + if (mi == m.end()) + return sym; + + ip -= mi->start + mi->pgoff; + + symbols_t &s = dsos[mi->dso].syms; + symbols_t::const_iterator si = s.upper_bound(symbol(ip)); + + sym = mi->dso + ": "; + + if (si == s.begin()) + return sym; + si--; + + if (si->start <= ip && ip < si->end) + sym = mi->dso + ": " + si->name; +#if 0 + else if (si->start <= ip) + sym = mi->dso + ": ?" 
+ si->name; +#endif + + return sym; +} +#endif + +static void display_help(void) +{ + printf( + "Usage: perf-report []\n" + " -i file --input= # input file\n" + ); + + exit(0); +} + +static void process_options(int argc, char *argv[]) +{ + int error = 0; + + for (;;) { + int option_index = 0; + /** Options for getopt */ + static struct option long_options[] = { + {"input", required_argument, NULL, 'i'}, + {"no-user", no_argument, NULL, 'u'}, + {"no-kernel", no_argument, NULL, 'k'}, + {"no-hv", no_argument, NULL, 'h'}, + {NULL, 0, NULL, 0 } + }; + int c = getopt_long(argc, argv, "+:i:kuh", + long_options, &option_index); + if (c == -1) + break; + + switch (c) { + case 'i': input_name = strdup(optarg); break; + case 'k': show_mask &= ~SHOW_KERNEL; break; + case 'u': show_mask &= ~SHOW_USER; break; + case 'h': show_mask &= ~SHOW_HV; break; + default: error = 1; break; + } + } + + if (error) + display_help(); +} + +int cmd_report(int argc, char **argv) +{ + unsigned long offset = 0; + unsigned long head = 0; + struct stat stat; + char *buf; + event_t *event; + int ret, rc = EXIT_FAILURE; + unsigned long total = 0; + + page_size = getpagesize(); + + process_options(argc, argv); + + input = open(input_name, O_RDONLY); + if (input < 0) { + perror("failed to open file"); + exit(-1); + } + + ret = fstat(input, &stat); + if (ret < 0) { + perror("failed to stat file"); + exit(-1); + } + + if (!stat.st_size) { + fprintf(stderr, "zero-sized file, nothing to do!\n"); + exit(0); + } + + if (load_kallsyms() < 0) { + perror("failed to open kallsyms"); + return EXIT_FAILURE; + } + +remap: + buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, + MAP_SHARED, input, offset); + if (buf == MAP_FAILED) { + perror("failed to mmap file"); + exit(-1); + } + +more: + event = (event_t *)(buf + head); + + if (head + event->header.size >= page_size * mmap_window) { + unsigned long shift = page_size * (head / page_size); + int ret; + + ret = munmap(buf, page_size * mmap_window); + assert(ret == 0); + + offset += shift; + head -= shift; + goto remap; + } + + + if (!event->header.size) { + fprintf(stderr, "zero-sized event at file offset %ld\n", offset + head); + fprintf(stderr, "skipping %ld bytes of events.\n", stat.st_size - offset - head); + goto done; + } + + head += event->header.size; + + if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) { + char level; + int show = 0; + struct dso *dso = NULL; + struct thread *thread = threads__findnew(event->ip.pid); + + if (thread == NULL) + goto done; + + if (event->header.misc & PERF_EVENT_MISC_KERNEL) { + show = SHOW_KERNEL; + level = 'k'; + dso = kernel_dso; + } else if (event->header.misc & PERF_EVENT_MISC_USER) { + show = SHOW_USER; + level = '.'; + struct map *map = thread__find_map(thread, event->ip.ip); + if (map != NULL) + dso = map->dso; + } else { + show = SHOW_HV; + level = 'H'; + } + + if (show & show_mask) { + struct symbol *sym = dso__find_symbol(dso, event->ip.ip); + + if (thread__symbol_incnew(thread, sym, dso, level)) + goto done; + } + total++; + } else switch (event->header.type) { + case PERF_EVENT_MMAP: { + struct thread *thread = threads__findnew(event->mmap.pid); + struct map *map = map__new(&event->mmap); + + if (thread == NULL || map == NULL ) + goto done; + thread__insert_map(thread, map); + break; + } + case PERF_EVENT_COMM: { + struct thread *thread = threads__findnew(event->comm.pid); + + if (thread == NULL || + thread__set_comm(thread, event->comm.comm)) + goto done; + break; + } + } + + if (offset + head < stat.st_size) + goto more; + + 
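/* All events processed: fall through and print the per-thread histograms. */ +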
rc = EXIT_SUCCESS; +done: + close(input); + //dsos__fprintf(stdout); + threads__fprintf(stdout); +#if 0 + std::map::iterator hi = hist.begin(); + + while (hi != hist.end()) { + rev_hist.insert(std::pair(hi->second, hi->first)); + hist.erase(hi++); + } + + std::multimap::const_iterator ri = rev_hist.begin(); + + while (ri != rev_hist.end()) { + printf(" %5.2f %s\n", (100.0 * ri->first)/total, ri->second.c_str()); + ri++; + } +#endif + return rc; +} + diff --git a/Documentation/perf_counter/builtin.h b/Documentation/perf_counter/builtin.h index d32318aed8c..5bfea57d33f 100644 --- a/Documentation/perf_counter/builtin.h +++ b/Documentation/perf_counter/builtin.h @@ -16,6 +16,7 @@ extern int check_pager_config(const char *cmd); extern int cmd_help(int argc, const char **argv, const char *prefix); extern int cmd_record(int argc, const char **argv, const char *prefix); +extern int cmd_report(int argc, const char **argv, const char *prefix); extern int cmd_stat(int argc, const char **argv, const char *prefix); extern int cmd_top(int argc, const char **argv, const char *prefix); extern int cmd_version(int argc, const char **argv, const char *prefix); diff --git a/Documentation/perf_counter/command-list.txt b/Documentation/perf_counter/command-list.txt index d15210aa0ca..43902920777 100644 --- a/Documentation/perf_counter/command-list.txt +++ b/Documentation/perf_counter/command-list.txt @@ -1,6 +1,7 @@ # List of known perf commands. # command name category [deprecated] [common] perf-record mainporcelain common +perf-report mainporcelain common perf-stat mainporcelain common perf-top mainporcelain common diff --git a/Documentation/perf_counter/perf.c b/Documentation/perf_counter/perf.c index 1d6d7aa575a..e8a85842b49 100644 --- a/Documentation/perf_counter/perf.c +++ b/Documentation/perf_counter/perf.c @@ -250,6 +250,7 @@ static void handle_internal_command(int argc, const char **argv) static struct cmd_struct commands[] = { { "help", cmd_help, 0 }, { "record", cmd_record, 0 }, + { "report", cmd_report, 0 }, { "stat", cmd_stat, 0 }, { "top", cmd_top, 0 }, { "version", cmd_version, 0 }, -- cgit v1.2.3 From fd4242bb35b70557eee8d0c79f82dacc3f3b89e0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 20 May 2009 12:45:34 +0200 Subject: perf_counter tools: remove the standalone perf-report utility With a built-in 'perf report' command now available, remove the standalone implementation for good. 
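The builtin command is a drop-in replacement. A quick usage sketch
(assuming perf record still writes its default output.perf file, which
is also the default input of perf report):

    $ perf record <workload>
    $ perf report               # same as: perf report -i output.perf
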
Acked-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/perf-report.cc | 515 ------------------------------ 1 file changed, 515 deletions(-) delete mode 100644 Documentation/perf_counter/perf-report.cc diff --git a/Documentation/perf_counter/perf-report.cc b/Documentation/perf_counter/perf-report.cc deleted file mode 100644 index 8855107fe6b..00000000000 --- a/Documentation/perf_counter/perf-report.cc +++ /dev/null @@ -1,515 +0,0 @@ -#define _GNU_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "../../include/linux/perf_counter.h" - -#include -#include -#include - - -#define SHOW_KERNEL 1 -#define SHOW_USER 2 -#define SHOW_HV 4 - -static char const *input_name = "output.perf"; -static int input; -static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; - -static unsigned long page_size; -static unsigned long mmap_window = 32; - -struct ip_event { - struct perf_event_header header; - __u64 ip; - __u32 pid, tid; -}; -struct mmap_event { - struct perf_event_header header; - __u32 pid, tid; - __u64 start; - __u64 len; - __u64 pgoff; - char filename[PATH_MAX]; -}; -struct comm_event { - struct perf_event_header header; - __u32 pid,tid; - char comm[16]; -}; - -typedef union event_union { - struct perf_event_header header; - struct ip_event ip; - struct mmap_event mmap; - struct comm_event comm; -} event_t; - -struct section { - uint64_t start; - uint64_t end; - - uint64_t offset; - - std::string name; - - section() { }; - - section(uint64_t stab) : end(stab) { }; - - section(uint64_t start, uint64_t size, uint64_t offset, std::string name) : - start(start), end(start + size), offset(offset), name(name) - { }; - - bool operator < (const struct section &s) const { - return end < s.end; - }; -}; - -typedef std::set sections_t; - -struct symbol { - uint64_t start; - uint64_t end; - - std::string name; - - symbol() { }; - - symbol(uint64_t ip) : start(ip) { } - - symbol(uint64_t start, uint64_t len, std::string name) : - start(start), end(start + len), name(name) - { }; - - bool operator < (const struct symbol &s) const { - return start < s.start; - }; -}; - -typedef std::set symbols_t; - -struct dso { - sections_t sections; - symbols_t syms; -}; - -static std::map dsos; - -static void load_dso_sections(std::string dso_name) -{ - struct dso &dso = dsos[dso_name]; - - std::string cmd = "readelf -DSW " + dso_name; - - FILE *file = popen(cmd.c_str(), "r"); - if (!file) { - perror("failed to open pipe"); - exit(-1); - } - - char *line = NULL; - size_t n = 0; - - while (!feof(file)) { - uint64_t addr, off, size; - char name[32]; - - if (getline(&line, &n, file) < 0) - break; - if (!line) - break; - - if (sscanf(line, " [%*2d] %16s %*14s %Lx %Lx %Lx", - name, &addr, &off, &size) == 4) { - - dso.sections.insert(section(addr, size, addr - off, name)); - } -#if 0 - /* - * for reading readelf symbols (-s), however these don't seem - * to include nearly everything, so use nm for that. 
- */ - if (sscanf(line, " %*4d %*3d: %Lx %5Lu %*7s %*6s %*7s %3d %s", - &start, &size, §ion, sym) == 4) { - - start -= dso.section_offsets[section]; - - dso.syms.insert(symbol(start, size, std::string(sym))); - } -#endif - } - pclose(file); -} - -static void load_dso_symbols(std::string dso_name, std::string args) -{ - struct dso &dso = dsos[dso_name]; - - std::string cmd = "nm -nSC " + args + " " + dso_name; - - FILE *file = popen(cmd.c_str(), "r"); - if (!file) { - perror("failed to open pipe"); - exit(-1); - } - - char *line = NULL; - size_t n = 0; - - while (!feof(file)) { - uint64_t start, size; - char c; - char sym[1024]; - - if (getline(&line, &n, file) < 0) - break; - if (!line) - break; - - - if (sscanf(line, "%Lx %Lx %c %s", &start, &size, &c, sym) == 4) { - sections_t::const_iterator si = - dso.sections.upper_bound(section(start)); - if (si == dso.sections.end()) { - printf("symbol in unknown section: %s\n", sym); - continue; - } - - start -= si->offset; - - dso.syms.insert(symbol(start, size, sym)); - } - } - pclose(file); -} - -static void load_dso(std::string dso_name) -{ - load_dso_sections(dso_name); - load_dso_symbols(dso_name, "-D"); /* dynamic symbols */ - load_dso_symbols(dso_name, ""); /* regular ones */ -} - -void load_kallsyms(void) -{ - struct dso &dso = dsos["[kernel]"]; - - FILE *file = fopen("/proc/kallsyms", "r"); - if (!file) { - perror("failed to open kallsyms"); - exit(-1); - } - - char *line; - size_t n; - - while (!feof(file)) { - uint64_t start; - char c; - char sym[1024000]; - - if (getline(&line, &n, file) < 0) - break; - if (!line) - break; - - if (sscanf(line, "%Lx %c %s", &start, &c, sym) == 3) - dso.syms.insert(symbol(start, 0x1000000, std::string(sym))); - } - fclose(file); -} - -struct map { - uint64_t start; - uint64_t end; - uint64_t pgoff; - - std::string dso; - - map() { }; - - map(uint64_t ip) : end(ip) { } - - map(mmap_event *mmap) { - start = mmap->start; - end = mmap->start + mmap->len; - pgoff = mmap->pgoff; - - dso = std::string(mmap->filename); - - if (dsos.find(dso) == dsos.end()) - load_dso(dso); - }; - - bool operator < (const struct map &m) const { - return end < m.end; - }; -}; - -typedef std::set maps_t; - -static std::map maps; - -static std::map comms; - -static std::map hist; -static std::multimap rev_hist; - -static std::string resolve_comm(int pid) -{ - std::string comm; - - std::map::const_iterator ci = comms.find(pid); - if (ci != comms.end()) { - comm = ci->second; - } else { - char pid_str[30]; - - sprintf(pid_str, ":%d", pid); - comm = pid_str; - } - - return comm; -} - -static std::string resolve_user_symbol(int pid, uint64_t ip) -{ - std::string sym = ""; - - maps_t &m = maps[pid]; - maps_t::const_iterator mi = m.upper_bound(map(ip)); - if (mi == m.end()) - return sym; - - ip -= mi->start + mi->pgoff; - - symbols_t &s = dsos[mi->dso].syms; - symbols_t::const_iterator si = s.upper_bound(symbol(ip)); - - sym = mi->dso + ": "; - - if (si == s.begin()) - return sym; - si--; - - if (si->start <= ip && ip < si->end) - sym = mi->dso + ": " + si->name; -#if 0 - else if (si->start <= ip) - sym = mi->dso + ": ?" 
+ si->name; -#endif - - return sym; -} - -static std::string resolve_kernel_symbol(uint64_t ip) -{ - std::string sym = ""; - - symbols_t &s = dsos["[kernel]"].syms; - symbols_t::const_iterator si = s.upper_bound(symbol(ip)); - - if (si == s.begin()) - return sym; - si--; - - if (si->start <= ip && ip < si->end) - sym = si->name; - - return sym; -} - -static void display_help(void) -{ - printf( - "Usage: perf-report []\n" - " -i file --input= # input file\n" - ); - - exit(0); -} - -static void process_options(int argc, char *argv[]) -{ - int error = 0; - - for (;;) { - int option_index = 0; - /** Options for getopt */ - static struct option long_options[] = { - {"input", required_argument, NULL, 'i'}, - {"no-user", no_argument, NULL, 'u'}, - {"no-kernel", no_argument, NULL, 'k'}, - {"no-hv", no_argument, NULL, 'h'}, - {NULL, 0, NULL, 0 } - }; - int c = getopt_long(argc, argv, "+:i:kuh", - long_options, &option_index); - if (c == -1) - break; - - switch (c) { - case 'i': input_name = strdup(optarg); break; - case 'k': show_mask &= ~SHOW_KERNEL; break; - case 'u': show_mask &= ~SHOW_USER; break; - case 'h': show_mask &= ~SHOW_HV; break; - default: error = 1; break; - } - } - - if (error) - display_help(); -} - -int main(int argc, char *argv[]) -{ - unsigned long offset = 0; - unsigned long head = 0; - struct stat stat; - char *buf; - event_t *event; - int ret; - unsigned long total = 0; - - page_size = getpagesize(); - - process_options(argc, argv); - - input = open(input_name, O_RDONLY); - if (input < 0) { - perror("failed to open file"); - exit(-1); - } - - ret = fstat(input, &stat); - if (ret < 0) { - perror("failed to stat file"); - exit(-1); - } - - if (!stat.st_size) { - fprintf(stderr, "zero-sized file, nothing to do!\n"); - exit(0); - } - - load_kallsyms(); - -remap: - buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, - MAP_SHARED, input, offset); - if (buf == MAP_FAILED) { - perror("failed to mmap file"); - exit(-1); - } - -more: - event = (event_t *)(buf + head); - - if (head + event->header.size >= page_size * mmap_window) { - unsigned long shift = page_size * (head / page_size); - int ret; - - ret = munmap(buf, page_size * mmap_window); - assert(ret == 0); - - offset += shift; - head -= shift; - goto remap; - } - - - if (!event->header.size) { - fprintf(stderr, "zero-sized event at file offset %ld\n", offset + head); - fprintf(stderr, "skipping %ld bytes of events.\n", stat.st_size - offset - head); - goto done; - } - - head += event->header.size; - - if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) { - std::string comm, sym, level; - int show = 0; - char output[1024]; - - if (event->header.misc & PERF_EVENT_MISC_KERNEL) { - show |= SHOW_KERNEL; - level = " [k] "; - sym = resolve_kernel_symbol(event->ip.ip); - } else if (event->header.misc & PERF_EVENT_MISC_USER) { - show |= SHOW_USER; - level = " [.] 
"; - sym = resolve_user_symbol(event->ip.pid, event->ip.ip); - } else { - show |= SHOW_HV; - level = " [H] "; - } - - if (show & show_mask) { - comm = resolve_comm(event->ip.pid); - snprintf(output, sizeof(output), "%16s %s %s", - comm.c_str(), level.c_str(), sym.c_str()); - hist[output]++; - } - - total++; - - } else switch (event->header.type) { - case PERF_EVENT_MMAP: - maps[event->mmap.pid].insert(map(&event->mmap)); - break; - - case PERF_EVENT_COMM: - comms[event->comm.pid] = std::string(event->comm.comm); - break; - } - - if (offset + head < stat.st_size) - goto more; - -done: - - close(input); - - std::map::iterator hi = hist.begin(); - - while (hi != hist.end()) { - rev_hist.insert(std::pair(hi->second, hi->first)); - hist.erase(hi++); - } - - std::multimap::const_iterator ri = rev_hist.begin(); - - while (ri != rev_hist.end()) { - printf(" %5.2f %s\n", (100.0 * ri->first)/total, ri->second.c_str()); - ri++; - } - - return 0; -} - -- cgit v1.2.3 From 62eb93905b3b43cea407cfbc061cc7b40ae1c6e9 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 18 May 2009 14:28:47 -0300 Subject: perf_counter: Implement dso__load using libelf Signed-off-by: Arnaldo Carvalho de Melo Acked-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 2 +- Documentation/perf_counter/builtin-report.c | 122 +++++++++++++++++++++++++++- 2 files changed, 121 insertions(+), 3 deletions(-) diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 49c601e1069..6bffa86af6b 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -160,7 +160,7 @@ uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') # CFLAGS and LDFLAGS are for the users to override from the command line. 
CFLAGS = -g -O2 -Wall -LDFLAGS = -lpthread -lrt +LDFLAGS = -lpthread -lrt -lelf ALL_CFLAGS = $(CFLAGS) ALL_LDFLAGS = $(LDFLAGS) STRIP ?= strip diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 864f68f06a9..ad2f327a657 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -8,6 +8,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -195,10 +198,123 @@ static struct symbol *dso__find_symbol(struct dso *self, uint64_t ip) return NULL; } +/** + * elf_symtab__for_each_symbol - iterate thru all the symbols + * + * @self: struct elf_symtab instance to iterate + * @index: uint32_t index + * @sym: GElf_Sym iterator + */ +#define elf_symtab__for_each_symbol(syms, nr_syms, index, sym) \ + for (index = 0, gelf_getsym(syms, index, &sym);\ + index < nr_syms; \ + index++, gelf_getsym(syms, index, &sym)) + +static inline uint8_t elf_sym__type(const GElf_Sym *sym) +{ + return GELF_ST_TYPE(sym->st_info); +} + +static inline bool elf_sym__is_function(const GElf_Sym *sym) +{ + return elf_sym__type(sym) == STT_FUNC && + sym->st_name != 0 && + sym->st_shndx != SHN_UNDEF; +} + +static inline const char *elf_sym__name(const GElf_Sym *sym, + const Elf_Data *symstrs) +{ + return symstrs->d_buf + sym->st_name; +} + +static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, + GElf_Shdr *shp, const char *name, + size_t *index) +{ + Elf_Scn *sec = NULL; + size_t cnt = 1; + + while ((sec = elf_nextscn(elf, sec)) != NULL) { + char *str; + + gelf_getshdr(sec, shp); + str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); + if (!strcmp(name, str)) { + if (index) + *index = cnt; + break; + } + ++cnt; + } + + return sec; +} + static int dso__load(struct dso *self) { - /* FIXME */ - return 0; + int fd = open(self->name, O_RDONLY), err = -1; + + if (fd == -1) + return -1; + + Elf *elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); + if (elf == NULL) { + fprintf(stderr, "%s: cannot read %s ELF file.\n", + __func__, self->name); + goto out_close; + } + + GElf_Ehdr ehdr; + if (gelf_getehdr(elf, &ehdr) == NULL) { + fprintf(stderr, "%s: cannot get elf header.\n", __func__); + goto out_elf_end; + } + + GElf_Shdr shdr; + Elf_Scn *sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL); + if (sec == NULL) + sec = elf_section_by_name(elf, &ehdr, &shdr, ".dynsym", NULL); + + if (sec == NULL) + goto out_elf_end; + + if (gelf_getshdr(sec, &shdr) == NULL) + goto out_elf_end; + + Elf_Data *syms = elf_getdata(sec, NULL); + if (syms == NULL) + goto out_elf_end; + + sec = elf_getscn(elf, shdr.sh_link); + if (sec == NULL) + goto out_elf_end; + + Elf_Data *symstrs = elf_getdata(sec, NULL); + if (symstrs == NULL) + goto out_elf_end; + + const uint32_t nr_syms = shdr.sh_size / shdr.sh_entsize; + + GElf_Sym sym; + uint32_t index; + elf_symtab__for_each_symbol(syms, nr_syms, index, sym) { + if (!elf_sym__is_function(&sym)) + continue; + struct symbol *f = symbol__new(sym.st_value, sym.st_size, + elf_sym__name(&sym, symstrs)); + if (f == NULL) + goto out_elf_end; + + dso__insert_symbol(self, f); + } + + err = 0; +out_elf_end: + elf_end(elf); +out_close: + close(fd); + return err; } static size_t dso__fprintf(struct dso *self, FILE *fp) @@ -614,6 +730,8 @@ int cmd_report(int argc, char **argv) int ret, rc = EXIT_FAILURE; unsigned long total = 0; + elf_version(EV_CURRENT); + page_size = getpagesize(); process_options(argc, argv); -- cgit v1.2.3 From 35a50c8a20eea22c141e05c5667ac21c48b8b65d Mon Sep 17 
00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 18 May 2009 16:24:49 -0300 Subject: perf_counter: Use rb_trees in perf report Signed-off-by: Arnaldo Carvalho de Melo Acked-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 3 + Documentation/perf_counter/builtin-report.c | 60 +++-- Documentation/perf_counter/util/rbtree.c | 383 ++++++++++++++++++++++++++++ Documentation/perf_counter/util/rbtree.h | 171 +++++++++++++ 4 files changed, 601 insertions(+), 16 deletions(-) create mode 100644 Documentation/perf_counter/util/rbtree.c create mode 100644 Documentation/perf_counter/util/rbtree.h diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 6bffa86af6b..412dea15d0b 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -287,6 +287,8 @@ LIB_FILE=libperf.a LIB_H += ../../include/linux/perf_counter.h LIB_H += perf.h +LIB_H += util/list.h +LIB_H += util/rbtree.h LIB_H += util/levenshtein.h LIB_H += util/parse-options.h LIB_H += util/parse-events.h @@ -306,6 +308,7 @@ LIB_OBJS += util/levenshtein.o LIB_OBJS += util/parse-options.o LIB_OBJS += util/parse-events.o LIB_OBJS += util/path.o +LIB_OBJS += util/rbtree.o LIB_OBJS += util/run-command.o LIB_OBJS += util/quote.o LIB_OBJS += util/strbuf.o diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index ad2f327a657..f63057fe2cd 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -32,7 +32,8 @@ #include #include "../../include/linux/perf_counter.h" -#include "list.h" +#include "util/list.h" +#include "util/rbtree.h" #define SHOW_KERNEL 1 #define SHOW_USER 2 @@ -106,10 +107,10 @@ static void section__delete(struct section *self) } struct symbol { - struct list_head node; - uint64_t start; - uint64_t end; - char name[0]; + struct rb_node rb_node; + uint64_t start; + uint64_t end; + char name[0]; }; static struct symbol *symbol__new(uint64_t start, uint64_t len, const char *name) @@ -139,7 +140,7 @@ static size_t symbol__fprintf(struct symbol *self, FILE *fp) struct dso { struct list_head node; struct list_head sections; - struct list_head syms; + struct rb_root syms; char name[0]; }; @@ -150,7 +151,7 @@ static struct dso *dso__new(const char *name) if (self != NULL) { strcpy(self->name, name); INIT_LIST_HEAD(&self->sections); - INIT_LIST_HEAD(&self->syms); + self->syms = RB_ROOT; } return self; @@ -166,10 +167,14 @@ static void dso__delete_sections(struct dso *self) static void dso__delete_symbols(struct dso *self) { - struct symbol *pos, *n; + struct symbol *pos; + struct rb_node *next = rb_first(&self->syms); - list_for_each_entry_safe(pos, n, &self->syms, node) + while (next) { + pos = rb_entry(next, struct symbol, rb_node); + next = rb_next(&pos->rb_node); symbol__delete(pos); + } } static void dso__delete(struct dso *self) @@ -181,7 +186,21 @@ static void dso__delete(struct dso *self) static void dso__insert_symbol(struct dso *self, struct symbol *sym) { - list_add_tail(&sym->node, &self->syms); + struct rb_node **p = &self->syms.rb_node; + struct rb_node *parent = NULL; + const uint64_t ip = sym->start; + struct symbol *s; + + while (*p != NULL) { + parent = *p; + s = rb_entry(parent, struct symbol, rb_node); + if (ip < s->start) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + rb_link_node(&sym->rb_node, parent, p); + 
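/* link in as a red leaf first; rb_insert_color() then rebalances the tree */ +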
rb_insert_color(&sym->rb_node, &self->syms); } static struct symbol *dso__find_symbol(struct dso *self, uint64_t ip) @@ -189,11 +208,18 @@ static struct symbol *dso__find_symbol(struct dso *self, uint64_t ip) if (self == NULL) return NULL; - struct symbol *pos; + struct rb_node *n = self->syms.rb_node; - list_for_each_entry(pos, &self->syms, node) - if (ip >= pos->start && ip <= pos->end) - return pos; + while (n) { + struct symbol *s = rb_entry(n, struct symbol, rb_node); + + if (ip < s->start) + n = n->rb_left; + else if (ip > s->end) + n = n->rb_right; + else + return s; + } return NULL; } @@ -319,11 +345,13 @@ out_close: static size_t dso__fprintf(struct dso *self, FILE *fp) { - struct symbol *pos; size_t ret = fprintf(fp, "dso: %s\n", self->name); - list_for_each_entry(pos, &self->syms, node) + struct rb_node *nd; + for (nd = rb_first(&self->syms); nd; nd = rb_next(nd)) { + struct symbol *pos = rb_entry(nd, struct symbol, rb_node); ret += symbol__fprintf(pos, fp); + } return ret; } diff --git a/Documentation/perf_counter/util/rbtree.c b/Documentation/perf_counter/util/rbtree.c new file mode 100644 index 00000000000..b15ba9c7cb3 --- /dev/null +++ b/Documentation/perf_counter/util/rbtree.c @@ -0,0 +1,383 @@ +/* + Red Black Trees + (C) 1999 Andrea Arcangeli + (C) 2002 David Woodhouse + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + linux/lib/rbtree.c +*/ + +#include "rbtree.h" + +static void __rb_rotate_left(struct rb_node *node, struct rb_root *root) +{ + struct rb_node *right = node->rb_right; + struct rb_node *parent = rb_parent(node); + + if ((node->rb_right = right->rb_left)) + rb_set_parent(right->rb_left, node); + right->rb_left = node; + + rb_set_parent(right, parent); + + if (parent) + { + if (node == parent->rb_left) + parent->rb_left = right; + else + parent->rb_right = right; + } + else + root->rb_node = right; + rb_set_parent(node, right); +} + +static void __rb_rotate_right(struct rb_node *node, struct rb_root *root) +{ + struct rb_node *left = node->rb_left; + struct rb_node *parent = rb_parent(node); + + if ((node->rb_left = left->rb_right)) + rb_set_parent(left->rb_right, node); + left->rb_right = node; + + rb_set_parent(left, parent); + + if (parent) + { + if (node == parent->rb_right) + parent->rb_right = left; + else + parent->rb_left = left; + } + else + root->rb_node = left; + rb_set_parent(node, left); +} + +void rb_insert_color(struct rb_node *node, struct rb_root *root) +{ + struct rb_node *parent, *gparent; + + while ((parent = rb_parent(node)) && rb_is_red(parent)) + { + gparent = rb_parent(parent); + + if (parent == gparent->rb_left) + { + { + register struct rb_node *uncle = gparent->rb_right; + if (uncle && rb_is_red(uncle)) + { + rb_set_black(uncle); + rb_set_black(parent); + rb_set_red(gparent); + node = gparent; + continue; + } + } + + if (parent->rb_right == node) + { + register struct rb_node *tmp; + __rb_rotate_left(parent, root); + tmp = parent; + parent = node; + node = tmp; + } + + rb_set_black(parent); + rb_set_red(gparent); + __rb_rotate_right(gparent, root); + } else { + { + register struct rb_node *uncle = gparent->rb_left; + if (uncle && rb_is_red(uncle)) + { + rb_set_black(uncle); + rb_set_black(parent); + rb_set_red(gparent); + node = gparent; + continue; + } + } + + if (parent->rb_left == node) + { + register struct rb_node *tmp; + __rb_rotate_right(parent, root); + tmp = parent; + parent = node; + node = tmp; + } + + rb_set_black(parent); + rb_set_red(gparent); + __rb_rotate_left(gparent, root); + } + } + + rb_set_black(root->rb_node); +} + +static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, + struct rb_root *root) +{ + struct rb_node *other; + + while ((!node || rb_is_black(node)) && node != root->rb_node) + { + if (parent->rb_left == node) + { + other = parent->rb_right; + if (rb_is_red(other)) + { + rb_set_black(other); + rb_set_red(parent); + __rb_rotate_left(parent, root); + other = parent->rb_right; + } + if ((!other->rb_left || rb_is_black(other->rb_left)) && + (!other->rb_right || rb_is_black(other->rb_right))) + { + rb_set_red(other); + node = parent; + parent = rb_parent(node); + } + else + { + if (!other->rb_right || rb_is_black(other->rb_right)) + { + rb_set_black(other->rb_left); + rb_set_red(other); + __rb_rotate_right(other, root); + other = parent->rb_right; + } + rb_set_color(other, rb_color(parent)); + rb_set_black(parent); + rb_set_black(other->rb_right); + __rb_rotate_left(parent, root); + node = root->rb_node; + break; + } + } + else + { + other = parent->rb_left; + if (rb_is_red(other)) + { + rb_set_black(other); + rb_set_red(parent); + __rb_rotate_right(parent, root); + other = parent->rb_left; + } + if 
((!other->rb_left || rb_is_black(other->rb_left)) && + (!other->rb_right || rb_is_black(other->rb_right))) + { + rb_set_red(other); + node = parent; + parent = rb_parent(node); + } + else + { + if (!other->rb_left || rb_is_black(other->rb_left)) + { + rb_set_black(other->rb_right); + rb_set_red(other); + __rb_rotate_left(other, root); + other = parent->rb_left; + } + rb_set_color(other, rb_color(parent)); + rb_set_black(parent); + rb_set_black(other->rb_left); + __rb_rotate_right(parent, root); + node = root->rb_node; + break; + } + } + } + if (node) + rb_set_black(node); +} + +void rb_erase(struct rb_node *node, struct rb_root *root) +{ + struct rb_node *child, *parent; + int color; + + if (!node->rb_left) + child = node->rb_right; + else if (!node->rb_right) + child = node->rb_left; + else + { + struct rb_node *old = node, *left; + + node = node->rb_right; + while ((left = node->rb_left) != NULL) + node = left; + child = node->rb_right; + parent = rb_parent(node); + color = rb_color(node); + + if (child) + rb_set_parent(child, parent); + if (parent == old) { + parent->rb_right = child; + parent = node; + } else + parent->rb_left = child; + + node->rb_parent_color = old->rb_parent_color; + node->rb_right = old->rb_right; + node->rb_left = old->rb_left; + + if (rb_parent(old)) + { + if (rb_parent(old)->rb_left == old) + rb_parent(old)->rb_left = node; + else + rb_parent(old)->rb_right = node; + } else + root->rb_node = node; + + rb_set_parent(old->rb_left, node); + if (old->rb_right) + rb_set_parent(old->rb_right, node); + goto color; + } + + parent = rb_parent(node); + color = rb_color(node); + + if (child) + rb_set_parent(child, parent); + if (parent) + { + if (parent->rb_left == node) + parent->rb_left = child; + else + parent->rb_right = child; + } + else + root->rb_node = child; + + color: + if (color == RB_BLACK) + __rb_erase_color(child, parent, root); +} + +/* + * This function returns the first node (in sort order) of the tree. + */ +struct rb_node *rb_first(const struct rb_root *root) +{ + struct rb_node *n; + + n = root->rb_node; + if (!n) + return NULL; + while (n->rb_left) + n = n->rb_left; + return n; +} + +struct rb_node *rb_last(const struct rb_root *root) +{ + struct rb_node *n; + + n = root->rb_node; + if (!n) + return NULL; + while (n->rb_right) + n = n->rb_right; + return n; +} + +struct rb_node *rb_next(const struct rb_node *node) +{ + struct rb_node *parent; + + if (rb_parent(node) == node) + return NULL; + + /* If we have a right-hand child, go down and then left as far + as we can. */ + if (node->rb_right) { + node = node->rb_right; + while (node->rb_left) + node=node->rb_left; + return (struct rb_node *)node; + } + + /* No right-hand children. Everything down and left is + smaller than us, so any 'next' node must be in the general + direction of our parent. Go up the tree; any time the + ancestor is a right-hand child of its parent, keep going + up. First time it's a left-hand child of its parent, said + parent is our 'next' node. */ + while ((parent = rb_parent(node)) && node == parent->rb_right) + node = parent; + + return parent; +} + +struct rb_node *rb_prev(const struct rb_node *node) +{ + struct rb_node *parent; + + if (rb_parent(node) == node) + return NULL; + + /* If we have a left-hand child, go down and then right as far + as we can. */ + if (node->rb_left) { + node = node->rb_left; + while (node->rb_right) + node=node->rb_right; + return (struct rb_node *)node; + } + + /* No left-hand children. 
Go up till we find an ancestor which
+	   is a right-hand child of its parent */
+	while ((parent = rb_parent(node)) && node == parent->rb_left)
+		node = parent;
+
+	return parent;
+}
+
+void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+		     struct rb_root *root)
+{
+	struct rb_node *parent = rb_parent(victim);
+
+	/* Set the surrounding nodes to point to the replacement */
+	if (parent) {
+		if (victim == parent->rb_left)
+			parent->rb_left = new;
+		else
+			parent->rb_right = new;
+	} else {
+		root->rb_node = new;
+	}
+	if (victim->rb_left)
+		rb_set_parent(victim->rb_left, new);
+	if (victim->rb_right)
+		rb_set_parent(victim->rb_right, new);
+
+	/* Copy the pointers/colour from the victim to the replacement */
+	*new = *victim;
+}
diff --git a/Documentation/perf_counter/util/rbtree.h b/Documentation/perf_counter/util/rbtree.h
new file mode 100644
index 00000000000..6bdc488a47f
--- /dev/null
+++ b/Documentation/perf_counter/util/rbtree.h
@@ -0,0 +1,171 @@
+/*
+  Red Black Trees
+  (C) 1999  Andrea Arcangeli
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+  linux/include/linux/rbtree.h
+
+  To use rbtrees you'll have to implement your own insert and search cores.
+  This avoids the use of callbacks and the dramatic performance hit they
+  would bring. It's not the cleanest way, but in C (as opposed to C++) it
+  is how we get both performance and genericity...
+
+  Some examples of insert and search follow here. The search is a plain,
+  normal search over an ordered tree. The insert instead must be implemented
+  in two steps: first the code must insert the element in order as a red
+  leaf in the tree, then the support library function rb_insert_color()
+  must be called. That function will do the non-trivial work of
+  rebalancing the rbtree if necessary.
+ +----------------------------------------------------------------------- +static inline struct page * rb_search_page_cache(struct inode * inode, + unsigned long offset) +{ + struct rb_node * n = inode->i_rb_page_cache.rb_node; + struct page * page; + + while (n) + { + page = rb_entry(n, struct page, rb_page_cache); + + if (offset < page->offset) + n = n->rb_left; + else if (offset > page->offset) + n = n->rb_right; + else + return page; + } + return NULL; +} + +static inline struct page * __rb_insert_page_cache(struct inode * inode, + unsigned long offset, + struct rb_node * node) +{ + struct rb_node ** p = &inode->i_rb_page_cache.rb_node; + struct rb_node * parent = NULL; + struct page * page; + + while (*p) + { + parent = *p; + page = rb_entry(parent, struct page, rb_page_cache); + + if (offset < page->offset) + p = &(*p)->rb_left; + else if (offset > page->offset) + p = &(*p)->rb_right; + else + return page; + } + + rb_link_node(node, parent, p); + + return NULL; +} + +static inline struct page * rb_insert_page_cache(struct inode * inode, + unsigned long offset, + struct rb_node * node) +{ + struct page * ret; + if ((ret = __rb_insert_page_cache(inode, offset, node))) + goto out; + rb_insert_color(node, &inode->i_rb_page_cache); + out: + return ret; +} +----------------------------------------------------------------------- +*/ + +#ifndef _LINUX_RBTREE_H +#define _LINUX_RBTREE_H + +#include + +/** + * container_of - cast a member of a structure out to the containing structure + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. + * + */ +#define container_of(ptr, type, member) ({ \ + const typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) + +struct rb_node +{ + unsigned long rb_parent_color; +#define RB_RED 0 +#define RB_BLACK 1 + struct rb_node *rb_right; + struct rb_node *rb_left; +} __attribute__((aligned(sizeof(long)))); + /* The alignment might seem pointless, but allegedly CRIS needs it */ + +struct rb_root +{ + struct rb_node *rb_node; +}; + + +#define rb_parent(r) ((struct rb_node *)((r)->rb_parent_color & ~3)) +#define rb_color(r) ((r)->rb_parent_color & 1) +#define rb_is_red(r) (!rb_color(r)) +#define rb_is_black(r) rb_color(r) +#define rb_set_red(r) do { (r)->rb_parent_color &= ~1; } while (0) +#define rb_set_black(r) do { (r)->rb_parent_color |= 1; } while (0) + +static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p) +{ + rb->rb_parent_color = (rb->rb_parent_color & 3) | (unsigned long)p; +} +static inline void rb_set_color(struct rb_node *rb, int color) +{ + rb->rb_parent_color = (rb->rb_parent_color & ~1) | color; +} + +#define RB_ROOT (struct rb_root) { NULL, } +#define rb_entry(ptr, type, member) container_of(ptr, type, member) + +#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) +#define RB_EMPTY_NODE(node) (rb_parent(node) == node) +#define RB_CLEAR_NODE(node) (rb_set_parent(node, node)) + +extern void rb_insert_color(struct rb_node *, struct rb_root *); +extern void rb_erase(struct rb_node *, struct rb_root *); + +/* Find logical next and previous nodes in a tree */ +extern struct rb_node *rb_next(const struct rb_node *); +extern struct rb_node *rb_prev(const struct rb_node *); +extern struct rb_node *rb_first(const struct rb_root *); +extern struct rb_node *rb_last(const struct rb_root *); + +/* Fast replacement of a single node without remove/rebalance/add/rebalance */ +extern void 
rb_replace_node(struct rb_node *victim, struct rb_node *new, + struct rb_root *root); + +static inline void rb_link_node(struct rb_node * node, struct rb_node * parent, + struct rb_node ** rb_link) +{ + node->rb_parent_color = (unsigned long )parent; + node->rb_left = node->rb_right = NULL; + + *rb_link = node; +} + +#endif /* _LINUX_RBTREE_H */ -- cgit v1.2.3 From 040e6034124c504d536736ce08e4643e640cd7c2 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 18 May 2009 16:25:31 -0300 Subject: perf_counter: Add our private copy of list.h Signed-off-by: Arnaldo Carvalho de Melo Acked-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/util/list.h | 603 +++++++++++++++++++++++++++++++++ 1 file changed, 603 insertions(+) create mode 100644 Documentation/perf_counter/util/list.h diff --git a/Documentation/perf_counter/util/list.h b/Documentation/perf_counter/util/list.h new file mode 100644 index 00000000000..e2548e8072c --- /dev/null +++ b/Documentation/perf_counter/util/list.h @@ -0,0 +1,603 @@ +#ifndef _LINUX_LIST_H +#define _LINUX_LIST_H +/* + Copyright (C) Cast of dozens, comes from the Linux kernel + + This program is free software; you can redistribute it and/or modify it + under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. +*/ + +#include + +/* + * These are non-NULL pointers that will result in page faults + * under normal circumstances, used to verify that nobody uses + * non-initialized list entries. + */ +#define LIST_POISON1 ((void *)0x00100100) +#define LIST_POISON2 ((void *)0x00200200) + +/** + * container_of - cast a member of a structure out to the containing structure + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. + * + */ +#define container_of(ptr, type, member) ({ \ + const typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) + +/* + * Simple doubly linked list implementation. + * + * Some of the internal functions ("__xxx") are useful when + * manipulating whole lists rather than single entries, as + * sometimes we already know the next/prev entries and we can + * generate better code by using them directly rather than + * using the generic single-entry routines. + */ + +struct list_head { + struct list_head *next, *prev; +}; + +#define LIST_HEAD_INIT(name) { &(name), &(name) } + +#define LIST_HEAD(name) \ + struct list_head name = LIST_HEAD_INIT(name) + +static inline void INIT_LIST_HEAD(struct list_head *list) +{ + list->next = list; + list->prev = list; +} + +/* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void __list_add(struct list_head *new, + struct list_head *prev, + struct list_head *next) +{ + next->prev = new; + new->next = next; + new->prev = prev; + prev->next = new; +} + +/** + * list_add - add a new entry + * @new: new entry to be added + * @head: list head to add it after + * + * Insert a new entry after the specified head. + * This is good for implementing stacks. 
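+ * (@new becomes the first element, so successive calls behave like pushes.)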
+ */
+static inline void list_add(struct list_head *new, struct list_head *head)
+{
+	__list_add(new, head, head->next);
+}
+
+/**
+ * list_add_tail - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static inline void list_add_tail(struct list_head *new, struct list_head *head)
+{
+	__list_add(new, head->prev, head);
+}
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_del(struct list_head * prev, struct list_head * next)
+{
+	next->prev = prev;
+	prev->next = next;
+}
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
+static inline void list_del(struct list_head *entry)
+{
+	__list_del(entry->prev, entry->next);
+	entry->next = LIST_POISON1;
+	entry->prev = LIST_POISON2;
+}
+
+/**
+ * list_del_range - deletes range of entries from list.
+ * @begin: first element in the range to delete from the list.
+ * @end: last element in the range to delete from the list.
+ * Note: list_empty on the range of entries does not return true after this,
+ * the entries are in an undefined state.
+ */
+static inline void list_del_range(struct list_head *begin,
+				  struct list_head *end)
+{
+	begin->prev->next = end->next;
+	end->next->prev = begin->prev;
+}
+
+/**
+ * list_replace - replace old entry by new one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ * Note: if 'old' was empty, it will be overwritten.
+ */
+static inline void list_replace(struct list_head *old,
+				struct list_head *new)
+{
+	new->next = old->next;
+	new->next->prev = new;
+	new->prev = old->prev;
+	new->prev->next = new;
+}
+
+static inline void list_replace_init(struct list_head *old,
+				     struct list_head *new)
+{
+	list_replace(old, new);
+	INIT_LIST_HEAD(old);
+}
+
+/**
+ * list_del_init - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ */
+static inline void list_del_init(struct list_head *entry)
+{
+	__list_del(entry->prev, entry->next);
+	INIT_LIST_HEAD(entry);
+}
+
+/**
+ * list_move - delete from one list and add as another's head
+ * @list: the entry to move
+ * @head: the head that will precede our entry
+ */
+static inline void list_move(struct list_head *list, struct list_head *head)
+{
+	__list_del(list->prev, list->next);
+	list_add(list, head);
+}
+
+/**
+ * list_move_tail - delete from one list and add as another's tail
+ * @list: the entry to move
+ * @head: the head that will follow our entry
+ */
+static inline void list_move_tail(struct list_head *list,
+				  struct list_head *head)
+{
+	__list_del(list->prev, list->next);
+	list_add_tail(list, head);
+}
+
+/**
+ * list_is_last - tests whether @list is the last entry in list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_last(const struct list_head *list,
+			       const struct list_head *head)
+{
+	return list->next == head;
+}
+
+/**
+ * list_empty - tests whether a list is empty
+ * @head: the list to test.
+ */ +static inline int list_empty(const struct list_head *head) +{ + return head->next == head; +} + +/** + * list_empty_careful - tests whether a list is empty and not being modified + * @head: the list to test + * + * Description: + * tests whether a list is empty _and_ checks that no other CPU might be + * in the process of modifying either member (next or prev) + * + * NOTE: using list_empty_careful() without synchronization + * can only be safe if the only activity that can happen + * to the list entry is list_del_init(). Eg. it cannot be used + * if another CPU could re-list_add() it. + */ +static inline int list_empty_careful(const struct list_head *head) +{ + struct list_head *next = head->next; + return (next == head) && (next == head->prev); +} + +static inline void __list_splice(struct list_head *list, + struct list_head *head) +{ + struct list_head *first = list->next; + struct list_head *last = list->prev; + struct list_head *at = head->next; + + first->prev = head; + head->next = first; + + last->next = at; + at->prev = last; +} + +/** + * list_splice - join two lists + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice(struct list_head *list, struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head); +} + +/** + * list_splice_init - join two lists and reinitialise the emptied list. + * @list: the new list to add. + * @head: the place to add it in the first list. + * + * The list at @list is reinitialised + */ +static inline void list_splice_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head); + INIT_LIST_HEAD(list); + } +} + +/** + * list_entry - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + */ +#define list_entry(ptr, type, member) \ + container_of(ptr, type, member) + +/** + * list_first_entry - get the first element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + * + * Note, that list is expected to be not empty. + */ +#define list_first_entry(ptr, type, member) \ + list_entry((ptr)->next, type, member) + +/** + * list_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + */ +#define list_for_each(pos, head) \ + for (pos = (head)->next; pos != (head); \ + pos = pos->next) + +/** + * __list_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + * + * This variant differs from list_for_each() in that it's the + * simplest possible list iteration code, no prefetching is done. + * Use this for code that knows the list to be very short (empty + * or 1 entry) most of the time. + */ +#define __list_for_each(pos, head) \ + for (pos = (head)->next; pos != (head); pos = pos->next) + +/** + * list_for_each_prev - iterate over a list backwards + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + */ +#define list_for_each_prev(pos, head) \ + for (pos = (head)->prev; pos != (head); \ + pos = pos->prev) + +/** + * list_for_each_safe - iterate over a list safe against removal of list entry + * @pos: the &struct list_head to use as a loop cursor. 
+ * @n: another &struct list_head to use as temporary storage + * @head: the head for your list. + */ +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) + +/** + * list_for_each_entry - iterate over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry(pos, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_reverse - iterate backwards over list of given type. + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry_reverse(pos, head, member) \ + for (pos = list_entry((head)->prev, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.prev, typeof(*pos), member)) + +/** + * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue + * @pos: the type * to use as a start point + * @head: the head of the list + * @member: the name of the list_struct within the struct. + * + * Prepares a pos entry for use as a start point in list_for_each_entry_continue. + */ +#define list_prepare_entry(pos, head, member) \ + ((pos) ? : list_entry(head, typeof(*pos), member)) + +/** + * list_for_each_entry_continue - continue iteration over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Continue to iterate over list of given type, continuing after + * the current position. + */ +#define list_for_each_entry_continue(pos, head, member) \ + for (pos = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_from - iterate over list of given type from the current point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate over list of given type, continuing from current position. + */ +#define list_for_each_entry_from(pos, head, member) \ + for (; &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry_safe(pos, n, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member), \ + n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_safe_continue + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate over list of given type, continuing after current point, + * safe against removal of list entry. 
+ */ +#define list_for_each_entry_safe_continue(pos, n, head, member) \ + for (pos = list_entry(pos->member.next, typeof(*pos), member), \ + n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_safe_from + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate over list of given type from current point, safe against + * removal of list entry. + */ +#define list_for_each_entry_safe_from(pos, n, head, member) \ + for (n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_safe_reverse + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate backwards over list of given type, safe against removal + * of list entry. + */ +#define list_for_each_entry_safe_reverse(pos, n, head, member) \ + for (pos = list_entry((head)->prev, typeof(*pos), member), \ + n = list_entry(pos->member.prev, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.prev, typeof(*n), member)) + +/* + * Double linked lists with a single pointer list head. + * Mostly useful for hash tables where the two pointer list head is + * too wasteful. + * You lose the ability to access the tail in O(1). + */ + +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + +#define HLIST_HEAD_INIT { .first = NULL } +#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) +static inline void INIT_HLIST_NODE(struct hlist_node *h) +{ + h->next = NULL; + h->pprev = NULL; +} + +static inline int hlist_unhashed(const struct hlist_node *h) +{ + return !h->pprev; +} + +static inline int hlist_empty(const struct hlist_head *h) +{ + return !h->first; +} + +static inline void __hlist_del(struct hlist_node *n) +{ + struct hlist_node *next = n->next; + struct hlist_node **pprev = n->pprev; + *pprev = next; + if (next) + next->pprev = pprev; +} + +static inline void hlist_del(struct hlist_node *n) +{ + __hlist_del(n); + n->next = LIST_POISON1; + n->pprev = LIST_POISON2; +} + +static inline void hlist_del_init(struct hlist_node *n) +{ + if (!hlist_unhashed(n)) { + __hlist_del(n); + INIT_HLIST_NODE(n); + } +} + +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) +{ + struct hlist_node *first = h->first; + n->next = first; + if (first) + first->pprev = &n->next; + h->first = n; + n->pprev = &h->first; +} + +/* next must be != NULL */ +static inline void hlist_add_before(struct hlist_node *n, + struct hlist_node *next) +{ + n->pprev = next->pprev; + n->next = next; + next->pprev = &n->next; + *(n->pprev) = n; +} + +static inline void hlist_add_after(struct hlist_node *n, + struct hlist_node *next) +{ + next->next = n->next; + n->next = next; + next->pprev = &n->next; + + if(next->next) + next->next->pprev = &next->next; +} + +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#define hlist_for_each(pos, head) \ + for (pos = (head)->first; pos; \ + pos = pos->next) + +#define hlist_for_each_safe(pos, n, head) \ + 
for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ + pos = n) + +/** + * hlist_for_each_entry - iterate over list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry(tpos, pos, head, member) \ + for (pos = (head)->first; \ + pos && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_for_each_entry_continue - iterate over a hlist continuing after current point + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_continue(tpos, pos, member) \ + for (pos = (pos)->next; \ + pos && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_for_each_entry_from - iterate over a hlist continuing from current point + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_from(tpos, pos, member) \ + for (; pos && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @n: another &struct hlist_node to use as temporary storage + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ + for (pos = (head)->first; \ + pos && ({ n = pos->next; 1; }) && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = n) + +#endif -- cgit v1.2.3 From ce7e43653b08db094326f378958bc293a68e8e5b Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 19 May 2009 09:30:23 -0300 Subject: perf_counter: Use rb_tree for symhists and threads in report Signed-off-by: Arnaldo Carvalho de Melo Acked-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 178 ++++++++++++---------------- 1 file changed, 75 insertions(+), 103 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index f63057fe2cd..e857201e1e0 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -479,23 +479,25 @@ static size_t map__fprintf(struct map *self, FILE *fp) } struct symhist { - struct list_head node; + struct rb_node rb_node; struct dso *dso; struct symbol *sym; + uint64_t ip; uint32_t count; char level; }; -static struct symhist *symhist__new(struct symbol *sym, struct dso *dso, - char level) +static struct symhist *symhist__new(struct symbol *sym, uint64_t ip, + struct dso *dso, char level) { struct symhist *self = malloc(sizeof(*self)); if (self != NULL) { self->sym = sym; + self->ip = ip; self->dso = dso; self->level = level; - self->count = 0; + self->count = 1; } return self; @@ -506,12 +508,6 @@ static void symhist__delete(struct symhist *self) free(self); } -static bool symhist__equal(struct symhist *self, struct symbol *sym, - struct dso *dso, char level) -{ - 
return self->level == level && self->sym == sym && self->dso == dso; -} - static void symhist__inc(struct symhist *self) { ++self->count; @@ -519,7 +515,7 @@ static void symhist__inc(struct symhist *self) static size_t symhist__fprintf(struct symhist *self, FILE *fp) { - size_t ret = fprintf(fp, "[%c] ", self->level); + size_t ret = fprintf(fp, "%#llx [%c] ", (unsigned long long)self->ip, self->level); if (self->level != '.') ret += fprintf(fp, "%s", self->sym->name); @@ -531,9 +527,9 @@ static size_t symhist__fprintf(struct symhist *self, FILE *fp) } struct thread { - struct list_head node; + struct rb_node rb_node; struct list_head maps; - struct list_head symhists; + struct rb_root symhists; pid_t pid; char *comm; }; @@ -546,47 +542,43 @@ static struct thread *thread__new(pid_t pid) self->pid = pid; self->comm = NULL; INIT_LIST_HEAD(&self->maps); - INIT_LIST_HEAD(&self->symhists); + self->symhists = RB_ROOT; } return self; } -static void thread__insert_symhist(struct thread *self, - struct symhist *symhist) -{ - list_add_tail(&symhist->node, &self->symhists); -} - -static struct symhist *thread__symhists_find(struct thread *self, - struct symbol *sym, - struct dso *dso, char level) +static int thread__symbol_incnew(struct thread *self, struct symbol *sym, + uint64_t ip, struct dso *dso, char level) { - struct symhist *pos; + struct rb_node **p = &self->symhists.rb_node; + struct rb_node *parent = NULL; + struct symhist *sh; - list_for_each_entry(pos, &self->symhists, node) - if (symhist__equal(pos, sym, dso, level)) - return pos; + while (*p != NULL) { + parent = *p; + sh = rb_entry(parent, struct symhist, rb_node); - return NULL; -} + if (sh->sym == sym || ip == sh->ip) { + symhist__inc(sh); + return 0; + } -static int thread__symbol_incnew(struct thread *self, struct symbol *sym, - struct dso *dso, char level) -{ - struct symhist *symhist = thread__symhists_find(self, sym, dso, level); + /* Handle unresolved symbols too */ + const uint64_t start = !sh->sym ? 
sh->ip : sh->sym->start; - if (symhist == NULL) { - symhist = symhist__new(sym, dso, level); - if (symhist == NULL) - goto out_error; - thread__insert_symhist(self, symhist); + if (ip < start) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; } - symhist__inc(symhist); + sh = symhist__new(sym, ip, dso, level); + if (sh == NULL) + return -ENOMEM; + rb_link_node(&sh->rb_node, parent, p); + rb_insert_color(&sh->rb_node, &self->symhists); return 0; -out_error: - return -ENOMEM; } static int thread__set_comm(struct thread *self, const char *comm) @@ -608,43 +600,44 @@ static size_t thread__maps_fprintf(struct thread *self, FILE *fp) static size_t thread__fprintf(struct thread *self, FILE *fp) { - struct symhist *pos; int ret = fprintf(fp, "thread: %d %s\n", self->pid, self->comm); + struct rb_node *nd; - list_for_each_entry(pos, &self->symhists, node) + for (nd = rb_first(&self->symhists); nd; nd = rb_next(nd)) { + struct symhist *pos = rb_entry(nd, struct symhist, rb_node); ret += symhist__fprintf(pos, fp); + } return ret; } -static LIST_HEAD(threads); +static struct rb_root threads = RB_ROOT; -static void threads__add(struct thread *thread) -{ - list_add_tail(&thread->node, &threads); -} - -static struct thread *threads__find(pid_t pid) +static struct thread *threads__findnew(pid_t pid) { - struct thread *pos; + struct rb_node **p = &threads.rb_node; + struct rb_node *parent = NULL; + struct thread *th; - list_for_each_entry(pos, &threads, node) - if (pos->pid == pid) - return pos; - return NULL; -} + while (*p != NULL) { + parent = *p; + th = rb_entry(parent, struct thread, rb_node); -static struct thread *threads__findnew(pid_t pid) -{ - struct thread *thread = threads__find(pid); + if (th->pid == pid) + return th; - if (thread == NULL) { - thread = thread__new(pid); - if (thread != NULL) - threads__add(thread); + if (pid < th->pid) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; } - return thread; + th = thread__new(pid); + if (th != NULL) { + rb_link_node(&th->rb_node, parent, p); + rb_insert_color(&th->rb_node, &threads); + } + return th; } static void thread__insert_map(struct thread *self, struct map *map) @@ -668,44 +661,13 @@ static struct map *thread__find_map(struct thread *self, uint64_t ip) static void threads__fprintf(FILE *fp) { - struct thread *pos; - - list_for_each_entry(pos, &threads, node) + struct rb_node *nd; + for (nd = rb_first(&threads); nd; nd = rb_next(nd)) { + struct thread *pos = rb_entry(nd, struct thread, rb_node); thread__fprintf(pos, fp); + } } -#if 0 -static std::string resolve_user_symbol(int pid, uint64_t ip) -{ - std::string sym = ""; - - maps_t &m = maps[pid]; - maps_t::const_iterator mi = m.upper_bound(map(ip)); - if (mi == m.end()) - return sym; - - ip -= mi->start + mi->pgoff; - - symbols_t &s = dsos[mi->dso].syms; - symbols_t::const_iterator si = s.upper_bound(symbol(ip)); - - sym = mi->dso + ": "; - - if (si == s.begin()) - return sym; - si--; - - if (si->start <= ip && ip < si->end) - sym = mi->dso + ": " + si->name; -#if 0 - else if (si->start <= ip) - sym = mi->dso + ": ?" 
+ si->name; -#endif - - return sym; -} -#endif - static void display_help(void) { printf( @@ -824,8 +786,11 @@ more: struct dso *dso = NULL; struct thread *thread = threads__findnew(event->ip.pid); - if (thread == NULL) + if (thread == NULL) { + fprintf(stderr, "problem processing %d event, bailing out\n", + event->header.type); goto done; + } if (event->header.misc & PERF_EVENT_MISC_KERNEL) { show = SHOW_KERNEL; @@ -845,8 +810,11 @@ more: if (show & show_mask) { struct symbol *sym = dso__find_symbol(dso, event->ip.ip); - if (thread__symbol_incnew(thread, sym, dso, level)) + if (thread__symbol_incnew(thread, sym, event->ip.ip, + dso, level)) { + fprintf(stderr, "problem incrementing symbol count, bailing out\n"); goto done; + } } total++; } else switch (event->header.type) { @@ -854,8 +822,10 @@ more: struct thread *thread = threads__findnew(event->mmap.pid); struct map *map = map__new(&event->mmap); - if (thread == NULL || map == NULL ) + if (thread == NULL || map == NULL) { + fprintf(stderr, "problem processing PERF_EVENT_MMAP, bailing out\n"); goto done; + } thread__insert_map(thread, map); break; } @@ -863,8 +833,10 @@ more: struct thread *thread = threads__findnew(event->comm.pid); if (thread == NULL || - thread__set_comm(thread, event->comm.comm)) + thread__set_comm(thread, event->comm.comm)) { + fprintf(stderr, "problem processing PERF_EVENT_COMM, bailing out\n"); goto done; + } break; } } -- cgit v1.2.3 From f3e08c5341c528284460530b546608f27232f737 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 22 May 2009 15:34:54 +0200 Subject: perf report: Fix segfault on unknown symbols Ingo reported: > Program received signal SIGSEGV, Segmentation fault. > 0x0000003e25080f80 in strlen () from /lib64/libc.so.6 > Missing separate debuginfos, use: debuginfo-install elfutils.x86_64 > glibc.x86_64 zlib.x86_64 > (gdb) bt > #0 0x0000003e25080f80 in strlen () from /lib64/libc.so.6 > #1 0x0000003e2506954e in fputs () from /lib64/libc.so.6 > #2 0x00000000004059e8 in cmd_report (argc=<value optimized out>, > argv=<value optimized out>) at builtin-report.c:521 > #3 0x0000000000402dad in handle_internal_command (argc=1, argv=0x7fffe1218e30) > at perf.c:226 > #4 0x0000000000402f6d in main (argc=1, argv=0x7fffe1218e30) at perf.c:324 > (gdb) Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index e857201e1e0..21386a8c6f6 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -518,7 +518,7 @@ static size_t symhist__fprintf(struct symhist *self, FILE *fp) size_t ret = fprintf(fp, "%#llx [%c] ", (unsigned long long)self->ip, self->level); if (self->level != '.') - ret += fprintf(fp, "%s", self->sym->name); + ret += fprintf(fp, "%s", self->sym ? self->sym->name: "<unknown>"); else ret += fprintf(fp, "%s: %s", self->dso ? self->dso->name : "<unknown>", -- cgit v1.2.3 From Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 May 2009 09:17:18 +0200 Subject: perf report: Convert to Git option parsing Remove getopt usage and use Git's much more advanced and more compact command option library.
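As an aside, the conversion pattern itself is compact enough to show in full. The sketch below is a hypothetical standalone program, not part of the patch; it only assumes the util/parse-options.h API (OPT_STRING, OPT_BOOLEAN, OPT_END, parse_options) exactly as the diff below uses it, and the option names are illustrative:

	#include <stdio.h>
	#include "util/parse-options.h"

	static const char *input_name = "output.perf";
	static int dump_trace;

	static const char * const demo_usage[] = {
		"demo [<options>]",
		NULL
	};

	static const struct option demo_options[] = {
		OPT_STRING('i', "input", &input_name, "file", "input file name"),
		OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
		OPT_END()
	};

	int main(int argc, const char **argv)
	{
		/* parse_options() consumes recognized options, returns the rest */
		argc = parse_options(argc, argv, demo_options, demo_usage, 0);
		printf("input: %s, dump_trace: %d\n", input_name, dump_trace);
		return 0;
	}

The whole getopt loop, the long_options table and the hand-written usage text collapse into the one declarative table.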
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 126 +++++++++------------------- 1 file changed, 38 insertions(+), 88 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 21386a8c6f6..9e59d6071ef 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -1,52 +1,29 @@ -#define _GNU_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "util/util.h" + +#include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "../../include/linux/perf_counter.h" + #include "util/list.h" #include "util/rbtree.h" +#include "perf.h" + +#include "util/parse-options.h" +#include "util/parse-events.h" + #define SHOW_KERNEL 1 #define SHOW_USER 2 #define SHOW_HV 4 -static char const *input_name = "output.perf"; +static char const *input_name = "output.perf"; static int input; static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; static unsigned long page_size; static unsigned long mmap_window = 32; -static const char *perf_event_names[] = { +const char *perf_event_names[] = { [PERF_EVENT_MMAP] = " PERF_EVENT_MMAP", [PERF_EVENT_MUNMAP] = " PERF_EVENT_MUNMAP", [PERF_EVENT_COMM] = " PERF_EVENT_COMM", @@ -86,7 +63,7 @@ struct section { char name[0]; }; -static struct section *section__new(uint64_t start, uint64_t size, +struct section *section__new(uint64_t start, uint64_t size, uint64_t offset, char *name) { struct section *self = malloc(sizeof(*self) + strlen(name) + 1); @@ -241,7 +218,7 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym) return GELF_ST_TYPE(sym->st_info); } -static inline bool elf_sym__is_function(const GElf_Sym *sym) +static inline int elf_sym__is_function(const GElf_Sym *sym) { return elf_sym__type(sym) == STT_FUNC && sym->st_name != 0 && @@ -393,7 +370,7 @@ out_delete_dso: return NULL; } -static void dsos__fprintf(FILE *fp) +void dsos__fprintf(FILE *fp) { struct dso *pos; @@ -503,7 +480,7 @@ static struct symhist *symhist__new(struct symbol *sym, uint64_t ip, return self; } -static void symhist__delete(struct symhist *self) +void symhist__delete(struct symhist *self) { free(self); } @@ -587,7 +564,7 @@ static int thread__set_comm(struct thread *self, const char *comm) return self->comm ? 
0 : -ENOMEM; } -static size_t thread__maps_fprintf(struct thread *self, FILE *fp) +size_t thread__maps_fprintf(struct thread *self, FILE *fp) { struct map *pos; size_t ret = 0; @@ -668,49 +645,7 @@ static void threads__fprintf(FILE *fp) } } -static void display_help(void) -{ - printf( - "Usage: perf-report [<options>]\n" - " -i file --input=<file> # input file\n" - ); - - exit(0); -} - -static void process_options(int argc, char *argv[]) -{ - int error = 0; - - for (;;) { - int option_index = 0; - /** Options for getopt */ - static struct option long_options[] = { - {"input", required_argument, NULL, 'i'}, - {"no-user", no_argument, NULL, 'u'}, - {"no-kernel", no_argument, NULL, 'k'}, - {"no-hv", no_argument, NULL, 'h'}, - {NULL, 0, NULL, 0 } - }; - int c = getopt_long(argc, argv, "+:i:kuh", - long_options, &option_index); - if (c == -1) - break; - - switch (c) { - case 'i': input_name = strdup(optarg); break; - case 'k': show_mask &= ~SHOW_KERNEL; break; - case 'u': show_mask &= ~SHOW_USER; break; - case 'h': show_mask &= ~SHOW_HV; break; - default: error = 1; break; - } - } - - if (error) - display_help(); -} - -int cmd_report(int argc, char **argv) +static int __cmd_report(void) { unsigned long offset = 0; unsigned long head = 0; @@ -720,12 +655,6 @@ int cmd_report(int argc, char **argv) int ret, rc = EXIT_FAILURE; unsigned long total = 0; - elf_version(EV_CURRENT); - - page_size = getpagesize(); - - process_options(argc, argv); - input = open(input_name, O_RDONLY); if (input < 0) { perror("failed to open file"); @@ -867,3 +796,24 @@ done: return rc; } +static const char * const report_usage[] = { + "perf report [<options>]", + NULL +}; + +static const struct option options[] = { + OPT_STRING('i', "input", &input_name, "file", + "input file name"), + OPT_END() +}; + +int cmd_report(int argc, const char **argv, const char *prefix) +{ + elf_version(EV_CURRENT); + + page_size = getpagesize(); + + parse_options(argc, argv, options, report_usage, 0); + + return __cmd_report(); +} -- cgit v1.2.3 From 0bec253c813fbb067db4dfd9f5b6cec1bd2ef026 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 May 2009 09:17:18 +0200 Subject: perf report: Add help/manpage Add a (minimal) manpage for perf report. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- .../perf_counter/Documentation/perf-report.txt | 32 ++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 Documentation/perf_counter/Documentation/perf-report.txt diff --git a/Documentation/perf_counter/Documentation/perf-report.txt b/Documentation/perf_counter/Documentation/perf-report.txt new file mode 100644 index 00000000000..64696a21810 --- /dev/null +++ b/Documentation/perf_counter/Documentation/perf-report.txt @@ -0,0 +1,32 @@ +perf-report(1) +============== + +NAME +---- +perf-report - Read output.perf (created by perf record) and display the profile + +SYNOPSIS +-------- +[verse] +'perf report' [-i <file> | --input=file] + +DESCRIPTION ----------- +This command displays the performance counter profile information recorded +via perf record. + +OPTIONS +------- +-i:: +--input=:: + Input file name.
(default: output.perf) + +Configuration +------------- + +EXAMPLES +-------- + +SEE ALSO +-------- +linkperf:perf-stat[1] -- cgit v1.2.3 From f91183fe3780d44849110a1653dfe8af7bc67aa4 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Tue, 26 May 2009 15:25:34 +0200 Subject: perf top: Remove leftover NMI/IRQ bits 79202b removed IRQ/NMI mode selection, so remove it from perf top as well. [ Impact: cleanup ] Signed-off-by: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-top.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 87b925c8f8e..cacaa3c2518 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -8,7 +8,7 @@ Sample output: ------------------------------------------------------------------------------ - KernelTop: 2669 irqs/sec [NMI, cache-misses/cache-refs], (all, cpu: 2) + KernelTop: 2669 irqs/sec [cache-misses/cache-refs], (all, cpu: 2) ------------------------------------------------------------------------------ weight RIP kernel function @@ -92,7 +92,6 @@ static __u64 count_filter = 100; static int target_pid = -1; static int profile_cpu = -1; static int nr_cpus = 0; -static int nmi = 1; static unsigned int realtime_prio = 0; static int group = 0; static unsigned int page_size; @@ -198,10 +197,9 @@ static void print_sym_table(void) printf( "------------------------------------------------------------------------------\n"); - printf( " KernelTop:%8.0f irqs/sec kernel:%4.1f%% [%s, ", + printf( " KernelTop:%8.0f irqs/sec kernel:%4.1f%% [", events_per_sec, - 100.0 - (100.0*((events_per_sec-kevents_per_sec)/events_per_sec)), - nmi ? "NMI" : "IRQ"); + 100.0 - (100.0*((events_per_sec-kevents_per_sec)/events_per_sec))); if (nr_counters == 1) printf("%d ", event_count[0]); @@ -637,7 +635,7 @@ static int __cmd_top(void) hw_event.config = event_id[counter]; hw_event.irq_period = event_count[counter]; hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID; - hw_event.nmi = nmi; + hw_event.nmi = 1; hw_event.mmap = use_mmap; hw_event.munmap = use_munmap; hw_event.freq = freq; -- cgit v1.2.3 From db20c0031288ff524d82b1f240f35f85d4a052eb Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Tue, 26 May 2009 15:25:34 +0200 Subject: perf top: fix typo in -d option Clean up copy/paste options parsing conversion error. 
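The bug class is worth spelling out: when a getopt conversion copy/pastes table entries, two options can end up bound to the same variable, and the second flag then silently overwrites the first. A sketch of the pattern, with the variable and option names taken from the patch below (the table here is illustrative, not the real one):

	static int realtime_prio;
	static int delay_secs;

	static const struct option options[] = {
		OPT_INTEGER('r', "realtime", &realtime_prio,
			    "collect data with this RT SCHED_FIFO priority"),
		/* the copy/paste bug: '-d' was still bound to &realtime_prio */
		OPT_INTEGER('d', "delay", &delay_secs,
			    "number of seconds to delay between refreshes"),
		OPT_END()
	};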
[ Impact: reactivate -d option ] Signed-off-by: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-top.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index cacaa3c2518..6b1c66f99e4 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -727,7 +727,7 @@ static const struct option options[] = { "number of mmap data pages"), OPT_INTEGER('r', "realtime", &realtime_prio, "collect data with this RT SCHED_FIFO priority"), - OPT_INTEGER('d', "delay", &realtime_prio, + OPT_INTEGER('d', "delay", &delay_secs, "number of seconds to delay between refreshes"), OPT_BOOLEAN('D', "dump-symtab", &dump_symtab, "dump the symbol table used for profiling"), -- cgit v1.2.3 From f17e04afaff84b5cfd317da29ac4d764908ff833 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 26 May 2009 15:30:22 +0200 Subject: perf report: Fix ELF symbol parsing [ Impact: fix DSO symbol output in perf report ] Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 2 +- Documentation/perf_counter/builtin-report.c | 72 +++++++++-------------------- 2 files changed, 22 insertions(+), 52 deletions(-) diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 412dea15d0b..10c13a6f2bc 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -159,7 +159,7 @@ uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') # CFLAGS and LDFLAGS are for the users to override from the command line. 
-CFLAGS = -g -O2 -Wall +CFLAGS = -ggdb3 -Wall LDFLAGS = -lpthread -lrt -lelf ALL_CFLAGS = $(CFLAGS) ALL_LDFLAGS = $(LDFLAGS) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 9e59d6071ef..697f960495f 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -55,34 +55,6 @@ typedef union event_union { struct comm_event comm; } event_t; -struct section { - struct list_head node; - uint64_t start; - uint64_t end; - uint64_t offset; - char name[0]; -}; - -struct section *section__new(uint64_t start, uint64_t size, - uint64_t offset, char *name) -{ - struct section *self = malloc(sizeof(*self) + strlen(name) + 1); - - if (self != NULL) { - self->start = start; - self->end = start + size; - self->offset = offset; - strcpy(self->name, name); - } - - return self; -} - -static void section__delete(struct section *self) -{ - free(self); -} - struct symbol { struct rb_node rb_node; uint64_t start; @@ -116,7 +88,6 @@ static size_t symbol__fprintf(struct symbol *self, FILE *fp) struct dso { struct list_head node; - struct list_head sections; struct rb_root syms; char name[0]; }; @@ -127,21 +98,12 @@ static struct dso *dso__new(const char *name) if (self != NULL) { strcpy(self->name, name); - INIT_LIST_HEAD(&self->sections); self->syms = RB_ROOT; } return self; } -static void dso__delete_sections(struct dso *self) -{ - struct section *pos, *n; - - list_for_each_entry_safe(pos, n, &self->sections, node) - section__delete(pos); -} - static void dso__delete_symbols(struct dso *self) { struct symbol *pos; @@ -156,7 +118,6 @@ static void dso__delete_symbols(struct dso *self) static void dso__delete(struct dso *self) { - dso__delete_sections(self); dso__delete_symbols(self); free(self); } @@ -282,9 +243,6 @@ static int dso__load(struct dso *self) if (sec == NULL) goto out_elf_end; - if (gelf_getshdr(sec, &shdr) == NULL) - goto out_elf_end; - Elf_Data *syms = elf_getdata(sec, NULL); if (syms == NULL) goto out_elf_end; @@ -302,11 +260,21 @@ static int dso__load(struct dso *self) GElf_Sym sym; uint32_t index; elf_symtab__for_each_symbol(syms, nr_syms, index, sym) { + struct symbol *f; + if (!elf_sym__is_function(&sym)) continue; - struct symbol *f = symbol__new(sym.st_value, sym.st_size, - elf_sym__name(&sym, symstrs)); - if (f == NULL) + + sec = elf_getscn(elf, sym.st_shndx); + if (!sec) + goto out_elf_end; + + gelf_getshdr(sec, &shdr); + sym.st_value -= shdr.sh_addr - shdr.sh_offset; + + f = symbol__new(sym.st_value, sym.st_size, + elf_sym__name(&sym, symstrs)); + if (!f) goto out_elf_end; dso__insert_symbol(self, f); @@ -498,7 +466,7 @@ static size_t symhist__fprintf(struct symhist *self, FILE *fp) ret += fprintf(fp, "%s", self->sym ? self->sym->name: "<unknown>"); else ret += fprintf(fp, "%s: %s", - self->dso ? self->dso->name : "<unknown>", + self->dso ? self->dso->name : "<unknown>", self->sym ? self->sym->name : "<unknown>"); return ret + fprintf(fp, ": %u\n", self->count); } @@ -714,6 +682,7 @@ more: int show = 0; struct dso *dso = NULL; struct thread *thread = threads__findnew(event->ip.pid); + uint64_t ip = event->ip.ip; if (thread == NULL) { fprintf(stderr, "problem processing %d event, bailing out\n", @@ -728,19 +697,20 @@ more: } else if (event->header.misc & PERF_EVENT_MISC_USER) { show = SHOW_USER; level = '.'; - struct map *map = thread__find_map(thread, event->ip.ip); - if (map != NULL) + struct map *map = thread__find_map(thread, ip); + if (map != NULL) { dso = map->dso; + ip -= map->start + map->pgoff; + } } else { show = SHOW_HV; level = 'H'; } if (show & show_mask) { - struct symbol *sym = dso__find_symbol(dso, event->ip.ip); + struct symbol *sym = dso__find_symbol(dso, ip); - if (thread__symbol_incnew(thread, sym, event->ip.ip, - dso, level)) { + if (thread__symbol_incnew(thread, sym, ip, dso, level)) { fprintf(stderr, "problem incrementing symbol count, bailing out\n"); goto done; } -- cgit v1.2.3 From 59d81029b6804c3d5895d07cad77d7dfddc6b5b2 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 26 May 2009 11:14:27 -0300 Subject: perf report: Fix kernel symbol resolution kallsyms entries have just the symbol start, so we need to read two lines to get the len. [ Impact: fix incorrect kernel symbol display in perf report ] Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 697f960495f..b19b893d4ff 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -360,9 +360,17 @@ static int load_kallsyms(void) char *line = NULL; size_t n; + if (getline(&line, &n, file) < 0 || !line) + goto out_delete_dso; + + unsigned long long previous_start; + char c, previous_symbf[4096]; + if (sscanf(line, "%llx %c %s", &previous_start, &c, previous_symbf) != 3) + goto out_delete_line; + while (!feof(file)) { unsigned long long start; - char c, symbf[4096]; + char symbf[4096]; if (getline(&line, &n, file) < 0) break; @@ -371,12 +379,18 @@ goto out_delete_dso; if (sscanf(line, "%llx %c %s", &start, &c, symbf) == 3) { - struct symbol *sym = symbol__new(start, 0x1000000, symbf); + if (start > previous_start) { + struct symbol *sym = symbol__new(previous_start, + start - previous_start, + previous_symbf); - if (sym == NULL) - goto out_delete_dso; + if (sym == NULL) + goto out_delete_dso; - dso__insert_symbol(kernel_dso, sym); + dso__insert_symbol(kernel_dso, sym); + previous_start = start; + strcpy(previous_symbf, symbf); + } } } @@ -385,6 +399,8 @@ fclose(file); return 0; +out_delete_line: + free(line); out_delete_dso: dso__delete(kernel_dso); return -1; -- cgit v1.2.3 From abd54f68629fa73ed4fa040d433196211a9bbed2 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 26 May 2009 12:21:34 -0300 Subject: perf: Don't assume /proc/kallsyms is ordered Since we _are_ ordering it by the symbol start, just traverse the freshly built rbtree setting the prev->end members to curr->start - 1.
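Put concretely: once every kallsyms entry sits in an rbtree keyed by start address, a single in-order pass closes each symbol's range against its successor. A sketch of that pass, assuming the struct symbol (rb_node/start/end members) defined earlier in builtin-report.c; the helper name is invented here, the real patch below does this inline:

	static void symbols__fixup_end(struct rb_root *syms)
	{
		struct rb_node *nd, *prevnd = rb_first(syms);

		if (prevnd == NULL)
			return;

		/* walk in start-address order, ending each symbol at its successor */
		for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
			struct symbol *prev = rb_entry(prevnd, struct symbol, rb_node);
			struct symbol *curr = rb_entry(nd, struct symbol, rb_node);

			prev->end = curr->start - 1;
			prevnd = nd;
		}
	}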
Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090526152134.GF4424@ghostprotocols.net> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 44 ++++++++++++++++------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index b19b893d4ff..e17819001dd 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -360,17 +360,9 @@ static int load_kallsyms(void) char *line = NULL; size_t n; - if (getline(&line, &n, file) < 0 || !line) - goto out_delete_dso; - - unsigned long long previous_start; - char c, previous_symbf[4096]; - if (sscanf(line, "%llx %c %s", &previous_start, &c, previous_symbf) != 3) - goto out_delete_line; - while (!feof(file)) { unsigned long long start; - char symbf[4096]; + char c, symbf[4096]; if (getline(&line, &n, file) < 0) break; @@ -379,21 +371,35 @@ static int load_kallsyms(void) goto out_delete_dso; if (sscanf(line, "%llx %c %s", &start, &c, symbf) == 3) { - if (start > previous_start) { - struct symbol *sym = symbol__new(previous_start, - start - previous_start, - previous_symbf); + /* + * Well fix up the end later, when we have all sorted. + */ + struct symbol *sym = symbol__new(start, 0xdead, symbf); - if (sym == NULL) - goto out_delete_dso; + if (sym == NULL) + goto out_delete_dso; - dso__insert_symbol(kernel_dso, sym); - previous_start = start; - strcpy(previous_symbf, symbf); - } + dso__insert_symbol(kernel_dso, sym); } } + /* + * Now that we have all sorted out, just set the ->end of all + * symbols + */ + struct rb_node *nd, *prevnd = rb_first(&kernel_dso->syms); + + if (prevnd == NULL) + goto out_delete_line; + + for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { + struct symbol *prev = rb_entry(prevnd, struct symbol, rb_node), + *curr = rb_entry(nd, struct symbol, rb_node); + + prev->end = curr->start - 1; + prevnd = nd; + } + dsos__add(kernel_dso); free(line); fclose(file); -- cgit v1.2.3 From 97b07b699b11d4bd1218a841e5dfed16bd53de06 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 May 2009 18:48:58 +0200 Subject: perf report: add --dump-raw-trace option To help the inspection of various data files, implement an ASCII dump method that just dumps the records as they are read in - then we exit. 
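The dump itself can be minimal because every record is self-describing: it begins with a perf_event_header that carries its own size. A rough sketch of the loop (hypothetical fragment; it assumes the perf_event_header layout with type/misc/size used throughout this file, and the real code below additionally handles the mmap window):

	static void dump_raw(const unsigned char *buf, size_t len)
	{
		size_t head = 0;

		while (head + sizeof(struct perf_event_header) <= len) {
			const struct perf_event_header *hdr =
				(const struct perf_event_header *)(buf + head);

			if (!hdr->size)
				break;	/* corrupt stream, stop dumping */

			fprintf(stderr, "%#zx: type %u, misc %u, size %u\n",
				head, hdr->type, (unsigned int)hdr->misc,
				(unsigned int)hdr->size);
			head += hdr->size;
		}
	}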
[ Impact: new feature ] Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 39 ++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index e17819001dd..8ea8aaa05af 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -20,6 +20,8 @@ static char const *input_name = "output.perf"; static int input; static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; +static int dump_trace = 0; + static unsigned long page_size; static unsigned long mmap_window = 32; @@ -643,7 +645,7 @@ static int __cmd_report(void) char *buf; event_t *event; int ret, rc = EXIT_FAILURE; - unsigned long total = 0; + unsigned long total = 0, total_mmap = 0, total_comm = 0; input = open(input_name, O_RDONLY); if (input < 0) { @@ -706,6 +708,13 @@ more: struct thread *thread = threads__findnew(event->ip.pid); uint64_t ip = event->ip.ip; + if (dump_trace) { + fprintf(stderr, "PERF_EVENT (IP, %d): %d: %p\n", + event->header.misc, + event->ip.pid, + (void *)event->ip.ip); + } + if (thread == NULL) { fprintf(stderr, "problem processing %d event, bailing out\n", event->header.type); @@ -743,23 +752,40 @@ more: struct thread *thread = threads__findnew(event->mmap.pid); struct map *map = map__new(&event->mmap); + if (dump_trace) { + fprintf(stderr, "PERF_EVENT_MMAP: [%p(%p) @ %p]: %s\n", + (void *)event->mmap.start, + (void *)event->mmap.len, + (void *)event->mmap.pgoff, + event->mmap.filename); + } if (thread == NULL || map == NULL) { fprintf(stderr, "problem processing PERF_EVENT_MMAP, bailing out\n"); goto done; } thread__insert_map(thread, map); + total_mmap++; break; } case PERF_EVENT_COMM: { struct thread *thread = threads__findnew(event->comm.pid); + if (dump_trace) { + fprintf(stderr, "PERF_EVENT_COMM: %s:%d\n", + event->comm.comm, event->comm.pid); + } if (thread == NULL || thread__set_comm(thread, event->comm.comm)) { fprintf(stderr, "problem processing PERF_EVENT_COMM, bailing out\n"); goto done; } + total_comm++; break; } + default: { + fprintf(stderr, "skipping unknown header type: %d\n", + event->header.type); + } } if (offset + head < stat.st_size) @@ -768,6 +794,15 @@ more: rc = EXIT_SUCCESS; done: close(input); + + if (dump_trace) { + fprintf(stderr, " IP events: %10ld\n", total); + fprintf(stderr, " mmap events: %10ld\n", total_mmap); + fprintf(stderr, " comm events: %10ld\n", total_comm); + + return 0; + } + //dsos__fprintf(stdout); threads__fprintf(stdout); #if 0 @@ -796,6 +831,8 @@ static const char * const report_usage[] = { static const struct option options[] = { OPT_STRING('i', "input", &input_name, "file", "input file name"), + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, + "dump raw trace in ASCII"), OPT_END() }; -- cgit v1.2.3 From 3e70611460fe74ad32534fa9791774f6bbdd4159 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 May 2009 18:53:17 +0200 Subject: perf report: add counter for unknown events Add a counter for unknown event records. 
[ Impact: improve debugging ] Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 8ea8aaa05af..4b5ccc5bd0e 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -645,7 +645,7 @@ static int __cmd_report(void) char *buf; event_t *event; int ret, rc = EXIT_FAILURE; - unsigned long total = 0, total_mmap = 0, total_comm = 0; + unsigned long total = 0, total_mmap = 0, total_comm = 0, total_unknown; input = open(input_name, O_RDONLY); if (input < 0) { @@ -785,6 +785,7 @@ more: default: { fprintf(stderr, "skipping unknown header type: %d\n", event->header.type); + total_unknown++; } } @@ -796,9 +797,10 @@ done: close(input); if (dump_trace) { - fprintf(stderr, " IP events: %10ld\n", total); - fprintf(stderr, " mmap events: %10ld\n", total_mmap); - fprintf(stderr, " comm events: %10ld\n", total_comm); + fprintf(stderr, " IP events: %10ld\n", total); + fprintf(stderr, " mmap events: %10ld\n", total_mmap); + fprintf(stderr, " comm events: %10ld\n", total_comm); + fprintf(stderr, " unknown events: %10ld\n", total_unknown); return 0; } -- cgit v1.2.3 From f49515b157e2d3ca3633eb0664fc46c42f6cb37e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 May 2009 19:03:36 +0200 Subject: perf report: add more debugging Add the offset of the file we are analyzing, and the size of the record. In case of problems it's easier to see where the parser lost track. 
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 4b5ccc5bd0e..2d4e4cc655a 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -645,7 +645,7 @@ static int __cmd_report(void) char *buf; event_t *event; int ret, rc = EXIT_FAILURE; - unsigned long total = 0, total_mmap = 0, total_comm = 0, total_unknown; + unsigned long total = 0, total_mmap = 0, total_comm = 0, total_unknown = 0; input = open(input_name, O_RDONLY); if (input < 0) { @@ -699,8 +699,6 @@ more: goto done; } - head += event->header.size; - if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) { char level; int show = 0; @@ -709,7 +707,9 @@ more: uint64_t ip = event->ip.ip; if (dump_trace) { - fprintf(stderr, "PERF_EVENT (IP, %d): %d: %p\n", + fprintf(stderr, "%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", + (void *)(offset + head), + (void *)(long)(event->header.size), event->header.misc, event->ip.pid, (void *)event->ip.ip); @@ -753,7 +753,9 @@ more: struct map *map = map__new(&event->mmap); if (dump_trace) { - fprintf(stderr, "PERF_EVENT_MMAP: [%p(%p) @ %p]: %s\n", + fprintf(stderr, "%p [%p]: PERF_EVENT_MMAP: [%p(%p) @ %p]: %s\n", + (void *)(offset + head), + (void *)(long)(event->header.size), (void *)event->mmap.start, (void *)event->mmap.len, (void *)event->mmap.pgoff, @@ -771,7 +773,9 @@ more: struct thread *thread = threads__findnew(event->comm.pid); if (dump_trace) { - fprintf(stderr, "PERF_EVENT_COMM: %s:%d\n", + fprintf(stderr, "%p [%p]: PERF_EVENT_COMM: %s:%d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), event->comm.comm, event->comm.pid); } if (thread == NULL || @@ -783,12 +787,16 @@ more: break; } default: { - fprintf(stderr, "skipping unknown header type: %d\n", + fprintf(stderr, "%p [%p]: skipping unknown header type: %d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), event->header.type); total_unknown++; } } + head += event->header.size; + if (offset + head < stat.st_size) goto more; -- cgit v1.2.3 From 6142f9ec108a4ddbf0d5904c3daa5fdcaa618792 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 26 May 2009 20:51:47 +0200 Subject: perf report: More robust error handling Don't let funny events confuse us, stick to what we know and try to find sensible data again. If we find an unknown event, check we're still u64 aligned, and increment by one u64. This ensures we're bound to happen upon a valid event soon. 
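The recovery rule in isolation: records are u64 aligned, so after an unparseable header the scanner first restores alignment, then advances one u64 at a time until a plausible header turns up. A sketch (hypothetical helper; the patch below inlines this logic in the event loop):

	static unsigned long recover_head(unsigned long head)
	{
		/* records are u64 aligned; drop any stray low bits */
		if (head & 7)
			head &= ~7UL;

		/* then step a single u64 forward and try to parse again */
		return head + 8;
	}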
Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 2d4e4cc655a..a58be7fee42 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -645,6 +645,7 @@ static int __cmd_report(void) char *buf; event_t *event; int ret, rc = EXIT_FAILURE; + uint32_t size; unsigned long total = 0, total_mmap = 0, total_comm = 0, total_unknown = 0; input = open(input_name, O_RDONLY); @@ -680,6 +681,10 @@ remap: more: event = (event_t *)(buf + head); + size = event->header.size; + if (!size) + size = 8; + if (head + event->header.size >= page_size * mmap_window) { unsigned long shift = page_size * (head / page_size); int ret; @@ -692,12 +697,9 @@ more: goto remap; } - - if (!event->header.size) { - fprintf(stderr, "zero-sized event at file offset %ld\n", offset + head); - fprintf(stderr, "skipping %ld bytes of events.\n", stat.st_size - offset - head); - goto done; - } + size = event->header.size; + if (!size) + goto broken_event; if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) { char level; @@ -787,15 +789,26 @@ more: break; } default: { +broken_event: fprintf(stderr, "%p [%p]: skipping unknown header type: %d\n", (void *)(offset + head), (void *)(long)(event->header.size), event->header.type); total_unknown++; + + /* + * assume we lost track of the stream, check alignment, and + * increment a single u64 in the hope to catch on again 'soon'. + */ + + if (unlikely(head & 7)) + head &= ~7ULL; + + size = 8; } } - head += event->header.size; + head += size; if (offset + head < stat.st_size) goto more; -- cgit v1.2.3 From 3a4b8cc70b7473a0b9f26f5b4ddc6579b5e214be Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 26 May 2009 16:19:04 -0300 Subject: perf report: Sort output by symbol usage [acme@emilia ~]$ perf record find / > /dev/null 2>&1 [acme@emilia ~]$ perf stat perf report | head -20 4.95 find [k] 0xffffffff81393d65 _spin_lock 3.89 find [.] 0x000000000000af89 /usr/bin/find: 2.19 find [k] 0xffffffffa00518e0 ext3fs_dirhash 1.87 find [k] 0xffffffff810a6cea __rcu_read_lock 1.86 find [k] 0xffffffff811c7312 _atomic_dec_and_lock 1.86 find [.] 0x00000000000782ab /lib64/libc-2.5.so: __GI_strlen 1.85 find [k] 0xffffffff810fedfb __kmalloc 1.62 find [.] 0x00000000000430ff /lib64/libc-2.5.so: vfprintf 1.59 find [k] 0xffffffff810a6d6d __rcu_read_unlock 1.55 find [k] 0xffffffff81119395 __d_lookup 1.39 find [.] 
0x0000000000071b40 /lib64/libc-2.5.so: _int_malloc 1.30 find [k] 0xffffffffa031c4fc nfs_do_filldir 1.21 find [k] 0xffffffff811876a5 avc_has_perm_noaudit 1.15 find [k] 0xffffffff810fef62 kmem_cache_alloc 1.07 find [k] 0xffffffff811d03fb copy_user_generic_string 1.03 find [k] 0xffffffffa0043882 ext3_htree_store_dirent 0.99 find [k] 0xffffffff81393ebb _spin_lock_bh 0.98 find [k] 0xffffffffa03319a2 nfs3_decode_dirent 0.97 find [k] 0xffffffff8100bf20 system_call 0.92 find [k] 0xffffffff8139437e _spin_unlock Performance counter stats for 'perf': 244.278972 task clock ticks (msecs) 8 context switches (events) 9 CPU migrations (events) 2104 pagefaults (events) 35329669 CPU cycles (events) (scaled from 75.40%) 13740366 instructions (events) (scaled from 75.49%) 59073 cache references (events) (scaled from 24.60%) 196 cache misses (events) (scaled from 24.51%) Wall-clock time elapsed: 246.060717 msecs [acme@emilia ~]$ [acme@emilia ~]$ grep "model name" /proc/cpuinfo | head -1 model name : Intel(R) Xeon(R) CPU E5405 @ 2.00GHz [acme@emilia ~]$ grep "model name" /proc/cpuinfo | wc -l 8 [acme@emilia ~]$ Signed-off-by: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt Cc: Peter Zijlstra LKML-Reference: <20090526191904.GH4424@ghostprotocols.net> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 129 +++++++++++++++++++++------- 1 file changed, 99 insertions(+), 30 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index a58be7fee42..a4c6ffa9650 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -447,26 +447,33 @@ static size_t map__fprintf(struct map *self, FILE *fp) self->start, self->end, self->pgoff, self->dso->name); } +struct thread; + +static const char *thread__name(struct thread *self, char *bf, size_t size); + struct symhist { struct rb_node rb_node; struct dso *dso; struct symbol *sym; + struct thread *thread; uint64_t ip; uint32_t count; char level; }; static struct symhist *symhist__new(struct symbol *sym, uint64_t ip, - struct dso *dso, char level) + struct thread *thread, struct dso *dso, + char level) { struct symhist *self = malloc(sizeof(*self)); if (self != NULL) { - self->sym = sym; - self->ip = ip; - self->dso = dso; - self->level = level; - self->count = 1; + self->sym = sym; + self->thread = thread; + self->ip = ip; + self->dso = dso; + self->level = level; + self->count = 1; } return self; @@ -482,17 +489,29 @@ static void symhist__inc(struct symhist *self) ++self->count; } -static size_t symhist__fprintf(struct symhist *self, FILE *fp) +static size_t +symhist__fprintf(struct symhist *self, uint64_t total_samples, FILE *fp) { - size_t ret = fprintf(fp, "%#llx [%c] ", (unsigned long long)self->ip, self->level); + char bf[32]; + size_t ret; + + if (total_samples) + ret = fprintf(fp, "%5.2f", (self->count * 100.0) / total_samples); + else + ret = fprintf(fp, "%12d", self->count); + + ret += fprintf(fp, "%14s [%c] %#018llx ", + thread__name(self->thread, bf, sizeof(bf)), + self->level, (unsigned long long)self->ip); if (self->level != '.') - ret += fprintf(fp, "%s", self->sym ? self->sym->name: ""); + ret += fprintf(fp, "%s\n", + self->sym ? self->sym->name : ""); else - ret += fprintf(fp, "%s: %s", + ret += fprintf(fp, "%s: %s\n", self->dso ? self->dso->name : "", self->sym ? 
self->sym->name : ""); - return ret + fprintf(fp, ": %u\n", self->count); + return ret; } struct thread { @@ -503,6 +522,15 @@ struct thread { char *comm; }; +static const char *thread__name(struct thread *self, char *bf, size_t size) +{ + if (self->comm) + return self->comm; + + snprintf(bf, sizeof(bf), ":%u", self->pid); + return bf; +} + static struct thread *thread__new(pid_t pid) { struct thread *self = malloc(sizeof(*self)); @@ -542,7 +570,7 @@ static int thread__symbol_incnew(struct thread *self, struct symbol *sym, p = &(*p)->rb_right; } - sh = symhist__new(sym, ip, dso, level); + sh = symhist__new(sym, ip, self, dso, level); if (sh == NULL) return -ENOMEM; rb_link_node(&sh->rb_node, parent, p); @@ -574,7 +602,7 @@ static size_t thread__fprintf(struct thread *self, FILE *fp) for (nd = rb_first(&self->symhists); nd; nd = rb_next(nd)) { struct symhist *pos = rb_entry(nd, struct symhist, rb_node); - ret += symhist__fprintf(pos, fp); + ret += symhist__fprintf(pos, 0, fp); } return ret; @@ -628,7 +656,7 @@ static struct map *thread__find_map(struct thread *self, uint64_t ip) return NULL; } -static void threads__fprintf(FILE *fp) +void threads__fprintf(FILE *fp) { struct rb_node *nd; for (nd = rb_first(&threads); nd; nd = rb_next(nd)) { @@ -637,6 +665,61 @@ static void threads__fprintf(FILE *fp) } } +static struct rb_root global_symhists = RB_ROOT; + +static void threads__insert_symhist(struct symhist *sh) +{ + struct rb_node **p = &global_symhists.rb_node; + struct rb_node *parent = NULL; + struct symhist *iter; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct symhist, rb_node); + + /* Reverse order */ + if (sh->count > iter->count) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&sh->rb_node, parent, p); + rb_insert_color(&sh->rb_node, &global_symhists); +} + +static void threads__sort_symhists(void) +{ + struct rb_node *nd; + + for (nd = rb_first(&threads); nd; nd = rb_next(nd)) { + struct thread *thread = rb_entry(nd, struct thread, rb_node); + struct rb_node *next = rb_first(&thread->symhists); + + while (next) { + struct symhist *n = rb_entry(next, struct symhist, + rb_node); + next = rb_next(&n->rb_node); + rb_erase(&n->rb_node, &thread->symhists); + threads__insert_symhist(n); + } + + } +} + +static size_t threads__symhists_fprintf(uint64_t total_samples, FILE *fp) +{ + struct rb_node *nd; + size_t ret = 0; + + for (nd = rb_first(&global_symhists); nd; nd = rb_next(nd)) { + struct symhist *pos = rb_entry(nd, struct symhist, rb_node); + ret += symhist__fprintf(pos, total_samples, fp); + } + + return ret; +} + static int __cmd_report(void) { unsigned long offset = 0; @@ -826,23 +909,9 @@ done: return 0; } - //dsos__fprintf(stdout); - threads__fprintf(stdout); -#if 0 - std::map::iterator hi = hist.begin(); - - while (hi != hist.end()) { - rev_hist.insert(std::pair(hi->second, hi->first)); - hist.erase(hi++); - } - - std::multimap::const_iterator ri = rev_hist.begin(); + threads__sort_symhists(); + threads__symhists_fprintf(total, stdout); - while (ri != rev_hist.end()) { - printf(" %5.2f %s\n", (100.0 * ri->first)/total, ri->second.c_str()); - ri++; - } -#endif return rc; } -- cgit v1.2.3 From d8d1656ee15d3085e0085a87e70f9093a0a102c5 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 26 May 2009 19:20:57 -0300 Subject: perf report: Use hex2long instead of sscanf Before: [acme@emilia ~]$ perf record -o perf_report.perf perf stat perf report > /dev/null Performance counter stats for 'perf': 245.414985 task clock ticks 
(msecs) 6 context switches (events) 6 CPU migrations (events) 2108 pagefaults (events) 37493013 CPU cycles (events) (scaled from 67.04%) 13576789 instructions (events) (scaled from 66.76%) 57931 cache references (events) (scaled from 21.96%) 12263 cache misses (events) (scaled from 21.98%) Wall-clock time elapsed: 246.575587 msecs [acme@emilia ~]$ perf report -i perf_report.perf | head 12.15 perf [.] 0x000000000005432a /lib64/libc-2.5.so: _IO_vfscanf_internal 9.38 perf [k] 0xffffffff8101b1d2 intel_pmu_enable_all 8.53 perf [.] 0x00000000000056b8 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: dso__insert_symbol 6.61 perf [.] 0x00000000000057cb /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: dso__find_symbol 5.33 perf [k] 0xffffffff811ce082 number 4.69 perf [.] 0x0000000000034829 /lib64/libc-2.5.so: ____strtoull_l_internal 4.48 perf [.] 0x0000000000006505 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: thread__symbol_incnew 3.41 perf [.] 0x000000000000fce6 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: rb_insert_color 3.20 perf [k] 0xffffffff811cfc01 vsnprintf 2.99 perf [k] 0xffffffff811ce5e8 format_decode After: [acme@emilia ~]$ perf record -o perf_report.perf perf stat perf report > /dev/null Performance counter stats for 'perf': 218.186805 task clock ticks (msecs) 4 context switches (events) 7 CPU migrations (events) 2133 pagefaults (events) 32735365 CPU cycles (events) (scaled from 67.04%) 11952309 instructions (events) (scaled from 66.26%) 50314 cache references (events) (scaled from 21.96%) 13228 cache misses (events) (scaled from 21.98%) Wall-clock time elapsed: 218.810451 msecs [acme@emilia ~]$ perf report -i perf_report.perf | head 10.68 perf [.] 0x000000000000578d /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: dso__find_symbol 9.62 perf [.] 0x00000000000065f7 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: thread__symbol_incnew 9.40 perf [.] 0x00000000000056b4 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: dso__insert_symbol 9.19 perf [k] 0xffffffff8101b1d2 intel_pmu_enable_all 5.13 perf [.] 0x0000000000005ec7 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: hex2long 4.49 perf [k] 0xffffffff81083808 kallsyms_expand_symbol 3.85 perf [k] 0xffffffff811ce2c1 number 3.63 perf [.] 0x0000000000005e81 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: hex 2.99 perf [.] 
0x000000000000fd5b /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: rb_insert_color 2.99 perf [k] 0xffffffff811cf251 string [acme@emilia ~]$ [ Impact: optimization ] Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Thomas Gleixner Cc: John Kacur Cc: Steven Rostedt LKML-Reference: <20090526222057.GI4424@ghostprotocols.net> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 63 +++++++++++++++++++++++------ 1 file changed, 50 insertions(+), 13 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index a4c6ffa9650..c517483fd61 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -348,6 +348,39 @@ void dsos__fprintf(FILE *fp) dso__fprintf(pos, fp); } +static int hex(char ch) +{ + if ((ch >= '0') && (ch <= '9')) + return ch - '0'; + if ((ch >= 'a') && (ch <= 'f')) + return ch - 'a' + 10; + if ((ch >= 'A') && (ch <= 'F')) + return ch - 'A' + 10; + return -1; +} + +/* + * While we find nice hex chars, build a long_val. + * Return number of chars processed. + */ +int hex2long(char *ptr, unsigned long *long_val) +{ + const char *p = ptr; + *long_val = 0; + + while (*p) { + const int hex_val = hex(*p); + + if (hex_val < 0) + break; + + *long_val = (*long_val << 4) | hex_val; + p++; + } + + return p - ptr; +} + static int load_kallsyms(void) { kernel_dso = dso__new("[kernel]"); @@ -363,26 +396,30 @@ static int load_kallsyms(void) size_t n; while (!feof(file)) { - unsigned long long start; - char c, symbf[4096]; - - if (getline(&line, &n, file) < 0) + unsigned long start; + int line_len = getline(&line, &n, file); + if (line_len < 0) break; if (!line) goto out_delete_dso; - if (sscanf(line, "%llx %c %s", &start, &c, symbf) == 3) { - /* - * Well fix up the end later, when we have all sorted. - */ - struct symbol *sym = symbol__new(start, 0xdead, symbf); + line[--line_len] = '\0'; /* \n */ + + int len = hex2long(line, &start); + + len += 3; /* ' t ' */ + if (len >= line_len) + continue; + /* + * Well fix up the end later, when we have all sorted. + */ + struct symbol *sym = symbol__new(start, 0xdead, line + len); - if (sym == NULL) - goto out_delete_dso; + if (sym == NULL) + goto out_delete_dso; - dso__insert_symbol(kernel_dso, sym); - } + dso__insert_symbol(kernel_dso, sym); } /* -- cgit v1.2.3 From 03f6316d32738ec5eda2e6f628c12d1c01e61a87 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 26 May 2009 19:21:55 -0300 Subject: perf report: Only load text symbols from kallsyms Just like we do for userspace when reading the symtab, reducing the number of entries we insert on the symbols rbtree. Before: [acme@emilia ~]$ rm -f perf_report.perf ; perf record -o perf_report.perf perf stat perf report > /dev/null Performance counter stats for 'perf': 218.138382 task clock ticks (msecs) 4 context switches (events) 8 CPU migrations (events) 2136 pagefaults (events) 32746212 CPU cycles (events) (scaled from 67.04%) 11961102 instructions (events) (scaled from 66.19%) 49841 cache references (events) (scaled from 21.96%) 13777 cache misses (events) (scaled from 21.98%) Wall-clock time elapsed: 218.702477 msecs [acme@emilia ~]$ perf report -i perf_report.perf | head 11.06 perf [.] 0x00000000000057cb /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: dso__find_symbol 9.15 perf [.] 
0x00000000000056a0 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: dso__insert_symbol 8.72 perf [k] 0xffffffff8101b1d2 intel_pmu_enable_all 8.51 perf [.] 0x0000000000006672 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: thread__symbol_incnew 3.83 perf [k] 0xffffffff811cfc5a vsnprintf 3.40 perf [.] 0x0000000000005e33 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: hex 3.40 perf [.] 0x0000000000005ec7 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: hex2long 3.19 perf [k] 0xffffffff811ce1c1 number 2.77 perf [.] 0x0000000000006869 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: threads__findnew 2.77 perf [.] 0x000000000000fde3 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: rb_insert_color [acme@emilia ~]$ After: acme@emilia ~]$ rm -f perf_report.perf ; perf record -o perf_report.perf perf stat perf report > /dev/null Performance counter stats for 'perf': 190.228511 task clock ticks (msecs) 4 context switches (events) 7 CPU migrations (events) 1625 pagefaults (events) 29578745 CPU cycles (events) (scaled from 66.92%) 10516914 instructions (events) (scaled from 66.47%) 44015 cache references (events) (scaled from 22.04%) 8248 cache misses (events) (scaled from 22.07%) Wall-clock time elapsed: 190.816096 msecs [acme@emilia ~]$ perf report -i perf_report.perf | head 15.99 perf [.] 0x00000000000057a9 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: dso__find_symbol 10.87 perf [.] 0x000000000000674d /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: thread__symbol_incnew 8.74 perf [k] 0xffffffff8101b1d2 intel_pmu_enable_all 5.54 perf [.] 0x0000000000005e42 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: hex 4.48 perf [.] 0x0000000000005ebe /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: hex2long 4.48 perf [k] 0xffffffff811cfba0 vsnprintf 3.84 perf [.] 0x00000000000056b4 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: dso__insert_symbol 3.62 perf [.] 0x00000000000068d0 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: threads__findnew 3.20 perf [k] 0xffffffff811ce0b3 number 2.56 perf [.] 0x0000000000006d78 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: __cmd_report [acme@emilia ~]$ [ Impact: optimization ] Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur Cc: Steven Rostedt LKML-Reference: <20090526222155.GJ4424@ghostprotocols.net> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index c517483fd61..a55f15d7651 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -3,6 +3,7 @@ #include #include #include +#include #include "util/list.h" #include "util/rbtree.h" @@ -408,13 +409,20 @@ static int load_kallsyms(void) int len = hex2long(line, &start); - len += 3; /* ' t ' */ - if (len >= line_len) + len++; + if (len + 2 >= line_len) + continue; + + char symbol_type = line[len]; + /* + * We're interested only in code ('T'ext) + */ + if (toupper(symbol_type) != 'T') continue; /* * Well fix up the end later, when we have all sorted. 
*/ - struct symbol *sym = symbol__new(start, 0xdead, line + len); + struct symbol *sym = symbol__new(start, 0xdead, line + len + 2); if (sym == NULL) goto out_delete_dso; -- cgit v1.2.3 From af83632f98aefd1ae4d8ca3c7c285ccf6a7d3956 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 May 2009 08:38:48 +0200 Subject: perf report: Only load text symbols from kallsyms, fix - allow 'W' symbols too - Convert initializations to C99 style - whitespace cleanups Cc: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur Cc: Steven Rostedt LKML-Reference: <20090526222155.GJ4424@ghostprotocols.net> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 30 +++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index a55f15d7651..ed3da9d6198 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -384,21 +384,26 @@ int hex2long(char *ptr, unsigned long *long_val) static int load_kallsyms(void) { + struct rb_node *nd, *prevnd; + char *line = NULL; + FILE *file; + size_t n; + kernel_dso = dso__new("[kernel]"); if (kernel_dso == NULL) return -1; - FILE *file = fopen("/proc/kallsyms", "r"); - + file = fopen("/proc/kallsyms", "r"); if (file == NULL) goto out_delete_dso; - char *line = NULL; - size_t n; - while (!feof(file)) { unsigned long start; - int line_len = getline(&line, &n, file); + struct symbol *sym; + int line_len, len; + char symbol_type; + + line_len = getline(&line, &n, file); if (line_len < 0) break; @@ -407,22 +412,22 @@ static int load_kallsyms(void) line[--line_len] = '\0'; /* \n */ - int len = hex2long(line, &start); - + len = hex2long(line, &start); + len++; if (len + 2 >= line_len) continue; - char symbol_type = line[len]; + symbol_type = toupper(line[len]); /* * We're interested only in code ('T'ext) */ - if (toupper(symbol_type) != 'T') + if (symbol_type != 'T' && symbol_type != 'W') continue; /* * Well fix up the end later, when we have all sorted. */ - struct symbol *sym = symbol__new(start, 0xdead, line + len + 2); + sym = symbol__new(start, 0xdead, line + len + 2); if (sym == NULL) goto out_delete_dso; @@ -434,7 +439,7 @@ static int load_kallsyms(void) * Now that we have all sorted out, just set the ->end of all * symbols */ - struct rb_node *nd, *prevnd = rb_first(&kernel_dso->syms); + prevnd = rb_first(&kernel_dso->syms); if (prevnd == NULL) goto out_delete_line; @@ -450,6 +455,7 @@ static int load_kallsyms(void) dsos__add(kernel_dso); free(line); fclose(file); + return 0; out_delete_line: -- cgit v1.2.3 From 815e777f913ed54ddb449d2854015c65b4ecbfe3 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 26 May 2009 19:46:14 -0300 Subject: perf report: Show the IP only in --verbose mode perf: report should show the IP only in --verbose mode [acme@emilia ~]$ perf report | head 4.95 find [k] _spin_lock 2.19 find [k] ext3fs_dirhash [ext3] 1.87 find [k] __rcu_read_lock 1.86 find [k] _atomic_dec_and_lock 1.86 find [.] /lib64/libc-2.5.so: __GI_strlen 1.85 find [k] __kmalloc 1.62 find [.] 
/lib64/libc-2.5.so: vfprintf 1.59 find [k] __rcu_read_unlock 1.55 find [k] __d_lookup Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur Cc: Steven Rostedt LKML-Reference: <20090526224614.GK4424@ghostprotocols.net> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index ed3da9d6198..2d65d9c12aa 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -22,6 +22,7 @@ static int input; static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; static int dump_trace = 0; +static int verbose; static unsigned long page_size; static unsigned long mmap_window = 32; @@ -551,9 +552,12 @@ symhist__fprintf(struct symhist *self, uint64_t total_samples, FILE *fp) else ret = fprintf(fp, "%12d", self->count); - ret += fprintf(fp, "%14s [%c] %#018llx ", + ret += fprintf(fp, "%14s [%c] ", thread__name(self->thread, bf, sizeof(bf)), - self->level, (unsigned long long)self->ip); + self->level); + + if (verbose) + ret += fprintf(fp, "%#018llx ", (unsigned long long)self->ip); if (self->level != '.') ret += fprintf(fp, "%s\n", @@ -974,6 +978,8 @@ static const char * const report_usage[] = { static const struct option options[] = { OPT_STRING('i', "input", &input_name, "file", "input file name"), + OPT_BOOLEAN('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), OPT_END() -- cgit v1.2.3 From 16f762a2ac5ecf8a11f6f0332e46cc3459220da5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 May 2009 09:10:38 +0200 Subject: perf_counter tools: Introduce stricter C code checking Tighten up our C code requirements: - disallow warnings - disallow declarations-mixed-with-statements - require proper prototypes - require C99 (with gcc extensions) Fix up a ton of problems these measures unearth: - unused functions - needlessly global functions - missing prototypes - code mixed with declarations Cc: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur Cc: Steven Rostedt LKML-Reference: <20090526222155.GJ4424@ghostprotocols.net> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 2 +- Documentation/perf_counter/builtin-help.c | 2 +- Documentation/perf_counter/builtin-record.c | 38 +++++----- Documentation/perf_counter/builtin-report.c | 103 ++++++++++++++-------------- Documentation/perf_counter/builtin-stat.c | 3 +- Documentation/perf_counter/builtin-top.c | 2 +- Documentation/perf_counter/util/abspath.c | 2 +- Documentation/perf_counter/util/cache.h | 2 + Documentation/perf_counter/util/util.h | 2 + 9 files changed, 82 insertions(+), 74 deletions(-) diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 10c13a6f2bc..efb05892db6 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -159,7 +159,7 @@ uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') # CFLAGS and LDFLAGS are for the users to override from the command line. 
-CFLAGS = -ggdb3 -Wall +CFLAGS = -ggdb3 -Wall -Werror -Wstrict-prototypes -Wmissing-declarations -Wmissing-prototypes -std=gnu99 -Wdeclaration-after-statement LDFLAGS = -lpthread -lrt -lelf ALL_CFLAGS = $(CFLAGS) ALL_LDFLAGS = $(LDFLAGS) diff --git a/Documentation/perf_counter/builtin-help.c b/Documentation/perf_counter/builtin-help.c index 6616de0ef05..d2bd3177b98 100644 --- a/Documentation/perf_counter/builtin-help.c +++ b/Documentation/perf_counter/builtin-help.c @@ -399,7 +399,7 @@ static void get_html_page_path(struct strbuf *page_path, const char *page) * HTML. */ #ifndef open_html -void open_html(const char *path) +static void open_html(const char *path) { execl_perf_cmd("web--browse", "-c", "help.browser", path, NULL); } diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index ec2b787b23b..68abfdf71d3 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -1,6 +1,7 @@ #include "perf.h" +#include "builtin.h" #include "util/util.h" #include "util/parse-options.h" #include "util/parse-events.h" @@ -144,26 +145,32 @@ static int nr_poll; static int nr_cpu; struct mmap_event { - struct perf_event_header header; - __u32 pid, tid; - __u64 start; - __u64 len; - __u64 pgoff; - char filename[PATH_MAX]; + struct perf_event_header header; + __u32 pid; + __u32 tid; + __u64 start; + __u64 len; + __u64 pgoff; + char filename[PATH_MAX]; }; + struct comm_event { - struct perf_event_header header; - __u32 pid,tid; - char comm[16]; + struct perf_event_header header; + __u32 pid; + __u32 tid; + char comm[16]; }; static pid_t pid_synthesize_comm_event(pid_t pid) { + struct comm_event comm_ev; char filename[PATH_MAX]; + pid_t spid, ppid; char bf[BUFSIZ]; - struct comm_event comm_ev; + int fd, nr, ret; + char comm[18]; size_t size; - int fd; + char state; snprintf(filename, sizeof(filename), "/proc/%d/stat", pid); @@ -178,12 +185,8 @@ static pid_t pid_synthesize_comm_event(pid_t pid) } close(fd); - pid_t spid, ppid; - char state; - char comm[18]; - memset(&comm_ev, 0, sizeof(comm_ev)); - int nr = sscanf(bf, "%d %s %c %d %d ", + nr = sscanf(bf, "%d %s %c %d %d ", &spid, comm, &state, &ppid, &comm_ev.pid); if (nr != 5) { fprintf(stderr, "couldn't get COMM and pgid, malformed %s\n", @@ -198,7 +201,8 @@ static pid_t pid_synthesize_comm_event(pid_t pid) memcpy(comm_ev.comm, comm + 1, size); size = ALIGN(size, sizeof(uint64_t)); comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size); - int ret = write(output, &comm_ev, comm_ev.header.size); + + ret = write(output, &comm_ev, comm_ev.header.size); if (ret < 0) { perror("failed to write"); exit(-1); diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 2d65d9c12aa..7f1255dcd22 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -1,4 +1,5 @@ #include "util/util.h" +#include "builtin.h" #include #include @@ -22,7 +23,7 @@ static int input; static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; static int dump_trace = 0; -static int verbose; +static int verbose; static unsigned long page_size; static unsigned long mmap_window = 32; @@ -60,10 +61,10 @@ typedef union event_union { } event_t; struct symbol { - struct rb_node rb_node; - uint64_t start; - uint64_t end; - char name[0]; + struct rb_node rb_node; + __u64 start; + __u64 end; + char name[0]; }; static struct symbol *symbol__new(uint64_t start, uint64_t len, const char *name) 
@@ -86,7 +87,7 @@ static void symbol__delete(struct symbol *self) static size_t symbol__fprintf(struct symbol *self, FILE *fp) { - return fprintf(fp, " %lx-%lx %s\n", + return fprintf(fp, " %llx-%llx %s\n", self->start, self->end, self->name); } @@ -147,10 +148,12 @@ static void dso__insert_symbol(struct dso *self, struct symbol *sym) static struct symbol *dso__find_symbol(struct dso *self, uint64_t ip) { + struct rb_node *n; + if (self == NULL) return NULL; - struct rb_node *n = self->syms.rb_node; + n = self->syms.rb_node; while (n) { struct symbol *s = rb_entry(n, struct symbol, rb_node); @@ -221,33 +224,42 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, static int dso__load(struct dso *self) { - int fd = open(self->name, O_RDONLY), err = -1; + Elf_Data *symstrs; + uint32_t nr_syms; + int fd, err = -1; + uint32_t index; + GElf_Ehdr ehdr; + GElf_Shdr shdr; + Elf_Data *syms; + GElf_Sym sym; + Elf_Scn *sec; + Elf *elf; + + fd = open(self->name, O_RDONLY); if (fd == -1) return -1; - Elf *elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); + elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); if (elf == NULL) { fprintf(stderr, "%s: cannot read %s ELF file.\n", __func__, self->name); goto out_close; } - GElf_Ehdr ehdr; if (gelf_getehdr(elf, &ehdr) == NULL) { fprintf(stderr, "%s: cannot get elf header.\n", __func__); goto out_elf_end; } - GElf_Shdr shdr; - Elf_Scn *sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL); + sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL); if (sec == NULL) sec = elf_section_by_name(elf, &ehdr, &shdr, ".dynsym", NULL); if (sec == NULL) goto out_elf_end; - Elf_Data *syms = elf_getdata(sec, NULL); + syms = elf_getdata(sec, NULL); if (syms == NULL) goto out_elf_end; @@ -255,14 +267,12 @@ static int dso__load(struct dso *self) if (sec == NULL) goto out_elf_end; - Elf_Data *symstrs = elf_getdata(sec, NULL); + symstrs = elf_getdata(sec, NULL); if (symstrs == NULL) goto out_elf_end; - const uint32_t nr_syms = shdr.sh_size / shdr.sh_entsize; + nr_syms = shdr.sh_size / shdr.sh_entsize; - GElf_Sym sym; - uint32_t index; elf_symtab__for_each_symbol(syms, nr_syms, index, sym) { struct symbol *f; @@ -342,7 +352,7 @@ out_delete_dso: return NULL; } -void dsos__fprintf(FILE *fp) +static void dsos__fprintf(FILE *fp) { struct dso *pos; @@ -365,7 +375,7 @@ static int hex(char ch) * While we find nice hex chars, build a long_val. * Return number of chars processed. */ -int hex2long(char *ptr, unsigned long *long_val) +static int hex2long(char *ptr, unsigned long *long_val) { const char *p = ptr; *long_val = 0; @@ -493,12 +503,6 @@ out_delete: return NULL; } -static size_t map__fprintf(struct map *self, FILE *fp) -{ - return fprintf(fp, " %lx-%lx %lx %s\n", - self->start, self->end, self->pgoff, self->dso->name); -} - struct thread; static const char *thread__name(struct thread *self, char *bf, size_t size); @@ -531,11 +535,6 @@ static struct symhist *symhist__new(struct symbol *sym, uint64_t ip, return self; } -void symhist__delete(struct symhist *self) -{ - free(self); -} - static void symhist__inc(struct symhist *self) { ++self->count; @@ -608,6 +607,8 @@ static int thread__symbol_incnew(struct thread *self, struct symbol *sym, struct symhist *sh; while (*p != NULL) { + uint64_t start; + parent = *p; sh = rb_entry(parent, struct symhist, rb_node); @@ -617,7 +618,7 @@ static int thread__symbol_incnew(struct thread *self, struct symbol *sym, } /* Handle unresolved symbols too */ - const uint64_t start = !sh->sym ? sh->ip : sh->sym->start; + start = !sh->sym ? 
sh->ip : sh->sym->start; if (ip < start) p = &(*p)->rb_left; @@ -639,17 +640,6 @@ static int thread__set_comm(struct thread *self, const char *comm) return self->comm ? 0 : -ENOMEM; } -size_t thread__maps_fprintf(struct thread *self, FILE *fp) -{ - struct map *pos; - size_t ret = 0; - - list_for_each_entry(pos, &self->maps, node) - ret += map__fprintf(pos, fp); - - return ret; -} - static size_t thread__fprintf(struct thread *self, FILE *fp) { int ret = fprintf(fp, "thread: %d %s\n", self->pid, self->comm); @@ -657,13 +647,14 @@ static size_t thread__fprintf(struct thread *self, FILE *fp) for (nd = rb_first(&self->symhists); nd; nd = rb_next(nd)) { struct symhist *pos = rb_entry(nd, struct symhist, rb_node); + ret += symhist__fprintf(pos, 0, fp); } return ret; } -static struct rb_root threads = RB_ROOT; +static struct rb_root threads; static struct thread *threads__findnew(pid_t pid) { @@ -699,11 +690,11 @@ static void thread__insert_map(struct thread *self, struct map *map) static struct map *thread__find_map(struct thread *self, uint64_t ip) { + struct map *pos; + if (self == NULL) return NULL; - struct map *pos; - list_for_each_entry(pos, &self->maps, node) if (ip >= pos->start && ip <= pos->end) return pos; @@ -711,7 +702,7 @@ static struct map *thread__find_map(struct thread *self, uint64_t ip) return NULL; } -void threads__fprintf(FILE *fp) +static void threads__fprintf(FILE *fp) { struct rb_node *nd; for (nd = rb_first(&threads); nd; nd = rb_next(nd)) { @@ -720,7 +711,7 @@ void threads__fprintf(FILE *fp) } } -static struct rb_root global_symhists = RB_ROOT; +static struct rb_root global_symhists; static void threads__insert_symhist(struct symhist *sh) { @@ -852,7 +843,7 @@ more: (void *)(long)(event->header.size), event->header.misc, event->ip.pid, - (void *)event->ip.ip); + (void *)(long)ip); } if (thread == NULL) { @@ -866,9 +857,12 @@ more: level = 'k'; dso = kernel_dso; } else if (event->header.misc & PERF_EVENT_MISC_USER) { + struct map *map; + show = SHOW_USER; level = '.'; - struct map *map = thread__find_map(thread, ip); + + map = thread__find_map(thread, ip); if (map != NULL) { dso = map->dso; ip -= map->start + map->pgoff; @@ -896,9 +890,9 @@ more: fprintf(stderr, "%p [%p]: PERF_EVENT_MMAP: [%p(%p) @ %p]: %s\n", (void *)(offset + head), (void *)(long)(event->header.size), - (void *)event->mmap.start, - (void *)event->mmap.len, - (void *)event->mmap.pgoff, + (void *)(long)event->mmap.start, + (void *)(long)event->mmap.len, + (void *)(long)event->mmap.pgoff, event->mmap.filename); } if (thread == NULL || map == NULL) { @@ -964,6 +958,11 @@ done: return 0; } + if (verbose >= 2) { + dsos__fprintf(stdout); + threads__fprintf(stdout); + } + threads__sort_symhists(); threads__symhists_fprintf(total, stdout); diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index e7cb9412212..ce661e2fa8d 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -30,6 +30,7 @@ */ #include "perf.h" +#include "builtin.h" #include "util/util.h" #include "util/parse-options.h" #include "util/parse-events.h" @@ -108,7 +109,7 @@ static void create_perfstat_counter(int counter) } } -int do_perfstat(int argc, const char **argv) +static int do_perfstat(int argc, const char **argv) { unsigned long long t0, t1; int counter; diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 6b1c66f99e4..a890872638c 100644 --- a/Documentation/perf_counter/builtin-top.c +++ 
b/Documentation/perf_counter/builtin-top.c @@ -42,8 +42,8 @@ * Released under the GPL v2. (and only v2, not any later version) */ - #include "perf.h" +#include "builtin.h" #include "util/util.h" #include "util/util.h" #include "util/parse-options.h" diff --git a/Documentation/perf_counter/util/abspath.c b/Documentation/perf_counter/util/abspath.c index 649f34f8336..61d33b81fc9 100644 --- a/Documentation/perf_counter/util/abspath.c +++ b/Documentation/perf_counter/util/abspath.c @@ -5,7 +5,7 @@ * symlink to a directory, we do not want to say it is a directory when * dealing with tracked content in the working tree. */ -int is_directory(const char *path) +static int is_directory(const char *path) { struct stat st; return (!stat(path, &st) && S_ISDIR(st.st_mode)); diff --git a/Documentation/perf_counter/util/cache.h b/Documentation/perf_counter/util/cache.h index 71080512fa8..393d6146d13 100644 --- a/Documentation/perf_counter/util/cache.h +++ b/Documentation/perf_counter/util/cache.h @@ -104,6 +104,8 @@ char *strip_path_suffix(const char *path, const char *suffix); extern char *mkpath(const char *fmt, ...) __attribute__((format (printf, 1, 2))); extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2))); +/* perf_mkstemp() - create tmp file honoring TMPDIR variable */ +extern int perf_mkstemp(char *path, size_t len, const char *template); extern char *mksnpath(char *buf, size_t n, const char *fmt, ...) __attribute__((format (printf, 3, 4))); diff --git a/Documentation/perf_counter/util/util.h b/Documentation/perf_counter/util/util.h index 36e40c38e09..76590a16c27 100644 --- a/Documentation/perf_counter/util/util.h +++ b/Documentation/perf_counter/util/util.h @@ -309,6 +309,8 @@ extern ssize_t xread(int fd, void *buf, size_t len); extern ssize_t xwrite(int fd, const void *buf, size_t len); extern int xdup(int fd); extern FILE *xfdopen(int fd, const char *mode); +extern int xmkstemp(char *template); + static inline size_t xsize_t(off_t len) { return (size_t)len; -- cgit v1.2.3 From 23ac9cbed82b00ca3520bb81dbe9ea3b7a936a1b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 May 2009 09:33:18 +0200 Subject: perf_counter tools: Rename output.perf to perf.data output.perf is only output to perf-record - it's input to perf-report. So change it to a more direction-neutral name. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Documentation/perf-record.txt | 4 ++-- Documentation/perf_counter/Documentation/perf-report.txt | 4 ++-- Documentation/perf_counter/builtin-record.c | 2 +- Documentation/perf_counter/builtin-report.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Documentation/perf_counter/Documentation/perf-record.txt b/Documentation/perf_counter/Documentation/perf-record.txt index d07700e35eb..353db1bb98a 100644 --- a/Documentation/perf_counter/Documentation/perf-record.txt +++ b/Documentation/perf_counter/Documentation/perf-record.txt @@ -3,7 +3,7 @@ perf-record(1) NAME ---- -perf-record - Run a command and record its profile into output.perf +perf-record - Run a command and record its profile into perf.data SYNOPSIS -------- @@ -13,7 +13,7 @@ SYNOPSIS DESCRIPTION ----------- This command runs a command and gathers a performance counter profile -from it, into output.perf - without displaying anything. +from it, into perf.data - without displaying anything. 
This file can then be inspected later on, using 'perf report'. diff --git a/Documentation/perf_counter/Documentation/perf-report.txt b/Documentation/perf_counter/Documentation/perf-report.txt index 64696a21810..49efe16c958 100644 --- a/Documentation/perf_counter/Documentation/perf-report.txt +++ b/Documentation/perf_counter/Documentation/perf-report.txt @@ -3,7 +3,7 @@ perf-report(1) NAME ---- -perf-report - Read output.perf (created by perf record) and display the profile +perf-report - Read perf.data (created by perf record) and display the profile SYNOPSIS -------- @@ -19,7 +19,7 @@ OPTIONS ------- -i:: --input=:: - Input file name. (default: output.perf) + Input file name. (default: perf.data) Configuration ------------- diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 68abfdf71d3..431077a6fb7 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -19,7 +19,7 @@ static int nr_cpus = 0; static unsigned int page_size; static unsigned int mmap_pages = 16; static int output; -static const char *output_name = "output.perf"; +static const char *output_name = "perf.data"; static int group = 0; static unsigned int realtime_prio = 0; static int system_wide = 0; diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 7f1255dcd22..e2712cd0631 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -18,7 +18,7 @@ #define SHOW_USER 2 #define SHOW_HV 4 -static char const *input_name = "output.perf"; +static char const *input_name = "perf.data"; static int input; static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; -- cgit v1.2.3 From a930d2c0d0a685ab955472b08baad041cc5edb4a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 May 2009 09:50:13 +0200 Subject: perf_counter tools: Add built-in pager support Add Git's pager.c (and sigchain) code. A command only has to call setup_pager() to get paged interactive output. Non-interactive (redirected, command-piped, etc.) uses are not affected. Update perf-report to make use of this. 
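A minimal sketch of what a builtin has to do (cmd_foo(), __cmd_foo() and
foo_usage are made-up names for illustration; the real hunk below does the
same thing in cmd_report(), and setup_pager() is assumed to be declared in
util/cache.h, which is what builtin-report.c now includes):

	#include "util/cache.h"

	int cmd_foo(int argc, const char **argv, const char *prefix)
	{
		argc = parse_options(argc, argv, options, foo_usage, 0);

		/* no-op when stdout is not a tty, so pipes stay unpaged */
		setup_pager();

		return __cmd_foo();
	}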
[ Impact: new feature ] Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 4 ++ Documentation/perf_counter/builtin-report.c | 3 + Documentation/perf_counter/util/environment.c | 8 +++ Documentation/perf_counter/util/pager.c | 99 +++++++++++++++++++++++++++ Documentation/perf_counter/util/sigchain.c | 52 ++++++++++++++ Documentation/perf_counter/util/sigchain.h | 11 +++ 6 files changed, 177 insertions(+) create mode 100644 Documentation/perf_counter/util/environment.c create mode 100644 Documentation/perf_counter/util/pager.c create mode 100644 Documentation/perf_counter/util/sigchain.c create mode 100644 Documentation/perf_counter/util/sigchain.h diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index efb05892db6..51b13f98983 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -297,11 +297,13 @@ LIB_H += util/util.h LIB_H += util/help.h LIB_H += util/strbuf.h LIB_H += util/run-command.h +LIB_H += util/sigchain.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o LIB_OBJS += util/config.o LIB_OBJS += util/ctype.o +LIB_OBJS += util/environment.o LIB_OBJS += util/exec_cmd.o LIB_OBJS += util/help.o LIB_OBJS += util/levenshtein.o @@ -314,6 +316,8 @@ LIB_OBJS += util/quote.o LIB_OBJS += util/strbuf.o LIB_OBJS += util/usage.o LIB_OBJS += util/wrapper.o +LIB_OBJS += util/sigchain.o +LIB_OBJS += util/pager.o BUILTIN_OBJS += builtin-help.o BUILTIN_OBJS += builtin-record.o diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index e2712cd0631..9aef7c54483 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -7,6 +7,7 @@ #include #include "util/list.h" +#include "util/cache.h" #include "util/rbtree.h" #include "perf.h" @@ -992,5 +993,7 @@ int cmd_report(int argc, const char **argv, const char *prefix) parse_options(argc, argv, options, report_usage, 0); + setup_pager(); + return __cmd_report(); } diff --git a/Documentation/perf_counter/util/environment.c b/Documentation/perf_counter/util/environment.c new file mode 100644 index 00000000000..9b1c8199e72 --- /dev/null +++ b/Documentation/perf_counter/util/environment.c @@ -0,0 +1,8 @@ +/* + * We put all the perf config variables in this same object + * file, so that programs can link against the config parser + * without having to link against all the rest of perf. + */ +#include "cache.h" + +const char *pager_program; diff --git a/Documentation/perf_counter/util/pager.c b/Documentation/perf_counter/util/pager.c new file mode 100644 index 00000000000..a28bccae545 --- /dev/null +++ b/Documentation/perf_counter/util/pager.c @@ -0,0 +1,99 @@ +#include "cache.h" +#include "run-command.h" +#include "sigchain.h" + +/* + * This is split up from the rest of git so that we can do + * something different on Windows. 
+ */ + +static int spawned_pager; + +#ifndef __MINGW32__ +static void pager_preexec(void) +{ + /* + * Work around bug in "less" by not starting it until we + * have real input + */ + fd_set in; + + FD_ZERO(&in); + FD_SET(0, &in); + select(1, &in, NULL, &in, NULL); + + setenv("LESS", "FRSX", 0); +} +#endif + +static const char *pager_argv[] = { "sh", "-c", NULL, NULL }; +static struct child_process pager_process; + +static void wait_for_pager(void) +{ + fflush(stdout); + fflush(stderr); + /* signal EOF to pager */ + close(1); + close(2); + finish_command(&pager_process); +} + +static void wait_for_pager_signal(int signo) +{ + wait_for_pager(); + sigchain_pop(signo); + raise(signo); +} + +void setup_pager(void) +{ + const char *pager = getenv("PERF_PAGER"); + + if (!isatty(1)) + return; + if (!pager) { + if (!pager_program) + perf_config(perf_default_config, NULL); + pager = pager_program; + } + if (!pager) + pager = getenv("PAGER"); + if (!pager) + pager = "less"; + else if (!*pager || !strcmp(pager, "cat")) + return; + + spawned_pager = 1; /* means we are emitting to terminal */ + + /* spawn the pager */ + pager_argv[2] = pager; + pager_process.argv = pager_argv; + pager_process.in = -1; +#ifndef __MINGW32__ + pager_process.preexec_cb = pager_preexec; +#endif + if (start_command(&pager_process)) + return; + + /* original process continues, but writes to the pipe */ + dup2(pager_process.in, 1); + if (isatty(2)) + dup2(pager_process.in, 2); + close(pager_process.in); + + /* this makes sure that the parent terminates after the pager */ + sigchain_push_common(wait_for_pager_signal); + atexit(wait_for_pager); +} + +int pager_in_use(void) +{ + const char *env; + + if (spawned_pager) + return 1; + + env = getenv("PERF_PAGER_IN_USE"); + return env ? perf_config_bool("PERF_PAGER_IN_USE", env) : 0; +} diff --git a/Documentation/perf_counter/util/sigchain.c b/Documentation/perf_counter/util/sigchain.c new file mode 100644 index 00000000000..1118b99e57d --- /dev/null +++ b/Documentation/perf_counter/util/sigchain.c @@ -0,0 +1,52 @@ +#include "sigchain.h" +#include "cache.h" + +#define SIGCHAIN_MAX_SIGNALS 32 + +struct sigchain_signal { + sigchain_fun *old; + int n; + int alloc; +}; +static struct sigchain_signal signals[SIGCHAIN_MAX_SIGNALS]; + +static void check_signum(int sig) +{ + if (sig < 1 || sig >= SIGCHAIN_MAX_SIGNALS) + die("BUG: signal out of range: %d", sig); +} + +int sigchain_push(int sig, sigchain_fun f) +{ + struct sigchain_signal *s = signals + sig; + check_signum(sig); + + ALLOC_GROW(s->old, s->n + 1, s->alloc); + s->old[s->n] = signal(sig, f); + if (s->old[s->n] == SIG_ERR) + return -1; + s->n++; + return 0; +} + +int sigchain_pop(int sig) +{ + struct sigchain_signal *s = signals + sig; + check_signum(sig); + if (s->n < 1) + return 0; + + if (signal(sig, s->old[s->n - 1]) == SIG_ERR) + return -1; + s->n--; + return 0; +} + +void sigchain_push_common(sigchain_fun f) +{ + sigchain_push(SIGINT, f); + sigchain_push(SIGHUP, f); + sigchain_push(SIGTERM, f); + sigchain_push(SIGQUIT, f); + sigchain_push(SIGPIPE, f); +} diff --git a/Documentation/perf_counter/util/sigchain.h b/Documentation/perf_counter/util/sigchain.h new file mode 100644 index 00000000000..618083bce0c --- /dev/null +++ b/Documentation/perf_counter/util/sigchain.h @@ -0,0 +1,11 @@ +#ifndef SIGCHAIN_H +#define SIGCHAIN_H + +typedef void (*sigchain_fun)(int); + +int sigchain_push(int sig, sigchain_fun f); +int sigchain_pop(int sig); + +void sigchain_push_common(sigchain_fun f); + +#endif /* SIGCHAIN_H */ -- cgit v1.2.3 From 
ef65b2a0b3a2f82850144df6e6a7796f6d66da6b Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Wed, 27 May 2009 10:10:51 +0200 Subject: perf record: Fix the profiling of existing pid or whole box Perf record bails if no command argument is provided, so you can't use naked -a or -p to profile a running task or the whole box. Allow foreground profiling of an existing pid or the entire system. [ Impact: fix command option handling bug ] Signed-off-by: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 431077a6fb7..4a068664a32 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -354,7 +354,7 @@ static int __cmd_record(int argc, const char **argv) signal(SIGCHLD, sig_handler); signal(SIGINT, sig_handler); - if (target_pid == -1) { + if (target_pid == -1 && argc) { pid = fork(); if (pid < 0) perror("failed to fork"); @@ -430,7 +430,7 @@ int cmd_record(int argc, const char **argv, const char *prefix) create_events_help(events_help_msg); argc = parse_options(argc, argv, options, record_usage, 0); - if (!argc) + if (!argc && target_pid == -1 && !system_wide) usage_with_options(record_usage, options); if (!nr_counters) { -- cgit v1.2.3 From d716fba49c7445ec87c3f045c59624fac03ee3f2 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 May 2009 13:19:59 +0200 Subject: perf report: Remove include Pekka reported build failure in builtin-report.c: CC builtin-report.o In file included from builtin-report.c:7: /usr/include/ctype.h:102: error: expected expression before token And observed: | Removing #include from builtin-report.c makes the problem | go away. I am running Ubuntu 9.04 that has gcc 4.3.3 and libc 2.9. Reported-by: Pekka J Enberg Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 1 - 1 file changed, 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 9aef7c54483..6265bedcd93 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -4,7 +4,6 @@ #include #include #include -#include #include "util/list.h" #include "util/cache.h" -- cgit v1.2.3 From b7a16eac5e679fb5f531b9eeff7db7952303e77d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 27 May 2009 13:35:35 +0200 Subject: perf_counter: tools: /usr/lib/debug%s.debug support Some distros seem to store debuginfo in weird places. 
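For /lib64/libc-2.5.so, for instance, dso__load() now probes, in order
(the Fedora/Ubuntu attribution below mirrors the comments in the patch;
whether other distros match either layout is an assumption):

	/usr/lib/debug/lib64/libc-2.5.so.debug    (Fedora style)
	/usr/lib/debug/lib64/libc-2.5.so          (Ubuntu style)
	/lib64/libc-2.5.so                        (the DSO itself)

and falls through to the next variant whenever a file is missing or turns
out to contain no usable symbols.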
Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 94 +++++++++++++++++++++++------ 1 file changed, 76 insertions(+), 18 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 6265bedcd93..a9ff49a4ede 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -190,7 +190,8 @@ static inline int elf_sym__is_function(const GElf_Sym *sym) { return elf_sym__type(sym) == STT_FUNC && sym->st_name != 0 && - sym->st_shndx != SHN_UNDEF; + sym->st_shndx != SHN_UNDEF && + sym->st_size != 0; } static inline const char *elf_sym__name(const GElf_Sym *sym, @@ -222,11 +223,11 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, return sec; } -static int dso__load(struct dso *self) +static int dso__load_sym(struct dso *self, int fd, char *name) { Elf_Data *symstrs; uint32_t nr_syms; - int fd, err = -1; + int err = -1; uint32_t index; GElf_Ehdr ehdr; GElf_Shdr shdr; @@ -234,16 +235,12 @@ static int dso__load(struct dso *self) GElf_Sym sym; Elf_Scn *sec; Elf *elf; - - - fd = open(self->name, O_RDONLY); - if (fd == -1) - return -1; + int nr = 0; elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); if (elf == NULL) { fprintf(stderr, "%s: cannot read %s ELF file.\n", - __func__, self->name); + __func__, name); goto out_close; } @@ -292,16 +289,63 @@ static int dso__load(struct dso *self) goto out_elf_end; dso__insert_symbol(self, f); + + nr++; } - err = 0; + err = nr; out_elf_end: elf_end(elf); out_close: - close(fd); return err; } +static int dso__load(struct dso *self) +{ + int size = strlen(self->name) + sizeof("/usr/lib/debug%s.debug"); + char *name = malloc(size); + int variant = 0; + int ret = -1; + int fd; + + if (!name) + return -1; + +more: + do { + switch (variant) { + case 0: /* Fedora */ + snprintf(name, size, "/usr/lib/debug%s.debug", self->name); + break; + case 1: /* Ubuntu */ + snprintf(name, size, "/usr/lib/debug%s", self->name); + break; + case 2: /* Sane people */ + snprintf(name, size, "%s", self->name); + break; + + default: + goto out; + } + variant++; + + fd = open(name, O_RDONLY); + } while (fd < 0); + + ret = dso__load_sym(self, fd, name); + close(fd); + + /* + * Some people seem to have debuginfo files _WITHOUT_ debug info!?!? 
+ */ + if (!ret) + goto more; + +out: + free(name); + return ret; +} + static size_t dso__fprintf(struct dso *self, FILE *fp) { size_t ret = fprintf(fp, "dso: %s\n", self->name); @@ -336,11 +380,23 @@ static struct dso *dsos__find(const char *name) static struct dso *dsos__findnew(const char *name) { struct dso *dso = dsos__find(name); + int nr; if (dso == NULL) { dso = dso__new(name); - if (dso != NULL && dso__load(dso) < 0) + if (!dso) + goto out_delete_dso; + + nr = dso__load(dso); + if (nr < 0) { + fprintf(stderr, "Failed to open: %s\n", name); goto out_delete_dso; + } + if (!nr) { + fprintf(stderr, + "Failed to find debug symbols for: %s, maybe install a debug package?\n", + name); + } dsos__add(dso); } @@ -547,9 +603,9 @@ symhist__fprintf(struct symhist *self, uint64_t total_samples, FILE *fp) size_t ret; if (total_samples) - ret = fprintf(fp, "%5.2f", (self->count * 100.0) / total_samples); + ret = fprintf(fp, "%5.2f%% ", (self->count * 100.0) / total_samples); else - ret = fprintf(fp, "%12d", self->count); + ret = fprintf(fp, "%12d ", self->count); ret += fprintf(fp, "%14s [%c] ", thread__name(self->thread, bf, sizeof(bf)), @@ -922,10 +978,12 @@ more: } default: { broken_event: - fprintf(stderr, "%p [%p]: skipping unknown header type: %d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.type); + if (dump_trace) + fprintf(stderr, "%p [%p]: skipping unknown header type: %d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->header.type); + total_unknown++; /* -- cgit v1.2.3 From 450aaa2b2a1b006870ba68251fbb40b2387caade Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 27 May 2009 20:20:23 +0200 Subject: perf_counter: tools: report: Add vmlinux support Allow to use vmlinux instead of kallsyms. 
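Usage is along these lines (the image paths are examples only; any ELF
vmlinux with a symbol table should do):

	$ perf report -k /boot/vmlinux
	$ perf report --vmlinux=/tmp/vmlinux

If the file cannot be opened or yields no symbols, the code quietly falls
back to /proc/kallsyms.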
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur Cc: Mike Galbraith LKML-Reference: <20090527182100.740018486@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 37 ++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index a9ff49a4ede..3e87cbd3045 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -19,6 +19,7 @@ #define SHOW_HV 4 static char const *input_name = "perf.data"; +static char *vmlinux = NULL; static int input; static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; @@ -532,6 +533,39 @@ out_delete_dso: return -1; } +static int load_kernel(void) +{ + int fd, nr; + + if (!vmlinux) + goto kallsyms; + + fd = open(vmlinux, O_RDONLY); + if (fd < 0) + goto kallsyms; + + kernel_dso = dso__new("[kernel]"); + if (!kernel_dso) + goto fail_open; + + nr = dso__load_sym(kernel_dso, fd, vmlinux); + + if (nr <= 0) + goto fail_load; + + dsos__add(kernel_dso); + close(fd); + + return 0; + +fail_load: + dso__delete(kernel_dso); +fail_open: + close(fd); +kallsyms: + return load_kallsyms(); +} + struct map { struct list_head node; uint64_t start; @@ -850,7 +884,7 @@ static int __cmd_report(void) exit(0); } - if (load_kallsyms() < 0) { + if (load_kernel() < 0) { perror("failed to open kallsyms"); return EXIT_FAILURE; } @@ -1039,6 +1073,7 @@ static const struct option options[] = { "be more verbose (show symbol address, etc)"), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), + OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"), OPT_END() }; -- cgit v1.2.3 From e7fb08b1d06a6b37263c765205de5614a2273aeb Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 27 May 2009 20:20:24 +0200 Subject: perf_counter: tools: report: Rework histogram code In preparation for configurable sorting, rework the histogram code a bit.
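The new flow, as a rough sketch (the function names are the ones
introduced below; the event loop around them is elided):

	/* collect: one global rb-tree, keyed on (pid, symbol start/ip) */
	if (hist_entry__add(thread, map, dso, sym, ip, level))
		goto done;		/* allocation failure */

	/* once all events are in: re-sort by count and print */
	output__resort();
	output__fprintf(stdout, total);

A hit on an existing entry just bumps its count; the sort-by-count pass
happens once, at output time.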
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur Cc: Mike Galbraith LKML-Reference: <20090527182100.796410098@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 294 ++++++++++++++-------------- 1 file changed, 143 insertions(+), 151 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 3e87cbd3045..276256439b7 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -597,71 +597,9 @@ struct thread; static const char *thread__name(struct thread *self, char *bf, size_t size); -struct symhist { - struct rb_node rb_node; - struct dso *dso; - struct symbol *sym; - struct thread *thread; - uint64_t ip; - uint32_t count; - char level; -}; - -static struct symhist *symhist__new(struct symbol *sym, uint64_t ip, - struct thread *thread, struct dso *dso, - char level) -{ - struct symhist *self = malloc(sizeof(*self)); - - if (self != NULL) { - self->sym = sym; - self->thread = thread; - self->ip = ip; - self->dso = dso; - self->level = level; - self->count = 1; - } - - return self; -} - -static void symhist__inc(struct symhist *self) -{ - ++self->count; -} - -static size_t -symhist__fprintf(struct symhist *self, uint64_t total_samples, FILE *fp) -{ - char bf[32]; - size_t ret; - - if (total_samples) - ret = fprintf(fp, "%5.2f%% ", (self->count * 100.0) / total_samples); - else - ret = fprintf(fp, "%12d ", self->count); - - ret += fprintf(fp, "%14s [%c] ", - thread__name(self->thread, bf, sizeof(bf)), - self->level); - - if (verbose) - ret += fprintf(fp, "%#018llx ", (unsigned long long)self->ip); - - if (self->level != '.') - ret += fprintf(fp, "%s\n", - self->sym ? self->sym->name : ""); - else - ret += fprintf(fp, "%s: %s\n", - self->dso ? self->dso->name : "", - self->sym ? self->sym->name : ""); - return ret; -} - struct thread { struct rb_node rb_node; struct list_head maps; - struct rb_root symhists; pid_t pid; char *comm; }; @@ -683,67 +621,17 @@ static struct thread *thread__new(pid_t pid) self->pid = pid; self->comm = NULL; INIT_LIST_HEAD(&self->maps); - self->symhists = RB_ROOT; } return self; } -static int thread__symbol_incnew(struct thread *self, struct symbol *sym, - uint64_t ip, struct dso *dso, char level) -{ - struct rb_node **p = &self->symhists.rb_node; - struct rb_node *parent = NULL; - struct symhist *sh; - - while (*p != NULL) { - uint64_t start; - - parent = *p; - sh = rb_entry(parent, struct symhist, rb_node); - - if (sh->sym == sym || ip == sh->ip) { - symhist__inc(sh); - return 0; - } - - /* Handle unresolved symbols too */ - start = !sh->sym ? sh->ip : sh->sym->start; - - if (ip < start) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - sh = symhist__new(sym, ip, self, dso, level); - if (sh == NULL) - return -ENOMEM; - rb_link_node(&sh->rb_node, parent, p); - rb_insert_color(&sh->rb_node, &self->symhists); - return 0; -} - static int thread__set_comm(struct thread *self, const char *comm) { self->comm = strdup(comm); return self->comm ? 
0 : -ENOMEM; } -static size_t thread__fprintf(struct thread *self, FILE *fp) -{ - int ret = fprintf(fp, "thread: %d %s\n", self->pid, self->comm); - struct rb_node *nd; - - for (nd = rb_first(&self->symhists); nd; nd = rb_next(nd)) { - struct symhist *pos = rb_entry(nd, struct symhist, rb_node); - - ret += symhist__fprintf(pos, 0, fp); - } - - return ret; -} - static struct rb_root threads; static struct thread *threads__findnew(pid_t pid) @@ -792,70 +680,172 @@ static struct map *thread__find_map(struct thread *self, uint64_t ip) return NULL; } -static void threads__fprintf(FILE *fp) +/* + * histogram, sorted on item, collects counts + */ + +static struct rb_root hist; + +struct hist_entry { + struct rb_node rb_node; + + struct thread *thread; + struct map *map; + struct dso *dso; + struct symbol *sym; + uint64_t ip; + char level; + + uint32_t count; +}; + +static int64_t +hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) +{ + uint64_t ip_l, ip_r; + int cmp = right->thread->pid - left->thread->pid; + + if (cmp) + return cmp; + + if (left->sym == right->sym) + return 0; + + ip_l = left->sym ? left->sym->start : left->ip; + ip_r = right->sym ? right->sym->start : right->ip; + + return (int64_t)(ip_r - ip_l); +} + +static int +hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, + struct symbol *sym, uint64_t ip, char level) { - struct rb_node *nd; - for (nd = rb_first(&threads); nd; nd = rb_next(nd)) { - struct thread *pos = rb_entry(nd, struct thread, rb_node); - thread__fprintf(pos, fp); + struct rb_node **p = &hist.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *he; + struct hist_entry entry = { + .thread = thread, + .map = map, + .dso = dso, + .sym = sym, + .ip = ip, + .level = level, + .count = 1, + }; + int cmp; + + while (*p != NULL) { + parent = *p; + he = rb_entry(parent, struct hist_entry, rb_node); + + cmp = hist_entry__cmp(&entry, he); + + if (!cmp) { + he->count++; + return 0; + } + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; } + + he = malloc(sizeof(*he)); + if (!he) + return -ENOMEM; + *he = entry; + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &hist); + + return 0; } -static struct rb_root global_symhists; +static size_t +hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples) +{ + char bf[32]; + size_t ret; + + if (total_samples) { + ret = fprintf(fp, "%5.2f%% ", + (self->count * 100.0) / total_samples); + } else + ret = fprintf(fp, "%12d ", self->count); -static void threads__insert_symhist(struct symhist *sh) + ret += fprintf(fp, "%14s [%c] ", + thread__name(self->thread, bf, sizeof(bf)), + self->level); + + if (verbose) + ret += fprintf(fp, "%#018llx ", (unsigned long long)self->ip); + + if (self->level != '.') + ret += fprintf(fp, "%s\n", + self->sym ? self->sym->name : ""); + else + ret += fprintf(fp, "%s: %s\n", + self->dso ? self->dso->name : "", + self->sym ? self->sym->name : ""); + return ret; +} + +/* + * reverse the map, sort on count. 
+ */ + +static struct rb_root output_hists; + +static void output__insert_entry(struct hist_entry *he) { - struct rb_node **p = &global_symhists.rb_node; + struct rb_node **p = &output_hists.rb_node; struct rb_node *parent = NULL; - struct symhist *iter; + struct hist_entry *iter; while (*p != NULL) { parent = *p; - iter = rb_entry(parent, struct symhist, rb_node); + iter = rb_entry(parent, struct hist_entry, rb_node); - /* Reverse order */ - if (sh->count > iter->count) + if (he->count > iter->count) p = &(*p)->rb_left; else p = &(*p)->rb_right; } - rb_link_node(&sh->rb_node, parent, p); - rb_insert_color(&sh->rb_node, &global_symhists); + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &output_hists); } -static void threads__sort_symhists(void) +static void output__resort(void) { - struct rb_node *nd; - - for (nd = rb_first(&threads); nd; nd = rb_next(nd)) { - struct thread *thread = rb_entry(nd, struct thread, rb_node); - struct rb_node *next = rb_first(&thread->symhists); + struct rb_node *next = rb_first(&hist); + struct hist_entry *n; - while (next) { - struct symhist *n = rb_entry(next, struct symhist, - rb_node); - next = rb_next(&n->rb_node); - rb_erase(&n->rb_node, &thread->symhists); - threads__insert_symhist(n); - } + while (next) { + n = rb_entry(next, struct hist_entry, rb_node); + next = rb_next(&n->rb_node); + rb_erase(&n->rb_node, &hist); + output__insert_entry(n); } } -static size_t threads__symhists_fprintf(uint64_t total_samples, FILE *fp) +static size_t output__fprintf(FILE *fp, uint64_t total_samples) { + struct hist_entry *pos; struct rb_node *nd; size_t ret = 0; - for (nd = rb_first(&global_symhists); nd; nd = rb_next(nd)) { - struct symhist *pos = rb_entry(nd, struct symhist, rb_node); - ret += symhist__fprintf(pos, total_samples, fp); + for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) { + pos = rb_entry(nd, struct hist_entry, rb_node); + ret += hist_entry__fprintf(fp, pos, total_samples); } return ret; } + static int __cmd_report(void) { unsigned long offset = 0; @@ -926,6 +916,7 @@ more: struct dso *dso = NULL; struct thread *thread = threads__findnew(event->ip.pid); uint64_t ip = event->ip.ip; + struct map *map = NULL; if (dump_trace) { fprintf(stderr, "%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", @@ -945,9 +936,10 @@ more: if (event->header.misc & PERF_EVENT_MISC_KERNEL) { show = SHOW_KERNEL; level = 'k'; + dso = kernel_dso; + } else if (event->header.misc & PERF_EVENT_MISC_USER) { - struct map *map; show = SHOW_USER; level = '.'; @@ -957,6 +949,7 @@ more: dso = map->dso; ip -= map->start + map->pgoff; } + } else { show = SHOW_HV; level = 'H'; @@ -965,8 +958,9 @@ more: if (show & show_mask) { struct symbol *sym = dso__find_symbol(dso, ip); - if (thread__symbol_incnew(thread, sym, ip, dso, level)) { - fprintf(stderr, "problem incrementing symbol count, bailing out\n"); + if (hist_entry__add(thread, map, dso, sym, ip, level)) { + fprintf(stderr, + "problem incrementing symbol count, bailing out\n"); goto done; } } @@ -1050,13 +1044,11 @@ done: return 0; } - if (verbose >= 2) { + if (verbose >= 2) dsos__fprintf(stdout); - threads__fprintf(stdout); - } - threads__sort_symhists(); - threads__symhists_fprintf(total, stdout); + output__resort(); + output__fprintf(stdout, total); return rc; } -- cgit v1.2.3 From 1aa167382323eeeeb38368cab85cf17979793cbe Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 27 May 2009 20:20:25 +0200 Subject: perf_counter: tools: report: Dynamic sort/print bits Make the sorting and printing dynamic. 
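Each sort key is now a struct sort_entry carrying a cmp and a print
method, and hist_entry__cmp() simply walks whatever is on
hist_entry__sort_list. As an illustration, a new key would need little
more than the below (the ->cpu member and the sort__cpu_* names are
hypothetical, not part of this patch):

	static int64_t
	sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
	{
		/* assumes hist_entry grew a ->cpu member */
		return right->cpu - left->cpu;
	}

	static size_t
	sort__cpu_print(FILE *fp, struct hist_entry *self)
	{
		return fprintf(fp, "%3d ", self->cpu);
	}

	static struct sort_entry sort_cpu = {
		.cmp	= sort__cpu_cmp,
		.print	= sort__cpu_print,
	};

	/* and in setup_sorting(): */
	list_add_tail(&sort_cpu.list, &hist_entry__sort_list);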
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur Cc: Mike Galbraith LKML-Reference: <20090527182100.921953817@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 141 +++++++++++++++++++++------- 1 file changed, 107 insertions(+), 34 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 276256439b7..856186fd2bd 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -699,14 +699,41 @@ struct hist_entry { uint32_t count; }; +/* + * configurable sorting bits + */ + +struct sort_entry { + struct list_head list; + + int64_t (*cmp)(struct hist_entry *, struct hist_entry *); + size_t (*print)(FILE *fp, struct hist_entry *); +}; + static int64_t -hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) +sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) { - uint64_t ip_l, ip_r; - int cmp = right->thread->pid - left->thread->pid; + return right->thread->pid - left->thread->pid; +} + +static size_t +sort__thread_print(FILE *fp, struct hist_entry *self) +{ + char bf[32]; + + return fprintf(fp, "%14s ", + thread__name(self->thread, bf, sizeof(bf))); +} - if (cmp) - return cmp; +static struct sort_entry sort_thread = { + .cmp = sort__thread_cmp, + .print = sort__thread_print, +}; + +static int64_t +sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) +{ + uint64_t ip_l, ip_r; if (left->sym == right->sym) return 0; @@ -717,6 +744,79 @@ hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) return (int64_t)(ip_r - ip_l); } +static size_t +sort__sym_print(FILE *fp, struct hist_entry *self) +{ + size_t ret = 0; + + ret += fprintf(fp, "[%c] ", self->level); + + if (verbose) + ret += fprintf(fp, "%#018llx ", (unsigned long long)self->ip); + + if (self->level != '.') + ret += fprintf(fp, "%s ", + self->sym ? self->sym->name : ""); + else + ret += fprintf(fp, "%s: %s ", + self->dso ? self->dso->name : "", + self->sym ? 
self->sym->name : ""); + + return ret; +} + +static struct sort_entry sort_sym = { + .cmp = sort__sym_cmp, + .print = sort__sym_print, +}; + +static LIST_HEAD(hist_entry__sort_list); + +static void setup_sorting(void) +{ + list_add_tail(&sort_thread.list, &hist_entry__sort_list); + list_add_tail(&sort_sym.list, &hist_entry__sort_list); +} + +static int64_t +hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct sort_entry *se; + int64_t cmp = 0; + + list_for_each_entry(se, &hist_entry__sort_list, list) { + cmp = se->cmp(left, right); + if (cmp) + break; + } + + return cmp; +} + +static size_t +hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples) +{ + struct sort_entry *se; + size_t ret; + + if (total_samples) { + ret = fprintf(fp, "%5.2f%% ", + (self->count * 100.0) / total_samples); + } else + ret = fprintf(fp, "%12d ", self->count); + + list_for_each_entry(se, &hist_entry__sort_list, list) + ret += se->print(fp, self); + + ret += fprintf(fp, "\n"); + + return ret; +} + +/* + * collect histogram counts + */ + static int hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, struct symbol *sym, uint64_t ip, char level) @@ -762,35 +862,6 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, return 0; } -static size_t -hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples) -{ - char bf[32]; - size_t ret; - - if (total_samples) { - ret = fprintf(fp, "%5.2f%% ", - (self->count * 100.0) / total_samples); - } else - ret = fprintf(fp, "%12d ", self->count); - - ret += fprintf(fp, "%14s [%c] ", - thread__name(self->thread, bf, sizeof(bf)), - self->level); - - if (verbose) - ret += fprintf(fp, "%#018llx ", (unsigned long long)self->ip); - - if (self->level != '.') - ret += fprintf(fp, "%s\n", - self->sym ? self->sym->name : ""); - else - ret += fprintf(fp, "%s: %s\n", - self->dso ? self->dso->name : "", - self->sym ? self->sym->name : ""); - return ret; -} - /* * reverse the map, sort on count. */ @@ -1077,6 +1148,8 @@ int cmd_report(int argc, const char **argv, const char *prefix) parse_options(argc, argv, options, report_usage, 0); + setup_sorting(); + setup_pager(); return __cmd_report(); -- cgit v1.2.3 From 37f440cba299bb479cf45d12eef923f0979dbcaf Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 27 May 2009 20:20:26 +0200 Subject: pref_counter: tools: report: Add --sort option option parsing for dynamic sorting. 
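With this in place:

	$ perf report --sort symbol
	$ perf report -s pid,symbol

Only the pid and symbol dimensions exist at this point (follow-up patches
add more), and tokens may be separated by commas or spaces, per the
strtok_r(", ") in the patch.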
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur Cc: Mike Galbraith LKML-Reference: <20090527182101.041817692@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 43 +++++++++++++++++++++++++++-- 1 file changed, 41 insertions(+), 2 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 856186fd2bd..982abce0e7c 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -20,6 +20,7 @@ static char const *input_name = "perf.data"; static char *vmlinux = NULL; +static char *sort_order = "pid,symbol"; static int input; static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; @@ -770,12 +771,49 @@ static struct sort_entry sort_sym = { .print = sort__sym_print, }; +struct sort_dimension { + char *name; + struct sort_entry *entry; + int taken; +}; + +static struct sort_dimension sort_dimensions[] = { + { .name = "pid", .entry = &sort_thread, }, + { .name = "symbol", .entry = &sort_sym, }, +}; + static LIST_HEAD(hist_entry__sort_list); +static int sort_dimension__add(char *tok) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { + struct sort_dimension *sd = &sort_dimensions[i]; + + if (sd->taken) + continue; + + if (strcmp(tok, sd->name)) + continue; + + list_add_tail(&sd->entry->list, &hist_entry__sort_list); + sd->taken = 1; + return 0; + } + + return -ESRCH; +} + static void setup_sorting(void) { - list_add_tail(&sort_thread.list, &hist_entry__sort_list); - list_add_tail(&sort_sym.list, &hist_entry__sort_list); + char *tmp, *tok, *str = strdup(sort_order); + + for (tok = strtok_r(str, ", ", &tmp); + tok; tok = strtok_r(NULL, ", ", &tmp)) + sort_dimension__add(tok); + + free(str); } static int64_t @@ -1137,6 +1175,7 @@ static const struct option options[] = { OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"), + OPT_STRING('s', "sort", &sort_order, "foo", "bar"), OPT_END() }; -- cgit v1.2.3 From 992444b173f35997f96f5cbb214f0de81d1b97ff Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 27 May 2009 20:20:27 +0200 Subject: perf_counter: tools: report: Add comm sorting Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur Cc: Mike Galbraith LKML-Reference: <20090527182101.129302022@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 30 +++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 982abce0e7c..a634022bae0 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -731,6 +731,35 @@ static struct sort_entry sort_thread = { .print = sort__thread_print, }; +static int64_t +sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) +{ + char *comm_l = left->thread->comm; + char *comm_r = right->thread->comm; + + if (!comm_l || !comm_r) { + if (!comm_l && !comm_r) + return 0; + else if (!comm_l) + return -1; + else + return 1; + } + + return strcmp(comm_l, comm_r); +} + +static size_t +sort__comm_print(FILE *fp, struct hist_entry *self) +{ + return fprintf(fp, "%20s ", self->thread->comm ?: ""); +} + +static struct sort_entry sort_comm = { + .cmp = sort__comm_cmp, + .print = sort__comm_print, +}; + static 
int64_t sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) { @@ -779,6 +808,7 @@ struct sort_dimension { static struct sort_dimension sort_dimensions[] = { { .name = "pid", .entry = &sort_thread, }, + { .name = "comm", .entry = &sort_comm, }, { .name = "symbol", .entry = &sort_sym, }, }; -- cgit v1.2.3 From 55e5ec41a9de46b6ca06031f4fbdfdfc76dc24dc Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 27 May 2009 20:20:28 +0200 Subject: pref_counter: tools: report: Add dso sorting Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur Cc: Mike Galbraith LKML-Reference: <20090527182101.229504802@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 30 +++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index a634022bae0..30e12c7f710 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -760,6 +760,35 @@ static struct sort_entry sort_comm = { .print = sort__comm_print, }; +static int64_t +sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct dso *dso_l = left->dso; + struct dso *dso_r = right->dso; + + if (!dso_l || !dso_r) { + if (!dso_l && !dso_r) + return 0; + else if (!dso_l) + return -1; + else + return 1; + } + + return strcmp(dso_l->name, dso_r->name); +} + +static size_t +sort__dso_print(FILE *fp, struct hist_entry *self) +{ + return fprintf(fp, "%64s ", self->dso ? self->dso->name : ""); +} + +static struct sort_entry sort_dso = { + .cmp = sort__dso_cmp, + .print = sort__dso_print, +}; + static int64_t sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) { @@ -809,6 +838,7 @@ struct sort_dimension { static struct sort_dimension sort_dimensions[] = { { .name = "pid", .entry = &sort_thread, }, { .name = "comm", .entry = &sort_comm, }, + { .name = "dso", .entry = &sort_dso, }, { .name = "symbol", .entry = &sort_sym, }, }; -- cgit v1.2.3 From 2d65537ee7cd4a0818ea80a97ab7932368fff5cd Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 May 2009 21:36:22 +0200 Subject: pref_counter: tools: report: Add header printout & prettify Old default output: 3.12% perf-report [.] ./perf-report: dsos__find 2.44% perf-report [k] kernel: kallsyms_expand_symbol 2.28% :4483 [.] : 2.05% :4174 [k] kernel: _spin_lock_irqsave 2.01% perf-report [k] kernel: vsnprintf 1.92% perf-report [k] kernel: format_decode 1.92% :4438 [k] kernel: _spin_lock New default output: # # Overhead Command File: Symbol # ........ ....... ............ # 6.54% perf [k] kernel: kallsyms_expand_symbol 6.26% perf [.] /home/mingo/tip/Documentation/perf_counter/perf: dso__insert_symbol 4.76% perf [.] 
/home/mingo/tip/Documentation/perf_counter/perf: hex2long 4.55% perf [k] kernel: number 4.48% perf [k] kernel: format_decode 4.09% perf [k] kernel: vsnprintf Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo Cc: John Kacur Cc: Mike Galbraith LKML-Reference: <20090527182101.229504802@chello.nl> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 40 +++++++++++++++++++++-------- 1 file changed, 30 insertions(+), 10 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 30e12c7f710..6df95c2698c 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -708,6 +708,7 @@ struct sort_entry { struct list_head list; int64_t (*cmp)(struct hist_entry *, struct hist_entry *); + size_t (*print_header)(FILE *fp); size_t (*print)(FILE *fp, struct hist_entry *); }; @@ -722,7 +723,7 @@ sort__thread_print(FILE *fp, struct hist_entry *self) { char bf[32]; - return fprintf(fp, "%14s ", + return fprintf(fp, " %16s", thread__name(self->thread, bf, sizeof(bf))); } @@ -752,7 +753,7 @@ sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) static size_t sort__comm_print(FILE *fp, struct hist_entry *self) { - return fprintf(fp, "%20s ", self->thread->comm ?: ""); + return fprintf(fp, " %16s", self->thread->comm ?: ""); } static struct sort_entry sort_comm = { @@ -781,7 +782,7 @@ sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) static size_t sort__dso_print(FILE *fp, struct hist_entry *self) { - return fprintf(fp, "%64s ", self->dso ? self->dso->name : ""); + return fprintf(fp, " %64s", self->dso ? self->dso->name : ""); } static struct sort_entry sort_dso = { @@ -803,21 +804,33 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) return (int64_t)(ip_r - ip_l); } +static size_t sort__sym_print_header(FILE *fp) +{ + size_t ret = 0; + + ret += fprintf(fp, "#\n"); + ret += fprintf(fp, "# Overhead Command File: Symbol\n"); + ret += fprintf(fp, "# ........ ....... ............\n"); + ret += fprintf(fp, "#\n"); + + return ret; +} + static size_t sort__sym_print(FILE *fp, struct hist_entry *self) { size_t ret = 0; - ret += fprintf(fp, "[%c] ", self->level); + ret += fprintf(fp, " [%c] ", self->level); if (verbose) - ret += fprintf(fp, "%#018llx ", (unsigned long long)self->ip); + ret += fprintf(fp, " %#018llx", (unsigned long long)self->ip); if (self->level != '.') - ret += fprintf(fp, "%s ", + ret += fprintf(fp, " kernel: %s", self->sym ? self->sym->name : ""); else - ret += fprintf(fp, "%s: %s ", + ret += fprintf(fp, " %s: %s", self->dso ? self->dso->name : "", self->sym ? 
self->sym->name : ""); @@ -825,8 +838,9 @@ sort__sym_print(FILE *fp, struct hist_entry *self) } static struct sort_entry sort_sym = { - .cmp = sort__sym_cmp, - .print = sort__sym_print, + .cmp = sort__sym_cmp, + .print_header = sort__sym_print_header, + .print = sort__sym_print, }; struct sort_dimension { @@ -898,7 +912,7 @@ hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples) size_t ret; if (total_samples) { - ret = fprintf(fp, "%5.2f%% ", + ret = fprintf(fp, " %5.2f%%", (self->count * 100.0) / total_samples); } else ret = fprintf(fp, "%12d ", self->count); @@ -1003,9 +1017,15 @@ static void output__resort(void) static size_t output__fprintf(FILE *fp, uint64_t total_samples) { struct hist_entry *pos; + struct sort_entry *se; struct rb_node *nd; size_t ret = 0; + list_for_each_entry(se, &hist_entry__sort_list, list) { + if (se->print_header) + ret += se->print_header(fp); + } + for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) { pos = rb_entry(nd, struct hist_entry, rb_node); ret += hist_entry__fprintf(fp, pos, total_samples); -- cgit v1.2.3 From 55717314c4e3a5180a54228a2f97e50f3496de4c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 May 2009 22:13:17 +0200 Subject: pref_counter: tools: report: Robustify in case of weird events This error condition: aldebaran:~/linux/linux/Documentation/perf_counter> perf report dso__load_sym: cannot get elf header. failed to open: /etc/ld.so.cache problem processing PERF_EVENT_MMAP, bailing out caused the profile to be very short - as the error was at the beginning of the file and we bailed out completely. Be more permissive and consider the event broken instead. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 6df95c2698c..5993c129d73 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -1117,9 +1117,9 @@ more: } if (thread == NULL) { - fprintf(stderr, "problem processing %d event, bailing out\n", + fprintf(stderr, "problem processing %d event, skipping it.\n", event->header.type); - goto done; + goto broken_event; } if (event->header.misc & PERF_EVENT_MISC_KERNEL) { @@ -1149,8 +1149,8 @@ more: if (hist_entry__add(thread, map, dso, sym, ip, level)) { fprintf(stderr, - "problem incrementing symbol count, bailing out\n"); - goto done; + "problem incrementing symbol count, skipping event\n"); + goto broken_event; } } total++; @@ -1169,8 +1169,8 @@ more: event->mmap.filename); } if (thread == NULL || map == NULL) { - fprintf(stderr, "problem processing PERF_EVENT_MMAP, bailing out\n"); - goto done; + fprintf(stderr, "problem processing PERF_EVENT_MMAP, skipping event.\n"); + goto broken_event; } thread__insert_map(thread, map); total_mmap++; @@ -1187,8 +1187,8 @@ more: } if (thread == NULL || thread__set_comm(thread, event->comm.comm)) { - fprintf(stderr, "problem processing PERF_EVENT_COMM, bailing out\n"); - goto done; + fprintf(stderr, "problem processing PERF_EVENT_COMM, skipping event.\n"); + goto broken_event; } total_comm++; break; @@ -1221,7 +1221,6 @@ broken_event: goto more; rc = EXIT_SUCCESS; -done: close(input); if (dump_trace) { -- cgit v1.2.3 From 
d3e78ee3d015dac1794433abb6403b6fc8e70e10 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 28 May 2009 11:41:50 +0200 Subject: perf_counter: Fix perf_counter_init_task() on !CONFIG_PERF_COUNTERS Pointed out by compiler warnings: tip/include/linux/perf_counter.h:644: warning: no return statement in function returning non-void Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 2b16ed37b74..a65ddc58051 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -641,7 +641,7 @@ perf_counter_task_sched_out(struct task_struct *task, struct task_struct *next, int cpu) { } static inline void perf_counter_task_tick(struct task_struct *task, int cpu) { } -static inline int perf_counter_init_task(struct task_struct *child) { } +static inline int perf_counter_init_task(struct task_struct *child) { return 0; } static inline void perf_counter_exit_task(struct task_struct *child) { } static inline void perf_counter_do_pending(void) { } static inline void perf_counter_print_debug(void) { } -- cgit v1.2.3 From ca8cdeef9ca2ff89ee8a21d6f6ff3dfb60286041 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 28 May 2009 11:08:33 +0200 Subject: perf_counter tools: report: Implement header output for --sort variants Implement this style of header: # # Overhead Command File: Symbol # ........ ....... ............ # for the various --sort variants as well. Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 70 ++++++++++++----------------- 1 file changed, 28 insertions(+), 42 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 5993c129d73..506cde437b7 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -596,8 +596,6 @@ out_delete: struct thread; -static const char *thread__name(struct thread *self, char *bf, size_t size); - struct thread { struct rb_node rb_node; struct list_head maps; @@ -605,15 +603,6 @@ struct thread { char *comm; }; -static const char *thread__name(struct thread *self, char *bf, size_t size) -{ - if (self->comm) - return self->comm; - - snprintf(bf, sizeof(bf), ":%u", self->pid); - return bf; -} - static struct thread *thread__new(pid_t pid) { struct thread *self = malloc(sizeof(*self)); @@ -707,8 +696,9 @@ struct hist_entry { struct sort_entry { struct list_head list; + char *header; + int64_t (*cmp)(struct hist_entry *, struct hist_entry *); - size_t (*print_header)(FILE *fp); size_t (*print)(FILE *fp, struct hist_entry *); }; @@ -721,13 +711,11 @@ sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) static size_t sort__thread_print(FILE *fp, struct hist_entry *self) { - char bf[32]; - - return fprintf(fp, " %16s", - thread__name(self->thread, bf, sizeof(bf))); + return fprintf(fp, " %16s:%5d", self->thread->comm ?: "", self->thread->pid); } static struct sort_entry sort_thread = { + .header = " Command: Pid ", .cmp = sort__thread_cmp, .print = sort__thread_print, }; @@ -757,6 +745,7 @@ sort__comm_print(FILE *fp, struct 
hist_entry *self) } static struct sort_entry sort_comm = { + .header = " Command", .cmp = sort__comm_cmp, .print = sort__comm_print, }; @@ -786,6 +775,7 @@ sort__dso_print(FILE *fp, struct hist_entry *self) } static struct sort_entry sort_dso = { + .header = " Shared Object", .cmp = sort__dso_cmp, .print = sort__dso_print, }; @@ -804,43 +794,25 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) return (int64_t)(ip_r - ip_l); } -static size_t sort__sym_print_header(FILE *fp) -{ - size_t ret = 0; - - ret += fprintf(fp, "#\n"); - ret += fprintf(fp, "# Overhead Command File: Symbol\n"); - ret += fprintf(fp, "# ........ ....... ............\n"); - ret += fprintf(fp, "#\n"); - - return ret; -} - static size_t sort__sym_print(FILE *fp, struct hist_entry *self) { size_t ret = 0; - ret += fprintf(fp, " [%c] ", self->level); - if (verbose) ret += fprintf(fp, " %#018llx", (unsigned long long)self->ip); - if (self->level != '.') - ret += fprintf(fp, " kernel: %s", - self->sym ? self->sym->name : ""); - else - ret += fprintf(fp, " %s: %s", - self->dso ? self->dso->name : "", - self->sym ? self->sym->name : ""); + ret += fprintf(fp, " %s: %s", + self->dso ? self->dso->name : "", + self->sym ? self->sym->name : ""); return ret; } static struct sort_entry sort_sym = { - .cmp = sort__sym_cmp, - .print_header = sort__sym_print_header, - .print = sort__sym_print, + .header = "Shared Object: Symbol", + .cmp = sort__sym_cmp, + .print = sort__sym_print, }; struct sort_dimension { @@ -1021,10 +993,24 @@ static size_t output__fprintf(FILE *fp, uint64_t total_samples) struct rb_node *nd; size_t ret = 0; + fprintf(fp, "#\n"); + + fprintf(fp, "# Overhead"); + list_for_each_entry(se, &hist_entry__sort_list, list) + fprintf(fp, " %s", se->header); + fprintf(fp, "\n"); + + fprintf(fp, "# ........"); list_for_each_entry(se, &hist_entry__sort_list, list) { - if (se->print_header) - ret += se->print_header(fp); + int i; + + fprintf(fp, " "); + for (i = 0; i < strlen(se->header); i++) + fprintf(fp, "."); } + fprintf(fp, "\n"); + + fprintf(fp, "#\n"); for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) { pos = rb_entry(nd, struct hist_entry, rb_node); -- cgit v1.2.3 From 63299f057fbce47da895e8865cba7e9c3eb01a20 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 28 May 2009 10:52:00 +0200 Subject: perf_counter tools: report: Add help text for --sort Acked-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 506cde437b7..9fdf8224ee6 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -1240,7 +1240,8 @@ static const struct option options[] = { OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"), - OPT_STRING('s', "sort", &sort_order, "foo", "bar"), + OPT_STRING('s', "sort", &sort_order, "key[,key2...]", + "sort by key(s): pid, comm, dso, symbol. 
Default: pid,symbol"), OPT_END() }; -- cgit v1.2.3 From c93f7669098eb97c5376e5396e3dfb734c17df4f Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 28 May 2009 22:18:17 +1000 Subject: perf_counter: Fix race in attaching counters to tasks and exiting Commit 564c2b21 ("perf_counter: Optimize context switch between identical inherited contexts") introduced a race where it is possible that a counter being attached to a task could get attached to the wrong task, if the task is one that has inherited its context from another task via fork. This happens because the optimized context switch could switch the context to another task after find_get_context has read task->perf_counter_ctxp. In fact, it's possible that the context could then get freed, if the other task then exits. This fixes the problem by protecting both the context switch and the critical code in find_get_context with spinlocks. The context switch locks the cxt->lock of both the outgoing and incoming contexts before swapping them. That means that once code such as find_get_context has obtained the spinlock for the context associated with a task, the context can't get swapped to another task. However, the context may have been swapped in the interval between reading task->perf_counter_ctxp and getting the lock, so it is necessary to check and retry. To make sure that none of the contexts being looked at in find_get_context can get freed, this changes the context freeing code to use RCU. Thus an rcu_read_lock() is sufficient to ensure that no contexts can get freed. This part of the patch is lifted from a patch posted by Peter Zijlstra. This also adds a check to make sure that we can't add a counter to a task that is exiting. There is also a race between perf_counter_exit_task and find_get_context; this solves the race by moving the get_ctx that was in perf_counter_alloc into the locked region in find_get_context, so that once find_get_context has got the context for a task, it won't get freed even if the task calls perf_counter_exit_task. It doesn't matter if new top-level (non-inherited) counters get attached to the context after perf_counter_exit_task has detached the context from the task. They will just stay there and never get scheduled in until the counters' fds get closed, and then perf_release will remove them from the context and eventually free the context. With this, we are now doing the unclone in find_get_context rather than when a counter was added to or removed from a context (actually, we were missing the unclone_ctx() call when adding a counter to a context). We don't need to unclone when removing a counter from a context because we have no way to remove a counter from a cloned context. This also takes out the smp_wmb() in find_get_context, which Peter Zijlstra pointed out was unnecessary because the cmpxchg implies a full barrier anyway. 
Signed-off-by: Paul Mackerras Acked-by: Peter Zijlstra Cc: Corey Ashford Cc: Mike Galbraith Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <18974.33033.667187.273886@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 5 +- kernel/perf_counter.c | 205 ++++++++++++++++++++++++++++++------------- 2 files changed, 148 insertions(+), 62 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index a65ddc58051..717bf3b59ba 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -541,8 +541,9 @@ struct perf_counter_context { * been cloned (inherited) from a common ancestor. */ struct perf_counter_context *parent_ctx; - u32 parent_gen; - u32 generation; + u64 parent_gen; + u64 generation; + struct rcu_head rcu_head; }; /** diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 367299f91aa..52e5a15321d 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -103,12 +103,22 @@ static void get_ctx(struct perf_counter_context *ctx) atomic_inc(&ctx->refcount); } +static void free_ctx(struct rcu_head *head) +{ + struct perf_counter_context *ctx; + + ctx = container_of(head, struct perf_counter_context, rcu_head); + kfree(ctx); +} + static void put_ctx(struct perf_counter_context *ctx) { if (atomic_dec_and_test(&ctx->refcount)) { if (ctx->parent_ctx) put_ctx(ctx->parent_ctx); - kfree(ctx); + if (ctx->task) + put_task_struct(ctx->task); + call_rcu(&ctx->rcu_head, free_ctx); } } @@ -211,22 +221,6 @@ group_sched_out(struct perf_counter *group_counter, cpuctx->exclusive = 0; } -/* - * Mark this context as not being a clone of another. - * Called when counters are added to or removed from this context. - * We also increment our generation number so that anything that - * was cloned from this context before this will not match anything - * cloned from this context after this. - */ -static void unclone_ctx(struct perf_counter_context *ctx) -{ - ++ctx->generation; - if (!ctx->parent_ctx) - return; - put_ctx(ctx->parent_ctx); - ctx->parent_ctx = NULL; -} - /* * Cross CPU call to remove a performance counter * @@ -281,13 +275,19 @@ static void __perf_counter_remove_from_context(void *info) * * CPU counters are removed with a smp call. For task counters we only * call when the task is on a CPU. + * + * If counter->ctx is a cloned context, callers must make sure that + * every task struct that counter->ctx->task could possibly point to + * remains valid. This is OK when called from perf_release since + * that only calls us on the top-level context, which can't be a clone. + * When called from perf_counter_exit_task, it's OK because the + * context has been detached from its task. */ static void perf_counter_remove_from_context(struct perf_counter *counter) { struct perf_counter_context *ctx = counter->ctx; struct task_struct *task = ctx->task; - unclone_ctx(ctx); if (!task) { /* * Per cpu counters are removed via an smp call and @@ -410,6 +410,16 @@ static void __perf_counter_disable(void *info) /* * Disable a counter. + * + * If counter->ctx is a cloned context, callers must make sure that + * every task struct that counter->ctx->task could possibly point to + * remains valid. This condition is satisfied when called through + * perf_counter_for_each_child or perf_counter_for_each because they + * hold the top-level counter's child_mutex, so any descendant that + * goes to exit will block in sync_child_counter.
+ * When called from perf_pending_counter it's OK because counter->ctx + * is the current context on this CPU and preemption is disabled, + * hence we can't get into perf_counter_task_sched_out for this context. */ static void perf_counter_disable(struct perf_counter *counter) { @@ -794,6 +804,12 @@ static void __perf_counter_enable(void *info) /* * Enable a counter. + * + * If counter->ctx is a cloned context, callers must make sure that + * every task struct that counter->ctx->task could possibly point to + * remains valid. This condition is satisfied when called through + * perf_counter_for_each_child or perf_counter_for_each as described + * for perf_counter_disable. */ static void perf_counter_enable(struct perf_counter *counter) { @@ -923,7 +939,9 @@ void perf_counter_task_sched_out(struct task_struct *task, struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); struct perf_counter_context *ctx = task->perf_counter_ctxp; struct perf_counter_context *next_ctx; + struct perf_counter_context *parent; struct pt_regs *regs; + int do_switch = 1; regs = task_pt_regs(task); perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0); @@ -932,18 +950,39 @@ void perf_counter_task_sched_out(struct task_struct *task, return; update_context_time(ctx); + + rcu_read_lock(); + parent = rcu_dereference(ctx->parent_ctx); next_ctx = next->perf_counter_ctxp; - if (next_ctx && context_equiv(ctx, next_ctx)) { - task->perf_counter_ctxp = next_ctx; - next->perf_counter_ctxp = ctx; - ctx->task = next; - next_ctx->task = task; - return; + if (parent && next_ctx && + rcu_dereference(next_ctx->parent_ctx) == parent) { + /* + * Looks like the two contexts are clones, so we might be + * able to optimize the context switch. We lock both + * contexts and check that they are clones under the + * lock (including re-checking that neither has been + * uncloned in the meantime). It doesn't matter which + * order we take the locks because no other cpu could + * be trying to lock both of these tasks. + */ + spin_lock(&ctx->lock); + spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); + if (context_equiv(ctx, next_ctx)) { + task->perf_counter_ctxp = next_ctx; + next->perf_counter_ctxp = ctx; + ctx->task = next; + next_ctx->task = task; + do_switch = 0; + } + spin_unlock(&next_ctx->lock); + spin_unlock(&ctx->lock); } + rcu_read_unlock(); - __perf_counter_sched_out(ctx, cpuctx); - - cpuctx->task_ctx = NULL; + if (do_switch) { + __perf_counter_sched_out(ctx, cpuctx); + cpuctx->task_ctx = NULL; + } } static void __perf_counter_task_sched_out(struct perf_counter_context *ctx) @@ -1215,18 +1254,13 @@ __perf_counter_init_context(struct perf_counter_context *ctx, ctx->task = task; } -static void put_context(struct perf_counter_context *ctx) -{ - if (ctx->task) - put_task_struct(ctx->task); -} - static struct perf_counter_context *find_get_context(pid_t pid, int cpu) { struct perf_cpu_context *cpuctx; struct perf_counter_context *ctx; - struct perf_counter_context *tctx; + struct perf_counter_context *parent_ctx; struct task_struct *task; + int err; /* * If cpu is not a wildcard then this is a percpu counter: @@ -1249,6 +1283,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu) cpuctx = &per_cpu(perf_cpu_context, cpu); ctx = &cpuctx->ctx; + get_ctx(ctx); return ctx; } @@ -1265,37 +1300,79 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu) if (!task) return ERR_PTR(-ESRCH); + /* + * Can't attach counters to a dying task. 
+ */ + err = -ESRCH; + if (task->flags & PF_EXITING) + goto errout; + /* Reuse ptrace permission checks for now. */ - if (!ptrace_may_access(task, PTRACE_MODE_READ)) { - put_task_struct(task); - return ERR_PTR(-EACCES); + err = -EACCES; + if (!ptrace_may_access(task, PTRACE_MODE_READ)) + goto errout; + + retry_lock: + rcu_read_lock(); + retry: + ctx = rcu_dereference(task->perf_counter_ctxp); + if (ctx) { + /* + * If this context is a clone of another, it might + * get swapped for another underneath us by + * perf_counter_task_sched_out, though the + * rcu_read_lock() protects us from any context + * getting freed. Lock the context and check if it + * got swapped before we could get the lock, and retry + * if so. If we locked the right context, then it + * can't get swapped on us any more and we can + * unclone it if necessary. + * Once it's not a clone things will be stable. + */ + spin_lock_irq(&ctx->lock); + if (ctx != rcu_dereference(task->perf_counter_ctxp)) { + spin_unlock_irq(&ctx->lock); + goto retry; + } + parent_ctx = ctx->parent_ctx; + if (parent_ctx) { + put_ctx(parent_ctx); + ctx->parent_ctx = NULL; /* no longer a clone */ + } + ++ctx->generation; + /* + * Get an extra reference before dropping the lock so that + * this context won't get freed if the task exits. + */ + get_ctx(ctx); + spin_unlock_irq(&ctx->lock); } + rcu_read_unlock(); - ctx = task->perf_counter_ctxp; if (!ctx) { ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); - if (!ctx) { - put_task_struct(task); - return ERR_PTR(-ENOMEM); - } + err = -ENOMEM; + if (!ctx) + goto errout; __perf_counter_init_context(ctx, task); - /* - * Make sure other cpus see correct values for *ctx - * once task->perf_counter_ctxp is visible to them. - */ - smp_wmb(); - tctx = cmpxchg(&task->perf_counter_ctxp, NULL, ctx); - if (tctx) { + get_ctx(ctx); + if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) { /* * We raced with some other task; use * the context they set. */ kfree(ctx); - ctx = tctx; + goto retry_lock; } + get_task_struct(task); } + put_task_struct(task); return ctx; + + errout: + put_task_struct(task); + return ERR_PTR(err); } static void free_counter_rcu(struct rcu_head *head) @@ -1303,7 +1380,6 @@ static void free_counter_rcu(struct rcu_head *head) struct perf_counter *counter; counter = container_of(head, struct perf_counter, rcu_head); - put_ctx(counter->ctx); kfree(counter); } @@ -1324,6 +1400,7 @@ static void free_counter(struct perf_counter *counter) if (counter->destroy) counter->destroy(counter); + put_ctx(counter->ctx); call_rcu(&counter->rcu_head, free_counter_rcu); } @@ -1347,7 +1424,6 @@ static int perf_release(struct inode *inode, struct file *file) put_task_struct(counter->owner); free_counter(counter); - put_context(ctx); return 0; } @@ -1437,6 +1513,12 @@ static void perf_counter_for_each_sibling(struct perf_counter *counter, mutex_unlock(&ctx->mutex); } +/* + * Holding the top-level counter's child_mutex means that any + * descendant process that has inherited this counter will block + * in sync_child_counter if it goes to exit, thus satisfying the + * task existence requirements of perf_counter_enable/disable. 
+ */ static void perf_counter_for_each_child(struct perf_counter *counter, void (*func)(struct perf_counter *)) { @@ -3124,8 +3206,6 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, counter->ctx = ctx; counter->oncpu = -1; - get_ctx(ctx); - counter->state = PERF_COUNTER_STATE_INACTIVE; if (hw_event->disabled) counter->state = PERF_COUNTER_STATE_OFF; @@ -3290,7 +3370,7 @@ err_free_put_context: kfree(counter); err_put_context: - put_context(ctx); + put_ctx(ctx); goto out_fput; } @@ -3322,6 +3402,7 @@ inherit_counter(struct perf_counter *parent_counter, group_leader, GFP_KERNEL); if (IS_ERR(child_counter)) return child_counter; + get_ctx(child_ctx); /* * Make the child state follow the state of the parent counter, @@ -3439,11 +3520,6 @@ __perf_counter_exit_task(struct task_struct *child, /* * When a child task exits, feed back counter values to parent counters. - * - * Note: we may be running in child context, but the PID is not hashed - * anymore so new counters will not be added. - * (XXX not sure that is true when we get called from flush_old_exec. - * -- paulus) */ void perf_counter_exit_task(struct task_struct *child) { @@ -3458,7 +3534,15 @@ void perf_counter_exit_task(struct task_struct *child) local_irq_save(flags); __perf_counter_task_sched_out(child_ctx); + + /* + * Take the context lock here so that if find_get_context is + * reading child->perf_counter_ctxp, we wait until it has + * incremented the context's refcount before we do put_ctx below. + */ + spin_lock(&child_ctx->lock); child->perf_counter_ctxp = NULL; + spin_unlock(&child_ctx->lock); local_irq_restore(flags); mutex_lock(&child_ctx->mutex); @@ -3513,6 +3597,7 @@ int perf_counter_init_task(struct task_struct *child) __perf_counter_init_context(child_ctx, child); child->perf_counter_ctxp = child_ctx; + get_task_struct(child); /* * Lock the parent list. No need to lock the child - not PID -- cgit v1.2.3 From a2928c42a5d69328c3578b41bd4d72f6658cf7dc Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 28 May 2009 14:55:04 -0300 Subject: perf_counter tools: Move symbol resolution classes from report to libperf Will be used by perf top as well. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt Cc: Peter Zijlstra LKML-Reference: <20090528175504.GC4747@ghostprotocols.net> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 2 + Documentation/perf_counter/builtin-report.c | 458 +--------------------------- Documentation/perf_counter/util/symbol.c | 421 +++++++++++++++++++++++++ Documentation/perf_counter/util/symbol.h | 33 ++ 4 files changed, 471 insertions(+), 443 deletions(-) create mode 100644 Documentation/perf_counter/util/symbol.c create mode 100644 Documentation/perf_counter/util/symbol.h diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 51b13f98983..bd29a5c0010 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -298,6 +298,7 @@ LIB_H += util/help.h LIB_H += util/strbuf.h LIB_H += util/run-command.h LIB_H += util/sigchain.h +LIB_H += util/symbol.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o @@ -317,6 +318,7 @@ LIB_OBJS += util/strbuf.o LIB_OBJS += util/usage.o LIB_OBJS += util/wrapper.o LIB_OBJS += util/sigchain.o +LIB_OBJS += util/symbol.o LIB_OBJS += util/pager.o BUILTIN_OBJS += builtin-help.o diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 9fdf8224ee6..04fc7ec0c35 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -1,13 +1,10 @@ #include "util/util.h" #include "builtin.h" -#include -#include -#include - #include "util/list.h" #include "util/cache.h" #include "util/rbtree.h" +#include "util/symbol.h" #include "perf.h" @@ -62,305 +59,6 @@ typedef union event_union { struct comm_event comm; } event_t; -struct symbol { - struct rb_node rb_node; - __u64 start; - __u64 end; - char name[0]; -}; - -static struct symbol *symbol__new(uint64_t start, uint64_t len, const char *name) -{ - struct symbol *self = malloc(sizeof(*self) + strlen(name) + 1); - - if (self != NULL) { - self->start = start; - self->end = start + len; - strcpy(self->name, name); - } - - return self; -} - -static void symbol__delete(struct symbol *self) -{ - free(self); -} - -static size_t symbol__fprintf(struct symbol *self, FILE *fp) -{ - return fprintf(fp, " %llx-%llx %s\n", - self->start, self->end, self->name); -} - -struct dso { - struct list_head node; - struct rb_root syms; - char name[0]; -}; - -static struct dso *dso__new(const char *name) -{ - struct dso *self = malloc(sizeof(*self) + strlen(name) + 1); - - if (self != NULL) { - strcpy(self->name, name); - self->syms = RB_ROOT; - } - - return self; -} - -static void dso__delete_symbols(struct dso *self) -{ - struct symbol *pos; - struct rb_node *next = rb_first(&self->syms); - - while (next) { - pos = rb_entry(next, struct symbol, rb_node); - next = rb_next(&pos->rb_node); - symbol__delete(pos); - } -} - -static void dso__delete(struct dso *self) -{ - dso__delete_symbols(self); - free(self); -} - -static void dso__insert_symbol(struct dso *self, struct symbol *sym) -{ - struct rb_node **p = &self->syms.rb_node; - struct rb_node *parent = NULL; - const uint64_t ip = sym->start; - struct symbol *s; - - while (*p != NULL) { - parent = *p; - s = rb_entry(parent, struct symbol, rb_node); - if (ip < s->start) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - rb_link_node(&sym->rb_node, parent, p); - rb_insert_color(&sym->rb_node, &self->syms); -} - -static struct symbol *dso__find_symbol(struct dso *self, uint64_t ip) -{ - struct 
rb_node *n; - - if (self == NULL) - return NULL; - - n = self->syms.rb_node; - - while (n) { - struct symbol *s = rb_entry(n, struct symbol, rb_node); - - if (ip < s->start) - n = n->rb_left; - else if (ip > s->end) - n = n->rb_right; - else - return s; - } - - return NULL; -} - -/** - * elf_symtab__for_each_symbol - iterate thru all the symbols - * - * @self: struct elf_symtab instance to iterate - * @index: uint32_t index - * @sym: GElf_Sym iterator - */ -#define elf_symtab__for_each_symbol(syms, nr_syms, index, sym) \ - for (index = 0, gelf_getsym(syms, index, &sym);\ - index < nr_syms; \ - index++, gelf_getsym(syms, index, &sym)) - -static inline uint8_t elf_sym__type(const GElf_Sym *sym) -{ - return GELF_ST_TYPE(sym->st_info); -} - -static inline int elf_sym__is_function(const GElf_Sym *sym) -{ - return elf_sym__type(sym) == STT_FUNC && - sym->st_name != 0 && - sym->st_shndx != SHN_UNDEF && - sym->st_size != 0; -} - -static inline const char *elf_sym__name(const GElf_Sym *sym, - const Elf_Data *symstrs) -{ - return symstrs->d_buf + sym->st_name; -} - -static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, - GElf_Shdr *shp, const char *name, - size_t *index) -{ - Elf_Scn *sec = NULL; - size_t cnt = 1; - - while ((sec = elf_nextscn(elf, sec)) != NULL) { - char *str; - - gelf_getshdr(sec, shp); - str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); - if (!strcmp(name, str)) { - if (index) - *index = cnt; - break; - } - ++cnt; - } - - return sec; -} - -static int dso__load_sym(struct dso *self, int fd, char *name) -{ - Elf_Data *symstrs; - uint32_t nr_syms; - int err = -1; - uint32_t index; - GElf_Ehdr ehdr; - GElf_Shdr shdr; - Elf_Data *syms; - GElf_Sym sym; - Elf_Scn *sec; - Elf *elf; - int nr = 0; - - elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); - if (elf == NULL) { - fprintf(stderr, "%s: cannot read %s ELF file.\n", - __func__, name); - goto out_close; - } - - if (gelf_getehdr(elf, &ehdr) == NULL) { - fprintf(stderr, "%s: cannot get elf header.\n", __func__); - goto out_elf_end; - } - - sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL); - if (sec == NULL) - sec = elf_section_by_name(elf, &ehdr, &shdr, ".dynsym", NULL); - - if (sec == NULL) - goto out_elf_end; - - syms = elf_getdata(sec, NULL); - if (syms == NULL) - goto out_elf_end; - - sec = elf_getscn(elf, shdr.sh_link); - if (sec == NULL) - goto out_elf_end; - - symstrs = elf_getdata(sec, NULL); - if (symstrs == NULL) - goto out_elf_end; - - nr_syms = shdr.sh_size / shdr.sh_entsize; - - elf_symtab__for_each_symbol(syms, nr_syms, index, sym) { - struct symbol *f; - - if (!elf_sym__is_function(&sym)) - continue; - - sec = elf_getscn(elf, sym.st_shndx); - if (!sec) - goto out_elf_end; - - gelf_getshdr(sec, &shdr); - sym.st_value -= shdr.sh_addr - shdr.sh_offset; - - f = symbol__new(sym.st_value, sym.st_size, - elf_sym__name(&sym, symstrs)); - if (!f) - goto out_elf_end; - - dso__insert_symbol(self, f); - - nr++; - } - - err = nr; -out_elf_end: - elf_end(elf); -out_close: - return err; -} - -static int dso__load(struct dso *self) -{ - int size = strlen(self->name) + sizeof("/usr/lib/debug%s.debug"); - char *name = malloc(size); - int variant = 0; - int ret = -1; - int fd; - - if (!name) - return -1; - -more: - do { - switch (variant) { - case 0: /* Fedora */ - snprintf(name, size, "/usr/lib/debug%s.debug", self->name); - break; - case 1: /* Ubuntu */ - snprintf(name, size, "/usr/lib/debug%s", self->name); - break; - case 2: /* Sane people */ - snprintf(name, size, "%s", self->name); - break; - - default: - goto out; - 
} - variant++; - - fd = open(name, O_RDONLY); - } while (fd < 0); - - ret = dso__load_sym(self, fd, name); - close(fd); - - /* - * Some people seem to have debuginfo files _WITHOUT_ debug info!?!? - */ - if (!ret) - goto more; - -out: - free(name); - return ret; -} - -static size_t dso__fprintf(struct dso *self, FILE *fp) -{ - size_t ret = fprintf(fp, "dso: %s\n", self->name); - - struct rb_node *nd; - for (nd = rb_first(&self->syms); nd; nd = rb_next(nd)) { - struct symbol *pos = rb_entry(nd, struct symbol, rb_node); - ret += symbol__fprintf(pos, fp); - } - - return ret; -} - static LIST_HEAD(dsos); static struct dso *kernel_dso; @@ -418,153 +116,27 @@ static void dsos__fprintf(FILE *fp) dso__fprintf(pos, fp); } -static int hex(char ch) -{ - if ((ch >= '0') && (ch <= '9')) - return ch - '0'; - if ((ch >= 'a') && (ch <= 'f')) - return ch - 'a' + 10; - if ((ch >= 'A') && (ch <= 'F')) - return ch - 'A' + 10; - return -1; -} - -/* - * While we find nice hex chars, build a long_val. - * Return number of chars processed. - */ -static int hex2long(char *ptr, unsigned long *long_val) -{ - const char *p = ptr; - *long_val = 0; - - while (*p) { - const int hex_val = hex(*p); - - if (hex_val < 0) - break; - - *long_val = (*long_val << 4) | hex_val; - p++; - } - - return p - ptr; -} - -static int load_kallsyms(void) -{ - struct rb_node *nd, *prevnd; - char *line = NULL; - FILE *file; - size_t n; - - kernel_dso = dso__new("[kernel]"); - if (kernel_dso == NULL) - return -1; - - file = fopen("/proc/kallsyms", "r"); - if (file == NULL) - goto out_delete_dso; - - while (!feof(file)) { - unsigned long start; - struct symbol *sym; - int line_len, len; - char symbol_type; - - line_len = getline(&line, &n, file); - if (line_len < 0) - break; - - if (!line) - goto out_delete_dso; - - line[--line_len] = '\0'; /* \n */ - - len = hex2long(line, &start); - - len++; - if (len + 2 >= line_len) - continue; - - symbol_type = toupper(line[len]); - /* - * We're interested only in code ('T'ext) - */ - if (symbol_type != 'T' && symbol_type != 'W') - continue; - /* - * Well fix up the end later, when we have all sorted. 
- */ - sym = symbol__new(start, 0xdead, line + len + 2); - - if (sym == NULL) - goto out_delete_dso; - - dso__insert_symbol(kernel_dso, sym); - } - - /* - * Now that we have all sorted out, just set the ->end of all - * symbols - */ - prevnd = rb_first(&kernel_dso->syms); - - if (prevnd == NULL) - goto out_delete_line; - - for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { - struct symbol *prev = rb_entry(prevnd, struct symbol, rb_node), - *curr = rb_entry(nd, struct symbol, rb_node); - - prev->end = curr->start - 1; - prevnd = nd; - } - - dsos__add(kernel_dso); - free(line); - fclose(file); - - return 0; - -out_delete_line: - free(line); -out_delete_dso: - dso__delete(kernel_dso); - return -1; -} - static int load_kernel(void) { - int fd, nr; - - if (!vmlinux) - goto kallsyms; - - fd = open(vmlinux, O_RDONLY); - if (fd < 0) - goto kallsyms; + int err = -1; kernel_dso = dso__new("[kernel]"); if (!kernel_dso) - goto fail_open; - - nr = dso__load_sym(kernel_dso, fd, vmlinux); + return -1; - if (nr <= 0) - goto fail_load; + if (vmlinux) + err = dso__load_vmlinux(kernel_dso, vmlinux); - dsos__add(kernel_dso); - close(fd); + if (err) + err = dso__load_kallsyms(kernel_dso); - return 0; + if (err) { + dso__delete(kernel_dso); + kernel_dso = NULL; + } else + dsos__add(kernel_dso); -fail_load: - dso__delete(kernel_dso); -fail_open: - close(fd); -kallsyms: - return load_kallsyms(); + return err; } struct map { @@ -1050,7 +622,7 @@ static int __cmd_report(void) } if (load_kernel() < 0) { - perror("failed to open kallsyms"); + perror("failed to load kernel symbols"); return EXIT_FAILURE; } @@ -1247,7 +819,7 @@ static const struct option options[] = { int cmd_report(int argc, const char **argv, const char *prefix) { - elf_version(EV_CURRENT); + symbol__init(); page_size = getpagesize(); diff --git a/Documentation/perf_counter/util/symbol.c b/Documentation/perf_counter/util/symbol.c new file mode 100644 index 00000000000..d06de2cfcdc --- /dev/null +++ b/Documentation/perf_counter/util/symbol.c @@ -0,0 +1,421 @@ +#include "util.h" +#include "../perf.h" +#include "symbol.h" + +#include +#include +#include + +static struct symbol *symbol__new(uint64_t start, uint64_t len, + const char *name) +{ + struct symbol *self = malloc(sizeof(*self) + strlen(name) + 1); + + if (self != NULL) { + self->start = start; + self->end = start + len; + strcpy(self->name, name); + } + + return self; +} + +static void symbol__delete(struct symbol *self) +{ + free(self); +} + +static size_t symbol__fprintf(struct symbol *self, FILE *fp) +{ + return fprintf(fp, " %llx-%llx %s\n", + self->start, self->end, self->name); +} + +struct dso *dso__new(const char *name) +{ + struct dso *self = malloc(sizeof(*self) + strlen(name) + 1); + + if (self != NULL) { + strcpy(self->name, name); + self->syms = RB_ROOT; + } + + return self; +} + +static void dso__delete_symbols(struct dso *self) +{ + struct symbol *pos; + struct rb_node *next = rb_first(&self->syms); + + while (next) { + pos = rb_entry(next, struct symbol, rb_node); + next = rb_next(&pos->rb_node); + symbol__delete(pos); + } +} + +void dso__delete(struct dso *self) +{ + dso__delete_symbols(self); + free(self); +} + +static void dso__insert_symbol(struct dso *self, struct symbol *sym) +{ + struct rb_node **p = &self->syms.rb_node; + struct rb_node *parent = NULL; + const uint64_t ip = sym->start; + struct symbol *s; + + while (*p != NULL) { + parent = *p; + s = rb_entry(parent, struct symbol, rb_node); + if (ip < s->start) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + 
rb_link_node(&sym->rb_node, parent, p); + rb_insert_color(&sym->rb_node, &self->syms); +} + +struct symbol *dso__find_symbol(struct dso *self, uint64_t ip) +{ + struct rb_node *n; + + if (self == NULL) + return NULL; + + n = self->syms.rb_node; + + while (n) { + struct symbol *s = rb_entry(n, struct symbol, rb_node); + + if (ip < s->start) + n = n->rb_left; + else if (ip > s->end) + n = n->rb_right; + else + return s; + } + + return NULL; +} + +size_t dso__fprintf(struct dso *self, FILE *fp) +{ + size_t ret = fprintf(fp, "dso: %s\n", self->name); + + struct rb_node *nd; + for (nd = rb_first(&self->syms); nd; nd = rb_next(nd)) { + struct symbol *pos = rb_entry(nd, struct symbol, rb_node); + ret += symbol__fprintf(pos, fp); + } + + return ret; +} + +static int hex(char ch) +{ + if ((ch >= '0') && (ch <= '9')) + return ch - '0'; + if ((ch >= 'a') && (ch <= 'f')) + return ch - 'a' + 10; + if ((ch >= 'A') && (ch <= 'F')) + return ch - 'A' + 10; + return -1; +} + +/* + * While we find nice hex chars, build a long_val. + * Return number of chars processed. + */ +static int hex2long(char *ptr, unsigned long *long_val) +{ + const char *p = ptr; + *long_val = 0; + + while (*p) { + const int hex_val = hex(*p); + + if (hex_val < 0) + break; + + *long_val = (*long_val << 4) | hex_val; + p++; + } + + return p - ptr; +} + +int dso__load_kallsyms(struct dso *self) +{ + struct rb_node *nd, *prevnd; + char *line = NULL; + size_t n; + FILE *file = fopen("/proc/kallsyms", "r"); + + if (file == NULL) + goto out_failure; + + while (!feof(file)) { + unsigned long start; + struct symbol *sym; + int line_len, len; + char symbol_type; + + line_len = getline(&line, &n, file); + if (line_len < 0) + break; + + if (!line) + goto out_failure; + + line[--line_len] = '\0'; /* \n */ + + len = hex2long(line, &start); + + len++; + if (len + 2 >= line_len) + continue; + + symbol_type = toupper(line[len]); + /* + * We're interested only in code ('T'ext) + */ + if (symbol_type != 'T' && symbol_type != 'W') + continue; + /* + * Well fix up the end later, when we have all sorted. 
+ */ + sym = symbol__new(start, 0xdead, line + len + 2); + + if (sym == NULL) + goto out_delete_line; + + dso__insert_symbol(self, sym); + } + + /* + * Now that we have all sorted out, just set the ->end of all + * symbols + */ + prevnd = rb_first(&self->syms); + + if (prevnd == NULL) + goto out_delete_line; + + for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { + struct symbol *prev = rb_entry(prevnd, struct symbol, rb_node), + *curr = rb_entry(nd, struct symbol, rb_node); + + prev->end = curr->start - 1; + prevnd = nd; + } + + free(line); + fclose(file); + + return 0; + +out_delete_line: + free(line); +out_failure: + return -1; +} + +/** + * elf_symtab__for_each_symbol - iterate thru all the symbols + * + * @self: struct elf_symtab instance to iterate + * @index: uint32_t index + * @sym: GElf_Sym iterator + */ +#define elf_symtab__for_each_symbol(syms, nr_syms, index, sym) \ + for (index = 0, gelf_getsym(syms, index, &sym);\ + index < nr_syms; \ + index++, gelf_getsym(syms, index, &sym)) + +static inline uint8_t elf_sym__type(const GElf_Sym *sym) +{ + return GELF_ST_TYPE(sym->st_info); +} + +static inline int elf_sym__is_function(const GElf_Sym *sym) +{ + return elf_sym__type(sym) == STT_FUNC && + sym->st_name != 0 && + sym->st_shndx != SHN_UNDEF && + sym->st_size != 0; +} + +static inline const char *elf_sym__name(const GElf_Sym *sym, + const Elf_Data *symstrs) +{ + return symstrs->d_buf + sym->st_name; +} + +static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, + GElf_Shdr *shp, const char *name, + size_t *index) +{ + Elf_Scn *sec = NULL; + size_t cnt = 1; + + while ((sec = elf_nextscn(elf, sec)) != NULL) { + char *str; + + gelf_getshdr(sec, shp); + str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); + if (!strcmp(name, str)) { + if (index) + *index = cnt; + break; + } + ++cnt; + } + + return sec; +} + +static int dso__load_sym(struct dso *self, int fd, const char *name) +{ + Elf_Data *symstrs; + uint32_t nr_syms; + int err = -1; + uint32_t index; + GElf_Ehdr ehdr; + GElf_Shdr shdr; + Elf_Data *syms; + GElf_Sym sym; + Elf_Scn *sec; + Elf *elf; + int nr = 0; + + elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); + if (elf == NULL) { + fprintf(stderr, "%s: cannot read %s ELF file.\n", + __func__, name); + goto out_close; + } + + if (gelf_getehdr(elf, &ehdr) == NULL) { + fprintf(stderr, "%s: cannot get elf header.\n", __func__); + goto out_elf_end; + } + + sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL); + if (sec == NULL) + sec = elf_section_by_name(elf, &ehdr, &shdr, ".dynsym", NULL); + + if (sec == NULL) + goto out_elf_end; + + syms = elf_getdata(sec, NULL); + if (syms == NULL) + goto out_elf_end; + + sec = elf_getscn(elf, shdr.sh_link); + if (sec == NULL) + goto out_elf_end; + + symstrs = elf_getdata(sec, NULL); + if (symstrs == NULL) + goto out_elf_end; + + nr_syms = shdr.sh_size / shdr.sh_entsize; + + elf_symtab__for_each_symbol(syms, nr_syms, index, sym) { + struct symbol *f; + + if (!elf_sym__is_function(&sym)) + continue; + + sec = elf_getscn(elf, sym.st_shndx); + if (!sec) + goto out_elf_end; + + gelf_getshdr(sec, &shdr); + sym.st_value -= shdr.sh_addr - shdr.sh_offset; + + f = symbol__new(sym.st_value, sym.st_size, + elf_sym__name(&sym, symstrs)); + if (!f) + goto out_elf_end; + + dso__insert_symbol(self, f); + + nr++; + } + + err = nr; +out_elf_end: + elf_end(elf); +out_close: + return err; +} + +int dso__load(struct dso *self) +{ + int size = strlen(self->name) + sizeof("/usr/lib/debug%s.debug"); + char *name = malloc(size); + int variant = 0; + int ret = -1; + 
int fd; + + if (!name) + return -1; + +more: + do { + switch (variant) { + case 0: /* Fedora */ + snprintf(name, size, "/usr/lib/debug%s.debug", self->name); + break; + case 1: /* Ubuntu */ + snprintf(name, size, "/usr/lib/debug%s", self->name); + break; + case 2: /* Sane people */ + snprintf(name, size, "%s", self->name); + break; + + default: + goto out; + } + variant++; + + fd = open(name, O_RDONLY); + } while (fd < 0); + + ret = dso__load_sym(self, fd, name); + close(fd); + + /* + * Some people seem to have debuginfo files _WITHOUT_ debug info!?!? + */ + if (!ret) + goto more; + +out: + free(name); + return ret; +} + +int dso__load_vmlinux(struct dso *self, const char *vmlinux) +{ + int err, fd = open(vmlinux, O_RDONLY); + + if (fd < 0) + return -1; + + err = dso__load_sym(self, fd, vmlinux); + close(fd); + + return err; +} + +void symbol__init(void) +{ + elf_version(EV_CURRENT); +} diff --git a/Documentation/perf_counter/util/symbol.h b/Documentation/perf_counter/util/symbol.h new file mode 100644 index 00000000000..6dffe76a28f --- /dev/null +++ b/Documentation/perf_counter/util/symbol.h @@ -0,0 +1,33 @@ +#ifndef _PERF_SYMBOL_ +#define _PERF_SYMBOL_ 1 + +#include +#include "list.h" +#include "rbtree.h" + +struct symbol { + struct rb_node rb_node; + __u64 start; + __u64 end; + char name[0]; +}; + +struct dso { + struct list_head node; + struct rb_root syms; + char name[0]; +}; + +struct dso *dso__new(const char *name); +void dso__delete(struct dso *self); + +struct symbol *dso__find_symbol(struct dso *self, uint64_t ip); + +int dso__load_kallsyms(struct dso *self); +int dso__load_vmlinux(struct dso *self, const char *vmlinux); +int dso__load(struct dso *self); + +size_t dso__fprintf(struct dso *self, FILE *fp); + +void symbol__init(void); +#endif /* _PERF_SYMBOL_ */ -- cgit v1.2.3 From 0085c954140d27937ada29d139c16341075d45e4 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 28 May 2009 14:55:13 -0300 Subject: perf_counter tools: struct symbol priv area When creating a dso instance allow asking that all symbols in this dso have a private area just before the symbol. perf top will use this for its counters, etc. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt Cc: Peter Zijlstra LKML-Reference: <20090528175513.GD4747@ghostprotocols.net> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 4 ++-- Documentation/perf_counter/util/symbol.c | 26 +++++++++++++++++--------- Documentation/perf_counter/util/symbol.h | 8 +++++++- 3 files changed, 26 insertions(+), 12 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 04fc7ec0c35..889067eb2b6 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -83,7 +83,7 @@ static struct dso *dsos__findnew(const char *name) int nr; if (dso == NULL) { - dso = dso__new(name); + dso = dso__new(name, 0); if (!dso) goto out_delete_dso; @@ -120,7 +120,7 @@ static int load_kernel(void) { int err = -1; - kernel_dso = dso__new("[kernel]"); + kernel_dso = dso__new("[kernel]", 0); if (!kernel_dso) return -1; diff --git a/Documentation/perf_counter/util/symbol.c b/Documentation/perf_counter/util/symbol.c index d06de2cfcdc..7088206244a 100644 --- a/Documentation/perf_counter/util/symbol.c +++ b/Documentation/perf_counter/util/symbol.c @@ -7,22 +7,27 @@ #include static struct symbol *symbol__new(uint64_t start, uint64_t len, - const char *name) + const char *name, unsigned int priv_size) { - struct symbol *self = malloc(sizeof(*self) + strlen(name) + 1); + size_t namelen = strlen(name) + 1; + struct symbol *self = malloc(priv_size + sizeof(*self) + namelen); if (self != NULL) { + if (priv_size) { + memset(self, 0, priv_size); + self = ((void *)self) + priv_size; + } self->start = start; self->end = start + len; - strcpy(self->name, name); + memcpy(self->name, name, namelen); } return self; } -static void symbol__delete(struct symbol *self) +static void symbol__delete(struct symbol *self, unsigned int priv_size) { - free(self); + free(((void *)self) - priv_size); } static size_t symbol__fprintf(struct symbol *self, FILE *fp) @@ -31,13 +36,14 @@ static size_t symbol__fprintf(struct symbol *self, FILE *fp) self->start, self->end, self->name); } -struct dso *dso__new(const char *name) +struct dso *dso__new(const char *name, unsigned int sym_priv_size) { struct dso *self = malloc(sizeof(*self) + strlen(name) + 1); if (self != NULL) { strcpy(self->name, name); self->syms = RB_ROOT; + self->sym_priv_size = sym_priv_size; } return self; @@ -51,7 +57,7 @@ static void dso__delete_symbols(struct dso *self) while (next) { pos = rb_entry(next, struct symbol, rb_node); next = rb_next(&pos->rb_node); - symbol__delete(pos); + symbol__delete(pos, self->sym_priv_size); } } @@ -189,7 +195,8 @@ int dso__load_kallsyms(struct dso *self) /* * Well fix up the end later, when we have all sorted. 
*/ - sym = symbol__new(start, 0xdead, line + len + 2); + sym = symbol__new(start, 0xdead, line + len + 2, + self->sym_priv_size); if (sym == NULL) goto out_delete_line; @@ -340,7 +347,8 @@ static int dso__load_sym(struct dso *self, int fd, const char *name) sym.st_value -= shdr.sh_addr - shdr.sh_offset; f = symbol__new(sym.st_value, sym.st_size, - elf_sym__name(&sym, symstrs)); + elf_sym__name(&sym, symstrs), + self->sym_priv_size); if (!f) goto out_elf_end; diff --git a/Documentation/perf_counter/util/symbol.h b/Documentation/perf_counter/util/symbol.h index 6dffe76a28f..9e120af9c71 100644 --- a/Documentation/perf_counter/util/symbol.h +++ b/Documentation/perf_counter/util/symbol.h @@ -15,12 +15,18 @@ struct symbol { struct dso { struct list_head node; struct rb_root syms; + unsigned int sym_priv_size; char name[0]; }; -struct dso *dso__new(const char *name); +struct dso *dso__new(const char *name, unsigned int sym_priv_size); void dso__delete(struct dso *self); +static inline void *dso__sym_priv(struct dso *self, struct symbol *sym) +{ + return ((void *)sym) - self->sym_priv_size; +} + struct symbol *dso__find_symbol(struct dso *self, uint64_t ip); int dso__load_kallsyms(struct dso *self); -- cgit v1.2.3 From a827c875f2ebe69c6e6db5e7f112d4beb4d80f01 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 28 May 2009 14:55:19 -0300 Subject: perf_counter tools: Consolidate dso methods to load kernel symbols Now one has just to use dso__load_kernel() optionally passing a vmlinux filename. Will make things easier for perf top that will want to pass a callback to filter some symbols. Signed-off-by: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt Cc: Peter Zijlstra Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 9 ++------- Documentation/perf_counter/util/symbol.c | 17 +++++++++++++++-- Documentation/perf_counter/util/symbol.h | 3 +-- 3 files changed, 18 insertions(+), 11 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 889067eb2b6..4e9f2bc1045 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -118,18 +118,13 @@ static void dsos__fprintf(FILE *fp) static int load_kernel(void) { - int err = -1; + int err; kernel_dso = dso__new("[kernel]", 0); if (!kernel_dso) return -1; - if (vmlinux) - err = dso__load_vmlinux(kernel_dso, vmlinux); - - if (err) - err = dso__load_kallsyms(kernel_dso); - + err = dso__load_kernel(kernel_dso, vmlinux); if (err) { dso__delete(kernel_dso); kernel_dso = NULL; diff --git a/Documentation/perf_counter/util/symbol.c b/Documentation/perf_counter/util/symbol.c index 7088206244a..504ac313201 100644 --- a/Documentation/perf_counter/util/symbol.c +++ b/Documentation/perf_counter/util/symbol.c @@ -155,7 +155,7 @@ static int hex2long(char *ptr, unsigned long *long_val) return p - ptr; } -int dso__load_kallsyms(struct dso *self) +static int dso__load_kallsyms(struct dso *self) { struct rb_node *nd, *prevnd; char *line = NULL; @@ -410,7 +410,7 @@ out: return ret; } -int dso__load_vmlinux(struct dso *self, const char *vmlinux) +static int dso__load_vmlinux(struct dso *self, const char *vmlinux) { int err, fd = open(vmlinux, O_RDONLY); @@ -423,6 +423,19 @@ int dso__load_vmlinux(struct dso *self, const char *vmlinux) return err; } +int dso__load_kernel(struct dso *self, const char *vmlinux) +{ + int err = -1; + + if (vmlinux) + err = dso__load_vmlinux(self, vmlinux); + + if (err) 
+ err = dso__load_kallsyms(self); + + return err; +} + void symbol__init(void) { elf_version(EV_CURRENT); diff --git a/Documentation/perf_counter/util/symbol.h b/Documentation/perf_counter/util/symbol.h index 9e120af9c71..db2fdf9f70a 100644 --- a/Documentation/perf_counter/util/symbol.h +++ b/Documentation/perf_counter/util/symbol.h @@ -29,8 +29,7 @@ static inline void *dso__sym_priv(struct dso *self, struct symbol *sym) struct symbol *dso__find_symbol(struct dso *self, uint64_t ip); -int dso__load_kallsyms(struct dso *self); -int dso__load_vmlinux(struct dso *self, const char *vmlinux); +int dso__load_kernel(struct dso *self, const char *vmlinux); int dso__load(struct dso *self); size_t dso__fprintf(struct dso *self, FILE *fp); -- cgit v1.2.3 From 69ee69f63c82e63d9c6c6081d12673af4933c51e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 28 May 2009 14:55:26 -0300 Subject: perf_counter tools: Optionally pass a symbol filter to the dso load routines Will be used by perf top. Signed-off-by: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt Cc: Peter Zijlstra LKML-Reference: <20090528175526.GF4747@ghostprotocols.net> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 4 ++-- Documentation/perf_counter/util/symbol.c | 34 ++++++++++++++++++----------- Documentation/perf_counter/util/symbol.h | 7 ++++-- 3 files changed, 28 insertions(+), 17 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 4e9f2bc1045..412d524dd65 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -87,7 +87,7 @@ static struct dso *dsos__findnew(const char *name) if (!dso) goto out_delete_dso; - nr = dso__load(dso); + nr = dso__load(dso, NULL); if (nr < 0) { fprintf(stderr, "Failed to open: %s\n", name); goto out_delete_dso; @@ -124,7 +124,7 @@ static int load_kernel(void) if (!kernel_dso) return -1; - err = dso__load_kernel(kernel_dso, vmlinux); + err = dso__load_kernel(kernel_dso, vmlinux, NULL); if (err) { dso__delete(kernel_dso); kernel_dso = NULL; diff --git a/Documentation/perf_counter/util/symbol.c b/Documentation/perf_counter/util/symbol.c index 504ac313201..47281210443 100644 --- a/Documentation/perf_counter/util/symbol.c +++ b/Documentation/perf_counter/util/symbol.c @@ -155,7 +155,7 @@ static int hex2long(char *ptr, unsigned long *long_val) return p - ptr; } -static int dso__load_kallsyms(struct dso *self) +static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter) { struct rb_node *nd, *prevnd; char *line = NULL; @@ -201,7 +201,10 @@ static int dso__load_kallsyms(struct dso *self) if (sym == NULL) goto out_delete_line; - dso__insert_symbol(self, sym); + if (filter && filter(self, sym)) + symbol__delete(sym, self->sym_priv_size); + else + dso__insert_symbol(self, sym); } /* @@ -286,7 +289,8 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, return sec; } -static int dso__load_sym(struct dso *self, int fd, const char *name) +static int dso__load_sym(struct dso *self, int fd, const char *name, + symbol_filter_t filter) { Elf_Data *symstrs; uint32_t nr_syms; @@ -352,9 +356,12 @@ static int dso__load_sym(struct dso *self, int fd, const char *name) if (!f) goto out_elf_end; - dso__insert_symbol(self, f); - - nr++; + if (filter && filter(self, f)) + symbol__delete(f, self->sym_priv_size); + else { + dso__insert_symbol(self, f); + nr++; + } } err = nr; @@ -364,7 +371,7 @@ out_close: return err; } 
-int dso__load(struct dso *self) +int dso__load(struct dso *self, symbol_filter_t filter) { int size = strlen(self->name) + sizeof("/usr/lib/debug%s.debug"); char *name = malloc(size); @@ -396,7 +403,7 @@ more: fd = open(name, O_RDONLY); } while (fd < 0); - ret = dso__load_sym(self, fd, name); + ret = dso__load_sym(self, fd, name, filter); close(fd); /* @@ -410,28 +417,29 @@ out: return ret; } -static int dso__load_vmlinux(struct dso *self, const char *vmlinux) +static int dso__load_vmlinux(struct dso *self, const char *vmlinux, + symbol_filter_t filter) { int err, fd = open(vmlinux, O_RDONLY); if (fd < 0) return -1; - err = dso__load_sym(self, fd, vmlinux); + err = dso__load_sym(self, fd, vmlinux, filter); close(fd); return err; } -int dso__load_kernel(struct dso *self, const char *vmlinux) +int dso__load_kernel(struct dso *self, const char *vmlinux, symbol_filter_t filter) { int err = -1; if (vmlinux) - err = dso__load_vmlinux(self, vmlinux); + err = dso__load_vmlinux(self, vmlinux, filter); if (err) - err = dso__load_kallsyms(self); + err = dso__load_kallsyms(self, filter); return err; } diff --git a/Documentation/perf_counter/util/symbol.h b/Documentation/perf_counter/util/symbol.h index db2fdf9f70a..b0299bc0cf5 100644 --- a/Documentation/perf_counter/util/symbol.h +++ b/Documentation/perf_counter/util/symbol.h @@ -19,6 +19,8 @@ struct dso { char name[0]; }; +typedef int (*symbol_filter_t)(struct dso *self, struct symbol *sym); + struct dso *dso__new(const char *name, unsigned int sym_priv_size); void dso__delete(struct dso *self); @@ -29,8 +31,9 @@ static inline void *dso__sym_priv(struct dso *self, struct symbol *sym) struct symbol *dso__find_symbol(struct dso *self, uint64_t ip); -int dso__load_kernel(struct dso *self, const char *vmlinux); -int dso__load(struct dso *self); +int dso__load_kernel(struct dso *self, const char *vmlinux, + symbol_filter_t filter); +int dso__load(struct dso *self, symbol_filter_t filter); size_t dso__fprintf(struct dso *self, FILE *fp); -- cgit v1.2.3 From de04687f868bf98e4ef644af91ed85a3bc212ce8 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 28 May 2009 14:55:41 -0300 Subject: perf_counter tools: Convert builtin-top to use libperf symbol routines Now both perf top and report use the same routines. 
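The conversion hinges on the symbol_filter_t hook added in the previous patch; a minimal filter (a trimmed-down sketch of the symbol_filter() visible in the diff below, keeping only two of the name checks) would look like:

	static int symbol_filter(struct dso *self, struct symbol *sym)
	{
		/* a non-zero return drops the symbol at load time */
		if (!strcmp(sym->name, "_text") || !strcmp(sym->name, "_etext"))
			return 1;
		return 0;
	}

	/* ... */
	if (dso__load_kernel(kernel_dso, vmlinux, symbol_filter) < 0)
		return -1;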
Signed-off-by: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt Cc: Peter Zijlstra LKML-Reference: <20090528175541.GG4747@ghostprotocols.net> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-top.c | 303 ++++++++++++------------------- 1 file changed, 117 insertions(+), 186 deletions(-) diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index a890872638c..52ba9f4216c 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -44,8 +44,9 @@ #include "perf.h" #include "builtin.h" +#include "util/symbol.h" #include "util/util.h" -#include "util/util.h" +#include "util/rbtree.h" #include "util/parse-options.h" #include "util/parse-events.h" @@ -125,19 +126,21 @@ static uint64_t min_ip; static uint64_t max_ip = -1ll; struct sym_entry { - unsigned long long addr; - char *sym; + struct rb_node rb_node; + struct list_head node; unsigned long count[MAX_COUNTERS]; int skip; }; -#define MAX_SYMS 100000 - -static int sym_table_count; - struct sym_entry *sym_filter_entry; -static struct sym_entry sym_table[MAX_SYMS]; +struct dso *kernel_dso; + +/* + * Symbols will be added here in record_ip and will get out + * after decayed. + */ +static LIST_HEAD(active_symbols); /* * Ordering weight: count-1 * count-2 * ... / count-n @@ -157,42 +160,60 @@ static double sym_weight(const struct sym_entry *sym) return weight; } -static int compare(const void *__sym1, const void *__sym2) -{ - const struct sym_entry *sym1 = __sym1, *sym2 = __sym2; - - return sym_weight(sym1) < sym_weight(sym2); -} - static long events; static long userspace_events; static const char CONSOLE_CLEAR[] = ""; -static struct sym_entry tmp[MAX_SYMS]; +static void list_insert_active_sym(struct sym_entry *syme) +{ + list_add(&syme->node, &active_symbols); +} + +static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se) +{ + struct rb_node **p = &tree->rb_node; + struct rb_node *parent = NULL; + struct sym_entry *iter; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct sym_entry, rb_node); + + if (sym_weight(se) > sym_weight(iter)) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&se->rb_node, parent, p); + rb_insert_color(&se->rb_node, tree); +} static void print_sym_table(void) { - int i, j, active_count, printed; + int printed, j; int counter; float events_per_sec = events/delay_secs; float kevents_per_sec = (events-userspace_events)/delay_secs; float sum_kevents = 0.0; + struct sym_entry *syme, *n; + struct rb_root tmp = RB_ROOT; + struct rb_node *nd; events = userspace_events = 0; - /* Iterate over symbol table and copy/tally/decay active symbols. */ - for (i = 0, active_count = 0; i < sym_table_count; i++) { - if (sym_table[i].count[0]) { - tmp[active_count++] = sym_table[i]; - sum_kevents += sym_table[i].count[0]; + /* Sort the active symbols */ + list_for_each_entry_safe(syme, n, &active_symbols, node) { + if (syme->count[0] != 0) { + rb_insert_active_sym(&tmp, syme); + sum_kevents += syme->count[0]; for (j = 0; j < nr_counters; j++) - sym_table[i].count[j] = zero ? 0 : sym_table[i].count[j] * 7 / 8; - } + syme->count[j] = zero ? 
0 : syme->count[j] * 7 / 8; + } else + list_del_init(&syme->node); } - qsort(tmp, active_count + 1, sizeof(tmp[0]), compare); - write(1, CONSOLE_CLEAR, strlen(CONSOLE_CLEAR)); printf( @@ -238,23 +259,25 @@ static void print_sym_table(void) " ______ ______ _____ ________________ _______________\n\n" ); - for (i = 0, printed = 0; i < active_count; i++) { + for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) { + struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node); + struct symbol *sym = (struct symbol *)(syme + 1); float pcnt; - if (++printed > 18 || tmp[i].count[0] < count_filter) + if (++printed > 18 || syme->count[0] < count_filter) break; - pcnt = 100.0 - (100.0*((sum_kevents-tmp[i].count[0])/sum_kevents)); + pcnt = 100.0 - (100.0 * ((sum_kevents - syme->count[0]) / + sum_kevents)); if (nr_counters == 1) printf("%19.2f - %4.1f%% - %016llx : %s\n", - sym_weight(tmp + i), - pcnt, tmp[i].addr, tmp[i].sym); + sym_weight(syme), + pcnt, sym->start, sym->name); else printf("%8.1f %10ld - %4.1f%% - %016llx : %s\n", - sym_weight(tmp + i), - tmp[i].count[0], - pcnt, tmp[i].addr, tmp[i].sym); + sym_weight(syme), syme->count[0], + pcnt, sym->start, sym->name); } { @@ -277,146 +300,85 @@ static void *display_thread(void *arg) return NULL; } -static int read_symbol(FILE *in, struct sym_entry *s) +static int symbol_filter(struct dso *self, struct symbol *sym) { - static int filter_match = 0; - char *sym, stype; - char str[500]; - int rc, pos; - - rc = fscanf(in, "%llx %c %499s", &s->addr, &stype, str); - if (rc == EOF) - return -1; - - assert(rc == 3); - - /* skip until end of line: */ - pos = strlen(str); - do { - rc = fgetc(in); - if (rc == '\n' || rc == EOF || pos >= 499) - break; - str[pos] = rc; - pos++; - } while (1); - str[pos] = 0; - - sym = str; - - /* Filter out known duplicates and non-text symbols. */ - if (!strcmp(sym, "_text")) - return 1; - if (!min_ip && !strcmp(sym, "_stext")) - return 1; - if (!strcmp(sym, "_etext") || !strcmp(sym, "_sinittext")) - return 1; - if (stype != 'T' && stype != 't') - return 1; - if (!strncmp("init_module", sym, 11) || !strncmp("cleanup_module", sym, 14)) - return 1; - if (strstr(sym, "_text_start") || strstr(sym, "_text_end")) + static int filter_match; + struct sym_entry *syme; + const char *name = sym->name; + + if (!strcmp(name, "_text") || + !strcmp(name, "_etext") || + !strcmp(name, "_sinittext") || + !strncmp("init_module", name, 11) || + !strncmp("cleanup_module", name, 14) || + strstr(name, "_text_start") || + strstr(name, "_text_end")) return 1; - s->sym = malloc(strlen(str)+1); - assert(s->sym); - - strcpy((char *)s->sym, str); - s->skip = 0; - + syme = dso__sym_priv(self, sym); /* Tag events to be skipped. 
*/ - if (!strcmp("default_idle", s->sym) || !strcmp("cpu_idle", s->sym)) - s->skip = 1; - else if (!strcmp("enter_idle", s->sym) || !strcmp("exit_idle", s->sym)) - s->skip = 1; - else if (!strcmp("mwait_idle", s->sym)) - s->skip = 1; + if (!strcmp("default_idle", name) || + !strcmp("cpu_idle", name) || + !strcmp("enter_idle", name) || + !strcmp("exit_idle", name) || + !strcmp("mwait_idle", name)) + syme->skip = 1; if (filter_match == 1) { - filter_end = s->addr; + filter_end = sym->start; filter_match = -1; if (filter_end - filter_start > 10000) { - printf("hm, too large filter symbol <%s> - skipping.\n", + fprintf(stderr, + "hm, too large filter symbol <%s> - skipping.\n", sym_filter); - printf("symbol filter start: %016lx\n", filter_start); - printf(" end: %016lx\n", filter_end); + fprintf(stderr, "symbol filter start: %016lx\n", + filter_start); + fprintf(stderr, " end: %016lx\n", + filter_end); filter_end = filter_start = 0; sym_filter = NULL; sleep(1); } } - if (filter_match == 0 && sym_filter && !strcmp(s->sym, sym_filter)) { + + if (filter_match == 0 && sym_filter && !strcmp(name, sym_filter)) { filter_match = 1; - filter_start = s->addr; + filter_start = sym->start; } + return 0; } -static int compare_addr(const void *__sym1, const void *__sym2) +static int parse_symbols(void) { - const struct sym_entry *sym1 = __sym1, *sym2 = __sym2; + struct rb_node *node; + struct symbol *sym; - return sym1->addr > sym2->addr; -} - -static void sort_symbol_table(void) -{ - int i, dups; - - do { - qsort(sym_table, sym_table_count, sizeof(sym_table[0]), compare_addr); - for (i = 0, dups = 0; i < sym_table_count; i++) { - if (sym_table[i].addr == sym_table[i+1].addr) { - sym_table[i+1].addr = -1ll; - dups++; - } - } - sym_table_count -= dups; - } while(dups); -} - -static void parse_symbols(void) -{ - struct sym_entry *last; + kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry)); + if (kernel_dso == NULL) + return -1; - FILE *kallsyms = fopen("/proc/kallsyms", "r"); + if (dso__load_kernel(kernel_dso, NULL, symbol_filter) != 0) + goto out_delete_dso; - if (!kallsyms) { - printf("Could not open /proc/kallsyms - no CONFIG_KALLSYMS_ALL=y?\n"); - exit(-1); - } + node = rb_first(&kernel_dso->syms); + sym = rb_entry(node, struct symbol, rb_node); + min_ip = sym->start; - while (!feof(kallsyms)) { - if (read_symbol(kallsyms, &sym_table[sym_table_count]) == 0) { - sym_table_count++; - assert(sym_table_count <= MAX_SYMS); - } - } + node = rb_last(&kernel_dso->syms); + sym = rb_entry(node, struct symbol, rb_node); + max_ip = sym->start; - sort_symbol_table(); - min_ip = sym_table[0].addr; - max_ip = sym_table[sym_table_count-1].addr; - last = sym_table + sym_table_count++; + if (dump_symtab) + dso__fprintf(kernel_dso, stdout); - last->addr = -1ll; - last->sym = ""; - - if (filter_end) { - int count; - for (count=0; count < sym_table_count; count ++) { - if (!strcmp(sym_table[count].sym, sym_filter)) { - sym_filter_entry = &sym_table[count]; - break; - } - } - } - if (dump_symtab) { - int i; + return 0; - for (i = 0; i < sym_table_count; i++) - fprintf(stderr, "%llx %s\n", - sym_table[i].addr, sym_table[i].sym); - } +out_delete_dso: + dso__delete(kernel_dso); + kernel_dso = NULL; + return -1; } #define TRACE_COUNT 3 @@ -426,51 +388,20 @@ static void parse_symbols(void) */ static void record_ip(uint64_t ip, int counter) { - int left_idx, middle_idx, right_idx, idx; - unsigned long left, middle, right; - - left_idx = 0; - right_idx = sym_table_count-1; - assert(ip <= max_ip && ip >= min_ip); - - while 
(left_idx + 1 < right_idx) { - middle_idx = (left_idx + right_idx) / 2; + struct symbol *sym = dso__find_symbol(kernel_dso, ip); - left = sym_table[ left_idx].addr; - middle = sym_table[middle_idx].addr; - right = sym_table[ right_idx].addr; + if (sym != NULL) { + struct sym_entry *syme = dso__sym_priv(kernel_dso, sym); - if (!(left <= middle && middle <= right)) { - printf("%016lx...\n%016lx...\n%016lx\n", left, middle, right); - printf("%d %d %d\n", left_idx, middle_idx, right_idx); + if (!syme->skip) { + syme->count[counter]++; + if (list_empty(&syme->node) || !syme->node.next) + list_insert_active_sym(syme); + return; } - assert(left <= middle && middle <= right); - if (!(left <= ip && ip <= right)) { - printf(" left: %016lx\n", left); - printf(" ip: %016lx\n", (unsigned long)ip); - printf("right: %016lx\n", right); - } - assert(left <= ip && ip <= right); - /* - * [ left .... target .... middle .... right ] - * => right := middle - */ - if (ip < middle) { - right_idx = middle_idx; - continue; - } - /* - * [ left .... middle ... target ... right ] - * => left := middle - */ - left_idx = middle_idx; } - idx = left_idx; - - if (!sym_table[idx].skip) - sym_table[idx].count[counter]++; - else events--; + events--; } static void process_event(uint64_t ip, int counter) -- cgit v1.2.3 From 9e09675366695405412b709e91709c1ce2925c90 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Thu, 28 May 2009 16:25:34 +0200 Subject: perf_counter tools: Document '--' option parsing terminator Signed-off-by: Mike Galbraith Cc: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Documentation/perf-record.txt | 1 + Documentation/perf_counter/Documentation/perf-stat.txt | 1 + Documentation/perf_counter/builtin-record.c | 3 ++- Documentation/perf_counter/builtin-stat.c | 1 + 4 files changed, 5 insertions(+), 1 deletion(-) diff --git a/Documentation/perf_counter/Documentation/perf-record.txt b/Documentation/perf_counter/Documentation/perf-record.txt index 353db1bb98a..a93d2ec6176 100644 --- a/Documentation/perf_counter/Documentation/perf-record.txt +++ b/Documentation/perf_counter/Documentation/perf-record.txt @@ -9,6 +9,7 @@ SYNOPSIS -------- [verse] 'perf record' [-e | --event=EVENT] [-l] [-a] +'perf record' [-e | --event=EVENT] [-l] [-a] -- [] DESCRIPTION ----------- diff --git a/Documentation/perf_counter/Documentation/perf-stat.txt b/Documentation/perf_counter/Documentation/perf-stat.txt index 7fcab271e57..828c59ff5f5 100644 --- a/Documentation/perf_counter/Documentation/perf-stat.txt +++ b/Documentation/perf_counter/Documentation/perf-stat.txt @@ -9,6 +9,7 @@ SYNOPSIS -------- [verse] 'perf stat' [-e | --event=EVENT] [-l] [-a] +'perf stat' [-e | --event=EVENT] [-l] [-a] -- [] DESCRIPTION ----------- diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 4a068664a32..23d1224ce98 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -397,7 +397,8 @@ static int __cmd_record(int argc, const char **argv) } static const char * const record_usage[] = { - "perf record [] ", + "perf record [] []", + "perf record [] -- []", NULL }; diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index ce661e2fa8d..ac14086d9a7 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -212,6 +212,7 @@ static void skip_signal(int signo) static const char * const stat_usage[] = { "perf stat [] 
", + "perf stat [] -- []", NULL }; -- cgit v1.2.3 From a3ec8d70f1a55acccc4874fe9b4dadbbb9454a0f Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Fri, 29 May 2009 06:46:46 +0200 Subject: perf_counter tools: Fix top symbol table dump typo Signed-off-by: Mike Galbraith Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-top.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 52ba9f4216c..0d100f52b70 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -371,7 +371,7 @@ static int parse_symbols(void) max_ip = sym->start; if (dump_symtab) - dso__fprintf(kernel_dso, stdout); + dso__fprintf(kernel_dso, stderr); return 0; -- cgit v1.2.3 From da417a7537cbf4beb28a08a49adf915f2358040c Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Fri, 29 May 2009 08:23:16 +0200 Subject: perf_counter tools: Fix top symbol table max_ip typo Signed-off-by: Mike Galbraith Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-top.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 0d100f52b70..ebe8bec1a0e 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -368,7 +368,7 @@ static int parse_symbols(void) node = rb_last(&kernel_dso->syms); sym = rb_entry(node, struct symbol, rb_node); - max_ip = sym->start; + max_ip = sym->end; if (dump_symtab) dso__fprintf(kernel_dso, stderr); -- cgit v1.2.3 From c323d95fa4dbe0b6bf6d59e24a0b7db067dd08a7 Mon Sep 17 00:00:00 2001 From: Yong Wang Date: Fri, 29 May 2009 13:28:35 +0800 Subject: perf_counter/x86: Always use NMI for performance-monitoring interrupt Always use NMI for performance-monitoring interrupt as there could be racy situations if we switch between irq and nmi mode frequently. 
Signed-off-by: Yong Wang LKML-Reference: <20090529052835.GA13657@ywang-moblin2.bj.intel.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/perf_counter.h | 4 ++-- arch/x86/kernel/apic/apic.c | 2 +- arch/x86/kernel/cpu/perf_counter.c | 19 +++++-------------- 3 files changed, 8 insertions(+), 17 deletions(-) diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h index d08dd52cb8f..876ed97147b 100644 --- a/arch/x86/include/asm/perf_counter.h +++ b/arch/x86/include/asm/perf_counter.h @@ -91,10 +91,10 @@ extern void set_perf_counter_pending(void); #ifdef CONFIG_PERF_COUNTERS extern void init_hw_perf_counters(void); -extern void perf_counters_lapic_init(int nmi); +extern void perf_counters_lapic_init(void); #else static inline void init_hw_perf_counters(void) { } -static inline void perf_counters_lapic_init(int nmi) { } +static inline void perf_counters_lapic_init(void) { } #endif #endif /* _ASM_X86_PERF_COUNTER_H */ diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 89b63b5fad3..60df2efd7c8 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1135,7 +1135,7 @@ void __cpuinit setup_local_APIC(void) apic_write(APIC_ESR, 0); } #endif - perf_counters_lapic_init(0); + perf_counters_lapic_init(); preempt_disable(); diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 2eeaa99add1..316b0c995f3 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -604,7 +604,7 @@ try_generic: hwc->counter_base = x86_pmu.perfctr; } - perf_counters_lapic_init(hwc->nmi); + perf_counters_lapic_init(); x86_pmu.disable(hwc, idx); @@ -863,24 +863,15 @@ void set_perf_counter_pending(void) apic->send_IPI_self(LOCAL_PENDING_VECTOR); } -void perf_counters_lapic_init(int nmi) +void perf_counters_lapic_init(void) { - u32 apic_val; - if (!x86_pmu_initialized()) return; /* - * Enable the performance counter vector in the APIC LVT: + * Always use NMI for PMU */ - apic_val = apic_read(APIC_LVTERR); - - apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED); - if (nmi) - apic_write(APIC_LVTPC, APIC_DM_NMI); - else - apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR); - apic_write(APIC_LVTERR, apic_val); + apic_write(APIC_LVTPC, APIC_DM_NMI); } static int __kprobes @@ -1054,7 +1045,7 @@ void __init init_hw_perf_counters(void) pr_info("... counter mask: %016Lx\n", perf_counter_mask); - perf_counters_lapic_init(0); + perf_counters_lapic_init(); register_die_notifier(&perf_counter_nmi_notifier); } -- cgit v1.2.3 From c04f5e5d7b523f90ee3cdd70a68c4002aaecd3fa Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 29 May 2009 09:10:54 +0200 Subject: perf_counter tools: Clean up builtin-stat.c's do_perfstat() [ Impact: cleanup ] Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-stat.c | 116 +++++++++++++++++------------- 1 file changed, 67 insertions(+), 49 deletions(-) diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index ac14086d9a7..6a2936150f2 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -109,11 +109,75 @@ static void create_perfstat_counter(int counter) } } +/* + * Does the counter have nsecs as a unit? 
+ */ +static inline int nsec_counter(int counter) +{ + if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK)) + return 1; + if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK)) + return 1; + + return 0; +} + +/* + * Print out the results of a single counter: + */ +static void print_counter(int counter) +{ + __u64 count[3], single_count[3]; + ssize_t res; + int cpu, nv; + int scaled; + + count[0] = count[1] = count[2] = 0; + nv = scale ? 3 : 1; + for (cpu = 0; cpu < nr_cpus; cpu ++) { + res = read(fd[cpu][counter], single_count, nv * sizeof(__u64)); + assert(res == nv * sizeof(__u64)); + + count[0] += single_count[0]; + if (scale) { + count[1] += single_count[1]; + count[2] += single_count[2]; + } + } + + scaled = 0; + if (scale) { + if (count[2] == 0) { + fprintf(stderr, " %14s %-20s\n", + "", event_name(counter)); + return; + } + if (count[2] < count[1]) { + scaled = 1; + count[0] = (unsigned long long) + ((double)count[0] * count[1] / count[2] + 0.5); + } + } + + if (nsec_counter(counter)) { + double msecs = (double)count[0] / 1000000; + + fprintf(stderr, " %14.6f %-20s (msecs)", + msecs, event_name(counter)); + } else { + fprintf(stderr, " %14Ld %-20s (events)", + count[0], event_name(counter)); + } + if (scaled) + fprintf(stderr, " (scaled from %.2f%%)", + (double) count[2] / count[1] * 100); + fprintf(stderr, "\n"); +} + static int do_perfstat(int argc, const char **argv) { unsigned long long t0, t1; int counter; - ssize_t res; int status; int pid; @@ -149,55 +213,10 @@ static int do_perfstat(int argc, const char **argv) argv[0]); fprintf(stderr, "\n"); - for (counter = 0; counter < nr_counters; counter++) { - int cpu, nv; - __u64 count[3], single_count[3]; - int scaled; - - count[0] = count[1] = count[2] = 0; - nv = scale ? 3 : 1; - for (cpu = 0; cpu < nr_cpus; cpu ++) { - res = read(fd[cpu][counter], - single_count, nv * sizeof(__u64)); - assert(res == nv * sizeof(__u64)); - - count[0] += single_count[0]; - if (scale) { - count[1] += single_count[1]; - count[2] += single_count[2]; - } - } - - scaled = 0; - if (scale) { - if (count[2] == 0) { - fprintf(stderr, " %14s %-20s\n", - "", event_name(counter)); - continue; - } - if (count[2] < count[1]) { - scaled = 1; - count[0] = (unsigned long long) - ((double)count[0] * count[1] / count[2] + 0.5); - } - } - - if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK) || - event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK)) { + for (counter = 0; counter < nr_counters; counter++) + print_counter(counter); - double msecs = (double)count[0] / 1000000; - fprintf(stderr, " %14.6f %-20s (msecs)", - msecs, event_name(counter)); - } else { - fprintf(stderr, " %14Ld %-20s (events)", - count[0], event_name(counter)); - } - if (scaled) - fprintf(stderr, " (scaled from %.2f%%)", - (double) count[2] / count[1] * 100); - fprintf(stderr, "\n"); - } fprintf(stderr, "\n"); fprintf(stderr, " Wall-clock time elapsed: %12.6f msecs\n", (double)(t1-t0)/1e6); @@ -212,7 +231,6 @@ static void skip_signal(int signo) static const char * const stat_usage[] = { "perf stat [] ", - "perf stat [] -- []", NULL }; -- cgit v1.2.3 From 2996f5ddb7ba8889caeeac65edafe48845106eaa Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 29 May 2009 09:10:54 +0200 Subject: perf_counter tools: Split display into reading and printing We introduce the extra pass to allow the print-out to possibly rely on already read counters. 
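The result is two symmetric passes over the counters (a sketch matching
the diff below; read_counter() fills the new event_res[]/event_scaled[]
arrays and print_counter() only consumes them):

	for (counter = 0; counter < nr_counters; counter++)
		read_counter(counter);

	for (counter = 0; counter < nr_counters; counter++)
		print_counter(counter);
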
[ Impact: cleanup ] Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-stat.c | 40 ++++++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index 6a2936150f2..0c92eb72552 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -71,6 +71,9 @@ static const unsigned int default_count[] = { 10000, }; +static __u64 event_res[MAX_COUNTERS][3]; +static __u64 event_scaled[MAX_COUNTERS]; + static void create_perfstat_counter(int counter) { struct perf_counter_hw_event hw_event; @@ -123,16 +126,19 @@ static inline int nsec_counter(int counter) } /* - * Print out the results of a single counter: + * Read out the results of a single counter: */ -static void print_counter(int counter) +static void read_counter(int counter) { - __u64 count[3], single_count[3]; + __u64 *count, single_count[3]; ssize_t res; int cpu, nv; int scaled; + count = event_res[counter]; + count[0] = count[1] = count[2] = 0; + nv = scale ? 3 : 1; for (cpu = 0; cpu < nr_cpus; cpu ++) { res = read(fd[cpu][counter], single_count, nv * sizeof(__u64)); @@ -148,16 +154,35 @@ static void print_counter(int counter) scaled = 0; if (scale) { if (count[2] == 0) { - fprintf(stderr, " %14s %-20s\n", - "", event_name(counter)); + event_scaled[counter] = -1; + count[0] = 0; return; } + if (count[2] < count[1]) { - scaled = 1; + event_scaled[counter] = 1; count[0] = (unsigned long long) ((double)count[0] * count[1] / count[2] + 0.5); } } +} + +/* + * Print out the results of a single counter: + */ +static void print_counter(int counter) +{ + __u64 *count; + int scaled; + + count = event_res[counter]; + scaled = event_scaled[counter]; + + if (scaled == -1) { + fprintf(stderr, " %14s %-20s\n", + "", event_name(counter)); + return; + } if (nsec_counter(counter)) { double msecs = (double)count[0] / 1000000; @@ -213,6 +238,9 @@ static int do_perfstat(int argc, const char **argv) argv[0]); fprintf(stderr, "\n"); + for (counter = 0; counter < nr_counters; counter++) + read_counter(counter); + for (counter = 0; counter < nr_counters; counter++) print_counter(counter); -- cgit v1.2.3 From be1ac0d81d0e3ab655f8c8ade31fb860ef6aa186 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 29 May 2009 09:10:54 +0200 Subject: perf_counter tools: Also display time-normalized stat results Add new column that normalizes counter results by 'nanoseconds spent running' unit. 
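The new column is computed as (double)count[0] / runtime_nsecs * 1000.0,
i.e. events per nanosecond of task time expressed in M/sec; for example,
in the After output below, 75502 context switches over 10469.403605 msecs
of task clock give 75502 / 10469403605 * 1000.0 ~= 0.007 M/sec.
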
Before: Performance counter stats for '/home/mingo/hackbench': 10469.403605 task clock ticks (msecs) 75502 context switches (events) 9501 CPU migrations (events) 36158 pagefaults (events) 31975676185 CPU cycles (events) 26257738659 instructions (events) 108740581 cache references (events) 54606088 cache misses (events) Wall-clock time elapsed: 810.514504 msecs After: Performance counter stats for '/home/mingo/hackbench': 10469.403605 task clock ticks (msecs) 75502 context switches # 0.007 M/sec 9501 CPU migrations # 0.001 M/sec 36158 pagefaults # 0.003 M/sec 31975676185 CPU cycles # 3054.202 M/sec 26257738659 instructions # 2508.045 M/sec 108740581 cache references # 10.387 M/sec 54606088 cache misses # 5.216 M/sec Wall-clock time elapsed: 810.514504 msecs The advantage of that column is that it is characteristic of the execution workflow, regardless of runtime. Hence 'hackbench 10' will look similar to 'hackbench 15' - while the absolute counter values are very different. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-stat.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index 0c92eb72552..ef7e0e1192c 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -74,6 +74,8 @@ static const unsigned int default_count[] = { static __u64 event_res[MAX_COUNTERS][3]; static __u64 event_scaled[MAX_COUNTERS]; +static __u64 runtime_nsecs; + static void create_perfstat_counter(int counter) { struct perf_counter_hw_event hw_event; @@ -165,6 +167,11 @@ static void read_counter(int counter) ((double)count[0] * count[1] / count[2] + 0.5); } } + /* + * Save the full runtime - to allow normalization during printout: + */ + if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK)) + runtime_nsecs = count[0]; } /* @@ -190,8 +197,11 @@ static void print_counter(int counter) fprintf(stderr, " %14.6f %-20s (msecs)", msecs, event_name(counter)); } else { - fprintf(stderr, " %14Ld %-20s (events)", + fprintf(stderr, " %14Ld %-20s", count[0], event_name(counter)); + if (runtime_nsecs) + fprintf(stderr, " # %12.3f M/sec", + (double)count[0]/runtime_nsecs*1000.0); } if (scaled) fprintf(stderr, " (scaled from %.2f%%)", -- cgit v1.2.3 From ad3a37de81c45f6c20d410ece86004b98f7b6d84 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 29 May 2009 16:06:20 +1000 Subject: perf_counter: Don't swap contexts containing locked mutex Peter Zijlstra pointed out that under some circumstances, we can take the mutex in a context or a counter and then swap that context or counter to another task, potentially leading to lock order inversions or the mutexes not protecting what they are supposed to protect. This fixes the problem by making sure that we never take a mutex in a context or counter which could get swapped to another task. Most of the cases where we take a mutex is on a top-level counter or context, i.e. a counter which has an fd associated with it or a context that contains such a counter. This adds WARN_ON_ONCE statements to verify that. The two cases where we need to take the mutex on a context that is a clone of another are in perf_counter_exit_task and perf_counter_init_task. 
The perf_counter_exit_task case is solved by uncloning the context before starting to remove the counters from it. The perf_counter_init_task is a little trickier; we temporarily disable context swapping for the parent (forking) task by setting its ctx->parent_gen to the all-1s value after locking the context, if it is a cloned context, and restore the ctx->parent_gen value at the end if the context didn't get uncloned in the meantime. This also moves the increment of the context generation count to be within the same critical section, protected by the context mutex, that adds the new counter to the context. That way, taking the mutex is sufficient to ensure that both the counter list and the generation count are stable. [ Impact: fix hangs, races with inherited and PID counters ] Signed-off-by: Paul Mackerras Acked-by: Peter Zijlstra LKML-Reference: <18975.31580.520676.619896@drongo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 89 +++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 79 insertions(+), 10 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 52e5a15321d..db843f812a6 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -919,7 +919,8 @@ static int context_equiv(struct perf_counter_context *ctx1, struct perf_counter_context *ctx2) { return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx - && ctx1->parent_gen == ctx2->parent_gen; + && ctx1->parent_gen == ctx2->parent_gen + && ctx1->parent_gen != ~0ull; } /* @@ -1339,7 +1340,6 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu) put_ctx(parent_ctx); ctx->parent_ctx = NULL; /* no longer a clone */ } - ++ctx->generation; /* * Get an extra reference before dropping the lock so that * this context won't get freed if the task exits. 
@@ -1414,6 +1414,7 @@ static int perf_release(struct inode *inode, struct file *file) file->private_data = NULL; + WARN_ON_ONCE(ctx->parent_ctx); mutex_lock(&ctx->mutex); perf_counter_remove_from_context(counter); mutex_unlock(&ctx->mutex); @@ -1445,6 +1446,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) if (counter->state == PERF_COUNTER_STATE_ERROR) return 0; + WARN_ON_ONCE(counter->ctx->parent_ctx); mutex_lock(&counter->child_mutex); values[0] = perf_counter_read(counter); n = 1; @@ -1504,6 +1506,7 @@ static void perf_counter_for_each_sibling(struct perf_counter *counter, struct perf_counter_context *ctx = counter->ctx; struct perf_counter *sibling; + WARN_ON_ONCE(ctx->parent_ctx); mutex_lock(&ctx->mutex); counter = counter->group_leader; @@ -1524,6 +1527,7 @@ static void perf_counter_for_each_child(struct perf_counter *counter, { struct perf_counter *child; + WARN_ON_ONCE(counter->ctx->parent_ctx); mutex_lock(&counter->child_mutex); func(counter); list_for_each_entry(child, &counter->child_list, child_list) @@ -1536,6 +1540,7 @@ static void perf_counter_for_each(struct perf_counter *counter, { struct perf_counter *child; + WARN_ON_ONCE(counter->ctx->parent_ctx); mutex_lock(&counter->child_mutex); perf_counter_for_each_sibling(counter, func); list_for_each_entry(child, &counter->child_list, child_list) @@ -1741,6 +1746,7 @@ static void perf_mmap_close(struct vm_area_struct *vma) { struct perf_counter *counter = vma->vm_file->private_data; + WARN_ON_ONCE(counter->ctx->parent_ctx); if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) { struct user_struct *user = current_user(); @@ -1788,6 +1794,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) if (vma->vm_pgoff != 0) return -EINVAL; + WARN_ON_ONCE(counter->ctx->parent_ctx); mutex_lock(&counter->mmap_mutex); if (atomic_inc_not_zero(&counter->mmap_count)) { if (nr_pages != counter->data->nr_pages) @@ -3349,8 +3356,10 @@ SYSCALL_DEFINE5(perf_counter_open, goto err_free_put_context; counter->filp = counter_file; + WARN_ON_ONCE(ctx->parent_ctx); mutex_lock(&ctx->mutex); perf_install_in_context(ctx, counter, cpu); + ++ctx->generation; mutex_unlock(&ctx->mutex); counter->owner = current; @@ -3436,6 +3445,7 @@ inherit_counter(struct perf_counter *parent_counter, /* * Link this into the parent counter's child list */ + WARN_ON_ONCE(parent_counter->ctx->parent_ctx); mutex_lock(&parent_counter->child_mutex); list_add_tail(&child_counter->child_list, &parent_counter->child_list); mutex_unlock(&parent_counter->child_mutex); @@ -3485,6 +3495,7 @@ static void sync_child_counter(struct perf_counter *child_counter, /* * Remove this counter from the parent's list */ + WARN_ON_ONCE(parent_counter->ctx->parent_ctx); mutex_lock(&parent_counter->child_mutex); list_del_init(&child_counter->child_list); mutex_unlock(&parent_counter->child_mutex); @@ -3527,12 +3538,17 @@ void perf_counter_exit_task(struct task_struct *child) struct perf_counter_context *child_ctx; unsigned long flags; - child_ctx = child->perf_counter_ctxp; - - if (likely(!child_ctx)) + if (likely(!child->perf_counter_ctxp)) return; local_irq_save(flags); + /* + * We can't reschedule here because interrupts are disabled, + * and either child is current or it is a task that can't be + * scheduled, so we are now safe from rescheduling changing + * our context. 
+ */ + child_ctx = child->perf_counter_ctxp; __perf_counter_task_sched_out(child_ctx); /* @@ -3542,6 +3558,15 @@ void perf_counter_exit_task(struct task_struct *child) */ spin_lock(&child_ctx->lock); child->perf_counter_ctxp = NULL; + if (child_ctx->parent_ctx) { + /* + * This context is a clone; unclone it so it can't get + * swapped to another process while we're removing all + * the counters from it. + */ + put_ctx(child_ctx->parent_ctx); + child_ctx->parent_ctx = NULL; + } spin_unlock(&child_ctx->lock); local_irq_restore(flags); @@ -3571,9 +3596,11 @@ again: int perf_counter_init_task(struct task_struct *child) { struct perf_counter_context *child_ctx, *parent_ctx; + struct perf_counter_context *cloned_ctx; struct perf_counter *counter; struct task_struct *parent = current; int inherited_all = 1; + u64 cloned_gen; int ret = 0; child->perf_counter_ctxp = NULL; @@ -3581,8 +3608,7 @@ int perf_counter_init_task(struct task_struct *child) mutex_init(&child->perf_counter_mutex); INIT_LIST_HEAD(&child->perf_counter_list); - parent_ctx = parent->perf_counter_ctxp; - if (likely(!parent_ctx || !parent_ctx->nr_counters)) + if (likely(!parent->perf_counter_ctxp)) return 0; /* @@ -3599,6 +3625,34 @@ int perf_counter_init_task(struct task_struct *child) child->perf_counter_ctxp = child_ctx; get_task_struct(child); + /* + * If the parent's context is a clone, temporarily set its + * parent_gen to an impossible value (all 1s) so it won't get + * swapped under us. The rcu_read_lock makes sure that + * parent_ctx continues to exist even if it gets swapped to + * another process and then freed while we are trying to get + * its lock. + */ + rcu_read_lock(); + retry: + parent_ctx = rcu_dereference(parent->perf_counter_ctxp); + /* + * No need to check if parent_ctx != NULL here; since we saw + * it non-NULL earlier, the only reason for it to become NULL + * is if we exit, and since we're currently in the middle of + * a fork we can't be exiting at the same time. + */ + spin_lock_irq(&parent_ctx->lock); + if (parent_ctx != rcu_dereference(parent->perf_counter_ctxp)) { + spin_unlock_irq(&parent_ctx->lock); + goto retry; + } + cloned_gen = parent_ctx->parent_gen; + if (parent_ctx->parent_ctx) + parent_ctx->parent_gen = ~0ull; + spin_unlock_irq(&parent_ctx->lock); + rcu_read_unlock(); + /* * Lock the parent list. No need to lock the child - not PID * hashed yet and not running, so nobody can access it. @@ -3630,10 +3684,15 @@ int perf_counter_init_task(struct task_struct *child) /* * Mark the child context as a clone of the parent * context, or of whatever the parent is a clone of. + * Note that if the parent is a clone, it could get + * uncloned at any point, but that doesn't matter + * because the list of counters and the generation + * count can't have changed since we took the mutex. */ - if (parent_ctx->parent_ctx) { - child_ctx->parent_ctx = parent_ctx->parent_ctx; - child_ctx->parent_gen = parent_ctx->parent_gen; + cloned_ctx = rcu_dereference(parent_ctx->parent_ctx); + if (cloned_ctx) { + child_ctx->parent_ctx = cloned_ctx; + child_ctx->parent_gen = cloned_gen; } else { child_ctx->parent_ctx = parent_ctx; child_ctx->parent_gen = parent_ctx->generation; @@ -3643,6 +3702,16 @@ int perf_counter_init_task(struct task_struct *child) mutex_unlock(&parent_ctx->mutex); + /* + * Restore the clone status of the parent. 
+ */ + if (parent_ctx->parent_ctx) { + spin_lock_irq(&parent_ctx->lock); + if (parent_ctx->parent_ctx) + parent_ctx->parent_gen = cloned_gen; + spin_unlock_irq(&parent_ctx->lock); + } + return ret; } -- cgit v1.2.3 From 3f4dee227348daac32f36daad9a91059efd0723e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 29 May 2009 11:25:09 +0200 Subject: perf_counter: Fix cpuctx->task_ctx races Peter noticed that we are sometimes reading cpuctx->task_ctx with interrupts enabled. Noticed-by: Peter Zijlstra Acked-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index db843f812a6..eb346048f00 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -234,15 +234,18 @@ static void __perf_counter_remove_from_context(void *info) struct perf_counter_context *ctx = counter->ctx; unsigned long flags; + local_irq_save(flags); /* * If this is a task context, we need to check whether it is * the current task context of this cpu. If not it has been * scheduled out before the smp call arrived. */ - if (ctx->task && cpuctx->task_ctx != ctx) + if (ctx->task && cpuctx->task_ctx != ctx) { + local_irq_restore(flags); return; + } - spin_lock_irqsave(&ctx->lock, flags); + spin_lock(&ctx->lock); /* * Protect the list operation against NMI by disabling the * counters on a global level. @@ -382,14 +385,17 @@ static void __perf_counter_disable(void *info) struct perf_counter_context *ctx = counter->ctx; unsigned long flags; + local_irq_save(flags); /* * If this is a per-task counter, need to check whether this * counter's task is the current task on this cpu. */ - if (ctx->task && cpuctx->task_ctx != ctx) + if (ctx->task && cpuctx->task_ctx != ctx) { + local_irq_restore(flags); return; + } - spin_lock_irqsave(&ctx->lock, flags); + spin_lock(&ctx->lock); /* * If the counter is on, turn it off. @@ -615,6 +621,7 @@ static void __perf_install_in_context(void *info) unsigned long flags; int err; + local_irq_save(flags); /* * If this is a task context, we need to check whether it is * the current task context of this cpu. If not it has been @@ -623,12 +630,14 @@ static void __perf_install_in_context(void *info) * on this cpu because it had no counters. */ if (ctx->task && cpuctx->task_ctx != ctx) { - if (cpuctx->task_ctx || ctx->task != current) + if (cpuctx->task_ctx || ctx->task != current) { + local_irq_restore(flags); return; + } cpuctx->task_ctx = ctx; } - spin_lock_irqsave(&ctx->lock, flags); + spin_lock(&ctx->lock); ctx->is_active = 1; update_context_time(ctx); @@ -745,17 +754,20 @@ static void __perf_counter_enable(void *info) unsigned long flags; int err; + local_irq_save(flags); /* * If this is a per-task counter, need to check whether this * counter's task is the current task on this cpu. 
*/ if (ctx->task && cpuctx->task_ctx != ctx) { - if (cpuctx->task_ctx || ctx->task != current) + if (cpuctx->task_ctx || ctx->task != current) { + local_irq_restore(flags); return; + } cpuctx->task_ctx = ctx; } - spin_lock_irqsave(&ctx->lock, flags); + spin_lock(&ctx->lock); ctx->is_active = 1; update_context_time(ctx); -- cgit v1.2.3 From 012b84dae17126d8b5d159173091eb3db5a2bc43 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 17 May 2009 11:08:41 +0200 Subject: perf_counter: Robustify counter-free logic This fixes a nasty crash and highlights a bug that we were freeing failed-fork() counters incorrectly. (the fix for that will come separately) [ Impact: fix crashes/lockups with inherited counters ] Acked-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index eb346048f00..616c52426b3 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1004,6 +1004,10 @@ static void __perf_counter_task_sched_out(struct perf_counter_context *ctx) if (!cpuctx->task_ctx) return; + + if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) + return; + __perf_counter_sched_out(ctx, cpuctx); cpuctx->task_ctx = NULL; } -- cgit v1.2.3 From efb3d17240d80e27508d238809168120fe4b93a4 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 29 May 2009 14:25:58 +0200 Subject: perf_counter: Fix COMM and MMAP events for cpu wide counters Commit a63eaf34ae6 ("perf_counter: Dynamically allocate tasks' perf_counter_context struct") broke COMM and MMAP notification for cpu wide counters by dropping out early if there was no task context, thereby also not iterating the cpu context. 
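A sketch of the broken shape, reduced from the lines removed below: the
early bail-out in perf_counter_comm() (and its mmap twin) ran before
perf_counter_comm_event() could iterate the per-cpu context:

	void perf_counter_comm(struct task_struct *task)
	{
		...
		if (!current->perf_counter_ctxp)
			return;		/* cpu-wide counters never notified */
		...
		perf_counter_comm_event(&comm_event);	/* not reached */
	}
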
Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 616c52426b3..58d6d198faa 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2443,9 +2443,9 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event) cpuctx = &get_cpu_var(perf_cpu_context); perf_counter_comm_ctx(&cpuctx->ctx, comm_event); + if (cpuctx->task_ctx) + perf_counter_comm_ctx(cpuctx->task_ctx, comm_event); put_cpu_var(perf_cpu_context); - - perf_counter_comm_ctx(current->perf_counter_ctxp, comm_event); } void perf_counter_comm(struct task_struct *task) @@ -2454,8 +2454,6 @@ void perf_counter_comm(struct task_struct *task) if (!atomic_read(&nr_comm_tracking)) return; - if (!current->perf_counter_ctxp) - return; comm_event = (struct perf_comm_event){ .task = task, @@ -2570,10 +2568,10 @@ got_name: cpuctx = &get_cpu_var(perf_cpu_context); perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event); + if (cpuctx->task_ctx) + perf_counter_mmap_ctx(cpuctx->task_ctx, mmap_event); put_cpu_var(perf_cpu_context); - perf_counter_mmap_ctx(current->perf_counter_ctxp, mmap_event); - kfree(buf); } @@ -2584,8 +2582,6 @@ void perf_counter_mmap(unsigned long addr, unsigned long len, if (!atomic_read(&nr_mmap_tracking)) return; - if (!current->perf_counter_ctxp) - return; mmap_event = (struct perf_mmap_event){ .file = file, -- cgit v1.2.3 From 665c2142a94202881a3c11cbaee6506cb10ada2d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 29 May 2009 14:51:57 +0200 Subject: perf_counter: Clean up task_ctx vs interrupts Remove the local_irq_save() etc.. in routines that are smp function calls, or have IRQs disabled by other means. Then change the COMM, MMAP, and swcounter context iteration to current->perf_counter_ctxp and RCU, since it really doesn't matter which context they iterate, they're all folded. Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 82 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 50 insertions(+), 32 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 58d6d198faa..0c000d305e0 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -232,18 +232,14 @@ static void __perf_counter_remove_from_context(void *info) struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_counter *counter = info; struct perf_counter_context *ctx = counter->ctx; - unsigned long flags; - local_irq_save(flags); /* * If this is a task context, we need to check whether it is * the current task context of this cpu. If not it has been * scheduled out before the smp call arrived. 
*/ - if (ctx->task && cpuctx->task_ctx != ctx) { - local_irq_restore(flags); + if (ctx->task && cpuctx->task_ctx != ctx) return; - } spin_lock(&ctx->lock); /* @@ -267,7 +263,7 @@ static void __perf_counter_remove_from_context(void *info) } perf_enable(); - spin_unlock_irqrestore(&ctx->lock, flags); + spin_unlock(&ctx->lock); } @@ -383,17 +379,13 @@ static void __perf_counter_disable(void *info) struct perf_counter *counter = info; struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_counter_context *ctx = counter->ctx; - unsigned long flags; - local_irq_save(flags); /* * If this is a per-task counter, need to check whether this * counter's task is the current task on this cpu. */ - if (ctx->task && cpuctx->task_ctx != ctx) { - local_irq_restore(flags); + if (ctx->task && cpuctx->task_ctx != ctx) return; - } spin_lock(&ctx->lock); @@ -411,7 +403,7 @@ static void __perf_counter_disable(void *info) counter->state = PERF_COUNTER_STATE_OFF; } - spin_unlock_irqrestore(&ctx->lock, flags); + spin_unlock(&ctx->lock); } /* @@ -618,10 +610,8 @@ static void __perf_install_in_context(void *info) struct perf_counter_context *ctx = counter->ctx; struct perf_counter *leader = counter->group_leader; int cpu = smp_processor_id(); - unsigned long flags; int err; - local_irq_save(flags); /* * If this is a task context, we need to check whether it is * the current task context of this cpu. If not it has been @@ -630,10 +620,8 @@ static void __perf_install_in_context(void *info) * on this cpu because it had no counters. */ if (ctx->task && cpuctx->task_ctx != ctx) { - if (cpuctx->task_ctx || ctx->task != current) { - local_irq_restore(flags); + if (cpuctx->task_ctx || ctx->task != current) return; - } cpuctx->task_ctx = ctx; } @@ -687,7 +675,7 @@ static void __perf_install_in_context(void *info) unlock: perf_enable(); - spin_unlock_irqrestore(&ctx->lock, flags); + spin_unlock(&ctx->lock); } /* @@ -751,19 +739,15 @@ static void __perf_counter_enable(void *info) struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_counter_context *ctx = counter->ctx; struct perf_counter *leader = counter->group_leader; - unsigned long flags; int err; - local_irq_save(flags); /* * If this is a per-task counter, need to check whether this * counter's task is the current task on this cpu. 
*/ if (ctx->task && cpuctx->task_ctx != ctx) { - if (cpuctx->task_ctx || ctx->task != current) { - local_irq_restore(flags); + if (cpuctx->task_ctx || ctx->task != current) return; - } cpuctx->task_ctx = ctx; } @@ -811,7 +795,7 @@ static void __perf_counter_enable(void *info) } unlock: - spin_unlock_irqrestore(&ctx->lock, flags); + spin_unlock(&ctx->lock); } /* @@ -981,6 +965,10 @@ void perf_counter_task_sched_out(struct task_struct *task, spin_lock(&ctx->lock); spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); if (context_equiv(ctx, next_ctx)) { + /* + * XXX do we need a memory barrier of sorts + * wrt to rcu_dereference() of perf_counter_ctxp + */ task->perf_counter_ctxp = next_ctx; next->perf_counter_ctxp = ctx; ctx->task = next; @@ -998,6 +986,9 @@ void perf_counter_task_sched_out(struct task_struct *task, } } +/* + * Called with IRQs disabled + */ static void __perf_counter_task_sched_out(struct perf_counter_context *ctx) { struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); @@ -1012,6 +1003,9 @@ static void __perf_counter_task_sched_out(struct perf_counter_context *ctx) cpuctx->task_ctx = NULL; } +/* + * Called with IRQs disabled + */ static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx) { __perf_counter_sched_out(&cpuctx->ctx, cpuctx); @@ -2431,6 +2425,7 @@ static void perf_counter_comm_ctx(struct perf_counter_context *ctx, static void perf_counter_comm_event(struct perf_comm_event *comm_event) { struct perf_cpu_context *cpuctx; + struct perf_counter_context *ctx; unsigned int size; char *comm = comm_event->task->comm; @@ -2443,9 +2438,17 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event) cpuctx = &get_cpu_var(perf_cpu_context); perf_counter_comm_ctx(&cpuctx->ctx, comm_event); - if (cpuctx->task_ctx) - perf_counter_comm_ctx(cpuctx->task_ctx, comm_event); put_cpu_var(perf_cpu_context); + + rcu_read_lock(); + /* + * doesn't really matter which of the child contexts the + * events ends up in. + */ + ctx = rcu_dereference(current->perf_counter_ctxp); + if (ctx) + perf_counter_comm_ctx(ctx, comm_event); + rcu_read_unlock(); } void perf_counter_comm(struct task_struct *task) @@ -2536,6 +2539,7 @@ static void perf_counter_mmap_ctx(struct perf_counter_context *ctx, static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event) { struct perf_cpu_context *cpuctx; + struct perf_counter_context *ctx; struct file *file = mmap_event->file; unsigned int size; char tmp[16]; @@ -2568,10 +2572,18 @@ got_name: cpuctx = &get_cpu_var(perf_cpu_context); perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event); - if (cpuctx->task_ctx) - perf_counter_mmap_ctx(cpuctx->task_ctx, mmap_event); put_cpu_var(perf_cpu_context); + rcu_read_lock(); + /* + * doesn't really matter which of the child contexts the + * events ends up in. 
+ */ + ctx = rcu_dereference(current->perf_counter_ctxp); + if (ctx) + perf_counter_mmap_ctx(ctx, mmap_event); + rcu_read_unlock(); + kfree(buf); } @@ -2882,6 +2894,7 @@ static void __perf_swcounter_event(enum perf_event_types type, u32 event, { struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); int *recursion = perf_swcounter_recursion_context(cpuctx); + struct perf_counter_context *ctx; if (*recursion) goto out; @@ -2891,10 +2904,15 @@ static void __perf_swcounter_event(enum perf_event_types type, u32 event, perf_swcounter_ctx_event(&cpuctx->ctx, type, event, nr, nmi, regs, addr); - if (cpuctx->task_ctx) { - perf_swcounter_ctx_event(cpuctx->task_ctx, type, event, - nr, nmi, regs, addr); - } + rcu_read_lock(); + /* + * doesn't really matter which of the child contexts the + * events ends up in. + */ + ctx = rcu_dereference(current->perf_counter_ctxp); + if (ctx) + perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr); + rcu_read_unlock(); barrier(); (*recursion)--; -- cgit v1.2.3 From bbbee90829304d156c12b171c0ac7e6e1aba8b90 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 29 May 2009 14:25:58 +0200 Subject: perf_counter: Ammend cleanup in fork() fail When fork() fails we cannot use perf_counter_exit_task() since that assumes to operate on current. Write a new helper that cleans up unused/clean contexts. Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 2 ++ kernel/fork.c | 2 +- kernel/perf_counter.c | 43 ++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 43 insertions(+), 4 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 717bf3b59ba..519a41bba24 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -579,6 +579,7 @@ extern void perf_counter_task_sched_out(struct task_struct *task, extern void perf_counter_task_tick(struct task_struct *task, int cpu); extern int perf_counter_init_task(struct task_struct *child); extern void perf_counter_exit_task(struct task_struct *child); +extern void perf_counter_free_task(struct task_struct *task); extern void perf_counter_do_pending(void); extern void perf_counter_print_debug(void); extern void __perf_disable(void); @@ -644,6 +645,7 @@ static inline void perf_counter_task_tick(struct task_struct *task, int cpu) { } static inline int perf_counter_init_task(struct task_struct *child) { return 0; } static inline void perf_counter_exit_task(struct task_struct *child) { } +static inline void perf_counter_free_task(struct task_struct *task) { } static inline void perf_counter_do_pending(void) { } static inline void perf_counter_print_debug(void) { } static inline void perf_disable(void) { } diff --git a/kernel/fork.c b/kernel/fork.c index c07c3335cea..23bf757ed32 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1298,7 +1298,7 @@ bad_fork_cleanup_semundo: bad_fork_cleanup_audit: audit_free(p); bad_fork_cleanup_policy: - perf_counter_exit_task(p); + perf_counter_free_task(p); #ifdef CONFIG_NUMA mpol_put(p->mempolicy); bad_fork_cleanup_cgroup: diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 0c000d305e0..79c3f26541d 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -3538,8 +3538,7 @@ static void sync_child_counter(struct perf_counter *child_counter, } static void -__perf_counter_exit_task(struct task_struct *child, - struct perf_counter 
*child_counter, +__perf_counter_exit_task(struct perf_counter *child_counter, struct perf_counter_context *child_ctx) { struct perf_counter *parent_counter; @@ -3605,7 +3604,7 @@ void perf_counter_exit_task(struct task_struct *child) again: list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list, list_entry) - __perf_counter_exit_task(child, child_counter, child_ctx); + __perf_counter_exit_task(child_counter, child_ctx); /* * If the last counter was a group counter, it will have appended all @@ -3620,6 +3619,44 @@ again: put_ctx(child_ctx); } +/* + * free an unexposed, unused context as created by inheritance by + * init_task below, used by fork() in case of fail. + */ +void perf_counter_free_task(struct task_struct *task) +{ + struct perf_counter_context *ctx = task->perf_counter_ctxp; + struct perf_counter *counter, *tmp; + + if (!ctx) + return; + + mutex_lock(&ctx->mutex); +again: + list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) { + struct perf_counter *parent = counter->parent; + + if (WARN_ON_ONCE(!parent)) + continue; + + mutex_lock(&parent->child_mutex); + list_del_init(&counter->child_list); + mutex_unlock(&parent->child_mutex); + + fput(parent->filp); + + list_del_counter(counter, ctx); + free_counter(counter); + } + + if (!list_empty(&ctx->counter_list)) + goto again; + + mutex_unlock(&ctx->mutex); + + put_ctx(ctx); +} + /* * Initialize the perf_counter context in task_struct */ -- cgit v1.2.3 From b78c07d45a7e71be7b5c5d7486f922355ccf23a8 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 29 May 2009 13:48:59 -0300 Subject: perf_counter tools: Shorten the DSO names using cwd [acme@emilia linux-2.6-tip]$ pwd /home/acme/git/linux-2.6-tip Before (still available using -P/--full-paths) [acme@emilia linux-2.6-tip]$ perf report -P | head -10 11.48% perf: 7454 [kernel]: clear_page_c 4.89% perf: 7454 [kernel]: vsnprintf 4.61% perf: 7454 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: dso__find_symbol 4.09% perf: 7454 [kernel]: number 4.06% perf: 7454 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: dso__fprintf 4.00% perf: 7454 /home/acme/git/linux-2.6-tip/Documentation/perf_counter/perf: symbol_filter New default: [acme@emilia linux-2.6-tip]$ perf report | head -10 11.48% perf: 7454 [kernel]: clear_page_c 4.89% perf: 7454 [kernel]: vsnprintf 4.61% perf: 7454 ./Documentation/perf_counter/perf: dso__find_symbol 4.09% perf: 7454 [kernel]: number 4.06% perf: 7454 ./Documentation/perf_counter/perf: dso__fprintf 4.00% perf: 7454 ./Documentation/perf_counter/perf: symbol_filter Suggested-by: Ingo Molnar Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt LKML-Reference: <20090529164859.GN4747@ghostprotocols.net> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 41 ++++++++++++++++++++++++++--- 1 file changed, 38 insertions(+), 3 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 412d524dd65..4705679deba 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -23,6 +23,7 @@ static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; static int dump_trace = 0; static int verbose; +static int full_paths; static unsigned long page_size; static unsigned long mmap_window = 32; @@ -134,6 +135,16 @@ static int load_kernel(void) return err; } +static int strcommon(const char *pathname, const char *cwd, int 
cwdlen) +{ + int n = 0; + + while (pathname[n] == cwd[n] && n < cwdlen) + ++n; + + return n; +} + struct map { struct list_head node; uint64_t start; @@ -142,16 +153,28 @@ struct map { struct dso *dso; }; -static struct map *map__new(struct mmap_event *event) +static struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen) { struct map *self = malloc(sizeof(*self)); if (self != NULL) { + const char *filename = event->filename; + char newfilename[PATH_MAX]; + + if (cwd) { + int n = strcommon(filename, cwd, cwdlen); + if (n == cwdlen) { + snprintf(newfilename, sizeof(newfilename), + ".%s", filename + n); + filename = newfilename; + } + } + self->start = event->start; self->end = event->start + event->len; self->pgoff = event->pgoff; - self->dso = dsos__findnew(event->filename); + self->dso = dsos__findnew(filename); if (self->dso == NULL) goto out_delete; } @@ -598,6 +621,8 @@ static int __cmd_report(void) int ret, rc = EXIT_FAILURE; uint32_t size; unsigned long total = 0, total_mmap = 0, total_comm = 0, total_unknown = 0; + char cwd[PATH_MAX], *cwdp = cwd; + int cwdlen; input = open(input_name, O_RDONLY); if (input < 0) { @@ -621,6 +646,14 @@ static int __cmd_report(void) return EXIT_FAILURE; } + if (!full_paths) { + if (getcwd(cwd, sizeof(cwd)) == NULL) { + perror("failed to get the current directory"); + return EXIT_FAILURE; + } + cwdlen = strlen(cwd); + } else + cwdp = NULL; remap: buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, MAP_SHARED, input, offset); @@ -710,7 +743,7 @@ more: } else switch (event->header.type) { case PERF_EVENT_MMAP: { struct thread *thread = threads__findnew(event->mmap.pid); - struct map *map = map__new(&event->mmap); + struct map *map = map__new(&event->mmap, cwdp, cwdlen); if (dump_trace) { fprintf(stderr, "%p [%p]: PERF_EVENT_MMAP: [%p(%p) @ %p]: %s\n", @@ -809,6 +842,8 @@ static const struct option options[] = { OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"), OPT_STRING('s', "sort", &sort_order, "key[,key2...]", "sort by key(s): pid, comm, dso, symbol. Default: pid,symbol"), + OPT_BOOLEAN('P', "full-paths", &full_paths, + "Don't shorten the pathnames taking into account the cwd"), OPT_END() }; -- cgit v1.2.3 From c44613a4c1092e85841b78b7ab52a06654fcd321 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 29 May 2009 17:03:07 -0300 Subject: perf_counter tools: Add locking to perf top We need to protect the active_symbols list as two threads change it: the main thread adding entries to the head and the display thread decaying entries from any place in the list. Also related: take a snapshot of syme->count[0] before using it to calculate the weight and to show the same number used in this calculation when displaying the symbol usage. 
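As a rough, self-contained sketch of the locking pattern (invented names, heavily simplified from the actual builtin-top.c code; an illustration, not the patch itself): one mutex guards the shared list, and the display side copies each live count into a snapshot field once per refresh, so the sort weight and the printed number come from the same value:

#include <pthread.h>
#include <stdio.h>

struct sym {
	struct sym *next;	/* links active symbols */
	const char *name;
	unsigned long count;	/* bumped by the sampling thread */
	unsigned long snap;	/* stable copy used for sort/print */
	int on_list;
};

static struct sym *active;
static pthread_mutex_t active_lock = PTHREAD_MUTEX_INITIALIZER;

/* sampling side: count the hit, publish the symbol under the lock */
static void record_hit(struct sym *s)
{
	s->count++;
	pthread_mutex_lock(&active_lock);
	if (!s->on_list) {
		s->next = active;
		active = s;
		s->on_list = 1;
	}
	pthread_mutex_unlock(&active_lock);
}

/* display side: snapshot each count once, decay the live value,
 * then sort/print from the snapshot only */
static void refresh(void)
{
	struct sym *s;

	pthread_mutex_lock(&active_lock);
	for (s = active; s; s = s->next) {
		s->snap = s->count;
		s->count = s->count * 7 / 8;
	}
	for (s = active; s; s = s->next)
		if (s->snap)
			printf("%10lu %s\n", s->snap, s->name);
	pthread_mutex_unlock(&active_lock);
}

int main(void)
{
	struct sym a = { .name = "clear_page_c" };

	record_hit(&a);
	record_hit(&a);
	refresh();
	return 0;
}

(With the snapshot, the decay step can keep mutating ->count while the sort and the printout remain mutually consistent.)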
Reported-by: Mike Galbraith Tested-by: Mike Galbraith Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt LKML-Reference: <20090529200307.GR4747@ghostprotocols.net> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-top.c | 47 +++++++++++++++++++++----------- 1 file changed, 31 insertions(+), 16 deletions(-) diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index ebe8bec1a0e..24a887907a7 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -129,6 +129,8 @@ struct sym_entry { struct rb_node rb_node; struct list_head node; unsigned long count[MAX_COUNTERS]; + unsigned long snap_count; + double weight; int skip; }; @@ -141,17 +143,16 @@ struct dso *kernel_dso; * after decayed. */ static LIST_HEAD(active_symbols); +static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER; /* * Ordering weight: count-1 * count-2 * ... / count-n */ static double sym_weight(const struct sym_entry *sym) { - double weight; + double weight = sym->snap_count; int counter; - weight = sym->count[0]; - for (counter = 1; counter < nr_counters-1; counter++) weight *= sym->count[counter]; @@ -164,11 +165,18 @@ static long events; static long userspace_events; static const char CONSOLE_CLEAR[] = ""; -static void list_insert_active_sym(struct sym_entry *syme) +static void __list_insert_active_sym(struct sym_entry *syme) { list_add(&syme->node, &active_symbols); } +static void list_remove_active_sym(struct sym_entry *syme) +{ + pthread_mutex_lock(&active_symbols_lock); + list_del_init(&syme->node); + pthread_mutex_unlock(&active_symbols_lock); +} + static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se) { struct rb_node **p = &tree->rb_node; @@ -179,7 +187,7 @@ static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se) parent = *p; iter = rb_entry(parent, struct sym_entry, rb_node); - if (sym_weight(se) > sym_weight(iter)) + if (se->weight > iter->weight) p = &(*p)->rb_left; else p = &(*p)->rb_right; @@ -203,15 +211,21 @@ static void print_sym_table(void) events = userspace_events = 0; /* Sort the active symbols */ - list_for_each_entry_safe(syme, n, &active_symbols, node) { - if (syme->count[0] != 0) { + pthread_mutex_lock(&active_symbols_lock); + syme = list_entry(active_symbols.next, struct sym_entry, node); + pthread_mutex_unlock(&active_symbols_lock); + + list_for_each_entry_safe_from(syme, n, &active_symbols, node) { + syme->snap_count = syme->count[0]; + if (syme->snap_count != 0) { + syme->weight = sym_weight(syme); rb_insert_active_sym(&tmp, syme); - sum_kevents += syme->count[0]; + sum_kevents += syme->snap_count; for (j = 0; j < nr_counters; j++) syme->count[j] = zero ? 
0 : syme->count[j] * 7 / 8; } else - list_del_init(&syme->node); + list_remove_active_sym(syme); } write(1, CONSOLE_CLEAR, strlen(CONSOLE_CLEAR)); @@ -264,19 +278,18 @@ static void print_sym_table(void) struct symbol *sym = (struct symbol *)(syme + 1); float pcnt; - if (++printed > 18 || syme->count[0] < count_filter) - break; + if (++printed > 18 || syme->snap_count < count_filter) + continue; - pcnt = 100.0 - (100.0 * ((sum_kevents - syme->count[0]) / + pcnt = 100.0 - (100.0 * ((sum_kevents - syme->snap_count) / sum_kevents)); if (nr_counters == 1) printf("%19.2f - %4.1f%% - %016llx : %s\n", - sym_weight(syme), - pcnt, sym->start, sym->name); + syme->weight, pcnt, sym->start, sym->name); else printf("%8.1f %10ld - %4.1f%% - %016llx : %s\n", - sym_weight(syme), syme->count[0], + syme->weight, syme->snap_count, pcnt, sym->start, sym->name); } @@ -395,8 +408,10 @@ static void record_ip(uint64_t ip, int counter) if (!syme->skip) { syme->count[counter]++; + pthread_mutex_lock(&active_symbols_lock); if (list_empty(&syme->node) || !syme->node.next) - list_insert_active_sym(syme); + __list_insert_active_sym(syme); + pthread_mutex_unlock(&active_symbols_lock); return; } } -- cgit v1.2.3 From d7c29318c2daa96d64b7312afd8283488c1cb29f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 30 May 2009 12:38:51 +0200 Subject: perf_counter tools: Print 'CPU utilization factor' in builtin-stat Before: Performance counter stats for '/home/mingo/hackbench': 5728.862689 task clock ticks (msecs) 34426 context switches # 0.006 M/sec 3835 CPU migrations # 0.001 M/sec 18158 pagefaults # 0.003 M/sec 16218109156 CPU cycles # 2830.947 M/sec 13519616840 instructions # 2359.913 M/sec 55941661 cache references # 9.765 M/sec 23554938 cache misses # 4.112 M/sec Wall-clock time elapsed: 528.886980 msecs After: Performance counter stats for '/home/mingo/hackbench': 5845.443541 task clock ticks # 11.886 CPU utilization factor 38289 context switches # 0.007 M/sec 4208 CPU migrations # 0.001 M/sec 17755 pagefaults # 0.003 M/sec 16664668576 CPU cycles # 2850.882 M/sec 13468113991 instructions # 2304.036 M/sec 57445468 cache references # 9.827 M/sec 26896502 cache misses # 4.601 M/sec Wall-clock time elapsed: 491.802357 msecs Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-stat.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index ef7e0e1192c..588679167c8 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -75,6 +75,7 @@ static __u64 event_res[MAX_COUNTERS][3]; static __u64 event_scaled[MAX_COUNTERS]; static __u64 runtime_nsecs; +static __u64 walltime_nsecs; static void create_perfstat_counter(int counter) { @@ -194,13 +195,19 @@ static void print_counter(int counter) if (nsec_counter(counter)) { double msecs = (double)count[0] / 1000000; - fprintf(stderr, " %14.6f %-20s (msecs)", + fprintf(stderr, " %14.6f %-20s", msecs, event_name(counter)); + if (event_id[counter] == + EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK)) { + + fprintf(stderr, " # %11.3f CPU utilization factor", + (double)count[0] / (double)walltime_nsecs); + } } else { fprintf(stderr, " %14Ld %-20s", count[0], event_name(counter)); if (runtime_nsecs) - fprintf(stderr, " # %12.3f M/sec", + fprintf(stderr, " # %11.3f 
M/sec", (double)count[0]/runtime_nsecs*1000.0); } if (scaled) @@ -241,6 +248,8 @@ static int do_perfstat(int argc, const char **argv) prctl(PR_TASK_PERF_COUNTERS_DISABLE); t1 = rdclock(); + walltime_nsecs = t1 - t0; + fflush(stdout); fprintf(stderr, "\n"); -- cgit v1.2.3 From 7fbd55449aafb86d3237b5d1a26fb4dab2aa2c76 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 30 May 2009 12:38:51 +0200 Subject: perf_counter tools: Fix 'make install' 'make install' didnt install perf itself - which needs a special rule to be copied to bindir. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index bd29a5c0010..8f725840477 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -776,6 +776,7 @@ install: all $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' $(INSTALL) $(ALL_PROGRAMS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' + $(INSTALL) perf$X '$(DESTDIR_SQ)$(bindir_SQ)' ifneq (,$X) $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), $(RM) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$p';) endif -- cgit v1.2.3 From c1c2365acf8c044f749c0fe1ea236497e8d1718e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 30 May 2009 12:38:51 +0200 Subject: perf_counter tools: Generate per command manpages (and pdf/html, etc.) Import Git's nice .txt => {man/html/pdf} generation machinery. Fix various errors in the Documentation/perf*.txt description as well. Also fix a bug in builtin-help: we'd map 'perf help top' to 'perftop' if only the 'perf' binary is in the default PATH - confusing the manpage logic. I dont fully understand why Git did it this way - but i suppose it's a migration artifact from their migration from standalone git-xyz commands to 'git xyz' commands. The perf tools were always using the modern form so it's not an issue there. 
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Documentation/Makefile | 300 +++++++++++++++++++++ .../perf_counter/Documentation/asciidoc.conf | 91 +++++++ .../perf_counter/Documentation/manpage-1.72.xsl | 14 + .../perf_counter/Documentation/manpage-base.xsl | 35 +++ .../Documentation/manpage-bold-literal.xsl | 17 ++ .../perf_counter/Documentation/manpage-normal.xsl | 13 + .../Documentation/manpage-suppress-sp.xsl | 21 ++ .../perf_counter/Documentation/perf-record.txt | 10 +- .../perf_counter/Documentation/perf-report.txt | 8 +- .../perf_counter/Documentation/perf-stat.txt | 5 +- .../perf_counter/Documentation/perf-top.txt | 8 +- Documentation/perf_counter/Documentation/perf.txt | 23 ++ Documentation/perf_counter/Makefile | 61 +++++ Documentation/perf_counter/builtin-help.c | 2 +- 14 files changed, 581 insertions(+), 27 deletions(-) create mode 100644 Documentation/perf_counter/Documentation/Makefile create mode 100644 Documentation/perf_counter/Documentation/asciidoc.conf create mode 100644 Documentation/perf_counter/Documentation/manpage-1.72.xsl create mode 100644 Documentation/perf_counter/Documentation/manpage-base.xsl create mode 100644 Documentation/perf_counter/Documentation/manpage-bold-literal.xsl create mode 100644 Documentation/perf_counter/Documentation/manpage-normal.xsl create mode 100644 Documentation/perf_counter/Documentation/manpage-suppress-sp.xsl create mode 100644 Documentation/perf_counter/Documentation/perf.txt diff --git a/Documentation/perf_counter/Documentation/Makefile b/Documentation/perf_counter/Documentation/Makefile new file mode 100644 index 00000000000..5457192e1b4 --- /dev/null +++ b/Documentation/perf_counter/Documentation/Makefile @@ -0,0 +1,300 @@ +MAN1_TXT= \ + $(filter-out $(addsuffix .txt, $(ARTICLES) $(SP_ARTICLES)), \ + $(wildcard perf-*.txt)) \ + perf.txt +MAN5_TXT= +MAN7_TXT= + +MAN_TXT = $(MAN1_TXT) $(MAN5_TXT) $(MAN7_TXT) +MAN_XML=$(patsubst %.txt,%.xml,$(MAN_TXT)) +MAN_HTML=$(patsubst %.txt,%.html,$(MAN_TXT)) + +DOC_HTML=$(MAN_HTML) + +ARTICLES = +# with their own formatting rules. +SP_ARTICLES = +API_DOCS = $(patsubst %.txt,%,$(filter-out technical/api-index-skel.txt technical/api-index.txt, $(wildcard technical/api-*.txt))) +SP_ARTICLES += $(API_DOCS) +SP_ARTICLES += technical/api-index + +DOC_HTML += $(patsubst %,%.html,$(ARTICLES) $(SP_ARTICLES)) + +DOC_MAN1=$(patsubst %.txt,%.1,$(MAN1_TXT)) +DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT)) +DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT)) + +prefix?=$(HOME) +bindir?=$(prefix)/bin +htmldir?=$(prefix)/share/doc/perf-doc +pdfdir?=$(prefix)/share/doc/perf-doc +mandir?=$(prefix)/share/man +man1dir=$(mandir)/man1 +man5dir=$(mandir)/man5 +man7dir=$(mandir)/man7 +# DESTDIR= + +ASCIIDOC=asciidoc +ASCIIDOC_EXTRA = +MANPAGE_XSL = manpage-normal.xsl +XMLTO_EXTRA = +INSTALL?=install +RM ?= rm -f +DOC_REF = origin/man +HTML_REF = origin/html + +infodir?=$(prefix)/share/info +MAKEINFO=makeinfo +INSTALL_INFO=install-info +DOCBOOK2X_TEXI=docbook2x-texi +DBLATEX=dblatex +ifndef PERL_PATH + PERL_PATH = /usr/bin/perl +endif + +-include ../config.mak.autogen +-include ../config.mak + +# +# For asciidoc ... +# -7.1.2, no extra settings are needed. +# 8.0-, set ASCIIDOC8. +# + +# +# For docbook-xsl ... +# -1.68.1, set ASCIIDOC_NO_ROFF? (based on changelog from 1.73.0) +# 1.69.0, no extra settings are needed? +# 1.69.1-1.71.0, set DOCBOOK_SUPPRESS_SP? 
+# 1.71.1, no extra settings are needed? +# 1.72.0, set DOCBOOK_XSL_172. +# 1.73.0-, set ASCIIDOC_NO_ROFF +# + +# +# If you had been using DOCBOOK_XSL_172 in an attempt to get rid +# of 'the ".ft C" problem' in your generated manpages, and you +# instead ended up with weird characters around callouts, try +# using ASCIIDOC_NO_ROFF instead (it works fine with ASCIIDOC8). +# + +ifdef ASCIIDOC8 +ASCIIDOC_EXTRA += -a asciidoc7compatible +endif +ifdef DOCBOOK_XSL_172 +ASCIIDOC_EXTRA += -a perf-asciidoc-no-roff +MANPAGE_XSL = manpage-1.72.xsl +else + ifdef ASCIIDOC_NO_ROFF + # docbook-xsl after 1.72 needs the regular XSL, but will not + # pass-thru raw roff codes from asciidoc.conf, so turn them off. + ASCIIDOC_EXTRA += -a perf-asciidoc-no-roff + endif +endif +ifdef MAN_BOLD_LITERAL +XMLTO_EXTRA += -m manpage-bold-literal.xsl +endif +ifdef DOCBOOK_SUPPRESS_SP +XMLTO_EXTRA += -m manpage-suppress-sp.xsl +endif + +SHELL_PATH ?= $(SHELL) +# Shell quote; +SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH)) + +# +# Please note that there is a minor bug in asciidoc. +# The version after 6.0.3 _will_ include the patch found here: +# http://marc.theaimsgroup.com/?l=perf&m=111558757202243&w=2 +# +# Until that version is released you may have to apply the patch +# yourself - yes, all 6 characters of it! +# + +QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir +QUIET_SUBDIR1 = + +ifneq ($(findstring $(MAKEFLAGS),w),w) +PRINT_DIR = --no-print-directory +else # "make -w" +NO_SUBDIR = : +endif + +ifneq ($(findstring $(MAKEFLAGS),s),s) +ifndef V + QUIET_ASCIIDOC = @echo ' ' ASCIIDOC $@; + QUIET_XMLTO = @echo ' ' XMLTO $@; + QUIET_DB2TEXI = @echo ' ' DB2TEXI $@; + QUIET_MAKEINFO = @echo ' ' MAKEINFO $@; + QUIET_DBLATEX = @echo ' ' DBLATEX $@; + QUIET_XSLTPROC = @echo ' ' XSLTPROC $@; + QUIET_GEN = @echo ' ' GEN $@; + QUIET_STDERR = 2> /dev/null + QUIET_SUBDIR0 = +@subdir= + QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \ + $(MAKE) $(PRINT_DIR) -C $$subdir + export V +endif +endif + +all: html man + +html: $(DOC_HTML) + +$(DOC_HTML) $(DOC_MAN1) $(DOC_MAN5) $(DOC_MAN7): asciidoc.conf + +man: man1 man5 man7 +man1: $(DOC_MAN1) +man5: $(DOC_MAN5) +man7: $(DOC_MAN7) + +info: perf.info perfman.info + +pdf: user-manual.pdf + +install: install-man + +install-man: man + $(INSTALL) -d -m 755 $(DESTDIR)$(man1dir) +# $(INSTALL) -d -m 755 $(DESTDIR)$(man5dir) +# $(INSTALL) -d -m 755 $(DESTDIR)$(man7dir) + $(INSTALL) -m 644 $(DOC_MAN1) $(DESTDIR)$(man1dir) +# $(INSTALL) -m 644 $(DOC_MAN5) $(DESTDIR)$(man5dir) +# $(INSTALL) -m 644 $(DOC_MAN7) $(DESTDIR)$(man7dir) + +install-info: info + $(INSTALL) -d -m 755 $(DESTDIR)$(infodir) + $(INSTALL) -m 644 perf.info perfman.info $(DESTDIR)$(infodir) + if test -r $(DESTDIR)$(infodir)/dir; then \ + $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perf.info ;\ + $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perfman.info ;\ + else \ + echo "No directory found in $(DESTDIR)$(infodir)" >&2 ; \ + fi + +install-pdf: pdf + $(INSTALL) -d -m 755 $(DESTDIR)$(pdfdir) + $(INSTALL) -m 644 user-manual.pdf $(DESTDIR)$(pdfdir) + +install-html: html + '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir) + +../PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE + $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) PERF-VERSION-FILE + +-include ../PERF-VERSION-FILE + +# +# Determine "include::" file references in asciidoc files. 
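+# (for example: if perf-report.txt pulled shared text in via an +# include:: directive, doc.dep would record that dependency so the +# manpage gets rebuilt when the included file changes; a hypothetical +# case, noted for illustration)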
+# +doc.dep : $(wildcard *.txt) build-docdep.perl + $(QUIET_GEN)$(RM) $@+ $@ && \ + $(PERL_PATH) ./build-docdep.perl >$@+ $(QUIET_STDERR) && \ + mv $@+ $@ + +-include doc.dep + +cmds_txt = cmds-ancillaryinterrogators.txt \ + cmds-ancillarymanipulators.txt \ + cmds-mainporcelain.txt \ + cmds-plumbinginterrogators.txt \ + cmds-plumbingmanipulators.txt \ + cmds-synchingrepositories.txt \ + cmds-synchelpers.txt \ + cmds-purehelpers.txt \ + cmds-foreignscminterface.txt + +$(cmds_txt): cmd-list.made + +cmd-list.made: cmd-list.perl ../command-list.txt $(MAN1_TXT) + $(QUIET_GEN)$(RM) $@ && \ + $(PERL_PATH) ./cmd-list.perl ../command-list.txt $(QUIET_STDERR) && \ + date >$@ + +clean: + $(RM) *.xml *.xml+ *.html *.html+ *.1 *.5 *.7 + $(RM) *.texi *.texi+ *.texi++ perf.info perfman.info + $(RM) howto-index.txt howto/*.html doc.dep + $(RM) technical/api-*.html technical/api-index.txt + $(RM) $(cmds_txt) *.made + +$(MAN_HTML): %.html : %.txt + $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ + $(ASCIIDOC) -b xhtml11 -d manpage -f asciidoc.conf \ + $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \ + mv $@+ $@ + +%.1 %.5 %.7 : %.xml + $(QUIET_XMLTO)$(RM) $@ && \ + xmlto -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $< + +%.xml : %.txt + $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ + $(ASCIIDOC) -b docbook -d manpage -f asciidoc.conf \ + $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \ + mv $@+ $@ + +XSLT = docbook.xsl +XSLTOPTS = --xinclude --stringparam html.stylesheet docbook-xsl.css + +user-manual.html: user-manual.xml + $(QUIET_XSLTPROC)xsltproc $(XSLTOPTS) -o $@ $(XSLT) $< + +perf.info: user-manual.texi + $(QUIET_MAKEINFO)$(MAKEINFO) --no-split -o $@ user-manual.texi + +user-manual.texi: user-manual.xml + $(QUIET_DB2TEXI)$(RM) $@+ $@ && \ + $(DOCBOOK2X_TEXI) user-manual.xml --encoding=UTF-8 --to-stdout >$@++ && \ + $(PERL_PATH) fix-texi.perl <$@++ >$@+ && \ + rm $@++ && \ + mv $@+ $@ + +user-manual.pdf: user-manual.xml + $(QUIET_DBLATEX)$(RM) $@+ $@ && \ + $(DBLATEX) -o $@+ -p /etc/asciidoc/dblatex/asciidoc-dblatex.xsl -s /etc/asciidoc/dblatex/asciidoc-dblatex.sty $< && \ + mv $@+ $@ + +perfman.texi: $(MAN_XML) cat-texi.perl + $(QUIET_DB2TEXI)$(RM) $@+ $@ && \ + ($(foreach xml,$(MAN_XML),$(DOCBOOK2X_TEXI) --encoding=UTF-8 \ + --to-stdout $(xml) &&) true) > $@++ && \ + $(PERL_PATH) cat-texi.perl $@ <$@++ >$@+ && \ + rm $@++ && \ + mv $@+ $@ + +perfman.info: perfman.texi + $(QUIET_MAKEINFO)$(MAKEINFO) --no-split --no-validate $*.texi + +$(patsubst %.txt,%.texi,$(MAN_TXT)): %.texi : %.xml + $(QUIET_DB2TEXI)$(RM) $@+ $@ && \ + $(DOCBOOK2X_TEXI) --to-stdout $*.xml >$@+ && \ + mv $@+ $@ + +howto-index.txt: howto-index.sh $(wildcard howto/*.txt) + $(QUIET_GEN)$(RM) $@+ $@ && \ + '$(SHELL_PATH_SQ)' ./howto-index.sh $(wildcard howto/*.txt) >$@+ && \ + mv $@+ $@ + +$(patsubst %,%.html,$(ARTICLES)) : %.html : %.txt + $(QUIET_ASCIIDOC)$(ASCIIDOC) -b xhtml11 $*.txt + +WEBDOC_DEST = /pub/software/tools/perf/docs + +$(patsubst %.txt,%.html,$(wildcard howto/*.txt)): %.html : %.txt + $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ + sed -e '1,/^$$/d' $< | $(ASCIIDOC) -b xhtml11 - >$@+ && \ + mv $@+ $@ + +install-webdoc : html + '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(WEBDOC_DEST) + +quick-install: quick-install-man + +quick-install-man: + '$(SHELL_PATH_SQ)' ./install-doc-quick.sh $(DOC_REF) $(DESTDIR)$(mandir) + +quick-install-html: + '$(SHELL_PATH_SQ)' ./install-doc-quick.sh $(HTML_REF) $(DESTDIR)$(htmldir) + +.PHONY: .FORCE-PERF-VERSION-FILE diff --git a/Documentation/perf_counter/Documentation/asciidoc.conf 
b/Documentation/perf_counter/Documentation/asciidoc.conf new file mode 100644 index 00000000000..356b23a4033 --- /dev/null +++ b/Documentation/perf_counter/Documentation/asciidoc.conf @@ -0,0 +1,91 @@ +## linkperf: macro +# +# Usage: linkperf:command[manpage-section] +# +# Note, {0} is the manpage section, while {target} is the command. +# +# Show PERF link as: (
); if section is defined, else just show +# the command. + +[macros] +(?su)[\\]?(?Plinkperf):(?P\S*?)\[(?P.*?)\]= + +[attributes] +asterisk=* +plus=+ +caret=^ +startsb=[ +endsb=] +tilde=~ + +ifdef::backend-docbook[] +[linkperf-inlinemacro] +{0%{target}} +{0#} +{0#{target}{0}} +{0#} +endif::backend-docbook[] + +ifdef::backend-docbook[] +ifndef::perf-asciidoc-no-roff[] +# "unbreak" docbook-xsl v1.68 for manpages. v1.69 works with or without this. +# v1.72 breaks with this because it replaces dots not in roff requests. +[listingblock] +{title} + +ifdef::doctype-manpage[] + .ft C +endif::doctype-manpage[] +| +ifdef::doctype-manpage[] + .ft +endif::doctype-manpage[] + +{title#} +endif::perf-asciidoc-no-roff[] + +ifdef::perf-asciidoc-no-roff[] +ifdef::doctype-manpage[] +# The following two small workarounds insert a simple paragraph after screen +[listingblock] +{title} + +| + +{title#} + +[verseblock] +{title} +{title%} +{title#} +| + +{title#} +{title%} +endif::doctype-manpage[] +endif::perf-asciidoc-no-roff[] +endif::backend-docbook[] + +ifdef::doctype-manpage[] +ifdef::backend-docbook[] +[header] +template::[header-declarations] + + +{mantitle} +{manvolnum} +perf +{perf_version} +perf Manual + + + {manname} + {manpurpose} + +endif::backend-docbook[] +endif::doctype-manpage[] + +ifdef::backend-xhtml11[] +[linkperf-inlinemacro] +{target}{0?({0})} +endif::backend-xhtml11[] diff --git a/Documentation/perf_counter/Documentation/manpage-1.72.xsl b/Documentation/perf_counter/Documentation/manpage-1.72.xsl new file mode 100644 index 00000000000..b4d315cb8c4 --- /dev/null +++ b/Documentation/perf_counter/Documentation/manpage-1.72.xsl @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/Documentation/perf_counter/Documentation/manpage-base.xsl b/Documentation/perf_counter/Documentation/manpage-base.xsl new file mode 100644 index 00000000000..a264fa61609 --- /dev/null +++ b/Documentation/perf_counter/Documentation/manpage-base.xsl @@ -0,0 +1,35 @@ + + + + + + + + + + + + + + sp + + + + + + + + br + + + diff --git a/Documentation/perf_counter/Documentation/manpage-bold-literal.xsl b/Documentation/perf_counter/Documentation/manpage-bold-literal.xsl new file mode 100644 index 00000000000..608eb5df628 --- /dev/null +++ b/Documentation/perf_counter/Documentation/manpage-bold-literal.xsl @@ -0,0 +1,17 @@ + + + + + + + fB + + + fR + + + diff --git a/Documentation/perf_counter/Documentation/manpage-normal.xsl b/Documentation/perf_counter/Documentation/manpage-normal.xsl new file mode 100644 index 00000000000..a48f5b11f3d --- /dev/null +++ b/Documentation/perf_counter/Documentation/manpage-normal.xsl @@ -0,0 +1,13 @@ + + + + + + +\ +. 
+ + diff --git a/Documentation/perf_counter/Documentation/manpage-suppress-sp.xsl b/Documentation/perf_counter/Documentation/manpage-suppress-sp.xsl new file mode 100644 index 00000000000..a63c7632a87 --- /dev/null +++ b/Documentation/perf_counter/Documentation/manpage-suppress-sp.xsl @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + diff --git a/Documentation/perf_counter/Documentation/perf-record.txt b/Documentation/perf_counter/Documentation/perf-record.txt index a93d2ec6176..4d3416fc764 100644 --- a/Documentation/perf_counter/Documentation/perf-record.txt +++ b/Documentation/perf_counter/Documentation/perf-record.txt @@ -1,5 +1,5 @@ perf-record(1) -========== +============== NAME ---- @@ -53,12 +53,6 @@ OPTIONS -l:: scale counter values -Configuration -------------- - -EXAMPLES --------- - SEE ALSO -------- -linkperf:git-stat[1] +linkperf:perf-stat[1] diff --git a/Documentation/perf_counter/Documentation/perf-report.txt b/Documentation/perf_counter/Documentation/perf-report.txt index 49efe16c958..52d3fc6846a 100644 --- a/Documentation/perf_counter/Documentation/perf-report.txt +++ b/Documentation/perf_counter/Documentation/perf-report.txt @@ -1,5 +1,5 @@ perf-report(1) -========== +============== NAME ---- @@ -21,12 +21,6 @@ OPTIONS --input=:: Input file name. (default: perf.data) -Configuration -------------- - -EXAMPLES --------- - SEE ALSO -------- linkperf:perf-stat[1] diff --git a/Documentation/perf_counter/Documentation/perf-stat.txt b/Documentation/perf_counter/Documentation/perf-stat.txt index 828c59ff5f5..a67d0e3b7d0 100644 --- a/Documentation/perf_counter/Documentation/perf-stat.txt +++ b/Documentation/perf_counter/Documentation/perf-stat.txt @@ -51,9 +51,6 @@ OPTIONS -l:: scale counter values -Configuration -------------- - EXAMPLES -------- @@ -74,4 +71,4 @@ $ perf stat sleep 1 SEE ALSO -------- -linkperf:git-tops[1] +linkperf:perf-tops[1] diff --git a/Documentation/perf_counter/Documentation/perf-top.txt b/Documentation/perf_counter/Documentation/perf-top.txt index 057333b7253..15251e40e4f 100644 --- a/Documentation/perf_counter/Documentation/perf-top.txt +++ b/Documentation/perf_counter/Documentation/perf-top.txt @@ -50,12 +50,6 @@ OPTIONS -l:: scale counter values -Configuration -------------- - -EXAMPLES --------- - SEE ALSO -------- -linkperf:git-stat[1] +linkperf:perf-stat[1] diff --git a/Documentation/perf_counter/Documentation/perf.txt b/Documentation/perf_counter/Documentation/perf.txt new file mode 100644 index 00000000000..e3d8b3832c6 --- /dev/null +++ b/Documentation/perf_counter/Documentation/perf.txt @@ -0,0 +1,23 @@ +perf(1) +======= + +NAME +---- +perf - Performance analysis tools for Linux + +SYNOPSIS +-------- +[verse] +'perf' [--version] [--help] COMMAND [ARGS] + +DESCRIPTION +----------- +Performance counters for Linux are a new kernel-based subsystem +that provides a framework for all things performance analysis. It +covers hardware level (CPU/PMU, Performance Monitoring Unit) features +as well as software features (software counters, tracepoints). 
+ +SEE ALSO +-------- +linkperf:perf-stat[1], linkperf:perf-top[1], +linkperf:perf-record[1], linkperf:perf-report[1] diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 8f725840477..416ab11e978 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -693,6 +693,21 @@ builtin-revert.o wt-status.o: wt-status.h $(LIB_FILE): $(LIB_OBJS) $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS) +doc: + $(MAKE) -C Documentation all + +man: + $(MAKE) -C Documentation man + +html: + $(MAKE) -C Documentation html + +info: + $(MAKE) -C Documentation info + +pdf: + $(MAKE) -C Documentation pdf + TAGS: $(RM) TAGS $(FIND) . -name '*.[hcS]' -print | xargs etags -a @@ -781,6 +796,31 @@ ifneq (,$X) $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), $(RM) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$p';) endif +install-doc: + $(MAKE) -C Documentation install + +install-man: + $(MAKE) -C Documentation install-man + +install-html: + $(MAKE) -C Documentation install-html + +install-info: + $(MAKE) -C Documentation install-info + +install-pdf: + $(MAKE) -C Documentation install-pdf + +quick-install-doc: + $(MAKE) -C Documentation quick-install + +quick-install-man: + $(MAKE) -C Documentation quick-install-man + +quick-install-html: + $(MAKE) -C Documentation quick-install-html + + ### Maintainer's dist rules perf.spec: perf.spec.in @@ -801,6 +841,26 @@ dist: perf.spec perf-archive$(X) configure @$(RM) -r $(PERF_TARNAME) gzip -f -9 $(PERF_TARNAME).tar +htmldocs = perf-htmldocs-$(PERF_VERSION) +manpages = perf-manpages-$(PERF_VERSION) +dist-doc: + $(RM) -r .doc-tmp-dir + mkdir .doc-tmp-dir + $(MAKE) -C Documentation WEBDOC_DEST=../.doc-tmp-dir install-webdoc + cd .doc-tmp-dir && $(TAR) cf ../$(htmldocs).tar . + gzip -n -9 -f $(htmldocs).tar + : + $(RM) -r .doc-tmp-dir + mkdir -p .doc-tmp-dir/man1 .doc-tmp-dir/man5 .doc-tmp-dir/man7 + $(MAKE) -C Documentation DESTDIR=./ \ + man1dir=../.doc-tmp-dir/man1 \ + man5dir=../.doc-tmp-dir/man5 \ + man7dir=../.doc-tmp-dir/man7 \ + install + cd .doc-tmp-dir && $(TAR) cf ../$(manpages).tar . + gzip -n -9 -f $(manpages).tar + $(RM) -r .doc-tmp-dir + rpm: dist $(RPMBUILD) -ta $(PERF_TARNAME).tar.gz @@ -819,6 +879,7 @@ clean: $(RM) -r $(PERF_TARNAME) .doc-tmp-dir $(RM) $(PERF_TARNAME).tar.gz perf-core_$(PERF_VERSION)-*.tar.gz $(RM) $(htmldocs).tar.gz $(manpages).tar.gz + $(MAKE) -C Documentation/ clean $(RM) PERF-VERSION-FILE PERF-CFLAGS PERF-BUILD-OPTIONS .PHONY: all install clean strip diff --git a/Documentation/perf_counter/builtin-help.c b/Documentation/perf_counter/builtin-help.c index d2bd3177b98..a3894bfb9b6 100644 --- a/Documentation/perf_counter/builtin-help.c +++ b/Documentation/perf_counter/builtin-help.c @@ -317,7 +317,7 @@ static const char *cmd_to_page(const char *perf_cmd) else if (is_perf_command(perf_cmd)) return prepend("perf-", perf_cmd); else - return prepend("perf", perf_cmd); + return prepend("perf-", perf_cmd); } static void setup_man_path(void) -- cgit v1.2.3 From 27b9613b7be39412775d0ab80f57229aa73bb07d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 31 May 2009 22:09:49 +0200 Subject: perf_counter tools: Fix unknown command help text Arjan reported this error when entering an unknown command to perf: $ perf start fatal: Uh oh. Your system reports no Git commands at all. The Git code expects there to be perf-* commands - but since Perf is a 'pure' utility with no dash commands anymore, this old assumption of Git does not hold anymore. 
Remove that error check. Reported-by: Arjan van de Ven Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/util/help.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/Documentation/perf_counter/util/help.c b/Documentation/perf_counter/util/help.c index edde541d238..397487fb2be 100644 --- a/Documentation/perf_counter/util/help.c +++ b/Documentation/perf_counter/util/help.c @@ -323,9 +323,6 @@ const char *help_unknown_cmd(const char *cmd) qsort(main_cmds.names, main_cmds.cnt, sizeof(*main_cmds.names), levenshtein_compare); - if (!main_cmds.cnt) - die ("Uh oh. Your system reports no Git commands at all."); - best_similarity = main_cmds.names[0]->len; n = 1; while (n < main_cmds.cnt && best_similarity == main_cmds.names[n]->len) -- cgit v1.2.3 From 25346b93ca079080c9cb23331db5c4f6404e8530 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 1 Jun 2009 17:48:12 +1000 Subject: perf_counter: Provide functions for locking and pinning the context for a task This abstracts out the code for locking the context associated with a task. Because the context might get transferred from one task to another concurrently, we have to check after locking the context that it is still the right context for the task and retry if not. This was open-coded in find_get_context() and perf_counter_init_task(). This adds a further function for pinning the context for a task, i.e. marking it so it can't be transferred to another task. This adds a 'pin_count' field to struct perf_counter_context to indicate that a context is pinned, instead of the previous method of setting the parent_gen count to all 1s. Pinning the context with a pin_count is easier to undo and doesn't require saving the parent_gen value. This also adds a perf_unpin_context() to undo the effect of perf_pin_task_context() and changes perf_counter_init_task to use it. Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <18979.34748.755674.596386@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 1 + kernel/perf_counter.c | 128 +++++++++++++++++++++++++------------------ 2 files changed, 75 insertions(+), 54 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 519a41bba24..81ec79c9f19 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -543,6 +543,7 @@ struct perf_counter_context { struct perf_counter_context *parent_ctx; u64 parent_gen; u64 generation; + int pin_count; struct rcu_head rcu_head; }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 79c3f26541d..da8dfef4b47 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -122,6 +122,69 @@ static void put_ctx(struct perf_counter_context *ctx) } } +/* + * Get the perf_counter_context for a task and lock it. + * This has to cope with with the fact that until it is locked, + * the context could get moved to another task. 
+ */ +static struct perf_counter_context *perf_lock_task_context( + struct task_struct *task, unsigned long *flags) +{ + struct perf_counter_context *ctx; + + rcu_read_lock(); + retry: + ctx = rcu_dereference(task->perf_counter_ctxp); + if (ctx) { + /* + * If this context is a clone of another, it might + * get swapped for another underneath us by + * perf_counter_task_sched_out, though the + * rcu_read_lock() protects us from any context + * getting freed. Lock the context and check if it + * got swapped before we could get the lock, and retry + * if so. If we locked the right context, then it + * can't get swapped on us any more. + */ + spin_lock_irqsave(&ctx->lock, *flags); + if (ctx != rcu_dereference(task->perf_counter_ctxp)) { + spin_unlock_irqrestore(&ctx->lock, *flags); + goto retry; + } + } + rcu_read_unlock(); + return ctx; +} + +/* + * Get the context for a task and increment its pin_count so it + * can't get swapped to another task. This also increments its + * reference count so that the context can't get freed. + */ +static struct perf_counter_context *perf_pin_task_context(struct task_struct *task) +{ + struct perf_counter_context *ctx; + unsigned long flags; + + ctx = perf_lock_task_context(task, &flags); + if (ctx) { + ++ctx->pin_count; + get_ctx(ctx); + spin_unlock_irqrestore(&ctx->lock, flags); + } + return ctx; +} + +static void perf_unpin_context(struct perf_counter_context *ctx) +{ + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + --ctx->pin_count; + spin_unlock_irqrestore(&ctx->lock, flags); + put_ctx(ctx); +} + /* * Add a counter from the lists for its context. * Must be called with ctx->mutex and ctx->lock held. @@ -916,7 +979,7 @@ static int context_equiv(struct perf_counter_context *ctx1, { return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx && ctx1->parent_gen == ctx2->parent_gen - && ctx1->parent_gen != ~0ull; + && !ctx1->pin_count && !ctx2->pin_count; } /* @@ -1271,6 +1334,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu) struct perf_counter_context *ctx; struct perf_counter_context *parent_ctx; struct task_struct *task; + unsigned long flags; int err; /* @@ -1323,28 +1387,9 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu) if (!ptrace_may_access(task, PTRACE_MODE_READ)) goto errout; - retry_lock: - rcu_read_lock(); retry: - ctx = rcu_dereference(task->perf_counter_ctxp); + ctx = perf_lock_task_context(task, &flags); if (ctx) { - /* - * If this context is a clone of another, it might - * get swapped for another underneath us by - * perf_counter_task_sched_out, though the - * rcu_read_lock() protects us from any context - * getting freed. Lock the context and check if it - * got swapped before we could get the lock, and retry - * if so. If we locked the right context, then it - * can't get swapped on us any more and we can - * unclone it if necessary. - * Once it's not a clone things will be stable. - */ - spin_lock_irq(&ctx->lock); - if (ctx != rcu_dereference(task->perf_counter_ctxp)) { - spin_unlock_irq(&ctx->lock); - goto retry; - } parent_ctx = ctx->parent_ctx; if (parent_ctx) { put_ctx(parent_ctx); @@ -1355,9 +1400,8 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu) * this context won't get freed if the task exits. 
*/ get_ctx(ctx); - spin_unlock_irq(&ctx->lock); + spin_unlock_irqrestore(&ctx->lock, flags); } - rcu_read_unlock(); if (!ctx) { ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); @@ -1372,7 +1416,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu) * the context they set. */ kfree(ctx); - goto retry_lock; + goto retry; } get_task_struct(task); } @@ -3667,7 +3711,6 @@ int perf_counter_init_task(struct task_struct *child) struct perf_counter *counter; struct task_struct *parent = current; int inherited_all = 1; - u64 cloned_gen; int ret = 0; child->perf_counter_ctxp = NULL; @@ -3693,32 +3736,17 @@ int perf_counter_init_task(struct task_struct *child) get_task_struct(child); /* - * If the parent's context is a clone, temporarily set its - * parent_gen to an impossible value (all 1s) so it won't get - * swapped under us. The rcu_read_lock makes sure that - * parent_ctx continues to exist even if it gets swapped to - * another process and then freed while we are trying to get - * its lock. + * If the parent's context is a clone, pin it so it won't get + * swapped under us. */ - rcu_read_lock(); - retry: - parent_ctx = rcu_dereference(parent->perf_counter_ctxp); + parent_ctx = perf_pin_task_context(parent); + /* * No need to check if parent_ctx != NULL here; since we saw * it non-NULL earlier, the only reason for it to become NULL * is if we exit, and since we're currently in the middle of * a fork we can't be exiting at the same time. */ - spin_lock_irq(&parent_ctx->lock); - if (parent_ctx != rcu_dereference(parent->perf_counter_ctxp)) { - spin_unlock_irq(&parent_ctx->lock); - goto retry; - } - cloned_gen = parent_ctx->parent_gen; - if (parent_ctx->parent_ctx) - parent_ctx->parent_gen = ~0ull; - spin_unlock_irq(&parent_ctx->lock); - rcu_read_unlock(); /* * Lock the parent list. No need to lock the child - not PID @@ -3759,7 +3787,7 @@ int perf_counter_init_task(struct task_struct *child) cloned_ctx = rcu_dereference(parent_ctx->parent_ctx); if (cloned_ctx) { child_ctx->parent_ctx = cloned_ctx; - child_ctx->parent_gen = cloned_gen; + child_ctx->parent_gen = parent_ctx->parent_gen; } else { child_ctx->parent_ctx = parent_ctx; child_ctx->parent_gen = parent_ctx->generation; @@ -3769,15 +3797,7 @@ int perf_counter_init_task(struct task_struct *child) mutex_unlock(&parent_ctx->mutex); - /* - * Restore the clone status of the parent. - */ - if (parent_ctx->parent_ctx) { - spin_lock_irq(&parent_ctx->lock); - if (parent_ctx->parent_ctx) - parent_ctx->parent_gen = cloned_gen; - spin_unlock_irq(&parent_ctx->lock); - } + perf_unpin_context(parent_ctx); return ret; } -- cgit v1.2.3 From 880ca15adf2392770a68047e7a98e076ff4d21da Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 1 Jun 2009 17:49:14 +1000 Subject: perf_counter: Allow software counters to count while task is not running This changes perf_swcounter_match() so that per-task software counters can count events that occur while their associated task is not running. This will allow us to use the generic software counter code for counting task migrations, which can occur while the task is not scheduled in. To do this, we have to distinguish between the situations where the counter is inactive because its task has been scheduled out, and those where the counter is inactive because it is part of a group that was not able to go on the PMU. In the former case we want the counter to count, but not in the latter case. If the context is active, we have the latter case. 
If the context is inactive then we need to know whether the counter was counting when the context was last active, which we can determine by comparing its ->tstamp_stopped timestamp with the context's timestamp. This also folds three checks in perf_swcounter_match, checking perf_event_raw(), perf_event_type() and perf_event_id() individually, into a single 64-bit comparison on counter->hw_event.config, as an optimization. Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <18979.34810.259718.955621@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 48 ++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 42 insertions(+), 6 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index da8dfef4b47..ff8b4636f84 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2867,20 +2867,56 @@ static void perf_swcounter_overflow(struct perf_counter *counter, } +static int perf_swcounter_is_counting(struct perf_counter *counter) +{ + struct perf_counter_context *ctx; + unsigned long flags; + int count; + + if (counter->state == PERF_COUNTER_STATE_ACTIVE) + return 1; + + if (counter->state != PERF_COUNTER_STATE_INACTIVE) + return 0; + + /* + * If the counter is inactive, it could be just because + * its task is scheduled out, or because it's in a group + * which could not go on the PMU. We want to count in + * the first case but not the second. If the context is + * currently active then an inactive software counter must + * be the second case. If it's not currently active then + * we need to know whether the counter was active when the + * context was last active, which we can determine by + * comparing counter->tstamp_stopped with ctx->time. + * + * We are within an RCU read-side critical section, + * which protects the existence of *ctx. 
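+ * + * (tstamp_stopped < ctx->time means this counter stopped counting + * before the context last did, i.e. it went off the PMU for some + * reason other than its task being scheduled out, so it must not + * count.)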
+ */ + ctx = counter->ctx; + spin_lock_irqsave(&ctx->lock, flags); + count = 1; + /* Re-check state now we have the lock */ + if (counter->state < PERF_COUNTER_STATE_INACTIVE || + counter->ctx->is_active || + counter->tstamp_stopped < ctx->time) + count = 0; + spin_unlock_irqrestore(&ctx->lock, flags); + return count; +} + static int perf_swcounter_match(struct perf_counter *counter, enum perf_event_types type, u32 event, struct pt_regs *regs) { - if (counter->state != PERF_COUNTER_STATE_ACTIVE) - return 0; + u64 event_config; - if (perf_event_raw(&counter->hw_event)) - return 0; + event_config = ((u64) type << PERF_COUNTER_TYPE_SHIFT) | event; - if (perf_event_type(&counter->hw_event) != type) + if (!perf_swcounter_is_counting(counter)) return 0; - if (perf_event_id(&counter->hw_event) != event) + if (counter->hw_event.config != event_config) return 0; if (counter->hw_event.exclude_user && user_mode(regs)) -- cgit v1.2.3 From 22a4f650d686eeaac3629dae1c4294381485efdf Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 1 Jun 2009 10:13:37 +0200 Subject: perf_counter: Tidy up style details - whitespace fixlets - make local variable definitions more consistent [ Impact: cleanup ] Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 2 +- kernel/perf_counter.c | 39 +++++++++++++++++++++------------------ 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 81ec79c9f19..0e57d8cc5a3 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -562,7 +562,7 @@ struct perf_cpu_context { * * task, softirq, irq, nmi context */ - int recursion[4]; + int recursion[4]; }; #ifdef CONFIG_PERF_COUNTERS diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index ff8b4636f84..df319c48c52 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -16,8 +16,9 @@ #include #include #include -#include +#include #include +#include #include #include #include @@ -26,7 +27,6 @@ #include #include #include -#include #include @@ -65,7 +65,9 @@ void __weak hw_perf_disable(void) { barrier(); } void __weak hw_perf_enable(void) { barrier(); } void __weak hw_perf_counter_setup(int cpu) { barrier(); } -int __weak hw_perf_group_sched_in(struct perf_counter *group_leader, + +int __weak +hw_perf_group_sched_in(struct perf_counter *group_leader, struct perf_cpu_context *cpuctx, struct perf_counter_context *ctx, int cpu) { @@ -127,8 +129,8 @@ static void put_ctx(struct perf_counter_context *ctx) * This has to cope with the fact that until it is locked, * the context could get moved to another task. 
*/ -static struct perf_counter_context *perf_lock_task_context( - struct task_struct *task, unsigned long *flags) +static struct perf_counter_context * +perf_lock_task_context(struct task_struct *task, unsigned long *flags) { struct perf_counter_context *ctx; @@ -1330,9 +1332,9 @@ __perf_counter_init_context(struct perf_counter_context *ctx, static struct perf_counter_context *find_get_context(pid_t pid, int cpu) { - struct perf_cpu_context *cpuctx; - struct perf_counter_context *ctx; struct perf_counter_context *parent_ctx; + struct perf_counter_context *ctx; + struct perf_cpu_context *cpuctx; struct task_struct *task; unsigned long flags; int err; @@ -1664,8 +1666,8 @@ int perf_counter_task_disable(void) */ void perf_counter_update_userpage(struct perf_counter *counter) { - struct perf_mmap_data *data; struct perf_counter_mmap_page *userpg; + struct perf_mmap_data *data; rcu_read_lock(); data = rcu_dereference(counter->data); @@ -1769,10 +1771,11 @@ fail: static void __perf_mmap_data_free(struct rcu_head *rcu_head) { - struct perf_mmap_data *data = container_of(rcu_head, - struct perf_mmap_data, rcu_head); + struct perf_mmap_data *data; int i; + data = container_of(rcu_head, struct perf_mmap_data, rcu_head); + free_page((unsigned long)data->user_page); for (i = 0; i < data->nr_pages; i++) free_page((unsigned long)data->data_pages[i]); @@ -1801,8 +1804,7 @@ static void perf_mmap_close(struct vm_area_struct *vma) struct perf_counter *counter = vma->vm_file->private_data; WARN_ON_ONCE(counter->ctx->parent_ctx); - if (atomic_dec_and_mutex_lock(&counter->mmap_count, - &counter->mmap_mutex)) { + if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) { struct user_struct *user = current_user(); atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm); @@ -1821,11 +1823,11 @@ static struct vm_operations_struct perf_mmap_vmops = { static int perf_mmap(struct file *file, struct vm_area_struct *vma) { struct perf_counter *counter = file->private_data; + unsigned long user_locked, user_lock_limit; struct user_struct *user = current_user(); + unsigned long locked, lock_limit; unsigned long vma_size; unsigned long nr_pages; - unsigned long user_locked, user_lock_limit; - unsigned long locked, lock_limit; long user_extra, extra; int ret = 0; @@ -1900,8 +1902,8 @@ unlock: static int perf_fasync(int fd, struct file *filp, int on) { - struct perf_counter *counter = filp->private_data; struct inode *inode = filp->f_path.dentry->d_inode; + struct perf_counter *counter = filp->private_data; int retval; mutex_lock(&inode->i_mutex); @@ -2412,8 +2414,8 @@ static void perf_counter_output(struct perf_counter *counter, */ struct perf_comm_event { - struct task_struct *task; - char *comm; + struct task_struct *task; + char *comm; int comm_size; struct { @@ -2932,6 +2934,7 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr, int nmi, struct pt_regs *regs, u64 addr) { int neg = atomic64_add_negative(nr, &counter->hw.count); + if (counter->hw.irq_period && !neg) perf_swcounter_overflow(counter, nmi, regs, addr); } @@ -3526,7 +3529,7 @@ inherit_counter(struct perf_counter *parent_counter, /* * Make the child state follow the state of the parent counter, * not its hw_event.disabled bit. We hold the parent's mutex, - * so we won't race with perf_counter_{en,dis}able_family. + * so we won't race with perf_counter_{en, dis}able_family. 
*/ if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE) child_counter->state = PERF_COUNTER_STATE_INACTIVE; -- cgit v1.2.3 From 229c4eedcedcdadf70411120ba34bc37554a74bd Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Thu, 28 May 2009 16:28:53 +0200 Subject: perf_counter tools: Guard against record damaging existing files Signed-off-by: Mike Galbraith Cc: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 23d1224ce98..96bfb7c5f1e 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -340,7 +340,7 @@ static int __cmd_record(int argc, const char **argv) assert(nr_cpus <= MAX_NR_CPUS); assert(nr_cpus >= 0); - output = open(output_name, O_CREAT|O_RDWR, S_IRWXU); + output = open(output_name, O_CREAT|O_EXCL|O_RDWR, S_IRWXU); if (output < 0) { perror("failed to create output file"); exit(-1); -- cgit v1.2.3 From ea5cc87c63b49c133d15ec2911bb2e49e8124516 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 1 Jun 2009 22:31:03 -0300 Subject: perf_counter tools: Add string.[ch] Add hex conversion libraries. We are going to replace sscanf() uses with them. Signed-off-by: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/util/string.c | 34 ++++++++++++++++++++++++++++++++ Documentation/perf_counter/util/string.h | 8 ++++++++ 2 files changed, 42 insertions(+) create mode 100644 Documentation/perf_counter/util/string.c create mode 100644 Documentation/perf_counter/util/string.h diff --git a/Documentation/perf_counter/util/string.c b/Documentation/perf_counter/util/string.c new file mode 100644 index 00000000000..ec33c0c7f4e --- /dev/null +++ b/Documentation/perf_counter/util/string.c @@ -0,0 +1,34 @@ +#include "string.h" + +static int hex(char ch) +{ + if ((ch >= '0') && (ch <= '9')) + return ch - '0'; + if ((ch >= 'a') && (ch <= 'f')) + return ch - 'a' + 10; + if ((ch >= 'A') && (ch <= 'F')) + return ch - 'A' + 10; + return -1; +} + +/* + * While we find nice hex chars, build a long_val. + * Return number of chars processed. 
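+ * + * For example (an illustrative call, not part of the patch): given + * the input "c0ffee kallsyms", hex2u64() consumes the six leading + * hex digits, stores 0xc0ffee in *long_val and returns 6.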
+ */ +int hex2u64(const char *ptr, __u64 *long_val) +{ + const char *p = ptr; + *long_val = 0; + + while (*p) { + const int hex_val = hex(*p); + + if (hex_val < 0) + break; + + *long_val = (*long_val << 4) | hex_val; + p++; + } + + return p - ptr; +} diff --git a/Documentation/perf_counter/util/string.h b/Documentation/perf_counter/util/string.h new file mode 100644 index 00000000000..72812c1c9a7 --- /dev/null +++ b/Documentation/perf_counter/util/string.h @@ -0,0 +1,8 @@ +#ifndef _PERF_STRING_H_ +#define _PERF_STRING_H_ + +#include + +int hex2u64(const char *ptr, __u64 *val); + +#endif -- cgit v1.2.3 From a0055ae2a4e13db9534c438cf8f3896181da6afc Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 1 Jun 2009 17:50:19 -0300 Subject: perf_counter tools: Use hex2u64 in more places This has also a nice side effect, tools built on newer systems such as fedora 10 again work on systems with older versions of glibc: My workstation: [acme@doppio ~]$ rpm -q glibc.x86_64 glibc-2.9-3.x86_64 Test machine: [acme@emilia ~]$ rpm -q glibc.x86_64 glibc-2.5-24 Before: [acme@emilia ~]$ perf perf: /lib64/libc.so.6: version `GLIBC_2.7' not found (required by perf) [acme@emilia ~]$ nm `which perf` | grep GLIBC_2\.7 U __isoc99_sscanf@@GLIBC_2.7 [acme@emilia ~]$ After: [acme@emilia ~]$ perf usage: perf [--version] [--help] COMMAND [ARGS] The most commonly used perf commands are: record Run a command and record its profile into perf.data report Read perf.data (created by perf record) and display the profile stat Run a command and gather performance counter statistics top Run a command and profile it See 'perf help COMMAND' for more information on a specific command. [acme@emilia ~]$ nm `which perf` | grep GLIBC_2\.7 [acme@emilia ~]$ Signed-off-by: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt LKML-Reference: <20090601205019.GA7805@ghostprotocols.net> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 2 + Documentation/perf_counter/builtin-record.c | 56 +++++++++++++++----------- Documentation/perf_counter/builtin-report.c | 1 + Documentation/perf_counter/util/parse-events.c | 27 ++++++++----- Documentation/perf_counter/util/symbol.c | 38 ++--------------- 5 files changed, 54 insertions(+), 70 deletions(-) diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 416ab11e978..3b8275feb88 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -296,6 +296,7 @@ LIB_H += util/quote.h LIB_H += util/util.h LIB_H += util/help.h LIB_H += util/strbuf.h +LIB_H += util/string.h LIB_H += util/run-command.h LIB_H += util/sigchain.h LIB_H += util/symbol.h @@ -315,6 +316,7 @@ LIB_OBJS += util/rbtree.o LIB_OBJS += util/run-command.o LIB_OBJS += util/quote.o LIB_OBJS += util/strbuf.o +LIB_OBJS += util/string.o LIB_OBJS += util/usage.o LIB_OBJS += util/wrapper.o LIB_OBJS += util/sigchain.o diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 96bfb7c5f1e..9c151ded22f 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -5,6 +5,7 @@ #include "util/util.h" #include "util/parse-options.h" #include "util/parse-events.h" +#include "util/string.h" #include @@ -165,12 +166,10 @@ static pid_t pid_synthesize_comm_event(pid_t pid) { struct comm_event comm_ev; char filename[PATH_MAX]; - pid_t spid, ppid; char bf[BUFSIZ]; - int fd, nr, ret; - char comm[18]; + int fd, ret; 
size_t size; - char state; + char *field, *sep; snprintf(filename, sizeof(filename), "/proc/%d/stat", pid); @@ -185,20 +184,22 @@ static pid_t pid_synthesize_comm_event(pid_t pid) } close(fd); + /* 9027 (cat) R 6747 9027 6747 34816 9027 ... */ memset(&comm_ev, 0, sizeof(comm_ev)); - nr = sscanf(bf, "%d %s %c %d %d ", - &spid, comm, &state, &ppid, &comm_ev.pid); - if (nr != 5) { - fprintf(stderr, "couldn't get COMM and pgid, malformed %s\n", - filename); - exit(EXIT_FAILURE); - } + field = strchr(bf, '('); + if (field == NULL) + goto out_failure; + sep = strchr(++field, ')'); + if (sep == NULL) + goto out_failure; + size = sep - field; + memcpy(comm_ev.comm, field, size++); + field = strchr(sep + 4, ' '); + if (field == NULL) + goto out_failure; + comm_ev.pid = atoi(++field); comm_ev.header.type = PERF_EVENT_COMM; comm_ev.tid = pid; - size = strlen(comm); - comm[--size] = '\0'; /* Remove the ')' at the end */ - --size; /* Remove the '(' at the begin */ - memcpy(comm_ev.comm, comm + 1, size); size = ALIGN(size, sizeof(uint64_t)); comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size); @@ -208,6 +209,11 @@ static pid_t pid_synthesize_comm_event(pid_t pid) exit(-1); } return comm_ev.pid; +out_failure: + fprintf(stderr, "couldn't get COMM and pgid, malformed %s\n", + filename); + exit(EXIT_FAILURE); + return -1; } static void pid_synthesize_mmap_events(pid_t pid, pid_t pgid) @@ -223,23 +229,25 @@ static void pid_synthesize_mmap_events(pid_t pid, pid_t pgid) exit(EXIT_FAILURE); } while (1) { - char bf[BUFSIZ]; - unsigned char vm_read, vm_write, vm_exec, vm_mayshare; + char bf[BUFSIZ], *pbf = bf; struct mmap_event mmap_ev = { .header.type = PERF_EVENT_MMAP, }; - unsigned long ino; - int major, minor; + int n; size_t size; if (fgets(bf, sizeof(bf), fp) == NULL) break; /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ - sscanf(bf, "%llx-%llx %c%c%c%c %llx %x:%x %lu", - &mmap_ev.start, &mmap_ev.len, - &vm_read, &vm_write, &vm_exec, &vm_mayshare, - &mmap_ev.pgoff, &major, &minor, &ino); - if (vm_exec == 'x') { + n = hex2u64(pbf, &mmap_ev.start); + if (n < 0) + continue; + pbf += n + 1; + n = hex2u64(pbf, &mmap_ev.len); + if (n < 0) + continue; + pbf += n + 3; + if (*pbf == 'x') { /* vm_exec */ char *execname = strrchr(bf, ' '); if (execname == NULL || execname[1] != '/') diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 4705679deba..7973092094e 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -5,6 +5,7 @@ #include "util/cache.h" #include "util/rbtree.h" #include "util/symbol.h" +#include "util/string.h" #include "perf.h" diff --git a/Documentation/perf_counter/util/parse-events.c b/Documentation/perf_counter/util/parse-events.c index 88c903eb260..2fdfd1d923f 100644 --- a/Documentation/perf_counter/util/parse-events.c +++ b/Documentation/perf_counter/util/parse-events.c @@ -4,6 +4,7 @@ #include "parse-options.h" #include "parse-events.h" #include "exec_cmd.h" +#include "string.h" int nr_counters; @@ -105,22 +106,26 @@ static __u64 match_event_symbols(const char *str) __u64 config, id; int type; unsigned int i; - char mask_str[4]; + const char *sep, *pstr; - if (sscanf(str, "r%llx", &config) == 1) + if (str[0] == 'r' && hex2u64(str + 1, &config) > 0) return config | PERF_COUNTER_RAW_MASK; - switch (sscanf(str, "%d:%llu:%2s", &type, &id, mask_str)) { - case 3: - if (strchr(mask_str, 'k')) + pstr = str; + sep = strchr(pstr, ':'); + if (sep) { + type = atoi(pstr); 
+ pstr = sep + 1; + id = atoi(pstr); + sep = strchr(pstr, ':'); + if (sep) { + pstr = sep + 1; + if (strchr(pstr, 'k')) event_mask[nr_counters] |= EVENT_MASK_USER; - if (strchr(mask_str, 'u')) + if (strchr(pstr, 'u')) event_mask[nr_counters] |= EVENT_MASK_KERNEL; - case 2: - return EID(type, id); - - default: - break; + } + return EID(type, id); } for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { diff --git a/Documentation/perf_counter/util/symbol.c b/Documentation/perf_counter/util/symbol.c index 47281210443..31e8fae5842 100644 --- a/Documentation/perf_counter/util/symbol.c +++ b/Documentation/perf_counter/util/symbol.c @@ -1,5 +1,6 @@ #include "util.h" #include "../perf.h" +#include "string.h" #include "symbol.h" #include @@ -122,39 +123,6 @@ size_t dso__fprintf(struct dso *self, FILE *fp) return ret; } -static int hex(char ch) -{ - if ((ch >= '0') && (ch <= '9')) - return ch - '0'; - if ((ch >= 'a') && (ch <= 'f')) - return ch - 'a' + 10; - if ((ch >= 'A') && (ch <= 'F')) - return ch - 'A' + 10; - return -1; -} - -/* - * While we find nice hex chars, build a long_val. - * Return number of chars processed. - */ -static int hex2long(char *ptr, unsigned long *long_val) -{ - const char *p = ptr; - *long_val = 0; - - while (*p) { - const int hex_val = hex(*p); - - if (hex_val < 0) - break; - - *long_val = (*long_val << 4) | hex_val; - p++; - } - - return p - ptr; -} - static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter) { struct rb_node *nd, *prevnd; @@ -166,7 +134,7 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter) goto out_failure; while (!feof(file)) { - unsigned long start; + __u64 start; struct symbol *sym; int line_len, len; char symbol_type; @@ -180,7 +148,7 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter) line[--line_len] = '\0'; /* \n */ - len = hex2long(line, &start); + len = hex2u64(line, &start); len++; if (len + 2 >= line_len) -- cgit v1.2.3 From c8c96525f3c25f43a4fa230e293c4976c0c36cc1 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 1 Jun 2009 17:50:57 -0300 Subject: perf_counter tools: Add missing rb_erase in dso__delete_symbols Signed-off-by: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt LKML-Reference: <20090601205057.GB7805@ghostprotocols.net> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/util/symbol.c | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/perf_counter/util/symbol.c b/Documentation/perf_counter/util/symbol.c index 31e8fae5842..039931fcb1b 100644 --- a/Documentation/perf_counter/util/symbol.c +++ b/Documentation/perf_counter/util/symbol.c @@ -58,6 +58,7 @@ static void dso__delete_symbols(struct dso *self) while (next) { pos = rb_entry(next, struct symbol, rb_node); next = rb_next(&pos->rb_node); + rb_erase(&pos->rb_node, &self->syms); symbol__delete(pos, self->sym_priv_size); } } -- cgit v1.2.3 From c25486c5ea3ea5586f71285a3aa5cfafb1db59f9 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Tue, 2 Jun 2009 08:09:48 +0200 Subject: perf_counter tools: Make .gitignore reflect perf_counter tools files Make .gitignore reflect perf_counter tools files so git status doesn't gripe about untracked files. 
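
An aside on the parsing conversions above: they all lean on hex2u64(), declared in the new util/string.h but never shown in these diffs. A minimal sketch, assuming it is simply the hex2long() routine deleted from symbol.c generalized to __u64 (the signature is inferred from the call sites, not taken from the source):

#include <linux/types.h>

static int hex(char ch)
{
	if ((ch >= '0') && (ch <= '9'))
		return ch - '0';
	if ((ch >= 'a') && (ch <= 'f'))
		return ch - 'a' + 10;
	if ((ch >= 'A') && (ch <= 'F'))
		return ch - 'A' + 10;
	return -1;
}

/*
 * Consume leading hex digits of ptr into *val; return the number of
 * characters processed. Sketch only: assumed to mirror the removed
 * hex2long(), widened to __u64.
 */
int hex2u64(const char *ptr, __u64 *val)
{
	const char *p = ptr;

	*val = 0;
	while (*p) {
		const int hex_val = hex(*p);

		if (hex_val < 0)
			break;

		*val = (*val << 4) | hex_val;
		p++;
	}

	return p - ptr;
}

Note that a helper of this shape returns 0, never a negative value, when no hex digits are present, so the n < 0 checks in builtin-record.c above would be defensive rather than load-bearing.
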
Signed-off-by: Mike Galbraith Cc: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/.gitignore | 187 +++------------------------------- 1 file changed, 12 insertions(+), 175 deletions(-) diff --git a/Documentation/perf_counter/.gitignore b/Documentation/perf_counter/.gitignore index 41c0b20a76c..d69a759a104 100644 --- a/Documentation/perf_counter/.gitignore +++ b/Documentation/perf_counter/.gitignore @@ -1,179 +1,16 @@ -GIT-BUILD-OPTIONS -GIT-CFLAGS -GIT-GUI-VARS -GIT-VERSION-FILE -git -git-add -git-add--interactive -git-am -git-annotate -git-apply -git-archimport -git-archive -git-bisect -git-bisect--helper -git-blame -git-branch -git-bundle -git-cat-file -git-check-attr -git-check-ref-format -git-checkout -git-checkout-index -git-cherry -git-cherry-pick -git-clean -git-clone -git-commit -git-commit-tree -git-config -git-count-objects -git-cvsexportcommit -git-cvsimport -git-cvsserver -git-daemon -git-diff -git-diff-files -git-diff-index -git-diff-tree -git-difftool -git-difftool--helper -git-describe -git-fast-export -git-fast-import -git-fetch -git-fetch--tool -git-fetch-pack -git-filter-branch -git-fmt-merge-msg -git-for-each-ref -git-format-patch -git-fsck -git-fsck-objects -git-gc -git-get-tar-commit-id -git-grep -git-hash-object -git-help -git-http-fetch -git-http-push -git-imap-send -git-index-pack -git-init -git-init-db -git-instaweb -git-log -git-lost-found -git-ls-files -git-ls-remote -git-ls-tree -git-mailinfo -git-mailsplit -git-merge -git-merge-base -git-merge-index -git-merge-file -git-merge-tree -git-merge-octopus -git-merge-one-file -git-merge-ours -git-merge-recursive -git-merge-resolve -git-merge-subtree -git-mergetool -git-mergetool--lib -git-mktag -git-mktree -git-name-rev -git-mv -git-pack-redundant -git-pack-objects -git-pack-refs -git-parse-remote -git-patch-id -git-peek-remote -git-prune -git-prune-packed -git-pull -git-push -git-quiltimport -git-read-tree -git-rebase -git-rebase--interactive -git-receive-pack -git-reflog -git-relink -git-remote -git-repack -git-repo-config -git-request-pull -git-rerere -git-reset -git-rev-list -git-rev-parse -git-revert -git-rm -git-send-email -git-send-pack -git-sh-setup -git-shell -git-shortlog -git-show -git-show-branch -git-show-index -git-show-ref -git-stage -git-stash -git-status -git-stripspace -git-submodule -git-svn -git-symbolic-ref -git-tag -git-tar-tree -git-unpack-file -git-unpack-objects -git-update-index -git-update-ref -git-update-server-info -git-upload-archive -git-upload-pack -git-var -git-verify-pack -git-verify-tag -git-web--browse -git-whatchanged -git-write-tree -git-core-*/?* -gitk-wish -gitweb/gitweb.cgi -test-chmtime -test-ctype -test-date -test-delta -test-dump-cache-tree -test-genrandom -test-match-trees -test-parse-options -test-path-utils -test-sha1 -test-sigchain +PERF-BUILD-OPTIONS +PERF-CFLAGS +PERF-GUI-VARS +PERF-VERSION-FILE +perf +perf-help +perf-record +perf-report +perf-stat +perf-top +perf*.1 +perf*.xml common-cmds.h -*.tar.gz -*.dsc -*.deb -git.spec -*.exe -*.[aos] -*.py[co] -config.mak -autom4te.cache -config.cache -config.log -config.status -config.mak.autogen -config.mak.append -configure tags TAGS cscope* -- cgit v1.2.3 From c1079abd1d5e66edabeb04d75e48e604cd8c167f Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Tue, 2 Jun 2009 10:17:34 +0200 Subject: perf_counter tools: Cleanup Makefile We currently build perf-stat/record etc, only to do nothing with them. 
We also install the perf binary in two places, $prefix/bin and $perfexec_instdir, which appears to be for binaries which perf would exec were a command not linked in. Correct this, and comment out broken/incomplete targets dist and coverage. Signed-off-by: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 141 +++++++++++++++++++----------------- 1 file changed, 73 insertions(+), 68 deletions(-) diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 3b8275feb88..eae88561233 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -260,8 +260,6 @@ PROGRAMS += perf # List built-in command $C whose implementation cmd_$C() is not in # builtin-$C.o but is linked in as part of some other command. -BUILT_INS += $(patsubst builtin-%.o,perf-%$X,$(BUILTIN_OBJS)) - # # None right now: # @@ -791,12 +789,14 @@ export perfexec_instdir install: all $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' - $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' - $(INSTALL) $(ALL_PROGRAMS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' $(INSTALL) perf$X '$(DESTDIR_SQ)$(bindir_SQ)' +ifdef BUILT_INS + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' + $(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' ifneq (,$X) $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), $(RM) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$p';) endif +endif install-doc: $(MAKE) -C Documentation install @@ -824,52 +824,55 @@ quick-install-html: ### Maintainer's dist rules - -perf.spec: perf.spec.in - sed -e 's/@@VERSION@@/$(PERF_VERSION)/g' < $< > $@+ - mv $@+ $@ - -PERF_TARNAME=perf-$(PERF_VERSION) -dist: perf.spec perf-archive$(X) configure - ./perf-archive --format=tar \ - --prefix=$(PERF_TARNAME)/ HEAD^{tree} > $(PERF_TARNAME).tar - @mkdir -p $(PERF_TARNAME) - @cp perf.spec configure $(PERF_TARNAME) - @echo $(PERF_VERSION) > $(PERF_TARNAME)/version - $(TAR) rf $(PERF_TARNAME).tar \ - $(PERF_TARNAME)/perf.spec \ - $(PERF_TARNAME)/configure \ - $(PERF_TARNAME)/version - @$(RM) -r $(PERF_TARNAME) - gzip -f -9 $(PERF_TARNAME).tar - -htmldocs = perf-htmldocs-$(PERF_VERSION) -manpages = perf-manpages-$(PERF_VERSION) -dist-doc: - $(RM) -r .doc-tmp-dir - mkdir .doc-tmp-dir - $(MAKE) -C Documentation WEBDOC_DEST=../.doc-tmp-dir install-webdoc - cd .doc-tmp-dir && $(TAR) cf ../$(htmldocs).tar . - gzip -n -9 -f $(htmldocs).tar - : - $(RM) -r .doc-tmp-dir - mkdir -p .doc-tmp-dir/man1 .doc-tmp-dir/man5 .doc-tmp-dir/man7 - $(MAKE) -C Documentation DESTDIR=./ \ - man1dir=../.doc-tmp-dir/man1 \ - man5dir=../.doc-tmp-dir/man5 \ - man7dir=../.doc-tmp-dir/man7 \ - install - cd .doc-tmp-dir && $(TAR) cf ../$(manpages).tar . 
- gzip -n -9 -f $(manpages).tar - $(RM) -r .doc-tmp-dir - -rpm: dist - $(RPMBUILD) -ta $(PERF_TARNAME).tar.gz +# +# None right now +# +# +# perf.spec: perf.spec.in +# sed -e 's/@@VERSION@@/$(PERF_VERSION)/g' < $< > $@+ +# mv $@+ $@ +# +# PERF_TARNAME=perf-$(PERF_VERSION) +# dist: perf.spec perf-archive$(X) configure +# ./perf-archive --format=tar \ +# --prefix=$(PERF_TARNAME)/ HEAD^{tree} > $(PERF_TARNAME).tar +# @mkdir -p $(PERF_TARNAME) +# @cp perf.spec configure $(PERF_TARNAME) +# @echo $(PERF_VERSION) > $(PERF_TARNAME)/version +# $(TAR) rf $(PERF_TARNAME).tar \ +# $(PERF_TARNAME)/perf.spec \ +# $(PERF_TARNAME)/configure \ +# $(PERF_TARNAME)/version +# @$(RM) -r $(PERF_TARNAME) +# gzip -f -9 $(PERF_TARNAME).tar +# +# htmldocs = perf-htmldocs-$(PERF_VERSION) +# manpages = perf-manpages-$(PERF_VERSION) +# dist-doc: +# $(RM) -r .doc-tmp-dir +# mkdir .doc-tmp-dir +# $(MAKE) -C Documentation WEBDOC_DEST=../.doc-tmp-dir install-webdoc +# cd .doc-tmp-dir && $(TAR) cf ../$(htmldocs).tar . +# gzip -n -9 -f $(htmldocs).tar +# : +# $(RM) -r .doc-tmp-dir +# mkdir -p .doc-tmp-dir/man1 .doc-tmp-dir/man5 .doc-tmp-dir/man7 +# $(MAKE) -C Documentation DESTDIR=./ \ +# man1dir=../.doc-tmp-dir/man1 \ +# man5dir=../.doc-tmp-dir/man5 \ +# man7dir=../.doc-tmp-dir/man7 \ +# install +# cd .doc-tmp-dir && $(TAR) cf ../$(manpages).tar . +# gzip -n -9 -f $(manpages).tar +# $(RM) -r .doc-tmp-dir +# +# rpm: dist +# $(RPMBUILD) -ta $(PERF_TARNAME).tar.gz ### Cleaning rules distclean: clean - $(RM) configure +# $(RM) configure clean: $(RM) *.o */*.o $(LIB_FILE) @@ -896,25 +899,27 @@ check-builtins:: ### Test suite coverage testing # -.PHONY: coverage coverage-clean coverage-build coverage-report - -coverage: - $(MAKE) coverage-build - $(MAKE) coverage-report - -coverage-clean: - rm -f *.gcda *.gcno - -COVERAGE_CFLAGS = $(CFLAGS) -O0 -ftest-coverage -fprofile-arcs -COVERAGE_LDFLAGS = $(CFLAGS) -O0 -lgcov - -coverage-build: coverage-clean - $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" all - $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" \ - -j1 test - -coverage-report: - gcov -b *.c */*.c - grep '^function.*called 0 ' *.c.gcov */*.c.gcov \ - | sed -e 's/\([^:]*\)\.gcov: *function \([^ ]*\) called.*/\1: \2/' \ - | tee coverage-untested-functions +# None right now +# +# .PHONY: coverage coverage-clean coverage-build coverage-report +# +# coverage: +# $(MAKE) coverage-build +# $(MAKE) coverage-report +# +# coverage-clean: +# rm -f *.gcda *.gcno +# +# COVERAGE_CFLAGS = $(CFLAGS) -O0 -ftest-coverage -fprofile-arcs +# COVERAGE_LDFLAGS = $(CFLAGS) -O0 -lgcov +# +# coverage-build: coverage-clean +# $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" all +# $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" \ +# -j1 test +# +# coverage-report: +# gcov -b *.c */*.c +# grep '^function.*called 0 ' *.c.gcov */*.c.gcov \ +# | sed -e 's/\([^:]*\)\.gcov: *function \([^ ]*\) called.*/\1: \2/' \ +# | tee coverage-untested-functions -- cgit v1.2.3 From 10a2825514a988225ac2e336c7a9502c4ca57c39 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Tue, 2 Jun 2009 11:04:44 +0200 Subject: perf_counter tools: Fix uninitialized variable in perf-report.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # make prefix=/usr/local V=1 gcc -o builtin-report.o -c -O2 -ggdb3 -Wall -Werror -Wstrict-prototypes -Wmissing-declarations -Wmissing-prototypes -std=gnu99 -Wdeclaration-after-statement -DSHA1_HEADER='' builtin-report.c cc1: warnings being 
treated as errors builtin-report.c: In function ‘__cmd_report’: builtin-report.c:626: error: ‘cwdlen’ may be used uninitialized in this function Signed-off-by: Mike Galbraith Cc: Peter Zijlstra Signed-off-by: Ingo Molnar LKML-Reference: --- Documentation/perf_counter/builtin-report.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 7973092094e..20a4e519dfd 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -653,8 +653,10 @@ static int __cmd_report(void) return EXIT_FAILURE; } cwdlen = strlen(cwd); - } else + } else { cwdp = NULL; + cwdlen = 0; + } remap: buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, MAP_SHARED, input, offset); -- cgit v1.2.3 From f38b082081bf69a06fffb8b32a175999e2320c5b Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 2 Jun 2009 21:05:16 +1000 Subject: perf_counter: Initialize per-cpu context earlier on cpu up This arranges for perf_counter's notifier for cpu hotplug operations to be called earlier than the migration notifier in sched.c by increasing its priority to 20, compared to the 10 for the migration notifier. The reason for doing this is that a subsequent commit to convert the cpu migration counter to use the generic swcounter infrastructure will add a call into the perf_counter subsystem when tasks get migrated. Therefore the perf_counter subsystem needs a chance to initialize its per-cpu data for the new cpu before it can get called from the migration code. This also adds a comment to the migration notifier noting that its priority needs to be lower than that of the perf_counter notifier. Signed-off-by: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <18981.1900.792795.836858@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 4 ++++ kernel/sched.c | 6 ++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index df319c48c52..8d2653f137e 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -3902,8 +3902,12 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) return NOTIFY_OK; } +/* + * This has to have a higher priority than migration_notifier in sched.c. + */ static struct notifier_block __cpuinitdata perf_cpu_nb = { .notifier_call = perf_cpu_notify, + .priority = 20, }; void __init perf_counter_init(void) diff --git a/kernel/sched.c b/kernel/sched.c index ad079f07c9c..3226cc132e9 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -7319,8 +7319,10 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) return NOTIFY_OK; } -/* Register at highest priority so that task migration (migrate_all_tasks) - * happens before everything else. +/* + * Register at high priority so that task migration (migrate_all_tasks) + * happens before everything else. This has to be lower priority than + * the notifier in the perf_counter subsystem, though. */ static struct notifier_block __cpuinitdata migration_notifier = { .notifier_call = migration_call, -- cgit v1.2.3 From 3f731ca60afc29f5bcdb5fd2a04391466313a9ac Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 1 Jun 2009 17:52:30 +1000 Subject: perf_counter: Fix cpu migration counter This fixes the cpu migration software counter to count correctly even when contexts get swapped from one task to another. 
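
For background on the priority numbers in the previous patch: notifier chains invoke callbacks in descending .priority order (the default is 0), which is what lets the perf_counter subsystem initialize its per-cpu data before the scheduler's migration notifier runs. A minimal sketch of the mechanism, with illustrative names that are not from the patch:

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

static int __cpuinit example_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	/* runs before any lower-priority notifier sees this transition */
	if (action == CPU_UP_PREPARE)
		pr_info("early per-cpu setup for cpu %u\n", cpu);

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata example_cpu_nb = {
	.notifier_call	= example_cpu_notify,
	.priority	= 20,	/* higher value => called earlier */
};

/*
 * Registration, e.g. from an __init function:
 *	register_cpu_notifier(&example_cpu_nb);
 */
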
Previously the cpu migration counts reported by perf stat were bogus, ranging from negative to several thousand for a single "lat_ctx 2 8 32" run. With this patch the cpu migration count reported for "lat_ctx 2 8 32" is almost always between 35 and 44. This fixes the problem by adding a call into the perf_counter code from set_task_cpu when tasks are migrated. This enables us to use the generic swcounter code (with some modifications) for the cpu migration counter. This modifies the swcounter code to allow a NULL regs pointer to be passed in to perf_swcounter_ctx_event() etc. The cpu migration counter does this because there isn't necessarily a pt_regs struct for the task available. In this case, the counter will not have interrupt capability - but the migration counter didn't have interrupt capability before, so this is no loss. Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <18979.35006.819769.416327@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 4 +++ kernel/perf_counter.c | 74 +++++++++++++------------------------------- kernel/sched.c | 1 + 3 files changed, 26 insertions(+), 53 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 0e57d8cc5a3..deb9acf9ad2 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -615,6 +615,8 @@ extern void perf_counter_munmap(unsigned long addr, unsigned long len, extern void perf_counter_comm(struct task_struct *tsk); +extern void perf_counter_task_migration(struct task_struct *task, int cpu); + #define MAX_STACK_DEPTH 255 struct perf_callchain_entry { @@ -668,6 +670,8 @@ perf_counter_munmap(unsigned long addr, unsigned long len, static inline void perf_counter_comm(struct task_struct *tsk) { } static inline void perf_counter_init(void) { } +static inline void perf_counter_task_migration(struct task_struct *task, + int cpu) { } #endif #endif /* __KERNEL__ */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 8d2653f137e..cd94cf3bf9e 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2921,11 +2921,13 @@ static int perf_swcounter_match(struct perf_counter *counter, if (counter->hw_event.config != event_config) return 0; - if (counter->hw_event.exclude_user && user_mode(regs)) - return 0; + if (regs) { + if (counter->hw_event.exclude_user && user_mode(regs)) + return 0; - if (counter->hw_event.exclude_kernel && !user_mode(regs)) - return 0; + if (counter->hw_event.exclude_kernel && !user_mode(regs)) + return 0; + } return 1; } @@ -2935,7 +2937,7 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr, { int neg = atomic64_add_negative(nr, &counter->hw.count); - if (counter->hw.irq_period && !neg) + if (counter->hw.irq_period && !neg && regs) perf_swcounter_overflow(counter, nmi, regs, addr); } @@ -3151,55 +3153,24 @@ static const struct pmu perf_ops_task_clock = { /* * Software counter: cpu migrations */ - -static inline u64 get_cpu_migrations(struct perf_counter *counter) -{ - struct task_struct *curr = counter->ctx->task; - - if (curr) - return curr->se.nr_migrations; - return cpu_nr_migrations(smp_processor_id()); -} - -static void cpu_migrations_perf_counter_update(struct perf_counter *counter) -{ - u64 prev, now; - s64 delta; - - prev = atomic64_read(&counter->hw.prev_count); - now = get_cpu_migrations(counter); - - atomic64_set(&counter->hw.prev_count, now); - - delta = now - prev; - - 
atomic64_add(delta, &counter->count); -} - -static void cpu_migrations_perf_counter_read(struct perf_counter *counter) +void perf_counter_task_migration(struct task_struct *task, int cpu) { - cpu_migrations_perf_counter_update(counter); -} + struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); + struct perf_counter_context *ctx; -static int cpu_migrations_perf_counter_enable(struct perf_counter *counter) -{ - if (counter->prev_state <= PERF_COUNTER_STATE_OFF) - atomic64_set(&counter->hw.prev_count, - get_cpu_migrations(counter)); - return 0; -} + perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE, + PERF_COUNT_CPU_MIGRATIONS, + 1, 1, NULL, 0); -static void cpu_migrations_perf_counter_disable(struct perf_counter *counter) -{ - cpu_migrations_perf_counter_update(counter); + ctx = perf_pin_task_context(task); + if (ctx) { + perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE, + PERF_COUNT_CPU_MIGRATIONS, + 1, 1, NULL, 0); + perf_unpin_context(ctx); + } } -static const struct pmu perf_ops_cpu_migrations = { - .enable = cpu_migrations_perf_counter_enable, - .disable = cpu_migrations_perf_counter_disable, - .read = cpu_migrations_perf_counter_read, -}; - #ifdef CONFIG_EVENT_PROFILE void perf_tpcounter_event(int event_id) { @@ -3272,11 +3243,8 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) case PERF_COUNT_PAGE_FAULTS_MIN: case PERF_COUNT_PAGE_FAULTS_MAJ: case PERF_COUNT_CONTEXT_SWITCHES: - pmu = &perf_ops_generic; - break; case PERF_COUNT_CPU_MIGRATIONS: - if (!counter->hw_event.exclude_kernel) - pmu = &perf_ops_cpu_migrations; + pmu = &perf_ops_generic; break; } diff --git a/kernel/sched.c b/kernel/sched.c index 3226cc132e9..8d43347a0c0 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1977,6 +1977,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) if (task_hot(p, old_rq->clock, NULL)) schedstat_inc(p, se.nr_forced2_migrations); #endif + perf_counter_task_migration(p, new_cpu); } p->se.vruntime -= old_cfsrq->min_vruntime - new_cfsrq->min_vruntime; -- cgit v1.2.3 From bf4e0ed3d027ce581be18496036862131b5f32aa Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 1 Jun 2009 17:53:16 +1000 Subject: perf_counter: Remove unused prev_state field This removes the prev_state field of struct perf_counter since it is now unused. It was only used by the cpu migration counter, which doesn't use it any more. Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <18979.35052.915728.626374@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 1 - kernel/perf_counter.c | 4 ---- 2 files changed, 5 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index deb9acf9ad2..d970fbc16af 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -427,7 +427,6 @@ struct perf_counter { const struct pmu *pmu; enum perf_counter_active_state state; - enum perf_counter_active_state prev_state; atomic64_t count; /* diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index cd94cf3bf9e..fbed4d28ad7 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -572,7 +572,6 @@ group_sched_in(struct perf_counter *group_counter, if (ret) return ret < 0 ? 
ret : 0; - group_counter->prev_state = group_counter->state; if (counter_sched_in(group_counter, cpuctx, ctx, cpu)) return -EAGAIN; @@ -580,7 +579,6 @@ group_sched_in(struct perf_counter *group_counter, * Schedule in siblings as one group (if any): */ list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { - counter->prev_state = counter->state; if (counter_sched_in(counter, cpuctx, ctx, cpu)) { partial_group = counter; goto group_error; @@ -657,7 +655,6 @@ static void add_counter_to_ctx(struct perf_counter *counter, struct perf_counter_context *ctx) { list_add_counter(counter, ctx); - counter->prev_state = PERF_COUNTER_STATE_OFF; counter->tstamp_enabled = ctx->time; counter->tstamp_running = ctx->time; counter->tstamp_stopped = ctx->time; @@ -820,7 +817,6 @@ static void __perf_counter_enable(void *info) ctx->is_active = 1; update_context_time(ctx); - counter->prev_state = counter->state; if (counter->state >= PERF_COUNTER_STATE_INACTIVE) goto unlock; counter->state = PERF_COUNTER_STATE_INACTIVE; -- cgit v1.2.3 From 709e50cf870e61745b39552044aa6c7c38e4f9e0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Jun 2009 14:13:15 +0200 Subject: perf_counter: Use PID namespaces properly Stop using task_struct::pid and start using PID namespaces. PIDs will be reported in the PID namespace of the monitoring task at the moment of counter creation. Signed-off-by: Peter Zijlstra Cc: Eric W. Biederman Cc: Oleg Nesterov Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 3 +++ kernel/perf_counter.c | 42 ++++++++++++++++++++++++++++++++++-------- 2 files changed, 37 insertions(+), 8 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index d970fbc16af..9ec20fc6bd3 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -317,6 +317,7 @@ enum perf_event_type { #include #include #include +#include #include struct task_struct; @@ -500,6 +501,8 @@ struct perf_counter { void (*destroy)(struct perf_counter *); struct rcu_head rcu_head; + + struct pid_namespace *ns; #endif }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index fbed4d28ad7..caa012cfe49 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1432,6 +1432,8 @@ static void free_counter_rcu(struct rcu_head *head) struct perf_counter *counter; counter = container_of(head, struct perf_counter, rcu_head); + if (counter->ns) + put_pid_ns(counter->ns); kfree(counter); } @@ -2267,6 +2269,28 @@ static void perf_output_end(struct perf_output_handle *handle) rcu_read_unlock(); } +static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p) +{ + /* + * only top level counters have the pid namespace they were created in + */ + if (counter->parent) + counter = counter->parent; + + return task_tgid_nr_ns(p, counter->ns); +} + +static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p) +{ + /* + * only top level counters have the pid namespace they were created in + */ + if (counter->parent) + counter = counter->parent; + + return task_pid_nr_ns(p, counter->ns); +} + static void perf_counter_output(struct perf_counter *counter, int nmi, struct pt_regs *regs, u64 addr) { @@ -2303,8 +2327,8 @@ static void perf_counter_output(struct perf_counter *counter, if (record_type & PERF_RECORD_TID) { /* namespace issues */ - tid_entry.pid = 
current->group_leader->pid; - tid_entry.tid = current->pid; + tid_entry.pid = perf_counter_pid(counter, current); + tid_entry.tid = perf_counter_tid(counter, current); header.type |= PERF_RECORD_TID; header.size += sizeof(tid_entry); @@ -2432,6 +2456,9 @@ static void perf_counter_comm_output(struct perf_counter *counter, if (ret) return; + comm_event->event.pid = perf_counter_pid(counter, comm_event->task); + comm_event->event.tid = perf_counter_tid(counter, comm_event->task); + perf_output_put(&handle, comm_event->event); perf_output_copy(&handle, comm_event->comm, comm_event->comm_size); @@ -2504,8 +2531,6 @@ void perf_counter_comm(struct task_struct *task) .task = task, .event = { .header = { .type = PERF_EVENT_COMM, }, - .pid = task->group_leader->pid, - .tid = task->pid, }, }; @@ -2542,6 +2567,9 @@ static void perf_counter_mmap_output(struct perf_counter *counter, if (ret) return; + mmap_event->event.pid = perf_counter_pid(counter, current); + mmap_event->event.tid = perf_counter_tid(counter, current); + perf_output_put(&handle, mmap_event->event); perf_output_copy(&handle, mmap_event->file_name, mmap_event->file_size); @@ -2641,8 +2669,6 @@ void perf_counter_mmap(unsigned long addr, unsigned long len, .file = file, .event = { .header = { .type = PERF_EVENT_MMAP, }, - .pid = current->group_leader->pid, - .tid = current->pid, .start = addr, .len = len, .pgoff = pgoff, @@ -2664,8 +2690,6 @@ void perf_counter_munmap(unsigned long addr, unsigned long len, .file = file, .event = { .header = { .type = PERF_EVENT_MUNMAP, }, - .pid = current->group_leader->pid, - .tid = current->pid, .start = addr, .len = len, .pgoff = pgoff, @@ -3445,6 +3469,8 @@ SYSCALL_DEFINE5(perf_counter_open, list_add_tail(&counter->owner_entry, ¤t->perf_counter_list); mutex_unlock(¤t->perf_counter_mutex); + counter->ns = get_pid_ns(current->nsproxy->pid_ns); + fput_light(counter_file, fput_needed2); out_fput: -- cgit v1.2.3 From f70e87d7a6d9c5a23d5f43ed0a0c224c157ef597 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Jun 2009 14:13:24 +0200 Subject: perf_counter: tools: Expand the COMM,MMAP event synthesizer Include code to pre-construct mappings based on /proc, on system wide recording. Fix the existing code to properly fill out ->pid and ->tid. The PID should be the Thread Group ID (PIDTYPE_PID of task->group_leader) The TID should be the Thread ID (PIDTYPE_PID of task) Furthermore, change the default sorting of report to comm,dso for a better quick overview. 
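
The pid/tid convention this patch fixes up matches what userspace sees from getpid() and gettid(); a hypothetical stand-alone demo, not part of the tools:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/*
 * Hypothetical demo, not from the patch: ->pid in the synthesized
 * events corresponds to the thread group id (getpid()), while ->tid
 * corresponds to the per-thread id (gettid(), called via syscall(2)
 * since glibc of this era shipped no wrapper).
 */
int main(void)
{
	printf("pid (tgid): %d\n", getpid());
	printf("tid       : %ld\n", (long)syscall(SYS_gettid));
	return 0;
}

For the main thread the two ids coincide; for every other thread in the group they differ, which is why the full synthesizer below walks /proc/<pid>/task and emits one COMM event per thread.
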
Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 84 ++++++++++++++++++++++------- Documentation/perf_counter/builtin-report.c | 2 +- 2 files changed, 65 insertions(+), 21 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 9c151ded22f..810fc275ca6 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -162,7 +162,7 @@ struct comm_event { char comm[16]; }; -static pid_t pid_synthesize_comm_event(pid_t pid) +static void pid_synthesize_comm_event(pid_t pid, int full) { struct comm_event comm_ev; char filename[PATH_MAX]; @@ -170,6 +170,8 @@ static pid_t pid_synthesize_comm_event(pid_t pid) int fd, ret; size_t size; char *field, *sep; + DIR *tasks; + struct dirent dirent, *next; snprintf(filename, sizeof(filename), "/proc/%d/stat", pid); @@ -194,29 +196,50 @@ static pid_t pid_synthesize_comm_event(pid_t pid) goto out_failure; size = sep - field; memcpy(comm_ev.comm, field, size++); - field = strchr(sep + 4, ' '); - if (field == NULL) - goto out_failure; - comm_ev.pid = atoi(++field); + + comm_ev.pid = pid; comm_ev.header.type = PERF_EVENT_COMM; - comm_ev.tid = pid; size = ALIGN(size, sizeof(uint64_t)); comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size); - ret = write(output, &comm_ev, comm_ev.header.size); - if (ret < 0) { - perror("failed to write"); - exit(-1); + if (!full) { + comm_ev.tid = pid; + + ret = write(output, &comm_ev, comm_ev.header.size); + if (ret < 0) { + perror("failed to write"); + exit(-1); + } + return; + } + + snprintf(filename, sizeof(filename), "/proc/%d/task", pid); + + tasks = opendir(filename); + while (!readdir_r(tasks, &dirent, &next) && next) { + char *end; + pid = strtol(dirent.d_name, &end, 10); + if (*end) + continue; + + comm_ev.tid = pid; + + ret = write(output, &comm_ev, comm_ev.header.size); + if (ret < 0) { + perror("failed to write"); + exit(-1); + } } - return comm_ev.pid; + closedir(tasks); + return; + out_failure: fprintf(stderr, "couldn't get COMM and pgid, malformed %s\n", filename); exit(EXIT_FAILURE); - return -1; } -static void pid_synthesize_mmap_events(pid_t pid, pid_t pgid) +static void pid_synthesize_mmap_events(pid_t pid) { char filename[PATH_MAX]; FILE *fp; @@ -261,7 +284,7 @@ static void pid_synthesize_mmap_events(pid_t pid, pid_t pgid) mmap_ev.len -= mmap_ev.start; mmap_ev.header.size = (sizeof(mmap_ev) - (sizeof(mmap_ev.filename) - size)); - mmap_ev.pid = pgid; + mmap_ev.pid = pid; mmap_ev.tid = pid; if (write(output, &mmap_ev, mmap_ev.header.size) < 0) { @@ -274,6 +297,28 @@ static void pid_synthesize_mmap_events(pid_t pid, pid_t pgid) fclose(fp); } +static void synthesize_events(void) +{ + DIR *proc; + struct dirent dirent, *next; + + proc = opendir("/proc"); + + while (!readdir_r(proc, &dirent, &next) && next) { + char *end; + pid_t pid; + + pid = strtol(dirent.d_name, &end, 10); + if (*end) /* only interested in proper numerical dirents */ + continue; + + pid_synthesize_comm_event(pid, 1); + pid_synthesize_mmap_events(pid); + } + + closedir(proc); +} + static void open_counters(int cpu, pid_t pid) { struct perf_counter_hw_event hw_event; @@ -281,8 +326,8 @@ static void open_counters(int cpu, pid_t pid) int track = 1; if (pid > 0) { - pid_t pgid = pid_synthesize_comm_event(pid); - 
pid_synthesize_mmap_events(pid, pgid); + pid_synthesize_comm_event(pid, 0); + pid_synthesize_mmap_events(pid); } group_fd = -1; @@ -348,7 +393,7 @@ static int __cmd_record(int argc, const char **argv) assert(nr_cpus <= MAX_NR_CPUS); assert(nr_cpus >= 0); - output = open(output_name, O_CREAT|O_EXCL|O_RDWR, S_IRWXU); + output = open(output_name, O_CREAT|O_EXCL|O_TRUNC|O_RDWR, S_IRUSR|S_IWUSR); if (output < 0) { perror("failed to create output file"); exit(-1); @@ -385,9 +430,8 @@ static int __cmd_record(int argc, const char **argv) } } - /* - * TODO: store the current /proc/$/maps information somewhere - */ + if (system_wide) + synthesize_events(); while (!done) { int hits = events; diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 20a4e519dfd..0558c1e1aa5 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -18,7 +18,7 @@ static char const *input_name = "perf.data"; static char *vmlinux = NULL; -static char *sort_order = "pid,symbol"; +static char *sort_order = "comm,dso"; static int input; static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; -- cgit v1.2.3 From 97124d5e2df5b9eaa5bb684bb1e8ebc7e29d0f5d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Jun 2009 15:52:24 +0200 Subject: perf_counter: tools: Better handle existing data files Provide an argument (-f) to overwrite existing data files. Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 810fc275ca6..bace7a8edf8 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -7,6 +7,7 @@ #include "util/parse-events.h" #include "util/string.h" +#include #include #define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1) @@ -26,7 +27,7 @@ static unsigned int realtime_prio = 0; static int system_wide = 0; static pid_t target_pid = -1; static int inherit = 1; -static int nmi = 1; +static int force = 0; const unsigned int default_count[] = { 1000000, @@ -337,7 +338,6 @@ static void open_counters(int cpu, pid_t pid) hw_event.config = event_id[counter]; hw_event.irq_period = event_count[counter]; hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID; - hw_event.nmi = nmi; hw_event.mmap = track; hw_event.comm = track; hw_event.inherit = (cpu < 0) && inherit; @@ -387,13 +387,20 @@ static int __cmd_record(int argc, const char **argv) int i, counter; pid_t pid; int ret; + struct stat st; page_size = sysconf(_SC_PAGE_SIZE); nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); assert(nr_cpus <= MAX_NR_CPUS); assert(nr_cpus >= 0); - output = open(output_name, O_CREAT|O_EXCL|O_TRUNC|O_RDWR, S_IRUSR|S_IWUSR); + if (!stat(output_name, &st) && !force) { + fprintf(stderr, "Error, output file: %s exists, use -f to overwrite.\n", + output_name); + exit(-1); + } + + output = open(output_name, O_CREAT|O_TRUNC|O_RDWR, S_IRUSR|S_IWUSR); if (output < 0) { perror("failed to create output file"); exit(-1); @@ -473,6 +480,8 @@ static const struct option options[] = { "collect data with this RT SCHED_FIFO priority"), OPT_BOOLEAN('a', "all-cpus", &system_wide, "system-wide collection from all CPUs"), + OPT_BOOLEAN('f', 
"force", &force, + "overwrite existing data file"), OPT_END() }; -- cgit v1.2.3 From 4593bba8679b925a056f84edac061676e7eda71c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 2 Jun 2009 15:34:25 +0200 Subject: perf report: Clean up the default output - extra space between columns - left-aligned the symbol column - moved the no-symbols printout to -v Acked-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 55 +++++++++++++++-------------- 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 0558c1e1aa5..19c1e056bb6 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -84,24 +84,25 @@ static struct dso *dsos__findnew(const char *name) struct dso *dso = dsos__find(name); int nr; - if (dso == NULL) { - dso = dso__new(name, 0); - if (!dso) - goto out_delete_dso; - - nr = dso__load(dso, NULL); - if (nr < 0) { - fprintf(stderr, "Failed to open: %s\n", name); - goto out_delete_dso; - } - if (!nr) { - fprintf(stderr, - "Failed to find debug symbols for: %s, maybe install a debug package?\n", - name); - } + if (dso) + return dso; + + dso = dso__new(name, 0); + if (!dso) + goto out_delete_dso; - dsos__add(dso); + nr = dso__load(dso, NULL); + if (nr < 0) { + fprintf(stderr, "Failed to open: %s\n", name); + goto out_delete_dso; } + if (!nr && verbose) { + fprintf(stderr, + "No symbols found in: %s, maybe install a debug package?\n", + name); + } + + dsos__add(dso); return dso; @@ -302,11 +303,11 @@ sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) static size_t sort__thread_print(FILE *fp, struct hist_entry *self) { - return fprintf(fp, " %16s:%5d", self->thread->comm ?: "", self->thread->pid); + return fprintf(fp, " %16s:%5d", self->thread->comm ?: "", self->thread->pid); } static struct sort_entry sort_thread = { - .header = " Command: Pid ", + .header = " Command: Pid ", .cmp = sort__thread_cmp, .print = sort__thread_print, }; @@ -332,11 +333,11 @@ sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) static size_t sort__comm_print(FILE *fp, struct hist_entry *self) { - return fprintf(fp, " %16s", self->thread->comm ?: ""); + return fprintf(fp, " %16s", self->thread->comm ?: ""); } static struct sort_entry sort_comm = { - .header = " Command", + .header = " Command", .cmp = sort__comm_cmp, .print = sort__comm_print, }; @@ -362,11 +363,11 @@ sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) static size_t sort__dso_print(FILE *fp, struct hist_entry *self) { - return fprintf(fp, " %64s", self->dso ? self->dso->name : ""); + return fprintf(fp, " %s", self->dso ? self->dso->name : ""); } static struct sort_entry sort_dso = { - .header = " Shared Object", + .header = " Shared Object", .cmp = sort__dso_cmp, .print = sort__dso_print, }; @@ -391,9 +392,9 @@ sort__sym_print(FILE *fp, struct hist_entry *self) size_t ret = 0; if (verbose) - ret += fprintf(fp, " %#018llx", (unsigned long long)self->ip); + ret += fprintf(fp, " %#018llx", (unsigned long long)self->ip); - ret += fprintf(fp, " %s: %s", + ret += fprintf(fp, " %s: %s", self->dso ? self->dso->name : "", self->sym ? 
self->sym->name : ""); @@ -401,7 +402,7 @@ sort__sym_print(FILE *fp, struct hist_entry *self) } static struct sort_entry sort_sym = { - .header = "Shared Object: Symbol", + .header = " Shared Object: Symbol", .cmp = sort__sym_cmp, .print = sort__sym_print, }; @@ -595,8 +596,8 @@ static size_t output__fprintf(FILE *fp, uint64_t total_samples) list_for_each_entry(se, &hist_entry__sort_list, list) { int i; - fprintf(fp, " "); - for (i = 0; i < strlen(se->header); i++) + fprintf(fp, " "); + for (i = 0; i < strlen(se->header)-1; i++) fprintf(fp, "."); } fprintf(fp, "\n"); -- cgit v1.2.3 From 29c2810276fbf8419c9b4d942b99c873e9b7640a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Jun 2009 15:56:26 +0200 Subject: perf_counter tools: Remove the last nmi bits Everything is nmi these days, remove the userspace bits so that the kernel can drop the interface. Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-stat.c | 1 - Documentation/perf_counter/builtin-top.c | 1 - 2 files changed, 2 deletions(-) diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index 588679167c8..644f850b7bd 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -84,7 +84,6 @@ static void create_perfstat_counter(int counter) memset(&hw_event, 0, sizeof(hw_event)); hw_event.config = event_id[counter]; hw_event.record_type = 0; - hw_event.nmi = 1; hw_event.exclude_kernel = event_mask[counter] & EVENT_MASK_KERNEL; hw_event.exclude_user = event_mask[counter] & EVENT_MASK_USER; diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 24a887907a7..a2cff7b0527 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -581,7 +581,6 @@ static int __cmd_top(void) hw_event.config = event_id[counter]; hw_event.irq_period = event_count[counter]; hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID; - hw_event.nmi = 1; hw_event.mmap = use_mmap; hw_event.munmap = use_munmap; hw_event.freq = freq; -- cgit v1.2.3 From 53e111a730ea8b002d57dd226098c12789993329 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Jun 2009 17:01:58 +0200 Subject: x86: Fix atomic_long_xchg() on 64bit Apparently I'm the first to use it :-) Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- include/asm-generic/atomic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 3673a13b670..81d3be459ef 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -134,7 +134,7 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) #define atomic_long_cmpxchg(l, old, new) \ (atomic64_cmpxchg((atomic64_t *)(l), (old), (new))) #define atomic_long_xchg(v, new) \ - (atomic64_xchg((atomic64_t *)(l), (new))) + (atomic64_xchg((atomic64_t *)(v), (new))) #else /* BITS_PER_LONG == 64 */ -- cgit v1.2.3 From 8e5799b1ad2a0567fdfaaf0e91b40efee010f2c1 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Jun 2009 15:08:15 +0200 Subject: perf_counter: Add unique counter id Stephan raised the issue that we currently cannot distinguish between similar counters within a group (PERF_RECORD_GROUP uses the config value as identifier). 
Therefore, generate a new ID for each counter using a global u64 sequence counter. Reported-by: Stephane Eranian Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 8 +++++--- kernel/perf_counter.c | 9 +++++++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 9ec20fc6bd3..4845a214b9e 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -114,8 +114,9 @@ enum perf_counter_record_format { * in increasing order of bit value, after the counter value. */ enum perf_counter_read_format { - PERF_FORMAT_TOTAL_TIME_ENABLED = 1, - PERF_FORMAT_TOTAL_TIME_RUNNING = 2, + PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, + PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, + PERF_FORMAT_ID = 1U << 2, }; /* @@ -290,7 +291,7 @@ enum perf_event_type { * { u32 cpu, res; } && PERF_RECORD_CPU * * { u64 nr; - * { u64 event, val; } cnt[nr]; } && PERF_RECORD_GROUP + * { u64 id, val; } cnt[nr]; } && PERF_RECORD_GROUP * * { u16 nr, * hv, @@ -503,6 +504,7 @@ struct perf_counter { struct rcu_head rcu_head; struct pid_namespace *ns; + u64 id; #endif }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index caa012cfe49..978ecfcc7aa 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1510,6 +1510,8 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) values[n++] = counter->total_time_running + atomic64_read(&counter->child_total_time_running); + if (counter->hw_event.read_format & PERF_FORMAT_ID) + values[n++] = counter->id; mutex_unlock(&counter->child_mutex); if (count < n * sizeof(u64)) @@ -2303,7 +2305,7 @@ static void perf_counter_output(struct perf_counter *counter, u32 pid, tid; } tid_entry; struct { - u64 event; + u64 id; u64 counter; } group_entry; struct perf_callchain_entry *callchain = NULL; @@ -2416,7 +2418,7 @@ static void perf_counter_output(struct perf_counter *counter, if (sub != counter) sub->pmu->read(sub); - group_entry.event = sub->hw_event.config; + group_entry.id = sub->id; group_entry.counter = atomic64_read(&sub->count); perf_output_put(&handle, group_entry); @@ -3375,6 +3377,8 @@ done: return counter; } +static atomic64_t perf_counter_id; + /** * sys_perf_counter_open - open a performance counter, associate it to a task/cpu * @@ -3470,6 +3474,7 @@ SYSCALL_DEFINE5(perf_counter_open, mutex_unlock(¤t->perf_counter_mutex); counter->ns = get_pid_ns(current->nsproxy->pid_ns); + counter->id = atomic64_inc_return(&perf_counter_id); fput_light(counter_file, fput_needed2); -- cgit v1.2.3 From b23f3325ed465f1bd914384884269af0d106778c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Jun 2009 15:13:03 +0200 Subject: perf_counter: Rename various fields A few renames: s/irq_period/sample_period/ s/irq_freq/sample_freq/ s/PERF_RECORD_/PERF_SAMPLE_/ s/record_type/sample_type/ And change both the new sample_type and read_format to u64. 
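
Together with PERF_FORMAT_ID from the previous patch, perf_read_hw() writes the optional read() fields in declaration order, so a counter opened with all three format bits returns a buffer laid out as sketched below (the struct name is illustrative, not from the source):

#include <linux/types.h>

struct perf_counter_read {
	__u64 value;		/* counter value, always present */
	__u64 time_enabled;	/* if PERF_FORMAT_TOTAL_TIME_ENABLED */
	__u64 time_running;	/* if PERF_FORMAT_TOTAL_TIME_RUNNING */
	__u64 id;		/* if PERF_FORMAT_ID: the unique counter id */
};

The id field is what lets userspace match PERF_SAMPLE_GROUP records back to the counter that produced them, instead of relying on the (possibly duplicated) config value.
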
Reported-by: Stephane Eranian Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 12 ++--- arch/x86/kernel/cpu/perf_counter.c | 8 +-- include/linux/perf_counter.h | 32 ++++++------ kernel/perf_counter.c | 104 ++++++++++++++++++------------------- 4 files changed, 78 insertions(+), 78 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index f96d55f55bd..c9633321e7a 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -535,7 +535,7 @@ void hw_perf_enable(void) continue; } val = 0; - if (counter->hw.irq_period) { + if (counter->hw.sample_period) { left = atomic64_read(&counter->hw.period_left); if (left < 0x80000000L) val = 0x80000000L - left; @@ -749,12 +749,12 @@ static void power_pmu_unthrottle(struct perf_counter *counter) s64 val, left; unsigned long flags; - if (!counter->hw.idx || !counter->hw.irq_period) + if (!counter->hw.idx || !counter->hw.sample_period) return; local_irq_save(flags); perf_disable(); power_pmu_read(counter); - left = counter->hw.irq_period; + left = counter->hw.sample_period; val = 0; if (left < 0x80000000L) val = 0x80000000L - left; @@ -789,7 +789,7 @@ static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev, if (counter->hw_event.exclude_user || counter->hw_event.exclude_kernel || counter->hw_event.exclude_hv - || counter->hw_event.irq_period) + || counter->hw_event.sample_period) return 0; if (ppmu->limited_pmc_event(ev)) @@ -925,7 +925,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter) counter->hw.config = events[n]; counter->hw.counter_base = cflags[n]; - atomic64_set(&counter->hw.period_left, counter->hw.irq_period); + atomic64_set(&counter->hw.period_left, counter->hw.sample_period); /* * See if we need to reserve the PMU. 
@@ -958,7 +958,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter) static void record_and_restart(struct perf_counter *counter, long val, struct pt_regs *regs, int nmi) { - u64 period = counter->hw.irq_period; + u64 period = counter->hw.sample_period; s64 prev, delta, left; int record = 0; u64 addr, mmcra, sdsync; diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 316b0c995f3..ec06aa5e928 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -290,11 +290,11 @@ static int __hw_perf_counter_init(struct perf_counter *counter) hwc->nmi = 1; hw_event->nmi = 1; - if (!hwc->irq_period) - hwc->irq_period = x86_pmu.max_period; + if (!hwc->sample_period) + hwc->sample_period = x86_pmu.max_period; atomic64_set(&hwc->period_left, - min(x86_pmu.max_period, hwc->irq_period)); + min(x86_pmu.max_period, hwc->sample_period)); /* * Raw event type provide the config in the event structure @@ -462,7 +462,7 @@ x86_perf_counter_set_period(struct perf_counter *counter, struct hw_perf_counter *hwc, int idx) { s64 left = atomic64_read(&hwc->period_left); - s64 period = min(x86_pmu.max_period, hwc->irq_period); + s64 period = min(x86_pmu.max_period, hwc->sample_period); int err; /* diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 4845a214b9e..1fcd3cc9385 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -94,18 +94,18 @@ enum sw_event_ids { #define PERF_COUNTER_EVENT_MASK __PERF_COUNTER_MASK(EVENT) /* - * Bits that can be set in hw_event.record_type to request information + * Bits that can be set in hw_event.sample_type to request information * in the overflow packets. */ -enum perf_counter_record_format { - PERF_RECORD_IP = 1U << 0, - PERF_RECORD_TID = 1U << 1, - PERF_RECORD_TIME = 1U << 2, - PERF_RECORD_ADDR = 1U << 3, - PERF_RECORD_GROUP = 1U << 4, - PERF_RECORD_CALLCHAIN = 1U << 5, - PERF_RECORD_CONFIG = 1U << 6, - PERF_RECORD_CPU = 1U << 7, +enum perf_counter_sample_format { + PERF_SAMPLE_IP = 1U << 0, + PERF_SAMPLE_TID = 1U << 1, + PERF_SAMPLE_TIME = 1U << 2, + PERF_SAMPLE_ADDR = 1U << 3, + PERF_SAMPLE_GROUP = 1U << 4, + PERF_SAMPLE_CALLCHAIN = 1U << 5, + PERF_SAMPLE_CONFIG = 1U << 6, + PERF_SAMPLE_CPU = 1U << 7, }; /* @@ -132,12 +132,12 @@ struct perf_counter_hw_event { __u64 config; union { - __u64 irq_period; - __u64 irq_freq; + __u64 sample_period; + __u64 sample_freq; }; - __u32 record_type; - __u32 read_format; + __u64 sample_type; + __u64 read_format; __u64 disabled : 1, /* off by default */ nmi : 1, /* NMI sampling */ @@ -262,7 +262,7 @@ enum perf_event_type { * struct { * struct perf_event_header header; * u64 time; - * u64 irq_period; + * u64 sample_period; * }; */ PERF_EVENT_PERIOD = 4, @@ -363,7 +363,7 @@ struct hw_perf_counter { }; }; atomic64_t prev_count; - u64 irq_period; + u64 sample_period; atomic64_t period_left; u64 interrupts; #endif diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 978ecfcc7aa..5ecd9981c03 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1186,7 +1186,7 @@ static void perf_log_period(struct perf_counter *counter, u64 period); static void perf_adjust_freq(struct perf_counter_context *ctx) { struct perf_counter *counter; - u64 interrupts, irq_period; + u64 interrupts, sample_period; u64 events, period; s64 delta; @@ -1204,23 +1204,23 @@ static void perf_adjust_freq(struct perf_counter_context *ctx) interrupts = 2*sysctl_perf_counter_limit/HZ; } - if (!counter->hw_event.freq || 
!counter->hw_event.irq_freq) + if (!counter->hw_event.freq || !counter->hw_event.sample_freq) continue; - events = HZ * interrupts * counter->hw.irq_period; - period = div64_u64(events, counter->hw_event.irq_freq); + events = HZ * interrupts * counter->hw.sample_period; + period = div64_u64(events, counter->hw_event.sample_freq); - delta = (s64)(1 + period - counter->hw.irq_period); + delta = (s64)(1 + period - counter->hw.sample_period); delta >>= 1; - irq_period = counter->hw.irq_period + delta; + sample_period = counter->hw.sample_period + delta; - if (!irq_period) - irq_period = 1; + if (!sample_period) + sample_period = 1; - perf_log_period(counter, irq_period); + perf_log_period(counter, sample_period); - counter->hw.irq_period = irq_period; + counter->hw.sample_period = sample_period; } spin_unlock(&ctx->lock); } @@ -2297,7 +2297,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, struct pt_regs *regs, u64 addr) { int ret; - u64 record_type = counter->hw_event.record_type; + u64 sample_type = counter->hw_event.sample_type; struct perf_output_handle handle; struct perf_event_header header; u64 ip; @@ -2321,61 +2321,61 @@ static void perf_counter_output(struct perf_counter *counter, header.misc = PERF_EVENT_MISC_OVERFLOW; header.misc |= perf_misc_flags(regs); - if (record_type & PERF_RECORD_IP) { + if (sample_type & PERF_SAMPLE_IP) { ip = perf_instruction_pointer(regs); - header.type |= PERF_RECORD_IP; + header.type |= PERF_SAMPLE_IP; header.size += sizeof(ip); } - if (record_type & PERF_RECORD_TID) { + if (sample_type & PERF_SAMPLE_TID) { /* namespace issues */ tid_entry.pid = perf_counter_pid(counter, current); tid_entry.tid = perf_counter_tid(counter, current); - header.type |= PERF_RECORD_TID; + header.type |= PERF_SAMPLE_TID; header.size += sizeof(tid_entry); } - if (record_type & PERF_RECORD_TIME) { + if (sample_type & PERF_SAMPLE_TIME) { /* * Maybe do better on x86 and provide cpu_clock_nmi() */ time = sched_clock(); - header.type |= PERF_RECORD_TIME; + header.type |= PERF_SAMPLE_TIME; header.size += sizeof(u64); } - if (record_type & PERF_RECORD_ADDR) { - header.type |= PERF_RECORD_ADDR; + if (sample_type & PERF_SAMPLE_ADDR) { + header.type |= PERF_SAMPLE_ADDR; header.size += sizeof(u64); } - if (record_type & PERF_RECORD_CONFIG) { - header.type |= PERF_RECORD_CONFIG; + if (sample_type & PERF_SAMPLE_CONFIG) { + header.type |= PERF_SAMPLE_CONFIG; header.size += sizeof(u64); } - if (record_type & PERF_RECORD_CPU) { - header.type |= PERF_RECORD_CPU; + if (sample_type & PERF_SAMPLE_CPU) { + header.type |= PERF_SAMPLE_CPU; header.size += sizeof(cpu_entry); cpu_entry.cpu = raw_smp_processor_id(); } - if (record_type & PERF_RECORD_GROUP) { - header.type |= PERF_RECORD_GROUP; + if (sample_type & PERF_SAMPLE_GROUP) { + header.type |= PERF_SAMPLE_GROUP; header.size += sizeof(u64) + counter->nr_siblings * sizeof(group_entry); } - if (record_type & PERF_RECORD_CALLCHAIN) { + if (sample_type & PERF_SAMPLE_CALLCHAIN) { callchain = perf_callchain(regs); if (callchain) { callchain_size = (1 + callchain->nr) * sizeof(u64); - header.type |= PERF_RECORD_CALLCHAIN; + header.type |= PERF_SAMPLE_CALLCHAIN; header.size += callchain_size; } } @@ -2386,28 +2386,28 @@ static void perf_counter_output(struct perf_counter *counter, perf_output_put(&handle, header); - if (record_type & PERF_RECORD_IP) + if (sample_type & PERF_SAMPLE_IP) perf_output_put(&handle, ip); - if (record_type & PERF_RECORD_TID) + if (sample_type & PERF_SAMPLE_TID) perf_output_put(&handle, tid_entry); - if 
(record_type & PERF_RECORD_TIME) + if (sample_type & PERF_SAMPLE_TIME) perf_output_put(&handle, time); - if (record_type & PERF_RECORD_ADDR) + if (sample_type & PERF_SAMPLE_ADDR) perf_output_put(&handle, addr); - if (record_type & PERF_RECORD_CONFIG) + if (sample_type & PERF_SAMPLE_CONFIG) perf_output_put(&handle, counter->hw_event.config); - if (record_type & PERF_RECORD_CPU) + if (sample_type & PERF_SAMPLE_CPU) perf_output_put(&handle, cpu_entry); /* - * XXX PERF_RECORD_GROUP vs inherited counters seems difficult. + * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult. */ - if (record_type & PERF_RECORD_GROUP) { + if (sample_type & PERF_SAMPLE_GROUP) { struct perf_counter *leader, *sub; u64 nr = counter->nr_siblings; @@ -2702,7 +2702,7 @@ void perf_counter_munmap(unsigned long addr, unsigned long len, } /* - * Log irq_period changes so that analyzing tools can re-normalize the + * Log sample_period changes so that analyzing tools can re-normalize the * event flow. */ @@ -2725,7 +2725,7 @@ static void perf_log_period(struct perf_counter *counter, u64 period) .period = period, }; - if (counter->hw.irq_period == period) + if (counter->hw.sample_period == period) return; ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0); @@ -2834,7 +2834,7 @@ static void perf_swcounter_set_period(struct perf_counter *counter) { struct hw_perf_counter *hwc = &counter->hw; s64 left = atomic64_read(&hwc->period_left); - s64 period = hwc->irq_period; + s64 period = hwc->sample_period; if (unlikely(left <= -period)) { left = period; @@ -2874,7 +2874,7 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) ret = HRTIMER_NORESTART; } - period = max_t(u64, 10000, counter->hw.irq_period); + period = max_t(u64, 10000, counter->hw.sample_period); hrtimer_forward_now(hrtimer, ns_to_ktime(period)); return ret; @@ -2959,7 +2959,7 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr, { int neg = atomic64_add_negative(nr, &counter->hw.count); - if (counter->hw.irq_period && !neg && regs) + if (counter->hw.sample_period && !neg && regs) perf_swcounter_overflow(counter, nmi, regs, addr); } @@ -3080,8 +3080,8 @@ static int cpu_clock_perf_counter_enable(struct perf_counter *counter) atomic64_set(&hwc->prev_count, cpu_clock(cpu)); hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hwc->hrtimer.function = perf_swcounter_hrtimer; - if (hwc->irq_period) { - u64 period = max_t(u64, 10000, hwc->irq_period); + if (hwc->sample_period) { + u64 period = max_t(u64, 10000, hwc->sample_period); __hrtimer_start_range_ns(&hwc->hrtimer, ns_to_ktime(period), 0, HRTIMER_MODE_REL, 0); @@ -3092,7 +3092,7 @@ static int cpu_clock_perf_counter_enable(struct perf_counter *counter) static void cpu_clock_perf_counter_disable(struct perf_counter *counter) { - if (counter->hw.irq_period) + if (counter->hw.sample_period) hrtimer_cancel(&counter->hw.hrtimer); cpu_clock_perf_counter_update(counter); } @@ -3132,8 +3132,8 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter) atomic64_set(&hwc->prev_count, now); hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hwc->hrtimer.function = perf_swcounter_hrtimer; - if (hwc->irq_period) { - u64 period = max_t(u64, 10000, hwc->irq_period); + if (hwc->sample_period) { + u64 period = max_t(u64, 10000, hwc->sample_period); __hrtimer_start_range_ns(&hwc->hrtimer, ns_to_ktime(period), 0, HRTIMER_MODE_REL, 0); @@ -3144,7 +3144,7 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter) static 
void task_clock_perf_counter_disable(struct perf_counter *counter) { - if (counter->hw.irq_period) + if (counter->hw.sample_period) hrtimer_cancel(&counter->hw.hrtimer); task_clock_perf_counter_update(counter, counter->ctx->time); @@ -3223,7 +3223,7 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) return NULL; counter->destroy = tp_perf_counter_destroy; - counter->hw.irq_period = counter->hw_event.irq_period; + counter->hw.sample_period = counter->hw_event.sample_period; return &perf_ops_generic; } @@ -3323,15 +3323,15 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, pmu = NULL; hwc = &counter->hw; - if (hw_event->freq && hw_event->irq_freq) - hwc->irq_period = div64_u64(TICK_NSEC, hw_event->irq_freq); + if (hw_event->freq && hw_event->sample_freq) + hwc->sample_period = div64_u64(TICK_NSEC, hw_event->sample_freq); else - hwc->irq_period = hw_event->irq_period; + hwc->sample_period = hw_event->sample_period; /* - * we currently do not support PERF_RECORD_GROUP on inherited counters + * we currently do not support PERF_SAMPLE_GROUP on inherited counters */ - if (hw_event->inherit && (hw_event->record_type & PERF_RECORD_GROUP)) + if (hw_event->inherit && (hw_event->sample_type & PERF_SAMPLE_GROUP)) goto done; if (perf_event_raw(hw_event)) { -- cgit v1.2.3 From 8a016db386195b193e2a8aeddff9fe937dcb7a40 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Jun 2009 15:27:45 +0200 Subject: perf_counter: Remove the last nmi/irq bits IRQ (non-NMI) sampling is not used anymore - remove the last few bits. Signed-off-by: Peter Zijlstra Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 6 ------ include/linux/perf_counter.h | 4 +--- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index ec06aa5e928..9e144fbebd2 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -284,12 +284,6 @@ static int __hw_perf_counter_init(struct perf_counter *counter) if (!hw_event->exclude_kernel) hwc->config |= ARCH_PERFMON_EVENTSEL_OS; - /* - * Use NMI events all the time: - */ - hwc->nmi = 1; - hw_event->nmi = 1; - if (!hwc->sample_period) hwc->sample_period = x86_pmu.max_period; diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 1fcd3cc9385..cef9931793f 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -140,7 +140,6 @@ struct perf_counter_hw_event { __u64 read_format; __u64 disabled : 1, /* off by default */ - nmi : 1, /* NMI sampling */ inherit : 1, /* children inherit it */ pinned : 1, /* must always be on PMU */ exclusive : 1, /* only group on PMU */ @@ -153,7 +152,7 @@ struct perf_counter_hw_event { comm : 1, /* include comm data */ freq : 1, /* use freq, not period */ - __reserved_1 : 51; + __reserved_1 : 52; __u32 wakeup_events; /* wakeup every n events */ __u32 __reserved_2; @@ -354,7 +353,6 @@ struct hw_perf_counter { u64 config; unsigned long config_base; unsigned long counter_base; - int nmi; int idx; }; union { /* software */ -- cgit v1.2.3 From e4abb5d4f7ddabc1fc7c392cf0a10d8e5868c9ca Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Jun 2009 16:08:20 +0200 Subject: perf_counter: x86: Emulate longer sample periods Do as Power already does, emulate sample periods up to 2^63-1 by composing 
them of smaller values limited by hardware capabilities. Only once we wrap the software period do we generate an overflow event. Just 10 lines of new code. Reported-by: Stephane Eranian Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 9e144fbebd2..904571bea71 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -287,8 +287,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter) if (!hwc->sample_period) hwc->sample_period = x86_pmu.max_period; - atomic64_set(&hwc->period_left, - min(x86_pmu.max_period, hwc->sample_period)); + atomic64_set(&hwc->period_left, hwc->sample_period); /* * Raw event type provide the config in the event structure @@ -451,13 +450,13 @@ static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]); * Set the next IRQ period, based on the hwc->period_left value. * To be called with the counter disabled in hw: */ -static void +static int x86_perf_counter_set_period(struct perf_counter *counter, struct hw_perf_counter *hwc, int idx) { s64 left = atomic64_read(&hwc->period_left); - s64 period = min(x86_pmu.max_period, hwc->sample_period); - int err; + s64 period = hwc->sample_period; + int err, ret = 0; /* * If we are way outside a reasoable range then just skip forward: @@ -465,11 +464,13 @@ x86_perf_counter_set_period(struct perf_counter *counter, if (unlikely(left <= -period)) { left = period; atomic64_set(&hwc->period_left, left); + ret = 1; } if (unlikely(left <= 0)) { left += period; atomic64_set(&hwc->period_left, left); + ret = 1; } /* * Quirk: certain CPUs dont like it if just 1 event is left: @@ -477,6 +478,9 @@ x86_perf_counter_set_period(struct perf_counter *counter, if (unlikely(left < 2)) left = 2; + if (left > x86_pmu.max_period) + left = x86_pmu.max_period; + per_cpu(prev_left[idx], smp_processor_id()) = left; /* @@ -487,6 +491,8 @@ x86_perf_counter_set_period(struct perf_counter *counter, err = checking_wrmsrl(hwc->counter_base + idx, (u64)(-left) & x86_pmu.counter_mask); + + return ret; } static inline void @@ -706,16 +712,19 @@ static void x86_pmu_disable(struct perf_counter *counter) * Save and restart an expired counter. 
Called by NMI contexts, * so it has to be careful about preempting normal counter ops: */ -static void intel_pmu_save_and_restart(struct perf_counter *counter) +static int intel_pmu_save_and_restart(struct perf_counter *counter) { struct hw_perf_counter *hwc = &counter->hw; int idx = hwc->idx; + int ret; x86_perf_counter_update(counter, hwc, idx); - x86_perf_counter_set_period(counter, hwc, idx); + ret = x86_perf_counter_set_period(counter, hwc, idx); if (counter->state == PERF_COUNTER_STATE_ACTIVE) intel_pmu_enable_counter(hwc, idx); + + return ret; } static void intel_pmu_reset(void) @@ -782,7 +791,9 @@ again: if (!test_bit(bit, cpuc->active_mask)) continue; - intel_pmu_save_and_restart(counter); + if (!intel_pmu_save_and_restart(counter)) + continue; + if (perf_counter_overflow(counter, nmi, regs, 0)) intel_pmu_disable_counter(&counter->hw, bit); } @@ -824,9 +835,11 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) continue; /* counter overflow */ - x86_perf_counter_set_period(counter, hwc, idx); handled = 1; inc_irq_stat(apic_perf_irqs); + if (!x86_perf_counter_set_period(counter, hwc, idx)) + continue; + if (perf_counter_overflow(counter, nmi, regs, 0)) amd_pmu_disable_counter(hwc, idx); } -- cgit v1.2.3 From 8e3747c13c39246c7e46def7cf495d9d21d4c5f9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Jun 2009 16:16:02 +0200 Subject: perf_counter: Change data head from u32 to u64 Since some people worried that 4G might not be large enough as an mmap data window, extend it to 64 bit for capable platforms. Reported-by: Stephane Eranian Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 7 ++++--- kernel/perf_counter.c | 15 ++++++++------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index cef9931793f..c046f7d97cf 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -212,7 +212,7 @@ struct perf_counter_mmap_page { * User-space reading this value should issue an rmb(), on SMP capable * platforms, after reading this value -- see perf_counter_wakeup().
*/ - __u32 data_head; /* head in the data section */ + __u64 data_head; /* head in the data section */ }; #define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0) @@ -397,10 +397,11 @@ struct perf_mmap_data { int nr_locked; /* nr pages mlocked */ atomic_t poll; /* POLL_ for wakeups */ - atomic_t head; /* write position */ atomic_t events; /* event limit */ - atomic_t done_head; /* completed head */ + atomic_long_t head; /* write position */ + atomic_long_t done_head; /* completed head */ + atomic_t lock; /* concurrent writes */ atomic_t wakeup; /* needs a wakeup */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 5ecd9981c03..3f11a2bc6c7 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2067,8 +2067,8 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) struct perf_output_handle { struct perf_counter *counter; struct perf_mmap_data *data; - unsigned int offset; - unsigned int head; + unsigned long head; + unsigned long offset; int nmi; int overflow; int locked; @@ -2122,7 +2122,8 @@ static void perf_output_lock(struct perf_output_handle *handle) static void perf_output_unlock(struct perf_output_handle *handle) { struct perf_mmap_data *data = handle->data; - int head, cpu; + unsigned long head; + int cpu; data->done_head = data->head; @@ -2135,7 +2136,7 @@ again: * before we publish the new head, matched by a rmb() in userspace when * reading this position. */ - while ((head = atomic_xchg(&data->done_head, 0))) + while ((head = atomic_long_xchg(&data->done_head, 0))) data->user_page->data_head = head; /* @@ -2148,7 +2149,7 @@ again: /* * Therefore we have to validate we did not indeed do so. */ - if (unlikely(atomic_read(&data->done_head))) { + if (unlikely(atomic_long_read(&data->done_head))) { /* * Since we had it locked, we can lock it again. */ @@ -2195,7 +2196,7 @@ static int perf_output_begin(struct perf_output_handle *handle, do { offset = head = atomic_read(&data->head); head += size; - } while (atomic_cmpxchg(&data->head, offset, head) != offset); + } while (atomic_long_cmpxchg(&data->head, offset, head) != offset); handle->offset = offset; handle->head = head; @@ -2246,7 +2247,7 @@ static void perf_output_copy(struct perf_output_handle *handle, * Check we didn't copy past our reservation window, taking the * possible unsigned int wrap into account. 
*/ - WARN_ON_ONCE(((int)(handle->head - handle->offset)) < 0); + WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0); } #define perf_output_put(handle, x) \ -- cgit v1.2.3 From 08247e31ca79b8f02cce47b7e8120797a8726606 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Jun 2009 16:46:57 +0200 Subject: perf_counter: Add ioctl for changing the sample period/frequency Reported-by: Stephane Eranian Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 9 +++++---- kernel/perf_counter.c | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 4 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index c046f7d97cf..45bdd3b95d3 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -164,10 +164,11 @@ struct perf_counter_hw_event { /* * Ioctls that can be done on a perf counter fd: */ -#define PERF_COUNTER_IOC_ENABLE _IOW('$', 0, u32) -#define PERF_COUNTER_IOC_DISABLE _IOW('$', 1, u32) -#define PERF_COUNTER_IOC_REFRESH _IOW('$', 2, u32) -#define PERF_COUNTER_IOC_RESET _IOW('$', 3, u32) +#define PERF_COUNTER_IOC_ENABLE _IO ('$', 0) +#define PERF_COUNTER_IOC_DISABLE _IO ('$', 1) +#define PERF_COUNTER_IOC_REFRESH _IO ('$', 2) +#define PERF_COUNTER_IOC_RESET _IO ('$', 3) +#define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64) enum perf_counter_ioc_flags { PERF_IOC_FLAG_GROUP = 1U << 0, diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 3f11a2bc6c7..abe2f3b6c42 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1604,6 +1604,43 @@ static void perf_counter_for_each(struct perf_counter *counter, mutex_unlock(&counter->child_mutex); } +static int perf_counter_period(struct perf_counter *counter, u64 __user *arg) +{ + struct perf_counter_context *ctx = counter->ctx; + unsigned long size; + int ret = 0; + u64 value; + + if (!counter->hw_event.sample_period) + return -EINVAL; + + size = copy_from_user(&value, arg, sizeof(value)); + if (size != sizeof(value)) + return -EFAULT; + + if (!value) + return -EINVAL; + + spin_lock_irq(&ctx->lock); + if (counter->hw_event.freq) { + if (value > sysctl_perf_counter_limit) { + ret = -EINVAL; + goto unlock; + } + + counter->hw_event.sample_freq = value; + } else { + counter->hw_event.sample_period = value; + counter->hw.sample_period = value; + + perf_log_period(counter, value); + } +unlock: + spin_unlock_irq(&ctx->lock); + + return ret; +} + static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct perf_counter *counter = file->private_data; @@ -1623,6 +1660,10 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case PERF_COUNTER_IOC_REFRESH: return perf_counter_refresh(counter, arg); + + case PERF_COUNTER_IOC_PERIOD: + return perf_counter_period(counter, (u64 __user *)arg); + default: return -ENOTTY; } -- cgit v1.2.3 From 0d48696f87e3618b0d35bd3e4e9d7c188d51e7de Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Jun 2009 19:22:16 +0200 Subject: perf_counter: Rename perf_counter_hw_event => perf_counter_attr The structure isn't hw only and when I read event, I think about those things that fall out the other end. Rename the thing. 
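For illustration, here is a minimal userspace sketch of how the renamed structure and the new PERF_COUNTER_IOC_PERIOD ioctl from the patch above fit together. This is only a sketch: the config value, the pid/cpu arguments and the error handling are placeholder assumptions, and it presumes a userspace-visible copy of the perf_counter.h ABI header plus the __NR_perf_counter_open syscall number, the way the tools' perf.h wrapper uses them:

    #include <linux/perf_counter.h>     /* ABI header; the tools carry their own copy */
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            struct perf_counter_attr attr;
            unsigned long long period = 10000;  /* read by the kernel as a u64 */
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.config        = 0;             /* placeholder generalized event id */
            attr.sample_period = 100000;        /* IOC_PERIOD is -EINVAL without this */
            attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_TID;

            /* counter on the current task, any CPU, no group: */
            fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
            if (fd < 0)
                    return 1;

            /* retune the sample period of the live counter: */
            if (ioctl(fd, PERF_COUNTER_IOC_PERIOD, &period) < 0)
                    perror("PERF_COUNTER_IOC_PERIOD");

            close(fd);
            return 0;
    }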
Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur Cc: Stephane Eranian LKML-Reference: Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 38 ++++++------ arch/x86/kernel/cpu/perf_counter.c | 16 ++--- include/linux/perf_counter.h | 34 +++++------ include/linux/syscalls.h | 4 +- kernel/perf_counter.c | 116 ++++++++++++++++++------------------- 5 files changed, 104 insertions(+), 104 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index c9633321e7a..ea54686cb78 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -262,13 +262,13 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[], } counter = ctrs[i]; if (first) { - eu = counter->hw_event.exclude_user; - ek = counter->hw_event.exclude_kernel; - eh = counter->hw_event.exclude_hv; + eu = counter->attr.exclude_user; + ek = counter->attr.exclude_kernel; + eh = counter->attr.exclude_hv; first = 0; - } else if (counter->hw_event.exclude_user != eu || - counter->hw_event.exclude_kernel != ek || - counter->hw_event.exclude_hv != eh) { + } else if (counter->attr.exclude_user != eu || + counter->attr.exclude_kernel != ek || + counter->attr.exclude_hv != eh) { return -EAGAIN; } } @@ -483,16 +483,16 @@ void hw_perf_enable(void) /* * Add in MMCR0 freeze bits corresponding to the - * hw_event.exclude_* bits for the first counter. + * attr.exclude_* bits for the first counter. * We have already checked that all counters have the * same values for these bits as the first counter. */ counter = cpuhw->counter[0]; - if (counter->hw_event.exclude_user) + if (counter->attr.exclude_user) cpuhw->mmcr[0] |= MMCR0_FCP; - if (counter->hw_event.exclude_kernel) + if (counter->attr.exclude_kernel) cpuhw->mmcr[0] |= freeze_counters_kernel; - if (counter->hw_event.exclude_hv) + if (counter->attr.exclude_hv) cpuhw->mmcr[0] |= MMCR0_FCHV; /* @@ -786,10 +786,10 @@ static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev, int n; u64 alt[MAX_EVENT_ALTERNATIVES]; - if (counter->hw_event.exclude_user - || counter->hw_event.exclude_kernel - || counter->hw_event.exclude_hv - || counter->hw_event.sample_period) + if (counter->attr.exclude_user + || counter->attr.exclude_kernel + || counter->attr.exclude_hv + || counter->attr.sample_period) return 0; if (ppmu->limited_pmc_event(ev)) @@ -855,13 +855,13 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter) if (!ppmu) return ERR_PTR(-ENXIO); - if (!perf_event_raw(&counter->hw_event)) { - ev = perf_event_id(&counter->hw_event); + if (!perf_event_raw(&counter->attr)) { + ev = perf_event_id(&counter->attr); if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) return ERR_PTR(-EOPNOTSUPP); ev = ppmu->generic_events[ev]; } else { - ev = perf_event_config(&counter->hw_event); + ev = perf_event_config(&counter->attr); } counter->hw.config_base = ev; counter->hw.idx = 0; @@ -872,7 +872,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter) * the user set it to. 
*/ if (!firmware_has_feature(FW_FEATURE_LPAR)) - counter->hw_event.exclude_hv = 0; + counter->attr.exclude_hv = 0; /* * If this is a per-task counter, then we can use @@ -990,7 +990,7 @@ static void record_and_restart(struct perf_counter *counter, long val, */ if (record) { addr = 0; - if (counter->hw_event.record_type & PERF_RECORD_ADDR) { + if (counter->attr.record_type & PERF_RECORD_ADDR) { /* * The user wants a data address recorded. * If we're not doing instruction sampling, diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 904571bea71..e16e8c13132 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -247,11 +247,11 @@ static inline int x86_pmu_initialized(void) } /* - * Setup the hardware configuration for a given hw_event_type + * Setup the hardware configuration for a given attr_type */ static int __hw_perf_counter_init(struct perf_counter *counter) { - struct perf_counter_hw_event *hw_event = &counter->hw_event; + struct perf_counter_attr *attr = &counter->attr; struct hw_perf_counter *hwc = &counter->hw; int err; @@ -279,9 +279,9 @@ static int __hw_perf_counter_init(struct perf_counter *counter) /* * Count user and OS events unless requested not to. */ - if (!hw_event->exclude_user) + if (!attr->exclude_user) hwc->config |= ARCH_PERFMON_EVENTSEL_USR; - if (!hw_event->exclude_kernel) + if (!attr->exclude_kernel) hwc->config |= ARCH_PERFMON_EVENTSEL_OS; if (!hwc->sample_period) @@ -292,15 +292,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter) /* * Raw event type provide the config in the event structure */ - if (perf_event_raw(hw_event)) { - hwc->config |= x86_pmu.raw_event(perf_event_config(hw_event)); + if (perf_event_raw(attr)) { + hwc->config |= x86_pmu.raw_event(perf_event_config(attr)); } else { - if (perf_event_id(hw_event) >= x86_pmu.max_events) + if (perf_event_id(attr) >= x86_pmu.max_events) return -EINVAL; /* * The generic map: */ - hwc->config |= x86_pmu.event_map(perf_event_id(hw_event)); + hwc->config |= x86_pmu.event_map(perf_event_id(attr)); } counter->destroy = hw_perf_counter_destroy; diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 45bdd3b95d3..37d5541d74c 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -22,7 +22,7 @@ */ /* - * hw_event.type + * attr.type */ enum perf_event_types { PERF_TYPE_HARDWARE = 0, @@ -37,10 +37,10 @@ enum perf_event_types { }; /* - * Generalized performance counter event types, used by the hw_event.event_id + * Generalized performance counter event types, used by the attr.event_id * parameter of the sys_perf_counter_open() syscall: */ -enum hw_event_ids { +enum attr_ids { /* * Common hardware events, generalized by the kernel: */ @@ -94,7 +94,7 @@ enum sw_event_ids { #define PERF_COUNTER_EVENT_MASK __PERF_COUNTER_MASK(EVENT) /* - * Bits that can be set in hw_event.sample_type to request information + * Bits that can be set in attr.sample_type to request information * in the overflow packets. */ enum perf_counter_sample_format { @@ -109,7 +109,7 @@ enum perf_counter_sample_format { }; /* - * Bits that can be set in hw_event.read_format to request that + * Bits that can be set in attr.read_format to request that * reads on the counter should return the indicated quantities, * in increasing order of bit value, after the counter value. 
*/ @@ -122,7 +122,7 @@ enum perf_counter_read_format { /* * Hardware event to monitor via a performance monitoring counter: */ -struct perf_counter_hw_event { +struct perf_counter_attr { /* * The MSB of the config word signifies if the rest contains cpu * specific (raw) counter configuration data, if unset, the next @@ -323,25 +323,25 @@ enum perf_event_type { struct task_struct; -static inline u64 perf_event_raw(struct perf_counter_hw_event *hw_event) +static inline u64 perf_event_raw(struct perf_counter_attr *attr) { - return hw_event->config & PERF_COUNTER_RAW_MASK; + return attr->config & PERF_COUNTER_RAW_MASK; } -static inline u64 perf_event_config(struct perf_counter_hw_event *hw_event) +static inline u64 perf_event_config(struct perf_counter_attr *attr) { - return hw_event->config & PERF_COUNTER_CONFIG_MASK; + return attr->config & PERF_COUNTER_CONFIG_MASK; } -static inline u64 perf_event_type(struct perf_counter_hw_event *hw_event) +static inline u64 perf_event_type(struct perf_counter_attr *attr) { - return (hw_event->config & PERF_COUNTER_TYPE_MASK) >> + return (attr->config & PERF_COUNTER_TYPE_MASK) >> PERF_COUNTER_TYPE_SHIFT; } -static inline u64 perf_event_id(struct perf_counter_hw_event *hw_event) +static inline u64 perf_event_id(struct perf_counter_attr *attr) { - return hw_event->config & PERF_COUNTER_EVENT_MASK; + return attr->config & PERF_COUNTER_EVENT_MASK; } /** @@ -457,7 +457,7 @@ struct perf_counter { u64 tstamp_running; u64 tstamp_stopped; - struct perf_counter_hw_event hw_event; + struct perf_counter_attr attr; struct hw_perf_counter hw; struct perf_counter_context *ctx; @@ -605,8 +605,8 @@ extern int perf_counter_overflow(struct perf_counter *counter, */ static inline int is_software_counter(struct perf_counter *counter) { - return !perf_event_raw(&counter->hw_event) && - perf_event_type(&counter->hw_event) != PERF_TYPE_HARDWARE; + return !perf_event_raw(&counter->attr) && + perf_event_type(&counter->attr) != PERF_TYPE_HARDWARE; } extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 79faae950e2..c6c84ad8bd7 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -55,7 +55,7 @@ struct compat_timeval; struct robust_list_head; struct getcpu_cache; struct old_linux_dirent; -struct perf_counter_hw_event; +struct perf_counter_attr; #include #include @@ -758,6 +758,6 @@ int kernel_execve(const char *filename, char *const argv[], char *const envp[]); asmlinkage long sys_perf_counter_open( - const struct perf_counter_hw_event __user *hw_event_uptr, + const struct perf_counter_attr __user *attr_uptr, pid_t pid, int cpu, int group_fd, unsigned long flags); #endif diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index abe2f3b6c42..317cef78a38 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -260,7 +260,7 @@ counter_sched_out(struct perf_counter *counter, if (!is_software_counter(counter)) cpuctx->active_oncpu--; ctx->nr_active--; - if (counter->hw_event.exclusive || !cpuctx->active_oncpu) + if (counter->attr.exclusive || !cpuctx->active_oncpu) cpuctx->exclusive = 0; } @@ -282,7 +282,7 @@ group_sched_out(struct perf_counter *group_counter, list_for_each_entry(counter, &group_counter->sibling_list, list_entry) counter_sched_out(counter, cpuctx, ctx); - if (group_counter->hw_event.exclusive) + if (group_counter->attr.exclusive) cpuctx->exclusive = 0; } @@ -550,7 +550,7 @@ counter_sched_in(struct perf_counter *counter, cpuctx->active_oncpu++; 
ctx->nr_active++; - if (counter->hw_event.exclusive) + if (counter->attr.exclusive) cpuctx->exclusive = 1; return 0; @@ -642,7 +642,7 @@ static int group_can_go_on(struct perf_counter *counter, * If this group is exclusive and there are already * counters on the CPU, it can't go on. */ - if (counter->hw_event.exclusive && cpuctx->active_oncpu) + if (counter->attr.exclusive && cpuctx->active_oncpu) return 0; /* * Otherwise, try to add it if all previous groups were able @@ -725,7 +725,7 @@ static void __perf_install_in_context(void *info) */ if (leader != counter) group_sched_out(leader, cpuctx, ctx); - if (leader->hw_event.pinned) { + if (leader->attr.pinned) { update_group_times(leader); leader->state = PERF_COUNTER_STATE_ERROR; } @@ -849,7 +849,7 @@ static void __perf_counter_enable(void *info) */ if (leader != counter) group_sched_out(leader, cpuctx, ctx); - if (leader->hw_event.pinned) { + if (leader->attr.pinned) { update_group_times(leader); leader->state = PERF_COUNTER_STATE_ERROR; } @@ -927,7 +927,7 @@ static int perf_counter_refresh(struct perf_counter *counter, int refresh) /* * not supported on inherited counters */ - if (counter->hw_event.inherit) + if (counter->attr.inherit) return -EINVAL; atomic_add(refresh, &counter->event_limit); @@ -1094,7 +1094,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, */ list_for_each_entry(counter, &ctx->counter_list, list_entry) { if (counter->state <= PERF_COUNTER_STATE_OFF || - !counter->hw_event.pinned) + !counter->attr.pinned) continue; if (counter->cpu != -1 && counter->cpu != cpu) continue; @@ -1122,7 +1122,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, * ignore pinned counters since we did them already. */ if (counter->state <= PERF_COUNTER_STATE_OFF || - counter->hw_event.pinned) + counter->attr.pinned) continue; /* @@ -1204,11 +1204,11 @@ static void perf_adjust_freq(struct perf_counter_context *ctx) interrupts = 2*sysctl_perf_counter_limit/HZ; } - if (!counter->hw_event.freq || !counter->hw_event.sample_freq) + if (!counter->attr.freq || !counter->attr.sample_freq) continue; events = HZ * interrupts * counter->hw.sample_period; - period = div64_u64(events, counter->hw_event.sample_freq); + period = div64_u64(events, counter->attr.sample_freq); delta = (s64)(1 + period - counter->hw.sample_period); delta >>= 1; @@ -1444,11 +1444,11 @@ static void free_counter(struct perf_counter *counter) perf_pending_sync(counter); atomic_dec(&nr_counters); - if (counter->hw_event.mmap) + if (counter->attr.mmap) atomic_dec(&nr_mmap_tracking); - if (counter->hw_event.munmap) + if (counter->attr.munmap) atomic_dec(&nr_munmap_tracking); - if (counter->hw_event.comm) + if (counter->attr.comm) atomic_dec(&nr_comm_tracking); if (counter->destroy) @@ -1504,13 +1504,13 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) mutex_lock(&counter->child_mutex); values[0] = perf_counter_read(counter); n = 1; - if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) values[n++] = counter->total_time_enabled + atomic64_read(&counter->child_total_time_enabled); - if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) values[n++] = counter->total_time_running + atomic64_read(&counter->child_total_time_running); - if (counter->hw_event.read_format & PERF_FORMAT_ID) + if (counter->attr.read_format & PERF_FORMAT_ID) values[n++] = counter->id; 
mutex_unlock(&counter->child_mutex); @@ -1611,7 +1611,7 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg) int ret = 0; u64 value; - if (!counter->hw_event.sample_period) + if (!counter->attr.sample_period) return -EINVAL; size = copy_from_user(&value, arg, sizeof(value)); @@ -1622,15 +1622,15 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg) return -EINVAL; spin_lock_irq(&ctx->lock); - if (counter->hw_event.freq) { + if (counter->attr.freq) { if (value > sysctl_perf_counter_limit) { ret = -EINVAL; goto unlock; } - counter->hw_event.sample_freq = value; + counter->attr.sample_freq = value; } else { - counter->hw_event.sample_period = value; + counter->attr.sample_period = value; counter->hw.sample_period = value; perf_log_period(counter, value); @@ -2299,7 +2299,7 @@ static void perf_output_end(struct perf_output_handle *handle) struct perf_counter *counter = handle->counter; struct perf_mmap_data *data = handle->data; - int wakeup_events = counter->hw_event.wakeup_events; + int wakeup_events = counter->attr.wakeup_events; if (handle->overflow && wakeup_events) { int events = atomic_inc_return(&data->events); @@ -2339,7 +2339,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, struct pt_regs *regs, u64 addr) { int ret; - u64 sample_type = counter->hw_event.sample_type; + u64 sample_type = counter->attr.sample_type; struct perf_output_handle handle; struct perf_event_header header; u64 ip; @@ -2441,7 +2441,7 @@ static void perf_counter_output(struct perf_counter *counter, perf_output_put(&handle, addr); if (sample_type & PERF_SAMPLE_CONFIG) - perf_output_put(&handle, counter->hw_event.config); + perf_output_put(&handle, counter->attr.config); if (sample_type & PERF_SAMPLE_CPU) perf_output_put(&handle, cpu_entry); @@ -2512,7 +2512,7 @@ static void perf_counter_comm_output(struct perf_counter *counter, static int perf_counter_comm_match(struct perf_counter *counter, struct perf_comm_event *comm_event) { - if (counter->hw_event.comm && + if (counter->attr.comm && comm_event->event.header.type == PERF_EVENT_COMM) return 1; @@ -2623,11 +2623,11 @@ static void perf_counter_mmap_output(struct perf_counter *counter, static int perf_counter_mmap_match(struct perf_counter *counter, struct perf_mmap_event *mmap_event) { - if (counter->hw_event.mmap && + if (counter->attr.mmap && mmap_event->event.header.type == PERF_EVENT_MMAP) return 1; - if (counter->hw_event.munmap && + if (counter->attr.munmap && mmap_event->event.header.type == PERF_EVENT_MUNMAP) return 1; @@ -2907,8 +2907,8 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) * In case we exclude kernel IPs or are somehow not in interrupt * context, provide the next best thing, the user IP. 
*/ - if ((counter->hw_event.exclude_kernel || !regs) && - !counter->hw_event.exclude_user) + if ((counter->attr.exclude_kernel || !regs) && + !counter->attr.exclude_user) regs = task_pt_regs(current); if (regs) { @@ -2982,14 +2982,14 @@ static int perf_swcounter_match(struct perf_counter *counter, if (!perf_swcounter_is_counting(counter)) return 0; - if (counter->hw_event.config != event_config) + if (counter->attr.config != event_config) return 0; if (regs) { - if (counter->hw_event.exclude_user && user_mode(regs)) + if (counter->attr.exclude_user && user_mode(regs)) return 0; - if (counter->hw_event.exclude_kernel && !user_mode(regs)) + if (counter->attr.exclude_kernel && !user_mode(regs)) return 0; } @@ -3252,12 +3252,12 @@ extern void ftrace_profile_disable(int); static void tp_perf_counter_destroy(struct perf_counter *counter) { - ftrace_profile_disable(perf_event_id(&counter->hw_event)); + ftrace_profile_disable(perf_event_id(&counter->attr)); } static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) { - int event_id = perf_event_id(&counter->hw_event); + int event_id = perf_event_id(&counter->attr); int ret; ret = ftrace_profile_enable(event_id); @@ -3265,7 +3265,7 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) return NULL; counter->destroy = tp_perf_counter_destroy; - counter->hw.sample_period = counter->hw_event.sample_period; + counter->hw.sample_period = counter->attr.sample_period; return &perf_ops_generic; } @@ -3287,7 +3287,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) * to be kernel events, and page faults are never hypervisor * events. */ - switch (perf_event_id(&counter->hw_event)) { + switch (perf_event_id(&counter->attr)) { case PERF_COUNT_CPU_CLOCK: pmu = &perf_ops_cpu_clock; @@ -3319,7 +3319,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) * Allocate and initialize a counter structure */ static struct perf_counter * -perf_counter_alloc(struct perf_counter_hw_event *hw_event, +perf_counter_alloc(struct perf_counter_attr *attr, int cpu, struct perf_counter_context *ctx, struct perf_counter *group_leader, @@ -3352,36 +3352,36 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, mutex_init(&counter->mmap_mutex); counter->cpu = cpu; - counter->hw_event = *hw_event; + counter->attr = *attr; counter->group_leader = group_leader; counter->pmu = NULL; counter->ctx = ctx; counter->oncpu = -1; counter->state = PERF_COUNTER_STATE_INACTIVE; - if (hw_event->disabled) + if (attr->disabled) counter->state = PERF_COUNTER_STATE_OFF; pmu = NULL; hwc = &counter->hw; - if (hw_event->freq && hw_event->sample_freq) - hwc->sample_period = div64_u64(TICK_NSEC, hw_event->sample_freq); + if (attr->freq && attr->sample_freq) + hwc->sample_period = div64_u64(TICK_NSEC, attr->sample_freq); else - hwc->sample_period = hw_event->sample_period; + hwc->sample_period = attr->sample_period; /* * we currently do not support PERF_SAMPLE_GROUP on inherited counters */ - if (hw_event->inherit && (hw_event->sample_type & PERF_SAMPLE_GROUP)) + if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP)) goto done; - if (perf_event_raw(hw_event)) { + if (perf_event_raw(attr)) { pmu = hw_perf_counter_init(counter); goto done; } - switch (perf_event_type(hw_event)) { + switch (perf_event_type(attr)) { case PERF_TYPE_HARDWARE: pmu = hw_perf_counter_init(counter); break; @@ -3409,11 +3409,11 @@ done: counter->pmu = pmu; atomic_inc(&nr_counters); - if (counter->hw_event.mmap) + if 
(counter->attr.mmap) atomic_inc(&nr_mmap_tracking); - if (counter->hw_event.munmap) + if (counter->attr.munmap) atomic_inc(&nr_munmap_tracking); - if (counter->hw_event.comm) + if (counter->attr.comm) atomic_inc(&nr_comm_tracking); return counter; @@ -3424,17 +3424,17 @@ static atomic64_t perf_counter_id; /** * sys_perf_counter_open - open a performance counter, associate it to a task/cpu * - * @hw_event_uptr: event type attributes for monitoring/sampling + * @attr_uptr: event type attributes for monitoring/sampling * @pid: target pid * @cpu: target cpu * @group_fd: group leader counter fd */ SYSCALL_DEFINE5(perf_counter_open, - const struct perf_counter_hw_event __user *, hw_event_uptr, + const struct perf_counter_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) { struct perf_counter *counter, *group_leader; - struct perf_counter_hw_event hw_event; + struct perf_counter_attr attr; struct perf_counter_context *ctx; struct file *counter_file = NULL; struct file *group_file = NULL; @@ -3446,7 +3446,7 @@ SYSCALL_DEFINE5(perf_counter_open, if (flags) return -EINVAL; - if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0) + if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0) return -EFAULT; /* @@ -3484,11 +3484,11 @@ SYSCALL_DEFINE5(perf_counter_open, /* * Only a group leader can be exclusive or pinned */ - if (hw_event.exclusive || hw_event.pinned) + if (attr.exclusive || attr.pinned) goto err_put_context; } - counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader, + counter = perf_counter_alloc(&attr, cpu, ctx, group_leader, GFP_KERNEL); ret = PTR_ERR(counter); if (IS_ERR(counter)) @@ -3556,7 +3556,7 @@ inherit_counter(struct perf_counter *parent_counter, if (parent_counter->parent) parent_counter = parent_counter->parent; - child_counter = perf_counter_alloc(&parent_counter->hw_event, + child_counter = perf_counter_alloc(&parent_counter->attr, parent_counter->cpu, child_ctx, group_leader, GFP_KERNEL); if (IS_ERR(child_counter)) @@ -3565,7 +3565,7 @@ inherit_counter(struct perf_counter *parent_counter, /* * Make the child state follow the state of the parent counter, - * not its hw_event.disabled bit. We hold the parent's mutex, + * not its attr.disabled bit. We hold the parent's mutex, * so we won't race with perf_counter_{en, dis}able_family. 
*/ if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE) @@ -3582,7 +3582,7 @@ inherit_counter(struct perf_counter *parent_counter, /* * inherit into child's child as well: */ - child_counter->hw_event.inherit = 1; + child_counter->attr.inherit = 1; /* * Get a reference to the parent filp - we will fput it @@ -3838,7 +3838,7 @@ int perf_counter_init_task(struct task_struct *child) if (counter != counter->group_leader) continue; - if (!counter->hw_event.inherit) { + if (!counter->attr.inherit) { inherited_all = 0; continue; } -- cgit v1.2.3 From c70975bc8d5bac487616785f5d5bc7b090dfa2d9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Jun 2009 17:38:21 +0200 Subject: perf_counter tools: Fix up the ABI shakeup Signed-off-by: Peter Zijlstra LKML-Reference: Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur Cc: Stephane Eranian LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 18 +++++++++--------- Documentation/perf_counter/builtin-stat.c | 22 +++++++++++----------- Documentation/perf_counter/builtin-top.c | 20 ++++++++++---------- Documentation/perf_counter/perf.h | 4 ++-- 4 files changed, 32 insertions(+), 32 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index bace7a8edf8..c2fd0424497 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -322,7 +322,7 @@ static void synthesize_events(void) static void open_counters(int cpu, pid_t pid) { - struct perf_counter_hw_event hw_event; + struct perf_counter_attr attr; int counter, group_fd; int track = 1; @@ -334,18 +334,18 @@ static void open_counters(int cpu, pid_t pid) group_fd = -1; for (counter = 0; counter < nr_counters; counter++) { - memset(&hw_event, 0, sizeof(hw_event)); - hw_event.config = event_id[counter]; - hw_event.irq_period = event_count[counter]; - hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID; - hw_event.mmap = track; - hw_event.comm = track; - hw_event.inherit = (cpu < 0) && inherit; + memset(&attr, 0, sizeof(attr)); + attr.config = event_id[counter]; + attr.sample_period = event_count[counter]; + attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; + attr.mmap = track; + attr.comm = track; + attr.inherit = (cpu < 0) && inherit; track = 0; // only the first counter needs these fd[nr_cpu][counter] = - sys_perf_counter_open(&hw_event, pid, cpu, group_fd, 0); + sys_perf_counter_open(&attr, pid, cpu, group_fd, 0); if (fd[nr_cpu][counter] < 0) { int err = errno; diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index 644f850b7bd..27abe6a2db3 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -79,22 +79,22 @@ static __u64 walltime_nsecs; static void create_perfstat_counter(int counter) { - struct perf_counter_hw_event hw_event; + struct perf_counter_attr attr; - memset(&hw_event, 0, sizeof(hw_event)); - hw_event.config = event_id[counter]; - hw_event.record_type = 0; - hw_event.exclude_kernel = event_mask[counter] & EVENT_MASK_KERNEL; - hw_event.exclude_user = event_mask[counter] & EVENT_MASK_USER; + memset(&attr, 0, sizeof(attr)); + attr.config = event_id[counter]; + attr.sample_type = 0; + attr.exclude_kernel = event_mask[counter] & EVENT_MASK_KERNEL; + attr.exclude_user = event_mask[counter] & EVENT_MASK_USER; if (scale) - hw_event.read_format = 
PERF_FORMAT_TOTAL_TIME_ENABLED | + attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING; if (system_wide) { int cpu; for (cpu = 0; cpu < nr_cpus; cpu ++) { - fd[cpu][counter] = sys_perf_counter_open(&hw_event, -1, cpu, -1, 0); + fd[cpu][counter] = sys_perf_counter_open(&attr, -1, cpu, -1, 0); if (fd[cpu][counter] < 0) { printf("perfstat error: syscall returned with %d (%s)\n", fd[cpu][counter], strerror(errno)); @@ -102,10 +102,10 @@ static void create_perfstat_counter(int counter) } } } else { - hw_event.inherit = inherit; - hw_event.disabled = 1; + attr.inherit = inherit; + attr.disabled = 1; - fd[0][counter] = sys_perf_counter_open(&hw_event, 0, -1, -1, 0); + fd[0][counter] = sys_perf_counter_open(&attr, 0, -1, -1, 0); if (fd[0][counter] < 0) { printf("perfstat error: syscall returned with %d (%s)\n", fd[0][counter], strerror(errno)); diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index a2cff7b0527..5029d8e6cd9 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -537,7 +537,7 @@ static void mmap_read(struct mmap_data *md) old += size; if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) { - if (event->header.type & PERF_RECORD_IP) + if (event->header.type & PERF_SAMPLE_IP) process_event(event->ip.ip, md->counter); } else { switch (event->header.type) { @@ -563,7 +563,7 @@ static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; static int __cmd_top(void) { - struct perf_counter_hw_event hw_event; + struct perf_counter_attr attr; pthread_t thread; int i, counter, group_fd, nr_poll = 0; unsigned int cpu; @@ -577,15 +577,15 @@ static int __cmd_top(void) if (target_pid == -1 && profile_cpu == -1) cpu = i; - memset(&hw_event, 0, sizeof(hw_event)); - hw_event.config = event_id[counter]; - hw_event.irq_period = event_count[counter]; - hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID; - hw_event.mmap = use_mmap; - hw_event.munmap = use_munmap; - hw_event.freq = freq; + memset(&attr, 0, sizeof(attr)); + attr.config = event_id[counter]; + attr.sample_period = event_count[counter]; + attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; + attr.mmap = use_mmap; + attr.munmap = use_munmap; + attr.freq = freq; - fd[i][counter] = sys_perf_counter_open(&hw_event, target_pid, cpu, group_fd, 0); + fd[i][counter] = sys_perf_counter_open(&attr, target_pid, cpu, group_fd, 0); if (fd[i][counter] < 0) { int err = errno; printf("kerneltop error: syscall returned with %d (%s)\n", diff --git a/Documentation/perf_counter/perf.h b/Documentation/perf_counter/perf.h index 5a2520bb7e5..10622a48b40 100644 --- a/Documentation/perf_counter/perf.h +++ b/Documentation/perf_counter/perf.h @@ -53,11 +53,11 @@ static inline unsigned long long rdclock(void) _min1 < _min2 ? _min1 : _min2; }) static inline int -sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr, +sys_perf_counter_open(struct perf_counter_attr *attr_uptr, pid_t pid, int cpu, int group_fd, unsigned long flags) { - return syscall(__NR_perf_counter_open, hw_event_uptr, pid, cpu, + return syscall(__NR_perf_counter_open, attr_uptr, pid, cpu, group_fd, flags); } -- cgit v1.2.3 From 436224a6d8bb3e29fe0cc18122f8d1f593da67b8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Jun 2009 21:02:36 +0200 Subject: perf report: Separate out idle threads Introduce the special comm name [idle] for idle threads. The idle task never execs, so perf.data carries no comm event for it and its samples would otherwise be printed with an empty command name.
Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 19c1e056bb6..6d68f3aa86b 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -612,6 +612,17 @@ static size_t output__fprintf(FILE *fp, uint64_t total_samples) return ret; } +static void register_idle_thread(void) +{ + struct thread *thread = threads__findnew(0); + + if (thread == NULL || + thread__set_comm(thread, "[idle]")) { + fprintf(stderr, "problem inserting idle task.\n"); + exit(-1); + } +} + static int __cmd_report(void) { @@ -626,6 +637,8 @@ static int __cmd_report(void) char cwd[PATH_MAX], *cwdp = cwd; int cwdlen; + register_idle_thread(); + input = open(input_name, O_RDONLY); if (input < 0) { perror("failed to open file"); -- cgit v1.2.3 From cf25c63c609e99bfb9303b68a7a90a56a3a32cea Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 2 Jun 2009 22:12:14 +0200 Subject: perf report: Fix column width/alignment of dsos Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 6d68f3aa86b..b84aaf18bf1 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -303,11 +303,11 @@ sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) static size_t sort__thread_print(FILE *fp, struct hist_entry *self) { - return fprintf(fp, " %16s:%5d", self->thread->comm ?: "", self->thread->pid); + return fprintf(fp, " %16s:%5d", self->thread->comm ?: "", self->thread->pid); } static struct sort_entry sort_thread = { - .header = " Command: Pid ", + .header = " Command: Pid ", .cmp = sort__thread_cmp, .print = sort__thread_print, }; @@ -363,11 +363,11 @@ sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) static size_t sort__dso_print(FILE *fp, struct hist_entry *self) { - return fprintf(fp, " %s", self->dso ? self->dso->name : ""); + return fprintf(fp, " %-25s", self->dso ? self->dso->name : ""); } static struct sort_entry sort_dso = { - .header = " Shared Object", + .header = " Shared Object ", .cmp = sort__dso_cmp, .print = sort__dso_print, }; -- cgit v1.2.3 From abaff32a03e26e5d6674cb2a26ad882efe7493a3 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 2 Jun 2009 22:59:57 +0200 Subject: perf record: Add --append option Allow incremental profiling via 'perf record -A' - this will append to an existing perf.data. Also reorder perf record options by utility / likelihood of usage.
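As an example, an incremental profiling session could look like this (the workload name is a placeholder):

    $ perf record ./my-workload        # first run creates perf.data
    $ perf record -A ./my-workload     # later runs append to it
    $ perf report                      # analyze the combined data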
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 40 +++++++++++++++++++---------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index c2fd0424497..19cba6b5ee9 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -1,5 +1,7 @@ - - +/* + * perf record: Record the profile of a workload (or a CPU, or a PID) into + * the perf.data output file - for later analysis via perf report. + */ #include "perf.h" #include "builtin.h" #include "util/util.h" @@ -28,6 +30,7 @@ static int system_wide = 0; static pid_t target_pid = -1; static int inherit = 1; static int force = 0; +static int append_file = 0; const unsigned int default_count[] = { 1000000, @@ -385,22 +388,29 @@ static void open_counters(int cpu, pid_t pid) static int __cmd_record(int argc, const char **argv) { int i, counter; + struct stat st; pid_t pid; + int flags; int ret; - struct stat st; page_size = sysconf(_SC_PAGE_SIZE); nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); assert(nr_cpus <= MAX_NR_CPUS); assert(nr_cpus >= 0); - if (!stat(output_name, &st) && !force) { - fprintf(stderr, "Error, output file: %s exists, use -f to overwrite.\n", + if (!stat(output_name, &st) && !force && !append_file) { + fprintf(stderr, "Error, output file %s exists, use -A to append or -f to overwrite.\n", output_name); exit(-1); } - output = open(output_name, O_CREAT|O_TRUNC|O_RDWR, S_IRUSR|S_IWUSR); + flags = O_CREAT|O_RDWR; + if (append_file) + flags |= O_APPEND; + else + flags |= O_TRUNC; + + output = open(output_name, flags, S_IRUSR|S_IWUSR); if (output < 0) { perror("failed to create output file"); exit(-1); @@ -466,22 +476,24 @@ static char events_help_msg[EVENTS_HELP_MAX]; static const struct option options[] = { OPT_CALLBACK('e', "event", NULL, "event", events_help_msg, parse_events), - OPT_INTEGER('c', "count", &default_interval, - "event period to sample"), - OPT_INTEGER('m', "mmap-pages", &mmap_pages, - "number of mmap data pages"), - OPT_STRING('o', "output", &output_name, "file", - "output file name"), - OPT_BOOLEAN('i', "inherit", &inherit, - "child tasks inherit counters"), OPT_INTEGER('p', "pid", &target_pid, "record events on existing pid"), OPT_INTEGER('r', "realtime", &realtime_prio, "collect data with this RT SCHED_FIFO priority"), OPT_BOOLEAN('a', "all-cpus", &system_wide, "system-wide collection from all CPUs"), + OPT_BOOLEAN('A', "append", &append_file, + "append to the output file to do incremental profiling"), OPT_BOOLEAN('f', "force", &force, "overwrite existing data file"), + OPT_INTEGER('c', "count", &default_interval, + "event period to sample"), + OPT_STRING('o', "output", &output_name, "file", + "output file name"), + OPT_BOOLEAN('i', "inherit", &inherit, + "child tasks inherit counters"), + OPT_INTEGER('m', "mmap-pages", &mmap_pages, + "number of mmap data pages"), OPT_END() }; -- cgit v1.2.3 From 3cf165fc2e7f221a7a95098b47eb990779e29f5f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 2 Jun 2009 23:02:59 +0200 Subject: perf record: Increase mmap buffering default I've run into mmap overruns with the current 16 pages default, increase it to 128 pages. 
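The buffering remains tunable per invocation; if events are still being lost at the new default, the page count can be raised explicitly via the existing -m option (workload name again a placeholder):

    $ perf record -m 256 ./my-workload # use 256 mmap data pages instead of 128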
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 19cba6b5ee9..8feb1192e09 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -19,9 +19,9 @@ static int default_interval = 100000; static int event_count[MAX_COUNTERS]; static int fd[MAX_NR_CPUS][MAX_COUNTERS]; -static int nr_cpus = 0; +static int nr_cpus = 0; static unsigned int page_size; -static unsigned int mmap_pages = 16; +static unsigned int mmap_pages = 128; static int output; static const char *output_name = "perf.data"; static int group = 0; -- cgit v1.2.3 From 0a520c63e1625b92ef775da40192e1542910e7f6 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 2 Jun 2009 23:24:45 +0200 Subject: perf report: Print more info instead of entries Sometimes we still fail to find a DSO or look up a symbol; print out the raw information in this case (which can help debug the problem), instead of a not very helpful string. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index b84aaf18bf1..270e986c2d4 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -201,7 +201,9 @@ static struct thread *thread__new(pid_t pid) if (self != NULL) { self->pid = pid; - self->comm = NULL; + self->comm = malloc(30); + if (self->comm) + sprintf(self->comm, ":%d", pid); INIT_LIST_HEAD(&self->maps); } @@ -333,7 +335,7 @@ sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) static size_t sort__comm_print(FILE *fp, struct hist_entry *self) { - return fprintf(fp, " %16s", self->thread->comm ?: ""); + return fprintf(fp, " %16s", self->thread->comm); } static struct sort_entry sort_comm = { @@ -363,7 +365,7 @@ sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) static size_t sort__dso_print(FILE *fp, struct hist_entry *self) { - return fprintf(fp, " %-25s", self->dso ? self->dso->name : ""); + if (self->dso) + return fprintf(fp, " %-25s", self->dso->name); + + return fprintf(fp, " %016llx", (__u64)self->ip); } static struct sort_entry sort_dso = { @@ -392,11 +397,17 @@ sort__sym_print(FILE *fp, struct hist_entry *self) size_t ret = 0; if (verbose) - ret += fprintf(fp, " %#018llx", (unsigned long long)self->ip); + ret += fprintf(fp, " %#018llx", (__u64)self->ip); + + if (self->dso) + ret += fprintf(fp, " %s: ", self->dso->name); + else + ret += fprintf(fp, " %#016llx: ", (__u64)self->ip); - ret += fprintf(fp, " %s: %s", - self->dso ? self->dso->name : "", - self->sym ?
self->sym->name : ""); + if (self->sym) + ret += fprintf(fp, "%s", self->sym->name); + else + ret += fprintf(fp, "%#016llx", (__u64)self->ip); return ret; } @@ -772,7 +783,8 @@ more: event->mmap.filename); } if (thread == NULL || map == NULL) { - fprintf(stderr, "problem processing PERF_EVENT_MMAP, skipping event.\n"); + if (verbose) + fprintf(stderr, "problem processing PERF_EVENT_MMAP, skipping event.\n"); goto broken_event; } thread__insert_map(thread, map); -- cgit v1.2.3 From bf9e187637ca3d85cee7407e3af93995868cc87c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 2 Jun 2009 23:37:05 +0200 Subject: perf_counter tools: Make source code headers more coherent The perf commands had different ways of describing themselves, introduce a coherent command-file-header format taken from the Git project. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 11 ++++-- Documentation/perf_counter/builtin-report.c | 10 ++++- Documentation/perf_counter/builtin-stat.c | 29 ++++++++------ Documentation/perf_counter/builtin-top.c | 60 +++++++++-------------------- Documentation/perf_counter/perf.c | 9 +++++ 5 files changed, 62 insertions(+), 57 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 8feb1192e09..2741b3561bb 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -1,9 +1,14 @@ /* - * perf record: Record the profile of a workload (or a CPU, or a PID) into - * the perf.data output file - for later analysis via perf report. + * builtin-record.c + * + * Builtin record command: Record the profile of a workload + * (or a CPU, or a PID) into the perf.data output file - for + * later analysis via perf report. */ -#include "perf.h" #include "builtin.h" + +#include "perf.h" + #include "util/util.h" #include "util/parse-options.h" #include "util/parse-events.h" diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 270e986c2d4..9da990fba4a 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -1,6 +1,14 @@ -#include "util/util.h" +/* + * builtin-report.c + * + * Builtin report command: Analyze the perf.data input file, + * look up and read DSOs and symbol information and display + * a histogram of results, along various sorting keys. + */ #include "builtin.h" +#include "util/util.h" + #include "util/list.h" #include "util/cache.h" #include "util/rbtree.h" diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index 27abe6a2db3..2357a663b67 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -1,20 +1,27 @@ /* - * perf stat: /usr/bin/time -alike performance counter statistics utility + * builtin-stat.c + * + * Builtin stat command: Give a precise performance counters summary + * overview about any workload, CPU or specific PID. + * + * Sample output: - It summarizes the counter events of all tasks (and child tasks), - covering all CPUs that the command (or workload) executes on. - It only counts the per-task events of the workload started, - independent of how many other tasks run on those CPUs. 
+ $ perf stat ~/hackbench 10 + Time: 0.104 - Sample output: + Performance counter stats for '/home/mingo/hackbench': - $ perf stat -e 1 -e 3 -e 5 ls -lR /usr/include/ >/dev/null + 1255.538611 task clock ticks # 10.143 CPU utilization factor + 54011 context switches # 0.043 M/sec + 385 CPU migrations # 0.000 M/sec + 17755 pagefaults # 0.014 M/sec + 3808323185 CPU cycles # 3033.219 M/sec + 1575111190 instructions # 1254.530 M/sec + 17367895 cache references # 13.833 M/sec + 7674421 cache misses # 6.112 M/sec - Performance counter stats for 'ls': + Wall-clock time elapsed: 123.786620 msecs - 163516953 instructions - 2295 cache-misses - 2855182 branch-misses * * Copyright (C) 2008, Red Hat Inc, Ingo Molnar * diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 5029d8e6cd9..a63935276ca 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -1,49 +1,25 @@ /* - * kerneltop.c: show top kernel functions - performance counters showcase - - Build with: - - make -C Documentation/perf_counter/ - - Sample output: - ------------------------------------------------------------------------------- - KernelTop: 2669 irqs/sec [cache-misses/cache-refs], (all, cpu: 2) ------------------------------------------------------------------------------- - - weight RIP kernel function - ______ ________________ _______________ - - 35.20 - ffffffff804ce74b : skb_copy_and_csum_dev - 33.00 - ffffffff804cb740 : sock_alloc_send_skb - 31.26 - ffffffff804ce808 : skb_push - 22.43 - ffffffff80510004 : tcp_established_options - 19.00 - ffffffff8027d250 : find_get_page - 15.76 - ffffffff804e4fc9 : eth_type_trans - 15.20 - ffffffff804d8baa : dst_release - 14.86 - ffffffff804cf5d8 : skb_release_head_state - 14.00 - ffffffff802217d5 : read_hpet - 12.00 - ffffffff804ffb7f : __ip_local_out - 11.97 - ffffffff804fc0c8 : ip_local_deliver_finish - 8.54 - ffffffff805001a3 : ip_queue_xmit + * builtin-top.c + * + * Builtin top command: Display a continuously updated profile of + * any workload, CPU or specific PID. + * + * Copyright (C) 2008, Red Hat Inc, Ingo Molnar + * + * Improvements and fixes by: + * + * Arjan van de Ven + * Yanmin Zhang + * Wu Fengguang + * Mike Galbraith + * Paul Mackerras + * + * Released under the GPL v2. (and only v2, not any later version) */ - - /* - * Copyright (C) 2008, Red Hat Inc, Ingo Molnar - * - * Improvements and fixes by: - * - * Arjan van de Ven - * Yanmin Zhang - * Wu Fengguang - * Mike Galbraith - * Paul Mackerras - * - * Released under the GPL v2. (and only v2, not any later version) - */ +#include "builtin.h" #include "perf.h" -#include "builtin.h" + #include "util/symbol.h" #include "util/util.h" #include "util/rbtree.h" diff --git a/Documentation/perf_counter/perf.c b/Documentation/perf_counter/perf.c index e8a85842b49..ec7edb7fbe2 100644 --- a/Documentation/perf_counter/perf.c +++ b/Documentation/perf_counter/perf.c @@ -1,4 +1,13 @@ +/* + * perf.c + * + * Performance analysis utility. + * + * This is the main hub from which the sub-commands (perf stat, + * perf top, perf record, perf report, etc.) are started. 
+ */ #include "builtin.h" + #include "util/exec_cmd.h" #include "util/cache.h" #include "util/quote.h" -- cgit v1.2.3 From addc2785ce92ff05da8edf18317b6b4719e10d9f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 2 Jun 2009 23:43:11 +0200 Subject: perf record: Print out the number of events captured It makes sense to inform the user about how many events perf record has written - so that the sufficiency of profiling coverage and intensity can be determined at a glance. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 2741b3561bb..ec3b73adbd9 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -467,6 +467,9 @@ static int __cmd_record(int argc, const char **argv) ret = poll(event_array, nr_poll, 100); } + + fprintf(stderr, "[ perf record: Captured and wrote %ld events. ]\n", events); + return 0; } -- cgit v1.2.3 From a32881066e58346f2901afe0ebdfbf0c562877e5 Mon Sep 17 00:00:00 2001 From: Yong Wang Date: Wed, 3 Jun 2009 13:12:55 +0800 Subject: perf_counter/x86: Remove the IRQ (non-NMI) handling bits Remove the IRQ (non-NMI) handling bits as NMI will be used always. Signed-off-by: Yong Wang Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: John Kacur LKML-Reference: <20090603051255.GA2791@ywang-moblin2.bj.intel.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/entry_arch.h | 1 - arch/x86/include/asm/hw_irq.h | 1 - arch/x86/include/asm/irq_vectors.h | 5 ----- arch/x86/kernel/cpu/perf_counter.c | 21 ++++++--------------- arch/x86/kernel/entry_64.S | 2 -- arch/x86/kernel/irqinit_32.c | 1 - arch/x86/kernel/irqinit_64.c | 1 - 7 files changed, 6 insertions(+), 26 deletions(-) diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h index fe24d280249..d750a10ccad 100644 --- a/arch/x86/include/asm/entry_arch.h +++ b/arch/x86/include/asm/entry_arch.h @@ -49,7 +49,6 @@ BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) #ifdef CONFIG_PERF_COUNTERS -BUILD_INTERRUPT(perf_counter_interrupt, LOCAL_PERF_VECTOR) BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR) #endif diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 7309c0ad690..4b4921d7a28 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -29,7 +29,6 @@ extern void apic_timer_interrupt(void); extern void generic_interrupt(void); extern void error_interrupt(void); -extern void perf_counter_interrupt(void); extern void perf_pending_interrupt(void); extern void spurious_interrupt(void); diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 545bb811ccb..4492e19f839 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -106,11 +106,6 @@ */ #define LOCAL_TIMER_VECTOR 0xef -/* - * Performance monitoring interrupt vector: - */ -#define LOCAL_PERF_VECTOR 0xee - /* * Generic system vector for platform specific use */ diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index e16e8c13132..12cc05ed9f4 100644 --- 
a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -40,7 +40,7 @@ struct cpu_hw_counters { struct x86_pmu { const char *name; int version; - int (*handle_irq)(struct pt_regs *, int); + int (*handle_irq)(struct pt_regs *); void (*disable_all)(void); void (*enable_all)(void); void (*enable)(struct hw_perf_counter *, int); @@ -755,7 +755,7 @@ static void intel_pmu_reset(void) * This handler is triggered by the local APIC, so the APIC IRQ handling * rules apply: */ -static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi) +static int intel_pmu_handle_irq(struct pt_regs *regs) { struct cpu_hw_counters *cpuc; struct cpu_hw_counters; @@ -794,7 +794,7 @@ again: if (!intel_pmu_save_and_restart(counter)) continue; - if (perf_counter_overflow(counter, nmi, regs, 0)) + if (perf_counter_overflow(counter, 1, regs, 0)) intel_pmu_disable_counter(&counter->hw, bit); } @@ -812,7 +812,7 @@ again: return 1; } -static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) +static int amd_pmu_handle_irq(struct pt_regs *regs) { int cpu, idx, handled = 0; struct cpu_hw_counters *cpuc; @@ -840,22 +840,13 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) if (!x86_perf_counter_set_period(counter, hwc, idx)) continue; - if (perf_counter_overflow(counter, nmi, regs, 0)) + if (perf_counter_overflow(counter, 1, regs, 0)) amd_pmu_disable_counter(hwc, idx); } return handled; } -void smp_perf_counter_interrupt(struct pt_regs *regs) -{ - irq_enter(); - apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR); - ack_APIC_irq(); - x86_pmu.handle_irq(regs, 0); - irq_exit(); -} - void smp_perf_pending_interrupt(struct pt_regs *regs) { irq_enter(); @@ -910,7 +901,7 @@ perf_counter_nmi_handler(struct notifier_block *self, * If the first NMI handles both, the latter will be empty and daze * the CPU. 
*/ - x86_pmu.handle_irq(regs, 1); + x86_pmu.handle_irq(regs); return NOTIFY_STOP; } diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 89100461914..7985c010f8a 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1026,8 +1026,6 @@ apicinterrupt SPURIOUS_APIC_VECTOR \ spurious_interrupt smp_spurious_interrupt #ifdef CONFIG_PERF_COUNTERS -apicinterrupt LOCAL_PERF_VECTOR \ - perf_counter_interrupt smp_perf_counter_interrupt apicinterrupt LOCAL_PENDING_VECTOR \ perf_pending_interrupt smp_perf_pending_interrupt #endif diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index 3190a6b961e..205bdd880d3 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c @@ -165,7 +165,6 @@ static void __init apic_intr_init(void) alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); # ifdef CONFIG_PERF_COUNTERS - alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt); alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt); # endif diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index 53ceb26f80f..fa6ef692000 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c @@ -155,7 +155,6 @@ static void __init apic_intr_init(void) /* Performance monitoring interrupt: */ #ifdef CONFIG_PERF_COUNTERS - alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt); alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt); #endif } -- cgit v1.2.3 From 8ce998d6693bd02ab3b74ee1cc303ecb1fa9b514 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 3 Jun 2009 00:54:33 -0300 Subject: perf_counter tools: Cover PLT symbols too PLT, the Procedure Linkage Table, is used with the dynamic linker to allow PIC code in executables and shared objects to figure out where functions are in other shared objects. It is one of the sources of unknown/unresolved symbols - this patch does what binutils does when you ask it to disassemble
(objdump -S) Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: John Kacur Cc: Stephane Eranian LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/util/symbol.c | 143 +++++++++++++++++++++++++++++-- 1 file changed, 138 insertions(+), 5 deletions(-) diff --git a/Documentation/perf_counter/util/symbol.c b/Documentation/perf_counter/util/symbol.c index 039931fcb1b..d52a1ae5342 100644 --- a/Documentation/perf_counter/util/symbol.c +++ b/Documentation/perf_counter/util/symbol.c @@ -258,6 +258,117 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, return sec; } +#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \ + for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \ + idx < nr_entries; \ + ++idx, pos = gelf_getrel(reldata, idx, &pos_mem)) + +#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \ + for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \ + idx < nr_entries; \ + ++idx, pos = gelf_getrela(reldata, idx, &pos_mem)) + +static int dso__synthesize_plt_symbols(struct dso *self, Elf *elf, + GElf_Ehdr *ehdr, Elf_Scn *scn_dynsym, + GElf_Shdr *shdr_dynsym, + size_t dynsym_idx) +{ + uint32_t nr_rel_entries, idx; + GElf_Sym sym; + __u64 plt_offset; + GElf_Shdr shdr_plt; + struct symbol *f; + GElf_Shdr shdr_rel_plt; + Elf_Data *reldata, *syms, *symstrs; + Elf_Scn *scn_plt_rel, *scn_symstrs; + char sympltname[1024]; + int nr = 0, symidx; + + scn_plt_rel = elf_section_by_name(elf, ehdr, &shdr_rel_plt, + ".rela.plt", NULL); + if (scn_plt_rel == NULL) { + scn_plt_rel = elf_section_by_name(elf, ehdr, &shdr_rel_plt, + ".rel.plt", NULL); + if (scn_plt_rel == NULL) + return 0; + } + + if (shdr_rel_plt.sh_link != dynsym_idx) + return 0; + + if (elf_section_by_name(elf, ehdr, &shdr_plt, ".plt", NULL) == NULL) + return 0; + + /* + * Fetch the relocation section to find the indexes to the GOT + * and the symbols in the .dynsym they refer to. 
+ */ + reldata = elf_getdata(scn_plt_rel, NULL); + if (reldata == NULL) + return -1; + + syms = elf_getdata(scn_dynsym, NULL); + if (syms == NULL) + return -1; + + scn_symstrs = elf_getscn(elf, shdr_dynsym->sh_link); + if (scn_symstrs == NULL) + return -1; + + symstrs = elf_getdata(scn_symstrs, NULL); + if (symstrs == NULL) + return -1; + + nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize; + plt_offset = shdr_plt.sh_offset; + + if (shdr_rel_plt.sh_type == SHT_RELA) { + GElf_Rela pos_mem, *pos; + + elf_section__for_each_rela(reldata, pos, pos_mem, idx, + nr_rel_entries) { + symidx = GELF_R_SYM(pos->r_info); + plt_offset += shdr_plt.sh_entsize; + gelf_getsym(syms, symidx, &sym); + snprintf(sympltname, sizeof(sympltname), + "%s@plt", elf_sym__name(&sym, symstrs)); + + f = symbol__new(plt_offset, shdr_plt.sh_entsize, + sympltname, self->sym_priv_size); + if (!f) + return -1; + + dso__insert_symbol(self, f); + ++nr; + } + } else if (shdr_rel_plt.sh_type == SHT_REL) { + GElf_Rel pos_mem, *pos; + elf_section__for_each_rel(reldata, pos, pos_mem, idx, + nr_rel_entries) { + symidx = GELF_R_SYM(pos->r_info); + plt_offset += shdr_plt.sh_entsize; + gelf_getsym(syms, symidx, &sym); + snprintf(sympltname, sizeof(sympltname), + "%s@plt", elf_sym__name(&sym, symstrs)); + + f = symbol__new(plt_offset, shdr_plt.sh_entsize, + sympltname, self->sym_priv_size); + if (!f) + return -1; + + dso__insert_symbol(self, f); + ++nr; + } + } else { + /* + * TODO: There is still one more shdr_rel_plt.sh_type + * I have to investigate, but it probably should be ignored. + */ + } + + return nr; +} + static int dso__load_sym(struct dso *self, int fd, const char *name, symbol_filter_t filter) { @@ -269,8 +380,9 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, GElf_Shdr shdr; Elf_Data *syms; GElf_Sym sym; - Elf_Scn *sec; + Elf_Scn *sec, *sec_dynsym; Elf *elf; + size_t dynsym_idx; int nr = 0; elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); @@ -285,12 +397,33 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, goto out_elf_end; } + /* + * We need to check if we have a .dynsym, so that we can handle the + * .plt, synthesizing its symbols, which aren't on the symtabs (be it + * .dynsym or .symtab) + */ + sec_dynsym = elf_section_by_name(elf, &ehdr, &shdr, + ".dynsym", &dynsym_idx); + if (sec_dynsym != NULL) { + nr = dso__synthesize_plt_symbols(self, elf, &ehdr, + sec_dynsym, &shdr, + dynsym_idx); + if (nr < 0) + goto out_elf_end; + } + + /* + * But if we have a full .symtab (that is a superset of .dynsym) we + * should add the symbols not in the .dynsym + */ sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL); - if (sec == NULL) - sec = elf_section_by_name(elf, &ehdr, &shdr, ".dynsym", NULL); + if (sec == NULL) { + if (sec_dynsym == NULL) + goto out_elf_end; - if (sec == NULL) - goto out_elf_end; + sec = sec_dynsym; + gelf_getshdr(sec, &shdr); + } syms = elf_getdata(sec, NULL); if (syms == NULL) -- cgit v1.2.3 From 3502973d005ed89cc2b3f39780813a341ddba97f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 3 Jun 2009 09:38:58 +0200 Subject: perf report: Print -D to stdout -D prints to stderr - which is a bit confusing - print to stdout instead. Also clean up the if (dump_trace) patterns via a dprintf helper.
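The dprintf() helper introduced here is the classic conditional-debug macro. A minimal standalone sketch of the pattern, assuming only the dump_trace flag from this patch (everything else is illustrative):

    #include <stdio.h>

    static int dump_trace;  /* set when the user passes -D */

    /*
     * The do/while (0) wrapper makes the macro act like a single
     * statement, so it stays safe inside unbraced if/else bodies:
     */
    #define dprintf(x...) do { if (dump_trace) printf(x); } while (0)

    int main(void)
    {
            dump_trace = 1;
            dprintf("%d events\n", 42);
            return 0;
    }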
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 64 ++++++++++++++--------------- 1 file changed, 30 insertions(+), 34 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 9da990fba4a..6207a3147fc 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -31,6 +31,8 @@ static int input; static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; static int dump_trace = 0; +#define dprintf(x...) do { if (dump_trace) printf(x); } while (0) + static int verbose; static int full_paths; @@ -729,14 +731,12 @@ more: uint64_t ip = event->ip.ip; struct map *map = NULL; - if (dump_trace) { - fprintf(stderr, "%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.misc, - event->ip.pid, - (void *)(long)ip); - } + dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->header.misc, + event->ip.pid, + (void *)(long)ip); if (thread == NULL) { fprintf(stderr, "problem processing %d event, skipping it.\n", @@ -781,15 +781,14 @@ more: struct thread *thread = threads__findnew(event->mmap.pid); struct map *map = map__new(&event->mmap, cwdp, cwdlen); - if (dump_trace) { - fprintf(stderr, "%p [%p]: PERF_EVENT_MMAP: [%p(%p) @ %p]: %s\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - (void *)(long)event->mmap.start, - (void *)(long)event->mmap.len, - (void *)(long)event->mmap.pgoff, - event->mmap.filename); - } + dprintf("%p [%p]: PERF_EVENT_MMAP: [%p(%p) @ %p]: %s\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + (void *)(long)event->mmap.start, + (void *)(long)event->mmap.len, + (void *)(long)event->mmap.pgoff, + event->mmap.filename); + if (thread == NULL || map == NULL) { if (verbose) fprintf(stderr, "problem processing PERF_EVENT_MMAP, skipping event.\n"); @@ -802,12 +801,11 @@ more: case PERF_EVENT_COMM: { struct thread *thread = threads__findnew(event->comm.pid); - if (dump_trace) { - fprintf(stderr, "%p [%p]: PERF_EVENT_COMM: %s:%d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->comm.comm, event->comm.pid); - } + dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->comm.comm, event->comm.pid); + if (thread == NULL || thread__set_comm(thread, event->comm.comm)) { fprintf(stderr, "problem processing PERF_EVENT_COMM, skipping event.\n"); @@ -818,11 +816,10 @@ more: } default: { broken_event: - if (dump_trace) - fprintf(stderr, "%p [%p]: skipping unknown header type: %d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.type); + dprintf("%p [%p]: skipping unknown header type: %d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->header.type); total_unknown++; @@ -846,14 +843,13 @@ broken_event: rc = EXIT_SUCCESS; close(input); - if (dump_trace) { - fprintf(stderr, " IP events: %10ld\n", total); - fprintf(stderr, " mmap events: %10ld\n", total_mmap); - fprintf(stderr, " comm events: %10ld\n", total_comm); - fprintf(stderr, " unknown events: %10ld\n", total_unknown); + dprintf(" IP events: %10ld\n", total); + dprintf(" mmap events: %10ld\n", total_mmap); + dprintf(" 
comm events: %10ld\n", total_comm); + dprintf(" unknown events: %10ld\n", total_unknown); + if (dump_trace) return 0; - } if (verbose >= 2) dsos__fprintf(stdout); -- cgit v1.2.3 From 5352f35d6ae7b8b981d77137fb268bc54d10624f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 3 Jun 2009 10:07:39 +0200 Subject: perf report: Improve sort key recognition - allow case-insensitive tokens - such as --sort Comm,Symbol - allow substring shortcuts: --sort sym - detect invalid tokens and bail out Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 6207a3147fc..a8d390596d8 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -453,28 +453,18 @@ static int sort_dimension__add(char *tok) if (sd->taken) continue; - if (strcmp(tok, sd->name)) + if (strncasecmp(tok, sd->name, strlen(tok))) continue; list_add_tail(&sd->entry->list, &hist_entry__sort_list); sd->taken = 1; + return 0; } return -ESRCH; } -static void setup_sorting(void) -{ - char *tmp, *tok, *str = strdup(sort_order); - - for (tok = strtok_r(str, ", ", &tmp); - tok; tok = strtok_r(NULL, ", ", &tmp)) - sort_dimension__add(tok); - - free(str); -} - static int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) { @@ -880,6 +870,21 @@ static const struct option options[] = { OPT_END() }; +static void setup_sorting(void) +{ + char *tmp, *tok, *str = strdup(sort_order); + + for (tok = strtok_r(str, ", ", &tmp); + tok; tok = strtok_r(NULL, ", ", &tmp)) { + if (sort_dimension__add(tok) < 0) { + error("Unknown --sort key: `%s'", tok); + usage_with_options(report_usage, options); + } + } + + free(str); +} + int cmd_report(int argc, const char **argv, const char *prefix) { symbol__init(); -- cgit v1.2.3 From ed966aac335a63083d3125198479447248637d9e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 3 Jun 2009 10:39:26 +0200 Subject: perf report: Handle vDSO symbols properly We were not looking up vDSO symbols properly, because they are in the kallsyms but are user-mode entries. Pass negative addresses to the kernel dso object, this way we resolve them properly: 0.05% [kernel]: vread_tsc Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index a8d390596d8..0f88d9ebb34 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -728,6 +728,8 @@ more: event->ip.pid, (void *)(long)ip); + dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid); + if (thread == NULL) { fprintf(stderr, "problem processing %d event, skipping it.\n", event->header.type); @@ -740,6 +742,8 @@ more: dso = kernel_dso; + dprintf(" ...... 
dso: %s\n", dso->name); + } else if (event->header.misc & PERF_EVENT_MISC_USER) { show = SHOW_USER; @@ -749,11 +753,22 @@ more: if (map != NULL) { dso = map->dso; ip -= map->start + map->pgoff; + } else { + /* + * If this is outside of all known maps, + * and is a negative address, try to look it + * up in the kernel dso, as it might be a + * vsyscall (which executes in user-mode): + */ + if ((long long)ip < 0) + dso = kernel_dso; } + dprintf(" ...... dso: %s\n", dso ? dso->name : ""); } else { show = SHOW_HV; level = 'H'; + dprintf(" ...... dso: [hypervisor]\n"); } if (show & show_mask) { -- cgit v1.2.3 From 6984efb692e97ce5f75f26e595685c04c2061bac Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 3 Jun 2009 19:38:58 +1000 Subject: perf_counter: powerpc: Fix event alternative code generation on POWER5/5+ Commit ef923214 ("perf_counter: powerpc: use u64 for event codes internally") introduced a bug where the return value from function find_alternative_bdecode gets put into a u64 variable and later tested to see if it is < 0. The effect is that we get extra, bogus event code alternatives on POWER5 and POWER5+, leading to error messages such as "oops compute_mmcr failed" being printed and counters not counting properly. This fixes it by using s64 for the return type of find_alternative_bdecode and for the local variable that the caller puts the value in. It also makes the event argument a u64 on POWER5+ for consistency. Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur Cc: Stephane Eranian LKML-Reference: <18982.17586.666132.90983@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/power5+-pmu.c | 4 ++-- arch/powerpc/kernel/power5-pmu.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c index c6cdfc165d6..8471e3c2e46 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c @@ -242,7 +242,7 @@ static const unsigned char bytedecode_alternatives[4][4] = { * event code for those that do, or -1 otherwise. This also handles * alternative PCMSEL values for add events. */ -static int find_alternative_bdecode(unsigned int event) +static s64 find_alternative_bdecode(u64 event) { int pmc, altpmc, pp, j; @@ -277,7 +277,7 @@ static int power5p_get_alternatives(u64 event, unsigned int flags, u64 alt[]) { int i, j, nalt = 1; int nlim; - u64 ae; + s64 ae; alt[0] = event; nalt = 1; diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c index d5344968ee9..1b44c5fca18 100644 --- a/arch/powerpc/kernel/power5-pmu.c +++ b/arch/powerpc/kernel/power5-pmu.c @@ -250,7 +250,7 @@ static const unsigned char bytedecode_alternatives[4][4] = { * PMCSEL values on other counters. This returns the alternative * event code for those that do, or -1 otherwise. 
*/ -static u64 find_alternative_bdecode(u64 event) +static s64 find_alternative_bdecode(u64 event) { int pmc, altpmc, pp, j; @@ -272,7 +272,7 @@ static u64 find_alternative_bdecode(u64 event) static int power5_get_alternatives(u64 event, unsigned int flags, u64 alt[]) { int i, j, nalt = 1; - u64 ae; + s64 ae; alt[0] = event; nalt = 1; -- cgit v1.2.3 From dcd945e0d8a6d654e3e1de51faea9f98f1504aa5 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 3 Jun 2009 19:40:36 +1000 Subject: perf_counter: powerpc: Fix race causing "oops trying to read PMC0" errors When using interrupting counters and limited (non-interrupting) counters at the same time, it's possible that we get an interrupt in write_mmcr0() after writing MMCR0 but before we have set up the counters using limited PMCs. What happens then is that we get into perf_counter_interrupt() with counter->hw.idx = 0 for the limited counters, leading to the "oops trying to read PMC0" error message being printed. This fixes the problem by making perf_counter_interrupt() robust against counter->hw.idx being zero (the counter is just ignored in that case) and also by changing write_mmcr0() to write MMCR0 initially with the counter overflow interrupt enable bits masked (set to 0). If the MMCR0 value requested by the caller has either of those bits set, we write MMCR0 again with the requested value of those bits after setting up the limited counters properly. Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur Cc: Stephane Eranian LKML-Reference: <18982.17684.138182.954599@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index ea54686cb78..4cc4ac5c791 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -372,16 +372,28 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0) /* * Write MMCR0, then read PMC5 and PMC6 immediately. + * To ensure we don't get a performance monitor interrupt + * between writing MMCR0 and freezing/thawing the limited + * counters, we first write MMCR0 with the counter overflow + * interrupt enable bits turned off. */ asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5" : "=&r" (pmc5), "=&r" (pmc6) - : "r" (mmcr0), "i" (SPRN_MMCR0), + : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)), + "i" (SPRN_MMCR0), "i" (SPRN_PMC5), "i" (SPRN_PMC6)); if (mmcr0 & MMCR0_FC) freeze_limited_counters(cpuhw, pmc5, pmc6); else thaw_limited_counters(cpuhw, pmc5, pmc6); + + /* + * Write the full MMCR0 including the counter overflow interrupt + * enable bits, if necessary. + */ + if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE)) + mtspr(SPRN_MMCR0, mmcr0); } /* @@ -1108,7 +1120,7 @@ static void perf_counter_interrupt(struct pt_regs *regs) for (i = 0; i < cpuhw->n_counters; ++i) { counter = cpuhw->counter[i]; - if (is_limited_pmc(counter->hw.idx)) + if (!counter->hw.idx || is_limited_pmc(counter->hw.idx)) continue; val = read_pmc(counter->hw.idx); if ((int)val < 0) { -- cgit v1.2.3 From 226f62fdd53d5b2c74e242aa11f6ad43d0285d3f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 3 Jun 2009 11:23:56 +0200 Subject: perf_counter: Add a comm hook for pure fork()s I noticed missing COMM events and found that we missed reporting them for pure forks. 
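Background, roughly: an exec() passes through set_task_comm(), which is where a comm event normally gets generated, but a pure fork() keeps running the parent's image and never reaches that path - so such tasks were only known to the tools by their ":<pid>" placeholder. A hedged userspace sketch of the case that was missed (the busy loop is just a stand-in workload):

    #include <unistd.h>
    #include <sys/wait.h>

    int main(void)
    {
            pid_t pid = fork();

            if (pid == 0) {
                    /*
                     * Pure fork: no exec(), hence no set_task_comm(),
                     * hence no comm event without the hook below.
                     */
                    for (volatile long i = 0; i < 100000000L; i++)
                            ;
                    _exit(0);
            }
            wait(NULL);
            return 0;
    }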
Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/fork.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kernel/fork.c b/kernel/fork.c index 23bf757ed32..b7d7a9f0bd7 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1412,6 +1412,12 @@ long do_fork(unsigned long clone_flags, if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); + } else { + /* + * vfork will do an exec which will call + * set_task_comm() + */ + perf_counter_comm(p); } audit_finish_fork(p); -- cgit v1.2.3 From e61078a0c88773d3465b0b9d665c5ed6b952b1cf Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 3 Jun 2009 11:24:33 +0200 Subject: perf record: Use long arg for counter period I wrote this to test the extended period emulation, we might as well merge it. Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 6 +++--- Documentation/perf_counter/util/parse-options.c | 16 ++++++++++++++++ Documentation/perf_counter/util/parse-options.h | 2 ++ 3 files changed, 21 insertions(+), 3 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index ec3b73adbd9..cea5b8d3c63 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -20,8 +20,8 @@ #define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1) #define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) -static int default_interval = 100000; -static int event_count[MAX_COUNTERS]; +static long default_interval = 100000; +static long event_count[MAX_COUNTERS]; static int fd[MAX_NR_CPUS][MAX_COUNTERS]; static int nr_cpus = 0; @@ -494,7 +494,7 @@ static const struct option options[] = { "append to the output file to do incremental profiling"), OPT_BOOLEAN('f', "force", &force, "overwrite existing data file"), - OPT_INTEGER('c', "count", &default_interval, + OPT_LONG('c', "count", &default_interval, "event period to sample"), OPT_STRING('o', "output", &output_name, "file", "output file name"), diff --git a/Documentation/perf_counter/util/parse-options.c b/Documentation/perf_counter/util/parse-options.c index 28b34c1c29c..b80abd9a99b 100644 --- a/Documentation/perf_counter/util/parse-options.c +++ b/Documentation/perf_counter/util/parse-options.c @@ -113,6 +113,22 @@ static int get_value(struct parse_opt_ctx_t *p, return opterror(opt, "expects a numerical value", flags); return 0; + case OPTION_LONG: + if (unset) { + *(long *)opt->value = 0; + return 0; + } + if (opt->flags & PARSE_OPT_OPTARG && !p->opt) { + *(long *)opt->value = opt->defval; + return 0; + } + if (get_arg(p, opt, flags, &arg)) + return -1; + *(long *)opt->value = strtol(arg, (char **)&s, 10); + if (*s) + return opterror(opt, "expects a numerical value", flags); + return 0; + default: die("should not happen, someone must be hit on the forehead"); } diff --git a/Documentation/perf_counter/util/parse-options.h b/Documentation/perf_counter/util/parse-options.h index a81c7faff68..a1039a6ce0e 100644 --- a/Documentation/perf_counter/util/parse-options.h +++ b/Documentation/perf_counter/util/parse-options.h @@ -14,6 +14,7 @@ enum parse_opt_type { /* options with arguments (usually) */ OPTION_STRING, 
OPTION_INTEGER, + OPTION_LONG, OPTION_CALLBACK, }; @@ -97,6 +98,7 @@ struct option { #define OPT_SET_INT(s, l, v, h, i) { OPTION_SET_INT, (s), (l), (v), NULL, (h), 0, NULL, (i) } #define OPT_SET_PTR(s, l, v, h, p) { OPTION_SET_PTR, (s), (l), (v), NULL, (h), 0, NULL, (p) } #define OPT_INTEGER(s, l, v, h) { OPTION_INTEGER, (s), (l), (v), NULL, (h) } +#define OPT_LONG(s, l, v, h) { OPTION_LONG, (s), (l), (v), NULL, (h) } #define OPT_STRING(s, l, v, a, h) { OPTION_STRING, (s), (l), (v), (a), (h) } #define OPT_DATE(s, l, v, h) \ { OPTION_CALLBACK, (s), (l), (v), "time",(h), 0, \ -- cgit v1.2.3 From 8229289b607682f90b946ad2c319526303c17700 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 3 Jun 2009 12:37:36 +0200 Subject: perf report: Fix comm sorting Since we can (and do) change comm strings during the collection phase, we cannot actually sort on them to build the histogram. Therefore add an (optional) third sorting phase to collapse the histogram. Comm sorting now builds the histogram on threads and then in the collapse phase collects all threads with the same comm. This collapsed histogram is then reversed and sorted on events. Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 118 ++++++++++++++++++++++++++-- 1 file changed, 112 insertions(+), 6 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 0f88d9ebb34..6d359c9f75d 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -211,9 +211,9 @@ static struct thread *thread__new(pid_t pid) if (self != NULL) { self->pid = pid; - self->comm = malloc(30); + self->comm = malloc(32); if (self->comm) - sprintf(self->comm, ":%d", pid); + snprintf(self->comm, 32, ":%d", self->pid); INIT_LIST_HEAD(&self->maps); } @@ -222,6 +222,8 @@ static struct thread *thread__new(pid_t pid) static int thread__set_comm(struct thread *self, const char *comm) { + if (self->comm) + free(self->comm); self->comm = strdup(comm); return self->comm ?
0 : -ENOMEM; } @@ -303,9 +305,12 @@ struct sort_entry { char *header; int64_t (*cmp)(struct hist_entry *, struct hist_entry *); + int64_t (*collapse)(struct hist_entry *, struct hist_entry *); size_t (*print)(FILE *fp, struct hist_entry *); }; +/* --sort pid */ + static int64_t sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) { @@ -324,8 +329,16 @@ static struct sort_entry sort_thread = { .print = sort__thread_print, }; +/* --sort comm */ + static int64_t sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return right->thread->pid - left->thread->pid; +} + +static int64_t +sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) { char *comm_l = left->thread->comm; char *comm_r = right->thread->comm; @@ -349,11 +362,14 @@ sort__comm_print(FILE *fp, struct hist_entry *self) } static struct sort_entry sort_comm = { - .header = " Command", - .cmp = sort__comm_cmp, - .print = sort__comm_print, + .header = " Command", + .cmp = sort__comm_cmp, + .collapse = sort__comm_collapse, + .print = sort__comm_print, }; +/* --sort dso */ + static int64_t sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) { @@ -387,6 +403,8 @@ static struct sort_entry sort_dso = { .print = sort__dso_print, }; +/* --sort symbol */ + static int64_t sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) { @@ -428,6 +446,8 @@ static struct sort_entry sort_sym = { .print = sort__sym_print, }; +static int sort__need_collapse = 0; + struct sort_dimension { char *name; struct sort_entry *entry; @@ -456,6 +476,9 @@ static int sort_dimension__add(char *tok) if (strncasecmp(tok, sd->name, strlen(tok))) continue; + if (sd->entry->collapse) + sort__need_collapse = 1; + list_add_tail(&sd->entry->list, &hist_entry__sort_list); sd->taken = 1; @@ -480,6 +503,25 @@ hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) return cmp; } +static int64_t +hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) +{ + struct sort_entry *se; + int64_t cmp = 0; + + list_for_each_entry(se, &hist_entry__sort_list, list) { + int64_t (*f)(struct hist_entry *, struct hist_entry *); + + f = se->collapse ?: se->cmp; + + cmp = f(left, right); + if (cmp) + break; + } + + return cmp; +} + static size_t hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples) { @@ -549,6 +591,64 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, return 0; } +static void hist_entry__free(struct hist_entry *he) +{ + free(he); +} + +/* + * collapse the histogram + */ + +static struct rb_root collapse_hists; + +static void collapse__insert_entry(struct hist_entry *he) +{ + struct rb_node **p = &collapse_hists.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *iter; + int64_t cmp; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct hist_entry, rb_node); + + cmp = hist_entry__collapse(iter, he); + + if (!cmp) { + iter->count += he->count; + hist_entry__free(he); + return; + } + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &collapse_hists); +} + +static void collapse__resort(void) +{ + struct rb_node *next; + struct hist_entry *n; + + if (!sort__need_collapse) + return; + + next = rb_first(&hist); + while (next) { + n = rb_entry(next, struct hist_entry, rb_node); + next = rb_next(&n->rb_node); + + rb_erase(&n->rb_node, &hist); + collapse__insert_entry(n); + } +} + /* * reverse the map, sort on count. 
*/ @@ -577,9 +677,14 @@ static void output__insert_entry(struct hist_entry *he) static void output__resort(void) { - struct rb_node *next = rb_first(&hist); + struct rb_node *next; struct hist_entry *n; + if (sort__need_collapse) + next = rb_first(&collapse_hists); + else + next = rb_first(&hist); + while (next) { n = rb_entry(next, struct hist_entry, rb_node); next = rb_next(&n->rb_node); @@ -859,6 +964,7 @@ broken_event: if (verbose >= 2) dsos__fprintf(stdout); + collapse__resort(); output__resort(); output__fprintf(stdout, total); -- cgit v1.2.3 From a96bbc16418bc691317f265d6bf98ba941ca9c1a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 3 Jun 2009 14:01:36 +0200 Subject: perf_counter: Fix race in counter initialization We need the PID namespace and counter ID available when the counter overflows and we need to generate a sample event. [ Impact: fix kernel crash with high-frequency sampling ] Reported-by: Ingo Molnar Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: John Kacur LKML-Reference: [ fixed a further crash and cleaned up the initialization a bit ] Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 317cef78a38..ab4455447f8 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -48,6 +48,8 @@ int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */ int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */ int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */ +static atomic64_t perf_counter_id; + /* * Lock for (sysadmin-configurable) counter reservations: */ @@ -3351,14 +3353,18 @@ perf_counter_alloc(struct perf_counter_attr *attr, mutex_init(&counter->mmap_mutex); - counter->cpu = cpu; + counter->cpu = cpu; counter->attr = *attr; - counter->group_leader = group_leader; - counter->pmu = NULL; - counter->ctx = ctx; - counter->oncpu = -1; + counter->group_leader = group_leader; + counter->pmu = NULL; + counter->ctx = ctx; + counter->oncpu = -1; + + counter->ns = get_pid_ns(current->nsproxy->pid_ns); + counter->id = atomic64_inc_return(&perf_counter_id); + + counter->state = PERF_COUNTER_STATE_INACTIVE; - counter->state = PERF_COUNTER_STATE_INACTIVE; if (attr->disabled) counter->state = PERF_COUNTER_STATE_OFF; @@ -3402,6 +3408,8 @@ done: err = PTR_ERR(pmu); if (err) { + if (counter->ns) + put_pid_ns(counter->ns); kfree(counter); return ERR_PTR(err); } @@ -3419,8 +3427,6 @@ done: return counter; } -static atomic64_t perf_counter_id; - /** * sys_perf_counter_open - open a performance counter, associate it to a task/cpu * @@ -3515,9 +3521,6 @@ SYSCALL_DEFINE5(perf_counter_open, list_add_tail(&counter->owner_entry, ¤t->perf_counter_list); mutex_unlock(¤t->perf_counter_mutex); - counter->ns = get_pid_ns(current->nsproxy->pid_ns); - counter->id = atomic64_inc_return(&perf_counter_id); - fput_light(counter_file, fput_needed2); out_fput: -- cgit v1.2.3 From f2521b6e4c365bd0aac61b2c346e6e9f22607e31 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 3 Jun 2009 19:17:25 +0200 Subject: perf_counter tools: Clean up old kerneltop references kerneltop has been replaced with perf top - so fix up a few remaining references to it in display text and error messages. 
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 6 +++--- Documentation/perf_counter/builtin-top.c | 14 ++++++-------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index cea5b8d3c63..fa625258315 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -357,7 +357,8 @@ static void open_counters(int cpu, pid_t pid) if (fd[nr_cpu][counter] < 0) { int err = errno; - printf("kerneltop error: syscall returned with %d (%s)\n", + + error("syscall returned with %d (%s)\n", fd[nr_cpu][counter], strerror(err)); if (err == EPERM) printf("Are you root?\n"); @@ -382,8 +383,7 @@ static void open_counters(int cpu, pid_t pid) mmap_array[nr_cpu][counter].base = mmap(NULL, (mmap_pages+1)*page_size, PROT_READ, MAP_SHARED, fd[nr_cpu][counter], 0); if (mmap_array[nr_cpu][counter].base == MAP_FAILED) { - printf("kerneltop error: failed to mmap with %d (%s)\n", - errno, strerror(errno)); + error("failed to mmap with %d (%s)\n", errno, strerror(errno)); exit(-1); } } diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index a63935276ca..16a618446d3 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -208,7 +208,7 @@ static void print_sym_table(void) printf( "------------------------------------------------------------------------------\n"); - printf( " KernelTop:%8.0f irqs/sec kernel:%4.1f%% [", + printf( " PerfTop:%8.0f irqs/sec kernel:%4.1f%% [", events_per_sec, 100.0 - (100.0*((events_per_sec-kevents_per_sec)/events_per_sec))); @@ -281,7 +281,7 @@ static void print_sym_table(void) static void *display_thread(void *arg) { - printf("KernelTop refresh period: %d seconds\n", delay_secs); + printf("PerfTop refresh period: %d seconds\n", delay_secs); while (!sleep(delay_secs)) print_sym_table(); @@ -564,7 +564,8 @@ static int __cmd_top(void) fd[i][counter] = sys_perf_counter_open(&attr, target_pid, cpu, group_fd, 0); if (fd[i][counter] < 0) { int err = errno; - printf("kerneltop error: syscall returned with %d (%s)\n", + + error("syscall returned with %d (%s)\n", fd[i][counter], strerror(err)); if (err == EPERM) printf("Are you root?\n"); @@ -588,11 +589,8 @@ static int __cmd_top(void) mmap_array[i][counter].mask = mmap_pages*page_size - 1; mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size, PROT_READ, MAP_SHARED, fd[i][counter], 0); - if (mmap_array[i][counter].base == MAP_FAILED) { - printf("kerneltop error: failed to mmap with %d (%s)\n", - errno, strerror(errno)); - exit(-1); - } + if (mmap_array[i][counter].base == MAP_FAILED) + die("failed to mmap with %d (%s)\n", errno, strerror(errno)); } } -- cgit v1.2.3 From 021e9f476511ebe23d7f45854a52dfe74c09b6ee Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 3 Jun 2009 19:27:19 +0200 Subject: perf record: Refine capture printout Print out the number of bytes captured, and the (estimated) number of events the output file contains. 
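The arithmetic behind the new message, as a standalone sketch - the 24 bytes per event is only the rough average RIP event size that the patch itself assumes, and the byte count here is made up:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long bytes_written = 4718592;  /* example value */

            printf("[ perf record: Captured and wrote %.3f MB (~%llu events) ]\n",
                   (double)bytes_written / 1024.0 / 1024.0,
                   bytes_written / 24);
            return 0;
    }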
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 34 ++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index fa625258315..efa2eb498e9 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -67,6 +67,8 @@ static unsigned int mmap_read_head(struct mmap_data *md) static long events; static struct timeval last_read, this_read; +static __u64 bytes_written; + static void mmap_read(struct mmap_data *md) { unsigned int head = mmap_read_head(md); @@ -114,28 +116,34 @@ static void mmap_read(struct mmap_data *md) buf = &data[old & md->mask]; size = md->mask + 1 - (old & md->mask); old += size; + while (size) { int ret = write(output, buf, size); - if (ret < 0) { - perror("failed to write"); - exit(-1); - } + + if (ret < 0) + die("failed to write"); + size -= ret; buf += ret; + + bytes_written += ret; } } buf = &data[old & md->mask]; size = head - old; old += size; + while (size) { int ret = write(output, buf, size); - if (ret < 0) { - perror("failed to write"); - exit(-1); - } + + if (ret < 0) + die("failed to write"); + size -= ret; buf += ret; + + bytes_written += ret; } md->prev = old; @@ -467,8 +475,14 @@ static int __cmd_record(int argc, const char **argv) ret = poll(event_array, nr_poll, 100); } - - fprintf(stderr, "[ perf record: Captured and wrote %ld events. ]\n", events); + /* + * Approximate RIP event size: 24 bytes. + */ + fprintf(stderr, + "[ perf record: Captured and wrote %.3f MB %s (~%lld events) ]\n", + (double)bytes_written / 1024.0 / 1024.0, + output_name, + bytes_written / 24); return 0; } -- cgit v1.2.3 From e98e96fe43ae92fad0930f05fb2b298e49b9f3b5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 3 Jun 2009 19:30:38 +0200 Subject: perf report: Display 100% correctly Needs to be 6.2 not 5.2, for 100.00% to be aligned properly. 
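Spelled out: printf field widths are minimums, and "100.00" is six characters, so a %5.2f column silently widens for the 100% case while every other row stays at five. A quick demonstration:

    #include <stdio.h>

    int main(void)
    {
            printf("[%5.2f%%]\n", 100.0);  /* [100.00%] - overflows the width */
            printf("[%6.2f%%]\n", 100.0);  /* [100.00%] - fits exactly       */
            printf("[%6.2f%%]\n", 9.5);    /* [  9.50%] - stays aligned      */
            return 0;
    }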
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 6d359c9f75d..e837bb983dc 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -529,7 +529,7 @@ hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples) size_t ret; if (total_samples) { - ret = fprintf(fp, " %5.2f%%", + ret = fprintf(fp, " %6.2f%%", (self->count * 100.0) / total_samples); } else ret = fprintf(fp, "%12d ", self->count); -- cgit v1.2.3 From 44db76c8553c328f4ae02481d77bb3a88ca17645 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 3 Jun 2009 19:36:07 +0200 Subject: perf stat: Print out all arguments Before: Performance counter stats for '/home/mingo/hackbench': After: Performance counter stats for '/home/mingo/hackbench 10': Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-stat.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index 2357a663b67..4fc0d80440e 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -228,6 +228,7 @@ static int do_perfstat(int argc, const char **argv) int counter; int status; int pid; + int i; if (!system_wide) nr_cpus = 1; @@ -243,14 +244,17 @@ static int do_perfstat(int argc, const char **argv) if ((pid = fork()) < 0) perror("failed to fork"); + if (!pid) { if (execvp(argv[0], (char **)argv)) { perror(argv[0]); exit(-1); } } + while (wait(&status) >= 0) ; + prctl(PR_TASK_PERF_COUNTERS_DISABLE); t1 = rdclock(); @@ -259,8 +263,12 @@ static int do_perfstat(int argc, const char **argv) fflush(stdout); fprintf(stderr, "\n"); - fprintf(stderr, " Performance counter stats for \'%s\':\n", - argv[0]); + fprintf(stderr, " Performance counter stats for \'%s", argv[0]); + + for (i = 1; i < argc; i++) + fprintf(stderr, " %s", argv[i]); + + fprintf(stderr, "\':\n"); fprintf(stderr, "\n"); for (counter = 0; counter < nr_counters; counter++) -- cgit v1.2.3 From eed4dcd443da7a46131ef37c7a389b444905960e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 3 Jun 2009 19:59:24 +0200 Subject: perf report: Add front-entry cache for lookups Before: Performance counter stats for './perf report -i perf.data.big': 12453988058 instructions Performance counter stats for './perf report -i perf.data.big': 12379566017 instructions 0.60% reduction. 
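The trick is a one-entry cache in front of the rbtree: sample events for a given PID arrive in long runs, so remembering the last hit skips the full tree walk most of the time. A toy, self-contained version of the pattern - not the perf code itself, which follows below:

    #include <stdio.h>

    static int slow_lookups;

    /* stand-in for the expensive rbtree walk */
    static int slow_lookup(int pid)
    {
            slow_lookups++;
            return pid * 2;
    }

    static int cached_pid = -1, cached_val;

    static int lookup(int pid)
    {
            if (cached_pid == pid)           /* front-end cache hit */
                    return cached_val;

            cached_val = slow_lookup(pid);   /* miss: do the real work */
            cached_pid = pid;
            return cached_val;
    }

    int main(void)
    {
            int pids[] = { 7, 7, 7, 9, 9, 7 }, i;

            for (i = 0; i < 6; i++)
                    lookup(pids[i]);
            printf("%d slow lookups for 6 requests\n", slow_lookups);
            return 0;
    }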
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index e837bb983dc..33b3b15fb01 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -229,6 +229,7 @@ static int thread__set_comm(struct thread *self, const char *comm) } static struct rb_root threads; +static struct thread *last_match; static struct thread *threads__findnew(pid_t pid) { @@ -236,12 +237,22 @@ static struct thread *threads__findnew(pid_t pid) struct rb_node *parent = NULL; struct thread *th; + /* + * Front-end cache - PID lookups come in blocks, + * so most of the time we don't have to look up + * the full rbtree: + */ + if (last_match && last_match->pid == pid) + return last_match; + while (*p != NULL) { parent = *p; th = rb_entry(parent, struct thread, rb_node); - if (th->pid == pid) + if (th->pid == pid) { + last_match = th; return th; + } if (pid < th->pid) p = &(*p)->rb_left; @@ -253,7 +264,9 @@ static struct thread *threads__findnew(pid_t pid) if (th != NULL) { rb_link_node(&th->rb_node, parent, p); rb_insert_color(&th->rb_node, &threads); + last_match = th; } + return th; } -- cgit v1.2.3 From 051cdc3c2d0e24443ac03aff03ee89807ec8c589 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 3 Jun 2009 20:09:11 +0200 Subject: perf help: Fix bug when there's no perf-* command around main_cmds can be empty - fix util/help.c to handle this case without segfaulting. Reported-by: Steven Rostedt Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/util/help.c | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/Documentation/perf_counter/util/help.c b/Documentation/perf_counter/util/help.c index 397487fb2be..6653f7dd1d7 100644 --- a/Documentation/perf_counter/util/help.c +++ b/Documentation/perf_counter/util/help.c @@ -298,7 +298,7 @@ static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old) const char *help_unknown_cmd(const char *cmd) { - int i, n, best_similarity = 0; + int i, n = 0, best_similarity = 0; struct cmdnames main_cmds, other_cmds; memset(&main_cmds, 0, sizeof(main_cmds)); @@ -315,20 +315,24 @@ const char *help_unknown_cmd(const char *cmd) sizeof(main_cmds.names), cmdname_compare); uniq(&main_cmds); - /* This reuses cmdname->len for similarity index */ - for (i = 0; i < main_cmds.cnt; ++i) - main_cmds.names[i]->len = - levenshtein(cmd, main_cmds.names[i]->name, 0, 2, 1, 4); + if (main_cmds.cnt) { + /* This reuses cmdname->len for similarity index */ + for (i = 0; i < main_cmds.cnt; ++i) + main_cmds.names[i]->len = + levenshtein(cmd, main_cmds.names[i]->name, 0, 2, 1, 4); - qsort(main_cmds.names, main_cmds.cnt, - sizeof(*main_cmds.names), levenshtein_compare); + qsort(main_cmds.names, main_cmds.cnt, + sizeof(*main_cmds.names), levenshtein_compare); + + best_similarity = main_cmds.names[0]->len; + n = 1; + while (n < main_cmds.cnt && best_similarity == main_cmds.names[n]->len) + ++n; + } - best_similarity = main_cmds.names[0]->len; - n = 1; - while (n < main_cmds.cnt && best_similarity ==
main_cmds.names[n]->len) - ++n; if (autocorrect && n == 1) { const char *assumed = main_cmds.names[0]->name; + main_cmds.names[0] = NULL; clean_cmdnames(&main_cmds); fprintf(stderr, "WARNING: You called a Git program named '%s', " @@ -345,7 +349,7 @@ const char *help_unknown_cmd(const char *cmd) fprintf(stderr, "perf: '%s' is not a perf-command. See 'perf --help'.\n", cmd); - if (best_similarity < 6) { + if (main_cmds.cnt && best_similarity < 6) { fprintf(stderr, "\nDid you mean %s?\n", n < 2 ? "this": "one of these"); -- cgit v1.2.3 From 095b3a6a030f7d4f24825ae93fc384b3d4b4fafa Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 3 Jun 2009 20:13:51 +0200 Subject: perf_counter tools: Optimize harder Use -O6 to build the tools. Before: 12387507370 instructions # 3121.653 M/sec After: 6244894971 instructions # 3458.437 M/sec Almost twice as fast! Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index eae88561233..005709b7b19 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -159,7 +159,7 @@ uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') # CFLAGS and LDFLAGS are for the users to override from the command line. -CFLAGS = -ggdb3 -Wall -Werror -Wstrict-prototypes -Wmissing-declarations -Wmissing-prototypes -std=gnu99 -Wdeclaration-after-statement +CFLAGS = -ggdb3 -Wall -Werror -Wstrict-prototypes -Wmissing-declarations -Wmissing-prototypes -std=gnu99 -Wdeclaration-after-statement -O6 LDFLAGS = -lpthread -lrt -lelf ALL_CFLAGS = $(CFLAGS) ALL_LDFLAGS = $(LDFLAGS) -- cgit v1.2.3 From 18374ab76e3ec1cf1b0ca5a8d08e35cfc5d01669 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 3 Jun 2009 14:49:21 -0300 Subject: perf_counter tools: Fix off-by-one bug in symbol__new The end is really (start + len - 1). Noticed when synthesizing the PLT symbols, that are small (16 bytes), and hot on the start RIP. 
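In other words: symbol ranges are kept with an inclusive end, so a len-byte symbol owns [start, start + len - 1]; with end = start + len, the first byte of the next back-to-back 16-byte PLT entry lands in two symbols at once. As plain arithmetic (addresses illustrative):

    #include <assert.h>

    int main(void)
    {
            unsigned long start = 0x400, len = 16;
            unsigned long end = start + len - 1;  /* 0x40f: last byte owned */

            assert(end == 0x40f);
            /* the next PLT entry starts one past our last byte: */
            assert(start + len == 0x410 && 0x410 > end);
            return 0;
    }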
Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: <20090603174921.GG7805@ghostprotocols.net> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/util/symbol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/perf_counter/util/symbol.c b/Documentation/perf_counter/util/symbol.c index d52a1ae5342..35ee6de1e51 100644 --- a/Documentation/perf_counter/util/symbol.c +++ b/Documentation/perf_counter/util/symbol.c @@ -19,7 +19,7 @@ static struct symbol *symbol__new(uint64_t start, uint64_t len, self = ((void *)self) + priv_size; } self->start = start; - self->end = start + len; + self->end = start + len - 1; memcpy(self->name, name, namelen); } -- cgit v1.2.3 From 233f0b95ca3a0d1dcbd70bc7e519069a8e10d23e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 3 Jun 2009 21:48:40 +0200 Subject: perf_counter tools: Work around warnings in older GCCs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GCC 4.1.2 produces: util/parse-options.c: In function ‘get_value’: util/parse-options.c:36: warning: ‘arg’ may be used uninitialized in this function builtin-top.c: In function ‘display_thread’: builtin-top.c:178: warning: ‘printed’ may be used uninitialized in this function Annotate them away by initializing these variables to 0. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-top.c | 2 +- Documentation/perf_counter/util/parse-options.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 16a618446d3..7c907e25d82 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -175,7 +175,7 @@ static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se) static void print_sym_table(void) { - int printed, j; + int printed = 0, j; int counter; float events_per_sec = events/delay_secs; float kevents_per_sec = (events-userspace_events)/delay_secs; diff --git a/Documentation/perf_counter/util/parse-options.c b/Documentation/perf_counter/util/parse-options.c index b80abd9a99b..551b6bc34e7 100644 --- a/Documentation/perf_counter/util/parse-options.c +++ b/Documentation/perf_counter/util/parse-options.c @@ -33,7 +33,7 @@ static int get_arg(struct parse_opt_ctx_t *p, const struct option *opt, static int get_value(struct parse_opt_ctx_t *p, const struct option *opt, int flags) { - const char *s, *arg; + const char *s, *arg = NULL; const int unset = flags & OPT_UNSET; if (unset && p->opt) -- cgit v1.2.3 From 128f048f0f0d2a477ad2555e7acd2ad15a1b6061 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 3 Jun 2009 22:19:36 +0200 Subject: perf_counter: Fix throttling lock-up Throttling logic is broken and we can lock up with too small hw sampling intervals. Make the throttling code more robust: disable counters even if we already disabled them. (Also clean up whitespace damage I noticed while reading various pieces of code related to throttling.) Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 2 +- kernel/perf_counter.c | 19 ++++++++++++++----- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 12cc05ed9f4..8f53f3a7da2 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -91,7 +91,7 @@ static u64 intel_pmu_raw_event(u64 event) #define CORE_EVNTSEL_INV_MASK 0x00800000ULL #define CORE_EVNTSEL_COUNTER_MASK 0xFF000000ULL -#define CORE_EVNTSEL_MASK \ +#define CORE_EVNTSEL_MASK \ (CORE_EVNTSEL_EVENT_MASK | \ CORE_EVNTSEL_UNIT_MASK | \ CORE_EVNTSEL_EDGE_MASK | \ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index ab4455447f8..0bb03f15a5b 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2822,11 +2822,20 @@ int perf_counter_overflow(struct perf_counter *counter, if (!throttle) { counter->hw.interrupts++; - } else if (counter->hw.interrupts != MAX_INTERRUPTS) { - counter->hw.interrupts++; - if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) { - counter->hw.interrupts = MAX_INTERRUPTS; - perf_log_throttle(counter, 0); + } else { + if (counter->hw.interrupts != MAX_INTERRUPTS) { + counter->hw.interrupts++; + if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) { + counter->hw.interrupts = MAX_INTERRUPTS; + perf_log_throttle(counter, 0); + ret = 1; + } + } else { + /* + * Keep re-disabling counters even though on the previous + * pass we disabled it - just in case we raced with a + * sched-in and the counter got enabled again: + */ ret = 1; } } -- cgit v1.2.3 From d80d338d2fb611b65830db7ea56680624776030f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 3 Jun 2009 23:14:49 +0200 Subject: perf report: Clean up event processing - Split out event processing into a process_event() helper. - Untangle the cwd parameters - it's constant, so it can be a static.
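For reference, the cwd machinery being untangled here relativizes mmap filenames against the directory perf was run from; a standalone sketch of that intent, with the strcommon() body reconstructed to match the diff below and the paths made up:

    #include <stdio.h>

    static const char *cwd = "/home/mingo";
    static int cwdlen = 11;

    /* count the leading characters pathname shares with cwd */
    static int strcommon(const char *pathname)
    {
            int n = 0;

            while (pathname[n] == cwd[n] && n < cwdlen)
                    ++n;

            return n;
    }

    int main(void)
    {
            const char *filename = "/home/mingo/hackbench";
            char newfilename[4096];

            if (strcommon(filename) == cwdlen) {
                    snprintf(newfilename, sizeof(newfilename), ".%s",
                             filename + cwdlen);
                    printf("%s\n", newfilename);  /* ./hackbench */
            }
            return 0;
    }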
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 186 +++++++++++++++------------- 1 file changed, 98 insertions(+), 88 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 33b3b15fb01..333d31269e3 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -147,7 +147,11 @@ static int load_kernel(void) return err; } -static int strcommon(const char *pathname, const char *cwd, int cwdlen) +static char __cwd[PATH_MAX]; +static char *cwd = __cwd; +static int cwdlen; + +static int strcommon(const char *pathname) { int n = 0; @@ -165,7 +169,7 @@ struct map { struct dso *dso; }; -static struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen) +static struct map *map__new(struct mmap_event *event) { struct map *self = malloc(sizeof(*self)); @@ -174,7 +178,8 @@ static struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen) char newfilename[PATH_MAX]; if (cwd) { - int n = strcommon(filename, cwd, cwdlen); + int n = strcommon(filename); + if (n == cwdlen) { snprintf(newfilename, sizeof(newfilename), ".%s", filename + n); @@ -752,85 +757,11 @@ static void register_idle_thread(void) } } +static unsigned long total = 0, total_mmap = 0, total_comm = 0, total_unknown = 0; -static int __cmd_report(void) +static int +process_event(event_t *event, unsigned long offset, unsigned long head) { - unsigned long offset = 0; - unsigned long head = 0; - struct stat stat; - char *buf; - event_t *event; - int ret, rc = EXIT_FAILURE; - uint32_t size; - unsigned long total = 0, total_mmap = 0, total_comm = 0, total_unknown = 0; - char cwd[PATH_MAX], *cwdp = cwd; - int cwdlen; - - register_idle_thread(); - - input = open(input_name, O_RDONLY); - if (input < 0) { - perror("failed to open file"); - exit(-1); - } - - ret = fstat(input, &stat); - if (ret < 0) { - perror("failed to stat file"); - exit(-1); - } - - if (!stat.st_size) { - fprintf(stderr, "zero-sized file, nothing to do!\n"); - exit(0); - } - - if (load_kernel() < 0) { - perror("failed to load kernel symbols"); - return EXIT_FAILURE; - } - - if (!full_paths) { - if (getcwd(cwd, sizeof(cwd)) == NULL) { - perror("failed to get the current directory"); - return EXIT_FAILURE; - } - cwdlen = strlen(cwd); - } else { - cwdp = NULL; - cwdlen = 0; - } -remap: - buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, - MAP_SHARED, input, offset); - if (buf == MAP_FAILED) { - perror("failed to mmap file"); - exit(-1); - } - -more: - event = (event_t *)(buf + head); - - size = event->header.size; - if (!size) - size = 8; - - if (head + event->header.size >= page_size * mmap_window) { - unsigned long shift = page_size * (head / page_size); - int ret; - - ret = munmap(buf, page_size * mmap_window); - assert(ret == 0); - - offset += shift; - head -= shift; - goto remap; - } - - size = event->header.size; - if (!size) - goto broken_event; - if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) { char level; int show = 0; @@ -851,7 +782,7 @@ more: if (thread == NULL) { fprintf(stderr, "problem processing %d event, skipping it.\n", event->header.type); - goto broken_event; + return -1; } if (event->header.misc & PERF_EVENT_MISC_KERNEL) { @@ -895,14 +826,14 @@ more: if (hist_entry__add(thread, map, dso, sym, ip, level)) { fprintf(stderr, "problem 
incrementing symbol count, skipping event\n"); - goto broken_event; + return -1; } } total++; } else switch (event->header.type) { case PERF_EVENT_MMAP: { struct thread *thread = threads__findnew(event->mmap.pid); - struct map *map = map__new(&event->mmap, cwdp, cwdlen); + struct map *map = map__new(&event->mmap); dprintf("%p [%p]: PERF_EVENT_MMAP: [%p(%p) @ %p]: %s\n", (void *)(offset + head), @@ -915,7 +846,7 @@ more: if (thread == NULL || map == NULL) { if (verbose) fprintf(stderr, "problem processing PERF_EVENT_MMAP, skipping event.\n"); - goto broken_event; + return -1; } thread__insert_map(thread, map); total_mmap++; @@ -932,13 +863,93 @@ more: if (thread == NULL || thread__set_comm(thread, event->comm.comm)) { fprintf(stderr, "problem processing PERF_EVENT_COMM, skipping event.\n"); - goto broken_event; + return -1; } total_comm++; break; } - default: { -broken_event: + default: + return -1; + } + + return 0; +} + +static int __cmd_report(void) +{ + unsigned long offset = 0; + unsigned long head = 0; + struct stat stat; + char *buf; + event_t *event; + int ret, rc = EXIT_FAILURE; + uint32_t size; + + register_idle_thread(); + + input = open(input_name, O_RDONLY); + if (input < 0) { + perror("failed to open file"); + exit(-1); + } + + ret = fstat(input, &stat); + if (ret < 0) { + perror("failed to stat file"); + exit(-1); + } + + if (!stat.st_size) { + fprintf(stderr, "zero-sized file, nothing to do!\n"); + exit(0); + } + + if (load_kernel() < 0) { + perror("failed to load kernel symbols"); + return EXIT_FAILURE; + } + + if (!full_paths) { + if (getcwd(__cwd, sizeof(__cwd)) == NULL) { + perror("failed to get the current directory"); + return EXIT_FAILURE; + } + cwdlen = strlen(cwd); + } else { + cwd = NULL; + cwdlen = 0; + } +remap: + buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, + MAP_SHARED, input, offset); + if (buf == MAP_FAILED) { + perror("failed to mmap file"); + exit(-1); + } + +more: + event = (event_t *)(buf + head); + + size = event->header.size; + if (!size) + size = 8; + + if (head + event->header.size >= page_size * mmap_window) { + unsigned long shift = page_size * (head / page_size); + int ret; + + ret = munmap(buf, page_size * mmap_window); + assert(ret == 0); + + offset += shift; + head -= shift; + goto remap; + } + + size = event->header.size; + + if (!size || process_event(event, offset, head) < 0) { + dprintf("%p [%p]: skipping unknown header type: %d\n", (void *)(offset + head), (void *)(long)(event->header.size), @@ -956,7 +967,6 @@ broken_event: size = 8; } - } head += size; -- cgit v1.2.3 From 75051724f78677532618dd164a515baf106990e5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 3 Jun 2009 23:14:49 +0200 Subject: perf report: Split out event processing helpers - Introduce per event helper functions Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 211 ++++++++++++++++------------ 1 file changed, 118 insertions(+), 93 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 333d31269e3..82b62529e65 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -50,6 +50,7 @@ struct ip_event { __u64 ip; __u32 pid, tid; }; + struct mmap_event { struct perf_event_header header; __u32 pid, tid; @@ -58,9 +59,10 @@ struct mmap_event { __u64 pgoff; 
char filename[PATH_MAX]; }; + struct comm_event { struct perf_event_header header; - __u32 pid,tid; + __u32 pid, tid; char comm[16]; }; @@ -760,114 +762,137 @@ static void register_idle_thread(void) static unsigned long total = 0, total_mmap = 0, total_comm = 0, total_unknown = 0; static int -process_event(event_t *event, unsigned long offset, unsigned long head) +process_overflow_event(event_t *event, unsigned long offset, unsigned long head) { - if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) { - char level; - int show = 0; - struct dso *dso = NULL; - struct thread *thread = threads__findnew(event->ip.pid); - uint64_t ip = event->ip.ip; - struct map *map = NULL; - - dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.misc, - event->ip.pid, - (void *)(long)ip); - - dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid); - - if (thread == NULL) { - fprintf(stderr, "problem processing %d event, skipping it.\n", - event->header.type); - return -1; - } - - if (event->header.misc & PERF_EVENT_MISC_KERNEL) { - show = SHOW_KERNEL; - level = 'k'; + char level; + int show = 0; + struct dso *dso = NULL; + struct thread *thread = threads__findnew(event->ip.pid); + uint64_t ip = event->ip.ip; + struct map *map = NULL; + + dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->header.misc, + event->ip.pid, + (void *)(long)ip); + + dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid); + + if (thread == NULL) { + fprintf(stderr, "problem processing %d event, skipping it.\n", + event->header.type); + return -1; + } - dso = kernel_dso; + if (event->header.misc & PERF_EVENT_MISC_KERNEL) { + show = SHOW_KERNEL; + level = 'k'; - dprintf(" ...... dso: %s\n", dso->name); + dso = kernel_dso; - } else if (event->header.misc & PERF_EVENT_MISC_USER) { + dprintf(" ...... dso: %s\n", dso->name); - show = SHOW_USER; - level = '.'; + } else if (event->header.misc & PERF_EVENT_MISC_USER) { - map = thread__find_map(thread, ip); - if (map != NULL) { - dso = map->dso; - ip -= map->start + map->pgoff; - } else { - /* - * If this is outside of all known maps, - * and is a negative address, try to look it - * up in the kernel dso, as it might be a - * vsyscall (which executes in user-mode): - */ - if ((long long)ip < 0) - dso = kernel_dso; - } - dprintf(" ...... dso: %s\n", dso ? dso->name : ""); + show = SHOW_USER; + level = '.'; + map = thread__find_map(thread, ip); + if (map != NULL) { + dso = map->dso; + ip -= map->start + map->pgoff; } else { - show = SHOW_HV; - level = 'H'; - dprintf(" ...... dso: [hypervisor]\n"); + /* + * If this is outside of all known maps, + * and is a negative address, try to look it + * up in the kernel dso, as it might be a + * vsyscall (which executes in user-mode): + */ + if ((long long)ip < 0) + dso = kernel_dso; } + dprintf(" ...... dso: %s\n", dso ? dso->name : ""); - if (show & show_mask) { - struct symbol *sym = dso__find_symbol(dso, ip); + } else { + show = SHOW_HV; + level = 'H'; + dprintf(" ...... 
dso: [hypervisor]\n"); + } - if (hist_entry__add(thread, map, dso, sym, ip, level)) { - fprintf(stderr, - "problem incrementing symbol count, skipping event\n"); - return -1; - } - } - total++; - } else switch (event->header.type) { - case PERF_EVENT_MMAP: { - struct thread *thread = threads__findnew(event->mmap.pid); - struct map *map = map__new(&event->mmap); + if (show & show_mask) { + struct symbol *sym = dso__find_symbol(dso, ip); - dprintf("%p [%p]: PERF_EVENT_MMAP: [%p(%p) @ %p]: %s\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - (void *)(long)event->mmap.start, - (void *)(long)event->mmap.len, - (void *)(long)event->mmap.pgoff, - event->mmap.filename); - - if (thread == NULL || map == NULL) { - if (verbose) - fprintf(stderr, "problem processing PERF_EVENT_MMAP, skipping event.\n"); + if (hist_entry__add(thread, map, dso, sym, ip, level)) { + fprintf(stderr, + "problem incrementing symbol count, skipping event\n"); return -1; } - thread__insert_map(thread, map); - total_mmap++; - break; } - case PERF_EVENT_COMM: { - struct thread *thread = threads__findnew(event->comm.pid); + total++; - dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->comm.comm, event->comm.pid); + return 0; +} - if (thread == NULL || - thread__set_comm(thread, event->comm.comm)) { - fprintf(stderr, "problem processing PERF_EVENT_COMM, skipping event.\n"); - return -1; - } - total_comm++; - break; +static int +process_mmap_event(event_t *event, unsigned long offset, unsigned long head) +{ + struct thread *thread = threads__findnew(event->mmap.pid); + struct map *map = map__new(&event->mmap); + + dprintf("%p [%p]: PERF_EVENT_MMAP: [%p(%p) @ %p]: %s\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + (void *)(long)event->mmap.start, + (void *)(long)event->mmap.len, + (void *)(long)event->mmap.pgoff, + event->mmap.filename); + + if (thread == NULL || map == NULL) { + dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n"); + return -1; + } + + thread__insert_map(thread, map); + total_mmap++; + + return 0; +} + +static int +process_comm_event(event_t *event, unsigned long offset, unsigned long head) +{ + struct thread *thread = threads__findnew(event->comm.pid); + + dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->comm.comm, event->comm.pid); + + if (thread == NULL || + thread__set_comm(thread, event->comm.comm)) { + dprintf("problem processing PERF_EVENT_COMM, skipping event.\n"); + return -1; } + total_comm++; + + return 0; +} + +static int +process_event(event_t *event, unsigned long offset, unsigned long head) +{ + if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) + return process_overflow_event(event, offset, head); + + switch (event->header.type) { + case PERF_EVENT_MMAP: + return process_mmap_event(event, offset, head); + + case PERF_EVENT_COMM: + return process_comm_event(event, offset, head); + default: return -1; } @@ -877,13 +902,13 @@ process_event(event_t *event, unsigned long offset, unsigned long head) static int __cmd_report(void) { + int ret, rc = EXIT_FAILURE; unsigned long offset = 0; unsigned long head = 0; struct stat stat; - char *buf; event_t *event; - int ret, rc = EXIT_FAILURE; uint32_t size; + char *buf; register_idle_thread(); -- cgit v1.2.3 From d11444dfa78cdd887d8dfd2fab3883132aff2c2d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 3 Jun 2009 23:29:14 +0200 Subject: perf report: Handle all known 
event types We have munmap, throttle/unthrottle and period events as well, process them - otherwise they are considered broke events and we mis-parse the next few events. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 82b62529e65..6003cc3b188 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -893,6 +893,15 @@ process_event(event_t *event, unsigned long offset, unsigned long head) case PERF_EVENT_COMM: return process_comm_event(event, offset, head); + /* + * We dont process them right now but they are fine: + */ + case PERF_EVENT_MUNMAP: + case PERF_EVENT_PERIOD: + case PERF_EVENT_THROTTLE: + case PERF_EVENT_UNTHROTTLE: + return 0; + default: return -1; } -- cgit v1.2.3 From a4c43beaff0fe6c83aa2505dce8ffe65db8e0a33 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 3 Jun 2009 23:02:33 -0300 Subject: perf report: Fix rbtree bug Ingo Molnar reported: > FYI, i just got this crash (segfault) in perf report after > collecting a long profile from Xorg: > > Starting program: /home/mingo/tip/Documentation/perf_counter/perf report > [Thread debugging using libthread_db enabled] > Detaching after fork from child process 20008. > [New Thread 0x7f92fd62a6f0 (LWP 20005)] > > Program received signal SIGSEGV, Segmentation fault. > 0x000000000041031a in __rb_erase_color (node=0x142c090, parent=0x0, > root=0x881918) > at util/rbtree.c:143 > 143 if (parent->rb_left == node) It was a problem introduced in this cset: perf report: Fix comm sorting - 8229289b607682f90b946ad2c319526303c17700 This patch should fix it. Signed-off-by: Arnaldo Carvalho de Melo Acked-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner Cc: Stephane Eranian LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 6003cc3b188..86f23f0991f 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -699,17 +699,18 @@ static void output__resort(void) { struct rb_node *next; struct hist_entry *n; + struct rb_root *tree = &hist; if (sort__need_collapse) - next = rb_first(&collapse_hists); - else - next = rb_first(&hist); + tree = &collapse_hists; + + next = rb_first(tree); while (next) { n = rb_entry(next, struct hist_entry, rb_node); next = rb_next(&n->rb_node); - rb_erase(&n->rb_node, &hist); + rb_erase(&n->rb_node, tree); output__insert_entry(n); } } -- cgit v1.2.3 From 6e53cdf11dfc8d302ebb67e7112d1baf8d7c66d4 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 4 Jun 2009 08:53:05 +0200 Subject: perf top: Reduce default filter threshold On idle systems 'perf top' comes up empty by default, because the event count filter is set to 100. Reduce it to 5 instead. Also add an option to limit the number of functions displayed. 
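For illustration, the display filter after this change, reduced to a
compilable sketch (the sample counts are made up; in the tool the walk
is over the active-symbol rbtree, not an array):

  #include <stdio.h>

  static unsigned long long count_filter = 5;   /* was 100 */
  static int print_entries = 15;                /* new -E limit */

  int main(void)
  {
          unsigned long long snap_counts[] = { 120, 40, 7, 4, 2 };
          int printed = 0;
          unsigned int i;

          for (i = 0; i < sizeof(snap_counts) / sizeof(snap_counts[0]); i++) {
                  if (++printed > print_entries ||
                      snap_counts[i] < count_filter)
                          continue;
                  printf("%2d: %llu events\n", printed, snap_counts[i]);
          }
          return 0;
  }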
Reported-by: Steven Rostedt Acked-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-top.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 7c907e25d82..3f7778ba00b 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -64,9 +64,10 @@ static int default_interval = 100000; static int event_count[MAX_COUNTERS]; static int fd[MAX_NR_CPUS][MAX_COUNTERS]; -static __u64 count_filter = 100; +static __u64 count_filter = 5; +static int print_entries = 15; -static int target_pid = -1; +static int target_pid = -1; static int profile_cpu = -1; static int nr_cpus = 0; static unsigned int realtime_prio = 0; @@ -254,7 +255,7 @@ static void print_sym_table(void) struct symbol *sym = (struct symbol *)(syme + 1); float pcnt; - if (++printed > 18 || syme->snap_count < count_filter) + if (++printed > print_entries || syme->snap_count < count_filter) continue; pcnt = 100.0 - (100.0 * ((sum_kevents - syme->snap_count) / @@ -650,7 +651,7 @@ static const struct option options[] = { "number of seconds to delay between refreshes"), OPT_BOOLEAN('D', "dump-symtab", &dump_symtab, "dump the symbol table used for profiling"), - OPT_INTEGER('f', "--count-filter", &count_filter, + OPT_INTEGER('f', "count-filter", &count_filter, "only display functions with more events than this"), OPT_BOOLEAN('g', "group", &group, "put the counters into a counter group"), @@ -662,8 +663,10 @@ static const struct option options[] = { "track mmap events"), OPT_BOOLEAN('U', "use-munmap", &use_munmap, "track munmap events"), - OPT_INTEGER('F', "--freq", &freq, + OPT_INTEGER('F', "freq", &freq, "profile at this frequency"), + OPT_INTEGER('E', "entries", &print_entries, + "display this many functions"), OPT_END() }; -- cgit v1.2.3 From 1b58c2515be48d5df79d20210ac5a86e30094de2 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 4 Jun 2009 09:49:59 +1000 Subject: perf_counter: powerpc: Use new identifier names in powerpc-specific code Commit b23f3325 ("perf_counter: Rename various fields") fixed up most of the uses of the renamed fields, but missed one instance of "record_type" in powerpc-specific code which needs to be changed to "sample_type", and a "PERF_RECORD_ADDR" in the same statement that needs to be changed to "PERF_SAMPLE_ADDR", causing compilation errors on powerpc. This fixes it. Signed-off-by: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <18983.3111.770392.800486@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 4cc4ac5c791..232b00a36f7 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -1002,7 +1002,7 @@ static void record_and_restart(struct perf_counter *counter, long val, */ if (record) { addr = 0; - if (counter->attr.record_type & PERF_RECORD_ADDR) { + if (counter->attr.sample_type & PERF_SAMPLE_ADDR) { /* * The user wants a data address recorded. 
* If we're not doing instruction sampling, -- cgit v1.2.3 From 3aff27ca84fa94311ae99189e54fed8d83b69fc1 Mon Sep 17 00:00:00 2001 From: Yong Wang Date: Wed, 3 Jun 2009 16:42:25 +0800 Subject: perf_counter: Documentation update The 'nmi' bit is no longer there. Signed-off-by: Yong Wang Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Arjan van de Ven LKML-Reference: <20090603084225.GA6553@ywang-moblin2.bj.intel.com> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/design.txt | 7 ------- 1 file changed, 7 deletions(-) diff --git a/Documentation/perf_counter/design.txt b/Documentation/perf_counter/design.txt index 9930c4bddc6..d3250763dc9 100644 --- a/Documentation/perf_counter/design.txt +++ b/Documentation/perf_counter/design.txt @@ -48,7 +48,6 @@ struct perf_counter_hw_event { __u32 read_format; __u64 disabled : 1, /* off by default */ - nmi : 1, /* NMI sampling */ inherit : 1, /* children inherit it */ pinned : 1, /* must always be on PMU */ exclusive : 1, /* only group on PMU */ @@ -195,12 +194,6 @@ The 'disabled' bit specifies whether the counter starts out disabled or enabled. If it is initially disabled, it can be enabled by ioctl or prctl (see below). -The 'nmi' bit specifies, for hardware events, whether the counter -should be set up to request non-maskable interrupts (NMIs) or normal -interrupts. This bit is ignored if the user doesn't have -CAP_SYS_ADMIN privilege (i.e. is not root) or if the CPU doesn't -generate NMIs from hardware counters. - The 'inherit' bit, if set, specifies that this counter should count events on descendant tasks as well as the task specified. This only applies to new descendents, not to any existing descendents at the -- cgit v1.2.3 From df97992c6e25ffc66c549c8bc59262dc627c6d17 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 4 Jun 2009 13:41:22 +0200 Subject: perf record/report: Fix PID/COMM handling Fix two bugs causing lost comm mappings: - initial PID is not 0 but getpid() - when we are unable to handle an mmap event, dont assume the event itself is broken - try to parse the stream. This way we wont lose comm events. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 2 +- Documentation/perf_counter/builtin-report.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index efa2eb498e9..bf59df5bddf 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -430,7 +430,7 @@ static int __cmd_record(int argc, const char **argv) } if (!system_wide) { - open_counters(-1, target_pid != -1 ? target_pid : 0); + open_counters(-1, target_pid != -1 ? 
target_pid : getpid()); } else for (i = 0; i < nr_cpus; i++) open_counters(i, target_pid); diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 86f23f0991f..ff6f657476a 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -852,7 +852,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head) if (thread == NULL || map == NULL) { dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n"); - return -1; + return 0; } thread__insert_map(thread, map); -- cgit v1.2.3 From af794b94ae8a16fb4a9da6ce640c122efb44e2a0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 4 Jun 2009 13:58:13 +0200 Subject: perf_counter tools: Build with native optimization Build the tools with -march=native by default. No measurable difference in speed though, compared to the default, on a Nehalem testbox. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 005709b7b19..414399cbc51 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -159,7 +159,7 @@ uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') # CFLAGS and LDFLAGS are for the users to override from the command line. -CFLAGS = -ggdb3 -Wall -Werror -Wstrict-prototypes -Wmissing-declarations -Wmissing-prototypes -std=gnu99 -Wdeclaration-after-statement -O6 +CFLAGS = -ggdb3 -Wall -Werror -Wstrict-prototypes -Wmissing-declarations -Wmissing-prototypes -std=gnu99 -Wdeclaration-after-statement -O6 -march=native LDFLAGS = -lpthread -lrt -lelf ALL_CFLAGS = $(CFLAGS) ALL_LDFLAGS = $(LDFLAGS) -- cgit v1.2.3 From 95ed6fd06e52bf850cd17524f0b36ed14300c10d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 4 Jun 2009 15:00:45 +0200 Subject: perf report: Simplify symbol output The DSO can be printed already - no need to repeat it in the symbol field. 
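For context, report columns are driven by sort_entry descriptors, so
dropping the DSO repetition is a one-printer change. A minimal sketch
of that descriptor idea (.header and .print follow the real code;
struct entry is an illustrative stand-in for hist_entry):

  #include <stdio.h>

  struct entry { const char *sym_name; };

  struct sort_entry {
          const char *header;
          size_t (*print)(FILE *fp, struct entry *self);
  };

  static size_t sym_print(FILE *fp, struct entry *self)
  {
          return fprintf(fp, " %s", self->sym_name);
  }

  static struct sort_entry sort_sym = {
          .header = " Symbol",
          .print  = sym_print,
  };

  int main(void)
  {
          struct entry e = { .sym_name = "schedule" };

          printf("#%s\n", sort_sym.header);
          sort_sym.print(stdout, &e);
          putchar('\n');
          return 0;
  }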
Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index ff6f657476a..56c664d1b62 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -382,7 +382,7 @@ sort__comm_print(FILE *fp, struct hist_entry *self) } static struct sort_entry sort_comm = { - .header = " Command", + .header = " Command", .cmp = sort__comm_cmp, .collapse = sort__comm_collapse, .print = sort__comm_print, @@ -414,11 +414,11 @@ sort__dso_print(FILE *fp, struct hist_entry *self) if (self->dso) return fprintf(fp, " %-25s", self->dso->name); - return fprintf(fp, " %016llx", (__u64)self->ip); + return fprintf(fp, " %016llx ", (__u64)self->ip); } static struct sort_entry sort_dso = { - .header = " Shared Object ", + .header = " Shared Object ", .cmp = sort__dso_cmp, .print = sort__dso_print, }; @@ -447,21 +447,16 @@ sort__sym_print(FILE *fp, struct hist_entry *self) if (verbose) ret += fprintf(fp, " %#018llx", (__u64)self->ip); - if (self->dso) - ret += fprintf(fp, " %s: ", self->dso->name); - else - ret += fprintf(fp, " %#016llx: ", (__u64)self->ip); - if (self->sym) - ret += fprintf(fp, "%s", self->sym->name); + ret += fprintf(fp, " %s", self->sym->name); else - ret += fprintf(fp, "%#016llx", (__u64)self->ip); + ret += fprintf(fp, " %#016llx", (__u64)self->ip); return ret; } static struct sort_entry sort_sym = { - .header = " Shared Object: Symbol", + .header = " Symbol", .cmp = sort__sym_cmp, .print = sort__sym_print, }; -- cgit v1.2.3 From bd74137ec9aaca3df3ff22b92455fddf7afaced1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 4 Jun 2009 14:13:04 +0200 Subject: perf_counter tools: Print out symbol parsing errors only if --verbose Also, add a suggestion to 'perf report', if the default sort order is used. 
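The pattern used throughout the change, reduced to a standalone
sketch (the helper name and message here are illustrative; the tool
itself threads a verbose parameter down into dso__load() and friends
instead of using a global):

  #include <stdarg.h>
  #include <stdio.h>

  static int verbose;                     /* set by -v/--verbose */

  static void pr_verbose(const char *fmt, ...)
  {
          va_list ap;

          if (!verbose)
                  return;                 /* quiet by default */

          va_start(ap, fmt);
          vfprintf(stderr, fmt, ap);
          va_end(ap);
  }

  int main(int argc, char **argv)
  {
          verbose = argc > 1;             /* any argument enables it */
          pr_verbose("Failed to open: %s\n", "/usr/lib/libfoo.so");
          return 0;
  }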
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 18 ++++++++++++++---- Documentation/perf_counter/builtin-top.c | 2 +- Documentation/perf_counter/util/symbol.c | 27 +++++++++++++++------------ Documentation/perf_counter/util/symbol.h | 4 ++-- 4 files changed, 32 insertions(+), 19 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 56c664d1b62..15fe9dae792 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -26,7 +26,10 @@ static char const *input_name = "perf.data"; static char *vmlinux = NULL; -static char *sort_order = "comm,dso"; + +static char default_sort_order[] = "comm,dso"; +static char *sort_order = default_sort_order; + static int input; static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; @@ -103,9 +106,10 @@ static struct dso *dsos__findnew(const char *name) if (!dso) goto out_delete_dso; - nr = dso__load(dso, NULL); + nr = dso__load(dso, NULL, verbose); if (nr < 0) { - fprintf(stderr, "Failed to open: %s\n", name); + if (verbose) + fprintf(stderr, "Failed to open: %s\n", name); goto out_delete_dso; } if (!nr && verbose) { @@ -139,7 +143,7 @@ static int load_kernel(void) if (!kernel_dso) return -1; - err = dso__load_kernel(kernel_dso, vmlinux, NULL); + err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose); if (err) { dso__delete(kernel_dso); kernel_dso = NULL; @@ -741,6 +745,12 @@ static size_t output__fprintf(FILE *fp, uint64_t total_samples) ret += hist_entry__fprintf(fp, pos, total_samples); } + if (!strcmp(sort_order, default_sort_order)) { + fprintf(fp, "#\n"); + fprintf(fp, "# ( For more details, try: perf report --sort comm,dso,symbol )\n"); + fprintf(fp, "#\n"); + } + return ret; } diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 3f7778ba00b..548a8da4b15 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -349,7 +349,7 @@ static int parse_symbols(void) if (kernel_dso == NULL) return -1; - if (dso__load_kernel(kernel_dso, NULL, symbol_filter) != 0) + if (dso__load_kernel(kernel_dso, NULL, symbol_filter, 1) != 0) goto out_delete_dso; node = rb_first(&kernel_dso->syms); diff --git a/Documentation/perf_counter/util/symbol.c b/Documentation/perf_counter/util/symbol.c index 35ee6de1e51..15d5cf9abfa 100644 --- a/Documentation/perf_counter/util/symbol.c +++ b/Documentation/perf_counter/util/symbol.c @@ -124,7 +124,7 @@ size_t dso__fprintf(struct dso *self, FILE *fp) return ret; } -static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter) +static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int verbose) { struct rb_node *nd, *prevnd; char *line = NULL; @@ -370,7 +370,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, Elf *elf, } static int dso__load_sym(struct dso *self, int fd, const char *name, - symbol_filter_t filter) + symbol_filter_t filter, int verbose) { Elf_Data *symstrs; uint32_t nr_syms; @@ -387,13 +387,15 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); if (elf == NULL) { - fprintf(stderr, "%s: cannot read %s ELF file.\n", - __func__, name); + if (verbose) + fprintf(stderr, "%s: cannot read %s ELF file.\n", + __func__, name); goto 
out_close; } if (gelf_getehdr(elf, &ehdr) == NULL) { - fprintf(stderr, "%s: cannot get elf header.\n", __func__); + if (verbose) + fprintf(stderr, "%s: cannot get elf header.\n", __func__); goto out_elf_end; } @@ -473,7 +475,7 @@ out_close: return err; } -int dso__load(struct dso *self, symbol_filter_t filter) +int dso__load(struct dso *self, symbol_filter_t filter, int verbose) { int size = strlen(self->name) + sizeof("/usr/lib/debug%s.debug"); char *name = malloc(size); @@ -505,7 +507,7 @@ more: fd = open(name, O_RDONLY); } while (fd < 0); - ret = dso__load_sym(self, fd, name, filter); + ret = dso__load_sym(self, fd, name, filter, verbose); close(fd); /* @@ -520,28 +522,29 @@ out: } static int dso__load_vmlinux(struct dso *self, const char *vmlinux, - symbol_filter_t filter) + symbol_filter_t filter, int verbose) { int err, fd = open(vmlinux, O_RDONLY); if (fd < 0) return -1; - err = dso__load_sym(self, fd, vmlinux, filter); + err = dso__load_sym(self, fd, vmlinux, filter, verbose); close(fd); return err; } -int dso__load_kernel(struct dso *self, const char *vmlinux, symbol_filter_t filter) +int dso__load_kernel(struct dso *self, const char *vmlinux, + symbol_filter_t filter, int verbose) { int err = -1; if (vmlinux) - err = dso__load_vmlinux(self, vmlinux, filter); + err = dso__load_vmlinux(self, vmlinux, filter, verbose); if (err) - err = dso__load_kallsyms(self, filter); + err = dso__load_kallsyms(self, filter, verbose); return err; } diff --git a/Documentation/perf_counter/util/symbol.h b/Documentation/perf_counter/util/symbol.h index b0299bc0cf5..8dd8522a0a0 100644 --- a/Documentation/perf_counter/util/symbol.h +++ b/Documentation/perf_counter/util/symbol.h @@ -32,8 +32,8 @@ static inline void *dso__sym_priv(struct dso *self, struct symbol *sym) struct symbol *dso__find_symbol(struct dso *self, uint64_t ip); int dso__load_kernel(struct dso *self, const char *vmlinux, - symbol_filter_t filter); -int dso__load(struct dso *self, symbol_filter_t filter); + symbol_filter_t filter, int verbose); +int dso__load(struct dso *self, symbol_filter_t filter, int verbose); size_t dso__fprintf(struct dso *self, FILE *fp); -- cgit v1.2.3 From 05ca061eb9704ad9b0739f88046276792b75f2c1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 4 Jun 2009 14:21:16 +0200 Subject: perf report: Print out the total number of events So that the statistical quality of the profile can be estimated at a glance. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 15fe9dae792..be392e0f77c 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -721,6 +721,8 @@ static size_t output__fprintf(FILE *fp, uint64_t total_samples) struct rb_node *nd; size_t ret = 0; + fprintf(fp, "#\n"); + fprintf(fp, "# (%Ld profiler events)\n", (__u64)total_samples); fprintf(fp, "#\n"); fprintf(fp, "# Overhead"); -- cgit v1.2.3 From 71dd8945d8d827ab101cd287f9480ef22fc7c1b6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 4 Jun 2009 15:16:56 +0200 Subject: perf report: Add consistent spacing rules Make the sort header and the print function have the same column width. 
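After this change the dotted rule is derived from strlen(header)
instead of being maintained by hand, so the header and its underline
can no longer drift apart. A standalone sketch of that idea (the
helper is hypothetical, the loop matches output__fprintf()):

  #include <stdio.h>
  #include <string.h>

  static void print_header(FILE *fp, const char *header)
  {
          size_t i;

          fprintf(fp, "# Overhead  %s\n", header);

          fprintf(fp, "# ........  ");
          for (i = 0; i < strlen(header); i++)
                  fputc('.', fp);
          fputc('\n', fp);
  }

  int main(void)
  {
          print_header(stdout, "Shared Object            ");
          return 0;
  }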
Signed-off-by: Peter Zijlstra Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 34 ++++++++++++++++------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index be392e0f77c..e930b4e0233 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -344,11 +344,11 @@ sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) static size_t sort__thread_print(FILE *fp, struct hist_entry *self) { - return fprintf(fp, " %16s:%5d", self->thread->comm ?: "", self->thread->pid); + return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid); } static struct sort_entry sort_thread = { - .header = " Command: Pid ", + .header = " Command: Pid", .cmp = sort__thread_cmp, .print = sort__thread_print, }; @@ -382,11 +382,11 @@ sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) static size_t sort__comm_print(FILE *fp, struct hist_entry *self) { - return fprintf(fp, " %16s", self->thread->comm); + return fprintf(fp, "%16s", self->thread->comm); } static struct sort_entry sort_comm = { - .header = " Command", + .header = " Command", .cmp = sort__comm_cmp, .collapse = sort__comm_collapse, .print = sort__comm_print, @@ -416,13 +416,13 @@ static size_t sort__dso_print(FILE *fp, struct hist_entry *self) { if (self->dso) - return fprintf(fp, " %-25s", self->dso->name); + return fprintf(fp, "%-25s", self->dso->name); - return fprintf(fp, " %016llx ", (__u64)self->ip); + return fprintf(fp, "%016llx ", (__u64)self->ip); } static struct sort_entry sort_dso = { - .header = " Shared Object ", + .header = "Shared Object ", .cmp = sort__dso_cmp, .print = sort__dso_print, }; @@ -449,18 +449,18 @@ sort__sym_print(FILE *fp, struct hist_entry *self) size_t ret = 0; if (verbose) - ret += fprintf(fp, " %#018llx", (__u64)self->ip); + ret += fprintf(fp, "%#018llx ", (__u64)self->ip); if (self->sym) - ret += fprintf(fp, " %s", self->sym->name); + ret += fprintf(fp, "%s", self->sym->name); else - ret += fprintf(fp, " %#016llx", (__u64)self->ip); + ret += fprintf(fp, "%#016llx", (__u64)self->ip); return ret; } static struct sort_entry sort_sym = { - .header = " Symbol", + .header = "Symbol", .cmp = sort__sym_cmp, .print = sort__sym_print, }; @@ -553,8 +553,10 @@ hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples) } else ret = fprintf(fp, "%12d ", self->count); - list_for_each_entry(se, &hist_entry__sort_list, list) + list_for_each_entry(se, &hist_entry__sort_list, list) { + fprintf(fp, " "); ret += se->print(fp, self); + } ret += fprintf(fp, "\n"); @@ -721,13 +723,14 @@ static size_t output__fprintf(FILE *fp, uint64_t total_samples) struct rb_node *nd; size_t ret = 0; + fprintf(fp, "\n"); fprintf(fp, "#\n"); fprintf(fp, "# (%Ld profiler events)\n", (__u64)total_samples); fprintf(fp, "#\n"); fprintf(fp, "# Overhead"); list_for_each_entry(se, &hist_entry__sort_list, list) - fprintf(fp, " %s", se->header); + fprintf(fp, " %s", se->header); fprintf(fp, "\n"); fprintf(fp, "# ........"); @@ -735,7 +738,7 @@ static size_t output__fprintf(FILE *fp, uint64_t total_samples) int i; fprintf(fp, " "); - for (i = 0; i < strlen(se->header)-1; i++) + for (i = 0; i < strlen(se->header); i++) fprintf(fp, "."); } fprintf(fp, "\n"); @@ -749,9 
+752,10 @@ static size_t output__fprintf(FILE *fp, uint64_t total_samples) if (!strcmp(sort_order, default_sort_order)) { fprintf(fp, "#\n"); - fprintf(fp, "# ( For more details, try: perf report --sort comm,dso,symbol )\n"); + fprintf(fp, "# (For more details, try: perf report --sort comm,dso,symbol)\n"); fprintf(fp, "#\n"); } + fprintf(fp, "\n"); return ret; } -- cgit v1.2.3 From 8fc0321f1ad0ffef969056dda91b453bbd7a494d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 4 Jun 2009 15:19:47 +0200 Subject: perf_counter tools: Add color terminal output support Add Git's color printing library to util/color.[ch]. Add it to perf report, with a trivial example to print high-overhead entries in red, low-overhead entries in green. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 2 + Documentation/perf_counter/builtin-report.c | 15 +- Documentation/perf_counter/builtin-top.c | 23 ++- Documentation/perf_counter/util/color.c | 230 ++++++++++++++++++++++++++ Documentation/perf_counter/util/color.h | 36 ++++ Documentation/perf_counter/util/environment.c | 1 + 6 files changed, 300 insertions(+), 7 deletions(-) create mode 100644 Documentation/perf_counter/util/color.c create mode 100644 Documentation/perf_counter/util/color.h diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 414399cbc51..c9ec4585f4d 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -298,6 +298,7 @@ LIB_H += util/string.h LIB_H += util/run-command.h LIB_H += util/sigchain.h LIB_H += util/symbol.h +LIB_H += util/color.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o @@ -319,6 +320,7 @@ LIB_OBJS += util/usage.o LIB_OBJS += util/wrapper.o LIB_OBJS += util/sigchain.o LIB_OBJS += util/symbol.o +LIB_OBJS += util/color.o LIB_OBJS += util/pager.o BUILTIN_OBJS += builtin-help.o diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index e930b4e0233..7beedc6effa 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -9,6 +9,7 @@ #include "util/util.h" +#include "util/color.h" #include "util/list.h" #include "util/cache.h" #include "util/rbtree.h" @@ -548,7 +549,19 @@ hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples) size_t ret; if (total_samples) { - ret = fprintf(fp, " %6.2f%%", + double percent = self->count * 100.0 / total_samples; + char *color = PERF_COLOR_NORMAL; + + /* + * We color high-overhead entries in red, low-overhead + * entries in green - and keep the middle ground normal: + */ + if (percent >= 5.0) + color = PERF_COLOR_RED; + if (percent < 0.5) + color = PERF_COLOR_GREEN; + + ret = color_fprintf(fp, color, " %6.2f%%", (self->count * 100.0) / total_samples); } else ret = fprintf(fp, "%12d ", self->count); diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 548a8da4b15..20e5b120095 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -21,6 +21,7 @@ #include "perf.h" #include "util/symbol.h" +#include "util/color.h" #include "util/util.h" #include "util/rbtree.h" #include "util/parse-options.h" @@ -253,7 +254,8 @@ static void print_sym_table(void) for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) { struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node); struct symbol *sym = (struct 
symbol *)(syme + 1); - float pcnt; + char *color = PERF_COLOR_NORMAL; + double pcnt; if (++printed > print_entries || syme->snap_count < count_filter) continue; @@ -261,13 +263,22 @@ static void print_sym_table(void) pcnt = 100.0 - (100.0 * ((sum_kevents - syme->snap_count) / sum_kevents)); + /* + * We color high-overhead entries in red, low-overhead + * entries in green - and keep the middle ground normal: + */ + if (pcnt >= 5.0) + color = PERF_COLOR_RED; + if (pcnt < 0.5) + color = PERF_COLOR_GREEN; + if (nr_counters == 1) - printf("%19.2f - %4.1f%% - %016llx : %s\n", - syme->weight, pcnt, sym->start, sym->name); + printf("%19.2f - ", syme->weight); else - printf("%8.1f %10ld - %4.1f%% - %016llx : %s\n", - syme->weight, syme->snap_count, - pcnt, sym->start, sym->name); + printf("%8.1f %10ld - ", syme->weight, syme->snap_count); + + color_fprintf(stdout, color, "%4.1f%%", pcnt); + printf(" - %016llx : %s\n", sym->start, sym->name); } { diff --git a/Documentation/perf_counter/util/color.c b/Documentation/perf_counter/util/color.c new file mode 100644 index 00000000000..a77975d6677 --- /dev/null +++ b/Documentation/perf_counter/util/color.c @@ -0,0 +1,230 @@ +#include "cache.h" +#include "color.h" + +int perf_use_color_default = 0; + +static int parse_color(const char *name, int len) +{ + static const char * const color_names[] = { + "normal", "black", "red", "green", "yellow", + "blue", "magenta", "cyan", "white" + }; + char *end; + int i; + for (i = 0; i < ARRAY_SIZE(color_names); i++) { + const char *str = color_names[i]; + if (!strncasecmp(name, str, len) && !str[len]) + return i - 1; + } + i = strtol(name, &end, 10); + if (end - name == len && i >= -1 && i <= 255) + return i; + return -2; +} + +static int parse_attr(const char *name, int len) +{ + static const int attr_values[] = { 1, 2, 4, 5, 7 }; + static const char * const attr_names[] = { + "bold", "dim", "ul", "blink", "reverse" + }; + int i; + for (i = 0; i < ARRAY_SIZE(attr_names); i++) { + const char *str = attr_names[i]; + if (!strncasecmp(name, str, len) && !str[len]) + return attr_values[i]; + } + return -1; +} + +void color_parse(const char *value, const char *var, char *dst) +{ + color_parse_mem(value, strlen(value), var, dst); +} + +void color_parse_mem(const char *value, int value_len, const char *var, + char *dst) +{ + const char *ptr = value; + int len = value_len; + int attr = -1; + int fg = -2; + int bg = -2; + + if (!strncasecmp(value, "reset", len)) { + strcpy(dst, PERF_COLOR_RESET); + return; + } + + /* [fg [bg]] [attr] */ + while (len > 0) { + const char *word = ptr; + int val, wordlen = 0; + + while (len > 0 && !isspace(word[wordlen])) { + wordlen++; + len--; + } + + ptr = word + wordlen; + while (len > 0 && isspace(*ptr)) { + ptr++; + len--; + } + + val = parse_color(word, wordlen); + if (val >= -1) { + if (fg == -2) { + fg = val; + continue; + } + if (bg == -2) { + bg = val; + continue; + } + goto bad; + } + val = parse_attr(word, wordlen); + if (val < 0 || attr != -1) + goto bad; + attr = val; + } + + if (attr >= 0 || fg >= 0 || bg >= 0) { + int sep = 0; + + *dst++ = '\033'; + *dst++ = '['; + if (attr >= 0) { + *dst++ = '0' + attr; + sep++; + } + if (fg >= 0) { + if (sep++) + *dst++ = ';'; + if (fg < 8) { + *dst++ = '3'; + *dst++ = '0' + fg; + } else { + dst += sprintf(dst, "38;5;%d", fg); + } + } + if (bg >= 0) { + if (sep++) + *dst++ = ';'; + if (bg < 8) { + *dst++ = '4'; + *dst++ = '0' + bg; + } else { + dst += sprintf(dst, "48;5;%d", bg); + } + } + *dst++ = 'm'; + } + *dst = 0; + return; +bad: + die("bad 
color value '%.*s' for variable '%s'", value_len, value, var); +} + +int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty) +{ + if (value) { + if (!strcasecmp(value, "never")) + return 0; + if (!strcasecmp(value, "always")) + return 1; + if (!strcasecmp(value, "auto")) + goto auto_color; + } + + /* Missing or explicit false to turn off colorization */ + if (!perf_config_bool(var, value)) + return 0; + + /* any normal truth value defaults to 'auto' */ + auto_color: + if (stdout_is_tty < 0) + stdout_is_tty = isatty(1); + if (stdout_is_tty || (pager_in_use() && pager_use_color)) { + char *term = getenv("TERM"); + if (term && strcmp(term, "dumb")) + return 1; + } + return 0; +} + +int perf_color_default_config(const char *var, const char *value, void *cb) +{ + if (!strcmp(var, "color.ui")) { + perf_use_color_default = perf_config_colorbool(var, value, -1); + return 0; + } + + return perf_default_config(var, value, cb); +} + +static int color_vfprintf(FILE *fp, const char *color, const char *fmt, + va_list args, const char *trail) +{ + int r = 0; + + if (*color) + r += fprintf(fp, "%s", color); + r += vfprintf(fp, fmt, args); + if (*color) + r += fprintf(fp, "%s", PERF_COLOR_RESET); + if (trail) + r += fprintf(fp, "%s", trail); + return r; +} + + + +int color_fprintf(FILE *fp, const char *color, const char *fmt, ...) +{ + va_list args; + int r; + va_start(args, fmt); + r = color_vfprintf(fp, color, fmt, args, NULL); + va_end(args); + return r; +} + +int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...) +{ + va_list args; + int r; + va_start(args, fmt); + r = color_vfprintf(fp, color, fmt, args, "\n"); + va_end(args); + return r; +} + +/* + * This function splits the buffer by newlines and colors the lines individually. + * + * Returns 0 on success. + */ +int color_fwrite_lines(FILE *fp, const char *color, + size_t count, const char *buf) +{ + if (!*color) + return fwrite(buf, count, 1, fp) != 1; + while (count) { + char *p = memchr(buf, '\n', count); + if (p != buf && (fputs(color, fp) < 0 || + fwrite(buf, p ? p - buf : count, 1, fp) != 1 || + fputs(PERF_COLOR_RESET, fp) < 0)) + return -1; + if (!p) + return 0; + if (fputc('\n', fp) < 0) + return -1; + count -= p + 1 - buf; + buf = p + 1; + } + return 0; +} + + diff --git a/Documentation/perf_counter/util/color.h b/Documentation/perf_counter/util/color.h new file mode 100644 index 00000000000..5abfd379582 --- /dev/null +++ b/Documentation/perf_counter/util/color.h @@ -0,0 +1,36 @@ +#ifndef COLOR_H +#define COLOR_H + +/* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */ +#define COLOR_MAXLEN 24 + +#define PERF_COLOR_NORMAL "" +#define PERF_COLOR_RESET "\033[m" +#define PERF_COLOR_BOLD "\033[1m" +#define PERF_COLOR_RED "\033[31m" +#define PERF_COLOR_GREEN "\033[32m" +#define PERF_COLOR_YELLOW "\033[33m" +#define PERF_COLOR_BLUE "\033[34m" +#define PERF_COLOR_MAGENTA "\033[35m" +#define PERF_COLOR_CYAN "\033[36m" +#define PERF_COLOR_BG_RED "\033[41m" + +/* + * This variable stores the value of color.ui + */ +extern int perf_use_color_default; + + +/* + * Use this instead of perf_default_config if you need the value of color.ui. 
+ */ +int perf_color_default_config(const char *var, const char *value, void *cb); + +int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty); +void color_parse(const char *value, const char *var, char *dst); +void color_parse_mem(const char *value, int len, const char *var, char *dst); +int color_fprintf(FILE *fp, const char *color, const char *fmt, ...); +int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...); +int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf); + +#endif /* COLOR_H */ diff --git a/Documentation/perf_counter/util/environment.c b/Documentation/perf_counter/util/environment.c index 9b1c8199e72..275b0ee345f 100644 --- a/Documentation/perf_counter/util/environment.c +++ b/Documentation/perf_counter/util/environment.c @@ -6,3 +6,4 @@ #include "cache.h" const char *pager_program; +int pager_use_color = 1; -- cgit v1.2.3 From 13d0ab5ec29852a6925f612830fa9e822669ece6 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 4 Jun 2009 15:40:25 +0200 Subject: perf_counter tools: Dont output in color on !tty Dont emit ASCII color characters if the terminal is not a tty, such as when perf report gets redirected into a file. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/util/color.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/Documentation/perf_counter/util/color.c b/Documentation/perf_counter/util/color.c index a77975d6677..9a8c20ccc53 100644 --- a/Documentation/perf_counter/util/color.c +++ b/Documentation/perf_counter/util/color.c @@ -1,7 +1,7 @@ #include "cache.h" #include "color.h" -int perf_use_color_default = 0; +int perf_use_color_default = -1; static int parse_color(const char *name, int len) { @@ -169,10 +169,20 @@ static int color_vfprintf(FILE *fp, const char *color, const char *fmt, { int r = 0; - if (*color) + /* + * Auto-detect: + */ + if (perf_use_color_default < 0) { + if (isatty(1) || pager_in_use()) + perf_use_color_default = 1; + else + perf_use_color_default = 0; + } + + if (perf_use_color_default && *color) r += fprintf(fp, "%s", color); r += vfprintf(fp, fmt, args); - if (*color) + if (perf_use_color_default && *color) r += fprintf(fp, "%s", PERF_COLOR_RESET); if (trail) r += fprintf(fp, "%s", trail); @@ -185,6 +195,7 @@ int color_fprintf(FILE *fp, const char *color, const char *fmt, ...) 
{ va_list args; int r; + va_start(args, fmt); r = color_vfprintf(fp, color, fmt, args, NULL); va_end(args); -- cgit v1.2.3 From edc52deac624e4641211a325c23da2a73b01a85d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 4 Jun 2009 16:24:37 +0200 Subject: perf report: Bail out if there are unrecognized options/arguments Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 7beedc6effa..389ae2569f4 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -1094,10 +1094,16 @@ int cmd_report(int argc, const char **argv, const char *prefix) page_size = getpagesize(); - parse_options(argc, argv, options, report_usage, 0); + argc = parse_options(argc, argv, options, report_usage, 0); setup_sorting(); + /* + * Any (unrecognized) arguments left? + */ + if (argc) + usage_with_options(report_usage, options); + setup_pager(); return __cmd_report(); -- cgit v1.2.3 From 20c84e959ec11b1803d2b2832eef703d5fbe7f7b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 4 Jun 2009 16:33:00 +0200 Subject: perf stat: Update help text Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- .../perf_counter/Documentation/perf-stat.txt | 30 ++++++++++++++-------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/Documentation/perf_counter/Documentation/perf-stat.txt b/Documentation/perf_counter/Documentation/perf-stat.txt index a67d0e3b7d0..a340e7be83d 100644 --- a/Documentation/perf_counter/Documentation/perf-stat.txt +++ b/Documentation/perf_counter/Documentation/perf-stat.txt @@ -22,6 +22,7 @@ OPTIONS ...:: Any command you can specify in a shell. + -e:: --event=:: 0:0: cpu-cycles @@ -45,6 +46,13 @@ OPTIONS 1:4: migrations rNNN: raw PMU events (eventsel+umask) +-i:: +--inherit:: + child tasks inherit counters +-p:: +--pid=:: + stat events on existing pid + -a:: system-wide collection @@ -54,20 +62,20 @@ OPTIONS EXAMPLES -------- -$ perf stat sleep 1 +$ perf stat -- make -j - Performance counter stats for 'sleep': + Performance counter stats for 'make -j': - 0.678356 task clock ticks (msecs) - 7 context switches (events) - 4 CPU migrations (events) - 232 pagefaults (events) - 1810403 CPU cycles (events) - 946759 instructions (events) - 18952 cache references (events) - 4885 cache misses (events) + 8117.370256 task clock ticks # 11.281 CPU utilization factor + 678 context switches # 0.000 M/sec + 133 CPU migrations # 0.000 M/sec + 235724 pagefaults # 0.029 M/sec + 24821162526 CPU cycles # 3057.784 M/sec + 18687303457 instructions # 2302.138 M/sec + 172158895 cache references # 21.209 M/sec + 27075259 cache misses # 3.335 M/sec - Wall-clock time elapsed: 1001.252894 msecs + Wall-clock time elapsed: 719.554352 msecs SEE ALSO -------- -- cgit v1.2.3 From 60313ebed739b331e8e61079da27a11ee3b73a30 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 4 Jun 2009 16:53:44 +0200 Subject: perf_counter: Add fork event Create a fork event so that we can easily clone the comm and dso maps without having to generate all those events. 
Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 10 ++++ kernel/fork.c | 4 +- kernel/perf_counter.c | 131 +++++++++++++++++++++++++++++++++++++------ 3 files changed, 126 insertions(+), 19 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 37d5541d74c..380247bdb91 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -276,6 +276,14 @@ enum perf_event_type { PERF_EVENT_THROTTLE = 5, PERF_EVENT_UNTHROTTLE = 6, + /* + * struct { + * struct perf_event_header header; + * u32 pid, ppid; + * }; + */ + PERF_EVENT_FORK = 7, + /* * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field * will be PERF_RECORD_* @@ -618,6 +626,7 @@ extern void perf_counter_munmap(unsigned long addr, unsigned long len, unsigned long pgoff, struct file *file); extern void perf_counter_comm(struct task_struct *tsk); +extern void perf_counter_fork(struct task_struct *tsk); extern void perf_counter_task_migration(struct task_struct *task, int cpu); @@ -673,6 +682,7 @@ perf_counter_munmap(unsigned long addr, unsigned long len, unsigned long pgoff, struct file *file) { } static inline void perf_counter_comm(struct task_struct *tsk) { } +static inline void perf_counter_fork(struct task_struct *tsk) { } static inline void perf_counter_init(void) { } static inline void perf_counter_task_migration(struct task_struct *task, int cpu) { } diff --git a/kernel/fork.c b/kernel/fork.c index b7d7a9f0bd7..f4466ca37ec 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1412,12 +1412,12 @@ long do_fork(unsigned long clone_flags, if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); - } else { + } else if (!(clone_flags & CLONE_VM)) { /* * vfork will do an exec which will call * set_task_comm() */ - perf_counter_comm(p); + perf_counter_fork(p); } audit_finish_fork(p); diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 0bb03f15a5b..78c58623a0d 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -40,9 +40,9 @@ static int perf_reserved_percpu __read_mostly; static int perf_overcommit __read_mostly = 1; static atomic_t nr_counters __read_mostly; -static atomic_t nr_mmap_tracking __read_mostly; -static atomic_t nr_munmap_tracking __read_mostly; -static atomic_t nr_comm_tracking __read_mostly; +static atomic_t nr_mmap_counters __read_mostly; +static atomic_t nr_munmap_counters __read_mostly; +static atomic_t nr_comm_counters __read_mostly; int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */ int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */ @@ -1447,11 +1447,11 @@ static void free_counter(struct perf_counter *counter) atomic_dec(&nr_counters); if (counter->attr.mmap) - atomic_dec(&nr_mmap_tracking); + atomic_dec(&nr_mmap_counters); if (counter->attr.munmap) - atomic_dec(&nr_munmap_tracking); + atomic_dec(&nr_munmap_counters); if (counter->attr.comm) - atomic_dec(&nr_comm_tracking); + atomic_dec(&nr_comm_counters); if (counter->destroy) counter->destroy(counter); @@ -2475,6 +2475,105 @@ static void perf_counter_output(struct perf_counter *counter, perf_output_end(&handle); } +/* + * fork tracking + */ + +struct perf_fork_event { + struct task_struct *task; + + struct { + struct perf_event_header header; + + u32 pid; + u32 ppid; + } event; +}; + +static void perf_counter_fork_output(struct perf_counter *counter, + struct perf_fork_event 
*fork_event) +{ + struct perf_output_handle handle; + int size = fork_event->event.header.size; + struct task_struct *task = fork_event->task; + int ret = perf_output_begin(&handle, counter, size, 0, 0); + + if (ret) + return; + + fork_event->event.pid = perf_counter_pid(counter, task); + fork_event->event.ppid = perf_counter_pid(counter, task->real_parent); + + perf_output_put(&handle, fork_event->event); + perf_output_end(&handle); +} + +static int perf_counter_fork_match(struct perf_counter *counter) +{ + if (counter->attr.comm || counter->attr.mmap || counter->attr.munmap) + return 1; + + return 0; +} + +static void perf_counter_fork_ctx(struct perf_counter_context *ctx, + struct perf_fork_event *fork_event) +{ + struct perf_counter *counter; + + if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) + return; + + rcu_read_lock(); + list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { + if (perf_counter_fork_match(counter)) + perf_counter_fork_output(counter, fork_event); + } + rcu_read_unlock(); +} + +static void perf_counter_fork_event(struct perf_fork_event *fork_event) +{ + struct perf_cpu_context *cpuctx; + struct perf_counter_context *ctx; + + cpuctx = &get_cpu_var(perf_cpu_context); + perf_counter_fork_ctx(&cpuctx->ctx, fork_event); + put_cpu_var(perf_cpu_context); + + rcu_read_lock(); + /* + * doesn't really matter which of the child contexts the + * events ends up in. + */ + ctx = rcu_dereference(current->perf_counter_ctxp); + if (ctx) + perf_counter_fork_ctx(ctx, fork_event); + rcu_read_unlock(); +} + +void perf_counter_fork(struct task_struct *task) +{ + struct perf_fork_event fork_event; + + if (!atomic_read(&nr_comm_counters) && + !atomic_read(&nr_mmap_counters) && + !atomic_read(&nr_munmap_counters)) + return; + + fork_event = (struct perf_fork_event){ + .task = task, + .event = { + .header = { + .type = PERF_EVENT_FORK, + .size = sizeof(fork_event.event), + }, + }, + }; + + perf_counter_fork_event(&fork_event); +} + /* * comm tracking */ @@ -2511,11 +2610,9 @@ static void perf_counter_comm_output(struct perf_counter *counter, perf_output_end(&handle); } -static int perf_counter_comm_match(struct perf_counter *counter, - struct perf_comm_event *comm_event) +static int perf_counter_comm_match(struct perf_counter *counter) { - if (counter->attr.comm && - comm_event->event.header.type == PERF_EVENT_COMM) + if (counter->attr.comm) return 1; return 0; @@ -2531,7 +2628,7 @@ static void perf_counter_comm_ctx(struct perf_counter_context *ctx, rcu_read_lock(); list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { - if (perf_counter_comm_match(counter, comm_event)) + if (perf_counter_comm_match(counter)) perf_counter_comm_output(counter, comm_event); } rcu_read_unlock(); @@ -2570,7 +2667,7 @@ void perf_counter_comm(struct task_struct *task) { struct perf_comm_event comm_event; - if (!atomic_read(&nr_comm_tracking)) + if (!atomic_read(&nr_comm_counters)) return; comm_event = (struct perf_comm_event){ @@ -2708,7 +2805,7 @@ void perf_counter_mmap(unsigned long addr, unsigned long len, { struct perf_mmap_event mmap_event; - if (!atomic_read(&nr_mmap_tracking)) + if (!atomic_read(&nr_mmap_counters)) return; mmap_event = (struct perf_mmap_event){ @@ -2729,7 +2826,7 @@ void perf_counter_munmap(unsigned long addr, unsigned long len, { struct perf_mmap_event mmap_event; - if (!atomic_read(&nr_munmap_tracking)) + if (!atomic_read(&nr_munmap_counters)) return; mmap_event = (struct perf_mmap_event){ @@ -3427,11 +3524,11 @@ done: atomic_inc(&nr_counters); 
if (counter->attr.mmap) - atomic_inc(&nr_mmap_tracking); + atomic_inc(&nr_mmap_counters); if (counter->attr.munmap) - atomic_inc(&nr_munmap_tracking); + atomic_inc(&nr_munmap_counters); if (counter->attr.comm) - atomic_inc(&nr_comm_tracking); + atomic_inc(&nr_comm_counters); return counter; } -- cgit v1.2.3 From d99e9446200c1ffab28cb0e39b76c34a2bfafd06 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 4 Jun 2009 17:08:58 +0200 Subject: perf_counter: Remove munmap stuff In the name of keeping it simple, only track mmap events. Userspace will have to remove old overlapping maps when it encounters them. Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 11 +---------- kernel/perf_counter.c | 38 +++----------------------------------- mm/mmap.c | 6 ------ 3 files changed, 4 insertions(+), 51 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 380247bdb91..6ca403acd41 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -148,11 +148,10 @@ struct perf_counter_attr { exclude_hv : 1, /* ditto hypervisor */ exclude_idle : 1, /* don't count when idle */ mmap : 1, /* include mmap data */ - munmap : 1, /* include munmap data */ comm : 1, /* include comm data */ freq : 1, /* use freq, not period */ - __reserved_1 : 52; + __reserved_1 : 53; __u32 wakeup_events; /* wakeup every n events */ __u32 __reserved_2; @@ -246,7 +245,6 @@ enum perf_event_type { * }; */ PERF_EVENT_MMAP = 1, - PERF_EVENT_MUNMAP = 2, /* * struct { @@ -622,9 +620,6 @@ extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64); extern void perf_counter_mmap(unsigned long addr, unsigned long len, unsigned long pgoff, struct file *file); -extern void perf_counter_munmap(unsigned long addr, unsigned long len, - unsigned long pgoff, struct file *file); - extern void perf_counter_comm(struct task_struct *tsk); extern void perf_counter_fork(struct task_struct *tsk); @@ -677,10 +672,6 @@ static inline void perf_counter_mmap(unsigned long addr, unsigned long len, unsigned long pgoff, struct file *file) { } -static inline void -perf_counter_munmap(unsigned long addr, unsigned long len, - unsigned long pgoff, struct file *file) { } - static inline void perf_counter_comm(struct task_struct *tsk) { } static inline void perf_counter_fork(struct task_struct *tsk) { } static inline void perf_counter_init(void) { } diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 78c58623a0d..195712e20d0 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -41,7 +41,6 @@ static int perf_overcommit __read_mostly = 1; static atomic_t nr_counters __read_mostly; static atomic_t nr_mmap_counters __read_mostly; -static atomic_t nr_munmap_counters __read_mostly; static atomic_t nr_comm_counters __read_mostly; int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */ @@ -1448,8 +1447,6 @@ static void free_counter(struct perf_counter *counter) atomic_dec(&nr_counters); if (counter->attr.mmap) atomic_dec(&nr_mmap_counters); - if (counter->attr.munmap) - atomic_dec(&nr_munmap_counters); if (counter->attr.comm) atomic_dec(&nr_comm_counters); @@ -2510,7 +2507,7 @@ static int perf_counter_fork_match(struct perf_counter *counter) { - if (counter->attr.comm || counter->attr.mmap || counter->attr.munmap) + if (counter->attr.comm || counter->attr.mmap) return 1; return 
0; @@ -2557,8 +2554,7 @@ void perf_counter_fork(struct task_struct *task) struct perf_fork_event fork_event; if (!atomic_read(&nr_comm_counters) && - !atomic_read(&nr_mmap_counters) && - !atomic_read(&nr_munmap_counters)) + !atomic_read(&nr_mmap_counters)) return; fork_event = (struct perf_fork_event){ @@ -2722,12 +2718,7 @@ static void perf_counter_mmap_output(struct perf_counter *counter, static int perf_counter_mmap_match(struct perf_counter *counter, struct perf_mmap_event *mmap_event) { - if (counter->attr.mmap && - mmap_event->event.header.type == PERF_EVENT_MMAP) - return 1; - - if (counter->attr.munmap && - mmap_event->event.header.type == PERF_EVENT_MUNMAP) + if (counter->attr.mmap) return 1; return 0; @@ -2821,27 +2812,6 @@ void perf_counter_mmap(unsigned long addr, unsigned long len, perf_counter_mmap_event(&mmap_event); } -void perf_counter_munmap(unsigned long addr, unsigned long len, - unsigned long pgoff, struct file *file) -{ - struct perf_mmap_event mmap_event; - - if (!atomic_read(&nr_munmap_counters)) - return; - - mmap_event = (struct perf_mmap_event){ - .file = file, - .event = { - .header = { .type = PERF_EVENT_MUNMAP, }, - .start = addr, - .len = len, - .pgoff = pgoff, - }, - }; - - perf_counter_mmap_event(&mmap_event); -} - /* * Log sample_period changes so that analyzing tools can re-normalize the * event flow. @@ -3525,8 +3495,6 @@ done: atomic_inc(&nr_counters); if (counter->attr.mmap) atomic_inc(&nr_mmap_counters); - if (counter->attr.munmap) - atomic_inc(&nr_munmap_counters); if (counter->attr.comm) atomic_inc(&nr_comm_counters); diff --git a/mm/mmap.c b/mm/mmap.c index 2c1c2cb0e2e..6451ce2854b 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1756,12 +1756,6 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) do { long nrpages = vma_pages(vma); - if (vma->vm_flags & VM_EXEC) { - perf_counter_munmap(vma->vm_start, - nrpages << PAGE_SHIFT, - vma->vm_pgoff, vma->vm_file); - } - mm->total_vm -= nrpages; vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); vma = remove_vma(vma); -- cgit v1.2.3 From 62fc44536c14b5787531bac7417580fca54c88b4 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 4 Jun 2009 16:53:49 +0200 Subject: perf_counter tools: Use fork and remove munmap events Use fork events to clone comm and map data and remove everything munmap related Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 103 +++++++++++++++++++++++++--- Documentation/perf_counter/builtin-top.c | 21 ------ 2 files changed, 93 insertions(+), 31 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 389ae2569f4..5d191216c80 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -43,12 +43,6 @@ static int full_paths; static unsigned long page_size; static unsigned long mmap_window = 32; -const char *perf_event_names[] = { - [PERF_EVENT_MMAP] = " PERF_EVENT_MMAP", - [PERF_EVENT_MUNMAP] = " PERF_EVENT_MUNMAP", - [PERF_EVENT_COMM] = " PERF_EVENT_COMM", -}; - struct ip_event { struct perf_event_header header; __u64 ip; @@ -70,11 +64,17 @@ struct comm_event { char comm[16]; }; +struct fork_event { + struct perf_event_header header; + __u32 pid, ppid; +}; + typedef union event_union { struct perf_event_header header; struct ip_event ip; struct mmap_event mmap; struct comm_event comm; + struct 
fork_event fork; } event_t; static LIST_HEAD(dsos); @@ -208,7 +208,31 @@ out_delete: return NULL; } -struct thread; +static struct map *map__clone(struct map *self) +{ + struct map *map = malloc(sizeof(*self)); + + if (!map) + return NULL; + + memcpy(map, self, sizeof(*self)); + + return map; +} + +static int map__overlap(struct map *l, struct map *r) +{ + if (l->start > r->start) { + struct map *t = l; + l = r; + r = t; + } + + if (l->end > r->start) + return 1; + + return 0; +} struct thread { struct rb_node rb_node; @@ -284,9 +308,39 @@ static struct thread *threads__findnew(pid_t pid) static void thread__insert_map(struct thread *self, struct map *map) { + struct map *pos, *tmp; + + list_for_each_entry_safe(pos, tmp, &self->maps, node) { + if (map__overlap(pos, map)) { + list_del_init(&pos->node); + /* XXX leaks dsos */ + free(pos); + } + } + list_add_tail(&map->node, &self->maps); } +static int thread__fork(struct thread *self, struct thread *parent) +{ + struct map *map; + + if (self->comm) + free(self->comm); + self->comm = strdup(parent->comm); + if (!self->comm) + return -ENOMEM; + + list_for_each_entry(map, &parent->maps, node) { + struct map *new = map__clone(map); + if (!new) + return -ENOMEM; + thread__insert_map(self, new); + } + + return 0; +} + static struct map *thread__find_map(struct thread *self, uint64_t ip) { struct map *pos; @@ -784,7 +838,11 @@ static void register_idle_thread(void) } } -static unsigned long total = 0, total_mmap = 0, total_comm = 0, total_unknown = 0; +static unsigned long total = 0, + total_mmap = 0, + total_comm = 0, + total_fork = 0, + total_unknown = 0; static int process_overflow_event(event_t *event, unsigned long offset, unsigned long head) @@ -866,9 +924,10 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head) struct thread *thread = threads__findnew(event->mmap.pid); struct map *map = map__new(&event->mmap); - dprintf("%p [%p]: PERF_EVENT_MMAP: [%p(%p) @ %p]: %s\n", + dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n", (void *)(offset + head), (void *)(long)(event->header.size), + event->mmap.pid, (void *)(long)event->mmap.start, (void *)(long)event->mmap.len, (void *)(long)event->mmap.pgoff, @@ -905,6 +964,26 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) return 0; } +static int +process_fork_event(event_t *event, unsigned long offset, unsigned long head) +{ + struct thread *thread = threads__findnew(event->fork.pid); + struct thread *parent = threads__findnew(event->fork.ppid); + + dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->fork.pid, event->fork.ppid); + + if (!thread || !parent || thread__fork(thread, parent)) { + dprintf("problem processing PERF_EVENT_FORK, skipping event.\n"); + return -1; + } + total_fork++; + + return 0; +} + static int process_event(event_t *event, unsigned long offset, unsigned long head) { @@ -918,10 +997,13 @@ process_event(event_t *event, unsigned long offset, unsigned long head) case PERF_EVENT_COMM: return process_comm_event(event, offset, head); + case PERF_EVENT_FORK: + return process_fork_event(event, offset, head); + /* * We dont process them right now but they are fine: */ - case PERF_EVENT_MUNMAP: + case PERF_EVENT_PERIOD: case PERF_EVENT_THROTTLE: case PERF_EVENT_UNTHROTTLE: @@ -1038,6 +1120,7 @@ more: dprintf(" IP events: %10ld\n", total); dprintf(" mmap events: %10ld\n", total_mmap); dprintf(" comm events: %10ld\n", total_comm); + dprintf(" fork events: 
%10ld\n", total_fork); dprintf(" unknown events: %10ld\n", total_unknown); if (dump_trace) diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 20e5b120095..31c00ba99b1 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -75,8 +75,6 @@ static unsigned int realtime_prio = 0; static int group = 0; static unsigned int page_size; static unsigned int mmap_pages = 16; -static int use_mmap = 0; -static int use_munmap = 0; static int freq = 0; static char *sym_filter; @@ -527,19 +525,6 @@ static void mmap_read(struct mmap_data *md) if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) { if (event->header.type & PERF_SAMPLE_IP) process_event(event->ip.ip, md->counter); - } else { - switch (event->header.type) { - case PERF_EVENT_MMAP: - case PERF_EVENT_MUNMAP: - printf("%s: %Lu %Lu %Lu %s\n", - event->header.type == PERF_EVENT_MMAP - ? "mmap" : "munmap", - event->mmap.start, - event->mmap.len, - event->mmap.pgoff, - event->mmap.filename); - break; - } } } @@ -569,8 +554,6 @@ static int __cmd_top(void) attr.config = event_id[counter]; attr.sample_period = event_count[counter]; attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; - attr.mmap = use_mmap; - attr.munmap = use_munmap; attr.freq = freq; fd[i][counter] = sys_perf_counter_open(&attr, target_pid, cpu, group_fd, 0); @@ -670,10 +653,6 @@ static const struct option options[] = { "only display symbols matchig this pattern"), OPT_BOOLEAN('z', "zero", &group, "zero history across updates"), - OPT_BOOLEAN('M', "use-mmap", &use_mmap, - "track mmap events"), - OPT_BOOLEAN('U', "use-munmap", &use_munmap, - "track munmap events"), OPT_INTEGER('F', "freq", &freq, "profile at this frequency"), OPT_INTEGER('E', "entries", &print_entries, -- cgit v1.2.3 From 0f5486b5c71a831a713ce356d8d06822e3c7c379 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 4 Jun 2009 20:48:04 +0200 Subject: perf_counter: Sleep before refresh using poll in perf top perf top is refreshed every delay_secs; the thread runs in a loop like this: while (sleep(delay_secs)) { print_sym_table(); } At the end of print_sym_table(), poll is used without any sleep delay to check whether we have something on stdin. This means the check is done only once every delay_secs, which can be longer than 2 secs if the user defined a custom refresh rate. We can drop sleep() here and directly use poll to wait between refresh periods, so that the reaction when the user stops perf top by typing "Enter" is immediate and doesn't suffer from the delay_secs latency. NB: poll doesn't add any overhead that could perturb perf top's measurements, since it sleeps for the entire timeout here. 
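For clarity, a minimal sketch of the resulting wait loop, with the precedence of the poll() test spelled out (poll() returns 0 on timeout, 1 when stdin becomes readable):

struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
int delay_msecs = delay_secs * 1000;

do {
	print_sym_table();
} while (poll(&stdin_poll, 1, delay_msecs) == 0);	/* 0 == timeout: refresh again */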
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <1244141284-7507-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-top.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 31c00ba99b1..28cbde4b6e8 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -204,7 +204,7 @@ static void print_sym_table(void) list_remove_active_sym(syme); } - write(1, CONSOLE_CLEAR, strlen(CONSOLE_CLEAR)); + puts(CONSOLE_CLEAR); printf( "------------------------------------------------------------------------------\n"); @@ -278,23 +278,21 @@ static void print_sym_table(void) color_fprintf(stdout, color, "%4.1f%%", pcnt); printf(" - %016llx : %s\n", sym->start, sym->name); } - - { - struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; - - if (poll(&stdin_poll, 1, 0) == 1) { - printf("key pressed - exiting.\n"); - exit(0); - } - } } static void *display_thread(void *arg) { + struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; + int delay_msecs = delay_secs * 1000; + printf("PerfTop refresh period: %d seconds\n", delay_secs); - while (!sleep(delay_secs)) + do { print_sym_table(); + } while (!poll(&stdin_poll, 1, delay_msecs) == 1); + + printf("key pressed - exiting.\n"); + exit(0); return NULL; } -- cgit v1.2.3 From 9ac995457b2a148ed9bb8860e8b7cb869327b102 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 4 Jun 2009 13:54:00 -0300 Subject: perf report: Add -vvv to print the list of threads and its mmaps Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Frederic Weisbecker Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 36 +++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 5d191216c80..1a1028d3bc3 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -234,6 +234,13 @@ static int map__overlap(struct map *l, struct map *r) return 0; } +static size_t map__fprintf(struct map *self, FILE *fp) +{ + return fprintf(fp, " %lx-%lx %lx %s\n", + self->start, self->end, self->pgoff, self->dso->name); +} + + struct thread { struct rb_node rb_node; struct list_head maps; @@ -264,6 +271,18 @@ static int thread__set_comm(struct thread *self, const char *comm) return self->comm ? 
0 : -ENOMEM; } +static size_t thread__fprintf(struct thread *self, FILE *fp) +{ + struct map *pos; + size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm); + + list_for_each_entry(pos, &self->maps, node) + ret += map__fprintf(pos, fp); + + return ret; +} + + static struct rb_root threads; static struct thread *last_match; @@ -355,6 +374,20 @@ static struct map *thread__find_map(struct thread *self, uint64_t ip) return NULL; } +static size_t threads__fprintf(FILE *fp) +{ + size_t ret = 0; + struct rb_node *nd; + + for (nd = rb_first(&threads); nd; nd = rb_next(nd)) { + struct thread *pos = rb_entry(nd, struct thread, rb_node); + + ret += thread__fprintf(pos, fp); + } + + return ret; +} + /* * histogram, sorted on item, collects counts */ @@ -1126,6 +1159,9 @@ more: if (dump_trace) return 0; + if (verbose >= 3) + threads__fprintf(stdout); + if (verbose >= 2) dsos__fprintf(stdout); -- cgit v1.2.3 From 76a0f40fd6eff1bce3b91925cea7587b3399fe80 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 4 Jun 2009 22:15:58 +0200 Subject: perf_counter tools: Fix warn_unused_result warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix warnings for return values that we don't care about: util/quote.c:222: attention : ignoring return value of ‘fwrite’, declared with attribute warn_unused_result util/quote.c:235: attention : ignoring return value of ‘fwrite’, declared with attribute warn_unused_result util/quote.c: In function ‘write_name_quotedpfx’: util/quote.c:290: attention : ignoring return value of ‘fwrite’, declared with attribute warn_unused_result Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <1244146558-8635-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/util/quote.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Documentation/perf_counter/util/quote.c b/Documentation/perf_counter/util/quote.c index 7a49fcf6967..f18c5212bc9 100644 --- a/Documentation/perf_counter/util/quote.c +++ b/Documentation/perf_counter/util/quote.c @@ -201,8 +201,9 @@ static size_t quote_c_style_counted(const char *name, ssize_t maxlen, } while (0) #define EMITBUF(s, l) \ do { \ + int __ret; \ if (sb) strbuf_add(sb, (s), (l)); \ - if (fp) fwrite((s), (l), 1, fp); \ + if (fp) __ret = fwrite((s), (l), 1, fp); \ count += (l); \ } while (0) @@ -287,7 +288,9 @@ extern void write_name_quotedpfx(const char *pfx, size_t pfxlen, quote_c_style(name, NULL, fp, 1); fputc('"', fp); } else { - fwrite(pfx, pfxlen, 1, fp); + int ret; + + ret = fwrite(pfx, pfxlen, 1, fp); fputs(name, fp); } fputc(terminator, fp); -- cgit v1.2.3 From 6dc5f2a41759987e35e757ef00192e7b424563bb Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 5 Jun 2009 12:36:28 +1000 Subject: perf_counter: Fix lockup with interrupting counters Commit 8e3747c1 ("perf_counter: Change data head from u32 to u64") changed the type of 'head' in struct perf_mmap_data from atomic_t to atomic_long_t, but missed converting one use of atomic_read on it to atomic_long_read. The effect of using atomic_read rather than atomic_long_read on powerpc (and other big-endian architectures) is that we get the high half of the 64-bit quantity, resulting in the cmpxchg retry loop in perf_output_begin spinning forever as soon as data->head becomes non-zero. 
On little-endian architectures such as x86 we would get the low half, resulting in a lockup once data->head becomes greater than 4G. This fixes it by using atomic_long_read rather than atomic_read. [ Impact: fix perfcounter lockup on PowerPC / big-endian systems ] Signed-off-by: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <18984.33964.21541.743096@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 195712e20d0..a5d3e2aedd2 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2234,7 +2234,7 @@ static int perf_output_begin(struct perf_output_handle *handle, perf_output_lock(handle); do { - offset = head = atomic_read(&data->head); + offset = head = atomic_long_read(&data->head); head += size; } while (atomic_long_cmpxchg(&data->head, offset, head) != offset); -- cgit v1.2.3 From ee7b31fe5c5da8a038b96e54ae9fbd5dcab3b1da Mon Sep 17 00:00:00 2001 From: Yong Wang Date: Fri, 5 Jun 2009 11:37:35 +0800 Subject: perf_counter tools: Fix incorrect printf formats Otherwise the code does not compile on 32-bit boxes. builtin-report.c: In function 'map__fprintf': builtin-report.c:240: error: format '%lx' expects type 'long unsigned int', but argument 3 has type 'uint64_t' builtin-report.c:240: error: format '%lx' expects type 'long unsigned int', but argument 4 has type 'uint64_t' builtin-report.c:240: error: format '%lx' expects type 'long unsigned int', but argument 5 has type 'uint64_t' Signed-off-by: Yong Wang Cc: Arnaldo Carvalho de Melo LKML-Reference: <20090605033735.GA20451@ywang-moblin2.bj.intel.com> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 1a1028d3bc3..eb5424fcbd6 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -236,7 +236,7 @@ static int map__overlap(struct map *l, struct map *r) static size_t map__fprintf(struct map *self, FILE *fp) { - return fprintf(fp, " %lx-%lx %lx %s\n", + return fprintf(fp, " %"PRIx64"-%"PRIx64" %"PRIx64" %s\n", self->start, self->end, self->pgoff, self->dso->name); } -- cgit v1.2.3 From f250c030a87273f8838a2302bee7c2b4d03e9151 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 5 Jun 2009 13:18:41 +0200 Subject: perf record: Split out counter creation into a helper function Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 100 +++++++++++++++------------- 1 file changed, 53 insertions(+), 47 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index bf59df5bddf..7f2d7ce9407 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -336,65 +336,71 @@ static void synthesize_events(void) closedir(proc); } -static void open_counters(int cpu, pid_t pid) +static int group_fd; + +static void create_counter(int counter, int cpu, pid_t pid) { struct perf_counter_attr attr; - int counter, group_fd; int track = 1; - if (pid > 0) { - pid_synthesize_comm_event(pid, 0); - pid_synthesize_mmap_events(pid); - } + memset(&attr, 0, sizeof(attr)); + attr.config = event_id[counter]; + attr.sample_period = event_count[counter]; + 
attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; + attr.mmap = track; + attr.comm = track; + attr.inherit = (cpu < 0) && inherit; - group_fd = -1; - for (counter = 0; counter < nr_counters; counter++) { + track = 0; /* only the first counter needs these */ - memset(&attr, 0, sizeof(attr)); - attr.config = event_id[counter]; - attr.sample_period = event_count[counter]; - attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; - attr.mmap = track; - attr.comm = track; - attr.inherit = (cpu < 0) && inherit; + fd[nr_cpu][counter] = sys_perf_counter_open(&attr, pid, cpu, group_fd, 0); - track = 0; // only the first counter needs these + if (fd[nr_cpu][counter] < 0) { + int err = errno; - fd[nr_cpu][counter] = - sys_perf_counter_open(&attr, pid, cpu, group_fd, 0); + error("syscall returned with %d (%s)\n", + fd[nr_cpu][counter], strerror(err)); + if (err == EPERM) + printf("Are you root?\n"); + exit(-1); + } + assert(fd[nr_cpu][counter] >= 0); + fcntl(fd[nr_cpu][counter], F_SETFL, O_NONBLOCK); - if (fd[nr_cpu][counter] < 0) { - int err = errno; + /* + * First counter acts as the group leader: + */ + if (group && group_fd == -1) + group_fd = fd[nr_cpu][counter]; + + event_array[nr_poll].fd = fd[nr_cpu][counter]; + event_array[nr_poll].events = POLLIN; + nr_poll++; + + mmap_array[nr_cpu][counter].counter = counter; + mmap_array[nr_cpu][counter].prev = 0; + mmap_array[nr_cpu][counter].mask = mmap_pages*page_size - 1; + mmap_array[nr_cpu][counter].base = mmap(NULL, (mmap_pages+1)*page_size, + PROT_READ, MAP_SHARED, fd[nr_cpu][counter], 0); + if (mmap_array[nr_cpu][counter].base == MAP_FAILED) { + error("failed to mmap with %d (%s)\n", errno, strerror(errno)); + exit(-1); + } +} - error("syscall returned with %d (%s)\n", - fd[nr_cpu][counter], strerror(err)); - if (err == EPERM) - printf("Are you root?\n"); - exit(-1); - } - assert(fd[nr_cpu][counter] >= 0); - fcntl(fd[nr_cpu][counter], F_SETFL, O_NONBLOCK); +static void open_counters(int cpu, pid_t pid) +{ + int counter; - /* - * First counter acts as the group leader: - */ - if (group && group_fd == -1) - group_fd = fd[nr_cpu][counter]; - - event_array[nr_poll].fd = fd[nr_cpu][counter]; - event_array[nr_poll].events = POLLIN; - nr_poll++; - - mmap_array[nr_cpu][counter].counter = counter; - mmap_array[nr_cpu][counter].prev = 0; - mmap_array[nr_cpu][counter].mask = mmap_pages*page_size - 1; - mmap_array[nr_cpu][counter].base = mmap(NULL, (mmap_pages+1)*page_size, - PROT_READ, MAP_SHARED, fd[nr_cpu][counter], 0); - if (mmap_array[nr_cpu][counter].base == MAP_FAILED) { - error("failed to mmap with %d (%s)\n", errno, strerror(errno)); - exit(-1); - } + if (pid > 0) { + pid_synthesize_comm_event(pid, 0); + pid_synthesize_mmap_events(pid); } + + group_fd = -1; + for (counter = 0; counter < nr_counters; counter++) + create_counter(counter, cpu, pid); + nr_cpu++; } -- cgit v1.2.3 From cf1f45744c6fa3501e0a6f0ddc418f0ef27e725b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 5 Jun 2009 13:27:02 +0200 Subject: perf record, top: Implement --freq Support frequency-based profiling and make it the default. (Also add a Hz printout in perf top.) 
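The interface this converges on (spelled out in the later "perf record: Set frequency correctly" patch in this series; shown here as a sketch) is that setting attr.freq makes the kernel interpret the companion field as a target rate in Hz and auto-adjust the real sampling period towards it:

if (freq) {
	attr.freq        = 1;		/* auto-adjust the period to hit... */
	attr.sample_freq = freq;	/* ...this many samples/sec */
} else {
	attr.sample_period = event_count[counter];	/* fixed period */
}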
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 10 +++++++++- Documentation/perf_counter/builtin-top.c | 13 +++++++++---- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 7f2d7ce9407..e2301f39e55 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -27,6 +27,7 @@ static int fd[MAX_NR_CPUS][MAX_COUNTERS]; static int nr_cpus = 0; static unsigned int page_size; static unsigned int mmap_pages = 128; +static int freq = 0; static int output; static const char *output_name = "perf.data"; static int group = 0; @@ -347,9 +348,10 @@ static void create_counter(int counter, int cpu, pid_t pid) attr.config = event_id[counter]; attr.sample_period = event_count[counter]; attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; + attr.freq = freq; attr.mmap = track; attr.comm = track; - attr.inherit = (cpu < 0) && inherit; + attr.inherit = (cpu < 0) && inherit; track = 0; /* only the first counter needs these */ @@ -520,6 +522,8 @@ static const struct option options[] = { "output file name"), OPT_BOOLEAN('i', "inherit", &inherit, "child tasks inherit counters"), + OPT_INTEGER('F', "freq", &freq, + "profile at this frequency"), OPT_INTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"), OPT_END() @@ -540,6 +544,10 @@ int cmd_record(int argc, const char **argv, const char *prefix) event_id[0] = 0; } + if (freq) { + default_interval = freq; + freq = 1; + } for (counter = 0; counter < nr_counters; counter++) { if (event_count[counter]) continue; diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 28cbde4b6e8..2fee5951b23 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -74,8 +74,8 @@ static int nr_cpus = 0; static unsigned int realtime_prio = 0; static int group = 0; static unsigned int page_size; -static unsigned int mmap_pages = 16; -static int freq = 0; +static unsigned int mmap_pages = 16; +static int freq = 0; static char *sym_filter; static unsigned long filter_start; @@ -212,8 +212,13 @@ static void print_sym_table(void) events_per_sec, 100.0 - (100.0*((events_per_sec-kevents_per_sec)/events_per_sec))); - if (nr_counters == 1) - printf("%d ", event_count[0]); + if (nr_counters == 1) { + printf("%d", event_count[0]); + if (freq) + printf("Hz "); + else + printf(" "); + } for (counter = 0; counter < nr_counters; counter++) { if (counter) -- cgit v1.2.3 From f7b6eb3fa07269da20dbbde8ba37a0273fdbd9c9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 5 Jun 2009 14:04:51 +0200 Subject: x86: Set context.vdso before installing the mapping In order to make arch_vma_name() work from inside install_special_mapping() we need to set the context.vdso before calling it. ( This is needed for performance counters to be able to track this special executable area. 
) Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/vdso/vdso32-setup.c | 6 +++++- arch/x86/vdso/vma.c | 7 +++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c index 1241f118ab5..58bc00f68b1 100644 --- a/arch/x86/vdso/vdso32-setup.c +++ b/arch/x86/vdso/vdso32-setup.c @@ -338,6 +338,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) } } + current->mm->context.vdso = (void *)addr; + if (compat_uses_vma || !compat) { /* * MAYWRITE to allow gdb to COW and set breakpoints @@ -358,11 +360,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) goto up_fail; } - current->mm->context.vdso = (void *)addr; current_thread_info()->sysenter_return = VDSO32_SYMBOL(addr, SYSENTER_RETURN); up_fail: + if (ret) + current->mm->context.vdso = NULL; + up_write(&mm->mmap_sem); return ret; diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c index 7133cdf9098..93b7a2938b2 100644 --- a/arch/x86/vdso/vma.c +++ b/arch/x86/vdso/vma.c @@ -115,15 +115,18 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) goto up_fail; } + current->mm->context.vdso = (void *)addr; + ret = install_special_mapping(mm, addr, vdso_size, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| VM_ALWAYSDUMP, vdso_pages); - if (ret) + if (ret) { + current->mm->context.vdso = NULL; goto up_fail; + } - current->mm->context.vdso = (void *)addr; up_fail: up_write(&mm->mmap_sem); return ret; -- cgit v1.2.3 From 089dd79db9264dc0da602bad45d42f1b3e7d1e07 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 5 Jun 2009 14:04:55 +0200 Subject: perf_counter: Generate mmap events for install_special_mapping() In order to track the vdso also generate mmap events for install_special_mapping(). 
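Condensing the non-file branch this patch adds to perf_counter_mmap_event(), the name attached to such a mapping resolves roughly as in this hypothetical helper:

static const char *special_vma_name(struct vm_area_struct *vma)
{
	const char *name = arch_vma_name(vma);

	if (name)
		return name;		/* e.g. "[vdso]" on x86 */
	if (!vma->vm_mm)
		return "[vdso]";	/* special mapping without an mm */
	return "//anon";		/* plain anonymous memory */
}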
Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 14 ++++++++------ kernel/perf_counter.c | 34 ++++++++++++++++++++++------------ mm/mmap.c | 5 +++-- 3 files changed, 33 insertions(+), 20 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 6ca403acd41..40dc0e273d9 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -617,8 +617,13 @@ static inline int is_software_counter(struct perf_counter *counter) extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64); -extern void perf_counter_mmap(unsigned long addr, unsigned long len, - unsigned long pgoff, struct file *file); +extern void __perf_counter_mmap(struct vm_area_struct *vma); + +static inline void perf_counter_mmap(struct vm_area_struct *vma) +{ + if (vma->vm_flags & VM_EXEC) + __perf_counter_mmap(vma); +} extern void perf_counter_comm(struct task_struct *tsk); extern void perf_counter_fork(struct task_struct *tsk); @@ -668,10 +673,7 @@ static inline void perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr) { } -static inline void -perf_counter_mmap(unsigned long addr, unsigned long len, - unsigned long pgoff, struct file *file) { } - +static inline void perf_counter_mmap(struct vm_area_struct *vma) { } static inline void perf_counter_comm(struct task_struct *tsk) { } static inline void perf_counter_fork(struct task_struct *tsk) { } static inline void perf_counter_init(void) { } diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index a5d3e2aedd2..37a5a241ca7 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2255,7 +2255,7 @@ out: } static void perf_output_copy(struct perf_output_handle *handle, - void *buf, unsigned int len) + const void *buf, unsigned int len) { unsigned int pages_mask; unsigned int offset; @@ -2681,9 +2681,10 @@ void perf_counter_comm(struct task_struct *task) */ struct perf_mmap_event { - struct file *file; - char *file_name; - int file_size; + struct vm_area_struct *vma; + + const char *file_name; + int file_size; struct { struct perf_event_header header; @@ -2744,11 +2745,12 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event) { struct perf_cpu_context *cpuctx; struct perf_counter_context *ctx; - struct file *file = mmap_event->file; + struct vm_area_struct *vma = mmap_event->vma; + struct file *file = vma->vm_file; unsigned int size; char tmp[16]; char *buf = NULL; - char *name; + const char *name; if (file) { buf = kzalloc(PATH_MAX, GFP_KERNEL); @@ -2762,6 +2764,15 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event) goto got_name; } } else { + name = arch_vma_name(mmap_event->vma); + if (name) + goto got_name; + + if (!vma->vm_mm) { + name = strncpy(tmp, "[vdso]", sizeof(tmp)); + goto got_name; + } + name = strncpy(tmp, "//anon", sizeof(tmp)); goto got_name; } @@ -2791,8 +2802,7 @@ got_name: kfree(buf); } -void perf_counter_mmap(unsigned long addr, unsigned long len, - unsigned long pgoff, struct file *file) +void __perf_counter_mmap(struct vm_area_struct *vma) { struct perf_mmap_event mmap_event; @@ -2800,12 +2810,12 @@ void perf_counter_mmap(unsigned long addr, unsigned long len, return; mmap_event = (struct perf_mmap_event){ - .file = file, + .vma = vma, .event = { .header = { .type = PERF_EVENT_MMAP, }, - .start = addr, - .len = len, - .pgoff = pgoff, + .start = vma->vm_start, + .len = vma->vm_end - 
vma->vm_start, + .pgoff = vma->vm_pgoff, }, }; diff --git a/mm/mmap.c b/mm/mmap.c index 6451ce2854b..8101de490c7 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1220,8 +1220,7 @@ munmap_back: if (correct_wcount) atomic_inc(&inode->i_writecount); out: - if (vm_flags & VM_EXEC) - perf_counter_mmap(addr, len, pgoff, file); + perf_counter_mmap(vma); mm->total_vm += len >> PAGE_SHIFT; vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); @@ -2309,6 +2308,8 @@ int install_special_mapping(struct mm_struct *mm, mm->total_vm += len >> PAGE_SHIFT; + perf_counter_mmap(vma); + return 0; } -- cgit v1.2.3 From fc54db5105d01ad691a7d747064c7890e17f936c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 5 Jun 2009 14:04:59 +0200 Subject: perf report: Deal with maps In order to deal with [vdso] maps generalize the ip->symbol path a bit and allow to override some bits with custom functions. Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 37 +++++++++++++++++++++++++++-- Documentation/perf_counter/util/symbol.c | 1 + Documentation/perf_counter/util/symbol.h | 1 + 3 files changed, 37 insertions(+), 2 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index eb5424fcbd6..9783d1e493c 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -79,6 +79,7 @@ typedef union event_union { static LIST_HEAD(dsos); static struct dso *kernel_dso; +static struct dso *vdso; static void dsos__add(struct dso *dso) { @@ -136,6 +137,11 @@ static void dsos__fprintf(FILE *fp) dso__fprintf(pos, fp); } +static struct symbol *vdso__find_symbol(struct dso *dso, uint64_t ip) +{ + return dso__find_symbol(kernel_dso, ip); +} + static int load_kernel(void) { int err; @@ -151,6 +157,14 @@ static int load_kernel(void) } else dsos__add(kernel_dso); + vdso = dso__new("[vdso]", 0); + if (!vdso) + return -1; + + vdso->find_symbol = vdso__find_symbol; + + dsos__add(vdso); + return err; } @@ -173,9 +187,20 @@ struct map { uint64_t start; uint64_t end; uint64_t pgoff; + uint64_t (*map_ip)(struct map *, uint64_t); struct dso *dso; }; +static uint64_t map__map_ip(struct map *map, uint64_t ip) +{ + return ip - map->start + map->pgoff; +} + +static uint64_t vdso__map_ip(struct map *map, uint64_t ip) +{ + return ip; +} + static struct map *map__new(struct mmap_event *event) { struct map *self = malloc(sizeof(*self)); @@ -201,6 +226,11 @@ static struct map *map__new(struct mmap_event *event) self->dso = dsos__findnew(filename); if (self->dso == NULL) goto out_delete; + + if (self->dso == vdso) + self->map_ip = vdso__map_ip; + else + self->map_ip = map__map_ip; } return self; out_delete: @@ -917,8 +947,8 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head) map = thread__find_map(thread, ip); if (map != NULL) { + ip = map->map_ip(map, ip); dso = map->dso; - ip -= map->start + map->pgoff; } else { /* * If this is outside of all known maps, @@ -938,7 +968,10 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head) } if (show & show_mask) { - struct symbol *sym = dso__find_symbol(dso, ip); + struct symbol *sym = NULL; + + if (dso) + sym = dso->find_symbol(dso, ip); if (hist_entry__add(thread, map, dso, sym, ip, level)) { fprintf(stderr, diff --git a/Documentation/perf_counter/util/symbol.c b/Documentation/perf_counter/util/symbol.c index 
15d5cf9abfa..a06bbfba835 100644 --- a/Documentation/perf_counter/util/symbol.c +++ b/Documentation/perf_counter/util/symbol.c @@ -45,6 +45,7 @@ struct dso *dso__new(const char *name, unsigned int sym_priv_size) strcpy(self->name, name); self->syms = RB_ROOT; self->sym_priv_size = sym_priv_size; + self->find_symbol = dso__find_symbol; } return self; diff --git a/Documentation/perf_counter/util/symbol.h b/Documentation/perf_counter/util/symbol.h index 8dd8522a0a0..e23cc312668 100644 --- a/Documentation/perf_counter/util/symbol.h +++ b/Documentation/perf_counter/util/symbol.h @@ -16,6 +16,7 @@ struct dso { struct list_head node; struct rb_root syms; unsigned int sym_priv_size; + struct symbol *(*find_symbol)(struct dso *, uint64_t ip); char name[0]; }; -- cgit v1.2.3 From 8edd4286f99f78fe07fe9196e69d5643da86cada Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 5 Jun 2009 14:13:18 +0200 Subject: perf report: Display user/kernel differentiator Before: 25.96% copy_user_generic_string 15.23% two_op 15.19% one_op 6.92% enough_duration 1.23% alloc_pages_current 1.14% acpi_os_read_port 1.08% _spin_lock After: 25.96% [k] copy_user_generic_string 15.23% [.] two_op 15.19% [.] one_op 6.92% [.] enough_duration 1.23% [k] alloc_pages_current 1.14% [k] acpi_os_read_port 1.08% [k] _spin_lock The '[k]' differentiator is a quick clue that it's a kernel symbol, without having to bring in the full dso column. Acked-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-report.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 9783d1e493c..ca303fd74a7 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -504,7 +504,7 @@ sort__comm_print(FILE *fp, struct hist_entry *self) } static struct sort_entry sort_comm = { - .header = " Command", + .header = " Command", .cmp = sort__comm_cmp, .collapse = sort__comm_collapse, .print = sort__comm_print, @@ -569,10 +569,12 @@ sort__sym_print(FILE *fp, struct hist_entry *self) if (verbose) ret += fprintf(fp, "%#018llx ", (__u64)self->ip); - if (self->sym) - ret += fprintf(fp, "%s", self->sym->name); - else + if (self->sym) { + ret += fprintf(fp, "[%c] %s", + self->dso == kernel_dso ? 'k' : '.', self->sym->name); + } else { ret += fprintf(fp, "%#016llx", (__u64)self->ip); + } return ret; } @@ -586,9 +588,9 @@ static struct sort_entry sort_sym = { static int sort__need_collapse = 0; struct sort_dimension { - char *name; - struct sort_entry *entry; - int taken; + char *name; + struct sort_entry *entry; + int taken; }; static struct sort_dimension sort_dimensions[] = { -- cgit v1.2.3 From 2debbc836696f2a815d02630230584a1754a5022 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 5 Jun 2009 14:29:10 +0200 Subject: perf_counter tools: Clarify events/samples naming A number of places said 'events' while they should say 'samples'. 
Acked-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 22 +++++++------- Documentation/perf_counter/builtin-report.c | 2 +- Documentation/perf_counter/builtin-top.c | 46 ++++++++++++++--------------- 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index e2301f39e55..d4ad3057a71 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -65,7 +65,7 @@ static unsigned int mmap_read_head(struct mmap_data *md) return head; } -static long events; +static long samples; static struct timeval last_read, this_read; static __u64 bytes_written; @@ -83,7 +83,7 @@ static void mmap_read(struct mmap_data *md) /* * If we're further behind than half the buffer, there's a chance - * the writer will bite our tail and screw up the events under us. + * the writer will bite our tail and mess up the samples under us. * * If we somehow ended up ahead of the head, we got messed up. * @@ -109,7 +109,7 @@ static void mmap_read(struct mmap_data *md) last_read = this_read; if (old != head) - events++; + samples++; size = head - old; @@ -257,7 +257,7 @@ out_failure: exit(EXIT_FAILURE); } -static void pid_synthesize_mmap_events(pid_t pid) +static void pid_synthesize_mmap_samples(pid_t pid) { char filename[PATH_MAX]; FILE *fp; @@ -315,7 +315,7 @@ static void pid_synthesize_mmap_events(pid_t pid) fclose(fp); } -static void synthesize_events(void) +static void synthesize_samples(void) { DIR *proc; struct dirent dirent, *next; @@ -331,7 +331,7 @@ static void synthesize_events(void) continue; pid_synthesize_comm_event(pid, 1); - pid_synthesize_mmap_events(pid); + pid_synthesize_mmap_samples(pid); } closedir(proc); @@ -396,7 +396,7 @@ static void open_counters(int cpu, pid_t pid) if (pid > 0) { pid_synthesize_comm_event(pid, 0); - pid_synthesize_mmap_events(pid); + pid_synthesize_mmap_samples(pid); } group_fd = -1; @@ -469,17 +469,17 @@ static int __cmd_record(int argc, const char **argv) } if (system_wide) - synthesize_events(); + synthesize_samples(); while (!done) { - int hits = events; + int hits = samples; for (i = 0; i < nr_cpu; i++) { for (counter = 0; counter < nr_counters; counter++) mmap_read(&mmap_array[i][counter]); } - if (hits == events) + if (hits == samples) ret = poll(event_array, nr_poll, 100); } @@ -487,7 +487,7 @@ static int __cmd_record(int argc, const char **argv) * Approximate RIP event size: 24 bytes. 
*/ fprintf(stderr, - "[ perf record: Captured and wrote %.3f MB %s (~%lld events) ]\n", + "[ perf record: Captured and wrote %.3f MB %s (~%lld samples) ]\n", (double)bytes_written / 1024.0 / 1024.0, output_name, bytes_written / 24); diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index ca303fd74a7..5af105c280b 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -857,7 +857,7 @@ static size_t output__fprintf(FILE *fp, uint64_t total_samples) fprintf(fp, "\n"); fprintf(fp, "#\n"); - fprintf(fp, "# (%Ld profiler events)\n", (__u64)total_samples); + fprintf(fp, "# (%Ld samples)\n", (__u64)total_samples); fprintf(fp, "#\n"); fprintf(fp, "# Overhead"); diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 2fee5951b23..ff7e13c4647 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -137,8 +137,8 @@ static double sym_weight(const struct sym_entry *sym) return weight; } -static long events; -static long userspace_events; +static long samples; +static long userspace_samples; static const char CONSOLE_CLEAR[] = ""; static void __list_insert_active_sym(struct sym_entry *syme) @@ -177,14 +177,14 @@ static void print_sym_table(void) { int printed = 0, j; int counter; - float events_per_sec = events/delay_secs; - float kevents_per_sec = (events-userspace_events)/delay_secs; - float sum_kevents = 0.0; + float samples_per_sec = samples/delay_secs; + float ksamples_per_sec = (samples-userspace_samples)/delay_secs; + float sum_ksamples = 0.0; struct sym_entry *syme, *n; struct rb_root tmp = RB_ROOT; struct rb_node *nd; - events = userspace_events = 0; + samples = userspace_samples = 0; /* Sort the active symbols */ pthread_mutex_lock(&active_symbols_lock); @@ -196,7 +196,7 @@ static void print_sym_table(void) if (syme->snap_count != 0) { syme->weight = sym_weight(syme); rb_insert_active_sym(&tmp, syme); - sum_kevents += syme->snap_count; + sum_ksamples += syme->snap_count; for (j = 0; j < nr_counters; j++) syme->count[j] = zero ? 
0 : syme->count[j] * 7 / 8; @@ -209,8 +209,8 @@ static void print_sym_table(void) printf( "------------------------------------------------------------------------------\n"); printf( " PerfTop:%8.0f irqs/sec kernel:%4.1f%% [", - events_per_sec, - 100.0 - (100.0*((events_per_sec-kevents_per_sec)/events_per_sec))); + samples_per_sec, + 100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec))); if (nr_counters == 1) { printf("%d", event_count[0]); @@ -246,12 +246,12 @@ static void print_sym_table(void) printf("------------------------------------------------------------------------------\n\n"); if (nr_counters == 1) - printf(" events pcnt"); + printf(" samples pcnt"); else - printf(" weight events pcnt"); + printf(" weight samples pcnt"); printf(" RIP kernel function\n" - " ______ ______ _____ ________________ _______________\n\n" + " ______ _______ _____ ________________ _______________\n\n" ); for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) { @@ -263,8 +263,8 @@ static void print_sym_table(void) if (++printed > print_entries || syme->snap_count < count_filter) continue; - pcnt = 100.0 - (100.0 * ((sum_kevents - syme->snap_count) / - sum_kevents)); + pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) / + sum_ksamples)); /* * We color high-overhead entries in red, low-overhead @@ -276,9 +276,9 @@ static void print_sym_table(void) color = PERF_COLOR_GREEN; if (nr_counters == 1) - printf("%19.2f - ", syme->weight); + printf("%20.2f - ", syme->weight); else - printf("%8.1f %10ld - ", syme->weight, syme->snap_count); + printf("%9.1f %10ld - ", syme->weight, syme->snap_count); color_fprintf(stdout, color, "%4.1f%%", pcnt); printf(" - %016llx : %s\n", sym->start, sym->name); @@ -318,7 +318,7 @@ static int symbol_filter(struct dso *self, struct symbol *sym) return 1; syme = dso__sym_priv(self, sym); - /* Tag events to be skipped. */ + /* Tag samples to be skipped. */ if (!strcmp("default_idle", name) || !strcmp("cpu_idle", name) || !strcmp("enter_idle", name) || @@ -405,15 +405,15 @@ static void record_ip(uint64_t ip, int counter) } } - events--; + samples--; } static void process_event(uint64_t ip, int counter) { - events++; + samples++; if (ip < min_ip || ip > max_ip) { - userspace_events++; + userspace_samples++; return; } @@ -451,7 +451,7 @@ static void mmap_read(struct mmap_data *md) /* * If we're further behind than half the buffer, there's a chance - * the writer will bite our tail and screw up the events under us. + * the writer will bite our tail and mess up the samples under us. * * If we somehow ended up ahead of the head, we got messed up. * @@ -608,14 +608,14 @@ static int __cmd_top(void) } while (1) { - int hits = events; + int hits = samples; for (i = 0; i < nr_cpus; i++) { for (counter = 0; counter < nr_counters; counter++) mmap_read(&mmap_array[i][counter]); } - if (hits == events) + if (hits == samples) ret = poll(event_array, nr_poll, 100); } -- cgit v1.2.3 From 136107a76fe5f62906162f730834477b71cf131e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 5 Jun 2009 17:56:21 +0200 Subject: perf_counter tools: Remove -march=native Turns out that neither PowerPC nor older x86 compilers know this switch ... and since it does not make a measurable difference, just omit it. 
Reported-by: Paul Mackerras Reported-by: Steven Rostedt Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index c9ec4585f4d..5b99f04df81 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -159,7 +159,7 @@ uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') # CFLAGS and LDFLAGS are for the users to override from the command line. -CFLAGS = -ggdb3 -Wall -Werror -Wstrict-prototypes -Wmissing-declarations -Wmissing-prototypes -std=gnu99 -Wdeclaration-after-statement -O6 -march=native +CFLAGS = -ggdb3 -Wall -Werror -Wstrict-prototypes -Wmissing-declarations -Wmissing-prototypes -std=gnu99 -Wdeclaration-after-statement -O6 LDFLAGS = -lpthread -lrt -lelf ALL_CFLAGS = $(CFLAGS) ALL_LDFLAGS = $(LDFLAGS) -- cgit v1.2.3 From ac4bcf889469ffbca88f234d3184452886a47905 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 5 Jun 2009 14:44:52 +0200 Subject: perf_counter: Change PERF_SAMPLE_CONFIG into PERF_SAMPLE_ID The purpose of PERF_SAMPLE_CONFIG was to identify the counters, since then we've added counter ids, use those instead. Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 2 +- kernel/perf_counter.c | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 40dc0e273d9..9cea32a0655 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -104,7 +104,7 @@ enum perf_counter_sample_format { PERF_SAMPLE_ADDR = 1U << 3, PERF_SAMPLE_GROUP = 1U << 4, PERF_SAMPLE_CALLCHAIN = 1U << 5, - PERF_SAMPLE_CONFIG = 1U << 6, + PERF_SAMPLE_ID = 1U << 6, PERF_SAMPLE_CPU = 1U << 7, }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 37a5a241ca7..e75b91a76a5 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2392,8 +2392,8 @@ static void perf_counter_output(struct perf_counter *counter, header.size += sizeof(u64); } - if (sample_type & PERF_SAMPLE_CONFIG) { - header.type |= PERF_SAMPLE_CONFIG; + if (sample_type & PERF_SAMPLE_ID) { + header.type |= PERF_SAMPLE_ID; header.size += sizeof(u64); } @@ -2439,8 +2439,8 @@ static void perf_counter_output(struct perf_counter *counter, if (sample_type & PERF_SAMPLE_ADDR) perf_output_put(&handle, addr); - if (sample_type & PERF_SAMPLE_CONFIG) - perf_output_put(&handle, counter->attr.config); + if (sample_type & PERF_SAMPLE_ID) + perf_output_put(&handle, counter->id); if (sample_type & PERF_SAMPLE_CPU) perf_output_put(&handle, cpu_entry); -- cgit v1.2.3 From 689802b2d0536e72281dc959ab9cb34fb3c304cf Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 5 Jun 2009 15:05:43 +0200 Subject: perf_counter: Add PERF_SAMPLE_PERIOD In order to allow easy tracking of the period, also provide means of adding it to the sample data. 
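Since the optional sample fields are written out in a fixed order (ip, tid, time, addr, id, cpu, then period), a consumer can locate the period by skipping the earlier fields. A sketch, assuming PERF_SAMPLE_TIME sits at bit 2 (that bit is not visible in this hunk) and that each field occupies one u64:

static __u64 sample_period_of(const __u64 *p, __u64 sample_type)
{
	if (sample_type & PERF_SAMPLE_IP)	p++;
	if (sample_type & PERF_SAMPLE_TID)	p++;	/* u32 pid, tid packed */
	if (sample_type & PERF_SAMPLE_TIME)	p++;
	if (sample_type & PERF_SAMPLE_ADDR)	p++;
	if (sample_type & PERF_SAMPLE_ID)	p++;
	if (sample_type & PERF_SAMPLE_CPU)	p++;	/* u32 cpu, res packed */

	return (sample_type & PERF_SAMPLE_PERIOD) ? *p : 0;
}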
Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 2 ++ kernel/perf_counter.c | 10 ++++++++++ 2 files changed, 12 insertions(+) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 9cea32a0655..6bc25003973 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -106,6 +106,7 @@ enum perf_counter_sample_format { PERF_SAMPLE_CALLCHAIN = 1U << 5, PERF_SAMPLE_ID = 1U << 6, PERF_SAMPLE_CPU = 1U << 7, + PERF_SAMPLE_PERIOD = 1U << 8, }; /* @@ -260,6 +261,7 @@ enum perf_event_type { * struct { * struct perf_event_header header; * u64 time; + * u64 id; * u64 sample_period; * }; */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index e75b91a76a5..f8390668c39 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2404,6 +2404,11 @@ static void perf_counter_output(struct perf_counter *counter, cpu_entry.cpu = raw_smp_processor_id(); } + if (sample_type & PERF_SAMPLE_PERIOD) { + header.type |= PERF_SAMPLE_PERIOD; + header.size += sizeof(u64); + } + if (sample_type & PERF_SAMPLE_GROUP) { header.type |= PERF_SAMPLE_GROUP; header.size += sizeof(u64) + @@ -2445,6 +2450,9 @@ static void perf_counter_output(struct perf_counter *counter, if (sample_type & PERF_SAMPLE_CPU) perf_output_put(&handle, cpu_entry); + if (sample_type & PERF_SAMPLE_PERIOD) + perf_output_put(&handle, counter->hw.sample_period); + /* * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult. */ @@ -2835,6 +2843,7 @@ static void perf_log_period(struct perf_counter *counter, u64 period) struct { struct perf_event_header header; u64 time; + u64 id; u64 period; } freq_event = { .header = { @@ -2843,6 +2852,7 @@ static void perf_log_period(struct perf_counter *counter, u64 period) .size = sizeof(freq_event), }, .time = sched_clock(), + .id = counter->id, .period = period, }; -- cgit v1.2.3 From 6a24ed6c6082ec65d19331a4bfa30c0512a1a822 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 5 Jun 2009 18:01:29 +0200 Subject: perf_counter: Fix frequency adjustment for < HZ Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 3 +++ kernel/perf_counter.c | 32 +++++++++++++++++++++++++------- 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 6bc25003973..4f9d39ecdc0 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -373,6 +373,9 @@ struct hw_perf_counter { u64 sample_period; atomic64_t period_left; u64 interrupts; + + u64 freq_count; + u64 freq_interrupts; #endif }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index f8390668c39..47c92fb927f 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1187,8 +1187,9 @@ static void perf_log_period(struct perf_counter *counter, u64 period); static void perf_adjust_freq(struct perf_counter_context *ctx) { struct perf_counter *counter; + struct hw_perf_counter *hwc; u64 interrupts, sample_period; - u64 events, period; + u64 events, period, freq; s64 delta; spin_lock(&ctx->lock); @@ -1196,8 +1197,10 @@ static void perf_adjust_freq(struct perf_counter_context *ctx) if (counter->state != PERF_COUNTER_STATE_ACTIVE) continue; - interrupts = counter->hw.interrupts; - counter->hw.interrupts = 0; + hwc = &counter->hw; + + interrupts = hwc->interrupts; + 
hwc->interrupts = 0; if (interrupts == MAX_INTERRUPTS) { perf_log_throttle(counter, 1); @@ -1208,20 +1211,35 @@ static void perf_adjust_freq(struct perf_counter_context *ctx) if (!counter->attr.freq || !counter->attr.sample_freq) continue; - events = HZ * interrupts * counter->hw.sample_period; + if (counter->attr.sample_freq < HZ) { + freq = counter->attr.sample_freq; + + hwc->freq_count += freq; + hwc->freq_interrupts += interrupts; + + if (hwc->freq_count < HZ) + continue; + + interrupts = hwc->freq_interrupts; + hwc->freq_interrupts = 0; + hwc->freq_count -= HZ; + } else + freq = HZ; + + events = freq * interrupts * hwc->sample_period; period = div64_u64(events, counter->attr.sample_freq); - delta = (s64)(1 + period - counter->hw.sample_period); + delta = (s64)(1 + period - hwc->sample_period); delta >>= 1; - sample_period = counter->hw.sample_period + delta; + sample_period = hwc->sample_period + delta; if (!sample_period) sample_period = 1; perf_log_period(counter, sample_period); - counter->hw.sample_period = sample_period; + hwc->sample_period = sample_period; } spin_unlock(&ctx->lock); } -- cgit v1.2.3 From b2fef0762fdb65cf8702eea93f4e58abeb0ecefc Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 5 Jun 2009 18:07:51 +0200 Subject: perf_counter tools: Sample and display frequency adjustment changes To allow the debugging of frequency-adjusting counters, sample those adjustments and display them in perf report -D. Acked-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 2 +- Documentation/perf_counter/builtin-report.c | 39 ++++++++++++++++++++++++----- 2 files changed, 34 insertions(+), 7 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index d4ad3057a71..43ddab31ac3 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -347,7 +347,7 @@ static void create_counter(int counter, int cpu, pid_t pid) memset(&attr, 0, sizeof(attr)); attr.config = event_id[counter]; attr.sample_period = event_count[counter]; - attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; + attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD; attr.freq = freq; attr.mmap = track; attr.comm = track; diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c index 5af105c280b..242e09ff365 100644 --- a/Documentation/perf_counter/builtin-report.c +++ b/Documentation/perf_counter/builtin-report.c @@ -69,12 +69,20 @@ struct fork_event { __u32 pid, ppid; }; -typedef union event_union { +struct period_event { struct perf_event_header header; - struct ip_event ip; - struct mmap_event mmap; - struct comm_event comm; - struct fork_event fork; + __u64 time; + __u64 id; + __u64 sample_period; +}; + +typedef union event_union { + struct perf_event_header header; + struct ip_event ip; + struct mmap_event mmap; + struct comm_event comm; + struct fork_event fork; + struct period_event period; } event_t; static LIST_HEAD(dsos); @@ -1052,6 +1060,19 @@ process_fork_event(event_t *event, unsigned long offset, unsigned long head) return 0; } +static int +process_period_event(event_t *event, unsigned long offset, unsigned long head) +{ + dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->period.time, + event->period.id, + 
event->period.sample_period); + + return 0; +} + static int process_event(event_t *event, unsigned long offset, unsigned long head) { @@ -1068,11 +1089,12 @@ process_event(event_t *event, unsigned long offset, unsigned long head) case PERF_EVENT_FORK: return process_fork_event(event, offset, head); + case PERF_EVENT_PERIOD: + return process_period_event(event, offset, head); /* * We dont process them right now but they are fine: */ - case PERF_EVENT_PERIOD: case PERF_EVENT_THROTTLE: case PERF_EVENT_UNTHROTTLE: return 0; @@ -1157,6 +1179,11 @@ more: size = event->header.size; + dprintf("%p [%p]: event: %d\n", + (void *)(offset + head), + (void *)(long)event->header.size, + event->header.type); + if (!size || process_event(event, offset, head) < 0) { dprintf("%p [%p]: skipping unknown header type: %d\n", -- cgit v1.2.3 From 1dba15e74aba5a90c1f2557f37e5d09f8a2df643 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 5 Jun 2009 18:37:22 +0200 Subject: perf record: Set frequency correctly Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 43ddab31ac3..c22ea0c7472 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -348,7 +348,10 @@ static void create_counter(int counter, int cpu, pid_t pid) attr.config = event_id[counter]; attr.sample_period = event_count[counter]; attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD; - attr.freq = freq; + if (freq) { + attr.freq = 1; + attr.sample_freq = freq; + } attr.mmap = track; attr.comm = track; attr.inherit = (cpu < 0) && inherit; @@ -544,10 +547,6 @@ int cmd_record(int argc, const char **argv, const char *prefix) event_id[0] = 0; } - if (freq) { - default_interval = freq; - freq = 1; - } for (counter = 0; counter < nr_counters; counter++) { if (event_count[counter]) continue; -- cgit v1.2.3 From 2f335a02b3c816e77e7df1d15b12e3bbb8f4c8f0 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 5 Jun 2009 19:31:01 +0200 Subject: perf top: Fix zero or negative refresh delay If perf top is executed with a zero value for the refresh rate, we get a division by zero exception while computing samples_per_sec. Also, a zero refresh rate makes no sense, nor do we want to accept negative values.
[ Impact: fix division by zero in perf top ] Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <1244223061-5399-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-top.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index ff7e13c4647..b2f480b5a13 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -693,6 +693,9 @@ int cmd_top(int argc, const char **argv, const char *prefix) event_id[0] = 0; } + if (delay_secs < 1) + delay_secs = 1; + for (counter = 0; counter < nr_counters; counter++) { if (event_count[counter]) continue; -- cgit v1.2.3 From a21ca2cac582886a3e95c8bb84ff7c52d4d15e54 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 6 Jun 2009 09:58:57 +0200 Subject: perf_counter: Separate out attr->type from attr->config Counter type is a frequently used value and we do a lot of bit juggling by encoding and decoding it from attr->config. Clean this up by creating a separate attr->type field. Also clean up the various similarly complex user-space bits all around counter attribute management. The net improvement is significant, and it will be easier to add a new major type (which is what triggered this cleanup). (This changes the ABI, all tools are adapted.) (PowerPC build-tested.) Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-record.c | 105 ++++++++++------------ Documentation/perf_counter/builtin-stat.c | 76 +++++++--------- Documentation/perf_counter/builtin-top.c | 67 +++++--------- Documentation/perf_counter/perf.h | 2 - Documentation/perf_counter/util/parse-events.c | 120 ++++++++++++++----------- Documentation/perf_counter/util/parse-events.h | 7 +- arch/powerpc/kernel/perf_counter.c | 6 +- arch/x86/kernel/cpu/perf_counter.c | 8 +- include/linux/perf_counter.h | 65 +++----------- kernel/perf_counter.c | 14 ++- 10 files changed, 196 insertions(+), 274 deletions(-) diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index c22ea0c7472..130fd88266b 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -20,10 +20,10 @@ #define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1) #define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) -static long default_interval = 100000; -static long event_count[MAX_COUNTERS]; - static int fd[MAX_NR_CPUS][MAX_COUNTERS]; + +static long default_interval = 100000; + static int nr_cpus = 0; static unsigned int page_size; static unsigned int mmap_pages = 128; @@ -38,22 +38,44 @@ static int inherit = 1; static int force = 0; static int append_file = 0; -const unsigned int default_count[] = { - 1000000, - 1000000, - 10000, - 10000, - 1000000, - 10000, +static long samples; +static struct timeval last_read; +static struct timeval this_read; + +static __u64 bytes_written; + +static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; + +static int nr_poll; +static int nr_cpu; + +struct mmap_event { + struct perf_event_header header; + __u32 pid; + __u32 tid; + __u64 start; + __u64 len; + __u64 pgoff; + char filename[PATH_MAX]; +}; + +struct comm_event { + struct perf_event_header header; + __u32 pid; + 
__u32 tid; + char comm[16]; }; + struct mmap_data { - int counter; - void *base; - unsigned int mask; - unsigned int prev; + int counter; + void *base; + unsigned int mask; + unsigned int prev; }; +static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; + static unsigned int mmap_read_head(struct mmap_data *md) { struct perf_counter_mmap_page *pc = md->base; @@ -65,11 +87,6 @@ static unsigned int mmap_read_head(struct mmap_data *md) return head; } -static long samples; -static struct timeval last_read, this_read; - -static __u64 bytes_written; - static void mmap_read(struct mmap_data *md) { unsigned int head = mmap_read_head(md); @@ -157,29 +174,6 @@ static void sig_handler(int sig) done = 1; } -static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; -static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; - -static int nr_poll; -static int nr_cpu; - -struct mmap_event { - struct perf_event_header header; - __u32 pid; - __u32 tid; - __u64 start; - __u64 len; - __u64 pgoff; - char filename[PATH_MAX]; -}; - -struct comm_event { - struct perf_event_header header; - __u32 pid; - __u32 tid; - char comm[16]; -}; - static void pid_synthesize_comm_event(pid_t pid, int full) { struct comm_event comm_ev; @@ -341,24 +335,21 @@ static int group_fd; static void create_counter(int counter, int cpu, pid_t pid) { - struct perf_counter_attr attr; + struct perf_counter_attr *attr = attrs + counter; int track = 1; - memset(&attr, 0, sizeof(attr)); - attr.config = event_id[counter]; - attr.sample_period = event_count[counter]; - attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD; + attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD; if (freq) { - attr.freq = 1; - attr.sample_freq = freq; + attr->freq = 1; + attr->sample_freq = freq; } - attr.mmap = track; - attr.comm = track; - attr.inherit = (cpu < 0) && inherit; + attr->mmap = track; + attr->comm = track; + attr->inherit = (cpu < 0) && inherit; track = 0; /* only the first counter needs these */ - fd[nr_cpu][counter] = sys_perf_counter_open(&attr, pid, cpu, group_fd, 0); + fd[nr_cpu][counter] = sys_perf_counter_open(attr, pid, cpu, group_fd, 0); if (fd[nr_cpu][counter] < 0) { int err = errno; @@ -542,16 +533,14 @@ int cmd_record(int argc, const char **argv, const char *prefix) if (!argc && target_pid == -1 && !system_wide) usage_with_options(record_usage, options); - if (!nr_counters) { + if (!nr_counters) nr_counters = 1; - event_id[0] = 0; - } for (counter = 0; counter < nr_counters; counter++) { - if (event_count[counter]) + if (attrs[counter].sample_period) continue; - event_count[counter] = default_interval; + attrs[counter].sample_period = default_interval; } return __cmd_record(argc, argv); diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index 4fc0d80440e..9711e552423 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -44,23 +44,22 @@ #include -static int system_wide = 0; -static int inherit = 1; +static struct perf_counter_attr default_attrs[MAX_COUNTERS] = { -static __u64 default_event_id[MAX_COUNTERS] = { - EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), - EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), - EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), - EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_TASK_CLOCK }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_CONTEXT_SWITCHES }, + { .type = 
PERF_TYPE_SOFTWARE, .config = PERF_COUNT_CPU_MIGRATIONS }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_PAGE_FAULTS }, - EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), - EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), - EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), - EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CPU_CYCLES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_INSTRUCTIONS }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CACHE_REFERENCES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CACHE_MISSES }, }; -static int default_interval = 100000; -static int event_count[MAX_COUNTERS]; +static int system_wide = 0; +static int inherit = 1; + static int fd[MAX_NR_CPUS][MAX_COUNTERS]; static int target_pid = -1; @@ -86,22 +85,16 @@ static __u64 walltime_nsecs; static void create_perfstat_counter(int counter) { - struct perf_counter_attr attr; - - memset(&attr, 0, sizeof(attr)); - attr.config = event_id[counter]; - attr.sample_type = 0; - attr.exclude_kernel = event_mask[counter] & EVENT_MASK_KERNEL; - attr.exclude_user = event_mask[counter] & EVENT_MASK_USER; + struct perf_counter_attr *attr = attrs + counter; if (scale) - attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | - PERF_FORMAT_TOTAL_TIME_RUNNING; + attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | + PERF_FORMAT_TOTAL_TIME_RUNNING; if (system_wide) { int cpu; for (cpu = 0; cpu < nr_cpus; cpu ++) { - fd[cpu][counter] = sys_perf_counter_open(&attr, -1, cpu, -1, 0); + fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0); if (fd[cpu][counter] < 0) { printf("perfstat error: syscall returned with %d (%s)\n", fd[cpu][counter], strerror(errno)); @@ -109,10 +102,10 @@ static void create_perfstat_counter(int counter) } } } else { - attr.inherit = inherit; - attr.disabled = 1; + attr->inherit = inherit; + attr->disabled = 1; - fd[0][counter] = sys_perf_counter_open(&attr, 0, -1, -1, 0); + fd[0][counter] = sys_perf_counter_open(attr, 0, -1, -1, 0); if (fd[0][counter] < 0) { printf("perfstat error: syscall returned with %d (%s)\n", fd[0][counter], strerror(errno)); @@ -126,9 +119,13 @@ static void create_perfstat_counter(int counter) */ static inline int nsec_counter(int counter) { - if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK)) + if (attrs[counter].type != PERF_TYPE_SOFTWARE) + return 0; + + if (attrs[counter].config == PERF_COUNT_CPU_CLOCK) return 1; - if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK)) + + if (attrs[counter].config == PERF_COUNT_TASK_CLOCK) return 1; return 0; @@ -177,7 +174,8 @@ static void read_counter(int counter) /* * Save the full runtime - to allow normalization during printout: */ - if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK)) + if (attrs[counter].type == PERF_TYPE_SOFTWARE && + attrs[counter].config == PERF_COUNT_TASK_CLOCK) runtime_nsecs = count[0]; } @@ -203,8 +201,8 @@ static void print_counter(int counter) fprintf(stderr, " %14.6f %-20s", msecs, event_name(counter)); - if (event_id[counter] == - EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK)) { + if (attrs[counter].type == PERF_TYPE_SOFTWARE && + attrs[counter].config == PERF_COUNT_TASK_CLOCK) { fprintf(stderr, " # %11.3f CPU utilization factor", (double)count[0] / (double)walltime_nsecs); @@ -300,8 +298,6 @@ static char events_help_msg[EVENTS_HELP_MAX]; static const struct option options[] = { OPT_CALLBACK('e', "event", NULL, "event", events_help_msg, parse_events), - 
OPT_INTEGER('c', "count", &default_interval, - "event period to sample"), OPT_BOOLEAN('i', "inherit", &inherit, "child tasks inherit counters"), OPT_INTEGER('p', "pid", &target_pid, @@ -315,27 +311,19 @@ static const struct option options[] = { int cmd_stat(int argc, const char **argv, const char *prefix) { - int counter; - page_size = sysconf(_SC_PAGE_SIZE); create_events_help(events_help_msg); - memcpy(event_id, default_event_id, sizeof(default_event_id)); + + memcpy(attrs, default_attrs, sizeof(attrs)); argc = parse_options(argc, argv, options, stat_usage, 0); if (!argc) usage_with_options(stat_usage, options); - if (!nr_counters) { + if (!nr_counters) nr_counters = 8; - } - - for (counter = 0; counter < nr_counters; counter++) { - if (event_count[counter]) - continue; - event_count[counter] = default_interval; - } nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); assert(nr_cpus <= MAX_NR_CPUS); assert(nr_cpus >= 0); diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index b2f480b5a13..98a6d53e17b 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -48,22 +48,11 @@ #include #include -static int system_wide = 0; +static int fd[MAX_NR_CPUS][MAX_COUNTERS]; -static __u64 default_event_id[MAX_COUNTERS] = { - EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), - EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), - EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), - EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), +static int system_wide = 0; - EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), - EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), - EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), - EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), -}; -static int default_interval = 100000; -static int event_count[MAX_COUNTERS]; -static int fd[MAX_NR_CPUS][MAX_COUNTERS]; +static int default_interval = 100000; static __u64 count_filter = 5; static int print_entries = 15; @@ -85,15 +74,6 @@ static int delay_secs = 2; static int zero; static int dump_symtab; -static const unsigned int default_count[] = { - 1000000, - 1000000, - 10000, - 10000, - 1000000, - 10000, -}; - /* * Symbols */ @@ -112,7 +92,7 @@ struct sym_entry { struct sym_entry *sym_filter_entry; -struct dso *kernel_dso; +struct dso *kernel_dso; /* * Symbols will be added here in record_ip and will get out @@ -213,7 +193,7 @@ static void print_sym_table(void) 100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec))); if (nr_counters == 1) { - printf("%d", event_count[0]); + printf("%Ld", attrs[0].sample_period); if (freq) printf("Hz "); else @@ -421,10 +401,10 @@ static void process_event(uint64_t ip, int counter) } struct mmap_data { - int counter; - void *base; - unsigned int mask; - unsigned int prev; + int counter; + void *base; + unsigned int mask; + unsigned int prev; }; static unsigned int mmap_read_head(struct mmap_data *md) @@ -539,7 +519,7 @@ static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; static int __cmd_top(void) { - struct perf_counter_attr attr; + struct perf_counter_attr *attr; pthread_t thread; int i, counter, group_fd, nr_poll = 0; unsigned int cpu; @@ -553,13 +533,12 @@ static int __cmd_top(void) if (target_pid == -1 && profile_cpu == -1) cpu = i; - memset(&attr, 0, sizeof(attr)); - attr.config = event_id[counter]; - attr.sample_period = event_count[counter]; - attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; - attr.freq = freq; + attr = attrs + counter; - fd[i][counter] = 
sys_perf_counter_open(&attr, target_pid, cpu, group_fd, 0); + attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; + attr->freq = freq; + + fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0); if (fd[i][counter] < 0) { int err = errno; @@ -670,7 +649,6 @@ int cmd_top(int argc, const char **argv, const char *prefix) page_size = sysconf(_SC_PAGE_SIZE); create_events_help(events_help_msg); - memcpy(event_id, default_event_id, sizeof(default_event_id)); argc = parse_options(argc, argv, options, top_usage, 0); if (argc) @@ -688,19 +666,22 @@ int cmd_top(int argc, const char **argv, const char *prefix) profile_cpu = -1; } - if (!nr_counters) { + if (!nr_counters) nr_counters = 1; - event_id[0] = 0; - } if (delay_secs < 1) delay_secs = 1; + parse_symbols(); + + /* + * Fill in the ones not specifically initialized via -c: + */ for (counter = 0; counter < nr_counters; counter++) { - if (event_count[counter]) + if (attrs[counter].sample_period) continue; - event_count[counter] = default_interval; + attrs[counter].sample_period = default_interval; } nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); @@ -710,7 +691,5 @@ int cmd_top(int argc, const char **argv, const char *prefix) if (target_pid != -1 || profile_cpu != -1) nr_cpus = 1; - parse_symbols(); - return __cmd_top(); } diff --git a/Documentation/perf_counter/perf.h b/Documentation/perf_counter/perf.h index 10622a48b40..af0a5046d74 100644 --- a/Documentation/perf_counter/perf.h +++ b/Documentation/perf_counter/perf.h @@ -64,6 +64,4 @@ sys_perf_counter_open(struct perf_counter_attr *attr_uptr, #define MAX_COUNTERS 256 #define MAX_NR_CPUS 256 -#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id)) - #endif diff --git a/Documentation/perf_counter/util/parse-events.c b/Documentation/perf_counter/util/parse-events.c index 2fdfd1d923f..eb56bd99657 100644 --- a/Documentation/perf_counter/util/parse-events.c +++ b/Documentation/perf_counter/util/parse-events.c @@ -6,37 +6,39 @@ #include "exec_cmd.h" #include "string.h" -int nr_counters; +int nr_counters; -__u64 event_id[MAX_COUNTERS] = { }; -int event_mask[MAX_COUNTERS]; +struct perf_counter_attr attrs[MAX_COUNTERS]; struct event_symbol { - __u64 event; - char *symbol; + __u8 type; + __u64 config; + char *symbol; }; +#define C(x, y) .type = PERF_TYPE_##x, .config = PERF_COUNT_##y + static struct event_symbol event_symbols[] = { - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cpu-cycles", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cycles", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), "instructions", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), "cache-references", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), "cache-misses", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branch-instructions", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branches", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES), "branch-misses", }, - {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES), "bus-cycles", }, - - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK), "cpu-clock", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), "task-clock", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "page-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN), "minor-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ), "major-faults", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "context-switches", }, - 
{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "cs", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "cpu-migrations", }, - {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "migrations", }, + { C(HARDWARE, CPU_CYCLES), "cpu-cycles", }, + { C(HARDWARE, CPU_CYCLES), "cycles", }, + { C(HARDWARE, INSTRUCTIONS), "instructions", }, + { C(HARDWARE, CACHE_REFERENCES), "cache-references", }, + { C(HARDWARE, CACHE_MISSES), "cache-misses", }, + { C(HARDWARE, BRANCH_INSTRUCTIONS), "branch-instructions", }, + { C(HARDWARE, BRANCH_INSTRUCTIONS), "branches", }, + { C(HARDWARE, BRANCH_MISSES), "branch-misses", }, + { C(HARDWARE, BUS_CYCLES), "bus-cycles", }, + + { C(SOFTWARE, CPU_CLOCK), "cpu-clock", }, + { C(SOFTWARE, TASK_CLOCK), "task-clock", }, + { C(SOFTWARE, PAGE_FAULTS), "page-faults", }, + { C(SOFTWARE, PAGE_FAULTS), "faults", }, + { C(SOFTWARE, PAGE_FAULTS_MIN), "minor-faults", }, + { C(SOFTWARE, PAGE_FAULTS_MAJ), "major-faults", }, + { C(SOFTWARE, CONTEXT_SWITCHES), "context-switches", }, + { C(SOFTWARE, CONTEXT_SWITCHES), "cs", }, + { C(SOFTWARE, CPU_MIGRATIONS), "cpu-migrations", }, + { C(SOFTWARE, CPU_MIGRATIONS), "migrations", }, }; #define __PERF_COUNTER_FIELD(config, name) \ @@ -67,27 +69,26 @@ static char *sw_event_names[] = { "major faults", }; -char *event_name(int ctr) +char *event_name(int counter) { - __u64 config = event_id[ctr]; - int type = PERF_COUNTER_TYPE(config); - int id = PERF_COUNTER_ID(config); + __u64 config = attrs[counter].config; + int type = attrs[counter].type; static char buf[32]; - if (PERF_COUNTER_RAW(config)) { - sprintf(buf, "raw 0x%llx", PERF_COUNTER_CONFIG(config)); + if (attrs[counter].type == PERF_TYPE_RAW) { + sprintf(buf, "raw 0x%llx", config); return buf; } switch (type) { case PERF_TYPE_HARDWARE: - if (id < PERF_HW_EVENTS_MAX) - return hw_event_names[id]; + if (config < PERF_HW_EVENTS_MAX) + return hw_event_names[config]; return "unknown-hardware"; case PERF_TYPE_SOFTWARE: - if (id < PERF_SW_EVENTS_MAX) - return sw_event_names[id]; + if (config < PERF_SW_EVENTS_MAX) + return sw_event_names[config]; return "unknown-software"; default: @@ -101,15 +102,19 @@ char *event_name(int ctr) * Each event can have multiple symbolic names. * Symbolic names are (almost) exactly matched. 
*/ -static __u64 match_event_symbols(const char *str) +static int match_event_symbols(const char *str, struct perf_counter_attr *attr) { __u64 config, id; int type; unsigned int i; const char *sep, *pstr; - if (str[0] == 'r' && hex2u64(str + 1, &config) > 0) - return config | PERF_COUNTER_RAW_MASK; + if (str[0] == 'r' && hex2u64(str + 1, &config) > 0) { + attr->type = PERF_TYPE_RAW; + attr->config = config; + + return 0; + } pstr = str; sep = strchr(pstr, ':'); @@ -121,35 +126,45 @@ static __u64 match_event_symbols(const char *str) if (sep) { pstr = sep + 1; if (strchr(pstr, 'k')) - event_mask[nr_counters] |= EVENT_MASK_USER; + attr->exclude_user = 1; if (strchr(pstr, 'u')) - event_mask[nr_counters] |= EVENT_MASK_KERNEL; + attr->exclude_kernel = 1; } - return EID(type, id); + attr->type = type; + attr->config = id; + + return 0; } for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { if (!strncmp(str, event_symbols[i].symbol, - strlen(event_symbols[i].symbol))) - return event_symbols[i].event; + strlen(event_symbols[i].symbol))) { + + attr->type = event_symbols[i].type; + attr->config = event_symbols[i].config; + + return 0; + } } - return ~0ULL; + return -EINVAL; } int parse_events(const struct option *opt, const char *str, int unset) { - __u64 config; + struct perf_counter_attr attr; + int ret; + memset(&attr, 0, sizeof(attr)); again: if (nr_counters == MAX_COUNTERS) return -1; - config = match_event_symbols(str); - if (config == ~0ULL) - return -1; + ret = match_event_symbols(str, &attr); + if (ret < 0) + return ret; - event_id[nr_counters] = config; + attrs[nr_counters] = attr; nr_counters++; str = strstr(str, ","); @@ -168,7 +183,6 @@ void create_events_help(char *events_help_msg) { unsigned int i; char *str; - __u64 e; str = events_help_msg; @@ -178,9 +192,8 @@ void create_events_help(char *events_help_msg) for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { int type, id; - e = event_symbols[i].event; - type = PERF_COUNTER_TYPE(e); - id = PERF_COUNTER_ID(e); + type = event_symbols[i].type; + id = event_symbols[i].config; if (i) str += sprintf(str, "|"); @@ -191,4 +204,3 @@ void create_events_help(char *events_help_msg) str += sprintf(str, "|rNNN]"); } - diff --git a/Documentation/perf_counter/util/parse-events.h b/Documentation/perf_counter/util/parse-events.h index 0da306bb902..542971c495b 100644 --- a/Documentation/perf_counter/util/parse-events.h +++ b/Documentation/perf_counter/util/parse-events.h @@ -3,12 +3,9 @@ * Parse symbolic events/counts passed in as options: */ -extern int nr_counters; -extern __u64 event_id[MAX_COUNTERS]; -extern int event_mask[MAX_COUNTERS]; +extern int nr_counters; -#define EVENT_MASK_KERNEL 1 -#define EVENT_MASK_USER 2 +extern struct perf_counter_attr attrs[MAX_COUNTERS]; extern char *event_name(int ctr); diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 232b00a36f7..4786ad9a288 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -867,13 +867,13 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter) if (!ppmu) return ERR_PTR(-ENXIO); - if (!perf_event_raw(&counter->attr)) { - ev = perf_event_id(&counter->attr); + if (counter->attr.type != PERF_TYPE_RAW) { + ev = counter->attr.config; if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) return ERR_PTR(-EOPNOTSUPP); ev = ppmu->generic_events[ev]; } else { - ev = perf_event_config(&counter->attr); + ev = counter->attr.config; } counter->hw.config_base = ev; counter->hw.idx = 0; diff --git 
a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 8f53f3a7da2..430e048f285 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -292,15 +292,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter) /* * Raw event type provide the config in the event structure */ - if (perf_event_raw(attr)) { - hwc->config |= x86_pmu.raw_event(perf_event_config(attr)); + if (attr->type == PERF_TYPE_RAW) { + hwc->config |= x86_pmu.raw_event(attr->config); } else { - if (perf_event_id(attr) >= x86_pmu.max_events) + if (attr->config >= x86_pmu.max_events) return -EINVAL; /* * The generic map: */ - hwc->config |= x86_pmu.event_map(perf_event_id(attr)); + hwc->config |= x86_pmu.event_map(attr->config); } counter->destroy = hw_perf_counter_destroy; diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 4f9d39ecdc0..f794c69b34c 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -73,26 +73,6 @@ enum sw_event_ids { PERF_SW_EVENTS_MAX = 7, }; -#define __PERF_COUNTER_MASK(name) \ - (((1ULL << PERF_COUNTER_##name##_BITS) - 1) << \ - PERF_COUNTER_##name##_SHIFT) - -#define PERF_COUNTER_RAW_BITS 1 -#define PERF_COUNTER_RAW_SHIFT 63 -#define PERF_COUNTER_RAW_MASK __PERF_COUNTER_MASK(RAW) - -#define PERF_COUNTER_CONFIG_BITS 63 -#define PERF_COUNTER_CONFIG_SHIFT 0 -#define PERF_COUNTER_CONFIG_MASK __PERF_COUNTER_MASK(CONFIG) - -#define PERF_COUNTER_TYPE_BITS 7 -#define PERF_COUNTER_TYPE_SHIFT 56 -#define PERF_COUNTER_TYPE_MASK __PERF_COUNTER_MASK(TYPE) - -#define PERF_COUNTER_EVENT_BITS 56 -#define PERF_COUNTER_EVENT_SHIFT 0 -#define PERF_COUNTER_EVENT_MASK __PERF_COUNTER_MASK(EVENT) - /* * Bits that can be set in attr.sample_type to request information * in the overflow packets. @@ -125,10 +105,13 @@ enum perf_counter_read_format { */ struct perf_counter_attr { /* - * The MSB of the config word signifies if the rest contains cpu - * specific (raw) counter configuration data, if unset, the next - * 7 bits are an event type and the rest of the bits are the event - * identifier. + * Major type: hardware/software/tracepoint/etc. + */ + __u32 type; + __u32 __reserved_1; + + /* + * Type specific configuration information. 
*/ __u64 config; @@ -152,12 +135,11 @@ struct perf_counter_attr { comm : 1, /* include comm data */ freq : 1, /* use freq, not period */ - __reserved_1 : 53; + __reserved_2 : 53; __u32 wakeup_events; /* wakeup every n events */ - __u32 __reserved_2; + __u32 __reserved_3; - __u64 __reserved_3; __u64 __reserved_4; }; @@ -278,8 +260,8 @@ enum perf_event_type { /* * struct { - * struct perf_event_header header; - * u32 pid, ppid; + * struct perf_event_header header; + * u32 pid, ppid; * }; */ PERF_EVENT_FORK = 7, @@ -331,27 +313,6 @@ enum perf_event_type { struct task_struct; -static inline u64 perf_event_raw(struct perf_counter_attr *attr) -{ - return attr->config & PERF_COUNTER_RAW_MASK; -} - -static inline u64 perf_event_config(struct perf_counter_attr *attr) -{ - return attr->config & PERF_COUNTER_CONFIG_MASK; -} - -static inline u64 perf_event_type(struct perf_counter_attr *attr) -{ - return (attr->config & PERF_COUNTER_TYPE_MASK) >> - PERF_COUNTER_TYPE_SHIFT; -} - -static inline u64 perf_event_id(struct perf_counter_attr *attr) -{ - return attr->config & PERF_COUNTER_EVENT_MASK; -} - /** * struct hw_perf_counter - performance counter hardware details: */ @@ -616,8 +577,8 @@ extern int perf_counter_overflow(struct perf_counter *counter, */ static inline int is_software_counter(struct perf_counter *counter) { - return !perf_event_raw(&counter->attr) && - perf_event_type(&counter->attr) != PERF_TYPE_HARDWARE; + return (counter->attr.type != PERF_TYPE_RAW) && + (counter->attr.type != PERF_TYPE_HARDWARE); } extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64); diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 47c92fb927f..75ae76796df 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -3091,14 +3091,12 @@ static int perf_swcounter_match(struct perf_counter *counter, enum perf_event_types type, u32 event, struct pt_regs *regs) { - u64 event_config; - - event_config = ((u64) type << PERF_COUNTER_TYPE_SHIFT) | event; - if (!perf_swcounter_is_counting(counter)) return 0; - if (counter->attr.config != event_config) + if (counter->attr.type != type) + return 0; + if (counter->attr.config != event) return 0; if (regs) { @@ -3403,7 +3401,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) * to be kernel events, and page faults are never hypervisor * events. */ - switch (perf_event_id(&counter->attr)) { + switch (counter->attr.config) { case PERF_COUNT_CPU_CLOCK: pmu = &perf_ops_cpu_clock; @@ -3496,12 +3494,12 @@ perf_counter_alloc(struct perf_counter_attr *attr, if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP)) goto done; - if (perf_event_raw(attr)) { + if (attr->type == PERF_TYPE_RAW) { pmu = hw_perf_counter_init(counter); goto done; } - switch (perf_event_type(attr)) { + switch (attr->type) { case PERF_TYPE_HARDWARE: pmu = hw_perf_counter_init(counter); break; -- cgit v1.2.3 From 8326f44da090d6d304d29b9fdc7fb3e20889e329 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 5 Jun 2009 20:22:46 +0200 Subject: perf_counter: Implement generalized cache event types Extend generic event enumeration with the PERF_TYPE_HW_CACHE method. This is a 3-dimensional space: { L1-D, L1-I, L2, ITLB, DTLB, BPU } x { load, store, prefetch } x { accesses, misses } User-space passes in the 3 coordinates and the kernel provides a counter. (if the hardware supports that type and if the combination makes sense.) Combinations that make no sense produce a -EINVAL. Combinations that are not supported by the hardware produce -ENOTSUP. 
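As a concrete sketch (not part of the patch; the packing matches set_ext_hw_attr() and parse_generic_hw_symbols() below, one byte per coordinate, lowest byte first), counting L1 data-cache read misses would look roughly like this in user-space:

	struct perf_counter_attr attr = {
		.type	= PERF_TYPE_HW_CACHE,
		/* { L1-D } x { read } x { miss }: */
		.config	= PERF_COUNT_HW_CACHE_L1D |
			  (PERF_COUNT_HW_CACHE_OP_READ << 8) |
			  (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
	};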
Extend the tools to deal with this, and rewrite the event symbol parsing code with various popular aliases for the units and access methods above. So 'l1-cache-miss' and 'l1d-read-ops' are both valid aliases. ( x86 is supported for now, with the Nehalem event table filled in, and with Core2 and Atom having placeholder tables. ) Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/util/parse-events.c | 104 ++++++++++++- arch/x86/kernel/cpu/perf_counter.c | 201 ++++++++++++++++++++++++- include/linux/perf_counter.h | 34 +++++ kernel/perf_counter.c | 1 + 4 files changed, 329 insertions(+), 11 deletions(-) diff --git a/Documentation/perf_counter/util/parse-events.c b/Documentation/perf_counter/util/parse-events.c index eb56bd99657..de9a77c4715 100644 --- a/Documentation/perf_counter/util/parse-events.c +++ b/Documentation/perf_counter/util/parse-events.c @@ -6,6 +6,8 @@ #include "exec_cmd.h" #include "string.h" +extern char *strcasestr(const char *haystack, const char *needle); + int nr_counters; struct perf_counter_attr attrs[MAX_COUNTERS]; @@ -17,6 +19,7 @@ struct event_symbol { }; #define C(x, y) .type = PERF_TYPE_##x, .config = PERF_COUNT_##y +#define CR(x, y) .type = PERF_TYPE_##x, .config = y static struct event_symbol event_symbols[] = { { C(HARDWARE, CPU_CYCLES), "cpu-cycles", }, @@ -69,6 +72,28 @@ static char *sw_event_names[] = { "major faults", }; +#define MAX_ALIASES 8 + +static char *hw_cache [][MAX_ALIASES] = { + { "l1-d" , "l1d" , "l1", "l1-data-cache" }, + { "l1-i" , "l1i" , "l1-instruction-cache" }, + { "l2" , }, + { "dtlb", }, + { "itlb", }, + { "bpu" , "btb", "branch-cache", NULL }, +}; + +static char *hw_cache_op [][MAX_ALIASES] = { + { "read" , "load" }, + { "write" , "store" }, + { "prefetch" , "speculative-read", "speculative-load" }, +}; + +static char *hw_cache_result [][MAX_ALIASES] = { + { "access", "ops" }, + { "miss", }, +}; + char *event_name(int counter) { __u64 config = attrs[counter].config; @@ -86,6 +111,30 @@ char *event_name(int counter) return hw_event_names[config]; return "unknown-hardware"; + case PERF_TYPE_HW_CACHE: { + __u8 cache_type, cache_op, cache_result; + static char name[100]; + + cache_type = (config >> 0) & 0xff; + if (cache_type > PERF_COUNT_HW_CACHE_MAX) + return "unknown-ext-hardware-cache-type"; + + cache_op = (config >> 8) & 0xff; + if (cache_type > PERF_COUNT_HW_CACHE_OP_MAX) + return "unknown-ext-hardware-cache-op-type"; + + cache_result = (config >> 16) & 0xff; + if (cache_type > PERF_COUNT_HW_CACHE_RESULT_MAX) + return "unknown-ext-hardware-cache-result-type"; + + sprintf(name, "%s:%s:%s", + hw_cache[cache_type][0], + hw_cache_op[cache_op][0], + hw_cache_result[cache_result][0]); + + return name; + } + case PERF_TYPE_SOFTWARE: if (config < PERF_SW_EVENTS_MAX) return sw_event_names[config]; @@ -98,11 +147,60 @@ char *event_name(int counter) return "unknown"; } +static int parse_aliases(const char *str, char *names[][MAX_ALIASES], int size) +{ + int i, j; + + for (i = 0; i < size; i++) { + for (j = 0; j < MAX_ALIASES; j++) { + if (!names[i][j]) + break; + if (strcasestr(str, names[i][j])) + return i; + } + } + + return 0; +} + +static int parse_generic_hw_symbols(const char *str, struct perf_counter_attr *attr) +{ + __u8 cache_type = -1, cache_op = 0, cache_result = 0; + + cache_type = parse_aliases(str, hw_cache, PERF_COUNT_HW_CACHE_MAX); + /* + * No fallback - if we cannot get a clear cache 
type + * then bail out: + */ + if (cache_type == -1) + return -EINVAL; + + cache_op = parse_aliases(str, hw_cache_op, PERF_COUNT_HW_CACHE_OP_MAX); + /* + * Fall back to reads: + */ + if (cache_type == -1) + cache_type = PERF_COUNT_HW_CACHE_OP_READ; + + cache_result = parse_aliases(str, hw_cache_result, + PERF_COUNT_HW_CACHE_RESULT_MAX); + /* + * Fall back to accesses: + */ + if (cache_result == -1) + cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS; + + attr->config = cache_type | (cache_op << 8) | (cache_result << 16); + attr->type = PERF_TYPE_HW_CACHE; + + return 0; +} + /* * Each event can have multiple symbolic names. * Symbolic names are (almost) exactly matched. */ -static int match_event_symbols(const char *str, struct perf_counter_attr *attr) +static int parse_event_symbols(const char *str, struct perf_counter_attr *attr) { __u64 config, id; int type; @@ -147,7 +245,7 @@ static int match_event_symbols(const char *str, struct perf_counter_attr *attr) } } - return -EINVAL; + return parse_generic_hw_symbols(str, attr); } int parse_events(const struct option *opt, const char *str, int unset) @@ -160,7 +258,7 @@ again: if (nr_counters == MAX_COUNTERS) return -1; - ret = match_event_symbols(str, &attr); + ret = parse_event_symbols(str, &attr); if (ret < 0) return ret; diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 430e048f285..e86679fa521 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -83,6 +83,128 @@ static u64 intel_pmu_event_map(int event) return intel_perfmon_event_map[event]; } +/* + * Generalized hw caching related event table, filled + * in on a per model basis. A value of 0 means + * 'not supported', -1 means 'event makes no sense on + * this CPU', any other value means the raw event + * ID. 
+ */ + +#define C(x) PERF_COUNT_HW_CACHE_##x + +static u64 __read_mostly hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + +static const u64 nehalem_hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(L1D) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */ + [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */ + [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */ + [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */ + }, + }, + [ C(L1I ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0480, /* L1I.READS */ + [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, + [ C(L2 ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */ + [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */ + [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0xc024, /* L2_RQSTS.PREFETCHES */ + [ C(RESULT_MISS) ] = 0x8024, /* L2_RQSTS.PREFETCH_MISS */ + }, + }, + [ C(DTLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */ + [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */ + [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, + [ C(ITLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */ + [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISS_RETIRED */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(BPU ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ + [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, +}; + +static const u64 core2_hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + /* To be filled in */ +}; + +static const u64 atom_hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + /* To be filled in */ +}; + static u64 intel_pmu_raw_event(u64 event) { #define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL @@ -246,6 +368,39 @@ static inline int x86_pmu_initialized(void) return x86_pmu.handle_irq != NULL; } +static inline int +set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr) +{ + unsigned int cache_type, cache_op, cache_result; + u64 config, val; + + config = attr->config; + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return -EINVAL; + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + 
return -EINVAL; + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + val = hw_cache_event_ids[cache_type][cache_op][cache_result]; + + if (val == 0) + return -ENOENT; + + if (val == -1) + return -EINVAL; + + hwc->config |= val; + + return 0; +} + /* * Setup the hardware configuration for a given attr_type */ @@ -288,22 +443,25 @@ static int __hw_perf_counter_init(struct perf_counter *counter) hwc->sample_period = x86_pmu.max_period; atomic64_set(&hwc->period_left, hwc->sample_period); + counter->destroy = hw_perf_counter_destroy; /* * Raw event type provide the config in the event structure */ if (attr->type == PERF_TYPE_RAW) { hwc->config |= x86_pmu.raw_event(attr->config); - } else { - if (attr->config >= x86_pmu.max_events) - return -EINVAL; - /* - * The generic map: - */ - hwc->config |= x86_pmu.event_map(attr->config); + return 0; } - counter->destroy = hw_perf_counter_destroy; + if (attr->type == PERF_TYPE_HW_CACHE) + return set_ext_hw_attr(hwc, attr); + + if (attr->config >= x86_pmu.max_events) + return -EINVAL; + /* + * The generic map: + */ + hwc->config |= x86_pmu.event_map(attr->config); return 0; } @@ -989,6 +1147,33 @@ static int intel_pmu_init(void) rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); + /* + * Nehalem: + */ + switch (boot_cpu_data.x86_model) { + case 17: + memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, + sizeof(u64)*PERF_COUNT_HW_CACHE_MAX* + PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX); + + pr_info("... installed Core2 event tables\n"); + break; + default: + case 26: + memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, + sizeof(u64)*PERF_COUNT_HW_CACHE_MAX* + PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX); + + pr_info("... installed Nehalem/Corei7 event tables\n"); + break; + case 28: + memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, + sizeof(u64)*PERF_COUNT_HW_CACHE_MAX* + PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX); + + pr_info("... installed Atom event tables\n"); + break; + } return 0; } diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index f794c69b34c..3586df840f6 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -28,6 +28,7 @@ enum perf_event_types { PERF_TYPE_HARDWARE = 0, PERF_TYPE_SOFTWARE = 1, PERF_TYPE_TRACEPOINT = 2, + PERF_TYPE_HW_CACHE = 3, /* * available TYPE space, raw is the max value. @@ -55,6 +56,39 @@ enum attr_ids { PERF_HW_EVENTS_MAX = 7, }; +/* + * Generalized hardware cache counters: + * + * { L1-D, L1-I, L2, LLC, ITLB, DTLB, BPU } x + * { read, write, prefetch } x + * { accesses, misses } + */ +enum hw_cache_id { + PERF_COUNT_HW_CACHE_L1D, + PERF_COUNT_HW_CACHE_L1I, + PERF_COUNT_HW_CACHE_L2, + PERF_COUNT_HW_CACHE_DTLB, + PERF_COUNT_HW_CACHE_ITLB, + PERF_COUNT_HW_CACHE_BPU, + + PERF_COUNT_HW_CACHE_MAX, +}; + +enum hw_cache_op_id { + PERF_COUNT_HW_CACHE_OP_READ, + PERF_COUNT_HW_CACHE_OP_WRITE, + PERF_COUNT_HW_CACHE_OP_PREFETCH, + + PERF_COUNT_HW_CACHE_OP_MAX, +}; + +enum hw_cache_op_result_id { + PERF_COUNT_HW_CACHE_RESULT_ACCESS, + PERF_COUNT_HW_CACHE_RESULT_MISS, + + PERF_COUNT_HW_CACHE_RESULT_MAX, +}; + /* * Special "software" counters provided by the kernel, even if the hardware * does not support performance counters. 
These counters measure various diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 75ae76796df..5eacaaf3f9c 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -3501,6 +3501,7 @@ perf_counter_alloc(struct perf_counter_attr *attr, switch (attr->type) { case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: pmu = hw_perf_counter_init(counter); break; -- cgit v1.2.3 From 86847b62f0781ccc97a79936c9ed9dc818cff67b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 6 Jun 2009 12:24:17 +0200 Subject: perf_counter tools: Add 'perf list' to list available events perf list: List all the available event types which can be used in -e (--event) options. Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Makefile | 1 + Documentation/perf_counter/builtin-list.c | 20 +++++++++++ Documentation/perf_counter/builtin-record.c | 7 ++-- Documentation/perf_counter/builtin-stat.c | 9 ++--- Documentation/perf_counter/builtin-top.c | 7 ++-- Documentation/perf_counter/builtin.h | 2 ++ Documentation/perf_counter/perf.c | 1 + Documentation/perf_counter/util/parse-events.c | 46 ++++++++++++++++---------- Documentation/perf_counter/util/parse-events.h | 2 +- 9 files changed, 61 insertions(+), 34 deletions(-) create mode 100644 Documentation/perf_counter/builtin-list.c diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 5b99f04df81..32c0bb21a32 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -328,6 +328,7 @@ BUILTIN_OBJS += builtin-record.o BUILTIN_OBJS += builtin-report.o BUILTIN_OBJS += builtin-stat.o BUILTIN_OBJS += builtin-top.o +BUILTIN_OBJS += builtin-list.o PERFLIBS = $(LIB_FILE) EXTLIBS = diff --git a/Documentation/perf_counter/builtin-list.c b/Documentation/perf_counter/builtin-list.c new file mode 100644 index 00000000000..fe60e37c96e --- /dev/null +++ b/Documentation/perf_counter/builtin-list.c @@ -0,0 +1,20 @@ +/* + * builtin-list.c + * + * Builtin list command: list all event types + * + * Copyright (C) 2009, Thomas Gleixner + * Copyright (C) 2008-2009, Red Hat Inc, Ingo Molnar + */ +#include "builtin.h" + +#include "perf.h" + +#include "util/parse-options.h" +#include "util/parse-events.h" + +int cmd_list(int argc, const char **argv, const char *prefix) +{ + print_events(); + return 0; +} diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c index 130fd88266b..aeab9c4b15e 100644 --- a/Documentation/perf_counter/builtin-record.c +++ b/Documentation/perf_counter/builtin-record.c @@ -495,11 +495,10 @@ static const char * const record_usage[] = { NULL }; -static char events_help_msg[EVENTS_HELP_MAX]; - static const struct option options[] = { OPT_CALLBACK('e', "event", NULL, "event", - events_help_msg, parse_events), + "event selector. 
use 'perf list' to list available events", + parse_events), OPT_INTEGER('p', "pid", &target_pid, "record events on existing pid"), OPT_INTEGER('r', "realtime", &realtime_prio, @@ -527,8 +526,6 @@ int cmd_record(int argc, const char **argv, const char *prefix) { int counter; - create_events_help(events_help_msg); - argc = parse_options(argc, argv, options, record_usage, 0); if (!argc && target_pid == -1 && !system_wide) usage_with_options(record_usage, options); diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c index 9711e552423..2cbf5a18958 100644 --- a/Documentation/perf_counter/builtin-stat.c +++ b/Documentation/perf_counter/builtin-stat.c @@ -293,18 +293,17 @@ static const char * const stat_usage[] = { NULL }; -static char events_help_msg[EVENTS_HELP_MAX]; - static const struct option options[] = { OPT_CALLBACK('e', "event", NULL, "event", - events_help_msg, parse_events), + "event selector. use 'perf list' to list available events", + parse_events), OPT_BOOLEAN('i', "inherit", &inherit, "child tasks inherit counters"), OPT_INTEGER('p', "pid", &target_pid, "stat events on existing pid"), OPT_BOOLEAN('a', "all-cpus", &system_wide, "system-wide collection from all CPUs"), - OPT_BOOLEAN('l', "scale", &scale, + OPT_BOOLEAN('S', "scale", &scale, "scale/normalize counters"), OPT_END() }; @@ -313,8 +312,6 @@ int cmd_stat(int argc, const char **argv, const char *prefix) { page_size = sysconf(_SC_PAGE_SIZE); - create_events_help(events_help_msg); - memcpy(attrs, default_attrs, sizeof(attrs)); argc = parse_options(argc, argv, options, stat_usage, 0); diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c index 98a6d53e17b..f2e7312f85c 100644 --- a/Documentation/perf_counter/builtin-top.c +++ b/Documentation/perf_counter/builtin-top.c @@ -606,11 +606,10 @@ static const char * const top_usage[] = { NULL }; -static char events_help_msg[EVENTS_HELP_MAX]; - static const struct option options[] = { OPT_CALLBACK('e', "event", NULL, "event", - events_help_msg, parse_events), + "event selector. 
use 'perf list' to list available events", + parse_events), OPT_INTEGER('c', "count", &default_interval, "event period to sample"), OPT_INTEGER('p', "pid", &target_pid, @@ -648,8 +647,6 @@ int cmd_top(int argc, const char **argv, const char *prefix) page_size = sysconf(_SC_PAGE_SIZE); - create_events_help(events_help_msg); - argc = parse_options(argc, argv, options, top_usage, 0); if (argc) usage_with_options(top_usage, options); diff --git a/Documentation/perf_counter/builtin.h b/Documentation/perf_counter/builtin.h index 5bfea57d33f..e7de47da858 100644 --- a/Documentation/perf_counter/builtin.h +++ b/Documentation/perf_counter/builtin.h @@ -20,4 +20,6 @@ extern int cmd_report(int argc, const char **argv, const char *prefix); extern int cmd_stat(int argc, const char **argv, const char *prefix); extern int cmd_top(int argc, const char **argv, const char *prefix); extern int cmd_version(int argc, const char **argv, const char *prefix); +extern int cmd_list(int argc, const char **argv, const char *prefix); + #endif diff --git a/Documentation/perf_counter/perf.c b/Documentation/perf_counter/perf.c index ec7edb7fbe2..9ac75657a18 100644 --- a/Documentation/perf_counter/perf.c +++ b/Documentation/perf_counter/perf.c @@ -258,6 +258,7 @@ static void handle_internal_command(int argc, const char **argv) const char *cmd = argv[0]; static struct cmd_struct commands[] = { { "help", cmd_help, 0 }, + { "list", cmd_list, 0 }, { "record", cmd_record, 0 }, { "report", cmd_report, 0 }, { "stat", cmd_stat, 0 }, diff --git a/Documentation/perf_counter/util/parse-events.c b/Documentation/perf_counter/util/parse-events.c index de9a77c4715..150fbd26271 100644 --- a/Documentation/perf_counter/util/parse-events.c +++ b/Documentation/perf_counter/util/parse-events.c @@ -274,31 +274,43 @@ again: return 0; } +static const char * const event_type_descriptors[] = { + "", + "Hardware event", + "Software event", + "Tracepoint event", + "Hardware cache event", +}; + /* - * Create the help text for the event symbols: + * Print the help text for the event symbols: */ -void create_events_help(char *events_help_msg) +void print_events(void) { - unsigned int i; - char *str; + struct event_symbol *syms = event_symbols; + unsigned int i, type, prev_type = -1; - str = events_help_msg; + fprintf(stderr, "\n"); + fprintf(stderr, "List of pre-defined events (to be used in -e):\n"); - str += sprintf(str, - "event name: ["); + for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { + type = syms->type + 1; + if (type > ARRAY_SIZE(event_type_descriptors)) + type = 0; - for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { - int type, id; - - type = event_symbols[i].type; - id = event_symbols[i].config; + if (type != prev_type) + fprintf(stderr, "\n"); - if (i) - str += sprintf(str, "|"); + fprintf(stderr, " %-30s [%s]\n", syms->symbol, + event_type_descriptors[type]); - str += sprintf(str, "%s", - event_symbols[i].symbol); + prev_type = type; } - str += sprintf(str, "|rNNN]"); + fprintf(stderr, "\n"); + fprintf(stderr, " %-30s [raw hardware event descriptor]\n", + "rNNN"); + fprintf(stderr, "\n"); + + exit(129); } diff --git a/Documentation/perf_counter/util/parse-events.h b/Documentation/perf_counter/util/parse-events.h index 542971c495b..e3d552908e6 100644 --- a/Documentation/perf_counter/util/parse-events.h +++ b/Documentation/perf_counter/util/parse-events.h @@ -13,5 +13,5 @@ extern int parse_events(const struct option *opt, const char *str, int unset); #define EVENTS_HELP_MAX (128*1024) -extern void create_events_help(char 
*help_msg); +extern void print_events(void); -- cgit v1.2.3 From 8faf3b547593bf6ea10df631e73204975273c4e0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 6 Jun 2009 13:58:12 +0200 Subject: perf_counter tools: Fix cache-event printout Also standardize the cache printout (so that it can be pasted back into the command) and sort out the aliases. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Corey Ashford Cc: Marcelo Tosatti Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/util/parse-events.c | 56 +++++++++++++------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/Documentation/perf_counter/util/parse-events.c b/Documentation/perf_counter/util/parse-events.c index 150fbd26271..e0820b4388a 100644 --- a/Documentation/perf_counter/util/parse-events.c +++ b/Documentation/perf_counter/util/parse-events.c @@ -53,45 +53,45 @@ static struct event_symbol event_symbols[] = { #define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) static char *hw_event_names[] = { - "CPU cycles", + "cycles", "instructions", - "cache references", - "cache misses", + "cache-references", + "cache-misses", "branches", - "branch misses", - "bus cycles", + "branch-misses", + "bus-cycles", }; static char *sw_event_names[] = { - "cpu clock ticks", - "task clock ticks", - "pagefaults", - "context switches", - "CPU migrations", - "minor faults", - "major faults", + "cpu-clock-ticks", + "task-clock-ticks", + "page-faults", + "context-switches", + "CPU-migrations", + "minor-faults", + "major-faults", }; #define MAX_ALIASES 8 static char *hw_cache [][MAX_ALIASES] = { - { "l1-d" , "l1d" , "l1", "l1-data-cache" }, - { "l1-i" , "l1i" , "l1-instruction-cache" }, - { "l2" , }, - { "dtlb", }, - { "itlb", }, - { "bpu" , "btb", "branch-cache", NULL }, + { "L1-data" , "l1-d", "l1d", "l1" }, + { "L1-instruction" , "l1-i", "l1i" }, + { "L2" , "l2" }, + { "Data-TLB" , "dtlb", "d-tlb" }, + { "Instruction-TLB" , "itlb", "i-tlb" }, + { "Branch" , "bpu" , "btb", "bpc" }, }; static char *hw_cache_op [][MAX_ALIASES] = { - { "read" , "load" }, - { "write" , "store" }, - { "prefetch" , "speculative-read", "speculative-load" }, + { "Load" , "read" }, + { "Store" , "write" }, + { "Prefetch" , "speculative-read", "speculative-load" }, }; static char *hw_cache_result [][MAX_ALIASES] = { - { "access", "ops" }, - { "miss", }, + { "Reference" , "ops", "access" }, + { "Miss" }, }; char *event_name(int counter) @@ -120,14 +120,14 @@ char *event_name(int counter) return "unknown-ext-hardware-cache-type"; cache_op = (config >> 8) & 0xff; - if (cache_type > PERF_COUNT_HW_CACHE_OP_MAX) - return "unknown-ext-hardware-cache-op-type"; + if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX) + return "unknown-ext-hardware-cache-op"; cache_result = (config >> 16) & 0xff; - if (cache_type > PERF_COUNT_HW_CACHE_RESULT_MAX) - return "unknown-ext-hardware-cache-result-type"; + if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX) + return "unknown-ext-hardware-cache-result"; - sprintf(name, "%s:%s:%s", + sprintf(name, "%s-Cache-%s-%ses", hw_cache[cache_type][0], hw_cache_op[cache_op][0], hw_cache_result[cache_result][0]); -- cgit v1.2.3 From 386b05e3a2f3c5b0a9c5575060421cca0911648a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 6 Jun 2009 14:56:33 +0200 Subject: perf_counter tools: Add help for perf list Also update other areas of the help texts. 
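For reference, a session with the new command might look roughly like this - the layout follows the print_events() format string added above, but the event list shown here is illustrative and depends on the CPU:

	$ perf list

	List of pre-defined events (to be used in -e):

	  cpu-cycles                     [Hardware event]
	  instructions                   [Hardware event]
	  cache-misses                   [Hardware event]

	  cpu-clock                      [Software event]
	  context-switches               [Software event]

	  rNNN                           [raw hardware event descriptor]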
Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- .../perf_counter/Documentation/perf-list.txt | 25 +++++++++++++++++++++ .../perf_counter/Documentation/perf-record.txt | 26 +++++----------------- .../perf_counter/Documentation/perf-stat.txt | 26 +++++----------------- .../perf_counter/Documentation/perf-top.txt | 26 +++++----------------- Documentation/perf_counter/Documentation/perf.txt | 3 ++- 5 files changed, 42 insertions(+), 64 deletions(-) create mode 100644 Documentation/perf_counter/Documentation/perf-list.txt diff --git a/Documentation/perf_counter/Documentation/perf-list.txt b/Documentation/perf_counter/Documentation/perf-list.txt new file mode 100644 index 00000000000..aa55a71184f --- /dev/null +++ b/Documentation/perf_counter/Documentation/perf-list.txt @@ -0,0 +1,25 @@ +perf-list(1) +============== + +NAME +---- +perf-list - List all symbolic event types + +SYNOPSIS +-------- +[verse] +'perf list + +DESCRIPTION +----------- +This command displays the symbolic event types which can be selected in the +various perf commands with the -e option. + +OPTIONS +------- +None + +SEE ALSO +-------- +linkperf:perf-stat[1], linkperf:perf-top[1], +linkperf:perf-record[1] diff --git a/Documentation/perf_counter/Documentation/perf-record.txt b/Documentation/perf_counter/Documentation/perf-record.txt index 4d3416fc764..1dbc1eeb4c0 100644 --- a/Documentation/perf_counter/Documentation/perf-record.txt +++ b/Documentation/perf_counter/Documentation/perf-record.txt @@ -26,26 +26,10 @@ OPTIONS -e:: --event=:: - 0:0: cpu-cycles - 0:0: cycles - 0:1: instructions - 0:2: cache-references - 0:3: cache-misses - 0:4: branch-instructions - 0:4: branches - 0:5: branch-misses - 0:6: bus-cycles - 1:0: cpu-clock - 1:1: task-clock - 1:2: page-faults - 1:2: faults - 1:5: minor-faults - 1:6: major-faults - 1:3: context-switches - 1:3: cs - 1:4: cpu-migrations - 1:4: migrations - rNNN: raw PMU events (eventsel+umask) + Select the PMU event. Selection can be a symbolic event name + (use 'perf list' to list all events) or a raw PMU + event (eventsel+umask) in the form of rNNN where NNN is a + hexadecimal event descriptor. -a:: system-wide collection @@ -55,4 +39,4 @@ OPTIONS SEE ALSO -------- -linkperf:perf-stat[1] +linkperf:perf-stat[1], linkperf:perf-list[1] diff --git a/Documentation/perf_counter/Documentation/perf-stat.txt b/Documentation/perf_counter/Documentation/perf-stat.txt index a340e7be83d..5d95784cce4 100644 --- a/Documentation/perf_counter/Documentation/perf-stat.txt +++ b/Documentation/perf_counter/Documentation/perf-stat.txt @@ -25,26 +25,10 @@ OPTIONS -e:: --event=:: - 0:0: cpu-cycles - 0:0: cycles - 0:1: instructions - 0:2: cache-references - 0:3: cache-misses - 0:4: branch-instructions - 0:4: branches - 0:5: branch-misses - 0:6: bus-cycles - 1:0: cpu-clock - 1:1: task-clock - 1:2: page-faults - 1:2: faults - 1:5: minor-faults - 1:6: major-faults - 1:3: context-switches - 1:3: cs - 1:4: cpu-migrations - 1:4: migrations - rNNN: raw PMU events (eventsel+umask) + Select the PMU event. Selection can be a symbolic event name + (use 'perf list' to list all events) or a raw PMU + event (eventsel+umask) in the form of rNNN where NNN is a + hexadecimal event descriptor. 
-i:: --inherit:: @@ -79,4 +63,4 @@ $ perf stat -- make -j SEE ALSO -------- -linkperf:perf-tops[1] +linkperf:perf-top[1], linkperf:perf-list[1] diff --git a/Documentation/perf_counter/Documentation/perf-top.txt b/Documentation/perf_counter/Documentation/perf-top.txt index 15251e40e4f..c8eb7cfffcd 100644 --- a/Documentation/perf_counter/Documentation/perf-top.txt +++ b/Documentation/perf_counter/Documentation/perf-top.txt @@ -23,26 +23,10 @@ OPTIONS -e:: --event=:: - 0:0: cpu-cycles - 0:0: cycles - 0:1: instructions - 0:2: cache-references - 0:3: cache-misses - 0:4: branch-instructions - 0:4: branches - 0:5: branch-misses - 0:6: bus-cycles - 1:0: cpu-clock - 1:1: task-clock - 1:2: page-faults - 1:2: faults - 1:5: minor-faults - 1:6: major-faults - 1:3: context-switches - 1:3: cs - 1:4: cpu-migrations - 1:4: migrations - rNNN: raw PMU events (eventsel+umask) + Select the PMU event. Selection can be a symbolic event name + (use 'perf list' to list all events) or a raw PMU + event (eventsel+umask) in the form of rNNN where NNN is a + hexadecimal event descriptor. -a:: system-wide collection @@ -52,4 +36,4 @@ OPTIONS SEE ALSO -------- -linkperf:perf-stat[1] +linkperf:perf-stat[1], linkperf:perf-list[1] diff --git a/Documentation/perf_counter/Documentation/perf.txt b/Documentation/perf_counter/Documentation/perf.txt index e3d8b3832c6..69c83255719 100644 --- a/Documentation/perf_counter/Documentation/perf.txt +++ b/Documentation/perf_counter/Documentation/perf.txt @@ -20,4 +20,5 @@ and software features (software counters, tracepoints) as well. SEE ALSO -------- linkperf:perf-stat[1], linkperf:perf-top[1], -linkperf:perf-record[1], linkperf:perf-report[1] +linkperf:perf-record[1], linkperf:perf-report[1], +linkperf:perf-list[1] -- cgit v1.2.3 From 502fc5c72a886ff9d4d7a596e65ecc4dd5e4d458 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 13 Mar 2009 03:20:49 +0100 Subject: perf_counter tools: Uniform help printouts Also add perf list to command-list.txt. 
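( The printout changes are purely cosmetic: every usage block now
  starts on a fresh line and carries a uniform one-space indent.
  Illustrative output after this patch - the exact usage and
  more-info strings live in perf.c and may differ:

    $ perf

     usage: perf [--version] [--help] COMMAND [ARGS]

     The most commonly used perf commands are:
        ...

     See 'perf help COMMAND' for more information on a specific command.
)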
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/builtin-help.c | 10 +++++----- Documentation/perf_counter/command-list.txt | 14 ++++++++------ Documentation/perf_counter/perf.c | 4 ++-- Documentation/perf_counter/util/parse-options.c | 2 +- Documentation/perf_counter/util/usage.c | 2 +- 5 files changed, 17 insertions(+), 15 deletions(-) diff --git a/Documentation/perf_counter/builtin-help.c b/Documentation/perf_counter/builtin-help.c index a3894bfb9b6..0f32dc3f3c4 100644 --- a/Documentation/perf_counter/builtin-help.c +++ b/Documentation/perf_counter/builtin-help.c @@ -284,7 +284,7 @@ void list_common_cmds_help(void) longest = strlen(common_cmds[i].name); } - puts("The most commonly used perf commands are:"); + puts(" The most commonly used perf commands are:"); for (i = 0; i < ARRAY_SIZE(common_cmds); i++) { printf(" %s ", common_cmds[i].name); mput_char(' ', longest - strlen(common_cmds[i].name)); @@ -426,16 +426,16 @@ int cmd_help(int argc, const char **argv, const char *prefix) builtin_help_usage, 0); if (show_all) { - printf("usage: %s\n\n", perf_usage_string); + printf("\n usage: %s\n\n", perf_usage_string); list_commands("perf commands", &main_cmds, &other_cmds); - printf("%s\n", perf_more_info_string); + printf(" %s\n\n", perf_more_info_string); return 0; } if (!argv[0]) { - printf("usage: %s\n\n", perf_usage_string); + printf("\n usage: %s\n\n", perf_usage_string); list_common_cmds_help(); - printf("\n%s\n", perf_more_info_string); + printf("\n %s\n\n", perf_more_info_string); return 0; } diff --git a/Documentation/perf_counter/command-list.txt b/Documentation/perf_counter/command-list.txt index 43902920777..f0b922c9023 100644 --- a/Documentation/perf_counter/command-list.txt +++ b/Documentation/perf_counter/command-list.txt @@ -1,7 +1,9 @@ +# # List of known perf commands. 
-# command name category [deprecated] [common] -perf-record mainporcelain common -perf-report mainporcelain common -perf-stat mainporcelain common -perf-top mainporcelain common - +# command name category [deprecated] [common] +# +perf-record mainporcelain common +perf-report mainporcelain common +perf-stat mainporcelain common +perf-top mainporcelain common +perf-list mainporcelain common diff --git a/Documentation/perf_counter/perf.c b/Documentation/perf_counter/perf.c index 9ac75657a18..161824f1241 100644 --- a/Documentation/perf_counter/perf.c +++ b/Documentation/perf_counter/perf.c @@ -384,9 +384,9 @@ int main(int argc, const char **argv) argv[0] += 2; } else { /* The user didn't specify a command; give them help */ - printf("usage: %s\n\n", perf_usage_string); + printf("\n usage: %s\n\n", perf_usage_string); list_common_cmds_help(); - printf("\n%s\n", perf_more_info_string); + printf("\n %s\n\n", perf_more_info_string); exit(1); } cmd = argv[0]; diff --git a/Documentation/perf_counter/util/parse-options.c b/Documentation/perf_counter/util/parse-options.c index 551b6bc34e7..e4d353395a6 100644 --- a/Documentation/perf_counter/util/parse-options.c +++ b/Documentation/perf_counter/util/parse-options.c @@ -385,7 +385,7 @@ int usage_with_options_internal(const char * const *usagestr, if (!usagestr) return PARSE_OPT_HELP; - fprintf(stderr, "usage: %s\n", *usagestr++); + fprintf(stderr, "\n usage: %s\n", *usagestr++); while (*usagestr && **usagestr) fprintf(stderr, " or: %s\n", *usagestr++); while (*usagestr) { diff --git a/Documentation/perf_counter/util/usage.c b/Documentation/perf_counter/util/usage.c index 7a10421fe6b..2cad286e437 100644 --- a/Documentation/perf_counter/util/usage.c +++ b/Documentation/perf_counter/util/usage.c @@ -14,7 +14,7 @@ static void report(const char *prefix, const char *err, va_list params) static NORETURN void usage_builtin(const char *err) { - fprintf(stderr, "usage: %s\n", err); + fprintf(stderr, "\n usage: %s\n", err); exit(129); } -- cgit v1.2.3 From 6e6b754ffdb6415723686c733f13275397e44422 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 15 Apr 2008 22:39:31 +0200 Subject: perf_counter tools: Tidy up manpage details Also fix a misalignment in usage string printing. 
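( Background: these manpages use asciidoc two-line titles, and
  asciidoc expects the underline length to track the title length -
  drift too far and the heading is no longer recognised. E.g. the
  title 'perf-stat(1)' is twelve characters wide, so:

    perf-stat(1)
    ==========        <- two characters short

    perf-stat(1)
    ============      <- matches

  The parse-options.c hunk indents the 'or:' continuation lines so
  that they line up under the '\n usage:' format introduced by the
  previous commit.
)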
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/Documentation/perf-help.txt | 2 +- Documentation/perf_counter/Documentation/perf-list.txt | 4 ++-- Documentation/perf_counter/Documentation/perf-stat.txt | 2 +- Documentation/perf_counter/Documentation/perf-top.txt | 2 +- Documentation/perf_counter/util/parse-options.c | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Documentation/perf_counter/Documentation/perf-help.txt b/Documentation/perf_counter/Documentation/perf-help.txt index f85fed5a7ed..514391818d1 100644 --- a/Documentation/perf_counter/Documentation/perf-help.txt +++ b/Documentation/perf_counter/Documentation/perf-help.txt @@ -1,5 +1,5 @@ perf-help(1) -=========== +============ NAME ---- diff --git a/Documentation/perf_counter/Documentation/perf-list.txt b/Documentation/perf_counter/Documentation/perf-list.txt index aa55a71184f..8290b942266 100644 --- a/Documentation/perf_counter/Documentation/perf-list.txt +++ b/Documentation/perf_counter/Documentation/perf-list.txt @@ -1,5 +1,5 @@ perf-list(1) -============== +============ NAME ---- @@ -8,7 +8,7 @@ perf-list - List all symbolic event types SYNOPSIS -------- [verse] -'perf list +'perf list' DESCRIPTION ----------- diff --git a/Documentation/perf_counter/Documentation/perf-stat.txt b/Documentation/perf_counter/Documentation/perf-stat.txt index 5d95784cce4..c368a72721d 100644 --- a/Documentation/perf_counter/Documentation/perf-stat.txt +++ b/Documentation/perf_counter/Documentation/perf-stat.txt @@ -1,5 +1,5 @@ perf-stat(1) -========== +============ NAME ---- diff --git a/Documentation/perf_counter/Documentation/perf-top.txt b/Documentation/perf_counter/Documentation/perf-top.txt index c8eb7cfffcd..539d0128972 100644 --- a/Documentation/perf_counter/Documentation/perf-top.txt +++ b/Documentation/perf_counter/Documentation/perf-top.txt @@ -1,5 +1,5 @@ perf-top(1) -========== +=========== NAME ---- diff --git a/Documentation/perf_counter/util/parse-options.c b/Documentation/perf_counter/util/parse-options.c index e4d353395a6..b3affb1658d 100644 --- a/Documentation/perf_counter/util/parse-options.c +++ b/Documentation/perf_counter/util/parse-options.c @@ -387,7 +387,7 @@ int usage_with_options_internal(const char * const *usagestr, fprintf(stderr, "\n usage: %s\n", *usagestr++); while (*usagestr && **usagestr) - fprintf(stderr, " or: %s\n", *usagestr++); + fprintf(stderr, " or: %s\n", *usagestr++); while (*usagestr) { fprintf(stderr, "%s%s\n", **usagestr ? " " : "", -- cgit v1.2.3 From 8035e4288078cb806e7dd6bafe4d3e54d44cab3f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 6 Jun 2009 15:19:13 +0200 Subject: perf_counter tools: Prepare for 'perf annotate' Prepare for the 'perf annotate' implementation by splitting off builtin-annotate.c from builtin-report.c. ( We keep this commit separate to ease the later librarization of the facilities that perf-report and perf-annotate share.
) Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- .../perf_counter/Documentation/perf-annotate.txt | 26 + Documentation/perf_counter/Makefile | 3 +- Documentation/perf_counter/builtin-annotate.c | 1291 ++++++++++++++++++++ Documentation/perf_counter/builtin.h | 1 + Documentation/perf_counter/command-list.txt | 3 +- Documentation/perf_counter/perf.c | 3 + 6 files changed, 1325 insertions(+), 2 deletions(-) create mode 100644 Documentation/perf_counter/Documentation/perf-annotate.txt create mode 100644 Documentation/perf_counter/builtin-annotate.c diff --git a/Documentation/perf_counter/Documentation/perf-annotate.txt b/Documentation/perf_counter/Documentation/perf-annotate.txt new file mode 100644 index 00000000000..a9d6d5ee270 --- /dev/null +++ b/Documentation/perf_counter/Documentation/perf-annotate.txt @@ -0,0 +1,26 @@ +perf-annotate(1) +============== + +NAME +---- +perf-annotate - Read perf.data (created by perf record) and annotate functions + +SYNOPSIS +-------- +[verse] +'perf annotate' [-i | --input=file] symbol_name + +DESCRIPTION +----------- +This command displays the performance counter profile information recorded +via perf record. + +OPTIONS +------- +-i:: +--input=:: + Input file name. (default: perf.data) + +SEE ALSO +-------- +linkperf:perf-record[1] diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile index 32c0bb21a32..0cbd5d6874e 100644 --- a/Documentation/perf_counter/Makefile +++ b/Documentation/perf_counter/Makefile @@ -323,12 +323,13 @@ LIB_OBJS += util/symbol.o LIB_OBJS += util/color.o LIB_OBJS += util/pager.o +BUILTIN_OBJS += builtin-annotate.o BUILTIN_OBJS += builtin-help.o +BUILTIN_OBJS += builtin-list.o BUILTIN_OBJS += builtin-record.o BUILTIN_OBJS += builtin-report.o BUILTIN_OBJS += builtin-stat.o BUILTIN_OBJS += builtin-top.o -BUILTIN_OBJS += builtin-list.o PERFLIBS = $(LIB_FILE) EXTLIBS = diff --git a/Documentation/perf_counter/builtin-annotate.c b/Documentation/perf_counter/builtin-annotate.c new file mode 100644 index 00000000000..d656484ec98 --- /dev/null +++ b/Documentation/perf_counter/builtin-annotate.c @@ -0,0 +1,1291 @@ +/* + * builtin-annotate.c + * + * Builtin annotate command: Analyze the perf.data input file, + * look up and read DSOs and symbol information and display + * a histogram of results, along various sorting keys. + */ +#include "builtin.h" + +#include "util/util.h" + +#include "util/color.h" +#include "util/list.h" +#include "util/cache.h" +#include "util/rbtree.h" +#include "util/symbol.h" +#include "util/string.h" + +#include "perf.h" + +#include "util/parse-options.h" +#include "util/parse-events.h" + +#define SHOW_KERNEL 1 +#define SHOW_USER 2 +#define SHOW_HV 4 + +static char const *input_name = "perf.data"; +static char *vmlinux = NULL; + +static char default_sort_order[] = "comm,dso"; +static char *sort_order = default_sort_order; + +static int input; +static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; + +static int dump_trace = 0; +#define dprintf(x...) 
do { if (dump_trace) printf(x); } while (0) + +static int verbose; +static int full_paths; + +static unsigned long page_size; +static unsigned long mmap_window = 32; + +struct ip_event { + struct perf_event_header header; + __u64 ip; + __u32 pid, tid; +}; + +struct mmap_event { + struct perf_event_header header; + __u32 pid, tid; + __u64 start; + __u64 len; + __u64 pgoff; + char filename[PATH_MAX]; +}; + +struct comm_event { + struct perf_event_header header; + __u32 pid, tid; + char comm[16]; +}; + +struct fork_event { + struct perf_event_header header; + __u32 pid, ppid; +}; + +struct period_event { + struct perf_event_header header; + __u64 time; + __u64 id; + __u64 sample_period; +}; + +typedef union event_union { + struct perf_event_header header; + struct ip_event ip; + struct mmap_event mmap; + struct comm_event comm; + struct fork_event fork; + struct period_event period; +} event_t; + +static LIST_HEAD(dsos); +static struct dso *kernel_dso; +static struct dso *vdso; + +static void dsos__add(struct dso *dso) +{ + list_add_tail(&dso->node, &dsos); +} + +static struct dso *dsos__find(const char *name) +{ + struct dso *pos; + + list_for_each_entry(pos, &dsos, node) + if (strcmp(pos->name, name) == 0) + return pos; + return NULL; +} + +static struct dso *dsos__findnew(const char *name) +{ + struct dso *dso = dsos__find(name); + int nr; + + if (dso) + return dso; + + dso = dso__new(name, 0); + if (!dso) + goto out_delete_dso; + + nr = dso__load(dso, NULL, verbose); + if (nr < 0) { + if (verbose) + fprintf(stderr, "Failed to open: %s\n", name); + goto out_delete_dso; + } + if (!nr && verbose) { + fprintf(stderr, + "No symbols found in: %s, maybe install a debug package?\n", + name); + } + + dsos__add(dso); + + return dso; + +out_delete_dso: + dso__delete(dso); + return NULL; +} + +static void dsos__fprintf(FILE *fp) +{ + struct dso *pos; + + list_for_each_entry(pos, &dsos, node) + dso__fprintf(pos, fp); +} + +static struct symbol *vdso__find_symbol(struct dso *dso, uint64_t ip) +{ + return dso__find_symbol(kernel_dso, ip); +} + +static int load_kernel(void) +{ + int err; + + kernel_dso = dso__new("[kernel]", 0); + if (!kernel_dso) + return -1; + + err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose); + if (err) { + dso__delete(kernel_dso); + kernel_dso = NULL; + } else + dsos__add(kernel_dso); + + vdso = dso__new("[vdso]", 0); + if (!vdso) + return -1; + + vdso->find_symbol = vdso__find_symbol; + + dsos__add(vdso); + + return err; +} + +static char __cwd[PATH_MAX]; +static char *cwd = __cwd; +static int cwdlen; + +static int strcommon(const char *pathname) +{ + int n = 0; + + while (pathname[n] == cwd[n] && n < cwdlen) + ++n; + + return n; +} + +struct map { + struct list_head node; + uint64_t start; + uint64_t end; + uint64_t pgoff; + uint64_t (*map_ip)(struct map *, uint64_t); + struct dso *dso; +}; + +static uint64_t map__map_ip(struct map *map, uint64_t ip) +{ + return ip - map->start + map->pgoff; +} + +static uint64_t vdso__map_ip(struct map *map, uint64_t ip) +{ + return ip; +} + +static struct map *map__new(struct mmap_event *event) +{ + struct map *self = malloc(sizeof(*self)); + + if (self != NULL) { + const char *filename = event->filename; + char newfilename[PATH_MAX]; + + if (cwd) { + int n = strcommon(filename); + + if (n == cwdlen) { + snprintf(newfilename, sizeof(newfilename), + ".%s", filename + n); + filename = newfilename; + } + } + + self->start = event->start; + self->end = event->start + event->len; + self->pgoff = event->pgoff; + + self->dso = 
dsos__findnew(filename); + if (self->dso == NULL) + goto out_delete; + + if (self->dso == vdso) + self->map_ip = vdso__map_ip; + else + self->map_ip = map__map_ip; + } + return self; +out_delete: + free(self); + return NULL; +} + +static struct map *map__clone(struct map *self) +{ + struct map *map = malloc(sizeof(*self)); + + if (!map) + return NULL; + + memcpy(map, self, sizeof(*self)); + + return map; +} + +static int map__overlap(struct map *l, struct map *r) +{ + if (l->start > r->start) { + struct map *t = l; + l = r; + r = t; + } + + if (l->end > r->start) + return 1; + + return 0; +} + +static size_t map__fprintf(struct map *self, FILE *fp) +{ + return fprintf(fp, " %"PRIx64"-%"PRIx64" %"PRIx64" %s\n", + self->start, self->end, self->pgoff, self->dso->name); +} + + +struct thread { + struct rb_node rb_node; + struct list_head maps; + pid_t pid; + char *comm; +}; + +static struct thread *thread__new(pid_t pid) +{ + struct thread *self = malloc(sizeof(*self)); + + if (self != NULL) { + self->pid = pid; + self->comm = malloc(32); + if (self->comm) + snprintf(self->comm, 32, ":%d", self->pid); + INIT_LIST_HEAD(&self->maps); + } + + return self; +} + +static int thread__set_comm(struct thread *self, const char *comm) +{ + if (self->comm) + free(self->comm); + self->comm = strdup(comm); + return self->comm ? 0 : -ENOMEM; +} + +static size_t thread__fprintf(struct thread *self, FILE *fp) +{ + struct map *pos; + size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm); + + list_for_each_entry(pos, &self->maps, node) + ret += map__fprintf(pos, fp); + + return ret; +} + + +static struct rb_root threads; +static struct thread *last_match; + +static struct thread *threads__findnew(pid_t pid) +{ + struct rb_node **p = &threads.rb_node; + struct rb_node *parent = NULL; + struct thread *th; + + /* + * Font-end cache - PID lookups come in blocks, + * so most of the time we dont have to look up + * the full rbtree: + */ + if (last_match && last_match->pid == pid) + return last_match; + + while (*p != NULL) { + parent = *p; + th = rb_entry(parent, struct thread, rb_node); + + if (th->pid == pid) { + last_match = th; + return th; + } + + if (pid < th->pid) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + th = thread__new(pid); + if (th != NULL) { + rb_link_node(&th->rb_node, parent, p); + rb_insert_color(&th->rb_node, &threads); + last_match = th; + } + + return th; +} + +static void thread__insert_map(struct thread *self, struct map *map) +{ + struct map *pos, *tmp; + + list_for_each_entry_safe(pos, tmp, &self->maps, node) { + if (map__overlap(pos, map)) { + list_del_init(&pos->node); + /* XXX leaks dsos */ + free(pos); + } + } + + list_add_tail(&map->node, &self->maps); +} + +static int thread__fork(struct thread *self, struct thread *parent) +{ + struct map *map; + + if (self->comm) + free(self->comm); + self->comm = strdup(parent->comm); + if (!self->comm) + return -ENOMEM; + + list_for_each_entry(map, &parent->maps, node) { + struct map *new = map__clone(map); + if (!new) + return -ENOMEM; + thread__insert_map(self, new); + } + + return 0; +} + +static struct map *thread__find_map(struct thread *self, uint64_t ip) +{ + struct map *pos; + + if (self == NULL) + return NULL; + + list_for_each_entry(pos, &self->maps, node) + if (ip >= pos->start && ip <= pos->end) + return pos; + + return NULL; +} + +static size_t threads__fprintf(FILE *fp) +{ + size_t ret = 0; + struct rb_node *nd; + + for (nd = rb_first(&threads); nd; nd = rb_next(nd)) { + struct thread *pos = rb_entry(nd, 
struct thread, rb_node); + + ret += thread__fprintf(pos, fp); + } + + return ret; +} + +/* + * histogram, sorted on item, collects counts + */ + +static struct rb_root hist; + +struct hist_entry { + struct rb_node rb_node; + + struct thread *thread; + struct map *map; + struct dso *dso; + struct symbol *sym; + uint64_t ip; + char level; + + uint32_t count; +}; + +/* + * configurable sorting bits + */ + +struct sort_entry { + struct list_head list; + + char *header; + + int64_t (*cmp)(struct hist_entry *, struct hist_entry *); + int64_t (*collapse)(struct hist_entry *, struct hist_entry *); + size_t (*print)(FILE *fp, struct hist_entry *); +}; + +/* --sort pid */ + +static int64_t +sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return right->thread->pid - left->thread->pid; +} + +static size_t +sort__thread_print(FILE *fp, struct hist_entry *self) +{ + return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid); +} + +static struct sort_entry sort_thread = { + .header = " Command: Pid", + .cmp = sort__thread_cmp, + .print = sort__thread_print, +}; + +/* --sort comm */ + +static int64_t +sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return right->thread->pid - left->thread->pid; +} + +static int64_t +sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) +{ + char *comm_l = left->thread->comm; + char *comm_r = right->thread->comm; + + if (!comm_l || !comm_r) { + if (!comm_l && !comm_r) + return 0; + else if (!comm_l) + return -1; + else + return 1; + } + + return strcmp(comm_l, comm_r); +} + +static size_t +sort__comm_print(FILE *fp, struct hist_entry *self) +{ + return fprintf(fp, "%16s", self->thread->comm); +} + +static struct sort_entry sort_comm = { + .header = " Command", + .cmp = sort__comm_cmp, + .collapse = sort__comm_collapse, + .print = sort__comm_print, +}; + +/* --sort dso */ + +static int64_t +sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct dso *dso_l = left->dso; + struct dso *dso_r = right->dso; + + if (!dso_l || !dso_r) { + if (!dso_l && !dso_r) + return 0; + else if (!dso_l) + return -1; + else + return 1; + } + + return strcmp(dso_l->name, dso_r->name); +} + +static size_t +sort__dso_print(FILE *fp, struct hist_entry *self) +{ + if (self->dso) + return fprintf(fp, "%-25s", self->dso->name); + + return fprintf(fp, "%016llx ", (__u64)self->ip); +} + +static struct sort_entry sort_dso = { + .header = "Shared Object ", + .cmp = sort__dso_cmp, + .print = sort__dso_print, +}; + +/* --sort symbol */ + +static int64_t +sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) +{ + uint64_t ip_l, ip_r; + + if (left->sym == right->sym) + return 0; + + ip_l = left->sym ? left->sym->start : left->ip; + ip_r = right->sym ? right->sym->start : right->ip; + + return (int64_t)(ip_r - ip_l); +} + +static size_t +sort__sym_print(FILE *fp, struct hist_entry *self) +{ + size_t ret = 0; + + if (verbose) + ret += fprintf(fp, "%#018llx ", (__u64)self->ip); + + if (self->sym) { + ret += fprintf(fp, "[%c] %s", + self->dso == kernel_dso ? 
'k' : '.', self->sym->name); + } else { + ret += fprintf(fp, "%#016llx", (__u64)self->ip); + } + + return ret; +} + +static struct sort_entry sort_sym = { + .header = "Symbol", + .cmp = sort__sym_cmp, + .print = sort__sym_print, +}; + +static int sort__need_collapse = 0; + +struct sort_dimension { + char *name; + struct sort_entry *entry; + int taken; +}; + +static struct sort_dimension sort_dimensions[] = { + { .name = "pid", .entry = &sort_thread, }, + { .name = "comm", .entry = &sort_comm, }, + { .name = "dso", .entry = &sort_dso, }, + { .name = "symbol", .entry = &sort_sym, }, +}; + +static LIST_HEAD(hist_entry__sort_list); + +static int sort_dimension__add(char *tok) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { + struct sort_dimension *sd = &sort_dimensions[i]; + + if (sd->taken) + continue; + + if (strncasecmp(tok, sd->name, strlen(tok))) + continue; + + if (sd->entry->collapse) + sort__need_collapse = 1; + + list_add_tail(&sd->entry->list, &hist_entry__sort_list); + sd->taken = 1; + + return 0; + } + + return -ESRCH; +} + +static int64_t +hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct sort_entry *se; + int64_t cmp = 0; + + list_for_each_entry(se, &hist_entry__sort_list, list) { + cmp = se->cmp(left, right); + if (cmp) + break; + } + + return cmp; +} + +static int64_t +hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) +{ + struct sort_entry *se; + int64_t cmp = 0; + + list_for_each_entry(se, &hist_entry__sort_list, list) { + int64_t (*f)(struct hist_entry *, struct hist_entry *); + + f = se->collapse ?: se->cmp; + + cmp = f(left, right); + if (cmp) + break; + } + + return cmp; +} + +static size_t +hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples) +{ + struct sort_entry *se; + size_t ret; + + if (total_samples) { + double percent = self->count * 100.0 / total_samples; + char *color = PERF_COLOR_NORMAL; + + /* + * We color high-overhead entries in red, low-overhead + * entries in green - and keep the middle ground normal: + */ + if (percent >= 5.0) + color = PERF_COLOR_RED; + if (percent < 0.5) + color = PERF_COLOR_GREEN; + + ret = color_fprintf(fp, color, " %6.2f%%", + (self->count * 100.0) / total_samples); + } else + ret = fprintf(fp, "%12d ", self->count); + + list_for_each_entry(se, &hist_entry__sort_list, list) { + fprintf(fp, " "); + ret += se->print(fp, self); + } + + ret += fprintf(fp, "\n"); + + return ret; +} + +/* + * collect histogram counts + */ + +static int +hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, + struct symbol *sym, uint64_t ip, char level) +{ + struct rb_node **p = &hist.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *he; + struct hist_entry entry = { + .thread = thread, + .map = map, + .dso = dso, + .sym = sym, + .ip = ip, + .level = level, + .count = 1, + }; + int cmp; + + while (*p != NULL) { + parent = *p; + he = rb_entry(parent, struct hist_entry, rb_node); + + cmp = hist_entry__cmp(&entry, he); + + if (!cmp) { + he->count++; + return 0; + } + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + he = malloc(sizeof(*he)); + if (!he) + return -ENOMEM; + *he = entry; + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &hist); + + return 0; +} + +static void hist_entry__free(struct hist_entry *he) +{ + free(he); +} + +/* + * collapse the histogram + */ + +static struct rb_root collapse_hists; + +static void collapse__insert_entry(struct hist_entry *he) +{ + struct rb_node 
**p = &collapse_hists.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *iter; + int64_t cmp; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct hist_entry, rb_node); + + cmp = hist_entry__collapse(iter, he); + + if (!cmp) { + iter->count += he->count; + hist_entry__free(he); + return; + } + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &collapse_hists); +} + +static void collapse__resort(void) +{ + struct rb_node *next; + struct hist_entry *n; + + if (!sort__need_collapse) + return; + + next = rb_first(&hist); + while (next) { + n = rb_entry(next, struct hist_entry, rb_node); + next = rb_next(&n->rb_node); + + rb_erase(&n->rb_node, &hist); + collapse__insert_entry(n); + } +} + +/* + * reverse the map, sort on count. + */ + +static struct rb_root output_hists; + +static void output__insert_entry(struct hist_entry *he) +{ + struct rb_node **p = &output_hists.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *iter; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct hist_entry, rb_node); + + if (he->count > iter->count) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &output_hists); +} + +static void output__resort(void) +{ + struct rb_node *next; + struct hist_entry *n; + struct rb_root *tree = &hist; + + if (sort__need_collapse) + tree = &collapse_hists; + + next = rb_first(tree); + + while (next) { + n = rb_entry(next, struct hist_entry, rb_node); + next = rb_next(&n->rb_node); + + rb_erase(&n->rb_node, tree); + output__insert_entry(n); + } +} + +static size_t output__fprintf(FILE *fp, uint64_t total_samples) +{ + struct hist_entry *pos; + struct sort_entry *se; + struct rb_node *nd; + size_t ret = 0; + + fprintf(fp, "\n"); + fprintf(fp, "#\n"); + fprintf(fp, "# (%Ld samples)\n", (__u64)total_samples); + fprintf(fp, "#\n"); + + fprintf(fp, "# Overhead"); + list_for_each_entry(se, &hist_entry__sort_list, list) + fprintf(fp, " %s", se->header); + fprintf(fp, "\n"); + + fprintf(fp, "# ........"); + list_for_each_entry(se, &hist_entry__sort_list, list) { + int i; + + fprintf(fp, " "); + for (i = 0; i < strlen(se->header); i++) + fprintf(fp, "."); + } + fprintf(fp, "\n"); + + fprintf(fp, "#\n"); + + for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) { + pos = rb_entry(nd, struct hist_entry, rb_node); + ret += hist_entry__fprintf(fp, pos, total_samples); + } + + if (!strcmp(sort_order, default_sort_order)) { + fprintf(fp, "#\n"); + fprintf(fp, "# (For more details, try: perf annotate --sort comm,dso,symbol)\n"); + fprintf(fp, "#\n"); + } + fprintf(fp, "\n"); + + return ret; +} + +static void register_idle_thread(void) +{ + struct thread *thread = threads__findnew(0); + + if (thread == NULL || + thread__set_comm(thread, "[idle]")) { + fprintf(stderr, "problem inserting idle task.\n"); + exit(-1); + } +} + +static unsigned long total = 0, + total_mmap = 0, + total_comm = 0, + total_fork = 0, + total_unknown = 0; + +static int +process_overflow_event(event_t *event, unsigned long offset, unsigned long head) +{ + char level; + int show = 0; + struct dso *dso = NULL; + struct thread *thread = threads__findnew(event->ip.pid); + uint64_t ip = event->ip.ip; + struct map *map = NULL; + + dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->header.misc, + event->ip.pid, + (void 
*)(long)ip); + + dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid); + + if (thread == NULL) { + fprintf(stderr, "problem processing %d event, skipping it.\n", + event->header.type); + return -1; + } + + if (event->header.misc & PERF_EVENT_MISC_KERNEL) { + show = SHOW_KERNEL; + level = 'k'; + + dso = kernel_dso; + + dprintf(" ...... dso: %s\n", dso->name); + + } else if (event->header.misc & PERF_EVENT_MISC_USER) { + + show = SHOW_USER; + level = '.'; + + map = thread__find_map(thread, ip); + if (map != NULL) { + ip = map->map_ip(map, ip); + dso = map->dso; + } else { + /* + * If this is outside of all known maps, + * and is a negative address, try to look it + * up in the kernel dso, as it might be a + * vsyscall (which executes in user-mode): + */ + if ((long long)ip < 0) + dso = kernel_dso; + } + dprintf(" ...... dso: %s\n", dso ? dso->name : ""); + + } else { + show = SHOW_HV; + level = 'H'; + dprintf(" ...... dso: [hypervisor]\n"); + } + + if (show & show_mask) { + struct symbol *sym = NULL; + + if (dso) + sym = dso->find_symbol(dso, ip); + + if (hist_entry__add(thread, map, dso, sym, ip, level)) { + fprintf(stderr, + "problem incrementing symbol count, skipping event\n"); + return -1; + } + } + total++; + + return 0; +} + +static int +process_mmap_event(event_t *event, unsigned long offset, unsigned long head) +{ + struct thread *thread = threads__findnew(event->mmap.pid); + struct map *map = map__new(&event->mmap); + + dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->mmap.pid, + (void *)(long)event->mmap.start, + (void *)(long)event->mmap.len, + (void *)(long)event->mmap.pgoff, + event->mmap.filename); + + if (thread == NULL || map == NULL) { + dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n"); + return 0; + } + + thread__insert_map(thread, map); + total_mmap++; + + return 0; +} + +static int +process_comm_event(event_t *event, unsigned long offset, unsigned long head) +{ + struct thread *thread = threads__findnew(event->comm.pid); + + dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->comm.comm, event->comm.pid); + + if (thread == NULL || + thread__set_comm(thread, event->comm.comm)) { + dprintf("problem processing PERF_EVENT_COMM, skipping event.\n"); + return -1; + } + total_comm++; + + return 0; +} + +static int +process_fork_event(event_t *event, unsigned long offset, unsigned long head) +{ + struct thread *thread = threads__findnew(event->fork.pid); + struct thread *parent = threads__findnew(event->fork.ppid); + + dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->fork.pid, event->fork.ppid); + + if (!thread || !parent || thread__fork(thread, parent)) { + dprintf("problem processing PERF_EVENT_FORK, skipping event.\n"); + return -1; + } + total_fork++; + + return 0; +} + +static int +process_period_event(event_t *event, unsigned long offset, unsigned long head) +{ + dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->period.time, + event->period.id, + event->period.sample_period); + + return 0; +} + +static int +process_event(event_t *event, unsigned long offset, unsigned long head) +{ + if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) + return process_overflow_event(event, offset, head); + + switch (event->header.type) { + case PERF_EVENT_MMAP: 
+ return process_mmap_event(event, offset, head); + + case PERF_EVENT_COMM: + return process_comm_event(event, offset, head); + + case PERF_EVENT_FORK: + return process_fork_event(event, offset, head); + + case PERF_EVENT_PERIOD: + return process_period_event(event, offset, head); + /* + * We dont process them right now but they are fine: + */ + + case PERF_EVENT_THROTTLE: + case PERF_EVENT_UNTHROTTLE: + return 0; + + default: + return -1; + } + + return 0; +} + +static int __cmd_annotate(void) +{ + int ret, rc = EXIT_FAILURE; + unsigned long offset = 0; + unsigned long head = 0; + struct stat stat; + event_t *event; + uint32_t size; + char *buf; + + register_idle_thread(); + + input = open(input_name, O_RDONLY); + if (input < 0) { + perror("failed to open file"); + exit(-1); + } + + ret = fstat(input, &stat); + if (ret < 0) { + perror("failed to stat file"); + exit(-1); + } + + if (!stat.st_size) { + fprintf(stderr, "zero-sized file, nothing to do!\n"); + exit(0); + } + + if (load_kernel() < 0) { + perror("failed to load kernel symbols"); + return EXIT_FAILURE; + } + + if (!full_paths) { + if (getcwd(__cwd, sizeof(__cwd)) == NULL) { + perror("failed to get the current directory"); + return EXIT_FAILURE; + } + cwdlen = strlen(cwd); + } else { + cwd = NULL; + cwdlen = 0; + } +remap: + buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, + MAP_SHARED, input, offset); + if (buf == MAP_FAILED) { + perror("failed to mmap file"); + exit(-1); + } + +more: + event = (event_t *)(buf + head); + + size = event->header.size; + if (!size) + size = 8; + + if (head + event->header.size >= page_size * mmap_window) { + unsigned long shift = page_size * (head / page_size); + int ret; + + ret = munmap(buf, page_size * mmap_window); + assert(ret == 0); + + offset += shift; + head -= shift; + goto remap; + } + + size = event->header.size; + + dprintf("%p [%p]: event: %d\n", + (void *)(offset + head), + (void *)(long)event->header.size, + event->header.type); + + if (!size || process_event(event, offset, head) < 0) { + + dprintf("%p [%p]: skipping unknown header type: %d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->header.type); + + total_unknown++; + + /* + * assume we lost track of the stream, check alignment, and + * increment a single u64 in the hope to catch on again 'soon'. + */ + + if (unlikely(head & 7)) + head &= ~7ULL; + + size = 8; + } + + head += size; + + if (offset + head < stat.st_size) + goto more; + + rc = EXIT_SUCCESS; + close(input); + + dprintf(" IP events: %10ld\n", total); + dprintf(" mmap events: %10ld\n", total_mmap); + dprintf(" comm events: %10ld\n", total_comm); + dprintf(" fork events: %10ld\n", total_fork); + dprintf(" unknown events: %10ld\n", total_unknown); + + if (dump_trace) + return 0; + + if (verbose >= 3) + threads__fprintf(stdout); + + if (verbose >= 2) + dsos__fprintf(stdout); + + collapse__resort(); + output__resort(); + output__fprintf(stdout, total); + + return rc; +} + +static const char * const annotate_usage[] = { + "perf annotate [] ", + NULL +}; + +static const struct option options[] = { + OPT_STRING('i', "input", &input_name, "file", + "input file name"), + OPT_BOOLEAN('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, + "dump raw trace in ASCII"), + OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"), + OPT_STRING('s', "sort", &sort_order, "key[,key2...]", + "sort by key(s): pid, comm, dso, symbol. 
Default: pid,symbol"), + OPT_BOOLEAN('P', "full-paths", &full_paths, + "Don't shorten the pathnames taking into account the cwd"), + OPT_END() +}; + +static void setup_sorting(void) +{ + char *tmp, *tok, *str = strdup(sort_order); + + for (tok = strtok_r(str, ", ", &tmp); + tok; tok = strtok_r(NULL, ", ", &tmp)) { + if (sort_dimension__add(tok) < 0) { + error("Unknown --sort key: `%s'", tok); + usage_with_options(annotate_usage, options); + } + } + + free(str); +} + +int cmd_annotate(int argc, const char **argv, const char *prefix) +{ + symbol__init(); + + page_size = getpagesize(); + + argc = parse_options(argc, argv, options, annotate_usage, 0); + + setup_sorting(); + + /* + * Any (unrecognized) arguments left? + */ + if (argc) + usage_with_options(annotate_usage, options); + + setup_pager(); + + return __cmd_annotate(); +} diff --git a/Documentation/perf_counter/builtin.h b/Documentation/perf_counter/builtin.h index e7de47da858..51d168230ee 100644 --- a/Documentation/perf_counter/builtin.h +++ b/Documentation/perf_counter/builtin.h @@ -14,6 +14,7 @@ extern void prune_packed_objects(int); extern int read_line_with_nul(char *buf, int size, FILE *file); extern int check_pager_config(const char *cmd); +extern int cmd_annotate(int argc, const char **argv, const char *prefix); extern int cmd_help(int argc, const char **argv, const char *prefix); extern int cmd_record(int argc, const char **argv, const char *prefix); extern int cmd_report(int argc, const char **argv, const char *prefix); diff --git a/Documentation/perf_counter/command-list.txt b/Documentation/perf_counter/command-list.txt index f0b922c9023..eebce30afbc 100644 --- a/Documentation/perf_counter/command-list.txt +++ b/Documentation/perf_counter/command-list.txt @@ -2,8 +2,9 @@ # List of known perf commands. 
# command name category [deprecated] [common] # +perf-annotate mainporcelain common +perf-list mainporcelain common perf-record mainporcelain common perf-report mainporcelain common perf-stat mainporcelain common perf-top mainporcelain common -perf-list mainporcelain common diff --git a/Documentation/perf_counter/perf.c b/Documentation/perf_counter/perf.c index 161824f1241..4eb72593370 100644 --- a/Documentation/perf_counter/perf.c +++ b/Documentation/perf_counter/perf.c @@ -263,6 +263,7 @@ static void handle_internal_command(int argc, const char **argv) { "report", cmd_report, 0 }, { "stat", cmd_stat, 0 }, { "top", cmd_top, 0 }, + { "annotate", cmd_annotate, 0 }, { "version", cmd_version, 0 }, }; int i; @@ -402,9 +403,11 @@ int main(int argc, const char **argv) while (1) { static int done_help = 0; static int was_alias = 0; + was_alias = run_argv(&argc, &argv); if (errno != ENOENT) break; + if (was_alias) { fprintf(stderr, "Expansion of alias '%s' failed; " "'%s' is not a perf-command\n", -- cgit v1.2.3 From 0b73da3f40128eab6ca2a07508f424029a1edaeb Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 6 Jun 2009 15:48:52 +0200 Subject: perf_counter tools: Add 'perf annotate' feature Add new perf sub-command to display annotated source code: $ perf annotate decode_tree_entry ------------------------------------------------ Percent | Source code & Disassembly of /home/mingo/git/git ------------------------------------------------ : : /home/mingo/git/git: file format elf64-x86-64 : : : Disassembly of section .text: : : 00000000004a0da0 : : *modep = mode; : return str; : } : : static void decode_tree_entry(struct tree_desc *desc, const char *buf, unsigned long size) : { 3.82 : 4a0da0: 41 54 push %r12 : const char *path; : unsigned int mode, len; : : if (size < 24 || buf[size - 21]) 0.17 : 4a0da2: 48 83 fa 17 cmp $0x17,%rdx : *modep = mode; : return str; : } : : static void decode_tree_entry(struct tree_desc *desc, const char *buf, unsigned long size) : { 0.00 : 4a0da6: 49 89 fc mov %rdi,%r12 0.00 : 4a0da9: 55 push %rbp 3.37 : 4a0daa: 53 push %rbx : const char *path; : unsigned int mode, len; : : if (size < 24 || buf[size - 21]) 0.08 : 4a0dab: 76 73 jbe 4a0e20 0.00 : 4a0dad: 80 7c 16 eb 00 cmpb $0x0,-0x15(%rsi,%rdx,1) 3.48 : 4a0db2: 75 6c jne 4a0e20 : static const char *get_mode(const char *str, unsigned int *modep) : { : unsigned char c; : unsigned int mode = 0; : : if (*str == ' ') 1.94 : 4a0db4: 0f b6 06 movzbl (%rsi),%eax 0.39 : 4a0db7: 3c 20 cmp $0x20,%al 0.00 : 4a0db9: 74 65 je 4a0e20 : return NULL; : : while ((c = *str++) != ' ') { 0.06 : 4a0dbb: 89 c2 mov %eax,%edx : if (c < '0' || c > '7') 1.99 : 4a0dbd: 31 ed xor %ebp,%ebp : unsigned int mode = 0; : : if (*str == ' ') : return NULL; : : while ((c = *str++) != ' ') { 1.74 : 4a0dbf: 48 8d 5e 01 lea 0x1(%rsi),%rbx : if (c < '0' || c > '7') 0.00 : 4a0dc3: 8d 42 d0 lea -0x30(%rdx),%eax 0.17 : 4a0dc6: 3c 07 cmp $0x7,%al 0.00 : 4a0dc8: 76 0d jbe 4a0dd7 0.00 : 4a0dca: eb 54 jmp 4a0e20 0.00 : 4a0dcc: 0f 1f 40 00 nopl 0x0(%rax) 16.57 : 4a0dd0: 8d 42 d0 lea -0x30(%rdx),%eax 0.14 : 4a0dd3: 3c 07 cmp $0x7,%al 0.00 : 4a0dd5: 77 49 ja 4a0e20 : return NULL; : mode = (mode << 3) + (c - '0'); 3.12 : 4a0dd7: 0f b6 c2 movzbl %dl,%eax : unsigned int mode = 0; : : if (*str == ' ') : return NULL; : : while ((c = *str++) != ' ') { 0.00 : 4a0dda: 0f b6 13 movzbl (%rbx),%edx 16.74 : 4a0ddd: 48 83 c3 01 add $0x1,%rbx : if (c < '0' || c > '7') : return NULL; : mode = (mode << 3) + (c - '0'); The first column is the percentage of samples that arrived on that 
particular line - relative to the total cost of the function. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- .../perf_counter/Documentation/perf-annotate.txt | 9 +- Documentation/perf_counter/builtin-annotate.c | 304 +++++++++++++-------- Documentation/perf_counter/util/symbol.c | 50 ++-- Documentation/perf_counter/util/symbol.h | 5 + 4 files changed, 229 insertions(+), 139 deletions(-) diff --git a/Documentation/perf_counter/Documentation/perf-annotate.txt b/Documentation/perf_counter/Documentation/perf-annotate.txt index a9d6d5ee270..c9dcade0683 100644 --- a/Documentation/perf_counter/Documentation/perf-annotate.txt +++ b/Documentation/perf_counter/Documentation/perf-annotate.txt @@ -3,7 +3,7 @@ perf-annotate(1) NAME ---- -perf-annotate - Read perf.data (created by perf record) and annotate functions +perf-annotate - Read perf.data (created by perf record) and display annotated code SYNOPSIS -------- @@ -12,8 +12,11 @@ SYNOPSIS DESCRIPTION ----------- -This command displays the performance counter profile information recorded -via perf record. +This command reads the input file and displays an annotated version of the +code. If the object file has debug symbols then the source code will be +displayed alongside assembly code. + +If there is no debug info in the object, then annotated assembly is displayed. OPTIONS ------- diff --git a/Documentation/perf_counter/builtin-annotate.c b/Documentation/perf_counter/builtin-annotate.c index d656484ec98..116a3978b44 100644 --- a/Documentation/perf_counter/builtin-annotate.c +++ b/Documentation/perf_counter/builtin-annotate.c @@ -28,7 +28,7 @@ static char const *input_name = "perf.data"; static char *vmlinux = NULL; -static char default_sort_order[] = "comm,dso"; +static char default_sort_order[] = "comm,symbol"; static char *sort_order = default_sort_order; static int input; @@ -38,7 +38,6 @@ static int dump_trace = 0; #define dprintf(x...) 
do { if (dump_trace) printf(x); } while (0) static int verbose; -static int full_paths; static unsigned long page_size; static unsigned long mmap_window = 32; @@ -89,6 +88,7 @@ static LIST_HEAD(dsos); static struct dso *kernel_dso; static struct dso *vdso; + static void dsos__add(struct dso *dso) { list_add_tail(&dso->node, &dsos); @@ -176,20 +176,6 @@ static int load_kernel(void) return err; } -static char __cwd[PATH_MAX]; -static char *cwd = __cwd; -static int cwdlen; - -static int strcommon(const char *pathname) -{ - int n = 0; - - while (pathname[n] == cwd[n] && n < cwdlen) - ++n; - - return n; -} - struct map { struct list_head node; uint64_t start; @@ -215,17 +201,6 @@ static struct map *map__new(struct mmap_event *event) if (self != NULL) { const char *filename = event->filename; - char newfilename[PATH_MAX]; - - if (cwd) { - int n = strcommon(filename); - - if (n == cwdlen) { - snprintf(newfilename, sizeof(newfilename), - ".%s", filename + n); - filename = newfilename; - } - } self->start = event->start; self->end = event->start + event->len; @@ -669,44 +644,36 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) return cmp; } -static size_t -hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples) +/* + * collect histogram counts + */ +static void hist_hit(struct hist_entry *he, uint64_t ip) { - struct sort_entry *se; - size_t ret; + unsigned int sym_size, offset; + struct symbol *sym = he->sym; - if (total_samples) { - double percent = self->count * 100.0 / total_samples; - char *color = PERF_COLOR_NORMAL; + he->count++; - /* - * We color high-overhead entries in red, low-overhead - * entries in green - and keep the middle ground normal: - */ - if (percent >= 5.0) - color = PERF_COLOR_RED; - if (percent < 0.5) - color = PERF_COLOR_GREEN; + if (!sym || !sym->hist) + return; - ret = color_fprintf(fp, color, " %6.2f%%", - (self->count * 100.0) / total_samples); - } else - ret = fprintf(fp, "%12d ", self->count); + sym_size = sym->end - sym->start; + offset = ip - sym->start; - list_for_each_entry(se, &hist_entry__sort_list, list) { - fprintf(fp, " "); - ret += se->print(fp, self); - } + if (offset >= sym_size) + return; - ret += fprintf(fp, "\n"); + sym->hist_sum++; + sym->hist[offset]++; - return ret; + if (verbose >= 3) + printf("%p %s: count++ [ip: %p, %08Lx] => %Ld\n", + (void *)he->sym->start, + he->sym->name, + (void *)ip, ip - he->sym->start, + sym->hist[offset]); } -/* - * collect histogram counts - */ - static int hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, struct symbol *sym, uint64_t ip, char level) @@ -732,7 +699,8 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, cmp = hist_entry__cmp(&entry, he); if (!cmp) { - he->count++; + hist_hit(he, ip); + return 0; } @@ -856,50 +824,6 @@ static void output__resort(void) } } -static size_t output__fprintf(FILE *fp, uint64_t total_samples) -{ - struct hist_entry *pos; - struct sort_entry *se; - struct rb_node *nd; - size_t ret = 0; - - fprintf(fp, "\n"); - fprintf(fp, "#\n"); - fprintf(fp, "# (%Ld samples)\n", (__u64)total_samples); - fprintf(fp, "#\n"); - - fprintf(fp, "# Overhead"); - list_for_each_entry(se, &hist_entry__sort_list, list) - fprintf(fp, " %s", se->header); - fprintf(fp, "\n"); - - fprintf(fp, "# ........"); - list_for_each_entry(se, &hist_entry__sort_list, list) { - int i; - - fprintf(fp, " "); - for (i = 0; i < strlen(se->header); i++) - fprintf(fp, "."); - } - fprintf(fp, "\n"); - - fprintf(fp, "#\n"); - - for (nd = 
rb_first(&output_hists); nd; nd = rb_next(nd)) { - pos = rb_entry(nd, struct hist_entry, rb_node); - ret += hist_entry__fprintf(fp, pos, total_samples); - } - - if (!strcmp(sort_order, default_sort_order)) { - fprintf(fp, "#\n"); - fprintf(fp, "# (For more details, try: perf annotate --sort comm,dso,symbol)\n"); - fprintf(fp, "#\n"); - } - fprintf(fp, "\n"); - - return ret; -} - static void register_idle_thread(void) { struct thread *thread = threads__findnew(0); @@ -1106,6 +1030,149 @@ process_event(event_t *event, unsigned long offset, unsigned long head) return 0; } +static int +parse_line(FILE *file, struct symbol *sym, uint64_t start, uint64_t len) +{ + char *line = NULL, *tmp, *tmp2; + unsigned int offset; + size_t line_len; + __u64 line_ip; + int ret; + char *c; + + if (getline(&line, &line_len, file) < 0) + return -1; + if (!line) + return -1; + + c = strchr(line, '\n'); + if (c) + *c = 0; + + line_ip = -1; + offset = 0; + ret = -2; + + /* + * Strip leading spaces: + */ + tmp = line; + while (*tmp) { + if (*tmp != ' ') + break; + tmp++; + } + + if (*tmp) { + /* + * Parse hexa addresses followed by ':' + */ + line_ip = strtoull(tmp, &tmp2, 16); + if (*tmp2 != ':') + line_ip = -1; + } + + if (line_ip != -1) { + unsigned int hits = 0; + double percent = 0.0; + char *color = PERF_COLOR_NORMAL; + + offset = line_ip - start; + if (offset < len) + hits = sym->hist[offset]; + + if (sym->hist_sum) + percent = 100.0 * hits / sym->hist_sum; + + /* + * We color high-overhead entries in red, low-overhead + * entries in green - and keep the middle ground normal: + */ + if (percent >= 5.0) + color = PERF_COLOR_RED; + else { + if (percent > 0.5) + color = PERF_COLOR_GREEN; + } + + color_fprintf(stdout, color, " %7.2f", percent); + printf(" : "); + color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", line); + } else { + if (!*line) + printf(" :\n"); + else + printf(" : %s\n", line); + } + + return 0; +} + +static void annotate_sym(struct dso *dso, struct symbol *sym) +{ + char *filename = dso->name; + uint64_t start, end, len; + char command[PATH_MAX*2]; + FILE *file; + + if (!filename) + return; + if (dso == kernel_dso) + filename = vmlinux; + + printf("\n------------------------------------------------\n"); + printf(" Percent | Source code & Disassembly of %s\n", filename); + printf("------------------------------------------------\n"); + + if (verbose >= 2) + printf("annotating [%p] %30s : [%p] %30s\n", dso, dso->name, sym, sym->name); + + start = sym->obj_start; + if (!start) + start = sym->start; + + end = start + sym->end - sym->start + 1; + len = sym->end - sym->start; + + sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s", (__u64)start, (__u64)end, filename); + + if (verbose >= 3) + printf("doing: %s\n", command); + + file = popen(command, "r"); + if (!file) + return; + + while (!feof(file)) { + if (parse_line(file, sym, start, len) < 0) + break; + } + + pclose(file); +} + +static void find_annotations(void) +{ + struct rb_node *nd; + struct dso *dso; + int count = 0; + + list_for_each_entry(dso, &dsos, node) { + + for (nd = rb_first(&dso->syms); nd; nd = rb_next(nd)) { + struct symbol *sym = rb_entry(nd, struct symbol, rb_node); + + if (sym->hist) { + annotate_sym(dso, sym); + count++; + } + } + } + + if (!count) + printf(" Error: symbol '%s' not present amongst the samples.\n", sym_hist_filter); +} + static int __cmd_annotate(void) { int ret, rc = EXIT_FAILURE; @@ -1140,16 +1207,6 @@ static int __cmd_annotate(void) return EXIT_FAILURE; } - if (!full_paths) { - if 
(getcwd(__cwd, sizeof(__cwd)) == NULL) { - perror("failed to get the current directory"); - return EXIT_FAILURE; - } - cwdlen = strlen(cwd); - } else { - cwd = NULL; - cwdlen = 0; - } remap: buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, MAP_SHARED, input, offset); @@ -1229,7 +1286,8 @@ more: collapse__resort(); output__resort(); - output__fprintf(stdout, total); + + find_annotations(); return rc; } @@ -1242,15 +1300,13 @@ static const char * const annotate_usage[] = { static const struct option options[] = { OPT_STRING('i', "input", &input_name, "file", "input file name"), + OPT_STRING('s', "symbol", &sym_hist_filter, "file", + "symbol to annotate"), OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"), - OPT_STRING('s', "sort", &sort_order, "key[,key2...]", - "sort by key(s): pid, comm, dso, symbol. Default: pid,symbol"), - OPT_BOOLEAN('P', "full-paths", &full_paths, - "Don't shorten the pathnames taking into account the cwd"), OPT_END() }; @@ -1279,10 +1335,18 @@ int cmd_annotate(int argc, const char **argv, const char *prefix) setup_sorting(); - /* - * Any (unrecognized) arguments left? - */ - if (argc) + if (argc) { + /* + * Special case: if there's an argument left then assume tha + * it's a symbol filter: + */ + if (argc > 1) + usage_with_options(annotate_usage, options); + + sym_hist_filter = argv[0]; + } + + if (!sym_hist_filter) usage_with_options(annotate_usage, options); setup_pager(); diff --git a/Documentation/perf_counter/util/symbol.c b/Documentation/perf_counter/util/symbol.c index a06bbfba835..23f4f7b3b83 100644 --- a/Documentation/perf_counter/util/symbol.c +++ b/Documentation/perf_counter/util/symbol.c @@ -7,21 +7,36 @@ #include #include +const char *sym_hist_filter; + static struct symbol *symbol__new(uint64_t start, uint64_t len, - const char *name, unsigned int priv_size) + const char *name, unsigned int priv_size, + uint64_t obj_start, int verbose) { size_t namelen = strlen(name) + 1; - struct symbol *self = malloc(priv_size + sizeof(*self) + namelen); + struct symbol *self = calloc(1, priv_size + sizeof(*self) + namelen); - if (self != NULL) { - if (priv_size) { - memset(self, 0, priv_size); - self = ((void *)self) + priv_size; - } - self->start = start; - self->end = start + len - 1; - memcpy(self->name, name, namelen); + if (!self) + return NULL; + + if (verbose >= 2) + printf("new symbol: %016Lx [%08lx]: %s, hist: %p, obj_start: %p\n", + (__u64)start, len, name, self->hist, (void *)obj_start); + + self->obj_start= obj_start; + self->hist = NULL; + self->hist_sum = 0; + + if (sym_hist_filter && !strcmp(name, sym_hist_filter)) + self->hist = calloc(sizeof(__u64), len); + + if (priv_size) { + memset(self, 0, priv_size); + self = ((void *)self) + priv_size; } + self->start = start; + self->end = start + len - 1; + memcpy(self->name, name, namelen); return self; } @@ -166,7 +181,7 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int verb * Well fix up the end later, when we have all sorted. 
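(kallsyms reports only the symbol start addresses, which is why the
length below is faked as 0xdead - the real end address is filled in
once all symbols have been sorted.)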
*/ sym = symbol__new(start, 0xdead, line + len + 2, - self->sym_priv_size); + self->sym_priv_size, 0, verbose); if (sym == NULL) goto out_delete_line; @@ -272,7 +287,7 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, static int dso__synthesize_plt_symbols(struct dso *self, Elf *elf, GElf_Ehdr *ehdr, Elf_Scn *scn_dynsym, GElf_Shdr *shdr_dynsym, - size_t dynsym_idx) + size_t dynsym_idx, int verbose) { uint32_t nr_rel_entries, idx; GElf_Sym sym; @@ -335,7 +350,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, Elf *elf, "%s@plt", elf_sym__name(&sym, symstrs)); f = symbol__new(plt_offset, shdr_plt.sh_entsize, - sympltname, self->sym_priv_size); + sympltname, self->sym_priv_size, 0, verbose); if (!f) return -1; @@ -353,7 +368,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, Elf *elf, "%s@plt", elf_sym__name(&sym, symstrs)); f = symbol__new(plt_offset, shdr_plt.sh_entsize, - sympltname, self->sym_priv_size); + sympltname, self->sym_priv_size, 0, verbose); if (!f) return -1; @@ -410,7 +425,7 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, if (sec_dynsym != NULL) { nr = dso__synthesize_plt_symbols(self, elf, &ehdr, sec_dynsym, &shdr, - dynsym_idx); + dynsym_idx, verbose); if (nr < 0) goto out_elf_end; } @@ -444,6 +459,7 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, elf_symtab__for_each_symbol(syms, nr_syms, index, sym) { struct symbol *f; + uint64_t obj_start; if (!elf_sym__is_function(&sym)) continue; @@ -453,11 +469,13 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, goto out_elf_end; gelf_getshdr(sec, &shdr); + obj_start = sym.st_value; + sym.st_value -= shdr.sh_addr - shdr.sh_offset; f = symbol__new(sym.st_value, sym.st_size, elf_sym__name(&sym, symstrs), - self->sym_priv_size); + self->sym_priv_size, obj_start, verbose); if (!f) goto out_elf_end; diff --git a/Documentation/perf_counter/util/symbol.h b/Documentation/perf_counter/util/symbol.h index e23cc312668..4839d68f14f 100644 --- a/Documentation/perf_counter/util/symbol.h +++ b/Documentation/perf_counter/util/symbol.h @@ -9,6 +9,9 @@ struct symbol { struct rb_node rb_node; __u64 start; __u64 end; + __u64 obj_start; + __u64 hist_sum; + __u64 *hist; char name[0]; }; @@ -20,6 +23,8 @@ struct dso { char name[0]; }; +const char *sym_hist_filter; + typedef int (*symbol_filter_t)(struct dso *self, struct symbol *sym); struct dso *dso__new(const char *name, unsigned int sym_priv_size); -- cgit v1.2.3 From 864709302a80f26fa9da3be5b47304f0b8bae192 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 6 Jun 2009 20:33:43 +0200 Subject: perf_counter tools: Move from Documentation/perf_counter/ to tools/perf/ Several people have suggested that 'perf' has become a full-fledged tool that should be moved out of Documentation/. Move it to the (new) tools/ directory. 
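A condensed sketch of the annotate flow the preceding patch introduces: symbol__new() allocates a per-byte hit array when a symbol matches the --symbol filter, profile samples bump hist[ip - start] and hist_sum, and parse_line() maps each hexadecimal "addr:" line of the objdump -dS output back into that array, coloring entries red above 5% overhead and green above 0.5%. The following is illustrative only, with simplified types; it is not part of the patch (the real code is split across util/symbol.c and builtin-annotate.c):

/*
 * Sketch of the annotate accounting: a per-byte hit histogram on a
 * symbol, plus the percent/color thresholds from parse_line().
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define COLOR_NORMAL	""
#define COLOR_RED	"\033[31m"
#define COLOR_GREEN	"\033[32m"

struct sym {
	uint64_t start, len;
	uint64_t hist_sum;
	uint64_t *hist;			/* one counter per byte */
};

/* Profile side: account one sample at instruction pointer 'ip'. */
static void sym__hit(struct sym *s, uint64_t ip)
{
	uint64_t offset = ip - s->start;	/* wraps if ip < start */

	if (offset < s->len) {
		s->hist[offset]++;
		s->hist_sum++;
	}
}

/* Output side: the thresholds parse_line() uses to pick a color. */
static const char *percent_color(double percent)
{
	if (percent >= 5.0)
		return COLOR_RED;	/* high overhead */
	if (percent > 0.5)
		return COLOR_GREEN;	/* low but non-trivial */
	return COLOR_NORMAL;
}

int main(void)
{
	struct sym s = { .start = 0x1000, .len = 16 };
	uint64_t off;

	s.hist = calloc(s.len, sizeof(*s.hist));
	if (!s.hist)
		return 1;

	/* Fake samples: offset 4 is the hot spot. */
	sym__hit(&s, 0x1004);
	sym__hit(&s, 0x1004);
	sym__hit(&s, 0x1008);

	for (off = 0; off < s.len; off++) {
		double percent = s.hist_sum ?
			100.0 * s.hist[off] / s.hist_sum : 0.0;

		printf("%s%7.2f\033[0m : %016llx\n",
		       percent_color(percent), percent,
		       (unsigned long long)(s.start + off));
	}
	free(s.hist);
	return 0;
}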
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- Documentation/perf_counter/.gitignore | 16 - Documentation/perf_counter/Documentation/Makefile | 300 ----- .../perf_counter/Documentation/asciidoc.conf | 91 -- .../perf_counter/Documentation/manpage-1.72.xsl | 14 - .../perf_counter/Documentation/manpage-base.xsl | 35 - .../Documentation/manpage-bold-literal.xsl | 17 - .../perf_counter/Documentation/manpage-normal.xsl | 13 - .../Documentation/manpage-suppress-sp.xsl | 21 - .../perf_counter/Documentation/perf-annotate.txt | 29 - .../perf_counter/Documentation/perf-help.txt | 38 - .../perf_counter/Documentation/perf-list.txt | 25 - .../perf_counter/Documentation/perf-record.txt | 42 - .../perf_counter/Documentation/perf-report.txt | 26 - .../perf_counter/Documentation/perf-stat.txt | 66 - .../perf_counter/Documentation/perf-top.txt | 39 - Documentation/perf_counter/Documentation/perf.txt | 24 - Documentation/perf_counter/Makefile | 929 -------------- Documentation/perf_counter/builtin-annotate.c | 1355 -------------------- Documentation/perf_counter/builtin-help.c | 461 ------- Documentation/perf_counter/builtin-list.c | 20 - Documentation/perf_counter/builtin-record.c | 544 -------- Documentation/perf_counter/builtin-report.c | 1291 ------------------- Documentation/perf_counter/builtin-stat.c | 339 ----- Documentation/perf_counter/builtin-top.c | 692 ---------- Documentation/perf_counter/builtin.h | 26 - Documentation/perf_counter/command-list.txt | 10 - Documentation/perf_counter/design.txt | 442 ------- Documentation/perf_counter/perf.c | 428 ------- Documentation/perf_counter/perf.h | 67 - Documentation/perf_counter/util/PERF-VERSION-GEN | 42 - Documentation/perf_counter/util/abspath.c | 117 -- Documentation/perf_counter/util/alias.c | 77 -- Documentation/perf_counter/util/cache.h | 119 -- Documentation/perf_counter/util/color.c | 241 ---- Documentation/perf_counter/util/color.h | 36 - Documentation/perf_counter/util/config.c | 873 ------------- Documentation/perf_counter/util/ctype.c | 26 - Documentation/perf_counter/util/environment.c | 9 - Documentation/perf_counter/util/exec_cmd.c | 165 --- Documentation/perf_counter/util/exec_cmd.h | 13 - .../perf_counter/util/generate-cmdlist.sh | 24 - Documentation/perf_counter/util/help.c | 367 ------ Documentation/perf_counter/util/help.h | 29 - Documentation/perf_counter/util/levenshtein.c | 84 -- Documentation/perf_counter/util/levenshtein.h | 8 - Documentation/perf_counter/util/list.h | 603 --------- Documentation/perf_counter/util/pager.c | 99 -- Documentation/perf_counter/util/parse-events.c | 316 ----- Documentation/perf_counter/util/parse-events.h | 17 - Documentation/perf_counter/util/parse-options.c | 508 -------- Documentation/perf_counter/util/parse-options.h | 174 --- Documentation/perf_counter/util/path.c | 353 ----- Documentation/perf_counter/util/quote.c | 481 ------- Documentation/perf_counter/util/quote.h | 68 - Documentation/perf_counter/util/rbtree.c | 383 ------ Documentation/perf_counter/util/rbtree.h | 171 --- Documentation/perf_counter/util/run-command.c | 395 ------ Documentation/perf_counter/util/run-command.h | 93 -- Documentation/perf_counter/util/sigchain.c | 52 - Documentation/perf_counter/util/sigchain.h | 11 - Documentation/perf_counter/util/strbuf.c | 359 ------ Documentation/perf_counter/util/strbuf.h | 137 -- Documentation/perf_counter/util/string.c | 34 - Documentation/perf_counter/util/string.h | 8 - 
Documentation/perf_counter/util/symbol.c | 574 --------- Documentation/perf_counter/util/symbol.h | 47 - Documentation/perf_counter/util/usage.c | 80 -- Documentation/perf_counter/util/util.h | 410 ------ Documentation/perf_counter/util/wrapper.c | 206 --- tools/perf/.gitignore | 16 + tools/perf/Documentation/Makefile | 300 +++++ tools/perf/Documentation/asciidoc.conf | 91 ++ tools/perf/Documentation/manpage-1.72.xsl | 14 + tools/perf/Documentation/manpage-base.xsl | 35 + tools/perf/Documentation/manpage-bold-literal.xsl | 17 + tools/perf/Documentation/manpage-normal.xsl | 13 + tools/perf/Documentation/manpage-suppress-sp.xsl | 21 + tools/perf/Documentation/perf-annotate.txt | 29 + tools/perf/Documentation/perf-help.txt | 38 + tools/perf/Documentation/perf-list.txt | 25 + tools/perf/Documentation/perf-record.txt | 42 + tools/perf/Documentation/perf-report.txt | 26 + tools/perf/Documentation/perf-stat.txt | 66 + tools/perf/Documentation/perf-top.txt | 39 + tools/perf/Documentation/perf.txt | 24 + tools/perf/Makefile | 929 ++++++++++++++ tools/perf/builtin-annotate.c | 1355 ++++++++++++++++++++ tools/perf/builtin-help.c | 461 +++++++ tools/perf/builtin-list.c | 20 + tools/perf/builtin-record.c | 544 ++++++++ tools/perf/builtin-report.c | 1291 +++++++++++++++++++ tools/perf/builtin-stat.c | 339 +++++ tools/perf/builtin-top.c | 692 ++++++++++ tools/perf/builtin.h | 26 + tools/perf/command-list.txt | 10 + tools/perf/design.txt | 442 +++++++ tools/perf/perf.c | 428 +++++++ tools/perf/perf.h | 67 + tools/perf/util/PERF-VERSION-GEN | 42 + tools/perf/util/abspath.c | 117 ++ tools/perf/util/alias.c | 77 ++ tools/perf/util/cache.h | 119 ++ tools/perf/util/color.c | 241 ++++ tools/perf/util/color.h | 36 + tools/perf/util/config.c | 873 +++++++++++++ tools/perf/util/ctype.c | 26 + tools/perf/util/environment.c | 9 + tools/perf/util/exec_cmd.c | 165 +++ tools/perf/util/exec_cmd.h | 13 + tools/perf/util/generate-cmdlist.sh | 24 + tools/perf/util/help.c | 367 ++++++ tools/perf/util/help.h | 29 + tools/perf/util/levenshtein.c | 84 ++ tools/perf/util/levenshtein.h | 8 + tools/perf/util/list.h | 603 +++++++++ tools/perf/util/pager.c | 99 ++ tools/perf/util/parse-events.c | 316 +++++ tools/perf/util/parse-events.h | 17 + tools/perf/util/parse-options.c | 508 ++++++++ tools/perf/util/parse-options.h | 174 +++ tools/perf/util/path.c | 353 +++++ tools/perf/util/quote.c | 481 +++++++ tools/perf/util/quote.h | 68 + tools/perf/util/rbtree.c | 383 ++++++ tools/perf/util/rbtree.h | 171 +++ tools/perf/util/run-command.c | 395 ++++++ tools/perf/util/run-command.h | 93 ++ tools/perf/util/sigchain.c | 52 + tools/perf/util/sigchain.h | 11 + tools/perf/util/strbuf.c | 359 ++++++ tools/perf/util/strbuf.h | 137 ++ tools/perf/util/string.c | 34 + tools/perf/util/string.h | 8 + tools/perf/util/symbol.c | 574 +++++++++ tools/perf/util/symbol.h | 47 + tools/perf/util/usage.c | 80 ++ tools/perf/util/util.h | 410 ++++++ tools/perf/util/wrapper.c | 206 +++ 138 files changed, 15209 insertions(+), 15209 deletions(-) delete mode 100644 Documentation/perf_counter/.gitignore delete mode 100644 Documentation/perf_counter/Documentation/Makefile delete mode 100644 Documentation/perf_counter/Documentation/asciidoc.conf delete mode 100644 Documentation/perf_counter/Documentation/manpage-1.72.xsl delete mode 100644 Documentation/perf_counter/Documentation/manpage-base.xsl delete mode 100644 Documentation/perf_counter/Documentation/manpage-bold-literal.xsl delete mode 100644 Documentation/perf_counter/Documentation/manpage-normal.xsl delete 
mode 100644 Documentation/perf_counter/Documentation/manpage-suppress-sp.xsl delete mode 100644 Documentation/perf_counter/Documentation/perf-annotate.txt delete mode 100644 Documentation/perf_counter/Documentation/perf-help.txt delete mode 100644 Documentation/perf_counter/Documentation/perf-list.txt delete mode 100644 Documentation/perf_counter/Documentation/perf-record.txt delete mode 100644 Documentation/perf_counter/Documentation/perf-report.txt delete mode 100644 Documentation/perf_counter/Documentation/perf-stat.txt delete mode 100644 Documentation/perf_counter/Documentation/perf-top.txt delete mode 100644 Documentation/perf_counter/Documentation/perf.txt delete mode 100644 Documentation/perf_counter/Makefile delete mode 100644 Documentation/perf_counter/builtin-annotate.c delete mode 100644 Documentation/perf_counter/builtin-help.c delete mode 100644 Documentation/perf_counter/builtin-list.c delete mode 100644 Documentation/perf_counter/builtin-record.c delete mode 100644 Documentation/perf_counter/builtin-report.c delete mode 100644 Documentation/perf_counter/builtin-stat.c delete mode 100644 Documentation/perf_counter/builtin-top.c delete mode 100644 Documentation/perf_counter/builtin.h delete mode 100644 Documentation/perf_counter/command-list.txt delete mode 100644 Documentation/perf_counter/design.txt delete mode 100644 Documentation/perf_counter/perf.c delete mode 100644 Documentation/perf_counter/perf.h delete mode 100755 Documentation/perf_counter/util/PERF-VERSION-GEN delete mode 100644 Documentation/perf_counter/util/abspath.c delete mode 100644 Documentation/perf_counter/util/alias.c delete mode 100644 Documentation/perf_counter/util/cache.h delete mode 100644 Documentation/perf_counter/util/color.c delete mode 100644 Documentation/perf_counter/util/color.h delete mode 100644 Documentation/perf_counter/util/config.c delete mode 100644 Documentation/perf_counter/util/ctype.c delete mode 100644 Documentation/perf_counter/util/environment.c delete mode 100644 Documentation/perf_counter/util/exec_cmd.c delete mode 100644 Documentation/perf_counter/util/exec_cmd.h delete mode 100755 Documentation/perf_counter/util/generate-cmdlist.sh delete mode 100644 Documentation/perf_counter/util/help.c delete mode 100644 Documentation/perf_counter/util/help.h delete mode 100644 Documentation/perf_counter/util/levenshtein.c delete mode 100644 Documentation/perf_counter/util/levenshtein.h delete mode 100644 Documentation/perf_counter/util/list.h delete mode 100644 Documentation/perf_counter/util/pager.c delete mode 100644 Documentation/perf_counter/util/parse-events.c delete mode 100644 Documentation/perf_counter/util/parse-events.h delete mode 100644 Documentation/perf_counter/util/parse-options.c delete mode 100644 Documentation/perf_counter/util/parse-options.h delete mode 100644 Documentation/perf_counter/util/path.c delete mode 100644 Documentation/perf_counter/util/quote.c delete mode 100644 Documentation/perf_counter/util/quote.h delete mode 100644 Documentation/perf_counter/util/rbtree.c delete mode 100644 Documentation/perf_counter/util/rbtree.h delete mode 100644 Documentation/perf_counter/util/run-command.c delete mode 100644 Documentation/perf_counter/util/run-command.h delete mode 100644 Documentation/perf_counter/util/sigchain.c delete mode 100644 Documentation/perf_counter/util/sigchain.h delete mode 100644 Documentation/perf_counter/util/strbuf.c delete mode 100644 Documentation/perf_counter/util/strbuf.h delete mode 100644 Documentation/perf_counter/util/string.c delete 
mode 100644 Documentation/perf_counter/util/string.h delete mode 100644 Documentation/perf_counter/util/symbol.c delete mode 100644 Documentation/perf_counter/util/symbol.h delete mode 100644 Documentation/perf_counter/util/usage.c delete mode 100644 Documentation/perf_counter/util/util.h delete mode 100644 Documentation/perf_counter/util/wrapper.c create mode 100644 tools/perf/.gitignore create mode 100644 tools/perf/Documentation/Makefile create mode 100644 tools/perf/Documentation/asciidoc.conf create mode 100644 tools/perf/Documentation/manpage-1.72.xsl create mode 100644 tools/perf/Documentation/manpage-base.xsl create mode 100644 tools/perf/Documentation/manpage-bold-literal.xsl create mode 100644 tools/perf/Documentation/manpage-normal.xsl create mode 100644 tools/perf/Documentation/manpage-suppress-sp.xsl create mode 100644 tools/perf/Documentation/perf-annotate.txt create mode 100644 tools/perf/Documentation/perf-help.txt create mode 100644 tools/perf/Documentation/perf-list.txt create mode 100644 tools/perf/Documentation/perf-record.txt create mode 100644 tools/perf/Documentation/perf-report.txt create mode 100644 tools/perf/Documentation/perf-stat.txt create mode 100644 tools/perf/Documentation/perf-top.txt create mode 100644 tools/perf/Documentation/perf.txt create mode 100644 tools/perf/Makefile create mode 100644 tools/perf/builtin-annotate.c create mode 100644 tools/perf/builtin-help.c create mode 100644 tools/perf/builtin-list.c create mode 100644 tools/perf/builtin-record.c create mode 100644 tools/perf/builtin-report.c create mode 100644 tools/perf/builtin-stat.c create mode 100644 tools/perf/builtin-top.c create mode 100644 tools/perf/builtin.h create mode 100644 tools/perf/command-list.txt create mode 100644 tools/perf/design.txt create mode 100644 tools/perf/perf.c create mode 100644 tools/perf/perf.h create mode 100755 tools/perf/util/PERF-VERSION-GEN create mode 100644 tools/perf/util/abspath.c create mode 100644 tools/perf/util/alias.c create mode 100644 tools/perf/util/cache.h create mode 100644 tools/perf/util/color.c create mode 100644 tools/perf/util/color.h create mode 100644 tools/perf/util/config.c create mode 100644 tools/perf/util/ctype.c create mode 100644 tools/perf/util/environment.c create mode 100644 tools/perf/util/exec_cmd.c create mode 100644 tools/perf/util/exec_cmd.h create mode 100755 tools/perf/util/generate-cmdlist.sh create mode 100644 tools/perf/util/help.c create mode 100644 tools/perf/util/help.h create mode 100644 tools/perf/util/levenshtein.c create mode 100644 tools/perf/util/levenshtein.h create mode 100644 tools/perf/util/list.h create mode 100644 tools/perf/util/pager.c create mode 100644 tools/perf/util/parse-events.c create mode 100644 tools/perf/util/parse-events.h create mode 100644 tools/perf/util/parse-options.c create mode 100644 tools/perf/util/parse-options.h create mode 100644 tools/perf/util/path.c create mode 100644 tools/perf/util/quote.c create mode 100644 tools/perf/util/quote.h create mode 100644 tools/perf/util/rbtree.c create mode 100644 tools/perf/util/rbtree.h create mode 100644 tools/perf/util/run-command.c create mode 100644 tools/perf/util/run-command.h create mode 100644 tools/perf/util/sigchain.c create mode 100644 tools/perf/util/sigchain.h create mode 100644 tools/perf/util/strbuf.c create mode 100644 tools/perf/util/strbuf.h create mode 100644 tools/perf/util/string.c create mode 100644 tools/perf/util/string.h create mode 100644 tools/perf/util/symbol.c create mode 100644 tools/perf/util/symbol.h create 
mode 100644 tools/perf/util/usage.c create mode 100644 tools/perf/util/util.h create mode 100644 tools/perf/util/wrapper.c diff --git a/Documentation/perf_counter/.gitignore b/Documentation/perf_counter/.gitignore deleted file mode 100644 index d69a759a104..00000000000 --- a/Documentation/perf_counter/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -PERF-BUILD-OPTIONS -PERF-CFLAGS -PERF-GUI-VARS -PERF-VERSION-FILE -perf -perf-help -perf-record -perf-report -perf-stat -perf-top -perf*.1 -perf*.xml -common-cmds.h -tags -TAGS -cscope* diff --git a/Documentation/perf_counter/Documentation/Makefile b/Documentation/perf_counter/Documentation/Makefile deleted file mode 100644 index 5457192e1b4..00000000000 --- a/Documentation/perf_counter/Documentation/Makefile +++ /dev/null @@ -1,300 +0,0 @@ -MAN1_TXT= \ - $(filter-out $(addsuffix .txt, $(ARTICLES) $(SP_ARTICLES)), \ - $(wildcard perf-*.txt)) \ - perf.txt -MAN5_TXT= -MAN7_TXT= - -MAN_TXT = $(MAN1_TXT) $(MAN5_TXT) $(MAN7_TXT) -MAN_XML=$(patsubst %.txt,%.xml,$(MAN_TXT)) -MAN_HTML=$(patsubst %.txt,%.html,$(MAN_TXT)) - -DOC_HTML=$(MAN_HTML) - -ARTICLES = -# with their own formatting rules. -SP_ARTICLES = -API_DOCS = $(patsubst %.txt,%,$(filter-out technical/api-index-skel.txt technical/api-index.txt, $(wildcard technical/api-*.txt))) -SP_ARTICLES += $(API_DOCS) -SP_ARTICLES += technical/api-index - -DOC_HTML += $(patsubst %,%.html,$(ARTICLES) $(SP_ARTICLES)) - -DOC_MAN1=$(patsubst %.txt,%.1,$(MAN1_TXT)) -DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT)) -DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT)) - -prefix?=$(HOME) -bindir?=$(prefix)/bin -htmldir?=$(prefix)/share/doc/perf-doc -pdfdir?=$(prefix)/share/doc/perf-doc -mandir?=$(prefix)/share/man -man1dir=$(mandir)/man1 -man5dir=$(mandir)/man5 -man7dir=$(mandir)/man7 -# DESTDIR= - -ASCIIDOC=asciidoc -ASCIIDOC_EXTRA = -MANPAGE_XSL = manpage-normal.xsl -XMLTO_EXTRA = -INSTALL?=install -RM ?= rm -f -DOC_REF = origin/man -HTML_REF = origin/html - -infodir?=$(prefix)/share/info -MAKEINFO=makeinfo -INSTALL_INFO=install-info -DOCBOOK2X_TEXI=docbook2x-texi -DBLATEX=dblatex -ifndef PERL_PATH - PERL_PATH = /usr/bin/perl -endif - --include ../config.mak.autogen --include ../config.mak - -# -# For asciidoc ... -# -7.1.2, no extra settings are needed. -# 8.0-, set ASCIIDOC8. -# - -# -# For docbook-xsl ... -# -1.68.1, set ASCIIDOC_NO_ROFF? (based on changelog from 1.73.0) -# 1.69.0, no extra settings are needed? -# 1.69.1-1.71.0, set DOCBOOK_SUPPRESS_SP? -# 1.71.1, no extra settings are needed? -# 1.72.0, set DOCBOOK_XSL_172. -# 1.73.0-, set ASCIIDOC_NO_ROFF -# - -# -# If you had been using DOCBOOK_XSL_172 in an attempt to get rid -# of 'the ".ft C" problem' in your generated manpages, and you -# instead ended up with weird characters around callouts, try -# using ASCIIDOC_NO_ROFF instead (it works fine with ASCIIDOC8). -# - -ifdef ASCIIDOC8 -ASCIIDOC_EXTRA += -a asciidoc7compatible -endif -ifdef DOCBOOK_XSL_172 -ASCIIDOC_EXTRA += -a perf-asciidoc-no-roff -MANPAGE_XSL = manpage-1.72.xsl -else - ifdef ASCIIDOC_NO_ROFF - # docbook-xsl after 1.72 needs the regular XSL, but will not - # pass-thru raw roff codes from asciidoc.conf, so turn them off. - ASCIIDOC_EXTRA += -a perf-asciidoc-no-roff - endif -endif -ifdef MAN_BOLD_LITERAL -XMLTO_EXTRA += -m manpage-bold-literal.xsl -endif -ifdef DOCBOOK_SUPPRESS_SP -XMLTO_EXTRA += -m manpage-suppress-sp.xsl -endif - -SHELL_PATH ?= $(SHELL) -# Shell quote; -SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH)) - -# -# Please note that there is a minor bug in asciidoc. 
-# The version after 6.0.3 _will_ include the patch found here: -# http://marc.theaimsgroup.com/?l=perf&m=111558757202243&w=2 -# -# Until that version is released you may have to apply the patch -# yourself - yes, all 6 characters of it! -# - -QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir -QUIET_SUBDIR1 = - -ifneq ($(findstring $(MAKEFLAGS),w),w) -PRINT_DIR = --no-print-directory -else # "make -w" -NO_SUBDIR = : -endif - -ifneq ($(findstring $(MAKEFLAGS),s),s) -ifndef V - QUIET_ASCIIDOC = @echo ' ' ASCIIDOC $@; - QUIET_XMLTO = @echo ' ' XMLTO $@; - QUIET_DB2TEXI = @echo ' ' DB2TEXI $@; - QUIET_MAKEINFO = @echo ' ' MAKEINFO $@; - QUIET_DBLATEX = @echo ' ' DBLATEX $@; - QUIET_XSLTPROC = @echo ' ' XSLTPROC $@; - QUIET_GEN = @echo ' ' GEN $@; - QUIET_STDERR = 2> /dev/null - QUIET_SUBDIR0 = +@subdir= - QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \ - $(MAKE) $(PRINT_DIR) -C $$subdir - export V -endif -endif - -all: html man - -html: $(DOC_HTML) - -$(DOC_HTML) $(DOC_MAN1) $(DOC_MAN5) $(DOC_MAN7): asciidoc.conf - -man: man1 man5 man7 -man1: $(DOC_MAN1) -man5: $(DOC_MAN5) -man7: $(DOC_MAN7) - -info: perf.info perfman.info - -pdf: user-manual.pdf - -install: install-man - -install-man: man - $(INSTALL) -d -m 755 $(DESTDIR)$(man1dir) -# $(INSTALL) -d -m 755 $(DESTDIR)$(man5dir) -# $(INSTALL) -d -m 755 $(DESTDIR)$(man7dir) - $(INSTALL) -m 644 $(DOC_MAN1) $(DESTDIR)$(man1dir) -# $(INSTALL) -m 644 $(DOC_MAN5) $(DESTDIR)$(man5dir) -# $(INSTALL) -m 644 $(DOC_MAN7) $(DESTDIR)$(man7dir) - -install-info: info - $(INSTALL) -d -m 755 $(DESTDIR)$(infodir) - $(INSTALL) -m 644 perf.info perfman.info $(DESTDIR)$(infodir) - if test -r $(DESTDIR)$(infodir)/dir; then \ - $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perf.info ;\ - $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perfman.info ;\ - else \ - echo "No directory found in $(DESTDIR)$(infodir)" >&2 ; \ - fi - -install-pdf: pdf - $(INSTALL) -d -m 755 $(DESTDIR)$(pdfdir) - $(INSTALL) -m 644 user-manual.pdf $(DESTDIR)$(pdfdir) - -install-html: html - '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir) - -../PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE - $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) PERF-VERSION-FILE - --include ../PERF-VERSION-FILE - -# -# Determine "include::" file references in asciidoc files. 
-# -doc.dep : $(wildcard *.txt) build-docdep.perl - $(QUIET_GEN)$(RM) $@+ $@ && \ - $(PERL_PATH) ./build-docdep.perl >$@+ $(QUIET_STDERR) && \ - mv $@+ $@ - --include doc.dep - -cmds_txt = cmds-ancillaryinterrogators.txt \ - cmds-ancillarymanipulators.txt \ - cmds-mainporcelain.txt \ - cmds-plumbinginterrogators.txt \ - cmds-plumbingmanipulators.txt \ - cmds-synchingrepositories.txt \ - cmds-synchelpers.txt \ - cmds-purehelpers.txt \ - cmds-foreignscminterface.txt - -$(cmds_txt): cmd-list.made - -cmd-list.made: cmd-list.perl ../command-list.txt $(MAN1_TXT) - $(QUIET_GEN)$(RM) $@ && \ - $(PERL_PATH) ./cmd-list.perl ../command-list.txt $(QUIET_STDERR) && \ - date >$@ - -clean: - $(RM) *.xml *.xml+ *.html *.html+ *.1 *.5 *.7 - $(RM) *.texi *.texi+ *.texi++ perf.info perfman.info - $(RM) howto-index.txt howto/*.html doc.dep - $(RM) technical/api-*.html technical/api-index.txt - $(RM) $(cmds_txt) *.made - -$(MAN_HTML): %.html : %.txt - $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ - $(ASCIIDOC) -b xhtml11 -d manpage -f asciidoc.conf \ - $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \ - mv $@+ $@ - -%.1 %.5 %.7 : %.xml - $(QUIET_XMLTO)$(RM) $@ && \ - xmlto -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $< - -%.xml : %.txt - $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ - $(ASCIIDOC) -b docbook -d manpage -f asciidoc.conf \ - $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \ - mv $@+ $@ - -XSLT = docbook.xsl -XSLTOPTS = --xinclude --stringparam html.stylesheet docbook-xsl.css - -user-manual.html: user-manual.xml - $(QUIET_XSLTPROC)xsltproc $(XSLTOPTS) -o $@ $(XSLT) $< - -perf.info: user-manual.texi - $(QUIET_MAKEINFO)$(MAKEINFO) --no-split -o $@ user-manual.texi - -user-manual.texi: user-manual.xml - $(QUIET_DB2TEXI)$(RM) $@+ $@ && \ - $(DOCBOOK2X_TEXI) user-manual.xml --encoding=UTF-8 --to-stdout >$@++ && \ - $(PERL_PATH) fix-texi.perl <$@++ >$@+ && \ - rm $@++ && \ - mv $@+ $@ - -user-manual.pdf: user-manual.xml - $(QUIET_DBLATEX)$(RM) $@+ $@ && \ - $(DBLATEX) -o $@+ -p /etc/asciidoc/dblatex/asciidoc-dblatex.xsl -s /etc/asciidoc/dblatex/asciidoc-dblatex.sty $< && \ - mv $@+ $@ - -perfman.texi: $(MAN_XML) cat-texi.perl - $(QUIET_DB2TEXI)$(RM) $@+ $@ && \ - ($(foreach xml,$(MAN_XML),$(DOCBOOK2X_TEXI) --encoding=UTF-8 \ - --to-stdout $(xml) &&) true) > $@++ && \ - $(PERL_PATH) cat-texi.perl $@ <$@++ >$@+ && \ - rm $@++ && \ - mv $@+ $@ - -perfman.info: perfman.texi - $(QUIET_MAKEINFO)$(MAKEINFO) --no-split --no-validate $*.texi - -$(patsubst %.txt,%.texi,$(MAN_TXT)): %.texi : %.xml - $(QUIET_DB2TEXI)$(RM) $@+ $@ && \ - $(DOCBOOK2X_TEXI) --to-stdout $*.xml >$@+ && \ - mv $@+ $@ - -howto-index.txt: howto-index.sh $(wildcard howto/*.txt) - $(QUIET_GEN)$(RM) $@+ $@ && \ - '$(SHELL_PATH_SQ)' ./howto-index.sh $(wildcard howto/*.txt) >$@+ && \ - mv $@+ $@ - -$(patsubst %,%.html,$(ARTICLES)) : %.html : %.txt - $(QUIET_ASCIIDOC)$(ASCIIDOC) -b xhtml11 $*.txt - -WEBDOC_DEST = /pub/software/tools/perf/docs - -$(patsubst %.txt,%.html,$(wildcard howto/*.txt)): %.html : %.txt - $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ - sed -e '1,/^$$/d' $< | $(ASCIIDOC) -b xhtml11 - >$@+ && \ - mv $@+ $@ - -install-webdoc : html - '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(WEBDOC_DEST) - -quick-install: quick-install-man - -quick-install-man: - '$(SHELL_PATH_SQ)' ./install-doc-quick.sh $(DOC_REF) $(DESTDIR)$(mandir) - -quick-install-html: - '$(SHELL_PATH_SQ)' ./install-doc-quick.sh $(HTML_REF) $(DESTDIR)$(htmldir) - -.PHONY: .FORCE-PERF-VERSION-FILE diff --git a/Documentation/perf_counter/Documentation/asciidoc.conf 
b/Documentation/perf_counter/Documentation/asciidoc.conf deleted file mode 100644 index 356b23a4033..00000000000 --- a/Documentation/perf_counter/Documentation/asciidoc.conf +++ /dev/null @@ -1,91 +0,0 @@ -## linkperf: macro -# -# Usage: linkperf:command[manpage-section] -# -# Note, {0} is the manpage section, while {target} is the command. -# -# Show PERF link as: (
); if section is defined, else just show -# the command. - -[macros] -(?su)[\\]?(?Plinkperf):(?P\S*?)\[(?P.*?)\]= - -[attributes] -asterisk=* -plus=+ -caret=^ -startsb=[ -endsb=] -tilde=~ - -ifdef::backend-docbook[] -[linkperf-inlinemacro] -{0%{target}} -{0#} -{0#{target}{0}} -{0#} -endif::backend-docbook[] - -ifdef::backend-docbook[] -ifndef::perf-asciidoc-no-roff[] -# "unbreak" docbook-xsl v1.68 for manpages. v1.69 works with or without this. -# v1.72 breaks with this because it replaces dots not in roff requests. -[listingblock] -{title} - -ifdef::doctype-manpage[] - .ft C -endif::doctype-manpage[] -| -ifdef::doctype-manpage[] - .ft -endif::doctype-manpage[] - -{title#} -endif::perf-asciidoc-no-roff[] - -ifdef::perf-asciidoc-no-roff[] -ifdef::doctype-manpage[] -# The following two small workarounds insert a simple paragraph after screen -[listingblock] -{title} - -| - -{title#} - -[verseblock] -{title} -{title%} -{title#} -| - -{title#} -{title%} -endif::doctype-manpage[] -endif::perf-asciidoc-no-roff[] -endif::backend-docbook[] - -ifdef::doctype-manpage[] -ifdef::backend-docbook[] -[header] -template::[header-declarations] - - -{mantitle} -{manvolnum} -perf -{perf_version} -perf Manual - - - {manname} - {manpurpose} - -endif::backend-docbook[] -endif::doctype-manpage[] - -ifdef::backend-xhtml11[] -[linkperf-inlinemacro] -{target}{0?({0})} -endif::backend-xhtml11[] diff --git a/Documentation/perf_counter/Documentation/manpage-1.72.xsl b/Documentation/perf_counter/Documentation/manpage-1.72.xsl deleted file mode 100644 index b4d315cb8c4..00000000000 --- a/Documentation/perf_counter/Documentation/manpage-1.72.xsl +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - - - - - diff --git a/Documentation/perf_counter/Documentation/manpage-base.xsl b/Documentation/perf_counter/Documentation/manpage-base.xsl deleted file mode 100644 index a264fa61609..00000000000 --- a/Documentation/perf_counter/Documentation/manpage-base.xsl +++ /dev/null @@ -1,35 +0,0 @@ - - - - - - - - - - - - - - sp - - - - - - - - br - - - diff --git a/Documentation/perf_counter/Documentation/manpage-bold-literal.xsl b/Documentation/perf_counter/Documentation/manpage-bold-literal.xsl deleted file mode 100644 index 608eb5df628..00000000000 --- a/Documentation/perf_counter/Documentation/manpage-bold-literal.xsl +++ /dev/null @@ -1,17 +0,0 @@ - - - - - - - fB - - - fR - - - diff --git a/Documentation/perf_counter/Documentation/manpage-normal.xsl b/Documentation/perf_counter/Documentation/manpage-normal.xsl deleted file mode 100644 index a48f5b11f3d..00000000000 --- a/Documentation/perf_counter/Documentation/manpage-normal.xsl +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - -\ -. 
- - diff --git a/Documentation/perf_counter/Documentation/manpage-suppress-sp.xsl b/Documentation/perf_counter/Documentation/manpage-suppress-sp.xsl deleted file mode 100644 index a63c7632a87..00000000000 --- a/Documentation/perf_counter/Documentation/manpage-suppress-sp.xsl +++ /dev/null @@ -1,21 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/Documentation/perf_counter/Documentation/perf-annotate.txt b/Documentation/perf_counter/Documentation/perf-annotate.txt deleted file mode 100644 index c9dcade0683..00000000000 --- a/Documentation/perf_counter/Documentation/perf-annotate.txt +++ /dev/null @@ -1,29 +0,0 @@ -perf-annotate(1) -============== - -NAME ----- -perf-annotate - Read perf.data (created by perf record) and display annotated code - -SYNOPSIS --------- -[verse] -'perf annotate' [-i | --input=file] symbol_name - -DESCRIPTION ------------ -This command reads the input file and displays an annotated version of the -code. If the object file has debug symbols then the source code will be -displayed alongside assembly code. - -If there is no debug info in the object, then annotated assembly is displayed. - -OPTIONS -------- --i:: ---input=:: - Input file name. (default: perf.data) - -SEE ALSO --------- -linkperf:perf-record[1] diff --git a/Documentation/perf_counter/Documentation/perf-help.txt b/Documentation/perf_counter/Documentation/perf-help.txt deleted file mode 100644 index 514391818d1..00000000000 --- a/Documentation/perf_counter/Documentation/perf-help.txt +++ /dev/null @@ -1,38 +0,0 @@ -perf-help(1) -============ - -NAME ----- -perf-help - display help information about perf - -SYNOPSIS --------- -'perf help' [-a|--all] [COMMAND] - -DESCRIPTION ------------ - -With no options and no COMMAND given, the synopsis of the 'perf' -command and a list of the most commonly used perf commands are printed -on the standard output. - -If the option '--all' or '-a' is given, then all available commands are -printed on the standard output. - -If a perf command is named, a manual page for that command is brought -up. The 'man' program is used by default for this purpose, but this -can be overridden by other options or configuration variables. - -Note that `perf --help ...` is identical to `perf help ...` because the -former is internally converted into the latter. - -OPTIONS -------- --a:: ---all:: - Prints all the available commands on the standard output. This - option supersedes any other option. - -PERF ----- -Part of the linkperf:perf[1] suite diff --git a/Documentation/perf_counter/Documentation/perf-list.txt b/Documentation/perf_counter/Documentation/perf-list.txt deleted file mode 100644 index 8290b942266..00000000000 --- a/Documentation/perf_counter/Documentation/perf-list.txt +++ /dev/null @@ -1,25 +0,0 @@ -perf-list(1) -============ - -NAME ----- -perf-list - List all symbolic event types - -SYNOPSIS --------- -[verse] -'perf list' - -DESCRIPTION ------------ -This command displays the symbolic event types which can be selected in the -various perf commands with the -e option. 
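As an aside on the note in perf-help.txt above that `perf --help ...` is internally converted into `perf help ...`: the conversion can be as small as skipping the leading dashes. A hypothetical sketch (the real dispatcher in perf.c does this while walking its global options; the names here are illustrative):

#include <stdio.h>
#include <string.h>

int main(int argc, const char **argv)
{
	const char *cmd = argc > 1 ? argv[1] : "help";

	/* "--help" and "--version" alias the help/version builtins: */
	if (!strcmp(cmd, "--help") || !strcmp(cmd, "--version"))
		cmd += 2;		/* "--help" -> "help" */

	printf("would dispatch builtin: %s\n", cmd);
	return 0;
}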
- -OPTIONS -------- -None - -SEE ALSO --------- -linkperf:perf-stat[1], linkperf:perf-top[1], -linkperf:perf-record[1] diff --git a/Documentation/perf_counter/Documentation/perf-record.txt b/Documentation/perf_counter/Documentation/perf-record.txt deleted file mode 100644 index 1dbc1eeb4c0..00000000000 --- a/Documentation/perf_counter/Documentation/perf-record.txt +++ /dev/null @@ -1,42 +0,0 @@ -perf-record(1) -============== - -NAME ----- -perf-record - Run a command and record its profile into perf.data - -SYNOPSIS --------- -[verse] -'perf record' [-e | --event=EVENT] [-l] [-a] -'perf record' [-e | --event=EVENT] [-l] [-a] -- [] - -DESCRIPTION ------------ -This command runs a command and gathers a performance counter profile -from it, into perf.data - without displaying anything. - -This file can then be inspected later on, using 'perf report'. - - -OPTIONS -------- -...:: - Any command you can specify in a shell. - --e:: ---event=:: - Select the PMU event. Selection can be a symbolic event name - (use 'perf list' to list all events) or a raw PMU - event (eventsel+umask) in the form of rNNN where NNN is a - hexadecimal event descriptor. - --a:: - system-wide collection - --l:: - scale counter values - -SEE ALSO --------- -linkperf:perf-stat[1], linkperf:perf-list[1] diff --git a/Documentation/perf_counter/Documentation/perf-report.txt b/Documentation/perf_counter/Documentation/perf-report.txt deleted file mode 100644 index 52d3fc6846a..00000000000 --- a/Documentation/perf_counter/Documentation/perf-report.txt +++ /dev/null @@ -1,26 +0,0 @@ -perf-report(1) -============== - -NAME ----- -perf-report - Read perf.data (created by perf record) and display the profile - -SYNOPSIS --------- -[verse] -'perf report' [-i | --input=file] - -DESCRIPTION ------------ -This command displays the performance counter profile information recorded -via perf report. - -OPTIONS -------- --i:: ---input=:: - Input file name. (default: perf.data) - -SEE ALSO --------- -linkperf:perf-stat[1] diff --git a/Documentation/perf_counter/Documentation/perf-stat.txt b/Documentation/perf_counter/Documentation/perf-stat.txt deleted file mode 100644 index c368a72721d..00000000000 --- a/Documentation/perf_counter/Documentation/perf-stat.txt +++ /dev/null @@ -1,66 +0,0 @@ -perf-stat(1) -============ - -NAME ----- -perf-stat - Run a command and gather performance counter statistics - -SYNOPSIS --------- -[verse] -'perf stat' [-e | --event=EVENT] [-l] [-a] -'perf stat' [-e | --event=EVENT] [-l] [-a] -- [] - -DESCRIPTION ------------ -This command runs a command and gathers performance counter statistics -from it. - - -OPTIONS -------- -...:: - Any command you can specify in a shell. - - --e:: ---event=:: - Select the PMU event. Selection can be a symbolic event name - (use 'perf list' to list all events) or a raw PMU - event (eventsel+umask) in the form of rNNN where NNN is a - hexadecimal event descriptor. 
- --i:: ---inherit:: - child tasks inherit counters --p:: ---pid=:: - stat events on existing pid - --a:: - system-wide collection - --l:: - scale counter values - -EXAMPLES --------- - -$ perf stat -- make -j - - Performance counter stats for 'make -j': - - 8117.370256 task clock ticks # 11.281 CPU utilization factor - 678 context switches # 0.000 M/sec - 133 CPU migrations # 0.000 M/sec - 235724 pagefaults # 0.029 M/sec - 24821162526 CPU cycles # 3057.784 M/sec - 18687303457 instructions # 2302.138 M/sec - 172158895 cache references # 21.209 M/sec - 27075259 cache misses # 3.335 M/sec - - Wall-clock time elapsed: 719.554352 msecs - -SEE ALSO --------- -linkperf:perf-top[1], linkperf:perf-list[1] diff --git a/Documentation/perf_counter/Documentation/perf-top.txt b/Documentation/perf_counter/Documentation/perf-top.txt deleted file mode 100644 index 539d0128972..00000000000 --- a/Documentation/perf_counter/Documentation/perf-top.txt +++ /dev/null @@ -1,39 +0,0 @@ -perf-top(1) -=========== - -NAME ----- -perf-top - Run a command and profile it - -SYNOPSIS --------- -[verse] -'perf top' [-e | --event=EVENT] [-l] [-a] - -DESCRIPTION ------------ -This command runs a command and gathers a performance counter profile -from it. - - -OPTIONS -------- -...:: - Any command you can specify in a shell. - --e:: ---event=:: - Select the PMU event. Selection can be a symbolic event name - (use 'perf list' to list all events) or a raw PMU - event (eventsel+umask) in the form of rNNN where NNN is a - hexadecimal event descriptor. - --a:: - system-wide collection - --l:: - scale counter values - -SEE ALSO --------- -linkperf:perf-stat[1], linkperf:perf-list[1] diff --git a/Documentation/perf_counter/Documentation/perf.txt b/Documentation/perf_counter/Documentation/perf.txt deleted file mode 100644 index 69c83255719..00000000000 --- a/Documentation/perf_counter/Documentation/perf.txt +++ /dev/null @@ -1,24 +0,0 @@ -perf(1) -======= - -NAME ----- -perf - Performance analysis tools for Linux - -SYNOPSIS --------- -[verse] -'perf' [--version] [--help] COMMAND [ARGS] - -DESCRIPTION ------------ -Performance counters for Linux are are a new kernel-based subsystem -that provide a framework for all things performance analysis. It -covers hardware level (CPU/PMU, Performance Monitoring Unit) features -and software features (software counters, tracepoints) as well. - -SEE ALSO --------- -linkperf:perf-stat[1], linkperf:perf-top[1], -linkperf:perf-record[1], linkperf:perf-report[1], -linkperf:perf-list[1] diff --git a/Documentation/perf_counter/Makefile b/Documentation/perf_counter/Makefile deleted file mode 100644 index 0cbd5d6874e..00000000000 --- a/Documentation/perf_counter/Makefile +++ /dev/null @@ -1,929 +0,0 @@ -# The default target of this Makefile is... -all:: - -# Define V=1 to have a more verbose compile. -# -# Define SNPRINTF_RETURNS_BOGUS if your are on a system which snprintf() -# or vsnprintf() return -1 instead of number of characters which would -# have been written to the final string if enough space had been available. -# -# Define FREAD_READS_DIRECTORIES if your are on a system which succeeds -# when attempting to read from an fopen'ed directory. -# -# Define NO_OPENSSL environment variable if you do not have OpenSSL. -# This also implies MOZILLA_SHA1. -# -# Define CURLDIR=/foo/bar if your curl header and library files are in -# /foo/bar/include and /foo/bar/lib directories. 
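A quick sanity check of the perf stat example further above (inferred purely from the numbers shown, not from tool internals): the per-event rates are normalized by task-clock time rather than wall-clock time, and the CPU utilization factor is their ratio:

\[
\frac{8117.370256\ \text{ms task clock}}{719.554352\ \text{ms wall clock}} \approx 11.281,
\qquad
\frac{24\,821\,162\,526\ \text{cycles}}{8.117370256\ \text{s}} \approx 3057.784\ \text{M/sec}.
\]

This is why a parallel build such as make -j can show a utilization factor far above 1: it is effectively the average number of busy CPUs over the run.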
-# -# Define EXPATDIR=/foo/bar if your expat header and library files are in -# /foo/bar/include and /foo/bar/lib directories. -# -# Define NO_D_INO_IN_DIRENT if you don't have d_ino in your struct dirent. -# -# Define NO_D_TYPE_IN_DIRENT if your platform defines DT_UNKNOWN but lacks -# d_type in struct dirent (latest Cygwin -- will be fixed soonish). -# -# Define NO_C99_FORMAT if your formatted IO functions (printf/scanf et.al.) -# do not support the 'size specifiers' introduced by C99, namely ll, hh, -# j, z, t. (representing long long int, char, intmax_t, size_t, ptrdiff_t). -# some C compilers supported these specifiers prior to C99 as an extension. -# -# Define NO_STRCASESTR if you don't have strcasestr. -# -# Define NO_MEMMEM if you don't have memmem. -# -# Define NO_STRTOUMAX if you don't have strtoumax in the C library. -# If your compiler also does not support long long or does not have -# strtoull, define NO_STRTOULL. -# -# Define NO_SETENV if you don't have setenv in the C library. -# -# Define NO_UNSETENV if you don't have unsetenv in the C library. -# -# Define NO_MKDTEMP if you don't have mkdtemp in the C library. -# -# Define NO_SYS_SELECT_H if you don't have sys/select.h. -# -# Define NO_SYMLINK_HEAD if you never want .perf/HEAD to be a symbolic link. -# Enable it on Windows. By default, symrefs are still used. -# -# Define NO_SVN_TESTS if you want to skip time-consuming SVN interoperability -# tests. These tests take up a significant amount of the total test time -# but are not needed unless you plan to talk to SVN repos. -# -# Define NO_FINK if you are building on Darwin/Mac OS X, have Fink -# installed in /sw, but don't want PERF to link against any libraries -# installed there. If defined you may specify your own (or Fink's) -# include directories and library directories by defining CFLAGS -# and LDFLAGS appropriately. -# -# Define NO_DARWIN_PORTS if you are building on Darwin/Mac OS X, -# have DarwinPorts installed in /opt/local, but don't want PERF to -# link against any libraries installed there. If defined you may -# specify your own (or DarwinPort's) include directories and -# library directories by defining CFLAGS and LDFLAGS appropriately. -# -# Define PPC_SHA1 environment variable when running make to make use of -# a bundled SHA1 routine optimized for PowerPC. -# -# Define ARM_SHA1 environment variable when running make to make use of -# a bundled SHA1 routine optimized for ARM. -# -# Define MOZILLA_SHA1 environment variable when running make to make use of -# a bundled SHA1 routine coming from Mozilla. It is GPL'd and should be fast -# on non-x86 architectures (e.g. PowerPC), while the OpenSSL version (default -# choice) has very fast version optimized for i586. -# -# Define NEEDS_SSL_WITH_CRYPTO if you need -lcrypto with -lssl (Darwin). -# -# Define NEEDS_LIBICONV if linking with libc is not enough (Darwin). -# -# Define NEEDS_SOCKET if linking with libc is not enough (SunOS, -# Patrick Mauritz). -# -# Define NO_MMAP if you want to avoid mmap. -# -# Define NO_PTHREADS if you do not have or do not want to use Pthreads. -# -# Define NO_PREAD if you have a problem with pread() system call (e.g. -# cygwin.dll before v1.5.22). -# -# Define NO_FAST_WORKING_DIRECTORY if accessing objects in pack files is -# generally faster on your platform than accessing the working directory. -# -# Define NO_TRUSTABLE_FILEMODE if your filesystem may claim to support -# the executable mode bit, but doesn't really do so. 
-# -# Define NO_IPV6 if you lack IPv6 support and getaddrinfo(). -# -# Define NO_SOCKADDR_STORAGE if your platform does not have struct -# sockaddr_storage. -# -# Define NO_ICONV if your libc does not properly support iconv. -# -# Define OLD_ICONV if your library has an old iconv(), where the second -# (input buffer pointer) parameter is declared with type (const char **). -# -# Define NO_DEFLATE_BOUND if your zlib does not have deflateBound. -# -# Define NO_R_TO_GCC_LINKER if your gcc does not like "-R/path/lib" -# that tells runtime paths to dynamic libraries; -# "-Wl,-rpath=/path/lib" is used instead. -# -# Define USE_NSEC below if you want perf to care about sub-second file mtimes -# and ctimes. Note that you need recent glibc (at least 2.2.4) for this, and -# it will BREAK YOUR LOCAL DIFFS! show-diff and anything using it will likely -# randomly break unless your underlying filesystem supports those sub-second -# times (my ext3 doesn't). -# -# Define USE_ST_TIMESPEC if your "struct stat" uses "st_ctimespec" instead of -# "st_ctim" -# -# Define NO_NSEC if your "struct stat" does not have "st_ctim.tv_nsec" -# available. This automatically turns USE_NSEC off. -# -# Define USE_STDEV below if you want perf to care about the underlying device -# change being considered an inode change from the update-index perspective. -# -# Define NO_ST_BLOCKS_IN_STRUCT_STAT if your platform does not have st_blocks -# field that counts the on-disk footprint in 512-byte blocks. -# -# Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8 -# -# Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72. -# -# Define NO_PERL_MAKEMAKER if you cannot use Makefiles generated by perl's -# MakeMaker (e.g. using ActiveState under Cygwin). -# -# Define NO_PERL if you do not want Perl scripts or libraries at all. -# -# Define INTERNAL_QSORT to use Git's implementation of qsort(), which -# is a simplified version of the merge sort used in glibc. This is -# recommended if Git triggers O(n^2) behavior in your platform's qsort(). -# -# Define NO_EXTERNAL_GREP if you don't want "perf grep" to ever call -# your external grep (e.g., if your system lacks grep, if its grep is -# broken, or spawning external process is slower than built-in grep perf has). - -PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE - @$(SHELL_PATH) util/PERF-VERSION-GEN --include PERF-VERSION-FILE - -uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not') -uname_M := $(shell sh -c 'uname -m 2>/dev/null || echo not') -uname_O := $(shell sh -c 'uname -o 2>/dev/null || echo not') -uname_R := $(shell sh -c 'uname -r 2>/dev/null || echo not') -uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not') -uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') - -# CFLAGS and LDFLAGS are for the users to override from the command line. - -CFLAGS = -ggdb3 -Wall -Werror -Wstrict-prototypes -Wmissing-declarations -Wmissing-prototypes -std=gnu99 -Wdeclaration-after-statement -O6 -LDFLAGS = -lpthread -lrt -lelf -ALL_CFLAGS = $(CFLAGS) -ALL_LDFLAGS = $(LDFLAGS) -STRIP ?= strip - -# Among the variables below, these: -# perfexecdir -# template_dir -# mandir -# infodir -# htmldir -# ETC_PERFCONFIG (but not sysconfdir) -# can be specified as a relative path some/where/else; -# this is interpreted as relative to $(prefix) and "perf" at -# runtime figures out where they are based on the path to the executable. -# This can help installing the suite in a relocatable way. 
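The NO_<feature> switches above all follow one compat pattern: defining the knob adds -DNO_FEATURE to COMPAT_CFLAGS and a fallback object to COMPAT_OBJS. A hypothetical sketch of what a compat/strcasestr.c fallback might look like (the real compat/ sources are not part of this patch; the gitstrcasestr name and a "#define strcasestr gitstrcasestr" redirect under NO_STRCASESTR are assumptions borrowed from git's layout):

/* Portable strcasestr() fallback, linked as compat/strcasestr.o
 * when the build is run with NO_STRCASESTR defined. */
#include <assert.h>
#include <ctype.h>
#include <string.h>

char *gitstrcasestr(const char *haystack, const char *needle)
{
	size_t nlen = strlen(needle);

	for (; *haystack; haystack++) {
		size_t i;

		for (i = 0; i < nlen; i++) {
			if (tolower((unsigned char)haystack[i]) !=
			    tolower((unsigned char)needle[i]))
				break;
		}
		if (i == nlen)
			return (char *)haystack;	/* match found */
	}
	return nlen ? NULL : (char *)haystack;	/* empty needle matches */
}

int main(void)
{
	assert(gitstrcasestr("PerfCounters", "count") != NULL);
	assert(gitstrcasestr("PerfCounters", "xyz") == NULL);
	return 0;
}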
- -prefix = $(HOME) -bindir_relative = bin -bindir = $(prefix)/$(bindir_relative) -mandir = share/man -infodir = share/info -perfexecdir = libexec/perf-core -sharedir = $(prefix)/share -template_dir = share/perf-core/templates -htmldir = share/doc/perf-doc -ifeq ($(prefix),/usr) -sysconfdir = /etc -ETC_PERFCONFIG = $(sysconfdir)/perfconfig -else -sysconfdir = $(prefix)/etc -ETC_PERFCONFIG = etc/perfconfig -endif -lib = lib -# DESTDIR= - -export prefix bindir sharedir sysconfdir - -CC = gcc -AR = ar -RM = rm -f -TAR = tar -FIND = find -INSTALL = install -RPMBUILD = rpmbuild -PTHREAD_LIBS = -lpthread - -# sparse is architecture-neutral, which means that we need to tell it -# explicitly what architecture to check for. Fix this up for yours.. -SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ - - - -### --- END CONFIGURATION SECTION --- - -# Those must not be GNU-specific; they are shared with perl/ which may -# be built by a different compiler. (Note that this is an artifact now -# but it still might be nice to keep that distinction.) -BASIC_CFLAGS = -BASIC_LDFLAGS = - -# Guard against environment variables -BUILTIN_OBJS = -BUILT_INS = -COMPAT_CFLAGS = -COMPAT_OBJS = -LIB_H = -LIB_OBJS = -SCRIPT_PERL = -SCRIPT_SH = -TEST_PROGRAMS = - -# -# No scripts right now: -# - -# SCRIPT_SH += perf-am.sh - -# -# No Perl scripts right now: -# - -# SCRIPT_PERL += perf-add--interactive.perl - -SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) \ - $(patsubst %.perl,%,$(SCRIPT_PERL)) - -# Empty... -EXTRA_PROGRAMS = - -# ... and all the rest that could be moved out of bindir to perfexecdir -PROGRAMS += $(EXTRA_PROGRAMS) - -# -# Single 'perf' binary right now: -# -PROGRAMS += perf - -# List built-in command $C whose implementation cmd_$C() is not in -# builtin-$C.o but is linked in as part of some other command. -# -# None right now: -# -# BUILT_INS += perf-init $X - -# what 'all' will build and 'install' will install, in perfexecdir -ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS) - -# what 'all' will build but not install in perfexecdir -OTHER_PROGRAMS = perf$X - -# Set paths to tools early so that they can be used for version tests. 
-ifndef SHELL_PATH - SHELL_PATH = /bin/sh -endif -ifndef PERL_PATH - PERL_PATH = /usr/bin/perl -endif - -export PERL_PATH - -LIB_FILE=libperf.a - -LIB_H += ../../include/linux/perf_counter.h -LIB_H += perf.h -LIB_H += util/list.h -LIB_H += util/rbtree.h -LIB_H += util/levenshtein.h -LIB_H += util/parse-options.h -LIB_H += util/parse-events.h -LIB_H += util/quote.h -LIB_H += util/util.h -LIB_H += util/help.h -LIB_H += util/strbuf.h -LIB_H += util/string.h -LIB_H += util/run-command.h -LIB_H += util/sigchain.h -LIB_H += util/symbol.h -LIB_H += util/color.h - -LIB_OBJS += util/abspath.o -LIB_OBJS += util/alias.o -LIB_OBJS += util/config.o -LIB_OBJS += util/ctype.o -LIB_OBJS += util/environment.o -LIB_OBJS += util/exec_cmd.o -LIB_OBJS += util/help.o -LIB_OBJS += util/levenshtein.o -LIB_OBJS += util/parse-options.o -LIB_OBJS += util/parse-events.o -LIB_OBJS += util/path.o -LIB_OBJS += util/rbtree.o -LIB_OBJS += util/run-command.o -LIB_OBJS += util/quote.o -LIB_OBJS += util/strbuf.o -LIB_OBJS += util/string.o -LIB_OBJS += util/usage.o -LIB_OBJS += util/wrapper.o -LIB_OBJS += util/sigchain.o -LIB_OBJS += util/symbol.o -LIB_OBJS += util/color.o -LIB_OBJS += util/pager.o - -BUILTIN_OBJS += builtin-annotate.o -BUILTIN_OBJS += builtin-help.o -BUILTIN_OBJS += builtin-list.o -BUILTIN_OBJS += builtin-record.o -BUILTIN_OBJS += builtin-report.o -BUILTIN_OBJS += builtin-stat.o -BUILTIN_OBJS += builtin-top.o - -PERFLIBS = $(LIB_FILE) -EXTLIBS = - -# -# Platform specific tweaks -# - -# We choose to avoid "if .. else if .. else .. endif endif" -# because maintaining the nesting to match is a pain. If -# we had "elif" things would have been much nicer... - --include config.mak.autogen --include config.mak - -ifeq ($(uname_S),Darwin) - ifndef NO_FINK - ifeq ($(shell test -d /sw/lib && echo y),y) - BASIC_CFLAGS += -I/sw/include - BASIC_LDFLAGS += -L/sw/lib - endif - endif - ifndef NO_DARWIN_PORTS - ifeq ($(shell test -d /opt/local/lib && echo y),y) - BASIC_CFLAGS += -I/opt/local/include - BASIC_LDFLAGS += -L/opt/local/lib - endif - endif - PTHREAD_LIBS = -endif - -ifndef CC_LD_DYNPATH - ifdef NO_R_TO_GCC_LINKER - # Some gcc does not accept and pass -R to the linker to specify - # the runtime dynamic library path. 
- CC_LD_DYNPATH = -Wl,-rpath, - else - CC_LD_DYNPATH = -R - endif -endif - -ifdef ZLIB_PATH - BASIC_CFLAGS += -I$(ZLIB_PATH)/include - EXTLIBS += -L$(ZLIB_PATH)/$(lib) $(CC_LD_DYNPATH)$(ZLIB_PATH)/$(lib) -endif -EXTLIBS += -lz - -ifdef NEEDS_SOCKET - EXTLIBS += -lsocket -endif -ifdef NEEDS_NSL - EXTLIBS += -lnsl -endif -ifdef NO_D_TYPE_IN_DIRENT - BASIC_CFLAGS += -DNO_D_TYPE_IN_DIRENT -endif -ifdef NO_D_INO_IN_DIRENT - BASIC_CFLAGS += -DNO_D_INO_IN_DIRENT -endif -ifdef NO_ST_BLOCKS_IN_STRUCT_STAT - BASIC_CFLAGS += -DNO_ST_BLOCKS_IN_STRUCT_STAT -endif -ifdef USE_NSEC - BASIC_CFLAGS += -DUSE_NSEC -endif -ifdef USE_ST_TIMESPEC - BASIC_CFLAGS += -DUSE_ST_TIMESPEC -endif -ifdef NO_NSEC - BASIC_CFLAGS += -DNO_NSEC -endif -ifdef NO_C99_FORMAT - BASIC_CFLAGS += -DNO_C99_FORMAT -endif -ifdef SNPRINTF_RETURNS_BOGUS - COMPAT_CFLAGS += -DSNPRINTF_RETURNS_BOGUS - COMPAT_OBJS += compat/snprintf.o -endif -ifdef FREAD_READS_DIRECTORIES - COMPAT_CFLAGS += -DFREAD_READS_DIRECTORIES - COMPAT_OBJS += compat/fopen.o -endif -ifdef NO_SYMLINK_HEAD - BASIC_CFLAGS += -DNO_SYMLINK_HEAD -endif -ifdef NO_STRCASESTR - COMPAT_CFLAGS += -DNO_STRCASESTR - COMPAT_OBJS += compat/strcasestr.o -endif -ifdef NO_STRTOUMAX - COMPAT_CFLAGS += -DNO_STRTOUMAX - COMPAT_OBJS += compat/strtoumax.o -endif -ifdef NO_STRTOULL - COMPAT_CFLAGS += -DNO_STRTOULL -endif -ifdef NO_SETENV - COMPAT_CFLAGS += -DNO_SETENV - COMPAT_OBJS += compat/setenv.o -endif -ifdef NO_MKDTEMP - COMPAT_CFLAGS += -DNO_MKDTEMP - COMPAT_OBJS += compat/mkdtemp.o -endif -ifdef NO_UNSETENV - COMPAT_CFLAGS += -DNO_UNSETENV - COMPAT_OBJS += compat/unsetenv.o -endif -ifdef NO_SYS_SELECT_H - BASIC_CFLAGS += -DNO_SYS_SELECT_H -endif -ifdef NO_MMAP - COMPAT_CFLAGS += -DNO_MMAP - COMPAT_OBJS += compat/mmap.o -else - ifdef USE_WIN32_MMAP - COMPAT_CFLAGS += -DUSE_WIN32_MMAP - COMPAT_OBJS += compat/win32mmap.o - endif -endif -ifdef NO_PREAD - COMPAT_CFLAGS += -DNO_PREAD - COMPAT_OBJS += compat/pread.o -endif -ifdef NO_FAST_WORKING_DIRECTORY - BASIC_CFLAGS += -DNO_FAST_WORKING_DIRECTORY -endif -ifdef NO_TRUSTABLE_FILEMODE - BASIC_CFLAGS += -DNO_TRUSTABLE_FILEMODE -endif -ifdef NO_IPV6 - BASIC_CFLAGS += -DNO_IPV6 -endif -ifdef NO_UINTMAX_T - BASIC_CFLAGS += -Duintmax_t=uint32_t -endif -ifdef NO_SOCKADDR_STORAGE -ifdef NO_IPV6 - BASIC_CFLAGS += -Dsockaddr_storage=sockaddr_in -else - BASIC_CFLAGS += -Dsockaddr_storage=sockaddr_in6 -endif -endif -ifdef NO_INET_NTOP - LIB_OBJS += compat/inet_ntop.o -endif -ifdef NO_INET_PTON - LIB_OBJS += compat/inet_pton.o -endif - -ifdef NO_ICONV - BASIC_CFLAGS += -DNO_ICONV -endif - -ifdef OLD_ICONV - BASIC_CFLAGS += -DOLD_ICONV -endif - -ifdef NO_DEFLATE_BOUND - BASIC_CFLAGS += -DNO_DEFLATE_BOUND -endif - -ifdef PPC_SHA1 - SHA1_HEADER = "ppc/sha1.h" - LIB_OBJS += ppc/sha1.o ppc/sha1ppc.o -else -ifdef ARM_SHA1 - SHA1_HEADER = "arm/sha1.h" - LIB_OBJS += arm/sha1.o arm/sha1_arm.o -else -ifdef MOZILLA_SHA1 - SHA1_HEADER = "mozilla-sha1/sha1.h" - LIB_OBJS += mozilla-sha1/sha1.o -else - SHA1_HEADER = - EXTLIBS += $(LIB_4_CRYPTO) -endif -endif -endif -ifdef NO_PERL_MAKEMAKER - export NO_PERL_MAKEMAKER -endif -ifdef NO_HSTRERROR - COMPAT_CFLAGS += -DNO_HSTRERROR - COMPAT_OBJS += compat/hstrerror.o -endif -ifdef NO_MEMMEM - COMPAT_CFLAGS += -DNO_MEMMEM - COMPAT_OBJS += compat/memmem.o -endif -ifdef INTERNAL_QSORT - COMPAT_CFLAGS += -DINTERNAL_QSORT - COMPAT_OBJS += compat/qsort.o -endif -ifdef RUNTIME_PREFIX - COMPAT_CFLAGS += -DRUNTIME_PREFIX -endif - -ifdef DIR_HAS_BSD_GROUP_SEMANTICS - COMPAT_CFLAGS += -DDIR_HAS_BSD_GROUP_SEMANTICS -endif -ifdef 
NO_EXTERNAL_GREP - BASIC_CFLAGS += -DNO_EXTERNAL_GREP -endif - -ifeq ($(PERL_PATH),) -NO_PERL=NoThanks -endif - -QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir -QUIET_SUBDIR1 = - -ifneq ($(findstring $(MAKEFLAGS),w),w) -PRINT_DIR = --no-print-directory -else # "make -w" -NO_SUBDIR = : -endif - -ifneq ($(findstring $(MAKEFLAGS),s),s) -ifndef V - QUIET_CC = @echo ' ' CC $@; - QUIET_AR = @echo ' ' AR $@; - QUIET_LINK = @echo ' ' LINK $@; - QUIET_BUILT_IN = @echo ' ' BUILTIN $@; - QUIET_GEN = @echo ' ' GEN $@; - QUIET_SUBDIR0 = +@subdir= - QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \ - $(MAKE) $(PRINT_DIR) -C $$subdir - export V - export QUIET_GEN - export QUIET_BUILT_IN -endif -endif - -ifdef ASCIIDOC8 - export ASCIIDOC8 -endif - -# Shell quote (do not use $(call) to accommodate ancient setups); - -SHA1_HEADER_SQ = $(subst ','\'',$(SHA1_HEADER)) -ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG)) - -DESTDIR_SQ = $(subst ','\'',$(DESTDIR)) -bindir_SQ = $(subst ','\'',$(bindir)) -bindir_relative_SQ = $(subst ','\'',$(bindir_relative)) -mandir_SQ = $(subst ','\'',$(mandir)) -infodir_SQ = $(subst ','\'',$(infodir)) -perfexecdir_SQ = $(subst ','\'',$(perfexecdir)) -template_dir_SQ = $(subst ','\'',$(template_dir)) -htmldir_SQ = $(subst ','\'',$(htmldir)) -prefix_SQ = $(subst ','\'',$(prefix)) - -SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH)) -PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH)) - -LIBS = $(PERFLIBS) $(EXTLIBS) - -BASIC_CFLAGS += -DSHA1_HEADER='$(SHA1_HEADER_SQ)' \ - $(COMPAT_CFLAGS) -LIB_OBJS += $(COMPAT_OBJS) - -ALL_CFLAGS += $(BASIC_CFLAGS) -ALL_LDFLAGS += $(BASIC_LDFLAGS) - -export TAR INSTALL DESTDIR SHELL_PATH - - -### Build rules - -SHELL = $(SHELL_PATH) - -all:: shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) PERF-BUILD-OPTIONS -ifneq (,$X) - $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), test '$p' -ef '$p$X' || $(RM) '$p';) -endif - -all:: - -please_set_SHELL_PATH_to_a_more_modern_shell: - @$$(:) - -shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell - -strip: $(PROGRAMS) perf$X - $(STRIP) $(STRIP_OPTS) $(PROGRAMS) perf$X - -perf.o: perf.c common-cmds.h PERF-CFLAGS - $(QUIET_CC)$(CC) -DPERF_VERSION='"$(PERF_VERSION)"' \ - '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ - $(ALL_CFLAGS) -c $(filter %.c,$^) - -perf$X: perf.o $(BUILTIN_OBJS) $(PERFLIBS) - $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ perf.o \ - $(BUILTIN_OBJS) $(ALL_LDFLAGS) $(LIBS) - -builtin-help.o: builtin-help.c common-cmds.h PERF-CFLAGS - $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \ - '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ - '-DPERF_MAN_PATH="$(mandir_SQ)"' \ - '-DPERF_INFO_PATH="$(infodir_SQ)"' $< - -$(BUILT_INS): perf$X - $(QUIET_BUILT_IN)$(RM) $@ && \ - ln perf$X $@ 2>/dev/null || \ - ln -s perf$X $@ 2>/dev/null || \ - cp perf$X $@ - -common-cmds.h: util/generate-cmdlist.sh command-list.txt - -common-cmds.h: $(wildcard Documentation/perf-*.txt) - $(QUIET_GEN)util/generate-cmdlist.sh > $@+ && mv $@+ $@ - -$(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh - $(QUIET_GEN)$(RM) $@ $@+ && \ - sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \ - -e 's|@SHELL_PATH@|$(SHELL_PATH_SQ)|' \ - -e 's|@@PERL@@|$(PERL_PATH_SQ)|g' \ - -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \ - -e 's/@@NO_CURL@@/$(NO_CURL)/g' \ - $@.sh >$@+ && \ - chmod +x $@+ && \ - mv $@+ $@ - -configure: configure.ac - $(QUIET_GEN)$(RM) $@ $<+ && \ - sed -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \ - $< > $<+ && \ - autoconf -o $@ $<+ && \ - $(RM) $<+ - -# These can record PERF_VERSION 
-perf.o perf.spec \ - $(patsubst %.sh,%,$(SCRIPT_SH)) \ - $(patsubst %.perl,%,$(SCRIPT_PERL)) \ - : PERF-VERSION-FILE - -%.o: %.c PERF-CFLAGS - $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) $< -%.s: %.c PERF-CFLAGS - $(QUIET_CC)$(CC) -S $(ALL_CFLAGS) $< -%.o: %.S - $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) $< - -util/exec_cmd.o: util/exec_cmd.c PERF-CFLAGS - $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \ - '-DPERF_EXEC_PATH="$(perfexecdir_SQ)"' \ - '-DBINDIR="$(bindir_relative_SQ)"' \ - '-DPREFIX="$(prefix_SQ)"' \ - $< - -builtin-init-db.o: builtin-init-db.c PERF-CFLAGS - $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DDEFAULT_PERF_TEMPLATE_DIR='"$(template_dir_SQ)"' $< - -util/config.o: util/config.c PERF-CFLAGS - $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< - -perf-%$X: %.o $(PERFLIBS) - $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) - -$(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H) -$(patsubst perf-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h) -builtin-revert.o wt-status.o: wt-status.h - -$(LIB_FILE): $(LIB_OBJS) - $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS) - -doc: - $(MAKE) -C Documentation all - -man: - $(MAKE) -C Documentation man - -html: - $(MAKE) -C Documentation html - -info: - $(MAKE) -C Documentation info - -pdf: - $(MAKE) -C Documentation pdf - -TAGS: - $(RM) TAGS - $(FIND) . -name '*.[hcS]' -print | xargs etags -a - -tags: - $(RM) tags - $(FIND) . -name '*.[hcS]' -print | xargs ctags -a - -cscope: - $(RM) cscope* - $(FIND) . -name '*.[hcS]' -print | xargs cscope -b - -### Detect prefix changes -TRACK_CFLAGS = $(subst ','\'',$(ALL_CFLAGS)):\ - $(bindir_SQ):$(perfexecdir_SQ):$(template_dir_SQ):$(prefix_SQ) - -PERF-CFLAGS: .FORCE-PERF-CFLAGS - @FLAGS='$(TRACK_CFLAGS)'; \ - if test x"$$FLAGS" != x"`cat PERF-CFLAGS 2>/dev/null`" ; then \ - echo 1>&2 " * new build flags or prefix"; \ - echo "$$FLAGS" >PERF-CFLAGS; \ - fi - -# We need to apply sq twice, once to protect from the shell -# that runs PERF-BUILD-OPTIONS, and then again to protect it -# and the first level quoting from the shell that runs "echo". -PERF-BUILD-OPTIONS: .FORCE-PERF-BUILD-OPTIONS - @echo SHELL_PATH=\''$(subst ','\'',$(SHELL_PATH_SQ))'\' >$@ - @echo TAR=\''$(subst ','\'',$(subst ','\'',$(TAR)))'\' >>$@ - @echo NO_CURL=\''$(subst ','\'',$(subst ','\'',$(NO_CURL)))'\' >>$@ - @echo NO_PERL=\''$(subst ','\'',$(subst ','\'',$(NO_PERL)))'\' >>$@ - -### Testing rules - -# -# None right now: -# -# TEST_PROGRAMS += test-something$X - -all:: $(TEST_PROGRAMS) - -# GNU make supports exporting all variables by "export" without parameters. -# However, the environment gets quite big, and some programs have problems -# with that. 
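A note on the PERF-CFLAGS rule above, since the idiom is easy to miss: the .FORCE-PERF-CFLAGS prerequisite makes the recipe run on every make invocation, but the file is only rewritten when the flags actually differ from what was last recorded, so its timestamp - and therefore everything that lists PERF-CFLAGS as a prerequisite, such as the %.o pattern rule - changes exactly when the build configuration changes. A minimal standalone sketch of the same technique follows; the names TRACKED-FLAGS and .FORCE-TRACKED-FLAGS are illustrative only, not part of this patch:

TRACKED-FLAGS: .FORCE-TRACKED-FLAGS
	@flags='$(TRACK_CFLAGS)'; \
	if test x"$$flags" != x"`cat TRACKED-FLAGS 2>/dev/null`"; then \
		echo "$$flags" >TRACKED-FLAGS; \
	fi

.FORCE-TRACKED-FLAGS:

%.o: %.c TRACKED-FLAGS
	$(CC) $(CFLAGS) -c -o $@ $<

As in the real rule, the single-quoted assignment assumes the tracked string has already been shell-quoted (TRACK_CFLAGS is built from the _SQ variables above); with that assumption, touching the flags forces a full recompile while a plain re-run of make rebuilds nothing.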
- -export NO_SVN_TESTS - -check: common-cmds.h - if sparse; \ - then \ - for i in *.c */*.c; \ - do \ - sparse $(ALL_CFLAGS) $(SPARSE_FLAGS) $$i || exit; \ - done; \ - else \ - echo 2>&1 "Did you mean 'make test'?"; \ - exit 1; \ - fi - -remove-dashes: - ./fixup-builtins $(BUILT_INS) $(PROGRAMS) $(SCRIPTS) - -### Installation rules - -ifneq ($(filter /%,$(firstword $(template_dir))),) -template_instdir = $(template_dir) -else -template_instdir = $(prefix)/$(template_dir) -endif -export template_instdir - -ifneq ($(filter /%,$(firstword $(perfexecdir))),) -perfexec_instdir = $(perfexecdir) -else -perfexec_instdir = $(prefix)/$(perfexecdir) -endif -perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir)) -export perfexec_instdir - -install: all - $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' - $(INSTALL) perf$X '$(DESTDIR_SQ)$(bindir_SQ)' -ifdef BUILT_INS - $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' - $(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' -ifneq (,$X) - $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), $(RM) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$p';) -endif -endif - -install-doc: - $(MAKE) -C Documentation install - -install-man: - $(MAKE) -C Documentation install-man - -install-html: - $(MAKE) -C Documentation install-html - -install-info: - $(MAKE) -C Documentation install-info - -install-pdf: - $(MAKE) -C Documentation install-pdf - -quick-install-doc: - $(MAKE) -C Documentation quick-install - -quick-install-man: - $(MAKE) -C Documentation quick-install-man - -quick-install-html: - $(MAKE) -C Documentation quick-install-html - - -### Maintainer's dist rules -# -# None right now -# -# -# perf.spec: perf.spec.in -# sed -e 's/@@VERSION@@/$(PERF_VERSION)/g' < $< > $@+ -# mv $@+ $@ -# -# PERF_TARNAME=perf-$(PERF_VERSION) -# dist: perf.spec perf-archive$(X) configure -# ./perf-archive --format=tar \ -# --prefix=$(PERF_TARNAME)/ HEAD^{tree} > $(PERF_TARNAME).tar -# @mkdir -p $(PERF_TARNAME) -# @cp perf.spec configure $(PERF_TARNAME) -# @echo $(PERF_VERSION) > $(PERF_TARNAME)/version -# $(TAR) rf $(PERF_TARNAME).tar \ -# $(PERF_TARNAME)/perf.spec \ -# $(PERF_TARNAME)/configure \ -# $(PERF_TARNAME)/version -# @$(RM) -r $(PERF_TARNAME) -# gzip -f -9 $(PERF_TARNAME).tar -# -# htmldocs = perf-htmldocs-$(PERF_VERSION) -# manpages = perf-manpages-$(PERF_VERSION) -# dist-doc: -# $(RM) -r .doc-tmp-dir -# mkdir .doc-tmp-dir -# $(MAKE) -C Documentation WEBDOC_DEST=../.doc-tmp-dir install-webdoc -# cd .doc-tmp-dir && $(TAR) cf ../$(htmldocs).tar . -# gzip -n -9 -f $(htmldocs).tar -# : -# $(RM) -r .doc-tmp-dir -# mkdir -p .doc-tmp-dir/man1 .doc-tmp-dir/man5 .doc-tmp-dir/man7 -# $(MAKE) -C Documentation DESTDIR=./ \ -# man1dir=../.doc-tmp-dir/man1 \ -# man5dir=../.doc-tmp-dir/man5 \ -# man7dir=../.doc-tmp-dir/man7 \ -# install -# cd .doc-tmp-dir && $(TAR) cf ../$(manpages).tar . 
-# gzip -n -9 -f $(manpages).tar -# $(RM) -r .doc-tmp-dir -# -# rpm: dist -# $(RPMBUILD) -ta $(PERF_TARNAME).tar.gz - -### Cleaning rules - -distclean: clean -# $(RM) configure - -clean: - $(RM) *.o */*.o $(LIB_FILE) - $(RM) $(ALL_PROGRAMS) $(BUILT_INS) perf$X - $(RM) $(TEST_PROGRAMS) - $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h TAGS tags cscope* - $(RM) -r autom4te.cache - $(RM) config.log config.mak.autogen config.mak.append config.status config.cache - $(RM) -r $(PERF_TARNAME) .doc-tmp-dir - $(RM) $(PERF_TARNAME).tar.gz perf-core_$(PERF_VERSION)-*.tar.gz - $(RM) $(htmldocs).tar.gz $(manpages).tar.gz - $(MAKE) -C Documentation/ clean - $(RM) PERF-VERSION-FILE PERF-CFLAGS PERF-BUILD-OPTIONS - -.PHONY: all install clean strip -.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell -.PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS -.PHONY: .FORCE-PERF-BUILD-OPTIONS - -### Make sure built-ins do not have dups and are listed in perf.c -# -check-builtins:: - ./check-builtins.sh - -### Test suite coverage testing -# -# None right now -# -# .PHONY: coverage coverage-clean coverage-build coverage-report -# -# coverage: -# $(MAKE) coverage-build -# $(MAKE) coverage-report -# -# coverage-clean: -# rm -f *.gcda *.gcno -# -# COVERAGE_CFLAGS = $(CFLAGS) -O0 -ftest-coverage -fprofile-arcs -# COVERAGE_LDFLAGS = $(CFLAGS) -O0 -lgcov -# -# coverage-build: coverage-clean -# $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" all -# $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" \ -# -j1 test -# -# coverage-report: -# gcov -b *.c */*.c -# grep '^function.*called 0 ' *.c.gcov */*.c.gcov \ -# | sed -e 's/\([^:]*\)\.gcov: *function \([^ ]*\) called.*/\1: \2/' \ -# | tee coverage-untested-functions diff --git a/Documentation/perf_counter/builtin-annotate.c b/Documentation/perf_counter/builtin-annotate.c deleted file mode 100644 index 116a3978b44..00000000000 --- a/Documentation/perf_counter/builtin-annotate.c +++ /dev/null @@ -1,1355 +0,0 @@ -/* - * builtin-annotate.c - * - * Builtin annotate command: Analyze the perf.data input file, - * look up and read DSOs and symbol information and display - * a histogram of results, along various sorting keys. - */ -#include "builtin.h" - -#include "util/util.h" - -#include "util/color.h" -#include "util/list.h" -#include "util/cache.h" -#include "util/rbtree.h" -#include "util/symbol.h" -#include "util/string.h" - -#include "perf.h" - -#include "util/parse-options.h" -#include "util/parse-events.h" - -#define SHOW_KERNEL 1 -#define SHOW_USER 2 -#define SHOW_HV 4 - -static char const *input_name = "perf.data"; -static char *vmlinux = NULL; - -static char default_sort_order[] = "comm,symbol"; -static char *sort_order = default_sort_order; - -static int input; -static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; - -static int dump_trace = 0; -#define dprintf(x...) 
do { if (dump_trace) printf(x); } while (0) - -static int verbose; - -static unsigned long page_size; -static unsigned long mmap_window = 32; - -struct ip_event { - struct perf_event_header header; - __u64 ip; - __u32 pid, tid; -}; - -struct mmap_event { - struct perf_event_header header; - __u32 pid, tid; - __u64 start; - __u64 len; - __u64 pgoff; - char filename[PATH_MAX]; -}; - -struct comm_event { - struct perf_event_header header; - __u32 pid, tid; - char comm[16]; -}; - -struct fork_event { - struct perf_event_header header; - __u32 pid, ppid; -}; - -struct period_event { - struct perf_event_header header; - __u64 time; - __u64 id; - __u64 sample_period; -}; - -typedef union event_union { - struct perf_event_header header; - struct ip_event ip; - struct mmap_event mmap; - struct comm_event comm; - struct fork_event fork; - struct period_event period; -} event_t; - -static LIST_HEAD(dsos); -static struct dso *kernel_dso; -static struct dso *vdso; - - -static void dsos__add(struct dso *dso) -{ - list_add_tail(&dso->node, &dsos); -} - -static struct dso *dsos__find(const char *name) -{ - struct dso *pos; - - list_for_each_entry(pos, &dsos, node) - if (strcmp(pos->name, name) == 0) - return pos; - return NULL; -} - -static struct dso *dsos__findnew(const char *name) -{ - struct dso *dso = dsos__find(name); - int nr; - - if (dso) - return dso; - - dso = dso__new(name, 0); - if (!dso) - goto out_delete_dso; - - nr = dso__load(dso, NULL, verbose); - if (nr < 0) { - if (verbose) - fprintf(stderr, "Failed to open: %s\n", name); - goto out_delete_dso; - } - if (!nr && verbose) { - fprintf(stderr, - "No symbols found in: %s, maybe install a debug package?\n", - name); - } - - dsos__add(dso); - - return dso; - -out_delete_dso: - dso__delete(dso); - return NULL; -} - -static void dsos__fprintf(FILE *fp) -{ - struct dso *pos; - - list_for_each_entry(pos, &dsos, node) - dso__fprintf(pos, fp); -} - -static struct symbol *vdso__find_symbol(struct dso *dso, uint64_t ip) -{ - return dso__find_symbol(kernel_dso, ip); -} - -static int load_kernel(void) -{ - int err; - - kernel_dso = dso__new("[kernel]", 0); - if (!kernel_dso) - return -1; - - err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose); - if (err) { - dso__delete(kernel_dso); - kernel_dso = NULL; - } else - dsos__add(kernel_dso); - - vdso = dso__new("[vdso]", 0); - if (!vdso) - return -1; - - vdso->find_symbol = vdso__find_symbol; - - dsos__add(vdso); - - return err; -} - -struct map { - struct list_head node; - uint64_t start; - uint64_t end; - uint64_t pgoff; - uint64_t (*map_ip)(struct map *, uint64_t); - struct dso *dso; -}; - -static uint64_t map__map_ip(struct map *map, uint64_t ip) -{ - return ip - map->start + map->pgoff; -} - -static uint64_t vdso__map_ip(struct map *map, uint64_t ip) -{ - return ip; -} - -static struct map *map__new(struct mmap_event *event) -{ - struct map *self = malloc(sizeof(*self)); - - if (self != NULL) { - const char *filename = event->filename; - - self->start = event->start; - self->end = event->start + event->len; - self->pgoff = event->pgoff; - - self->dso = dsos__findnew(filename); - if (self->dso == NULL) - goto out_delete; - - if (self->dso == vdso) - self->map_ip = vdso__map_ip; - else - self->map_ip = map__map_ip; - } - return self; -out_delete: - free(self); - return NULL; -} - -static struct map *map__clone(struct map *self) -{ - struct map *map = malloc(sizeof(*self)); - - if (!map) - return NULL; - - memcpy(map, self, sizeof(*self)); - - return map; -} - -static int map__overlap(struct map *l, 
struct map *r) -{ - if (l->start > r->start) { - struct map *t = l; - l = r; - r = t; - } - - if (l->end > r->start) - return 1; - - return 0; -} - -static size_t map__fprintf(struct map *self, FILE *fp) -{ - return fprintf(fp, " %"PRIx64"-%"PRIx64" %"PRIx64" %s\n", - self->start, self->end, self->pgoff, self->dso->name); -} - - -struct thread { - struct rb_node rb_node; - struct list_head maps; - pid_t pid; - char *comm; -}; - -static struct thread *thread__new(pid_t pid) -{ - struct thread *self = malloc(sizeof(*self)); - - if (self != NULL) { - self->pid = pid; - self->comm = malloc(32); - if (self->comm) - snprintf(self->comm, 32, ":%d", self->pid); - INIT_LIST_HEAD(&self->maps); - } - - return self; -} - -static int thread__set_comm(struct thread *self, const char *comm) -{ - if (self->comm) - free(self->comm); - self->comm = strdup(comm); - return self->comm ? 0 : -ENOMEM; -} - -static size_t thread__fprintf(struct thread *self, FILE *fp) -{ - struct map *pos; - size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm); - - list_for_each_entry(pos, &self->maps, node) - ret += map__fprintf(pos, fp); - - return ret; -} - - -static struct rb_root threads; -static struct thread *last_match; - -static struct thread *threads__findnew(pid_t pid) -{ - struct rb_node **p = &threads.rb_node; - struct rb_node *parent = NULL; - struct thread *th; - - /* - * Front-end cache - PID lookups come in blocks, - * so most of the time we don't have to look up - * the full rbtree: - */ - if (last_match && last_match->pid == pid) - return last_match; - - while (*p != NULL) { - parent = *p; - th = rb_entry(parent, struct thread, rb_node); - - if (th->pid == pid) { - last_match = th; - return th; - } - - if (pid < th->pid) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - th = thread__new(pid); - if (th != NULL) { - rb_link_node(&th->rb_node, parent, p); - rb_insert_color(&th->rb_node, &threads); - last_match = th; - } - - return th; -} - -static void thread__insert_map(struct thread *self, struct map *map) -{ - struct map *pos, *tmp; - - list_for_each_entry_safe(pos, tmp, &self->maps, node) { - if (map__overlap(pos, map)) { - list_del_init(&pos->node); - /* XXX leaks dsos */ - free(pos); - } - } - - list_add_tail(&map->node, &self->maps); -} - -static int thread__fork(struct thread *self, struct thread *parent) -{ - struct map *map; - - if (self->comm) - free(self->comm); - self->comm = strdup(parent->comm); - if (!self->comm) - return -ENOMEM; - - list_for_each_entry(map, &parent->maps, node) { - struct map *new = map__clone(map); - if (!new) - return -ENOMEM; - thread__insert_map(self, new); - } - - return 0; -} - -static struct map *thread__find_map(struct thread *self, uint64_t ip) -{ - struct map *pos; - - if (self == NULL) - return NULL; - - list_for_each_entry(pos, &self->maps, node) - if (ip >= pos->start && ip <= pos->end) - return pos; - - return NULL; -} - -static size_t threads__fprintf(FILE *fp) -{ - size_t ret = 0; - struct rb_node *nd; - - for (nd = rb_first(&threads); nd; nd = rb_next(nd)) { - struct thread *pos = rb_entry(nd, struct thread, rb_node); - - ret += thread__fprintf(pos, fp); - } - - return ret; -} - -/* - * histogram, sorted on item, collects counts - */ - -static struct rb_root hist; - -struct hist_entry { - struct rb_node rb_node; - - struct thread *thread; - struct map *map; - struct dso *dso; - struct symbol *sym; - uint64_t ip; - char level; - - uint32_t count; -}; - -/* - * configurable sorting bits - */ - -struct sort_entry { - struct list_head list; - - 
char *header; - - int64_t (*cmp)(struct hist_entry *, struct hist_entry *); - int64_t (*collapse)(struct hist_entry *, struct hist_entry *); - size_t (*print)(FILE *fp, struct hist_entry *); -}; - -/* --sort pid */ - -static int64_t -sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return right->thread->pid - left->thread->pid; -} - -static size_t -sort__thread_print(FILE *fp, struct hist_entry *self) -{ - return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid); -} - -static struct sort_entry sort_thread = { - .header = " Command: Pid", - .cmp = sort__thread_cmp, - .print = sort__thread_print, -}; - -/* --sort comm */ - -static int64_t -sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return right->thread->pid - left->thread->pid; -} - -static int64_t -sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) -{ - char *comm_l = left->thread->comm; - char *comm_r = right->thread->comm; - - if (!comm_l || !comm_r) { - if (!comm_l && !comm_r) - return 0; - else if (!comm_l) - return -1; - else - return 1; - } - - return strcmp(comm_l, comm_r); -} - -static size_t -sort__comm_print(FILE *fp, struct hist_entry *self) -{ - return fprintf(fp, "%16s", self->thread->comm); -} - -static struct sort_entry sort_comm = { - .header = " Command", - .cmp = sort__comm_cmp, - .collapse = sort__comm_collapse, - .print = sort__comm_print, -}; - -/* --sort dso */ - -static int64_t -sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) -{ - struct dso *dso_l = left->dso; - struct dso *dso_r = right->dso; - - if (!dso_l || !dso_r) { - if (!dso_l && !dso_r) - return 0; - else if (!dso_l) - return -1; - else - return 1; - } - - return strcmp(dso_l->name, dso_r->name); -} - -static size_t -sort__dso_print(FILE *fp, struct hist_entry *self) -{ - if (self->dso) - return fprintf(fp, "%-25s", self->dso->name); - - return fprintf(fp, "%016llx ", (__u64)self->ip); -} - -static struct sort_entry sort_dso = { - .header = "Shared Object ", - .cmp = sort__dso_cmp, - .print = sort__dso_print, -}; - -/* --sort symbol */ - -static int64_t -sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) -{ - uint64_t ip_l, ip_r; - - if (left->sym == right->sym) - return 0; - - ip_l = left->sym ? left->sym->start : left->ip; - ip_r = right->sym ? right->sym->start : right->ip; - - return (int64_t)(ip_r - ip_l); -} - -static size_t -sort__sym_print(FILE *fp, struct hist_entry *self) -{ - size_t ret = 0; - - if (verbose) - ret += fprintf(fp, "%#018llx ", (__u64)self->ip); - - if (self->sym) { - ret += fprintf(fp, "[%c] %s", - self->dso == kernel_dso ? 
'k' : '.', self->sym->name); - } else { - ret += fprintf(fp, "%#016llx", (__u64)self->ip); - } - - return ret; -} - -static struct sort_entry sort_sym = { - .header = "Symbol", - .cmp = sort__sym_cmp, - .print = sort__sym_print, -}; - -static int sort__need_collapse = 0; - -struct sort_dimension { - char *name; - struct sort_entry *entry; - int taken; -}; - -static struct sort_dimension sort_dimensions[] = { - { .name = "pid", .entry = &sort_thread, }, - { .name = "comm", .entry = &sort_comm, }, - { .name = "dso", .entry = &sort_dso, }, - { .name = "symbol", .entry = &sort_sym, }, -}; - -static LIST_HEAD(hist_entry__sort_list); - -static int sort_dimension__add(char *tok) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { - struct sort_dimension *sd = &sort_dimensions[i]; - - if (sd->taken) - continue; - - if (strncasecmp(tok, sd->name, strlen(tok))) - continue; - - if (sd->entry->collapse) - sort__need_collapse = 1; - - list_add_tail(&sd->entry->list, &hist_entry__sort_list); - sd->taken = 1; - - return 0; - } - - return -ESRCH; -} - -static int64_t -hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) -{ - struct sort_entry *se; - int64_t cmp = 0; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - cmp = se->cmp(left, right); - if (cmp) - break; - } - - return cmp; -} - -static int64_t -hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) -{ - struct sort_entry *se; - int64_t cmp = 0; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - int64_t (*f)(struct hist_entry *, struct hist_entry *); - - f = se->collapse ?: se->cmp; - - cmp = f(left, right); - if (cmp) - break; - } - - return cmp; -} - -/* - * collect histogram counts - */ -static void hist_hit(struct hist_entry *he, uint64_t ip) -{ - unsigned int sym_size, offset; - struct symbol *sym = he->sym; - - he->count++; - - if (!sym || !sym->hist) - return; - - sym_size = sym->end - sym->start; - offset = ip - sym->start; - - if (offset >= sym_size) - return; - - sym->hist_sum++; - sym->hist[offset]++; - - if (verbose >= 3) - printf("%p %s: count++ [ip: %p, %08Lx] => %Ld\n", - (void *)he->sym->start, - he->sym->name, - (void *)ip, ip - he->sym->start, - sym->hist[offset]); -} - -static int -hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, - struct symbol *sym, uint64_t ip, char level) -{ - struct rb_node **p = &hist.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *he; - struct hist_entry entry = { - .thread = thread, - .map = map, - .dso = dso, - .sym = sym, - .ip = ip, - .level = level, - .count = 1, - }; - int cmp; - - while (*p != NULL) { - parent = *p; - he = rb_entry(parent, struct hist_entry, rb_node); - - cmp = hist_entry__cmp(&entry, he); - - if (!cmp) { - hist_hit(he, ip); - - return 0; - } - - if (cmp < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - he = malloc(sizeof(*he)); - if (!he) - return -ENOMEM; - *he = entry; - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &hist); - - return 0; -} - -static void hist_entry__free(struct hist_entry *he) -{ - free(he); -} - -/* - * collapse the histogram - */ - -static struct rb_root collapse_hists; - -static void collapse__insert_entry(struct hist_entry *he) -{ - struct rb_node **p = &collapse_hists.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *iter; - int64_t cmp; - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct hist_entry, rb_node); - - cmp = hist_entry__collapse(iter, he); - - if (!cmp) { - 
iter->count += he->count; - hist_entry__free(he); - return; - } - - if (cmp < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &collapse_hists); -} - -static void collapse__resort(void) -{ - struct rb_node *next; - struct hist_entry *n; - - if (!sort__need_collapse) - return; - - next = rb_first(&hist); - while (next) { - n = rb_entry(next, struct hist_entry, rb_node); - next = rb_next(&n->rb_node); - - rb_erase(&n->rb_node, &hist); - collapse__insert_entry(n); - } -} - -/* - * reverse the map, sort on count. - */ - -static struct rb_root output_hists; - -static void output__insert_entry(struct hist_entry *he) -{ - struct rb_node **p = &output_hists.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *iter; - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct hist_entry, rb_node); - - if (he->count > iter->count) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &output_hists); -} - -static void output__resort(void) -{ - struct rb_node *next; - struct hist_entry *n; - struct rb_root *tree = &hist; - - if (sort__need_collapse) - tree = &collapse_hists; - - next = rb_first(tree); - - while (next) { - n = rb_entry(next, struct hist_entry, rb_node); - next = rb_next(&n->rb_node); - - rb_erase(&n->rb_node, tree); - output__insert_entry(n); - } -} - -static void register_idle_thread(void) -{ - struct thread *thread = threads__findnew(0); - - if (thread == NULL || - thread__set_comm(thread, "[idle]")) { - fprintf(stderr, "problem inserting idle task.\n"); - exit(-1); - } -} - -static unsigned long total = 0, - total_mmap = 0, - total_comm = 0, - total_fork = 0, - total_unknown = 0; - -static int -process_overflow_event(event_t *event, unsigned long offset, unsigned long head) -{ - char level; - int show = 0; - struct dso *dso = NULL; - struct thread *thread = threads__findnew(event->ip.pid); - uint64_t ip = event->ip.ip; - struct map *map = NULL; - - dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.misc, - event->ip.pid, - (void *)(long)ip); - - dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid); - - if (thread == NULL) { - fprintf(stderr, "problem processing %d event, skipping it.\n", - event->header.type); - return -1; - } - - if (event->header.misc & PERF_EVENT_MISC_KERNEL) { - show = SHOW_KERNEL; - level = 'k'; - - dso = kernel_dso; - - dprintf(" ...... dso: %s\n", dso->name); - - } else if (event->header.misc & PERF_EVENT_MISC_USER) { - - show = SHOW_USER; - level = '.'; - - map = thread__find_map(thread, ip); - if (map != NULL) { - ip = map->map_ip(map, ip); - dso = map->dso; - } else { - /* - * If this is outside of all known maps, - * and is a negative address, try to look it - * up in the kernel dso, as it might be a - * vsyscall (which executes in user-mode): - */ - if ((long long)ip < 0) - dso = kernel_dso; - } - dprintf(" ...... dso: %s\n", dso ? dso->name : ""); - - } else { - show = SHOW_HV; - level = 'H'; - dprintf(" ...... 
dso: [hypervisor]\n"); - } - - if (show & show_mask) { - struct symbol *sym = NULL; - - if (dso) - sym = dso->find_symbol(dso, ip); - - if (hist_entry__add(thread, map, dso, sym, ip, level)) { - fprintf(stderr, - "problem incrementing symbol count, skipping event\n"); - return -1; - } - } - total++; - - return 0; -} - -static int -process_mmap_event(event_t *event, unsigned long offset, unsigned long head) -{ - struct thread *thread = threads__findnew(event->mmap.pid); - struct map *map = map__new(&event->mmap); - - dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->mmap.pid, - (void *)(long)event->mmap.start, - (void *)(long)event->mmap.len, - (void *)(long)event->mmap.pgoff, - event->mmap.filename); - - if (thread == NULL || map == NULL) { - dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n"); - return 0; - } - - thread__insert_map(thread, map); - total_mmap++; - - return 0; -} - -static int -process_comm_event(event_t *event, unsigned long offset, unsigned long head) -{ - struct thread *thread = threads__findnew(event->comm.pid); - - dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->comm.comm, event->comm.pid); - - if (thread == NULL || - thread__set_comm(thread, event->comm.comm)) { - dprintf("problem processing PERF_EVENT_COMM, skipping event.\n"); - return -1; - } - total_comm++; - - return 0; -} - -static int -process_fork_event(event_t *event, unsigned long offset, unsigned long head) -{ - struct thread *thread = threads__findnew(event->fork.pid); - struct thread *parent = threads__findnew(event->fork.ppid); - - dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->fork.pid, event->fork.ppid); - - if (!thread || !parent || thread__fork(thread, parent)) { - dprintf("problem processing PERF_EVENT_FORK, skipping event.\n"); - return -1; - } - total_fork++; - - return 0; -} - -static int -process_period_event(event_t *event, unsigned long offset, unsigned long head) -{ - dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->period.time, - event->period.id, - event->period.sample_period); - - return 0; -} - -static int -process_event(event_t *event, unsigned long offset, unsigned long head) -{ - if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) - return process_overflow_event(event, offset, head); - - switch (event->header.type) { - case PERF_EVENT_MMAP: - return process_mmap_event(event, offset, head); - - case PERF_EVENT_COMM: - return process_comm_event(event, offset, head); - - case PERF_EVENT_FORK: - return process_fork_event(event, offset, head); - - case PERF_EVENT_PERIOD: - return process_period_event(event, offset, head); - /* - * We don't process them right now but they are fine: - */ - - case PERF_EVENT_THROTTLE: - case PERF_EVENT_UNTHROTTLE: - return 0; - - default: - return -1; - } - - return 0; -} - -static int -parse_line(FILE *file, struct symbol *sym, uint64_t start, uint64_t len) -{ - char *line = NULL, *tmp, *tmp2; - unsigned int offset; - size_t line_len; - __u64 line_ip; - int ret; - char *c; - - if (getline(&line, &line_len, file) < 0) - return -1; - if (!line) - return -1; - - c = strchr(line, '\n'); - if (c) - *c = 0; - - line_ip = -1; - offset = 0; - ret = -2; - - /* - * Strip leading spaces: - */ - tmp = line; - while (*tmp) { - if (*tmp != ' ') - 
break; - tmp++; - } - - if (*tmp) { - /* - * Parse hex addresses followed by ':' - */ - line_ip = strtoull(tmp, &tmp2, 16); - if (*tmp2 != ':') - line_ip = -1; - } - - if (line_ip != -1) { - unsigned int hits = 0; - double percent = 0.0; - char *color = PERF_COLOR_NORMAL; - - offset = line_ip - start; - if (offset < len) - hits = sym->hist[offset]; - - if (sym->hist_sum) - percent = 100.0 * hits / sym->hist_sum; - - /* - * We color high-overhead entries in red, low-overhead - * entries in green - and keep the middle ground normal: - */ - if (percent >= 5.0) - color = PERF_COLOR_RED; - else { - if (percent > 0.5) - color = PERF_COLOR_GREEN; - } - - color_fprintf(stdout, color, " %7.2f", percent); - printf(" : "); - color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", line); - } else { - if (!*line) - printf(" :\n"); - else - printf(" : %s\n", line); - } - - return 0; -} - -static void annotate_sym(struct dso *dso, struct symbol *sym) -{ - char *filename = dso->name; - uint64_t start, end, len; - char command[PATH_MAX*2]; - FILE *file; - - if (!filename) - return; - if (dso == kernel_dso) - filename = vmlinux; - - printf("\n------------------------------------------------\n"); - printf(" Percent | Source code & Disassembly of %s\n", filename); - printf("------------------------------------------------\n"); - - if (verbose >= 2) - printf("annotating [%p] %30s : [%p] %30s\n", dso, dso->name, sym, sym->name); - - start = sym->obj_start; - if (!start) - start = sym->start; - - end = start + sym->end - sym->start + 1; - len = sym->end - sym->start; - - sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s", (__u64)start, (__u64)end, filename); - - if (verbose >= 3) - printf("doing: %s\n", command); - - file = popen(command, "r"); - if (!file) - return; - - while (!feof(file)) { - if (parse_line(file, sym, start, len) < 0) - break; - } - - pclose(file); -} - -static void find_annotations(void) -{ - struct rb_node *nd; - struct dso *dso; - int count = 0; - - list_for_each_entry(dso, &dsos, node) { - - for (nd = rb_first(&dso->syms); nd; nd = rb_next(nd)) { - struct symbol *sym = rb_entry(nd, struct symbol, rb_node); - - if (sym->hist) { - annotate_sym(dso, sym); - count++; - } - } - } - - if (!count) - printf(" Error: symbol '%s' not present amongst the samples.\n", sym_hist_filter); -} - -static int __cmd_annotate(void) -{ - int ret, rc = EXIT_FAILURE; - unsigned long offset = 0; - unsigned long head = 0; - struct stat stat; - event_t *event; - uint32_t size; - char *buf; - - register_idle_thread(); - - input = open(input_name, O_RDONLY); - if (input < 0) { - perror("failed to open file"); - exit(-1); - } - - ret = fstat(input, &stat); - if (ret < 0) { - perror("failed to stat file"); - exit(-1); - } - - if (!stat.st_size) { - fprintf(stderr, "zero-sized file, nothing to do!\n"); - exit(0); - } - - if (load_kernel() < 0) { - perror("failed to load kernel symbols"); - return EXIT_FAILURE; - } - -remap: - buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, - MAP_SHARED, input, offset); - if (buf == MAP_FAILED) { - perror("failed to mmap file"); - exit(-1); - } - -more: - event = (event_t *)(buf + head); - - size = event->header.size; - if (!size) - size = 8; - - if (head + event->header.size >= page_size * mmap_window) { - unsigned long shift = page_size * (head / page_size); - int ret; - - ret = munmap(buf, page_size * mmap_window); - assert(ret == 0); - - offset += shift; - head -= shift; - goto remap; - } - - size = event->header.size; - - dprintf("%p [%p]: event: 
%d\n", - (void *)(offset + head), - (void *)(long)event->header.size, - event->header.type); - - if (!size || process_event(event, offset, head) < 0) { - - dprintf("%p [%p]: skipping unknown header type: %d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.type); - - total_unknown++; - - /* - * assume we lost track of the stream, check alignment, and - * increment a single u64 in the hope to catch on again 'soon'. - */ - - if (unlikely(head & 7)) - head &= ~7ULL; - - size = 8; - } - - head += size; - - if (offset + head < stat.st_size) - goto more; - - rc = EXIT_SUCCESS; - close(input); - - dprintf(" IP events: %10ld\n", total); - dprintf(" mmap events: %10ld\n", total_mmap); - dprintf(" comm events: %10ld\n", total_comm); - dprintf(" fork events: %10ld\n", total_fork); - dprintf(" unknown events: %10ld\n", total_unknown); - - if (dump_trace) - return 0; - - if (verbose >= 3) - threads__fprintf(stdout); - - if (verbose >= 2) - dsos__fprintf(stdout); - - collapse__resort(); - output__resort(); - - find_annotations(); - - return rc; -} - -static const char * const annotate_usage[] = { - "perf annotate [] ", - NULL -}; - -static const struct option options[] = { - OPT_STRING('i', "input", &input_name, "file", - "input file name"), - OPT_STRING('s', "symbol", &sym_hist_filter, "file", - "symbol to annotate"), - OPT_BOOLEAN('v', "verbose", &verbose, - "be more verbose (show symbol address, etc)"), - OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, - "dump raw trace in ASCII"), - OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"), - OPT_END() -}; - -static void setup_sorting(void) -{ - char *tmp, *tok, *str = strdup(sort_order); - - for (tok = strtok_r(str, ", ", &tmp); - tok; tok = strtok_r(NULL, ", ", &tmp)) { - if (sort_dimension__add(tok) < 0) { - error("Unknown --sort key: `%s'", tok); - usage_with_options(annotate_usage, options); - } - } - - free(str); -} - -int cmd_annotate(int argc, const char **argv, const char *prefix) -{ - symbol__init(); - - page_size = getpagesize(); - - argc = parse_options(argc, argv, options, annotate_usage, 0); - - setup_sorting(); - - if (argc) { - /* - * Special case: if there's an argument left then assume tha - * it's a symbol filter: - */ - if (argc > 1) - usage_with_options(annotate_usage, options); - - sym_hist_filter = argv[0]; - } - - if (!sym_hist_filter) - usage_with_options(annotate_usage, options); - - setup_pager(); - - return __cmd_annotate(); -} diff --git a/Documentation/perf_counter/builtin-help.c b/Documentation/perf_counter/builtin-help.c deleted file mode 100644 index 0f32dc3f3c4..00000000000 --- a/Documentation/perf_counter/builtin-help.c +++ /dev/null @@ -1,461 +0,0 @@ -/* - * builtin-help.c - * - * Builtin help command - */ -#include "util/cache.h" -#include "builtin.h" -#include "util/exec_cmd.h" -#include "common-cmds.h" -#include "util/parse-options.h" -#include "util/run-command.h" -#include "util/help.h" - -static struct man_viewer_list { - struct man_viewer_list *next; - char name[FLEX_ARRAY]; -} *man_viewer_list; - -static struct man_viewer_info_list { - struct man_viewer_info_list *next; - const char *info; - char name[FLEX_ARRAY]; -} *man_viewer_info_list; - -enum help_format { - HELP_FORMAT_MAN, - HELP_FORMAT_INFO, - HELP_FORMAT_WEB, -}; - -static int show_all = 0; -static enum help_format help_format = HELP_FORMAT_MAN; -static struct option builtin_help_options[] = { - OPT_BOOLEAN('a', "all", &show_all, "print all available commands"), - OPT_SET_INT('m', "man", &help_format, 
"show man page", HELP_FORMAT_MAN), - OPT_SET_INT('w', "web", &help_format, "show manual in web browser", - HELP_FORMAT_WEB), - OPT_SET_INT('i', "info", &help_format, "show info page", - HELP_FORMAT_INFO), - OPT_END(), -}; - -static const char * const builtin_help_usage[] = { - "perf help [--all] [--man|--web|--info] [command]", - NULL -}; - -static enum help_format parse_help_format(const char *format) -{ - if (!strcmp(format, "man")) - return HELP_FORMAT_MAN; - if (!strcmp(format, "info")) - return HELP_FORMAT_INFO; - if (!strcmp(format, "web") || !strcmp(format, "html")) - return HELP_FORMAT_WEB; - die("unrecognized help format '%s'", format); -} - -static const char *get_man_viewer_info(const char *name) -{ - struct man_viewer_info_list *viewer; - - for (viewer = man_viewer_info_list; viewer; viewer = viewer->next) - { - if (!strcasecmp(name, viewer->name)) - return viewer->info; - } - return NULL; -} - -static int check_emacsclient_version(void) -{ - struct strbuf buffer = STRBUF_INIT; - struct child_process ec_process; - const char *argv_ec[] = { "emacsclient", "--version", NULL }; - int version; - - /* emacsclient prints its version number on stderr */ - memset(&ec_process, 0, sizeof(ec_process)); - ec_process.argv = argv_ec; - ec_process.err = -1; - ec_process.stdout_to_stderr = 1; - if (start_command(&ec_process)) { - fprintf(stderr, "Failed to start emacsclient.\n"); - return -1; - } - strbuf_read(&buffer, ec_process.err, 20); - close(ec_process.err); - - /* - * Don't bother checking return value, because "emacsclient --version" - * seems to always exits with code 1. - */ - finish_command(&ec_process); - - if (prefixcmp(buffer.buf, "emacsclient")) { - fprintf(stderr, "Failed to parse emacsclient version.\n"); - strbuf_release(&buffer); - return -1; - } - - strbuf_remove(&buffer, 0, strlen("emacsclient")); - version = atoi(buffer.buf); - - if (version < 22) { - fprintf(stderr, - "emacsclient version '%d' too old (< 22).\n", - version); - strbuf_release(&buffer); - return -1; - } - - strbuf_release(&buffer); - return 0; -} - -static void exec_woman_emacs(const char* path, const char *page) -{ - if (!check_emacsclient_version()) { - /* This works only with emacsclient version >= 22. */ - struct strbuf man_page = STRBUF_INIT; - - if (!path) - path = "emacsclient"; - strbuf_addf(&man_page, "(woman \"%s\")", page); - execlp(path, "emacsclient", "-e", man_page.buf, NULL); - warning("failed to exec '%s': %s", path, strerror(errno)); - } -} - -static void exec_man_konqueror(const char* path, const char *page) -{ - const char *display = getenv("DISPLAY"); - if (display && *display) { - struct strbuf man_page = STRBUF_INIT; - const char *filename = "kfmclient"; - - /* It's simpler to launch konqueror using kfmclient. 
*/ - if (path) { - const char *file = strrchr(path, '/'); - if (file && !strcmp(file + 1, "konqueror")) { - char *new = strdup(path); - char *dest = strrchr(new, '/'); - - /* strlen("konqueror") == strlen("kfmclient") */ - strcpy(dest + 1, "kfmclient"); - path = new; - } - if (file) - filename = file; - } else - path = "kfmclient"; - strbuf_addf(&man_page, "man:%s(1)", page); - execlp(path, filename, "newTab", man_page.buf, NULL); - warning("failed to exec '%s': %s", path, strerror(errno)); - } -} - -static void exec_man_man(const char* path, const char *page) -{ - if (!path) - path = "man"; - execlp(path, "man", page, NULL); - warning("failed to exec '%s': %s", path, strerror(errno)); -} - -static void exec_man_cmd(const char *cmd, const char *page) -{ - struct strbuf shell_cmd = STRBUF_INIT; - strbuf_addf(&shell_cmd, "%s %s", cmd, page); - execl("/bin/sh", "sh", "-c", shell_cmd.buf, NULL); - warning("failed to exec '%s': %s", cmd, strerror(errno)); -} - -static void add_man_viewer(const char *name) -{ - struct man_viewer_list **p = &man_viewer_list; - size_t len = strlen(name); - - while (*p) - p = &((*p)->next); - *p = calloc(1, (sizeof(**p) + len + 1)); - strncpy((*p)->name, name, len); -} - -static int supported_man_viewer(const char *name, size_t len) -{ - return (!strncasecmp("man", name, len) || - !strncasecmp("woman", name, len) || - !strncasecmp("konqueror", name, len)); -} - -static void do_add_man_viewer_info(const char *name, - size_t len, - const char *value) -{ - struct man_viewer_info_list *new = calloc(1, sizeof(*new) + len + 1); - - strncpy(new->name, name, len); - new->info = strdup(value); - new->next = man_viewer_info_list; - man_viewer_info_list = new; -} - -static int add_man_viewer_path(const char *name, - size_t len, - const char *value) -{ - if (supported_man_viewer(name, len)) - do_add_man_viewer_info(name, len, value); - else - warning("'%s': path for unsupported man viewer.\n" - "Please consider using 'man.<tool>.cmd' instead.", - name); - - return 0; -} - -static int add_man_viewer_cmd(const char *name, - size_t len, - const char *value) -{ - if (supported_man_viewer(name, len)) - warning("'%s': cmd for supported man viewer.\n" - "Please consider using 'man.<tool>.path' instead.", - name); - else - do_add_man_viewer_info(name, len, value); - - return 0; -} - -static int add_man_viewer_info(const char *var, const char *value) -{ - const char *name = var + 4; - const char *subkey = strrchr(name, '.'); - - if (!subkey) - return error("Config with no key for man viewer: %s", name); - - if (!strcmp(subkey, ".path")) { - if (!value) - return config_error_nonbool(var); - return add_man_viewer_path(name, subkey - name, value); - } - if (!strcmp(subkey, ".cmd")) { - if (!value) - return config_error_nonbool(var); - return add_man_viewer_cmd(name, subkey - name, value); - } - - warning("'%s': unsupported man viewer sub key.", subkey); - return 0; -} - -static int perf_help_config(const char *var, const char *value, void *cb) -{ - if (!strcmp(var, "help.format")) { - if (!value) - return config_error_nonbool(var); - help_format = parse_help_format(value); - return 0; - } - if (!strcmp(var, "man.viewer")) { - if (!value) - return config_error_nonbool(var); - add_man_viewer(value); - return 0; - } - if (!prefixcmp(var, "man.")) - return add_man_viewer_info(var, value); - - return perf_default_config(var, value, cb); -} - -static struct cmdnames main_cmds, other_cmds; - -void list_common_cmds_help(void) -{ - int i, longest = 0; - - for (i = 0; i < ARRAY_SIZE(common_cmds); i++) { - if 
(longest < strlen(common_cmds[i].name)) - longest = strlen(common_cmds[i].name); - } - - puts(" The most commonly used perf commands are:"); - for (i = 0; i < ARRAY_SIZE(common_cmds); i++) { - printf(" %s ", common_cmds[i].name); - mput_char(' ', longest - strlen(common_cmds[i].name)); - puts(common_cmds[i].help); - } -} - -static int is_perf_command(const char *s) -{ - return is_in_cmdlist(&main_cmds, s) || - is_in_cmdlist(&other_cmds, s); -} - -static const char *prepend(const char *prefix, const char *cmd) -{ - size_t pre_len = strlen(prefix); - size_t cmd_len = strlen(cmd); - char *p = malloc(pre_len + cmd_len + 1); - memcpy(p, prefix, pre_len); - strcpy(p + pre_len, cmd); - return p; -} - -static const char *cmd_to_page(const char *perf_cmd) -{ - if (!perf_cmd) - return "perf"; - else if (!prefixcmp(perf_cmd, "perf")) - return perf_cmd; - else if (is_perf_command(perf_cmd)) - return prepend("perf-", perf_cmd); - else - return prepend("perf-", perf_cmd); -} - -static void setup_man_path(void) -{ - struct strbuf new_path = STRBUF_INIT; - const char *old_path = getenv("MANPATH"); - - /* We should always put ':' after our path. If there is no - * old_path, the ':' at the end will let 'man' try - * system-wide paths after ours to find the manual page. If - * there is old_path, we need ':' as delimiter. */ - strbuf_addstr(&new_path, system_path(PERF_MAN_PATH)); - strbuf_addch(&new_path, ':'); - if (old_path) - strbuf_addstr(&new_path, old_path); - - setenv("MANPATH", new_path.buf, 1); - - strbuf_release(&new_path); -} - -static void exec_viewer(const char *name, const char *page) -{ - const char *info = get_man_viewer_info(name); - - if (!strcasecmp(name, "man")) - exec_man_man(info, page); - else if (!strcasecmp(name, "woman")) - exec_woman_emacs(info, page); - else if (!strcasecmp(name, "konqueror")) - exec_man_konqueror(info, page); - else if (info) - exec_man_cmd(info, page); - else - warning("'%s': unknown man viewer.", name); -} - -static void show_man_page(const char *perf_cmd) -{ - struct man_viewer_list *viewer; - const char *page = cmd_to_page(perf_cmd); - const char *fallback = getenv("PERF_MAN_VIEWER"); - - setup_man_path(); - for (viewer = man_viewer_list; viewer; viewer = viewer->next) - { - exec_viewer(viewer->name, page); /* will return when unable */ - } - if (fallback) - exec_viewer(fallback, page); - exec_viewer("man", page); - die("no man viewer handled the request"); -} - -static void show_info_page(const char *perf_cmd) -{ - const char *page = cmd_to_page(perf_cmd); - setenv("INFOPATH", system_path(PERF_INFO_PATH), 1); - execlp("info", "info", "perfman", page, NULL); -} - -static void get_html_page_path(struct strbuf *page_path, const char *page) -{ - struct stat st; - const char *html_path = system_path(PERF_HTML_PATH); - - /* Check that we have a perf documentation directory. */ - if (stat(mkpath("%s/perf.html", html_path), &st) - || !S_ISREG(st.st_mode)) - die("'%s': not a documentation directory.", html_path); - - strbuf_init(page_path, 0); - strbuf_addf(page_path, "%s/%s.html", html_path, page); -} - -/* - * If open_html is not defined in a platform-specific way (see for - * example compat/mingw.h), we use the script web--browse to display - * HTML. 
- */ -#ifndef open_html -static void open_html(const char *path) -{ - execl_perf_cmd("web--browse", "-c", "help.browser", path, NULL); -} -#endif - -static void show_html_page(const char *perf_cmd) -{ - const char *page = cmd_to_page(perf_cmd); - struct strbuf page_path; /* it leaks but we exec below */ - - get_html_page_path(&page_path, page); - - open_html(page_path.buf); -} - -int cmd_help(int argc, const char **argv, const char *prefix) -{ - const char *alias; - load_command_list("perf-", &main_cmds, &other_cmds); - - perf_config(perf_help_config, NULL); - - argc = parse_options(argc, argv, builtin_help_options, - builtin_help_usage, 0); - - if (show_all) { - printf("\n usage: %s\n\n", perf_usage_string); - list_commands("perf commands", &main_cmds, &other_cmds); - printf(" %s\n\n", perf_more_info_string); - return 0; - } - - if (!argv[0]) { - printf("\n usage: %s\n\n", perf_usage_string); - list_common_cmds_help(); - printf("\n %s\n\n", perf_more_info_string); - return 0; - } - - alias = alias_lookup(argv[0]); - if (alias && !is_perf_command(argv[0])) { - printf("`perf %s' is aliased to `%s'\n", argv[0], alias); - return 0; - } - - switch (help_format) { - case HELP_FORMAT_MAN: - show_man_page(argv[0]); - break; - case HELP_FORMAT_INFO: - show_info_page(argv[0]); - break; - case HELP_FORMAT_WEB: - show_html_page(argv[0]); - break; - } - - return 0; -} diff --git a/Documentation/perf_counter/builtin-list.c b/Documentation/perf_counter/builtin-list.c deleted file mode 100644 index fe60e37c96e..00000000000 --- a/Documentation/perf_counter/builtin-list.c +++ /dev/null @@ -1,20 +0,0 @@ -/* - * builtin-list.c - * - * Builtin list command: list all event types - * - * Copyright (C) 2009, Thomas Gleixner - * Copyright (C) 2008-2009, Red Hat Inc, Ingo Molnar - */ -#include "builtin.h" - -#include "perf.h" - -#include "util/parse-options.h" - #include "util/parse-events.h" - -int cmd_list(int argc, const char **argv, const char *prefix) -{ - print_events(); - return 0; -} diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c deleted file mode 100644 index aeab9c4b15e..00000000000 --- a/Documentation/perf_counter/builtin-record.c +++ /dev/null @@ -1,544 +0,0 @@ -/* - * builtin-record.c - * - * Builtin record command: Record the profile of a workload - * (or a CPU, or a PID) into the perf.data output file - for - * later analysis via perf report. 
- */ -#include "builtin.h" - -#include "perf.h" - -#include "util/util.h" -#include "util/parse-options.h" -#include "util/parse-events.h" -#include "util/string.h" - -#include -#include - -#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1) -#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) - -static int fd[MAX_NR_CPUS][MAX_COUNTERS]; - -static long default_interval = 100000; - -static int nr_cpus = 0; -static unsigned int page_size; -static unsigned int mmap_pages = 128; -static int freq = 0; -static int output; -static const char *output_name = "perf.data"; -static int group = 0; -static unsigned int realtime_prio = 0; -static int system_wide = 0; -static pid_t target_pid = -1; -static int inherit = 1; -static int force = 0; -static int append_file = 0; - -static long samples; -static struct timeval last_read; -static struct timeval this_read; - -static __u64 bytes_written; - -static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; - -static int nr_poll; -static int nr_cpu; - -struct mmap_event { - struct perf_event_header header; - __u32 pid; - __u32 tid; - __u64 start; - __u64 len; - __u64 pgoff; - char filename[PATH_MAX]; -}; - -struct comm_event { - struct perf_event_header header; - __u32 pid; - __u32 tid; - char comm[16]; -}; - - -struct mmap_data { - int counter; - void *base; - unsigned int mask; - unsigned int prev; -}; - -static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; - -static unsigned int mmap_read_head(struct mmap_data *md) -{ - struct perf_counter_mmap_page *pc = md->base; - int head; - - head = pc->data_head; - rmb(); - - return head; -} - -static void mmap_read(struct mmap_data *md) -{ - unsigned int head = mmap_read_head(md); - unsigned int old = md->prev; - unsigned char *data = md->base + page_size; - unsigned long size; - void *buf; - int diff; - - gettimeofday(&this_read, NULL); - - /* - * If we're further behind than half the buffer, there's a chance - * the writer will bite our tail and mess up the samples under us. - * - * If we somehow ended up ahead of the head, we got messed up. - * - * In either case, truncate and restart at head. - */ - diff = head - old; - if (diff > md->mask / 2 || diff < 0) { - struct timeval iv; - unsigned long msecs; - - timersub(&this_read, &last_read, &iv); - msecs = iv.tv_sec*1000 + iv.tv_usec/1000; - - fprintf(stderr, "WARNING: failed to keep up with mmap data." - " Last read %lu msecs ago.\n", msecs); - - /* - * head points to a known good entry, start there. 
- */ - old = head; - } - - last_read = this_read; - - if (old != head) - samples++; - - size = head - old; - - if ((old & md->mask) + size != (head & md->mask)) { - buf = &data[old & md->mask]; - size = md->mask + 1 - (old & md->mask); - old += size; - - while (size) { - int ret = write(output, buf, size); - - if (ret < 0) - die("failed to write"); - - size -= ret; - buf += ret; - - bytes_written += ret; - } - } - - buf = &data[old & md->mask]; - size = head - old; - old += size; - - while (size) { - int ret = write(output, buf, size); - - if (ret < 0) - die("failed to write"); - - size -= ret; - buf += ret; - - bytes_written += ret; - } - - md->prev = old; -} - -static volatile int done = 0; - -static void sig_handler(int sig) -{ - done = 1; -} - -static void pid_synthesize_comm_event(pid_t pid, int full) -{ - struct comm_event comm_ev; - char filename[PATH_MAX]; - char bf[BUFSIZ]; - int fd, ret; - size_t size; - char *field, *sep; - DIR *tasks; - struct dirent dirent, *next; - - snprintf(filename, sizeof(filename), "/proc/%d/stat", pid); - - fd = open(filename, O_RDONLY); - if (fd < 0) { - fprintf(stderr, "couldn't open %s\n", filename); - exit(EXIT_FAILURE); - } - if (read(fd, bf, sizeof(bf)) < 0) { - fprintf(stderr, "couldn't read %s\n", filename); - exit(EXIT_FAILURE); - } - close(fd); - - /* 9027 (cat) R 6747 9027 6747 34816 9027 ... */ - memset(&comm_ev, 0, sizeof(comm_ev)); - field = strchr(bf, '('); - if (field == NULL) - goto out_failure; - sep = strchr(++field, ')'); - if (sep == NULL) - goto out_failure; - size = sep - field; - memcpy(comm_ev.comm, field, size++); - - comm_ev.pid = pid; - comm_ev.header.type = PERF_EVENT_COMM; - size = ALIGN(size, sizeof(uint64_t)); - comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size); - - if (!full) { - comm_ev.tid = pid; - - ret = write(output, &comm_ev, comm_ev.header.size); - if (ret < 0) { - perror("failed to write"); - exit(-1); - } - return; - } - - snprintf(filename, sizeof(filename), "/proc/%d/task", pid); - - tasks = opendir(filename); - while (!readdir_r(tasks, &dirent, &next) && next) { - char *end; - pid = strtol(dirent.d_name, &end, 10); - if (*end) - continue; - - comm_ev.tid = pid; - - ret = write(output, &comm_ev, comm_ev.header.size); - if (ret < 0) { - perror("failed to write"); - exit(-1); - } - } - closedir(tasks); - return; - -out_failure: - fprintf(stderr, "couldn't get COMM and pgid, malformed %s\n", - filename); - exit(EXIT_FAILURE); -} - -static void pid_synthesize_mmap_samples(pid_t pid) -{ - char filename[PATH_MAX]; - FILE *fp; - - snprintf(filename, sizeof(filename), "/proc/%d/maps", pid); - - fp = fopen(filename, "r"); - if (fp == NULL) { - fprintf(stderr, "couldn't open %s\n", filename); - exit(EXIT_FAILURE); - } - while (1) { - char bf[BUFSIZ], *pbf = bf; - struct mmap_event mmap_ev = { - .header.type = PERF_EVENT_MMAP, - }; - int n; - size_t size; - if (fgets(bf, sizeof(bf), fp) == NULL) - break; - - /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ - n = hex2u64(pbf, &mmap_ev.start); - if (n < 0) - continue; - pbf += n + 1; - n = hex2u64(pbf, &mmap_ev.len); - if (n < 0) - continue; - pbf += n + 3; - if (*pbf == 'x') { /* vm_exec */ - char *execname = strrchr(bf, ' '); - - if (execname == NULL || execname[1] != '/') - continue; - - execname += 1; - size = strlen(execname); - execname[size - 1] = '\0'; /* Remove \n */ - memcpy(mmap_ev.filename, execname, size); - size = ALIGN(size, sizeof(uint64_t)); - mmap_ev.len -= mmap_ev.start; - mmap_ev.header.size = (sizeof(mmap_ev) - - 
(sizeof(mmap_ev.filename) - size)); - mmap_ev.pid = pid; - mmap_ev.tid = pid; - - if (write(output, &mmap_ev, mmap_ev.header.size) < 0) { - perror("failed to write"); - exit(-1); - } - } - } - - fclose(fp); -} - -static void synthesize_samples(void) -{ - DIR *proc; - struct dirent dirent, *next; - - proc = opendir("/proc"); - - while (!readdir_r(proc, &dirent, &next) && next) { - char *end; - pid_t pid; - - pid = strtol(dirent.d_name, &end, 10); - if (*end) /* only interested in proper numerical dirents */ - continue; - - pid_synthesize_comm_event(pid, 1); - pid_synthesize_mmap_samples(pid); - } - - closedir(proc); -} - -static int group_fd; - -static void create_counter(int counter, int cpu, pid_t pid) -{ - struct perf_counter_attr *attr = attrs + counter; - int track = 1; - - attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD; - if (freq) { - attr->freq = 1; - attr->sample_freq = freq; - } - attr->mmap = track; - attr->comm = track; - attr->inherit = (cpu < 0) && inherit; - - track = 0; /* only the first counter needs these */ - - fd[nr_cpu][counter] = sys_perf_counter_open(attr, pid, cpu, group_fd, 0); - - if (fd[nr_cpu][counter] < 0) { - int err = errno; - - error("syscall returned with %d (%s)\n", - fd[nr_cpu][counter], strerror(err)); - if (err == EPERM) - printf("Are you root?\n"); - exit(-1); - } - assert(fd[nr_cpu][counter] >= 0); - fcntl(fd[nr_cpu][counter], F_SETFL, O_NONBLOCK); - - /* - * First counter acts as the group leader: - */ - if (group && group_fd == -1) - group_fd = fd[nr_cpu][counter]; - - event_array[nr_poll].fd = fd[nr_cpu][counter]; - event_array[nr_poll].events = POLLIN; - nr_poll++; - - mmap_array[nr_cpu][counter].counter = counter; - mmap_array[nr_cpu][counter].prev = 0; - mmap_array[nr_cpu][counter].mask = mmap_pages*page_size - 1; - mmap_array[nr_cpu][counter].base = mmap(NULL, (mmap_pages+1)*page_size, - PROT_READ, MAP_SHARED, fd[nr_cpu][counter], 0); - if (mmap_array[nr_cpu][counter].base == MAP_FAILED) { - error("failed to mmap with %d (%s)\n", errno, strerror(errno)); - exit(-1); - } -} - -static void open_counters(int cpu, pid_t pid) -{ - int counter; - - if (pid > 0) { - pid_synthesize_comm_event(pid, 0); - pid_synthesize_mmap_samples(pid); - } - - group_fd = -1; - for (counter = 0; counter < nr_counters; counter++) - create_counter(counter, cpu, pid); - - nr_cpu++; -} - -static int __cmd_record(int argc, const char **argv) -{ - int i, counter; - struct stat st; - pid_t pid; - int flags; - int ret; - - page_size = sysconf(_SC_PAGE_SIZE); - nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); - assert(nr_cpus <= MAX_NR_CPUS); - assert(nr_cpus >= 0); - - if (!stat(output_name, &st) && !force && !append_file) { - fprintf(stderr, "Error, output file %s exists, use -A to append or -f to overwrite.\n", - output_name); - exit(-1); - } - - flags = O_CREAT|O_RDWR; - if (append_file) - flags |= O_APPEND; - else - flags |= O_TRUNC; - - output = open(output_name, flags, S_IRUSR|S_IWUSR); - if (output < 0) { - perror("failed to create output file"); - exit(-1); - } - - if (!system_wide) { - open_counters(-1, target_pid != -1 ? 
target_pid : getpid()); - } else for (i = 0; i < nr_cpus; i++) - open_counters(i, target_pid); - - signal(SIGCHLD, sig_handler); - signal(SIGINT, sig_handler); - - if (target_pid == -1 && argc) { - pid = fork(); - if (pid < 0) - perror("failed to fork"); - - if (!pid) { - if (execvp(argv[0], (char **)argv)) { - perror(argv[0]); - exit(-1); - } - } - } - - if (realtime_prio) { - struct sched_param param; - - param.sched_priority = realtime_prio; - if (sched_setscheduler(0, SCHED_FIFO, &param)) { - printf("Could not set realtime priority.\n"); - exit(-1); - } - } - - if (system_wide) - synthesize_samples(); - - while (!done) { - int hits = samples; - - for (i = 0; i < nr_cpu; i++) { - for (counter = 0; counter < nr_counters; counter++) - mmap_read(&mmap_array[i][counter]); - } - - if (hits == samples) - ret = poll(event_array, nr_poll, 100); - } - - /* - * Approximate RIP event size: 24 bytes. - */ - fprintf(stderr, - "[ perf record: Captured and wrote %.3f MB %s (~%lld samples) ]\n", - (double)bytes_written / 1024.0 / 1024.0, - output_name, - bytes_written / 24); - - return 0; -} - -static const char * const record_usage[] = { - "perf record [<options>] [<command>]", - "perf record [<options>] -- <command> [<options>]", - NULL -}; - -static const struct option options[] = { - OPT_CALLBACK('e', "event", NULL, "event", - "event selector. use 'perf list' to list available events", - parse_events), - OPT_INTEGER('p', "pid", &target_pid, - "record events on existing pid"), - OPT_INTEGER('r', "realtime", &realtime_prio, - "collect data with this RT SCHED_FIFO priority"), - OPT_BOOLEAN('a', "all-cpus", &system_wide, - "system-wide collection from all CPUs"), - OPT_BOOLEAN('A', "append", &append_file, - "append to the output file to do incremental profiling"), - OPT_BOOLEAN('f', "force", &force, - "overwrite existing data file"), - OPT_LONG('c', "count", &default_interval, - "event period to sample"), - OPT_STRING('o', "output", &output_name, "file", - "output file name"), - OPT_BOOLEAN('i', "inherit", &inherit, - "child tasks inherit counters"), - OPT_INTEGER('F', "freq", &freq, - "profile at this frequency"), - OPT_INTEGER('m', "mmap-pages", &mmap_pages, - "number of mmap data pages"), - OPT_END() -}; - -int cmd_record(int argc, const char **argv, const char *prefix) -{ - int counter; - - argc = parse_options(argc, argv, options, record_usage, 0); - if (!argc && target_pid == -1 && !system_wide) - usage_with_options(record_usage, options); - - if (!nr_counters) - nr_counters = 1; - - for (counter = 0; counter < nr_counters; counter++) { - if (attrs[counter].sample_period) - continue; - - attrs[counter].sample_period = default_interval; - } - - return __cmd_record(argc, argv); -} diff --git a/Documentation/perf_counter/builtin-report.c b/Documentation/perf_counter/builtin-report.c deleted file mode 100644 index 242e09ff365..00000000000 --- a/Documentation/perf_counter/builtin-report.c +++ /dev/null @@ -1,1291 +0,0 @@ -/* - * builtin-report.c - * - * Builtin report command: Analyze the perf.data input file, - * look up and read DSOs and symbol information and display - * a histogram of results, along various sorting keys. 
- */ -#include "builtin.h" - -#include "util/util.h" - -#include "util/color.h" -#include "util/list.h" -#include "util/cache.h" -#include "util/rbtree.h" -#include "util/symbol.h" -#include "util/string.h" - -#include "perf.h" - -#include "util/parse-options.h" -#include "util/parse-events.h" - -#define SHOW_KERNEL 1 -#define SHOW_USER 2 -#define SHOW_HV 4 - -static char const *input_name = "perf.data"; -static char *vmlinux = NULL; - -static char default_sort_order[] = "comm,dso"; -static char *sort_order = default_sort_order; - -static int input; -static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; - -static int dump_trace = 0; -#define dprintf(x...) do { if (dump_trace) printf(x); } while (0) - -static int verbose; -static int full_paths; - -static unsigned long page_size; -static unsigned long mmap_window = 32; - -struct ip_event { - struct perf_event_header header; - __u64 ip; - __u32 pid, tid; -}; - -struct mmap_event { - struct perf_event_header header; - __u32 pid, tid; - __u64 start; - __u64 len; - __u64 pgoff; - char filename[PATH_MAX]; -}; - -struct comm_event { - struct perf_event_header header; - __u32 pid, tid; - char comm[16]; -}; - -struct fork_event { - struct perf_event_header header; - __u32 pid, ppid; -}; - -struct period_event { - struct perf_event_header header; - __u64 time; - __u64 id; - __u64 sample_period; -}; - -typedef union event_union { - struct perf_event_header header; - struct ip_event ip; - struct mmap_event mmap; - struct comm_event comm; - struct fork_event fork; - struct period_event period; -} event_t; - -static LIST_HEAD(dsos); -static struct dso *kernel_dso; -static struct dso *vdso; - -static void dsos__add(struct dso *dso) -{ - list_add_tail(&dso->node, &dsos); -} - -static struct dso *dsos__find(const char *name) -{ - struct dso *pos; - - list_for_each_entry(pos, &dsos, node) - if (strcmp(pos->name, name) == 0) - return pos; - return NULL; -} - -static struct dso *dsos__findnew(const char *name) -{ - struct dso *dso = dsos__find(name); - int nr; - - if (dso) - return dso; - - dso = dso__new(name, 0); - if (!dso) - goto out_delete_dso; - - nr = dso__load(dso, NULL, verbose); - if (nr < 0) { - if (verbose) - fprintf(stderr, "Failed to open: %s\n", name); - goto out_delete_dso; - } - if (!nr && verbose) { - fprintf(stderr, - "No symbols found in: %s, maybe install a debug package?\n", - name); - } - - dsos__add(dso); - - return dso; - -out_delete_dso: - dso__delete(dso); - return NULL; -} - -static void dsos__fprintf(FILE *fp) -{ - struct dso *pos; - - list_for_each_entry(pos, &dsos, node) - dso__fprintf(pos, fp); -} - -static struct symbol *vdso__find_symbol(struct dso *dso, uint64_t ip) -{ - return dso__find_symbol(kernel_dso, ip); -} - -static int load_kernel(void) -{ - int err; - - kernel_dso = dso__new("[kernel]", 0); - if (!kernel_dso) - return -1; - - err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose); - if (err) { - dso__delete(kernel_dso); - kernel_dso = NULL; - } else - dsos__add(kernel_dso); - - vdso = dso__new("[vdso]", 0); - if (!vdso) - return -1; - - vdso->find_symbol = vdso__find_symbol; - - dsos__add(vdso); - - return err; -} - -static char __cwd[PATH_MAX]; -static char *cwd = __cwd; -static int cwdlen; - -static int strcommon(const char *pathname) -{ - int n = 0; - - while (pathname[n] == cwd[n] && n < cwdlen) - ++n; - - return n; -} - -struct map { - struct list_head node; - uint64_t start; - uint64_t end; - uint64_t pgoff; - uint64_t (*map_ip)(struct map *, uint64_t); - struct dso *dso; -}; - -static uint64_t 
map__map_ip(struct map *map, uint64_t ip) -{ - return ip - map->start + map->pgoff; -} - -static uint64_t vdso__map_ip(struct map *map, uint64_t ip) -{ - return ip; -} - -static struct map *map__new(struct mmap_event *event) -{ - struct map *self = malloc(sizeof(*self)); - - if (self != NULL) { - const char *filename = event->filename; - char newfilename[PATH_MAX]; - - if (cwd) { - int n = strcommon(filename); - - if (n == cwdlen) { - snprintf(newfilename, sizeof(newfilename), - ".%s", filename + n); - filename = newfilename; - } - } - - self->start = event->start; - self->end = event->start + event->len; - self->pgoff = event->pgoff; - - self->dso = dsos__findnew(filename); - if (self->dso == NULL) - goto out_delete; - - if (self->dso == vdso) - self->map_ip = vdso__map_ip; - else - self->map_ip = map__map_ip; - } - return self; -out_delete: - free(self); - return NULL; -} - -static struct map *map__clone(struct map *self) -{ - struct map *map = malloc(sizeof(*self)); - - if (!map) - return NULL; - - memcpy(map, self, sizeof(*self)); - - return map; -} - -static int map__overlap(struct map *l, struct map *r) -{ - if (l->start > r->start) { - struct map *t = l; - l = r; - r = t; - } - - if (l->end > r->start) - return 1; - - return 0; -} - -static size_t map__fprintf(struct map *self, FILE *fp) -{ - return fprintf(fp, " %"PRIx64"-%"PRIx64" %"PRIx64" %s\n", - self->start, self->end, self->pgoff, self->dso->name); -} - - -struct thread { - struct rb_node rb_node; - struct list_head maps; - pid_t pid; - char *comm; -}; - -static struct thread *thread__new(pid_t pid) -{ - struct thread *self = malloc(sizeof(*self)); - - if (self != NULL) { - self->pid = pid; - self->comm = malloc(32); - if (self->comm) - snprintf(self->comm, 32, ":%d", self->pid); - INIT_LIST_HEAD(&self->maps); - } - - return self; -} - -static int thread__set_comm(struct thread *self, const char *comm) -{ - if (self->comm) - free(self->comm); - self->comm = strdup(comm); - return self->comm ? 
0 : -ENOMEM; -} - -static size_t thread__fprintf(struct thread *self, FILE *fp) -{ - struct map *pos; - size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm); - - list_for_each_entry(pos, &self->maps, node) - ret += map__fprintf(pos, fp); - - return ret; -} - - -static struct rb_root threads; -static struct thread *last_match; - -static struct thread *threads__findnew(pid_t pid) -{ - struct rb_node **p = &threads.rb_node; - struct rb_node *parent = NULL; - struct thread *th; - - /* - * Front-end cache - PID lookups come in blocks, - * so most of the time we don't have to look up - * the full rbtree: - */ - if (last_match && last_match->pid == pid) - return last_match; - - while (*p != NULL) { - parent = *p; - th = rb_entry(parent, struct thread, rb_node); - - if (th->pid == pid) { - last_match = th; - return th; - } - - if (pid < th->pid) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - th = thread__new(pid); - if (th != NULL) { - rb_link_node(&th->rb_node, parent, p); - rb_insert_color(&th->rb_node, &threads); - last_match = th; - } - - return th; -} - -static void thread__insert_map(struct thread *self, struct map *map) -{ - struct map *pos, *tmp; - - list_for_each_entry_safe(pos, tmp, &self->maps, node) { - if (map__overlap(pos, map)) { - list_del_init(&pos->node); - /* XXX leaks dsos */ - free(pos); - } - } - - list_add_tail(&map->node, &self->maps); -} - -static int thread__fork(struct thread *self, struct thread *parent) -{ - struct map *map; - - if (self->comm) - free(self->comm); - self->comm = strdup(parent->comm); - if (!self->comm) - return -ENOMEM; - - list_for_each_entry(map, &parent->maps, node) { - struct map *new = map__clone(map); - if (!new) - return -ENOMEM; - thread__insert_map(self, new); - } - - return 0; -} - -static struct map *thread__find_map(struct thread *self, uint64_t ip) -{ - struct map *pos; - - if (self == NULL) - return NULL; - - list_for_each_entry(pos, &self->maps, node) - if (ip >= pos->start && ip <= pos->end) - return pos; - - return NULL; -} - -static size_t threads__fprintf(FILE *fp) -{ - size_t ret = 0; - struct rb_node *nd; - - for (nd = rb_first(&threads); nd; nd = rb_next(nd)) { - struct thread *pos = rb_entry(nd, struct thread, rb_node); - - ret += thread__fprintf(pos, fp); - } - - return ret; -} - -/* - * histogram, sorted on item, collects counts - */ - -static struct rb_root hist; - -struct hist_entry { - struct rb_node rb_node; - - struct thread *thread; - struct map *map; - struct dso *dso; - struct symbol *sym; - uint64_t ip; - char level; - - uint32_t count; -}; - -/* - * configurable sorting bits - */ - -struct sort_entry { - struct list_head list; - - char *header; - - int64_t (*cmp)(struct hist_entry *, struct hist_entry *); - int64_t (*collapse)(struct hist_entry *, struct hist_entry *); - size_t (*print)(FILE *fp, struct hist_entry *); -}; - -/* --sort pid */ - -static int64_t -sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return right->thread->pid - left->thread->pid; -} - -static size_t -sort__thread_print(FILE *fp, struct hist_entry *self) -{ - return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid); -} - -static struct sort_entry sort_thread = { - .header = " Command: Pid", - .cmp = sort__thread_cmp, - .print = sort__thread_print, -}; - -/* --sort comm */ - -static int64_t -sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return right->thread->pid - left->thread->pid; -} - -static int64_t -sort__comm_collapse(struct hist_entry *left, struct
hist_entry *right) -{ - char *comm_l = left->thread->comm; - char *comm_r = right->thread->comm; - - if (!comm_l || !comm_r) { - if (!comm_l && !comm_r) - return 0; - else if (!comm_l) - return -1; - else - return 1; - } - - return strcmp(comm_l, comm_r); -} - -static size_t -sort__comm_print(FILE *fp, struct hist_entry *self) -{ - return fprintf(fp, "%16s", self->thread->comm); -} - -static struct sort_entry sort_comm = { - .header = " Command", - .cmp = sort__comm_cmp, - .collapse = sort__comm_collapse, - .print = sort__comm_print, -}; - -/* --sort dso */ - -static int64_t -sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) -{ - struct dso *dso_l = left->dso; - struct dso *dso_r = right->dso; - - if (!dso_l || !dso_r) { - if (!dso_l && !dso_r) - return 0; - else if (!dso_l) - return -1; - else - return 1; - } - - return strcmp(dso_l->name, dso_r->name); -} - -static size_t -sort__dso_print(FILE *fp, struct hist_entry *self) -{ - if (self->dso) - return fprintf(fp, "%-25s", self->dso->name); - - return fprintf(fp, "%016llx ", (__u64)self->ip); -} - -static struct sort_entry sort_dso = { - .header = "Shared Object ", - .cmp = sort__dso_cmp, - .print = sort__dso_print, -}; - -/* --sort symbol */ - -static int64_t -sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) -{ - uint64_t ip_l, ip_r; - - if (left->sym == right->sym) - return 0; - - ip_l = left->sym ? left->sym->start : left->ip; - ip_r = right->sym ? right->sym->start : right->ip; - - return (int64_t)(ip_r - ip_l); -} - -static size_t -sort__sym_print(FILE *fp, struct hist_entry *self) -{ - size_t ret = 0; - - if (verbose) - ret += fprintf(fp, "%#018llx ", (__u64)self->ip); - - if (self->sym) { - ret += fprintf(fp, "[%c] %s", - self->dso == kernel_dso ? 'k' : '.', self->sym->name); - } else { - ret += fprintf(fp, "%#016llx", (__u64)self->ip); - } - - return ret; -} - -static struct sort_entry sort_sym = { - .header = "Symbol", - .cmp = sort__sym_cmp, - .print = sort__sym_print, -}; - -static int sort__need_collapse = 0; - -struct sort_dimension { - char *name; - struct sort_entry *entry; - int taken; -}; - -static struct sort_dimension sort_dimensions[] = { - { .name = "pid", .entry = &sort_thread, }, - { .name = "comm", .entry = &sort_comm, }, - { .name = "dso", .entry = &sort_dso, }, - { .name = "symbol", .entry = &sort_sym, }, -}; - -static LIST_HEAD(hist_entry__sort_list); - -static int sort_dimension__add(char *tok) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { - struct sort_dimension *sd = &sort_dimensions[i]; - - if (sd->taken) - continue; - - if (strncasecmp(tok, sd->name, strlen(tok))) - continue; - - if (sd->entry->collapse) - sort__need_collapse = 1; - - list_add_tail(&sd->entry->list, &hist_entry__sort_list); - sd->taken = 1; - - return 0; - } - - return -ESRCH; -} - -static int64_t -hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) -{ - struct sort_entry *se; - int64_t cmp = 0; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - cmp = se->cmp(left, right); - if (cmp) - break; - } - - return cmp; -} - -static int64_t -hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) -{ - struct sort_entry *se; - int64_t cmp = 0; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - int64_t (*f)(struct hist_entry *, struct hist_entry *); - - f = se->collapse ?: se->cmp; - - cmp = f(left, right); - if (cmp) - break; - } - - return cmp; -} - -static size_t -hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples) 
-{ - struct sort_entry *se; - size_t ret; - - if (total_samples) { - double percent = self->count * 100.0 / total_samples; - char *color = PERF_COLOR_NORMAL; - - /* - * We color high-overhead entries in red, low-overhead - * entries in green - and keep the middle ground normal: - */ - if (percent >= 5.0) - color = PERF_COLOR_RED; - if (percent < 0.5) - color = PERF_COLOR_GREEN; - - ret = color_fprintf(fp, color, " %6.2f%%", - (self->count * 100.0) / total_samples); - } else - ret = fprintf(fp, "%12d ", self->count); - - list_for_each_entry(se, &hist_entry__sort_list, list) { - fprintf(fp, " "); - ret += se->print(fp, self); - } - - ret += fprintf(fp, "\n"); - - return ret; -} - -/* - * collect histogram counts - */ - -static int -hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, - struct symbol *sym, uint64_t ip, char level) -{ - struct rb_node **p = &hist.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *he; - struct hist_entry entry = { - .thread = thread, - .map = map, - .dso = dso, - .sym = sym, - .ip = ip, - .level = level, - .count = 1, - }; - int cmp; - - while (*p != NULL) { - parent = *p; - he = rb_entry(parent, struct hist_entry, rb_node); - - cmp = hist_entry__cmp(&entry, he); - - if (!cmp) { - he->count++; - return 0; - } - - if (cmp < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - he = malloc(sizeof(*he)); - if (!he) - return -ENOMEM; - *he = entry; - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &hist); - - return 0; -} - -static void hist_entry__free(struct hist_entry *he) -{ - free(he); -} - -/* - * collapse the histogram - */ - -static struct rb_root collapse_hists; - -static void collapse__insert_entry(struct hist_entry *he) -{ - struct rb_node **p = &collapse_hists.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *iter; - int64_t cmp; - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct hist_entry, rb_node); - - cmp = hist_entry__collapse(iter, he); - - if (!cmp) { - iter->count += he->count; - hist_entry__free(he); - return; - } - - if (cmp < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &collapse_hists); -} - -static void collapse__resort(void) -{ - struct rb_node *next; - struct hist_entry *n; - - if (!sort__need_collapse) - return; - - next = rb_first(&hist); - while (next) { - n = rb_entry(next, struct hist_entry, rb_node); - next = rb_next(&n->rb_node); - - rb_erase(&n->rb_node, &hist); - collapse__insert_entry(n); - } -} - -/* - * reverse the map, sort on count. 
- */ - -static struct rb_root output_hists; - -static void output__insert_entry(struct hist_entry *he) -{ - struct rb_node **p = &output_hists.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *iter; - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct hist_entry, rb_node); - - if (he->count > iter->count) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &output_hists); -} - -static void output__resort(void) -{ - struct rb_node *next; - struct hist_entry *n; - struct rb_root *tree = &hist; - - if (sort__need_collapse) - tree = &collapse_hists; - - next = rb_first(tree); - - while (next) { - n = rb_entry(next, struct hist_entry, rb_node); - next = rb_next(&n->rb_node); - - rb_erase(&n->rb_node, tree); - output__insert_entry(n); - } -} - -static size_t output__fprintf(FILE *fp, uint64_t total_samples) -{ - struct hist_entry *pos; - struct sort_entry *se; - struct rb_node *nd; - size_t ret = 0; - - fprintf(fp, "\n"); - fprintf(fp, "#\n"); - fprintf(fp, "# (%Ld samples)\n", (__u64)total_samples); - fprintf(fp, "#\n"); - - fprintf(fp, "# Overhead"); - list_for_each_entry(se, &hist_entry__sort_list, list) - fprintf(fp, " %s", se->header); - fprintf(fp, "\n"); - - fprintf(fp, "# ........"); - list_for_each_entry(se, &hist_entry__sort_list, list) { - int i; - - fprintf(fp, " "); - for (i = 0; i < strlen(se->header); i++) - fprintf(fp, "."); - } - fprintf(fp, "\n"); - - fprintf(fp, "#\n"); - - for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) { - pos = rb_entry(nd, struct hist_entry, rb_node); - ret += hist_entry__fprintf(fp, pos, total_samples); - } - - if (!strcmp(sort_order, default_sort_order)) { - fprintf(fp, "#\n"); - fprintf(fp, "# (For more details, try: perf report --sort comm,dso,symbol)\n"); - fprintf(fp, "#\n"); - } - fprintf(fp, "\n"); - - return ret; -} - -static void register_idle_thread(void) -{ - struct thread *thread = threads__findnew(0); - - if (thread == NULL || - thread__set_comm(thread, "[idle]")) { - fprintf(stderr, "problem inserting idle task.\n"); - exit(-1); - } -} - -static unsigned long total = 0, - total_mmap = 0, - total_comm = 0, - total_fork = 0, - total_unknown = 0; - -static int -process_overflow_event(event_t *event, unsigned long offset, unsigned long head) -{ - char level; - int show = 0; - struct dso *dso = NULL; - struct thread *thread = threads__findnew(event->ip.pid); - uint64_t ip = event->ip.ip; - struct map *map = NULL; - - dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.misc, - event->ip.pid, - (void *)(long)ip); - - dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid); - - if (thread == NULL) { - fprintf(stderr, "problem processing %d event, skipping it.\n", - event->header.type); - return -1; - } - - if (event->header.misc & PERF_EVENT_MISC_KERNEL) { - show = SHOW_KERNEL; - level = 'k'; - - dso = kernel_dso; - - dprintf(" ...... dso: %s\n", dso->name); - - } else if (event->header.misc & PERF_EVENT_MISC_USER) { - - show = SHOW_USER; - level = '.'; - - map = thread__find_map(thread, ip); - if (map != NULL) { - ip = map->map_ip(map, ip); - dso = map->dso; - } else { - /* - * If this is outside of all known maps, - * and is a negative address, try to look it - * up in the kernel dso, as it might be a - * vsyscall (which executes in user-mode): - */ - if ((long long)ip < 0) - dso = kernel_dso; - } - dprintf(" ...... dso: %s\n", dso ? 
dso->name : ""); - - } else { - show = SHOW_HV; - level = 'H'; - dprintf(" ...... dso: [hypervisor]\n"); - } - - if (show & show_mask) { - struct symbol *sym = NULL; - - if (dso) - sym = dso->find_symbol(dso, ip); - - if (hist_entry__add(thread, map, dso, sym, ip, level)) { - fprintf(stderr, - "problem incrementing symbol count, skipping event\n"); - return -1; - } - } - total++; - - return 0; -} - -static int -process_mmap_event(event_t *event, unsigned long offset, unsigned long head) -{ - struct thread *thread = threads__findnew(event->mmap.pid); - struct map *map = map__new(&event->mmap); - - dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->mmap.pid, - (void *)(long)event->mmap.start, - (void *)(long)event->mmap.len, - (void *)(long)event->mmap.pgoff, - event->mmap.filename); - - if (thread == NULL || map == NULL) { - dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n"); - return 0; - } - - thread__insert_map(thread, map); - total_mmap++; - - return 0; -} - -static int -process_comm_event(event_t *event, unsigned long offset, unsigned long head) -{ - struct thread *thread = threads__findnew(event->comm.pid); - - dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->comm.comm, event->comm.pid); - - if (thread == NULL || - thread__set_comm(thread, event->comm.comm)) { - dprintf("problem processing PERF_EVENT_COMM, skipping event.\n"); - return -1; - } - total_comm++; - - return 0; -} - -static int -process_fork_event(event_t *event, unsigned long offset, unsigned long head) -{ - struct thread *thread = threads__findnew(event->fork.pid); - struct thread *parent = threads__findnew(event->fork.ppid); - - dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->fork.pid, event->fork.ppid); - - if (!thread || !parent || thread__fork(thread, parent)) { - dprintf("problem processing PERF_EVENT_FORK, skipping event.\n"); - return -1; - } - total_fork++; - - return 0; -} - -static int -process_period_event(event_t *event, unsigned long offset, unsigned long head) -{ - dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->period.time, - event->period.id, - event->period.sample_period); - - return 0; -} - -static int -process_event(event_t *event, unsigned long offset, unsigned long head) -{ - if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) - return process_overflow_event(event, offset, head); - - switch (event->header.type) { - case PERF_EVENT_MMAP: - return process_mmap_event(event, offset, head); - - case PERF_EVENT_COMM: - return process_comm_event(event, offset, head); - - case PERF_EVENT_FORK: - return process_fork_event(event, offset, head); - - case PERF_EVENT_PERIOD: - return process_period_event(event, offset, head); - /* - * We dont process them right now but they are fine: - */ - - case PERF_EVENT_THROTTLE: - case PERF_EVENT_UNTHROTTLE: - return 0; - - default: - return -1; - } - - return 0; -} - -static int __cmd_report(void) -{ - int ret, rc = EXIT_FAILURE; - unsigned long offset = 0; - unsigned long head = 0; - struct stat stat; - event_t *event; - uint32_t size; - char *buf; - - register_idle_thread(); - - input = open(input_name, O_RDONLY); - if (input < 0) { - perror("failed to open file"); - exit(-1); - } - - ret = fstat(input, &stat); - if (ret < 0) { - perror("failed 
to stat file"); - exit(-1); - } - - if (!stat.st_size) { - fprintf(stderr, "zero-sized file, nothing to do!\n"); - exit(0); - } - - if (load_kernel() < 0) { - perror("failed to load kernel symbols"); - return EXIT_FAILURE; - } - - if (!full_paths) { - if (getcwd(__cwd, sizeof(__cwd)) == NULL) { - perror("failed to get the current directory"); - return EXIT_FAILURE; - } - cwdlen = strlen(cwd); - } else { - cwd = NULL; - cwdlen = 0; - } -remap: - buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, - MAP_SHARED, input, offset); - if (buf == MAP_FAILED) { - perror("failed to mmap file"); - exit(-1); - } - -more: - event = (event_t *)(buf + head); - - size = event->header.size; - if (!size) - size = 8; - - if (head + event->header.size >= page_size * mmap_window) { - unsigned long shift = page_size * (head / page_size); - int ret; - - ret = munmap(buf, page_size * mmap_window); - assert(ret == 0); - - offset += shift; - head -= shift; - goto remap; - } - - size = event->header.size; - - dprintf("%p [%p]: event: %d\n", - (void *)(offset + head), - (void *)(long)event->header.size, - event->header.type); - - if (!size || process_event(event, offset, head) < 0) { - - dprintf("%p [%p]: skipping unknown header type: %d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.type); - - total_unknown++; - - /* - * assume we lost track of the stream, check alignment, and - * increment a single u64 in the hope to catch on again 'soon'. - */ - - if (unlikely(head & 7)) - head &= ~7ULL; - - size = 8; - } - - head += size; - - if (offset + head < stat.st_size) - goto more; - - rc = EXIT_SUCCESS; - close(input); - - dprintf(" IP events: %10ld\n", total); - dprintf(" mmap events: %10ld\n", total_mmap); - dprintf(" comm events: %10ld\n", total_comm); - dprintf(" fork events: %10ld\n", total_fork); - dprintf(" unknown events: %10ld\n", total_unknown); - - if (dump_trace) - return 0; - - if (verbose >= 3) - threads__fprintf(stdout); - - if (verbose >= 2) - dsos__fprintf(stdout); - - collapse__resort(); - output__resort(); - output__fprintf(stdout, total); - - return rc; -} - -static const char * const report_usage[] = { - "perf report [] ", - NULL -}; - -static const struct option options[] = { - OPT_STRING('i', "input", &input_name, "file", - "input file name"), - OPT_BOOLEAN('v', "verbose", &verbose, - "be more verbose (show symbol address, etc)"), - OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, - "dump raw trace in ASCII"), - OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"), - OPT_STRING('s', "sort", &sort_order, "key[,key2...]", - "sort by key(s): pid, comm, dso, symbol. Default: pid,symbol"), - OPT_BOOLEAN('P', "full-paths", &full_paths, - "Don't shorten the pathnames taking into account the cwd"), - OPT_END() -}; - -static void setup_sorting(void) -{ - char *tmp, *tok, *str = strdup(sort_order); - - for (tok = strtok_r(str, ", ", &tmp); - tok; tok = strtok_r(NULL, ", ", &tmp)) { - if (sort_dimension__add(tok) < 0) { - error("Unknown --sort key: `%s'", tok); - usage_with_options(report_usage, options); - } - } - - free(str); -} - -int cmd_report(int argc, const char **argv, const char *prefix) -{ - symbol__init(); - - page_size = getpagesize(); - - argc = parse_options(argc, argv, options, report_usage, 0); - - setup_sorting(); - - /* - * Any (unrecognized) arguments left? 
- */ - if (argc) - usage_with_options(report_usage, options); - - setup_pager(); - - return __cmd_report(); -} diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c deleted file mode 100644 index 2cbf5a18958..00000000000 --- a/Documentation/perf_counter/builtin-stat.c +++ /dev/null @@ -1,339 +0,0 @@ -/* - * builtin-stat.c - * - * Builtin stat command: Give a precise performance counters summary - * overview about any workload, CPU or specific PID. - * - * Sample output: - - $ perf stat ~/hackbench 10 - Time: 0.104 - - Performance counter stats for '/home/mingo/hackbench': - - 1255.538611 task clock ticks # 10.143 CPU utilization factor - 54011 context switches # 0.043 M/sec - 385 CPU migrations # 0.000 M/sec - 17755 pagefaults # 0.014 M/sec - 3808323185 CPU cycles # 3033.219 M/sec - 1575111190 instructions # 1254.530 M/sec - 17367895 cache references # 13.833 M/sec - 7674421 cache misses # 6.112 M/sec - - Wall-clock time elapsed: 123.786620 msecs - - * - * Copyright (C) 2008, Red Hat Inc, Ingo Molnar - * - * Improvements and fixes by: - * - * Arjan van de Ven - * Yanmin Zhang - * Wu Fengguang - * Mike Galbraith - * Paul Mackerras - * - * Released under the GPL v2. (and only v2, not any later version) - */ - -#include "perf.h" -#include "builtin.h" -#include "util/util.h" -#include "util/parse-options.h" -#include "util/parse-events.h" - -#include - -static struct perf_counter_attr default_attrs[MAX_COUNTERS] = { - - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_TASK_CLOCK }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_CONTEXT_SWITCHES }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_CPU_MIGRATIONS }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_PAGE_FAULTS }, - - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CPU_CYCLES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_INSTRUCTIONS }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CACHE_REFERENCES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CACHE_MISSES }, -}; - -static int system_wide = 0; -static int inherit = 1; - -static int fd[MAX_NR_CPUS][MAX_COUNTERS]; - -static int target_pid = -1; -static int nr_cpus = 0; -static unsigned int page_size; - -static int scale = 1; - -static const unsigned int default_count[] = { - 1000000, - 1000000, - 10000, - 10000, - 1000000, - 10000, -}; - -static __u64 event_res[MAX_COUNTERS][3]; -static __u64 event_scaled[MAX_COUNTERS]; - -static __u64 runtime_nsecs; -static __u64 walltime_nsecs; - -static void create_perfstat_counter(int counter) -{ - struct perf_counter_attr *attr = attrs + counter; - - if (scale) - attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | - PERF_FORMAT_TOTAL_TIME_RUNNING; - - if (system_wide) { - int cpu; - for (cpu = 0; cpu < nr_cpus; cpu ++) { - fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0); - if (fd[cpu][counter] < 0) { - printf("perfstat error: syscall returned with %d (%s)\n", - fd[cpu][counter], strerror(errno)); - exit(-1); - } - } - } else { - attr->inherit = inherit; - attr->disabled = 1; - - fd[0][counter] = sys_perf_counter_open(attr, 0, -1, -1, 0); - if (fd[0][counter] < 0) { - printf("perfstat error: syscall returned with %d (%s)\n", - fd[0][counter], strerror(errno)); - exit(-1); - } - } -} - -/* - * Does the counter have nsecs as a unit? 
- */ -static inline int nsec_counter(int counter) -{ - if (attrs[counter].type != PERF_TYPE_SOFTWARE) - return 0; - - if (attrs[counter].config == PERF_COUNT_CPU_CLOCK) - return 1; - - if (attrs[counter].config == PERF_COUNT_TASK_CLOCK) - return 1; - - return 0; -} - -/* - * Read out the results of a single counter: - */ -static void read_counter(int counter) -{ - __u64 *count, single_count[3]; - ssize_t res; - int cpu, nv; - int scaled; - - count = event_res[counter]; - - count[0] = count[1] = count[2] = 0; - - nv = scale ? 3 : 1; - for (cpu = 0; cpu < nr_cpus; cpu ++) { - res = read(fd[cpu][counter], single_count, nv * sizeof(__u64)); - assert(res == nv * sizeof(__u64)); - - count[0] += single_count[0]; - if (scale) { - count[1] += single_count[1]; - count[2] += single_count[2]; - } - } - - scaled = 0; - if (scale) { - if (count[2] == 0) { - event_scaled[counter] = -1; - count[0] = 0; - return; - } - - if (count[2] < count[1]) { - event_scaled[counter] = 1; - count[0] = (unsigned long long) - ((double)count[0] * count[1] / count[2] + 0.5); - } - } - /* - * Save the full runtime - to allow normalization during printout: - */ - if (attrs[counter].type == PERF_TYPE_SOFTWARE && - attrs[counter].config == PERF_COUNT_TASK_CLOCK) - runtime_nsecs = count[0]; -} - -/* - * Print out the results of a single counter: - */ -static void print_counter(int counter) -{ - __u64 *count; - int scaled; - - count = event_res[counter]; - scaled = event_scaled[counter]; - - if (scaled == -1) { - fprintf(stderr, " %14s %-20s\n", - "", event_name(counter)); - return; - } - - if (nsec_counter(counter)) { - double msecs = (double)count[0] / 1000000; - - fprintf(stderr, " %14.6f %-20s", - msecs, event_name(counter)); - if (attrs[counter].type == PERF_TYPE_SOFTWARE && - attrs[counter].config == PERF_COUNT_TASK_CLOCK) { - - fprintf(stderr, " # %11.3f CPU utilization factor", - (double)count[0] / (double)walltime_nsecs); - } - } else { - fprintf(stderr, " %14Ld %-20s", - count[0], event_name(counter)); - if (runtime_nsecs) - fprintf(stderr, " # %11.3f M/sec", - (double)count[0]/runtime_nsecs*1000.0); - } - if (scaled) - fprintf(stderr, " (scaled from %.2f%%)", - (double) count[2] / count[1] * 100); - fprintf(stderr, "\n"); -} - -static int do_perfstat(int argc, const char **argv) -{ - unsigned long long t0, t1; - int counter; - int status; - int pid; - int i; - - if (!system_wide) - nr_cpus = 1; - - for (counter = 0; counter < nr_counters; counter++) - create_perfstat_counter(counter); - - /* - * Enable counters and exec the command: - */ - t0 = rdclock(); - prctl(PR_TASK_PERF_COUNTERS_ENABLE); - - if ((pid = fork()) < 0) - perror("failed to fork"); - - if (!pid) { - if (execvp(argv[0], (char **)argv)) { - perror(argv[0]); - exit(-1); - } - } - - while (wait(&status) >= 0) - ; - - prctl(PR_TASK_PERF_COUNTERS_DISABLE); - t1 = rdclock(); - - walltime_nsecs = t1 - t0; - - fflush(stdout); - - fprintf(stderr, "\n"); - fprintf(stderr, " Performance counter stats for \'%s", argv[0]); - - for (i = 1; i < argc; i++) - fprintf(stderr, " %s", argv[i]); - - fprintf(stderr, "\':\n"); - fprintf(stderr, "\n"); - - for (counter = 0; counter < nr_counters; counter++) - read_counter(counter); - - for (counter = 0; counter < nr_counters; counter++) - print_counter(counter); - - - fprintf(stderr, "\n"); - fprintf(stderr, " Wall-clock time elapsed: %12.6f msecs\n", - (double)(t1-t0)/1e6); - fprintf(stderr, "\n"); - - return 0; -} - -static void skip_signal(int signo) -{ -} - -static const char * const stat_usage[] = { - "perf stat [<options>] <command>",
NULL -}; - -static const struct option options[] = { - OPT_CALLBACK('e', "event", NULL, "event", - "event selector. use 'perf list' to list available events", - parse_events), - OPT_BOOLEAN('i', "inherit", &inherit, - "child tasks inherit counters"), - OPT_INTEGER('p', "pid", &target_pid, - "stat events on existing pid"), - OPT_BOOLEAN('a', "all-cpus", &system_wide, - "system-wide collection from all CPUs"), - OPT_BOOLEAN('S', "scale", &scale, - "scale/normalize counters"), - OPT_END() -}; - -int cmd_stat(int argc, const char **argv, const char *prefix) -{ - page_size = sysconf(_SC_PAGE_SIZE); - - memcpy(attrs, default_attrs, sizeof(attrs)); - - argc = parse_options(argc, argv, options, stat_usage, 0); - if (!argc) - usage_with_options(stat_usage, options); - - if (!nr_counters) - nr_counters = 8; - - nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); - assert(nr_cpus <= MAX_NR_CPUS); - assert(nr_cpus >= 0); - - /* - * We don't want to block the signals - that would cause - * child tasks to inherit that and Ctrl-C would not work. - * What we want is for Ctrl-C to work in the exec()-ed - * task, but being ignored by perf stat itself: - */ - signal(SIGINT, skip_signal); - signal(SIGALRM, skip_signal); - signal(SIGABRT, skip_signal); - - return do_perfstat(argc, argv); -} diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c deleted file mode 100644 index f2e7312f85c..00000000000 --- a/Documentation/perf_counter/builtin-top.c +++ /dev/null @@ -1,692 +0,0 @@ -/* - * builtin-top.c - * - * Builtin top command: Display a continuously updated profile of - * any workload, CPU or specific PID. - * - * Copyright (C) 2008, Red Hat Inc, Ingo Molnar - * - * Improvements and fixes by: - * - * Arjan van de Ven - * Yanmin Zhang - * Wu Fengguang - * Mike Galbraith - * Paul Mackerras - * - * Released under the GPL v2. (and only v2, not any later version) - */ -#include "builtin.h" - -#include "perf.h" - -#include "util/symbol.h" - -#include "util/color.h" -#include "util/util.h" -#include "util/rbtree.h" -#include "util/parse-options.h" -#include "util/parse-events.h" - -#include -#include - -#include - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -static int fd[MAX_NR_CPUS][MAX_COUNTERS]; - -static int system_wide = 0; - -static int default_interval = 100000; - -static __u64 count_filter = 5; -static int print_entries = 15; - -static int target_pid = -1; -static int profile_cpu = -1; -static int nr_cpus = 0; -static unsigned int realtime_prio = 0; -static int group = 0; -static unsigned int page_size; -static unsigned int mmap_pages = 16; -static int freq = 0; - -static char *sym_filter; -static unsigned long filter_start; -static unsigned long filter_end; - -static int delay_secs = 2; -static int zero; -static int dump_symtab; - -/* - * Symbols - */ - -static uint64_t min_ip; -static uint64_t max_ip = -1ll; - -struct sym_entry { - struct rb_node rb_node; - struct list_head node; - unsigned long count[MAX_COUNTERS]; - unsigned long snap_count; - double weight; - int skip; -}; - -struct sym_entry *sym_filter_entry; - -struct dso *kernel_dso; - -/* - * Symbols will be added here in record_ip and will be removed - * after they decay. - */ -static LIST_HEAD(active_symbols); -static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER; - -/* - * Ordering weight: count-1 * count-2 * ...
/ count-n - */ -static double sym_weight(const struct sym_entry *sym) -{ - double weight = sym->snap_count; - int counter; - - for (counter = 1; counter < nr_counters-1; counter++) - weight *= sym->count[counter]; - - weight /= (sym->count[counter] + 1); - - return weight; -} - -static long samples; -static long userspace_samples; -static const char CONSOLE_CLEAR[] = ""; - -static void __list_insert_active_sym(struct sym_entry *syme) -{ - list_add(&syme->node, &active_symbols); -} - -static void list_remove_active_sym(struct sym_entry *syme) -{ - pthread_mutex_lock(&active_symbols_lock); - list_del_init(&syme->node); - pthread_mutex_unlock(&active_symbols_lock); -} - -static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se) -{ - struct rb_node **p = &tree->rb_node; - struct rb_node *parent = NULL; - struct sym_entry *iter; - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct sym_entry, rb_node); - - if (se->weight > iter->weight) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&se->rb_node, parent, p); - rb_insert_color(&se->rb_node, tree); -} - -static void print_sym_table(void) -{ - int printed = 0, j; - int counter; - float samples_per_sec = samples/delay_secs; - float ksamples_per_sec = (samples-userspace_samples)/delay_secs; - float sum_ksamples = 0.0; - struct sym_entry *syme, *n; - struct rb_root tmp = RB_ROOT; - struct rb_node *nd; - - samples = userspace_samples = 0; - - /* Sort the active symbols */ - pthread_mutex_lock(&active_symbols_lock); - syme = list_entry(active_symbols.next, struct sym_entry, node); - pthread_mutex_unlock(&active_symbols_lock); - - list_for_each_entry_safe_from(syme, n, &active_symbols, node) { - syme->snap_count = syme->count[0]; - if (syme->snap_count != 0) { - syme->weight = sym_weight(syme); - rb_insert_active_sym(&tmp, syme); - sum_ksamples += syme->snap_count; - - for (j = 0; j < nr_counters; j++) - syme->count[j] = zero ? 
0 : syme->count[j] * 7 / 8; - } else - list_remove_active_sym(syme); - } - - puts(CONSOLE_CLEAR); - - printf( -"------------------------------------------------------------------------------\n"); - printf( " PerfTop:%8.0f irqs/sec kernel:%4.1f%% [", - samples_per_sec, - 100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec))); - - if (nr_counters == 1) { - printf("%Ld", attrs[0].sample_period); - if (freq) - printf("Hz "); - else - printf(" "); - } - - for (counter = 0; counter < nr_counters; counter++) { - if (counter) - printf("/"); - - printf("%s", event_name(counter)); - } - - printf( "], "); - - if (target_pid != -1) - printf(" (target_pid: %d", target_pid); - else - printf(" (all"); - - if (profile_cpu != -1) - printf(", cpu: %d)\n", profile_cpu); - else { - if (target_pid != -1) - printf(")\n"); - else - printf(", %d CPUs)\n", nr_cpus); - } - - printf("------------------------------------------------------------------------------\n\n"); - - if (nr_counters == 1) - printf(" samples pcnt"); - else - printf(" weight samples pcnt"); - - printf(" RIP kernel function\n" - " ______ _______ _____ ________________ _______________\n\n" - ); - - for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) { - struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node); - struct symbol *sym = (struct symbol *)(syme + 1); - char *color = PERF_COLOR_NORMAL; - double pcnt; - - if (++printed > print_entries || syme->snap_count < count_filter) - continue; - - pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) / - sum_ksamples)); - - /* - * We color high-overhead entries in red, low-overhead - * entries in green - and keep the middle ground normal: - */ - if (pcnt >= 5.0) - color = PERF_COLOR_RED; - if (pcnt < 0.5) - color = PERF_COLOR_GREEN; - - if (nr_counters == 1) - printf("%20.2f - ", syme->weight); - else - printf("%9.1f %10ld - ", syme->weight, syme->snap_count); - - color_fprintf(stdout, color, "%4.1f%%", pcnt); - printf(" - %016llx : %s\n", sym->start, sym->name); - } -} - -static void *display_thread(void *arg) -{ - struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; - int delay_msecs = delay_secs * 1000; - - printf("PerfTop refresh period: %d seconds\n", delay_secs); - - do { - print_sym_table(); - } while (!poll(&stdin_poll, 1, delay_msecs) == 1); - - printf("key pressed - exiting.\n"); - exit(0); - - return NULL; -} - -static int symbol_filter(struct dso *self, struct symbol *sym) -{ - static int filter_match; - struct sym_entry *syme; - const char *name = sym->name; - - if (!strcmp(name, "_text") || - !strcmp(name, "_etext") || - !strcmp(name, "_sinittext") || - !strncmp("init_module", name, 11) || - !strncmp("cleanup_module", name, 14) || - strstr(name, "_text_start") || - strstr(name, "_text_end")) - return 1; - - syme = dso__sym_priv(self, sym); - /* Tag samples to be skipped. 
*/ - if (!strcmp("default_idle", name) || - !strcmp("cpu_idle", name) || - !strcmp("enter_idle", name) || - !strcmp("exit_idle", name) || - !strcmp("mwait_idle", name)) - syme->skip = 1; - - if (filter_match == 1) { - filter_end = sym->start; - filter_match = -1; - if (filter_end - filter_start > 10000) { - fprintf(stderr, - "hm, too large filter symbol <%s> - skipping.\n", - sym_filter); - fprintf(stderr, "symbol filter start: %016lx\n", - filter_start); - fprintf(stderr, " end: %016lx\n", - filter_end); - filter_end = filter_start = 0; - sym_filter = NULL; - sleep(1); - } - } - - if (filter_match == 0 && sym_filter && !strcmp(name, sym_filter)) { - filter_match = 1; - filter_start = sym->start; - } - - - return 0; -} - -static int parse_symbols(void) -{ - struct rb_node *node; - struct symbol *sym; - - kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry)); - if (kernel_dso == NULL) - return -1; - - if (dso__load_kernel(kernel_dso, NULL, symbol_filter, 1) != 0) - goto out_delete_dso; - - node = rb_first(&kernel_dso->syms); - sym = rb_entry(node, struct symbol, rb_node); - min_ip = sym->start; - - node = rb_last(&kernel_dso->syms); - sym = rb_entry(node, struct symbol, rb_node); - max_ip = sym->end; - - if (dump_symtab) - dso__fprintf(kernel_dso, stderr); - - return 0; - -out_delete_dso: - dso__delete(kernel_dso); - kernel_dso = NULL; - return -1; -} - -#define TRACE_COUNT 3 - -/* - * Binary search in the histogram table and record the hit: - */ -static void record_ip(uint64_t ip, int counter) -{ - struct symbol *sym = dso__find_symbol(kernel_dso, ip); - - if (sym != NULL) { - struct sym_entry *syme = dso__sym_priv(kernel_dso, sym); - - if (!syme->skip) { - syme->count[counter]++; - pthread_mutex_lock(&active_symbols_lock); - if (list_empty(&syme->node) || !syme->node.next) - __list_insert_active_sym(syme); - pthread_mutex_unlock(&active_symbols_lock); - return; - } - } - - samples--; -} - -static void process_event(uint64_t ip, int counter) -{ - samples++; - - if (ip < min_ip || ip > max_ip) { - userspace_samples++; - return; - } - - record_ip(ip, counter); -} - -struct mmap_data { - int counter; - void *base; - unsigned int mask; - unsigned int prev; -}; - -static unsigned int mmap_read_head(struct mmap_data *md) -{ - struct perf_counter_mmap_page *pc = md->base; - int head; - - head = pc->data_head; - rmb(); - - return head; -} - -struct timeval last_read, this_read; - -static void mmap_read(struct mmap_data *md) -{ - unsigned int head = mmap_read_head(md); - unsigned int old = md->prev; - unsigned char *data = md->base + page_size; - int diff; - - gettimeofday(&this_read, NULL); - - /* - * If we're further behind than half the buffer, there's a chance - * the writer will bite our tail and mess up the samples under us. - * - * If we somehow ended up ahead of the head, we got messed up. - * - * In either case, truncate and restart at head. - */ - diff = head - old; - if (diff > md->mask / 2 || diff < 0) { - struct timeval iv; - unsigned long msecs; - - timersub(&this_read, &last_read, &iv); - msecs = iv.tv_sec*1000 + iv.tv_usec/1000; - - fprintf(stderr, "WARNING: failed to keep up with mmap data." - " Last read %lu msecs ago.\n", msecs); - - /* - * head points to a known good entry, start there. 
- */ - old = head; - } - - last_read = this_read; - - for (; old != head;) { - struct ip_event { - struct perf_event_header header; - __u64 ip; - __u32 pid, target_pid; - }; - struct mmap_event { - struct perf_event_header header; - __u32 pid, target_pid; - __u64 start; - __u64 len; - __u64 pgoff; - char filename[PATH_MAX]; - }; - - typedef union event_union { - struct perf_event_header header; - struct ip_event ip; - struct mmap_event mmap; - } event_t; - - event_t *event = (event_t *)&data[old & md->mask]; - - event_t event_copy; - - size_t size = event->header.size; - - /* - * Event straddles the mmap boundary -- header should always - * be inside due to u64 alignment of output. - */ - if ((old & md->mask) + size != ((old + size) & md->mask)) { - unsigned int offset = old; - unsigned int len = min(sizeof(*event), size), cpy; - void *dst = &event_copy; - - do { - cpy = min(md->mask + 1 - (offset & md->mask), len); - memcpy(dst, &data[offset & md->mask], cpy); - offset += cpy; - dst += cpy; - len -= cpy; - } while (len); - - event = &event_copy; - } - - old += size; - - if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) { - if (event->header.type & PERF_SAMPLE_IP) - process_event(event->ip.ip, md->counter); - } - } - - md->prev = old; -} - -static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; -static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; - -static int __cmd_top(void) -{ - struct perf_counter_attr *attr; - pthread_t thread; - int i, counter, group_fd, nr_poll = 0; - unsigned int cpu; - int ret; - - for (i = 0; i < nr_cpus; i++) { - group_fd = -1; - for (counter = 0; counter < nr_counters; counter++) { - - cpu = profile_cpu; - if (target_pid == -1 && profile_cpu == -1) - cpu = i; - - attr = attrs + counter; - - attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; - attr->freq = freq; - - fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0); - if (fd[i][counter] < 0) { - int err = errno; - - error("syscall returned with %d (%s)\n", - fd[i][counter], strerror(err)); - if (err == EPERM) - printf("Are you root?\n"); - exit(-1); - } - assert(fd[i][counter] >= 0); - fcntl(fd[i][counter], F_SETFL, O_NONBLOCK); - - /* - * First counter acts as the group leader: - */ - if (group && group_fd == -1) - group_fd = fd[i][counter]; - - event_array[nr_poll].fd = fd[i][counter]; - event_array[nr_poll].events = POLLIN; - nr_poll++; - - mmap_array[i][counter].counter = counter; - mmap_array[i][counter].prev = 0; - mmap_array[i][counter].mask = mmap_pages*page_size - 1; - mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size, - PROT_READ, MAP_SHARED, fd[i][counter], 0); - if (mmap_array[i][counter].base == MAP_FAILED) - die("failed to mmap with %d (%s)\n", errno, strerror(errno)); - } - } - - if (pthread_create(&thread, NULL, display_thread, NULL)) { - printf("Could not create display thread.\n"); - exit(-1); - } - - if (realtime_prio) { - struct sched_param param; - - param.sched_priority = realtime_prio; - if (sched_setscheduler(0, SCHED_FIFO, &param)) { - printf("Could not set realtime priority.\n"); - exit(-1); - } - } - - while (1) { - int hits = samples; - - for (i = 0; i < nr_cpus; i++) { - for (counter = 0; counter < nr_counters; counter++) - mmap_read(&mmap_array[i][counter]); - } - - if (hits == samples) - ret = poll(event_array, nr_poll, 100); - } - - return 0; -} - -static const char * const top_usage[] = { - "perf top [<options>]", - NULL -}; - -static const struct option options[] = { - OPT_CALLBACK('e', "event", NULL, "event", - "event selector.
use 'perf list' to list available events", - parse_events), - OPT_INTEGER('c', "count", &default_interval, - "event period to sample"), - OPT_INTEGER('p', "pid", &target_pid, - "profile events on existing pid"), - OPT_BOOLEAN('a', "all-cpus", &system_wide, - "system-wide collection from all CPUs"), - OPT_INTEGER('C', "CPU", &profile_cpu, - "CPU to profile on"), - OPT_INTEGER('m', "mmap-pages", &mmap_pages, - "number of mmap data pages"), - OPT_INTEGER('r', "realtime", &realtime_prio, - "collect data with this RT SCHED_FIFO priority"), - OPT_INTEGER('d', "delay", &delay_secs, - "number of seconds to delay between refreshes"), - OPT_BOOLEAN('D', "dump-symtab", &dump_symtab, - "dump the symbol table used for profiling"), - OPT_INTEGER('f', "count-filter", &count_filter, - "only display functions with more events than this"), - OPT_BOOLEAN('g', "group", &group, - "put the counters into a counter group"), - OPT_STRING('s', "sym-filter", &sym_filter, "pattern", - "only display symbols matching this pattern"), - OPT_BOOLEAN('z', "zero", &zero, - "zero history across updates"), - OPT_INTEGER('F', "freq", &freq, - "profile at this frequency"), - OPT_INTEGER('E', "entries", &print_entries, - "display this many functions"), - OPT_END() -}; - -int cmd_top(int argc, const char **argv, const char *prefix) -{ - int counter; - - page_size = sysconf(_SC_PAGE_SIZE); - - argc = parse_options(argc, argv, options, top_usage, 0); - if (argc) - usage_with_options(top_usage, options); - - if (freq) { - default_interval = freq; - freq = 1; - } - - /* CPU and PID are mutually exclusive */ - if (target_pid != -1 && profile_cpu != -1) { - printf("WARNING: PID switch overriding CPU\n"); - sleep(1); - profile_cpu = -1; - } - - if (!nr_counters) - nr_counters = 1; - - if (delay_secs < 1) - delay_secs = 1; - - parse_symbols(); - - /* - * Fill in the ones not specifically initialized via -c: - */ - for (counter = 0; counter < nr_counters; counter++) { - if (attrs[counter].sample_period) - continue; - - attrs[counter].sample_period = default_interval; - } - - nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); - assert(nr_cpus <= MAX_NR_CPUS); - assert(nr_cpus >= 0); - - if (target_pid != -1 || profile_cpu != -1) - nr_cpus = 1; - - return __cmd_top(); -} diff --git a/Documentation/perf_counter/builtin.h b/Documentation/perf_counter/builtin.h deleted file mode 100644 index 51d168230ee..00000000000 --- a/Documentation/perf_counter/builtin.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef BUILTIN_H -#define BUILTIN_H - -#include "util/util.h" -#include "util/strbuf.h" - -extern const char perf_version_string[]; -extern const char perf_usage_string[]; -extern const char perf_more_info_string[]; - -extern void list_common_cmds_help(void); -extern const char *help_unknown_cmd(const char *cmd); -extern void prune_packed_objects(int); -extern int read_line_with_nul(char *buf, int size, FILE *file); -extern int check_pager_config(const char *cmd); - -extern int cmd_annotate(int argc, const char **argv, const char *prefix); -extern int cmd_help(int argc, const char **argv, const char *prefix); -extern int cmd_record(int argc, const char **argv, const char *prefix); -extern int cmd_report(int argc, const char **argv, const char *prefix); -extern int cmd_stat(int argc, const char **argv, const char *prefix); -extern int cmd_top(int argc, const char **argv, const char *prefix); -extern int cmd_version(int argc, const char **argv, const char *prefix); -extern int cmd_list(int argc, const char **argv, const char *prefix); - -#endif diff --git
a/Documentation/perf_counter/command-list.txt b/Documentation/perf_counter/command-list.txt deleted file mode 100644 index eebce30afbc..00000000000 --- a/Documentation/perf_counter/command-list.txt +++ /dev/null @@ -1,10 +0,0 @@ -# -# List of known perf commands. -# command name category [deprecated] [common] -# -perf-annotate mainporcelain common -perf-list mainporcelain common -perf-record mainporcelain common -perf-report mainporcelain common -perf-stat mainporcelain common -perf-top mainporcelain common diff --git a/Documentation/perf_counter/design.txt b/Documentation/perf_counter/design.txt deleted file mode 100644 index d3250763dc9..00000000000 --- a/Documentation/perf_counter/design.txt +++ /dev/null @@ -1,442 +0,0 @@ - -Performance Counters for Linux ------------------------------- - -Performance counters are special hardware registers available on most modern -CPUs. These registers count the number of certain types of hw events, such -as instructions executed, cache misses suffered, or branches mis-predicted - -without slowing down the kernel or applications. These registers can also -trigger interrupts when a threshold number of events have passed - and can -thus be used to profile the code that runs on that CPU. - -The Linux Performance Counter subsystem provides an abstraction of these -hardware capabilities. It provides per task and per CPU counters, counter -groups, and it provides event capabilities on top of those. It -provides "virtual" 64-bit counters, regardless of the width of the -underlying hardware counters. - -Performance counters are accessed via special file descriptors. -There's one file descriptor per virtual counter used. - -The special file descriptor is opened via the perf_counter_open() -system call: - - int sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr, - pid_t pid, int cpu, int group_fd, - unsigned long flags); - -The syscall returns the new fd. The fd can be used via the normal -VFS system calls: read() can be used to read the counter, fcntl() -can be used to set the blocking mode, etc. - -Multiple counters can be kept open at a time, and the counters -can be poll()ed. - -When creating a new counter fd, 'perf_counter_hw_event' is: - -struct perf_counter_hw_event { - /* - * The MSB of the config word signifies if the rest contains cpu - * specific (raw) counter configuration data, if unset, the next - * 7 bits are an event type and the rest of the bits are the event - * identifier. - */ - __u64 config; - - __u64 irq_period; - __u32 record_type; - __u32 read_format; - - __u64 disabled : 1, /* off by default */ - inherit : 1, /* children inherit it */ - pinned : 1, /* must always be on PMU */ - exclusive : 1, /* only group on PMU */ - exclude_user : 1, /* don't count user */ - exclude_kernel : 1, /* ditto kernel */ - exclude_hv : 1, /* ditto hypervisor */ - exclude_idle : 1, /* don't count when idle */ - mmap : 1, /* include mmap data */ - munmap : 1, /* include munmap data */ - comm : 1, /* include comm data */ - - __reserved_1 : 52; - - __u32 extra_config_len; - __u32 wakeup_events; /* wakeup every n events */ - - __u64 __reserved_2; - __u64 __reserved_3; -}; - -The 'config' field specifies what the counter should count.
It -is divided into 3 bit-fields: - -raw_type: 1 bit (most significant bit) 0x8000_0000_0000_0000 -type: 7 bits (next most significant) 0x7f00_0000_0000_0000 -event_id: 56 bits (least significant) 0x00ff_ffff_ffff_ffff - -If 'raw_type' is 1, then the counter will count a hardware event -specified by the remaining 63 bits of 'config'. The encoding is -machine-specific. - -If 'raw_type' is 0, then the 'type' field says what kind of counter -this is, with the following encoding: - -enum perf_event_types { - PERF_TYPE_HARDWARE = 0, - PERF_TYPE_SOFTWARE = 1, - PERF_TYPE_TRACEPOINT = 2, -}; - -A counter of type PERF_TYPE_HARDWARE will count the hardware event -specified by 'event_id': - -/* - * Generalized performance counter event types, used by the hw_event.event_id - * parameter of the sys_perf_counter_open() syscall: - */ -enum hw_event_ids { - /* - * Common hardware events, generalized by the kernel: - */ - PERF_COUNT_CPU_CYCLES = 0, - PERF_COUNT_INSTRUCTIONS = 1, - PERF_COUNT_CACHE_REFERENCES = 2, - PERF_COUNT_CACHE_MISSES = 3, - PERF_COUNT_BRANCH_INSTRUCTIONS = 4, - PERF_COUNT_BRANCH_MISSES = 5, - PERF_COUNT_BUS_CYCLES = 6, -}; - -These are standardized types of events that work relatively uniformly -on all CPUs that implement Performance Counters support under Linux, -although there may be variations (e.g., different CPUs might count -cache references and misses at different levels of the cache hierarchy). -If a CPU is not able to count the selected event, then the system call -will return -EINVAL. - -More hw_event_types are supported as well, but they are CPU-specific -and accessed as raw events. For example, to count "External bus -cycles while bus lock signal asserted" events on Intel Core CPUs, pass -in a 0x4064 event_id value and set hw_event.raw_type to 1. - -A counter of type PERF_TYPE_SOFTWARE will count one of the available -software events, selected by 'event_id': - -/* - * Special "software" counters provided by the kernel, even if the hardware - * does not support performance counters. These counters measure various - * physical and sw events of the kernel (and allow the profiling of them as - * well): - */ -enum sw_event_ids { - PERF_COUNT_CPU_CLOCK = 0, - PERF_COUNT_TASK_CLOCK = 1, - PERF_COUNT_PAGE_FAULTS = 2, - PERF_COUNT_CONTEXT_SWITCHES = 3, - PERF_COUNT_CPU_MIGRATIONS = 4, - PERF_COUNT_PAGE_FAULTS_MIN = 5, - PERF_COUNT_PAGE_FAULTS_MAJ = 6, -}; - -Counters of the type PERF_TYPE_TRACEPOINT are available when the ftrace event -tracer is available, and event_id values can be obtained from -/debug/tracing/events/*/*/id - - -Counters come in two flavours: counting counters and sampling -counters. A "counting" counter is one that is used for counting the -number of events that occur, and is characterised by having -irq_period = 0. - - -A read() on a counter returns the current value of the counter and possible -additional values as specified by 'read_format'; each value is a u64 (8 bytes) -in size. - -/* - * Bits that can be set in hw_event.read_format to request that - * reads on the counter should return the indicated quantities, - * in increasing order of bit value, after the counter value. - */ -enum perf_counter_read_format { - PERF_FORMAT_TOTAL_TIME_ENABLED = 1, - PERF_FORMAT_TOTAL_TIME_RUNNING = 2, -}; - -Using these additional values one can establish the overcommit ratio for a -particular counter allowing one to take the round-robin scheduling effect -into account.
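As a side note, the 'config' encoding and the read_format scaling can be made
concrete with a small user-space sketch. This is only an illustration (it is
not part of the patch); the macro and function names are made up, and it
assumes nothing beyond the bit-field layout and enums quoted above:

#include <stdint.h>

/* Compose a config value from the 3 bit-fields described above: */
#define CONFIG_RAW		(1ULL << 63)			/* raw_type: 1 MSB */
#define CONFIG_TYPE(t)		((uint64_t)((t) & 0x7f) << 56)	/* type: 7 bits */
#define CONFIG_EVENT(e)		((uint64_t)(e) & ((1ULL << 56) - 1)) /* event_id: 56 bits */

/* Generalized hardware cycles: PERF_TYPE_HARDWARE (0), PERF_COUNT_CPU_CYCLES (0): */
uint64_t cycles_config = CONFIG_TYPE(0) | CONFIG_EVENT(0);

/* Raw, machine-specific event, e.g. 0x4064 on Intel Core CPUs: */
uint64_t raw_config = CONFIG_RAW | CONFIG_EVENT(0x4064);

/*
 * With PERF_FORMAT_TOTAL_TIME_ENABLED and PERF_FORMAT_TOTAL_TIME_RUNNING
 * both set, a read() returns { count, time_enabled, time_running }; the
 * overcommit-corrected estimate, as builtin-stat.c's read_counter()
 * computes it, is:
 */
static uint64_t scaled_estimate(const uint64_t v[3])
{
	if (!v[2])
		return 0;	/* the counter never ran */
	return (uint64_t)((double)v[0] * v[1] / v[2] + 0.5);
}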
-
-
-A "sampling" counter is one that is set up to generate an interrupt
-every N events, where N is given by 'irq_period'. A sampling counter
-has irq_period > 0. The record_type controls what data is recorded on each
-interrupt:
-
-/*
- * Bits that can be set in hw_event.record_type to request information
- * in the overflow packets.
- */
-enum perf_counter_record_format {
-	PERF_RECORD_IP		= 1U << 0,
-	PERF_RECORD_TID		= 1U << 1,
-	PERF_RECORD_TIME	= 1U << 2,
-	PERF_RECORD_ADDR	= 1U << 3,
-	PERF_RECORD_GROUP	= 1U << 4,
-	PERF_RECORD_CALLCHAIN	= 1U << 5,
-};
-
-Such (and other) events will be recorded in a ring-buffer, which is
-available to user-space using mmap() (see below).
-
-The 'disabled' bit specifies whether the counter starts out disabled
-or enabled. If it is initially disabled, it can be enabled by ioctl
-or prctl (see below).
-
-The 'inherit' bit, if set, specifies that this counter should count
-events on descendant tasks as well as the task specified. This only
-applies to new descendants, not to any existing descendants at the
-time the counter is created (nor to any new descendants of existing
-descendants).
-
-The 'pinned' bit, if set, specifies that the counter should always be
-on the CPU if at all possible. It only applies to hardware counters
-and only to group leaders. If a pinned counter cannot be put onto the
-CPU (e.g. because there are not enough hardware counters or because of
-a conflict with some other event), then the counter goes into an
-'error' state, where reads return end-of-file (i.e. read() returns 0)
-until the counter is subsequently enabled or disabled.
-
-The 'exclusive' bit, if set, specifies that when this counter's group
-is on the CPU, it should be the only group using the CPU's counters.
-In the future, this will allow sophisticated monitoring programs to supply
-extra configuration information via 'extra_config_len' to exploit
-advanced features of the CPU's Performance Monitor Unit (PMU) that are
-not otherwise accessible and that might disrupt other hardware
-counters.
-
-The 'exclude_user', 'exclude_kernel' and 'exclude_hv' bits provide a
-way to request that counting of events be restricted to times when the
-CPU is in user, kernel and/or hypervisor mode.
-
-The 'mmap' and 'munmap' bits allow recording of PROT_EXEC mmap/munmap
-operations. These can be used to relate userspace IP addresses to actual
-code, even after the mapping (or even the whole process) is gone;
-these events are recorded in the ring-buffer (see below).
-
-The 'comm' bit allows tracking of process comm data on process creation.
-This too is recorded in the ring-buffer (see below).
-
-The 'pid' parameter to the perf_counter_open() system call allows the
-counter to be specific to a task:
-
- pid == 0: if the pid parameter is zero, the counter is attached to the
-	    current task.
-
- pid > 0: the counter is attached to a specific task (if the current task
-	    has sufficient privilege to do so)
-
- pid < 0: all tasks are counted (per cpu counters)
-
-The 'cpu' parameter allows a counter to be made specific to a CPU:
-
- cpu >= 0: the counter is restricted to a specific CPU
- cpu == -1: the counter counts on all CPUs
-
-(Note: the combination of 'pid == -1' and 'cpu == -1' is not valid.)
-
-A 'pid > 0' and 'cpu == -1' counter is a per task counter that counts
-events of that task and 'follows' that task to whatever CPU the task
-gets scheduled to. Per task counters can be created by any user, for
-their own tasks.
-
-A 'pid == -1' and 'cpu == x' counter is a per CPU counter that counts
-all events on CPU-x. Per CPU counters need CAP_SYS_ADMIN privilege.
-
-The 'flags' parameter is currently unused and must be zero.
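[ Editorial sketch, not part of the patch, to make the pid/cpu
  combinations concrete. It assumes the sys_perf_counter_open()
  prototype shown earlier and a hw_event configured as in the first
  example. ]

	/* Count events of the current task, wherever it runs: */
	int task_fd = sys_perf_counter_open(&hw_event, 0, -1, -1, 0);

	/* Count events of all tasks on CPU 2 (needs CAP_SYS_ADMIN): */
	int cpu_fd = sys_perf_counter_open(&hw_event, -1, 2, -1, 0);

	if (task_fd < 0 || cpu_fd < 0)
		return -1;	/* e.g. -EINVAL for an unsupported event */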
-
-The 'group_fd' parameter allows counter "groups" to be set up. A
-counter group has one counter which is the group "leader". The leader
-is created first, with group_fd = -1 in the perf_counter_open call
-that creates it. The rest of the group members are created
-subsequently, with group_fd giving the fd of the group leader.
-(A single counter on its own is created with group_fd = -1 and is
-considered to be a group with only 1 member.)
-
-A counter group is scheduled onto the CPU as a unit, that is, it will
-only be put onto the CPU if all of the counters in the group can be
-put onto the CPU. This means that the values of the member counters
-can be meaningfully compared, added, divided (to get ratios), etc.,
-with each other, since they have counted events for the same set of
-executed instructions.
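[ Editorial illustration, not part of the patch: an instructions-per-cycle
  measurement would group two counters so that both always count over the
  same instructions. ]

	struct perf_counter_hw_event cycles = { .config = PERF_COUNT_CPU_CYCLES   };
	struct perf_counter_hw_event insns  = { .config = PERF_COUNT_INSTRUCTIONS };

	/* The group leader is created with group_fd == -1: */
	int leader_fd = sys_perf_counter_open(&cycles, 0, -1, -1, 0);

	/* Members pass the leader's fd as group_fd: */
	int member_fd = sys_perf_counter_open(&insns, 0, -1, leader_fd, 0);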
-
-
-As stated, asynchronous events, like counter overflow or PROT_EXEC mmap
-tracking, are logged into a ring-buffer. This ring-buffer is created and
-accessed through mmap().
-
-The mmap size should be 1+2^n pages, where the first page is a meta-data page
-(struct perf_counter_mmap_page) that contains various bits of information such
-as where the ring-buffer head is.
-
-/*
- * Structure of the page that can be mapped via mmap
- */
-struct perf_counter_mmap_page {
-	__u32	version;	/* version number of this structure */
-	__u32	compat_version;	/* lowest version this is compat with */
-
-	/*
-	 * Bits needed to read the hw counters in user-space.
-	 *
-	 *   u32 seq;
-	 *   s64 count;
-	 *
-	 *   do {
-	 *     seq = pc->lock;
-	 *
-	 *     barrier()
-	 *     if (pc->index) {
-	 *       count = pmc_read(pc->index - 1);
-	 *       count += pc->offset;
-	 *     } else
-	 *       goto regular_read;
-	 *
-	 *     barrier();
-	 *   } while (pc->lock != seq);
-	 *
-	 * NOTE: for obvious reason this only works on self-monitoring
-	 *   processes.
-	 */
-	__u32	lock;		/* seqlock for synchronization */
-	__u32	index;		/* hardware counter identifier */
-	__s64	offset;		/* add to hardware counter value */
-
-	/*
-	 * Control data for the mmap() data buffer.
-	 *
-	 * User-space reading this value should issue an rmb(), on SMP capable
-	 * platforms, after reading this value -- see perf_counter_wakeup().
-	 */
-	__u32	data_head;	/* head in the data section */
-};
-
-NOTE: the hw-counter userspace bits are arch specific and are currently only
-      implemented on powerpc.
-
-The following 2^n pages are the ring-buffer which contains events of the form:
-
-#define PERF_EVENT_MISC_KERNEL		(1 << 0)
-#define PERF_EVENT_MISC_USER		(1 << 1)
-#define PERF_EVENT_MISC_OVERFLOW	(1 << 2)
-
-struct perf_event_header {
-	__u32	type;
-	__u16	misc;
-	__u16	size;
-};
-
-enum perf_event_type {
-
-	/*
-	 * The MMAP events record the PROT_EXEC mappings so that we can
-	 * correlate userspace IPs to code. They have the following structure:
-	 *
-	 * struct {
-	 *	struct perf_event_header	header;
-	 *
-	 *	u32				pid, tid;
-	 *	u64				addr;
-	 *	u64				len;
-	 *	u64				pgoff;
-	 *	char				filename[];
-	 * };
-	 */
-	PERF_EVENT_MMAP		= 1,
-	PERF_EVENT_MUNMAP	= 2,
-
-	/*
-	 * struct {
-	 *	struct perf_event_header	header;
-	 *
-	 *	u32				pid, tid;
-	 *	char				comm[];
-	 * };
-	 */
-	PERF_EVENT_COMM		= 3,
-
-	/*
-	 * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
-	 * will be PERF_RECORD_*
-	 *
-	 * struct {
-	 *	struct perf_event_header	header;
-	 *
-	 *	{ u64			ip;	  } && PERF_RECORD_IP
-	 *	{ u32			pid, tid; } && PERF_RECORD_TID
-	 *	{ u64			time;     } && PERF_RECORD_TIME
-	 *	{ u64			addr;     } && PERF_RECORD_ADDR
-	 *
-	 *	{ u64			nr;
-	 *	  { u64 event, val; }	cnt[nr];  } && PERF_RECORD_GROUP
-	 *
-	 *	{ u16			nr,
-	 *				hv,
-	 *				kernel,
-	 *				user;
-	 *	  u64			ips[nr];  } && PERF_RECORD_CALLCHAIN
-	 * };
-	 */
-};
-
-NOTE: PERF_RECORD_CALLCHAIN is arch specific and currently only implemented
-      on x86.
-
-Notification of new events is possible through poll()/select()/epoll() and
-fcntl() managing signals.
-
-Normally a notification is generated for every page filled; however, one can
-additionally set perf_counter_hw_event.wakeup_events to generate one every
-so many counter overflow events.
-
-Future work will include a splice() interface to the ring-buffer.
-
-
-Counters can be enabled and disabled in two ways: via ioctl and via
-prctl. When a counter is disabled, it doesn't count or generate
-events but does continue to exist and maintain its count value.
-
-An individual counter or counter group can be enabled with
-
-	ioctl(fd, PERF_COUNTER_IOC_ENABLE);
-
-or disabled with
-
-	ioctl(fd, PERF_COUNTER_IOC_DISABLE);
-
-Enabling or disabling the leader of a group enables or disables the
-whole group; that is, while the group leader is disabled, none of the
-counters in the group will count. Enabling or disabling a member of a
-group other than the leader only affects that counter - disabling a
-non-leader stops that counter from counting but doesn't affect any
-other counter.
-
-Additionally, non-inherited overflow counters can use
-
-	ioctl(fd, PERF_COUNTER_IOC_REFRESH, nr);
-
-to enable a counter for 'nr' events, after which it gets disabled again.
-
-A process can enable or disable all the counter groups that are
-attached to it, using prctl:
-
-	prctl(PR_TASK_PERF_COUNTERS_ENABLE);
-
-	prctl(PR_TASK_PERF_COUNTERS_DISABLE);
-
-This applies to all counters on the current process, whether created
-by this process or by another, and doesn't affect any counters that
-this process has created on other processes. It only enables or
-disables the group leaders, not any other members in the groups.
-
diff --git a/Documentation/perf_counter/perf.c b/Documentation/perf_counter/perf.c
deleted file mode 100644
index 4eb72593370..00000000000
--- a/Documentation/perf_counter/perf.c
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- * perf.c
- *
- * Performance analysis utility.
- *
- * This is the main hub from which the sub-commands (perf stat,
- * perf top, perf record, perf report, etc.) are started.
- */ -#include "builtin.h" - -#include "util/exec_cmd.h" -#include "util/cache.h" -#include "util/quote.h" -#include "util/run-command.h" - -const char perf_usage_string[] = - "perf [--version] [--help] COMMAND [ARGS]"; - -const char perf_more_info_string[] = - "See 'perf help COMMAND' for more information on a specific command."; - -static int use_pager = -1; -struct pager_config { - const char *cmd; - int val; -}; - -static int pager_command_config(const char *var, const char *value, void *data) -{ - struct pager_config *c = data; - if (!prefixcmp(var, "pager.") && !strcmp(var + 6, c->cmd)) - c->val = perf_config_bool(var, value); - return 0; -} - -/* returns 0 for "no pager", 1 for "use pager", and -1 for "not specified" */ -int check_pager_config(const char *cmd) -{ - struct pager_config c; - c.cmd = cmd; - c.val = -1; - perf_config(pager_command_config, &c); - return c.val; -} - -static void commit_pager_choice(void) { - switch (use_pager) { - case 0: - setenv("PERF_PAGER", "cat", 1); - break; - case 1: - /* setup_pager(); */ - break; - default: - break; - } -} - -static int handle_options(const char*** argv, int* argc, int* envchanged) -{ - int handled = 0; - - while (*argc > 0) { - const char *cmd = (*argv)[0]; - if (cmd[0] != '-') - break; - - /* - * For legacy reasons, the "version" and "help" - * commands can be written with "--" prepended - * to make them look like flags. - */ - if (!strcmp(cmd, "--help") || !strcmp(cmd, "--version")) - break; - - /* - * Check remaining flags. - */ - if (!prefixcmp(cmd, "--exec-path")) { - cmd += 11; - if (*cmd == '=') - perf_set_argv_exec_path(cmd + 1); - else { - puts(perf_exec_path()); - exit(0); - } - } else if (!strcmp(cmd, "--html-path")) { - puts(system_path(PERF_HTML_PATH)); - exit(0); - } else if (!strcmp(cmd, "-p") || !strcmp(cmd, "--paginate")) { - use_pager = 1; - } else if (!strcmp(cmd, "--no-pager")) { - use_pager = 0; - if (envchanged) - *envchanged = 1; - } else if (!strcmp(cmd, "--perf-dir")) { - if (*argc < 2) { - fprintf(stderr, "No directory given for --perf-dir.\n" ); - usage(perf_usage_string); - } - setenv(PERF_DIR_ENVIRONMENT, (*argv)[1], 1); - if (envchanged) - *envchanged = 1; - (*argv)++; - (*argc)--; - handled++; - } else if (!prefixcmp(cmd, "--perf-dir=")) { - setenv(PERF_DIR_ENVIRONMENT, cmd + 10, 1); - if (envchanged) - *envchanged = 1; - } else if (!strcmp(cmd, "--work-tree")) { - if (*argc < 2) { - fprintf(stderr, "No directory given for --work-tree.\n" ); - usage(perf_usage_string); - } - setenv(PERF_WORK_TREE_ENVIRONMENT, (*argv)[1], 1); - if (envchanged) - *envchanged = 1; - (*argv)++; - (*argc)--; - } else if (!prefixcmp(cmd, "--work-tree=")) { - setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + 12, 1); - if (envchanged) - *envchanged = 1; - } else { - fprintf(stderr, "Unknown option: %s\n", cmd); - usage(perf_usage_string); - } - - (*argv)++; - (*argc)--; - handled++; - } - return handled; -} - -static int handle_alias(int *argcp, const char ***argv) -{ - int envchanged = 0, ret = 0, saved_errno = errno; - int count, option_count; - const char** new_argv; - const char *alias_command; - char *alias_string; - - alias_command = (*argv)[0]; - alias_string = alias_lookup(alias_command); - if (alias_string) { - if (alias_string[0] == '!') { - if (*argcp > 1) { - struct strbuf buf; - - strbuf_init(&buf, PATH_MAX); - strbuf_addstr(&buf, alias_string); - sq_quote_argv(&buf, (*argv) + 1, PATH_MAX); - free(alias_string); - alias_string = buf.buf; - } - ret = system(alias_string + 1); - if (ret >= 0 && WIFEXITED(ret) && - 
WEXITSTATUS(ret) != 127) - exit(WEXITSTATUS(ret)); - die("Failed to run '%s' when expanding alias '%s'", - alias_string + 1, alias_command); - } - count = split_cmdline(alias_string, &new_argv); - if (count < 0) - die("Bad alias.%s string", alias_command); - option_count = handle_options(&new_argv, &count, &envchanged); - if (envchanged) - die("alias '%s' changes environment variables\n" - "You can use '!perf' in the alias to do this.", - alias_command); - memmove(new_argv - option_count, new_argv, - count * sizeof(char *)); - new_argv -= option_count; - - if (count < 1) - die("empty alias for %s", alias_command); - - if (!strcmp(alias_command, new_argv[0])) - die("recursive alias: %s", alias_command); - - new_argv = realloc(new_argv, sizeof(char*) * - (count + *argcp + 1)); - /* insert after command name */ - memcpy(new_argv + count, *argv + 1, sizeof(char*) * *argcp); - new_argv[count+*argcp] = NULL; - - *argv = new_argv; - *argcp += count - 1; - - ret = 1; - } - - errno = saved_errno; - - return ret; -} - -const char perf_version_string[] = PERF_VERSION; - -#define RUN_SETUP (1<<0) -#define USE_PAGER (1<<1) -/* - * require working tree to be present -- anything uses this needs - * RUN_SETUP for reading from the configuration file. - */ -#define NEED_WORK_TREE (1<<2) - -struct cmd_struct { - const char *cmd; - int (*fn)(int, const char **, const char *); - int option; -}; - -static int run_builtin(struct cmd_struct *p, int argc, const char **argv) -{ - int status; - struct stat st; - const char *prefix; - - prefix = NULL; - if (p->option & RUN_SETUP) - prefix = NULL; /* setup_perf_directory(); */ - - if (use_pager == -1 && p->option & RUN_SETUP) - use_pager = check_pager_config(p->cmd); - if (use_pager == -1 && p->option & USE_PAGER) - use_pager = 1; - commit_pager_choice(); - - if (p->option & NEED_WORK_TREE) - /* setup_work_tree() */; - - status = p->fn(argc, argv, prefix); - if (status) - return status & 0xff; - - /* Somebody closed stdout? */ - if (fstat(fileno(stdout), &st)) - return 0; - /* Ignore write errors for pipes and sockets.. */ - if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) - return 0; - - /* Check for ENOSPC and EIO errors.. 
*/ - if (fflush(stdout)) - die("write failure on standard output: %s", strerror(errno)); - if (ferror(stdout)) - die("unknown write failure on standard output"); - if (fclose(stdout)) - die("close failed on standard output: %s", strerror(errno)); - return 0; -} - -static void handle_internal_command(int argc, const char **argv) -{ - const char *cmd = argv[0]; - static struct cmd_struct commands[] = { - { "help", cmd_help, 0 }, - { "list", cmd_list, 0 }, - { "record", cmd_record, 0 }, - { "report", cmd_report, 0 }, - { "stat", cmd_stat, 0 }, - { "top", cmd_top, 0 }, - { "annotate", cmd_annotate, 0 }, - { "version", cmd_version, 0 }, - }; - int i; - static const char ext[] = STRIP_EXTENSION; - - if (sizeof(ext) > 1) { - i = strlen(argv[0]) - strlen(ext); - if (i > 0 && !strcmp(argv[0] + i, ext)) { - char *argv0 = strdup(argv[0]); - argv[0] = cmd = argv0; - argv0[i] = '\0'; - } - } - - /* Turn "perf cmd --help" into "perf help cmd" */ - if (argc > 1 && !strcmp(argv[1], "--help")) { - argv[1] = argv[0]; - argv[0] = cmd = "help"; - } - - for (i = 0; i < ARRAY_SIZE(commands); i++) { - struct cmd_struct *p = commands+i; - if (strcmp(p->cmd, cmd)) - continue; - exit(run_builtin(p, argc, argv)); - } -} - -static void execv_dashed_external(const char **argv) -{ - struct strbuf cmd = STRBUF_INIT; - const char *tmp; - int status; - - strbuf_addf(&cmd, "perf-%s", argv[0]); - - /* - * argv[0] must be the perf command, but the argv array - * belongs to the caller, and may be reused in - * subsequent loop iterations. Save argv[0] and - * restore it on error. - */ - tmp = argv[0]; - argv[0] = cmd.buf; - - /* - * if we fail because the command is not found, it is - * OK to return. Otherwise, we just pass along the status code. - */ - status = run_command_v_opt(argv, 0); - if (status != -ERR_RUN_COMMAND_EXEC) { - if (IS_RUN_COMMAND_ERR(status)) - die("unable to run '%s'", argv[0]); - exit(-status); - } - errno = ENOENT; /* as if we called execvp */ - - argv[0] = tmp; - - strbuf_release(&cmd); -} - -static int run_argv(int *argcp, const char ***argv) -{ - int done_alias = 0; - - while (1) { - /* See if it's an internal command */ - handle_internal_command(*argcp, *argv); - - /* .. then try the external ones */ - execv_dashed_external(*argv); - - /* It could be an alias -- this works around the insanity - * of overriding "perf log" with "perf show" by having - * alias.log = show - */ - if (done_alias || !handle_alias(argcp, argv)) - break; - done_alias = 1; - } - - return done_alias; -} - - -int main(int argc, const char **argv) -{ - const char *cmd; - - cmd = perf_extract_argv0_path(argv[0]); - if (!cmd) - cmd = "perf-help"; - - /* - * "perf-xxxx" is the same as "perf xxxx", but we obviously: - * - * - cannot take flags in between the "perf" and the "xxxx". - * - cannot execute it externally (since it would just do - * the same thing over again) - * - * So we just directly call the internal command handler, and - * die if that one cannot handle it. - */ - if (!prefixcmp(cmd, "perf-")) { - cmd += 5; - argv[0] = cmd; - handle_internal_command(argc, argv); - die("cannot handle %s internally", cmd); - } - - /* Look for flags.. 
*/ - argv++; - argc--; - handle_options(&argv, &argc, NULL); - commit_pager_choice(); - if (argc > 0) { - if (!prefixcmp(argv[0], "--")) - argv[0] += 2; - } else { - /* The user didn't specify a command; give them help */ - printf("\n usage: %s\n\n", perf_usage_string); - list_common_cmds_help(); - printf("\n %s\n\n", perf_more_info_string); - exit(1); - } - cmd = argv[0]; - - /* - * We use PATH to find perf commands, but we prepend some higher - * precidence paths: the "--exec-path" option, the PERF_EXEC_PATH - * environment, and the $(perfexecdir) from the Makefile at build - * time. - */ - setup_path(); - - while (1) { - static int done_help = 0; - static int was_alias = 0; - - was_alias = run_argv(&argc, &argv); - if (errno != ENOENT) - break; - - if (was_alias) { - fprintf(stderr, "Expansion of alias '%s' failed; " - "'%s' is not a perf-command\n", - cmd, argv[0]); - exit(1); - } - if (!done_help) { - cmd = argv[0] = help_unknown_cmd(cmd); - done_help = 1; - } else - break; - } - - fprintf(stderr, "Failed to run command '%s': %s\n", - cmd, strerror(errno)); - - return 1; -} diff --git a/Documentation/perf_counter/perf.h b/Documentation/perf_counter/perf.h deleted file mode 100644 index af0a5046d74..00000000000 --- a/Documentation/perf_counter/perf.h +++ /dev/null @@ -1,67 +0,0 @@ -#ifndef _PERF_PERF_H -#define _PERF_PERF_H - -#if defined(__x86_64__) || defined(__i386__) -#include "../../arch/x86/include/asm/unistd.h" -#define rmb() asm volatile("lfence" ::: "memory") -#define cpu_relax() asm volatile("rep; nop" ::: "memory"); -#endif - -#ifdef __powerpc__ -#include "../../arch/powerpc/include/asm/unistd.h" -#define rmb() asm volatile ("sync" ::: "memory") -#define cpu_relax() asm volatile ("" ::: "memory"); -#endif - -#include -#include -#include -#include - -#include "../../include/linux/perf_counter.h" - -/* - * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all - * counters in the current task. - */ -#define PR_TASK_PERF_COUNTERS_DISABLE 31 -#define PR_TASK_PERF_COUNTERS_ENABLE 32 - -#ifndef NSEC_PER_SEC -# define NSEC_PER_SEC 1000000000ULL -#endif - -static inline unsigned long long rdclock(void) -{ - struct timespec ts; - - clock_gettime(CLOCK_MONOTONIC, &ts); - return ts.tv_sec * 1000000000ULL + ts.tv_nsec; -} - -/* - * Pick up some kernel type conventions: - */ -#define __user -#define asmlinkage - -#define unlikely(x) __builtin_expect(!!(x), 0) -#define min(x, y) ({ \ - typeof(x) _min1 = (x); \ - typeof(y) _min2 = (y); \ - (void) (&_min1 == &_min2); \ - _min1 < _min2 ? _min1 : _min2; }) - -static inline int -sys_perf_counter_open(struct perf_counter_attr *attr_uptr, - pid_t pid, int cpu, int group_fd, - unsigned long flags) -{ - return syscall(__NR_perf_counter_open, attr_uptr, pid, cpu, - group_fd, flags); -} - -#define MAX_COUNTERS 256 -#define MAX_NR_CPUS 256 - -#endif diff --git a/Documentation/perf_counter/util/PERF-VERSION-GEN b/Documentation/perf_counter/util/PERF-VERSION-GEN deleted file mode 100755 index c561d1538c0..00000000000 --- a/Documentation/perf_counter/util/PERF-VERSION-GEN +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/sh - -GVF=PERF-VERSION-FILE -DEF_VER=v0.0.1.PERF - -LF=' -' - -# First see if there is a version file (included in release tarballs), -# then try git-describe, then default. 
-if test -f version -then - VN=$(cat version) || VN="$DEF_VER" -elif test -d .git -o -f .git && - VN=$(git describe --abbrev=4 HEAD 2>/dev/null) && - case "$VN" in - *$LF*) (exit 1) ;; - v[0-9]*) - git update-index -q --refresh - test -z "$(git diff-index --name-only HEAD --)" || - VN="$VN-dirty" ;; - esac -then - VN=$(echo "$VN" | sed -e 's/-/./g'); -else - VN="$DEF_VER" -fi - -VN=$(expr "$VN" : v*'\(.*\)') - -if test -r $GVF -then - VC=$(sed -e 's/^PERF_VERSION = //' <$GVF) -else - VC=unset -fi -test "$VN" = "$VC" || { - echo >&2 "PERF_VERSION = $VN" - echo "PERF_VERSION = $VN" >$GVF -} - - diff --git a/Documentation/perf_counter/util/abspath.c b/Documentation/perf_counter/util/abspath.c deleted file mode 100644 index 61d33b81fc9..00000000000 --- a/Documentation/perf_counter/util/abspath.c +++ /dev/null @@ -1,117 +0,0 @@ -#include "cache.h" - -/* - * Do not use this for inspecting *tracked* content. When path is a - * symlink to a directory, we do not want to say it is a directory when - * dealing with tracked content in the working tree. - */ -static int is_directory(const char *path) -{ - struct stat st; - return (!stat(path, &st) && S_ISDIR(st.st_mode)); -} - -/* We allow "recursive" symbolic links. Only within reason, though. */ -#define MAXDEPTH 5 - -const char *make_absolute_path(const char *path) -{ - static char bufs[2][PATH_MAX + 1], *buf = bufs[0], *next_buf = bufs[1]; - char cwd[1024] = ""; - int buf_index = 1, len; - - int depth = MAXDEPTH; - char *last_elem = NULL; - struct stat st; - - if (strlcpy(buf, path, PATH_MAX) >= PATH_MAX) - die ("Too long path: %.*s", 60, path); - - while (depth--) { - if (!is_directory(buf)) { - char *last_slash = strrchr(buf, '/'); - if (last_slash) { - *last_slash = '\0'; - last_elem = xstrdup(last_slash + 1); - } else { - last_elem = xstrdup(buf); - *buf = '\0'; - } - } - - if (*buf) { - if (!*cwd && !getcwd(cwd, sizeof(cwd))) - die ("Could not get current working directory"); - - if (chdir(buf)) - die ("Could not switch to '%s'", buf); - } - if (!getcwd(buf, PATH_MAX)) - die ("Could not get current working directory"); - - if (last_elem) { - int len = strlen(buf); - if (len + strlen(last_elem) + 2 > PATH_MAX) - die ("Too long path name: '%s/%s'", - buf, last_elem); - buf[len] = '/'; - strcpy(buf + len + 1, last_elem); - free(last_elem); - last_elem = NULL; - } - - if (!lstat(buf, &st) && S_ISLNK(st.st_mode)) { - len = readlink(buf, next_buf, PATH_MAX); - if (len < 0) - die ("Invalid symlink: %s", buf); - if (PATH_MAX <= len) - die("symbolic link too long: %s", buf); - next_buf[len] = '\0'; - buf = next_buf; - buf_index = 1 - buf_index; - next_buf = bufs[buf_index]; - } else - break; - } - - if (*cwd && chdir(cwd)) - die ("Could not change back to '%s'", cwd); - - return buf; -} - -static const char *get_pwd_cwd(void) -{ - static char cwd[PATH_MAX + 1]; - char *pwd; - struct stat cwd_stat, pwd_stat; - if (getcwd(cwd, PATH_MAX) == NULL) - return NULL; - pwd = getenv("PWD"); - if (pwd && strcmp(pwd, cwd)) { - stat(cwd, &cwd_stat); - if (!stat(pwd, &pwd_stat) && - pwd_stat.st_dev == cwd_stat.st_dev && - pwd_stat.st_ino == cwd_stat.st_ino) { - strlcpy(cwd, pwd, PATH_MAX); - } - } - return cwd; -} - -const char *make_nonrelative_path(const char *path) -{ - static char buf[PATH_MAX + 1]; - - if (is_absolute_path(path)) { - if (strlcpy(buf, path, PATH_MAX) >= PATH_MAX) - die("Too long path: %.*s", 60, path); - } else { - const char *cwd = get_pwd_cwd(); - if (!cwd) - die("Cannot determine the current working directory"); - if (snprintf(buf, PATH_MAX, 
"%s/%s", cwd, path) >= PATH_MAX) - die("Too long path: %.*s", 60, path); - } - return buf; -} diff --git a/Documentation/perf_counter/util/alias.c b/Documentation/perf_counter/util/alias.c deleted file mode 100644 index 9b3dd2b428d..00000000000 --- a/Documentation/perf_counter/util/alias.c +++ /dev/null @@ -1,77 +0,0 @@ -#include "cache.h" - -static const char *alias_key; -static char *alias_val; - -static int alias_lookup_cb(const char *k, const char *v, void *cb) -{ - if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) { - if (!v) - return config_error_nonbool(k); - alias_val = strdup(v); - return 0; - } - return 0; -} - -char *alias_lookup(const char *alias) -{ - alias_key = alias; - alias_val = NULL; - perf_config(alias_lookup_cb, NULL); - return alias_val; -} - -int split_cmdline(char *cmdline, const char ***argv) -{ - int src, dst, count = 0, size = 16; - char quoted = 0; - - *argv = malloc(sizeof(char*) * size); - - /* split alias_string */ - (*argv)[count++] = cmdline; - for (src = dst = 0; cmdline[src];) { - char c = cmdline[src]; - if (!quoted && isspace(c)) { - cmdline[dst++] = 0; - while (cmdline[++src] - && isspace(cmdline[src])) - ; /* skip */ - if (count >= size) { - size += 16; - *argv = realloc(*argv, sizeof(char*) * size); - } - (*argv)[count++] = cmdline + dst; - } else if (!quoted && (c == '\'' || c == '"')) { - quoted = c; - src++; - } else if (c == quoted) { - quoted = 0; - src++; - } else { - if (c == '\\' && quoted != '\'') { - src++; - c = cmdline[src]; - if (!c) { - free(*argv); - *argv = NULL; - return error("cmdline ends with \\"); - } - } - cmdline[dst++] = c; - src++; - } - } - - cmdline[dst] = 0; - - if (quoted) { - free(*argv); - *argv = NULL; - return error("unclosed quote"); - } - - return count; -} - diff --git a/Documentation/perf_counter/util/cache.h b/Documentation/perf_counter/util/cache.h deleted file mode 100644 index 393d6146d13..00000000000 --- a/Documentation/perf_counter/util/cache.h +++ /dev/null @@ -1,119 +0,0 @@ -#ifndef CACHE_H -#define CACHE_H - -#include "util.h" -#include "strbuf.h" - -#define PERF_DIR_ENVIRONMENT "PERF_DIR" -#define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE" -#define DEFAULT_PERF_DIR_ENVIRONMENT ".perf" -#define DB_ENVIRONMENT "PERF_OBJECT_DIRECTORY" -#define INDEX_ENVIRONMENT "PERF_INDEX_FILE" -#define GRAFT_ENVIRONMENT "PERF_GRAFT_FILE" -#define TEMPLATE_DIR_ENVIRONMENT "PERF_TEMPLATE_DIR" -#define CONFIG_ENVIRONMENT "PERF_CONFIG" -#define EXEC_PATH_ENVIRONMENT "PERF_EXEC_PATH" -#define CEILING_DIRECTORIES_ENVIRONMENT "PERF_CEILING_DIRECTORIES" -#define PERFATTRIBUTES_FILE ".perfattributes" -#define INFOATTRIBUTES_FILE "info/attributes" -#define ATTRIBUTE_MACRO_PREFIX "[attr]" - -typedef int (*config_fn_t)(const char *, const char *, void *); -extern int perf_default_config(const char *, const char *, void *); -extern int perf_config_from_file(config_fn_t fn, const char *, void *); -extern int perf_config(config_fn_t fn, void *); -extern int perf_parse_ulong(const char *, unsigned long *); -extern int perf_config_int(const char *, const char *); -extern unsigned long perf_config_ulong(const char *, const char *); -extern int perf_config_bool_or_int(const char *, const char *, int *); -extern int perf_config_bool(const char *, const char *); -extern int perf_config_string(const char **, const char *, const char *); -extern int perf_config_set(const char *, const char *); -extern int perf_config_set_multivar(const char *, const char *, const char *, int); -extern int perf_config_rename_section(const char *, const 
char *); -extern const char *perf_etc_perfconfig(void); -extern int check_repository_format_version(const char *var, const char *value, void *cb); -extern int perf_config_system(void); -extern int perf_config_global(void); -extern int config_error_nonbool(const char *); -extern const char *config_exclusive_filename; - -#define MAX_PERFNAME (1000) -extern char perf_default_email[MAX_PERFNAME]; -extern char perf_default_name[MAX_PERFNAME]; -extern int user_ident_explicitly_given; - -extern const char *perf_log_output_encoding; -extern const char *perf_mailmap_file; - -/* IO helper functions */ -extern void maybe_flush_or_die(FILE *, const char *); -extern int copy_fd(int ifd, int ofd); -extern int copy_file(const char *dst, const char *src, int mode); -extern ssize_t read_in_full(int fd, void *buf, size_t count); -extern ssize_t write_in_full(int fd, const void *buf, size_t count); -extern void write_or_die(int fd, const void *buf, size_t count); -extern int write_or_whine(int fd, const void *buf, size_t count, const char *msg); -extern int write_or_whine_pipe(int fd, const void *buf, size_t count, const char *msg); -extern void fsync_or_die(int fd, const char *); - -/* pager.c */ -extern void setup_pager(void); -extern const char *pager_program; -extern int pager_in_use(void); -extern int pager_use_color; - -extern const char *editor_program; -extern const char *excludes_file; - -char *alias_lookup(const char *alias); -int split_cmdline(char *cmdline, const char ***argv); - -#define alloc_nr(x) (((x)+16)*3/2) - -/* - * Realloc the buffer pointed at by variable 'x' so that it can hold - * at least 'nr' entries; the number of entries currently allocated - * is 'alloc', using the standard growing factor alloc_nr() macro. - * - * DO NOT USE any expression with side-effect for 'x' or 'alloc'. - */ -#define ALLOC_GROW(x, nr, alloc) \ - do { \ - if ((nr) > alloc) { \ - if (alloc_nr(alloc) < (nr)) \ - alloc = (nr); \ - else \ - alloc = alloc_nr(alloc); \ - x = xrealloc((x), alloc * sizeof(*(x))); \ - } \ - } while(0) - - -static inline int is_absolute_path(const char *path) -{ - return path[0] == '/'; -} - -const char *make_absolute_path(const char *path); -const char *make_nonrelative_path(const char *path); -const char *make_relative_path(const char *abs, const char *base); -int normalize_path_copy(char *dst, const char *src); -int longest_ancestor_length(const char *path, const char *prefix_list); -char *strip_path_suffix(const char *path, const char *suffix); - -extern char *mkpath(const char *fmt, ...) __attribute__((format (printf, 1, 2))); -extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2))); -/* perf_mkstemp() - create tmp file honoring TMPDIR variable */ -extern int perf_mkstemp(char *path, size_t len, const char *template); - -extern char *mksnpath(char *buf, size_t n, const char *fmt, ...) - __attribute__((format (printf, 3, 4))); -extern char *perf_snpath(char *buf, size_t n, const char *fmt, ...) - __attribute__((format (printf, 3, 4))); -extern char *perf_pathdup(const char *fmt, ...) 
- __attribute__((format (printf, 1, 2))); - -extern size_t strlcpy(char *dest, const char *src, size_t size); - -#endif /* CACHE_H */ diff --git a/Documentation/perf_counter/util/color.c b/Documentation/perf_counter/util/color.c deleted file mode 100644 index 9a8c20ccc53..00000000000 --- a/Documentation/perf_counter/util/color.c +++ /dev/null @@ -1,241 +0,0 @@ -#include "cache.h" -#include "color.h" - -int perf_use_color_default = -1; - -static int parse_color(const char *name, int len) -{ - static const char * const color_names[] = { - "normal", "black", "red", "green", "yellow", - "blue", "magenta", "cyan", "white" - }; - char *end; - int i; - for (i = 0; i < ARRAY_SIZE(color_names); i++) { - const char *str = color_names[i]; - if (!strncasecmp(name, str, len) && !str[len]) - return i - 1; - } - i = strtol(name, &end, 10); - if (end - name == len && i >= -1 && i <= 255) - return i; - return -2; -} - -static int parse_attr(const char *name, int len) -{ - static const int attr_values[] = { 1, 2, 4, 5, 7 }; - static const char * const attr_names[] = { - "bold", "dim", "ul", "blink", "reverse" - }; - int i; - for (i = 0; i < ARRAY_SIZE(attr_names); i++) { - const char *str = attr_names[i]; - if (!strncasecmp(name, str, len) && !str[len]) - return attr_values[i]; - } - return -1; -} - -void color_parse(const char *value, const char *var, char *dst) -{ - color_parse_mem(value, strlen(value), var, dst); -} - -void color_parse_mem(const char *value, int value_len, const char *var, - char *dst) -{ - const char *ptr = value; - int len = value_len; - int attr = -1; - int fg = -2; - int bg = -2; - - if (!strncasecmp(value, "reset", len)) { - strcpy(dst, PERF_COLOR_RESET); - return; - } - - /* [fg [bg]] [attr] */ - while (len > 0) { - const char *word = ptr; - int val, wordlen = 0; - - while (len > 0 && !isspace(word[wordlen])) { - wordlen++; - len--; - } - - ptr = word + wordlen; - while (len > 0 && isspace(*ptr)) { - ptr++; - len--; - } - - val = parse_color(word, wordlen); - if (val >= -1) { - if (fg == -2) { - fg = val; - continue; - } - if (bg == -2) { - bg = val; - continue; - } - goto bad; - } - val = parse_attr(word, wordlen); - if (val < 0 || attr != -1) - goto bad; - attr = val; - } - - if (attr >= 0 || fg >= 0 || bg >= 0) { - int sep = 0; - - *dst++ = '\033'; - *dst++ = '['; - if (attr >= 0) { - *dst++ = '0' + attr; - sep++; - } - if (fg >= 0) { - if (sep++) - *dst++ = ';'; - if (fg < 8) { - *dst++ = '3'; - *dst++ = '0' + fg; - } else { - dst += sprintf(dst, "38;5;%d", fg); - } - } - if (bg >= 0) { - if (sep++) - *dst++ = ';'; - if (bg < 8) { - *dst++ = '4'; - *dst++ = '0' + bg; - } else { - dst += sprintf(dst, "48;5;%d", bg); - } - } - *dst++ = 'm'; - } - *dst = 0; - return; -bad: - die("bad color value '%.*s' for variable '%s'", value_len, value, var); -} - -int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty) -{ - if (value) { - if (!strcasecmp(value, "never")) - return 0; - if (!strcasecmp(value, "always")) - return 1; - if (!strcasecmp(value, "auto")) - goto auto_color; - } - - /* Missing or explicit false to turn off colorization */ - if (!perf_config_bool(var, value)) - return 0; - - /* any normal truth value defaults to 'auto' */ - auto_color: - if (stdout_is_tty < 0) - stdout_is_tty = isatty(1); - if (stdout_is_tty || (pager_in_use() && pager_use_color)) { - char *term = getenv("TERM"); - if (term && strcmp(term, "dumb")) - return 1; - } - return 0; -} - -int perf_color_default_config(const char *var, const char *value, void *cb) -{ - if (!strcmp(var, 
"color.ui")) { - perf_use_color_default = perf_config_colorbool(var, value, -1); - return 0; - } - - return perf_default_config(var, value, cb); -} - -static int color_vfprintf(FILE *fp, const char *color, const char *fmt, - va_list args, const char *trail) -{ - int r = 0; - - /* - * Auto-detect: - */ - if (perf_use_color_default < 0) { - if (isatty(1) || pager_in_use()) - perf_use_color_default = 1; - else - perf_use_color_default = 0; - } - - if (perf_use_color_default && *color) - r += fprintf(fp, "%s", color); - r += vfprintf(fp, fmt, args); - if (perf_use_color_default && *color) - r += fprintf(fp, "%s", PERF_COLOR_RESET); - if (trail) - r += fprintf(fp, "%s", trail); - return r; -} - - - -int color_fprintf(FILE *fp, const char *color, const char *fmt, ...) -{ - va_list args; - int r; - - va_start(args, fmt); - r = color_vfprintf(fp, color, fmt, args, NULL); - va_end(args); - return r; -} - -int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...) -{ - va_list args; - int r; - va_start(args, fmt); - r = color_vfprintf(fp, color, fmt, args, "\n"); - va_end(args); - return r; -} - -/* - * This function splits the buffer by newlines and colors the lines individually. - * - * Returns 0 on success. - */ -int color_fwrite_lines(FILE *fp, const char *color, - size_t count, const char *buf) -{ - if (!*color) - return fwrite(buf, count, 1, fp) != 1; - while (count) { - char *p = memchr(buf, '\n', count); - if (p != buf && (fputs(color, fp) < 0 || - fwrite(buf, p ? p - buf : count, 1, fp) != 1 || - fputs(PERF_COLOR_RESET, fp) < 0)) - return -1; - if (!p) - return 0; - if (fputc('\n', fp) < 0) - return -1; - count -= p + 1 - buf; - buf = p + 1; - } - return 0; -} - - diff --git a/Documentation/perf_counter/util/color.h b/Documentation/perf_counter/util/color.h deleted file mode 100644 index 5abfd379582..00000000000 --- a/Documentation/perf_counter/util/color.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef COLOR_H -#define COLOR_H - -/* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */ -#define COLOR_MAXLEN 24 - -#define PERF_COLOR_NORMAL "" -#define PERF_COLOR_RESET "\033[m" -#define PERF_COLOR_BOLD "\033[1m" -#define PERF_COLOR_RED "\033[31m" -#define PERF_COLOR_GREEN "\033[32m" -#define PERF_COLOR_YELLOW "\033[33m" -#define PERF_COLOR_BLUE "\033[34m" -#define PERF_COLOR_MAGENTA "\033[35m" -#define PERF_COLOR_CYAN "\033[36m" -#define PERF_COLOR_BG_RED "\033[41m" - -/* - * This variable stores the value of color.ui - */ -extern int perf_use_color_default; - - -/* - * Use this instead of perf_default_config if you need the value of color.ui. 
- */ -int perf_color_default_config(const char *var, const char *value, void *cb); - -int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty); -void color_parse(const char *value, const char *var, char *dst); -void color_parse_mem(const char *value, int len, const char *var, char *dst); -int color_fprintf(FILE *fp, const char *color, const char *fmt, ...); -int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...); -int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf); - -#endif /* COLOR_H */ diff --git a/Documentation/perf_counter/util/config.c b/Documentation/perf_counter/util/config.c deleted file mode 100644 index 3dd13faa6a2..00000000000 --- a/Documentation/perf_counter/util/config.c +++ /dev/null @@ -1,873 +0,0 @@ -/* - * GIT - The information manager from hell - * - * Copyright (C) Linus Torvalds, 2005 - * Copyright (C) Johannes Schindelin, 2005 - * - */ -#include "util.h" -#include "cache.h" -#include "exec_cmd.h" - -#define MAXNAME (256) - -static FILE *config_file; -static const char *config_file_name; -static int config_linenr; -static int config_file_eof; - -const char *config_exclusive_filename = NULL; - -static int get_next_char(void) -{ - int c; - FILE *f; - - c = '\n'; - if ((f = config_file) != NULL) { - c = fgetc(f); - if (c == '\r') { - /* DOS like systems */ - c = fgetc(f); - if (c != '\n') { - ungetc(c, f); - c = '\r'; - } - } - if (c == '\n') - config_linenr++; - if (c == EOF) { - config_file_eof = 1; - c = '\n'; - } - } - return c; -} - -static char *parse_value(void) -{ - static char value[1024]; - int quote = 0, comment = 0, len = 0, space = 0; - - for (;;) { - int c = get_next_char(); - if (len >= sizeof(value) - 1) - return NULL; - if (c == '\n') { - if (quote) - return NULL; - value[len] = 0; - return value; - } - if (comment) - continue; - if (isspace(c) && !quote) { - space = 1; - continue; - } - if (!quote) { - if (c == ';' || c == '#') { - comment = 1; - continue; - } - } - if (space) { - if (len) - value[len++] = ' '; - space = 0; - } - if (c == '\\') { - c = get_next_char(); - switch (c) { - case '\n': - continue; - case 't': - c = '\t'; - break; - case 'b': - c = '\b'; - break; - case 'n': - c = '\n'; - break; - /* Some characters escape as themselves */ - case '\\': case '"': - break; - /* Reject unknown escape sequences */ - default: - return NULL; - } - value[len++] = c; - continue; - } - if (c == '"') { - quote = 1-quote; - continue; - } - value[len++] = c; - } -} - -static inline int iskeychar(int c) -{ - return isalnum(c) || c == '-'; -} - -static int get_value(config_fn_t fn, void *data, char *name, unsigned int len) -{ - int c; - char *value; - - /* Get the full name */ - for (;;) { - c = get_next_char(); - if (config_file_eof) - break; - if (!iskeychar(c)) - break; - name[len++] = tolower(c); - if (len >= MAXNAME) - return -1; - } - name[len] = 0; - while (c == ' ' || c == '\t') - c = get_next_char(); - - value = NULL; - if (c != '\n') { - if (c != '=') - return -1; - value = parse_value(); - if (!value) - return -1; - } - return fn(name, value, data); -} - -static int get_extended_base_var(char *name, int baselen, int c) -{ - do { - if (c == '\n') - return -1; - c = get_next_char(); - } while (isspace(c)); - - /* We require the format to be '[base "extension"]' */ - if (c != '"') - return -1; - name[baselen++] = '.'; - - for (;;) { - int c = get_next_char(); - if (c == '\n') - return -1; - if (c == '"') - break; - if (c == '\\') { - c = get_next_char(); - if (c == '\n') - 
return -1; - } - name[baselen++] = c; - if (baselen > MAXNAME / 2) - return -1; - } - - /* Final ']' */ - if (get_next_char() != ']') - return -1; - return baselen; -} - -static int get_base_var(char *name) -{ - int baselen = 0; - - for (;;) { - int c = get_next_char(); - if (config_file_eof) - return -1; - if (c == ']') - return baselen; - if (isspace(c)) - return get_extended_base_var(name, baselen, c); - if (!iskeychar(c) && c != '.') - return -1; - if (baselen > MAXNAME / 2) - return -1; - name[baselen++] = tolower(c); - } -} - -static int perf_parse_file(config_fn_t fn, void *data) -{ - int comment = 0; - int baselen = 0; - static char var[MAXNAME]; - - /* U+FEFF Byte Order Mark in UTF8 */ - static const unsigned char *utf8_bom = (unsigned char *) "\xef\xbb\xbf"; - const unsigned char *bomptr = utf8_bom; - - for (;;) { - int c = get_next_char(); - if (bomptr && *bomptr) { - /* We are at the file beginning; skip UTF8-encoded BOM - * if present. Sane editors won't put this in on their - * own, but e.g. Windows Notepad will do it happily. */ - if ((unsigned char) c == *bomptr) { - bomptr++; - continue; - } else { - /* Do not tolerate partial BOM. */ - if (bomptr != utf8_bom) - break; - /* No BOM at file beginning. Cool. */ - bomptr = NULL; - } - } - if (c == '\n') { - if (config_file_eof) - return 0; - comment = 0; - continue; - } - if (comment || isspace(c)) - continue; - if (c == '#' || c == ';') { - comment = 1; - continue; - } - if (c == '[') { - baselen = get_base_var(var); - if (baselen <= 0) - break; - var[baselen++] = '.'; - var[baselen] = 0; - continue; - } - if (!isalpha(c)) - break; - var[baselen] = tolower(c); - if (get_value(fn, data, var, baselen+1) < 0) - break; - } - die("bad config file line %d in %s", config_linenr, config_file_name); -} - -static int parse_unit_factor(const char *end, unsigned long *val) -{ - if (!*end) - return 1; - else if (!strcasecmp(end, "k")) { - *val *= 1024; - return 1; - } - else if (!strcasecmp(end, "m")) { - *val *= 1024 * 1024; - return 1; - } - else if (!strcasecmp(end, "g")) { - *val *= 1024 * 1024 * 1024; - return 1; - } - return 0; -} - -static int perf_parse_long(const char *value, long *ret) -{ - if (value && *value) { - char *end; - long val = strtol(value, &end, 0); - unsigned long factor = 1; - if (!parse_unit_factor(end, &factor)) - return 0; - *ret = val * factor; - return 1; - } - return 0; -} - -int perf_parse_ulong(const char *value, unsigned long *ret) -{ - if (value && *value) { - char *end; - unsigned long val = strtoul(value, &end, 0); - if (!parse_unit_factor(end, &val)) - return 0; - *ret = val; - return 1; - } - return 0; -} - -static void die_bad_config(const char *name) -{ - if (config_file_name) - die("bad config value for '%s' in %s", name, config_file_name); - die("bad config value for '%s'", name); -} - -int perf_config_int(const char *name, const char *value) -{ - long ret = 0; - if (!perf_parse_long(value, &ret)) - die_bad_config(name); - return ret; -} - -unsigned long perf_config_ulong(const char *name, const char *value) -{ - unsigned long ret; - if (!perf_parse_ulong(value, &ret)) - die_bad_config(name); - return ret; -} - -int perf_config_bool_or_int(const char *name, const char *value, int *is_bool) -{ - *is_bool = 1; - if (!value) - return 1; - if (!*value) - return 0; - if (!strcasecmp(value, "true") || !strcasecmp(value, "yes") || !strcasecmp(value, "on")) - return 1; - if (!strcasecmp(value, "false") || !strcasecmp(value, "no") || !strcasecmp(value, "off")) - return 0; - *is_bool = 0; - return 
perf_config_int(name, value); -} - -int perf_config_bool(const char *name, const char *value) -{ - int discard; - return !!perf_config_bool_or_int(name, value, &discard); -} - -int perf_config_string(const char **dest, const char *var, const char *value) -{ - if (!value) - return config_error_nonbool(var); - *dest = strdup(value); - return 0; -} - -static int perf_default_core_config(const char *var, const char *value) -{ - /* Add other config variables here and to Documentation/config.txt. */ - return 0; -} - -int perf_default_config(const char *var, const char *value, void *dummy) -{ - if (!prefixcmp(var, "core.")) - return perf_default_core_config(var, value); - - /* Add other config variables here and to Documentation/config.txt. */ - return 0; -} - -int perf_config_from_file(config_fn_t fn, const char *filename, void *data) -{ - int ret; - FILE *f = fopen(filename, "r"); - - ret = -1; - if (f) { - config_file = f; - config_file_name = filename; - config_linenr = 1; - config_file_eof = 0; - ret = perf_parse_file(fn, data); - fclose(f); - config_file_name = NULL; - } - return ret; -} - -const char *perf_etc_perfconfig(void) -{ - static const char *system_wide; - if (!system_wide) - system_wide = system_path(ETC_PERFCONFIG); - return system_wide; -} - -static int perf_env_bool(const char *k, int def) -{ - const char *v = getenv(k); - return v ? perf_config_bool(k, v) : def; -} - -int perf_config_system(void) -{ - return !perf_env_bool("PERF_CONFIG_NOSYSTEM", 0); -} - -int perf_config_global(void) -{ - return !perf_env_bool("PERF_CONFIG_NOGLOBAL", 0); -} - -int perf_config(config_fn_t fn, void *data) -{ - int ret = 0, found = 0; - char *repo_config = NULL; - const char *home = NULL; - - /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */ - if (config_exclusive_filename) - return perf_config_from_file(fn, config_exclusive_filename, data); - if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK)) { - ret += perf_config_from_file(fn, perf_etc_perfconfig(), - data); - found += 1; - } - - home = getenv("HOME"); - if (perf_config_global() && home) { - char *user_config = strdup(mkpath("%s/.perfconfig", home)); - if (!access(user_config, R_OK)) { - ret += perf_config_from_file(fn, user_config, data); - found += 1; - } - free(user_config); - } - - repo_config = perf_pathdup("config"); - if (!access(repo_config, R_OK)) { - ret += perf_config_from_file(fn, repo_config, data); - found += 1; - } - free(repo_config); - if (found == 0) - return -1; - return ret; -} - -/* - * Find all the stuff for perf_config_set() below. 
- */ - -#define MAX_MATCHES 512 - -static struct { - int baselen; - char* key; - int do_not_match; - regex_t* value_regex; - int multi_replace; - size_t offset[MAX_MATCHES]; - enum { START, SECTION_SEEN, SECTION_END_SEEN, KEY_SEEN } state; - int seen; -} store; - -static int matches(const char* key, const char* value) -{ - return !strcmp(key, store.key) && - (store.value_regex == NULL || - (store.do_not_match ^ - !regexec(store.value_regex, value, 0, NULL, 0))); -} - -static int store_aux(const char* key, const char* value, void *cb) -{ - const char *ep; - size_t section_len; - - switch (store.state) { - case KEY_SEEN: - if (matches(key, value)) { - if (store.seen == 1 && store.multi_replace == 0) { - warning("%s has multiple values", key); - } else if (store.seen >= MAX_MATCHES) { - error("too many matches for %s", key); - return 1; - } - - store.offset[store.seen] = ftell(config_file); - store.seen++; - } - break; - case SECTION_SEEN: - /* - * What we are looking for is in store.key (both - * section and var), and its section part is baselen - * long. We found key (again, both section and var). - * We would want to know if this key is in the same - * section as what we are looking for. We already - * know we are in the same section as what should - * hold store.key. - */ - ep = strrchr(key, '.'); - section_len = ep - key; - - if ((section_len != store.baselen) || - memcmp(key, store.key, section_len+1)) { - store.state = SECTION_END_SEEN; - break; - } - - /* - * Do not increment matches: this is no match, but we - * just made sure we are in the desired section. - */ - store.offset[store.seen] = ftell(config_file); - /* fallthru */ - case SECTION_END_SEEN: - case START: - if (matches(key, value)) { - store.offset[store.seen] = ftell(config_file); - store.state = KEY_SEEN; - store.seen++; - } else { - if (strrchr(key, '.') - key == store.baselen && - !strncmp(key, store.key, store.baselen)) { - store.state = SECTION_SEEN; - store.offset[store.seen] = ftell(config_file); - } - } - } - return 0; -} - -static int store_write_section(int fd, const char* key) -{ - const char *dot; - int i, success; - struct strbuf sb = STRBUF_INIT; - - dot = memchr(key, '.', store.baselen); - if (dot) { - strbuf_addf(&sb, "[%.*s \"", (int)(dot - key), key); - for (i = dot - key + 1; i < store.baselen; i++) { - if (key[i] == '"' || key[i] == '\\') - strbuf_addch(&sb, '\\'); - strbuf_addch(&sb, key[i]); - } - strbuf_addstr(&sb, "\"]\n"); - } else { - strbuf_addf(&sb, "[%.*s]\n", store.baselen, key); - } - - success = write_in_full(fd, sb.buf, sb.len) == sb.len; - strbuf_release(&sb); - - return success; -} - -static int store_write_pair(int fd, const char* key, const char* value) -{ - int i, success; - int length = strlen(key + store.baselen + 1); - const char *quote = ""; - struct strbuf sb = STRBUF_INIT; - - /* - * Check to see if the value needs to be surrounded with a dq pair. - * Note that problematic characters are always backslash-quoted; this - * check is about not losing leading or trailing SP and strings that - * follow beginning-of-comment characters (i.e. ';' and '#') by the - * configuration parser. 
- */ - if (value[0] == ' ') - quote = "\""; - for (i = 0; value[i]; i++) - if (value[i] == ';' || value[i] == '#') - quote = "\""; - if (i && value[i - 1] == ' ') - quote = "\""; - - strbuf_addf(&sb, "\t%.*s = %s", - length, key + store.baselen + 1, quote); - - for (i = 0; value[i]; i++) - switch (value[i]) { - case '\n': - strbuf_addstr(&sb, "\\n"); - break; - case '\t': - strbuf_addstr(&sb, "\\t"); - break; - case '"': - case '\\': - strbuf_addch(&sb, '\\'); - default: - strbuf_addch(&sb, value[i]); - break; - } - strbuf_addf(&sb, "%s\n", quote); - - success = write_in_full(fd, sb.buf, sb.len) == sb.len; - strbuf_release(&sb); - - return success; -} - -static ssize_t find_beginning_of_line(const char* contents, size_t size, - size_t offset_, int* found_bracket) -{ - size_t equal_offset = size, bracket_offset = size; - ssize_t offset; - -contline: - for (offset = offset_-2; offset > 0 - && contents[offset] != '\n'; offset--) - switch (contents[offset]) { - case '=': equal_offset = offset; break; - case ']': bracket_offset = offset; break; - } - if (offset > 0 && contents[offset-1] == '\\') { - offset_ = offset; - goto contline; - } - if (bracket_offset < equal_offset) { - *found_bracket = 1; - offset = bracket_offset+1; - } else - offset++; - - return offset; -} - -int perf_config_set(const char* key, const char* value) -{ - return perf_config_set_multivar(key, value, NULL, 0); -} - -/* - * If value==NULL, unset in (remove from) config, - * if value_regex!=NULL, disregard key/value pairs where value does not match. - * if multi_replace==0, nothing, or only one matching key/value is replaced, - * else all matching key/values (regardless how many) are removed, - * before the new pair is written. - * - * Returns 0 on success. - * - * This function does this: - * - * - it locks the config file by creating ".perf/config.lock" - * - * - it then parses the config using store_aux() as validator to find - * the position on the key/value pair to replace. If it is to be unset, - * it must be found exactly once. - * - * - the config file is mmap()ed and the part before the match (if any) is - * written to the lock file, then the changed part and the rest. - * - * - the config file is removed and the lock file rename()d to it. - * - */ -int perf_config_set_multivar(const char* key, const char* value, - const char* value_regex, int multi_replace) -{ - int i, dot; - int fd = -1, in_fd; - int ret = 0; - char* config_filename; - const char* last_dot = strrchr(key, '.'); - - if (config_exclusive_filename) - config_filename = strdup(config_exclusive_filename); - else - config_filename = perf_pathdup("config"); - - /* - * Since "key" actually contains the section name and the real - * key name separated by a dot, we have to know where the dot is. - */ - - if (last_dot == NULL) { - error("key does not contain a section: %s", key); - ret = 2; - goto out_free; - } - store.baselen = last_dot - key; - - store.multi_replace = multi_replace; - - /* - * Validate the key and while at it, lower case it for matching. - */ - store.key = malloc(strlen(key) + 1); - dot = 0; - for (i = 0; key[i]; i++) { - unsigned char c = key[i]; - if (c == '.') - dot = 1; - /* Leave the extended basename untouched.. 
*/ - if (!dot || i > store.baselen) { - if (!iskeychar(c) || (i == store.baselen+1 && !isalpha(c))) { - error("invalid key: %s", key); - free(store.key); - ret = 1; - goto out_free; - } - c = tolower(c); - } else if (c == '\n') { - error("invalid key (newline): %s", key); - free(store.key); - ret = 1; - goto out_free; - } - store.key[i] = c; - } - store.key[i] = 0; - - /* - * If .perf/config does not exist yet, write a minimal version. - */ - in_fd = open(config_filename, O_RDONLY); - if ( in_fd < 0 ) { - free(store.key); - - if ( ENOENT != errno ) { - error("opening %s: %s", config_filename, - strerror(errno)); - ret = 3; /* same as "invalid config file" */ - goto out_free; - } - /* if nothing to unset, error out */ - if (value == NULL) { - ret = 5; - goto out_free; - } - - store.key = (char*)key; - if (!store_write_section(fd, key) || - !store_write_pair(fd, key, value)) - goto write_err_out; - } else { - struct stat st; - char* contents; - size_t contents_sz, copy_begin, copy_end; - int i, new_line = 0; - - if (value_regex == NULL) - store.value_regex = NULL; - else { - if (value_regex[0] == '!') { - store.do_not_match = 1; - value_regex++; - } else - store.do_not_match = 0; - - store.value_regex = (regex_t*)malloc(sizeof(regex_t)); - if (regcomp(store.value_regex, value_regex, - REG_EXTENDED)) { - error("invalid pattern: %s", value_regex); - free(store.value_regex); - ret = 6; - goto out_free; - } - } - - store.offset[0] = 0; - store.state = START; - store.seen = 0; - - /* - * After this, store.offset will contain the *end* offset - * of the last match, or remain at 0 if no match was found. - * As a side effect, we make sure to transform only a valid - * existing config file. - */ - if (perf_config_from_file(store_aux, config_filename, NULL)) { - error("invalid config file %s", config_filename); - free(store.key); - if (store.value_regex != NULL) { - regfree(store.value_regex); - free(store.value_regex); - } - ret = 3; - goto out_free; - } - - free(store.key); - if (store.value_regex != NULL) { - regfree(store.value_regex); - free(store.value_regex); - } - - /* if nothing to unset, or too many matches, error out */ - if ((store.seen == 0 && value == NULL) || - (store.seen > 1 && multi_replace == 0)) { - ret = 5; - goto out_free; - } - - fstat(in_fd, &st); - contents_sz = xsize_t(st.st_size); - contents = mmap(NULL, contents_sz, PROT_READ, - MAP_PRIVATE, in_fd, 0); - close(in_fd); - - if (store.seen == 0) - store.seen = 1; - - for (i = 0, copy_begin = 0; i < store.seen; i++) { - if (store.offset[i] == 0) { - store.offset[i] = copy_end = contents_sz; - } else if (store.state != KEY_SEEN) { - copy_end = store.offset[i]; - } else - copy_end = find_beginning_of_line( - contents, contents_sz, - store.offset[i]-2, &new_line); - - if (copy_end > 0 && contents[copy_end-1] != '\n') - new_line = 1; - - /* write the first part of the config */ - if (copy_end > copy_begin) { - if (write_in_full(fd, contents + copy_begin, - copy_end - copy_begin) < - copy_end - copy_begin) - goto write_err_out; - if (new_line && - write_in_full(fd, "\n", 1) != 1) - goto write_err_out; - } - copy_begin = store.offset[i]; - } - - /* write the pair (value == NULL means unset) */ - if (value != NULL) { - if (store.state == START) { - if (!store_write_section(fd, key)) - goto write_err_out; - } - if (!store_write_pair(fd, key, value)) - goto write_err_out; - } - - /* write the rest of the config */ - if (copy_begin < contents_sz) - if (write_in_full(fd, contents + copy_begin, - contents_sz - copy_begin) < - contents_sz - 
copy_begin) - goto write_err_out; - - munmap(contents, contents_sz); - } - - ret = 0; - -out_free: - free(config_filename); - return ret; - -write_err_out: - goto out_free; - -} - -/* - * Call this to report error for your variable that should not - * get a boolean value (i.e. "[my] var" means "true"). - */ -int config_error_nonbool(const char *var) -{ - return error("Missing value for '%s'", var); -} diff --git a/Documentation/perf_counter/util/ctype.c b/Documentation/perf_counter/util/ctype.c deleted file mode 100644 index b90ec004f29..00000000000 --- a/Documentation/perf_counter/util/ctype.c +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Sane locale-independent, ASCII ctype. - * - * No surprises, and works with signed and unsigned chars. - */ -#include "cache.h" - -enum { - S = GIT_SPACE, - A = GIT_ALPHA, - D = GIT_DIGIT, - G = GIT_GLOB_SPECIAL, /* *, ?, [, \\ */ - R = GIT_REGEX_SPECIAL, /* $, (, ), +, ., ^, {, | * */ -}; - -unsigned char sane_ctype[256] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, S, S, 0, 0, S, 0, 0, /* 0.. 15 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 16.. 31 */ - S, 0, 0, 0, R, 0, 0, 0, R, R, G, R, 0, 0, R, 0, /* 32.. 47 */ - D, D, D, D, D, D, D, D, D, D, 0, 0, 0, 0, 0, G, /* 48.. 63 */ - 0, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 64.. 79 */ - A, A, A, A, A, A, A, A, A, A, A, G, G, 0, R, 0, /* 80.. 95 */ - 0, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 96..111 */ - A, A, A, A, A, A, A, A, A, A, A, R, R, 0, 0, 0, /* 112..127 */ - /* Nothing in the 128.. range */ -}; diff --git a/Documentation/perf_counter/util/environment.c b/Documentation/perf_counter/util/environment.c deleted file mode 100644 index 275b0ee345f..00000000000 --- a/Documentation/perf_counter/util/environment.c +++ /dev/null @@ -1,9 +0,0 @@ -/* - * We put all the perf config variables in this same object - * file, so that programs can link against the config parser - * without having to link against all the rest of perf. - */ -#include "cache.h" - -const char *pager_program; -int pager_use_color = 1; diff --git a/Documentation/perf_counter/util/exec_cmd.c b/Documentation/perf_counter/util/exec_cmd.c deleted file mode 100644 index d3929226315..00000000000 --- a/Documentation/perf_counter/util/exec_cmd.c +++ /dev/null @@ -1,165 +0,0 @@ -#include "cache.h" -#include "exec_cmd.h" -#include "quote.h" -#define MAX_ARGS 32 - -extern char **environ; -static const char *argv_exec_path; -static const char *argv0_path; - -const char *system_path(const char *path) -{ -#ifdef RUNTIME_PREFIX - static const char *prefix; -#else - static const char *prefix = PREFIX; -#endif - struct strbuf d = STRBUF_INIT; - - if (is_absolute_path(path)) - return path; - -#ifdef RUNTIME_PREFIX - assert(argv0_path); - assert(is_absolute_path(argv0_path)); - - if (!prefix && - !(prefix = strip_path_suffix(argv0_path, PERF_EXEC_PATH)) && - !(prefix = strip_path_suffix(argv0_path, BINDIR)) && - !(prefix = strip_path_suffix(argv0_path, "perf"))) { - prefix = PREFIX; - fprintf(stderr, "RUNTIME_PREFIX requested, " - "but prefix computation failed. 
" - "Using static fallback '%s'.\n", prefix); - } -#endif - - strbuf_addf(&d, "%s/%s", prefix, path); - path = strbuf_detach(&d, NULL); - return path; -} - -const char *perf_extract_argv0_path(const char *argv0) -{ - const char *slash; - - if (!argv0 || !*argv0) - return NULL; - slash = argv0 + strlen(argv0); - - while (argv0 <= slash && !is_dir_sep(*slash)) - slash--; - - if (slash >= argv0) { - argv0_path = strndup(argv0, slash - argv0); - return slash + 1; - } - - return argv0; -} - -void perf_set_argv_exec_path(const char *exec_path) -{ - argv_exec_path = exec_path; - /* - * Propagate this setting to external programs. - */ - setenv(EXEC_PATH_ENVIRONMENT, exec_path, 1); -} - - -/* Returns the highest-priority, location to look for perf programs. */ -const char *perf_exec_path(void) -{ - const char *env; - - if (argv_exec_path) - return argv_exec_path; - - env = getenv(EXEC_PATH_ENVIRONMENT); - if (env && *env) { - return env; - } - - return system_path(PERF_EXEC_PATH); -} - -static void add_path(struct strbuf *out, const char *path) -{ - if (path && *path) { - if (is_absolute_path(path)) - strbuf_addstr(out, path); - else - strbuf_addstr(out, make_nonrelative_path(path)); - - strbuf_addch(out, PATH_SEP); - } -} - -void setup_path(void) -{ - const char *old_path = getenv("PATH"); - struct strbuf new_path = STRBUF_INIT; - - add_path(&new_path, perf_exec_path()); - add_path(&new_path, argv0_path); - - if (old_path) - strbuf_addstr(&new_path, old_path); - else - strbuf_addstr(&new_path, "/usr/local/bin:/usr/bin:/bin"); - - setenv("PATH", new_path.buf, 1); - - strbuf_release(&new_path); -} - -const char **prepare_perf_cmd(const char **argv) -{ - int argc; - const char **nargv; - - for (argc = 0; argv[argc]; argc++) - ; /* just counting */ - nargv = malloc(sizeof(*nargv) * (argc + 2)); - - nargv[0] = "perf"; - for (argc = 0; argv[argc]; argc++) - nargv[argc + 1] = argv[argc]; - nargv[argc + 1] = NULL; - return nargv; -} - -int execv_perf_cmd(const char **argv) { - const char **nargv = prepare_perf_cmd(argv); - - /* execvp() can only ever return if it fails */ - execvp("perf", (char **)nargv); - - free(nargv); - return -1; -} - - -int execl_perf_cmd(const char *cmd,...) 
-{ - int argc; - const char *argv[MAX_ARGS + 1]; - const char *arg; - va_list param; - - va_start(param, cmd); - argv[0] = cmd; - argc = 1; - while (argc < MAX_ARGS) { - arg = argv[argc++] = va_arg(param, char *); - if (!arg) - break; - } - va_end(param); - if (MAX_ARGS <= argc) - return error("too many args to run %s", cmd); - - argv[argc] = NULL; - return execv_perf_cmd(argv); -} diff --git a/Documentation/perf_counter/util/exec_cmd.h b/Documentation/perf_counter/util/exec_cmd.h deleted file mode 100644 index effe25eb154..00000000000 --- a/Documentation/perf_counter/util/exec_cmd.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef PERF_EXEC_CMD_H -#define PERF_EXEC_CMD_H - -extern void perf_set_argv_exec_path(const char *exec_path); -extern const char *perf_extract_argv0_path(const char *path); -extern const char *perf_exec_path(void); -extern void setup_path(void); -extern const char **prepare_perf_cmd(const char **argv); -extern int execv_perf_cmd(const char **argv); /* NULL terminated */ -extern int execl_perf_cmd(const char *cmd, ...); -extern const char *system_path(const char *path); - -#endif /* PERF_EXEC_CMD_H */ diff --git a/Documentation/perf_counter/util/generate-cmdlist.sh b/Documentation/perf_counter/util/generate-cmdlist.sh deleted file mode 100755 index f06f6fd148f..00000000000 --- a/Documentation/perf_counter/util/generate-cmdlist.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/sh - -echo "/* Automatically generated by $0 */ -struct cmdname_help -{ - char name[16]; - char help[80]; -}; - -static struct cmdname_help common_cmds[] = {" - -sed -n -e 's/^perf-\([^ ]*\)[ ].* common.*/\1/p' command-list.txt | -sort | -while read cmd -do - sed -n ' - /^NAME/,/perf-'"$cmd"'/H - ${ - x - s/.*perf-'"$cmd"' - \(.*\)/ {"'"$cmd"'", "\1"},/ - p - }' "Documentation/perf-$cmd.txt" -done -echo "};" diff --git a/Documentation/perf_counter/util/help.c b/Documentation/perf_counter/util/help.c deleted file mode 100644 index 6653f7dd1d7..00000000000 --- a/Documentation/perf_counter/util/help.c +++ /dev/null @@ -1,367 +0,0 @@ -#include "cache.h" -#include "../builtin.h" -#include "exec_cmd.h" -#include "levenshtein.h" -#include "help.h" - -/* most GUI terminals set COLUMNS (although some don't export it) */ -static int term_columns(void) -{ - char *col_string = getenv("COLUMNS"); - int n_cols; - - if (col_string && (n_cols = atoi(col_string)) > 0) - return n_cols; - -#ifdef TIOCGWINSZ - { - struct winsize ws; - if (!ioctl(1, TIOCGWINSZ, &ws)) { - if (ws.ws_col) - return ws.ws_col; - } - } -#endif - - return 80; -} - -void add_cmdname(struct cmdnames *cmds, const char *name, int len) -{ - struct cmdname *ent = malloc(sizeof(*ent) + len + 1); - - ent->len = len; - memcpy(ent->name, name, len); - ent->name[len] = 0; - - ALLOC_GROW(cmds->names, cmds->cnt + 1, cmds->alloc); - cmds->names[cmds->cnt++] = ent; -} - -static void clean_cmdnames(struct cmdnames *cmds) -{ - int i; - for (i = 0; i < cmds->cnt; ++i) - free(cmds->names[i]); - free(cmds->names); - cmds->cnt = 0; - cmds->alloc = 0; -} - -static int cmdname_compare(const void *a_, const void *b_) -{ - struct cmdname *a = *(struct cmdname **)a_; - struct cmdname *b = *(struct cmdname **)b_; - return strcmp(a->name, b->name); -} - -static void uniq(struct cmdnames *cmds) -{ - int i, j; - - if (!cmds->cnt) - return; - - for (i = j = 1; i < cmds->cnt; i++) - if (strcmp(cmds->names[i]->name, cmds->names[i-1]->name)) - cmds->names[j++] = cmds->names[i]; - - cmds->cnt = j; -} - -void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes) -{ - int ci, cj, ei; - 
int cmp; - - ci = cj = ei = 0; - while (ci < cmds->cnt && ei < excludes->cnt) { - cmp = strcmp(cmds->names[ci]->name, excludes->names[ei]->name); - if (cmp < 0) - cmds->names[cj++] = cmds->names[ci++]; - else if (cmp == 0) - ci++, ei++; - else if (cmp > 0) - ei++; - } - - while (ci < cmds->cnt) - cmds->names[cj++] = cmds->names[ci++]; - - cmds->cnt = cj; -} - -static void pretty_print_string_list(struct cmdnames *cmds, int longest) -{ - int cols = 1, rows; - int space = longest + 1; /* min 1 SP between words */ - int max_cols = term_columns() - 1; /* don't print *on* the edge */ - int i, j; - - if (space < max_cols) - cols = max_cols / space; - rows = (cmds->cnt + cols - 1) / cols; - - for (i = 0; i < rows; i++) { - printf(" "); - - for (j = 0; j < cols; j++) { - int n = j * rows + i; - int size = space; - if (n >= cmds->cnt) - break; - if (j == cols-1 || n + rows >= cmds->cnt) - size = 1; - printf("%-*s", size, cmds->names[n]->name); - } - putchar('\n'); - } -} - -static int is_executable(const char *name) -{ - struct stat st; - - if (stat(name, &st) || /* stat, not lstat */ - !S_ISREG(st.st_mode)) - return 0; - -#ifdef __MINGW32__ - /* cannot trust the executable bit, peek into the file instead */ - char buf[3] = { 0 }; - int n; - int fd = open(name, O_RDONLY); - st.st_mode &= ~S_IXUSR; - if (fd >= 0) { - n = read(fd, buf, 2); - if (n == 2) - /* DOS executables start with "MZ" */ - if (!strcmp(buf, "#!") || !strcmp(buf, "MZ")) - st.st_mode |= S_IXUSR; - close(fd); - } -#endif - return st.st_mode & S_IXUSR; -} - -static void list_commands_in_dir(struct cmdnames *cmds, - const char *path, - const char *prefix) -{ - int prefix_len; - DIR *dir = opendir(path); - struct dirent *de; - struct strbuf buf = STRBUF_INIT; - int len; - - if (!dir) - return; - if (!prefix) - prefix = "perf-"; - prefix_len = strlen(prefix); - - strbuf_addf(&buf, "%s/", path); - len = buf.len; - - while ((de = readdir(dir)) != NULL) { - int entlen; - - if (prefixcmp(de->d_name, prefix)) - continue; - - strbuf_setlen(&buf, len); - strbuf_addstr(&buf, de->d_name); - if (!is_executable(buf.buf)) - continue; - - entlen = strlen(de->d_name) - prefix_len; - if (has_extension(de->d_name, ".exe")) - entlen -= 4; - - add_cmdname(cmds, de->d_name + prefix_len, entlen); - } - closedir(dir); - strbuf_release(&buf); -} - -void load_command_list(const char *prefix, - struct cmdnames *main_cmds, - struct cmdnames *other_cmds) -{ - const char *env_path = getenv("PATH"); - const char *exec_path = perf_exec_path(); - - if (exec_path) { - list_commands_in_dir(main_cmds, exec_path, prefix); - qsort(main_cmds->names, main_cmds->cnt, - sizeof(*main_cmds->names), cmdname_compare); - uniq(main_cmds); - } - - if (env_path) { - char *paths, *path, *colon; - path = paths = strdup(env_path); - while (1) { - if ((colon = strchr(path, PATH_SEP))) - *colon = 0; - if (!exec_path || strcmp(path, exec_path)) - list_commands_in_dir(other_cmds, path, prefix); - - if (!colon) - break; - path = colon + 1; - } - free(paths); - - qsort(other_cmds->names, other_cmds->cnt, - sizeof(*other_cmds->names), cmdname_compare); - uniq(other_cmds); - } - exclude_cmds(other_cmds, main_cmds); -} - -void list_commands(const char *title, struct cmdnames *main_cmds, - struct cmdnames *other_cmds) -{ - int i, longest = 0; - - for (i = 0; i < main_cmds->cnt; i++) - if (longest < main_cmds->names[i]->len) - longest = main_cmds->names[i]->len; - for (i = 0; i < other_cmds->cnt; i++) - if (longest < other_cmds->names[i]->len) - longest = other_cmds->names[i]->len; - - if 
(main_cmds->cnt) { - const char *exec_path = perf_exec_path(); - printf("available %s in '%s'\n", title, exec_path); - printf("----------------"); - mput_char('-', strlen(title) + strlen(exec_path)); - putchar('\n'); - pretty_print_string_list(main_cmds, longest); - putchar('\n'); - } - - if (other_cmds->cnt) { - printf("%s available from elsewhere on your $PATH\n", title); - printf("---------------------------------------"); - mput_char('-', strlen(title)); - putchar('\n'); - pretty_print_string_list(other_cmds, longest); - putchar('\n'); - } -} - -int is_in_cmdlist(struct cmdnames *c, const char *s) -{ - int i; - for (i = 0; i < c->cnt; i++) - if (!strcmp(s, c->names[i]->name)) - return 1; - return 0; -} - -static int autocorrect; -static struct cmdnames aliases; - -static int perf_unknown_cmd_config(const char *var, const char *value, void *cb) -{ - if (!strcmp(var, "help.autocorrect")) - autocorrect = perf_config_int(var,value); - /* Also use aliases for command lookup */ - if (!prefixcmp(var, "alias.")) - add_cmdname(&aliases, var + 6, strlen(var + 6)); - - return perf_default_config(var, value, cb); -} - -static int levenshtein_compare(const void *p1, const void *p2) -{ - const struct cmdname *const *c1 = p1, *const *c2 = p2; - const char *s1 = (*c1)->name, *s2 = (*c2)->name; - int l1 = (*c1)->len; - int l2 = (*c2)->len; - return l1 != l2 ? l1 - l2 : strcmp(s1, s2); -} - -static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old) -{ - int i; - ALLOC_GROW(cmds->names, cmds->cnt + old->cnt, cmds->alloc); - - for (i = 0; i < old->cnt; i++) - cmds->names[cmds->cnt++] = old->names[i]; - free(old->names); - old->cnt = 0; - old->names = NULL; -} - -const char *help_unknown_cmd(const char *cmd) -{ - int i, n = 0, best_similarity = 0; - struct cmdnames main_cmds, other_cmds; - - memset(&main_cmds, 0, sizeof(main_cmds)); - memset(&other_cmds, 0, sizeof(other_cmds)); - memset(&aliases, 0, sizeof(aliases)); - - perf_config(perf_unknown_cmd_config, NULL); - - load_command_list("perf-", &main_cmds, &other_cmds); - - add_cmd_list(&main_cmds, &aliases); - add_cmd_list(&main_cmds, &other_cmds); - qsort(main_cmds.names, main_cmds.cnt, - sizeof(*main_cmds.names), cmdname_compare); - uniq(&main_cmds); - - if (main_cmds.cnt) { - /* This reuses cmdname->len for similarity index */ - for (i = 0; i < main_cmds.cnt; ++i) - main_cmds.names[i]->len = - levenshtein(cmd, main_cmds.names[i]->name, 0, 2, 1, 4); - - qsort(main_cmds.names, main_cmds.cnt, - sizeof(*main_cmds.names), levenshtein_compare); - - best_similarity = main_cmds.names[0]->len; - n = 1; - while (n < main_cmds.cnt && best_similarity == main_cmds.names[n]->len) - ++n; - } - - if (autocorrect && n == 1) { - const char *assumed = main_cmds.names[0]->name; - - main_cmds.names[0] = NULL; - clean_cmdnames(&main_cmds); - fprintf(stderr, "WARNING: You called a perf program named '%s', " - "which does not exist.\n" - "Continuing under the assumption that you meant '%s'\n", - cmd, assumed); - if (autocorrect > 0) { - fprintf(stderr, "in %0.1f seconds automatically...\n", - (float)autocorrect/10.0); - poll(NULL, 0, autocorrect * 100); - } - return assumed; - } - - fprintf(stderr, "perf: '%s' is not a perf-command. See 'perf --help'.\n", cmd); - - if (main_cmds.cnt && best_similarity < 6) { - fprintf(stderr, "\nDid you mean %s?\n", - n < 2 ? 
"this": "one of these"); - - for (i = 0; i < n; i++) - fprintf(stderr, "\t%s\n", main_cmds.names[i]->name); - } - - exit(1); -} - -int cmd_version(int argc, const char **argv, const char *prefix) -{ - printf("perf version %s\n", perf_version_string); - return 0; -} diff --git a/Documentation/perf_counter/util/help.h b/Documentation/perf_counter/util/help.h deleted file mode 100644 index 56bc15406ff..00000000000 --- a/Documentation/perf_counter/util/help.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef HELP_H -#define HELP_H - -struct cmdnames { - int alloc; - int cnt; - struct cmdname { - size_t len; /* also used for similarity index in help.c */ - char name[FLEX_ARRAY]; - } **names; -}; - -static inline void mput_char(char c, unsigned int num) -{ - while(num--) - putchar(c); -} - -void load_command_list(const char *prefix, - struct cmdnames *main_cmds, - struct cmdnames *other_cmds); -void add_cmdname(struct cmdnames *cmds, const char *name, int len); -/* Here we require that excludes is a sorted list. */ -void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes); -int is_in_cmdlist(struct cmdnames *c, const char *s); -void list_commands(const char *title, struct cmdnames *main_cmds, - struct cmdnames *other_cmds); - -#endif /* HELP_H */ diff --git a/Documentation/perf_counter/util/levenshtein.c b/Documentation/perf_counter/util/levenshtein.c deleted file mode 100644 index e521d1516df..00000000000 --- a/Documentation/perf_counter/util/levenshtein.c +++ /dev/null @@ -1,84 +0,0 @@ -#include "cache.h" -#include "levenshtein.h" - -/* - * This function implements the Damerau-Levenshtein algorithm to - * calculate a distance between strings. - * - * Basically, it says how many letters need to be swapped, substituted, - * deleted from, or added to string1, at least, to get string2. - * - * The idea is to build a distance matrix for the substrings of both - * strings. To avoid a large space complexity, only the last three rows - * are kept in memory (if swaps had the same or higher cost as one deletion - * plus one insertion, only two rows would be needed). - * - * At any stage, "i + 1" denotes the length of the current substring of - * string1 that the distance is calculated for. - * - * row2 holds the current row, row1 the previous row (i.e. for the substring - * of string1 of length "i"), and row0 the row before that. - * - * In other words, at the start of the big loop, row2[j + 1] contains the - * Damerau-Levenshtein distance between the substring of string1 of length - * "i" and the substring of string2 of length "j + 1". - * - * All the big loop does is determine the partial minimum-cost paths. - * - * It does so by calculating the costs of the path ending in characters - * i (in string1) and j (in string2), respectively, given that the last - * operation is a substition, a swap, a deletion, or an insertion. - * - * This implementation allows the costs to be weighted: - * - * - w (as in "sWap") - * - s (as in "Substitution") - * - a (for insertion, AKA "Add") - * - d (as in "Deletion") - * - * Note that this algorithm calculates a distance _iff_ d == a. 
- */ -int levenshtein(const char *string1, const char *string2, - int w, int s, int a, int d) -{ - int len1 = strlen(string1), len2 = strlen(string2); - int *row0 = malloc(sizeof(int) * (len2 + 1)); - int *row1 = malloc(sizeof(int) * (len2 + 1)); - int *row2 = malloc(sizeof(int) * (len2 + 1)); - int i, j; - - for (j = 0; j <= len2; j++) - row1[j] = j * a; - for (i = 0; i < len1; i++) { - int *dummy; - - row2[0] = (i + 1) * d; - for (j = 0; j < len2; j++) { - /* substitution */ - row2[j + 1] = row1[j] + s * (string1[i] != string2[j]); - /* swap */ - if (i > 0 && j > 0 && string1[i - 1] == string2[j] && - string1[i] == string2[j - 1] && - row2[j + 1] > row0[j - 1] + w) - row2[j + 1] = row0[j - 1] + w; - /* deletion */ - if (row2[j + 1] > row1[j + 1] + d) - row2[j + 1] = row1[j + 1] + d; - /* insertion */ - if (row2[j + 1] > row2[j] + a) - row2[j + 1] = row2[j] + a; - } - - dummy = row0; - row0 = row1; - row1 = row2; - row2 = dummy; - } - - i = row1[len2]; - free(row0); - free(row1); - free(row2); - - return i; -} diff --git a/Documentation/perf_counter/util/levenshtein.h b/Documentation/perf_counter/util/levenshtein.h deleted file mode 100644 index 0173abeef52..00000000000 --- a/Documentation/perf_counter/util/levenshtein.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef LEVENSHTEIN_H -#define LEVENSHTEIN_H - -int levenshtein(const char *string1, const char *string2, - int swap_penalty, int substitution_penalty, - int insertion_penalty, int deletion_penalty); - -#endif diff --git a/Documentation/perf_counter/util/list.h b/Documentation/perf_counter/util/list.h deleted file mode 100644 index e2548e8072c..00000000000 --- a/Documentation/perf_counter/util/list.h +++ /dev/null @@ -1,603 +0,0 @@ -#ifndef _LINUX_LIST_H -#define _LINUX_LIST_H -/* - Copyright (C) Cast of dozens, comes from the Linux kernel - - This program is free software; you can redistribute it and/or modify it - under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. -*/ - -#include - -/* - * These are non-NULL pointers that will result in page faults - * under normal circumstances, used to verify that nobody uses - * non-initialized list entries. - */ -#define LIST_POISON1 ((void *)0x00100100) -#define LIST_POISON2 ((void *)0x00200200) - -/** - * container_of - cast a member of a structure out to the containing structure - * @ptr: the pointer to the member. - * @type: the type of the container struct this is embedded in. - * @member: the name of the member within the struct. - * - */ -#define container_of(ptr, type, member) ({ \ - const typeof( ((type *)0)->member ) *__mptr = (ptr); \ - (type *)( (char *)__mptr - offsetof(type,member) );}) - -/* - * Simple doubly linked list implementation. - * - * Some of the internal functions ("__xxx") are useful when - * manipulating whole lists rather than single entries, as - * sometimes we already know the next/prev entries and we can - * generate better code by using them directly rather than - * using the generic single-entry routines. - */ - -struct list_head { - struct list_head *next, *prev; -}; - -#define LIST_HEAD_INIT(name) { &(name), &(name) } - -#define LIST_HEAD(name) \ - struct list_head name = LIST_HEAD_INIT(name) - -static inline void INIT_LIST_HEAD(struct list_head *list) -{ - list->next = list; - list->prev = list; -} - -/* - * Insert a new entry between two known consecutive entries. - * - * This is only for internal list manipulation where we know - * the prev/next entries already! 
- */ -static inline void __list_add(struct list_head *new, - struct list_head *prev, - struct list_head *next) -{ - next->prev = new; - new->next = next; - new->prev = prev; - prev->next = new; -} - -/** - * list_add - add a new entry - * @new: new entry to be added - * @head: list head to add it after - * - * Insert a new entry after the specified head. - * This is good for implementing stacks. - */ -static inline void list_add(struct list_head *new, struct list_head *head) -{ - __list_add(new, head, head->next); -} - -/** - * list_add_tail - add a new entry - * @new: new entry to be added - * @head: list head to add it before - * - * Insert a new entry before the specified head. - * This is useful for implementing queues. - */ -static inline void list_add_tail(struct list_head *new, struct list_head *head) -{ - __list_add(new, head->prev, head); -} - -/* - * Delete a list entry by making the prev/next entries - * point to each other. - * - * This is only for internal list manipulation where we know - * the prev/next entries already! - */ -static inline void __list_del(struct list_head * prev, struct list_head * next) -{ - next->prev = prev; - prev->next = next; -} - -/** - * list_del - deletes entry from list. - * @entry: the element to delete from the list. - * Note: list_empty on entry does not return true after this, the entry is - * in an undefined state. - */ -static inline void list_del(struct list_head *entry) -{ - __list_del(entry->prev, entry->next); - entry->next = LIST_POISON1; - entry->prev = LIST_POISON2; -} - -/** - * list_del_range - deletes range of entries from list. - * @begin: first element in the range to delete from the list. - * @end: last element in the range to delete from the list. - * Note: list_empty on the range of entries does not return true after this, - * the entries are in an undefined state. - */ -static inline void list_del_range(struct list_head *begin, - struct list_head *end) -{ - begin->prev->next = end->next; - end->next->prev = begin->prev; -} - -/** - * list_replace - replace old entry by new one - * @old : the element to be replaced - * @new : the new element to insert - * Note: if 'old' was empty, it will be overwritten. - */ -static inline void list_replace(struct list_head *old, - struct list_head *new) -{ - new->next = old->next; - new->next->prev = new; - new->prev = old->prev; - new->prev->next = new; -} - -static inline void list_replace_init(struct list_head *old, - struct list_head *new) -{ - list_replace(old, new); - INIT_LIST_HEAD(old); -} - -/** - * list_del_init - deletes entry from list and reinitialize it. - * @entry: the element to delete from the list. 
- */ -static inline void list_del_init(struct list_head *entry) -{ - __list_del(entry->prev, entry->next); - INIT_LIST_HEAD(entry); -} - -/** - * list_move - delete from one list and add as another's head - * @list: the entry to move - * @head: the head that will precede our entry - */ -static inline void list_move(struct list_head *list, struct list_head *head) -{ - __list_del(list->prev, list->next); - list_add(list, head); -} - -/** - * list_move_tail - delete from one list and add as another's tail - * @list: the entry to move - * @head: the head that will follow our entry - */ -static inline void list_move_tail(struct list_head *list, - struct list_head *head) -{ - __list_del(list->prev, list->next); - list_add_tail(list, head); -} - -/** - * list_is_last - tests whether @list is the last entry in list @head - * @list: the entry to test - * @head: the head of the list - */ -static inline int list_is_last(const struct list_head *list, - const struct list_head *head) -{ - return list->next == head; -} - -/** - * list_empty - tests whether a list is empty - * @head: the list to test. - */ -static inline int list_empty(const struct list_head *head) -{ - return head->next == head; -} - -/** - * list_empty_careful - tests whether a list is empty and not being modified - * @head: the list to test - * - * Description: - * tests whether a list is empty _and_ checks that no other CPU might be - * in the process of modifying either member (next or prev) - * - * NOTE: using list_empty_careful() without synchronization - * can only be safe if the only activity that can happen - * to the list entry is list_del_init(). Eg. it cannot be used - * if another CPU could re-list_add() it. - */ -static inline int list_empty_careful(const struct list_head *head) -{ - struct list_head *next = head->next; - return (next == head) && (next == head->prev); -} - -static inline void __list_splice(struct list_head *list, - struct list_head *head) -{ - struct list_head *first = list->next; - struct list_head *last = list->prev; - struct list_head *at = head->next; - - first->prev = head; - head->next = first; - - last->next = at; - at->prev = last; -} - -/** - * list_splice - join two lists - * @list: the new list to add. - * @head: the place to add it in the first list. - */ -static inline void list_splice(struct list_head *list, struct list_head *head) -{ - if (!list_empty(list)) - __list_splice(list, head); -} - -/** - * list_splice_init - join two lists and reinitialise the emptied list. - * @list: the new list to add. - * @head: the place to add it in the first list. - * - * The list at @list is reinitialised - */ -static inline void list_splice_init(struct list_head *list, - struct list_head *head) -{ - if (!list_empty(list)) { - __list_splice(list, head); - INIT_LIST_HEAD(list); - } -} - -/** - * list_entry - get the struct for this entry - * @ptr: the &struct list_head pointer. - * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. - */ -#define list_entry(ptr, type, member) \ - container_of(ptr, type, member) - -/** - * list_first_entry - get the first element from a list - * @ptr: the list head to take the element from. - * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. - * - * Note, that list is expected to be not empty. 
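A short sketch of how these primitives compose (the struct, values and function are invented for illustration; only this list.h is assumed):

#include <stdio.h>

struct counter {
	int id;
	struct list_head node;
};

static LIST_HEAD(counters);

static void demo(struct counter *a, struct counter *b)
{
	struct counter *pos;

	list_add_tail(&a->node, &counters);	/* FIFO order, queue-style */
	list_add_tail(&b->node, &counters);

	list_for_each_entry(pos, &counters, node)
		printf("counter %d\n", pos->id);

	/* only safe because we know the list is non-empty here: */
	if (!list_empty(&counters))
		pos = list_first_entry(&counters, struct counter, node);
}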
- */ -#define list_first_entry(ptr, type, member) \ - list_entry((ptr)->next, type, member) - -/** - * list_for_each - iterate over a list - * @pos: the &struct list_head to use as a loop cursor. - * @head: the head for your list. - */ -#define list_for_each(pos, head) \ - for (pos = (head)->next; pos != (head); \ - pos = pos->next) - -/** - * __list_for_each - iterate over a list - * @pos: the &struct list_head to use as a loop cursor. - * @head: the head for your list. - * - * This variant differs from list_for_each() in that it's the - * simplest possible list iteration code, no prefetching is done. - * Use this for code that knows the list to be very short (empty - * or 1 entry) most of the time. - */ -#define __list_for_each(pos, head) \ - for (pos = (head)->next; pos != (head); pos = pos->next) - -/** - * list_for_each_prev - iterate over a list backwards - * @pos: the &struct list_head to use as a loop cursor. - * @head: the head for your list. - */ -#define list_for_each_prev(pos, head) \ - for (pos = (head)->prev; pos != (head); \ - pos = pos->prev) - -/** - * list_for_each_safe - iterate over a list safe against removal of list entry - * @pos: the &struct list_head to use as a loop cursor. - * @n: another &struct list_head to use as temporary storage - * @head: the head for your list. - */ -#define list_for_each_safe(pos, n, head) \ - for (pos = (head)->next, n = pos->next; pos != (head); \ - pos = n, n = pos->next) - -/** - * list_for_each_entry - iterate over list of given type - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - */ -#define list_for_each_entry(pos, head, member) \ - for (pos = list_entry((head)->next, typeof(*pos), member); \ - &pos->member != (head); \ - pos = list_entry(pos->member.next, typeof(*pos), member)) - -/** - * list_for_each_entry_reverse - iterate backwards over list of given type. - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - */ -#define list_for_each_entry_reverse(pos, head, member) \ - for (pos = list_entry((head)->prev, typeof(*pos), member); \ - &pos->member != (head); \ - pos = list_entry(pos->member.prev, typeof(*pos), member)) - -/** - * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue - * @pos: the type * to use as a start point - * @head: the head of the list - * @member: the name of the list_struct within the struct. - * - * Prepares a pos entry for use as a start point in list_for_each_entry_continue. - */ -#define list_prepare_entry(pos, head, member) \ - ((pos) ? : list_entry(head, typeof(*pos), member)) - -/** - * list_for_each_entry_continue - continue iteration over list of given type - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Continue to iterate over list of given type, continuing after - * the current position. - */ -#define list_for_each_entry_continue(pos, head, member) \ - for (pos = list_entry(pos->member.next, typeof(*pos), member); \ - &pos->member != (head); \ - pos = list_entry(pos->member.next, typeof(*pos), member)) - -/** - * list_for_each_entry_from - iterate over list of given type from the current point - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. 
- * - * Iterate over list of given type, continuing from current position. - */ -#define list_for_each_entry_from(pos, head, member) \ - for (; &pos->member != (head); \ - pos = list_entry(pos->member.next, typeof(*pos), member)) - -/** - * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry - * @pos: the type * to use as a loop cursor. - * @n: another type * to use as temporary storage - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - */ -#define list_for_each_entry_safe(pos, n, head, member) \ - for (pos = list_entry((head)->next, typeof(*pos), member), \ - n = list_entry(pos->member.next, typeof(*pos), member); \ - &pos->member != (head); \ - pos = n, n = list_entry(n->member.next, typeof(*n), member)) - -/** - * list_for_each_entry_safe_continue - * @pos: the type * to use as a loop cursor. - * @n: another type * to use as temporary storage - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Iterate over list of given type, continuing after current point, - * safe against removal of list entry. - */ -#define list_for_each_entry_safe_continue(pos, n, head, member) \ - for (pos = list_entry(pos->member.next, typeof(*pos), member), \ - n = list_entry(pos->member.next, typeof(*pos), member); \ - &pos->member != (head); \ - pos = n, n = list_entry(n->member.next, typeof(*n), member)) - -/** - * list_for_each_entry_safe_from - * @pos: the type * to use as a loop cursor. - * @n: another type * to use as temporary storage - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Iterate over list of given type from current point, safe against - * removal of list entry. - */ -#define list_for_each_entry_safe_from(pos, n, head, member) \ - for (n = list_entry(pos->member.next, typeof(*pos), member); \ - &pos->member != (head); \ - pos = n, n = list_entry(n->member.next, typeof(*n), member)) - -/** - * list_for_each_entry_safe_reverse - * @pos: the type * to use as a loop cursor. - * @n: another type * to use as temporary storage - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Iterate backwards over list of given type, safe against removal - * of list entry. - */ -#define list_for_each_entry_safe_reverse(pos, n, head, member) \ - for (pos = list_entry((head)->prev, typeof(*pos), member), \ - n = list_entry(pos->member.prev, typeof(*pos), member); \ - &pos->member != (head); \ - pos = n, n = list_entry(n->member.prev, typeof(*n), member)) - -/* - * Double linked lists with a single pointer list head. - * Mostly useful for hash tables where the two pointer list head is - * too wasteful. - * You lose the ability to access the tail in O(1). 
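For instance, a minimal fixed-size hash table built on these types might look like this (a sketch, assuming only the declarations below; names are invented):

struct object {
	int key;
	struct hlist_node hash;
};

static struct hlist_head table[16];	/* one pointer per bucket */

static void hash_insert(struct object *obj)
{
	hlist_add_head(&obj->hash, &table[obj->key & 15]);
}

static struct object *hash_lookup(int key)
{
	struct hlist_node *pos;
	struct object *obj;

	hlist_for_each_entry(obj, pos, &table[key & 15], hash)
		if (obj->key == key)
			return obj;
	return NULL;	/* walking a bucket costs O(chain length) */
}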
- */ - -struct hlist_head { - struct hlist_node *first; -}; - -struct hlist_node { - struct hlist_node *next, **pprev; -}; - -#define HLIST_HEAD_INIT { .first = NULL } -#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } -#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) -static inline void INIT_HLIST_NODE(struct hlist_node *h) -{ - h->next = NULL; - h->pprev = NULL; -} - -static inline int hlist_unhashed(const struct hlist_node *h) -{ - return !h->pprev; -} - -static inline int hlist_empty(const struct hlist_head *h) -{ - return !h->first; -} - -static inline void __hlist_del(struct hlist_node *n) -{ - struct hlist_node *next = n->next; - struct hlist_node **pprev = n->pprev; - *pprev = next; - if (next) - next->pprev = pprev; -} - -static inline void hlist_del(struct hlist_node *n) -{ - __hlist_del(n); - n->next = LIST_POISON1; - n->pprev = LIST_POISON2; -} - -static inline void hlist_del_init(struct hlist_node *n) -{ - if (!hlist_unhashed(n)) { - __hlist_del(n); - INIT_HLIST_NODE(n); - } -} - -static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) -{ - struct hlist_node *first = h->first; - n->next = first; - if (first) - first->pprev = &n->next; - h->first = n; - n->pprev = &h->first; -} - -/* next must be != NULL */ -static inline void hlist_add_before(struct hlist_node *n, - struct hlist_node *next) -{ - n->pprev = next->pprev; - n->next = next; - next->pprev = &n->next; - *(n->pprev) = n; -} - -static inline void hlist_add_after(struct hlist_node *n, - struct hlist_node *next) -{ - next->next = n->next; - n->next = next; - next->pprev = &n->next; - - if(next->next) - next->next->pprev = &next->next; -} - -#define hlist_entry(ptr, type, member) container_of(ptr,type,member) - -#define hlist_for_each(pos, head) \ - for (pos = (head)->first; pos; \ - pos = pos->next) - -#define hlist_for_each_safe(pos, n, head) \ - for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ - pos = n) - -/** - * hlist_for_each_entry - iterate over list of given type - * @tpos: the type * to use as a loop cursor. - * @pos: the &struct hlist_node to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the hlist_node within the struct. - */ -#define hlist_for_each_entry(tpos, pos, head, member) \ - for (pos = (head)->first; \ - pos && \ - ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = pos->next) - -/** - * hlist_for_each_entry_continue - iterate over a hlist continuing after current point - * @tpos: the type * to use as a loop cursor. - * @pos: the &struct hlist_node to use as a loop cursor. - * @member: the name of the hlist_node within the struct. - */ -#define hlist_for_each_entry_continue(tpos, pos, member) \ - for (pos = (pos)->next; \ - pos && \ - ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = pos->next) - -/** - * hlist_for_each_entry_from - iterate over a hlist continuing from current point - * @tpos: the type * to use as a loop cursor. - * @pos: the &struct hlist_node to use as a loop cursor. - * @member: the name of the hlist_node within the struct. - */ -#define hlist_for_each_entry_from(tpos, pos, member) \ - for (; pos && \ - ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = pos->next) - -/** - * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry - * @tpos: the type * to use as a loop cursor. - * @pos: the &struct hlist_node to use as a loop cursor. 
- * @n: another &struct hlist_node to use as temporary storage - * @head: the head for your list. - * @member: the name of the hlist_node within the struct. - */ -#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ - for (pos = (head)->first; \ - pos && ({ n = pos->next; 1; }) && \ - ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = n) - -#endif diff --git a/Documentation/perf_counter/util/pager.c b/Documentation/perf_counter/util/pager.c deleted file mode 100644 index a28bccae545..00000000000 --- a/Documentation/perf_counter/util/pager.c +++ /dev/null @@ -1,99 +0,0 @@ -#include "cache.h" -#include "run-command.h" -#include "sigchain.h" - -/* - * This is split up from the rest of git so that we can do - * something different on Windows. - */ - -static int spawned_pager; - -#ifndef __MINGW32__ -static void pager_preexec(void) -{ - /* - * Work around bug in "less" by not starting it until we - * have real input - */ - fd_set in; - - FD_ZERO(&in); - FD_SET(0, &in); - select(1, &in, NULL, &in, NULL); - - setenv("LESS", "FRSX", 0); -} -#endif - -static const char *pager_argv[] = { "sh", "-c", NULL, NULL }; -static struct child_process pager_process; - -static void wait_for_pager(void) -{ - fflush(stdout); - fflush(stderr); - /* signal EOF to pager */ - close(1); - close(2); - finish_command(&pager_process); -} - -static void wait_for_pager_signal(int signo) -{ - wait_for_pager(); - sigchain_pop(signo); - raise(signo); -} - -void setup_pager(void) -{ - const char *pager = getenv("PERF_PAGER"); - - if (!isatty(1)) - return; - if (!pager) { - if (!pager_program) - perf_config(perf_default_config, NULL); - pager = pager_program; - } - if (!pager) - pager = getenv("PAGER"); - if (!pager) - pager = "less"; - else if (!*pager || !strcmp(pager, "cat")) - return; - - spawned_pager = 1; /* means we are emitting to terminal */ - - /* spawn the pager */ - pager_argv[2] = pager; - pager_process.argv = pager_argv; - pager_process.in = -1; -#ifndef __MINGW32__ - pager_process.preexec_cb = pager_preexec; -#endif - if (start_command(&pager_process)) - return; - - /* original process continues, but writes to the pipe */ - dup2(pager_process.in, 1); - if (isatty(2)) - dup2(pager_process.in, 2); - close(pager_process.in); - - /* this makes sure that the parent terminates after the pager */ - sigchain_push_common(wait_for_pager_signal); - atexit(wait_for_pager); -} - -int pager_in_use(void) -{ - const char *env; - - if (spawned_pager) - return 1; - - env = getenv("PERF_PAGER_IN_USE"); - return env ? 
perf_config_bool("PERF_PAGER_IN_USE", env) : 0; -} diff --git a/Documentation/perf_counter/util/parse-events.c b/Documentation/perf_counter/util/parse-events.c deleted file mode 100644 index e0820b4388a..00000000000 --- a/Documentation/perf_counter/util/parse-events.c +++ /dev/null @@ -1,316 +0,0 @@ - -#include "../perf.h" -#include "util.h" -#include "parse-options.h" -#include "parse-events.h" -#include "exec_cmd.h" -#include "string.h" - -extern char *strcasestr(const char *haystack, const char *needle); - -int nr_counters; - -struct perf_counter_attr attrs[MAX_COUNTERS]; - -struct event_symbol { - __u8 type; - __u64 config; - char *symbol; -}; - -#define C(x, y) .type = PERF_TYPE_##x, .config = PERF_COUNT_##y -#define CR(x, y) .type = PERF_TYPE_##x, .config = y - -static struct event_symbol event_symbols[] = { - { C(HARDWARE, CPU_CYCLES), "cpu-cycles", }, - { C(HARDWARE, CPU_CYCLES), "cycles", }, - { C(HARDWARE, INSTRUCTIONS), "instructions", }, - { C(HARDWARE, CACHE_REFERENCES), "cache-references", }, - { C(HARDWARE, CACHE_MISSES), "cache-misses", }, - { C(HARDWARE, BRANCH_INSTRUCTIONS), "branch-instructions", }, - { C(HARDWARE, BRANCH_INSTRUCTIONS), "branches", }, - { C(HARDWARE, BRANCH_MISSES), "branch-misses", }, - { C(HARDWARE, BUS_CYCLES), "bus-cycles", }, - - { C(SOFTWARE, CPU_CLOCK), "cpu-clock", }, - { C(SOFTWARE, TASK_CLOCK), "task-clock", }, - { C(SOFTWARE, PAGE_FAULTS), "page-faults", }, - { C(SOFTWARE, PAGE_FAULTS), "faults", }, - { C(SOFTWARE, PAGE_FAULTS_MIN), "minor-faults", }, - { C(SOFTWARE, PAGE_FAULTS_MAJ), "major-faults", }, - { C(SOFTWARE, CONTEXT_SWITCHES), "context-switches", }, - { C(SOFTWARE, CONTEXT_SWITCHES), "cs", }, - { C(SOFTWARE, CPU_MIGRATIONS), "cpu-migrations", }, - { C(SOFTWARE, CPU_MIGRATIONS), "migrations", }, -}; - -#define __PERF_COUNTER_FIELD(config, name) \ - ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) - -#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) -#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) -#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) -#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) - -static char *hw_event_names[] = { - "cycles", - "instructions", - "cache-references", - "cache-misses", - "branches", - "branch-misses", - "bus-cycles", -}; - -static char *sw_event_names[] = { - "cpu-clock-ticks", - "task-clock-ticks", - "page-faults", - "context-switches", - "CPU-migrations", - "minor-faults", - "major-faults", -}; - -#define MAX_ALIASES 8 - -static char *hw_cache [][MAX_ALIASES] = { - { "L1-data" , "l1-d", "l1d", "l1" }, - { "L1-instruction" , "l1-i", "l1i" }, - { "L2" , "l2" }, - { "Data-TLB" , "dtlb", "d-tlb" }, - { "Instruction-TLB" , "itlb", "i-tlb" }, - { "Branch" , "bpu" , "btb", "bpc" }, -}; - -static char *hw_cache_op [][MAX_ALIASES] = { - { "Load" , "read" }, - { "Store" , "write" }, - { "Prefetch" , "speculative-read", "speculative-load" }, -}; - -static char *hw_cache_result [][MAX_ALIASES] = { - { "Reference" , "ops", "access" }, - { "Miss" }, -}; - -char *event_name(int counter) -{ - __u64 config = attrs[counter].config; - int type = attrs[counter].type; - static char buf[32]; - - if (attrs[counter].type == PERF_TYPE_RAW) { - sprintf(buf, "raw 0x%llx", config); - return buf; - } - - switch (type) { - case PERF_TYPE_HARDWARE: - if (config < PERF_HW_EVENTS_MAX) - return hw_event_names[config]; - return "unknown-hardware"; - - case PERF_TYPE_HW_CACHE: { - __u8 cache_type, cache_op, cache_result; - static char 
name[100]; - - cache_type = (config >> 0) & 0xff; - if (cache_type >= PERF_COUNT_HW_CACHE_MAX) - return "unknown-ext-hardware-cache-type"; - - cache_op = (config >> 8) & 0xff; - if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) - return "unknown-ext-hardware-cache-op"; - - cache_result = (config >> 16) & 0xff; - if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) - return "unknown-ext-hardware-cache-result"; - - sprintf(name, "%s-Cache-%s-%ses", - hw_cache[cache_type][0], - hw_cache_op[cache_op][0], - hw_cache_result[cache_result][0]); - - return name; - } - - case PERF_TYPE_SOFTWARE: - if (config < PERF_SW_EVENTS_MAX) - return sw_event_names[config]; - return "unknown-software"; - - default: - break; - } - - return "unknown"; -} - -static int parse_aliases(const char *str, char *names[][MAX_ALIASES], int size) -{ - int i, j; - - for (i = 0; i < size; i++) { - for (j = 0; j < MAX_ALIASES; j++) { - if (!names[i][j]) - break; - if (strcasestr(str, names[i][j])) - return i; - } - } - - return 0; -} - -static int parse_generic_hw_symbols(const char *str, struct perf_counter_attr *attr) -{ - __u8 cache_type = -1, cache_op = 0, cache_result = 0; - - cache_type = parse_aliases(str, hw_cache, PERF_COUNT_HW_CACHE_MAX); - /* - * No fallback - if we cannot get a clear cache type - * then bail out: - */ - if (cache_type == -1) - return -EINVAL; - - cache_op = parse_aliases(str, hw_cache_op, PERF_COUNT_HW_CACHE_OP_MAX); - /* - * Fall back to reads: - */ - if (cache_op == -1) - cache_op = PERF_COUNT_HW_CACHE_OP_READ; - - cache_result = parse_aliases(str, hw_cache_result, - PERF_COUNT_HW_CACHE_RESULT_MAX); - /* - * Fall back to accesses: - */ - if (cache_result == -1) - cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS; - - attr->config = cache_type | (cache_op << 8) | (cache_result << 16); - attr->type = PERF_TYPE_HW_CACHE; - - return 0; -} - -/* - * Each event can have multiple symbolic names. - * Symbolic names are (almost) exactly matched. 
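To make the accepted syntax concrete, here is roughly what the parser derives from a few event strings (an illustrative fragment; parse_event_symbols() is file-static, so real callers go through parse_events()):

static void example(void)
{
	struct perf_counter_attr attr;

	memset(&attr, 0, sizeof(attr));
	parse_event_symbols("cycles", &attr);	/* PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES */
	parse_event_symbols("r1a8", &attr);	/* PERF_TYPE_RAW, config 0x1a8 */
	parse_event_symbols("1:0:u", &attr);	/* type 1, config 0, exclude_kernel set */
	parse_event_symbols("l1-d-load-miss", &attr);	/* generic cache event via the alias tables */
}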
- */ -static int parse_event_symbols(const char *str, struct perf_counter_attr *attr) -{ - __u64 config, id; - int type; - unsigned int i; - const char *sep, *pstr; - - if (str[0] == 'r' && hex2u64(str + 1, &config) > 0) { - attr->type = PERF_TYPE_RAW; - attr->config = config; - - return 0; - } - - pstr = str; - sep = strchr(pstr, ':'); - if (sep) { - type = atoi(pstr); - pstr = sep + 1; - id = atoi(pstr); - sep = strchr(pstr, ':'); - if (sep) { - pstr = sep + 1; - if (strchr(pstr, 'k')) - attr->exclude_user = 1; - if (strchr(pstr, 'u')) - attr->exclude_kernel = 1; - } - attr->type = type; - attr->config = id; - - return 0; - } - - for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { - if (!strncmp(str, event_symbols[i].symbol, - strlen(event_symbols[i].symbol))) { - - attr->type = event_symbols[i].type; - attr->config = event_symbols[i].config; - - return 0; - } - } - - return parse_generic_hw_symbols(str, attr); -} - -int parse_events(const struct option *opt, const char *str, int unset) -{ - struct perf_counter_attr attr; - int ret; - - memset(&attr, 0, sizeof(attr)); -again: - if (nr_counters == MAX_COUNTERS) - return -1; - - ret = parse_event_symbols(str, &attr); - if (ret < 0) - return ret; - - attrs[nr_counters] = attr; - nr_counters++; - - str = strstr(str, ","); - if (str) { - str++; - goto again; - } - - return 0; -} - -static const char * const event_type_descriptors[] = { - "", - "Hardware event", - "Software event", - "Tracepoint event", - "Hardware cache event", -}; - -/* - * Print the help text for the event symbols: - */ -void print_events(void) -{ - struct event_symbol *syms = event_symbols; - unsigned int i, type, prev_type = -1; - - fprintf(stderr, "\n"); - fprintf(stderr, "List of pre-defined events (to be used in -e):\n"); - - for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { - type = syms->type + 1; - if (type > ARRAY_SIZE(event_type_descriptors)) - type = 0; - - if (type != prev_type) - fprintf(stderr, "\n"); - - fprintf(stderr, " %-30s [%s]\n", syms->symbol, - event_type_descriptors[type]); - - prev_type = type; - } - - fprintf(stderr, "\n"); - fprintf(stderr, " %-30s [raw hardware event descriptor]\n", - "rNNN"); - fprintf(stderr, "\n"); - - exit(129); -} diff --git a/Documentation/perf_counter/util/parse-events.h b/Documentation/perf_counter/util/parse-events.h deleted file mode 100644 index e3d552908e6..00000000000 --- a/Documentation/perf_counter/util/parse-events.h +++ /dev/null @@ -1,17 +0,0 @@ - -/* - * Parse symbolic events/counts passed in as options: - */ - -extern int nr_counters; - -extern struct perf_counter_attr attrs[MAX_COUNTERS]; - -extern char *event_name(int ctr); - -extern int parse_events(const struct option *opt, const char *str, int unset); - -#define EVENTS_HELP_MAX (128*1024) - -extern void print_events(void); - diff --git a/Documentation/perf_counter/util/parse-options.c b/Documentation/perf_counter/util/parse-options.c deleted file mode 100644 index b3affb1658d..00000000000 --- a/Documentation/perf_counter/util/parse-options.c +++ /dev/null @@ -1,508 +0,0 @@ -#include "util.h" -#include "parse-options.h" -#include "cache.h" - -#define OPT_SHORT 1 -#define OPT_UNSET 2 - -static int opterror(const struct option *opt, const char *reason, int flags) -{ - if (flags & OPT_SHORT) - return error("switch `%c' %s", opt->short_name, reason); - if (flags & OPT_UNSET) - return error("option `no-%s' %s", opt->long_name, reason); - return error("option `%s' %s", opt->long_name, reason); -} - -static int get_arg(struct parse_opt_ctx_t *p, const 
struct option *opt, - int flags, const char **arg) -{ - if (p->opt) { - *arg = p->opt; - p->opt = NULL; - } else if (p->argc == 1 && (opt->flags & PARSE_OPT_LASTARG_DEFAULT)) { - *arg = (const char *)opt->defval; - } else if (p->argc > 1) { - p->argc--; - *arg = *++p->argv; - } else - return opterror(opt, "requires a value", flags); - return 0; -} - -static int get_value(struct parse_opt_ctx_t *p, - const struct option *opt, int flags) -{ - const char *s, *arg = NULL; - const int unset = flags & OPT_UNSET; - - if (unset && p->opt) - return opterror(opt, "takes no value", flags); - if (unset && (opt->flags & PARSE_OPT_NONEG)) - return opterror(opt, "isn't available", flags); - - if (!(flags & OPT_SHORT) && p->opt) { - switch (opt->type) { - case OPTION_CALLBACK: - if (!(opt->flags & PARSE_OPT_NOARG)) - break; - /* FALLTHROUGH */ - case OPTION_BOOLEAN: - case OPTION_BIT: - case OPTION_SET_INT: - case OPTION_SET_PTR: - return opterror(opt, "takes no value", flags); - default: - break; - } - } - - switch (opt->type) { - case OPTION_BIT: - if (unset) - *(int *)opt->value &= ~opt->defval; - else - *(int *)opt->value |= opt->defval; - return 0; - - case OPTION_BOOLEAN: - *(int *)opt->value = unset ? 0 : *(int *)opt->value + 1; - return 0; - - case OPTION_SET_INT: - *(int *)opt->value = unset ? 0 : opt->defval; - return 0; - - case OPTION_SET_PTR: - *(void **)opt->value = unset ? NULL : (void *)opt->defval; - return 0; - - case OPTION_STRING: - if (unset) - *(const char **)opt->value = NULL; - else if (opt->flags & PARSE_OPT_OPTARG && !p->opt) - *(const char **)opt->value = (const char *)opt->defval; - else - return get_arg(p, opt, flags, (const char **)opt->value); - return 0; - - case OPTION_CALLBACK: - if (unset) - return (*opt->callback)(opt, NULL, 1) ? (-1) : 0; - if (opt->flags & PARSE_OPT_NOARG) - return (*opt->callback)(opt, NULL, 0) ? (-1) : 0; - if (opt->flags & PARSE_OPT_OPTARG && !p->opt) - return (*opt->callback)(opt, NULL, 0) ? (-1) : 0; - if (get_arg(p, opt, flags, &arg)) - return -1; - return (*opt->callback)(opt, arg, 0) ? (-1) : 0; - - case OPTION_INTEGER: - if (unset) { - *(int *)opt->value = 0; - return 0; - } - if (opt->flags & PARSE_OPT_OPTARG && !p->opt) { - *(int *)opt->value = opt->defval; - return 0; - } - if (get_arg(p, opt, flags, &arg)) - return -1; - *(int *)opt->value = strtol(arg, (char **)&s, 10); - if (*s) - return opterror(opt, "expects a numerical value", flags); - return 0; - - case OPTION_LONG: - if (unset) { - *(long *)opt->value = 0; - return 0; - } - if (opt->flags & PARSE_OPT_OPTARG && !p->opt) { - *(long *)opt->value = opt->defval; - return 0; - } - if (get_arg(p, opt, flags, &arg)) - return -1; - *(long *)opt->value = strtol(arg, (char **)&s, 10); - if (*s) - return opterror(opt, "expects a numerical value", flags); - return 0; - - default: - die("should not happen, someone must be hit on the forehead"); - } -} - -static int parse_short_opt(struct parse_opt_ctx_t *p, const struct option *options) -{ - for (; options->type != OPTION_END; options++) { - if (options->short_name == *p->opt) { - p->opt = p->opt[1] ? 
p->opt + 1 : NULL; - return get_value(p, options, OPT_SHORT); - } - } - return -2; -} - -static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg, - const struct option *options) -{ - const char *arg_end = strchr(arg, '='); - const struct option *abbrev_option = NULL, *ambiguous_option = NULL; - int abbrev_flags = 0, ambiguous_flags = 0; - - if (!arg_end) - arg_end = arg + strlen(arg); - - for (; options->type != OPTION_END; options++) { - const char *rest; - int flags = 0; - - if (!options->long_name) - continue; - - rest = skip_prefix(arg, options->long_name); - if (options->type == OPTION_ARGUMENT) { - if (!rest) - continue; - if (*rest == '=') - return opterror(options, "takes no value", flags); - if (*rest) - continue; - p->out[p->cpidx++] = arg - 2; - return 0; - } - if (!rest) { - /* abbreviated? */ - if (!strncmp(options->long_name, arg, arg_end - arg)) { -is_abbreviated: - if (abbrev_option) { - /* - * If this is abbreviated, it is - * ambiguous. So when there is no - * exact match later, we need to - * error out. - */ - ambiguous_option = abbrev_option; - ambiguous_flags = abbrev_flags; - } - if (!(flags & OPT_UNSET) && *arg_end) - p->opt = arg_end + 1; - abbrev_option = options; - abbrev_flags = flags; - continue; - } - /* negated and abbreviated very much? */ - if (!prefixcmp("no-", arg)) { - flags |= OPT_UNSET; - goto is_abbreviated; - } - /* negated? */ - if (strncmp(arg, "no-", 3)) - continue; - flags |= OPT_UNSET; - rest = skip_prefix(arg + 3, options->long_name); - /* abbreviated and negated? */ - if (!rest && !prefixcmp(options->long_name, arg + 3)) - goto is_abbreviated; - if (!rest) - continue; - } - if (*rest) { - if (*rest != '=') - continue; - p->opt = rest + 1; - } - return get_value(p, options, flags); - } - - if (ambiguous_option) - return error("Ambiguous option: %s " - "(could be --%s%s or --%s%s)", - arg, - (ambiguous_flags & OPT_UNSET) ? "no-" : "", - ambiguous_option->long_name, - (abbrev_flags & OPT_UNSET) ? 
"no-" : "", - abbrev_option->long_name); - if (abbrev_option) - return get_value(p, abbrev_option, abbrev_flags); - return -2; -} - -static void check_typos(const char *arg, const struct option *options) -{ - if (strlen(arg) < 3) - return; - - if (!prefixcmp(arg, "no-")) { - error ("did you mean `--%s` (with two dashes ?)", arg); - exit(129); - } - - for (; options->type != OPTION_END; options++) { - if (!options->long_name) - continue; - if (!prefixcmp(options->long_name, arg)) { - error ("did you mean `--%s` (with two dashes ?)", arg); - exit(129); - } - } -} - -void parse_options_start(struct parse_opt_ctx_t *ctx, - int argc, const char **argv, int flags) -{ - memset(ctx, 0, sizeof(*ctx)); - ctx->argc = argc - 1; - ctx->argv = argv + 1; - ctx->out = argv; - ctx->cpidx = ((flags & PARSE_OPT_KEEP_ARGV0) != 0); - ctx->flags = flags; - if ((flags & PARSE_OPT_KEEP_UNKNOWN) && - (flags & PARSE_OPT_STOP_AT_NON_OPTION)) - die("STOP_AT_NON_OPTION and KEEP_UNKNOWN don't go together"); -} - -static int usage_with_options_internal(const char * const *, - const struct option *, int); - -int parse_options_step(struct parse_opt_ctx_t *ctx, - const struct option *options, - const char * const usagestr[]) -{ - int internal_help = !(ctx->flags & PARSE_OPT_NO_INTERNAL_HELP); - - /* we must reset ->opt, unknown short option leave it dangling */ - ctx->opt = NULL; - - for (; ctx->argc; ctx->argc--, ctx->argv++) { - const char *arg = ctx->argv[0]; - - if (*arg != '-' || !arg[1]) { - if (ctx->flags & PARSE_OPT_STOP_AT_NON_OPTION) - break; - ctx->out[ctx->cpidx++] = ctx->argv[0]; - continue; - } - - if (arg[1] != '-') { - ctx->opt = arg + 1; - if (internal_help && *ctx->opt == 'h') - return parse_options_usage(usagestr, options); - switch (parse_short_opt(ctx, options)) { - case -1: - return parse_options_usage(usagestr, options); - case -2: - goto unknown; - } - if (ctx->opt) - check_typos(arg + 1, options); - while (ctx->opt) { - if (internal_help && *ctx->opt == 'h') - return parse_options_usage(usagestr, options); - switch (parse_short_opt(ctx, options)) { - case -1: - return parse_options_usage(usagestr, options); - case -2: - /* fake a short option thing to hide the fact that we may have - * started to parse aggregated stuff - * - * This is leaky, too bad. 
- */ - ctx->argv[0] = strdup(ctx->opt - 1); - *(char *)ctx->argv[0] = '-'; - goto unknown; - } - } - continue; - } - - if (!arg[2]) { /* "--" */ - if (!(ctx->flags & PARSE_OPT_KEEP_DASHDASH)) { - ctx->argc--; - ctx->argv++; - } - break; - } - - if (internal_help && !strcmp(arg + 2, "help-all")) - return usage_with_options_internal(usagestr, options, 1); - if (internal_help && !strcmp(arg + 2, "help")) - return parse_options_usage(usagestr, options); - switch (parse_long_opt(ctx, arg + 2, options)) { - case -1: - return parse_options_usage(usagestr, options); - case -2: - goto unknown; - } - continue; -unknown: - if (!(ctx->flags & PARSE_OPT_KEEP_UNKNOWN)) - return PARSE_OPT_UNKNOWN; - ctx->out[ctx->cpidx++] = ctx->argv[0]; - ctx->opt = NULL; - } - return PARSE_OPT_DONE; -} - -int parse_options_end(struct parse_opt_ctx_t *ctx) -{ - memmove(ctx->out + ctx->cpidx, ctx->argv, ctx->argc * sizeof(*ctx->out)); - ctx->out[ctx->cpidx + ctx->argc] = NULL; - return ctx->cpidx + ctx->argc; -} - -int parse_options(int argc, const char **argv, const struct option *options, - const char * const usagestr[], int flags) -{ - struct parse_opt_ctx_t ctx; - - parse_options_start(&ctx, argc, argv, flags); - switch (parse_options_step(&ctx, options, usagestr)) { - case PARSE_OPT_HELP: - exit(129); - case PARSE_OPT_DONE: - break; - default: /* PARSE_OPT_UNKNOWN */ - if (ctx.argv[0][1] == '-') { - error("unknown option `%s'", ctx.argv[0] + 2); - } else { - error("unknown switch `%c'", *ctx.opt); - } - usage_with_options(usagestr, options); - } - - return parse_options_end(&ctx); -} - -#define USAGE_OPTS_WIDTH 24 -#define USAGE_GAP 2 - -int usage_with_options_internal(const char * const *usagestr, - const struct option *opts, int full) -{ - if (!usagestr) - return PARSE_OPT_HELP; - - fprintf(stderr, "\n usage: %s\n", *usagestr++); - while (*usagestr && **usagestr) - fprintf(stderr, " or: %s\n", *usagestr++); - while (*usagestr) { - fprintf(stderr, "%s%s\n", - **usagestr ? 
" " : "", - *usagestr); - usagestr++; - } - - if (opts->type != OPTION_GROUP) - fputc('\n', stderr); - - for (; opts->type != OPTION_END; opts++) { - size_t pos; - int pad; - - if (opts->type == OPTION_GROUP) { - fputc('\n', stderr); - if (*opts->help) - fprintf(stderr, "%s\n", opts->help); - continue; - } - if (!full && (opts->flags & PARSE_OPT_HIDDEN)) - continue; - - pos = fprintf(stderr, " "); - if (opts->short_name) - pos += fprintf(stderr, "-%c", opts->short_name); - if (opts->long_name && opts->short_name) - pos += fprintf(stderr, ", "); - if (opts->long_name) - pos += fprintf(stderr, "--%s", opts->long_name); - - switch (opts->type) { - case OPTION_ARGUMENT: - break; - case OPTION_INTEGER: - if (opts->flags & PARSE_OPT_OPTARG) - if (opts->long_name) - pos += fprintf(stderr, "[=]"); - else - pos += fprintf(stderr, "[]"); - else - pos += fprintf(stderr, " "); - break; - case OPTION_CALLBACK: - if (opts->flags & PARSE_OPT_NOARG) - break; - /* FALLTHROUGH */ - case OPTION_STRING: - if (opts->argh) { - if (opts->flags & PARSE_OPT_OPTARG) - if (opts->long_name) - pos += fprintf(stderr, "[=<%s>]", opts->argh); - else - pos += fprintf(stderr, "[<%s>]", opts->argh); - else - pos += fprintf(stderr, " <%s>", opts->argh); - } else { - if (opts->flags & PARSE_OPT_OPTARG) - if (opts->long_name) - pos += fprintf(stderr, "[=...]"); - else - pos += fprintf(stderr, "[...]"); - else - pos += fprintf(stderr, " ..."); - } - break; - default: /* OPTION_{BIT,BOOLEAN,SET_INT,SET_PTR} */ - break; - } - - if (pos <= USAGE_OPTS_WIDTH) - pad = USAGE_OPTS_WIDTH - pos; - else { - fputc('\n', stderr); - pad = USAGE_OPTS_WIDTH; - } - fprintf(stderr, "%*s%s\n", pad + USAGE_GAP, "", opts->help); - } - fputc('\n', stderr); - - return PARSE_OPT_HELP; -} - -void usage_with_options(const char * const *usagestr, - const struct option *opts) -{ - usage_with_options_internal(usagestr, opts, 0); - exit(129); -} - -int parse_options_usage(const char * const *usagestr, - const struct option *opts) -{ - return usage_with_options_internal(usagestr, opts, 0); -} - - -int parse_opt_verbosity_cb(const struct option *opt, const char *arg, - int unset) -{ - int *target = opt->value; - - if (unset) - /* --no-quiet, --no-verbose */ - *target = 0; - else if (opt->short_name == 'v') { - if (*target >= 0) - (*target)++; - else - *target = 1; - } else { - if (*target <= 0) - (*target)--; - else - *target = -1; - } - return 0; -} diff --git a/Documentation/perf_counter/util/parse-options.h b/Documentation/perf_counter/util/parse-options.h deleted file mode 100644 index a1039a6ce0e..00000000000 --- a/Documentation/perf_counter/util/parse-options.h +++ /dev/null @@ -1,174 +0,0 @@ -#ifndef PARSE_OPTIONS_H -#define PARSE_OPTIONS_H - -enum parse_opt_type { - /* special types */ - OPTION_END, - OPTION_ARGUMENT, - OPTION_GROUP, - /* options with no arguments */ - OPTION_BIT, - OPTION_BOOLEAN, /* _INCR would have been a better name */ - OPTION_SET_INT, - OPTION_SET_PTR, - /* options with arguments (usually) */ - OPTION_STRING, - OPTION_INTEGER, - OPTION_LONG, - OPTION_CALLBACK, -}; - -enum parse_opt_flags { - PARSE_OPT_KEEP_DASHDASH = 1, - PARSE_OPT_STOP_AT_NON_OPTION = 2, - PARSE_OPT_KEEP_ARGV0 = 4, - PARSE_OPT_KEEP_UNKNOWN = 8, - PARSE_OPT_NO_INTERNAL_HELP = 16, -}; - -enum parse_opt_option_flags { - PARSE_OPT_OPTARG = 1, - PARSE_OPT_NOARG = 2, - PARSE_OPT_NONEG = 4, - PARSE_OPT_HIDDEN = 8, - PARSE_OPT_LASTARG_DEFAULT = 16, -}; - -struct option; -typedef int parse_opt_cb(const struct option *, const char *arg, int unset); - -/* - * `type`:: - * 
- *   holds the type of the option; you must have an OPTION_END last in your
- *   array.
- *
- * `short_name`::
- *   the character to use as a short option name, '\0' if none.
- *
- * `long_name`::
- *   the long option name, without the leading dashes, NULL if none.
- *
- * `value`::
- *   stores pointers to the values to be filled.
- *
- * `argh`::
- *   token to explain the kind of argument this option wants. Keep it
- *   homogeneous across the repository.
- *
- * `help`::
- *   the short help associated with what the option does.
- *   Must never be NULL (except for OPTION_END).
- *   OPTION_GROUP uses this pointer to store the group header.
- *
- * `flags`::
- *   mask of parse_opt_option_flags.
- *   PARSE_OPT_OPTARG: says that the argument is optional (not for BOOLEANs)
- *   PARSE_OPT_NOARG: says that this option takes no argument, for CALLBACKs
- *   PARSE_OPT_NONEG: says that this option cannot be negated
- *   PARSE_OPT_HIDDEN: this option is skipped in the default usage, shown in
- *                     the long one.
- *
- * `callback`::
- *   pointer to the callback to use for OPTION_CALLBACK.
- *
- * `defval`::
- *   default value to fill (*->value) with for PARSE_OPT_OPTARG.
- *   OPTION_{BIT,SET_INT,SET_PTR} store the {mask,integer,pointer} to put in
- *   the value when met.
- *   CALLBACKs can use it as they want.
- */
-struct option {
-	enum parse_opt_type type;
-	int short_name;
-	const char *long_name;
-	void *value;
-	const char *argh;
-	const char *help;
-
-	int flags;
-	parse_opt_cb *callback;
-	intptr_t defval;
-};
-
-#define OPT_END()                   { OPTION_END }
-#define OPT_ARGUMENT(l, h)          { OPTION_ARGUMENT, 0, (l), NULL, NULL, (h) }
-#define OPT_GROUP(h)                { OPTION_GROUP, 0, NULL, NULL, NULL, (h) }
-#define OPT_BIT(s, l, v, h, b)      { OPTION_BIT, (s), (l), (v), NULL, (h), 0, NULL, (b) }
-#define OPT_BOOLEAN(s, l, v, h)     { OPTION_BOOLEAN, (s), (l), (v), NULL, (h) }
-#define OPT_SET_INT(s, l, v, h, i)  { OPTION_SET_INT, (s), (l), (v), NULL, (h), 0, NULL, (i) }
-#define OPT_SET_PTR(s, l, v, h, p)  { OPTION_SET_PTR, (s), (l), (v), NULL, (h), 0, NULL, (p) }
-#define OPT_INTEGER(s, l, v, h)     { OPTION_INTEGER, (s), (l), (v), NULL, (h) }
-#define OPT_LONG(s, l, v, h)        { OPTION_LONG, (s), (l), (v), NULL, (h) }
-#define OPT_STRING(s, l, v, a, h)   { OPTION_STRING, (s), (l), (v), (a), (h) }
-#define OPT_DATE(s, l, v, h) \
-	{ OPTION_CALLBACK, (s), (l), (v), "time", (h), 0, \
-	  parse_opt_approxidate_cb }
-#define OPT_CALLBACK(s, l, v, a, h, f) \
-	{ OPTION_CALLBACK, (s), (l), (v), (a), (h), 0, (f) }
-
-/* parse_options() will filter out the processed options and leave the
- * non-option arguments in argv[].
- * Returns the number of arguments left in argv[].
- */
-extern int parse_options(int argc, const char **argv,
-			 const struct option *options,
-			 const char * const usagestr[], int flags);
-
-extern NORETURN void usage_with_options(const char * const *usagestr,
-					const struct option *options);
-
-/*----- incremental advanced APIs -----*/
-
-enum {
-	PARSE_OPT_HELP = -1,
-	PARSE_OPT_DONE,
-	PARSE_OPT_UNKNOWN,
-};
-
-/*
- * It's okay for the caller to consume argv/argc in the usual way.
- * Other fields of that structure are private to parse-options and should not
- * be modified in any way.
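(For orientation while reading this removal: the API declared above is table
driven. A minimal sketch of a caller follows; the option names, variables and
usage strings are invented for illustration, they are not part of this patch:

	#include "parse-options.h"

	static int verbose;
	static const char *output_name;

	static const char * const demo_usage[] = {
		"demo [<options>] <file>...",
		NULL
	};

	static const struct option demo_options[] = {
		OPT_BOOLEAN('v', "verbose", &verbose, "be verbose"),
		OPT_STRING('o', "output", &output_name, "file", "output file name"),
		OPT_END()
	};

	int main(int argc, const char **argv)
	{
		/* returns the number of non-option arguments left in argv[] */
		argc = parse_options(argc, argv, demo_options, demo_usage, 0);
		return 0;
	}

Negated spellings such as --no-verbose and unambiguous abbreviations such as
--verb are resolved by parse_long_opt() above without extra table entries.)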
- */ -struct parse_opt_ctx_t { - const char **argv; - const char **out; - int argc, cpidx; - const char *opt; - int flags; -}; - -extern int parse_options_usage(const char * const *usagestr, - const struct option *opts); - -extern void parse_options_start(struct parse_opt_ctx_t *ctx, - int argc, const char **argv, int flags); - -extern int parse_options_step(struct parse_opt_ctx_t *ctx, - const struct option *options, - const char * const usagestr[]); - -extern int parse_options_end(struct parse_opt_ctx_t *ctx); - - -/*----- some often used options -----*/ -extern int parse_opt_abbrev_cb(const struct option *, const char *, int); -extern int parse_opt_approxidate_cb(const struct option *, const char *, int); -extern int parse_opt_verbosity_cb(const struct option *, const char *, int); - -#define OPT__VERBOSE(var) OPT_BOOLEAN('v', "verbose", (var), "be verbose") -#define OPT__QUIET(var) OPT_BOOLEAN('q', "quiet", (var), "be quiet") -#define OPT__VERBOSITY(var) \ - { OPTION_CALLBACK, 'v', "verbose", (var), NULL, "be more verbose", \ - PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 }, \ - { OPTION_CALLBACK, 'q', "quiet", (var), NULL, "be more quiet", \ - PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 } -#define OPT__DRY_RUN(var) OPT_BOOLEAN('n', "dry-run", (var), "dry run") -#define OPT__ABBREV(var) \ - { OPTION_CALLBACK, 0, "abbrev", (var), "n", \ - "use digits to display SHA-1s", \ - PARSE_OPT_OPTARG, &parse_opt_abbrev_cb, 0 } - -extern const char *parse_options_fix_filename(const char *prefix, const char *file); - -#endif diff --git a/Documentation/perf_counter/util/path.c b/Documentation/perf_counter/util/path.c deleted file mode 100644 index a501a40dd2c..00000000000 --- a/Documentation/perf_counter/util/path.c +++ /dev/null @@ -1,353 +0,0 @@ -/* - * I'm tired of doing "vsnprintf()" etc just to open a - * file, so here's a "return static buffer with printf" - * interface for paths. - * - * It's obviously not thread-safe. Sue me. But it's quite - * useful for doing things like - * - * f = open(mkpath("%s/%s.perf", base, name), O_RDONLY); - * - * which is what it's designed for. - */ -#include "cache.h" - -static char bad_path[] = "/bad-path/"; -/* - * Two hacks: - */ - -static char *get_perf_dir(void) -{ - return "."; -} - -size_t strlcpy(char *dest, const char *src, size_t size) -{ - size_t ret = strlen(src); - - if (size) { - size_t len = (ret >= size) ? size - 1 : ret; - memcpy(dest, src, len); - dest[len] = '\0'; - } - return ret; -} - - -static char *get_pathname(void) -{ - static char pathname_array[4][PATH_MAX]; - static int index; - return pathname_array[3 & ++index]; -} - -static char *cleanup_path(char *path) -{ - /* Clean it up */ - if (!memcmp(path, "./", 2)) { - path += 2; - while (*path == '/') - path++; - } - return path; -} - -char *mksnpath(char *buf, size_t n, const char *fmt, ...) 
-{ - va_list args; - unsigned len; - - va_start(args, fmt); - len = vsnprintf(buf, n, fmt, args); - va_end(args); - if (len >= n) { - strlcpy(buf, bad_path, n); - return buf; - } - return cleanup_path(buf); -} - -static char *perf_vsnpath(char *buf, size_t n, const char *fmt, va_list args) -{ - const char *perf_dir = get_perf_dir(); - size_t len; - - len = strlen(perf_dir); - if (n < len + 1) - goto bad; - memcpy(buf, perf_dir, len); - if (len && !is_dir_sep(perf_dir[len-1])) - buf[len++] = '/'; - len += vsnprintf(buf + len, n - len, fmt, args); - if (len >= n) - goto bad; - return cleanup_path(buf); -bad: - strlcpy(buf, bad_path, n); - return buf; -} - -char *perf_snpath(char *buf, size_t n, const char *fmt, ...) -{ - va_list args; - va_start(args, fmt); - (void)perf_vsnpath(buf, n, fmt, args); - va_end(args); - return buf; -} - -char *perf_pathdup(const char *fmt, ...) -{ - char path[PATH_MAX]; - va_list args; - va_start(args, fmt); - (void)perf_vsnpath(path, sizeof(path), fmt, args); - va_end(args); - return xstrdup(path); -} - -char *mkpath(const char *fmt, ...) -{ - va_list args; - unsigned len; - char *pathname = get_pathname(); - - va_start(args, fmt); - len = vsnprintf(pathname, PATH_MAX, fmt, args); - va_end(args); - if (len >= PATH_MAX) - return bad_path; - return cleanup_path(pathname); -} - -char *perf_path(const char *fmt, ...) -{ - const char *perf_dir = get_perf_dir(); - char *pathname = get_pathname(); - va_list args; - unsigned len; - - len = strlen(perf_dir); - if (len > PATH_MAX-100) - return bad_path; - memcpy(pathname, perf_dir, len); - if (len && perf_dir[len-1] != '/') - pathname[len++] = '/'; - va_start(args, fmt); - len += vsnprintf(pathname + len, PATH_MAX - len, fmt, args); - va_end(args); - if (len >= PATH_MAX) - return bad_path; - return cleanup_path(pathname); -} - - -/* perf_mkstemp() - create tmp file honoring TMPDIR variable */ -int perf_mkstemp(char *path, size_t len, const char *template) -{ - const char *tmp; - size_t n; - - tmp = getenv("TMPDIR"); - if (!tmp) - tmp = "/tmp"; - n = snprintf(path, len, "%s/%s", tmp, template); - if (len <= n) { - errno = ENAMETOOLONG; - return -1; - } - return mkstemp(path); -} - - -const char *make_relative_path(const char *abs, const char *base) -{ - static char buf[PATH_MAX + 1]; - int baselen; - if (!base) - return abs; - baselen = strlen(base); - if (prefixcmp(abs, base)) - return abs; - if (abs[baselen] == '/') - baselen++; - else if (base[baselen - 1] != '/') - return abs; - strcpy(buf, abs + baselen); - return buf; -} - -/* - * It is okay if dst == src, but they should not overlap otherwise. - * - * Performs the following normalizations on src, storing the result in dst: - * - Ensures that components are separated by '/' (Windows only) - * - Squashes sequences of '/'. - * - Removes "." components. - * - Removes ".." components, and the components the precede them. - * Returns failure (non-zero) if a ".." component appears as first path - * component anytime during the normalization. Otherwise, returns success (0). - * - * Note that this function is purely textual. It does not follow symlinks, - * verify the existence of the path, or make any system calls. - */ -int normalize_path_copy(char *dst, const char *src) -{ - char *dst0; - - if (has_dos_drive_prefix(src)) { - *dst++ = *src++; - *dst++ = *src++; - } - dst0 = dst; - - if (is_dir_sep(*src)) { - *dst++ = '/'; - while (is_dir_sep(*src)) - src++; - } - - for (;;) { - char c = *src; - - /* - * A path component that begins with . could be - * special: - * (1) "." 
and ends -- ignore and terminate. - * (2) "./" -- ignore them, eat slash and continue. - * (3) ".." and ends -- strip one and terminate. - * (4) "../" -- strip one, eat slash and continue. - */ - if (c == '.') { - if (!src[1]) { - /* (1) */ - src++; - } else if (is_dir_sep(src[1])) { - /* (2) */ - src += 2; - while (is_dir_sep(*src)) - src++; - continue; - } else if (src[1] == '.') { - if (!src[2]) { - /* (3) */ - src += 2; - goto up_one; - } else if (is_dir_sep(src[2])) { - /* (4) */ - src += 3; - while (is_dir_sep(*src)) - src++; - goto up_one; - } - } - } - - /* copy up to the next '/', and eat all '/' */ - while ((c = *src++) != '\0' && !is_dir_sep(c)) - *dst++ = c; - if (is_dir_sep(c)) { - *dst++ = '/'; - while (is_dir_sep(c)) - c = *src++; - src--; - } else if (!c) - break; - continue; - - up_one: - /* - * dst0..dst is prefix portion, and dst[-1] is '/'; - * go up one level. - */ - dst--; /* go to trailing '/' */ - if (dst <= dst0) - return -1; - /* Windows: dst[-1] cannot be backslash anymore */ - while (dst0 < dst && dst[-1] != '/') - dst--; - } - *dst = '\0'; - return 0; -} - -/* - * path = Canonical absolute path - * prefix_list = Colon-separated list of absolute paths - * - * Determines, for each path in prefix_list, whether the "prefix" really - * is an ancestor directory of path. Returns the length of the longest - * ancestor directory, excluding any trailing slashes, or -1 if no prefix - * is an ancestor. (Note that this means 0 is returned if prefix_list is - * "/".) "/foo" is not considered an ancestor of "/foobar". Directories - * are not considered to be their own ancestors. path must be in a - * canonical form: empty components, or "." or ".." components are not - * allowed. prefix_list may be null, which is like "". - */ -int longest_ancestor_length(const char *path, const char *prefix_list) -{ - char buf[PATH_MAX+1]; - const char *ceil, *colon; - int len, max_len = -1; - - if (prefix_list == NULL || !strcmp(path, "/")) - return -1; - - for (colon = ceil = prefix_list; *colon; ceil = colon+1) { - for (colon = ceil; *colon && *colon != PATH_SEP; colon++); - len = colon - ceil; - if (len == 0 || len > PATH_MAX || !is_absolute_path(ceil)) - continue; - strlcpy(buf, ceil, len+1); - if (normalize_path_copy(buf, buf) < 0) - continue; - len = strlen(buf); - if (len > 0 && buf[len-1] == '/') - buf[--len] = '\0'; - - if (!strncmp(path, buf, len) && - path[len] == '/' && - len > max_len) { - max_len = len; - } - } - - return max_len; -} - -/* strip arbitrary amount of directory separators at end of path */ -static inline int chomp_trailing_dir_sep(const char *path, int len) -{ - while (len && is_dir_sep(path[len - 1])) - len--; - return len; -} - -/* - * If path ends with suffix (complete path components), returns the - * part before suffix (sans trailing directory separators). - * Otherwise returns NULL. 
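(To make the semantics above concrete, a short sketch with invented inputs,
not code from this patch:

	#include <stdio.h>
	#include "cache.h"

	char buf[PATH_MAX];
	char *base;

	/* "." and ".." components are resolved purely textually: */
	if (!normalize_path_copy(buf, "a/b/../c/./d"))
		printf("%s\n", buf);		/* prints "a/c/d" */

	/* a ".." that would climb out of the first component fails: */
	if (normalize_path_copy(buf, "../etc") < 0)
		printf("cannot normalize\n");

	/* strip_path_suffix() only strips complete components: */
	base = strip_path_suffix("/usr/lib/perf", "lib/perf");
	/* base is "/usr"; a suffix of "b/perf" would yield NULL,
	 * because "b" does not match the whole "lib" component. */
)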
- */
-char *strip_path_suffix(const char *path, const char *suffix)
-{
-	int path_len = strlen(path), suffix_len = strlen(suffix);
-
-	while (suffix_len) {
-		if (!path_len)
-			return NULL;
-
-		if (is_dir_sep(path[path_len - 1])) {
-			if (!is_dir_sep(suffix[suffix_len - 1]))
-				return NULL;
-			path_len = chomp_trailing_dir_sep(path, path_len);
-			suffix_len = chomp_trailing_dir_sep(suffix, suffix_len);
-		}
-		else if (path[--path_len] != suffix[--suffix_len])
-			return NULL;
-	}
-
-	if (path_len && !is_dir_sep(path[path_len - 1]))
-		return NULL;
-	return xstrndup(path, chomp_trailing_dir_sep(path, path_len));
-}
diff --git a/Documentation/perf_counter/util/quote.c b/Documentation/perf_counter/util/quote.c
deleted file mode 100644
index f18c5212bc9..00000000000
--- a/Documentation/perf_counter/util/quote.c
+++ /dev/null
@@ -1,481 +0,0 @@
-#include "cache.h"
-#include "quote.h"
-
-int quote_path_fully = 1;
-
-/* Helpers to copy a string properly quoted for shell safety:
- * any single quote is replaced with '\'', any exclamation point
- * is replaced with '\!', and the whole thing is enclosed in a
- * single quote pair.
- *
- * E.g.
- *  original     sq_quote     result
- *  name     ==> name      ==> 'name'
- *  a b      ==> a b       ==> 'a b'
- *  a'b      ==> a'\''b    ==> 'a'\''b'
- *  a!b      ==> a'\!'b    ==> 'a'\!'b'
- */
-static inline int need_bs_quote(char c)
-{
-	return (c == '\'' || c == '!');
-}
-
-void sq_quote_buf(struct strbuf *dst, const char *src)
-{
-	char *to_free = NULL;
-
-	if (dst->buf == src)
-		to_free = strbuf_detach(dst, NULL);
-
-	strbuf_addch(dst, '\'');
-	while (*src) {
-		size_t len = strcspn(src, "'!");
-		strbuf_add(dst, src, len);
-		src += len;
-		while (need_bs_quote(*src)) {
-			strbuf_addstr(dst, "'\\");
-			strbuf_addch(dst, *src++);
-			strbuf_addch(dst, '\'');
-		}
-	}
-	strbuf_addch(dst, '\'');
-	free(to_free);
-}
-
-void sq_quote_print(FILE *stream, const char *src)
-{
-	char c;
-
-	fputc('\'', stream);
-	while ((c = *src++)) {
-		if (need_bs_quote(c)) {
-			fputs("'\\", stream);
-			fputc(c, stream);
-			fputc('\'', stream);
-		} else {
-			fputc(c, stream);
-		}
-	}
-	fputc('\'', stream);
-}
-
-void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
-{
-	int i;
-
-	/* Copy into destination buffer.
*/ - strbuf_grow(dst, 255); - for (i = 0; argv[i]; ++i) { - strbuf_addch(dst, ' '); - sq_quote_buf(dst, argv[i]); - if (maxlen && dst->len > maxlen) - die("Too many or long arguments"); - } -} - -char *sq_dequote_step(char *arg, char **next) -{ - char *dst = arg; - char *src = arg; - char c; - - if (*src != '\'') - return NULL; - for (;;) { - c = *++src; - if (!c) - return NULL; - if (c != '\'') { - *dst++ = c; - continue; - } - /* We stepped out of sq */ - switch (*++src) { - case '\0': - *dst = 0; - if (next) - *next = NULL; - return arg; - case '\\': - c = *++src; - if (need_bs_quote(c) && *++src == '\'') { - *dst++ = c; - continue; - } - /* Fallthrough */ - default: - if (!next || !isspace(*src)) - return NULL; - do { - c = *++src; - } while (isspace(c)); - *dst = 0; - *next = src; - return arg; - } - } -} - -char *sq_dequote(char *arg) -{ - return sq_dequote_step(arg, NULL); -} - -int sq_dequote_to_argv(char *arg, const char ***argv, int *nr, int *alloc) -{ - char *next = arg; - - if (!*arg) - return 0; - do { - char *dequoted = sq_dequote_step(next, &next); - if (!dequoted) - return -1; - ALLOC_GROW(*argv, *nr + 1, *alloc); - (*argv)[(*nr)++] = dequoted; - } while (next); - - return 0; -} - -/* 1 means: quote as octal - * 0 means: quote as octal if (quote_path_fully) - * -1 means: never quote - * c: quote as "\\c" - */ -#define X8(x) x, x, x, x, x, x, x, x -#define X16(x) X8(x), X8(x) -static signed char const sq_lookup[256] = { - /* 0 1 2 3 4 5 6 7 */ - /* 0x00 */ 1, 1, 1, 1, 1, 1, 1, 'a', - /* 0x08 */ 'b', 't', 'n', 'v', 'f', 'r', 1, 1, - /* 0x10 */ X16(1), - /* 0x20 */ -1, -1, '"', -1, -1, -1, -1, -1, - /* 0x28 */ X16(-1), X16(-1), X16(-1), - /* 0x58 */ -1, -1, -1, -1,'\\', -1, -1, -1, - /* 0x60 */ X16(-1), X8(-1), - /* 0x78 */ -1, -1, -1, -1, -1, -1, -1, 1, - /* 0x80 */ /* set to 0 */ -}; - -static inline int sq_must_quote(char c) -{ - return sq_lookup[(unsigned char)c] + quote_path_fully > 0; -} - -/* returns the longest prefix not needing a quote up to maxlen if positive. - This stops at the first \0 because it's marked as a character needing an - escape */ -static size_t next_quote_pos(const char *s, ssize_t maxlen) -{ - size_t len; - if (maxlen < 0) { - for (len = 0; !sq_must_quote(s[len]); len++); - } else { - for (len = 0; len < maxlen && !sq_must_quote(s[len]); len++); - } - return len; -} - -/* - * C-style name quoting. - * - * (1) if sb and fp are both NULL, inspect the input name and counts the - * number of bytes that are needed to hold c_style quoted version of name, - * counting the double quotes around it but not terminating NUL, and - * returns it. - * However, if name does not need c_style quoting, it returns 0. - * - * (2) if sb or fp are not NULL, it emits the c_style quoted version - * of name, enclosed with double quotes if asked and needed only. - * Return value is the same as in (1). 
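(A round trip through the quoting helpers above, with invented strings; a
sketch rather than code from this patch:

	#include <stdio.h>
	#include "cache.h"
	#include "quote.h"

	struct strbuf quoted;
	const char **argv = NULL;
	int nr = 0, alloc = 0;
	char args[] = "'a b' 'it'\\''s'";

	strbuf_init(&quoted, 0);
	sq_quote_buf(&quoted, "it's");
	/* quoted.buf now holds: 'it'\''s' */

	if (!sq_dequote_to_argv(args, &argv, &nr, &alloc))
		printf("%d words, first: %s\n", nr, argv[0]);
	/* prints: 2 words, first: a b */

	strbuf_release(&quoted);
	free(argv);
)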
- */ -static size_t quote_c_style_counted(const char *name, ssize_t maxlen, - struct strbuf *sb, FILE *fp, int no_dq) -{ -#undef EMIT -#define EMIT(c) \ - do { \ - if (sb) strbuf_addch(sb, (c)); \ - if (fp) fputc((c), fp); \ - count++; \ - } while (0) -#define EMITBUF(s, l) \ - do { \ - int __ret; \ - if (sb) strbuf_add(sb, (s), (l)); \ - if (fp) __ret = fwrite((s), (l), 1, fp); \ - count += (l); \ - } while (0) - - size_t len, count = 0; - const char *p = name; - - for (;;) { - int ch; - - len = next_quote_pos(p, maxlen); - if (len == maxlen || !p[len]) - break; - - if (!no_dq && p == name) - EMIT('"'); - - EMITBUF(p, len); - EMIT('\\'); - p += len; - ch = (unsigned char)*p++; - if (sq_lookup[ch] >= ' ') { - EMIT(sq_lookup[ch]); - } else { - EMIT(((ch >> 6) & 03) + '0'); - EMIT(((ch >> 3) & 07) + '0'); - EMIT(((ch >> 0) & 07) + '0'); - } - } - - EMITBUF(p, len); - if (p == name) /* no ending quote needed */ - return 0; - - if (!no_dq) - EMIT('"'); - return count; -} - -size_t quote_c_style(const char *name, struct strbuf *sb, FILE *fp, int nodq) -{ - return quote_c_style_counted(name, -1, sb, fp, nodq); -} - -void quote_two_c_style(struct strbuf *sb, const char *prefix, const char *path, int nodq) -{ - if (quote_c_style(prefix, NULL, NULL, 0) || - quote_c_style(path, NULL, NULL, 0)) { - if (!nodq) - strbuf_addch(sb, '"'); - quote_c_style(prefix, sb, NULL, 1); - quote_c_style(path, sb, NULL, 1); - if (!nodq) - strbuf_addch(sb, '"'); - } else { - strbuf_addstr(sb, prefix); - strbuf_addstr(sb, path); - } -} - -void write_name_quoted(const char *name, FILE *fp, int terminator) -{ - if (terminator) { - quote_c_style(name, NULL, fp, 0); - } else { - fputs(name, fp); - } - fputc(terminator, fp); -} - -extern void write_name_quotedpfx(const char *pfx, size_t pfxlen, - const char *name, FILE *fp, int terminator) -{ - int needquote = 0; - - if (terminator) { - needquote = next_quote_pos(pfx, pfxlen) < pfxlen - || name[next_quote_pos(name, -1)]; - } - if (needquote) { - fputc('"', fp); - quote_c_style_counted(pfx, pfxlen, NULL, fp, 1); - quote_c_style(name, NULL, fp, 1); - fputc('"', fp); - } else { - int ret; - - ret = fwrite(pfx, pfxlen, 1, fp); - fputs(name, fp); - } - fputc(terminator, fp); -} - -/* quote path as relative to the given prefix */ -char *quote_path_relative(const char *in, int len, - struct strbuf *out, const char *prefix) -{ - int needquote; - - if (len < 0) - len = strlen(in); - - /* "../" prefix itself does not need quoting, but "in" might. */ - needquote = next_quote_pos(in, len) < len; - strbuf_setlen(out, 0); - strbuf_grow(out, len); - - if (needquote) - strbuf_addch(out, '"'); - if (prefix) { - int off = 0; - while (prefix[off] && off < len && prefix[off] == in[off]) - if (prefix[off] == '/') { - prefix += off + 1; - in += off + 1; - len -= off + 1; - off = 0; - } else - off++; - - for (; *prefix; prefix++) - if (*prefix == '/') - strbuf_addstr(out, "../"); - } - - quote_c_style_counted (in, len, out, NULL, 1); - - if (needquote) - strbuf_addch(out, '"'); - if (!out->len) - strbuf_addstr(out, "./"); - - return out->buf; -} - -/* - * C-style name unquoting. - * - * Quoted should point at the opening double quote. - * + Returns 0 if it was able to unquote the string properly, and appends the - * result in the strbuf `sb'. - * + Returns -1 in case of error, and doesn't touch the strbuf. Though note - * that this function will allocate memory in the strbuf, so calling - * strbuf_release is mandatory whichever result unquote_c_style returns. 
- *
- * Updates endp pointer to point at one past the ending double quote if given.
- */
-int unquote_c_style(struct strbuf *sb, const char *quoted, const char **endp)
-{
-	size_t oldlen = sb->len, len;
-	int ch, ac;
-
-	if (*quoted++ != '"')
-		return -1;
-
-	for (;;) {
-		len = strcspn(quoted, "\"\\");
-		strbuf_add(sb, quoted, len);
-		quoted += len;
-
-		switch (*quoted++) {
-		case '"':
-			if (endp)
-				*endp = quoted;
-			return 0;
-		case '\\':
-			break;
-		default:
-			goto error;
-		}
-
-		switch ((ch = *quoted++)) {
-		case 'a': ch = '\a'; break;
-		case 'b': ch = '\b'; break;
-		case 'f': ch = '\f'; break;
-		case 'n': ch = '\n'; break;
-		case 'r': ch = '\r'; break;
-		case 't': ch = '\t'; break;
-		case 'v': ch = '\v'; break;
-
-		case '\\': case '"':
-			break; /* verbatim */
-
-		/* octal values with first digit over 4 overflow */
-		case '0': case '1': case '2': case '3':
-			ac = ((ch - '0') << 6);
-			if ((ch = *quoted++) < '0' || '7' < ch)
-				goto error;
-			ac |= ((ch - '0') << 3);
-			if ((ch = *quoted++) < '0' || '7' < ch)
-				goto error;
-			ac |= (ch - '0');
-			ch = ac;
-			break;
-		default:
-			goto error;
-		}
-		strbuf_addch(sb, ch);
-	}
-
-  error:
-	strbuf_setlen(sb, oldlen);
-	return -1;
-}
-
-/* quoting as a string literal for other languages */
-
-void perl_quote_print(FILE *stream, const char *src)
-{
-	const char sq = '\'';
-	const char bq = '\\';
-	char c;
-
-	fputc(sq, stream);
-	while ((c = *src++)) {
-		if (c == sq || c == bq)
-			fputc(bq, stream);
-		fputc(c, stream);
-	}
-	fputc(sq, stream);
-}
-
-void python_quote_print(FILE *stream, const char *src)
-{
-	const char sq = '\'';
-	const char bq = '\\';
-	const char nl = '\n';
-	char c;
-
-	fputc(sq, stream);
-	while ((c = *src++)) {
-		if (c == nl) {
-			fputc(bq, stream);
-			fputc('n', stream);
-			continue;
-		}
-		if (c == sq || c == bq)
-			fputc(bq, stream);
-		fputc(c, stream);
-	}
-	fputc(sq, stream);
}
-
-void tcl_quote_print(FILE *stream, const char *src)
-{
-	char c;
-
-	fputc('"', stream);
-	while ((c = *src++)) {
-		switch (c) {
-		case '[': case ']':
-		case '{': case '}':
-		case '$': case '\\': case '"':
-			fputc('\\', stream);
-		default:
-			fputc(c, stream);
-			break;
-		case '\f':
-			fputs("\\f", stream);
-			break;
-		case '\r':
-			fputs("\\r", stream);
-			break;
-		case '\n':
-			fputs("\\n", stream);
-			break;
-		case '\t':
-			fputs("\\t", stream);
-			break;
-		case '\v':
-			fputs("\\v", stream);
-			break;
-		}
-	}
-	fputc('"', stream);
-}
diff --git a/Documentation/perf_counter/util/quote.h b/Documentation/perf_counter/util/quote.h
deleted file mode 100644
index 5dfad89816d..00000000000
--- a/Documentation/perf_counter/util/quote.h
+++ /dev/null
@@ -1,68 +0,0 @@
-#ifndef QUOTE_H
-#define QUOTE_H
-
-#include <stddef.h>
-#include <stdio.h>
-
-/* Helpers to copy a string properly quoted for shell safety:
- * any single quote is replaced with '\'', any exclamation point
- * is replaced with '\!', and the whole thing is enclosed in a
- * single quote pair.
- *
- * For example, if you are passing the result to system() as an
- * argument:
- *
- * sprintf(cmd, "foobar %s %s", sq_quote(arg0), sq_quote(arg1))
- *
- * would be appropriate.  If the system() is going to call ssh to
- * run the command on the other side:
- *
- * sprintf(cmd, "git-diff-tree %s %s", sq_quote(arg0), sq_quote(arg1));
- * sprintf(rcmd, "ssh %s %s", sq_quote(host), sq_quote(cmd));
- *
- * Note that the above examples leak memory!  Remember to free result from
- * sq_quote() in a real application.
- * - * sq_quote_buf() writes to an existing buffer of specified size; it - * will return the number of characters that would have been written - * excluding the final null regardless of the buffer size. - */ - -extern void sq_quote_print(FILE *stream, const char *src); - -extern void sq_quote_buf(struct strbuf *, const char *src); -extern void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen); - -/* This unwraps what sq_quote() produces in place, but returns - * NULL if the input does not look like what sq_quote would have - * produced. - */ -extern char *sq_dequote(char *); - -/* - * Same as the above, but can be used to unwrap many arguments in the - * same string separated by space. "next" is changed to point to the - * next argument that should be passed as first parameter. When there - * is no more argument to be dequoted, "next" is updated to point to NULL. - */ -extern char *sq_dequote_step(char *arg, char **next); -extern int sq_dequote_to_argv(char *arg, const char ***argv, int *nr, int *alloc); - -extern int unquote_c_style(struct strbuf *, const char *quoted, const char **endp); -extern size_t quote_c_style(const char *name, struct strbuf *, FILE *, int no_dq); -extern void quote_two_c_style(struct strbuf *, const char *, const char *, int); - -extern void write_name_quoted(const char *name, FILE *, int terminator); -extern void write_name_quotedpfx(const char *pfx, size_t pfxlen, - const char *name, FILE *, int terminator); - -/* quote path as relative to the given prefix */ -char *quote_path_relative(const char *in, int len, - struct strbuf *out, const char *prefix); - -/* quoting as a string literal for other languages */ -extern void perl_quote_print(FILE *stream, const char *src); -extern void python_quote_print(FILE *stream, const char *src); -extern void tcl_quote_print(FILE *stream, const char *src); - -#endif diff --git a/Documentation/perf_counter/util/rbtree.c b/Documentation/perf_counter/util/rbtree.c deleted file mode 100644 index b15ba9c7cb3..00000000000 --- a/Documentation/perf_counter/util/rbtree.c +++ /dev/null @@ -1,383 +0,0 @@ -/* - Red Black Trees - (C) 1999 Andrea Arcangeli - (C) 2002 David Woodhouse - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - linux/lib/rbtree.c -*/ - -#include "rbtree.h" - -static void __rb_rotate_left(struct rb_node *node, struct rb_root *root) -{ - struct rb_node *right = node->rb_right; - struct rb_node *parent = rb_parent(node); - - if ((node->rb_right = right->rb_left)) - rb_set_parent(right->rb_left, node); - right->rb_left = node; - - rb_set_parent(right, parent); - - if (parent) - { - if (node == parent->rb_left) - parent->rb_left = right; - else - parent->rb_right = right; - } - else - root->rb_node = right; - rb_set_parent(node, right); -} - -static void __rb_rotate_right(struct rb_node *node, struct rb_root *root) -{ - struct rb_node *left = node->rb_left; - struct rb_node *parent = rb_parent(node); - - if ((node->rb_left = left->rb_right)) - rb_set_parent(left->rb_right, node); - left->rb_right = node; - - rb_set_parent(left, parent); - - if (parent) - { - if (node == parent->rb_right) - parent->rb_right = left; - else - parent->rb_left = left; - } - else - root->rb_node = left; - rb_set_parent(node, left); -} - -void rb_insert_color(struct rb_node *node, struct rb_root *root) -{ - struct rb_node *parent, *gparent; - - while ((parent = rb_parent(node)) && rb_is_red(parent)) - { - gparent = rb_parent(parent); - - if (parent == gparent->rb_left) - { - { - register struct rb_node *uncle = gparent->rb_right; - if (uncle && rb_is_red(uncle)) - { - rb_set_black(uncle); - rb_set_black(parent); - rb_set_red(gparent); - node = gparent; - continue; - } - } - - if (parent->rb_right == node) - { - register struct rb_node *tmp; - __rb_rotate_left(parent, root); - tmp = parent; - parent = node; - node = tmp; - } - - rb_set_black(parent); - rb_set_red(gparent); - __rb_rotate_right(gparent, root); - } else { - { - register struct rb_node *uncle = gparent->rb_left; - if (uncle && rb_is_red(uncle)) - { - rb_set_black(uncle); - rb_set_black(parent); - rb_set_red(gparent); - node = gparent; - continue; - } - } - - if (parent->rb_left == node) - { - register struct rb_node *tmp; - __rb_rotate_right(parent, root); - tmp = parent; - parent = node; - node = tmp; - } - - rb_set_black(parent); - rb_set_red(gparent); - __rb_rotate_left(gparent, root); - } - } - - rb_set_black(root->rb_node); -} - -static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, - struct rb_root *root) -{ - struct rb_node *other; - - while ((!node || rb_is_black(node)) && node != root->rb_node) - { - if (parent->rb_left == node) - { - other = parent->rb_right; - if (rb_is_red(other)) - { - rb_set_black(other); - rb_set_red(parent); - __rb_rotate_left(parent, root); - other = parent->rb_right; - } - if ((!other->rb_left || rb_is_black(other->rb_left)) && - (!other->rb_right || rb_is_black(other->rb_right))) - { - rb_set_red(other); - node = parent; - parent = rb_parent(node); - } - else - { - if (!other->rb_right || rb_is_black(other->rb_right)) - { - rb_set_black(other->rb_left); - rb_set_red(other); - __rb_rotate_right(other, root); - other = parent->rb_right; - } - rb_set_color(other, rb_color(parent)); - rb_set_black(parent); - rb_set_black(other->rb_right); - __rb_rotate_left(parent, root); - node = root->rb_node; - break; - } - } - else - { - other = parent->rb_left; - if (rb_is_red(other)) - { - rb_set_black(other); - rb_set_red(parent); - __rb_rotate_right(parent, root); - other = parent->rb_left; - } - if 
((!other->rb_left || rb_is_black(other->rb_left)) && - (!other->rb_right || rb_is_black(other->rb_right))) - { - rb_set_red(other); - node = parent; - parent = rb_parent(node); - } - else - { - if (!other->rb_left || rb_is_black(other->rb_left)) - { - rb_set_black(other->rb_right); - rb_set_red(other); - __rb_rotate_left(other, root); - other = parent->rb_left; - } - rb_set_color(other, rb_color(parent)); - rb_set_black(parent); - rb_set_black(other->rb_left); - __rb_rotate_right(parent, root); - node = root->rb_node; - break; - } - } - } - if (node) - rb_set_black(node); -} - -void rb_erase(struct rb_node *node, struct rb_root *root) -{ - struct rb_node *child, *parent; - int color; - - if (!node->rb_left) - child = node->rb_right; - else if (!node->rb_right) - child = node->rb_left; - else - { - struct rb_node *old = node, *left; - - node = node->rb_right; - while ((left = node->rb_left) != NULL) - node = left; - child = node->rb_right; - parent = rb_parent(node); - color = rb_color(node); - - if (child) - rb_set_parent(child, parent); - if (parent == old) { - parent->rb_right = child; - parent = node; - } else - parent->rb_left = child; - - node->rb_parent_color = old->rb_parent_color; - node->rb_right = old->rb_right; - node->rb_left = old->rb_left; - - if (rb_parent(old)) - { - if (rb_parent(old)->rb_left == old) - rb_parent(old)->rb_left = node; - else - rb_parent(old)->rb_right = node; - } else - root->rb_node = node; - - rb_set_parent(old->rb_left, node); - if (old->rb_right) - rb_set_parent(old->rb_right, node); - goto color; - } - - parent = rb_parent(node); - color = rb_color(node); - - if (child) - rb_set_parent(child, parent); - if (parent) - { - if (parent->rb_left == node) - parent->rb_left = child; - else - parent->rb_right = child; - } - else - root->rb_node = child; - - color: - if (color == RB_BLACK) - __rb_erase_color(child, parent, root); -} - -/* - * This function returns the first node (in sort order) of the tree. - */ -struct rb_node *rb_first(const struct rb_root *root) -{ - struct rb_node *n; - - n = root->rb_node; - if (!n) - return NULL; - while (n->rb_left) - n = n->rb_left; - return n; -} - -struct rb_node *rb_last(const struct rb_root *root) -{ - struct rb_node *n; - - n = root->rb_node; - if (!n) - return NULL; - while (n->rb_right) - n = n->rb_right; - return n; -} - -struct rb_node *rb_next(const struct rb_node *node) -{ - struct rb_node *parent; - - if (rb_parent(node) == node) - return NULL; - - /* If we have a right-hand child, go down and then left as far - as we can. */ - if (node->rb_right) { - node = node->rb_right; - while (node->rb_left) - node=node->rb_left; - return (struct rb_node *)node; - } - - /* No right-hand children. Everything down and left is - smaller than us, so any 'next' node must be in the general - direction of our parent. Go up the tree; any time the - ancestor is a right-hand child of its parent, keep going - up. First time it's a left-hand child of its parent, said - parent is our 'next' node. */ - while ((parent = rb_parent(node)) && node == parent->rb_right) - node = parent; - - return parent; -} - -struct rb_node *rb_prev(const struct rb_node *node) -{ - struct rb_node *parent; - - if (rb_parent(node) == node) - return NULL; - - /* If we have a left-hand child, go down and then right as far - as we can. */ - if (node->rb_left) { - node = node->rb_left; - while (node->rb_right) - node=node->rb_right; - return (struct rb_node *)node; - } - - /* No left-hand children. 
Go up till we find an ancestor which - is a right-hand child of its parent */ - while ((parent = rb_parent(node)) && node == parent->rb_left) - node = parent; - - return parent; -} - -void rb_replace_node(struct rb_node *victim, struct rb_node *new, - struct rb_root *root) -{ - struct rb_node *parent = rb_parent(victim); - - /* Set the surrounding nodes to point to the replacement */ - if (parent) { - if (victim == parent->rb_left) - parent->rb_left = new; - else - parent->rb_right = new; - } else { - root->rb_node = new; - } - if (victim->rb_left) - rb_set_parent(victim->rb_left, new); - if (victim->rb_right) - rb_set_parent(victim->rb_right, new); - - /* Copy the pointers/colour from the victim to the replacement */ - *new = *victim; -} diff --git a/Documentation/perf_counter/util/rbtree.h b/Documentation/perf_counter/util/rbtree.h deleted file mode 100644 index 6bdc488a47f..00000000000 --- a/Documentation/perf_counter/util/rbtree.h +++ /dev/null @@ -1,171 +0,0 @@ -/* - Red Black Trees - (C) 1999 Andrea Arcangeli - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - linux/include/linux/rbtree.h - - To use rbtrees you'll have to implement your own insert and search cores. - This will avoid us to use callbacks and to drop drammatically performances. - I know it's not the cleaner way, but in C (not in C++) to get - performances and genericity... - - Some example of insert and search follows here. The search is a plain - normal search over an ordered tree. The insert instead must be implemented - int two steps: as first thing the code must insert the element in - order as a red leaf in the tree, then the support library function - rb_insert_color() must be called. Such function will do the - not trivial work to rebalance the rbtree if necessary. 
- ------------------------------------------------------------------------ -static inline struct page * rb_search_page_cache(struct inode * inode, - unsigned long offset) -{ - struct rb_node * n = inode->i_rb_page_cache.rb_node; - struct page * page; - - while (n) - { - page = rb_entry(n, struct page, rb_page_cache); - - if (offset < page->offset) - n = n->rb_left; - else if (offset > page->offset) - n = n->rb_right; - else - return page; - } - return NULL; -} - -static inline struct page * __rb_insert_page_cache(struct inode * inode, - unsigned long offset, - struct rb_node * node) -{ - struct rb_node ** p = &inode->i_rb_page_cache.rb_node; - struct rb_node * parent = NULL; - struct page * page; - - while (*p) - { - parent = *p; - page = rb_entry(parent, struct page, rb_page_cache); - - if (offset < page->offset) - p = &(*p)->rb_left; - else if (offset > page->offset) - p = &(*p)->rb_right; - else - return page; - } - - rb_link_node(node, parent, p); - - return NULL; -} - -static inline struct page * rb_insert_page_cache(struct inode * inode, - unsigned long offset, - struct rb_node * node) -{ - struct page * ret; - if ((ret = __rb_insert_page_cache(inode, offset, node))) - goto out; - rb_insert_color(node, &inode->i_rb_page_cache); - out: - return ret; -} ------------------------------------------------------------------------ -*/ - -#ifndef _LINUX_RBTREE_H -#define _LINUX_RBTREE_H - -#include - -/** - * container_of - cast a member of a structure out to the containing structure - * @ptr: the pointer to the member. - * @type: the type of the container struct this is embedded in. - * @member: the name of the member within the struct. - * - */ -#define container_of(ptr, type, member) ({ \ - const typeof( ((type *)0)->member ) *__mptr = (ptr); \ - (type *)( (char *)__mptr - offsetof(type,member) );}) - -struct rb_node -{ - unsigned long rb_parent_color; -#define RB_RED 0 -#define RB_BLACK 1 - struct rb_node *rb_right; - struct rb_node *rb_left; -} __attribute__((aligned(sizeof(long)))); - /* The alignment might seem pointless, but allegedly CRIS needs it */ - -struct rb_root -{ - struct rb_node *rb_node; -}; - - -#define rb_parent(r) ((struct rb_node *)((r)->rb_parent_color & ~3)) -#define rb_color(r) ((r)->rb_parent_color & 1) -#define rb_is_red(r) (!rb_color(r)) -#define rb_is_black(r) rb_color(r) -#define rb_set_red(r) do { (r)->rb_parent_color &= ~1; } while (0) -#define rb_set_black(r) do { (r)->rb_parent_color |= 1; } while (0) - -static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p) -{ - rb->rb_parent_color = (rb->rb_parent_color & 3) | (unsigned long)p; -} -static inline void rb_set_color(struct rb_node *rb, int color) -{ - rb->rb_parent_color = (rb->rb_parent_color & ~1) | color; -} - -#define RB_ROOT (struct rb_root) { NULL, } -#define rb_entry(ptr, type, member) container_of(ptr, type, member) - -#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) -#define RB_EMPTY_NODE(node) (rb_parent(node) == node) -#define RB_CLEAR_NODE(node) (rb_set_parent(node, node)) - -extern void rb_insert_color(struct rb_node *, struct rb_root *); -extern void rb_erase(struct rb_node *, struct rb_root *); - -/* Find logical next and previous nodes in a tree */ -extern struct rb_node *rb_next(const struct rb_node *); -extern struct rb_node *rb_prev(const struct rb_node *); -extern struct rb_node *rb_first(const struct rb_root *); -extern struct rb_node *rb_last(const struct rb_root *); - -/* Fast replacement of a single node without remove/rebalance/add/rebalance */ -extern void 
rb_replace_node(struct rb_node *victim, struct rb_node *new, - struct rb_root *root); - -static inline void rb_link_node(struct rb_node * node, struct rb_node * parent, - struct rb_node ** rb_link) -{ - node->rb_parent_color = (unsigned long )parent; - node->rb_left = node->rb_right = NULL; - - *rb_link = node; -} - -#endif /* _LINUX_RBTREE_H */ diff --git a/Documentation/perf_counter/util/run-command.c b/Documentation/perf_counter/util/run-command.c deleted file mode 100644 index b2f5e854f40..00000000000 --- a/Documentation/perf_counter/util/run-command.c +++ /dev/null @@ -1,395 +0,0 @@ -#include "cache.h" -#include "run-command.h" -#include "exec_cmd.h" - -static inline void close_pair(int fd[2]) -{ - close(fd[0]); - close(fd[1]); -} - -static inline void dup_devnull(int to) -{ - int fd = open("/dev/null", O_RDWR); - dup2(fd, to); - close(fd); -} - -int start_command(struct child_process *cmd) -{ - int need_in, need_out, need_err; - int fdin[2], fdout[2], fderr[2]; - - /* - * In case of errors we must keep the promise to close FDs - * that have been passed in via ->in and ->out. - */ - - need_in = !cmd->no_stdin && cmd->in < 0; - if (need_in) { - if (pipe(fdin) < 0) { - if (cmd->out > 0) - close(cmd->out); - return -ERR_RUN_COMMAND_PIPE; - } - cmd->in = fdin[1]; - } - - need_out = !cmd->no_stdout - && !cmd->stdout_to_stderr - && cmd->out < 0; - if (need_out) { - if (pipe(fdout) < 0) { - if (need_in) - close_pair(fdin); - else if (cmd->in) - close(cmd->in); - return -ERR_RUN_COMMAND_PIPE; - } - cmd->out = fdout[0]; - } - - need_err = !cmd->no_stderr && cmd->err < 0; - if (need_err) { - if (pipe(fderr) < 0) { - if (need_in) - close_pair(fdin); - else if (cmd->in) - close(cmd->in); - if (need_out) - close_pair(fdout); - else if (cmd->out) - close(cmd->out); - return -ERR_RUN_COMMAND_PIPE; - } - cmd->err = fderr[0]; - } - -#ifndef __MINGW32__ - fflush(NULL); - cmd->pid = fork(); - if (!cmd->pid) { - if (cmd->no_stdin) - dup_devnull(0); - else if (need_in) { - dup2(fdin[0], 0); - close_pair(fdin); - } else if (cmd->in) { - dup2(cmd->in, 0); - close(cmd->in); - } - - if (cmd->no_stderr) - dup_devnull(2); - else if (need_err) { - dup2(fderr[1], 2); - close_pair(fderr); - } - - if (cmd->no_stdout) - dup_devnull(1); - else if (cmd->stdout_to_stderr) - dup2(2, 1); - else if (need_out) { - dup2(fdout[1], 1); - close_pair(fdout); - } else if (cmd->out > 1) { - dup2(cmd->out, 1); - close(cmd->out); - } - - if (cmd->dir && chdir(cmd->dir)) - die("exec %s: cd to %s failed (%s)", cmd->argv[0], - cmd->dir, strerror(errno)); - if (cmd->env) { - for (; *cmd->env; cmd->env++) { - if (strchr(*cmd->env, '=')) - putenv((char*)*cmd->env); - else - unsetenv(*cmd->env); - } - } - if (cmd->preexec_cb) - cmd->preexec_cb(); - if (cmd->perf_cmd) { - execv_perf_cmd(cmd->argv); - } else { - execvp(cmd->argv[0], (char *const*) cmd->argv); - } - exit(127); - } -#else - int s0 = -1, s1 = -1, s2 = -1; /* backups of stdin, stdout, stderr */ - const char **sargv = cmd->argv; - char **env = environ; - - if (cmd->no_stdin) { - s0 = dup(0); - dup_devnull(0); - } else if (need_in) { - s0 = dup(0); - dup2(fdin[0], 0); - } else if (cmd->in) { - s0 = dup(0); - dup2(cmd->in, 0); - } - - if (cmd->no_stderr) { - s2 = dup(2); - dup_devnull(2); - } else if (need_err) { - s2 = dup(2); - dup2(fderr[1], 2); - } - - if (cmd->no_stdout) { - s1 = dup(1); - dup_devnull(1); - } else if (cmd->stdout_to_stderr) { - s1 = dup(1); - dup2(2, 1); - } else if (need_out) { - s1 = dup(1); - dup2(fdout[1], 1); - } else if (cmd->out > 1) { - s1 = dup(1); - 
dup2(cmd->out, 1); - } - - if (cmd->dir) - die("chdir in start_command() not implemented"); - if (cmd->env) { - env = copy_environ(); - for (; *cmd->env; cmd->env++) - env = env_setenv(env, *cmd->env); - } - - if (cmd->perf_cmd) { - cmd->argv = prepare_perf_cmd(cmd->argv); - } - - cmd->pid = mingw_spawnvpe(cmd->argv[0], cmd->argv, env); - - if (cmd->env) - free_environ(env); - if (cmd->perf_cmd) - free(cmd->argv); - - cmd->argv = sargv; - if (s0 >= 0) - dup2(s0, 0), close(s0); - if (s1 >= 0) - dup2(s1, 1), close(s1); - if (s2 >= 0) - dup2(s2, 2), close(s2); -#endif - - if (cmd->pid < 0) { - int err = errno; - if (need_in) - close_pair(fdin); - else if (cmd->in) - close(cmd->in); - if (need_out) - close_pair(fdout); - else if (cmd->out) - close(cmd->out); - if (need_err) - close_pair(fderr); - return err == ENOENT ? - -ERR_RUN_COMMAND_EXEC : - -ERR_RUN_COMMAND_FORK; - } - - if (need_in) - close(fdin[0]); - else if (cmd->in) - close(cmd->in); - - if (need_out) - close(fdout[1]); - else if (cmd->out) - close(cmd->out); - - if (need_err) - close(fderr[1]); - - return 0; -} - -static int wait_or_whine(pid_t pid) -{ - for (;;) { - int status, code; - pid_t waiting = waitpid(pid, &status, 0); - - if (waiting < 0) { - if (errno == EINTR) - continue; - error("waitpid failed (%s)", strerror(errno)); - return -ERR_RUN_COMMAND_WAITPID; - } - if (waiting != pid) - return -ERR_RUN_COMMAND_WAITPID_WRONG_PID; - if (WIFSIGNALED(status)) - return -ERR_RUN_COMMAND_WAITPID_SIGNAL; - - if (!WIFEXITED(status)) - return -ERR_RUN_COMMAND_WAITPID_NOEXIT; - code = WEXITSTATUS(status); - switch (code) { - case 127: - return -ERR_RUN_COMMAND_EXEC; - case 0: - return 0; - default: - return -code; - } - } -} - -int finish_command(struct child_process *cmd) -{ - return wait_or_whine(cmd->pid); -} - -int run_command(struct child_process *cmd) -{ - int code = start_command(cmd); - if (code) - return code; - return finish_command(cmd); -} - -static void prepare_run_command_v_opt(struct child_process *cmd, - const char **argv, - int opt) -{ - memset(cmd, 0, sizeof(*cmd)); - cmd->argv = argv; - cmd->no_stdin = opt & RUN_COMMAND_NO_STDIN ? 1 : 0; - cmd->perf_cmd = opt & RUN_PERF_CMD ? 1 : 0; - cmd->stdout_to_stderr = opt & RUN_COMMAND_STDOUT_TO_STDERR ? 
1 : 0; -} - -int run_command_v_opt(const char **argv, int opt) -{ - struct child_process cmd; - prepare_run_command_v_opt(&cmd, argv, opt); - return run_command(&cmd); -} - -int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env) -{ - struct child_process cmd; - prepare_run_command_v_opt(&cmd, argv, opt); - cmd.dir = dir; - cmd.env = env; - return run_command(&cmd); -} - -#ifdef __MINGW32__ -static __stdcall unsigned run_thread(void *data) -{ - struct async *async = data; - return async->proc(async->fd_for_proc, async->data); -} -#endif - -int start_async(struct async *async) -{ - int pipe_out[2]; - - if (pipe(pipe_out) < 0) - return error("cannot create pipe: %s", strerror(errno)); - async->out = pipe_out[0]; - -#ifndef __MINGW32__ - /* Flush stdio before fork() to avoid cloning buffers */ - fflush(NULL); - - async->pid = fork(); - if (async->pid < 0) { - error("fork (async) failed: %s", strerror(errno)); - close_pair(pipe_out); - return -1; - } - if (!async->pid) { - close(pipe_out[0]); - exit(!!async->proc(pipe_out[1], async->data)); - } - close(pipe_out[1]); -#else - async->fd_for_proc = pipe_out[1]; - async->tid = (HANDLE) _beginthreadex(NULL, 0, run_thread, async, 0, NULL); - if (!async->tid) { - error("cannot create thread: %s", strerror(errno)); - close_pair(pipe_out); - return -1; - } -#endif - return 0; -} - -int finish_async(struct async *async) -{ -#ifndef __MINGW32__ - int ret = 0; - - if (wait_or_whine(async->pid)) - ret = error("waitpid (async) failed"); -#else - DWORD ret = 0; - if (WaitForSingleObject(async->tid, INFINITE) != WAIT_OBJECT_0) - ret = error("waiting for thread failed: %lu", GetLastError()); - else if (!GetExitCodeThread(async->tid, &ret)) - ret = error("cannot get thread exit code: %lu", GetLastError()); - CloseHandle(async->tid); -#endif - return ret; -} - -int run_hook(const char *index_file, const char *name, ...) 
-{
-	struct child_process hook;
-	const char **argv = NULL, *env[2];
-	char index[PATH_MAX];
-	va_list args;
-	int ret;
-	size_t i = 0, alloc = 0;
-
-	if (access(perf_path("hooks/%s", name), X_OK) < 0)
-		return 0;
-
-	va_start(args, name);
-	ALLOC_GROW(argv, i + 1, alloc);
-	argv[i++] = perf_path("hooks/%s", name);
-	while (argv[i-1]) {
-		ALLOC_GROW(argv, i + 1, alloc);
-		argv[i++] = va_arg(args, const char *);
-	}
-	va_end(args);
-
-	memset(&hook, 0, sizeof(hook));
-	hook.argv = argv;
-	hook.no_stdin = 1;
-	hook.stdout_to_stderr = 1;
-	if (index_file) {
-		snprintf(index, sizeof(index), "PERF_INDEX_FILE=%s", index_file);
-		env[0] = index;
-		env[1] = NULL;
-		hook.env = env;
-	}
-
-	ret = start_command(&hook);
-	free(argv);
-	if (ret) {
-		warning("Could not spawn hooks/%s", name);
-		return ret;
-	}
-	ret = finish_command(&hook);
-	if (ret == -ERR_RUN_COMMAND_WAITPID_SIGNAL)
-		warning("hooks/%s exited due to uncaught signal", name);
-
-	return ret;
-}
diff --git a/Documentation/perf_counter/util/run-command.h b/Documentation/perf_counter/util/run-command.h
deleted file mode 100644
index 328289f2366..00000000000
--- a/Documentation/perf_counter/util/run-command.h
+++ /dev/null
@@ -1,93 +0,0 @@
-#ifndef RUN_COMMAND_H
-#define RUN_COMMAND_H
-
-enum {
-	ERR_RUN_COMMAND_FORK = 10000,
-	ERR_RUN_COMMAND_EXEC,
-	ERR_RUN_COMMAND_PIPE,
-	ERR_RUN_COMMAND_WAITPID,
-	ERR_RUN_COMMAND_WAITPID_WRONG_PID,
-	ERR_RUN_COMMAND_WAITPID_SIGNAL,
-	ERR_RUN_COMMAND_WAITPID_NOEXIT,
-};
-#define IS_RUN_COMMAND_ERR(x) (-(x) >= ERR_RUN_COMMAND_FORK)
-
-struct child_process {
-	const char **argv;
-	pid_t pid;
-	/*
-	 * Using .in, .out, .err:
-	 * - Specify 0 for no redirections (child inherits stdin, stdout,
-	 *   stderr from parent).
-	 * - Specify -1 to have a pipe allocated as follows:
-	 *     .in: returns the writable pipe end; parent writes to it,
-	 *          the readable pipe end becomes child's stdin
-	 *     .out, .err: returns the readable pipe end; parent reads from
-	 *          it, the writable pipe end becomes child's stdout/stderr
-	 *   The caller of start_command() must close the returned FDs
-	 *   after it has completed reading from/writing to it!
-	 * - Specify > 0 to set a channel to a particular FD as follows:
-	 *     .in: a readable FD, becomes child's stdin
-	 *     .out: a writable FD, becomes child's stdout/stderr
-	 *     .err > 0 not supported
-	 *   The specified FD is closed by start_command(), even in case
-	 *   of errors!
-	 */
-	int in;
-	int out;
-	int err;
-	const char *dir;
-	const char *const *env;
-	unsigned no_stdin:1;
-	unsigned no_stdout:1;
-	unsigned no_stderr:1;
-	unsigned perf_cmd:1; /* if this is to be a perf sub-command */
-	unsigned stdout_to_stderr:1;
-	void (*preexec_cb)(void);
-};
-
-int start_command(struct child_process *);
-int finish_command(struct child_process *);
-int run_command(struct child_process *);
-
-extern int run_hook(const char *index_file, const char *name, ...);
-
-#define RUN_COMMAND_NO_STDIN 1
-#define RUN_PERF_CMD 2	/* if this is to be a perf sub-command */
-#define RUN_COMMAND_STDOUT_TO_STDERR 4
-int run_command_v_opt(const char **argv, int opt);
-
-/*
- * env (the environment) is to be formatted like environ: "VAR=VALUE".
- * To unset an environment variable use just "VAR".
- */
-int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env);
-
-/*
- * The purpose of the following functions is to feed a pipe by running
- * a function asynchronously and providing output that the caller reads.
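(The .in/.out/.err convention documented in child_process above is used
roughly like this; a sketch, with the command invented for illustration:

	#include <stdio.h>
	#include <string.h>
	#include "run-command.h"

	struct child_process child;
	const char *argv[] = { "hostname", NULL };
	char line[256];
	FILE *out;

	memset(&child, 0, sizeof(child));
	child.argv = argv;
	child.no_stdin = 1;
	child.out = -1;			/* ask start_command() for a pipe */

	if (!start_command(&child)) {
		out = fdopen(child.out, "r");	/* readable pipe end */
		while (fgets(line, sizeof(line), out))
			fputs(line, stdout);
		fclose(out);		/* caller must close the returned fd */
		finish_command(&child);
	}
)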
- * - * It is expected that no synchronization and mutual exclusion between - * the caller and the feed function is necessary so that the function - * can run in a thread without interfering with the caller. - */ -struct async { - /* - * proc writes to fd and closes it; - * returns 0 on success, non-zero on failure - */ - int (*proc)(int fd, void *data); - void *data; - int out; /* caller reads from here and closes it */ -#ifndef __MINGW32__ - pid_t pid; -#else - HANDLE tid; - int fd_for_proc; -#endif -}; - -int start_async(struct async *async); -int finish_async(struct async *async); - -#endif diff --git a/Documentation/perf_counter/util/sigchain.c b/Documentation/perf_counter/util/sigchain.c deleted file mode 100644 index 1118b99e57d..00000000000 --- a/Documentation/perf_counter/util/sigchain.c +++ /dev/null @@ -1,52 +0,0 @@ -#include "sigchain.h" -#include "cache.h" - -#define SIGCHAIN_MAX_SIGNALS 32 - -struct sigchain_signal { - sigchain_fun *old; - int n; - int alloc; -}; -static struct sigchain_signal signals[SIGCHAIN_MAX_SIGNALS]; - -static void check_signum(int sig) -{ - if (sig < 1 || sig >= SIGCHAIN_MAX_SIGNALS) - die("BUG: signal out of range: %d", sig); -} - -int sigchain_push(int sig, sigchain_fun f) -{ - struct sigchain_signal *s = signals + sig; - check_signum(sig); - - ALLOC_GROW(s->old, s->n + 1, s->alloc); - s->old[s->n] = signal(sig, f); - if (s->old[s->n] == SIG_ERR) - return -1; - s->n++; - return 0; -} - -int sigchain_pop(int sig) -{ - struct sigchain_signal *s = signals + sig; - check_signum(sig); - if (s->n < 1) - return 0; - - if (signal(sig, s->old[s->n - 1]) == SIG_ERR) - return -1; - s->n--; - return 0; -} - -void sigchain_push_common(sigchain_fun f) -{ - sigchain_push(SIGINT, f); - sigchain_push(SIGHUP, f); - sigchain_push(SIGTERM, f); - sigchain_push(SIGQUIT, f); - sigchain_push(SIGPIPE, f); -} diff --git a/Documentation/perf_counter/util/sigchain.h b/Documentation/perf_counter/util/sigchain.h deleted file mode 100644 index 618083bce0c..00000000000 --- a/Documentation/perf_counter/util/sigchain.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef SIGCHAIN_H -#define SIGCHAIN_H - -typedef void (*sigchain_fun)(int); - -int sigchain_push(int sig, sigchain_fun f); -int sigchain_pop(int sig); - -void sigchain_push_common(sigchain_fun f); - -#endif /* SIGCHAIN_H */ diff --git a/Documentation/perf_counter/util/strbuf.c b/Documentation/perf_counter/util/strbuf.c deleted file mode 100644 index eaba0930680..00000000000 --- a/Documentation/perf_counter/util/strbuf.c +++ /dev/null @@ -1,359 +0,0 @@ -#include "cache.h" - -int prefixcmp(const char *str, const char *prefix) -{ - for (; ; str++, prefix++) - if (!*prefix) - return 0; - else if (*str != *prefix) - return (unsigned char)*prefix - (unsigned char)*str; -} - -/* - * Used as the default ->buf value, so that people can always assume - * buf is non NULL and ->buf is NUL terminated even for a freshly - * initialized strbuf. - */ -char strbuf_slopbuf[1]; - -void strbuf_init(struct strbuf *sb, size_t hint) -{ - sb->alloc = sb->len = 0; - sb->buf = strbuf_slopbuf; - if (hint) - strbuf_grow(sb, hint); -} - -void strbuf_release(struct strbuf *sb) -{ - if (sb->alloc) { - free(sb->buf); - strbuf_init(sb, 0); - } -} - -char *strbuf_detach(struct strbuf *sb, size_t *sz) -{ - char *res = sb->alloc ? 
sb->buf : NULL; - if (sz) - *sz = sb->len; - strbuf_init(sb, 0); - return res; -} - -void strbuf_attach(struct strbuf *sb, void *buf, size_t len, size_t alloc) -{ - strbuf_release(sb); - sb->buf = buf; - sb->len = len; - sb->alloc = alloc; - strbuf_grow(sb, 0); - sb->buf[sb->len] = '\0'; -} - -void strbuf_grow(struct strbuf *sb, size_t extra) -{ - if (sb->len + extra + 1 <= sb->len) - die("you want to use way too much memory"); - if (!sb->alloc) - sb->buf = NULL; - ALLOC_GROW(sb->buf, sb->len + extra + 1, sb->alloc); -} - -void strbuf_trim(struct strbuf *sb) -{ - char *b = sb->buf; - while (sb->len > 0 && isspace((unsigned char)sb->buf[sb->len - 1])) - sb->len--; - while (sb->len > 0 && isspace(*b)) { - b++; - sb->len--; - } - memmove(sb->buf, b, sb->len); - sb->buf[sb->len] = '\0'; -} -void strbuf_rtrim(struct strbuf *sb) -{ - while (sb->len > 0 && isspace((unsigned char)sb->buf[sb->len - 1])) - sb->len--; - sb->buf[sb->len] = '\0'; -} - -void strbuf_ltrim(struct strbuf *sb) -{ - char *b = sb->buf; - while (sb->len > 0 && isspace(*b)) { - b++; - sb->len--; - } - memmove(sb->buf, b, sb->len); - sb->buf[sb->len] = '\0'; -} - -void strbuf_tolower(struct strbuf *sb) -{ - int i; - for (i = 0; i < sb->len; i++) - sb->buf[i] = tolower(sb->buf[i]); -} - -struct strbuf **strbuf_split(const struct strbuf *sb, int delim) -{ - int alloc = 2, pos = 0; - char *n, *p; - struct strbuf **ret; - struct strbuf *t; - - ret = calloc(alloc, sizeof(struct strbuf *)); - p = n = sb->buf; - while (n < sb->buf + sb->len) { - int len; - n = memchr(n, delim, sb->len - (n - sb->buf)); - if (pos + 1 >= alloc) { - alloc = alloc * 2; - ret = realloc(ret, sizeof(struct strbuf *) * alloc); - } - if (!n) - n = sb->buf + sb->len - 1; - len = n - p + 1; - t = malloc(sizeof(struct strbuf)); - strbuf_init(t, len); - strbuf_add(t, p, len); - ret[pos] = t; - ret[++pos] = NULL; - p = ++n; - } - return ret; -} - -void strbuf_list_free(struct strbuf **sbs) -{ - struct strbuf **s = sbs; - - while (*s) { - strbuf_release(*s); - free(*s++); - } - free(sbs); -} - -int strbuf_cmp(const struct strbuf *a, const struct strbuf *b) -{ - int len = a->len < b->len ? a->len: b->len; - int cmp = memcmp(a->buf, b->buf, len); - if (cmp) - return cmp; - return a->len < b->len ? -1: a->len != b->len; -} - -void strbuf_splice(struct strbuf *sb, size_t pos, size_t len, - const void *data, size_t dlen) -{ - if (pos + len < pos) - die("you want to use way too much memory"); - if (pos > sb->len) - die("`pos' is too far after the end of the buffer"); - if (pos + len > sb->len) - die("`pos + len' is too far after the end of the buffer"); - - if (dlen >= len) - strbuf_grow(sb, dlen - len); - memmove(sb->buf + pos + dlen, - sb->buf + pos + len, - sb->len - pos - len); - memcpy(sb->buf + pos, data, dlen); - strbuf_setlen(sb, sb->len + dlen - len); -} - -void strbuf_insert(struct strbuf *sb, size_t pos, const void *data, size_t len) -{ - strbuf_splice(sb, pos, 0, data, len); -} - -void strbuf_remove(struct strbuf *sb, size_t pos, size_t len) -{ - strbuf_splice(sb, pos, len, NULL, 0); -} - -void strbuf_add(struct strbuf *sb, const void *data, size_t len) -{ - strbuf_grow(sb, len); - memcpy(sb->buf + sb->len, data, len); - strbuf_setlen(sb, sb->len + len); -} - -void strbuf_adddup(struct strbuf *sb, size_t pos, size_t len) -{ - strbuf_grow(sb, len); - memcpy(sb->buf + sb->len, sb->buf + pos, len); - strbuf_setlen(sb, sb->len + len); -} - -void strbuf_addf(struct strbuf *sb, const char *fmt, ...) 
-{ - int len; - va_list ap; - - if (!strbuf_avail(sb)) - strbuf_grow(sb, 64); - va_start(ap, fmt); - len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); - va_end(ap); - if (len < 0) - die("your vsnprintf is broken"); - if (len > strbuf_avail(sb)) { - strbuf_grow(sb, len); - va_start(ap, fmt); - len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); - va_end(ap); - if (len > strbuf_avail(sb)) { - die("this should not happen, your snprintf is broken"); - } - } - strbuf_setlen(sb, sb->len + len); -} - -void strbuf_expand(struct strbuf *sb, const char *format, expand_fn_t fn, - void *context) -{ - for (;;) { - const char *percent; - size_t consumed; - - percent = strchrnul(format, '%'); - strbuf_add(sb, format, percent - format); - if (!*percent) - break; - format = percent + 1; - - consumed = fn(sb, format, context); - if (consumed) - format += consumed; - else - strbuf_addch(sb, '%'); - } -} - -size_t strbuf_expand_dict_cb(struct strbuf *sb, const char *placeholder, - void *context) -{ - struct strbuf_expand_dict_entry *e = context; - size_t len; - - for (; e->placeholder && (len = strlen(e->placeholder)); e++) { - if (!strncmp(placeholder, e->placeholder, len)) { - if (e->value) - strbuf_addstr(sb, e->value); - return len; - } - } - return 0; -} - -size_t strbuf_fread(struct strbuf *sb, size_t size, FILE *f) -{ - size_t res; - size_t oldalloc = sb->alloc; - - strbuf_grow(sb, size); - res = fread(sb->buf + sb->len, 1, size, f); - if (res > 0) - strbuf_setlen(sb, sb->len + res); - else if (res < 0 && oldalloc == 0) - strbuf_release(sb); - return res; -} - -ssize_t strbuf_read(struct strbuf *sb, int fd, size_t hint) -{ - size_t oldlen = sb->len; - size_t oldalloc = sb->alloc; - - strbuf_grow(sb, hint ? hint : 8192); - for (;;) { - ssize_t cnt; - - cnt = read(fd, sb->buf + sb->len, sb->alloc - sb->len - 1); - if (cnt < 0) { - if (oldalloc == 0) - strbuf_release(sb); - else - strbuf_setlen(sb, oldlen); - return -1; - } - if (!cnt) - break; - sb->len += cnt; - strbuf_grow(sb, 8192); - } - - sb->buf[sb->len] = '\0'; - return sb->len - oldlen; -} - -#define STRBUF_MAXLINK (2*PATH_MAX) - -int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint) -{ - size_t oldalloc = sb->alloc; - - if (hint < 32) - hint = 32; - - while (hint < STRBUF_MAXLINK) { - int len; - - strbuf_grow(sb, hint); - len = readlink(path, sb->buf, hint); - if (len < 0) { - if (errno != ERANGE) - break; - } else if (len < hint) { - strbuf_setlen(sb, len); - return 0; - } - - /* .. 
the buffer was too small - try again */ - hint *= 2; - } - if (oldalloc == 0) - strbuf_release(sb); - return -1; -} - -int strbuf_getline(struct strbuf *sb, FILE *fp, int term) -{ - int ch; - - strbuf_grow(sb, 0); - if (feof(fp)) - return EOF; - - strbuf_reset(sb); - while ((ch = fgetc(fp)) != EOF) { - if (ch == term) - break; - strbuf_grow(sb, 1); - sb->buf[sb->len++] = ch; - } - if (ch == EOF && sb->len == 0) - return EOF; - - sb->buf[sb->len] = '\0'; - return 0; -} - -int strbuf_read_file(struct strbuf *sb, const char *path, size_t hint) -{ - int fd, len; - - fd = open(path, O_RDONLY); - if (fd < 0) - return -1; - len = strbuf_read(sb, fd, hint); - close(fd); - if (len < 0) - return -1; - - return len; -} diff --git a/Documentation/perf_counter/util/strbuf.h b/Documentation/perf_counter/util/strbuf.h deleted file mode 100644 index 9ee908a3ec5..00000000000 --- a/Documentation/perf_counter/util/strbuf.h +++ /dev/null @@ -1,137 +0,0 @@ -#ifndef STRBUF_H -#define STRBUF_H - -/* - * Strbuf's can be use in many ways: as a byte array, or to store arbitrary - * long, overflow safe strings. - * - * Strbufs has some invariants that are very important to keep in mind: - * - * 1. the ->buf member is always malloc-ed, hence strbuf's can be used to - * build complex strings/buffers whose final size isn't easily known. - * - * It is NOT legal to copy the ->buf pointer away. - * `strbuf_detach' is the operation that detachs a buffer from its shell - * while keeping the shell valid wrt its invariants. - * - * 2. the ->buf member is a byte array that has at least ->len + 1 bytes - * allocated. The extra byte is used to store a '\0', allowing the ->buf - * member to be a valid C-string. Every strbuf function ensure this - * invariant is preserved. - * - * Note that it is OK to "play" with the buffer directly if you work it - * that way: - * - * strbuf_grow(sb, SOME_SIZE); - * ... Here, the memory array starting at sb->buf, and of length - * ... strbuf_avail(sb) is all yours, and you are sure that - * ... strbuf_avail(sb) is at least SOME_SIZE. - * strbuf_setlen(sb, sb->len + SOME_OTHER_SIZE); - * - * Of course, SOME_OTHER_SIZE must be smaller or equal to strbuf_avail(sb). - * - * Doing so is safe, though if it has to be done in many places, adding the - * missing API to the strbuf module is the way to go. - * - * XXX: do _not_ assume that the area that is yours is of size ->alloc - 1 - * even if it's true in the current implementation. Alloc is somehow a - * "private" member that should not be messed with. - */ - -#include - -extern char strbuf_slopbuf[]; -struct strbuf { - size_t alloc; - size_t len; - char *buf; -}; - -#define STRBUF_INIT { 0, 0, strbuf_slopbuf } - -/*----- strbuf life cycle -----*/ -extern void strbuf_init(struct strbuf *, size_t); -extern void strbuf_release(struct strbuf *); -extern char *strbuf_detach(struct strbuf *, size_t *); -extern void strbuf_attach(struct strbuf *, void *, size_t, size_t); -static inline void strbuf_swap(struct strbuf *a, struct strbuf *b) { - struct strbuf tmp = *a; - *a = *b; - *b = tmp; -} - -/*----- strbuf size related -----*/ -static inline size_t strbuf_avail(const struct strbuf *sb) { - return sb->alloc ? 
sb->alloc - sb->len - 1 : 0; -} - -extern void strbuf_grow(struct strbuf *, size_t); - -static inline void strbuf_setlen(struct strbuf *sb, size_t len) { - if (!sb->alloc) - strbuf_grow(sb, 0); - assert(len < sb->alloc); - sb->len = len; - sb->buf[len] = '\0'; -} -#define strbuf_reset(sb) strbuf_setlen(sb, 0) - -/*----- content related -----*/ -extern void strbuf_trim(struct strbuf *); -extern void strbuf_rtrim(struct strbuf *); -extern void strbuf_ltrim(struct strbuf *); -extern int strbuf_cmp(const struct strbuf *, const struct strbuf *); -extern void strbuf_tolower(struct strbuf *); - -extern struct strbuf **strbuf_split(const struct strbuf *, int delim); -extern void strbuf_list_free(struct strbuf **); - -/*----- add data in your buffer -----*/ -static inline void strbuf_addch(struct strbuf *sb, int c) { - strbuf_grow(sb, 1); - sb->buf[sb->len++] = c; - sb->buf[sb->len] = '\0'; -} - -extern void strbuf_insert(struct strbuf *, size_t pos, const void *, size_t); -extern void strbuf_remove(struct strbuf *, size_t pos, size_t len); - -/* splice pos..pos+len with given data */ -extern void strbuf_splice(struct strbuf *, size_t pos, size_t len, - const void *, size_t); - -extern void strbuf_add(struct strbuf *, const void *, size_t); -static inline void strbuf_addstr(struct strbuf *sb, const char *s) { - strbuf_add(sb, s, strlen(s)); -} -static inline void strbuf_addbuf(struct strbuf *sb, const struct strbuf *sb2) { - strbuf_add(sb, sb2->buf, sb2->len); -} -extern void strbuf_adddup(struct strbuf *sb, size_t pos, size_t len); - -typedef size_t (*expand_fn_t) (struct strbuf *sb, const char *placeholder, void *context); -extern void strbuf_expand(struct strbuf *sb, const char *format, expand_fn_t fn, void *context); -struct strbuf_expand_dict_entry { - const char *placeholder; - const char *value; -}; -extern size_t strbuf_expand_dict_cb(struct strbuf *sb, const char *placeholder, void *context); - -__attribute__((format(printf,2,3))) -extern void strbuf_addf(struct strbuf *sb, const char *fmt, ...); - -extern size_t strbuf_fread(struct strbuf *, size_t, FILE *); -/* XXX: if read fails, any partial read is undone */ -extern ssize_t strbuf_read(struct strbuf *, int fd, size_t hint); -extern int strbuf_read_file(struct strbuf *sb, const char *path, size_t hint); -extern int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint); - -extern int strbuf_getline(struct strbuf *, FILE *, int); - -extern void stripspace(struct strbuf *buf, int skip_comments); -extern int launch_editor(const char *path, struct strbuf *buffer, const char *const *env); - -extern int strbuf_branchname(struct strbuf *sb, const char *name); -extern int strbuf_check_branch_ref(struct strbuf *sb, const char *name); - -#endif /* STRBUF_H */ diff --git a/Documentation/perf_counter/util/string.c b/Documentation/perf_counter/util/string.c deleted file mode 100644 index ec33c0c7f4e..00000000000 --- a/Documentation/perf_counter/util/string.c +++ /dev/null @@ -1,34 +0,0 @@ -#include "string.h" - -static int hex(char ch) -{ - if ((ch >= '0') && (ch <= '9')) - return ch - '0'; - if ((ch >= 'a') && (ch <= 'f')) - return ch - 'a' + 10; - if ((ch >= 'A') && (ch <= 'F')) - return ch - 'A' + 10; - return -1; -} - -/* - * While we find nice hex chars, build a long_val. - * Return number of chars processed. 
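A worked example (editorial; the input string is made up, but it mirrors the /proc/kallsyms lines the symbol code feeds to this helper):

	__u64 addr;
	int used = hex2u64("c0115790 T schedule", &addr);
	/* used == 8, addr == 0xc0115790; parsing stopped at the space */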
- */ -int hex2u64(const char *ptr, __u64 *long_val) -{ - const char *p = ptr; - *long_val = 0; - - while (*p) { - const int hex_val = hex(*p); - - if (hex_val < 0) - break; - - *long_val = (*long_val << 4) | hex_val; - p++; - } - - return p - ptr; -} diff --git a/Documentation/perf_counter/util/string.h b/Documentation/perf_counter/util/string.h deleted file mode 100644 index 72812c1c9a7..00000000000 --- a/Documentation/perf_counter/util/string.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _PERF_STRING_H_ -#define _PERF_STRING_H_ - -#include - -int hex2u64(const char *ptr, __u64 *val); - -#endif diff --git a/Documentation/perf_counter/util/symbol.c b/Documentation/perf_counter/util/symbol.c deleted file mode 100644 index 23f4f7b3b83..00000000000 --- a/Documentation/perf_counter/util/symbol.c +++ /dev/null @@ -1,574 +0,0 @@ -#include "util.h" -#include "../perf.h" -#include "string.h" -#include "symbol.h" - -#include -#include -#include - -const char *sym_hist_filter; - -static struct symbol *symbol__new(uint64_t start, uint64_t len, - const char *name, unsigned int priv_size, - uint64_t obj_start, int verbose) -{ - size_t namelen = strlen(name) + 1; - struct symbol *self = calloc(1, priv_size + sizeof(*self) + namelen); - - if (!self) - return NULL; - - if (verbose >= 2) - printf("new symbol: %016Lx [%08lx]: %s, hist: %p, obj_start: %p\n", - (__u64)start, len, name, self->hist, (void *)obj_start); - - self->obj_start= obj_start; - self->hist = NULL; - self->hist_sum = 0; - - if (sym_hist_filter && !strcmp(name, sym_hist_filter)) - self->hist = calloc(sizeof(__u64), len); - - if (priv_size) { - memset(self, 0, priv_size); - self = ((void *)self) + priv_size; - } - self->start = start; - self->end = start + len - 1; - memcpy(self->name, name, namelen); - - return self; -} - -static void symbol__delete(struct symbol *self, unsigned int priv_size) -{ - free(((void *)self) - priv_size); -} - -static size_t symbol__fprintf(struct symbol *self, FILE *fp) -{ - return fprintf(fp, " %llx-%llx %s\n", - self->start, self->end, self->name); -} - -struct dso *dso__new(const char *name, unsigned int sym_priv_size) -{ - struct dso *self = malloc(sizeof(*self) + strlen(name) + 1); - - if (self != NULL) { - strcpy(self->name, name); - self->syms = RB_ROOT; - self->sym_priv_size = sym_priv_size; - self->find_symbol = dso__find_symbol; - } - - return self; -} - -static void dso__delete_symbols(struct dso *self) -{ - struct symbol *pos; - struct rb_node *next = rb_first(&self->syms); - - while (next) { - pos = rb_entry(next, struct symbol, rb_node); - next = rb_next(&pos->rb_node); - rb_erase(&pos->rb_node, &self->syms); - symbol__delete(pos, self->sym_priv_size); - } -} - -void dso__delete(struct dso *self) -{ - dso__delete_symbols(self); - free(self); -} - -static void dso__insert_symbol(struct dso *self, struct symbol *sym) -{ - struct rb_node **p = &self->syms.rb_node; - struct rb_node *parent = NULL; - const uint64_t ip = sym->start; - struct symbol *s; - - while (*p != NULL) { - parent = *p; - s = rb_entry(parent, struct symbol, rb_node); - if (ip < s->start) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - rb_link_node(&sym->rb_node, parent, p); - rb_insert_color(&sym->rb_node, &self->syms); -} - -struct symbol *dso__find_symbol(struct dso *self, uint64_t ip) -{ - struct rb_node *n; - - if (self == NULL) - return NULL; - - n = self->syms.rb_node; - - while (n) { - struct symbol *s = rb_entry(n, struct symbol, rb_node); - - if (ip < s->start) - n = n->rb_left; - else if (ip > s->end) - n = 
n->rb_right; - else - return s; - } - - return NULL; -} - -size_t dso__fprintf(struct dso *self, FILE *fp) -{ - size_t ret = fprintf(fp, "dso: %s\n", self->name); - - struct rb_node *nd; - for (nd = rb_first(&self->syms); nd; nd = rb_next(nd)) { - struct symbol *pos = rb_entry(nd, struct symbol, rb_node); - ret += symbol__fprintf(pos, fp); - } - - return ret; -} - -static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int verbose) -{ - struct rb_node *nd, *prevnd; - char *line = NULL; - size_t n; - FILE *file = fopen("/proc/kallsyms", "r"); - - if (file == NULL) - goto out_failure; - - while (!feof(file)) { - __u64 start; - struct symbol *sym; - int line_len, len; - char symbol_type; - - line_len = getline(&line, &n, file); - if (line_len < 0) - break; - - if (!line) - goto out_failure; - - line[--line_len] = '\0'; /* \n */ - - len = hex2u64(line, &start); - - len++; - if (len + 2 >= line_len) - continue; - - symbol_type = toupper(line[len]); - /* - * We're interested only in code ('T'ext) - */ - if (symbol_type != 'T' && symbol_type != 'W') - continue; - /* - * Well fix up the end later, when we have all sorted. - */ - sym = symbol__new(start, 0xdead, line + len + 2, - self->sym_priv_size, 0, verbose); - - if (sym == NULL) - goto out_delete_line; - - if (filter && filter(self, sym)) - symbol__delete(sym, self->sym_priv_size); - else - dso__insert_symbol(self, sym); - } - - /* - * Now that we have all sorted out, just set the ->end of all - * symbols - */ - prevnd = rb_first(&self->syms); - - if (prevnd == NULL) - goto out_delete_line; - - for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { - struct symbol *prev = rb_entry(prevnd, struct symbol, rb_node), - *curr = rb_entry(nd, struct symbol, rb_node); - - prev->end = curr->start - 1; - prevnd = nd; - } - - free(line); - fclose(file); - - return 0; - -out_delete_line: - free(line); -out_failure: - return -1; -} - -/** - * elf_symtab__for_each_symbol - iterate thru all the symbols - * - * @self: struct elf_symtab instance to iterate - * @index: uint32_t index - * @sym: GElf_Sym iterator - */ -#define elf_symtab__for_each_symbol(syms, nr_syms, index, sym) \ - for (index = 0, gelf_getsym(syms, index, &sym);\ - index < nr_syms; \ - index++, gelf_getsym(syms, index, &sym)) - -static inline uint8_t elf_sym__type(const GElf_Sym *sym) -{ - return GELF_ST_TYPE(sym->st_info); -} - -static inline int elf_sym__is_function(const GElf_Sym *sym) -{ - return elf_sym__type(sym) == STT_FUNC && - sym->st_name != 0 && - sym->st_shndx != SHN_UNDEF && - sym->st_size != 0; -} - -static inline const char *elf_sym__name(const GElf_Sym *sym, - const Elf_Data *symstrs) -{ - return symstrs->d_buf + sym->st_name; -} - -static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, - GElf_Shdr *shp, const char *name, - size_t *index) -{ - Elf_Scn *sec = NULL; - size_t cnt = 1; - - while ((sec = elf_nextscn(elf, sec)) != NULL) { - char *str; - - gelf_getshdr(sec, shp); - str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); - if (!strcmp(name, str)) { - if (index) - *index = cnt; - break; - } - ++cnt; - } - - return sec; -} - -#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \ - for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \ - idx < nr_entries; \ - ++idx, pos = gelf_getrel(reldata, idx, &pos_mem)) - -#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \ - for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \ - idx < nr_entries; \ - ++idx, pos = gelf_getrela(reldata, idx, &pos_mem)) - -static int 
dso__synthesize_plt_symbols(struct dso *self, Elf *elf, - GElf_Ehdr *ehdr, Elf_Scn *scn_dynsym, - GElf_Shdr *shdr_dynsym, - size_t dynsym_idx, int verbose) -{ - uint32_t nr_rel_entries, idx; - GElf_Sym sym; - __u64 plt_offset; - GElf_Shdr shdr_plt; - struct symbol *f; - GElf_Shdr shdr_rel_plt; - Elf_Data *reldata, *syms, *symstrs; - Elf_Scn *scn_plt_rel, *scn_symstrs; - char sympltname[1024]; - int nr = 0, symidx; - - scn_plt_rel = elf_section_by_name(elf, ehdr, &shdr_rel_plt, - ".rela.plt", NULL); - if (scn_plt_rel == NULL) { - scn_plt_rel = elf_section_by_name(elf, ehdr, &shdr_rel_plt, - ".rel.plt", NULL); - if (scn_plt_rel == NULL) - return 0; - } - - if (shdr_rel_plt.sh_link != dynsym_idx) - return 0; - - if (elf_section_by_name(elf, ehdr, &shdr_plt, ".plt", NULL) == NULL) - return 0; - - /* - * Fetch the relocation section to find the indexes to the GOT - * and the symbols in the .dynsym they refer to. - */ - reldata = elf_getdata(scn_plt_rel, NULL); - if (reldata == NULL) - return -1; - - syms = elf_getdata(scn_dynsym, NULL); - if (syms == NULL) - return -1; - - scn_symstrs = elf_getscn(elf, shdr_dynsym->sh_link); - if (scn_symstrs == NULL) - return -1; - - symstrs = elf_getdata(scn_symstrs, NULL); - if (symstrs == NULL) - return -1; - - nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize; - plt_offset = shdr_plt.sh_offset; - - if (shdr_rel_plt.sh_type == SHT_RELA) { - GElf_Rela pos_mem, *pos; - - elf_section__for_each_rela(reldata, pos, pos_mem, idx, - nr_rel_entries) { - symidx = GELF_R_SYM(pos->r_info); - plt_offset += shdr_plt.sh_entsize; - gelf_getsym(syms, symidx, &sym); - snprintf(sympltname, sizeof(sympltname), - "%s@plt", elf_sym__name(&sym, symstrs)); - - f = symbol__new(plt_offset, shdr_plt.sh_entsize, - sympltname, self->sym_priv_size, 0, verbose); - if (!f) - return -1; - - dso__insert_symbol(self, f); - ++nr; - } - } else if (shdr_rel_plt.sh_type == SHT_REL) { - GElf_Rel pos_mem, *pos; - elf_section__for_each_rel(reldata, pos, pos_mem, idx, - nr_rel_entries) { - symidx = GELF_R_SYM(pos->r_info); - plt_offset += shdr_plt.sh_entsize; - gelf_getsym(syms, symidx, &sym); - snprintf(sympltname, sizeof(sympltname), - "%s@plt", elf_sym__name(&sym, symstrs)); - - f = symbol__new(plt_offset, shdr_plt.sh_entsize, - sympltname, self->sym_priv_size, 0, verbose); - if (!f) - return -1; - - dso__insert_symbol(self, f); - ++nr; - } - } else { - /* - * TODO: There are still one more shdr_rel_plt.sh_type - * I have to investigate, but probably should be ignored. 
- */ - } - - return nr; -} - -static int dso__load_sym(struct dso *self, int fd, const char *name, - symbol_filter_t filter, int verbose) -{ - Elf_Data *symstrs; - uint32_t nr_syms; - int err = -1; - uint32_t index; - GElf_Ehdr ehdr; - GElf_Shdr shdr; - Elf_Data *syms; - GElf_Sym sym; - Elf_Scn *sec, *sec_dynsym; - Elf *elf; - size_t dynsym_idx; - int nr = 0; - - elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); - if (elf == NULL) { - if (verbose) - fprintf(stderr, "%s: cannot read %s ELF file.\n", - __func__, name); - goto out_close; - } - - if (gelf_getehdr(elf, &ehdr) == NULL) { - if (verbose) - fprintf(stderr, "%s: cannot get elf header.\n", __func__); - goto out_elf_end; - } - - /* - * We need to check if we have a .dynsym, so that we can handle the - * .plt, synthesizing its symbols, that aren't on the symtabs (be it - * .dynsym or .symtab) - */ - sec_dynsym = elf_section_by_name(elf, &ehdr, &shdr, - ".dynsym", &dynsym_idx); - if (sec_dynsym != NULL) { - nr = dso__synthesize_plt_symbols(self, elf, &ehdr, - sec_dynsym, &shdr, - dynsym_idx, verbose); - if (nr < 0) - goto out_elf_end; - } - - /* - * But if we have a full .symtab (that is a superset of .dynsym) we - * should add the symbols not in the .dynsyn - */ - sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL); - if (sec == NULL) { - if (sec_dynsym == NULL) - goto out_elf_end; - - sec = sec_dynsym; - gelf_getshdr(sec, &shdr); - } - - syms = elf_getdata(sec, NULL); - if (syms == NULL) - goto out_elf_end; - - sec = elf_getscn(elf, shdr.sh_link); - if (sec == NULL) - goto out_elf_end; - - symstrs = elf_getdata(sec, NULL); - if (symstrs == NULL) - goto out_elf_end; - - nr_syms = shdr.sh_size / shdr.sh_entsize; - - elf_symtab__for_each_symbol(syms, nr_syms, index, sym) { - struct symbol *f; - uint64_t obj_start; - - if (!elf_sym__is_function(&sym)) - continue; - - sec = elf_getscn(elf, sym.st_shndx); - if (!sec) - goto out_elf_end; - - gelf_getshdr(sec, &shdr); - obj_start = sym.st_value; - - sym.st_value -= shdr.sh_addr - shdr.sh_offset; - - f = symbol__new(sym.st_value, sym.st_size, - elf_sym__name(&sym, symstrs), - self->sym_priv_size, obj_start, verbose); - if (!f) - goto out_elf_end; - - if (filter && filter(self, f)) - symbol__delete(f, self->sym_priv_size); - else { - dso__insert_symbol(self, f); - nr++; - } - } - - err = nr; -out_elf_end: - elf_end(elf); -out_close: - return err; -} - -int dso__load(struct dso *self, symbol_filter_t filter, int verbose) -{ - int size = strlen(self->name) + sizeof("/usr/lib/debug%s.debug"); - char *name = malloc(size); - int variant = 0; - int ret = -1; - int fd; - - if (!name) - return -1; - -more: - do { - switch (variant) { - case 0: /* Fedora */ - snprintf(name, size, "/usr/lib/debug%s.debug", self->name); - break; - case 1: /* Ubuntu */ - snprintf(name, size, "/usr/lib/debug%s", self->name); - break; - case 2: /* Sane people */ - snprintf(name, size, "%s", self->name); - break; - - default: - goto out; - } - variant++; - - fd = open(name, O_RDONLY); - } while (fd < 0); - - ret = dso__load_sym(self, fd, name, filter, verbose); - close(fd); - - /* - * Some people seem to have debuginfo files _WITHOUT_ debug info!?!? 
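To make the search order above concrete, a hypothetical walk-through (mine, not from the source): for a DSO named /lib/libc-2.9.so the open() loop probes, in order,

	/* variant 0 (Fedora): /usr/lib/debug/lib/libc-2.9.so.debug */
	/* variant 1 (Ubuntu): /usr/lib/debug/lib/libc-2.9.so */
	/* variant 2 (plain):  /lib/libc-2.9.so */
	struct dso *dso = dso__new("/lib/libc-2.9.so", 0);
	int nr_syms = dso__load(dso, NULL, 0);	/* symbol count, or -1 */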
- */ - if (!ret) - goto more; - -out: - free(name); - return ret; -} - -static int dso__load_vmlinux(struct dso *self, const char *vmlinux, - symbol_filter_t filter, int verbose) -{ - int err, fd = open(vmlinux, O_RDONLY); - - if (fd < 0) - return -1; - - err = dso__load_sym(self, fd, vmlinux, filter, verbose); - close(fd); - - return err; -} - -int dso__load_kernel(struct dso *self, const char *vmlinux, - symbol_filter_t filter, int verbose) -{ - int err = -1; - - if (vmlinux) - err = dso__load_vmlinux(self, vmlinux, filter, verbose); - - if (err) - err = dso__load_kallsyms(self, filter, verbose); - - return err; -} - -void symbol__init(void) -{ - elf_version(EV_CURRENT); -} diff --git a/Documentation/perf_counter/util/symbol.h b/Documentation/perf_counter/util/symbol.h deleted file mode 100644 index 4839d68f14f..00000000000 --- a/Documentation/perf_counter/util/symbol.h +++ /dev/null @@ -1,47 +0,0 @@ -#ifndef _PERF_SYMBOL_ -#define _PERF_SYMBOL_ 1 - -#include -#include "list.h" -#include "rbtree.h" - -struct symbol { - struct rb_node rb_node; - __u64 start; - __u64 end; - __u64 obj_start; - __u64 hist_sum; - __u64 *hist; - char name[0]; -}; - -struct dso { - struct list_head node; - struct rb_root syms; - unsigned int sym_priv_size; - struct symbol *(*find_symbol)(struct dso *, uint64_t ip); - char name[0]; -}; - -const char *sym_hist_filter; - -typedef int (*symbol_filter_t)(struct dso *self, struct symbol *sym); - -struct dso *dso__new(const char *name, unsigned int sym_priv_size); -void dso__delete(struct dso *self); - -static inline void *dso__sym_priv(struct dso *self, struct symbol *sym) -{ - return ((void *)sym) - self->sym_priv_size; -} - -struct symbol *dso__find_symbol(struct dso *self, uint64_t ip); - -int dso__load_kernel(struct dso *self, const char *vmlinux, - symbol_filter_t filter, int verbose); -int dso__load(struct dso *self, symbol_filter_t filter, int verbose); - -size_t dso__fprintf(struct dso *self, FILE *fp); - -void symbol__init(void); -#endif /* _PERF_SYMBOL_ */ diff --git a/Documentation/perf_counter/util/usage.c b/Documentation/perf_counter/util/usage.c deleted file mode 100644 index 2cad286e437..00000000000 --- a/Documentation/perf_counter/util/usage.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * GIT - The information manager from hell - * - * Copyright (C) Linus Torvalds, 2005 - */ -#include "util.h" - -static void report(const char *prefix, const char *err, va_list params) -{ - char msg[1024]; - vsnprintf(msg, sizeof(msg), err, params); - fprintf(stderr, "%s%s\n", prefix, msg); -} - -static NORETURN void usage_builtin(const char *err) -{ - fprintf(stderr, "\n usage: %s\n", err); - exit(129); -} - -static NORETURN void die_builtin(const char *err, va_list params) -{ - report("fatal: ", err, params); - exit(128); -} - -static void error_builtin(const char *err, va_list params) -{ - report("error: ", err, params); -} - -static void warn_builtin(const char *warn, va_list params) -{ - report("warning: ", warn, params); -} - -/* If we are in a dlopen()ed .so write to a global variable would segfault - * (ugh), so keep things static. 
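The function pointers below exist so a caller can reroute fatal errors; a hedged sketch (editorial addition; quiet_die() is a hypothetical handler, set_die_routine() is the real entry point declared in util.h):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include "util.h"

/* Hypothetical replacement: same signature as die_builtin() above. */
static NORETURN void quiet_die(const char *err, va_list params)
{
	vfprintf(stderr, err, params);	/* skip the "fatal: " prefix */
	fputc('\n', stderr);
	exit(128);
}

static void install_error_handlers(void)
{
	set_die_routine(quiet_die);
}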
*/ -static void (*usage_routine)(const char *err) NORETURN = usage_builtin; -static void (*die_routine)(const char *err, va_list params) NORETURN = die_builtin; -static void (*error_routine)(const char *err, va_list params) = error_builtin; -static void (*warn_routine)(const char *err, va_list params) = warn_builtin; - -void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN) -{ - die_routine = routine; -} - -void usage(const char *err) -{ - usage_routine(err); -} - -void die(const char *err, ...) -{ - va_list params; - - va_start(params, err); - die_routine(err, params); - va_end(params); -} - -int error(const char *err, ...) -{ - va_list params; - - va_start(params, err); - error_routine(err, params); - va_end(params); - return -1; -} - -void warning(const char *warn, ...) -{ - va_list params; - - va_start(params, warn); - warn_routine(warn, params); - va_end(params); -} diff --git a/Documentation/perf_counter/util/util.h b/Documentation/perf_counter/util/util.h deleted file mode 100644 index 76590a16c27..00000000000 --- a/Documentation/perf_counter/util/util.h +++ /dev/null @@ -1,410 +0,0 @@ -#ifndef GIT_COMPAT_UTIL_H -#define GIT_COMPAT_UTIL_H - -#define _FILE_OFFSET_BITS 64 - -#ifndef FLEX_ARRAY -/* - * See if our compiler is known to support flexible array members. - */ -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) -# define FLEX_ARRAY /* empty */ -#elif defined(__GNUC__) -# if (__GNUC__ >= 3) -# define FLEX_ARRAY /* empty */ -# else -# define FLEX_ARRAY 0 /* older GNU extension */ -# endif -#endif - -/* - * Otherwise, default to safer but a bit wasteful traditional style - */ -#ifndef FLEX_ARRAY -# define FLEX_ARRAY 1 -#endif -#endif - -#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) - -#ifdef __GNUC__ -#define TYPEOF(x) (__typeof__(x)) -#else -#define TYPEOF(x) -#endif - -#define MSB(x, bits) ((x) & TYPEOF(x)(~0ULL << (sizeof(x) * 8 - (bits)))) -#define HAS_MULTI_BITS(i) ((i) & ((i) - 1)) /* checks if an integer has more than 1 bit set */ - -/* Approximation of the length of the decimal representation of this type. */ -#define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1) - -#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__USLC__) && !defined(_M_UNIX) -#define _XOPEN_SOURCE 600 /* glibc2 and AIX 5.3L need 500, OpenBSD needs 600 for S_ISLNK() */ -#define _XOPEN_SOURCE_EXTENDED 1 /* AIX 5.3L needs this */ -#endif -#define _ALL_SOURCE 1 -#define _GNU_SOURCE 1 -#define _BSD_SOURCE 1 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifndef __MINGW32__ -#include -#include -#include -#include -#ifndef NO_SYS_SELECT_H -#include -#endif -#include -#include -#include -#include -#include -#include -#if defined(__CYGWIN__) -#undef _XOPEN_SOURCE -#include -#define _XOPEN_SOURCE 600 -#include "compat/cygwin.h" -#else -#undef _ALL_SOURCE /* AIX 5.3L defines a struct list with _ALL_SOURCE. */ -#include -#define _ALL_SOURCE 1 -#endif -#else /* __MINGW32__ */ -/* pull in Windows compatibility stuff */ -#include "compat/mingw.h" -#endif /* __MINGW32__ */ - -#ifndef NO_ICONV -#include -#endif - -#ifndef NO_OPENSSL -#include -#include -#endif - -/* On most systems would have given us this, but - * not on some systems (e.g. GNU/Hurd). 
- */ -#ifndef PATH_MAX -#define PATH_MAX 4096 -#endif - -#ifndef PRIuMAX -#define PRIuMAX "llu" -#endif - -#ifndef PRIu32 -#define PRIu32 "u" -#endif - -#ifndef PRIx32 -#define PRIx32 "x" -#endif - -#ifndef PATH_SEP -#define PATH_SEP ':' -#endif - -#ifndef STRIP_EXTENSION -#define STRIP_EXTENSION "" -#endif - -#ifndef has_dos_drive_prefix -#define has_dos_drive_prefix(path) 0 -#endif - -#ifndef is_dir_sep -#define is_dir_sep(c) ((c) == '/') -#endif - -#ifdef __GNUC__ -#define NORETURN __attribute__((__noreturn__)) -#else -#define NORETURN -#ifndef __attribute__ -#define __attribute__(x) -#endif -#endif - -/* General helper functions */ -extern void usage(const char *err) NORETURN; -extern void die(const char *err, ...) NORETURN __attribute__((format (printf, 1, 2))); -extern int error(const char *err, ...) __attribute__((format (printf, 1, 2))); -extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2))); - -extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN); - -extern int prefixcmp(const char *str, const char *prefix); -extern time_t tm_to_time_t(const struct tm *tm); - -static inline const char *skip_prefix(const char *str, const char *prefix) -{ - size_t len = strlen(prefix); - return strncmp(str, prefix, len) ? NULL : str + len; -} - -#if defined(NO_MMAP) || defined(USE_WIN32_MMAP) - -#ifndef PROT_READ -#define PROT_READ 1 -#define PROT_WRITE 2 -#define MAP_PRIVATE 1 -#define MAP_FAILED ((void*)-1) -#endif - -#define mmap git_mmap -#define munmap git_munmap -extern void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset); -extern int git_munmap(void *start, size_t length); - -#else /* NO_MMAP || USE_WIN32_MMAP */ - -#include - -#endif /* NO_MMAP || USE_WIN32_MMAP */ - -#ifdef NO_MMAP - -/* This value must be multiple of (pagesize * 2) */ -#define DEFAULT_PACKED_GIT_WINDOW_SIZE (1 * 1024 * 1024) - -#else /* NO_MMAP */ - -/* This value must be multiple of (pagesize * 2) */ -#define DEFAULT_PACKED_GIT_WINDOW_SIZE \ - (sizeof(void*) >= 8 \ - ? 1 * 1024 * 1024 * 1024 \ - : 32 * 1024 * 1024) - -#endif /* NO_MMAP */ - -#ifdef NO_ST_BLOCKS_IN_STRUCT_STAT -#define on_disk_bytes(st) ((st).st_size) -#else -#define on_disk_bytes(st) ((st).st_blocks * 512) -#endif - -#define DEFAULT_PACKED_GIT_LIMIT \ - ((1024L * 1024L) * (sizeof(void*) >= 8 ? 8192 : 256)) - -#ifdef NO_PREAD -#define pread git_pread -extern ssize_t git_pread(int fd, void *buf, size_t count, off_t offset); -#endif -/* - * Forward decl that will remind us if its twin in cache.h changes. - * This function is used in compat/pread.c. But we can't include - * cache.h there. 
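A short usage sketch (editorial; fd and the buffer size are assumptions): unlike a bare read(), read_in_full() keeps reading until it has the requested byte count, EOF or an error, so a short positive return means the stream ended early:

	char buf[256];
	ssize_t got = read_in_full(fd, buf, sizeof(buf));

	if (got < 0)
		die("read failed: %s", strerror(errno));
	if (got < (ssize_t)sizeof(buf))
		; /* EOF (or an error after partial data) before 256 bytes */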
- */ -extern ssize_t read_in_full(int fd, void *buf, size_t count); - -#ifdef NO_SETENV -#define setenv gitsetenv -extern int gitsetenv(const char *, const char *, int); -#endif - -#ifdef NO_MKDTEMP -#define mkdtemp gitmkdtemp -extern char *gitmkdtemp(char *); -#endif - -#ifdef NO_UNSETENV -#define unsetenv gitunsetenv -extern void gitunsetenv(const char *); -#endif - -#ifdef NO_STRCASESTR -#define strcasestr gitstrcasestr -extern char *gitstrcasestr(const char *haystack, const char *needle); -#endif - -#ifdef NO_STRLCPY -#define strlcpy gitstrlcpy -extern size_t gitstrlcpy(char *, const char *, size_t); -#endif - -#ifdef NO_STRTOUMAX -#define strtoumax gitstrtoumax -extern uintmax_t gitstrtoumax(const char *, char **, int); -#endif - -#ifdef NO_HSTRERROR -#define hstrerror githstrerror -extern const char *githstrerror(int herror); -#endif - -#ifdef NO_MEMMEM -#define memmem gitmemmem -void *gitmemmem(const void *haystack, size_t haystacklen, - const void *needle, size_t needlelen); -#endif - -#ifdef FREAD_READS_DIRECTORIES -#ifdef fopen -#undef fopen -#endif -#define fopen(a,b) git_fopen(a,b) -extern FILE *git_fopen(const char*, const char*); -#endif - -#ifdef SNPRINTF_RETURNS_BOGUS -#define snprintf git_snprintf -extern int git_snprintf(char *str, size_t maxsize, - const char *format, ...); -#define vsnprintf git_vsnprintf -extern int git_vsnprintf(char *str, size_t maxsize, - const char *format, va_list ap); -#endif - -#ifdef __GLIBC_PREREQ -#if __GLIBC_PREREQ(2, 1) -#define HAVE_STRCHRNUL -#endif -#endif - -#ifndef HAVE_STRCHRNUL -#define strchrnul gitstrchrnul -static inline char *gitstrchrnul(const char *s, int c) -{ - while (*s && *s != c) - s++; - return (char *)s; -} -#endif - -/* - * Wrappers: - */ -extern char *xstrdup(const char *str); -extern void *xmalloc(size_t size); -extern void *xmemdupz(const void *data, size_t len); -extern char *xstrndup(const char *str, size_t len); -extern void *xrealloc(void *ptr, size_t size); -extern void *xcalloc(size_t nmemb, size_t size); -extern void *xmmap(void *start, size_t length, int prot, int flags, int fd, off_t offset); -extern ssize_t xread(int fd, void *buf, size_t len); -extern ssize_t xwrite(int fd, const void *buf, size_t len); -extern int xdup(int fd); -extern FILE *xfdopen(int fd, const char *mode); -extern int xmkstemp(char *template); - -static inline size_t xsize_t(off_t len) -{ - return (size_t)len; -} - -static inline int has_extension(const char *filename, const char *ext) -{ - size_t len = strlen(filename); - size_t extlen = strlen(ext); - return len > extlen && !memcmp(filename + len - extlen, ext, extlen); -} - -/* Sane ctype - no locale, and works with signed chars */ -#undef isascii -#undef isspace -#undef isdigit -#undef isalpha -#undef isalnum -#undef tolower -#undef toupper -extern unsigned char sane_ctype[256]; -#define GIT_SPACE 0x01 -#define GIT_DIGIT 0x02 -#define GIT_ALPHA 0x04 -#define GIT_GLOB_SPECIAL 0x08 -#define GIT_REGEX_SPECIAL 0x10 -#define sane_istest(x,mask) ((sane_ctype[(unsigned char)(x)] & (mask)) != 0) -#define isascii(x) (((x) & ~0x7f) == 0) -#define isspace(x) sane_istest(x,GIT_SPACE) -#define isdigit(x) sane_istest(x,GIT_DIGIT) -#define isalpha(x) sane_istest(x,GIT_ALPHA) -#define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT) -#define is_glob_special(x) sane_istest(x,GIT_GLOB_SPECIAL) -#define is_regex_special(x) sane_istest(x,GIT_GLOB_SPECIAL | GIT_REGEX_SPECIAL) -#define tolower(x) sane_case((unsigned char)(x), 0x20) -#define toupper(x) sane_case((unsigned char)(x), 0) - -static inline int 
sane_case(int x, int high) -{ - if (sane_istest(x, GIT_ALPHA)) - x = (x & ~0x20) | high; - return x; -} - -static inline int strtoul_ui(char const *s, int base, unsigned int *result) -{ - unsigned long ul; - char *p; - - errno = 0; - ul = strtoul(s, &p, base); - if (errno || *p || p == s || (unsigned int) ul != ul) - return -1; - *result = ul; - return 0; -} - -static inline int strtol_i(char const *s, int base, int *result) -{ - long ul; - char *p; - - errno = 0; - ul = strtol(s, &p, base); - if (errno || *p || p == s || (int) ul != ul) - return -1; - *result = ul; - return 0; -} - -#ifdef INTERNAL_QSORT -void git_qsort(void *base, size_t nmemb, size_t size, - int(*compar)(const void *, const void *)); -#define qsort git_qsort -#endif - -#ifndef DIR_HAS_BSD_GROUP_SEMANTICS -# define FORCE_DIR_SET_GID S_ISGID -#else -# define FORCE_DIR_SET_GID 0 -#endif - -#ifdef NO_NSEC -#undef USE_NSEC -#define ST_CTIME_NSEC(st) 0 -#define ST_MTIME_NSEC(st) 0 -#else -#ifdef USE_ST_TIMESPEC -#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctimespec.tv_nsec)) -#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtimespec.tv_nsec)) -#else -#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctim.tv_nsec)) -#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtim.tv_nsec)) -#endif -#endif - -#endif diff --git a/Documentation/perf_counter/util/wrapper.c b/Documentation/perf_counter/util/wrapper.c deleted file mode 100644 index 6350d65f6d9..00000000000 --- a/Documentation/perf_counter/util/wrapper.c +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Various trivial helper wrappers around standard functions - */ -#include "cache.h" - -/* - * There's no pack memory to release - but stay close to the Git - * version so wrap this away: - */ -static inline void release_pack_memory(size_t size, int flag) -{ -} - -char *xstrdup(const char *str) -{ - char *ret = strdup(str); - if (!ret) { - release_pack_memory(strlen(str) + 1, -1); - ret = strdup(str); - if (!ret) - die("Out of memory, strdup failed"); - } - return ret; -} - -void *xmalloc(size_t size) -{ - void *ret = malloc(size); - if (!ret && !size) - ret = malloc(1); - if (!ret) { - release_pack_memory(size, -1); - ret = malloc(size); - if (!ret && !size) - ret = malloc(1); - if (!ret) - die("Out of memory, malloc failed"); - } -#ifdef XMALLOC_POISON - memset(ret, 0xA5, size); -#endif - return ret; -} - -/* - * xmemdupz() allocates (len + 1) bytes of memory, duplicates "len" bytes of - * "data" to the allocated memory, zero terminates the allocated memory, - * and returns a pointer to the allocated memory. If the allocation fails, - * the program dies. - */ -void *xmemdupz(const void *data, size_t len) -{ - char *p = xmalloc(len + 1); - memcpy(p, data, len); - p[len] = '\0'; - return p; -} - -char *xstrndup(const char *str, size_t len) -{ - char *p = memchr(str, '\0', len); - return xmemdupz(str, p ? 
p - str : len); -} - -void *xrealloc(void *ptr, size_t size) -{ - void *ret = realloc(ptr, size); - if (!ret && !size) - ret = realloc(ptr, 1); - if (!ret) { - release_pack_memory(size, -1); - ret = realloc(ptr, size); - if (!ret && !size) - ret = realloc(ptr, 1); - if (!ret) - die("Out of memory, realloc failed"); - } - return ret; -} - -void *xcalloc(size_t nmemb, size_t size) -{ - void *ret = calloc(nmemb, size); - if (!ret && (!nmemb || !size)) - ret = calloc(1, 1); - if (!ret) { - release_pack_memory(nmemb * size, -1); - ret = calloc(nmemb, size); - if (!ret && (!nmemb || !size)) - ret = calloc(1, 1); - if (!ret) - die("Out of memory, calloc failed"); - } - return ret; -} - -void *xmmap(void *start, size_t length, - int prot, int flags, int fd, off_t offset) -{ - void *ret = mmap(start, length, prot, flags, fd, offset); - if (ret == MAP_FAILED) { - if (!length) - return NULL; - release_pack_memory(length, fd); - ret = mmap(start, length, prot, flags, fd, offset); - if (ret == MAP_FAILED) - die("Out of memory? mmap failed: %s", strerror(errno)); - } - return ret; -} - -/* - * xread() is the same a read(), but it automatically restarts read() - * operations with a recoverable error (EAGAIN and EINTR). xread() - * DOES NOT GUARANTEE that "len" bytes is read even if the data is available. - */ -ssize_t xread(int fd, void *buf, size_t len) -{ - ssize_t nr; - while (1) { - nr = read(fd, buf, len); - if ((nr < 0) && (errno == EAGAIN || errno == EINTR)) - continue; - return nr; - } -} - -/* - * xwrite() is the same a write(), but it automatically restarts write() - * operations with a recoverable error (EAGAIN and EINTR). xwrite() DOES NOT - * GUARANTEE that "len" bytes is written even if the operation is successful. - */ -ssize_t xwrite(int fd, const void *buf, size_t len) -{ - ssize_t nr; - while (1) { - nr = write(fd, buf, len); - if ((nr < 0) && (errno == EAGAIN || errno == EINTR)) - continue; - return nr; - } -} - -ssize_t read_in_full(int fd, void *buf, size_t count) -{ - char *p = buf; - ssize_t total = 0; - - while (count > 0) { - ssize_t loaded = xread(fd, p, count); - if (loaded <= 0) - return total ? total : loaded; - count -= loaded; - p += loaded; - total += loaded; - } - - return total; -} - -ssize_t write_in_full(int fd, const void *buf, size_t count) -{ - const char *p = buf; - ssize_t total = 0; - - while (count > 0) { - ssize_t written = xwrite(fd, p, count); - if (written < 0) - return -1; - if (!written) { - errno = ENOSPC; - return -1; - } - count -= written; - p += written; - total += written; - } - - return total; -} - -int xdup(int fd) -{ - int ret = dup(fd); - if (ret < 0) - die("dup failed: %s", strerror(errno)); - return ret; -} - -FILE *xfdopen(int fd, const char *mode) -{ - FILE *stream = fdopen(fd, mode); - if (stream == NULL) - die("Out of memory? 
fdopen failed: %s", strerror(errno)); - return stream; -} - -int xmkstemp(char *template) -{ - int fd; - - fd = mkstemp(template); - if (fd < 0) - die("Unable to create temporary file: %s", strerror(errno)); - return fd; -} diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore new file mode 100644 index 00000000000..d69a759a104 --- /dev/null +++ b/tools/perf/.gitignore @@ -0,0 +1,16 @@ +PERF-BUILD-OPTIONS +PERF-CFLAGS +PERF-GUI-VARS +PERF-VERSION-FILE +perf +perf-help +perf-record +perf-report +perf-stat +perf-top +perf*.1 +perf*.xml +common-cmds.h +tags +TAGS +cscope* diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile new file mode 100644 index 00000000000..5457192e1b4 --- /dev/null +++ b/tools/perf/Documentation/Makefile @@ -0,0 +1,300 @@ +MAN1_TXT= \ + $(filter-out $(addsuffix .txt, $(ARTICLES) $(SP_ARTICLES)), \ + $(wildcard perf-*.txt)) \ + perf.txt +MAN5_TXT= +MAN7_TXT= + +MAN_TXT = $(MAN1_TXT) $(MAN5_TXT) $(MAN7_TXT) +MAN_XML=$(patsubst %.txt,%.xml,$(MAN_TXT)) +MAN_HTML=$(patsubst %.txt,%.html,$(MAN_TXT)) + +DOC_HTML=$(MAN_HTML) + +ARTICLES = +# with their own formatting rules. +SP_ARTICLES = +API_DOCS = $(patsubst %.txt,%,$(filter-out technical/api-index-skel.txt technical/api-index.txt, $(wildcard technical/api-*.txt))) +SP_ARTICLES += $(API_DOCS) +SP_ARTICLES += technical/api-index + +DOC_HTML += $(patsubst %,%.html,$(ARTICLES) $(SP_ARTICLES)) + +DOC_MAN1=$(patsubst %.txt,%.1,$(MAN1_TXT)) +DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT)) +DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT)) + +prefix?=$(HOME) +bindir?=$(prefix)/bin +htmldir?=$(prefix)/share/doc/perf-doc +pdfdir?=$(prefix)/share/doc/perf-doc +mandir?=$(prefix)/share/man +man1dir=$(mandir)/man1 +man5dir=$(mandir)/man5 +man7dir=$(mandir)/man7 +# DESTDIR= + +ASCIIDOC=asciidoc +ASCIIDOC_EXTRA = +MANPAGE_XSL = manpage-normal.xsl +XMLTO_EXTRA = +INSTALL?=install +RM ?= rm -f +DOC_REF = origin/man +HTML_REF = origin/html + +infodir?=$(prefix)/share/info +MAKEINFO=makeinfo +INSTALL_INFO=install-info +DOCBOOK2X_TEXI=docbook2x-texi +DBLATEX=dblatex +ifndef PERL_PATH + PERL_PATH = /usr/bin/perl +endif + +-include ../config.mak.autogen +-include ../config.mak + +# +# For asciidoc ... +# -7.1.2, no extra settings are needed. +# 8.0-, set ASCIIDOC8. +# + +# +# For docbook-xsl ... +# -1.68.1, set ASCIIDOC_NO_ROFF? (based on changelog from 1.73.0) +# 1.69.0, no extra settings are needed? +# 1.69.1-1.71.0, set DOCBOOK_SUPPRESS_SP? +# 1.71.1, no extra settings are needed? +# 1.72.0, set DOCBOOK_XSL_172. +# 1.73.0-, set ASCIIDOC_NO_ROFF +# + +# +# If you had been using DOCBOOK_XSL_172 in an attempt to get rid +# of 'the ".ft C" problem' in your generated manpages, and you +# instead ended up with weird characters around callouts, try +# using ASCIIDOC_NO_ROFF instead (it works fine with ASCIIDOC8). +# + +ifdef ASCIIDOC8 +ASCIIDOC_EXTRA += -a asciidoc7compatible +endif +ifdef DOCBOOK_XSL_172 +ASCIIDOC_EXTRA += -a perf-asciidoc-no-roff +MANPAGE_XSL = manpage-1.72.xsl +else + ifdef ASCIIDOC_NO_ROFF + # docbook-xsl after 1.72 needs the regular XSL, but will not + # pass-thru raw roff codes from asciidoc.conf, so turn them off. + ASCIIDOC_EXTRA += -a perf-asciidoc-no-roff + endif +endif +ifdef MAN_BOLD_LITERAL +XMLTO_EXTRA += -m manpage-bold-literal.xsl +endif +ifdef DOCBOOK_SUPPRESS_SP +XMLTO_EXTRA += -m manpage-suppress-sp.xsl +endif + +SHELL_PATH ?= $(SHELL) +# Shell quote; +SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH)) + +# +# Please note that there is a minor bug in asciidoc. 
+# The version after 6.0.3 _will_ include the patch found here: +# http://marc.theaimsgroup.com/?l=perf&m=111558757202243&w=2 +# +# Until that version is released you may have to apply the patch +# yourself - yes, all 6 characters of it! +# + +QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir +QUIET_SUBDIR1 = + +ifneq ($(findstring $(MAKEFLAGS),w),w) +PRINT_DIR = --no-print-directory +else # "make -w" +NO_SUBDIR = : +endif + +ifneq ($(findstring $(MAKEFLAGS),s),s) +ifndef V + QUIET_ASCIIDOC = @echo ' ' ASCIIDOC $@; + QUIET_XMLTO = @echo ' ' XMLTO $@; + QUIET_DB2TEXI = @echo ' ' DB2TEXI $@; + QUIET_MAKEINFO = @echo ' ' MAKEINFO $@; + QUIET_DBLATEX = @echo ' ' DBLATEX $@; + QUIET_XSLTPROC = @echo ' ' XSLTPROC $@; + QUIET_GEN = @echo ' ' GEN $@; + QUIET_STDERR = 2> /dev/null + QUIET_SUBDIR0 = +@subdir= + QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \ + $(MAKE) $(PRINT_DIR) -C $$subdir + export V +endif +endif + +all: html man + +html: $(DOC_HTML) + +$(DOC_HTML) $(DOC_MAN1) $(DOC_MAN5) $(DOC_MAN7): asciidoc.conf + +man: man1 man5 man7 +man1: $(DOC_MAN1) +man5: $(DOC_MAN5) +man7: $(DOC_MAN7) + +info: perf.info perfman.info + +pdf: user-manual.pdf + +install: install-man + +install-man: man + $(INSTALL) -d -m 755 $(DESTDIR)$(man1dir) +# $(INSTALL) -d -m 755 $(DESTDIR)$(man5dir) +# $(INSTALL) -d -m 755 $(DESTDIR)$(man7dir) + $(INSTALL) -m 644 $(DOC_MAN1) $(DESTDIR)$(man1dir) +# $(INSTALL) -m 644 $(DOC_MAN5) $(DESTDIR)$(man5dir) +# $(INSTALL) -m 644 $(DOC_MAN7) $(DESTDIR)$(man7dir) + +install-info: info + $(INSTALL) -d -m 755 $(DESTDIR)$(infodir) + $(INSTALL) -m 644 perf.info perfman.info $(DESTDIR)$(infodir) + if test -r $(DESTDIR)$(infodir)/dir; then \ + $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perf.info ;\ + $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perfman.info ;\ + else \ + echo "No directory found in $(DESTDIR)$(infodir)" >&2 ; \ + fi + +install-pdf: pdf + $(INSTALL) -d -m 755 $(DESTDIR)$(pdfdir) + $(INSTALL) -m 644 user-manual.pdf $(DESTDIR)$(pdfdir) + +install-html: html + '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir) + +../PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE + $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) PERF-VERSION-FILE + +-include ../PERF-VERSION-FILE + +# +# Determine "include::" file references in asciidoc files. 
+# +doc.dep : $(wildcard *.txt) build-docdep.perl + $(QUIET_GEN)$(RM) $@+ $@ && \ + $(PERL_PATH) ./build-docdep.perl >$@+ $(QUIET_STDERR) && \ + mv $@+ $@ + +-include doc.dep + +cmds_txt = cmds-ancillaryinterrogators.txt \ + cmds-ancillarymanipulators.txt \ + cmds-mainporcelain.txt \ + cmds-plumbinginterrogators.txt \ + cmds-plumbingmanipulators.txt \ + cmds-synchingrepositories.txt \ + cmds-synchelpers.txt \ + cmds-purehelpers.txt \ + cmds-foreignscminterface.txt + +$(cmds_txt): cmd-list.made + +cmd-list.made: cmd-list.perl ../command-list.txt $(MAN1_TXT) + $(QUIET_GEN)$(RM) $@ && \ + $(PERL_PATH) ./cmd-list.perl ../command-list.txt $(QUIET_STDERR) && \ + date >$@ + +clean: + $(RM) *.xml *.xml+ *.html *.html+ *.1 *.5 *.7 + $(RM) *.texi *.texi+ *.texi++ perf.info perfman.info + $(RM) howto-index.txt howto/*.html doc.dep + $(RM) technical/api-*.html technical/api-index.txt + $(RM) $(cmds_txt) *.made + +$(MAN_HTML): %.html : %.txt + $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ + $(ASCIIDOC) -b xhtml11 -d manpage -f asciidoc.conf \ + $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \ + mv $@+ $@ + +%.1 %.5 %.7 : %.xml + $(QUIET_XMLTO)$(RM) $@ && \ + xmlto -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $< + +%.xml : %.txt + $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ + $(ASCIIDOC) -b docbook -d manpage -f asciidoc.conf \ + $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \ + mv $@+ $@ + +XSLT = docbook.xsl +XSLTOPTS = --xinclude --stringparam html.stylesheet docbook-xsl.css + +user-manual.html: user-manual.xml + $(QUIET_XSLTPROC)xsltproc $(XSLTOPTS) -o $@ $(XSLT) $< + +perf.info: user-manual.texi + $(QUIET_MAKEINFO)$(MAKEINFO) --no-split -o $@ user-manual.texi + +user-manual.texi: user-manual.xml + $(QUIET_DB2TEXI)$(RM) $@+ $@ && \ + $(DOCBOOK2X_TEXI) user-manual.xml --encoding=UTF-8 --to-stdout >$@++ && \ + $(PERL_PATH) fix-texi.perl <$@++ >$@+ && \ + rm $@++ && \ + mv $@+ $@ + +user-manual.pdf: user-manual.xml + $(QUIET_DBLATEX)$(RM) $@+ $@ && \ + $(DBLATEX) -o $@+ -p /etc/asciidoc/dblatex/asciidoc-dblatex.xsl -s /etc/asciidoc/dblatex/asciidoc-dblatex.sty $< && \ + mv $@+ $@ + +perfman.texi: $(MAN_XML) cat-texi.perl + $(QUIET_DB2TEXI)$(RM) $@+ $@ && \ + ($(foreach xml,$(MAN_XML),$(DOCBOOK2X_TEXI) --encoding=UTF-8 \ + --to-stdout $(xml) &&) true) > $@++ && \ + $(PERL_PATH) cat-texi.perl $@ <$@++ >$@+ && \ + rm $@++ && \ + mv $@+ $@ + +perfman.info: perfman.texi + $(QUIET_MAKEINFO)$(MAKEINFO) --no-split --no-validate $*.texi + +$(patsubst %.txt,%.texi,$(MAN_TXT)): %.texi : %.xml + $(QUIET_DB2TEXI)$(RM) $@+ $@ && \ + $(DOCBOOK2X_TEXI) --to-stdout $*.xml >$@+ && \ + mv $@+ $@ + +howto-index.txt: howto-index.sh $(wildcard howto/*.txt) + $(QUIET_GEN)$(RM) $@+ $@ && \ + '$(SHELL_PATH_SQ)' ./howto-index.sh $(wildcard howto/*.txt) >$@+ && \ + mv $@+ $@ + +$(patsubst %,%.html,$(ARTICLES)) : %.html : %.txt + $(QUIET_ASCIIDOC)$(ASCIIDOC) -b xhtml11 $*.txt + +WEBDOC_DEST = /pub/software/tools/perf/docs + +$(patsubst %.txt,%.html,$(wildcard howto/*.txt)): %.html : %.txt + $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ + sed -e '1,/^$$/d' $< | $(ASCIIDOC) -b xhtml11 - >$@+ && \ + mv $@+ $@ + +install-webdoc : html + '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(WEBDOC_DEST) + +quick-install: quick-install-man + +quick-install-man: + '$(SHELL_PATH_SQ)' ./install-doc-quick.sh $(DOC_REF) $(DESTDIR)$(mandir) + +quick-install-html: + '$(SHELL_PATH_SQ)' ./install-doc-quick.sh $(HTML_REF) $(DESTDIR)$(htmldir) + +.PHONY: .FORCE-PERF-VERSION-FILE diff --git a/tools/perf/Documentation/asciidoc.conf 
b/tools/perf/Documentation/asciidoc.conf new file mode 100644 index 00000000000..356b23a4033 --- /dev/null +++ b/tools/perf/Documentation/asciidoc.conf @@ -0,0 +1,91 @@ +## linkperf: macro +# +# Usage: linkperf:command[manpage-section] +# +# Note, {0} is the manpage section, while {target} is the command. +# +# Show PERF link as: (
); if section is defined, else just show +# the command. + +[macros] +(?su)[\\]?(?Plinkperf):(?P\S*?)\[(?P.*?)\]= + +[attributes] +asterisk=* +plus=+ +caret=^ +startsb=[ +endsb=] +tilde=~ + +ifdef::backend-docbook[] +[linkperf-inlinemacro] +{0%{target}} +{0#} +{0#{target}{0}} +{0#} +endif::backend-docbook[] + +ifdef::backend-docbook[] +ifndef::perf-asciidoc-no-roff[] +# "unbreak" docbook-xsl v1.68 for manpages. v1.69 works with or without this. +# v1.72 breaks with this because it replaces dots not in roff requests. +[listingblock] +{title} + +ifdef::doctype-manpage[] + .ft C +endif::doctype-manpage[] +| +ifdef::doctype-manpage[] + .ft +endif::doctype-manpage[] + +{title#} +endif::perf-asciidoc-no-roff[] + +ifdef::perf-asciidoc-no-roff[] +ifdef::doctype-manpage[] +# The following two small workarounds insert a simple paragraph after screen +[listingblock] +{title} + +| + +{title#} + +[verseblock] +{title} +{title%} +{title#} +| + +{title#} +{title%} +endif::doctype-manpage[] +endif::perf-asciidoc-no-roff[] +endif::backend-docbook[] + +ifdef::doctype-manpage[] +ifdef::backend-docbook[] +[header] +template::[header-declarations] + + +{mantitle} +{manvolnum} +perf +{perf_version} +perf Manual + + + {manname} + {manpurpose} + +endif::backend-docbook[] +endif::doctype-manpage[] + +ifdef::backend-xhtml11[] +[linkperf-inlinemacro] +{target}{0?({0})} +endif::backend-xhtml11[] diff --git a/tools/perf/Documentation/manpage-1.72.xsl b/tools/perf/Documentation/manpage-1.72.xsl new file mode 100644 index 00000000000..b4d315cb8c4 --- /dev/null +++ b/tools/perf/Documentation/manpage-1.72.xsl @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/tools/perf/Documentation/manpage-base.xsl b/tools/perf/Documentation/manpage-base.xsl new file mode 100644 index 00000000000..a264fa61609 --- /dev/null +++ b/tools/perf/Documentation/manpage-base.xsl @@ -0,0 +1,35 @@ + + + + + + + + + + + + + + sp + + + + + + + + br + + + diff --git a/tools/perf/Documentation/manpage-bold-literal.xsl b/tools/perf/Documentation/manpage-bold-literal.xsl new file mode 100644 index 00000000000..608eb5df628 --- /dev/null +++ b/tools/perf/Documentation/manpage-bold-literal.xsl @@ -0,0 +1,17 @@ + + + + + + + fB + + + fR + + + diff --git a/tools/perf/Documentation/manpage-normal.xsl b/tools/perf/Documentation/manpage-normal.xsl new file mode 100644 index 00000000000..a48f5b11f3d --- /dev/null +++ b/tools/perf/Documentation/manpage-normal.xsl @@ -0,0 +1,13 @@ + + + + + + +\ +. + + diff --git a/tools/perf/Documentation/manpage-suppress-sp.xsl b/tools/perf/Documentation/manpage-suppress-sp.xsl new file mode 100644 index 00000000000..a63c7632a87 --- /dev/null +++ b/tools/perf/Documentation/manpage-suppress-sp.xsl @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt new file mode 100644 index 00000000000..c9dcade0683 --- /dev/null +++ b/tools/perf/Documentation/perf-annotate.txt @@ -0,0 +1,29 @@ +perf-annotate(1) +============== + +NAME +---- +perf-annotate - Read perf.data (created by perf record) and display annotated code + +SYNOPSIS +-------- +[verse] +'perf annotate' [-i | --input=file] symbol_name + +DESCRIPTION +----------- +This command reads the input file and displays an annotated version of the +code. If the object file has debug symbols then the source code will be +displayed alongside assembly code. + +If there is no debug info in the object, then annotated assembly is displayed. + +OPTIONS +------- +-i:: +--input=:: + Input file name. 
(default: perf.data)
+
+SEE ALSO
+--------
+linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-help.txt b/tools/perf/Documentation/perf-help.txt
new file mode 100644
index 00000000000..514391818d1
--- /dev/null
+++ b/tools/perf/Documentation/perf-help.txt
@@ -0,0 +1,38 @@
+perf-help(1)
+============
+
+NAME
+----
+perf-help - display help information about perf
+
+SYNOPSIS
+--------
+'perf help' [-a|--all] [COMMAND]
+
+DESCRIPTION
+-----------
+
+With no options and no COMMAND given, the synopsis of the 'perf'
+command and a list of the most commonly used perf commands are printed
+on the standard output.
+
+If the option '--all' or '-a' is given, then all available commands are
+printed on the standard output.
+
+If a perf command is named, a manual page for that command is brought
+up. The 'man' program is used by default for this purpose, but this
+can be overridden by other options or configuration variables.
+
+Note that `perf --help ...` is identical to `perf help ...` because the
+former is internally converted into the latter.
+
+OPTIONS
+-------
+-a::
+--all::
+	Prints all the available commands on the standard output. This
+	option supersedes any other option.
+
+PERF
+----
+Part of the linkperf:perf[1] suite
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
new file mode 100644
index 00000000000..8290b942266
--- /dev/null
+++ b/tools/perf/Documentation/perf-list.txt
@@ -0,0 +1,25 @@
+perf-list(1)
+============
+
+NAME
+----
+perf-list - List all symbolic event types
+
+SYNOPSIS
+--------
+[verse]
+'perf list'
+
+DESCRIPTION
+-----------
+This command displays the symbolic event types which can be selected in the
+various perf commands with the -e option.
+
+OPTIONS
+-------
+None
+
+SEE ALSO
+--------
+linkperf:perf-stat[1], linkperf:perf-top[1],
+linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
new file mode 100644
index 00000000000..1dbc1eeb4c0
--- /dev/null
+++ b/tools/perf/Documentation/perf-record.txt
@@ -0,0 +1,42 @@
+perf-record(1)
+==============
+
+NAME
+----
+perf-record - Run a command and record its profile into perf.data
+
+SYNOPSIS
+--------
+[verse]
+'perf record' [-e <EVENT> | --event=EVENT] [-l] [-a] <command>
+'perf record' [-e <EVENT> | --event=EVENT] [-l] [-a] -- <command> [<options>]
+
+DESCRIPTION
+-----------
+This command runs a command and gathers a performance counter profile
+from it, into perf.data - without displaying anything.
+
+This file can then be inspected later on, using 'perf report'.
+
+
+OPTIONS
+-------
+<command>...::
+	Any command you can specify in a shell.
+
+-e::
+--event=::
+	Select the PMU event. Selection can be a symbolic event name
+	(use 'perf list' to list all events) or a raw PMU
+	event (eventsel+umask) in the form of rNNN where NNN is a
+	hexadecimal event descriptor.
+
+-a::
+	system-wide collection
+
+-l::
+	scale counter values
+
+SEE ALSO
+--------
+linkperf:perf-stat[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
new file mode 100644
index 00000000000..52d3fc6846a
--- /dev/null
+++ b/tools/perf/Documentation/perf-report.txt
@@ -0,0 +1,26 @@
+perf-report(1)
+==============
+
+NAME
+----
+perf-report - Read perf.data (created by perf record) and display the profile
+
+SYNOPSIS
+--------
+[verse]
+'perf report' [-i <file> | --input=file]
+
+DESCRIPTION
+-----------
+This command displays the performance counter profile information recorded
+via perf record.
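
To make the workflow these pages describe concrete, a minimal session could
look like the following (illustrative only: './myprog' is a stand-in for any
workload, and the report layout depends on what was sampled):

    $ perf record ./myprog        # sample the workload into ./perf.data
    $ perf report                 # read perf.data, show the sorted profile
    $ perf annotate main          # per-instruction hits for a symbol 'main'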
+
+OPTIONS
+-------
+-i::
+--input=::
+	Input file name. (default: perf.data)
+
+SEE ALSO
+--------
+linkperf:perf-stat[1]
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
new file mode 100644
index 00000000000..c368a72721d
--- /dev/null
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -0,0 +1,66 @@
+perf-stat(1)
+============
+
+NAME
+----
+perf-stat - Run a command and gather performance counter statistics
+
+SYNOPSIS
+--------
+[verse]
+'perf stat' [-e <EVENT> | --event=EVENT] [-l] [-a] <command>
+'perf stat' [-e <EVENT> | --event=EVENT] [-l] [-a] -- <command> [<options>]
+
+DESCRIPTION
+-----------
+This command runs a command and gathers performance counter statistics
+from it.
+
+
+OPTIONS
+-------
+<command>...::
+	Any command you can specify in a shell.
+
+
+-e::
+--event=::
+	Select the PMU event. Selection can be a symbolic event name
+	(use 'perf list' to list all events) or a raw PMU
+	event (eventsel+umask) in the form of rNNN where NNN is a
+	hexadecimal event descriptor.
+
+-i::
+--inherit::
+	child tasks inherit counters
+-p::
+--pid=::
+	stat events on existing pid
+
+-a::
+	system-wide collection
+
+-l::
+	scale counter values
+
+EXAMPLES
+--------
+
+$ perf stat -- make -j
+
+ Performance counter stats for 'make -j':
+
+    8117.370256  task clock ticks     #      11.281 CPU utilization factor
+            678  context switches     #       0.000 M/sec
+            133  CPU migrations       #       0.000 M/sec
+         235724  pagefaults           #       0.029 M/sec
+    24821162526  CPU cycles           #    3057.784 M/sec
+    18687303457  instructions         #    2302.138 M/sec
+      172158895  cache references     #      21.209 M/sec
+       27075259  cache misses         #       3.335 M/sec
+
+ Wall-clock time elapsed:   719.554352 msecs
+
+SEE ALSO
+--------
+linkperf:perf-top[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
new file mode 100644
index 00000000000..539d0128972
--- /dev/null
+++ b/tools/perf/Documentation/perf-top.txt
@@ -0,0 +1,39 @@
+perf-top(1)
+===========
+
+NAME
+----
+perf-top - Run a command and profile it
+
+SYNOPSIS
+--------
+[verse]
+'perf top' [-e <EVENT> | --event=EVENT] [-l] [-a] <command>
+
+DESCRIPTION
+-----------
+This command runs a command and gathers a performance counter profile
+from it.
+
+
+OPTIONS
+-------
+<command>...::
+	Any command you can specify in a shell.
+
+-e::
+--event=::
+	Select the PMU event. Selection can be a symbolic event name
+	(use 'perf list' to list all events) or a raw PMU
+	event (eventsel+umask) in the form of rNNN where NNN is a
+	hexadecimal event descriptor.
+
+-a::
+	system-wide collection
+
+-l::
+	scale counter values
+
+SEE ALSO
+--------
+linkperf:perf-stat[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf.txt b/tools/perf/Documentation/perf.txt
new file mode 100644
index 00000000000..69c83255719
--- /dev/null
+++ b/tools/perf/Documentation/perf.txt
@@ -0,0 +1,24 @@
+perf(1)
+=======
+
+NAME
+----
+perf - Performance analysis tools for Linux
+
+SYNOPSIS
+--------
+[verse]
+'perf' [--version] [--help] COMMAND [ARGS]
+
+DESCRIPTION
+-----------
+Performance counters for Linux are a new kernel-based subsystem
+that provides a framework for all things performance analysis. It
+covers hardware-level (CPU/PMU, Performance Monitoring Unit) features
+and software features (software counters, tracepoints) as well.
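
The COMMAND [ARGS] form in the synopsis is handled by a git-style builtin
dispatcher in perf.c. The cmd_*() handlers are real (cmd_annotate() appears
later in this patch, and the builtin-*.o objects are listed in the Makefile),
but the table and lookup below are a simplified sketch of the pattern, not
the literal perf.c code:

    #include <string.h>

    /* Handlers implemented by the builtin-*.c files added in this patch: */
    int cmd_annotate(int argc, const char **argv, const char *prefix);
    int cmd_help(int argc, const char **argv, const char *prefix);
    int cmd_list(int argc, const char **argv, const char *prefix);
    int cmd_record(int argc, const char **argv, const char *prefix);
    int cmd_report(int argc, const char **argv, const char *prefix);
    int cmd_stat(int argc, const char **argv, const char *prefix);
    int cmd_top(int argc, const char **argv, const char *prefix);

    struct cmd_struct {
            const char *cmd;                               /* subcommand name */
            int (*fn)(int, const char **, const char *);   /* its handler */
    };

    static struct cmd_struct commands[] = {
            { "annotate",   cmd_annotate },
            { "help",       cmd_help },
            { "list",       cmd_list },
            { "record",     cmd_record },
            { "report",     cmd_report },
            { "stat",       cmd_stat },
            { "top",        cmd_top },
    };

    /* Match argv[0] against the table and run the builtin: */
    static int run_builtin(int argc, const char **argv, const char *prefix)
    {
            unsigned int i;

            for (i = 0; i < sizeof(commands) / sizeof(commands[0]); i++) {
                    if (!strcmp(argv[0], commands[i].cmd))
                            return commands[i].fn(argc, argv, prefix);
            }
            return -1;      /* not a known subcommand */
    }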
+
+SEE ALSO
+--------
+linkperf:perf-stat[1], linkperf:perf-top[1],
+linkperf:perf-record[1], linkperf:perf-report[1],
+linkperf:perf-list[1]
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
new file mode 100644
index 00000000000..0cbd5d6874e
--- /dev/null
+++ b/tools/perf/Makefile
@@ -0,0 +1,929 @@
+# The default target of this Makefile is...
+all::
+
+# Define V=1 to have a more verbose compile.
+#
+# Define SNPRINTF_RETURNS_BOGUS if you are on a system where snprintf()
+# or vsnprintf() return -1 instead of the number of characters which would
+# have been written to the final string if enough space had been available.
+#
+# Define FREAD_READS_DIRECTORIES if you are on a system which succeeds
+# when attempting to read from an fopen'ed directory.
+#
+# Define NO_OPENSSL environment variable if you do not have OpenSSL.
+# This also implies MOZILLA_SHA1.
+#
+# Define CURLDIR=/foo/bar if your curl header and library files are in
+# /foo/bar/include and /foo/bar/lib directories.
+#
+# Define EXPATDIR=/foo/bar if your expat header and library files are in
+# /foo/bar/include and /foo/bar/lib directories.
+#
+# Define NO_D_INO_IN_DIRENT if you don't have d_ino in your struct dirent.
+#
+# Define NO_D_TYPE_IN_DIRENT if your platform defines DT_UNKNOWN but lacks
+# d_type in struct dirent (latest Cygwin -- will be fixed soonish).
+#
+# Define NO_C99_FORMAT if your formatted IO functions (printf/scanf et al.)
+# do not support the 'size specifiers' introduced by C99, namely ll, hh,
+# j, z, t (representing long long int, char, intmax_t, size_t, ptrdiff_t).
+# Some C compilers supported these specifiers prior to C99 as an extension.
+#
+# Define NO_STRCASESTR if you don't have strcasestr.
+#
+# Define NO_MEMMEM if you don't have memmem.
+#
+# Define NO_STRTOUMAX if you don't have strtoumax in the C library.
+# If your compiler also does not support long long or does not have
+# strtoull, define NO_STRTOULL.
+#
+# Define NO_SETENV if you don't have setenv in the C library.
+#
+# Define NO_UNSETENV if you don't have unsetenv in the C library.
+#
+# Define NO_MKDTEMP if you don't have mkdtemp in the C library.
+#
+# Define NO_SYS_SELECT_H if you don't have sys/select.h.
+#
+# Define NO_SYMLINK_HEAD if you never want .perf/HEAD to be a symbolic link.
+# Enable it on Windows. By default, symrefs are still used.
+#
+# Define NO_SVN_TESTS if you want to skip time-consuming SVN interoperability
+# tests. These tests take up a significant amount of the total test time
+# but are not needed unless you plan to talk to SVN repos.
+#
+# Define NO_FINK if you are building on Darwin/Mac OS X, have Fink
+# installed in /sw, but don't want PERF to link against any libraries
+# installed there. If defined you may specify your own (or Fink's)
+# include directories and library directories by defining CFLAGS
+# and LDFLAGS appropriately.
+#
+# Define NO_DARWIN_PORTS if you are building on Darwin/Mac OS X,
+# have DarwinPorts installed in /opt/local, but don't want PERF to
+# link against any libraries installed there. If defined you may
+# specify your own (or DarwinPort's) include directories and
+# library directories by defining CFLAGS and LDFLAGS appropriately.
+#
+# Define PPC_SHA1 environment variable when running make to make use of
+# a bundled SHA1 routine optimized for PowerPC.
+#
+# Define ARM_SHA1 environment variable when running make to make use of
+# a bundled SHA1 routine optimized for ARM.
+#
+# Define MOZILLA_SHA1 environment variable when running make to make use of
+# a bundled SHA1 routine coming from Mozilla. It is GPL'd and should be fast
+# on non-x86 architectures (e.g. PowerPC), while the OpenSSL version (default
+# choice) has a very fast version optimized for i586.
+#
+# Define NEEDS_SSL_WITH_CRYPTO if you need -lcrypto with -lssl (Darwin).
+#
+# Define NEEDS_LIBICONV if linking with libc is not enough (Darwin).
+#
+# Define NEEDS_SOCKET if linking with libc is not enough (SunOS,
+# Patrick Mauritz).
+#
+# Define NO_MMAP if you want to avoid mmap.
+#
+# Define NO_PTHREADS if you do not have or do not want to use Pthreads.
+#
+# Define NO_PREAD if you have a problem with the pread() system call (e.g.
+# cygwin.dll before v1.5.22).
+#
+# Define NO_FAST_WORKING_DIRECTORY if accessing objects in pack files is
+# generally faster on your platform than accessing the working directory.
+#
+# Define NO_TRUSTABLE_FILEMODE if your filesystem may claim to support
+# the executable mode bit, but doesn't really do so.
+#
+# Define NO_IPV6 if you lack IPv6 support and getaddrinfo().
+#
+# Define NO_SOCKADDR_STORAGE if your platform does not have struct
+# sockaddr_storage.
+#
+# Define NO_ICONV if your libc does not properly support iconv.
+#
+# Define OLD_ICONV if your library has an old iconv(), where the second
+# (input buffer pointer) parameter is declared with type (const char **).
+#
+# Define NO_DEFLATE_BOUND if your zlib does not have deflateBound.
+#
+# Define NO_R_TO_GCC_LINKER if your gcc does not like "-R/path/lib"
+# that tells runtime paths to dynamic libraries;
+# "-Wl,-rpath=/path/lib" is used instead.
+#
+# Define USE_NSEC below if you want perf to care about sub-second file mtimes
+# and ctimes. Note that you need recent glibc (at least 2.2.4) for this, and
+# it will BREAK YOUR LOCAL DIFFS! show-diff and anything using it will likely
+# randomly break unless your underlying filesystem supports those sub-second
+# times (my ext3 doesn't).
+#
+# Define USE_ST_TIMESPEC if your "struct stat" uses "st_ctimespec" instead of
+# "st_ctim".
+#
+# Define NO_NSEC if your "struct stat" does not have "st_ctim.tv_nsec"
+# available. This automatically turns USE_NSEC off.
+#
+# Define USE_STDEV below if you want perf to care about the underlying device
+# change being considered an inode change from the update-index perspective.
+#
+# Define NO_ST_BLOCKS_IN_STRUCT_STAT if your platform does not have st_blocks
+# field that counts the on-disk footprint in 512-byte blocks.
+#
+# Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8.
+#
+# Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72.
+#
+# Define NO_PERL_MAKEMAKER if you cannot use Makefiles generated by perl's
+# MakeMaker (e.g. using ActiveState under Cygwin).
+#
+# Define NO_PERL if you do not want Perl scripts or libraries at all.
+#
+# Define INTERNAL_QSORT to use Git's implementation of qsort(), which
+# is a simplified version of the merge sort used in glibc. This is
+# recommended if Git triggers O(n^2) behavior in your platform's qsort().
+#
+# Define NO_EXTERNAL_GREP if you don't want "perf grep" to ever call
+# your external grep (e.g., if your system lacks grep, if its grep is
+# broken, or spawning an external process is slower than the built-in grep perf has).
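
Most of the NO_* knobs above share one mechanism, visible in the ifdef blocks
further down: defining the symbol adds a -DNO_FOO to COMPAT_CFLAGS and a
compat/foo.o to COMPAT_OBJS. For a feel of what such a shim contains, here is
a rough sketch of a strcasestr() fallback along the lines of what a
compat/strcasestr.c would provide (an illustration written for this note,
not the shipped file):

    #include <ctype.h>
    #include <string.h>

    /* Case-insensitive substring search for platforms lacking strcasestr(). */
    char *compat_strcasestr(const char *haystack, const char *needle)
    {
            int nlen = strlen(needle);
            int hlen = strlen(haystack) - nlen + 1;
            int i;

            /* Try each position in the haystack long enough to hold needle: */
            for (i = 0; i < hlen; i++) {
                    int j;
                    for (j = 0; j < nlen; j++) {
                            unsigned char c1 = haystack[i + j];
                            unsigned char c2 = needle[j];
                            if (toupper(c1) != toupper(c2))
                                    goto next;
                    }
                    return (char *)haystack + i;
    next:
                    ;
            }
            return NULL;
    }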
+ +PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE + @$(SHELL_PATH) util/PERF-VERSION-GEN +-include PERF-VERSION-FILE + +uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not') +uname_M := $(shell sh -c 'uname -m 2>/dev/null || echo not') +uname_O := $(shell sh -c 'uname -o 2>/dev/null || echo not') +uname_R := $(shell sh -c 'uname -r 2>/dev/null || echo not') +uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not') +uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') + +# CFLAGS and LDFLAGS are for the users to override from the command line. + +CFLAGS = -ggdb3 -Wall -Werror -Wstrict-prototypes -Wmissing-declarations -Wmissing-prototypes -std=gnu99 -Wdeclaration-after-statement -O6 +LDFLAGS = -lpthread -lrt -lelf +ALL_CFLAGS = $(CFLAGS) +ALL_LDFLAGS = $(LDFLAGS) +STRIP ?= strip + +# Among the variables below, these: +# perfexecdir +# template_dir +# mandir +# infodir +# htmldir +# ETC_PERFCONFIG (but not sysconfdir) +# can be specified as a relative path some/where/else; +# this is interpreted as relative to $(prefix) and "perf" at +# runtime figures out where they are based on the path to the executable. +# This can help installing the suite in a relocatable way. + +prefix = $(HOME) +bindir_relative = bin +bindir = $(prefix)/$(bindir_relative) +mandir = share/man +infodir = share/info +perfexecdir = libexec/perf-core +sharedir = $(prefix)/share +template_dir = share/perf-core/templates +htmldir = share/doc/perf-doc +ifeq ($(prefix),/usr) +sysconfdir = /etc +ETC_PERFCONFIG = $(sysconfdir)/perfconfig +else +sysconfdir = $(prefix)/etc +ETC_PERFCONFIG = etc/perfconfig +endif +lib = lib +# DESTDIR= + +export prefix bindir sharedir sysconfdir + +CC = gcc +AR = ar +RM = rm -f +TAR = tar +FIND = find +INSTALL = install +RPMBUILD = rpmbuild +PTHREAD_LIBS = -lpthread + +# sparse is architecture-neutral, which means that we need to tell it +# explicitly what architecture to check for. Fix this up for yours.. +SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ + + + +### --- END CONFIGURATION SECTION --- + +# Those must not be GNU-specific; they are shared with perl/ which may +# be built by a different compiler. (Note that this is an artifact now +# but it still might be nice to keep that distinction.) +BASIC_CFLAGS = +BASIC_LDFLAGS = + +# Guard against environment variables +BUILTIN_OBJS = +BUILT_INS = +COMPAT_CFLAGS = +COMPAT_OBJS = +LIB_H = +LIB_OBJS = +SCRIPT_PERL = +SCRIPT_SH = +TEST_PROGRAMS = + +# +# No scripts right now: +# + +# SCRIPT_SH += perf-am.sh + +# +# No Perl scripts right now: +# + +# SCRIPT_PERL += perf-add--interactive.perl + +SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) \ + $(patsubst %.perl,%,$(SCRIPT_PERL)) + +# Empty... +EXTRA_PROGRAMS = + +# ... and all the rest that could be moved out of bindir to perfexecdir +PROGRAMS += $(EXTRA_PROGRAMS) + +# +# Single 'perf' binary right now: +# +PROGRAMS += perf + +# List built-in command $C whose implementation cmd_$C() is not in +# builtin-$C.o but is linked in as part of some other command. +# +# None right now: +# +# BUILT_INS += perf-init $X + +# what 'all' will build and 'install' will install, in perfexecdir +ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS) + +# what 'all' will build but not install in perfexecdir +OTHER_PROGRAMS = perf$X + +# Set paths to tools early so that they can be used for version tests. 
+ifndef SHELL_PATH + SHELL_PATH = /bin/sh +endif +ifndef PERL_PATH + PERL_PATH = /usr/bin/perl +endif + +export PERL_PATH + +LIB_FILE=libperf.a + +LIB_H += ../../include/linux/perf_counter.h +LIB_H += perf.h +LIB_H += util/list.h +LIB_H += util/rbtree.h +LIB_H += util/levenshtein.h +LIB_H += util/parse-options.h +LIB_H += util/parse-events.h +LIB_H += util/quote.h +LIB_H += util/util.h +LIB_H += util/help.h +LIB_H += util/strbuf.h +LIB_H += util/string.h +LIB_H += util/run-command.h +LIB_H += util/sigchain.h +LIB_H += util/symbol.h +LIB_H += util/color.h + +LIB_OBJS += util/abspath.o +LIB_OBJS += util/alias.o +LIB_OBJS += util/config.o +LIB_OBJS += util/ctype.o +LIB_OBJS += util/environment.o +LIB_OBJS += util/exec_cmd.o +LIB_OBJS += util/help.o +LIB_OBJS += util/levenshtein.o +LIB_OBJS += util/parse-options.o +LIB_OBJS += util/parse-events.o +LIB_OBJS += util/path.o +LIB_OBJS += util/rbtree.o +LIB_OBJS += util/run-command.o +LIB_OBJS += util/quote.o +LIB_OBJS += util/strbuf.o +LIB_OBJS += util/string.o +LIB_OBJS += util/usage.o +LIB_OBJS += util/wrapper.o +LIB_OBJS += util/sigchain.o +LIB_OBJS += util/symbol.o +LIB_OBJS += util/color.o +LIB_OBJS += util/pager.o + +BUILTIN_OBJS += builtin-annotate.o +BUILTIN_OBJS += builtin-help.o +BUILTIN_OBJS += builtin-list.o +BUILTIN_OBJS += builtin-record.o +BUILTIN_OBJS += builtin-report.o +BUILTIN_OBJS += builtin-stat.o +BUILTIN_OBJS += builtin-top.o + +PERFLIBS = $(LIB_FILE) +EXTLIBS = + +# +# Platform specific tweaks +# + +# We choose to avoid "if .. else if .. else .. endif endif" +# because maintaining the nesting to match is a pain. If +# we had "elif" things would have been much nicer... + +-include config.mak.autogen +-include config.mak + +ifeq ($(uname_S),Darwin) + ifndef NO_FINK + ifeq ($(shell test -d /sw/lib && echo y),y) + BASIC_CFLAGS += -I/sw/include + BASIC_LDFLAGS += -L/sw/lib + endif + endif + ifndef NO_DARWIN_PORTS + ifeq ($(shell test -d /opt/local/lib && echo y),y) + BASIC_CFLAGS += -I/opt/local/include + BASIC_LDFLAGS += -L/opt/local/lib + endif + endif + PTHREAD_LIBS = +endif + +ifndef CC_LD_DYNPATH + ifdef NO_R_TO_GCC_LINKER + # Some gcc does not accept and pass -R to the linker to specify + # the runtime dynamic library path. 
+ CC_LD_DYNPATH = -Wl,-rpath, + else + CC_LD_DYNPATH = -R + endif +endif + +ifdef ZLIB_PATH + BASIC_CFLAGS += -I$(ZLIB_PATH)/include + EXTLIBS += -L$(ZLIB_PATH)/$(lib) $(CC_LD_DYNPATH)$(ZLIB_PATH)/$(lib) +endif +EXTLIBS += -lz + +ifdef NEEDS_SOCKET + EXTLIBS += -lsocket +endif +ifdef NEEDS_NSL + EXTLIBS += -lnsl +endif +ifdef NO_D_TYPE_IN_DIRENT + BASIC_CFLAGS += -DNO_D_TYPE_IN_DIRENT +endif +ifdef NO_D_INO_IN_DIRENT + BASIC_CFLAGS += -DNO_D_INO_IN_DIRENT +endif +ifdef NO_ST_BLOCKS_IN_STRUCT_STAT + BASIC_CFLAGS += -DNO_ST_BLOCKS_IN_STRUCT_STAT +endif +ifdef USE_NSEC + BASIC_CFLAGS += -DUSE_NSEC +endif +ifdef USE_ST_TIMESPEC + BASIC_CFLAGS += -DUSE_ST_TIMESPEC +endif +ifdef NO_NSEC + BASIC_CFLAGS += -DNO_NSEC +endif +ifdef NO_C99_FORMAT + BASIC_CFLAGS += -DNO_C99_FORMAT +endif +ifdef SNPRINTF_RETURNS_BOGUS + COMPAT_CFLAGS += -DSNPRINTF_RETURNS_BOGUS + COMPAT_OBJS += compat/snprintf.o +endif +ifdef FREAD_READS_DIRECTORIES + COMPAT_CFLAGS += -DFREAD_READS_DIRECTORIES + COMPAT_OBJS += compat/fopen.o +endif +ifdef NO_SYMLINK_HEAD + BASIC_CFLAGS += -DNO_SYMLINK_HEAD +endif +ifdef NO_STRCASESTR + COMPAT_CFLAGS += -DNO_STRCASESTR + COMPAT_OBJS += compat/strcasestr.o +endif +ifdef NO_STRTOUMAX + COMPAT_CFLAGS += -DNO_STRTOUMAX + COMPAT_OBJS += compat/strtoumax.o +endif +ifdef NO_STRTOULL + COMPAT_CFLAGS += -DNO_STRTOULL +endif +ifdef NO_SETENV + COMPAT_CFLAGS += -DNO_SETENV + COMPAT_OBJS += compat/setenv.o +endif +ifdef NO_MKDTEMP + COMPAT_CFLAGS += -DNO_MKDTEMP + COMPAT_OBJS += compat/mkdtemp.o +endif +ifdef NO_UNSETENV + COMPAT_CFLAGS += -DNO_UNSETENV + COMPAT_OBJS += compat/unsetenv.o +endif +ifdef NO_SYS_SELECT_H + BASIC_CFLAGS += -DNO_SYS_SELECT_H +endif +ifdef NO_MMAP + COMPAT_CFLAGS += -DNO_MMAP + COMPAT_OBJS += compat/mmap.o +else + ifdef USE_WIN32_MMAP + COMPAT_CFLAGS += -DUSE_WIN32_MMAP + COMPAT_OBJS += compat/win32mmap.o + endif +endif +ifdef NO_PREAD + COMPAT_CFLAGS += -DNO_PREAD + COMPAT_OBJS += compat/pread.o +endif +ifdef NO_FAST_WORKING_DIRECTORY + BASIC_CFLAGS += -DNO_FAST_WORKING_DIRECTORY +endif +ifdef NO_TRUSTABLE_FILEMODE + BASIC_CFLAGS += -DNO_TRUSTABLE_FILEMODE +endif +ifdef NO_IPV6 + BASIC_CFLAGS += -DNO_IPV6 +endif +ifdef NO_UINTMAX_T + BASIC_CFLAGS += -Duintmax_t=uint32_t +endif +ifdef NO_SOCKADDR_STORAGE +ifdef NO_IPV6 + BASIC_CFLAGS += -Dsockaddr_storage=sockaddr_in +else + BASIC_CFLAGS += -Dsockaddr_storage=sockaddr_in6 +endif +endif +ifdef NO_INET_NTOP + LIB_OBJS += compat/inet_ntop.o +endif +ifdef NO_INET_PTON + LIB_OBJS += compat/inet_pton.o +endif + +ifdef NO_ICONV + BASIC_CFLAGS += -DNO_ICONV +endif + +ifdef OLD_ICONV + BASIC_CFLAGS += -DOLD_ICONV +endif + +ifdef NO_DEFLATE_BOUND + BASIC_CFLAGS += -DNO_DEFLATE_BOUND +endif + +ifdef PPC_SHA1 + SHA1_HEADER = "ppc/sha1.h" + LIB_OBJS += ppc/sha1.o ppc/sha1ppc.o +else +ifdef ARM_SHA1 + SHA1_HEADER = "arm/sha1.h" + LIB_OBJS += arm/sha1.o arm/sha1_arm.o +else +ifdef MOZILLA_SHA1 + SHA1_HEADER = "mozilla-sha1/sha1.h" + LIB_OBJS += mozilla-sha1/sha1.o +else + SHA1_HEADER = + EXTLIBS += $(LIB_4_CRYPTO) +endif +endif +endif +ifdef NO_PERL_MAKEMAKER + export NO_PERL_MAKEMAKER +endif +ifdef NO_HSTRERROR + COMPAT_CFLAGS += -DNO_HSTRERROR + COMPAT_OBJS += compat/hstrerror.o +endif +ifdef NO_MEMMEM + COMPAT_CFLAGS += -DNO_MEMMEM + COMPAT_OBJS += compat/memmem.o +endif +ifdef INTERNAL_QSORT + COMPAT_CFLAGS += -DINTERNAL_QSORT + COMPAT_OBJS += compat/qsort.o +endif +ifdef RUNTIME_PREFIX + COMPAT_CFLAGS += -DRUNTIME_PREFIX +endif + +ifdef DIR_HAS_BSD_GROUP_SEMANTICS + COMPAT_CFLAGS += -DDIR_HAS_BSD_GROUP_SEMANTICS +endif +ifdef 
NO_EXTERNAL_GREP + BASIC_CFLAGS += -DNO_EXTERNAL_GREP +endif + +ifeq ($(PERL_PATH),) +NO_PERL=NoThanks +endif + +QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir +QUIET_SUBDIR1 = + +ifneq ($(findstring $(MAKEFLAGS),w),w) +PRINT_DIR = --no-print-directory +else # "make -w" +NO_SUBDIR = : +endif + +ifneq ($(findstring $(MAKEFLAGS),s),s) +ifndef V + QUIET_CC = @echo ' ' CC $@; + QUIET_AR = @echo ' ' AR $@; + QUIET_LINK = @echo ' ' LINK $@; + QUIET_BUILT_IN = @echo ' ' BUILTIN $@; + QUIET_GEN = @echo ' ' GEN $@; + QUIET_SUBDIR0 = +@subdir= + QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \ + $(MAKE) $(PRINT_DIR) -C $$subdir + export V + export QUIET_GEN + export QUIET_BUILT_IN +endif +endif + +ifdef ASCIIDOC8 + export ASCIIDOC8 +endif + +# Shell quote (do not use $(call) to accommodate ancient setups); + +SHA1_HEADER_SQ = $(subst ','\'',$(SHA1_HEADER)) +ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG)) + +DESTDIR_SQ = $(subst ','\'',$(DESTDIR)) +bindir_SQ = $(subst ','\'',$(bindir)) +bindir_relative_SQ = $(subst ','\'',$(bindir_relative)) +mandir_SQ = $(subst ','\'',$(mandir)) +infodir_SQ = $(subst ','\'',$(infodir)) +perfexecdir_SQ = $(subst ','\'',$(perfexecdir)) +template_dir_SQ = $(subst ','\'',$(template_dir)) +htmldir_SQ = $(subst ','\'',$(htmldir)) +prefix_SQ = $(subst ','\'',$(prefix)) + +SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH)) +PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH)) + +LIBS = $(PERFLIBS) $(EXTLIBS) + +BASIC_CFLAGS += -DSHA1_HEADER='$(SHA1_HEADER_SQ)' \ + $(COMPAT_CFLAGS) +LIB_OBJS += $(COMPAT_OBJS) + +ALL_CFLAGS += $(BASIC_CFLAGS) +ALL_LDFLAGS += $(BASIC_LDFLAGS) + +export TAR INSTALL DESTDIR SHELL_PATH + + +### Build rules + +SHELL = $(SHELL_PATH) + +all:: shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) PERF-BUILD-OPTIONS +ifneq (,$X) + $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), test '$p' -ef '$p$X' || $(RM) '$p';) +endif + +all:: + +please_set_SHELL_PATH_to_a_more_modern_shell: + @$$(:) + +shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell + +strip: $(PROGRAMS) perf$X + $(STRIP) $(STRIP_OPTS) $(PROGRAMS) perf$X + +perf.o: perf.c common-cmds.h PERF-CFLAGS + $(QUIET_CC)$(CC) -DPERF_VERSION='"$(PERF_VERSION)"' \ + '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ + $(ALL_CFLAGS) -c $(filter %.c,$^) + +perf$X: perf.o $(BUILTIN_OBJS) $(PERFLIBS) + $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ perf.o \ + $(BUILTIN_OBJS) $(ALL_LDFLAGS) $(LIBS) + +builtin-help.o: builtin-help.c common-cmds.h PERF-CFLAGS + $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \ + '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ + '-DPERF_MAN_PATH="$(mandir_SQ)"' \ + '-DPERF_INFO_PATH="$(infodir_SQ)"' $< + +$(BUILT_INS): perf$X + $(QUIET_BUILT_IN)$(RM) $@ && \ + ln perf$X $@ 2>/dev/null || \ + ln -s perf$X $@ 2>/dev/null || \ + cp perf$X $@ + +common-cmds.h: util/generate-cmdlist.sh command-list.txt + +common-cmds.h: $(wildcard Documentation/perf-*.txt) + $(QUIET_GEN)util/generate-cmdlist.sh > $@+ && mv $@+ $@ + +$(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh + $(QUIET_GEN)$(RM) $@ $@+ && \ + sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \ + -e 's|@SHELL_PATH@|$(SHELL_PATH_SQ)|' \ + -e 's|@@PERL@@|$(PERL_PATH_SQ)|g' \ + -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \ + -e 's/@@NO_CURL@@/$(NO_CURL)/g' \ + $@.sh >$@+ && \ + chmod +x $@+ && \ + mv $@+ $@ + +configure: configure.ac + $(QUIET_GEN)$(RM) $@ $<+ && \ + sed -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \ + $< > $<+ && \ + autoconf -o $@ $<+ && \ + $(RM) $<+ + +# These can record PERF_VERSION 
+perf.o perf.spec \ + $(patsubst %.sh,%,$(SCRIPT_SH)) \ + $(patsubst %.perl,%,$(SCRIPT_PERL)) \ + : PERF-VERSION-FILE + +%.o: %.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) $< +%.s: %.c PERF-CFLAGS + $(QUIET_CC)$(CC) -S $(ALL_CFLAGS) $< +%.o: %.S + $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) $< + +util/exec_cmd.o: util/exec_cmd.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \ + '-DPERF_EXEC_PATH="$(perfexecdir_SQ)"' \ + '-DBINDIR="$(bindir_relative_SQ)"' \ + '-DPREFIX="$(prefix_SQ)"' \ + $< + +builtin-init-db.o: builtin-init-db.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DDEFAULT_PERF_TEMPLATE_DIR='"$(template_dir_SQ)"' $< + +util/config.o: util/config.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< + +perf-%$X: %.o $(PERFLIBS) + $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) + +$(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H) +$(patsubst perf-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h) +builtin-revert.o wt-status.o: wt-status.h + +$(LIB_FILE): $(LIB_OBJS) + $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS) + +doc: + $(MAKE) -C Documentation all + +man: + $(MAKE) -C Documentation man + +html: + $(MAKE) -C Documentation html + +info: + $(MAKE) -C Documentation info + +pdf: + $(MAKE) -C Documentation pdf + +TAGS: + $(RM) TAGS + $(FIND) . -name '*.[hcS]' -print | xargs etags -a + +tags: + $(RM) tags + $(FIND) . -name '*.[hcS]' -print | xargs ctags -a + +cscope: + $(RM) cscope* + $(FIND) . -name '*.[hcS]' -print | xargs cscope -b + +### Detect prefix changes +TRACK_CFLAGS = $(subst ','\'',$(ALL_CFLAGS)):\ + $(bindir_SQ):$(perfexecdir_SQ):$(template_dir_SQ):$(prefix_SQ) + +PERF-CFLAGS: .FORCE-PERF-CFLAGS + @FLAGS='$(TRACK_CFLAGS)'; \ + if test x"$$FLAGS" != x"`cat PERF-CFLAGS 2>/dev/null`" ; then \ + echo 1>&2 " * new build flags or prefix"; \ + echo "$$FLAGS" >PERF-CFLAGS; \ + fi + +# We need to apply sq twice, once to protect from the shell +# that runs PERF-BUILD-OPTIONS, and then again to protect it +# and the first level quoting from the shell that runs "echo". +PERF-BUILD-OPTIONS: .FORCE-PERF-BUILD-OPTIONS + @echo SHELL_PATH=\''$(subst ','\'',$(SHELL_PATH_SQ))'\' >$@ + @echo TAR=\''$(subst ','\'',$(subst ','\'',$(TAR)))'\' >>$@ + @echo NO_CURL=\''$(subst ','\'',$(subst ','\'',$(NO_CURL)))'\' >>$@ + @echo NO_PERL=\''$(subst ','\'',$(subst ','\'',$(NO_PERL)))'\' >>$@ + +### Testing rules + +# +# None right now: +# +# TEST_PROGRAMS += test-something$X + +all:: $(TEST_PROGRAMS) + +# GNU make supports exporting all variables by "export" without parameters. +# However, the environment gets quite big, and some programs have problems +# with that. 
+ +export NO_SVN_TESTS + +check: common-cmds.h + if sparse; \ + then \ + for i in *.c */*.c; \ + do \ + sparse $(ALL_CFLAGS) $(SPARSE_FLAGS) $$i || exit; \ + done; \ + else \ + echo 2>&1 "Did you mean 'make test'?"; \ + exit 1; \ + fi + +remove-dashes: + ./fixup-builtins $(BUILT_INS) $(PROGRAMS) $(SCRIPTS) + +### Installation rules + +ifneq ($(filter /%,$(firstword $(template_dir))),) +template_instdir = $(template_dir) +else +template_instdir = $(prefix)/$(template_dir) +endif +export template_instdir + +ifneq ($(filter /%,$(firstword $(perfexecdir))),) +perfexec_instdir = $(perfexecdir) +else +perfexec_instdir = $(prefix)/$(perfexecdir) +endif +perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir)) +export perfexec_instdir + +install: all + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' + $(INSTALL) perf$X '$(DESTDIR_SQ)$(bindir_SQ)' +ifdef BUILT_INS + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' + $(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' +ifneq (,$X) + $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), $(RM) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$p';) +endif +endif + +install-doc: + $(MAKE) -C Documentation install + +install-man: + $(MAKE) -C Documentation install-man + +install-html: + $(MAKE) -C Documentation install-html + +install-info: + $(MAKE) -C Documentation install-info + +install-pdf: + $(MAKE) -C Documentation install-pdf + +quick-install-doc: + $(MAKE) -C Documentation quick-install + +quick-install-man: + $(MAKE) -C Documentation quick-install-man + +quick-install-html: + $(MAKE) -C Documentation quick-install-html + + +### Maintainer's dist rules +# +# None right now +# +# +# perf.spec: perf.spec.in +# sed -e 's/@@VERSION@@/$(PERF_VERSION)/g' < $< > $@+ +# mv $@+ $@ +# +# PERF_TARNAME=perf-$(PERF_VERSION) +# dist: perf.spec perf-archive$(X) configure +# ./perf-archive --format=tar \ +# --prefix=$(PERF_TARNAME)/ HEAD^{tree} > $(PERF_TARNAME).tar +# @mkdir -p $(PERF_TARNAME) +# @cp perf.spec configure $(PERF_TARNAME) +# @echo $(PERF_VERSION) > $(PERF_TARNAME)/version +# $(TAR) rf $(PERF_TARNAME).tar \ +# $(PERF_TARNAME)/perf.spec \ +# $(PERF_TARNAME)/configure \ +# $(PERF_TARNAME)/version +# @$(RM) -r $(PERF_TARNAME) +# gzip -f -9 $(PERF_TARNAME).tar +# +# htmldocs = perf-htmldocs-$(PERF_VERSION) +# manpages = perf-manpages-$(PERF_VERSION) +# dist-doc: +# $(RM) -r .doc-tmp-dir +# mkdir .doc-tmp-dir +# $(MAKE) -C Documentation WEBDOC_DEST=../.doc-tmp-dir install-webdoc +# cd .doc-tmp-dir && $(TAR) cf ../$(htmldocs).tar . +# gzip -n -9 -f $(htmldocs).tar +# : +# $(RM) -r .doc-tmp-dir +# mkdir -p .doc-tmp-dir/man1 .doc-tmp-dir/man5 .doc-tmp-dir/man7 +# $(MAKE) -C Documentation DESTDIR=./ \ +# man1dir=../.doc-tmp-dir/man1 \ +# man5dir=../.doc-tmp-dir/man5 \ +# man7dir=../.doc-tmp-dir/man7 \ +# install +# cd .doc-tmp-dir && $(TAR) cf ../$(manpages).tar . 
+# gzip -n -9 -f $(manpages).tar +# $(RM) -r .doc-tmp-dir +# +# rpm: dist +# $(RPMBUILD) -ta $(PERF_TARNAME).tar.gz + +### Cleaning rules + +distclean: clean +# $(RM) configure + +clean: + $(RM) *.o */*.o $(LIB_FILE) + $(RM) $(ALL_PROGRAMS) $(BUILT_INS) perf$X + $(RM) $(TEST_PROGRAMS) + $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h TAGS tags cscope* + $(RM) -r autom4te.cache + $(RM) config.log config.mak.autogen config.mak.append config.status config.cache + $(RM) -r $(PERF_TARNAME) .doc-tmp-dir + $(RM) $(PERF_TARNAME).tar.gz perf-core_$(PERF_VERSION)-*.tar.gz + $(RM) $(htmldocs).tar.gz $(manpages).tar.gz + $(MAKE) -C Documentation/ clean + $(RM) PERF-VERSION-FILE PERF-CFLAGS PERF-BUILD-OPTIONS + +.PHONY: all install clean strip +.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell +.PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS +.PHONY: .FORCE-PERF-BUILD-OPTIONS + +### Make sure built-ins do not have dups and listed in perf.c +# +check-builtins:: + ./check-builtins.sh + +### Test suite coverage testing +# +# None right now +# +# .PHONY: coverage coverage-clean coverage-build coverage-report +# +# coverage: +# $(MAKE) coverage-build +# $(MAKE) coverage-report +# +# coverage-clean: +# rm -f *.gcda *.gcno +# +# COVERAGE_CFLAGS = $(CFLAGS) -O0 -ftest-coverage -fprofile-arcs +# COVERAGE_LDFLAGS = $(CFLAGS) -O0 -lgcov +# +# coverage-build: coverage-clean +# $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" all +# $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" \ +# -j1 test +# +# coverage-report: +# gcov -b *.c */*.c +# grep '^function.*called 0 ' *.c.gcov */*.c.gcov \ +# | sed -e 's/\([^:]*\)\.gcov: *function \([^ ]*\) called.*/\1: \2/' \ +# | tee coverage-untested-functions diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c new file mode 100644 index 00000000000..116a3978b44 --- /dev/null +++ b/tools/perf/builtin-annotate.c @@ -0,0 +1,1355 @@ +/* + * builtin-annotate.c + * + * Builtin annotate command: Analyze the perf.data input file, + * look up and read DSOs and symbol information and display + * a histogram of results, along various sorting keys. + */ +#include "builtin.h" + +#include "util/util.h" + +#include "util/color.h" +#include "util/list.h" +#include "util/cache.h" +#include "util/rbtree.h" +#include "util/symbol.h" +#include "util/string.h" + +#include "perf.h" + +#include "util/parse-options.h" +#include "util/parse-events.h" + +#define SHOW_KERNEL 1 +#define SHOW_USER 2 +#define SHOW_HV 4 + +static char const *input_name = "perf.data"; +static char *vmlinux = NULL; + +static char default_sort_order[] = "comm,symbol"; +static char *sort_order = default_sort_order; + +static int input; +static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; + +static int dump_trace = 0; +#define dprintf(x...) 
do { if (dump_trace) printf(x); } while (0) + +static int verbose; + +static unsigned long page_size; +static unsigned long mmap_window = 32; + +struct ip_event { + struct perf_event_header header; + __u64 ip; + __u32 pid, tid; +}; + +struct mmap_event { + struct perf_event_header header; + __u32 pid, tid; + __u64 start; + __u64 len; + __u64 pgoff; + char filename[PATH_MAX]; +}; + +struct comm_event { + struct perf_event_header header; + __u32 pid, tid; + char comm[16]; +}; + +struct fork_event { + struct perf_event_header header; + __u32 pid, ppid; +}; + +struct period_event { + struct perf_event_header header; + __u64 time; + __u64 id; + __u64 sample_period; +}; + +typedef union event_union { + struct perf_event_header header; + struct ip_event ip; + struct mmap_event mmap; + struct comm_event comm; + struct fork_event fork; + struct period_event period; +} event_t; + +static LIST_HEAD(dsos); +static struct dso *kernel_dso; +static struct dso *vdso; + + +static void dsos__add(struct dso *dso) +{ + list_add_tail(&dso->node, &dsos); +} + +static struct dso *dsos__find(const char *name) +{ + struct dso *pos; + + list_for_each_entry(pos, &dsos, node) + if (strcmp(pos->name, name) == 0) + return pos; + return NULL; +} + +static struct dso *dsos__findnew(const char *name) +{ + struct dso *dso = dsos__find(name); + int nr; + + if (dso) + return dso; + + dso = dso__new(name, 0); + if (!dso) + goto out_delete_dso; + + nr = dso__load(dso, NULL, verbose); + if (nr < 0) { + if (verbose) + fprintf(stderr, "Failed to open: %s\n", name); + goto out_delete_dso; + } + if (!nr && verbose) { + fprintf(stderr, + "No symbols found in: %s, maybe install a debug package?\n", + name); + } + + dsos__add(dso); + + return dso; + +out_delete_dso: + dso__delete(dso); + return NULL; +} + +static void dsos__fprintf(FILE *fp) +{ + struct dso *pos; + + list_for_each_entry(pos, &dsos, node) + dso__fprintf(pos, fp); +} + +static struct symbol *vdso__find_symbol(struct dso *dso, uint64_t ip) +{ + return dso__find_symbol(kernel_dso, ip); +} + +static int load_kernel(void) +{ + int err; + + kernel_dso = dso__new("[kernel]", 0); + if (!kernel_dso) + return -1; + + err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose); + if (err) { + dso__delete(kernel_dso); + kernel_dso = NULL; + } else + dsos__add(kernel_dso); + + vdso = dso__new("[vdso]", 0); + if (!vdso) + return -1; + + vdso->find_symbol = vdso__find_symbol; + + dsos__add(vdso); + + return err; +} + +struct map { + struct list_head node; + uint64_t start; + uint64_t end; + uint64_t pgoff; + uint64_t (*map_ip)(struct map *, uint64_t); + struct dso *dso; +}; + +static uint64_t map__map_ip(struct map *map, uint64_t ip) +{ + return ip - map->start + map->pgoff; +} + +static uint64_t vdso__map_ip(struct map *map, uint64_t ip) +{ + return ip; +} + +static struct map *map__new(struct mmap_event *event) +{ + struct map *self = malloc(sizeof(*self)); + + if (self != NULL) { + const char *filename = event->filename; + + self->start = event->start; + self->end = event->start + event->len; + self->pgoff = event->pgoff; + + self->dso = dsos__findnew(filename); + if (self->dso == NULL) + goto out_delete; + + if (self->dso == vdso) + self->map_ip = vdso__map_ip; + else + self->map_ip = map__map_ip; + } + return self; +out_delete: + free(self); + return NULL; +} + +static struct map *map__clone(struct map *self) +{ + struct map *map = malloc(sizeof(*self)); + + if (!map) + return NULL; + + memcpy(map, self, sizeof(*self)); + + return map; +} + +static int map__overlap(struct map *l, 
struct map *r) +{ + if (l->start > r->start) { + struct map *t = l; + l = r; + r = t; + } + + if (l->end > r->start) + return 1; + + return 0; +} + +static size_t map__fprintf(struct map *self, FILE *fp) +{ + return fprintf(fp, " %"PRIx64"-%"PRIx64" %"PRIx64" %s\n", + self->start, self->end, self->pgoff, self->dso->name); +} + + +struct thread { + struct rb_node rb_node; + struct list_head maps; + pid_t pid; + char *comm; +}; + +static struct thread *thread__new(pid_t pid) +{ + struct thread *self = malloc(sizeof(*self)); + + if (self != NULL) { + self->pid = pid; + self->comm = malloc(32); + if (self->comm) + snprintf(self->comm, 32, ":%d", self->pid); + INIT_LIST_HEAD(&self->maps); + } + + return self; +} + +static int thread__set_comm(struct thread *self, const char *comm) +{ + if (self->comm) + free(self->comm); + self->comm = strdup(comm); + return self->comm ? 0 : -ENOMEM; +} + +static size_t thread__fprintf(struct thread *self, FILE *fp) +{ + struct map *pos; + size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm); + + list_for_each_entry(pos, &self->maps, node) + ret += map__fprintf(pos, fp); + + return ret; +} + + +static struct rb_root threads; +static struct thread *last_match; + +static struct thread *threads__findnew(pid_t pid) +{ + struct rb_node **p = &threads.rb_node; + struct rb_node *parent = NULL; + struct thread *th; + + /* + * Font-end cache - PID lookups come in blocks, + * so most of the time we dont have to look up + * the full rbtree: + */ + if (last_match && last_match->pid == pid) + return last_match; + + while (*p != NULL) { + parent = *p; + th = rb_entry(parent, struct thread, rb_node); + + if (th->pid == pid) { + last_match = th; + return th; + } + + if (pid < th->pid) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + th = thread__new(pid); + if (th != NULL) { + rb_link_node(&th->rb_node, parent, p); + rb_insert_color(&th->rb_node, &threads); + last_match = th; + } + + return th; +} + +static void thread__insert_map(struct thread *self, struct map *map) +{ + struct map *pos, *tmp; + + list_for_each_entry_safe(pos, tmp, &self->maps, node) { + if (map__overlap(pos, map)) { + list_del_init(&pos->node); + /* XXX leaks dsos */ + free(pos); + } + } + + list_add_tail(&map->node, &self->maps); +} + +static int thread__fork(struct thread *self, struct thread *parent) +{ + struct map *map; + + if (self->comm) + free(self->comm); + self->comm = strdup(parent->comm); + if (!self->comm) + return -ENOMEM; + + list_for_each_entry(map, &parent->maps, node) { + struct map *new = map__clone(map); + if (!new) + return -ENOMEM; + thread__insert_map(self, new); + } + + return 0; +} + +static struct map *thread__find_map(struct thread *self, uint64_t ip) +{ + struct map *pos; + + if (self == NULL) + return NULL; + + list_for_each_entry(pos, &self->maps, node) + if (ip >= pos->start && ip <= pos->end) + return pos; + + return NULL; +} + +static size_t threads__fprintf(FILE *fp) +{ + size_t ret = 0; + struct rb_node *nd; + + for (nd = rb_first(&threads); nd; nd = rb_next(nd)) { + struct thread *pos = rb_entry(nd, struct thread, rb_node); + + ret += thread__fprintf(pos, fp); + } + + return ret; +} + +/* + * histogram, sorted on item, collects counts + */ + +static struct rb_root hist; + +struct hist_entry { + struct rb_node rb_node; + + struct thread *thread; + struct map *map; + struct dso *dso; + struct symbol *sym; + uint64_t ip; + char level; + + uint32_t count; +}; + +/* + * configurable sorting bits + */ + +struct sort_entry { + struct list_head list; + + 
char *header; + + int64_t (*cmp)(struct hist_entry *, struct hist_entry *); + int64_t (*collapse)(struct hist_entry *, struct hist_entry *); + size_t (*print)(FILE *fp, struct hist_entry *); +}; + +/* --sort pid */ + +static int64_t +sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return right->thread->pid - left->thread->pid; +} + +static size_t +sort__thread_print(FILE *fp, struct hist_entry *self) +{ + return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid); +} + +static struct sort_entry sort_thread = { + .header = " Command: Pid", + .cmp = sort__thread_cmp, + .print = sort__thread_print, +}; + +/* --sort comm */ + +static int64_t +sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return right->thread->pid - left->thread->pid; +} + +static int64_t +sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) +{ + char *comm_l = left->thread->comm; + char *comm_r = right->thread->comm; + + if (!comm_l || !comm_r) { + if (!comm_l && !comm_r) + return 0; + else if (!comm_l) + return -1; + else + return 1; + } + + return strcmp(comm_l, comm_r); +} + +static size_t +sort__comm_print(FILE *fp, struct hist_entry *self) +{ + return fprintf(fp, "%16s", self->thread->comm); +} + +static struct sort_entry sort_comm = { + .header = " Command", + .cmp = sort__comm_cmp, + .collapse = sort__comm_collapse, + .print = sort__comm_print, +}; + +/* --sort dso */ + +static int64_t +sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct dso *dso_l = left->dso; + struct dso *dso_r = right->dso; + + if (!dso_l || !dso_r) { + if (!dso_l && !dso_r) + return 0; + else if (!dso_l) + return -1; + else + return 1; + } + + return strcmp(dso_l->name, dso_r->name); +} + +static size_t +sort__dso_print(FILE *fp, struct hist_entry *self) +{ + if (self->dso) + return fprintf(fp, "%-25s", self->dso->name); + + return fprintf(fp, "%016llx ", (__u64)self->ip); +} + +static struct sort_entry sort_dso = { + .header = "Shared Object ", + .cmp = sort__dso_cmp, + .print = sort__dso_print, +}; + +/* --sort symbol */ + +static int64_t +sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) +{ + uint64_t ip_l, ip_r; + + if (left->sym == right->sym) + return 0; + + ip_l = left->sym ? left->sym->start : left->ip; + ip_r = right->sym ? right->sym->start : right->ip; + + return (int64_t)(ip_r - ip_l); +} + +static size_t +sort__sym_print(FILE *fp, struct hist_entry *self) +{ + size_t ret = 0; + + if (verbose) + ret += fprintf(fp, "%#018llx ", (__u64)self->ip); + + if (self->sym) { + ret += fprintf(fp, "[%c] %s", + self->dso == kernel_dso ? 
'k' : '.', self->sym->name); + } else { + ret += fprintf(fp, "%#016llx", (__u64)self->ip); + } + + return ret; +} + +static struct sort_entry sort_sym = { + .header = "Symbol", + .cmp = sort__sym_cmp, + .print = sort__sym_print, +}; + +static int sort__need_collapse = 0; + +struct sort_dimension { + char *name; + struct sort_entry *entry; + int taken; +}; + +static struct sort_dimension sort_dimensions[] = { + { .name = "pid", .entry = &sort_thread, }, + { .name = "comm", .entry = &sort_comm, }, + { .name = "dso", .entry = &sort_dso, }, + { .name = "symbol", .entry = &sort_sym, }, +}; + +static LIST_HEAD(hist_entry__sort_list); + +static int sort_dimension__add(char *tok) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { + struct sort_dimension *sd = &sort_dimensions[i]; + + if (sd->taken) + continue; + + if (strncasecmp(tok, sd->name, strlen(tok))) + continue; + + if (sd->entry->collapse) + sort__need_collapse = 1; + + list_add_tail(&sd->entry->list, &hist_entry__sort_list); + sd->taken = 1; + + return 0; + } + + return -ESRCH; +} + +static int64_t +hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct sort_entry *se; + int64_t cmp = 0; + + list_for_each_entry(se, &hist_entry__sort_list, list) { + cmp = se->cmp(left, right); + if (cmp) + break; + } + + return cmp; +} + +static int64_t +hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) +{ + struct sort_entry *se; + int64_t cmp = 0; + + list_for_each_entry(se, &hist_entry__sort_list, list) { + int64_t (*f)(struct hist_entry *, struct hist_entry *); + + f = se->collapse ?: se->cmp; + + cmp = f(left, right); + if (cmp) + break; + } + + return cmp; +} + +/* + * collect histogram counts + */ +static void hist_hit(struct hist_entry *he, uint64_t ip) +{ + unsigned int sym_size, offset; + struct symbol *sym = he->sym; + + he->count++; + + if (!sym || !sym->hist) + return; + + sym_size = sym->end - sym->start; + offset = ip - sym->start; + + if (offset >= sym_size) + return; + + sym->hist_sum++; + sym->hist[offset]++; + + if (verbose >= 3) + printf("%p %s: count++ [ip: %p, %08Lx] => %Ld\n", + (void *)he->sym->start, + he->sym->name, + (void *)ip, ip - he->sym->start, + sym->hist[offset]); +} + +static int +hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, + struct symbol *sym, uint64_t ip, char level) +{ + struct rb_node **p = &hist.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *he; + struct hist_entry entry = { + .thread = thread, + .map = map, + .dso = dso, + .sym = sym, + .ip = ip, + .level = level, + .count = 1, + }; + int cmp; + + while (*p != NULL) { + parent = *p; + he = rb_entry(parent, struct hist_entry, rb_node); + + cmp = hist_entry__cmp(&entry, he); + + if (!cmp) { + hist_hit(he, ip); + + return 0; + } + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + he = malloc(sizeof(*he)); + if (!he) + return -ENOMEM; + *he = entry; + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &hist); + + return 0; +} + +static void hist_entry__free(struct hist_entry *he) +{ + free(he); +} + +/* + * collapse the histogram + */ + +static struct rb_root collapse_hists; + +static void collapse__insert_entry(struct hist_entry *he) +{ + struct rb_node **p = &collapse_hists.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *iter; + int64_t cmp; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct hist_entry, rb_node); + + cmp = hist_entry__collapse(iter, he); + + if (!cmp) { + 
iter->count += he->count; + hist_entry__free(he); + return; + } + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &collapse_hists); +} + +static void collapse__resort(void) +{ + struct rb_node *next; + struct hist_entry *n; + + if (!sort__need_collapse) + return; + + next = rb_first(&hist); + while (next) { + n = rb_entry(next, struct hist_entry, rb_node); + next = rb_next(&n->rb_node); + + rb_erase(&n->rb_node, &hist); + collapse__insert_entry(n); + } +} + +/* + * reverse the map, sort on count. + */ + +static struct rb_root output_hists; + +static void output__insert_entry(struct hist_entry *he) +{ + struct rb_node **p = &output_hists.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *iter; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct hist_entry, rb_node); + + if (he->count > iter->count) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &output_hists); +} + +static void output__resort(void) +{ + struct rb_node *next; + struct hist_entry *n; + struct rb_root *tree = &hist; + + if (sort__need_collapse) + tree = &collapse_hists; + + next = rb_first(tree); + + while (next) { + n = rb_entry(next, struct hist_entry, rb_node); + next = rb_next(&n->rb_node); + + rb_erase(&n->rb_node, tree); + output__insert_entry(n); + } +} + +static void register_idle_thread(void) +{ + struct thread *thread = threads__findnew(0); + + if (thread == NULL || + thread__set_comm(thread, "[idle]")) { + fprintf(stderr, "problem inserting idle task.\n"); + exit(-1); + } +} + +static unsigned long total = 0, + total_mmap = 0, + total_comm = 0, + total_fork = 0, + total_unknown = 0; + +static int +process_overflow_event(event_t *event, unsigned long offset, unsigned long head) +{ + char level; + int show = 0; + struct dso *dso = NULL; + struct thread *thread = threads__findnew(event->ip.pid); + uint64_t ip = event->ip.ip; + struct map *map = NULL; + + dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->header.misc, + event->ip.pid, + (void *)(long)ip); + + dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid); + + if (thread == NULL) { + fprintf(stderr, "problem processing %d event, skipping it.\n", + event->header.type); + return -1; + } + + if (event->header.misc & PERF_EVENT_MISC_KERNEL) { + show = SHOW_KERNEL; + level = 'k'; + + dso = kernel_dso; + + dprintf(" ...... dso: %s\n", dso->name); + + } else if (event->header.misc & PERF_EVENT_MISC_USER) { + + show = SHOW_USER; + level = '.'; + + map = thread__find_map(thread, ip); + if (map != NULL) { + ip = map->map_ip(map, ip); + dso = map->dso; + } else { + /* + * If this is outside of all known maps, + * and is a negative address, try to look it + * up in the kernel dso, as it might be a + * vsyscall (which executes in user-mode): + */ + if ((long long)ip < 0) + dso = kernel_dso; + } + dprintf(" ...... dso: %s\n", dso ? dso->name : ""); + + } else { + show = SHOW_HV; + level = 'H'; + dprintf(" ...... 
dso: [hypervisor]\n"); + } + + if (show & show_mask) { + struct symbol *sym = NULL; + + if (dso) + sym = dso->find_symbol(dso, ip); + + if (hist_entry__add(thread, map, dso, sym, ip, level)) { + fprintf(stderr, + "problem incrementing symbol count, skipping event\n"); + return -1; + } + } + total++; + + return 0; +} + +static int +process_mmap_event(event_t *event, unsigned long offset, unsigned long head) +{ + struct thread *thread = threads__findnew(event->mmap.pid); + struct map *map = map__new(&event->mmap); + + dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->mmap.pid, + (void *)(long)event->mmap.start, + (void *)(long)event->mmap.len, + (void *)(long)event->mmap.pgoff, + event->mmap.filename); + + if (thread == NULL || map == NULL) { + dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n"); + return 0; + } + + thread__insert_map(thread, map); + total_mmap++; + + return 0; +} + +static int +process_comm_event(event_t *event, unsigned long offset, unsigned long head) +{ + struct thread *thread = threads__findnew(event->comm.pid); + + dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->comm.comm, event->comm.pid); + + if (thread == NULL || + thread__set_comm(thread, event->comm.comm)) { + dprintf("problem processing PERF_EVENT_COMM, skipping event.\n"); + return -1; + } + total_comm++; + + return 0; +} + +static int +process_fork_event(event_t *event, unsigned long offset, unsigned long head) +{ + struct thread *thread = threads__findnew(event->fork.pid); + struct thread *parent = threads__findnew(event->fork.ppid); + + dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->fork.pid, event->fork.ppid); + + if (!thread || !parent || thread__fork(thread, parent)) { + dprintf("problem processing PERF_EVENT_FORK, skipping event.\n"); + return -1; + } + total_fork++; + + return 0; +} + +static int +process_period_event(event_t *event, unsigned long offset, unsigned long head) +{ + dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->period.time, + event->period.id, + event->period.sample_period); + + return 0; +} + +static int +process_event(event_t *event, unsigned long offset, unsigned long head) +{ + if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) + return process_overflow_event(event, offset, head); + + switch (event->header.type) { + case PERF_EVENT_MMAP: + return process_mmap_event(event, offset, head); + + case PERF_EVENT_COMM: + return process_comm_event(event, offset, head); + + case PERF_EVENT_FORK: + return process_fork_event(event, offset, head); + + case PERF_EVENT_PERIOD: + return process_period_event(event, offset, head); + /* + * We dont process them right now but they are fine: + */ + + case PERF_EVENT_THROTTLE: + case PERF_EVENT_UNTHROTTLE: + return 0; + + default: + return -1; + } + + return 0; +} + +static int +parse_line(FILE *file, struct symbol *sym, uint64_t start, uint64_t len) +{ + char *line = NULL, *tmp, *tmp2; + unsigned int offset; + size_t line_len; + __u64 line_ip; + int ret; + char *c; + + if (getline(&line, &line_len, file) < 0) + return -1; + if (!line) + return -1; + + c = strchr(line, '\n'); + if (c) + *c = 0; + + line_ip = -1; + offset = 0; + ret = -2; + + /* + * Strip leading spaces: + */ + tmp = line; + while (*tmp) { + if (*tmp != ' ') + 
break; + tmp++; + } + + if (*tmp) { + /* + * Parse hexa addresses followed by ':' + */ + line_ip = strtoull(tmp, &tmp2, 16); + if (*tmp2 != ':') + line_ip = -1; + } + + if (line_ip != -1) { + unsigned int hits = 0; + double percent = 0.0; + char *color = PERF_COLOR_NORMAL; + + offset = line_ip - start; + if (offset < len) + hits = sym->hist[offset]; + + if (sym->hist_sum) + percent = 100.0 * hits / sym->hist_sum; + + /* + * We color high-overhead entries in red, low-overhead + * entries in green - and keep the middle ground normal: + */ + if (percent >= 5.0) + color = PERF_COLOR_RED; + else { + if (percent > 0.5) + color = PERF_COLOR_GREEN; + } + + color_fprintf(stdout, color, " %7.2f", percent); + printf(" : "); + color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", line); + } else { + if (!*line) + printf(" :\n"); + else + printf(" : %s\n", line); + } + + return 0; +} + +static void annotate_sym(struct dso *dso, struct symbol *sym) +{ + char *filename = dso->name; + uint64_t start, end, len; + char command[PATH_MAX*2]; + FILE *file; + + if (!filename) + return; + if (dso == kernel_dso) + filename = vmlinux; + + printf("\n------------------------------------------------\n"); + printf(" Percent | Source code & Disassembly of %s\n", filename); + printf("------------------------------------------------\n"); + + if (verbose >= 2) + printf("annotating [%p] %30s : [%p] %30s\n", dso, dso->name, sym, sym->name); + + start = sym->obj_start; + if (!start) + start = sym->start; + + end = start + sym->end - sym->start + 1; + len = sym->end - sym->start; + + sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s", (__u64)start, (__u64)end, filename); + + if (verbose >= 3) + printf("doing: %s\n", command); + + file = popen(command, "r"); + if (!file) + return; + + while (!feof(file)) { + if (parse_line(file, sym, start, len) < 0) + break; + } + + pclose(file); +} + +static void find_annotations(void) +{ + struct rb_node *nd; + struct dso *dso; + int count = 0; + + list_for_each_entry(dso, &dsos, node) { + + for (nd = rb_first(&dso->syms); nd; nd = rb_next(nd)) { + struct symbol *sym = rb_entry(nd, struct symbol, rb_node); + + if (sym->hist) { + annotate_sym(dso, sym); + count++; + } + } + } + + if (!count) + printf(" Error: symbol '%s' not present amongst the samples.\n", sym_hist_filter); +} + +static int __cmd_annotate(void) +{ + int ret, rc = EXIT_FAILURE; + unsigned long offset = 0; + unsigned long head = 0; + struct stat stat; + event_t *event; + uint32_t size; + char *buf; + + register_idle_thread(); + + input = open(input_name, O_RDONLY); + if (input < 0) { + perror("failed to open file"); + exit(-1); + } + + ret = fstat(input, &stat); + if (ret < 0) { + perror("failed to stat file"); + exit(-1); + } + + if (!stat.st_size) { + fprintf(stderr, "zero-sized file, nothing to do!\n"); + exit(0); + } + + if (load_kernel() < 0) { + perror("failed to load kernel symbols"); + return EXIT_FAILURE; + } + +remap: + buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, + MAP_SHARED, input, offset); + if (buf == MAP_FAILED) { + perror("failed to mmap file"); + exit(-1); + } + +more: + event = (event_t *)(buf + head); + + size = event->header.size; + if (!size) + size = 8; + + if (head + event->header.size >= page_size * mmap_window) { + unsigned long shift = page_size * (head / page_size); + int ret; + + ret = munmap(buf, page_size * mmap_window); + assert(ret == 0); + + offset += shift; + head -= shift; + goto remap; + } + + size = event->header.size; + + dprintf("%p [%p]: event: 
%d\n", + (void *)(offset + head), + (void *)(long)event->header.size, + event->header.type); + + if (!size || process_event(event, offset, head) < 0) { + + dprintf("%p [%p]: skipping unknown header type: %d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->header.type); + + total_unknown++; + + /* + * assume we lost track of the stream, check alignment, and + * increment a single u64 in the hope to catch on again 'soon'. + */ + + if (unlikely(head & 7)) + head &= ~7ULL; + + size = 8; + } + + head += size; + + if (offset + head < stat.st_size) + goto more; + + rc = EXIT_SUCCESS; + close(input); + + dprintf(" IP events: %10ld\n", total); + dprintf(" mmap events: %10ld\n", total_mmap); + dprintf(" comm events: %10ld\n", total_comm); + dprintf(" fork events: %10ld\n", total_fork); + dprintf(" unknown events: %10ld\n", total_unknown); + + if (dump_trace) + return 0; + + if (verbose >= 3) + threads__fprintf(stdout); + + if (verbose >= 2) + dsos__fprintf(stdout); + + collapse__resort(); + output__resort(); + + find_annotations(); + + return rc; +} + +static const char * const annotate_usage[] = { + "perf annotate [] ", + NULL +}; + +static const struct option options[] = { + OPT_STRING('i', "input", &input_name, "file", + "input file name"), + OPT_STRING('s', "symbol", &sym_hist_filter, "file", + "symbol to annotate"), + OPT_BOOLEAN('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, + "dump raw trace in ASCII"), + OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"), + OPT_END() +}; + +static void setup_sorting(void) +{ + char *tmp, *tok, *str = strdup(sort_order); + + for (tok = strtok_r(str, ", ", &tmp); + tok; tok = strtok_r(NULL, ", ", &tmp)) { + if (sort_dimension__add(tok) < 0) { + error("Unknown --sort key: `%s'", tok); + usage_with_options(annotate_usage, options); + } + } + + free(str); +} + +int cmd_annotate(int argc, const char **argv, const char *prefix) +{ + symbol__init(); + + page_size = getpagesize(); + + argc = parse_options(argc, argv, options, annotate_usage, 0); + + setup_sorting(); + + if (argc) { + /* + * Special case: if there's an argument left then assume tha + * it's a symbol filter: + */ + if (argc > 1) + usage_with_options(annotate_usage, options); + + sym_hist_filter = argv[0]; + } + + if (!sym_hist_filter) + usage_with_options(annotate_usage, options); + + setup_pager(); + + return __cmd_annotate(); +} diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c new file mode 100644 index 00000000000..0f32dc3f3c4 --- /dev/null +++ b/tools/perf/builtin-help.c @@ -0,0 +1,461 @@ +/* + * builtin-help.c + * + * Builtin help command + */ +#include "util/cache.h" +#include "builtin.h" +#include "util/exec_cmd.h" +#include "common-cmds.h" +#include "util/parse-options.h" +#include "util/run-command.h" +#include "util/help.h" + +static struct man_viewer_list { + struct man_viewer_list *next; + char name[FLEX_ARRAY]; +} *man_viewer_list; + +static struct man_viewer_info_list { + struct man_viewer_info_list *next; + const char *info; + char name[FLEX_ARRAY]; +} *man_viewer_info_list; + +enum help_format { + HELP_FORMAT_MAN, + HELP_FORMAT_INFO, + HELP_FORMAT_WEB, +}; + +static int show_all = 0; +static enum help_format help_format = HELP_FORMAT_MAN; +static struct option builtin_help_options[] = { + OPT_BOOLEAN('a', "all", &show_all, "print all available commands"), + OPT_SET_INT('m', "man", &help_format, "show man page", HELP_FORMAT_MAN), + OPT_SET_INT('w', 
"web", &help_format, "show manual in web browser", + HELP_FORMAT_WEB), + OPT_SET_INT('i', "info", &help_format, "show info page", + HELP_FORMAT_INFO), + OPT_END(), +}; + +static const char * const builtin_help_usage[] = { + "perf help [--all] [--man|--web|--info] [command]", + NULL +}; + +static enum help_format parse_help_format(const char *format) +{ + if (!strcmp(format, "man")) + return HELP_FORMAT_MAN; + if (!strcmp(format, "info")) + return HELP_FORMAT_INFO; + if (!strcmp(format, "web") || !strcmp(format, "html")) + return HELP_FORMAT_WEB; + die("unrecognized help format '%s'", format); +} + +static const char *get_man_viewer_info(const char *name) +{ + struct man_viewer_info_list *viewer; + + for (viewer = man_viewer_info_list; viewer; viewer = viewer->next) + { + if (!strcasecmp(name, viewer->name)) + return viewer->info; + } + return NULL; +} + +static int check_emacsclient_version(void) +{ + struct strbuf buffer = STRBUF_INIT; + struct child_process ec_process; + const char *argv_ec[] = { "emacsclient", "--version", NULL }; + int version; + + /* emacsclient prints its version number on stderr */ + memset(&ec_process, 0, sizeof(ec_process)); + ec_process.argv = argv_ec; + ec_process.err = -1; + ec_process.stdout_to_stderr = 1; + if (start_command(&ec_process)) { + fprintf(stderr, "Failed to start emacsclient.\n"); + return -1; + } + strbuf_read(&buffer, ec_process.err, 20); + close(ec_process.err); + + /* + * Don't bother checking return value, because "emacsclient --version" + * seems to always exits with code 1. + */ + finish_command(&ec_process); + + if (prefixcmp(buffer.buf, "emacsclient")) { + fprintf(stderr, "Failed to parse emacsclient version.\n"); + strbuf_release(&buffer); + return -1; + } + + strbuf_remove(&buffer, 0, strlen("emacsclient")); + version = atoi(buffer.buf); + + if (version < 22) { + fprintf(stderr, + "emacsclient version '%d' too old (< 22).\n", + version); + strbuf_release(&buffer); + return -1; + } + + strbuf_release(&buffer); + return 0; +} + +static void exec_woman_emacs(const char* path, const char *page) +{ + if (!check_emacsclient_version()) { + /* This works only with emacsclient version >= 22. */ + struct strbuf man_page = STRBUF_INIT; + + if (!path) + path = "emacsclient"; + strbuf_addf(&man_page, "(woman \"%s\")", page); + execlp(path, "emacsclient", "-e", man_page.buf, NULL); + warning("failed to exec '%s': %s", path, strerror(errno)); + } +} + +static void exec_man_konqueror(const char* path, const char *page) +{ + const char *display = getenv("DISPLAY"); + if (display && *display) { + struct strbuf man_page = STRBUF_INIT; + const char *filename = "kfmclient"; + + /* It's simpler to launch konqueror using kfmclient. 
*/ + if (path) { + const char *file = strrchr(path, '/'); + if (file && !strcmp(file + 1, "konqueror")) { + char *new = strdup(path); + char *dest = strrchr(new, '/'); + + /* strlen("konqueror") == strlen("kfmclient") */ + strcpy(dest + 1, "kfmclient"); + path = new; + } + if (file) + filename = file; + } else + path = "kfmclient"; + strbuf_addf(&man_page, "man:%s(1)", page); + execlp(path, filename, "newTab", man_page.buf, NULL); + warning("failed to exec '%s': %s", path, strerror(errno)); + } +} + +static void exec_man_man(const char* path, const char *page) +{ + if (!path) + path = "man"; + execlp(path, "man", page, NULL); + warning("failed to exec '%s': %s", path, strerror(errno)); +} + +static void exec_man_cmd(const char *cmd, const char *page) +{ + struct strbuf shell_cmd = STRBUF_INIT; + strbuf_addf(&shell_cmd, "%s %s", cmd, page); + execl("/bin/sh", "sh", "-c", shell_cmd.buf, NULL); + warning("failed to exec '%s': %s", cmd, strerror(errno)); +} + +static void add_man_viewer(const char *name) +{ + struct man_viewer_list **p = &man_viewer_list; + size_t len = strlen(name); + + while (*p) + p = &((*p)->next); + *p = calloc(1, (sizeof(**p) + len + 1)); + strncpy((*p)->name, name, len); +} + +static int supported_man_viewer(const char *name, size_t len) +{ + return (!strncasecmp("man", name, len) || + !strncasecmp("woman", name, len) || + !strncasecmp("konqueror", name, len)); +} + +static void do_add_man_viewer_info(const char *name, + size_t len, + const char *value) +{ + struct man_viewer_info_list *new = calloc(1, sizeof(*new) + len + 1); + + strncpy(new->name, name, len); + new->info = strdup(value); + new->next = man_viewer_info_list; + man_viewer_info_list = new; +} + +static int add_man_viewer_path(const char *name, + size_t len, + const char *value) +{ + if (supported_man_viewer(name, len)) + do_add_man_viewer_info(name, len, value); + else + warning("'%s': path for unsupported man viewer.\n" + "Please consider using 'man..cmd' instead.", + name); + + return 0; +} + +static int add_man_viewer_cmd(const char *name, + size_t len, + const char *value) +{ + if (supported_man_viewer(name, len)) + warning("'%s': cmd for supported man viewer.\n" + "Please consider using 'man..path' instead.", + name); + else + do_add_man_viewer_info(name, len, value); + + return 0; +} + +static int add_man_viewer_info(const char *var, const char *value) +{ + const char *name = var + 4; + const char *subkey = strrchr(name, '.'); + + if (!subkey) + return error("Config with no key for man viewer: %s", name); + + if (!strcmp(subkey, ".path")) { + if (!value) + return config_error_nonbool(var); + return add_man_viewer_path(name, subkey - name, value); + } + if (!strcmp(subkey, ".cmd")) { + if (!value) + return config_error_nonbool(var); + return add_man_viewer_cmd(name, subkey - name, value); + } + + warning("'%s': unsupported man viewer sub key.", subkey); + return 0; +} + +static int perf_help_config(const char *var, const char *value, void *cb) +{ + if (!strcmp(var, "help.format")) { + if (!value) + return config_error_nonbool(var); + help_format = parse_help_format(value); + return 0; + } + if (!strcmp(var, "man.viewer")) { + if (!value) + return config_error_nonbool(var); + add_man_viewer(value); + return 0; + } + if (!prefixcmp(var, "man.")) + return add_man_viewer_info(var, value); + + return perf_default_config(var, value, cb); +} + +static struct cmdnames main_cmds, other_cmds; + +void list_common_cmds_help(void) +{ + int i, longest = 0; + + for (i = 0; i < ARRAY_SIZE(common_cmds); i++) { + if 
(longest < strlen(common_cmds[i].name)) + longest = strlen(common_cmds[i].name); + } + + puts(" The most commonly used perf commands are:"); + for (i = 0; i < ARRAY_SIZE(common_cmds); i++) { + printf(" %s ", common_cmds[i].name); + mput_char(' ', longest - strlen(common_cmds[i].name)); + puts(common_cmds[i].help); + } +} + +static int is_perf_command(const char *s) +{ + return is_in_cmdlist(&main_cmds, s) || + is_in_cmdlist(&other_cmds, s); +} + +static const char *prepend(const char *prefix, const char *cmd) +{ + size_t pre_len = strlen(prefix); + size_t cmd_len = strlen(cmd); + char *p = malloc(pre_len + cmd_len + 1); + memcpy(p, prefix, pre_len); + strcpy(p + pre_len, cmd); + return p; +} + +static const char *cmd_to_page(const char *perf_cmd) +{ + if (!perf_cmd) + return "perf"; + else if (!prefixcmp(perf_cmd, "perf")) + return perf_cmd; + else if (is_perf_command(perf_cmd)) + return prepend("perf-", perf_cmd); + else + return prepend("perf-", perf_cmd); +} + +static void setup_man_path(void) +{ + struct strbuf new_path = STRBUF_INIT; + const char *old_path = getenv("MANPATH"); + + /* We should always put ':' after our path. If there is no + * old_path, the ':' at the end will let 'man' to try + * system-wide paths after ours to find the manual page. If + * there is old_path, we need ':' as delimiter. */ + strbuf_addstr(&new_path, system_path(PERF_MAN_PATH)); + strbuf_addch(&new_path, ':'); + if (old_path) + strbuf_addstr(&new_path, old_path); + + setenv("MANPATH", new_path.buf, 1); + + strbuf_release(&new_path); +} + +static void exec_viewer(const char *name, const char *page) +{ + const char *info = get_man_viewer_info(name); + + if (!strcasecmp(name, "man")) + exec_man_man(info, page); + else if (!strcasecmp(name, "woman")) + exec_woman_emacs(info, page); + else if (!strcasecmp(name, "konqueror")) + exec_man_konqueror(info, page); + else if (info) + exec_man_cmd(info, page); + else + warning("'%s': unknown man viewer.", name); +} + +static void show_man_page(const char *perf_cmd) +{ + struct man_viewer_list *viewer; + const char *page = cmd_to_page(perf_cmd); + const char *fallback = getenv("PERF_MAN_VIEWER"); + + setup_man_path(); + for (viewer = man_viewer_list; viewer; viewer = viewer->next) + { + exec_viewer(viewer->name, page); /* will return when unable */ + } + if (fallback) + exec_viewer(fallback, page); + exec_viewer("man", page); + die("no man viewer handled the request"); +} + +static void show_info_page(const char *perf_cmd) +{ + const char *page = cmd_to_page(perf_cmd); + setenv("INFOPATH", system_path(PERF_INFO_PATH), 1); + execlp("info", "info", "perfman", page, NULL); +} + +static void get_html_page_path(struct strbuf *page_path, const char *page) +{ + struct stat st; + const char *html_path = system_path(PERF_HTML_PATH); + + /* Check that we have a perf documentation directory. */ + if (stat(mkpath("%s/perf.html", html_path), &st) + || !S_ISREG(st.st_mode)) + die("'%s': not a documentation directory.", html_path); + + strbuf_init(page_path, 0); + strbuf_addf(page_path, "%s/%s.html", html_path, page); +} + +/* + * If open_html is not defined in a platform-specific way (see for + * example compat/mingw.h), we use the script web--browse to display + * HTML. 
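+ *
+ * The script is handed the "help.browser" config key through its
+ * -c option (see the execl_perf_cmd() call below), so the browser
+ * it launches can be configured by the user.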
+ */ +#ifndef open_html +static void open_html(const char *path) +{ + execl_perf_cmd("web--browse", "-c", "help.browser", path, NULL); +} +#endif + +static void show_html_page(const char *perf_cmd) +{ + const char *page = cmd_to_page(perf_cmd); + struct strbuf page_path; /* it leaks but we exec bellow */ + + get_html_page_path(&page_path, page); + + open_html(page_path.buf); +} + +int cmd_help(int argc, const char **argv, const char *prefix) +{ + const char *alias; + load_command_list("perf-", &main_cmds, &other_cmds); + + perf_config(perf_help_config, NULL); + + argc = parse_options(argc, argv, builtin_help_options, + builtin_help_usage, 0); + + if (show_all) { + printf("\n usage: %s\n\n", perf_usage_string); + list_commands("perf commands", &main_cmds, &other_cmds); + printf(" %s\n\n", perf_more_info_string); + return 0; + } + + if (!argv[0]) { + printf("\n usage: %s\n\n", perf_usage_string); + list_common_cmds_help(); + printf("\n %s\n\n", perf_more_info_string); + return 0; + } + + alias = alias_lookup(argv[0]); + if (alias && !is_perf_command(argv[0])) { + printf("`perf %s' is aliased to `%s'\n", argv[0], alias); + return 0; + } + + switch (help_format) { + case HELP_FORMAT_MAN: + show_man_page(argv[0]); + break; + case HELP_FORMAT_INFO: + show_info_page(argv[0]); + break; + case HELP_FORMAT_WEB: + show_html_page(argv[0]); + break; + } + + return 0; +} diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c new file mode 100644 index 00000000000..fe60e37c96e --- /dev/null +++ b/tools/perf/builtin-list.c @@ -0,0 +1,20 @@ +/* + * builtin-list.c + * + * Builtin list command: list all event types + * + * Copyright (C) 2009, Thomas Gleixner + * Copyright (C) 2008-2009, Red Hat Inc, Ingo Molnar + */ +#include "builtin.h" + +#include "perf.h" + +#include "util/parse-options.h" +#include "util/parse-events.h" + +int cmd_list(int argc, const char **argv, const char *prefix) +{ + print_events(); + return 0; +} diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c new file mode 100644 index 00000000000..aeab9c4b15e --- /dev/null +++ b/tools/perf/builtin-record.c @@ -0,0 +1,544 @@ +/* + * builtin-record.c + * + * Builtin record command: Record the profile of a workload + * (or a CPU, or a PID) into the perf.data output file - for + * later analysis via perf report. 
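+ *
+ * A typical session records a workload first and analyzes it
+ * afterwards, for example:
+ *
+ *   $ perf record -f ./hackbench 10
+ *   $ perf report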
+ */ +#include "builtin.h" + +#include "perf.h" + +#include "util/util.h" +#include "util/parse-options.h" +#include "util/parse-events.h" +#include "util/string.h" + +#include +#include + +#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1) +#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) + +static int fd[MAX_NR_CPUS][MAX_COUNTERS]; + +static long default_interval = 100000; + +static int nr_cpus = 0; +static unsigned int page_size; +static unsigned int mmap_pages = 128; +static int freq = 0; +static int output; +static const char *output_name = "perf.data"; +static int group = 0; +static unsigned int realtime_prio = 0; +static int system_wide = 0; +static pid_t target_pid = -1; +static int inherit = 1; +static int force = 0; +static int append_file = 0; + +static long samples; +static struct timeval last_read; +static struct timeval this_read; + +static __u64 bytes_written; + +static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; + +static int nr_poll; +static int nr_cpu; + +struct mmap_event { + struct perf_event_header header; + __u32 pid; + __u32 tid; + __u64 start; + __u64 len; + __u64 pgoff; + char filename[PATH_MAX]; +}; + +struct comm_event { + struct perf_event_header header; + __u32 pid; + __u32 tid; + char comm[16]; +}; + + +struct mmap_data { + int counter; + void *base; + unsigned int mask; + unsigned int prev; +}; + +static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; + +static unsigned int mmap_read_head(struct mmap_data *md) +{ + struct perf_counter_mmap_page *pc = md->base; + int head; + + head = pc->data_head; + rmb(); + + return head; +} + +static void mmap_read(struct mmap_data *md) +{ + unsigned int head = mmap_read_head(md); + unsigned int old = md->prev; + unsigned char *data = md->base + page_size; + unsigned long size; + void *buf; + int diff; + + gettimeofday(&this_read, NULL); + + /* + * If we're further behind than half the buffer, there's a chance + * the writer will bite our tail and mess up the samples under us. + * + * If we somehow ended up ahead of the head, we got messed up. + * + * In either case, truncate and restart at head. + */ + diff = head - old; + if (diff > md->mask / 2 || diff < 0) { + struct timeval iv; + unsigned long msecs; + + timersub(&this_read, &last_read, &iv); + msecs = iv.tv_sec*1000 + iv.tv_usec/1000; + + fprintf(stderr, "WARNING: failed to keep up with mmap data." + " Last read %lu msecs ago.\n", msecs); + + /* + * head points to a known good entry, start there. 
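+		 *
+		 * Note that 'old' and 'head' are free-running byte
+		 * offsets; they are only reduced with md->mask (the
+		 * buffer size minus 1, a power of two) when data is
+		 * copied out, which is why the copy below may have to
+		 * be split in two at the wrap-around point.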
+ */ + old = head; + } + + last_read = this_read; + + if (old != head) + samples++; + + size = head - old; + + if ((old & md->mask) + size != (head & md->mask)) { + buf = &data[old & md->mask]; + size = md->mask + 1 - (old & md->mask); + old += size; + + while (size) { + int ret = write(output, buf, size); + + if (ret < 0) + die("failed to write"); + + size -= ret; + buf += ret; + + bytes_written += ret; + } + } + + buf = &data[old & md->mask]; + size = head - old; + old += size; + + while (size) { + int ret = write(output, buf, size); + + if (ret < 0) + die("failed to write"); + + size -= ret; + buf += ret; + + bytes_written += ret; + } + + md->prev = old; +} + +static volatile int done = 0; + +static void sig_handler(int sig) +{ + done = 1; +} + +static void pid_synthesize_comm_event(pid_t pid, int full) +{ + struct comm_event comm_ev; + char filename[PATH_MAX]; + char bf[BUFSIZ]; + int fd, ret; + size_t size; + char *field, *sep; + DIR *tasks; + struct dirent dirent, *next; + + snprintf(filename, sizeof(filename), "/proc/%d/stat", pid); + + fd = open(filename, O_RDONLY); + if (fd < 0) { + fprintf(stderr, "couldn't open %s\n", filename); + exit(EXIT_FAILURE); + } + if (read(fd, bf, sizeof(bf)) < 0) { + fprintf(stderr, "couldn't read %s\n", filename); + exit(EXIT_FAILURE); + } + close(fd); + + /* 9027 (cat) R 6747 9027 6747 34816 9027 ... */ + memset(&comm_ev, 0, sizeof(comm_ev)); + field = strchr(bf, '('); + if (field == NULL) + goto out_failure; + sep = strchr(++field, ')'); + if (sep == NULL) + goto out_failure; + size = sep - field; + memcpy(comm_ev.comm, field, size++); + + comm_ev.pid = pid; + comm_ev.header.type = PERF_EVENT_COMM; + size = ALIGN(size, sizeof(uint64_t)); + comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size); + + if (!full) { + comm_ev.tid = pid; + + ret = write(output, &comm_ev, comm_ev.header.size); + if (ret < 0) { + perror("failed to write"); + exit(-1); + } + return; + } + + snprintf(filename, sizeof(filename), "/proc/%d/task", pid); + + tasks = opendir(filename); + while (!readdir_r(tasks, &dirent, &next) && next) { + char *end; + pid = strtol(dirent.d_name, &end, 10); + if (*end) + continue; + + comm_ev.tid = pid; + + ret = write(output, &comm_ev, comm_ev.header.size); + if (ret < 0) { + perror("failed to write"); + exit(-1); + } + } + closedir(tasks); + return; + +out_failure: + fprintf(stderr, "couldn't get COMM and pgid, malformed %s\n", + filename); + exit(EXIT_FAILURE); +} + +static void pid_synthesize_mmap_samples(pid_t pid) +{ + char filename[PATH_MAX]; + FILE *fp; + + snprintf(filename, sizeof(filename), "/proc/%d/maps", pid); + + fp = fopen(filename, "r"); + if (fp == NULL) { + fprintf(stderr, "couldn't open %s\n", filename); + exit(EXIT_FAILURE); + } + while (1) { + char bf[BUFSIZ], *pbf = bf; + struct mmap_event mmap_ev = { + .header.type = PERF_EVENT_MMAP, + }; + int n; + size_t size; + if (fgets(bf, sizeof(bf), fp) == NULL) + break; + + /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ + n = hex2u64(pbf, &mmap_ev.start); + if (n < 0) + continue; + pbf += n + 1; + n = hex2u64(pbf, &mmap_ev.len); + if (n < 0) + continue; + pbf += n + 3; + if (*pbf == 'x') { /* vm_exec */ + char *execname = strrchr(bf, ' '); + + if (execname == NULL || execname[1] != '/') + continue; + + execname += 1; + size = strlen(execname); + execname[size - 1] = '\0'; /* Remove \n */ + memcpy(mmap_ev.filename, execname, size); + size = ALIGN(size, sizeof(uint64_t)); + mmap_ev.len -= mmap_ev.start; + mmap_ev.header.size = (sizeof(mmap_ev) - + 
(sizeof(mmap_ev.filename) - size)); + mmap_ev.pid = pid; + mmap_ev.tid = pid; + + if (write(output, &mmap_ev, mmap_ev.header.size) < 0) { + perror("failed to write"); + exit(-1); + } + } + } + + fclose(fp); +} + +static void synthesize_samples(void) +{ + DIR *proc; + struct dirent dirent, *next; + + proc = opendir("/proc"); + + while (!readdir_r(proc, &dirent, &next) && next) { + char *end; + pid_t pid; + + pid = strtol(dirent.d_name, &end, 10); + if (*end) /* only interested in proper numerical dirents */ + continue; + + pid_synthesize_comm_event(pid, 1); + pid_synthesize_mmap_samples(pid); + } + + closedir(proc); +} + +static int group_fd; + +static void create_counter(int counter, int cpu, pid_t pid) +{ + struct perf_counter_attr *attr = attrs + counter; + int track = 1; + + attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD; + if (freq) { + attr->freq = 1; + attr->sample_freq = freq; + } + attr->mmap = track; + attr->comm = track; + attr->inherit = (cpu < 0) && inherit; + + track = 0; /* only the first counter needs these */ + + fd[nr_cpu][counter] = sys_perf_counter_open(attr, pid, cpu, group_fd, 0); + + if (fd[nr_cpu][counter] < 0) { + int err = errno; + + error("syscall returned with %d (%s)\n", + fd[nr_cpu][counter], strerror(err)); + if (err == EPERM) + printf("Are you root?\n"); + exit(-1); + } + assert(fd[nr_cpu][counter] >= 0); + fcntl(fd[nr_cpu][counter], F_SETFL, O_NONBLOCK); + + /* + * First counter acts as the group leader: + */ + if (group && group_fd == -1) + group_fd = fd[nr_cpu][counter]; + + event_array[nr_poll].fd = fd[nr_cpu][counter]; + event_array[nr_poll].events = POLLIN; + nr_poll++; + + mmap_array[nr_cpu][counter].counter = counter; + mmap_array[nr_cpu][counter].prev = 0; + mmap_array[nr_cpu][counter].mask = mmap_pages*page_size - 1; + mmap_array[nr_cpu][counter].base = mmap(NULL, (mmap_pages+1)*page_size, + PROT_READ, MAP_SHARED, fd[nr_cpu][counter], 0); + if (mmap_array[nr_cpu][counter].base == MAP_FAILED) { + error("failed to mmap with %d (%s)\n", errno, strerror(errno)); + exit(-1); + } +} + +static void open_counters(int cpu, pid_t pid) +{ + int counter; + + if (pid > 0) { + pid_synthesize_comm_event(pid, 0); + pid_synthesize_mmap_samples(pid); + } + + group_fd = -1; + for (counter = 0; counter < nr_counters; counter++) + create_counter(counter, cpu, pid); + + nr_cpu++; +} + +static int __cmd_record(int argc, const char **argv) +{ + int i, counter; + struct stat st; + pid_t pid; + int flags; + int ret; + + page_size = sysconf(_SC_PAGE_SIZE); + nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); + assert(nr_cpus <= MAX_NR_CPUS); + assert(nr_cpus >= 0); + + if (!stat(output_name, &st) && !force && !append_file) { + fprintf(stderr, "Error, output file %s exists, use -A to append or -f to overwrite.\n", + output_name); + exit(-1); + } + + flags = O_CREAT|O_RDWR; + if (append_file) + flags |= O_APPEND; + else + flags |= O_TRUNC; + + output = open(output_name, flags, S_IRUSR|S_IWUSR); + if (output < 0) { + perror("failed to create output file"); + exit(-1); + } + + if (!system_wide) { + open_counters(-1, target_pid != -1 ? 
target_pid : getpid()); + } else for (i = 0; i < nr_cpus; i++) + open_counters(i, target_pid); + + signal(SIGCHLD, sig_handler); + signal(SIGINT, sig_handler); + + if (target_pid == -1 && argc) { + pid = fork(); + if (pid < 0) + perror("failed to fork"); + + if (!pid) { + if (execvp(argv[0], (char **)argv)) { + perror(argv[0]); + exit(-1); + } + } + } + + if (realtime_prio) { + struct sched_param param; + + param.sched_priority = realtime_prio; + if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { + printf("Could not set realtime priority.\n"); + exit(-1); + } + } + + if (system_wide) + synthesize_samples(); + + while (!done) { + int hits = samples; + + for (i = 0; i < nr_cpu; i++) { + for (counter = 0; counter < nr_counters; counter++) + mmap_read(&mmap_array[i][counter]); + } + + if (hits == samples) + ret = poll(event_array, nr_poll, 100); + } + + /* + * Approximate RIP event size: 24 bytes. + */ + fprintf(stderr, + "[ perf record: Captured and wrote %.3f MB %s (~%lld samples) ]\n", + (double)bytes_written / 1024.0 / 1024.0, + output_name, + bytes_written / 24); + + return 0; +} + +static const char * const record_usage[] = { + "perf record [] []", + "perf record [] -- []", + NULL +}; + +static const struct option options[] = { + OPT_CALLBACK('e', "event", NULL, "event", + "event selector. use 'perf list' to list available events", + parse_events), + OPT_INTEGER('p', "pid", &target_pid, + "record events on existing pid"), + OPT_INTEGER('r', "realtime", &realtime_prio, + "collect data with this RT SCHED_FIFO priority"), + OPT_BOOLEAN('a', "all-cpus", &system_wide, + "system-wide collection from all CPUs"), + OPT_BOOLEAN('A', "append", &append_file, + "append to the output file to do incremental profiling"), + OPT_BOOLEAN('f', "force", &force, + "overwrite existing data file"), + OPT_LONG('c', "count", &default_interval, + "event period to sample"), + OPT_STRING('o', "output", &output_name, "file", + "output file name"), + OPT_BOOLEAN('i', "inherit", &inherit, + "child tasks inherit counters"), + OPT_INTEGER('F', "freq", &freq, + "profile at this frequency"), + OPT_INTEGER('m', "mmap-pages", &mmap_pages, + "number of mmap data pages"), + OPT_END() +}; + +int cmd_record(int argc, const char **argv, const char *prefix) +{ + int counter; + + argc = parse_options(argc, argv, options, record_usage, 0); + if (!argc && target_pid == -1 && !system_wide) + usage_with_options(record_usage, options); + + if (!nr_counters) + nr_counters = 1; + + for (counter = 0; counter < nr_counters; counter++) { + if (attrs[counter].sample_period) + continue; + + attrs[counter].sample_period = default_interval; + } + + return __cmd_record(argc, argv); +} diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c new file mode 100644 index 00000000000..242e09ff365 --- /dev/null +++ b/tools/perf/builtin-report.c @@ -0,0 +1,1291 @@ +/* + * builtin-report.c + * + * Builtin report command: Analyze the perf.data input file, + * look up and read DSOs and symbol information and display + * a histogram of results, along various sorting keys. 
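+ *
+ * For example:
+ *
+ *   $ perf report -i perf.data --sort comm,dso,symbol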
+ */ +#include "builtin.h" + +#include "util/util.h" + +#include "util/color.h" +#include "util/list.h" +#include "util/cache.h" +#include "util/rbtree.h" +#include "util/symbol.h" +#include "util/string.h" + +#include "perf.h" + +#include "util/parse-options.h" +#include "util/parse-events.h" + +#define SHOW_KERNEL 1 +#define SHOW_USER 2 +#define SHOW_HV 4 + +static char const *input_name = "perf.data"; +static char *vmlinux = NULL; + +static char default_sort_order[] = "comm,dso"; +static char *sort_order = default_sort_order; + +static int input; +static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; + +static int dump_trace = 0; +#define dprintf(x...) do { if (dump_trace) printf(x); } while (0) + +static int verbose; +static int full_paths; + +static unsigned long page_size; +static unsigned long mmap_window = 32; + +struct ip_event { + struct perf_event_header header; + __u64 ip; + __u32 pid, tid; +}; + +struct mmap_event { + struct perf_event_header header; + __u32 pid, tid; + __u64 start; + __u64 len; + __u64 pgoff; + char filename[PATH_MAX]; +}; + +struct comm_event { + struct perf_event_header header; + __u32 pid, tid; + char comm[16]; +}; + +struct fork_event { + struct perf_event_header header; + __u32 pid, ppid; +}; + +struct period_event { + struct perf_event_header header; + __u64 time; + __u64 id; + __u64 sample_period; +}; + +typedef union event_union { + struct perf_event_header header; + struct ip_event ip; + struct mmap_event mmap; + struct comm_event comm; + struct fork_event fork; + struct period_event period; +} event_t; + +static LIST_HEAD(dsos); +static struct dso *kernel_dso; +static struct dso *vdso; + +static void dsos__add(struct dso *dso) +{ + list_add_tail(&dso->node, &dsos); +} + +static struct dso *dsos__find(const char *name) +{ + struct dso *pos; + + list_for_each_entry(pos, &dsos, node) + if (strcmp(pos->name, name) == 0) + return pos; + return NULL; +} + +static struct dso *dsos__findnew(const char *name) +{ + struct dso *dso = dsos__find(name); + int nr; + + if (dso) + return dso; + + dso = dso__new(name, 0); + if (!dso) + goto out_delete_dso; + + nr = dso__load(dso, NULL, verbose); + if (nr < 0) { + if (verbose) + fprintf(stderr, "Failed to open: %s\n", name); + goto out_delete_dso; + } + if (!nr && verbose) { + fprintf(stderr, + "No symbols found in: %s, maybe install a debug package?\n", + name); + } + + dsos__add(dso); + + return dso; + +out_delete_dso: + dso__delete(dso); + return NULL; +} + +static void dsos__fprintf(FILE *fp) +{ + struct dso *pos; + + list_for_each_entry(pos, &dsos, node) + dso__fprintf(pos, fp); +} + +static struct symbol *vdso__find_symbol(struct dso *dso, uint64_t ip) +{ + return dso__find_symbol(kernel_dso, ip); +} + +static int load_kernel(void) +{ + int err; + + kernel_dso = dso__new("[kernel]", 0); + if (!kernel_dso) + return -1; + + err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose); + if (err) { + dso__delete(kernel_dso); + kernel_dso = NULL; + } else + dsos__add(kernel_dso); + + vdso = dso__new("[vdso]", 0); + if (!vdso) + return -1; + + vdso->find_symbol = vdso__find_symbol; + + dsos__add(vdso); + + return err; +} + +static char __cwd[PATH_MAX]; +static char *cwd = __cwd; +static int cwdlen; + +static int strcommon(const char *pathname) +{ + int n = 0; + + while (pathname[n] == cwd[n] && n < cwdlen) + ++n; + + return n; +} + +struct map { + struct list_head node; + uint64_t start; + uint64_t end; + uint64_t pgoff; + uint64_t (*map_ip)(struct map *, uint64_t); + struct dso *dso; +}; + +static uint64_t 
map__map_ip(struct map *map, uint64_t ip) +{ + return ip - map->start + map->pgoff; +} + +static uint64_t vdso__map_ip(struct map *map, uint64_t ip) +{ + return ip; +} + +static struct map *map__new(struct mmap_event *event) +{ + struct map *self = malloc(sizeof(*self)); + + if (self != NULL) { + const char *filename = event->filename; + char newfilename[PATH_MAX]; + + if (cwd) { + int n = strcommon(filename); + + if (n == cwdlen) { + snprintf(newfilename, sizeof(newfilename), + ".%s", filename + n); + filename = newfilename; + } + } + + self->start = event->start; + self->end = event->start + event->len; + self->pgoff = event->pgoff; + + self->dso = dsos__findnew(filename); + if (self->dso == NULL) + goto out_delete; + + if (self->dso == vdso) + self->map_ip = vdso__map_ip; + else + self->map_ip = map__map_ip; + } + return self; +out_delete: + free(self); + return NULL; +} + +static struct map *map__clone(struct map *self) +{ + struct map *map = malloc(sizeof(*self)); + + if (!map) + return NULL; + + memcpy(map, self, sizeof(*self)); + + return map; +} + +static int map__overlap(struct map *l, struct map *r) +{ + if (l->start > r->start) { + struct map *t = l; + l = r; + r = t; + } + + if (l->end > r->start) + return 1; + + return 0; +} + +static size_t map__fprintf(struct map *self, FILE *fp) +{ + return fprintf(fp, " %"PRIx64"-%"PRIx64" %"PRIx64" %s\n", + self->start, self->end, self->pgoff, self->dso->name); +} + + +struct thread { + struct rb_node rb_node; + struct list_head maps; + pid_t pid; + char *comm; +}; + +static struct thread *thread__new(pid_t pid) +{ + struct thread *self = malloc(sizeof(*self)); + + if (self != NULL) { + self->pid = pid; + self->comm = malloc(32); + if (self->comm) + snprintf(self->comm, 32, ":%d", self->pid); + INIT_LIST_HEAD(&self->maps); + } + + return self; +} + +static int thread__set_comm(struct thread *self, const char *comm) +{ + if (self->comm) + free(self->comm); + self->comm = strdup(comm); + return self->comm ? 
0 : -ENOMEM;
+}
+
+static size_t thread__fprintf(struct thread *self, FILE *fp)
+{
+	struct map *pos;
+	size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
+
+	list_for_each_entry(pos, &self->maps, node)
+		ret += map__fprintf(pos, fp);
+
+	return ret;
+}
+
+
+static struct rb_root threads;
+static struct thread *last_match;
+
+static struct thread *threads__findnew(pid_t pid)
+{
+	struct rb_node **p = &threads.rb_node;
+	struct rb_node *parent = NULL;
+	struct thread *th;
+
+	/*
+	 * Front-end cache - PID lookups come in blocks,
+	 * so most of the time we don't have to look up
+	 * the full rbtree:
+	 */
+	if (last_match && last_match->pid == pid)
+		return last_match;
+
+	while (*p != NULL) {
+		parent = *p;
+		th = rb_entry(parent, struct thread, rb_node);
+
+		if (th->pid == pid) {
+			last_match = th;
+			return th;
+		}
+
+		if (pid < th->pid)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
+	}
+
+	th = thread__new(pid);
+	if (th != NULL) {
+		rb_link_node(&th->rb_node, parent, p);
+		rb_insert_color(&th->rb_node, &threads);
+		last_match = th;
+	}
+
+	return th;
+}
+
+static void thread__insert_map(struct thread *self, struct map *map)
+{
+	struct map *pos, *tmp;
+
+	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
+		if (map__overlap(pos, map)) {
+			list_del_init(&pos->node);
+			/* XXX leaks dsos */
+			free(pos);
+		}
+	}
+
+	list_add_tail(&map->node, &self->maps);
+}
+
+static int thread__fork(struct thread *self, struct thread *parent)
+{
+	struct map *map;
+
+	if (self->comm)
+		free(self->comm);
+	self->comm = strdup(parent->comm);
+	if (!self->comm)
+		return -ENOMEM;
+
+	list_for_each_entry(map, &parent->maps, node) {
+		struct map *new = map__clone(map);
+		if (!new)
+			return -ENOMEM;
+		thread__insert_map(self, new);
+	}
+
+	return 0;
+}
+
+static struct map *thread__find_map(struct thread *self, uint64_t ip)
+{
+	struct map *pos;
+
+	if (self == NULL)
+		return NULL;
+
+	list_for_each_entry(pos, &self->maps, node)
+		if (ip >= pos->start && ip <= pos->end)
+			return pos;
+
+	return NULL;
+}
+
+static size_t threads__fprintf(FILE *fp)
+{
+	size_t ret = 0;
+	struct rb_node *nd;
+
+	for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
+		struct thread *pos = rb_entry(nd, struct thread, rb_node);
+
+		ret += thread__fprintf(pos, fp);
+	}
+
+	return ret;
+}
+
+/*
+ * histogram, sorted on item, collects counts
+ */
+
+static struct rb_root hist;
+
+struct hist_entry {
+	struct rb_node	rb_node;
+
+	struct thread	*thread;
+	struct map	*map;
+	struct dso	*dso;
+	struct symbol	*sym;
+	uint64_t	ip;
+	char		level;
+
+	uint32_t	count;
+};
+
+/*
+ * configurable sorting bits
+ */
+
+struct sort_entry {
+	struct list_head list;
+
+	char *header;
+
+	int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
+	int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
+	size_t	(*print)(FILE *fp, struct hist_entry *);
+};
+
+/* --sort pid */
+
+static int64_t
+sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+	return right->thread->pid - left->thread->pid;
+}
+
+static size_t
+sort__thread_print(FILE *fp, struct hist_entry *self)
+{
+	return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid);
+}
+
+static struct sort_entry sort_thread = {
+	.header = " Command: Pid",
+	.cmp	= sort__thread_cmp,
+	.print	= sort__thread_print,
+};
+
+/* --sort comm */
+
+static int64_t
+sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+	return right->thread->pid - left->thread->pid;
+}
+
+static int64_t
+sort__comm_collapse(struct hist_entry *left, struct 
hist_entry *right) +{ + char *comm_l = left->thread->comm; + char *comm_r = right->thread->comm; + + if (!comm_l || !comm_r) { + if (!comm_l && !comm_r) + return 0; + else if (!comm_l) + return -1; + else + return 1; + } + + return strcmp(comm_l, comm_r); +} + +static size_t +sort__comm_print(FILE *fp, struct hist_entry *self) +{ + return fprintf(fp, "%16s", self->thread->comm); +} + +static struct sort_entry sort_comm = { + .header = " Command", + .cmp = sort__comm_cmp, + .collapse = sort__comm_collapse, + .print = sort__comm_print, +}; + +/* --sort dso */ + +static int64_t +sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct dso *dso_l = left->dso; + struct dso *dso_r = right->dso; + + if (!dso_l || !dso_r) { + if (!dso_l && !dso_r) + return 0; + else if (!dso_l) + return -1; + else + return 1; + } + + return strcmp(dso_l->name, dso_r->name); +} + +static size_t +sort__dso_print(FILE *fp, struct hist_entry *self) +{ + if (self->dso) + return fprintf(fp, "%-25s", self->dso->name); + + return fprintf(fp, "%016llx ", (__u64)self->ip); +} + +static struct sort_entry sort_dso = { + .header = "Shared Object ", + .cmp = sort__dso_cmp, + .print = sort__dso_print, +}; + +/* --sort symbol */ + +static int64_t +sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) +{ + uint64_t ip_l, ip_r; + + if (left->sym == right->sym) + return 0; + + ip_l = left->sym ? left->sym->start : left->ip; + ip_r = right->sym ? right->sym->start : right->ip; + + return (int64_t)(ip_r - ip_l); +} + +static size_t +sort__sym_print(FILE *fp, struct hist_entry *self) +{ + size_t ret = 0; + + if (verbose) + ret += fprintf(fp, "%#018llx ", (__u64)self->ip); + + if (self->sym) { + ret += fprintf(fp, "[%c] %s", + self->dso == kernel_dso ? 'k' : '.', self->sym->name); + } else { + ret += fprintf(fp, "%#016llx", (__u64)self->ip); + } + + return ret; +} + +static struct sort_entry sort_sym = { + .header = "Symbol", + .cmp = sort__sym_cmp, + .print = sort__sym_print, +}; + +static int sort__need_collapse = 0; + +struct sort_dimension { + char *name; + struct sort_entry *entry; + int taken; +}; + +static struct sort_dimension sort_dimensions[] = { + { .name = "pid", .entry = &sort_thread, }, + { .name = "comm", .entry = &sort_comm, }, + { .name = "dso", .entry = &sort_dso, }, + { .name = "symbol", .entry = &sort_sym, }, +}; + +static LIST_HEAD(hist_entry__sort_list); + +static int sort_dimension__add(char *tok) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { + struct sort_dimension *sd = &sort_dimensions[i]; + + if (sd->taken) + continue; + + if (strncasecmp(tok, sd->name, strlen(tok))) + continue; + + if (sd->entry->collapse) + sort__need_collapse = 1; + + list_add_tail(&sd->entry->list, &hist_entry__sort_list); + sd->taken = 1; + + return 0; + } + + return -ESRCH; +} + +static int64_t +hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct sort_entry *se; + int64_t cmp = 0; + + list_for_each_entry(se, &hist_entry__sort_list, list) { + cmp = se->cmp(left, right); + if (cmp) + break; + } + + return cmp; +} + +static int64_t +hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) +{ + struct sort_entry *se; + int64_t cmp = 0; + + list_for_each_entry(se, &hist_entry__sort_list, list) { + int64_t (*f)(struct hist_entry *, struct hist_entry *); + + f = se->collapse ?: se->cmp; + + cmp = f(left, right); + if (cmp) + break; + } + + return cmp; +} + +static size_t +hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples) 
+{ + struct sort_entry *se; + size_t ret; + + if (total_samples) { + double percent = self->count * 100.0 / total_samples; + char *color = PERF_COLOR_NORMAL; + + /* + * We color high-overhead entries in red, low-overhead + * entries in green - and keep the middle ground normal: + */ + if (percent >= 5.0) + color = PERF_COLOR_RED; + if (percent < 0.5) + color = PERF_COLOR_GREEN; + + ret = color_fprintf(fp, color, " %6.2f%%", + (self->count * 100.0) / total_samples); + } else + ret = fprintf(fp, "%12d ", self->count); + + list_for_each_entry(se, &hist_entry__sort_list, list) { + fprintf(fp, " "); + ret += se->print(fp, self); + } + + ret += fprintf(fp, "\n"); + + return ret; +} + +/* + * collect histogram counts + */ + +static int +hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, + struct symbol *sym, uint64_t ip, char level) +{ + struct rb_node **p = &hist.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *he; + struct hist_entry entry = { + .thread = thread, + .map = map, + .dso = dso, + .sym = sym, + .ip = ip, + .level = level, + .count = 1, + }; + int cmp; + + while (*p != NULL) { + parent = *p; + he = rb_entry(parent, struct hist_entry, rb_node); + + cmp = hist_entry__cmp(&entry, he); + + if (!cmp) { + he->count++; + return 0; + } + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + he = malloc(sizeof(*he)); + if (!he) + return -ENOMEM; + *he = entry; + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &hist); + + return 0; +} + +static void hist_entry__free(struct hist_entry *he) +{ + free(he); +} + +/* + * collapse the histogram + */ + +static struct rb_root collapse_hists; + +static void collapse__insert_entry(struct hist_entry *he) +{ + struct rb_node **p = &collapse_hists.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *iter; + int64_t cmp; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct hist_entry, rb_node); + + cmp = hist_entry__collapse(iter, he); + + if (!cmp) { + iter->count += he->count; + hist_entry__free(he); + return; + } + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &collapse_hists); +} + +static void collapse__resort(void) +{ + struct rb_node *next; + struct hist_entry *n; + + if (!sort__need_collapse) + return; + + next = rb_first(&hist); + while (next) { + n = rb_entry(next, struct hist_entry, rb_node); + next = rb_next(&n->rb_node); + + rb_erase(&n->rb_node, &hist); + collapse__insert_entry(n); + } +} + +/* + * reverse the map, sort on count. 
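+ *
+ * The entries were aggregated in a tree keyed by the sort
+ * criteria; for output they are moved into a second tree keyed
+ * by hit count, so that an in-order walk prints the hottest
+ * entries first.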
+ */ + +static struct rb_root output_hists; + +static void output__insert_entry(struct hist_entry *he) +{ + struct rb_node **p = &output_hists.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *iter; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct hist_entry, rb_node); + + if (he->count > iter->count) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &output_hists); +} + +static void output__resort(void) +{ + struct rb_node *next; + struct hist_entry *n; + struct rb_root *tree = &hist; + + if (sort__need_collapse) + tree = &collapse_hists; + + next = rb_first(tree); + + while (next) { + n = rb_entry(next, struct hist_entry, rb_node); + next = rb_next(&n->rb_node); + + rb_erase(&n->rb_node, tree); + output__insert_entry(n); + } +} + +static size_t output__fprintf(FILE *fp, uint64_t total_samples) +{ + struct hist_entry *pos; + struct sort_entry *se; + struct rb_node *nd; + size_t ret = 0; + + fprintf(fp, "\n"); + fprintf(fp, "#\n"); + fprintf(fp, "# (%Ld samples)\n", (__u64)total_samples); + fprintf(fp, "#\n"); + + fprintf(fp, "# Overhead"); + list_for_each_entry(se, &hist_entry__sort_list, list) + fprintf(fp, " %s", se->header); + fprintf(fp, "\n"); + + fprintf(fp, "# ........"); + list_for_each_entry(se, &hist_entry__sort_list, list) { + int i; + + fprintf(fp, " "); + for (i = 0; i < strlen(se->header); i++) + fprintf(fp, "."); + } + fprintf(fp, "\n"); + + fprintf(fp, "#\n"); + + for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) { + pos = rb_entry(nd, struct hist_entry, rb_node); + ret += hist_entry__fprintf(fp, pos, total_samples); + } + + if (!strcmp(sort_order, default_sort_order)) { + fprintf(fp, "#\n"); + fprintf(fp, "# (For more details, try: perf report --sort comm,dso,symbol)\n"); + fprintf(fp, "#\n"); + } + fprintf(fp, "\n"); + + return ret; +} + +static void register_idle_thread(void) +{ + struct thread *thread = threads__findnew(0); + + if (thread == NULL || + thread__set_comm(thread, "[idle]")) { + fprintf(stderr, "problem inserting idle task.\n"); + exit(-1); + } +} + +static unsigned long total = 0, + total_mmap = 0, + total_comm = 0, + total_fork = 0, + total_unknown = 0; + +static int +process_overflow_event(event_t *event, unsigned long offset, unsigned long head) +{ + char level; + int show = 0; + struct dso *dso = NULL; + struct thread *thread = threads__findnew(event->ip.pid); + uint64_t ip = event->ip.ip; + struct map *map = NULL; + + dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->header.misc, + event->ip.pid, + (void *)(long)ip); + + dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid); + + if (thread == NULL) { + fprintf(stderr, "problem processing %d event, skipping it.\n", + event->header.type); + return -1; + } + + if (event->header.misc & PERF_EVENT_MISC_KERNEL) { + show = SHOW_KERNEL; + level = 'k'; + + dso = kernel_dso; + + dprintf(" ...... dso: %s\n", dso->name); + + } else if (event->header.misc & PERF_EVENT_MISC_USER) { + + show = SHOW_USER; + level = '.'; + + map = thread__find_map(thread, ip); + if (map != NULL) { + ip = map->map_ip(map, ip); + dso = map->dso; + } else { + /* + * If this is outside of all known maps, + * and is a negative address, try to look it + * up in the kernel dso, as it might be a + * vsyscall (which executes in user-mode): + */ + if ((long long)ip < 0) + dso = kernel_dso; + } + dprintf(" ...... dso: %s\n", dso ? 
dso->name : ""); + + } else { + show = SHOW_HV; + level = 'H'; + dprintf(" ...... dso: [hypervisor]\n"); + } + + if (show & show_mask) { + struct symbol *sym = NULL; + + if (dso) + sym = dso->find_symbol(dso, ip); + + if (hist_entry__add(thread, map, dso, sym, ip, level)) { + fprintf(stderr, + "problem incrementing symbol count, skipping event\n"); + return -1; + } + } + total++; + + return 0; +} + +static int +process_mmap_event(event_t *event, unsigned long offset, unsigned long head) +{ + struct thread *thread = threads__findnew(event->mmap.pid); + struct map *map = map__new(&event->mmap); + + dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->mmap.pid, + (void *)(long)event->mmap.start, + (void *)(long)event->mmap.len, + (void *)(long)event->mmap.pgoff, + event->mmap.filename); + + if (thread == NULL || map == NULL) { + dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n"); + return 0; + } + + thread__insert_map(thread, map); + total_mmap++; + + return 0; +} + +static int +process_comm_event(event_t *event, unsigned long offset, unsigned long head) +{ + struct thread *thread = threads__findnew(event->comm.pid); + + dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->comm.comm, event->comm.pid); + + if (thread == NULL || + thread__set_comm(thread, event->comm.comm)) { + dprintf("problem processing PERF_EVENT_COMM, skipping event.\n"); + return -1; + } + total_comm++; + + return 0; +} + +static int +process_fork_event(event_t *event, unsigned long offset, unsigned long head) +{ + struct thread *thread = threads__findnew(event->fork.pid); + struct thread *parent = threads__findnew(event->fork.ppid); + + dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->fork.pid, event->fork.ppid); + + if (!thread || !parent || thread__fork(thread, parent)) { + dprintf("problem processing PERF_EVENT_FORK, skipping event.\n"); + return -1; + } + total_fork++; + + return 0; +} + +static int +process_period_event(event_t *event, unsigned long offset, unsigned long head) +{ + dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->period.time, + event->period.id, + event->period.sample_period); + + return 0; +} + +static int +process_event(event_t *event, unsigned long offset, unsigned long head) +{ + if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) + return process_overflow_event(event, offset, head); + + switch (event->header.type) { + case PERF_EVENT_MMAP: + return process_mmap_event(event, offset, head); + + case PERF_EVENT_COMM: + return process_comm_event(event, offset, head); + + case PERF_EVENT_FORK: + return process_fork_event(event, offset, head); + + case PERF_EVENT_PERIOD: + return process_period_event(event, offset, head); + /* + * We dont process them right now but they are fine: + */ + + case PERF_EVENT_THROTTLE: + case PERF_EVENT_UNTHROTTLE: + return 0; + + default: + return -1; + } + + return 0; +} + +static int __cmd_report(void) +{ + int ret, rc = EXIT_FAILURE; + unsigned long offset = 0; + unsigned long head = 0; + struct stat stat; + event_t *event; + uint32_t size; + char *buf; + + register_idle_thread(); + + input = open(input_name, O_RDONLY); + if (input < 0) { + perror("failed to open file"); + exit(-1); + } + + ret = fstat(input, &stat); + if (ret < 0) { + perror("failed 
to stat file"); + exit(-1); + } + + if (!stat.st_size) { + fprintf(stderr, "zero-sized file, nothing to do!\n"); + exit(0); + } + + if (load_kernel() < 0) { + perror("failed to load kernel symbols"); + return EXIT_FAILURE; + } + + if (!full_paths) { + if (getcwd(__cwd, sizeof(__cwd)) == NULL) { + perror("failed to get the current directory"); + return EXIT_FAILURE; + } + cwdlen = strlen(cwd); + } else { + cwd = NULL; + cwdlen = 0; + } +remap: + buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, + MAP_SHARED, input, offset); + if (buf == MAP_FAILED) { + perror("failed to mmap file"); + exit(-1); + } + +more: + event = (event_t *)(buf + head); + + size = event->header.size; + if (!size) + size = 8; + + if (head + event->header.size >= page_size * mmap_window) { + unsigned long shift = page_size * (head / page_size); + int ret; + + ret = munmap(buf, page_size * mmap_window); + assert(ret == 0); + + offset += shift; + head -= shift; + goto remap; + } + + size = event->header.size; + + dprintf("%p [%p]: event: %d\n", + (void *)(offset + head), + (void *)(long)event->header.size, + event->header.type); + + if (!size || process_event(event, offset, head) < 0) { + + dprintf("%p [%p]: skipping unknown header type: %d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->header.type); + + total_unknown++; + + /* + * assume we lost track of the stream, check alignment, and + * increment a single u64 in the hope to catch on again 'soon'. + */ + + if (unlikely(head & 7)) + head &= ~7ULL; + + size = 8; + } + + head += size; + + if (offset + head < stat.st_size) + goto more; + + rc = EXIT_SUCCESS; + close(input); + + dprintf(" IP events: %10ld\n", total); + dprintf(" mmap events: %10ld\n", total_mmap); + dprintf(" comm events: %10ld\n", total_comm); + dprintf(" fork events: %10ld\n", total_fork); + dprintf(" unknown events: %10ld\n", total_unknown); + + if (dump_trace) + return 0; + + if (verbose >= 3) + threads__fprintf(stdout); + + if (verbose >= 2) + dsos__fprintf(stdout); + + collapse__resort(); + output__resort(); + output__fprintf(stdout, total); + + return rc; +} + +static const char * const report_usage[] = { + "perf report [] ", + NULL +}; + +static const struct option options[] = { + OPT_STRING('i', "input", &input_name, "file", + "input file name"), + OPT_BOOLEAN('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, + "dump raw trace in ASCII"), + OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"), + OPT_STRING('s', "sort", &sort_order, "key[,key2...]", + "sort by key(s): pid, comm, dso, symbol. Default: pid,symbol"), + OPT_BOOLEAN('P', "full-paths", &full_paths, + "Don't shorten the pathnames taking into account the cwd"), + OPT_END() +}; + +static void setup_sorting(void) +{ + char *tmp, *tok, *str = strdup(sort_order); + + for (tok = strtok_r(str, ", ", &tmp); + tok; tok = strtok_r(NULL, ", ", &tmp)) { + if (sort_dimension__add(tok) < 0) { + error("Unknown --sort key: `%s'", tok); + usage_with_options(report_usage, options); + } + } + + free(str); +} + +int cmd_report(int argc, const char **argv, const char *prefix) +{ + symbol__init(); + + page_size = getpagesize(); + + argc = parse_options(argc, argv, options, report_usage, 0); + + setup_sorting(); + + /* + * Any (unrecognized) arguments left? 
+ */ + if (argc) + usage_with_options(report_usage, options); + + setup_pager(); + + return __cmd_report(); +} diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c new file mode 100644 index 00000000000..2cbf5a18958 --- /dev/null +++ b/tools/perf/builtin-stat.c @@ -0,0 +1,339 @@ +/* + * builtin-stat.c + * + * Builtin stat command: Give a precise performance counters summary + * overview about any workload, CPU or specific PID. + * + * Sample output: + + $ perf stat ~/hackbench 10 + Time: 0.104 + + Performance counter stats for '/home/mingo/hackbench': + + 1255.538611 task clock ticks # 10.143 CPU utilization factor + 54011 context switches # 0.043 M/sec + 385 CPU migrations # 0.000 M/sec + 17755 pagefaults # 0.014 M/sec + 3808323185 CPU cycles # 3033.219 M/sec + 1575111190 instructions # 1254.530 M/sec + 17367895 cache references # 13.833 M/sec + 7674421 cache misses # 6.112 M/sec + + Wall-clock time elapsed: 123.786620 msecs + + * + * Copyright (C) 2008, Red Hat Inc, Ingo Molnar + * + * Improvements and fixes by: + * + * Arjan van de Ven + * Yanmin Zhang + * Wu Fengguang + * Mike Galbraith + * Paul Mackerras + * + * Released under the GPL v2. (and only v2, not any later version) + */ + +#include "perf.h" +#include "builtin.h" +#include "util/util.h" +#include "util/parse-options.h" +#include "util/parse-events.h" + +#include + +static struct perf_counter_attr default_attrs[MAX_COUNTERS] = { + + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_TASK_CLOCK }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_CONTEXT_SWITCHES }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_CPU_MIGRATIONS }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_PAGE_FAULTS }, + + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CPU_CYCLES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_INSTRUCTIONS }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CACHE_REFERENCES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CACHE_MISSES }, +}; + +static int system_wide = 0; +static int inherit = 1; + +static int fd[MAX_NR_CPUS][MAX_COUNTERS]; + +static int target_pid = -1; +static int nr_cpus = 0; +static unsigned int page_size; + +static int scale = 1; + +static const unsigned int default_count[] = { + 1000000, + 1000000, + 10000, + 10000, + 1000000, + 10000, +}; + +static __u64 event_res[MAX_COUNTERS][3]; +static __u64 event_scaled[MAX_COUNTERS]; + +static __u64 runtime_nsecs; +static __u64 walltime_nsecs; + +static void create_perfstat_counter(int counter) +{ + struct perf_counter_attr *attr = attrs + counter; + + if (scale) + attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | + PERF_FORMAT_TOTAL_TIME_RUNNING; + + if (system_wide) { + int cpu; + for (cpu = 0; cpu < nr_cpus; cpu ++) { + fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0); + if (fd[cpu][counter] < 0) { + printf("perfstat error: syscall returned with %d (%s)\n", + fd[cpu][counter], strerror(errno)); + exit(-1); + } + } + } else { + attr->inherit = inherit; + attr->disabled = 1; + + fd[0][counter] = sys_perf_counter_open(attr, 0, -1, -1, 0); + if (fd[0][counter] < 0) { + printf("perfstat error: syscall returned with %d (%s)\n", + fd[0][counter], strerror(errno)); + exit(-1); + } + } +} + +/* + * Does the counter have nsecs as a unit? 
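+ *
+ * The software CPU-clock and task-clock events count nanoseconds
+ * directly, so print_counter() reports them as milliseconds rather
+ * than as raw event counts.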
+ */ +static inline int nsec_counter(int counter) +{ + if (attrs[counter].type != PERF_TYPE_SOFTWARE) + return 0; + + if (attrs[counter].config == PERF_COUNT_CPU_CLOCK) + return 1; + + if (attrs[counter].config == PERF_COUNT_TASK_CLOCK) + return 1; + + return 0; +} + +/* + * Read out the results of a single counter: + */ +static void read_counter(int counter) +{ + __u64 *count, single_count[3]; + ssize_t res; + int cpu, nv; + int scaled; + + count = event_res[counter]; + + count[0] = count[1] = count[2] = 0; + + nv = scale ? 3 : 1; + for (cpu = 0; cpu < nr_cpus; cpu ++) { + res = read(fd[cpu][counter], single_count, nv * sizeof(__u64)); + assert(res == nv * sizeof(__u64)); + + count[0] += single_count[0]; + if (scale) { + count[1] += single_count[1]; + count[2] += single_count[2]; + } + } + + scaled = 0; + if (scale) { + if (count[2] == 0) { + event_scaled[counter] = -1; + count[0] = 0; + return; + } + + if (count[2] < count[1]) { + event_scaled[counter] = 1; + count[0] = (unsigned long long) + ((double)count[0] * count[1] / count[2] + 0.5); + } + } + /* + * Save the full runtime - to allow normalization during printout: + */ + if (attrs[counter].type == PERF_TYPE_SOFTWARE && + attrs[counter].config == PERF_COUNT_TASK_CLOCK) + runtime_nsecs = count[0]; +} + +/* + * Print out the results of a single counter: + */ +static void print_counter(int counter) +{ + __u64 *count; + int scaled; + + count = event_res[counter]; + scaled = event_scaled[counter]; + + if (scaled == -1) { + fprintf(stderr, " %14s %-20s\n", + "", event_name(counter)); + return; + } + + if (nsec_counter(counter)) { + double msecs = (double)count[0] / 1000000; + + fprintf(stderr, " %14.6f %-20s", + msecs, event_name(counter)); + if (attrs[counter].type == PERF_TYPE_SOFTWARE && + attrs[counter].config == PERF_COUNT_TASK_CLOCK) { + + fprintf(stderr, " # %11.3f CPU utilization factor", + (double)count[0] / (double)walltime_nsecs); + } + } else { + fprintf(stderr, " %14Ld %-20s", + count[0], event_name(counter)); + if (runtime_nsecs) + fprintf(stderr, " # %11.3f M/sec", + (double)count[0]/runtime_nsecs*1000.0); + } + if (scaled) + fprintf(stderr, " (scaled from %.2f%%)", + (double) count[2] / count[1] * 100); + fprintf(stderr, "\n"); +} + +static int do_perfstat(int argc, const char **argv) +{ + unsigned long long t0, t1; + int counter; + int status; + int pid; + int i; + + if (!system_wide) + nr_cpus = 1; + + for (counter = 0; counter < nr_counters; counter++) + create_perfstat_counter(counter); + + /* + * Enable counters and exec the command: + */ + t0 = rdclock(); + prctl(PR_TASK_PERF_COUNTERS_ENABLE); + + if ((pid = fork()) < 0) + perror("failed to fork"); + + if (!pid) { + if (execvp(argv[0], (char **)argv)) { + perror(argv[0]); + exit(-1); + } + } + + while (wait(&status) >= 0) + ; + + prctl(PR_TASK_PERF_COUNTERS_DISABLE); + t1 = rdclock(); + + walltime_nsecs = t1 - t0; + + fflush(stdout); + + fprintf(stderr, "\n"); + fprintf(stderr, " Performance counter stats for \'%s", argv[0]); + + for (i = 1; i < argc; i++) + fprintf(stderr, " %s", argv[i]); + + fprintf(stderr, "\':\n"); + fprintf(stderr, "\n"); + + for (counter = 0; counter < nr_counters; counter++) + read_counter(counter); + + for (counter = 0; counter < nr_counters; counter++) + print_counter(counter); + + + fprintf(stderr, "\n"); + fprintf(stderr, " Wall-clock time elapsed: %12.6f msecs\n", + (double)(t1-t0)/1e6); + fprintf(stderr, "\n"); + + return 0; +} + +static void skip_signal(int signo) +{ +} + +static const char * const stat_usage[] = { + "perf stat [] ", + 
+	NULL
+};
+
+static const struct option options[] = {
+	OPT_CALLBACK('e', "event", NULL, "event",
+		     "event selector. use 'perf list' to list available events",
+		     parse_events),
+	OPT_BOOLEAN('i', "inherit", &inherit,
+		    "child tasks inherit counters"),
+	OPT_INTEGER('p', "pid", &target_pid,
+		    "stat events on existing pid"),
+	OPT_BOOLEAN('a', "all-cpus", &system_wide,
+		    "system-wide collection from all CPUs"),
+	OPT_BOOLEAN('S', "scale", &scale,
+		    "scale/normalize counters"),
+	OPT_END()
+};
+
+int cmd_stat(int argc, const char **argv, const char *prefix)
+{
+	page_size = sysconf(_SC_PAGE_SIZE);
+
+	memcpy(attrs, default_attrs, sizeof(attrs));
+
+	argc = parse_options(argc, argv, options, stat_usage, 0);
+	if (!argc)
+		usage_with_options(stat_usage, options);
+
+	if (!nr_counters)
+		nr_counters = 8;
+
+	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+	assert(nr_cpus <= MAX_NR_CPUS);
+	assert(nr_cpus >= 0);
+
+	/*
+	 * We don't want to block the signals - that would cause
+	 * child tasks to inherit that and Ctrl-C would not work.
+	 * What we want is for Ctrl-C to work in the exec()-ed
+	 * task, but being ignored by perf stat itself:
+	 */
+	signal(SIGINT,  skip_signal);
+	signal(SIGALRM, skip_signal);
+	signal(SIGABRT, skip_signal);
+
+	return do_perfstat(argc, argv);
+}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
new file mode 100644
index 00000000000..f2e7312f85c
--- /dev/null
+++ b/tools/perf/builtin-top.c
@@ -0,0 +1,692 @@
+/*
+ * builtin-top.c
+ *
+ * Builtin top command: Display a continuously updated profile of
+ * any workload, CPU or specific PID.
+ *
+ * Copyright (C) 2008, Red Hat Inc, Ingo Molnar
+ *
+ * Improvements and fixes by:
+ *
+ *   Arjan van de Ven
+ *   Yanmin Zhang
+ *   Wu Fengguang
+ *   Mike Galbraith
+ *   Paul Mackerras
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+#include "builtin.h"
+
+#include "perf.h"
+
+#include "util/symbol.h"
+#include "util/color.h"
+#include "util/util.h"
+#include "util/rbtree.h"
+#include "util/parse-options.h"
+#include "util/parse-events.h"
+
+#include <assert.h>
+#include <fcntl.h>
+
+#include <stdio.h>
+
+#include <errno.h>
+#include <time.h>
+#include <sched.h>
+#include <pthread.h>
+
+#include <sys/syscall.h>
+#include <sys/ioctl.h>
+#include <poll.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <sys/uio.h>
+#include <sys/mman.h>
+
+#include <linux/unistd.h>
+#include <linux/types.h>
+
+static int			fd[MAX_NR_CPUS][MAX_COUNTERS];
+
+static int			system_wide		= 0;
+
+static int			default_interval	= 100000;
+
+static __u64			count_filter		= 5;
+static int			print_entries		= 15;
+
+static int			target_pid		= -1;
+static int			profile_cpu		= -1;
+static int			nr_cpus			= 0;
+static unsigned int		realtime_prio		= 0;
+static int			group			= 0;
+static unsigned int		page_size;
+static unsigned int		mmap_pages		= 16;
+static int			freq			= 0;
+
+static char			*sym_filter;
+static unsigned long		filter_start;
+static unsigned long		filter_end;
+
+static int			delay_secs		= 2;
+static int			zero;
+static int			dump_symtab;
+
+/*
+ * Symbols
+ */
+
+static uint64_t			min_ip;
+static uint64_t			max_ip = -1ll;
+
+struct sym_entry {
+	struct rb_node		rb_node;
+	struct list_head	node;
+	unsigned long		count[MAX_COUNTERS];
+	unsigned long		snap_count;
+	double			weight;
+	int			skip;
+};
+
+struct sym_entry		*sym_filter_entry;
+
+struct dso			*kernel_dso;
+
+/*
+ * Symbols will be added here in record_ip and will get out
+ * after decayed.
+ */
+static LIST_HEAD(active_symbols);
+static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * Ordering weight: count-1 * count-2 * ... / count-n
+ */
+static double sym_weight(const struct sym_entry *sym)
+{
+	double weight = sym->snap_count;
+	int counter;
+
+	for (counter = 1; counter < nr_counters-1; counter++)
+		weight *= sym->count[counter];
+
+	weight /= (sym->count[counter] + 1);
+
+	return weight;
+}
+
+static long			samples;
+static long			userspace_samples;
+static const char		CONSOLE_CLEAR[] = "\033[H\033[2J";	/* ANSI: home + clear screen */
+
+static void __list_insert_active_sym(struct sym_entry *syme)
+{
+	list_add(&syme->node, &active_symbols);
+}
+
+static void list_remove_active_sym(struct sym_entry *syme)
+{
+	pthread_mutex_lock(&active_symbols_lock);
+	list_del_init(&syme->node);
+	pthread_mutex_unlock(&active_symbols_lock);
+}
+
+static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
+{
+	struct rb_node **p = &tree->rb_node;
+	struct rb_node *parent = NULL;
+	struct sym_entry *iter;
+
+	while (*p != NULL) {
+		parent = *p;
+		iter = rb_entry(parent, struct sym_entry, rb_node);
+
+		if (se->weight > iter->weight)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
+	}
+
+	rb_link_node(&se->rb_node, parent, p);
+	rb_insert_color(&se->rb_node, tree);
+}
+
+static void print_sym_table(void)
+{
+	int printed = 0, j;
+	int counter;
+	float samples_per_sec = samples/delay_secs;
+	float ksamples_per_sec = (samples-userspace_samples)/delay_secs;
+	float sum_ksamples = 0.0;
+	struct sym_entry *syme, *n;
+	struct rb_root tmp = RB_ROOT;
+	struct rb_node *nd;
+
+	samples = userspace_samples = 0;
+
+	/* Sort the active symbols */
+	pthread_mutex_lock(&active_symbols_lock);
+	syme = list_entry(active_symbols.next, struct sym_entry, node);
+	pthread_mutex_unlock(&active_symbols_lock);
+
+	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
+		syme->snap_count = syme->count[0];
+		if (syme->snap_count != 0) {
+			syme->weight = sym_weight(syme);
+			rb_insert_active_sym(&tmp, syme);
+			sum_ksamples += syme->snap_count;
+
+			for (j = 0; j < nr_counters; j++)
+				syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
+		} else
+			list_remove_active_sym(syme);
+	}
+
+	puts(CONSOLE_CLEAR);
+
+	printf(
+"------------------------------------------------------------------------------\n");
+	printf( "   PerfTop:%8.0f irqs/sec  kernel:%4.1f%% [",
+		samples_per_sec,
+		100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)));
+
+	if (nr_counters == 1) {
+		printf("%Ld", attrs[0].sample_period);
+		if (freq)
+			printf("Hz ");
+		else
+			printf(" ");
+	}
+
+	for (counter = 0; counter < nr_counters; counter++) {
+		if (counter)
+			printf("/");
+
+		printf("%s", event_name(counter));
+	}
+
+	printf( "], ");
+
+	if (target_pid != -1)
+		printf(" (target_pid: %d", target_pid);
+	else
+		printf(" (all");
+
+	if (profile_cpu != -1)
+		printf(", cpu: %d)\n", profile_cpu);
+	else {
+		if (target_pid != -1)
+			printf(")\n");
+		else
+			printf(", %d CPUs)\n", nr_cpus);
+	}
+
+	printf("------------------------------------------------------------------------------\n\n");
+
+	if (nr_counters == 1)
+		printf("             samples    pcnt");
+	else
+		printf("  weight     samples    pcnt");
+
+	printf("         RIP          kernel function\n"
+	       "  ______     _______   _____   ________________   _______________\n\n");
+
+	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
+		struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node);
+		struct symbol *sym = (struct symbol *)(syme + 1);
+		char *color = PERF_COLOR_NORMAL;
+		double pcnt;
+
+		if (++printed > print_entries || syme->snap_count < count_filter)
+			continue;
+
+		pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
+					 sum_ksamples));
+
+		/*
+		 * We color high-overhead entries in red, low-overhead
+		 * entries in green - and keep the middle ground normal:
+		 */
+		if (pcnt >= 5.0)
+			color = PERF_COLOR_RED;
+		if (pcnt < 0.5)
+			color = PERF_COLOR_GREEN;
+
+		if (nr_counters == 1)
+			printf("%20.2f - ", syme->weight);
+		else
+			printf("%9.1f %10ld - ", syme->weight, syme->snap_count);
+
+		color_fprintf(stdout, color, "%4.1f%%", pcnt);
+		printf(" - %016llx : %s\n", sym->start, sym->name);
+	}
+}
+
+static void *display_thread(void *arg)
+{
+	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
+	int delay_msecs = delay_secs * 1000;
+
+	printf("PerfTop refresh period: %d seconds\n", delay_secs);
+
+	do {
+		print_sym_table();
+		/*
+		 * Refresh until a key is pressed: poll() returns 0 on
+		 * timeout and 1 once stdin becomes readable.
+		 */
+	} while (poll(&stdin_poll, 1, delay_msecs) == 0);
+
+	printf("key pressed - exiting.\n");
+	exit(0);
+
+	return NULL;
+}
+
+static int symbol_filter(struct dso *self, struct symbol *sym)
+{
+	static int filter_match;
+	struct sym_entry *syme;
+	const char *name = sym->name;
+
+	if (!strcmp(name, "_text") ||
+	    !strcmp(name, "_etext") ||
+	    !strcmp(name, "_sinittext") ||
+	    !strncmp("init_module", name, 11) ||
+	    !strncmp("cleanup_module", name, 14) ||
+	    strstr(name, "_text_start") ||
+	    strstr(name, "_text_end"))
+		return 1;
+
+	syme = dso__sym_priv(self, sym);
+	/* Tag samples to be skipped. 
*/ + if (!strcmp("default_idle", name) || + !strcmp("cpu_idle", name) || + !strcmp("enter_idle", name) || + !strcmp("exit_idle", name) || + !strcmp("mwait_idle", name)) + syme->skip = 1; + + if (filter_match == 1) { + filter_end = sym->start; + filter_match = -1; + if (filter_end - filter_start > 10000) { + fprintf(stderr, + "hm, too large filter symbol <%s> - skipping.\n", + sym_filter); + fprintf(stderr, "symbol filter start: %016lx\n", + filter_start); + fprintf(stderr, " end: %016lx\n", + filter_end); + filter_end = filter_start = 0; + sym_filter = NULL; + sleep(1); + } + } + + if (filter_match == 0 && sym_filter && !strcmp(name, sym_filter)) { + filter_match = 1; + filter_start = sym->start; + } + + + return 0; +} + +static int parse_symbols(void) +{ + struct rb_node *node; + struct symbol *sym; + + kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry)); + if (kernel_dso == NULL) + return -1; + + if (dso__load_kernel(kernel_dso, NULL, symbol_filter, 1) != 0) + goto out_delete_dso; + + node = rb_first(&kernel_dso->syms); + sym = rb_entry(node, struct symbol, rb_node); + min_ip = sym->start; + + node = rb_last(&kernel_dso->syms); + sym = rb_entry(node, struct symbol, rb_node); + max_ip = sym->end; + + if (dump_symtab) + dso__fprintf(kernel_dso, stderr); + + return 0; + +out_delete_dso: + dso__delete(kernel_dso); + kernel_dso = NULL; + return -1; +} + +#define TRACE_COUNT 3 + +/* + * Binary search in the histogram table and record the hit: + */ +static void record_ip(uint64_t ip, int counter) +{ + struct symbol *sym = dso__find_symbol(kernel_dso, ip); + + if (sym != NULL) { + struct sym_entry *syme = dso__sym_priv(kernel_dso, sym); + + if (!syme->skip) { + syme->count[counter]++; + pthread_mutex_lock(&active_symbols_lock); + if (list_empty(&syme->node) || !syme->node.next) + __list_insert_active_sym(syme); + pthread_mutex_unlock(&active_symbols_lock); + return; + } + } + + samples--; +} + +static void process_event(uint64_t ip, int counter) +{ + samples++; + + if (ip < min_ip || ip > max_ip) { + userspace_samples++; + return; + } + + record_ip(ip, counter); +} + +struct mmap_data { + int counter; + void *base; + unsigned int mask; + unsigned int prev; +}; + +static unsigned int mmap_read_head(struct mmap_data *md) +{ + struct perf_counter_mmap_page *pc = md->base; + int head; + + head = pc->data_head; + rmb(); + + return head; +} + +struct timeval last_read, this_read; + +static void mmap_read(struct mmap_data *md) +{ + unsigned int head = mmap_read_head(md); + unsigned int old = md->prev; + unsigned char *data = md->base + page_size; + int diff; + + gettimeofday(&this_read, NULL); + + /* + * If we're further behind than half the buffer, there's a chance + * the writer will bite our tail and mess up the samples under us. + * + * If we somehow ended up ahead of the head, we got messed up. + * + * In either case, truncate and restart at head. + */ + diff = head - old; + if (diff > md->mask / 2 || diff < 0) { + struct timeval iv; + unsigned long msecs; + + timersub(&this_read, &last_read, &iv); + msecs = iv.tv_sec*1000 + iv.tv_usec/1000; + + fprintf(stderr, "WARNING: failed to keep up with mmap data." + " Last read %lu msecs ago.\n", msecs); + + /* + * head points to a known good entry, start there. 
+	 */
+	old = head;
+	}
+
+	last_read = this_read;
+
+	for (; old != head;) {
+		struct ip_event {
+			struct perf_event_header header;
+			__u64 ip;
+			__u32 pid, target_pid;
+		};
+		struct mmap_event {
+			struct perf_event_header header;
+			__u32 pid, target_pid;
+			__u64 start;
+			__u64 len;
+			__u64 pgoff;
+			char filename[PATH_MAX];
+		};
+
+		typedef union event_union {
+			struct perf_event_header header;
+			struct ip_event ip;
+			struct mmap_event mmap;
+		} event_t;
+
+		event_t *event = (event_t *)&data[old & md->mask];
+
+		event_t event_copy;
+
+		size_t size = event->header.size;
+
+		/*
+		 * Event straddles the mmap boundary -- header should always
+		 * be inside due to u64 alignment of output.
+		 */
+		if ((old & md->mask) + size != ((old + size) & md->mask)) {
+			unsigned int offset = old;
+			unsigned int len = min(sizeof(*event), size), cpy;
+			void *dst = &event_copy;
+
+			do {
+				cpy = min(md->mask + 1 - (offset & md->mask), len);
+				memcpy(dst, &data[offset & md->mask], cpy);
+				offset += cpy;
+				dst += cpy;
+				len -= cpy;
+			} while (len);
+
+			event = &event_copy;
+		}
+
+		old += size;
+
+		if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) {
+			if (event->header.type & PERF_SAMPLE_IP)
+				process_event(event->ip.ip, md->counter);
+		}
+	}
+
+	md->prev = old;
+}
+
+static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
+static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
+
+static int __cmd_top(void)
+{
+	struct perf_counter_attr *attr;
+	pthread_t thread;
+	int i, counter, group_fd, nr_poll = 0;
+	unsigned int cpu;
+	int ret;
+
+	for (i = 0; i < nr_cpus; i++) {
+		group_fd = -1;
+		for (counter = 0; counter < nr_counters; counter++) {
+
+			cpu = profile_cpu;
+			if (target_pid == -1 && profile_cpu == -1)
+				cpu = i;
+
+			attr = attrs + counter;
+
+			attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+			attr->freq = freq;
+
+			fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0);
+			if (fd[i][counter] < 0) {
+				int err = errno;
+
+				error("syscall returned with %d (%s)\n",
+					fd[i][counter], strerror(err));
+				if (err == EPERM)
+					printf("Are you root?\n");
+				exit(-1);
+			}
+			assert(fd[i][counter] >= 0);
+			fcntl(fd[i][counter], F_SETFL, O_NONBLOCK);
+
+			/*
+			 * First counter acts as the group leader:
+			 */
+			if (group && group_fd == -1)
+				group_fd = fd[i][counter];
+
+			event_array[nr_poll].fd = fd[i][counter];
+			event_array[nr_poll].events = POLLIN;
+			nr_poll++;
+
+			mmap_array[i][counter].counter = counter;
+			mmap_array[i][counter].prev = 0;
+			mmap_array[i][counter].mask = mmap_pages*page_size - 1;
+			mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
+					PROT_READ, MAP_SHARED, fd[i][counter], 0);
+			if (mmap_array[i][counter].base == MAP_FAILED)
+				die("failed to mmap with %d (%s)\n", errno, strerror(errno));
+		}
+	}
+
+	if (pthread_create(&thread, NULL, display_thread, NULL)) {
+		printf("Could not create display thread.\n");
+		exit(-1);
+	}
+
+	if (realtime_prio) {
+		struct sched_param param;
+
+		param.sched_priority = realtime_prio;
+		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
+			printf("Could not set realtime priority.\n");
+			exit(-1);
+		}
+	}
+
+	while (1) {
+		int hits = samples;
+
+		for (i = 0; i < nr_cpus; i++) {
+			for (counter = 0; counter < nr_counters; counter++)
+				mmap_read(&mmap_array[i][counter]);
+		}
+
+		if (hits == samples)
+			ret = poll(event_array, nr_poll, 100);
+	}
+
+	return 0;
+}
+
+static const char * const top_usage[] = {
+	"perf top [<options>]",
+	NULL
+};
+
+static const struct option options[] = {
+	OPT_CALLBACK('e', "event", NULL, "event",
+		     "event selector. use 'perf list' to list available events",
+		     parse_events),
+	OPT_INTEGER('c', "count", &default_interval,
+		    "event period to sample"),
+	OPT_INTEGER('p', "pid", &target_pid,
+		    "profile events on existing pid"),
+	OPT_BOOLEAN('a', "all-cpus", &system_wide,
+		    "system-wide collection from all CPUs"),
+	OPT_INTEGER('C', "CPU", &profile_cpu,
+		    "CPU to profile on"),
+	OPT_INTEGER('m', "mmap-pages", &mmap_pages,
+		    "number of mmap data pages"),
+	OPT_INTEGER('r', "realtime", &realtime_prio,
+		    "collect data with this RT SCHED_FIFO priority"),
+	OPT_INTEGER('d', "delay", &delay_secs,
+		    "number of seconds to delay between refreshes"),
+	OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
+		    "dump the symbol table used for profiling"),
+	OPT_INTEGER('f', "count-filter", &count_filter,
+		    "only display functions with more events than this"),
+	OPT_BOOLEAN('g', "group", &group,
+		    "put the counters into a counter group"),
+	OPT_STRING('s', "sym-filter", &sym_filter, "pattern",
+		    "only display symbols matching this pattern"),
+	OPT_BOOLEAN('z', "zero", &zero,
+		    "zero history across updates"),
+	OPT_INTEGER('F', "freq", &freq,
+		    "profile at this frequency"),
+	OPT_INTEGER('E', "entries", &print_entries,
+		    "display this many functions"),
+	OPT_END()
+};
+
+int cmd_top(int argc, const char **argv, const char *prefix)
+{
+	int counter;
+
+	page_size = sysconf(_SC_PAGE_SIZE);
+
+	argc = parse_options(argc, argv, options, top_usage, 0);
+	if (argc)
+		usage_with_options(top_usage, options);
+
+	if (freq) {
+		default_interval = freq;
+		freq = 1;
+	}
+
+	/* CPU and PID are mutually exclusive */
+	if (target_pid != -1 && profile_cpu != -1) {
+		printf("WARNING: PID switch overriding CPU\n");
+		sleep(1);
+		profile_cpu = -1;
+	}
+
+	if (!nr_counters)
+		nr_counters = 1;
+
+	if (delay_secs < 1)
+		delay_secs = 1;
+
+	parse_symbols();
+
+	/*
+	 * Fill in the ones not specifically initialized via -c:
+	 */
+	for (counter = 0; counter < nr_counters; counter++) {
+		if (attrs[counter].sample_period)
+			continue;
+
+		attrs[counter].sample_period = default_interval;
+	}
+
+	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+	assert(nr_cpus <= MAX_NR_CPUS);
+	assert(nr_cpus >= 0);
+
+	if (target_pid != -1 || profile_cpu != -1)
+		nr_cpus = 1;
+
+	return __cmd_top();
+}
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
new file mode 100644
index 00000000000..51d168230ee
--- /dev/null
+++ b/tools/perf/builtin.h
@@ -0,0 +1,26 @@
+#ifndef BUILTIN_H
+#define BUILTIN_H
+
+#include "util/util.h"
+#include "util/strbuf.h"
+
+extern const char perf_version_string[];
+extern const char perf_usage_string[];
+extern const char perf_more_info_string[];
+
+extern void list_common_cmds_help(void);
+extern const char *help_unknown_cmd(const char *cmd);
+extern void prune_packed_objects(int);
+extern int read_line_with_nul(char *buf, int size, FILE *file);
+extern int check_pager_config(const char *cmd);
+
+extern int cmd_annotate(int argc, const char **argv, const char *prefix);
+extern int cmd_help(int argc, const char **argv, const char *prefix);
+extern int cmd_record(int argc, const char **argv, const char *prefix);
+extern int cmd_report(int argc, const char **argv, const char *prefix);
+extern int cmd_stat(int argc, const char **argv, const char *prefix);
+extern int cmd_top(int argc, const char **argv, const char *prefix);
+extern int cmd_version(int argc, const char **argv, const char *prefix);
+extern int cmd_list(int argc, const char **argv, const char *prefix);
+
+#endif
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt
new file mode 100644
index 00000000000..eebce30afbc
--- /dev/null
+++ b/tools/perf/command-list.txt
@@ -0,0 +1,10 @@
+#
+# List of known perf commands.
+# command name			category [deprecated] [common]
+#
+perf-annotate			mainporcelain common
+perf-list			mainporcelain common
+perf-record			mainporcelain common
+perf-report			mainporcelain common
+perf-stat			mainporcelain common
+perf-top			mainporcelain common
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
new file mode 100644
index 00000000000..d3250763dc9
--- /dev/null
+++ b/tools/perf/design.txt
@@ -0,0 +1,442 @@
+
+Performance Counters for Linux
+------------------------------
+
+Performance counters are special hardware registers available on most modern
+CPUs. These registers count the number of certain types of hw events, such
+as instructions executed, cache-misses suffered, or branches mis-predicted,
+without slowing down the kernel or applications. These registers can also
+trigger interrupts when a threshold number of events have passed - and can
+thus be used to profile the code that runs on that CPU.
+
+The Linux Performance Counter subsystem provides an abstraction of these
+hardware capabilities. It provides per task and per CPU counters, counter
+groups, and it provides event capabilities on top of those.  It
+provides "virtual" 64-bit counters, regardless of the width of the
+underlying hardware counters.
+
+Performance counters are accessed via special file descriptors.
+There's one file descriptor per virtual counter used.
+
+The special file descriptor is opened via the perf_counter_open()
+system call:
+
+   int sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr,
+			     pid_t pid, int cpu, int group_fd,
+			     unsigned long flags);
+
+The syscall returns the new fd. The fd can be used via the normal
+VFS system calls: read() can be used to read the counter, fcntl()
+can be used to set the blocking mode, etc.
+
+Multiple counters can be kept open at a time, and the counters
+can be poll()ed.
+
+When creating a new counter fd, 'perf_counter_hw_event' is:
+
+struct perf_counter_hw_event {
+	/*
+	 * The MSB of the config word signifies if the rest contains cpu
+	 * specific (raw) counter configuration data, if unset, the next
+	 * 7 bits are an event type and the rest of the bits are the event
+	 * identifier.
+	 */
+	__u64			config;
+
+	__u64			irq_period;
+	__u32			record_type;
+	__u32			read_format;
+
+	__u64			disabled       :  1, /* off by default        */
+				inherit	       :  1, /* children inherit it   */
+				pinned	       :  1, /* must always be on PMU */
+				exclusive      :  1, /* only group on PMU     */
+				exclude_user   :  1, /* don't count user      */
+				exclude_kernel :  1, /* ditto kernel          */
+				exclude_hv     :  1, /* ditto hypervisor      */
+				exclude_idle   :  1, /* don't count when idle */
+				mmap           :  1, /* include mmap data     */
+				munmap         :  1, /* include munmap data   */
+				comm           :  1, /* include comm data     */
+
+				__reserved_1   : 52;
+
+	__u32			extra_config_len;
+	__u32			wakeup_events;	/* wakeup every n events */
+
+	__u64			__reserved_2;
+	__u64			__reserved_3;
+};
+
+The 'config' field specifies what the counter should count.  It
+is divided into 3 bit-fields:
+
+raw_type: 1 bit   (most significant bit)	0x8000_0000_0000_0000
+type:	  7 bits  (next most significant)	0x7f00_0000_0000_0000
+event_id: 56 bits (least significant)		0x00ff_ffff_ffff_ffff
+
+If 'raw_type' is 1, then the counter will count a hardware event
+specified by the remaining 63 bits of event_config. The encoding is
+machine-specific.
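+
+As an illustration, a config value for the cases above could be
+composed like this (a sketch of the encoding only, not part of the
+ABI; PERF_TYPE_HARDWARE and PERF_COUNT_INSTRUCTIONS are defined
+below):
+
+	__u64 config;
+
+	/* raw, machine-specific event: */
+	config = (1ULL << 63) | 0x4064;
+
+	/* generalized hardware event: */
+	config = ((__u64)PERF_TYPE_HARDWARE << 56) |
+		 PERF_COUNT_INSTRUCTIONS;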
+
+If 'raw_type' is 0, then the 'type' field says what kind of counter
+this is, with the following encoding:
+
+enum perf_event_types {
+	PERF_TYPE_HARDWARE		= 0,
+	PERF_TYPE_SOFTWARE		= 1,
+	PERF_TYPE_TRACEPOINT		= 2,
+};
+
+A counter of PERF_TYPE_HARDWARE will count the hardware event
+specified by 'event_id':
+
+/*
+ * Generalized performance counter event types, used by the hw_event.event_id
+ * parameter of the sys_perf_counter_open() syscall:
+ */
+enum hw_event_ids {
+	/*
+	 * Common hardware events, generalized by the kernel:
+	 */
+	PERF_COUNT_CPU_CYCLES		= 0,
+	PERF_COUNT_INSTRUCTIONS		= 1,
+	PERF_COUNT_CACHE_REFERENCES	= 2,
+	PERF_COUNT_CACHE_MISSES		= 3,
+	PERF_COUNT_BRANCH_INSTRUCTIONS	= 4,
+	PERF_COUNT_BRANCH_MISSES	= 5,
+	PERF_COUNT_BUS_CYCLES		= 6,
+};
+
+These are standardized types of events that work relatively uniformly
+on all CPUs that implement Performance Counters support under Linux,
+although there may be variations (e.g., different CPUs might count
+cache references and misses at different levels of the cache hierarchy).
+If a CPU is not able to count the selected event, then the system call
+will return -EINVAL.
+
+More hw_event_types are supported as well, but they are CPU-specific
+and accessed as raw events. For example, to count "External bus
+cycles while bus lock signal asserted" events on Intel Core CPUs, pass
+in a 0x4064 event_id value and set hw_event.raw_type to 1.
+
+A counter of type PERF_TYPE_SOFTWARE will count one of the available
+software events, selected by 'event_id':
+
+/*
+ * Special "software" counters provided by the kernel, even if the hardware
+ * does not support performance counters. These counters measure various
+ * physical and sw events of the kernel (and allow the profiling of them as
+ * well):
+ */
+enum sw_event_ids {
+	PERF_COUNT_CPU_CLOCK		= 0,
+	PERF_COUNT_TASK_CLOCK		= 1,
+	PERF_COUNT_PAGE_FAULTS		= 2,
+	PERF_COUNT_CONTEXT_SWITCHES	= 3,
+	PERF_COUNT_CPU_MIGRATIONS	= 4,
+	PERF_COUNT_PAGE_FAULTS_MIN	= 5,
+	PERF_COUNT_PAGE_FAULTS_MAJ	= 6,
+};
+
+Counters of the type PERF_TYPE_TRACEPOINT are available when the ftrace event
+tracer is available, and event_id values can be obtained from
+/debug/tracing/events/*/*/id
+
+
+Counters come in two flavours: counting counters and sampling
+counters. A "counting" counter is one that is used for counting the
+number of events that occur, and is characterised by having
+irq_period = 0.
+
+
+A read() on a counter returns the current value of the counter and possible
+additional values as specified by 'read_format'; each value is a u64 (8 bytes)
+in size.
+
+/*
+ * Bits that can be set in hw_event.read_format to request that
+ * reads on the counter should return the indicated quantities,
+ * in increasing order of bit value, after the counter value.
+ */
+enum perf_counter_read_format {
+	PERF_FORMAT_TOTAL_TIME_ENABLED	=  1,
+	PERF_FORMAT_TOTAL_TIME_RUNNING	=  2,
+};
+
+Using these additional values one can establish the overcommit ratio for a
+particular counter allowing one to take the round-robin scheduling effect
+into account.
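+
+For example, a counting counter for instructions in the current task
+could be set up and read like this (a minimal sketch, with error
+handling omitted; the scaling step mirrors what builtin-stat.c above
+does with these values):
+
+	struct perf_counter_hw_event hw_event = {
+		.config		= PERF_COUNT_INSTRUCTIONS,
+		.read_format	= PERF_FORMAT_TOTAL_TIME_ENABLED |
+				  PERF_FORMAT_TOTAL_TIME_RUNNING,
+	};
+	__u64 values[3];
+	int fd;
+
+	fd = sys_perf_counter_open(&hw_event, 0 /* pid: self */,
+				   -1 /* cpu: any */, -1 /* no group */, 0);
+
+	/* ... run the workload ... */
+
+	read(fd, values, sizeof(values));
+	/*
+	 * values[0] is the raw count, values[1] and values[2] are the
+	 * times the counter was enabled and actually running; if the
+	 * counter was time-shared, an estimate of the full count is
+	 * values[0] * values[1] / values[2].
+	 */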
+
+A "sampling" counter is one that is set up to generate an interrupt
+every N events, where N is given by 'irq_period'.  A sampling counter
+has irq_period > 0. The record_type controls what data is recorded on each
+interrupt:
+
+/*
+ * Bits that can be set in hw_event.record_type to request information
+ * in the overflow packets.
+ */
+enum perf_counter_record_format {
+	PERF_RECORD_IP		= 1U << 0,
+	PERF_RECORD_TID		= 1U << 1,
+	PERF_RECORD_TIME	= 1U << 2,
+	PERF_RECORD_ADDR	= 1U << 3,
+	PERF_RECORD_GROUP	= 1U << 4,
+	PERF_RECORD_CALLCHAIN	= 1U << 5,
+};
+
+Such (and other) events will be recorded in a ring-buffer, which is
+available to user-space using mmap() (see below).
+
+The 'disabled' bit specifies whether the counter starts out disabled
+or enabled.  If it is initially disabled, it can be enabled by ioctl
+or prctl (see below).
+
+The 'inherit' bit, if set, specifies that this counter should count
+events on descendant tasks as well as the task specified.  This only
+applies to new descendants, not to any existing descendants at the
+time the counter is created (nor to any new descendants of existing
+descendants).
+
+The 'pinned' bit, if set, specifies that the counter should always be
+on the CPU if at all possible.  It only applies to hardware counters
+and only to group leaders.  If a pinned counter cannot be put onto the
+CPU (e.g. because there are not enough hardware counters or because of
+a conflict with some other event), then the counter goes into an
+'error' state, where reads return end-of-file (i.e. read() returns 0)
+until the counter is subsequently enabled or disabled.
+
+The 'exclusive' bit, if set, specifies that when this counter's group
+is on the CPU, it should be the only group using the CPU's counters.
+In future, this will allow sophisticated monitoring programs to supply
+extra configuration information via 'extra_config_len' to exploit
+advanced features of the CPU's Performance Monitor Unit (PMU) that are
+not otherwise accessible and that might disrupt other hardware
+counters.
+
+The 'exclude_user', 'exclude_kernel' and 'exclude_hv' bits provide a
+way to request that counting of events be restricted to times when the
+CPU is in user, kernel and/or hypervisor mode.
+
+The 'mmap' and 'munmap' bits allow recording of PROT_EXEC mmap/munmap
+operations; these can be used to relate userspace IP addresses to actual
+code, even after the mapping (or even the whole process) is gone.
+These events are recorded in the ring-buffer (see below).
+
+The 'comm' bit allows tracking of process comm data on process creation.
+This too is recorded in the ring-buffer (see below).
+
+The 'pid' parameter to the perf_counter_open() system call allows the
+counter to be specific to a task:
+
+ pid == 0: if the pid parameter is zero, the counter is attached to the
+ current task.
+
+ pid > 0: the counter is attached to a specific task (if the current task
+ has sufficient privilege to do so)
+
+ pid < 0: all tasks are counted (per cpu counters)
+
+The 'cpu' parameter allows a counter to be made specific to a CPU:
+
+ cpu >= 0: the counter is restricted to a specific CPU
+ cpu == -1: the counter counts on all CPUs
+
+(Note: the combination of 'pid == -1' and 'cpu == -1' is not valid.)
+
+A 'pid > 0' and 'cpu == -1' counter is a per task counter that counts
+events of that task and 'follows' that task to whatever CPU the task
+gets scheduled to.  Per task counters can be created by any user, for
+their own tasks.
+
+A 'pid == -1' and 'cpu == x' counter is a per CPU counter that counts
+all events on CPU-x.  Per CPU counters need CAP_SYS_ADMIN privilege.
+
+The 'flags' parameter is currently unused and must be zero.
+
+The 'group_fd' parameter allows counter "groups" to be set up.  A
+counter group has one counter which is the group "leader".  The leader
+is created first, with group_fd = -1 in the perf_counter_open call
+that creates it.  The rest of the group members are created
+subsequently, with group_fd giving the fd of the group leader.
+(A single counter on its own is created with group_fd = -1 and is
+considered to be a group with only 1 member.)
+
+A counter group is scheduled onto the CPU as a unit, that is, it will
+only be put onto the CPU if all of the counters in the group can be
+put onto the CPU.  This means that the values of the member counters
+can be meaningfully compared, added, divided (to get ratios), etc.,
+with each other, since they have counted events for the same set of
+executed instructions.
+
+As stated above, asynchronous events, such as counter overflow or
+PROT_EXEC mmap tracking, are logged into a ring-buffer.  This
+ring-buffer is created and accessed through mmap().
+
+The mmap size should be 1+2^n pages, where the first page is a meta-data page
+(struct perf_counter_mmap_page) that contains various bits of information such
+as where the ring-buffer head is.
+
+/*
+ * Structure of the page that can be mapped via mmap
+ */
+struct perf_counter_mmap_page {
+	__u32	version;		/* version number of this structure */
+	__u32	compat_version;		/* lowest version this is compat with */
+
+	/*
+	 * Bits needed to read the hw counters in user-space.
+	 *
+	 *   u32 seq;
+	 *   s64 count;
+	 *
+	 *   do {
+	 *     seq = pc->lock;
+	 *
+	 *     barrier()
+	 *     if (pc->index) {
+	 *       count = pmc_read(pc->index - 1);
+	 *       count += pc->offset;
+	 *     } else
+	 *       goto regular_read;
+	 *
+	 *     barrier();
+	 *   } while (pc->lock != seq);
+	 *
+	 * NOTE: for obvious reason this only works on self-monitoring
+	 *       processes.
+	 */
+	__u32	lock;			/* seqlock for synchronization */
+	__u32	index;			/* hardware counter identifier */
+	__s64	offset;			/* add to hardware counter value */
+
+	/*
+	 * Control data for the mmap() data buffer.
+	 *
+	 * User-space reading this value should issue an rmb(), on SMP capable
+	 * platforms, after reading this value -- see perf_counter_wakeup().
+	 */
+	__u32   data_head;		/* head in the data section */
+};
+
+NOTE: the hw-counter userspace bits are arch specific and are currently only
+      implemented on powerpc.
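+
+A minimal user-space reader following that rmb() rule (assuming a
+memory-barrier macro like the rmb() defined in tools/perf/perf.h)
+could look like this:
+
+	static __u32 mmap_data_head(struct perf_counter_mmap_page *pc)
+	{
+		__u32 head = pc->data_head;
+
+		/* order the read of data_head vs. the event data itself: */
+		rmb();
+
+		return head;
+	}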
+
+The following 2^n pages are the ring-buffer which contains events of the form:
+
+#define PERF_EVENT_MISC_KERNEL		(1 << 0)
+#define PERF_EVENT_MISC_USER		(1 << 1)
+#define PERF_EVENT_MISC_OVERFLOW	(1 << 2)
+
+struct perf_event_header {
+	__u32	type;
+	__u16	misc;
+	__u16	size;
+};
+
+enum perf_event_type {
+
+	/*
+	 * The MMAP events record the PROT_EXEC mappings so that we can
+	 * correlate userspace IPs to code.  They have the following structure:
+	 *
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *
+	 *	u32				pid, tid;
+	 *	u64				addr;
+	 *	u64				len;
+	 *	u64				pgoff;
+	 *	char				filename[];
+	 * };
+	 */
+	PERF_EVENT_MMAP			= 1,
+	PERF_EVENT_MUNMAP		= 2,
+
+	/*
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *
+	 *	u32				pid, tid;
+	 *	char				comm[];
+	 * };
+	 */
+	PERF_EVENT_COMM			= 3,
+
+	/*
+	 * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
+	 * will be PERF_RECORD_*
+	 *
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *
+	 *	{ u64			ip;	  } && PERF_RECORD_IP
+	 *	{ u32			pid, tid; } && PERF_RECORD_TID
+	 *	{ u64			time;	  } && PERF_RECORD_TIME
+	 *	{ u64			addr;	  } && PERF_RECORD_ADDR
+	 *
+	 *	{ u64			nr;
+	 *	  { u64 event, val; }	cnt[nr];  } && PERF_RECORD_GROUP
+	 *
+	 *	{ u16			nr,
+	 *				hv,
+	 *				kernel,
+	 *				user;
+	 *	  u64			ips[nr];  } && PERF_RECORD_CALLCHAIN
+	 * };
+	 */
+};
+
+NOTE: PERF_RECORD_CALLCHAIN is arch specific and currently only implemented
+      on x86.
+
+Notification of new events is possible through poll()/select()/epoll() and
+fcntl() managing signals.
+
+Normally a notification is generated for every page filled; however, one can
+additionally set perf_counter_hw_event.wakeup_events to generate one every
+so many counter overflow events.
+
+Future work will include a splice() interface to the ring-buffer.
+
+
+Counters can be enabled and disabled in two ways: via ioctl and via
+prctl.  When a counter is disabled, it doesn't count or generate
+events but does continue to exist and maintain its count value.
+
+An individual counter or counter group can be enabled with
+
+	ioctl(fd, PERF_COUNTER_IOC_ENABLE);
+
+or disabled with
+
+	ioctl(fd, PERF_COUNTER_IOC_DISABLE);
+
+Enabling or disabling the leader of a group enables or disables the
+whole group; that is, while the group leader is disabled, none of the
+counters in the group will count.  Enabling or disabling a member of a
+group other than the leader only affects that counter - disabling a
+non-leader stops that counter from counting but doesn't affect any
+other counter.
+
+Additionally, non-inherited overflow counters can use
+
+	ioctl(fd, PERF_COUNTER_IOC_REFRESH, nr);
+
+to enable a counter for 'nr' events, after which it gets disabled again.
+
+A process can enable or disable all the counter groups that are
+attached to it, using prctl:
+
+	prctl(PR_TASK_PERF_COUNTERS_ENABLE);
+
+	prctl(PR_TASK_PERF_COUNTERS_DISABLE);
+
+This applies to all counters on the current process, whether created
+by this process or by another, and doesn't affect any counters that
+this process has created on other processes.  It only enables or
+disables the group leaders, not any other members in the groups.
+
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
new file mode 100644
index 00000000000..4eb72593370
--- /dev/null
+++ b/tools/perf/perf.c
@@ -0,0 +1,428 @@
+/*
+ * perf.c
+ *
+ * Performance analysis utility.
+ *
+ * This is the main hub from which the sub-commands (perf stat,
+ * perf top, perf record, perf report, etc.) are started.
+ */
+#include "builtin.h"
+
+#include "util/exec_cmd.h"
+#include "util/cache.h"
+#include "util/quote.h"
+#include "util/run-command.h"
+
+const char perf_usage_string[] =
+	"perf [--version] [--help] COMMAND [ARGS]";
+
+const char perf_more_info_string[] =
+	"See 'perf help COMMAND' for more information on a specific command.";
+
+static int use_pager = -1;
+struct pager_config {
+	const char *cmd;
+	int val;
+};
+
+static int pager_command_config(const char *var, const char *value, void *data)
+{
+	struct pager_config *c = data;
+	if (!prefixcmp(var, "pager.") && !strcmp(var + 6, c->cmd))
+		c->val = perf_config_bool(var, value);
+	return 0;
+}
+
+/* returns 0 for "no pager", 1 for "use pager", and -1 for "not specified" */
+int check_pager_config(const char *cmd)
+{
+	struct pager_config c;
+	c.cmd = cmd;
+	c.val = -1;
+	perf_config(pager_command_config, &c);
+	return c.val;
+}
+
+static void commit_pager_choice(void) {
+	switch (use_pager) {
+	case 0:
+		setenv("PERF_PAGER", "cat", 1);
+		break;
+	case 1:
+		/* setup_pager(); */
+		break;
+	default:
+		break;
+	}
+}
+
+static int handle_options(const char*** argv, int* argc, int* envchanged)
+{
+	int handled = 0;
+
+	while (*argc > 0) {
+		const char *cmd = (*argv)[0];
+		if (cmd[0] != '-')
+			break;
+
+		/*
+		 * For legacy reasons, the "version" and "help"
+		 * commands can be written with "--" prepended
+		 * to make them look like flags.
+		 */
+		if (!strcmp(cmd, "--help") || !strcmp(cmd, "--version"))
+			break;
+
+		/*
+		 * Check remaining flags.
+		 */
+		if (!prefixcmp(cmd, "--exec-path")) {
+			cmd += 11;
+			if (*cmd == '=')
+				perf_set_argv_exec_path(cmd + 1);
+			else {
+				puts(perf_exec_path());
+				exit(0);
+			}
+		} else if (!strcmp(cmd, "--html-path")) {
+			puts(system_path(PERF_HTML_PATH));
+			exit(0);
+		} else if (!strcmp(cmd, "-p") || !strcmp(cmd, "--paginate")) {
+			use_pager = 1;
+		} else if (!strcmp(cmd, "--no-pager")) {
+			use_pager = 0;
+			if (envchanged)
+				*envchanged = 1;
+		} else if (!strcmp(cmd, "--perf-dir")) {
+			if (*argc < 2) {
+				fprintf(stderr, "No directory given for --perf-dir.\n" );
+				usage(perf_usage_string);
+			}
+			setenv(PERF_DIR_ENVIRONMENT, (*argv)[1], 1);
+			if (envchanged)
+				*envchanged = 1;
+			(*argv)++;
+			(*argc)--;
+			handled++;
+		} else if (!prefixcmp(cmd, "--perf-dir=")) {
+			/* skip the "--perf-dir=" prefix, which is 11 characters: */
+			setenv(PERF_DIR_ENVIRONMENT, cmd + 11, 1);
+			if (envchanged)
+				*envchanged = 1;
+		} else if (!strcmp(cmd, "--work-tree")) {
+			if (*argc < 2) {
+				fprintf(stderr, "No directory given for --work-tree.\n" );
+				usage(perf_usage_string);
+			}
+			setenv(PERF_WORK_TREE_ENVIRONMENT, (*argv)[1], 1);
+			if (envchanged)
+				*envchanged = 1;
+			(*argv)++;
+			(*argc)--;
+		} else if (!prefixcmp(cmd, "--work-tree=")) {
+			setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + 12, 1);
+			if (envchanged)
+				*envchanged = 1;
+		} else {
+			fprintf(stderr, "Unknown option: %s\n", cmd);
+			usage(perf_usage_string);
+		}
+
+		(*argv)++;
+		(*argc)--;
+		handled++;
+	}
+	return handled;
+}
+
+static int handle_alias(int *argcp, const char ***argv)
+{
+	int envchanged = 0, ret = 0, saved_errno = errno;
+	int count, option_count;
+	const char** new_argv;
+	const char *alias_command;
+	char *alias_string;
+
+	alias_command = (*argv)[0];
+	alias_string = alias_lookup(alias_command);
+	if (alias_string) {
+		if (alias_string[0] == '!') {
+			if (*argcp > 1) {
+				struct strbuf buf;
+
+				strbuf_init(&buf, PATH_MAX);
+				strbuf_addstr(&buf, alias_string);
+				sq_quote_argv(&buf, (*argv) + 1, PATH_MAX);
+				free(alias_string);
+				alias_string = buf.buf;
+			}
+			ret = system(alias_string + 1);
+			if (ret >= 0 && WIFEXITED(ret) &&
+			    WEXITSTATUS(ret) != 127)
+				exit(WEXITSTATUS(ret));
+			die("Failed to run '%s' when expanding alias '%s'",
+			    alias_string + 1, alias_command);
+		}
+		count = split_cmdline(alias_string, &new_argv);
+		if (count < 0)
+			die("Bad alias.%s string", alias_command);
+		option_count = handle_options(&new_argv, &count, &envchanged);
+		if (envchanged)
+			die("alias '%s' changes environment variables\n"
+				 "You can use '!perf' in the alias to do this.",
+				 alias_command);
+		memmove(new_argv - option_count, new_argv,
+				count * sizeof(char *));
+		new_argv -= option_count;
+
+		if (count < 1)
+			die("empty alias for %s", alias_command);
+
+		if (!strcmp(alias_command, new_argv[0]))
+			die("recursive alias: %s", alias_command);
+
+		new_argv = realloc(new_argv, sizeof(char*) *
+				    (count + *argcp + 1));
+		/* insert after command name */
+		memcpy(new_argv + count, *argv + 1, sizeof(char*) * *argcp);
+		new_argv[count+*argcp] = NULL;
+
+		*argv = new_argv;
+		*argcp += count - 1;
+
+		ret = 1;
+	}
+
+	errno = saved_errno;
+
+	return ret;
+}
+
+const char perf_version_string[] = PERF_VERSION;
+
+#define RUN_SETUP	(1<<0)
+#define USE_PAGER	(1<<1)
+/*
+ * require working tree to be present -- anything that uses this needs
+ * RUN_SETUP for reading from the configuration file.
+ */
+#define NEED_WORK_TREE	(1<<2)
+
+struct cmd_struct {
+	const char *cmd;
+	int (*fn)(int, const char **, const char *);
+	int option;
+};
+
+static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
+{
+	int status;
+	struct stat st;
+	const char *prefix;
+
+	prefix = NULL;
+	if (p->option & RUN_SETUP)
+		prefix = NULL; /* setup_perf_directory(); */
+
+	if (use_pager == -1 && p->option & RUN_SETUP)
+		use_pager = check_pager_config(p->cmd);
+	if (use_pager == -1 && p->option & USE_PAGER)
+		use_pager = 1;
+	commit_pager_choice();
+
+	if (p->option & NEED_WORK_TREE)
+		/* setup_work_tree() */;
+
+	status = p->fn(argc, argv, prefix);
+	if (status)
+		return status & 0xff;
+
+	/* Somebody closed stdout? */
+	if (fstat(fileno(stdout), &st))
+		return 0;
+	/* Ignore write errors for pipes and sockets.. */
+	if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode))
+		return 0;
+
+	/* Check for ENOSPC and EIO errors.. 
*/ + if (fflush(stdout)) + die("write failure on standard output: %s", strerror(errno)); + if (ferror(stdout)) + die("unknown write failure on standard output"); + if (fclose(stdout)) + die("close failed on standard output: %s", strerror(errno)); + return 0; +} + +static void handle_internal_command(int argc, const char **argv) +{ + const char *cmd = argv[0]; + static struct cmd_struct commands[] = { + { "help", cmd_help, 0 }, + { "list", cmd_list, 0 }, + { "record", cmd_record, 0 }, + { "report", cmd_report, 0 }, + { "stat", cmd_stat, 0 }, + { "top", cmd_top, 0 }, + { "annotate", cmd_annotate, 0 }, + { "version", cmd_version, 0 }, + }; + int i; + static const char ext[] = STRIP_EXTENSION; + + if (sizeof(ext) > 1) { + i = strlen(argv[0]) - strlen(ext); + if (i > 0 && !strcmp(argv[0] + i, ext)) { + char *argv0 = strdup(argv[0]); + argv[0] = cmd = argv0; + argv0[i] = '\0'; + } + } + + /* Turn "perf cmd --help" into "perf help cmd" */ + if (argc > 1 && !strcmp(argv[1], "--help")) { + argv[1] = argv[0]; + argv[0] = cmd = "help"; + } + + for (i = 0; i < ARRAY_SIZE(commands); i++) { + struct cmd_struct *p = commands+i; + if (strcmp(p->cmd, cmd)) + continue; + exit(run_builtin(p, argc, argv)); + } +} + +static void execv_dashed_external(const char **argv) +{ + struct strbuf cmd = STRBUF_INIT; + const char *tmp; + int status; + + strbuf_addf(&cmd, "perf-%s", argv[0]); + + /* + * argv[0] must be the perf command, but the argv array + * belongs to the caller, and may be reused in + * subsequent loop iterations. Save argv[0] and + * restore it on error. + */ + tmp = argv[0]; + argv[0] = cmd.buf; + + /* + * if we fail because the command is not found, it is + * OK to return. Otherwise, we just pass along the status code. + */ + status = run_command_v_opt(argv, 0); + if (status != -ERR_RUN_COMMAND_EXEC) { + if (IS_RUN_COMMAND_ERR(status)) + die("unable to run '%s'", argv[0]); + exit(-status); + } + errno = ENOENT; /* as if we called execvp */ + + argv[0] = tmp; + + strbuf_release(&cmd); +} + +static int run_argv(int *argcp, const char ***argv) +{ + int done_alias = 0; + + while (1) { + /* See if it's an internal command */ + handle_internal_command(*argcp, *argv); + + /* .. then try the external ones */ + execv_dashed_external(*argv); + + /* It could be an alias -- this works around the insanity + * of overriding "perf log" with "perf show" by having + * alias.log = show + */ + if (done_alias || !handle_alias(argcp, argv)) + break; + done_alias = 1; + } + + return done_alias; +} + + +int main(int argc, const char **argv) +{ + const char *cmd; + + cmd = perf_extract_argv0_path(argv[0]); + if (!cmd) + cmd = "perf-help"; + + /* + * "perf-xxxx" is the same as "perf xxxx", but we obviously: + * + * - cannot take flags in between the "perf" and the "xxxx". + * - cannot execute it externally (since it would just do + * the same thing over again) + * + * So we just directly call the internal command handler, and + * die if that one cannot handle it. + */ + if (!prefixcmp(cmd, "perf-")) { + cmd += 5; + argv[0] = cmd; + handle_internal_command(argc, argv); + die("cannot handle %s internally", cmd); + } + + /* Look for flags.. 
*/ + argv++; + argc--; + handle_options(&argv, &argc, NULL); + commit_pager_choice(); + if (argc > 0) { + if (!prefixcmp(argv[0], "--")) + argv[0] += 2; + } else { + /* The user didn't specify a command; give them help */ + printf("\n usage: %s\n\n", perf_usage_string); + list_common_cmds_help(); + printf("\n %s\n\n", perf_more_info_string); + exit(1); + } + cmd = argv[0]; + + /* + * We use PATH to find perf commands, but we prepend some higher + * precidence paths: the "--exec-path" option, the PERF_EXEC_PATH + * environment, and the $(perfexecdir) from the Makefile at build + * time. + */ + setup_path(); + + while (1) { + static int done_help = 0; + static int was_alias = 0; + + was_alias = run_argv(&argc, &argv); + if (errno != ENOENT) + break; + + if (was_alias) { + fprintf(stderr, "Expansion of alias '%s' failed; " + "'%s' is not a perf-command\n", + cmd, argv[0]); + exit(1); + } + if (!done_help) { + cmd = argv[0] = help_unknown_cmd(cmd); + done_help = 1; + } else + break; + } + + fprintf(stderr, "Failed to run command '%s': %s\n", + cmd, strerror(errno)); + + return 1; +} diff --git a/tools/perf/perf.h b/tools/perf/perf.h new file mode 100644 index 00000000000..af0a5046d74 --- /dev/null +++ b/tools/perf/perf.h @@ -0,0 +1,67 @@ +#ifndef _PERF_PERF_H +#define _PERF_PERF_H + +#if defined(__x86_64__) || defined(__i386__) +#include "../../arch/x86/include/asm/unistd.h" +#define rmb() asm volatile("lfence" ::: "memory") +#define cpu_relax() asm volatile("rep; nop" ::: "memory"); +#endif + +#ifdef __powerpc__ +#include "../../arch/powerpc/include/asm/unistd.h" +#define rmb() asm volatile ("sync" ::: "memory") +#define cpu_relax() asm volatile ("" ::: "memory"); +#endif + +#include +#include +#include +#include + +#include "../../include/linux/perf_counter.h" + +/* + * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all + * counters in the current task. + */ +#define PR_TASK_PERF_COUNTERS_DISABLE 31 +#define PR_TASK_PERF_COUNTERS_ENABLE 32 + +#ifndef NSEC_PER_SEC +# define NSEC_PER_SEC 1000000000ULL +#endif + +static inline unsigned long long rdclock(void) +{ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); + return ts.tv_sec * 1000000000ULL + ts.tv_nsec; +} + +/* + * Pick up some kernel type conventions: + */ +#define __user +#define asmlinkage + +#define unlikely(x) __builtin_expect(!!(x), 0) +#define min(x, y) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? _min1 : _min2; }) + +static inline int +sys_perf_counter_open(struct perf_counter_attr *attr_uptr, + pid_t pid, int cpu, int group_fd, + unsigned long flags) +{ + return syscall(__NR_perf_counter_open, attr_uptr, pid, cpu, + group_fd, flags); +} + +#define MAX_COUNTERS 256 +#define MAX_NR_CPUS 256 + +#endif diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN new file mode 100755 index 00000000000..c561d1538c0 --- /dev/null +++ b/tools/perf/util/PERF-VERSION-GEN @@ -0,0 +1,42 @@ +#!/bin/sh + +GVF=PERF-VERSION-FILE +DEF_VER=v0.0.1.PERF + +LF=' +' + +# First see if there is a version file (included in release tarballs), +# then try git-describe, then default. 
+if test -f version +then + VN=$(cat version) || VN="$DEF_VER" +elif test -d .git -o -f .git && + VN=$(git describe --abbrev=4 HEAD 2>/dev/null) && + case "$VN" in + *$LF*) (exit 1) ;; + v[0-9]*) + git update-index -q --refresh + test -z "$(git diff-index --name-only HEAD --)" || + VN="$VN-dirty" ;; + esac +then + VN=$(echo "$VN" | sed -e 's/-/./g'); +else + VN="$DEF_VER" +fi + +VN=$(expr "$VN" : v*'\(.*\)') + +if test -r $GVF +then + VC=$(sed -e 's/^PERF_VERSION = //' <$GVF) +else + VC=unset +fi +test "$VN" = "$VC" || { + echo >&2 "PERF_VERSION = $VN" + echo "PERF_VERSION = $VN" >$GVF +} + + diff --git a/tools/perf/util/abspath.c b/tools/perf/util/abspath.c new file mode 100644 index 00000000000..61d33b81fc9 --- /dev/null +++ b/tools/perf/util/abspath.c @@ -0,0 +1,117 @@ +#include "cache.h" + +/* + * Do not use this for inspecting *tracked* content. When path is a + * symlink to a directory, we do not want to say it is a directory when + * dealing with tracked content in the working tree. + */ +static int is_directory(const char *path) +{ + struct stat st; + return (!stat(path, &st) && S_ISDIR(st.st_mode)); +} + +/* We allow "recursive" symbolic links. Only within reason, though. */ +#define MAXDEPTH 5 + +const char *make_absolute_path(const char *path) +{ + static char bufs[2][PATH_MAX + 1], *buf = bufs[0], *next_buf = bufs[1]; + char cwd[1024] = ""; + int buf_index = 1, len; + + int depth = MAXDEPTH; + char *last_elem = NULL; + struct stat st; + + if (strlcpy(buf, path, PATH_MAX) >= PATH_MAX) + die ("Too long path: %.*s", 60, path); + + while (depth--) { + if (!is_directory(buf)) { + char *last_slash = strrchr(buf, '/'); + if (last_slash) { + *last_slash = '\0'; + last_elem = xstrdup(last_slash + 1); + } else { + last_elem = xstrdup(buf); + *buf = '\0'; + } + } + + if (*buf) { + if (!*cwd && !getcwd(cwd, sizeof(cwd))) + die ("Could not get current working directory"); + + if (chdir(buf)) + die ("Could not switch to '%s'", buf); + } + if (!getcwd(buf, PATH_MAX)) + die ("Could not get current working directory"); + + if (last_elem) { + int len = strlen(buf); + if (len + strlen(last_elem) + 2 > PATH_MAX) + die ("Too long path name: '%s/%s'", + buf, last_elem); + buf[len] = '/'; + strcpy(buf + len + 1, last_elem); + free(last_elem); + last_elem = NULL; + } + + if (!lstat(buf, &st) && S_ISLNK(st.st_mode)) { + len = readlink(buf, next_buf, PATH_MAX); + if (len < 0) + die ("Invalid symlink: %s", buf); + if (PATH_MAX <= len) + die("symbolic link too long: %s", buf); + next_buf[len] = '\0'; + buf = next_buf; + buf_index = 1 - buf_index; + next_buf = bufs[buf_index]; + } else + break; + } + + if (*cwd && chdir(cwd)) + die ("Could not change back to '%s'", cwd); + + return buf; +} + +static const char *get_pwd_cwd(void) +{ + static char cwd[PATH_MAX + 1]; + char *pwd; + struct stat cwd_stat, pwd_stat; + if (getcwd(cwd, PATH_MAX) == NULL) + return NULL; + pwd = getenv("PWD"); + if (pwd && strcmp(pwd, cwd)) { + stat(cwd, &cwd_stat); + if (!stat(pwd, &pwd_stat) && + pwd_stat.st_dev == cwd_stat.st_dev && + pwd_stat.st_ino == cwd_stat.st_ino) { + strlcpy(cwd, pwd, PATH_MAX); + } + } + return cwd; +} + +const char *make_nonrelative_path(const char *path) +{ + static char buf[PATH_MAX + 1]; + + if (is_absolute_path(path)) { + if (strlcpy(buf, path, PATH_MAX) >= PATH_MAX) + die("Too long path: %.*s", 60, path); + } else { + const char *cwd = get_pwd_cwd(); + if (!cwd) + die("Cannot determine the current working directory"); + if (snprintf(buf, PATH_MAX, "%s/%s", cwd, path) >= PATH_MAX) + die("Too long 
path: %.*s", 60, path); + } + return buf; +} diff --git a/tools/perf/util/alias.c b/tools/perf/util/alias.c new file mode 100644 index 00000000000..9b3dd2b428d --- /dev/null +++ b/tools/perf/util/alias.c @@ -0,0 +1,77 @@ +#include "cache.h" + +static const char *alias_key; +static char *alias_val; + +static int alias_lookup_cb(const char *k, const char *v, void *cb) +{ + if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) { + if (!v) + return config_error_nonbool(k); + alias_val = strdup(v); + return 0; + } + return 0; +} + +char *alias_lookup(const char *alias) +{ + alias_key = alias; + alias_val = NULL; + perf_config(alias_lookup_cb, NULL); + return alias_val; +} + +int split_cmdline(char *cmdline, const char ***argv) +{ + int src, dst, count = 0, size = 16; + char quoted = 0; + + *argv = malloc(sizeof(char*) * size); + + /* split alias_string */ + (*argv)[count++] = cmdline; + for (src = dst = 0; cmdline[src];) { + char c = cmdline[src]; + if (!quoted && isspace(c)) { + cmdline[dst++] = 0; + while (cmdline[++src] + && isspace(cmdline[src])) + ; /* skip */ + if (count >= size) { + size += 16; + *argv = realloc(*argv, sizeof(char*) * size); + } + (*argv)[count++] = cmdline + dst; + } else if (!quoted && (c == '\'' || c == '"')) { + quoted = c; + src++; + } else if (c == quoted) { + quoted = 0; + src++; + } else { + if (c == '\\' && quoted != '\'') { + src++; + c = cmdline[src]; + if (!c) { + free(*argv); + *argv = NULL; + return error("cmdline ends with \\"); + } + } + cmdline[dst++] = c; + src++; + } + } + + cmdline[dst] = 0; + + if (quoted) { + free(*argv); + *argv = NULL; + return error("unclosed quote"); + } + + return count; +} + diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h new file mode 100644 index 00000000000..393d6146d13 --- /dev/null +++ b/tools/perf/util/cache.h @@ -0,0 +1,119 @@ +#ifndef CACHE_H +#define CACHE_H + +#include "util.h" +#include "strbuf.h" + +#define PERF_DIR_ENVIRONMENT "PERF_DIR" +#define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE" +#define DEFAULT_PERF_DIR_ENVIRONMENT ".perf" +#define DB_ENVIRONMENT "PERF_OBJECT_DIRECTORY" +#define INDEX_ENVIRONMENT "PERF_INDEX_FILE" +#define GRAFT_ENVIRONMENT "PERF_GRAFT_FILE" +#define TEMPLATE_DIR_ENVIRONMENT "PERF_TEMPLATE_DIR" +#define CONFIG_ENVIRONMENT "PERF_CONFIG" +#define EXEC_PATH_ENVIRONMENT "PERF_EXEC_PATH" +#define CEILING_DIRECTORIES_ENVIRONMENT "PERF_CEILING_DIRECTORIES" +#define PERFATTRIBUTES_FILE ".perfattributes" +#define INFOATTRIBUTES_FILE "info/attributes" +#define ATTRIBUTE_MACRO_PREFIX "[attr]" + +typedef int (*config_fn_t)(const char *, const char *, void *); +extern int perf_default_config(const char *, const char *, void *); +extern int perf_config_from_file(config_fn_t fn, const char *, void *); +extern int perf_config(config_fn_t fn, void *); +extern int perf_parse_ulong(const char *, unsigned long *); +extern int perf_config_int(const char *, const char *); +extern unsigned long perf_config_ulong(const char *, const char *); +extern int perf_config_bool_or_int(const char *, const char *, int *); +extern int perf_config_bool(const char *, const char *); +extern int perf_config_string(const char **, const char *, const char *); +extern int perf_config_set(const char *, const char *); +extern int perf_config_set_multivar(const char *, const char *, const char *, int); +extern int perf_config_rename_section(const char *, const char *); +extern const char *perf_etc_perfconfig(void); +extern int check_repository_format_version(const char *var, const char *value, void *cb); +extern 
int perf_config_system(void); +extern int perf_config_global(void); +extern int config_error_nonbool(const char *); +extern const char *config_exclusive_filename; + +#define MAX_PERFNAME (1000) +extern char perf_default_email[MAX_PERFNAME]; +extern char perf_default_name[MAX_PERFNAME]; +extern int user_ident_explicitly_given; + +extern const char *perf_log_output_encoding; +extern const char *perf_mailmap_file; + +/* IO helper functions */ +extern void maybe_flush_or_die(FILE *, const char *); +extern int copy_fd(int ifd, int ofd); +extern int copy_file(const char *dst, const char *src, int mode); +extern ssize_t read_in_full(int fd, void *buf, size_t count); +extern ssize_t write_in_full(int fd, const void *buf, size_t count); +extern void write_or_die(int fd, const void *buf, size_t count); +extern int write_or_whine(int fd, const void *buf, size_t count, const char *msg); +extern int write_or_whine_pipe(int fd, const void *buf, size_t count, const char *msg); +extern void fsync_or_die(int fd, const char *); + +/* pager.c */ +extern void setup_pager(void); +extern const char *pager_program; +extern int pager_in_use(void); +extern int pager_use_color; + +extern const char *editor_program; +extern const char *excludes_file; + +char *alias_lookup(const char *alias); +int split_cmdline(char *cmdline, const char ***argv); + +#define alloc_nr(x) (((x)+16)*3/2) + +/* + * Realloc the buffer pointed at by variable 'x' so that it can hold + * at least 'nr' entries; the number of entries currently allocated + * is 'alloc', using the standard growing factor alloc_nr() macro. + * + * DO NOT USE any expression with side-effect for 'x' or 'alloc'. + */ +#define ALLOC_GROW(x, nr, alloc) \ + do { \ + if ((nr) > alloc) { \ + if (alloc_nr(alloc) < (nr)) \ + alloc = (nr); \ + else \ + alloc = alloc_nr(alloc); \ + x = xrealloc((x), alloc * sizeof(*(x))); \ + } \ + } while(0) + + +static inline int is_absolute_path(const char *path) +{ + return path[0] == '/'; +} + +const char *make_absolute_path(const char *path); +const char *make_nonrelative_path(const char *path); +const char *make_relative_path(const char *abs, const char *base); +int normalize_path_copy(char *dst, const char *src); +int longest_ancestor_length(const char *path, const char *prefix_list); +char *strip_path_suffix(const char *path, const char *suffix); + +extern char *mkpath(const char *fmt, ...) __attribute__((format (printf, 1, 2))); +extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2))); +/* perf_mkstemp() - create tmp file honoring TMPDIR variable */ +extern int perf_mkstemp(char *path, size_t len, const char *template); + +extern char *mksnpath(char *buf, size_t n, const char *fmt, ...) + __attribute__((format (printf, 3, 4))); +extern char *perf_snpath(char *buf, size_t n, const char *fmt, ...) + __attribute__((format (printf, 3, 4))); +extern char *perf_pathdup(const char *fmt, ...) 
+ __attribute__((format (printf, 1, 2))); + +extern size_t strlcpy(char *dest, const char *src, size_t size); + +#endif /* CACHE_H */ diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c new file mode 100644 index 00000000000..9a8c20ccc53 --- /dev/null +++ b/tools/perf/util/color.c @@ -0,0 +1,241 @@ +#include "cache.h" +#include "color.h" + +int perf_use_color_default = -1; + +static int parse_color(const char *name, int len) +{ + static const char * const color_names[] = { + "normal", "black", "red", "green", "yellow", + "blue", "magenta", "cyan", "white" + }; + char *end; + int i; + for (i = 0; i < ARRAY_SIZE(color_names); i++) { + const char *str = color_names[i]; + if (!strncasecmp(name, str, len) && !str[len]) + return i - 1; + } + i = strtol(name, &end, 10); + if (end - name == len && i >= -1 && i <= 255) + return i; + return -2; +} + +static int parse_attr(const char *name, int len) +{ + static const int attr_values[] = { 1, 2, 4, 5, 7 }; + static const char * const attr_names[] = { + "bold", "dim", "ul", "blink", "reverse" + }; + int i; + for (i = 0; i < ARRAY_SIZE(attr_names); i++) { + const char *str = attr_names[i]; + if (!strncasecmp(name, str, len) && !str[len]) + return attr_values[i]; + } + return -1; +} + +void color_parse(const char *value, const char *var, char *dst) +{ + color_parse_mem(value, strlen(value), var, dst); +} + +void color_parse_mem(const char *value, int value_len, const char *var, + char *dst) +{ + const char *ptr = value; + int len = value_len; + int attr = -1; + int fg = -2; + int bg = -2; + + if (!strncasecmp(value, "reset", len)) { + strcpy(dst, PERF_COLOR_RESET); + return; + } + + /* [fg [bg]] [attr] */ + while (len > 0) { + const char *word = ptr; + int val, wordlen = 0; + + while (len > 0 && !isspace(word[wordlen])) { + wordlen++; + len--; + } + + ptr = word + wordlen; + while (len > 0 && isspace(*ptr)) { + ptr++; + len--; + } + + val = parse_color(word, wordlen); + if (val >= -1) { + if (fg == -2) { + fg = val; + continue; + } + if (bg == -2) { + bg = val; + continue; + } + goto bad; + } + val = parse_attr(word, wordlen); + if (val < 0 || attr != -1) + goto bad; + attr = val; + } + + if (attr >= 0 || fg >= 0 || bg >= 0) { + int sep = 0; + + *dst++ = '\033'; + *dst++ = '['; + if (attr >= 0) { + *dst++ = '0' + attr; + sep++; + } + if (fg >= 0) { + if (sep++) + *dst++ = ';'; + if (fg < 8) { + *dst++ = '3'; + *dst++ = '0' + fg; + } else { + dst += sprintf(dst, "38;5;%d", fg); + } + } + if (bg >= 0) { + if (sep++) + *dst++ = ';'; + if (bg < 8) { + *dst++ = '4'; + *dst++ = '0' + bg; + } else { + dst += sprintf(dst, "48;5;%d", bg); + } + } + *dst++ = 'm'; + } + *dst = 0; + return; +bad: + die("bad color value '%.*s' for variable '%s'", value_len, value, var); +} + +int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty) +{ + if (value) { + if (!strcasecmp(value, "never")) + return 0; + if (!strcasecmp(value, "always")) + return 1; + if (!strcasecmp(value, "auto")) + goto auto_color; + } + + /* Missing or explicit false to turn off colorization */ + if (!perf_config_bool(var, value)) + return 0; + + /* any normal truth value defaults to 'auto' */ + auto_color: + if (stdout_is_tty < 0) + stdout_is_tty = isatty(1); + if (stdout_is_tty || (pager_in_use() && pager_use_color)) { + char *term = getenv("TERM"); + if (term && strcmp(term, "dumb")) + return 1; + } + return 0; +} + +int perf_color_default_config(const char *var, const char *value, void *cb) +{ + if (!strcmp(var, "color.ui")) { + perf_use_color_default = 
perf_config_colorbool(var, value, -1); + return 0; + } + + return perf_default_config(var, value, cb); +} + +static int color_vfprintf(FILE *fp, const char *color, const char *fmt, + va_list args, const char *trail) +{ + int r = 0; + + /* + * Auto-detect: + */ + if (perf_use_color_default < 0) { + if (isatty(1) || pager_in_use()) + perf_use_color_default = 1; + else + perf_use_color_default = 0; + } + + if (perf_use_color_default && *color) + r += fprintf(fp, "%s", color); + r += vfprintf(fp, fmt, args); + if (perf_use_color_default && *color) + r += fprintf(fp, "%s", PERF_COLOR_RESET); + if (trail) + r += fprintf(fp, "%s", trail); + return r; +} + + + +int color_fprintf(FILE *fp, const char *color, const char *fmt, ...) +{ + va_list args; + int r; + + va_start(args, fmt); + r = color_vfprintf(fp, color, fmt, args, NULL); + va_end(args); + return r; +} + +int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...) +{ + va_list args; + int r; + va_start(args, fmt); + r = color_vfprintf(fp, color, fmt, args, "\n"); + va_end(args); + return r; +} + +/* + * This function splits the buffer by newlines and colors the lines individually. + * + * Returns 0 on success. + */ +int color_fwrite_lines(FILE *fp, const char *color, + size_t count, const char *buf) +{ + if (!*color) + return fwrite(buf, count, 1, fp) != 1; + while (count) { + char *p = memchr(buf, '\n', count); + if (p != buf && (fputs(color, fp) < 0 || + fwrite(buf, p ? p - buf : count, 1, fp) != 1 || + fputs(PERF_COLOR_RESET, fp) < 0)) + return -1; + if (!p) + return 0; + if (fputc('\n', fp) < 0) + return -1; + count -= p + 1 - buf; + buf = p + 1; + } + return 0; +} + + diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h new file mode 100644 index 00000000000..5abfd379582 --- /dev/null +++ b/tools/perf/util/color.h @@ -0,0 +1,36 @@ +#ifndef COLOR_H +#define COLOR_H + +/* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */ +#define COLOR_MAXLEN 24 + +#define PERF_COLOR_NORMAL "" +#define PERF_COLOR_RESET "\033[m" +#define PERF_COLOR_BOLD "\033[1m" +#define PERF_COLOR_RED "\033[31m" +#define PERF_COLOR_GREEN "\033[32m" +#define PERF_COLOR_YELLOW "\033[33m" +#define PERF_COLOR_BLUE "\033[34m" +#define PERF_COLOR_MAGENTA "\033[35m" +#define PERF_COLOR_CYAN "\033[36m" +#define PERF_COLOR_BG_RED "\033[41m" + +/* + * This variable stores the value of color.ui + */ +extern int perf_use_color_default; + + +/* + * Use this instead of perf_default_config if you need the value of color.ui. 
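+ *
+ * A minimal usage sketch (the call site is illustrative, not part of
+ * this patch):
+ *
+ *	perf_config(perf_color_default_config, NULL);
+ *	color_fprintf(stdout, PERF_COLOR_GREEN, "all good\n");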
+ */ +int perf_color_default_config(const char *var, const char *value, void *cb); + +int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty); +void color_parse(const char *value, const char *var, char *dst); +void color_parse_mem(const char *value, int len, const char *var, char *dst); +int color_fprintf(FILE *fp, const char *color, const char *fmt, ...); +int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...); +int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf); + +#endif /* COLOR_H */ diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c new file mode 100644 index 00000000000..3dd13faa6a2 --- /dev/null +++ b/tools/perf/util/config.c @@ -0,0 +1,873 @@ +/* + * GIT - The information manager from hell + * + * Copyright (C) Linus Torvalds, 2005 + * Copyright (C) Johannes Schindelin, 2005 + * + */ +#include "util.h" +#include "cache.h" +#include "exec_cmd.h" + +#define MAXNAME (256) + +static FILE *config_file; +static const char *config_file_name; +static int config_linenr; +static int config_file_eof; + +const char *config_exclusive_filename = NULL; + +static int get_next_char(void) +{ + int c; + FILE *f; + + c = '\n'; + if ((f = config_file) != NULL) { + c = fgetc(f); + if (c == '\r') { + /* DOS like systems */ + c = fgetc(f); + if (c != '\n') { + ungetc(c, f); + c = '\r'; + } + } + if (c == '\n') + config_linenr++; + if (c == EOF) { + config_file_eof = 1; + c = '\n'; + } + } + return c; +} + +static char *parse_value(void) +{ + static char value[1024]; + int quote = 0, comment = 0, len = 0, space = 0; + + for (;;) { + int c = get_next_char(); + if (len >= sizeof(value) - 1) + return NULL; + if (c == '\n') { + if (quote) + return NULL; + value[len] = 0; + return value; + } + if (comment) + continue; + if (isspace(c) && !quote) { + space = 1; + continue; + } + if (!quote) { + if (c == ';' || c == '#') { + comment = 1; + continue; + } + } + if (space) { + if (len) + value[len++] = ' '; + space = 0; + } + if (c == '\\') { + c = get_next_char(); + switch (c) { + case '\n': + continue; + case 't': + c = '\t'; + break; + case 'b': + c = '\b'; + break; + case 'n': + c = '\n'; + break; + /* Some characters escape as themselves */ + case '\\': case '"': + break; + /* Reject unknown escape sequences */ + default: + return NULL; + } + value[len++] = c; + continue; + } + if (c == '"') { + quote = 1-quote; + continue; + } + value[len++] = c; + } +} + +static inline int iskeychar(int c) +{ + return isalnum(c) || c == '-'; +} + +static int get_value(config_fn_t fn, void *data, char *name, unsigned int len) +{ + int c; + char *value; + + /* Get the full name */ + for (;;) { + c = get_next_char(); + if (config_file_eof) + break; + if (!iskeychar(c)) + break; + name[len++] = tolower(c); + if (len >= MAXNAME) + return -1; + } + name[len] = 0; + while (c == ' ' || c == '\t') + c = get_next_char(); + + value = NULL; + if (c != '\n') { + if (c != '=') + return -1; + value = parse_value(); + if (!value) + return -1; + } + return fn(name, value, data); +} + +static int get_extended_base_var(char *name, int baselen, int c) +{ + do { + if (c == '\n') + return -1; + c = get_next_char(); + } while (isspace(c)); + + /* We require the format to be '[base "extension"]' */ + if (c != '"') + return -1; + name[baselen++] = '.'; + + for (;;) { + int c = get_next_char(); + if (c == '\n') + return -1; + if (c == '"') + break; + if (c == '\\') { + c = get_next_char(); + if (c == '\n') + return -1; + } + name[baselen++] = c; + if (baselen > 
MAXNAME / 2) + return -1; + } + + /* Final ']' */ + if (get_next_char() != ']') + return -1; + return baselen; +} + +static int get_base_var(char *name) +{ + int baselen = 0; + + for (;;) { + int c = get_next_char(); + if (config_file_eof) + return -1; + if (c == ']') + return baselen; + if (isspace(c)) + return get_extended_base_var(name, baselen, c); + if (!iskeychar(c) && c != '.') + return -1; + if (baselen > MAXNAME / 2) + return -1; + name[baselen++] = tolower(c); + } +} + +static int perf_parse_file(config_fn_t fn, void *data) +{ + int comment = 0; + int baselen = 0; + static char var[MAXNAME]; + + /* U+FEFF Byte Order Mark in UTF8 */ + static const unsigned char *utf8_bom = (unsigned char *) "\xef\xbb\xbf"; + const unsigned char *bomptr = utf8_bom; + + for (;;) { + int c = get_next_char(); + if (bomptr && *bomptr) { + /* We are at the file beginning; skip UTF8-encoded BOM + * if present. Sane editors won't put this in on their + * own, but e.g. Windows Notepad will do it happily. */ + if ((unsigned char) c == *bomptr) { + bomptr++; + continue; + } else { + /* Do not tolerate partial BOM. */ + if (bomptr != utf8_bom) + break; + /* No BOM at file beginning. Cool. */ + bomptr = NULL; + } + } + if (c == '\n') { + if (config_file_eof) + return 0; + comment = 0; + continue; + } + if (comment || isspace(c)) + continue; + if (c == '#' || c == ';') { + comment = 1; + continue; + } + if (c == '[') { + baselen = get_base_var(var); + if (baselen <= 0) + break; + var[baselen++] = '.'; + var[baselen] = 0; + continue; + } + if (!isalpha(c)) + break; + var[baselen] = tolower(c); + if (get_value(fn, data, var, baselen+1) < 0) + break; + } + die("bad config file line %d in %s", config_linenr, config_file_name); +} + +static int parse_unit_factor(const char *end, unsigned long *val) +{ + if (!*end) + return 1; + else if (!strcasecmp(end, "k")) { + *val *= 1024; + return 1; + } + else if (!strcasecmp(end, "m")) { + *val *= 1024 * 1024; + return 1; + } + else if (!strcasecmp(end, "g")) { + *val *= 1024 * 1024 * 1024; + return 1; + } + return 0; +} + +static int perf_parse_long(const char *value, long *ret) +{ + if (value && *value) { + char *end; + long val = strtol(value, &end, 0); + unsigned long factor = 1; + if (!parse_unit_factor(end, &factor)) + return 0; + *ret = val * factor; + return 1; + } + return 0; +} + +int perf_parse_ulong(const char *value, unsigned long *ret) +{ + if (value && *value) { + char *end; + unsigned long val = strtoul(value, &end, 0); + if (!parse_unit_factor(end, &val)) + return 0; + *ret = val; + return 1; + } + return 0; +} + +static void die_bad_config(const char *name) +{ + if (config_file_name) + die("bad config value for '%s' in %s", name, config_file_name); + die("bad config value for '%s'", name); +} + +int perf_config_int(const char *name, const char *value) +{ + long ret = 0; + if (!perf_parse_long(value, &ret)) + die_bad_config(name); + return ret; +} + +unsigned long perf_config_ulong(const char *name, const char *value) +{ + unsigned long ret; + if (!perf_parse_ulong(value, &ret)) + die_bad_config(name); + return ret; +} + +int perf_config_bool_or_int(const char *name, const char *value, int *is_bool) +{ + *is_bool = 1; + if (!value) + return 1; + if (!*value) + return 0; + if (!strcasecmp(value, "true") || !strcasecmp(value, "yes") || !strcasecmp(value, "on")) + return 1; + if (!strcasecmp(value, "false") || !strcasecmp(value, "no") || !strcasecmp(value, "off")) + return 0; + *is_bool = 0; + return perf_config_int(name, value); +} + +int perf_config_bool(const 
char *name, const char *value) +{ + int discard; + return !!perf_config_bool_or_int(name, value, &discard); +} + +int perf_config_string(const char **dest, const char *var, const char *value) +{ + if (!value) + return config_error_nonbool(var); + *dest = strdup(value); + return 0; +} + +static int perf_default_core_config(const char *var, const char *value) +{ + /* Add other config variables here and to Documentation/config.txt. */ + return 0; +} + +int perf_default_config(const char *var, const char *value, void *dummy) +{ + if (!prefixcmp(var, "core.")) + return perf_default_core_config(var, value); + + /* Add other config variables here and to Documentation/config.txt. */ + return 0; +} + +int perf_config_from_file(config_fn_t fn, const char *filename, void *data) +{ + int ret; + FILE *f = fopen(filename, "r"); + + ret = -1; + if (f) { + config_file = f; + config_file_name = filename; + config_linenr = 1; + config_file_eof = 0; + ret = perf_parse_file(fn, data); + fclose(f); + config_file_name = NULL; + } + return ret; +} + +const char *perf_etc_perfconfig(void) +{ + static const char *system_wide; + if (!system_wide) + system_wide = system_path(ETC_PERFCONFIG); + return system_wide; +} + +static int perf_env_bool(const char *k, int def) +{ + const char *v = getenv(k); + return v ? perf_config_bool(k, v) : def; +} + +int perf_config_system(void) +{ + return !perf_env_bool("PERF_CONFIG_NOSYSTEM", 0); +} + +int perf_config_global(void) +{ + return !perf_env_bool("PERF_CONFIG_NOGLOBAL", 0); +} + +int perf_config(config_fn_t fn, void *data) +{ + int ret = 0, found = 0; + char *repo_config = NULL; + const char *home = NULL; + + /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */ + if (config_exclusive_filename) + return perf_config_from_file(fn, config_exclusive_filename, data); + if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK)) { + ret += perf_config_from_file(fn, perf_etc_perfconfig(), + data); + found += 1; + } + + home = getenv("HOME"); + if (perf_config_global() && home) { + char *user_config = strdup(mkpath("%s/.perfconfig", home)); + if (!access(user_config, R_OK)) { + ret += perf_config_from_file(fn, user_config, data); + found += 1; + } + free(user_config); + } + + repo_config = perf_pathdup("config"); + if (!access(repo_config, R_OK)) { + ret += perf_config_from_file(fn, repo_config, data); + found += 1; + } + free(repo_config); + if (found == 0) + return -1; + return ret; +} + +/* + * Find all the stuff for perf_config_set() below. 
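+ *
+ * As a hedged usage sketch: perf_config_set("alias.rec", "record -e cycles")
+ * is meant to rewrite (or append) the line "rec = record -e cycles" under
+ * the "[alias]" section of the config file, using the state machine in
+ * store_aux() to locate the spot.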
+ */ + +#define MAX_MATCHES 512 + +static struct { + int baselen; + char* key; + int do_not_match; + regex_t* value_regex; + int multi_replace; + size_t offset[MAX_MATCHES]; + enum { START, SECTION_SEEN, SECTION_END_SEEN, KEY_SEEN } state; + int seen; +} store; + +static int matches(const char* key, const char* value) +{ + return !strcmp(key, store.key) && + (store.value_regex == NULL || + (store.do_not_match ^ + !regexec(store.value_regex, value, 0, NULL, 0))); +} + +static int store_aux(const char* key, const char* value, void *cb) +{ + const char *ep; + size_t section_len; + + switch (store.state) { + case KEY_SEEN: + if (matches(key, value)) { + if (store.seen == 1 && store.multi_replace == 0) { + warning("%s has multiple values", key); + } else if (store.seen >= MAX_MATCHES) { + error("too many matches for %s", key); + return 1; + } + + store.offset[store.seen] = ftell(config_file); + store.seen++; + } + break; + case SECTION_SEEN: + /* + * What we are looking for is in store.key (both + * section and var), and its section part is baselen + * long. We found key (again, both section and var). + * We would want to know if this key is in the same + * section as what we are looking for. We already + * know we are in the same section as what should + * hold store.key. + */ + ep = strrchr(key, '.'); + section_len = ep - key; + + if ((section_len != store.baselen) || + memcmp(key, store.key, section_len+1)) { + store.state = SECTION_END_SEEN; + break; + } + + /* + * Do not increment matches: this is no match, but we + * just made sure we are in the desired section. + */ + store.offset[store.seen] = ftell(config_file); + /* fallthru */ + case SECTION_END_SEEN: + case START: + if (matches(key, value)) { + store.offset[store.seen] = ftell(config_file); + store.state = KEY_SEEN; + store.seen++; + } else { + if (strrchr(key, '.') - key == store.baselen && + !strncmp(key, store.key, store.baselen)) { + store.state = SECTION_SEEN; + store.offset[store.seen] = ftell(config_file); + } + } + } + return 0; +} + +static int store_write_section(int fd, const char* key) +{ + const char *dot; + int i, success; + struct strbuf sb = STRBUF_INIT; + + dot = memchr(key, '.', store.baselen); + if (dot) { + strbuf_addf(&sb, "[%.*s \"", (int)(dot - key), key); + for (i = dot - key + 1; i < store.baselen; i++) { + if (key[i] == '"' || key[i] == '\\') + strbuf_addch(&sb, '\\'); + strbuf_addch(&sb, key[i]); + } + strbuf_addstr(&sb, "\"]\n"); + } else { + strbuf_addf(&sb, "[%.*s]\n", store.baselen, key); + } + + success = write_in_full(fd, sb.buf, sb.len) == sb.len; + strbuf_release(&sb); + + return success; +} + +static int store_write_pair(int fd, const char* key, const char* value) +{ + int i, success; + int length = strlen(key + store.baselen + 1); + const char *quote = ""; + struct strbuf sb = STRBUF_INIT; + + /* + * Check to see if the value needs to be surrounded with a dq pair. + * Note that problematic characters are always backslash-quoted; this + * check is about not losing leading or trailing SP and strings that + * follow beginning-of-comment characters (i.e. ';' and '#') by the + * configuration parser. 
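+ *
+ * For example, a value of "  grep ;-) " is emitted as
+ *	key = "  grep ;-) "
+ * where the surrounding double quotes (the "dq pair") preserve the
+ * leading/trailing spaces and keep ';' from starting a comment when
+ * the file is read back.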
+ */
+	if (value[0] == ' ')
+		quote = "\"";
+	for (i = 0; value[i]; i++)
+		if (value[i] == ';' || value[i] == '#')
+			quote = "\"";
+	if (i && value[i - 1] == ' ')
+		quote = "\"";
+
+	strbuf_addf(&sb, "\t%.*s = %s",
+		    length, key + store.baselen + 1, quote);
+
+	for (i = 0; value[i]; i++)
+		switch (value[i]) {
+		case '\n':
+			strbuf_addstr(&sb, "\\n");
+			break;
+		case '\t':
+			strbuf_addstr(&sb, "\\t");
+			break;
+		case '"':
+		case '\\':
+			strbuf_addch(&sb, '\\');
+		default:
+			strbuf_addch(&sb, value[i]);
+			break;
+		}
+	strbuf_addf(&sb, "%s\n", quote);
+
+	success = write_in_full(fd, sb.buf, sb.len) == sb.len;
+	strbuf_release(&sb);
+
+	return success;
+}
+
+static ssize_t find_beginning_of_line(const char* contents, size_t size,
+	size_t offset_, int* found_bracket)
+{
+	size_t equal_offset = size, bracket_offset = size;
+	ssize_t offset;
+
+contline:
+	for (offset = offset_-2; offset > 0
+			&& contents[offset] != '\n'; offset--)
+		switch (contents[offset]) {
+		case '=': equal_offset = offset; break;
+		case ']': bracket_offset = offset; break;
+		}
+	if (offset > 0 && contents[offset-1] == '\\') {
+		offset_ = offset;
+		goto contline;
+	}
+	if (bracket_offset < equal_offset) {
+		*found_bracket = 1;
+		offset = bracket_offset+1;
+	} else
+		offset++;
+
+	return offset;
+}
+
+int perf_config_set(const char* key, const char* value)
+{
+	return perf_config_set_multivar(key, value, NULL, 0);
+}
+
+/*
+ * If value==NULL, unset in (remove from) config,
+ * if value_regex!=NULL, disregard key/value pairs where value does not match.
+ * if multi_replace==0, nothing, or only one matching key/value is replaced,
+ * else all matching key/values (regardless how many) are removed,
+ * before the new pair is written.
+ *
+ * Returns 0 on success.
+ *
+ * This function does this:
+ *
+ * - it locks the config file by creating ".perf/config.lock"
+ *
+ * - it then parses the config using store_aux() as validator to find
+ *   the position of the key/value pair to replace. If it is to be unset,
+ *   it must be found exactly once.
+ *
+ * - the config file is mmap()ed and the part before the match (if any) is
+ *   written to the lock file, then the changed part and the rest.
+ *
+ * - the config file is removed and the lock file rename()d to it.
+ *
+ */
+int perf_config_set_multivar(const char* key, const char* value,
+	const char* value_regex, int multi_replace)
+{
+	int i, dot;
+	int fd = -1, in_fd;
+	int ret = 0;
+	char* config_filename;
+	const char* last_dot = strrchr(key, '.');
+
+	if (config_exclusive_filename)
+		config_filename = strdup(config_exclusive_filename);
+	else
+		config_filename = perf_pathdup("config");
+
+	/*
+	 * Since "key" actually contains the section name and the real
+	 * key name separated by a dot, we have to know where the dot is.
+	 */
+
+	if (last_dot == NULL) {
+		error("key does not contain a section: %s", key);
+		ret = 2;
+		goto out_free;
+	}
+	store.baselen = last_dot - key;
+
+	store.multi_replace = multi_replace;
+
+	/*
+	 * Validate the key and while at it, lower case it for matching.
+	 */
+	store.key = malloc(strlen(key) + 1);
+	dot = 0;
+	for (i = 0; key[i]; i++) {
+		unsigned char c = key[i];
+		if (c == '.')
+			dot = 1;
+		/* Leave the extended basename untouched.
*/ + if (!dot || i > store.baselen) { + if (!iskeychar(c) || (i == store.baselen+1 && !isalpha(c))) { + error("invalid key: %s", key); + free(store.key); + ret = 1; + goto out_free; + } + c = tolower(c); + } else if (c == '\n') { + error("invalid key (newline): %s", key); + free(store.key); + ret = 1; + goto out_free; + } + store.key[i] = c; + } + store.key[i] = 0; + + /* + * If .perf/config does not exist yet, write a minimal version. + */ + in_fd = open(config_filename, O_RDONLY); + if ( in_fd < 0 ) { + free(store.key); + + if ( ENOENT != errno ) { + error("opening %s: %s", config_filename, + strerror(errno)); + ret = 3; /* same as "invalid config file" */ + goto out_free; + } + /* if nothing to unset, error out */ + if (value == NULL) { + ret = 5; + goto out_free; + } + + store.key = (char*)key; + if (!store_write_section(fd, key) || + !store_write_pair(fd, key, value)) + goto write_err_out; + } else { + struct stat st; + char* contents; + size_t contents_sz, copy_begin, copy_end; + int i, new_line = 0; + + if (value_regex == NULL) + store.value_regex = NULL; + else { + if (value_regex[0] == '!') { + store.do_not_match = 1; + value_regex++; + } else + store.do_not_match = 0; + + store.value_regex = (regex_t*)malloc(sizeof(regex_t)); + if (regcomp(store.value_regex, value_regex, + REG_EXTENDED)) { + error("invalid pattern: %s", value_regex); + free(store.value_regex); + ret = 6; + goto out_free; + } + } + + store.offset[0] = 0; + store.state = START; + store.seen = 0; + + /* + * After this, store.offset will contain the *end* offset + * of the last match, or remain at 0 if no match was found. + * As a side effect, we make sure to transform only a valid + * existing config file. + */ + if (perf_config_from_file(store_aux, config_filename, NULL)) { + error("invalid config file %s", config_filename); + free(store.key); + if (store.value_regex != NULL) { + regfree(store.value_regex); + free(store.value_regex); + } + ret = 3; + goto out_free; + } + + free(store.key); + if (store.value_regex != NULL) { + regfree(store.value_regex); + free(store.value_regex); + } + + /* if nothing to unset, or too many matches, error out */ + if ((store.seen == 0 && value == NULL) || + (store.seen > 1 && multi_replace == 0)) { + ret = 5; + goto out_free; + } + + fstat(in_fd, &st); + contents_sz = xsize_t(st.st_size); + contents = mmap(NULL, contents_sz, PROT_READ, + MAP_PRIVATE, in_fd, 0); + close(in_fd); + + if (store.seen == 0) + store.seen = 1; + + for (i = 0, copy_begin = 0; i < store.seen; i++) { + if (store.offset[i] == 0) { + store.offset[i] = copy_end = contents_sz; + } else if (store.state != KEY_SEEN) { + copy_end = store.offset[i]; + } else + copy_end = find_beginning_of_line( + contents, contents_sz, + store.offset[i]-2, &new_line); + + if (copy_end > 0 && contents[copy_end-1] != '\n') + new_line = 1; + + /* write the first part of the config */ + if (copy_end > copy_begin) { + if (write_in_full(fd, contents + copy_begin, + copy_end - copy_begin) < + copy_end - copy_begin) + goto write_err_out; + if (new_line && + write_in_full(fd, "\n", 1) != 1) + goto write_err_out; + } + copy_begin = store.offset[i]; + } + + /* write the pair (value == NULL means unset) */ + if (value != NULL) { + if (store.state == START) { + if (!store_write_section(fd, key)) + goto write_err_out; + } + if (!store_write_pair(fd, key, value)) + goto write_err_out; + } + + /* write the rest of the config */ + if (copy_begin < contents_sz) + if (write_in_full(fd, contents + copy_begin, + contents_sz - copy_begin) < + contents_sz - 
copy_begin)
+				goto write_err_out;
+
+		munmap(contents, contents_sz);
+	}
+
+	ret = 0;
+
+out_free:
+	free(config_filename);
+	return ret;
+
+write_err_out:
+	ret = 4;	/* report the write failure instead of silently returning 0 */
+	goto out_free;
+
+}
+
+/*
+ * Call this to report error for your variable that should not
+ * get a boolean value (i.e. "[my] var" means "true").
+ */
+int config_error_nonbool(const char *var)
+{
+	return error("Missing value for '%s'", var);
+}
diff --git a/tools/perf/util/ctype.c b/tools/perf/util/ctype.c
new file mode 100644
index 00000000000..b90ec004f29
--- /dev/null
+++ b/tools/perf/util/ctype.c
@@ -0,0 +1,26 @@
+/*
+ * Sane locale-independent, ASCII ctype.
+ *
+ * No surprises, and works with signed and unsigned chars.
+ */
+#include "cache.h"
+
+enum {
+	S = GIT_SPACE,
+	A = GIT_ALPHA,
+	D = GIT_DIGIT,
+	G = GIT_GLOB_SPECIAL,	/* *, ?, [, \\ */
+	R = GIT_REGEX_SPECIAL,	/* $, (, ), +, ., ^, {, | */
+};
+
+unsigned char sane_ctype[256] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0, S, S, 0, 0, S, 0, 0,	/*   0.. 15 */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/*  16.. 31 */
+	S, 0, 0, 0, R, 0, 0, 0, R, R, G, R, 0, 0, R, 0,	/*  32.. 47 */
+	D, D, D, D, D, D, D, D, D, D, 0, 0, 0, 0, 0, G,	/*  48.. 63 */
+	0, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A,	/*  64.. 79 */
+	A, A, A, A, A, A, A, A, A, A, A, G, G, 0, R, 0,	/*  80.. 95 */
+	0, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A,	/*  96..111 */
+	A, A, A, A, A, A, A, A, A, A, A, R, R, 0, 0, 0,	/* 112..127 */
+	/* Nothing in the 128.. range */
+};
diff --git a/tools/perf/util/environment.c b/tools/perf/util/environment.c
new file mode 100644
index 00000000000..275b0ee345f
--- /dev/null
+++ b/tools/perf/util/environment.c
@@ -0,0 +1,9 @@
+/*
+ * We put all the perf config variables in this same object
+ * file, so that programs can link against the config parser
+ * without having to link against all the rest of perf.
+ */
+#include "cache.h"
+
+const char *pager_program;
+int pager_use_color = 1;
diff --git a/tools/perf/util/exec_cmd.c b/tools/perf/util/exec_cmd.c
new file mode 100644
index 00000000000..d3929226315
--- /dev/null
+++ b/tools/perf/util/exec_cmd.c
@@ -0,0 +1,165 @@
+#include "cache.h"
+#include "exec_cmd.h"
+#include "quote.h"
+#define MAX_ARGS	32
+
+extern char **environ;
+static const char *argv_exec_path;
+static const char *argv0_path;
+
+const char *system_path(const char *path)
+{
+#ifdef RUNTIME_PREFIX
+	static const char *prefix;
+#else
+	static const char *prefix = PREFIX;
+#endif
+	struct strbuf d = STRBUF_INIT;
+
+	if (is_absolute_path(path))
+		return path;
+
+#ifdef RUNTIME_PREFIX
+	assert(argv0_path);
+	assert(is_absolute_path(argv0_path));
+
+	if (!prefix &&
+	    !(prefix = strip_path_suffix(argv0_path, PERF_EXEC_PATH)) &&
+	    !(prefix = strip_path_suffix(argv0_path, BINDIR)) &&
+	    !(prefix = strip_path_suffix(argv0_path, "perf"))) {
+		prefix = PREFIX;
+		fprintf(stderr, "RUNTIME_PREFIX requested, "
+				"but prefix computation failed. "
+				"Using static fallback '%s'.\n", prefix);
+	}
+#endif
+
+	strbuf_addf(&d, "%s/%s", prefix, path);
+	path = strbuf_detach(&d, NULL);
+	return path;
+}
+
+const char *perf_extract_argv0_path(const char *argv0)
+{
+	const char *slash;
+
+	if (!argv0 || !*argv0)
+		return NULL;
+	slash = argv0 + strlen(argv0);
+
+	while (argv0 <= slash && !is_dir_sep(*slash))
+		slash--;
+
+	if (slash >= argv0) {
+		argv0_path = strndup(argv0, slash - argv0);
+		return slash + 1;
+	}
+
+	return argv0;
+}
+
+void perf_set_argv_exec_path(const char *exec_path)
+{
+	argv_exec_path = exec_path;
+	/*
+	 * Propagate this setting to external programs.
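+	 *
+	 * Hedged example (the path is hypothetical):
+	 *
+	 *	perf_set_argv_exec_path("/usr/libexec/perf-core");
+	 *
+	 * after which perf_exec_path() and any child process consulting
+	 * EXEC_PATH_ENVIRONMENT resolve the same directory.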
+	 */
+	setenv(EXEC_PATH_ENVIRONMENT, exec_path, 1);
+}
+
+
+/* Returns the highest-priority location to look for perf programs. */
+const char *perf_exec_path(void)
+{
+	const char *env;
+
+	if (argv_exec_path)
+		return argv_exec_path;
+
+	env = getenv(EXEC_PATH_ENVIRONMENT);
+	if (env && *env) {
+		return env;
+	}
+
+	return system_path(PERF_EXEC_PATH);
+}
+
+static void add_path(struct strbuf *out, const char *path)
+{
+	if (path && *path) {
+		if (is_absolute_path(path))
+			strbuf_addstr(out, path);
+		else
+			strbuf_addstr(out, make_nonrelative_path(path));
+
+		strbuf_addch(out, PATH_SEP);
+	}
+}
+
+void setup_path(void)
+{
+	const char *old_path = getenv("PATH");
+	struct strbuf new_path = STRBUF_INIT;
+
+	add_path(&new_path, perf_exec_path());
+	add_path(&new_path, argv0_path);
+
+	if (old_path)
+		strbuf_addstr(&new_path, old_path);
+	else
+		strbuf_addstr(&new_path, "/usr/local/bin:/usr/bin:/bin");
+
+	setenv("PATH", new_path.buf, 1);
+
+	strbuf_release(&new_path);
+}
+
+const char **prepare_perf_cmd(const char **argv)
+{
+	int argc;
+	const char **nargv;
+
+	for (argc = 0; argv[argc]; argc++)
+		; /* just counting */
+	nargv = malloc(sizeof(*nargv) * (argc + 2));
+
+	nargv[0] = "perf";
+	for (argc = 0; argv[argc]; argc++)
+		nargv[argc + 1] = argv[argc];
+	nargv[argc + 1] = NULL;
+	return nargv;
+}
+
+int execv_perf_cmd(const char **argv)
+{
+	const char **nargv = prepare_perf_cmd(argv);
+
+	/* execvp() can only ever return if it fails */
+	execvp("perf", (char **)nargv);
+
+	free(nargv);
+	return -1;
+}
+
+
+int execl_perf_cmd(const char *cmd, ...)
+{
+	int argc;
+	const char *argv[MAX_ARGS + 1];
+	const char *arg;
+	va_list param;
+
+	va_start(param, cmd);
+	argv[0] = cmd;
+	argc = 1;
+	while (argc < MAX_ARGS) {
+		arg = argv[argc++] = va_arg(param, char *);
+		if (!arg)
+			break;
+	}
+	va_end(param);
+	if (MAX_ARGS <= argc)
+		return error("too many args to run %s", cmd);
+
+	argv[argc] = NULL;
+	return execv_perf_cmd(argv);
}
diff --git a/tools/perf/util/exec_cmd.h b/tools/perf/util/exec_cmd.h
new file mode 100644
index 00000000000..effe25eb154
--- /dev/null
+++ b/tools/perf/util/exec_cmd.h
@@ -0,0 +1,13 @@
+#ifndef PERF_EXEC_CMD_H
+#define PERF_EXEC_CMD_H
+
+extern void perf_set_argv_exec_path(const char *exec_path);
+extern const char *perf_extract_argv0_path(const char *path);
+extern const char *perf_exec_path(void);
+extern void setup_path(void);
+extern const char **prepare_perf_cmd(const char **argv);
+extern int execv_perf_cmd(const char **argv); /* NULL terminated */
+extern int execl_perf_cmd(const char *cmd, ...);
+extern const char *system_path(const char *path);
+
+#endif /* PERF_EXEC_CMD_H */
diff --git a/tools/perf/util/generate-cmdlist.sh b/tools/perf/util/generate-cmdlist.sh
new file mode 100755
index 00000000000..f06f6fd148f
--- /dev/null
+++ b/tools/perf/util/generate-cmdlist.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+echo "/* Automatically generated by $0 */
+struct cmdname_help
+{
+    char name[16];
+    char help[80];
+};
+
+static struct cmdname_help common_cmds[] = {"
+
+sed -n -e 's/^perf-\([^ ]*\)[ ].* common.*/\1/p' command-list.txt |
+sort |
+while read cmd
+do
+     sed -n '
+     /^NAME/,/perf-'"$cmd"'/H
+     ${
+            x
+            s/.*perf-'"$cmd"' - \(.*\)/  {"'"$cmd"'", "\1"},/
+            p
+     }' "Documentation/perf-$cmd.txt"
+done
+echo "};"
diff --git a/tools/perf/util/help.c b/tools/perf/util/help.c
new file mode 100644
index 00000000000..6653f7dd1d7
--- /dev/null
+++ b/tools/perf/util/help.c
@@ -0,0 +1,367 @@
+#include "cache.h"
+#include "../builtin.h"
+#include "exec_cmd.h"
+#include "levenshtein.h"
+#include "help.h" + +/* most GUI terminals set COLUMNS (although some don't export it) */ +static int term_columns(void) +{ + char *col_string = getenv("COLUMNS"); + int n_cols; + + if (col_string && (n_cols = atoi(col_string)) > 0) + return n_cols; + +#ifdef TIOCGWINSZ + { + struct winsize ws; + if (!ioctl(1, TIOCGWINSZ, &ws)) { + if (ws.ws_col) + return ws.ws_col; + } + } +#endif + + return 80; +} + +void add_cmdname(struct cmdnames *cmds, const char *name, int len) +{ + struct cmdname *ent = malloc(sizeof(*ent) + len + 1); + + ent->len = len; + memcpy(ent->name, name, len); + ent->name[len] = 0; + + ALLOC_GROW(cmds->names, cmds->cnt + 1, cmds->alloc); + cmds->names[cmds->cnt++] = ent; +} + +static void clean_cmdnames(struct cmdnames *cmds) +{ + int i; + for (i = 0; i < cmds->cnt; ++i) + free(cmds->names[i]); + free(cmds->names); + cmds->cnt = 0; + cmds->alloc = 0; +} + +static int cmdname_compare(const void *a_, const void *b_) +{ + struct cmdname *a = *(struct cmdname **)a_; + struct cmdname *b = *(struct cmdname **)b_; + return strcmp(a->name, b->name); +} + +static void uniq(struct cmdnames *cmds) +{ + int i, j; + + if (!cmds->cnt) + return; + + for (i = j = 1; i < cmds->cnt; i++) + if (strcmp(cmds->names[i]->name, cmds->names[i-1]->name)) + cmds->names[j++] = cmds->names[i]; + + cmds->cnt = j; +} + +void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes) +{ + int ci, cj, ei; + int cmp; + + ci = cj = ei = 0; + while (ci < cmds->cnt && ei < excludes->cnt) { + cmp = strcmp(cmds->names[ci]->name, excludes->names[ei]->name); + if (cmp < 0) + cmds->names[cj++] = cmds->names[ci++]; + else if (cmp == 0) + ci++, ei++; + else if (cmp > 0) + ei++; + } + + while (ci < cmds->cnt) + cmds->names[cj++] = cmds->names[ci++]; + + cmds->cnt = cj; +} + +static void pretty_print_string_list(struct cmdnames *cmds, int longest) +{ + int cols = 1, rows; + int space = longest + 1; /* min 1 SP between words */ + int max_cols = term_columns() - 1; /* don't print *on* the edge */ + int i, j; + + if (space < max_cols) + cols = max_cols / space; + rows = (cmds->cnt + cols - 1) / cols; + + for (i = 0; i < rows; i++) { + printf(" "); + + for (j = 0; j < cols; j++) { + int n = j * rows + i; + int size = space; + if (n >= cmds->cnt) + break; + if (j == cols-1 || n + rows >= cmds->cnt) + size = 1; + printf("%-*s", size, cmds->names[n]->name); + } + putchar('\n'); + } +} + +static int is_executable(const char *name) +{ + struct stat st; + + if (stat(name, &st) || /* stat, not lstat */ + !S_ISREG(st.st_mode)) + return 0; + +#ifdef __MINGW32__ + /* cannot trust the executable bit, peek into the file instead */ + char buf[3] = { 0 }; + int n; + int fd = open(name, O_RDONLY); + st.st_mode &= ~S_IXUSR; + if (fd >= 0) { + n = read(fd, buf, 2); + if (n == 2) + /* DOS executables start with "MZ" */ + if (!strcmp(buf, "#!") || !strcmp(buf, "MZ")) + st.st_mode |= S_IXUSR; + close(fd); + } +#endif + return st.st_mode & S_IXUSR; +} + +static void list_commands_in_dir(struct cmdnames *cmds, + const char *path, + const char *prefix) +{ + int prefix_len; + DIR *dir = opendir(path); + struct dirent *de; + struct strbuf buf = STRBUF_INIT; + int len; + + if (!dir) + return; + if (!prefix) + prefix = "perf-"; + prefix_len = strlen(prefix); + + strbuf_addf(&buf, "%s/", path); + len = buf.len; + + while ((de = readdir(dir)) != NULL) { + int entlen; + + if (prefixcmp(de->d_name, prefix)) + continue; + + strbuf_setlen(&buf, len); + strbuf_addstr(&buf, de->d_name); + if (!is_executable(buf.buf)) + continue; + + entlen = 
strlen(de->d_name) - prefix_len; + if (has_extension(de->d_name, ".exe")) + entlen -= 4; + + add_cmdname(cmds, de->d_name + prefix_len, entlen); + } + closedir(dir); + strbuf_release(&buf); +} + +void load_command_list(const char *prefix, + struct cmdnames *main_cmds, + struct cmdnames *other_cmds) +{ + const char *env_path = getenv("PATH"); + const char *exec_path = perf_exec_path(); + + if (exec_path) { + list_commands_in_dir(main_cmds, exec_path, prefix); + qsort(main_cmds->names, main_cmds->cnt, + sizeof(*main_cmds->names), cmdname_compare); + uniq(main_cmds); + } + + if (env_path) { + char *paths, *path, *colon; + path = paths = strdup(env_path); + while (1) { + if ((colon = strchr(path, PATH_SEP))) + *colon = 0; + if (!exec_path || strcmp(path, exec_path)) + list_commands_in_dir(other_cmds, path, prefix); + + if (!colon) + break; + path = colon + 1; + } + free(paths); + + qsort(other_cmds->names, other_cmds->cnt, + sizeof(*other_cmds->names), cmdname_compare); + uniq(other_cmds); + } + exclude_cmds(other_cmds, main_cmds); +} + +void list_commands(const char *title, struct cmdnames *main_cmds, + struct cmdnames *other_cmds) +{ + int i, longest = 0; + + for (i = 0; i < main_cmds->cnt; i++) + if (longest < main_cmds->names[i]->len) + longest = main_cmds->names[i]->len; + for (i = 0; i < other_cmds->cnt; i++) + if (longest < other_cmds->names[i]->len) + longest = other_cmds->names[i]->len; + + if (main_cmds->cnt) { + const char *exec_path = perf_exec_path(); + printf("available %s in '%s'\n", title, exec_path); + printf("----------------"); + mput_char('-', strlen(title) + strlen(exec_path)); + putchar('\n'); + pretty_print_string_list(main_cmds, longest); + putchar('\n'); + } + + if (other_cmds->cnt) { + printf("%s available from elsewhere on your $PATH\n", title); + printf("---------------------------------------"); + mput_char('-', strlen(title)); + putchar('\n'); + pretty_print_string_list(other_cmds, longest); + putchar('\n'); + } +} + +int is_in_cmdlist(struct cmdnames *c, const char *s) +{ + int i; + for (i = 0; i < c->cnt; i++) + if (!strcmp(s, c->names[i]->name)) + return 1; + return 0; +} + +static int autocorrect; +static struct cmdnames aliases; + +static int perf_unknown_cmd_config(const char *var, const char *value, void *cb) +{ + if (!strcmp(var, "help.autocorrect")) + autocorrect = perf_config_int(var,value); + /* Also use aliases for command lookup */ + if (!prefixcmp(var, "alias.")) + add_cmdname(&aliases, var + 6, strlen(var + 6)); + + return perf_default_config(var, value, cb); +} + +static int levenshtein_compare(const void *p1, const void *p2) +{ + const struct cmdname *const *c1 = p1, *const *c2 = p2; + const char *s1 = (*c1)->name, *s2 = (*c2)->name; + int l1 = (*c1)->len; + int l2 = (*c2)->len; + return l1 != l2 ? 
l1 - l2 : strcmp(s1, s2);
+}
+
+static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)
+{
+	int i;
+	ALLOC_GROW(cmds->names, cmds->cnt + old->cnt, cmds->alloc);
+
+	for (i = 0; i < old->cnt; i++)
+		cmds->names[cmds->cnt++] = old->names[i];
+	free(old->names);
+	old->cnt = 0;
+	old->names = NULL;
+}
+
+const char *help_unknown_cmd(const char *cmd)
+{
+	int i, n = 0, best_similarity = 0;
+	struct cmdnames main_cmds, other_cmds;
+
+	memset(&main_cmds, 0, sizeof(main_cmds));
+	memset(&other_cmds, 0, sizeof(other_cmds));
+	memset(&aliases, 0, sizeof(aliases));
+
+	perf_config(perf_unknown_cmd_config, NULL);
+
+	load_command_list("perf-", &main_cmds, &other_cmds);
+
+	add_cmd_list(&main_cmds, &aliases);
+	add_cmd_list(&main_cmds, &other_cmds);
+	qsort(main_cmds.names, main_cmds.cnt,
+	      sizeof(*main_cmds.names), cmdname_compare);
+	uniq(&main_cmds);
+
+	if (main_cmds.cnt) {
+		/* This reuses cmdname->len for similarity index */
+		for (i = 0; i < main_cmds.cnt; ++i)
+			main_cmds.names[i]->len =
+				levenshtein(cmd, main_cmds.names[i]->name, 0, 2, 1, 4);
+
+		qsort(main_cmds.names, main_cmds.cnt,
+		      sizeof(*main_cmds.names), levenshtein_compare);
+
+		best_similarity = main_cmds.names[0]->len;
+		n = 1;
+		while (n < main_cmds.cnt && best_similarity == main_cmds.names[n]->len)
+			++n;
+	}
+
+	if (autocorrect && n == 1) {
+		const char *assumed = main_cmds.names[0]->name;
+
+		main_cmds.names[0] = NULL;
+		clean_cmdnames(&main_cmds);
+		fprintf(stderr, "WARNING: You called a perf program named '%s', "
+			"which does not exist.\n"
+			"Continuing under the assumption that you meant '%s'\n",
+			cmd, assumed);
+		if (autocorrect > 0) {
+			fprintf(stderr, "in %0.1f seconds automatically...\n",
+				(float)autocorrect/10.0);
+			poll(NULL, 0, autocorrect * 100);
+		}
+		return assumed;
+	}
+
+	fprintf(stderr, "perf: '%s' is not a perf-command. See 'perf --help'.\n", cmd);
+
+	if (main_cmds.cnt && best_similarity < 6) {
+		fprintf(stderr, "\nDid you mean %s?\n",
+			n < 2 ? "this": "one of these");
+
+		for (i = 0; i < n; i++)
+			fprintf(stderr, "\t%s\n", main_cmds.names[i]->name);
+	}
+
+	exit(1);
+}
+
+int cmd_version(int argc, const char **argv, const char *prefix)
+{
+	printf("perf version %s\n", perf_version_string);
+	return 0;
+}
diff --git a/tools/perf/util/help.h b/tools/perf/util/help.h
new file mode 100644
index 00000000000..56bc15406ff
--- /dev/null
+++ b/tools/perf/util/help.h
@@ -0,0 +1,29 @@
+#ifndef HELP_H
+#define HELP_H
+
+struct cmdnames {
+	int alloc;
+	int cnt;
+	struct cmdname {
+		size_t len; /* also used for similarity index in help.c */
+		char name[FLEX_ARRAY];
+	} **names;
+};
+
+static inline void mput_char(char c, unsigned int num)
+{
+	while(num--)
+		putchar(c);
+}
+
+void load_command_list(const char *prefix,
+		struct cmdnames *main_cmds,
+		struct cmdnames *other_cmds);
+void add_cmdname(struct cmdnames *cmds, const char *name, int len);
+/* Here we require that excludes is a sorted list. */
+void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes);
+int is_in_cmdlist(struct cmdnames *c, const char *s);
+void list_commands(const char *title, struct cmdnames *main_cmds,
+		   struct cmdnames *other_cmds);
+
+#endif /* HELP_H */
diff --git a/tools/perf/util/levenshtein.c b/tools/perf/util/levenshtein.c
new file mode 100644
index 00000000000..e521d1516df
--- /dev/null
+++ b/tools/perf/util/levenshtein.c
@@ -0,0 +1,84 @@
+#include "cache.h"
+#include "levenshtein.h"
+
+/*
+ * This function implements the Damerau-Levenshtein algorithm to
+ * calculate a distance between strings.
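+ * (For instance, with unit costs, levenshtein("perf", "pref", 1, 1, 1, 1)
+ * returns 1: a single swap of the adjacent letters 'e' and 'r'.)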
+ *
+ * Basically, it says how many letters need to be swapped, substituted,
+ * deleted from, or added to string1, at least, to get string2.
+ *
+ * The idea is to build a distance matrix for the substrings of both
+ * strings. To avoid a large space complexity, only the last three rows
+ * are kept in memory (if swaps had the same or higher cost as one deletion
+ * plus one insertion, only two rows would be needed).
+ *
+ * At any stage, "i + 1" denotes the length of the current substring of
+ * string1 that the distance is calculated for.
+ *
+ * row2 holds the current row, row1 the previous row (i.e. for the substring
+ * of string1 of length "i"), and row0 the row before that.
+ *
+ * In other words, at the start of the big loop, row2[j + 1] contains the
+ * Damerau-Levenshtein distance between the substring of string1 of length
+ * "i" and the substring of string2 of length "j + 1".
+ *
+ * All the big loop does is determine the partial minimum-cost paths.
+ *
+ * It does so by calculating the costs of the path ending in characters
+ * i (in string1) and j (in string2), respectively, given that the last
+ * operation is a substitution, a swap, a deletion, or an insertion.
+ *
+ * This implementation allows the costs to be weighted:
+ *
+ * - w (as in "sWap")
+ * - s (as in "Substitution")
+ * - a (for insertion, AKA "Add")
+ * - d (as in "Deletion")
+ *
+ * Note that this algorithm calculates a distance _iff_ d == a.
+ */
+int levenshtein(const char *string1, const char *string2,
+		int w, int s, int a, int d)
+{
+	int len1 = strlen(string1), len2 = strlen(string2);
+	int *row0 = malloc(sizeof(int) * (len2 + 1));
+	int *row1 = malloc(sizeof(int) * (len2 + 1));
+	int *row2 = malloc(sizeof(int) * (len2 + 1));
+	int i, j;
+
+	for (j = 0; j <= len2; j++)
+		row1[j] = j * a;
+	for (i = 0; i < len1; i++) {
+		int *dummy;
+
+		row2[0] = (i + 1) * d;
+		for (j = 0; j < len2; j++) {
+			/* substitution */
+			row2[j + 1] = row1[j] + s * (string1[i] != string2[j]);
+			/* swap */
+			if (i > 0 && j > 0 && string1[i - 1] == string2[j] &&
+					string1[i] == string2[j - 1] &&
+					row2[j + 1] > row0[j - 1] + w)
+				row2[j + 1] = row0[j - 1] + w;
+			/* deletion */
+			if (row2[j + 1] > row1[j + 1] + d)
+				row2[j + 1] = row1[j + 1] + d;
+			/* insertion */
+			if (row2[j + 1] > row2[j] + a)
+				row2[j + 1] = row2[j] + a;
+		}
+
+		dummy = row0;
+		row0 = row1;
+		row1 = row2;
+		row2 = dummy;
+	}
+
+	i = row1[len2];
+	free(row0);
+	free(row1);
+	free(row2);
+
+	return i;
+}
diff --git a/tools/perf/util/levenshtein.h b/tools/perf/util/levenshtein.h
new file mode 100644
index 00000000000..0173abeef52
--- /dev/null
+++ b/tools/perf/util/levenshtein.h
@@ -0,0 +1,8 @@
+#ifndef LEVENSHTEIN_H
+#define LEVENSHTEIN_H
+
+int levenshtein(const char *string1, const char *string2,
+	int swap_penalty, int substitution_penalty,
+	int insertion_penalty, int deletion_penalty);
+
+#endif
diff --git a/tools/perf/util/list.h b/tools/perf/util/list.h
new file mode 100644
index 00000000000..e2548e8072c
--- /dev/null
+++ b/tools/perf/util/list.h
@@ -0,0 +1,603 @@
+#ifndef _LINUX_LIST_H
+#define _LINUX_LIST_H
+/*
+  Copyright (C) Cast of dozens, comes from the Linux kernel
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+*/
+
+#include <stddef.h>
+
+/*
+ * These are non-NULL pointers that will result in page faults
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+#define LIST_POISON1 ((void *)0x00100100)
+#define LIST_POISON2 ((void *)0x00200200)
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr:	the pointer to the member.
+ * @type:	the type of the container struct this is embedded in.
+ * @member:	the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({			\
+	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
+	(type *)( (char *)__mptr - offsetof(type,member) );})
+
+/*
+ * Simple doubly linked list implementation.
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+struct list_head {
+	struct list_head *next, *prev;
+};
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+	struct list_head name = LIST_HEAD_INIT(name)
+
+static inline void INIT_LIST_HEAD(struct list_head *list)
+{
+	list->next = list;
+	list->prev = list;
+}
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_add(struct list_head *new,
+			      struct list_head *prev,
+			      struct list_head *next)
+{
+	next->prev = new;
+	new->next = next;
+	new->prev = prev;
+	prev->next = new;
+}
+
+/**
+ * list_add - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+static inline void list_add(struct list_head *new, struct list_head *head)
+{
+	__list_add(new, head, head->next);
+}
+
+/**
+ * list_add_tail - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static inline void list_add_tail(struct list_head *new, struct list_head *head)
+{
+	__list_add(new, head->prev, head);
+}
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_del(struct list_head * prev, struct list_head * next)
+{
+	next->prev = prev;
+	prev->next = next;
+}
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
+static inline void list_del(struct list_head *entry)
+{
+	__list_del(entry->prev, entry->next);
+	entry->next = LIST_POISON1;
+	entry->prev = LIST_POISON2;
+}
+
+/**
+ * list_del_range - deletes range of entries from list.
+ * @begin: first element in the range to delete from the list.
+ * @end: last element in the range to delete from the list.
+ * Note: list_empty on the range of entries does not return true after this,
+ * the entries are in an undefined state.
+ */
+static inline void list_del_range(struct list_head *begin,
+				  struct list_head *end)
+{
+	begin->prev->next = end->next;
+	end->next->prev = begin->prev;
+}
+
+/**
+ * list_replace - replace old entry by new one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ * Note: if 'old' was empty, it will be overwritten.
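+ *
+ * After the call, list neighbours that pointed at 'old' point at 'new';
+ * 'old' itself still holds its stale pointers and must be reinitialised
+ * (see list_replace_init() below) before it can be reused.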
+ */ +static inline void list_replace(struct list_head *old, + struct list_head *new) +{ + new->next = old->next; + new->next->prev = new; + new->prev = old->prev; + new->prev->next = new; +} + +static inline void list_replace_init(struct list_head *old, + struct list_head *new) +{ + list_replace(old, new); + INIT_LIST_HEAD(old); +} + +/** + * list_del_init - deletes entry from list and reinitialize it. + * @entry: the element to delete from the list. + */ +static inline void list_del_init(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + INIT_LIST_HEAD(entry); +} + +/** + * list_move - delete from one list and add as another's head + * @list: the entry to move + * @head: the head that will precede our entry + */ +static inline void list_move(struct list_head *list, struct list_head *head) +{ + __list_del(list->prev, list->next); + list_add(list, head); +} + +/** + * list_move_tail - delete from one list and add as another's tail + * @list: the entry to move + * @head: the head that will follow our entry + */ +static inline void list_move_tail(struct list_head *list, + struct list_head *head) +{ + __list_del(list->prev, list->next); + list_add_tail(list, head); +} + +/** + * list_is_last - tests whether @list is the last entry in list @head + * @list: the entry to test + * @head: the head of the list + */ +static inline int list_is_last(const struct list_head *list, + const struct list_head *head) +{ + return list->next == head; +} + +/** + * list_empty - tests whether a list is empty + * @head: the list to test. + */ +static inline int list_empty(const struct list_head *head) +{ + return head->next == head; +} + +/** + * list_empty_careful - tests whether a list is empty and not being modified + * @head: the list to test + * + * Description: + * tests whether a list is empty _and_ checks that no other CPU might be + * in the process of modifying either member (next or prev) + * + * NOTE: using list_empty_careful() without synchronization + * can only be safe if the only activity that can happen + * to the list entry is list_del_init(). Eg. it cannot be used + * if another CPU could re-list_add() it. + */ +static inline int list_empty_careful(const struct list_head *head) +{ + struct list_head *next = head->next; + return (next == head) && (next == head->prev); +} + +static inline void __list_splice(struct list_head *list, + struct list_head *head) +{ + struct list_head *first = list->next; + struct list_head *last = list->prev; + struct list_head *at = head->next; + + first->prev = head; + head->next = first; + + last->next = at; + at->prev = last; +} + +/** + * list_splice - join two lists + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice(struct list_head *list, struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head); +} + +/** + * list_splice_init - join two lists and reinitialise the emptied list. + * @list: the new list to add. + * @head: the place to add it in the first list. + * + * The list at @list is reinitialised + */ +static inline void list_splice_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head); + INIT_LIST_HEAD(list); + } +} + +/** + * list_entry - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. 
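+ *
+ * A hedged example with a made-up type: given
+ *	struct item { int val; struct list_head node; };
+ * and a "struct list_head *pos" iterator, the containing object is
+ *	struct item *it = list_entry(pos, struct item, node);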
+ */ +#define list_entry(ptr, type, member) \ + container_of(ptr, type, member) + +/** + * list_first_entry - get the first element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + * + * Note, that list is expected to be not empty. + */ +#define list_first_entry(ptr, type, member) \ + list_entry((ptr)->next, type, member) + +/** + * list_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + */ +#define list_for_each(pos, head) \ + for (pos = (head)->next; pos != (head); \ + pos = pos->next) + +/** + * __list_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + * + * This variant differs from list_for_each() in that it's the + * simplest possible list iteration code, no prefetching is done. + * Use this for code that knows the list to be very short (empty + * or 1 entry) most of the time. + */ +#define __list_for_each(pos, head) \ + for (pos = (head)->next; pos != (head); pos = pos->next) + +/** + * list_for_each_prev - iterate over a list backwards + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + */ +#define list_for_each_prev(pos, head) \ + for (pos = (head)->prev; pos != (head); \ + pos = pos->prev) + +/** + * list_for_each_safe - iterate over a list safe against removal of list entry + * @pos: the &struct list_head to use as a loop cursor. + * @n: another &struct list_head to use as temporary storage + * @head: the head for your list. + */ +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) + +/** + * list_for_each_entry - iterate over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry(pos, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_reverse - iterate backwards over list of given type. + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry_reverse(pos, head, member) \ + for (pos = list_entry((head)->prev, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.prev, typeof(*pos), member)) + +/** + * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue + * @pos: the type * to use as a start point + * @head: the head of the list + * @member: the name of the list_struct within the struct. + * + * Prepares a pos entry for use as a start point in list_for_each_entry_continue. + */ +#define list_prepare_entry(pos, head, member) \ + ((pos) ? : list_entry(head, typeof(*pos), member)) + +/** + * list_for_each_entry_continue - continue iteration over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Continue to iterate over list of given type, continuing after + * the current position. 
+ */ +#define list_for_each_entry_continue(pos, head, member) \ + for (pos = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_from - iterate over list of given type from the current point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate over list of given type, continuing from current position. + */ +#define list_for_each_entry_from(pos, head, member) \ + for (; &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry_safe(pos, n, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member), \ + n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_safe_continue + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate over list of given type, continuing after current point, + * safe against removal of list entry. + */ +#define list_for_each_entry_safe_continue(pos, n, head, member) \ + for (pos = list_entry(pos->member.next, typeof(*pos), member), \ + n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_safe_from + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate over list of given type from current point, safe against + * removal of list entry. + */ +#define list_for_each_entry_safe_from(pos, n, head, member) \ + for (n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_safe_reverse + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate backwards over list of given type, safe against removal + * of list entry. + */ +#define list_for_each_entry_safe_reverse(pos, n, head, member) \ + for (pos = list_entry((head)->prev, typeof(*pos), member), \ + n = list_entry(pos->member.prev, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.prev, typeof(*n), member)) + +/* + * Double linked lists with a single pointer list head. + * Mostly useful for hash tables where the two pointer list head is + * too wasteful. + * You lose the ability to access the tail in O(1). 
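+ *
+ * The pprev field below points at the previous node's next pointer
+ * (or at the head's first pointer), so __hlist_del() can unlink any
+ * node, including the first, without a special case.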
+ */ + +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + +#define HLIST_HEAD_INIT { .first = NULL } +#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) +static inline void INIT_HLIST_NODE(struct hlist_node *h) +{ + h->next = NULL; + h->pprev = NULL; +} + +static inline int hlist_unhashed(const struct hlist_node *h) +{ + return !h->pprev; +} + +static inline int hlist_empty(const struct hlist_head *h) +{ + return !h->first; +} + +static inline void __hlist_del(struct hlist_node *n) +{ + struct hlist_node *next = n->next; + struct hlist_node **pprev = n->pprev; + *pprev = next; + if (next) + next->pprev = pprev; +} + +static inline void hlist_del(struct hlist_node *n) +{ + __hlist_del(n); + n->next = LIST_POISON1; + n->pprev = LIST_POISON2; +} + +static inline void hlist_del_init(struct hlist_node *n) +{ + if (!hlist_unhashed(n)) { + __hlist_del(n); + INIT_HLIST_NODE(n); + } +} + +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) +{ + struct hlist_node *first = h->first; + n->next = first; + if (first) + first->pprev = &n->next; + h->first = n; + n->pprev = &h->first; +} + +/* next must be != NULL */ +static inline void hlist_add_before(struct hlist_node *n, + struct hlist_node *next) +{ + n->pprev = next->pprev; + n->next = next; + next->pprev = &n->next; + *(n->pprev) = n; +} + +static inline void hlist_add_after(struct hlist_node *n, + struct hlist_node *next) +{ + next->next = n->next; + n->next = next; + next->pprev = &n->next; + + if(next->next) + next->next->pprev = &next->next; +} + +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#define hlist_for_each(pos, head) \ + for (pos = (head)->first; pos; \ + pos = pos->next) + +#define hlist_for_each_safe(pos, n, head) \ + for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ + pos = n) + +/** + * hlist_for_each_entry - iterate over list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry(tpos, pos, head, member) \ + for (pos = (head)->first; \ + pos && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_for_each_entry_continue - iterate over a hlist continuing after current point + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_continue(tpos, pos, member) \ + for (pos = (pos)->next; \ + pos && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_for_each_entry_from - iterate over a hlist continuing from current point + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_from(tpos, pos, member) \ + for (; pos && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. 
+ * @n: another &struct hlist_node to use as temporary storage + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ + for (pos = (head)->first; \ + pos && ({ n = pos->next; 1; }) && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = n) + +#endif diff --git a/tools/perf/util/pager.c b/tools/perf/util/pager.c new file mode 100644 index 00000000000..a28bccae545 --- /dev/null +++ b/tools/perf/util/pager.c @@ -0,0 +1,99 @@ +#include "cache.h" +#include "run-command.h" +#include "sigchain.h" + +/* + * This is split up from the rest of git so that we can do + * something different on Windows. + */ + +static int spawned_pager; + +#ifndef __MINGW32__ +static void pager_preexec(void) +{ + /* + * Work around bug in "less" by not starting it until we + * have real input + */ + fd_set in; + + FD_ZERO(&in); + FD_SET(0, &in); + select(1, &in, NULL, &in, NULL); + + setenv("LESS", "FRSX", 0); +} +#endif + +static const char *pager_argv[] = { "sh", "-c", NULL, NULL }; +static struct child_process pager_process; + +static void wait_for_pager(void) +{ + fflush(stdout); + fflush(stderr); + /* signal EOF to pager */ + close(1); + close(2); + finish_command(&pager_process); +} + +static void wait_for_pager_signal(int signo) +{ + wait_for_pager(); + sigchain_pop(signo); + raise(signo); +} + +void setup_pager(void) +{ + const char *pager = getenv("PERF_PAGER"); + + if (!isatty(1)) + return; + if (!pager) { + if (!pager_program) + perf_config(perf_default_config, NULL); + pager = pager_program; + } + if (!pager) + pager = getenv("PAGER"); + if (!pager) + pager = "less"; + else if (!*pager || !strcmp(pager, "cat")) + return; + + spawned_pager = 1; /* means we are emitting to terminal */ + + /* spawn the pager */ + pager_argv[2] = pager; + pager_process.argv = pager_argv; + pager_process.in = -1; +#ifndef __MINGW32__ + pager_process.preexec_cb = pager_preexec; +#endif + if (start_command(&pager_process)) + return; + + /* original process continues, but writes to the pipe */ + dup2(pager_process.in, 1); + if (isatty(2)) + dup2(pager_process.in, 2); + close(pager_process.in); + + /* this makes sure that the parent terminates after the pager */ + sigchain_push_common(wait_for_pager_signal); + atexit(wait_for_pager); +} + +int pager_in_use(void) +{ + const char *env; + + if (spawned_pager) + return 1; + + env = getenv("PERF_PAGER_IN_USE"); + return env ? 
perf_config_bool("PERF_PAGER_IN_USE", env) : 0; +} diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c new file mode 100644 index 00000000000..e0820b4388a --- /dev/null +++ b/tools/perf/util/parse-events.c @@ -0,0 +1,316 @@ + +#include "../perf.h" +#include "util.h" +#include "parse-options.h" +#include "parse-events.h" +#include "exec_cmd.h" +#include "string.h" + +extern char *strcasestr(const char *haystack, const char *needle); + +int nr_counters; + +struct perf_counter_attr attrs[MAX_COUNTERS]; + +struct event_symbol { + __u8 type; + __u64 config; + char *symbol; +}; + +#define C(x, y) .type = PERF_TYPE_##x, .config = PERF_COUNT_##y +#define CR(x, y) .type = PERF_TYPE_##x, .config = y + +static struct event_symbol event_symbols[] = { + { C(HARDWARE, CPU_CYCLES), "cpu-cycles", }, + { C(HARDWARE, CPU_CYCLES), "cycles", }, + { C(HARDWARE, INSTRUCTIONS), "instructions", }, + { C(HARDWARE, CACHE_REFERENCES), "cache-references", }, + { C(HARDWARE, CACHE_MISSES), "cache-misses", }, + { C(HARDWARE, BRANCH_INSTRUCTIONS), "branch-instructions", }, + { C(HARDWARE, BRANCH_INSTRUCTIONS), "branches", }, + { C(HARDWARE, BRANCH_MISSES), "branch-misses", }, + { C(HARDWARE, BUS_CYCLES), "bus-cycles", }, + + { C(SOFTWARE, CPU_CLOCK), "cpu-clock", }, + { C(SOFTWARE, TASK_CLOCK), "task-clock", }, + { C(SOFTWARE, PAGE_FAULTS), "page-faults", }, + { C(SOFTWARE, PAGE_FAULTS), "faults", }, + { C(SOFTWARE, PAGE_FAULTS_MIN), "minor-faults", }, + { C(SOFTWARE, PAGE_FAULTS_MAJ), "major-faults", }, + { C(SOFTWARE, CONTEXT_SWITCHES), "context-switches", }, + { C(SOFTWARE, CONTEXT_SWITCHES), "cs", }, + { C(SOFTWARE, CPU_MIGRATIONS), "cpu-migrations", }, + { C(SOFTWARE, CPU_MIGRATIONS), "migrations", }, +}; + +#define __PERF_COUNTER_FIELD(config, name) \ + ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) + +#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) +#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) +#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) +#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) + +static char *hw_event_names[] = { + "cycles", + "instructions", + "cache-references", + "cache-misses", + "branches", + "branch-misses", + "bus-cycles", +}; + +static char *sw_event_names[] = { + "cpu-clock-ticks", + "task-clock-ticks", + "page-faults", + "context-switches", + "CPU-migrations", + "minor-faults", + "major-faults", +}; + +#define MAX_ALIASES 8 + +static char *hw_cache [][MAX_ALIASES] = { + { "L1-data" , "l1-d", "l1d", "l1" }, + { "L1-instruction" , "l1-i", "l1i" }, + { "L2" , "l2" }, + { "Data-TLB" , "dtlb", "d-tlb" }, + { "Instruction-TLB" , "itlb", "i-tlb" }, + { "Branch" , "bpu" , "btb", "bpc" }, +}; + +static char *hw_cache_op [][MAX_ALIASES] = { + { "Load" , "read" }, + { "Store" , "write" }, + { "Prefetch" , "speculative-read", "speculative-load" }, +}; + +static char *hw_cache_result [][MAX_ALIASES] = { + { "Reference" , "ops", "access" }, + { "Miss" }, +}; + +char *event_name(int counter) +{ + __u64 config = attrs[counter].config; + int type = attrs[counter].type; + static char buf[32]; + + if (attrs[counter].type == PERF_TYPE_RAW) { + sprintf(buf, "raw 0x%llx", config); + return buf; + } + + switch (type) { + case PERF_TYPE_HARDWARE: + if (config < PERF_HW_EVENTS_MAX) + return hw_event_names[config]; + return "unknown-hardware"; + + case PERF_TYPE_HW_CACHE: { + __u8 cache_type, cache_op, cache_result; + static char name[100]; + + cache_type = (config >> 0) & 0xff; + 
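+		/*
+		 * The config word packs the cache type in bits 0-7, the
+		 * operation in bits 8-15 and the result in bits 16-23;
+		 * each field is range-checked below:
+		 */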
+		if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+			return "unknown-ext-hardware-cache-type";
+
+		cache_op = (config >> 8) & 0xff;
+		if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+			return "unknown-ext-hardware-cache-op";
+
+		cache_result = (config >> 16) & 0xff;
+		if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+			return "unknown-ext-hardware-cache-result";
+
+		sprintf(name, "%s-Cache-%s-%ses",
+			hw_cache[cache_type][0],
+			hw_cache_op[cache_op][0],
+			hw_cache_result[cache_result][0]);
+
+		return name;
+	}
+
+	case PERF_TYPE_SOFTWARE:
+		if (config < PERF_SW_EVENTS_MAX)
+			return sw_event_names[config];
+		return "unknown-software";
+
+	default:
+		break;
+	}
+
+	return "unknown";
+}
+
+static int parse_aliases(const char *str, char *names[][MAX_ALIASES], int size)
+{
+	int i, j;
+
+	for (i = 0; i < size; i++) {
+		for (j = 0; j < MAX_ALIASES; j++) {
+			if (!names[i][j])
+				break;
+			if (strcasestr(str, names[i][j]))
+				return i;
+		}
+	}
+
+	return -1;
+}
+
+static int parse_generic_hw_symbols(const char *str, struct perf_counter_attr *attr)
+{
+	int cache_type = -1, cache_op = -1, cache_result = -1;
+
+	cache_type = parse_aliases(str, hw_cache, PERF_COUNT_HW_CACHE_MAX);
+	/*
+	 * No fallback - if we cannot get a clear cache type
+	 * then bail out:
+	 */
+	if (cache_type == -1)
+		return -EINVAL;
+
+	cache_op = parse_aliases(str, hw_cache_op, PERF_COUNT_HW_CACHE_OP_MAX);
+	/*
+	 * Fall back to reads:
+	 */
+	if (cache_op == -1)
+		cache_op = PERF_COUNT_HW_CACHE_OP_READ;
+
+	cache_result = parse_aliases(str, hw_cache_result,
+				     PERF_COUNT_HW_CACHE_RESULT_MAX);
+	/*
+	 * Fall back to accesses:
+	 */
+	if (cache_result == -1)
+		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
+
+	attr->config = cache_type | (cache_op << 8) | (cache_result << 16);
+	attr->type = PERF_TYPE_HW_CACHE;
+
+	return 0;
+}
+
+/*
+ * Each event can have multiple symbolic names.
+ * Symbolic names are (almost) exactly matched.
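+ * For example, "cycles" and "cpu-cycles" in the table above both map
+ * to the same PERF_TYPE_HARDWARE/PERF_COUNT_CPU_CYCLES attribute.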
+ */ +static int parse_event_symbols(const char *str, struct perf_counter_attr *attr) +{ + __u64 config, id; + int type; + unsigned int i; + const char *sep, *pstr; + + if (str[0] == 'r' && hex2u64(str + 1, &config) > 0) { + attr->type = PERF_TYPE_RAW; + attr->config = config; + + return 0; + } + + pstr = str; + sep = strchr(pstr, ':'); + if (sep) { + type = atoi(pstr); + pstr = sep + 1; + id = atoi(pstr); + sep = strchr(pstr, ':'); + if (sep) { + pstr = sep + 1; + if (strchr(pstr, 'k')) + attr->exclude_user = 1; + if (strchr(pstr, 'u')) + attr->exclude_kernel = 1; + } + attr->type = type; + attr->config = id; + + return 0; + } + + for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { + if (!strncmp(str, event_symbols[i].symbol, + strlen(event_symbols[i].symbol))) { + + attr->type = event_symbols[i].type; + attr->config = event_symbols[i].config; + + return 0; + } + } + + return parse_generic_hw_symbols(str, attr); +} + +int parse_events(const struct option *opt, const char *str, int unset) +{ + struct perf_counter_attr attr; + int ret; + + memset(&attr, 0, sizeof(attr)); +again: + if (nr_counters == MAX_COUNTERS) + return -1; + + ret = parse_event_symbols(str, &attr); + if (ret < 0) + return ret; + + attrs[nr_counters] = attr; + nr_counters++; + + str = strstr(str, ","); + if (str) { + str++; + goto again; + } + + return 0; +} + +static const char * const event_type_descriptors[] = { + "", + "Hardware event", + "Software event", + "Tracepoint event", + "Hardware cache event", +}; + +/* + * Print the help text for the event symbols: + */ +void print_events(void) +{ + struct event_symbol *syms = event_symbols; + unsigned int i, type, prev_type = -1; + + fprintf(stderr, "\n"); + fprintf(stderr, "List of pre-defined events (to be used in -e):\n"); + + for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { + type = syms->type + 1; + if (type > ARRAY_SIZE(event_type_descriptors)) + type = 0; + + if (type != prev_type) + fprintf(stderr, "\n"); + + fprintf(stderr, " %-30s [%s]\n", syms->symbol, + event_type_descriptors[type]); + + prev_type = type; + } + + fprintf(stderr, "\n"); + fprintf(stderr, " %-30s [raw hardware event descriptor]\n", + "rNNN"); + fprintf(stderr, "\n"); + + exit(129); +} diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h new file mode 100644 index 00000000000..e3d552908e6 --- /dev/null +++ b/tools/perf/util/parse-events.h @@ -0,0 +1,17 @@ + +/* + * Parse symbolic events/counts passed in as options: + */ + +extern int nr_counters; + +extern struct perf_counter_attr attrs[MAX_COUNTERS]; + +extern char *event_name(int ctr); + +extern int parse_events(const struct option *opt, const char *str, int unset); + +#define EVENTS_HELP_MAX (128*1024) + +extern void print_events(void); + diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c new file mode 100644 index 00000000000..b3affb1658d --- /dev/null +++ b/tools/perf/util/parse-options.c @@ -0,0 +1,508 @@ +#include "util.h" +#include "parse-options.h" +#include "cache.h" + +#define OPT_SHORT 1 +#define OPT_UNSET 2 + +static int opterror(const struct option *opt, const char *reason, int flags) +{ + if (flags & OPT_SHORT) + return error("switch `%c' %s", opt->short_name, reason); + if (flags & OPT_UNSET) + return error("option `no-%s' %s", opt->long_name, reason); + return error("option `%s' %s", opt->long_name, reason); +} + +static int get_arg(struct parse_opt_ctx_t *p, const struct option *opt, + int flags, const char **arg) +{ + if (p->opt) { + *arg = p->opt; + p->opt = NULL; + } 
else if (p->argc == 1 && (opt->flags & PARSE_OPT_LASTARG_DEFAULT)) { + *arg = (const char *)opt->defval; + } else if (p->argc > 1) { + p->argc--; + *arg = *++p->argv; + } else + return opterror(opt, "requires a value", flags); + return 0; +} + +static int get_value(struct parse_opt_ctx_t *p, + const struct option *opt, int flags) +{ + const char *s, *arg = NULL; + const int unset = flags & OPT_UNSET; + + if (unset && p->opt) + return opterror(opt, "takes no value", flags); + if (unset && (opt->flags & PARSE_OPT_NONEG)) + return opterror(opt, "isn't available", flags); + + if (!(flags & OPT_SHORT) && p->opt) { + switch (opt->type) { + case OPTION_CALLBACK: + if (!(opt->flags & PARSE_OPT_NOARG)) + break; + /* FALLTHROUGH */ + case OPTION_BOOLEAN: + case OPTION_BIT: + case OPTION_SET_INT: + case OPTION_SET_PTR: + return opterror(opt, "takes no value", flags); + default: + break; + } + } + + switch (opt->type) { + case OPTION_BIT: + if (unset) + *(int *)opt->value &= ~opt->defval; + else + *(int *)opt->value |= opt->defval; + return 0; + + case OPTION_BOOLEAN: + *(int *)opt->value = unset ? 0 : *(int *)opt->value + 1; + return 0; + + case OPTION_SET_INT: + *(int *)opt->value = unset ? 0 : opt->defval; + return 0; + + case OPTION_SET_PTR: + *(void **)opt->value = unset ? NULL : (void *)opt->defval; + return 0; + + case OPTION_STRING: + if (unset) + *(const char **)opt->value = NULL; + else if (opt->flags & PARSE_OPT_OPTARG && !p->opt) + *(const char **)opt->value = (const char *)opt->defval; + else + return get_arg(p, opt, flags, (const char **)opt->value); + return 0; + + case OPTION_CALLBACK: + if (unset) + return (*opt->callback)(opt, NULL, 1) ? (-1) : 0; + if (opt->flags & PARSE_OPT_NOARG) + return (*opt->callback)(opt, NULL, 0) ? (-1) : 0; + if (opt->flags & PARSE_OPT_OPTARG && !p->opt) + return (*opt->callback)(opt, NULL, 0) ? (-1) : 0; + if (get_arg(p, opt, flags, &arg)) + return -1; + return (*opt->callback)(opt, arg, 0) ? (-1) : 0; + + case OPTION_INTEGER: + if (unset) { + *(int *)opt->value = 0; + return 0; + } + if (opt->flags & PARSE_OPT_OPTARG && !p->opt) { + *(int *)opt->value = opt->defval; + return 0; + } + if (get_arg(p, opt, flags, &arg)) + return -1; + *(int *)opt->value = strtol(arg, (char **)&s, 10); + if (*s) + return opterror(opt, "expects a numerical value", flags); + return 0; + + case OPTION_LONG: + if (unset) { + *(long *)opt->value = 0; + return 0; + } + if (opt->flags & PARSE_OPT_OPTARG && !p->opt) { + *(long *)opt->value = opt->defval; + return 0; + } + if (get_arg(p, opt, flags, &arg)) + return -1; + *(long *)opt->value = strtol(arg, (char **)&s, 10); + if (*s) + return opterror(opt, "expects a numerical value", flags); + return 0; + + default: + die("should not happen, someone must be hit on the forehead"); + } +} + +static int parse_short_opt(struct parse_opt_ctx_t *p, const struct option *options) +{ + for (; options->type != OPTION_END; options++) { + if (options->short_name == *p->opt) { + p->opt = p->opt[1] ? 
p->opt + 1 : NULL; + return get_value(p, options, OPT_SHORT); + } + } + return -2; +} + +static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg, + const struct option *options) +{ + const char *arg_end = strchr(arg, '='); + const struct option *abbrev_option = NULL, *ambiguous_option = NULL; + int abbrev_flags = 0, ambiguous_flags = 0; + + if (!arg_end) + arg_end = arg + strlen(arg); + + for (; options->type != OPTION_END; options++) { + const char *rest; + int flags = 0; + + if (!options->long_name) + continue; + + rest = skip_prefix(arg, options->long_name); + if (options->type == OPTION_ARGUMENT) { + if (!rest) + continue; + if (*rest == '=') + return opterror(options, "takes no value", flags); + if (*rest) + continue; + p->out[p->cpidx++] = arg - 2; + return 0; + } + if (!rest) { + /* abbreviated? */ + if (!strncmp(options->long_name, arg, arg_end - arg)) { +is_abbreviated: + if (abbrev_option) { + /* + * If this is abbreviated, it is + * ambiguous. So when there is no + * exact match later, we need to + * error out. + */ + ambiguous_option = abbrev_option; + ambiguous_flags = abbrev_flags; + } + if (!(flags & OPT_UNSET) && *arg_end) + p->opt = arg_end + 1; + abbrev_option = options; + abbrev_flags = flags; + continue; + } + /* negated and abbreviated very much? */ + if (!prefixcmp("no-", arg)) { + flags |= OPT_UNSET; + goto is_abbreviated; + } + /* negated? */ + if (strncmp(arg, "no-", 3)) + continue; + flags |= OPT_UNSET; + rest = skip_prefix(arg + 3, options->long_name); + /* abbreviated and negated? */ + if (!rest && !prefixcmp(options->long_name, arg + 3)) + goto is_abbreviated; + if (!rest) + continue; + } + if (*rest) { + if (*rest != '=') + continue; + p->opt = rest + 1; + } + return get_value(p, options, flags); + } + + if (ambiguous_option) + return error("Ambiguous option: %s " + "(could be --%s%s or --%s%s)", + arg, + (ambiguous_flags & OPT_UNSET) ? "no-" : "", + ambiguous_option->long_name, + (abbrev_flags & OPT_UNSET) ? 
"no-" : "", + abbrev_option->long_name); + if (abbrev_option) + return get_value(p, abbrev_option, abbrev_flags); + return -2; +} + +static void check_typos(const char *arg, const struct option *options) +{ + if (strlen(arg) < 3) + return; + + if (!prefixcmp(arg, "no-")) { + error ("did you mean `--%s` (with two dashes ?)", arg); + exit(129); + } + + for (; options->type != OPTION_END; options++) { + if (!options->long_name) + continue; + if (!prefixcmp(options->long_name, arg)) { + error ("did you mean `--%s` (with two dashes ?)", arg); + exit(129); + } + } +} + +void parse_options_start(struct parse_opt_ctx_t *ctx, + int argc, const char **argv, int flags) +{ + memset(ctx, 0, sizeof(*ctx)); + ctx->argc = argc - 1; + ctx->argv = argv + 1; + ctx->out = argv; + ctx->cpidx = ((flags & PARSE_OPT_KEEP_ARGV0) != 0); + ctx->flags = flags; + if ((flags & PARSE_OPT_KEEP_UNKNOWN) && + (flags & PARSE_OPT_STOP_AT_NON_OPTION)) + die("STOP_AT_NON_OPTION and KEEP_UNKNOWN don't go together"); +} + +static int usage_with_options_internal(const char * const *, + const struct option *, int); + +int parse_options_step(struct parse_opt_ctx_t *ctx, + const struct option *options, + const char * const usagestr[]) +{ + int internal_help = !(ctx->flags & PARSE_OPT_NO_INTERNAL_HELP); + + /* we must reset ->opt, unknown short option leave it dangling */ + ctx->opt = NULL; + + for (; ctx->argc; ctx->argc--, ctx->argv++) { + const char *arg = ctx->argv[0]; + + if (*arg != '-' || !arg[1]) { + if (ctx->flags & PARSE_OPT_STOP_AT_NON_OPTION) + break; + ctx->out[ctx->cpidx++] = ctx->argv[0]; + continue; + } + + if (arg[1] != '-') { + ctx->opt = arg + 1; + if (internal_help && *ctx->opt == 'h') + return parse_options_usage(usagestr, options); + switch (parse_short_opt(ctx, options)) { + case -1: + return parse_options_usage(usagestr, options); + case -2: + goto unknown; + } + if (ctx->opt) + check_typos(arg + 1, options); + while (ctx->opt) { + if (internal_help && *ctx->opt == 'h') + return parse_options_usage(usagestr, options); + switch (parse_short_opt(ctx, options)) { + case -1: + return parse_options_usage(usagestr, options); + case -2: + /* fake a short option thing to hide the fact that we may have + * started to parse aggregated stuff + * + * This is leaky, too bad. 
+					 */
+					ctx->argv[0] = strdup(ctx->opt - 1);
+					*(char *)ctx->argv[0] = '-';
+					goto unknown;
+				}
+			}
+			continue;
+		}
+
+		if (!arg[2]) { /* "--" */
+			if (!(ctx->flags & PARSE_OPT_KEEP_DASHDASH)) {
+				ctx->argc--;
+				ctx->argv++;
+			}
+			break;
+		}
+
+		if (internal_help && !strcmp(arg + 2, "help-all"))
+			return usage_with_options_internal(usagestr, options, 1);
+		if (internal_help && !strcmp(arg + 2, "help"))
+			return parse_options_usage(usagestr, options);
+		switch (parse_long_opt(ctx, arg + 2, options)) {
+		case -1:
+			return parse_options_usage(usagestr, options);
+		case -2:
+			goto unknown;
+		}
+		continue;
+unknown:
+		if (!(ctx->flags & PARSE_OPT_KEEP_UNKNOWN))
+			return PARSE_OPT_UNKNOWN;
+		ctx->out[ctx->cpidx++] = ctx->argv[0];
+		ctx->opt = NULL;
+	}
+	return PARSE_OPT_DONE;
+}
+
+int parse_options_end(struct parse_opt_ctx_t *ctx)
+{
+	memmove(ctx->out + ctx->cpidx, ctx->argv, ctx->argc * sizeof(*ctx->out));
+	ctx->out[ctx->cpidx + ctx->argc] = NULL;
+	return ctx->cpidx + ctx->argc;
+}
+
+int parse_options(int argc, const char **argv, const struct option *options,
+		  const char * const usagestr[], int flags)
+{
+	struct parse_opt_ctx_t ctx;
+
+	parse_options_start(&ctx, argc, argv, flags);
+	switch (parse_options_step(&ctx, options, usagestr)) {
+	case PARSE_OPT_HELP:
+		exit(129);
+	case PARSE_OPT_DONE:
+		break;
+	default: /* PARSE_OPT_UNKNOWN */
+		if (ctx.argv[0][1] == '-') {
+			error("unknown option `%s'", ctx.argv[0] + 2);
+		} else {
+			error("unknown switch `%c'", *ctx.opt);
+		}
+		usage_with_options(usagestr, options);
+	}
+
+	return parse_options_end(&ctx);
+}
+
+#define USAGE_OPTS_WIDTH 24
+#define USAGE_GAP 2
+
+int usage_with_options_internal(const char * const *usagestr,
+				const struct option *opts, int full)
+{
+	if (!usagestr)
+		return PARSE_OPT_HELP;
+
+	fprintf(stderr, "\n usage: %s\n", *usagestr++);
+	while (*usagestr && **usagestr)
+		fprintf(stderr, "    or: %s\n", *usagestr++);
+	while (*usagestr) {
+		fprintf(stderr, "%s%s\n",
+				**usagestr ? "    " : "",
+				*usagestr);
+		usagestr++;
+	}
+
+	if (opts->type != OPTION_GROUP)
+		fputc('\n', stderr);
+
+	for (; opts->type != OPTION_END; opts++) {
+		size_t pos;
+		int pad;
+
+		if (opts->type == OPTION_GROUP) {
+			fputc('\n', stderr);
+			if (*opts->help)
+				fprintf(stderr, "%s\n", opts->help);
+			continue;
+		}
+		if (!full && (opts->flags & PARSE_OPT_HIDDEN))
+			continue;
+
+		pos = fprintf(stderr, "    ");
+		if (opts->short_name)
+			pos += fprintf(stderr, "-%c", opts->short_name);
+		if (opts->long_name && opts->short_name)
+			pos += fprintf(stderr, ", ");
+		if (opts->long_name)
+			pos += fprintf(stderr, "--%s", opts->long_name);
+
+		switch (opts->type) {
+		case OPTION_ARGUMENT:
+			break;
+		case OPTION_INTEGER:
+			if (opts->flags & PARSE_OPT_OPTARG)
+				if (opts->long_name)
+					pos += fprintf(stderr, "[=<n>]");
+				else
+					pos += fprintf(stderr, "[<n>]");
+			else
+				pos += fprintf(stderr, " <n>");
+			break;
+		case OPTION_CALLBACK:
+			if (opts->flags & PARSE_OPT_NOARG)
+				break;
+			/* FALLTHROUGH */
+		case OPTION_STRING:
+			if (opts->argh) {
+				if (opts->flags & PARSE_OPT_OPTARG)
+					if (opts->long_name)
+						pos += fprintf(stderr, "[=<%s>]", opts->argh);
+					else
+						pos += fprintf(stderr, "[<%s>]", opts->argh);
+				else
+					pos += fprintf(stderr, " <%s>", opts->argh);
+			} else {
+				if (opts->flags & PARSE_OPT_OPTARG)
+					if (opts->long_name)
+						pos += fprintf(stderr, "[=...]");
+					else
+						pos += fprintf(stderr, "[...]");
+				else
+					pos += fprintf(stderr, " ...");
+			}
+			break;
+		default: /* OPTION_{BIT,BOOLEAN,SET_INT,SET_PTR} */
+			break;
+		}
+
+		if (pos <= USAGE_OPTS_WIDTH)
+			pad = USAGE_OPTS_WIDTH - pos;
+		else {
+			fputc('\n', stderr);
+			pad = USAGE_OPTS_WIDTH;
+		}
+		fprintf(stderr, "%*s%s\n", pad + USAGE_GAP, "", opts->help);
+	}
+	fputc('\n', stderr);
+
+	return PARSE_OPT_HELP;
+}
+
+void usage_with_options(const char * const *usagestr,
+			const struct option *opts)
+{
+	usage_with_options_internal(usagestr, opts, 0);
+	exit(129);
+}
+
+int parse_options_usage(const char * const *usagestr,
+			const struct option *opts)
+{
+	return usage_with_options_internal(usagestr, opts, 0);
+}
+
+
+int parse_opt_verbosity_cb(const struct option *opt, const char *arg,
+			   int unset)
+{
+	int *target = opt->value;
+
+	if (unset)
+		/* --no-quiet, --no-verbose */
+		*target = 0;
+	else if (opt->short_name == 'v') {
+		if (*target >= 0)
+			(*target)++;
+		else
+			*target = 1;
+	} else {
+		if (*target <= 0)
+			(*target)--;
+		else
+			*target = -1;
+	}
+	return 0;
+}
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h
new file mode 100644
index 00000000000..a1039a6ce0e
--- /dev/null
+++ b/tools/perf/util/parse-options.h
@@ -0,0 +1,174 @@
+#ifndef PARSE_OPTIONS_H
+#define PARSE_OPTIONS_H
+
+enum parse_opt_type {
+	/* special types */
+	OPTION_END,
+	OPTION_ARGUMENT,
+	OPTION_GROUP,
+	/* options with no arguments */
+	OPTION_BIT,
+	OPTION_BOOLEAN, /* _INCR would have been a better name */
+	OPTION_SET_INT,
+	OPTION_SET_PTR,
+	/* options with arguments (usually) */
+	OPTION_STRING,
+	OPTION_INTEGER,
+	OPTION_LONG,
+	OPTION_CALLBACK,
+};
+
+enum parse_opt_flags {
+	PARSE_OPT_KEEP_DASHDASH = 1,
+	PARSE_OPT_STOP_AT_NON_OPTION = 2,
+	PARSE_OPT_KEEP_ARGV0 = 4,
+	PARSE_OPT_KEEP_UNKNOWN = 8,
+	PARSE_OPT_NO_INTERNAL_HELP = 16,
+};
+
+enum parse_opt_option_flags {
+	PARSE_OPT_OPTARG = 1,
+	PARSE_OPT_NOARG = 2,
+	PARSE_OPT_NONEG = 4,
+	PARSE_OPT_HIDDEN = 8,
+	PARSE_OPT_LASTARG_DEFAULT = 16,
+};
+
+struct option;
+typedef int parse_opt_cb(const struct option *, const char *arg, int unset);
+
+/*
+ * `type`::
+ *   holds the type of the option, you must have an
+ *   OPTION_END last in your
+ *   array.
+ *
+ * `short_name`::
+ *   the character to use as a short option name, '\0' if none.
+ *
+ * `long_name`::
+ *   the long option name, without the leading dashes, NULL if none.
+ *
+ * `value`::
+ *   stores pointers to the values to be filled.
+ *
+ * `argh`::
+ *   token to explain the kind of argument this option wants. Keep it
+ *   homogeneous across the repository.
+ *
+ * `help`::
+ *   the short help associated with what the option does.
+ *   Must never be NULL (except for OPTION_END).
+ *   OPTION_GROUP uses this pointer to store the group header.
+ *
+ * `flags`::
+ *   mask of parse_opt_option_flags.
+ *   PARSE_OPT_OPTARG: says that the argument is optional (not for BOOLEANs)
+ *   PARSE_OPT_NOARG: says that this option takes no argument, for CALLBACKs
+ *   PARSE_OPT_NONEG: says that this option cannot be negated
+ *   PARSE_OPT_HIDDEN: this option is skipped in the default usage and
+ *   shown only in the long one.
+ *
+ * `callback`::
+ *   pointer to the callback to use for OPTION_CALLBACK.
+ *
+ * `defval`::
+ *   default value to fill (*->value) with for PARSE_OPT_OPTARG.
+ *   OPTION_{BIT,SET_INT,SET_PTR} store the {mask,integer,pointer} to put in
+ *   the value when met.
+ *   Callbacks can use it as they want.
+ */
+struct option {
+	enum parse_opt_type type;
+	int short_name;
+	const char *long_name;
+	void *value;
+	const char *argh;
+	const char *help;
+
+	int flags;
+	parse_opt_cb *callback;
+	intptr_t defval;
+};
+
+#define OPT_END() { OPTION_END }
+#define OPT_ARGUMENT(l, h) { OPTION_ARGUMENT, 0, (l), NULL, NULL, (h) }
+#define OPT_GROUP(h) { OPTION_GROUP, 0, NULL, NULL, NULL, (h) }
+#define OPT_BIT(s, l, v, h, b) { OPTION_BIT, (s), (l), (v), NULL, (h), 0, NULL, (b) }
+#define OPT_BOOLEAN(s, l, v, h) { OPTION_BOOLEAN, (s), (l), (v), NULL, (h) }
+#define OPT_SET_INT(s, l, v, h, i) { OPTION_SET_INT, (s), (l), (v), NULL, (h), 0, NULL, (i) }
+#define OPT_SET_PTR(s, l, v, h, p) { OPTION_SET_PTR, (s), (l), (v), NULL, (h), 0, NULL, (p) }
+#define OPT_INTEGER(s, l, v, h) { OPTION_INTEGER, (s), (l), (v), NULL, (h) }
+#define OPT_LONG(s, l, v, h) { OPTION_LONG, (s), (l), (v), NULL, (h) }
+#define OPT_STRING(s, l, v, a, h) { OPTION_STRING, (s), (l), (v), (a), (h) }
+#define OPT_DATE(s, l, v, h) \
+	{ OPTION_CALLBACK, (s), (l), (v), "time",(h), 0, \
+	  parse_opt_approxidate_cb }
+#define OPT_CALLBACK(s, l, v, a, h, f) \
+	{ OPTION_CALLBACK, (s), (l), (v), (a), (h), 0, (f) }
+
+/* parse_options() will filter out the processed options and leave the
+ * non-option arguments in argv[].
+ * Returns the number of arguments left in argv[].
+ */
+extern int parse_options(int argc, const char **argv,
+                         const struct option *options,
+                         const char * const usagestr[], int flags);
+
+extern NORETURN void usage_with_options(const char * const *usagestr,
+                                        const struct option *options);
+
+/*----- incremental advanced APIs -----*/
+
+enum {
+	PARSE_OPT_HELP = -1,
+	PARSE_OPT_DONE,
+	PARSE_OPT_UNKNOWN,
+};
+
+/*
+ * It's okay for the caller to consume argv/argc in the usual way.
+ * Other fields of that structure are private to parse-options and should not
+ * be modified in any way.
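+ *
+ * A hypothetical caller of the step API (sketch only, not part of
+ * this patch):
+ *
+ *	struct parse_opt_ctx_t ctx;
+ *
+ *	parse_options_start(&ctx, argc, argv, PARSE_OPT_KEEP_UNKNOWN);
+ *	if (parse_options_step(&ctx, options, usagestr) == PARSE_OPT_HELP)
+ *		exit(129);
+ *	argc = parse_options_end(&ctx);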
+ */ +struct parse_opt_ctx_t { + const char **argv; + const char **out; + int argc, cpidx; + const char *opt; + int flags; +}; + +extern int parse_options_usage(const char * const *usagestr, + const struct option *opts); + +extern void parse_options_start(struct parse_opt_ctx_t *ctx, + int argc, const char **argv, int flags); + +extern int parse_options_step(struct parse_opt_ctx_t *ctx, + const struct option *options, + const char * const usagestr[]); + +extern int parse_options_end(struct parse_opt_ctx_t *ctx); + + +/*----- some often used options -----*/ +extern int parse_opt_abbrev_cb(const struct option *, const char *, int); +extern int parse_opt_approxidate_cb(const struct option *, const char *, int); +extern int parse_opt_verbosity_cb(const struct option *, const char *, int); + +#define OPT__VERBOSE(var) OPT_BOOLEAN('v', "verbose", (var), "be verbose") +#define OPT__QUIET(var) OPT_BOOLEAN('q', "quiet", (var), "be quiet") +#define OPT__VERBOSITY(var) \ + { OPTION_CALLBACK, 'v', "verbose", (var), NULL, "be more verbose", \ + PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 }, \ + { OPTION_CALLBACK, 'q', "quiet", (var), NULL, "be more quiet", \ + PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 } +#define OPT__DRY_RUN(var) OPT_BOOLEAN('n', "dry-run", (var), "dry run") +#define OPT__ABBREV(var) \ + { OPTION_CALLBACK, 0, "abbrev", (var), "n", \ + "use digits to display SHA-1s", \ + PARSE_OPT_OPTARG, &parse_opt_abbrev_cb, 0 } + +extern const char *parse_options_fix_filename(const char *prefix, const char *file); + +#endif diff --git a/tools/perf/util/path.c b/tools/perf/util/path.c new file mode 100644 index 00000000000..a501a40dd2c --- /dev/null +++ b/tools/perf/util/path.c @@ -0,0 +1,353 @@ +/* + * I'm tired of doing "vsnprintf()" etc just to open a + * file, so here's a "return static buffer with printf" + * interface for paths. + * + * It's obviously not thread-safe. Sue me. But it's quite + * useful for doing things like + * + * f = open(mkpath("%s/%s.perf", base, name), O_RDONLY); + * + * which is what it's designed for. + */ +#include "cache.h" + +static char bad_path[] = "/bad-path/"; +/* + * Two hacks: + */ + +static char *get_perf_dir(void) +{ + return "."; +} + +size_t strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + + +static char *get_pathname(void) +{ + static char pathname_array[4][PATH_MAX]; + static int index; + return pathname_array[3 & ++index]; +} + +static char *cleanup_path(char *path) +{ + /* Clean it up */ + if (!memcmp(path, "./", 2)) { + path += 2; + while (*path == '/') + path++; + } + return path; +} + +char *mksnpath(char *buf, size_t n, const char *fmt, ...) +{ + va_list args; + unsigned len; + + va_start(args, fmt); + len = vsnprintf(buf, n, fmt, args); + va_end(args); + if (len >= n) { + strlcpy(buf, bad_path, n); + return buf; + } + return cleanup_path(buf); +} + +static char *perf_vsnpath(char *buf, size_t n, const char *fmt, va_list args) +{ + const char *perf_dir = get_perf_dir(); + size_t len; + + len = strlen(perf_dir); + if (n < len + 1) + goto bad; + memcpy(buf, perf_dir, len); + if (len && !is_dir_sep(perf_dir[len-1])) + buf[len++] = '/'; + len += vsnprintf(buf + len, n - len, fmt, args); + if (len >= n) + goto bad; + return cleanup_path(buf); +bad: + strlcpy(buf, bad_path, n); + return buf; +} + +char *perf_snpath(char *buf, size_t n, const char *fmt, ...) 
+{ + va_list args; + va_start(args, fmt); + (void)perf_vsnpath(buf, n, fmt, args); + va_end(args); + return buf; +} + +char *perf_pathdup(const char *fmt, ...) +{ + char path[PATH_MAX]; + va_list args; + va_start(args, fmt); + (void)perf_vsnpath(path, sizeof(path), fmt, args); + va_end(args); + return xstrdup(path); +} + +char *mkpath(const char *fmt, ...) +{ + va_list args; + unsigned len; + char *pathname = get_pathname(); + + va_start(args, fmt); + len = vsnprintf(pathname, PATH_MAX, fmt, args); + va_end(args); + if (len >= PATH_MAX) + return bad_path; + return cleanup_path(pathname); +} + +char *perf_path(const char *fmt, ...) +{ + const char *perf_dir = get_perf_dir(); + char *pathname = get_pathname(); + va_list args; + unsigned len; + + len = strlen(perf_dir); + if (len > PATH_MAX-100) + return bad_path; + memcpy(pathname, perf_dir, len); + if (len && perf_dir[len-1] != '/') + pathname[len++] = '/'; + va_start(args, fmt); + len += vsnprintf(pathname + len, PATH_MAX - len, fmt, args); + va_end(args); + if (len >= PATH_MAX) + return bad_path; + return cleanup_path(pathname); +} + + +/* perf_mkstemp() - create tmp file honoring TMPDIR variable */ +int perf_mkstemp(char *path, size_t len, const char *template) +{ + const char *tmp; + size_t n; + + tmp = getenv("TMPDIR"); + if (!tmp) + tmp = "/tmp"; + n = snprintf(path, len, "%s/%s", tmp, template); + if (len <= n) { + errno = ENAMETOOLONG; + return -1; + } + return mkstemp(path); +} + + +const char *make_relative_path(const char *abs, const char *base) +{ + static char buf[PATH_MAX + 1]; + int baselen; + if (!base) + return abs; + baselen = strlen(base); + if (prefixcmp(abs, base)) + return abs; + if (abs[baselen] == '/') + baselen++; + else if (base[baselen - 1] != '/') + return abs; + strcpy(buf, abs + baselen); + return buf; +} + +/* + * It is okay if dst == src, but they should not overlap otherwise. + * + * Performs the following normalizations on src, storing the result in dst: + * - Ensures that components are separated by '/' (Windows only) + * - Squashes sequences of '/'. + * - Removes "." components. + * - Removes ".." components, and the components the precede them. + * Returns failure (non-zero) if a ".." component appears as first path + * component anytime during the normalization. Otherwise, returns success (0). + * + * Note that this function is purely textual. It does not follow symlinks, + * verify the existence of the path, or make any system calls. + */ +int normalize_path_copy(char *dst, const char *src) +{ + char *dst0; + + if (has_dos_drive_prefix(src)) { + *dst++ = *src++; + *dst++ = *src++; + } + dst0 = dst; + + if (is_dir_sep(*src)) { + *dst++ = '/'; + while (is_dir_sep(*src)) + src++; + } + + for (;;) { + char c = *src; + + /* + * A path component that begins with . could be + * special: + * (1) "." and ends -- ignore and terminate. + * (2) "./" -- ignore them, eat slash and continue. + * (3) ".." and ends -- strip one and terminate. + * (4) "../" -- strip one, eat slash and continue. 
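+		 *
+		 * e.g. "a/./b//../c" normalizes to "a/c", while "../x"
+		 * fails because ".." would escape past the first component.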
+ */ + if (c == '.') { + if (!src[1]) { + /* (1) */ + src++; + } else if (is_dir_sep(src[1])) { + /* (2) */ + src += 2; + while (is_dir_sep(*src)) + src++; + continue; + } else if (src[1] == '.') { + if (!src[2]) { + /* (3) */ + src += 2; + goto up_one; + } else if (is_dir_sep(src[2])) { + /* (4) */ + src += 3; + while (is_dir_sep(*src)) + src++; + goto up_one; + } + } + } + + /* copy up to the next '/', and eat all '/' */ + while ((c = *src++) != '\0' && !is_dir_sep(c)) + *dst++ = c; + if (is_dir_sep(c)) { + *dst++ = '/'; + while (is_dir_sep(c)) + c = *src++; + src--; + } else if (!c) + break; + continue; + + up_one: + /* + * dst0..dst is prefix portion, and dst[-1] is '/'; + * go up one level. + */ + dst--; /* go to trailing '/' */ + if (dst <= dst0) + return -1; + /* Windows: dst[-1] cannot be backslash anymore */ + while (dst0 < dst && dst[-1] != '/') + dst--; + } + *dst = '\0'; + return 0; +} + +/* + * path = Canonical absolute path + * prefix_list = Colon-separated list of absolute paths + * + * Determines, for each path in prefix_list, whether the "prefix" really + * is an ancestor directory of path. Returns the length of the longest + * ancestor directory, excluding any trailing slashes, or -1 if no prefix + * is an ancestor. (Note that this means 0 is returned if prefix_list is + * "/".) "/foo" is not considered an ancestor of "/foobar". Directories + * are not considered to be their own ancestors. path must be in a + * canonical form: empty components, or "." or ".." components are not + * allowed. prefix_list may be null, which is like "". + */ +int longest_ancestor_length(const char *path, const char *prefix_list) +{ + char buf[PATH_MAX+1]; + const char *ceil, *colon; + int len, max_len = -1; + + if (prefix_list == NULL || !strcmp(path, "/")) + return -1; + + for (colon = ceil = prefix_list; *colon; ceil = colon+1) { + for (colon = ceil; *colon && *colon != PATH_SEP; colon++); + len = colon - ceil; + if (len == 0 || len > PATH_MAX || !is_absolute_path(ceil)) + continue; + strlcpy(buf, ceil, len+1); + if (normalize_path_copy(buf, buf) < 0) + continue; + len = strlen(buf); + if (len > 0 && buf[len-1] == '/') + buf[--len] = '\0'; + + if (!strncmp(path, buf, len) && + path[len] == '/' && + len > max_len) { + max_len = len; + } + } + + return max_len; +} + +/* strip arbitrary amount of directory separators at end of path */ +static inline int chomp_trailing_dir_sep(const char *path, int len) +{ + while (len && is_dir_sep(path[len - 1])) + len--; + return len; +} + +/* + * If path ends with suffix (complete path components), returns the + * part before suffix (sans trailing directory separators). + * Otherwise returns NULL. 
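+ *
+ * e.g. strip_path_suffix("/usr/local/lib", "local/lib") yields "/usr",
+ * while strip_path_suffix("/usr/local/lib", "cal/lib") yields NULL,
+ * since "cal" does not start at a component boundary.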
+ */
+char *strip_path_suffix(const char *path, const char *suffix)
+{
+	int path_len = strlen(path), suffix_len = strlen(suffix);
+
+	while (suffix_len) {
+		if (!path_len)
+			return NULL;
+
+		if (is_dir_sep(path[path_len - 1])) {
+			if (!is_dir_sep(suffix[suffix_len - 1]))
+				return NULL;
+			path_len = chomp_trailing_dir_sep(path, path_len);
+			suffix_len = chomp_trailing_dir_sep(suffix, suffix_len);
+		}
+		else if (path[--path_len] != suffix[--suffix_len])
+			return NULL;
+	}
+
+	if (path_len && !is_dir_sep(path[path_len - 1]))
+		return NULL;
+	return xstrndup(path, chomp_trailing_dir_sep(path, path_len));
+}
diff --git a/tools/perf/util/quote.c b/tools/perf/util/quote.c
new file mode 100644
index 00000000000..f18c5212bc9
--- /dev/null
+++ b/tools/perf/util/quote.c
@@ -0,0 +1,481 @@
+#include "cache.h"
+#include "quote.h"
+
+int quote_path_fully = 1;
+
+/* Help to copy the thing properly quoted for shell safety.
+ * any single quote is replaced with '\'', any exclamation point
+ * is replaced with '\!', and the whole thing is enclosed in a
+ * single quote pair.
+ *
+ * E.g.
+ *  original     sq_quote     result
+ *  name     ==> name      ==> 'name'
+ *  a b      ==> a b       ==> 'a b'
+ *  a'b      ==> a'\''b    ==> 'a'\''b'
+ *  a!b      ==> a'\!'b    ==> 'a'\!'b'
+ */
+static inline int need_bs_quote(char c)
+{
+	return (c == '\'' || c == '!');
+}
+
+void sq_quote_buf(struct strbuf *dst, const char *src)
+{
+	char *to_free = NULL;
+
+	if (dst->buf == src)
+		to_free = strbuf_detach(dst, NULL);
+
+	strbuf_addch(dst, '\'');
+	while (*src) {
+		size_t len = strcspn(src, "'!");
+		strbuf_add(dst, src, len);
+		src += len;
+		while (need_bs_quote(*src)) {
+			strbuf_addstr(dst, "'\\");
+			strbuf_addch(dst, *src++);
+			strbuf_addch(dst, '\'');
+		}
+	}
+	strbuf_addch(dst, '\'');
+	free(to_free);
+}
+
+void sq_quote_print(FILE *stream, const char *src)
+{
+	char c;
+
+	fputc('\'', stream);
+	while ((c = *src++)) {
+		if (need_bs_quote(c)) {
+			fputs("'\\", stream);
+			fputc(c, stream);
+			fputc('\'', stream);
+		} else {
+			fputc(c, stream);
+		}
+	}
+	fputc('\'', stream);
+}
+
+void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
+{
+	int i;
+
+	/* Copy into destination buffer.
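+	 * Each argument is emitted with a leading space and shell-quoted;
+	 * die()s if a positive maxlen is exceeded.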
*/ + strbuf_grow(dst, 255); + for (i = 0; argv[i]; ++i) { + strbuf_addch(dst, ' '); + sq_quote_buf(dst, argv[i]); + if (maxlen && dst->len > maxlen) + die("Too many or long arguments"); + } +} + +char *sq_dequote_step(char *arg, char **next) +{ + char *dst = arg; + char *src = arg; + char c; + + if (*src != '\'') + return NULL; + for (;;) { + c = *++src; + if (!c) + return NULL; + if (c != '\'') { + *dst++ = c; + continue; + } + /* We stepped out of sq */ + switch (*++src) { + case '\0': + *dst = 0; + if (next) + *next = NULL; + return arg; + case '\\': + c = *++src; + if (need_bs_quote(c) && *++src == '\'') { + *dst++ = c; + continue; + } + /* Fallthrough */ + default: + if (!next || !isspace(*src)) + return NULL; + do { + c = *++src; + } while (isspace(c)); + *dst = 0; + *next = src; + return arg; + } + } +} + +char *sq_dequote(char *arg) +{ + return sq_dequote_step(arg, NULL); +} + +int sq_dequote_to_argv(char *arg, const char ***argv, int *nr, int *alloc) +{ + char *next = arg; + + if (!*arg) + return 0; + do { + char *dequoted = sq_dequote_step(next, &next); + if (!dequoted) + return -1; + ALLOC_GROW(*argv, *nr + 1, *alloc); + (*argv)[(*nr)++] = dequoted; + } while (next); + + return 0; +} + +/* 1 means: quote as octal + * 0 means: quote as octal if (quote_path_fully) + * -1 means: never quote + * c: quote as "\\c" + */ +#define X8(x) x, x, x, x, x, x, x, x +#define X16(x) X8(x), X8(x) +static signed char const sq_lookup[256] = { + /* 0 1 2 3 4 5 6 7 */ + /* 0x00 */ 1, 1, 1, 1, 1, 1, 1, 'a', + /* 0x08 */ 'b', 't', 'n', 'v', 'f', 'r', 1, 1, + /* 0x10 */ X16(1), + /* 0x20 */ -1, -1, '"', -1, -1, -1, -1, -1, + /* 0x28 */ X16(-1), X16(-1), X16(-1), + /* 0x58 */ -1, -1, -1, -1,'\\', -1, -1, -1, + /* 0x60 */ X16(-1), X8(-1), + /* 0x78 */ -1, -1, -1, -1, -1, -1, -1, 1, + /* 0x80 */ /* set to 0 */ +}; + +static inline int sq_must_quote(char c) +{ + return sq_lookup[(unsigned char)c] + quote_path_fully > 0; +} + +/* returns the longest prefix not needing a quote up to maxlen if positive. + This stops at the first \0 because it's marked as a character needing an + escape */ +static size_t next_quote_pos(const char *s, ssize_t maxlen) +{ + size_t len; + if (maxlen < 0) { + for (len = 0; !sq_must_quote(s[len]); len++); + } else { + for (len = 0; len < maxlen && !sq_must_quote(s[len]); len++); + } + return len; +} + +/* + * C-style name quoting. + * + * (1) if sb and fp are both NULL, inspect the input name and counts the + * number of bytes that are needed to hold c_style quoted version of name, + * counting the double quotes around it but not terminating NUL, and + * returns it. + * However, if name does not need c_style quoting, it returns 0. + * + * (2) if sb or fp are not NULL, it emits the c_style quoted version + * of name, enclosed with double quotes if asked and needed only. + * Return value is the same as in (1). 
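+ *
+ * e.g. the name a"b is emitted (and counted) as "a\"b", while a name
+ * with no special characters is not quoted at all and 0 is returned.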
+ */ +static size_t quote_c_style_counted(const char *name, ssize_t maxlen, + struct strbuf *sb, FILE *fp, int no_dq) +{ +#undef EMIT +#define EMIT(c) \ + do { \ + if (sb) strbuf_addch(sb, (c)); \ + if (fp) fputc((c), fp); \ + count++; \ + } while (0) +#define EMITBUF(s, l) \ + do { \ + int __ret; \ + if (sb) strbuf_add(sb, (s), (l)); \ + if (fp) __ret = fwrite((s), (l), 1, fp); \ + count += (l); \ + } while (0) + + size_t len, count = 0; + const char *p = name; + + for (;;) { + int ch; + + len = next_quote_pos(p, maxlen); + if (len == maxlen || !p[len]) + break; + + if (!no_dq && p == name) + EMIT('"'); + + EMITBUF(p, len); + EMIT('\\'); + p += len; + ch = (unsigned char)*p++; + if (sq_lookup[ch] >= ' ') { + EMIT(sq_lookup[ch]); + } else { + EMIT(((ch >> 6) & 03) + '0'); + EMIT(((ch >> 3) & 07) + '0'); + EMIT(((ch >> 0) & 07) + '0'); + } + } + + EMITBUF(p, len); + if (p == name) /* no ending quote needed */ + return 0; + + if (!no_dq) + EMIT('"'); + return count; +} + +size_t quote_c_style(const char *name, struct strbuf *sb, FILE *fp, int nodq) +{ + return quote_c_style_counted(name, -1, sb, fp, nodq); +} + +void quote_two_c_style(struct strbuf *sb, const char *prefix, const char *path, int nodq) +{ + if (quote_c_style(prefix, NULL, NULL, 0) || + quote_c_style(path, NULL, NULL, 0)) { + if (!nodq) + strbuf_addch(sb, '"'); + quote_c_style(prefix, sb, NULL, 1); + quote_c_style(path, sb, NULL, 1); + if (!nodq) + strbuf_addch(sb, '"'); + } else { + strbuf_addstr(sb, prefix); + strbuf_addstr(sb, path); + } +} + +void write_name_quoted(const char *name, FILE *fp, int terminator) +{ + if (terminator) { + quote_c_style(name, NULL, fp, 0); + } else { + fputs(name, fp); + } + fputc(terminator, fp); +} + +extern void write_name_quotedpfx(const char *pfx, size_t pfxlen, + const char *name, FILE *fp, int terminator) +{ + int needquote = 0; + + if (terminator) { + needquote = next_quote_pos(pfx, pfxlen) < pfxlen + || name[next_quote_pos(name, -1)]; + } + if (needquote) { + fputc('"', fp); + quote_c_style_counted(pfx, pfxlen, NULL, fp, 1); + quote_c_style(name, NULL, fp, 1); + fputc('"', fp); + } else { + int ret; + + ret = fwrite(pfx, pfxlen, 1, fp); + fputs(name, fp); + } + fputc(terminator, fp); +} + +/* quote path as relative to the given prefix */ +char *quote_path_relative(const char *in, int len, + struct strbuf *out, const char *prefix) +{ + int needquote; + + if (len < 0) + len = strlen(in); + + /* "../" prefix itself does not need quoting, but "in" might. */ + needquote = next_quote_pos(in, len) < len; + strbuf_setlen(out, 0); + strbuf_grow(out, len); + + if (needquote) + strbuf_addch(out, '"'); + if (prefix) { + int off = 0; + while (prefix[off] && off < len && prefix[off] == in[off]) + if (prefix[off] == '/') { + prefix += off + 1; + in += off + 1; + len -= off + 1; + off = 0; + } else + off++; + + for (; *prefix; prefix++) + if (*prefix == '/') + strbuf_addstr(out, "../"); + } + + quote_c_style_counted (in, len, out, NULL, 1); + + if (needquote) + strbuf_addch(out, '"'); + if (!out->len) + strbuf_addstr(out, "./"); + + return out->buf; +} + +/* + * C-style name unquoting. + * + * Quoted should point at the opening double quote. + * + Returns 0 if it was able to unquote the string properly, and appends the + * result in the strbuf `sb'. + * + Returns -1 in case of error, and doesn't touch the strbuf. Though note + * that this function will allocate memory in the strbuf, so calling + * strbuf_release is mandatory whichever result unquote_c_style returns. 
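+ *
+ * For example, unquoting the six-byte input "a\tb" (double quotes
+ * included) appends the three bytes 'a', TAB, 'b' to the strbuf.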
+ *
+ * Updates endp pointer to point at one past the ending double quote if given.
+ */
+int unquote_c_style(struct strbuf *sb, const char *quoted, const char **endp)
+{
+	size_t oldlen = sb->len, len;
+	int ch, ac;
+
+	if (*quoted++ != '"')
+		return -1;
+
+	for (;;) {
+		len = strcspn(quoted, "\"\\");
+		strbuf_add(sb, quoted, len);
+		quoted += len;
+
+		switch (*quoted++) {
+		  case '"':
+			if (endp)
+				*endp = quoted;
+			return 0;
+		  case '\\':
+			break;
+		  default:
+			goto error;
+		}
+
+		switch ((ch = *quoted++)) {
+		case 'a': ch = '\a'; break;
+		case 'b': ch = '\b'; break;
+		case 'f': ch = '\f'; break;
+		case 'n': ch = '\n'; break;
+		case 'r': ch = '\r'; break;
+		case 't': ch = '\t'; break;
+		case 'v': ch = '\v'; break;
+
+		case '\\': case '"':
+			break; /* verbatim */
+
+		/* octal values with first digit over 4 overflow */
+		case '0': case '1': case '2': case '3':
+			ac = ((ch - '0') << 6);
+			if ((ch = *quoted++) < '0' || '7' < ch)
+				goto error;
+			ac |= ((ch - '0') << 3);
+			if ((ch = *quoted++) < '0' || '7' < ch)
+				goto error;
+			ac |= (ch - '0');
+			ch = ac;
+			break;
+		default:
+			goto error;
+		}
+		strbuf_addch(sb, ch);
+	}
+
+  error:
+	strbuf_setlen(sb, oldlen);
+	return -1;
+}
+
+/* quoting as a string literal for other languages */
+
+void perl_quote_print(FILE *stream, const char *src)
+{
+	const char sq = '\'';
+	const char bq = '\\';
+	char c;
+
+	fputc(sq, stream);
+	while ((c = *src++)) {
+		if (c == sq || c == bq)
+			fputc(bq, stream);
+		fputc(c, stream);
+	}
+	fputc(sq, stream);
+}
+
+void python_quote_print(FILE *stream, const char *src)
+{
+	const char sq = '\'';
+	const char bq = '\\';
+	const char nl = '\n';
+	char c;
+
+	fputc(sq, stream);
+	while ((c = *src++)) {
+		if (c == nl) {
+			fputc(bq, stream);
+			fputc('n', stream);
+			continue;
+		}
+		if (c == sq || c == bq)
+			fputc(bq, stream);
+		fputc(c, stream);
+	}
+	fputc(sq, stream);
+}
+
+void tcl_quote_print(FILE *stream, const char *src)
+{
+	char c;
+
+	fputc('"', stream);
+	while ((c = *src++)) {
+		switch (c) {
+		case '[': case ']':
+		case '{': case '}':
+		case '$': case '\\': case '"':
+			fputc('\\', stream);
+		default:
+			fputc(c, stream);
+			break;
+		case '\f':
+			fputs("\\f", stream);
+			break;
+		case '\r':
+			fputs("\\r", stream);
+			break;
+		case '\n':
+			fputs("\\n", stream);
+			break;
+		case '\t':
+			fputs("\\t", stream);
+			break;
+		case '\v':
+			fputs("\\v", stream);
+			break;
+		}
+	}
+	fputc('"', stream);
}
diff --git a/tools/perf/util/quote.h b/tools/perf/util/quote.h
new file mode 100644
index 00000000000..5dfad89816d
--- /dev/null
+++ b/tools/perf/util/quote.h
@@ -0,0 +1,68 @@
+#ifndef QUOTE_H
+#define QUOTE_H
+
+#include <stddef.h>
+#include <stdio.h>
+
+/* Help to copy the thing properly quoted for shell safety.
+ * any single quote is replaced with '\'', any exclamation point
+ * is replaced with '\!', and the whole thing is enclosed in a
+ * single quote pair.
+ *
+ * For example, if you are passing the result to system() as an
+ * argument:
+ *
+ * sprintf(cmd, "foobar %s %s", sq_quote(arg0), sq_quote(arg1))
+ *
+ * would be appropriate.  If the system() is going to call ssh to
+ * run the command on the other side:
+ *
+ * sprintf(cmd, "git-diff-tree %s %s", sq_quote(arg0), sq_quote(arg1));
+ * sprintf(rcmd, "ssh %s %s", sq_quote(host), sq_quote(cmd));
+ *
+ * Note that the above examples leak memory!  Remember to free result from
+ * sq_quote() in a real application.
+ * + * sq_quote_buf() writes to an existing buffer of specified size; it + * will return the number of characters that would have been written + * excluding the final null regardless of the buffer size. + */ + +extern void sq_quote_print(FILE *stream, const char *src); + +extern void sq_quote_buf(struct strbuf *, const char *src); +extern void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen); + +/* This unwraps what sq_quote() produces in place, but returns + * NULL if the input does not look like what sq_quote would have + * produced. + */ +extern char *sq_dequote(char *); + +/* + * Same as the above, but can be used to unwrap many arguments in the + * same string separated by space. "next" is changed to point to the + * next argument that should be passed as first parameter. When there + * is no more argument to be dequoted, "next" is updated to point to NULL. + */ +extern char *sq_dequote_step(char *arg, char **next); +extern int sq_dequote_to_argv(char *arg, const char ***argv, int *nr, int *alloc); + +extern int unquote_c_style(struct strbuf *, const char *quoted, const char **endp); +extern size_t quote_c_style(const char *name, struct strbuf *, FILE *, int no_dq); +extern void quote_two_c_style(struct strbuf *, const char *, const char *, int); + +extern void write_name_quoted(const char *name, FILE *, int terminator); +extern void write_name_quotedpfx(const char *pfx, size_t pfxlen, + const char *name, FILE *, int terminator); + +/* quote path as relative to the given prefix */ +char *quote_path_relative(const char *in, int len, + struct strbuf *out, const char *prefix); + +/* quoting as a string literal for other languages */ +extern void perl_quote_print(FILE *stream, const char *src); +extern void python_quote_print(FILE *stream, const char *src); +extern void tcl_quote_print(FILE *stream, const char *src); + +#endif diff --git a/tools/perf/util/rbtree.c b/tools/perf/util/rbtree.c new file mode 100644 index 00000000000..b15ba9c7cb3 --- /dev/null +++ b/tools/perf/util/rbtree.c @@ -0,0 +1,383 @@ +/* + Red Black Trees + (C) 1999 Andrea Arcangeli + (C) 2002 David Woodhouse + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + linux/lib/rbtree.c +*/ + +#include "rbtree.h" + +static void __rb_rotate_left(struct rb_node *node, struct rb_root *root) +{ + struct rb_node *right = node->rb_right; + struct rb_node *parent = rb_parent(node); + + if ((node->rb_right = right->rb_left)) + rb_set_parent(right->rb_left, node); + right->rb_left = node; + + rb_set_parent(right, parent); + + if (parent) + { + if (node == parent->rb_left) + parent->rb_left = right; + else + parent->rb_right = right; + } + else + root->rb_node = right; + rb_set_parent(node, right); +} + +static void __rb_rotate_right(struct rb_node *node, struct rb_root *root) +{ + struct rb_node *left = node->rb_left; + struct rb_node *parent = rb_parent(node); + + if ((node->rb_left = left->rb_right)) + rb_set_parent(left->rb_right, node); + left->rb_right = node; + + rb_set_parent(left, parent); + + if (parent) + { + if (node == parent->rb_right) + parent->rb_right = left; + else + parent->rb_left = left; + } + else + root->rb_node = left; + rb_set_parent(node, left); +} + +void rb_insert_color(struct rb_node *node, struct rb_root *root) +{ + struct rb_node *parent, *gparent; + + while ((parent = rb_parent(node)) && rb_is_red(parent)) + { + gparent = rb_parent(parent); + + if (parent == gparent->rb_left) + { + { + register struct rb_node *uncle = gparent->rb_right; + if (uncle && rb_is_red(uncle)) + { + rb_set_black(uncle); + rb_set_black(parent); + rb_set_red(gparent); + node = gparent; + continue; + } + } + + if (parent->rb_right == node) + { + register struct rb_node *tmp; + __rb_rotate_left(parent, root); + tmp = parent; + parent = node; + node = tmp; + } + + rb_set_black(parent); + rb_set_red(gparent); + __rb_rotate_right(gparent, root); + } else { + { + register struct rb_node *uncle = gparent->rb_left; + if (uncle && rb_is_red(uncle)) + { + rb_set_black(uncle); + rb_set_black(parent); + rb_set_red(gparent); + node = gparent; + continue; + } + } + + if (parent->rb_left == node) + { + register struct rb_node *tmp; + __rb_rotate_right(parent, root); + tmp = parent; + parent = node; + node = tmp; + } + + rb_set_black(parent); + rb_set_red(gparent); + __rb_rotate_left(gparent, root); + } + } + + rb_set_black(root->rb_node); +} + +static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, + struct rb_root *root) +{ + struct rb_node *other; + + while ((!node || rb_is_black(node)) && node != root->rb_node) + { + if (parent->rb_left == node) + { + other = parent->rb_right; + if (rb_is_red(other)) + { + rb_set_black(other); + rb_set_red(parent); + __rb_rotate_left(parent, root); + other = parent->rb_right; + } + if ((!other->rb_left || rb_is_black(other->rb_left)) && + (!other->rb_right || rb_is_black(other->rb_right))) + { + rb_set_red(other); + node = parent; + parent = rb_parent(node); + } + else + { + if (!other->rb_right || rb_is_black(other->rb_right)) + { + rb_set_black(other->rb_left); + rb_set_red(other); + __rb_rotate_right(other, root); + other = parent->rb_right; + } + rb_set_color(other, rb_color(parent)); + rb_set_black(parent); + rb_set_black(other->rb_right); + __rb_rotate_left(parent, root); + node = root->rb_node; + break; + } + } + else + { + other = parent->rb_left; + if (rb_is_red(other)) + { + rb_set_black(other); + rb_set_red(parent); + __rb_rotate_right(parent, root); + other = parent->rb_left; + } + if 
((!other->rb_left || rb_is_black(other->rb_left)) && + (!other->rb_right || rb_is_black(other->rb_right))) + { + rb_set_red(other); + node = parent; + parent = rb_parent(node); + } + else + { + if (!other->rb_left || rb_is_black(other->rb_left)) + { + rb_set_black(other->rb_right); + rb_set_red(other); + __rb_rotate_left(other, root); + other = parent->rb_left; + } + rb_set_color(other, rb_color(parent)); + rb_set_black(parent); + rb_set_black(other->rb_left); + __rb_rotate_right(parent, root); + node = root->rb_node; + break; + } + } + } + if (node) + rb_set_black(node); +} + +void rb_erase(struct rb_node *node, struct rb_root *root) +{ + struct rb_node *child, *parent; + int color; + + if (!node->rb_left) + child = node->rb_right; + else if (!node->rb_right) + child = node->rb_left; + else + { + struct rb_node *old = node, *left; + + node = node->rb_right; + while ((left = node->rb_left) != NULL) + node = left; + child = node->rb_right; + parent = rb_parent(node); + color = rb_color(node); + + if (child) + rb_set_parent(child, parent); + if (parent == old) { + parent->rb_right = child; + parent = node; + } else + parent->rb_left = child; + + node->rb_parent_color = old->rb_parent_color; + node->rb_right = old->rb_right; + node->rb_left = old->rb_left; + + if (rb_parent(old)) + { + if (rb_parent(old)->rb_left == old) + rb_parent(old)->rb_left = node; + else + rb_parent(old)->rb_right = node; + } else + root->rb_node = node; + + rb_set_parent(old->rb_left, node); + if (old->rb_right) + rb_set_parent(old->rb_right, node); + goto color; + } + + parent = rb_parent(node); + color = rb_color(node); + + if (child) + rb_set_parent(child, parent); + if (parent) + { + if (parent->rb_left == node) + parent->rb_left = child; + else + parent->rb_right = child; + } + else + root->rb_node = child; + + color: + if (color == RB_BLACK) + __rb_erase_color(child, parent, root); +} + +/* + * This function returns the first node (in sort order) of the tree. + */ +struct rb_node *rb_first(const struct rb_root *root) +{ + struct rb_node *n; + + n = root->rb_node; + if (!n) + return NULL; + while (n->rb_left) + n = n->rb_left; + return n; +} + +struct rb_node *rb_last(const struct rb_root *root) +{ + struct rb_node *n; + + n = root->rb_node; + if (!n) + return NULL; + while (n->rb_right) + n = n->rb_right; + return n; +} + +struct rb_node *rb_next(const struct rb_node *node) +{ + struct rb_node *parent; + + if (rb_parent(node) == node) + return NULL; + + /* If we have a right-hand child, go down and then left as far + as we can. */ + if (node->rb_right) { + node = node->rb_right; + while (node->rb_left) + node=node->rb_left; + return (struct rb_node *)node; + } + + /* No right-hand children. Everything down and left is + smaller than us, so any 'next' node must be in the general + direction of our parent. Go up the tree; any time the + ancestor is a right-hand child of its parent, keep going + up. First time it's a left-hand child of its parent, said + parent is our 'next' node. */ + while ((parent = rb_parent(node)) && node == parent->rb_right) + node = parent; + + return parent; +} + +struct rb_node *rb_prev(const struct rb_node *node) +{ + struct rb_node *parent; + + if (rb_parent(node) == node) + return NULL; + + /* If we have a left-hand child, go down and then right as far + as we can. */ + if (node->rb_left) { + node = node->rb_left; + while (node->rb_right) + node=node->rb_right; + return (struct rb_node *)node; + } + + /* No left-hand children. 
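+	   Everything down and right is larger than us, so any 'prev'
+	   node must be in the general direction of our parent.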
Go up till we find an ancestor which is a right-hand child of its parent */
+	while ((parent = rb_parent(node)) && node == parent->rb_left)
+		node = parent;
+
+	return parent;
+}
+
+void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+		     struct rb_root *root)
+{
+	struct rb_node *parent = rb_parent(victim);
+
+	/* Set the surrounding nodes to point to the replacement */
+	if (parent) {
+		if (victim == parent->rb_left)
+			parent->rb_left = new;
+		else
+			parent->rb_right = new;
+	} else {
+		root->rb_node = new;
+	}
+	if (victim->rb_left)
+		rb_set_parent(victim->rb_left, new);
+	if (victim->rb_right)
+		rb_set_parent(victim->rb_right, new);
+
+	/* Copy the pointers/colour from the victim to the replacement */
+	*new = *victim;
+}
diff --git a/tools/perf/util/rbtree.h b/tools/perf/util/rbtree.h
new file mode 100644
index 00000000000..6bdc488a47f
--- /dev/null
+++ b/tools/perf/util/rbtree.h
@@ -0,0 +1,171 @@
+/*
+  Red Black Trees
+  (C) 1999  Andrea Arcangeli
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+  linux/include/linux/rbtree.h
+
+  To use rbtrees you'll have to implement your own insert and search cores.
+  This avoids callbacks and the dramatic performance cost they would incur.
+  It's not the cleanest way, but it is how you get both performance and
+  genericity in C (as opposed to C++).
+
+  An example of insert and search follows here. The search is a plain,
+  ordinary search over an ordered tree. The insert, instead, must be
+  implemented in two steps: first, the code must insert the element in
+  order as a red leaf in the tree; then the support library function
+  rb_insert_color() must be called. That function does the non-trivial
+  work of rebalancing the rbtree if necessary.
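+
+  In the example below, rb_link_node() is what attaches the new node to
+  its parent as a red leaf; rb_insert_color() then restores the red-black
+  invariants.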
+ +----------------------------------------------------------------------- +static inline struct page * rb_search_page_cache(struct inode * inode, + unsigned long offset) +{ + struct rb_node * n = inode->i_rb_page_cache.rb_node; + struct page * page; + + while (n) + { + page = rb_entry(n, struct page, rb_page_cache); + + if (offset < page->offset) + n = n->rb_left; + else if (offset > page->offset) + n = n->rb_right; + else + return page; + } + return NULL; +} + +static inline struct page * __rb_insert_page_cache(struct inode * inode, + unsigned long offset, + struct rb_node * node) +{ + struct rb_node ** p = &inode->i_rb_page_cache.rb_node; + struct rb_node * parent = NULL; + struct page * page; + + while (*p) + { + parent = *p; + page = rb_entry(parent, struct page, rb_page_cache); + + if (offset < page->offset) + p = &(*p)->rb_left; + else if (offset > page->offset) + p = &(*p)->rb_right; + else + return page; + } + + rb_link_node(node, parent, p); + + return NULL; +} + +static inline struct page * rb_insert_page_cache(struct inode * inode, + unsigned long offset, + struct rb_node * node) +{ + struct page * ret; + if ((ret = __rb_insert_page_cache(inode, offset, node))) + goto out; + rb_insert_color(node, &inode->i_rb_page_cache); + out: + return ret; +} +----------------------------------------------------------------------- +*/ + +#ifndef _LINUX_RBTREE_H +#define _LINUX_RBTREE_H + +#include + +/** + * container_of - cast a member of a structure out to the containing structure + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. + * + */ +#define container_of(ptr, type, member) ({ \ + const typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) + +struct rb_node +{ + unsigned long rb_parent_color; +#define RB_RED 0 +#define RB_BLACK 1 + struct rb_node *rb_right; + struct rb_node *rb_left; +} __attribute__((aligned(sizeof(long)))); + /* The alignment might seem pointless, but allegedly CRIS needs it */ + +struct rb_root +{ + struct rb_node *rb_node; +}; + + +#define rb_parent(r) ((struct rb_node *)((r)->rb_parent_color & ~3)) +#define rb_color(r) ((r)->rb_parent_color & 1) +#define rb_is_red(r) (!rb_color(r)) +#define rb_is_black(r) rb_color(r) +#define rb_set_red(r) do { (r)->rb_parent_color &= ~1; } while (0) +#define rb_set_black(r) do { (r)->rb_parent_color |= 1; } while (0) + +static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p) +{ + rb->rb_parent_color = (rb->rb_parent_color & 3) | (unsigned long)p; +} +static inline void rb_set_color(struct rb_node *rb, int color) +{ + rb->rb_parent_color = (rb->rb_parent_color & ~1) | color; +} + +#define RB_ROOT (struct rb_root) { NULL, } +#define rb_entry(ptr, type, member) container_of(ptr, type, member) + +#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) +#define RB_EMPTY_NODE(node) (rb_parent(node) == node) +#define RB_CLEAR_NODE(node) (rb_set_parent(node, node)) + +extern void rb_insert_color(struct rb_node *, struct rb_root *); +extern void rb_erase(struct rb_node *, struct rb_root *); + +/* Find logical next and previous nodes in a tree */ +extern struct rb_node *rb_next(const struct rb_node *); +extern struct rb_node *rb_prev(const struct rb_node *); +extern struct rb_node *rb_first(const struct rb_root *); +extern struct rb_node *rb_last(const struct rb_root *); + +/* Fast replacement of a single node without remove/rebalance/add/rebalance */ +extern void 
rb_replace_node(struct rb_node *victim, struct rb_node *new, + struct rb_root *root); + +static inline void rb_link_node(struct rb_node * node, struct rb_node * parent, + struct rb_node ** rb_link) +{ + node->rb_parent_color = (unsigned long )parent; + node->rb_left = node->rb_right = NULL; + + *rb_link = node; +} + +#endif /* _LINUX_RBTREE_H */ diff --git a/tools/perf/util/run-command.c b/tools/perf/util/run-command.c new file mode 100644 index 00000000000..b2f5e854f40 --- /dev/null +++ b/tools/perf/util/run-command.c @@ -0,0 +1,395 @@ +#include "cache.h" +#include "run-command.h" +#include "exec_cmd.h" + +static inline void close_pair(int fd[2]) +{ + close(fd[0]); + close(fd[1]); +} + +static inline void dup_devnull(int to) +{ + int fd = open("/dev/null", O_RDWR); + dup2(fd, to); + close(fd); +} + +int start_command(struct child_process *cmd) +{ + int need_in, need_out, need_err; + int fdin[2], fdout[2], fderr[2]; + + /* + * In case of errors we must keep the promise to close FDs + * that have been passed in via ->in and ->out. + */ + + need_in = !cmd->no_stdin && cmd->in < 0; + if (need_in) { + if (pipe(fdin) < 0) { + if (cmd->out > 0) + close(cmd->out); + return -ERR_RUN_COMMAND_PIPE; + } + cmd->in = fdin[1]; + } + + need_out = !cmd->no_stdout + && !cmd->stdout_to_stderr + && cmd->out < 0; + if (need_out) { + if (pipe(fdout) < 0) { + if (need_in) + close_pair(fdin); + else if (cmd->in) + close(cmd->in); + return -ERR_RUN_COMMAND_PIPE; + } + cmd->out = fdout[0]; + } + + need_err = !cmd->no_stderr && cmd->err < 0; + if (need_err) { + if (pipe(fderr) < 0) { + if (need_in) + close_pair(fdin); + else if (cmd->in) + close(cmd->in); + if (need_out) + close_pair(fdout); + else if (cmd->out) + close(cmd->out); + return -ERR_RUN_COMMAND_PIPE; + } + cmd->err = fderr[0]; + } + +#ifndef __MINGW32__ + fflush(NULL); + cmd->pid = fork(); + if (!cmd->pid) { + if (cmd->no_stdin) + dup_devnull(0); + else if (need_in) { + dup2(fdin[0], 0); + close_pair(fdin); + } else if (cmd->in) { + dup2(cmd->in, 0); + close(cmd->in); + } + + if (cmd->no_stderr) + dup_devnull(2); + else if (need_err) { + dup2(fderr[1], 2); + close_pair(fderr); + } + + if (cmd->no_stdout) + dup_devnull(1); + else if (cmd->stdout_to_stderr) + dup2(2, 1); + else if (need_out) { + dup2(fdout[1], 1); + close_pair(fdout); + } else if (cmd->out > 1) { + dup2(cmd->out, 1); + close(cmd->out); + } + + if (cmd->dir && chdir(cmd->dir)) + die("exec %s: cd to %s failed (%s)", cmd->argv[0], + cmd->dir, strerror(errno)); + if (cmd->env) { + for (; *cmd->env; cmd->env++) { + if (strchr(*cmd->env, '=')) + putenv((char*)*cmd->env); + else + unsetenv(*cmd->env); + } + } + if (cmd->preexec_cb) + cmd->preexec_cb(); + if (cmd->perf_cmd) { + execv_perf_cmd(cmd->argv); + } else { + execvp(cmd->argv[0], (char *const*) cmd->argv); + } + exit(127); + } +#else + int s0 = -1, s1 = -1, s2 = -1; /* backups of stdin, stdout, stderr */ + const char **sargv = cmd->argv; + char **env = environ; + + if (cmd->no_stdin) { + s0 = dup(0); + dup_devnull(0); + } else if (need_in) { + s0 = dup(0); + dup2(fdin[0], 0); + } else if (cmd->in) { + s0 = dup(0); + dup2(cmd->in, 0); + } + + if (cmd->no_stderr) { + s2 = dup(2); + dup_devnull(2); + } else if (need_err) { + s2 = dup(2); + dup2(fderr[1], 2); + } + + if (cmd->no_stdout) { + s1 = dup(1); + dup_devnull(1); + } else if (cmd->stdout_to_stderr) { + s1 = dup(1); + dup2(2, 1); + } else if (need_out) { + s1 = dup(1); + dup2(fdout[1], 1); + } else if (cmd->out > 1) { + s1 = dup(1); + dup2(cmd->out, 1); + } + + if (cmd->dir) + 
die("chdir in start_command() not implemented"); + if (cmd->env) { + env = copy_environ(); + for (; *cmd->env; cmd->env++) + env = env_setenv(env, *cmd->env); + } + + if (cmd->perf_cmd) { + cmd->argv = prepare_perf_cmd(cmd->argv); + } + + cmd->pid = mingw_spawnvpe(cmd->argv[0], cmd->argv, env); + + if (cmd->env) + free_environ(env); + if (cmd->perf_cmd) + free(cmd->argv); + + cmd->argv = sargv; + if (s0 >= 0) + dup2(s0, 0), close(s0); + if (s1 >= 0) + dup2(s1, 1), close(s1); + if (s2 >= 0) + dup2(s2, 2), close(s2); +#endif + + if (cmd->pid < 0) { + int err = errno; + if (need_in) + close_pair(fdin); + else if (cmd->in) + close(cmd->in); + if (need_out) + close_pair(fdout); + else if (cmd->out) + close(cmd->out); + if (need_err) + close_pair(fderr); + return err == ENOENT ? + -ERR_RUN_COMMAND_EXEC : + -ERR_RUN_COMMAND_FORK; + } + + if (need_in) + close(fdin[0]); + else if (cmd->in) + close(cmd->in); + + if (need_out) + close(fdout[1]); + else if (cmd->out) + close(cmd->out); + + if (need_err) + close(fderr[1]); + + return 0; +} + +static int wait_or_whine(pid_t pid) +{ + for (;;) { + int status, code; + pid_t waiting = waitpid(pid, &status, 0); + + if (waiting < 0) { + if (errno == EINTR) + continue; + error("waitpid failed (%s)", strerror(errno)); + return -ERR_RUN_COMMAND_WAITPID; + } + if (waiting != pid) + return -ERR_RUN_COMMAND_WAITPID_WRONG_PID; + if (WIFSIGNALED(status)) + return -ERR_RUN_COMMAND_WAITPID_SIGNAL; + + if (!WIFEXITED(status)) + return -ERR_RUN_COMMAND_WAITPID_NOEXIT; + code = WEXITSTATUS(status); + switch (code) { + case 127: + return -ERR_RUN_COMMAND_EXEC; + case 0: + return 0; + default: + return -code; + } + } +} + +int finish_command(struct child_process *cmd) +{ + return wait_or_whine(cmd->pid); +} + +int run_command(struct child_process *cmd) +{ + int code = start_command(cmd); + if (code) + return code; + return finish_command(cmd); +} + +static void prepare_run_command_v_opt(struct child_process *cmd, + const char **argv, + int opt) +{ + memset(cmd, 0, sizeof(*cmd)); + cmd->argv = argv; + cmd->no_stdin = opt & RUN_COMMAND_NO_STDIN ? 1 : 0; + cmd->perf_cmd = opt & RUN_PERF_CMD ? 1 : 0; + cmd->stdout_to_stderr = opt & RUN_COMMAND_STDOUT_TO_STDERR ? 
1 : 0; +} + +int run_command_v_opt(const char **argv, int opt) +{ + struct child_process cmd; + prepare_run_command_v_opt(&cmd, argv, opt); + return run_command(&cmd); +} + +int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env) +{ + struct child_process cmd; + prepare_run_command_v_opt(&cmd, argv, opt); + cmd.dir = dir; + cmd.env = env; + return run_command(&cmd); +} + +#ifdef __MINGW32__ +static __stdcall unsigned run_thread(void *data) +{ + struct async *async = data; + return async->proc(async->fd_for_proc, async->data); +} +#endif + +int start_async(struct async *async) +{ + int pipe_out[2]; + + if (pipe(pipe_out) < 0) + return error("cannot create pipe: %s", strerror(errno)); + async->out = pipe_out[0]; + +#ifndef __MINGW32__ + /* Flush stdio before fork() to avoid cloning buffers */ + fflush(NULL); + + async->pid = fork(); + if (async->pid < 0) { + error("fork (async) failed: %s", strerror(errno)); + close_pair(pipe_out); + return -1; + } + if (!async->pid) { + close(pipe_out[0]); + exit(!!async->proc(pipe_out[1], async->data)); + } + close(pipe_out[1]); +#else + async->fd_for_proc = pipe_out[1]; + async->tid = (HANDLE) _beginthreadex(NULL, 0, run_thread, async, 0, NULL); + if (!async->tid) { + error("cannot create thread: %s", strerror(errno)); + close_pair(pipe_out); + return -1; + } +#endif + return 0; +} + +int finish_async(struct async *async) +{ +#ifndef __MINGW32__ + int ret = 0; + + if (wait_or_whine(async->pid)) + ret = error("waitpid (async) failed"); +#else + DWORD ret = 0; + if (WaitForSingleObject(async->tid, INFINITE) != WAIT_OBJECT_0) + ret = error("waiting for thread failed: %lu", GetLastError()); + else if (!GetExitCodeThread(async->tid, &ret)) + ret = error("cannot get thread exit code: %lu", GetLastError()); + CloseHandle(async->tid); +#endif + return ret; +} + +int run_hook(const char *index_file, const char *name, ...) 
+{
+	struct child_process hook;
+	const char **argv = NULL, *env[2];
+	char index[PATH_MAX];
+	va_list args;
+	int ret;
+	size_t i = 0, alloc = 0;
+
+	if (access(perf_path("hooks/%s", name), X_OK) < 0)
+		return 0;
+
+	va_start(args, name);
+	ALLOC_GROW(argv, i + 1, alloc);
+	argv[i++] = perf_path("hooks/%s", name);
+	while (argv[i-1]) {
+		ALLOC_GROW(argv, i + 1, alloc);
+		argv[i++] = va_arg(args, const char *);
+	}
+	va_end(args);
+
+	memset(&hook, 0, sizeof(hook));
+	hook.argv = argv;
+	hook.no_stdin = 1;
+	hook.stdout_to_stderr = 1;
+	if (index_file) {
+		snprintf(index, sizeof(index), "PERF_INDEX_FILE=%s", index_file);
+		env[0] = index;
+		env[1] = NULL;
+		hook.env = env;
+	}
+
+	/*
+	 * argv[0] is still used by the warning()s below, so only free
+	 * the vector once we are done with it.
+	 */
+	ret = start_command(&hook);
+	if (ret) {
+		warning("Could not spawn %s", argv[0]);
+		free(argv);
+		return ret;
+	}
+	ret = finish_command(&hook);
+	if (ret == -ERR_RUN_COMMAND_WAITPID_SIGNAL)
+		warning("%s exited due to uncaught signal", argv[0]);
+
+	free(argv);
+	return ret;
+}
diff --git a/tools/perf/util/run-command.h b/tools/perf/util/run-command.h
new file mode 100644
index 00000000000..328289f2366
--- /dev/null
+++ b/tools/perf/util/run-command.h
@@ -0,0 +1,93 @@
+#ifndef RUN_COMMAND_H
+#define RUN_COMMAND_H
+
+enum {
+	ERR_RUN_COMMAND_FORK = 10000,
+	ERR_RUN_COMMAND_EXEC,
+	ERR_RUN_COMMAND_PIPE,
+	ERR_RUN_COMMAND_WAITPID,
+	ERR_RUN_COMMAND_WAITPID_WRONG_PID,
+	ERR_RUN_COMMAND_WAITPID_SIGNAL,
+	ERR_RUN_COMMAND_WAITPID_NOEXIT,
+};
+#define IS_RUN_COMMAND_ERR(x) (-(x) >= ERR_RUN_COMMAND_FORK)
+
+struct child_process {
+	const char **argv;
+	pid_t pid;
+	/*
+	 * Using .in, .out, .err:
+	 * - Specify 0 for no redirections (child inherits stdin, stdout,
+	 *   stderr from parent).
+	 * - Specify -1 to have a pipe allocated as follows:
+	 *     .in: returns the writable pipe end; parent writes to it,
+	 *          the readable pipe end becomes child's stdin
+	 *     .out, .err: returns the readable pipe end; parent reads from
+	 *          it, the writable pipe end becomes child's stdout/stderr
+	 *   The caller of start_command() must close the returned FDs
+	 *   after it has completed reading from/writing to them!
+	 * - Specify > 0 to set a channel to a particular FD as follows:
+	 *     .in: a readable FD, becomes child's stdin
+	 *     .out: a writable FD, becomes child's stdout/stderr
+	 *     .err > 0 not supported
+	 *   The specified FD is closed by start_command(), even in case
+	 *   of errors!
+	 */
+	int in;
+	int out;
+	int err;
+	const char *dir;
+	const char *const *env;
+	unsigned no_stdin:1;
+	unsigned no_stdout:1;
+	unsigned no_stderr:1;
+	unsigned perf_cmd:1; /* if this is to be a perf sub-command */
+	unsigned stdout_to_stderr:1;
+	void (*preexec_cb)(void);
+};
+
+int start_command(struct child_process *);
+int finish_command(struct child_process *);
+int run_command(struct child_process *);
+
+extern int run_hook(const char *index_file, const char *name, ...);
+
+#define RUN_COMMAND_NO_STDIN 1
+#define RUN_PERF_CMD 2	/* if this is to be a perf sub-command */
+#define RUN_COMMAND_STDOUT_TO_STDERR 4
+int run_command_v_opt(const char **argv, int opt);
+
+/*
+ * env (the environment) is to be formatted like environ: "VAR=VALUE".
+ * To unset an environment variable use just "VAR".
+ */
+int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env);
+
+/*
+ * The purpose of the following functions is to feed a pipe by running
+ * a function asynchronously and providing output that the caller reads.
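+ *
+ * A minimal use might look like this (an illustrative sketch, not part
+ * of the original documentation; "feed" is a made-up callback name):
+ *
+ *	static int feed(int fd, void *data)
+ *	{
+ *		return write(fd, "hello\n", 6) != 6;
+ *	}
+ *
+ *	struct async a = { .proc = feed };
+ *	if (!start_async(&a)) {
+ *		... read from a.out until EOF ...
+ *		close(a.out);
+ *		finish_async(&a);
+ *	}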
+ * + * It is expected that no synchronization and mutual exclusion between + * the caller and the feed function is necessary so that the function + * can run in a thread without interfering with the caller. + */ +struct async { + /* + * proc writes to fd and closes it; + * returns 0 on success, non-zero on failure + */ + int (*proc)(int fd, void *data); + void *data; + int out; /* caller reads from here and closes it */ +#ifndef __MINGW32__ + pid_t pid; +#else + HANDLE tid; + int fd_for_proc; +#endif +}; + +int start_async(struct async *async); +int finish_async(struct async *async); + +#endif diff --git a/tools/perf/util/sigchain.c b/tools/perf/util/sigchain.c new file mode 100644 index 00000000000..1118b99e57d --- /dev/null +++ b/tools/perf/util/sigchain.c @@ -0,0 +1,52 @@ +#include "sigchain.h" +#include "cache.h" + +#define SIGCHAIN_MAX_SIGNALS 32 + +struct sigchain_signal { + sigchain_fun *old; + int n; + int alloc; +}; +static struct sigchain_signal signals[SIGCHAIN_MAX_SIGNALS]; + +static void check_signum(int sig) +{ + if (sig < 1 || sig >= SIGCHAIN_MAX_SIGNALS) + die("BUG: signal out of range: %d", sig); +} + +int sigchain_push(int sig, sigchain_fun f) +{ + struct sigchain_signal *s = signals + sig; + check_signum(sig); + + ALLOC_GROW(s->old, s->n + 1, s->alloc); + s->old[s->n] = signal(sig, f); + if (s->old[s->n] == SIG_ERR) + return -1; + s->n++; + return 0; +} + +int sigchain_pop(int sig) +{ + struct sigchain_signal *s = signals + sig; + check_signum(sig); + if (s->n < 1) + return 0; + + if (signal(sig, s->old[s->n - 1]) == SIG_ERR) + return -1; + s->n--; + return 0; +} + +void sigchain_push_common(sigchain_fun f) +{ + sigchain_push(SIGINT, f); + sigchain_push(SIGHUP, f); + sigchain_push(SIGTERM, f); + sigchain_push(SIGQUIT, f); + sigchain_push(SIGPIPE, f); +} diff --git a/tools/perf/util/sigchain.h b/tools/perf/util/sigchain.h new file mode 100644 index 00000000000..618083bce0c --- /dev/null +++ b/tools/perf/util/sigchain.h @@ -0,0 +1,11 @@ +#ifndef SIGCHAIN_H +#define SIGCHAIN_H + +typedef void (*sigchain_fun)(int); + +int sigchain_push(int sig, sigchain_fun f); +int sigchain_pop(int sig); + +void sigchain_push_common(sigchain_fun f); + +#endif /* SIGCHAIN_H */ diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c new file mode 100644 index 00000000000..eaba0930680 --- /dev/null +++ b/tools/perf/util/strbuf.c @@ -0,0 +1,359 @@ +#include "cache.h" + +int prefixcmp(const char *str, const char *prefix) +{ + for (; ; str++, prefix++) + if (!*prefix) + return 0; + else if (*str != *prefix) + return (unsigned char)*prefix - (unsigned char)*str; +} + +/* + * Used as the default ->buf value, so that people can always assume + * buf is non NULL and ->buf is NUL terminated even for a freshly + * initialized strbuf. + */ +char strbuf_slopbuf[1]; + +void strbuf_init(struct strbuf *sb, size_t hint) +{ + sb->alloc = sb->len = 0; + sb->buf = strbuf_slopbuf; + if (hint) + strbuf_grow(sb, hint); +} + +void strbuf_release(struct strbuf *sb) +{ + if (sb->alloc) { + free(sb->buf); + strbuf_init(sb, 0); + } +} + +char *strbuf_detach(struct strbuf *sb, size_t *sz) +{ + char *res = sb->alloc ? 
sb->buf : NULL; + if (sz) + *sz = sb->len; + strbuf_init(sb, 0); + return res; +} + +void strbuf_attach(struct strbuf *sb, void *buf, size_t len, size_t alloc) +{ + strbuf_release(sb); + sb->buf = buf; + sb->len = len; + sb->alloc = alloc; + strbuf_grow(sb, 0); + sb->buf[sb->len] = '\0'; +} + +void strbuf_grow(struct strbuf *sb, size_t extra) +{ + if (sb->len + extra + 1 <= sb->len) + die("you want to use way too much memory"); + if (!sb->alloc) + sb->buf = NULL; + ALLOC_GROW(sb->buf, sb->len + extra + 1, sb->alloc); +} + +void strbuf_trim(struct strbuf *sb) +{ + char *b = sb->buf; + while (sb->len > 0 && isspace((unsigned char)sb->buf[sb->len - 1])) + sb->len--; + while (sb->len > 0 && isspace(*b)) { + b++; + sb->len--; + } + memmove(sb->buf, b, sb->len); + sb->buf[sb->len] = '\0'; +} +void strbuf_rtrim(struct strbuf *sb) +{ + while (sb->len > 0 && isspace((unsigned char)sb->buf[sb->len - 1])) + sb->len--; + sb->buf[sb->len] = '\0'; +} + +void strbuf_ltrim(struct strbuf *sb) +{ + char *b = sb->buf; + while (sb->len > 0 && isspace(*b)) { + b++; + sb->len--; + } + memmove(sb->buf, b, sb->len); + sb->buf[sb->len] = '\0'; +} + +void strbuf_tolower(struct strbuf *sb) +{ + int i; + for (i = 0; i < sb->len; i++) + sb->buf[i] = tolower(sb->buf[i]); +} + +struct strbuf **strbuf_split(const struct strbuf *sb, int delim) +{ + int alloc = 2, pos = 0; + char *n, *p; + struct strbuf **ret; + struct strbuf *t; + + ret = calloc(alloc, sizeof(struct strbuf *)); + p = n = sb->buf; + while (n < sb->buf + sb->len) { + int len; + n = memchr(n, delim, sb->len - (n - sb->buf)); + if (pos + 1 >= alloc) { + alloc = alloc * 2; + ret = realloc(ret, sizeof(struct strbuf *) * alloc); + } + if (!n) + n = sb->buf + sb->len - 1; + len = n - p + 1; + t = malloc(sizeof(struct strbuf)); + strbuf_init(t, len); + strbuf_add(t, p, len); + ret[pos] = t; + ret[++pos] = NULL; + p = ++n; + } + return ret; +} + +void strbuf_list_free(struct strbuf **sbs) +{ + struct strbuf **s = sbs; + + while (*s) { + strbuf_release(*s); + free(*s++); + } + free(sbs); +} + +int strbuf_cmp(const struct strbuf *a, const struct strbuf *b) +{ + int len = a->len < b->len ? a->len: b->len; + int cmp = memcmp(a->buf, b->buf, len); + if (cmp) + return cmp; + return a->len < b->len ? -1: a->len != b->len; +} + +void strbuf_splice(struct strbuf *sb, size_t pos, size_t len, + const void *data, size_t dlen) +{ + if (pos + len < pos) + die("you want to use way too much memory"); + if (pos > sb->len) + die("`pos' is too far after the end of the buffer"); + if (pos + len > sb->len) + die("`pos + len' is too far after the end of the buffer"); + + if (dlen >= len) + strbuf_grow(sb, dlen - len); + memmove(sb->buf + pos + dlen, + sb->buf + pos + len, + sb->len - pos - len); + memcpy(sb->buf + pos, data, dlen); + strbuf_setlen(sb, sb->len + dlen - len); +} + +void strbuf_insert(struct strbuf *sb, size_t pos, const void *data, size_t len) +{ + strbuf_splice(sb, pos, 0, data, len); +} + +void strbuf_remove(struct strbuf *sb, size_t pos, size_t len) +{ + strbuf_splice(sb, pos, len, NULL, 0); +} + +void strbuf_add(struct strbuf *sb, const void *data, size_t len) +{ + strbuf_grow(sb, len); + memcpy(sb->buf + sb->len, data, len); + strbuf_setlen(sb, sb->len + len); +} + +void strbuf_adddup(struct strbuf *sb, size_t pos, size_t len) +{ + strbuf_grow(sb, len); + memcpy(sb->buf + sb->len, sb->buf + pos, len); + strbuf_setlen(sb, sb->len + len); +} + +void strbuf_addf(struct strbuf *sb, const char *fmt, ...) 
+{
+	int len;
+	va_list ap;
+
+	if (!strbuf_avail(sb))
+		strbuf_grow(sb, 64);
+	va_start(ap, fmt);
+	len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
+	va_end(ap);
+	if (len < 0)
+		die("your vsnprintf is broken");
+	if (len > strbuf_avail(sb)) {
+		strbuf_grow(sb, len);
+		va_start(ap, fmt);
+		len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
+		va_end(ap);
+		if (len > strbuf_avail(sb)) {
+			die("this should not happen, your snprintf is broken");
+		}
+	}
+	strbuf_setlen(sb, sb->len + len);
+}
+
+void strbuf_expand(struct strbuf *sb, const char *format, expand_fn_t fn,
+		   void *context)
+{
+	for (;;) {
+		const char *percent;
+		size_t consumed;
+
+		percent = strchrnul(format, '%');
+		strbuf_add(sb, format, percent - format);
+		if (!*percent)
+			break;
+		format = percent + 1;
+
+		consumed = fn(sb, format, context);
+		if (consumed)
+			format += consumed;
+		else
+			strbuf_addch(sb, '%');
+	}
+}
+
+size_t strbuf_expand_dict_cb(struct strbuf *sb, const char *placeholder,
+			     void *context)
+{
+	struct strbuf_expand_dict_entry *e = context;
+	size_t len;
+
+	for (; e->placeholder && (len = strlen(e->placeholder)); e++) {
+		if (!strncmp(placeholder, e->placeholder, len)) {
+			if (e->value)
+				strbuf_addstr(sb, e->value);
+			return len;
+		}
+	}
+	return 0;
+}
+
+size_t strbuf_fread(struct strbuf *sb, size_t size, FILE *f)
+{
+	size_t res;
+	size_t oldalloc = sb->alloc;
+
+	strbuf_grow(sb, size);
+	res = fread(sb->buf + sb->len, 1, size, f);
+	if (res > 0)
+		strbuf_setlen(sb, sb->len + res);
+	else if (oldalloc == 0)	/* res is a size_t, so it can never be < 0 */
+		strbuf_release(sb);
+	return res;
+}
+
+ssize_t strbuf_read(struct strbuf *sb, int fd, size_t hint)
+{
+	size_t oldlen = sb->len;
+	size_t oldalloc = sb->alloc;
+
+	strbuf_grow(sb, hint ? hint : 8192);
+	for (;;) {
+		ssize_t cnt;
+
+		cnt = read(fd, sb->buf + sb->len, sb->alloc - sb->len - 1);
+		if (cnt < 0) {
+			if (oldalloc == 0)
+				strbuf_release(sb);
+			else
+				strbuf_setlen(sb, oldlen);
+			return -1;
+		}
+		if (!cnt)
+			break;
+		sb->len += cnt;
+		strbuf_grow(sb, 8192);
+	}
+
+	sb->buf[sb->len] = '\0';
+	return sb->len - oldlen;
+}
+
+#define STRBUF_MAXLINK (2*PATH_MAX)
+
+int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint)
+{
+	size_t oldalloc = sb->alloc;
+
+	if (hint < 32)
+		hint = 32;
+
+	while (hint < STRBUF_MAXLINK) {
+		int len;
+
+		strbuf_grow(sb, hint);
+		len = readlink(path, sb->buf, hint);
+		if (len < 0) {
+			if (errno != ERANGE)
+				break;
+		} else if (len < hint) {
+			strbuf_setlen(sb, len);
+			return 0;
+		}
+
+		/* ..
the buffer was too small - try again */
+		hint *= 2;
+	}
+	if (oldalloc == 0)
+		strbuf_release(sb);
+	return -1;
+}
+
+int strbuf_getline(struct strbuf *sb, FILE *fp, int term)
+{
+	int ch;
+
+	strbuf_grow(sb, 0);
+	if (feof(fp))
+		return EOF;
+
+	strbuf_reset(sb);
+	while ((ch = fgetc(fp)) != EOF) {
+		if (ch == term)
+			break;
+		strbuf_grow(sb, 1);
+		sb->buf[sb->len++] = ch;
+	}
+	if (ch == EOF && sb->len == 0)
+		return EOF;
+
+	sb->buf[sb->len] = '\0';
+	return 0;
+}
+
+int strbuf_read_file(struct strbuf *sb, const char *path, size_t hint)
+{
+	int fd, len;
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0)
+		return -1;
+	len = strbuf_read(sb, fd, hint);
+	close(fd);
+	if (len < 0)
+		return -1;
+
+	return len;
+}
diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h
new file mode 100644
index 00000000000..9ee908a3ec5
--- /dev/null
+++ b/tools/perf/util/strbuf.h
@@ -0,0 +1,137 @@
+#ifndef STRBUF_H
+#define STRBUF_H
+
+/*
+ * Strbufs can be used in many ways: as a byte array, or to store
+ * arbitrarily long, overflow-safe strings.
+ *
+ * Strbufs have some invariants that are very important to keep in mind:
+ *
+ * 1. the ->buf member is always malloc-ed, hence strbufs can be used to
+ *    build complex strings/buffers whose final size isn't easily known.
+ *
+ *    It is NOT legal to copy the ->buf pointer away.
+ *    `strbuf_detach' is the operation that detaches a buffer from its shell
+ *    while keeping the shell valid with respect to its invariants.
+ *
+ * 2. the ->buf member is a byte array that has at least ->len + 1 bytes
+ *    allocated. The extra byte is used to store a '\0', allowing the ->buf
+ *    member to be a valid C-string. Every strbuf function ensures this
+ *    invariant is preserved.
+ *
+ *    Note that it is OK to "play" with the buffer directly if you work
+ *    with it that way:
+ *
+ *    strbuf_grow(sb, SOME_SIZE);
+ *    ... Here, the memory array starting at sb->buf, and of length
+ *    ... strbuf_avail(sb) is all yours, and you are sure that
+ *    ... strbuf_avail(sb) is at least SOME_SIZE.
+ *    strbuf_setlen(sb, sb->len + SOME_OTHER_SIZE);
+ *
+ *    Of course, SOME_OTHER_SIZE must be smaller than or equal to
+ *    strbuf_avail(sb).
+ *
+ *    Doing so is safe, though if it has to be done in many places, adding the
+ *    missing API to the strbuf module is the way to go.
+ *
+ * XXX: do _not_ assume that the area that is yours is of size ->alloc - 1
+ *      even if it's true in the current implementation. Alloc is somehow a
+ *      "private" member that should not be messed with.
+ */
+
+#include
+
+extern char strbuf_slopbuf[];
+struct strbuf {
+	size_t alloc;
+	size_t len;
+	char *buf;
+};
+
+#define STRBUF_INIT  { 0, 0, strbuf_slopbuf }
+
+/*----- strbuf life cycle -----*/
+extern void strbuf_init(struct strbuf *, size_t);
+extern void strbuf_release(struct strbuf *);
+extern char *strbuf_detach(struct strbuf *, size_t *);
+extern void strbuf_attach(struct strbuf *, void *, size_t, size_t);
+static inline void strbuf_swap(struct strbuf *a, struct strbuf *b) {
+	struct strbuf tmp = *a;
+	*a = *b;
+	*b = tmp;
+}
+
+/*----- strbuf size related -----*/
+static inline size_t strbuf_avail(const struct strbuf *sb) {
+	return sb->alloc ?
sb->alloc - sb->len - 1 : 0; +} + +extern void strbuf_grow(struct strbuf *, size_t); + +static inline void strbuf_setlen(struct strbuf *sb, size_t len) { + if (!sb->alloc) + strbuf_grow(sb, 0); + assert(len < sb->alloc); + sb->len = len; + sb->buf[len] = '\0'; +} +#define strbuf_reset(sb) strbuf_setlen(sb, 0) + +/*----- content related -----*/ +extern void strbuf_trim(struct strbuf *); +extern void strbuf_rtrim(struct strbuf *); +extern void strbuf_ltrim(struct strbuf *); +extern int strbuf_cmp(const struct strbuf *, const struct strbuf *); +extern void strbuf_tolower(struct strbuf *); + +extern struct strbuf **strbuf_split(const struct strbuf *, int delim); +extern void strbuf_list_free(struct strbuf **); + +/*----- add data in your buffer -----*/ +static inline void strbuf_addch(struct strbuf *sb, int c) { + strbuf_grow(sb, 1); + sb->buf[sb->len++] = c; + sb->buf[sb->len] = '\0'; +} + +extern void strbuf_insert(struct strbuf *, size_t pos, const void *, size_t); +extern void strbuf_remove(struct strbuf *, size_t pos, size_t len); + +/* splice pos..pos+len with given data */ +extern void strbuf_splice(struct strbuf *, size_t pos, size_t len, + const void *, size_t); + +extern void strbuf_add(struct strbuf *, const void *, size_t); +static inline void strbuf_addstr(struct strbuf *sb, const char *s) { + strbuf_add(sb, s, strlen(s)); +} +static inline void strbuf_addbuf(struct strbuf *sb, const struct strbuf *sb2) { + strbuf_add(sb, sb2->buf, sb2->len); +} +extern void strbuf_adddup(struct strbuf *sb, size_t pos, size_t len); + +typedef size_t (*expand_fn_t) (struct strbuf *sb, const char *placeholder, void *context); +extern void strbuf_expand(struct strbuf *sb, const char *format, expand_fn_t fn, void *context); +struct strbuf_expand_dict_entry { + const char *placeholder; + const char *value; +}; +extern size_t strbuf_expand_dict_cb(struct strbuf *sb, const char *placeholder, void *context); + +__attribute__((format(printf,2,3))) +extern void strbuf_addf(struct strbuf *sb, const char *fmt, ...); + +extern size_t strbuf_fread(struct strbuf *, size_t, FILE *); +/* XXX: if read fails, any partial read is undone */ +extern ssize_t strbuf_read(struct strbuf *, int fd, size_t hint); +extern int strbuf_read_file(struct strbuf *sb, const char *path, size_t hint); +extern int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint); + +extern int strbuf_getline(struct strbuf *, FILE *, int); + +extern void stripspace(struct strbuf *buf, int skip_comments); +extern int launch_editor(const char *path, struct strbuf *buffer, const char *const *env); + +extern int strbuf_branchname(struct strbuf *sb, const char *name); +extern int strbuf_check_branch_ref(struct strbuf *sb, const char *name); + +#endif /* STRBUF_H */ diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c new file mode 100644 index 00000000000..ec33c0c7f4e --- /dev/null +++ b/tools/perf/util/string.c @@ -0,0 +1,34 @@ +#include "string.h" + +static int hex(char ch) +{ + if ((ch >= '0') && (ch <= '9')) + return ch - '0'; + if ((ch >= 'a') && (ch <= 'f')) + return ch - 'a' + 10; + if ((ch >= 'A') && (ch <= 'F')) + return ch - 'A' + 10; + return -1; +} + +/* + * While we find nice hex chars, build a long_val. + * Return number of chars processed. 
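+ *
+ * For example (illustrative): for the kallsyms-style input
+ * "c0100000 T _text", hex2u64() stores 0xc0100000 in *long_val and
+ * returns 8, stopping at the space.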
+ */ +int hex2u64(const char *ptr, __u64 *long_val) +{ + const char *p = ptr; + *long_val = 0; + + while (*p) { + const int hex_val = hex(*p); + + if (hex_val < 0) + break; + + *long_val = (*long_val << 4) | hex_val; + p++; + } + + return p - ptr; +} diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h new file mode 100644 index 00000000000..72812c1c9a7 --- /dev/null +++ b/tools/perf/util/string.h @@ -0,0 +1,8 @@ +#ifndef _PERF_STRING_H_ +#define _PERF_STRING_H_ + +#include + +int hex2u64(const char *ptr, __u64 *val); + +#endif diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c new file mode 100644 index 00000000000..23f4f7b3b83 --- /dev/null +++ b/tools/perf/util/symbol.c @@ -0,0 +1,574 @@ +#include "util.h" +#include "../perf.h" +#include "string.h" +#include "symbol.h" + +#include +#include +#include + +const char *sym_hist_filter; + +static struct symbol *symbol__new(uint64_t start, uint64_t len, + const char *name, unsigned int priv_size, + uint64_t obj_start, int verbose) +{ + size_t namelen = strlen(name) + 1; + struct symbol *self = calloc(1, priv_size + sizeof(*self) + namelen); + + if (!self) + return NULL; + + if (verbose >= 2) + printf("new symbol: %016Lx [%08lx]: %s, hist: %p, obj_start: %p\n", + (__u64)start, len, name, self->hist, (void *)obj_start); + + self->obj_start= obj_start; + self->hist = NULL; + self->hist_sum = 0; + + if (sym_hist_filter && !strcmp(name, sym_hist_filter)) + self->hist = calloc(sizeof(__u64), len); + + if (priv_size) { + memset(self, 0, priv_size); + self = ((void *)self) + priv_size; + } + self->start = start; + self->end = start + len - 1; + memcpy(self->name, name, namelen); + + return self; +} + +static void symbol__delete(struct symbol *self, unsigned int priv_size) +{ + free(((void *)self) - priv_size); +} + +static size_t symbol__fprintf(struct symbol *self, FILE *fp) +{ + return fprintf(fp, " %llx-%llx %s\n", + self->start, self->end, self->name); +} + +struct dso *dso__new(const char *name, unsigned int sym_priv_size) +{ + struct dso *self = malloc(sizeof(*self) + strlen(name) + 1); + + if (self != NULL) { + strcpy(self->name, name); + self->syms = RB_ROOT; + self->sym_priv_size = sym_priv_size; + self->find_symbol = dso__find_symbol; + } + + return self; +} + +static void dso__delete_symbols(struct dso *self) +{ + struct symbol *pos; + struct rb_node *next = rb_first(&self->syms); + + while (next) { + pos = rb_entry(next, struct symbol, rb_node); + next = rb_next(&pos->rb_node); + rb_erase(&pos->rb_node, &self->syms); + symbol__delete(pos, self->sym_priv_size); + } +} + +void dso__delete(struct dso *self) +{ + dso__delete_symbols(self); + free(self); +} + +static void dso__insert_symbol(struct dso *self, struct symbol *sym) +{ + struct rb_node **p = &self->syms.rb_node; + struct rb_node *parent = NULL; + const uint64_t ip = sym->start; + struct symbol *s; + + while (*p != NULL) { + parent = *p; + s = rb_entry(parent, struct symbol, rb_node); + if (ip < s->start) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + rb_link_node(&sym->rb_node, parent, p); + rb_insert_color(&sym->rb_node, &self->syms); +} + +struct symbol *dso__find_symbol(struct dso *self, uint64_t ip) +{ + struct rb_node *n; + + if (self == NULL) + return NULL; + + n = self->syms.rb_node; + + while (n) { + struct symbol *s = rb_entry(n, struct symbol, rb_node); + + if (ip < s->start) + n = n->rb_left; + else if (ip > s->end) + n = n->rb_right; + else + return s; + } + + return NULL; +} + +size_t dso__fprintf(struct dso *self, FILE *fp) +{ + 
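	/* Print the dso name, then each of its symbols in address order. */ +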
	size_t ret = fprintf(fp, "dso: %s\n", self->name);
+
+	struct rb_node *nd;
+	for (nd = rb_first(&self->syms); nd; nd = rb_next(nd)) {
+		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
+		ret += symbol__fprintf(pos, fp);
+	}
+
+	return ret;
+}
+
+static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int verbose)
+{
+	struct rb_node *nd, *prevnd;
+	char *line = NULL;
+	size_t n;
+	FILE *file = fopen("/proc/kallsyms", "r");
+
+	if (file == NULL)
+		goto out_failure;
+
+	while (!feof(file)) {
+		__u64 start;
+		struct symbol *sym;
+		int line_len, len;
+		char symbol_type;
+
+		line_len = getline(&line, &n, file);
+		if (line_len < 0)
+			break;
+
+		if (!line)
+			goto out_failure;
+
+		line[--line_len] = '\0'; /* \n */
+
+		len = hex2u64(line, &start);
+
+		len++;
+		if (len + 2 >= line_len)
+			continue;
+
+		symbol_type = toupper(line[len]);
+		/*
+		 * We're interested only in code ('T'ext)
+		 */
+		if (symbol_type != 'T' && symbol_type != 'W')
+			continue;
+		/*
+		 * We'll fix up the end later, once everything is sorted.
+		 */
+		sym = symbol__new(start, 0xdead, line + len + 2,
+				  self->sym_priv_size, 0, verbose);
+
+		if (sym == NULL)
+			goto out_delete_line;
+
+		if (filter && filter(self, sym))
+			symbol__delete(sym, self->sym_priv_size);
+		else
+			dso__insert_symbol(self, sym);
+	}
+
+	/*
+	 * Now that everything is sorted, just set the ->end of all
+	 * symbols
+	 */
+	prevnd = rb_first(&self->syms);
+
+	if (prevnd == NULL)
+		goto out_delete_line;
+
+	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
+		struct symbol *prev = rb_entry(prevnd, struct symbol, rb_node),
+			      *curr = rb_entry(nd, struct symbol, rb_node);
+
+		prev->end = curr->start - 1;
+		prevnd = nd;
+	}
+
+	free(line);
+	fclose(file);
+
+	return 0;
+
+out_delete_line:
+	free(line);
+out_failure:
+	return -1;
+}
+
+/**
+ * elf_symtab__for_each_symbol - iterate through all the symbols
+ *
+ * @self: struct elf_symtab instance to iterate
+ * @index: uint32_t index
+ * @sym: GElf_Sym iterator
+ */
+#define elf_symtab__for_each_symbol(syms, nr_syms, index, sym) \
+	for (index = 0, gelf_getsym(syms, index, &sym);\
+	     index < nr_syms; \
+	     index++, gelf_getsym(syms, index, &sym))
+
+static inline uint8_t elf_sym__type(const GElf_Sym *sym)
+{
+	return GELF_ST_TYPE(sym->st_info);
+}
+
+static inline int elf_sym__is_function(const GElf_Sym *sym)
+{
+	return elf_sym__type(sym) == STT_FUNC &&
+	       sym->st_name != 0 &&
+	       sym->st_shndx != SHN_UNDEF &&
+	       sym->st_size != 0;
+}
+
+static inline const char *elf_sym__name(const GElf_Sym *sym,
+					const Elf_Data *symstrs)
+{
+	return symstrs->d_buf + sym->st_name;
+}
+
+static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
+				    GElf_Shdr *shp, const char *name,
+				    size_t *index)
+{
+	Elf_Scn *sec = NULL;
+	size_t cnt = 1;
+
+	while ((sec = elf_nextscn(elf, sec)) != NULL) {
+		char *str;
+
+		gelf_getshdr(sec, shp);
+		str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
+		if (!strcmp(name, str)) {
+			if (index)
+				*index = cnt;
+			break;
+		}
+		++cnt;
+	}
+
+	return sec;
+}
+
+#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
+	for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
+	     idx < nr_entries; \
+	     ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
+
+#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
+	for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
+	     idx < nr_entries; \
+	     ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
+
+static int dso__synthesize_plt_symbols(struct dso *self, Elf *elf,
+				       GElf_Ehdr *ehdr, Elf_Scn *scn_dynsym,
+				       GElf_Shdr *shdr_dynsym,
+				       size_t dynsym_idx, int verbose)
+{
+	uint32_t nr_rel_entries, idx;
+	GElf_Sym sym;
+	__u64 plt_offset;
+	GElf_Shdr shdr_plt;
+	struct symbol *f;
+	GElf_Shdr shdr_rel_plt;
+	Elf_Data *reldata, *syms, *symstrs;
+	Elf_Scn *scn_plt_rel, *scn_symstrs;
+	char sympltname[1024];
+	int nr = 0, symidx;
+
+	scn_plt_rel = elf_section_by_name(elf, ehdr, &shdr_rel_plt,
+					  ".rela.plt", NULL);
+	if (scn_plt_rel == NULL) {
+		scn_plt_rel = elf_section_by_name(elf, ehdr, &shdr_rel_plt,
+						  ".rel.plt", NULL);
+		if (scn_plt_rel == NULL)
+			return 0;
+	}
+
+	if (shdr_rel_plt.sh_link != dynsym_idx)
+		return 0;
+
+	if (elf_section_by_name(elf, ehdr, &shdr_plt, ".plt", NULL) == NULL)
+		return 0;
+
+	/*
+	 * Fetch the relocation section to find the indexes to the GOT
+	 * and the symbols in the .dynsym they refer to.
+	 */
+	reldata = elf_getdata(scn_plt_rel, NULL);
+	if (reldata == NULL)
+		return -1;
+
+	syms = elf_getdata(scn_dynsym, NULL);
+	if (syms == NULL)
+		return -1;
+
+	scn_symstrs = elf_getscn(elf, shdr_dynsym->sh_link);
+	if (scn_symstrs == NULL)
+		return -1;
+
+	symstrs = elf_getdata(scn_symstrs, NULL);
+	if (symstrs == NULL)
+		return -1;
+
+	nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
+	plt_offset = shdr_plt.sh_offset;
+
+	if (shdr_rel_plt.sh_type == SHT_RELA) {
+		GElf_Rela pos_mem, *pos;
+
+		elf_section__for_each_rela(reldata, pos, pos_mem, idx,
+					   nr_rel_entries) {
+			symidx = GELF_R_SYM(pos->r_info);
+			plt_offset += shdr_plt.sh_entsize;
+			gelf_getsym(syms, symidx, &sym);
+			snprintf(sympltname, sizeof(sympltname),
+				 "%s@plt", elf_sym__name(&sym, symstrs));
+
+			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
+					sympltname, self->sym_priv_size, 0, verbose);
+			if (!f)
+				return -1;
+
+			dso__insert_symbol(self, f);
+			++nr;
+		}
+	} else if (shdr_rel_plt.sh_type == SHT_REL) {
+		GElf_Rel pos_mem, *pos;
+		elf_section__for_each_rel(reldata, pos, pos_mem, idx,
+					  nr_rel_entries) {
+			symidx = GELF_R_SYM(pos->r_info);
+			plt_offset += shdr_plt.sh_entsize;
+			gelf_getsym(syms, symidx, &sym);
+			snprintf(sympltname, sizeof(sympltname),
+				 "%s@plt", elf_sym__name(&sym, symstrs));
+
+			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
+					sympltname, self->sym_priv_size, 0, verbose);
+			if (!f)
+				return -1;
+
+			dso__insert_symbol(self, f);
+			++nr;
+		}
+	} else {
+		/*
+		 * TODO: There is still one more shdr_rel_plt.sh_type value
+		 * to investigate, but it can probably be ignored.
+		 */
+	}
+
+	return nr;
+}
+
+static int dso__load_sym(struct dso *self, int fd, const char *name,
+			 symbol_filter_t filter, int verbose)
+{
+	Elf_Data *symstrs;
+	uint32_t nr_syms;
+	int err = -1;
+	uint32_t index;
+	GElf_Ehdr ehdr;
+	GElf_Shdr shdr;
+	Elf_Data *syms;
+	GElf_Sym sym;
+	Elf_Scn *sec, *sec_dynsym;
+	Elf *elf;
+	size_t dynsym_idx;
+	int nr = 0;
+
+	elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+	if (elf == NULL) {
+		if (verbose)
+			fprintf(stderr, "%s: cannot read %s ELF file.\n",
+				__func__, name);
+		goto out_close;
+	}
+
+	if (gelf_getehdr(elf, &ehdr) == NULL) {
+		if (verbose)
+			fprintf(stderr, "%s: cannot get elf header.\n", __func__);
+		goto out_elf_end;
+	}
+
+	/*
+	 * We need to check if we have a .dynsym, so that we can handle the
+	 * .plt, synthesizing its symbols, which aren't in the symtabs (be it
+	 * .dynsym or .symtab)
+	 */
+	sec_dynsym = elf_section_by_name(elf, &ehdr, &shdr,
+					 ".dynsym", &dynsym_idx);
+	if (sec_dynsym != NULL) {
+		nr = dso__synthesize_plt_symbols(self, elf, &ehdr,
+						 sec_dynsym, &shdr,
+						 dynsym_idx, verbose);
+		if (nr < 0)
+			goto out_elf_end;
+	}
+
+	/*
+	 * But if we have a full .symtab (that is a superset of .dynsym) we
+	 * should add the symbols not in the .dynsym
+	 */
+	sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL);
+	if (sec == NULL) {
+		if (sec_dynsym == NULL)
+			goto out_elf_end;
+
+		sec = sec_dynsym;
+		gelf_getshdr(sec, &shdr);
+	}
+
+	syms = elf_getdata(sec, NULL);
+	if (syms == NULL)
+		goto out_elf_end;
+
+	sec = elf_getscn(elf, shdr.sh_link);
+	if (sec == NULL)
+		goto out_elf_end;
+
+	symstrs = elf_getdata(sec, NULL);
+	if (symstrs == NULL)
+		goto out_elf_end;
+
+	nr_syms = shdr.sh_size / shdr.sh_entsize;
+
+	elf_symtab__for_each_symbol(syms, nr_syms, index, sym) {
+		struct symbol *f;
+		uint64_t obj_start;
+
+		if (!elf_sym__is_function(&sym))
+			continue;
+
+		sec = elf_getscn(elf, sym.st_shndx);
+		if (!sec)
+			goto out_elf_end;
+
+		gelf_getshdr(sec, &shdr);
+		obj_start = sym.st_value;
+
+		sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+
+		f = symbol__new(sym.st_value, sym.st_size,
+				elf_sym__name(&sym, symstrs),
+				self->sym_priv_size, obj_start, verbose);
+		if (!f)
+			goto out_elf_end;
+
+		if (filter && filter(self, f))
+			symbol__delete(f, self->sym_priv_size);
+		else {
+			dso__insert_symbol(self, f);
+			nr++;
+		}
+	}
+
+	err = nr;
+out_elf_end:
+	elf_end(elf);
+out_close:
+	return err;
+}
+
+int dso__load(struct dso *self, symbol_filter_t filter, int verbose)
+{
+	int size = strlen(self->name) + sizeof("/usr/lib/debug%s.debug");
+	char *name = malloc(size);
+	int variant = 0;
+	int ret = -1;
+	int fd;
+
+	if (!name)
+		return -1;
+
+more:
+	do {
+		switch (variant) {
+		case 0: /* Fedora */
+			snprintf(name, size, "/usr/lib/debug%s.debug", self->name);
+			break;
+		case 1: /* Ubuntu */
+			snprintf(name, size, "/usr/lib/debug%s", self->name);
+			break;
+		case 2: /* Sane people */
+			snprintf(name, size, "%s", self->name);
+			break;
+
+		default:
+			goto out;
+		}
+		variant++;
+
+		fd = open(name, O_RDONLY);
+	} while (fd < 0);
+
+	ret = dso__load_sym(self, fd, name, filter, verbose);
+	close(fd);
+
+	/*
+	 * Some people seem to have debuginfo files _WITHOUT_ debug info!?!?
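+	 * dso__load_sym() found zero symbols in that case, so fall through
+	 * and retry with the next path variant from the list above.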
+ */ + if (!ret) + goto more; + +out: + free(name); + return ret; +} + +static int dso__load_vmlinux(struct dso *self, const char *vmlinux, + symbol_filter_t filter, int verbose) +{ + int err, fd = open(vmlinux, O_RDONLY); + + if (fd < 0) + return -1; + + err = dso__load_sym(self, fd, vmlinux, filter, verbose); + close(fd); + + return err; +} + +int dso__load_kernel(struct dso *self, const char *vmlinux, + symbol_filter_t filter, int verbose) +{ + int err = -1; + + if (vmlinux) + err = dso__load_vmlinux(self, vmlinux, filter, verbose); + + if (err) + err = dso__load_kallsyms(self, filter, verbose); + + return err; +} + +void symbol__init(void) +{ + elf_version(EV_CURRENT); +} diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h new file mode 100644 index 00000000000..4839d68f14f --- /dev/null +++ b/tools/perf/util/symbol.h @@ -0,0 +1,47 @@ +#ifndef _PERF_SYMBOL_ +#define _PERF_SYMBOL_ 1 + +#include +#include "list.h" +#include "rbtree.h" + +struct symbol { + struct rb_node rb_node; + __u64 start; + __u64 end; + __u64 obj_start; + __u64 hist_sum; + __u64 *hist; + char name[0]; +}; + +struct dso { + struct list_head node; + struct rb_root syms; + unsigned int sym_priv_size; + struct symbol *(*find_symbol)(struct dso *, uint64_t ip); + char name[0]; +}; + +const char *sym_hist_filter; + +typedef int (*symbol_filter_t)(struct dso *self, struct symbol *sym); + +struct dso *dso__new(const char *name, unsigned int sym_priv_size); +void dso__delete(struct dso *self); + +static inline void *dso__sym_priv(struct dso *self, struct symbol *sym) +{ + return ((void *)sym) - self->sym_priv_size; +} + +struct symbol *dso__find_symbol(struct dso *self, uint64_t ip); + +int dso__load_kernel(struct dso *self, const char *vmlinux, + symbol_filter_t filter, int verbose); +int dso__load(struct dso *self, symbol_filter_t filter, int verbose); + +size_t dso__fprintf(struct dso *self, FILE *fp); + +void symbol__init(void); +#endif /* _PERF_SYMBOL_ */ diff --git a/tools/perf/util/usage.c b/tools/perf/util/usage.c new file mode 100644 index 00000000000..2cad286e437 --- /dev/null +++ b/tools/perf/util/usage.c @@ -0,0 +1,80 @@ +/* + * GIT - The information manager from hell + * + * Copyright (C) Linus Torvalds, 2005 + */ +#include "util.h" + +static void report(const char *prefix, const char *err, va_list params) +{ + char msg[1024]; + vsnprintf(msg, sizeof(msg), err, params); + fprintf(stderr, "%s%s\n", prefix, msg); +} + +static NORETURN void usage_builtin(const char *err) +{ + fprintf(stderr, "\n usage: %s\n", err); + exit(129); +} + +static NORETURN void die_builtin(const char *err, va_list params) +{ + report("fatal: ", err, params); + exit(128); +} + +static void error_builtin(const char *err, va_list params) +{ + report("error: ", err, params); +} + +static void warn_builtin(const char *warn, va_list params) +{ + report("warning: ", warn, params); +} + +/* If we are in a dlopen()ed .so write to a global variable would segfault + * (ugh), so keep things static. */ +static void (*usage_routine)(const char *err) NORETURN = usage_builtin; +static void (*die_routine)(const char *err, va_list params) NORETURN = die_builtin; +static void (*error_routine)(const char *err, va_list params) = error_builtin; +static void (*warn_routine)(const char *err, va_list params) = warn_builtin; + +void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN) +{ + die_routine = routine; +} + +void usage(const char *err) +{ + usage_routine(err); +} + +void die(const char *err, ...) 
+{ + va_list params; + + va_start(params, err); + die_routine(err, params); + va_end(params); +} + +int error(const char *err, ...) +{ + va_list params; + + va_start(params, err); + error_routine(err, params); + va_end(params); + return -1; +} + +void warning(const char *warn, ...) +{ + va_list params; + + va_start(params, warn); + warn_routine(warn, params); + va_end(params); +} diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h new file mode 100644 index 00000000000..76590a16c27 --- /dev/null +++ b/tools/perf/util/util.h @@ -0,0 +1,410 @@ +#ifndef GIT_COMPAT_UTIL_H +#define GIT_COMPAT_UTIL_H + +#define _FILE_OFFSET_BITS 64 + +#ifndef FLEX_ARRAY +/* + * See if our compiler is known to support flexible array members. + */ +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) +# define FLEX_ARRAY /* empty */ +#elif defined(__GNUC__) +# if (__GNUC__ >= 3) +# define FLEX_ARRAY /* empty */ +# else +# define FLEX_ARRAY 0 /* older GNU extension */ +# endif +#endif + +/* + * Otherwise, default to safer but a bit wasteful traditional style + */ +#ifndef FLEX_ARRAY +# define FLEX_ARRAY 1 +#endif +#endif + +#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) + +#ifdef __GNUC__ +#define TYPEOF(x) (__typeof__(x)) +#else +#define TYPEOF(x) +#endif + +#define MSB(x, bits) ((x) & TYPEOF(x)(~0ULL << (sizeof(x) * 8 - (bits)))) +#define HAS_MULTI_BITS(i) ((i) & ((i) - 1)) /* checks if an integer has more than 1 bit set */ + +/* Approximation of the length of the decimal representation of this type. */ +#define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1) + +#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__USLC__) && !defined(_M_UNIX) +#define _XOPEN_SOURCE 600 /* glibc2 and AIX 5.3L need 500, OpenBSD needs 600 for S_ISLNK() */ +#define _XOPEN_SOURCE_EXTENDED 1 /* AIX 5.3L needs this */ +#endif +#define _ALL_SOURCE 1 +#define _GNU_SOURCE 1 +#define _BSD_SOURCE 1 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef __MINGW32__ +#include +#include +#include +#include +#ifndef NO_SYS_SELECT_H +#include +#endif +#include +#include +#include +#include +#include +#include +#if defined(__CYGWIN__) +#undef _XOPEN_SOURCE +#include +#define _XOPEN_SOURCE 600 +#include "compat/cygwin.h" +#else +#undef _ALL_SOURCE /* AIX 5.3L defines a struct list with _ALL_SOURCE. */ +#include +#define _ALL_SOURCE 1 +#endif +#else /* __MINGW32__ */ +/* pull in Windows compatibility stuff */ +#include "compat/mingw.h" +#endif /* __MINGW32__ */ + +#ifndef NO_ICONV +#include +#endif + +#ifndef NO_OPENSSL +#include +#include +#endif + +/* On most systems would have given us this, but + * not on some systems (e.g. GNU/Hurd). + */ +#ifndef PATH_MAX +#define PATH_MAX 4096 +#endif + +#ifndef PRIuMAX +#define PRIuMAX "llu" +#endif + +#ifndef PRIu32 +#define PRIu32 "u" +#endif + +#ifndef PRIx32 +#define PRIx32 "x" +#endif + +#ifndef PATH_SEP +#define PATH_SEP ':' +#endif + +#ifndef STRIP_EXTENSION +#define STRIP_EXTENSION "" +#endif + +#ifndef has_dos_drive_prefix +#define has_dos_drive_prefix(path) 0 +#endif + +#ifndef is_dir_sep +#define is_dir_sep(c) ((c) == '/') +#endif + +#ifdef __GNUC__ +#define NORETURN __attribute__((__noreturn__)) +#else +#define NORETURN +#ifndef __attribute__ +#define __attribute__(x) +#endif +#endif + +/* General helper functions */ +extern void usage(const char *err) NORETURN; +extern void die(const char *err, ...) 
NORETURN __attribute__((format (printf, 1, 2)));
+extern int error(const char *err, ...) __attribute__((format (printf, 1, 2)));
+extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2)));
+
+extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN);
+
+extern int prefixcmp(const char *str, const char *prefix);
+extern time_t tm_to_time_t(const struct tm *tm);
+
+static inline const char *skip_prefix(const char *str, const char *prefix)
+{
+	size_t len = strlen(prefix);
+	return strncmp(str, prefix, len) ? NULL : str + len;
+}
+
+#if defined(NO_MMAP) || defined(USE_WIN32_MMAP)
+
+#ifndef PROT_READ
+#define PROT_READ 1
+#define PROT_WRITE 2
+#define MAP_PRIVATE 1
+#define MAP_FAILED ((void*)-1)
+#endif
+
+#define mmap git_mmap
+#define munmap git_munmap
+extern void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset);
+extern int git_munmap(void *start, size_t length);
+
+#else /* NO_MMAP || USE_WIN32_MMAP */
+
+#include <sys/mman.h>
+
+#endif /* NO_MMAP || USE_WIN32_MMAP */
+
+#ifdef NO_MMAP
+
+/* This value must be a multiple of (pagesize * 2) */
+#define DEFAULT_PACKED_GIT_WINDOW_SIZE (1 * 1024 * 1024)
+
+#else /* NO_MMAP */
+
+/* This value must be a multiple of (pagesize * 2) */
+#define DEFAULT_PACKED_GIT_WINDOW_SIZE \
+	(sizeof(void*) >= 8 \
+		?  1 * 1024 * 1024 * 1024 \
+		: 32 * 1024 * 1024)
+
+#endif /* NO_MMAP */
+
+#ifdef NO_ST_BLOCKS_IN_STRUCT_STAT
+#define on_disk_bytes(st) ((st).st_size)
+#else
+#define on_disk_bytes(st) ((st).st_blocks * 512)
+#endif
+
+#define DEFAULT_PACKED_GIT_LIMIT \
+	((1024L * 1024L) * (sizeof(void*) >= 8 ? 8192 : 256))
+
+#ifdef NO_PREAD
+#define pread git_pread
+extern ssize_t git_pread(int fd, void *buf, size_t count, off_t offset);
+#endif
+/*
+ * Forward decl that will remind us if its twin in cache.h changes.
+ * This function is used in compat/pread.c. But we can't include
+ * cache.h there.
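+ * (In this patch, the perf copy of the implementation lives in
+ * util/wrapper.c.)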
+ */ +extern ssize_t read_in_full(int fd, void *buf, size_t count); + +#ifdef NO_SETENV +#define setenv gitsetenv +extern int gitsetenv(const char *, const char *, int); +#endif + +#ifdef NO_MKDTEMP +#define mkdtemp gitmkdtemp +extern char *gitmkdtemp(char *); +#endif + +#ifdef NO_UNSETENV +#define unsetenv gitunsetenv +extern void gitunsetenv(const char *); +#endif + +#ifdef NO_STRCASESTR +#define strcasestr gitstrcasestr +extern char *gitstrcasestr(const char *haystack, const char *needle); +#endif + +#ifdef NO_STRLCPY +#define strlcpy gitstrlcpy +extern size_t gitstrlcpy(char *, const char *, size_t); +#endif + +#ifdef NO_STRTOUMAX +#define strtoumax gitstrtoumax +extern uintmax_t gitstrtoumax(const char *, char **, int); +#endif + +#ifdef NO_HSTRERROR +#define hstrerror githstrerror +extern const char *githstrerror(int herror); +#endif + +#ifdef NO_MEMMEM +#define memmem gitmemmem +void *gitmemmem(const void *haystack, size_t haystacklen, + const void *needle, size_t needlelen); +#endif + +#ifdef FREAD_READS_DIRECTORIES +#ifdef fopen +#undef fopen +#endif +#define fopen(a,b) git_fopen(a,b) +extern FILE *git_fopen(const char*, const char*); +#endif + +#ifdef SNPRINTF_RETURNS_BOGUS +#define snprintf git_snprintf +extern int git_snprintf(char *str, size_t maxsize, + const char *format, ...); +#define vsnprintf git_vsnprintf +extern int git_vsnprintf(char *str, size_t maxsize, + const char *format, va_list ap); +#endif + +#ifdef __GLIBC_PREREQ +#if __GLIBC_PREREQ(2, 1) +#define HAVE_STRCHRNUL +#endif +#endif + +#ifndef HAVE_STRCHRNUL +#define strchrnul gitstrchrnul +static inline char *gitstrchrnul(const char *s, int c) +{ + while (*s && *s != c) + s++; + return (char *)s; +} +#endif + +/* + * Wrappers: + */ +extern char *xstrdup(const char *str); +extern void *xmalloc(size_t size); +extern void *xmemdupz(const void *data, size_t len); +extern char *xstrndup(const char *str, size_t len); +extern void *xrealloc(void *ptr, size_t size); +extern void *xcalloc(size_t nmemb, size_t size); +extern void *xmmap(void *start, size_t length, int prot, int flags, int fd, off_t offset); +extern ssize_t xread(int fd, void *buf, size_t len); +extern ssize_t xwrite(int fd, const void *buf, size_t len); +extern int xdup(int fd); +extern FILE *xfdopen(int fd, const char *mode); +extern int xmkstemp(char *template); + +static inline size_t xsize_t(off_t len) +{ + return (size_t)len; +} + +static inline int has_extension(const char *filename, const char *ext) +{ + size_t len = strlen(filename); + size_t extlen = strlen(ext); + return len > extlen && !memcmp(filename + len - extlen, ext, extlen); +} + +/* Sane ctype - no locale, and works with signed chars */ +#undef isascii +#undef isspace +#undef isdigit +#undef isalpha +#undef isalnum +#undef tolower +#undef toupper +extern unsigned char sane_ctype[256]; +#define GIT_SPACE 0x01 +#define GIT_DIGIT 0x02 +#define GIT_ALPHA 0x04 +#define GIT_GLOB_SPECIAL 0x08 +#define GIT_REGEX_SPECIAL 0x10 +#define sane_istest(x,mask) ((sane_ctype[(unsigned char)(x)] & (mask)) != 0) +#define isascii(x) (((x) & ~0x7f) == 0) +#define isspace(x) sane_istest(x,GIT_SPACE) +#define isdigit(x) sane_istest(x,GIT_DIGIT) +#define isalpha(x) sane_istest(x,GIT_ALPHA) +#define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT) +#define is_glob_special(x) sane_istest(x,GIT_GLOB_SPECIAL) +#define is_regex_special(x) sane_istest(x,GIT_GLOB_SPECIAL | GIT_REGEX_SPECIAL) +#define tolower(x) sane_case((unsigned char)(x), 0x20) +#define toupper(x) sane_case((unsigned char)(x), 0) + +static inline int 
sane_case(int x, int high) +{ + if (sane_istest(x, GIT_ALPHA)) + x = (x & ~0x20) | high; + return x; +} + +static inline int strtoul_ui(char const *s, int base, unsigned int *result) +{ + unsigned long ul; + char *p; + + errno = 0; + ul = strtoul(s, &p, base); + if (errno || *p || p == s || (unsigned int) ul != ul) + return -1; + *result = ul; + return 0; +} + +static inline int strtol_i(char const *s, int base, int *result) +{ + long ul; + char *p; + + errno = 0; + ul = strtol(s, &p, base); + if (errno || *p || p == s || (int) ul != ul) + return -1; + *result = ul; + return 0; +} + +#ifdef INTERNAL_QSORT +void git_qsort(void *base, size_t nmemb, size_t size, + int(*compar)(const void *, const void *)); +#define qsort git_qsort +#endif + +#ifndef DIR_HAS_BSD_GROUP_SEMANTICS +# define FORCE_DIR_SET_GID S_ISGID +#else +# define FORCE_DIR_SET_GID 0 +#endif + +#ifdef NO_NSEC +#undef USE_NSEC +#define ST_CTIME_NSEC(st) 0 +#define ST_MTIME_NSEC(st) 0 +#else +#ifdef USE_ST_TIMESPEC +#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctimespec.tv_nsec)) +#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtimespec.tv_nsec)) +#else +#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctim.tv_nsec)) +#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtim.tv_nsec)) +#endif +#endif + +#endif diff --git a/tools/perf/util/wrapper.c b/tools/perf/util/wrapper.c new file mode 100644 index 00000000000..6350d65f6d9 --- /dev/null +++ b/tools/perf/util/wrapper.c @@ -0,0 +1,206 @@ +/* + * Various trivial helper wrappers around standard functions + */ +#include "cache.h" + +/* + * There's no pack memory to release - but stay close to the Git + * version so wrap this away: + */ +static inline void release_pack_memory(size_t size, int flag) +{ +} + +char *xstrdup(const char *str) +{ + char *ret = strdup(str); + if (!ret) { + release_pack_memory(strlen(str) + 1, -1); + ret = strdup(str); + if (!ret) + die("Out of memory, strdup failed"); + } + return ret; +} + +void *xmalloc(size_t size) +{ + void *ret = malloc(size); + if (!ret && !size) + ret = malloc(1); + if (!ret) { + release_pack_memory(size, -1); + ret = malloc(size); + if (!ret && !size) + ret = malloc(1); + if (!ret) + die("Out of memory, malloc failed"); + } +#ifdef XMALLOC_POISON + memset(ret, 0xA5, size); +#endif + return ret; +} + +/* + * xmemdupz() allocates (len + 1) bytes of memory, duplicates "len" bytes of + * "data" to the allocated memory, zero terminates the allocated memory, + * and returns a pointer to the allocated memory. If the allocation fails, + * the program dies. + */ +void *xmemdupz(const void *data, size_t len) +{ + char *p = xmalloc(len + 1); + memcpy(p, data, len); + p[len] = '\0'; + return p; +} + +char *xstrndup(const char *str, size_t len) +{ + char *p = memchr(str, '\0', len); + return xmemdupz(str, p ? 
p - str : len);
+}
+
+void *xrealloc(void *ptr, size_t size)
+{
+	void *ret = realloc(ptr, size);
+	if (!ret && !size)
+		ret = realloc(ptr, 1);
+	if (!ret) {
+		release_pack_memory(size, -1);
+		ret = realloc(ptr, size);
+		if (!ret && !size)
+			ret = realloc(ptr, 1);
+		if (!ret)
+			die("Out of memory, realloc failed");
+	}
+	return ret;
+}
+
+void *xcalloc(size_t nmemb, size_t size)
+{
+	void *ret = calloc(nmemb, size);
+	if (!ret && (!nmemb || !size))
+		ret = calloc(1, 1);
+	if (!ret) {
+		release_pack_memory(nmemb * size, -1);
+		ret = calloc(nmemb, size);
+		if (!ret && (!nmemb || !size))
+			ret = calloc(1, 1);
+		if (!ret)
+			die("Out of memory, calloc failed");
+	}
+	return ret;
+}
+
+void *xmmap(void *start, size_t length,
+	int prot, int flags, int fd, off_t offset)
+{
+	void *ret = mmap(start, length, prot, flags, fd, offset);
+	if (ret == MAP_FAILED) {
+		if (!length)
+			return NULL;
+		release_pack_memory(length, fd);
+		ret = mmap(start, length, prot, flags, fd, offset);
+		if (ret == MAP_FAILED)
+			die("Out of memory? mmap failed: %s", strerror(errno));
+	}
+	return ret;
+}
+
+/*
+ * xread() is the same as read(), but it automatically restarts read()
+ * operations with a recoverable error (EAGAIN and EINTR). xread()
+ * DOES NOT GUARANTEE that "len" bytes are read even if the data is available.
+ */
+ssize_t xread(int fd, void *buf, size_t len)
+{
+	ssize_t nr;
+	while (1) {
+		nr = read(fd, buf, len);
+		if ((nr < 0) && (errno == EAGAIN || errno == EINTR))
+			continue;
+		return nr;
+	}
+}
+
+/*
+ * xwrite() is the same as write(), but it automatically restarts write()
+ * operations with a recoverable error (EAGAIN and EINTR). xwrite() DOES NOT
+ * GUARANTEE that "len" bytes are written even if the operation is successful.
+ */
+ssize_t xwrite(int fd, const void *buf, size_t len)
+{
+	ssize_t nr;
+	while (1) {
+		nr = write(fd, buf, len);
+		if ((nr < 0) && (errno == EAGAIN || errno == EINTR))
+			continue;
+		return nr;
+	}
+}
+
+ssize_t read_in_full(int fd, void *buf, size_t count)
+{
+	char *p = buf;
+	ssize_t total = 0;
+
+	while (count > 0) {
+		ssize_t loaded = xread(fd, p, count);
+		if (loaded <= 0)
+			return total ? total : loaded;
+		count -= loaded;
+		p += loaded;
+		total += loaded;
+	}
+
+	return total;
+}
+
+ssize_t write_in_full(int fd, const void *buf, size_t count)
+{
+	const char *p = buf;
+	ssize_t total = 0;
+
+	while (count > 0) {
+		ssize_t written = xwrite(fd, p, count);
+		if (written < 0)
+			return -1;
+		if (!written) {
+			errno = ENOSPC;
+			return -1;
+		}
+		count -= written;
+		p += written;
+		total += written;
+	}
+
+	return total;
+}
+
+int xdup(int fd)
+{
+	int ret = dup(fd);
+	if (ret < 0)
+		die("dup failed: %s", strerror(errno));
+	return ret;
+}
+
+FILE *xfdopen(int fd, const char *mode)
+{
+	FILE *stream = fdopen(fd, mode);
+	if (stream == NULL)
+		die("Out of memory?
fdopen failed: %s", strerror(errno)); + return stream; +} + +int xmkstemp(char *template) +{ + int fd; + + fd = mkstemp(template); + if (fd < 0) + die("Unable to create temporary file: %s", strerror(errno)); + return fd; +} -- cgit v1.2.3 From 7d37a0cbd68c875fa984fa97bcf5c7f4b7950b6d Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sat, 6 Jun 2009 20:36:38 +0200 Subject: perf_counter tools: Warning fixes on 32-bit Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 4 ++-- tools/perf/util/symbol.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 116a3978b44..4a3c279160c 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -668,9 +668,9 @@ static void hist_hit(struct hist_entry *he, uint64_t ip) if (verbose >= 3) printf("%p %s: count++ [ip: %p, %08Lx] => %Ld\n", - (void *)he->sym->start, + (void *)(unsigned long)he->sym->start, he->sym->name, - (void *)ip, ip - he->sym->start, + (void *)(unsigned long)ip, ip - he->sym->start, sym->hist[offset]); } diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 23f4f7b3b83..253821d1efd 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -21,7 +21,7 @@ static struct symbol *symbol__new(uint64_t start, uint64_t len, if (verbose >= 2) printf("new symbol: %016Lx [%08lx]: %s, hist: %p, obj_start: %p\n", - (__u64)start, len, name, self->hist, (void *)obj_start); + (__u64)start, (unsigned long)len, name, self->hist, (void *)(unsigned long)obj_start); self->obj_start= obj_start; self->hist = NULL; -- cgit v1.2.3 From 8953645fec933f992223286ad407dc371ac2caa5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 6 Jun 2009 21:04:17 +0200 Subject: perf_counter tools: Fix error condition in parse_aliases() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit gcc warned about this bug: util/parse-events.c: In function ‘parse_generic_hw_symbols’: util/parse-events.c:175: warning: comparison is always false due to limited range of data type util/parse-events.c:182: warning: comparison is always false due to limited range of data type util/parse-events.c:190: warning: comparison is always false due to limited range of data type Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/util/parse-events.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index e0820b4388a..f18a9a006e1 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -160,12 +160,12 @@ static int parse_aliases(const char *str, char *names[][MAX_ALIASES], int size) } } - return 0; + return -1; } static int parse_generic_hw_symbols(const char *str, struct perf_counter_attr *attr) { - __u8 cache_type = -1, cache_op = 0, cache_result = 0; + int cache_type = -1, cache_op = 0, cache_result = 0; cache_type = parse_aliases(str, hw_cache, PERF_COUNT_HW_CACHE_MAX); /* @@ -179,8 +179,8 @@ static int parse_generic_hw_symbols(const char *str, struct perf_counter_attr *a /* * Fall back to reads: */ - if (cache_type == -1) - cache_type = PERF_COUNT_HW_CACHE_OP_READ; + if (cache_op == -1) + cache_op = PERF_COUNT_HW_CACHE_OP_READ; cache_result = parse_aliases(str, hw_cache_result, 
PERF_COUNT_HW_CACHE_RESULT_MAX);
--
cgit v1.2.3


From 39273ee9756917129de3190d469b0b120f87e763 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Sat, 6 Jun 2009 21:17:03 +0200
Subject: perf annotate: Automatically pick up vmlinux in the local directory

Right now kernel debug info does not get resolved by default, because
we don't know where to look for the vmlinux. The -k option can be used
for that - but if no option is given, pick up vmlinux files in the
current directory - in case a kernel hacker runs profiling from the
source directory that the kernel was built in.

The real solution would be to embed the location (and perhaps the
date/timestamp) of the vmlinux file in /proc/kallsyms, so that tools
can pick it up automatically.

Cc: Peter Zijlstra
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Arnaldo Carvalho de Melo
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 tools/perf/builtin-annotate.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 4a3c279160c..80c5aa0bb42 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -26,7 +26,7 @@
 #define SHOW_HV 4

 static char const *input_name = "perf.data";
-static char *vmlinux = NULL;
+static char *vmlinux = "vmlinux";

 static char default_sort_order[] = "comm,symbol";
 static char *sort_order = default_sort_order;
--
cgit v1.2.3


From e9fbc9dc9214d6a9de7d62627be5414804fd7b9f Mon Sep 17 00:00:00 2001
From: Arjan van de Ven
Date: Sat, 6 Jun 2009 21:22:33 +0200
Subject: perf_counter tools: Initialize a stack variable before use

The "perf report" utility crashed in some circumstances because the
"sym" stack variable was not initialized before use (as also proven
by valgrind). With this fix the crash goes away and valgrind no
longer complains.

Signed-off-by: Arjan van de Ven
Cc: Peter Zijlstra
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Arnaldo Carvalho de Melo
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 tools/perf/util/symbol.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 253821d1efd..158588c7f6b 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -457,6 +457,8 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,

 	nr_syms = shdr.sh_size / shdr.sh_entsize;

+	memset(&sym, 0, sizeof(sym));
+
 	elf_symtab__for_each_symbol(syms, nr_syms, index, sym) {
 		struct symbol *f;
 		uint64_t obj_start;
--
cgit v1.2.3


From 23b87116c7c4f73597965218b66041acbdb4e79f Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Sat, 6 Jun 2009 21:25:29 +0200
Subject: perf annotate: Fix command line help text

Arjan noticed this bug in the perf annotate help output:

    -s, --symbol <file>   symbol to annotate

that should be <symbol> instead.
Reported-by: Arjan van de Ven
Cc: Peter Zijlstra
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Arnaldo Carvalho de Melo
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 tools/perf/builtin-annotate.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 80c5aa0bb42..0e23fe98ec4 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -1300,7 +1300,7 @@ static const char * const annotate_usage[] = {
 static const struct option options[] = {
 	OPT_STRING('i', "input", &input_name, "file",
 		    "input file name"),
-	OPT_STRING('s', "symbol", &sym_hist_filter, "file",
+	OPT_STRING('s', "symbol", &sym_hist_filter, "symbol",
 		    "symbol to annotate"),
 	OPT_BOOLEAN('v', "verbose", &verbose,
 		    "be more verbose (show symbol address, etc)"),
--
cgit v1.2.3


From 2f01190aa62fe9dd0a98205927b9f09fd191c017 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Sat, 6 Jun 2009 23:10:43 +0200
Subject: perf top: Wait for a minimal set of events before reading first snapshot

The first snapshot reading often occurs before any events have been
read in the mapped perfcounter files. Just wait until we have at least
one event before starting the snapshot, or the delay before the first
set of entries is displayed may be long in case of a low refresh rate.

Note: we could also use a semaphore to wait until "print_entries"
events have been read, but again this value is tunable and we can't
ensure we will even reach it. Also we could base this on a default
minimum set of entries for the first refresh, say 15, but again the
minimal sample threshold is tunable, and we could end up displaying
nothing until we have a minimal default set of events, which can take
some time in case of high sample filters.

Hence this simple solution which partially covers the default case.
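In pseudo-code, the intended startup ordering is simply (a sketch
only, using the event_array/nr_poll/mmap_read() names introduced by
the patch below; error handling omitted):

	/* give the counters up to 100 msecs to produce a first event: */
	poll(event_array, nr_poll, 100);

	/* prime the first snapshot with whatever arrived: */
	mmap_read();

	/* ... only then create the display thread and enter the loop */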
[ Impact: fix display artifacts in perf top ]

Signed-off-by: Frederic Weisbecker
Cc: Peter Zijlstra
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Corey Ashford
Cc: Marcelo Tosatti
Cc: Arnaldo Carvalho de Melo
LKML-Reference: <1244322643-6447-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar
---
 tools/perf/builtin-top.c | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index f2e7312f85c..fdc1d5863b0 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -420,7 +420,7 @@ static unsigned int mmap_read_head(struct mmap_data *md)

 struct timeval last_read, this_read;

-static void mmap_read(struct mmap_data *md)
+static void mmap_read_counter(struct mmap_data *md)
 {
 	unsigned int head = mmap_read_head(md);
 	unsigned int old = md->prev;
@@ -517,6 +517,16 @@ static void mmap_read(struct mmap_data *md)
 static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
 static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];

+static void mmap_read(void)
+{
+	int i, counter;
+
+	for (i = 0; i < nr_cpus; i++) {
+		for (counter = 0; counter < nr_counters; counter++)
+			mmap_read_counter(&mmap_array[i][counter]);
+	}
+}
+
 static int __cmd_top(void)
 {
 	struct perf_counter_attr *attr;
@@ -571,6 +581,11 @@ static int __cmd_top(void)
 		}
 	}

+	/* Wait for a minimal set of events before starting the snapshot */
+	poll(event_array, nr_poll, 100);
+
+	mmap_read();
+
 	if (pthread_create(&thread, NULL, display_thread, NULL)) {
 		printf("Could not create display thread.\n");
 		exit(-1);
@@ -589,10 +604,7 @@ static int __cmd_top(void)
 	while (1) {
 		int hits = samples;

-		for (i = 0; i < nr_cpus; i++) {
-			for (counter = 0; counter < nr_counters; counter++)
-				mmap_read(&mmap_array[i][counter]);
-		}
+		mmap_read();

 		if (hits == samples)
 			ret = poll(event_array, nr_poll, 100);
--
cgit v1.2.3


From 743ee1f80434138495bbb95ffb897acf46b51d54 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Sun, 7 Jun 2009 17:06:46 +0200
Subject: perf stat: Continue even on counter creation error

Before:

 $ perf stat ~/hackbench 5
 error: syscall returned with -1 (No such device)

After:

 $ perf stat ~/hackbench 5
 Time: 1.640

 Performance counter stats for '/home/mingo/hackbench 5':

    6524.570382  task-clock-ticks   #   3.838 CPU utilization factor
          35704  context-switches   #   0.005 M/sec
            191  CPU-migrations     #   0.000 M/sec
           8958  page-faults        #   0.001 M/sec
                 cycles
                 instructions
                 cache-references
                 cache-misses

 Wall-clock time elapsed:  1699.999995 msecs

Also add a -v (--verbose) option to allow the printing of failed
counter opens. Plus don't print 'inf' if wall-time is zero (due to
jiffies granularity) - instead skip the printing of the CPU
utilization factor.
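Schematically, the counter-creation path now records the failure
instead of exiting (a sketch mirroring the hunks below; the format
string is abbreviated, and readers later skip any fd that is < 0):

	fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0);
	if (fd[cpu][counter] < 0 && verbose)
		printf("Error: counter %d ... (%s)\n",
			counter, strerror(errno));
	/* no exit(-1) here anymore - just leave the fd negative */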
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 2cbf5a18958..184ff95ef4f 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -59,6 +59,7 @@ static struct perf_counter_attr default_attrs[MAX_COUNTERS] = { static int system_wide = 0; static int inherit = 1; +static int verbose = 0; static int fd[MAX_NR_CPUS][MAX_COUNTERS]; @@ -83,7 +84,7 @@ static __u64 event_scaled[MAX_COUNTERS]; static __u64 runtime_nsecs; static __u64 walltime_nsecs; -static void create_perfstat_counter(int counter) +static void create_perf_stat_counter(int counter) { struct perf_counter_attr *attr = attrs + counter; @@ -95,10 +96,8 @@ static void create_perfstat_counter(int counter) int cpu; for (cpu = 0; cpu < nr_cpus; cpu ++) { fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0); - if (fd[cpu][counter] < 0) { - printf("perfstat error: syscall returned with %d (%s)\n", - fd[cpu][counter], strerror(errno)); - exit(-1); + if (fd[cpu][counter] < 0 && verbose) { + printf("Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n", counter, fd[cpu][counter], strerror(errno)); } } } else { @@ -106,10 +105,8 @@ static void create_perfstat_counter(int counter) attr->disabled = 1; fd[0][counter] = sys_perf_counter_open(attr, 0, -1, -1, 0); - if (fd[0][counter] < 0) { - printf("perfstat error: syscall returned with %d (%s)\n", - fd[0][counter], strerror(errno)); - exit(-1); + if (fd[0][counter] < 0 && verbose) { + printf("Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n", counter, fd[0][counter], strerror(errno)); } } } @@ -147,6 +144,9 @@ static void read_counter(int counter) nv = scale ? 
3 : 1;

 	for (cpu = 0; cpu < nr_cpus; cpu ++) {
+		if (fd[cpu][counter] < 0)
+			continue;
+
 		res = read(fd[cpu][counter], single_count, nv * sizeof(__u64));
 		assert(res == nv * sizeof(__u64));

@@ -204,8 +204,9 @@ static void print_counter(int counter)
 	if (attrs[counter].type == PERF_TYPE_SOFTWARE &&
 		attrs[counter].config == PERF_COUNT_TASK_CLOCK) {

-		fprintf(stderr, " # %11.3f CPU utilization factor",
-			(double)count[0] / (double)walltime_nsecs);
+		if (walltime_nsecs)
+			fprintf(stderr, " # %11.3f CPU utilization factor",
+				(double)count[0] / (double)walltime_nsecs);
 	}
 } else {
 	fprintf(stderr, " %14Ld %-20s",
@@ -220,7 +221,7 @@
 	fprintf(stderr, "\n");
 }

-static int do_perfstat(int argc, const char **argv)
+static int do_perf_stat(int argc, const char **argv)
 {
 	unsigned long long t0, t1;
 	int counter;
@@ -232,7 +233,7 @@ static int do_perfstat(int argc, const char **argv)
 	nr_cpus = 1;

 	for (counter = 0; counter < nr_counters; counter++)
-		create_perfstat_counter(counter);
+		create_perf_stat_counter(counter);

 	/*
 	 * Enable counters and exec the command:
@@ -305,6 +306,8 @@ static const struct option options[] = {
 		    "system-wide collection from all CPUs"),
 	OPT_BOOLEAN('S', "scale", &scale,
 		    "scale/normalize counters"),
+	OPT_BOOLEAN('v', "verbose", &verbose,
+		    "be more verbose (show counter open errors, etc)"),
 	OPT_END()
 };

@@ -335,5 +338,5 @@ int cmd_stat(int argc, const char **argv, const char *prefix)
 	signal(SIGALRM, skip_signal);
 	signal(SIGABRT, skip_signal);

-	return do_perfstat(argc, argv);
+	return do_perf_stat(argc, argv);
 }
--
cgit v1.2.3


From 716c69fecacd42f2a304a97158e04af2786a3f65 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Sun, 7 Jun 2009 17:31:52 +0200
Subject: perf top: Fall back to cpu-clock-tick hrtimer sampling if no cycle counter available

On architectures/CPUs without PMU support, but with perfcounters
enabled, 'perf top' currently fails because it cannot create a
cycle-based hw-perfcounter.

Fall back to the cpu-clock-tick sw-perfcounter in this case, which is
hrtimer based and will always work (as long as perfcounters are
enabled).
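The heart of the fallback is a single retry with a software clock
event (a sketch of the try_again logic in the patch below):

	try_again:
		fd[i][counter] = sys_perf_counter_open(attr, target_pid,
						       cpu, group_fd, 0);
		if (fd[i][counter] < 0 &&
		    attr->type == PERF_TYPE_HARDWARE &&
		    attr->config == PERF_COUNT_CPU_CYCLES) {
			attr->type   = PERF_TYPE_SOFTWARE;
			attr->config = PERF_COUNT_CPU_CLOCK;
			goto try_again;
		}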
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 113 ++++++++++++++++++++++++++++------------------- tools/perf/util/usage.c | 10 ++--- 2 files changed, 73 insertions(+), 50 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index fdc1d5863b0..6da30a140e8 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -527,58 +527,81 @@ static void mmap_read(void) } } -static int __cmd_top(void) +int nr_poll; +int group_fd; + +static void start_counter(int i, int counter) { struct perf_counter_attr *attr; - pthread_t thread; - int i, counter, group_fd, nr_poll = 0; unsigned int cpu; + + cpu = profile_cpu; + if (target_pid == -1 && profile_cpu == -1) + cpu = i; + + attr = attrs + counter; + + attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; + attr->freq = freq; + +try_again: + fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0); + + if (fd[i][counter] < 0) { + int err = errno; + + error("sys_perf_counter_open() syscall returned with %d (%s)\n", + fd[i][counter], strerror(err)); + + if (err == EPERM) + die(" No permission - are you root?\n"); + /* + * If it's cycles then fall back to hrtimer + * based cpu-clock-tick sw counter, which + * is always available even if no PMU support: + */ + if (attr->type == PERF_TYPE_HARDWARE + && attr->config == PERF_COUNT_CPU_CYCLES) { + + warning(" ... trying to fall back to cpu-clock-ticks\n"); + attr->type = PERF_TYPE_SOFTWARE; + attr->config = PERF_COUNT_CPU_CLOCK; + goto try_again; + } + exit(-1); + } + assert(fd[i][counter] >= 0); + fcntl(fd[i][counter], F_SETFL, O_NONBLOCK); + + /* + * First counter acts as the group leader: + */ + if (group && group_fd == -1) + group_fd = fd[i][counter]; + + event_array[nr_poll].fd = fd[i][counter]; + event_array[nr_poll].events = POLLIN; + nr_poll++; + + mmap_array[i][counter].counter = counter; + mmap_array[i][counter].prev = 0; + mmap_array[i][counter].mask = mmap_pages*page_size - 1; + mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size, + PROT_READ, MAP_SHARED, fd[i][counter], 0); + if (mmap_array[i][counter].base == MAP_FAILED) + die("failed to mmap with %d (%s)\n", errno, strerror(errno)); +} + +static int __cmd_top(void) +{ + pthread_t thread; + int i, counter; int ret; for (i = 0; i < nr_cpus; i++) { group_fd = -1; - for (counter = 0; counter < nr_counters; counter++) { - - cpu = profile_cpu; - if (target_pid == -1 && profile_cpu == -1) - cpu = i; - - attr = attrs + counter; - - attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; - attr->freq = freq; - - fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0); - if (fd[i][counter] < 0) { - int err = errno; - - error("syscall returned with %d (%s)\n", - fd[i][counter], strerror(err)); - if (err == EPERM) - printf("Are you root?\n"); - exit(-1); - } - assert(fd[i][counter] >= 0); - fcntl(fd[i][counter], F_SETFL, O_NONBLOCK); - - /* - * First counter acts as the group leader: - */ - if (group && group_fd == -1) - group_fd = fd[i][counter]; - - event_array[nr_poll].fd = fd[i][counter]; - event_array[nr_poll].events = POLLIN; - nr_poll++; - - mmap_array[i][counter].counter = counter; - mmap_array[i][counter].prev = 0; - mmap_array[i][counter].mask = mmap_pages*page_size - 1; - mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size, - PROT_READ, MAP_SHARED, fd[i][counter], 0); - if (mmap_array[i][counter].base == MAP_FAILED) - die("failed to mmap 
with %d (%s)\n", errno, strerror(errno)); - } + for (counter = 0; counter < nr_counters; counter++) + start_counter(i, counter); } /* Wait for a minimal set of events before starting the snapshot */ diff --git a/tools/perf/util/usage.c b/tools/perf/util/usage.c index 2cad286e437..e16bf9a707e 100644 --- a/tools/perf/util/usage.c +++ b/tools/perf/util/usage.c @@ -9,29 +9,29 @@ static void report(const char *prefix, const char *err, va_list params) { char msg[1024]; vsnprintf(msg, sizeof(msg), err, params); - fprintf(stderr, "%s%s\n", prefix, msg); + fprintf(stderr, " %s%s\n", prefix, msg); } static NORETURN void usage_builtin(const char *err) { - fprintf(stderr, "\n usage: %s\n", err); + fprintf(stderr, "\n Usage: %s\n", err); exit(129); } static NORETURN void die_builtin(const char *err, va_list params) { - report("fatal: ", err, params); + report(" Fatal: ", err, params); exit(128); } static void error_builtin(const char *err, va_list params) { - report("error: ", err, params); + report(" Error: ", err, params); } static void warn_builtin(const char *warn, va_list params) { - report("warning: ", warn, params); + report(" Warning: ", warn, params); } /* If we are in a dlopen()ed .so write to a global variable would segfault -- cgit v1.2.3 From 3da297a60f7e8840f79f7d0b343af078890939ea Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 7 Jun 2009 17:39:02 +0200 Subject: perf record: Fall back to cpu-clock-ticks if no PMU On architectures/CPUs without PMU support but with perfcounters enabled 'perf record' currently fails because it cannot create a cycle based hw-perfcounter. Fall back to the cpu-clock-tick sw-perfcounter in this case, which is hrtimer based and will always work (as long as perfcounters are enabled). Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 25 +++++++++++++++++++++++-- tools/perf/builtin-top.c | 14 ++++++++++---- 2 files changed, 33 insertions(+), 6 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index aeab9c4b15e..87866294a0e 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -37,6 +37,7 @@ static pid_t target_pid = -1; static int inherit = 1; static int force = 0; static int append_file = 0; +static int verbose = 0; static long samples; static struct timeval last_read; @@ -349,17 +350,35 @@ static void create_counter(int counter, int cpu, pid_t pid) track = 0; /* only the first counter needs these */ +try_again: fd[nr_cpu][counter] = sys_perf_counter_open(attr, pid, cpu, group_fd, 0); if (fd[nr_cpu][counter] < 0) { int err = errno; - error("syscall returned with %d (%s)\n", + if (verbose) + error("sys_perf_counter_open() syscall returned with %d (%s)\n", fd[nr_cpu][counter], strerror(err)); if (err == EPERM) - printf("Are you root?\n"); + die("Permission error - are you root?\n"); + + /* + * If it's cycles then fall back to hrtimer + * based cpu-clock-tick sw counter, which + * is always available even if no PMU support: + */ + if (attr->type == PERF_TYPE_HARDWARE + && attr->config == PERF_COUNT_CPU_CYCLES) { + + if (verbose) + warning(" ... 
trying to fall back to cpu-clock-ticks\n"); + attr->type = PERF_TYPE_SOFTWARE; + attr->config = PERF_COUNT_CPU_CLOCK; + goto try_again; + } exit(-1); } + assert(fd[nr_cpu][counter] >= 0); fcntl(fd[nr_cpu][counter], F_SETFL, O_NONBLOCK); @@ -519,6 +538,8 @@ static const struct option options[] = { "profile at this frequency"), OPT_INTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"), + OPT_BOOLEAN('v', "verbose", &verbose, + "be more verbose (show counter open errors, etc)"), OPT_END() }; diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 6da30a140e8..1f8c97d5c32 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -65,6 +65,7 @@ static int group = 0; static unsigned int page_size; static unsigned int mmap_pages = 16; static int freq = 0; +static int verbose = 0; static char *sym_filter; static unsigned long filter_start; @@ -550,11 +551,12 @@ try_again: if (fd[i][counter] < 0) { int err = errno; - error("sys_perf_counter_open() syscall returned with %d (%s)\n", - fd[i][counter], strerror(err)); + if (verbose) + error("sys_perf_counter_open() syscall returned with %d (%s)\n", + fd[i][counter], strerror(err)); if (err == EPERM) - die(" No permission - are you root?\n"); + die("No permission - are you root?\n"); /* * If it's cycles then fall back to hrtimer * based cpu-clock-tick sw counter, which @@ -563,7 +565,9 @@ try_again: if (attr->type == PERF_TYPE_HARDWARE && attr->config == PERF_COUNT_CPU_CYCLES) { - warning(" ... trying to fall back to cpu-clock-ticks\n"); + if (verbose) + warning(" ... trying to fall back to cpu-clock-ticks\n"); + attr->type = PERF_TYPE_SOFTWARE; attr->config = PERF_COUNT_CPU_CLOCK; goto try_again; @@ -673,6 +677,8 @@ static const struct option options[] = { "profile at this frequency"), OPT_INTEGER('E', "entries", &print_entries, "display this many functions"), + OPT_BOOLEAN('v', "verbose", &verbose, + "be more verbose (show counter open errors, etc)"), OPT_END() }; -- cgit v1.2.3 From 30c806a094493beb7691bc7957dfa02dee96230a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 7 Jun 2009 17:46:24 +0200 Subject: perf_counter tools: Handle kernels with !CONFIG_PERF_COUNTER If perf is run on a !CONFIG_PERF_COUNTER kernel right now it bails out with no messages or with confusing messages. Standardize this case some more and explain the situation. 
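The standardized failure path boils down to (a sketch of the error
path added by the hunks below):

	printf("\n");
	error("perfcounter syscall returned with %d (%s)\n",
		fd[i][counter], strerror(err));
	die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n");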
Cc: Peter Zijlstra
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Arnaldo Carvalho de Melo
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 tools/perf/builtin-record.c | 7 ++++---
 tools/perf/builtin-top.c    | 8 ++++----
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 87866294a0e..deaee42d5eb 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -356,9 +356,6 @@ try_again:
 	if (fd[nr_cpu][counter] < 0) {
 		int err = errno;

-		if (verbose)
-			error("sys_perf_counter_open() syscall returned with %d (%s)\n",
-				fd[nr_cpu][counter], strerror(err));
 		if (err == EPERM)
 			die("Permission error - are you root?\n");

@@ -376,6 +373,10 @@ try_again:
 			attr->config = PERF_COUNT_CPU_CLOCK;
 			goto try_again;
 		}
+		printf("\n");
+		error("perfcounter syscall returned with %d (%s)\n",
+			fd[nr_cpu][counter], strerror(err));
+		die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n");
 		exit(-1);
 	}

diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1f8c97d5c32..be1698f1189 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -551,10 +551,6 @@ try_again:
 	if (fd[i][counter] < 0) {
 		int err = errno;

-		if (verbose)
-			error("sys_perf_counter_open() syscall returned with %d (%s)\n",
-				fd[i][counter], strerror(err));
-
 		if (err == EPERM)
 			die("No permission - are you root?\n");
 		/*
@@ -572,6 +568,10 @@
 			attr->config = PERF_COUNT_CPU_CLOCK;
 			goto try_again;
 		}
+		printf("\n");
+		error("perfcounter syscall returned with %d (%s)\n",
+			fd[i][counter], strerror(err));
+		die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n");
 		exit(-1);
 	}
 	assert(fd[i][counter] >= 0);
--
cgit v1.2.3


From a14832ff977e78d1982cdf78cdabb1f2320d9ac8 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Sun, 7 Jun 2009 17:58:23 +0200
Subject: perf report: Print more expressive message in case of file open error

Before:

 $ perf report
 failed to open file: No such file or directory

After:

 $ perf report
 failed to open file: perf.data  (try 'perf record' first)

Cc: Peter Zijlstra
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Arnaldo Carvalho de Melo
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 tools/perf/builtin-report.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 242e09ff365..f053a7463dc 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -1120,7 +1120,10 @@ static int __cmd_report(void)

 	input = open(input_name, O_RDONLY);
 	if (input < 0) {
-		perror("failed to open file");
+		fprintf(stderr, " failed to open file: %s", input_name);
+		if (!strcmp(input_name, "perf.data"))
+			fprintf(stderr, "  (try 'perf record' first)");
+		fprintf(stderr, "\n");
 		exit(-1);
 	}
--
cgit v1.2.3


From e779898aa74cd2e97216368b3f3689ceffe8aeed Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Sun, 7 Jun 2009 18:14:46 +0200
Subject: perf stat: Print out instructions/cycle metric

Before:

   7549326754  cycles        #  3201.811 M/sec
  10007594937  instructions  #  4244.408 M/sec

After:

   7542051194  cycles        #  3201.996 M/sec
  10007743852  instructions  #  4248.811 M/sec # 1.327 per cycle

Cc: Peter Zijlstra
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Arnaldo Carvalho de Melo
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 tools/perf/builtin-stat.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 184ff95ef4f..80855090fd9 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -83,6 +83,7 @@
 static __u64			event_scaled[MAX_COUNTERS];

 static __u64			runtime_nsecs;
 static __u64			walltime_nsecs;
+static __u64			runtime_cycles;

 static void create_perf_stat_counter(int counter)
 {
@@ -177,6 +178,9 @@ static void read_counter(int counter)
 	if (attrs[counter].type == PERF_TYPE_SOFTWARE &&
 		attrs[counter].config == PERF_COUNT_TASK_CLOCK)
 		runtime_nsecs = count[0];
+	if (attrs[counter].type == PERF_TYPE_HARDWARE &&
+		attrs[counter].config == PERF_COUNT_CPU_CYCLES)
+		runtime_cycles = count[0];
 }

 /*
@@ -214,6 +218,13 @@ static void print_counter(int counter)
 		if (runtime_nsecs)
 			fprintf(stderr, " # %11.3f M/sec",
 				(double)count[0]/runtime_nsecs*1000.0);
+		if (runtime_cycles &&
+			attrs[counter].type == PERF_TYPE_HARDWARE &&
+			attrs[counter].config == PERF_COUNT_INSTRUCTIONS) {
+
+			fprintf(stderr, " # %1.3f per cycle",
+				(double)count[0] / (double)runtime_cycles);
+		}
 	}
 	if (scaled)
 		fprintf(stderr, " (scaled from %.2f%%)",
--
cgit v1.2.3


From 0312af84164215a452f2a94957ebd9bce86e0204 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Mon, 8 Jun 2009 07:42:04 +0200
Subject: perf_counter, x86: Implement generalized cache event types, add Core2 support

Fill in core2_hw_cache_event_id[] with the Core2 model specific
events.

The events can be used in all the tools via the -e (--event)
parameter, for example "-e l1-misses" or "-e l2-accesses" or
"-e l2-write-misses".

Signed-off-by: Thomas Gleixner
Cc: Peter Zijlstra
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Arnaldo Carvalho de Melo
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/cpu/perf_counter.c | 85 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 84 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index e86679fa521..b1f71ff5025 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -194,7 +194,90 @@ static const u64 core2_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_OP_MAX]
 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
 {
-	/* To be filled in */
+	[ C(L1D) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
+			[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
+			[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
+			[ C(RESULT_MISS) ] = 0,
+		},
+	},
+	[ C(L1I ) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
+			[ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = 0,
+		},
+	},
+	[ C(L2 ) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
+			[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
+			[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = 0,
+		},
+	},
+	[ C(DTLB) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
+			[ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
+			[ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = 0,
+		},
+	},
+	[ C(ITLB) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
+			[ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+	},
+	[ C(BPU ) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
+			[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+	},
 };

 static const u64 atom_hw_cache_event_ids
--
cgit v1.2.3


From ad689220614b6c7c0b13b70d742f358e9310e71e Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Mon, 8 Jun 2009 09:30:41 +0200
Subject: perf_counter, x86: Implement generalized cache event types, add Atom support

Fill in atom_hw_cache_event_id[] with the Atom model specific events.

The events can be used in all the tools via the -e (--event)
parameter, for example "-e l1-misses" or "-e l2-accesses" or
"-e l2-write-misses".

( Note: these are straight from the Intel manuals - not tested yet.)

Signed-off-by: Thomas Gleixner
Cc: Peter Zijlstra
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Arnaldo Carvalho de Melo
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/cpu/perf_counter.c | 85 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 84 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index b1f71ff5025..71590e09d16 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -285,7 +285,90 @@ static const u64 atom_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_OP_MAX]
 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
 {
-	/* To be filled in */
+	[ C(L1D) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
+			[ C(RESULT_MISS) ] = 0,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0x2241, /* L1D_CACHE.ST */
+			[ C(RESULT_MISS) ] = 0,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0,
+			[ C(RESULT_MISS) ] = 0,
+		},
+	},
+	[ C(L1I ) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
+			[ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = 0,
+		},
+	},
+	[ C(L2 ) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
+			[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
+			[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = 0,
+		},
+	},
+	[ C(DTLB) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
+			[ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
+			[ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = 0,
+		},
+	},
+	[ C(ITLB) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
+			[ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+	},
+	[
C(BPU ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ + [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, }; static u64 intel_pmu_raw_event(u64 event) -- cgit v1.2.3 From 1123e3ad73697d64ad99f0104bbe49f8b52d7d65 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 29 May 2009 11:25:09 +0200 Subject: perf_counter: Clean up x86 boot messages Standardize and tidy up all the messages we print during perfcounter initialization. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 46 ++++++++++++++++++++------------------ 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 71590e09d16..0339d195a3f 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -1298,23 +1298,22 @@ static int intel_pmu_init(void) if (version < 2) return -ENODEV; - x86_pmu = intel_pmu; - x86_pmu.version = version; - x86_pmu.num_counters = eax.split.num_counters; + x86_pmu = intel_pmu; + x86_pmu.version = version; + x86_pmu.num_counters = eax.split.num_counters; + x86_pmu.counter_bits = eax.split.bit_width; + x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1; /* * Quirk: v2 perfmon does not report fixed-purpose counters, so * assume at least 3 counters: */ - x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); - - x86_pmu.counter_bits = eax.split.bit_width; - x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1; + x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); /* - * Nehalem: + * Install the hw-cache-events table: */ switch (boot_cpu_data.x86_model) { case 17: @@ -1322,7 +1321,7 @@ static int intel_pmu_init(void) sizeof(u64)*PERF_COUNT_HW_CACHE_MAX* PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX); - pr_info("... installed Core2 event tables\n"); + pr_cont("Core2 events, "); break; default: case 26: @@ -1330,14 +1329,14 @@ static int intel_pmu_init(void) sizeof(u64)*PERF_COUNT_HW_CACHE_MAX* PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX); - pr_info("... installed Nehalem/Corei7 event tables\n"); + pr_cont("Nehalem/Corei7 events, "); break; case 28: memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, sizeof(u64)*PERF_COUNT_HW_CACHE_MAX* PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX); - pr_info("... installed Atom event tables\n"); + pr_cont("Atom events, "); break; } return 0; @@ -1353,6 +1352,8 @@ void __init init_hw_perf_counters(void) { int err; + pr_info("Performance Counters: "); + switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: err = intel_pmu_init(); @@ -1363,14 +1364,13 @@ void __init init_hw_perf_counters(void) default: return; } - if (err != 0) + if (err != 0) { + pr_cont("no PMU driver, software counters only.\n"); return; + } - pr_info("%s Performance Monitoring support detected.\n", x86_pmu.name); - pr_info("... version: %d\n", x86_pmu.version); - pr_info("... bit width: %d\n", x86_pmu.counter_bits); + pr_cont("%s PMU driver.\n", x86_pmu.name); - pr_info("... 
num counters: %d\n", x86_pmu.num_counters); if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { x86_pmu.num_counters = X86_PMC_MAX_GENERIC; WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", @@ -1379,23 +1379,25 @@ void __init init_hw_perf_counters(void) perf_counter_mask = (1 << x86_pmu.num_counters) - 1; perf_max_counters = x86_pmu.num_counters; - pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask); - pr_info("... max period: %016Lx\n", x86_pmu.max_period); - if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) { x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED; WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!", x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED); } - pr_info("... fixed counters: %d\n", x86_pmu.num_counters_fixed); perf_counter_mask |= ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; - pr_info("... counter mask: %016Lx\n", perf_counter_mask); - perf_counters_lapic_init(); register_die_notifier(&perf_counter_nmi_notifier); + + pr_info("... version: %d\n", x86_pmu.version); + pr_info("... bit width: %d\n", x86_pmu.counter_bits); + pr_info("... generic counters: %d\n", x86_pmu.num_counters); + pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask); + pr_info("... max period: %016Lx\n", x86_pmu.max_period); + pr_info("... fixed-purpose counters: %d\n", x86_pmu.num_counters_fixed); + pr_info("... counter mask: %016Lx\n", perf_counter_mask); } static inline void x86_pmu_read(struct perf_counter *counter) -- cgit v1.2.3 From f86748e91a14bd6cc49477560f33ed5d59896e89 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 8 Jun 2009 22:33:10 +0200 Subject: perf_counter, x86: Implement generalized cache event types, add AMD support Fill in amd_hw_cache_event_id[] with the AMD CPU specific events, for family 0x0f, 0x10 and 0x11. There's apparently no distinction between load and store events, so we only fill in the load events. 
Signed-off-by: Thomas Gleixner
Cc: Peter Zijlstra
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Arnaldo Carvalho de Melo
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/cpu/perf_counter.c | 102 +++++++++++++++++++++++++++++++++++++
 1 file changed, 102 insertions(+)

diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 0339d195a3f..93af821ebe5 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -389,6 +389,97 @@ static u64 intel_pmu_raw_event(u64 event)
 	return event & CORE_EVNTSEL_MASK;
 }

+static const u64 amd_0f_hw_cache_event_ids
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+	[ C(L1D) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = 0,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = 0,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = 0,
+		},
+	},
+	[ C(L1I ) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
+			[ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = 0,
+		},
+	},
+	[ C(L2 ) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = 0,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = 0,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = 0,
+		},
+	},
+	[ C(DTLB) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = 0,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = 0,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = 0,
+		},
+	},
+	[ C(ITLB) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
+			[ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+	},
+	[ C(BPU ) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
+			[ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+	},
+};
+
 /*
  * AMD Performance Monitor K7 and later.
*/ @@ -1345,6 +1436,17 @@ static int intel_pmu_init(void) static int amd_pmu_init(void) { x86_pmu = amd_pmu; + + switch (boot_cpu_data.x86) { + case 0x0f: + case 0x10: + case 0x11: + memcpy(hw_cache_event_ids, amd_0f_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + + pr_cont("AMD Family 0f/10/11 events, "); + break; + } return 0; } -- cgit v1.2.3 From 820a644211bc1ac7715333abdb0f0b9ea4fbb549 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 8 Jun 2009 19:10:25 +0200 Subject: perf_counter, x86: Clean up hw_cache_event ids copies Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 93af821ebe5..56001feeffc 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -1409,23 +1409,20 @@ static int intel_pmu_init(void) switch (boot_cpu_data.x86_model) { case 17: memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, - sizeof(u64)*PERF_COUNT_HW_CACHE_MAX* - PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX); + sizeof(hw_cache_event_ids)); pr_cont("Core2 events, "); break; default: case 26: memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, - sizeof(u64)*PERF_COUNT_HW_CACHE_MAX* - PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX); + sizeof(hw_cache_event_ids)); pr_cont("Nehalem/Corei7 events, "); break; case 28: memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, - sizeof(u64)*PERF_COUNT_HW_CACHE_MAX* - PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX); + sizeof(hw_cache_event_ids)); pr_cont("Atom events, "); break; -- cgit v1.2.3 From dab5855b12411334355ba21349a06700e4ae7a3b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 8 Jun 2009 21:11:57 +0300 Subject: perf_counter: Add mmap event hooks to mprotect() Some JIT compilers allocate memory for generated code with posix_memalign() + mprotect() so we need to hook into mprotect() to make sure 'perf' is aware that we're executing code in anonymous memory. [ penberg@cs.helsinki.fi: move the hook to sys_mprotect() ] Cc: Arnaldo Carvalho de Melo Signed-off-by: Pekka Enberg LKML-Reference: Signed-off-by: Ingo Molnar --- mm/mprotect.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mm/mprotect.c b/mm/mprotect.c index 258197b76fb..d80311baeb2 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -299,6 +300,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); if (error) goto out; + perf_counter_mmap(vma); nstart = tmp; if (nstart < prev->vm_end) -- cgit v1.2.3 From 80d496be89ed7dede5abee5c057634e80a31c82d Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 8 Jun 2009 21:12:48 +0300 Subject: perf report: Add support for profiling JIT generated code This patch adds support for profiling JIT generated code to 'perf report'. A JIT compiler is required to generate a "/tmp/perf-$PID.map" symbols map that is parsed when looking and displaying symbols. Thanks to Peter Zijlstra for his help with this patch! Example "perf report" output with the Jato JIT: # # (40311 samples) # # Overhead Command Shared Object Symbol # ........ ................ ......................... ...... # 97.80% jato /tmp/perf-11915.map [.] Fibonacci.fib(I)I 0.56% jato 00000000b7fa023b 0x000000b7fa023b 0.45% jato /tmp/perf-11915.map [.] 
Fibonacci.main([Ljava/lang/String;)V 0.38% jato [kernel] [k] get_page_from_freelist 0.06% jato [kernel] [k] kunmap_atomic 0.05% jato ./jato [.] utf8Hash 0.04% jato ./jato [.] executeJava 0.04% jato ./jato [.] defineClass Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Signed-off-by: Pekka Enberg Cc: a.p.zijlstra@chello.nl Cc: acme@redhat.com LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-report.c | 15 ++++++++++- tools/perf/util/symbol.c | 65 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index f053a7463dc..61d871849b4 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -209,6 +209,11 @@ static uint64_t vdso__map_ip(struct map *map, uint64_t ip) return ip; } +static inline int is_anon_memory(const char *filename) +{ + return strcmp(filename, "//anon") == 0; +} + static struct map *map__new(struct mmap_event *event) { struct map *self = malloc(sizeof(*self)); @@ -216,6 +221,7 @@ static struct map *map__new(struct mmap_event *event) if (self != NULL) { const char *filename = event->filename; char newfilename[PATH_MAX]; + int anon; if (cwd) { int n = strcommon(filename); @@ -227,6 +233,13 @@ static struct map *map__new(struct mmap_event *event) } } + anon = is_anon_memory(filename); + + if (anon) { + snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid); + filename = newfilename; + } + self->start = event->start; self->end = event->start + event->len; self->pgoff = event->pgoff; @@ -235,7 +248,7 @@ static struct map *map__new(struct mmap_event *event) if (self->dso == NULL) goto out_delete; - if (self->dso == vdso) + if (self->dso == vdso || anon) self->map_ip = vdso__map_ip; else self->map_ip = map__map_ip; diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 158588c7f6b..32dd47d60d9 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -220,6 +220,68 @@ out_failure: return -1; } +static int dso__load_perf_map(struct dso *self, symbol_filter_t filter, int verbose) +{ + char *line = NULL; + size_t n; + FILE *file; + int nr_syms = 0; + + file = fopen(self->name, "r"); + if (file == NULL) + goto out_failure; + + while (!feof(file)) { + __u64 start, size; + struct symbol *sym; + int line_len, len; + + line_len = getline(&line, &n, file); + if (line_len < 0) + break; + + if (!line) + goto out_failure; + + line[--line_len] = '\0'; /* \n */ + + len = hex2u64(line, &start); + + len++; + if (len + 2 >= line_len) + continue; + + len += hex2u64(line + len, &size); + + len++; + if (len + 2 >= line_len) + continue; + + sym = symbol__new(start, size, line + len, + self->sym_priv_size, start, verbose); + + if (sym == NULL) + goto out_delete_line; + + if (filter && filter(self, sym)) + symbol__delete(sym, self->sym_priv_size); + else { + dso__insert_symbol(self, sym); + nr_syms++; + } + } + + free(line); + fclose(file); + + return nr_syms; + +out_delete_line: + free(line); +out_failure: + return -1; +} + /** * elf_symtab__for_each_symbol - iterate thru all the symbols * @@ -507,6 +569,9 @@ int dso__load(struct dso *self, symbol_filter_t filter, int verbose) if (!name) return -1; + if (strncmp(self->name, "/tmp/perf-", 10) == 0) + return dso__load_perf_map(self, filter, verbose); + more: do { switch (variant) { -- cgit v1.2.3 From aefcf37b82886260d8540c9fb815e613c8977e06 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 8 Jun 2009 23:15:28 +0200 Subject: perf_counter tools: Standardize 
color printing The rule is: - high overhead: red - mid overhead: green - low overhead: normal (white/black) Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 5 +++-- tools/perf/builtin-report.c | 13 ++++++++----- tools/perf/builtin-top.c | 13 ++++++++----- 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 0e23fe98ec4..3334a8bb1d5 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -1085,8 +1085,9 @@ parse_line(FILE *file, struct symbol *sym, uint64_t start, uint64_t len) percent = 100.0 * hits / sym->hist_sum; /* - * We color high-overhead entries in red, low-overhead - * entries in green - and keep the middle ground normal: + * We color high-overhead entries in red, mid-overhead + * entries in green - and keep the low overhead places + * normal: */ if (percent >= 5.0) color = PERF_COLOR_RED; diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 61d871849b4..0b18cb99a85 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -693,13 +693,16 @@ hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples) char *color = PERF_COLOR_NORMAL; /* - * We color high-overhead entries in red, low-overhead - * entries in green - and keep the middle ground normal: + * We color high-overhead entries in red, mid-overhead + * entries in green - and keep the low overhead places + * normal: */ - if (percent >= 5.0) + if (percent >= 5.0) { color = PERF_COLOR_RED; - if (percent < 0.5) - color = PERF_COLOR_GREEN; + } else { + if (percent >= 0.5) + color = PERF_COLOR_GREEN; + } ret = color_fprintf(fp, color, " %6.2f%%", (self->count * 100.0) / total_samples); diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index be1698f1189..8ba24808a39 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -248,13 +248,16 @@ static void print_sym_table(void) sum_ksamples)); /* - * We color high-overhead entries in red, low-overhead - * entries in green - and keep the middle ground normal: + * We color high-overhead entries in red, mid-overhead + * entries in green - and keep the low overhead places + * normal: */ - if (pcnt >= 5.0) + if (pcnt >= 5.0) { color = PERF_COLOR_RED; - if (pcnt < 0.5) - color = PERF_COLOR_GREEN; + } else { + if (pcnt >= 0.5) + color = PERF_COLOR_GREEN; + } if (nr_counters == 1) printf("%20.2f - ", syme->weight); -- cgit v1.2.3 From fecc8ac8496fce96069724f54daba8e7078b0082 Mon Sep 17 00:00:00 2001 From: Yong Wang Date: Tue, 9 Jun 2009 21:15:53 +0800 Subject: perf_counter, x86: Correct some event and umask values for Intel processors Correct some event and UMASK values according to Intel SDM, in the Nehalem and Atom tables. 
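As a decoding aid for these table values: the low byte is the event
select code and the next byte is the unit mask, following the usual
Intel PERFEVTSEL layout (a sketch; the macro names here are made up):

	#define EVENT_SELECT(v)	((v) & 0xff)
	#define UNIT_MASK(v)	(((v) >> 8) & 0xff)

	/* e.g. 0x0380 = event 0x80 with umask 0x03: L1I.READS */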
Signed-off-by: Yong Wang Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: <20090609131553.GA12489@ywang-moblin2.bj.intel.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 56001feeffc..40978aac6e0 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -119,7 +119,7 @@ static const u64 nehalem_hw_cache_event_ids }, [ C(L1I ) ] = { [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0480, /* L1I.READS */ + [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ }, [ C(OP_WRITE) ] = { @@ -162,7 +162,7 @@ static const u64 nehalem_hw_cache_event_ids [ C(ITLB) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */ - [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISS_RETIRED */ + [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */ }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = -1, @@ -291,7 +291,7 @@ static const u64 atom_hw_cache_event_ids [ C(RESULT_MISS) ] = 0, }, [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x2241, /* L1D_CACHE.ST */ + [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */ [ C(RESULT_MISS) ] = 0, }, [ C(OP_PREFETCH) ] = { @@ -301,8 +301,8 @@ static const u64 atom_hw_cache_event_ids }, [ C(L1I ) ] = { [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */ - [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */ + [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ + [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = -1, @@ -329,11 +329,11 @@ static const u64 atom_hw_cache_event_ids }, [ C(DTLB) ] = { [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */ + [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */ [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */ }, [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */ + [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */ [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */ }, [ C(OP_PREFETCH) ] = { -- cgit v1.2.3 From dc81081b2d9a6a9d64dad1bef1e5fc9fb660e53e Mon Sep 17 00:00:00 2001 From: Yong Wang Date: Wed, 10 Jun 2009 17:06:12 +0800 Subject: perf_counter/x86: Fix the model number of Intel Core2 processors Fix the model number of Intel Core2 processors according to the documentation: Intel Processor Identification with the CPUID Instruction: http://www.intel.com/support/processors/sb/cs-009861.htm Signed-off-by: Yong Wang Also-Reported-by: Arnd Bergmann Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: <20090610090612.GA26580@ywang-moblin2.bj.intel.com> [ Added two more model numbers suggested by Arnd Bergmann ] Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 40978aac6e0..49f258537cb 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -1407,7 +1407,10 @@ static int intel_pmu_init(void) * Install the hw-cache-events table: */ switch (boot_cpu_data.x86_model) { - case 17: + case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ + case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */ + case 23: /* current 45 nm 
celeron/core2/xeon "Penryn"/"Wolfdale" */ + case 29: /* six-core 45 nm xeon "Dunnington" */ memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, sizeof(hw_cache_event_ids)); -- cgit v1.2.3 From bd2b5b12849a3446abad0b25e920f86f5480b309 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 10 Jun 2009 13:40:57 +0200 Subject: perf_counter: More aggressive frequency adjustment Also employ the overflow handler to adjust the frequency, this results in a stable frequency in about 40~50 samples, instead of that many ticks. This also means we can start sampling at a sample period of 1 without running head-first into the throttle. It relies on sched_clock() to accurately measure the time difference between the overflow NMIs. Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 5 +- include/linux/perf_counter.h | 1 + kernel/perf_counter.c | 130 +++++++++++++++++++++++++------------ 3 files changed, 92 insertions(+), 44 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 49f258537cb..240ca563063 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -696,10 +696,11 @@ static int __hw_perf_counter_init(struct perf_counter *counter) if (!attr->exclude_kernel) hwc->config |= ARCH_PERFMON_EVENTSEL_OS; - if (!hwc->sample_period) + if (!hwc->sample_period) { hwc->sample_period = x86_pmu.max_period; + atomic64_set(&hwc->period_left, hwc->sample_period); + } - atomic64_set(&hwc->period_left, hwc->sample_period); counter->destroy = hw_perf_counter_destroy; /* diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 3586df840f6..282d8cc4898 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -371,6 +371,7 @@ struct hw_perf_counter { u64 freq_count; u64 freq_interrupts; + u64 freq_stamp; #endif }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 5eacaaf3f9c..51c571ee4d0 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1184,13 +1184,33 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) static void perf_log_throttle(struct perf_counter *counter, int enable); static void perf_log_period(struct perf_counter *counter, u64 period); -static void perf_adjust_freq(struct perf_counter_context *ctx) +static void perf_adjust_period(struct perf_counter *counter, u64 events) +{ + struct hw_perf_counter *hwc = &counter->hw; + u64 period, sample_period; + s64 delta; + + events *= hwc->sample_period; + period = div64_u64(events, counter->attr.sample_freq); + + delta = (s64)(period - hwc->sample_period); + delta = (delta + 7) / 8; /* low pass filter */ + + sample_period = hwc->sample_period + delta; + + if (!sample_period) + sample_period = 1; + + perf_log_period(counter, sample_period); + + hwc->sample_period = sample_period; +} + +static void perf_ctx_adjust_freq(struct perf_counter_context *ctx) { struct perf_counter *counter; struct hw_perf_counter *hwc; - u64 interrupts, sample_period; - u64 events, period, freq; - s64 delta; + u64 interrupts, freq; spin_lock(&ctx->lock); list_for_each_entry(counter, &ctx->counter_list, list_entry) { @@ -1202,6 +1222,9 @@ static void perf_adjust_freq(struct perf_counter_context *ctx) interrupts = hwc->interrupts; hwc->interrupts = 0; + /* + * unthrottle counters on the tick + */ if (interrupts == MAX_INTERRUPTS) { perf_log_throttle(counter, 1); 
counter->pmu->unthrottle(counter); @@ -1211,6 +1234,9 @@ static void perf_adjust_freq(struct perf_counter_context *ctx) if (!counter->attr.freq || !counter->attr.sample_freq) continue; + /* + * if the specified freq < HZ then we need to skip ticks + */ if (counter->attr.sample_freq < HZ) { freq = counter->attr.sample_freq; @@ -1226,20 +1252,20 @@ static void perf_adjust_freq(struct perf_counter_context *ctx) } else freq = HZ; - events = freq * interrupts * hwc->sample_period; - period = div64_u64(events, counter->attr.sample_freq); - - delta = (s64)(1 + period - hwc->sample_period); - delta >>= 1; - - sample_period = hwc->sample_period + delta; - - if (!sample_period) - sample_period = 1; + perf_adjust_period(counter, freq * interrupts); - perf_log_period(counter, sample_period); - - hwc->sample_period = sample_period; + /* + * In order to avoid being stalled by an (accidental) huge + * sample period, force reset the sample period if we didn't + * get any events in this freq period. + */ + if (!interrupts) { + perf_disable(); + counter->pmu->disable(counter); + atomic_set(&hwc->period_left, 0); + counter->pmu->enable(counter); + perf_enable(); + } } spin_unlock(&ctx->lock); } @@ -1279,9 +1305,9 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) cpuctx = &per_cpu(perf_cpu_context, cpu); ctx = curr->perf_counter_ctxp; - perf_adjust_freq(&cpuctx->ctx); + perf_ctx_adjust_freq(&cpuctx->ctx); if (ctx) - perf_adjust_freq(ctx); + perf_ctx_adjust_freq(ctx); perf_counter_cpu_sched_out(cpuctx); if (ctx) @@ -1647,10 +1673,10 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg) counter->attr.sample_freq = value; } else { + perf_log_period(counter, value); + counter->attr.sample_period = value; counter->hw.sample_period = value; - - perf_log_period(counter, value); } unlock: spin_unlock_irq(&ctx->lock); @@ -2853,35 +2879,41 @@ void __perf_counter_mmap(struct vm_area_struct *vma) * event flow. 
*/ +struct freq_event { + struct perf_event_header header; + u64 time; + u64 id; + u64 period; +}; + static void perf_log_period(struct perf_counter *counter, u64 period) { struct perf_output_handle handle; + struct freq_event event; int ret; - struct { - struct perf_event_header header; - u64 time; - u64 id; - u64 period; - } freq_event = { + if (counter->hw.sample_period == period) + return; + + if (counter->attr.sample_type & PERF_SAMPLE_PERIOD) + return; + + event = (struct freq_event) { .header = { .type = PERF_EVENT_PERIOD, .misc = 0, - .size = sizeof(freq_event), + .size = sizeof(event), }, .time = sched_clock(), .id = counter->id, .period = period, }; - if (counter->hw.sample_period == period) - return; - - ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0); + ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0); if (ret) return; - perf_output_put(&handle, freq_event); + perf_output_put(&handle, event); perf_output_end(&handle); } @@ -2923,15 +2955,16 @@ int perf_counter_overflow(struct perf_counter *counter, { int events = atomic_read(&counter->event_limit); int throttle = counter->pmu->unthrottle != NULL; + struct hw_perf_counter *hwc = &counter->hw; int ret = 0; if (!throttle) { - counter->hw.interrupts++; + hwc->interrupts++; } else { - if (counter->hw.interrupts != MAX_INTERRUPTS) { - counter->hw.interrupts++; - if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) { - counter->hw.interrupts = MAX_INTERRUPTS; + if (hwc->interrupts != MAX_INTERRUPTS) { + hwc->interrupts++; + if (HZ * hwc->interrupts > (u64)sysctl_perf_counter_limit) { + hwc->interrupts = MAX_INTERRUPTS; perf_log_throttle(counter, 0); ret = 1; } @@ -2945,6 +2978,16 @@ int perf_counter_overflow(struct perf_counter *counter, } } + if (counter->attr.freq) { + u64 now = sched_clock(); + s64 delta = now - hwc->freq_stamp; + + hwc->freq_stamp = now; + + if (delta > 0 && delta < TICK_NSEC) + perf_adjust_period(counter, NSEC_PER_SEC / (int)delta); + } + /* * XXX event_limit might not quite work as expected on inherited * counters @@ -3379,7 +3422,6 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) return NULL; counter->destroy = tp_perf_counter_destroy; - counter->hw.sample_period = counter->attr.sample_period; return &perf_ops_generic; } @@ -3483,10 +3525,11 @@ perf_counter_alloc(struct perf_counter_attr *attr, pmu = NULL; hwc = &counter->hw; + hwc->sample_period = attr->sample_period; if (attr->freq && attr->sample_freq) - hwc->sample_period = div64_u64(TICK_NSEC, attr->sample_freq); - else - hwc->sample_period = attr->sample_period; + hwc->sample_period = 1; + + atomic64_set(&hwc->period_left, hwc->sample_period); /* * we currently do not support PERF_SAMPLE_GROUP on inherited counters @@ -3687,6 +3730,9 @@ inherit_counter(struct perf_counter *parent_counter, else child_counter->state = PERF_COUNTER_STATE_OFF; + if (parent_counter->attr.freq) + child_counter->hw.sample_period = parent_counter->hw.sample_period; + /* * Link it up in the child's context: */ -- cgit v1.2.3 From 4502d77c1d8f15f20c04b92cb96c12d4e465de29 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 10 Jun 2009 15:03:06 +0200 Subject: perf_counter tools: Small frequency related fixes Create the counter in a disabled state and only enable it after we mmap() the buffer, this allows us to see the first few samples (and observe the frequency ramp). Furthermore, print the period in the verbose report. 
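A minimal sketch of the intended open/mmap/enable sequence (simplified from builtin-record.c, error handling omitted):

	attr->disabled = 1;			/* created, but not counting yet */
	fd = sys_perf_counter_open(attr, pid, cpu, group_fd, 0);

	base = mmap(NULL, (mmap_pages + 1) * page_size,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	/* only now start the counter - the buffer is ready for samples */
	ioctl(fd, PERF_COUNTER_IOC_ENABLE);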
Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 3 +++ tools/perf/builtin-report.c | 6 ++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index deaee42d5eb..a5698add2fc 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -347,6 +347,7 @@ static void create_counter(int counter, int cpu, pid_t pid) attr->mmap = track; attr->comm = track; attr->inherit = (cpu < 0) && inherit; + attr->disabled = 1; track = 0; /* only the first counter needs these */ @@ -402,6 +403,8 @@ try_again: error("failed to mmap with %d (%s)\n", errno, strerror(errno)); exit(-1); } + + ioctl(fd[nr_cpu][counter], PERF_COUNTER_IOC_ENABLE); } static void open_counters(int cpu, pid_t pid) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 0b18cb99a85..9a0e31e79e9 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -47,6 +47,7 @@ struct ip_event { struct perf_event_header header; __u64 ip; __u32 pid, tid; + __u64 period; }; struct mmap_event { @@ -943,12 +944,13 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head) uint64_t ip = event->ip.ip; struct map *map = NULL; - dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", + dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p period: %Ld\n", (void *)(offset + head), (void *)(long)(event->header.size), event->header.misc, event->ip.pid, - (void *)(long)ip); + (void *)(long)ip, + (long long)event->ip.period); dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid); -- cgit v1.2.3 From f7b7c26e01e51fe46097e11f179dc71ce7950084 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 10 Jun 2009 15:55:59 +0200 Subject: perf_counter tools: Propagate signals properly Currently record and stat catch SIGINT (and others) without altering their exit state. This means that things like: while :; do perf stat ./foo ; done become hard to interrupt, because bash never sees perf terminate due to a signal. Fix this.
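Why re-raising the signal at exit matters (illustrative): the parent shell decides whether to abort a loop based on the child's wait status, and only death-by-signal distinguishes it from a normal exit:

	int status;

	waitpid(child, &status, 0);
	if (WIFSIGNALED(status) && WTERMSIG(status) == SIGINT) {
		/* bash aborts the while loop here */
	}

Catching SIGINT and then calling exit() produces a plain WIFEXITED status, which is why the loop above never stops.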
Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 12 ++++++++++++ tools/perf/builtin-stat.c | 13 +++++++++++++ 2 files changed, 25 insertions(+) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index a5698add2fc..c10553c3460 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -169,10 +169,21 @@ static void mmap_read(struct mmap_data *md) } static volatile int done = 0; +static volatile int signr = -1; static void sig_handler(int sig) { done = 1; + signr = sig; +} + +static void sig_atexit(void) +{ + if (signr == -1) + return; + + signal(signr, SIG_DFL); + kill(getpid(), signr); } static void pid_synthesize_comm_event(pid_t pid, int full) @@ -459,6 +470,7 @@ static int __cmd_record(int argc, const char **argv) } else for (i = 0; i < nr_cpus; i++) open_counters(i, target_pid); + atexit(sig_atexit); signal(SIGCHLD, sig_handler); signal(SIGINT, sig_handler); diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 80855090fd9..6404906924f 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -296,8 +296,20 @@ static int do_perf_stat(int argc, const char **argv) return 0; } +static volatile int signr = -1; + static void skip_signal(int signo) { + signr = signo; +} + +static void sig_atexit(void) +{ + if (signr == -1) + return; + + signal(signr, SIG_DFL); + kill(getpid(), signr); } static const char * const stat_usage[] = { @@ -345,6 +357,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix) * What we want is for Ctrl-C to work in the exec()-ed * task, but being ignored by perf stat itself: */ + atexit(sig_atexit); signal(SIGINT, skip_signal); signal(SIGALRM, skip_signal); signal(SIGABRT, skip_signal); -- cgit v1.2.3 From 66fff22483d8542dfb4d61a28d21277bbde321e8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 10 Jun 2009 22:53:37 +0200 Subject: perf_counter: Annotate exit ctx recursion Ever since Paul fixed it to unclone the context before taking the ctx->lock, this lockdep warning became a false positive; annotate it away. Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_counter.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 51c571ee4d0..ae591a1275a 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -3879,7 +3879,18 @@ void perf_counter_exit_task(struct task_struct *child) spin_unlock(&child_ctx->lock); local_irq_restore(flags); - mutex_lock(&child_ctx->mutex); + /* + * We can recurse on the same lock type through: + * + * __perf_counter_exit_task() + * sync_child_counter() + * fput(parent_counter->filp) + * perf_release() + * mutex_lock(&ctx->mutex) + * + * But since its the parent context it won't be the same instance. + */ + mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING); again: list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list, -- cgit v1.2.3 From ea1900e571d40a3ce60c835c2f21e1fd8c5cb663 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 10 Jun 2009 21:45:22 +0200 Subject: perf_counter tools: Normalize data using per sample period data When we use variable period sampling, add the period to the sample data and use that to normalize the samples.
Signed-off-by: Peter Zijlstra Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 3 ++- tools/perf/builtin-report.c | 18 +++++++++++------- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index c10553c3460..919f23ca419 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -350,8 +350,9 @@ static void create_counter(int counter, int cpu, pid_t pid) struct perf_counter_attr *attr = attrs + counter; int track = 1; - attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD; + attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; if (freq) { + attr->sample_type |= PERF_SAMPLE_PERIOD; attr->freq = 1; attr->sample_freq = freq; } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 9a0e31e79e9..f57fd5c5531 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -456,7 +456,7 @@ struct hist_entry { uint64_t ip; char level; - uint32_t count; + uint64_t count; }; /* @@ -726,7 +726,7 @@ hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples) static int hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, - struct symbol *sym, uint64_t ip, char level) + struct symbol *sym, uint64_t ip, char level, uint64_t count) { struct rb_node **p = &hist.rb_node; struct rb_node *parent = NULL; @@ -738,7 +738,7 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, .sym = sym, .ip = ip, .level = level, - .count = 1, + .count = count, }; int cmp; @@ -749,7 +749,7 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, cmp = hist_entry__cmp(&entry, he); if (!cmp) { - he->count++; + he->count += count; return 0; } @@ -942,15 +942,19 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head) struct dso *dso = NULL; struct thread *thread = threads__findnew(event->ip.pid); uint64_t ip = event->ip.ip; + uint64_t period = 1; struct map *map = NULL; + if (event->header.type & PERF_SAMPLE_PERIOD) + period = event->ip.period; + dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p period: %Ld\n", (void *)(offset + head), (void *)(long)(event->header.size), event->header.misc, event->ip.pid, (void *)(long)ip, - (long long)event->ip.period); + (long long)period); dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid); @@ -1001,13 +1005,13 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head) if (dso) sym = dso->find_symbol(dso, ip); - if (hist_entry__add(thread, map, dso, sym, ip, level)) { + if (hist_entry__add(thread, map, dso, sym, ip, level, period)) { fprintf(stderr, "problem incrementing symbol count, skipping event\n"); return -1; } } - total++; + total += period; return 0; } -- cgit v1.2.3 From df1a132bf3d3508f863336c80a27806a2ac947e0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 10 Jun 2009 21:02:22 +0200 Subject: perf_counter: Introduce struct for sample data For easy extension of the sample data, put it in a structure. 
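Roughly, instead of growing the argument list of every overflow path:

	int perf_counter_overflow(struct perf_counter *counter, int nmi,
				  struct pt_regs *regs, u64 addr);

callers fill in a struct and pass a pointer, so a future field only touches the code that produces or consumes it (sketch):

	struct perf_sample_data data = {
		.regs	= regs,
		.addr	= 0,
	};

	if (perf_counter_overflow(counter, 1, &data))
		/* throttle the counter */ ;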
Signed-off-by: Peter Zijlstra Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 10 +++++++--- arch/x86/kernel/cpu/perf_counter.c | 15 +++++++++++---- include/linux/perf_counter.h | 10 ++++++++-- kernel/perf_counter.c | 38 ++++++++++++++++++++++---------------- 4 files changed, 48 insertions(+), 25 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 4786ad9a288..5e0bf399c43 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -1001,7 +1001,11 @@ static void record_and_restart(struct perf_counter *counter, long val, * Finally record data if requested. */ if (record) { - addr = 0; + struct perf_sample_data data = { + .regs = regs, + .addr = 0, + }; + if (counter->attr.sample_type & PERF_SAMPLE_ADDR) { /* * The user wants a data address recorded. @@ -1016,9 +1020,9 @@ static void record_and_restart(struct perf_counter *counter, long val, sdsync = (ppmu->flags & PPMU_ALT_SIPR) ? POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC; if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync)) - addr = mfspr(SPRN_SDAR); + data.addr = mfspr(SPRN_SDAR); } - if (perf_counter_overflow(counter, nmi, regs, addr)) { + if (perf_counter_overflow(counter, nmi, &data)) { /* * Interrupts are coming too fast - throttle them * by setting the counter to 0, so it will be diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 240ca563063..82a23d487f9 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -1173,11 +1173,14 @@ static void intel_pmu_reset(void) */ static int intel_pmu_handle_irq(struct pt_regs *regs) { + struct perf_sample_data data; struct cpu_hw_counters *cpuc; - struct cpu_hw_counters; int bit, cpu, loops; u64 ack, status; + data.regs = regs; + data.addr = 0; + cpu = smp_processor_id(); cpuc = &per_cpu(cpu_hw_counters, cpu); @@ -1210,7 +1213,7 @@ again: if (!intel_pmu_save_and_restart(counter)) continue; - if (perf_counter_overflow(counter, 1, regs, 0)) + if (perf_counter_overflow(counter, 1, &data)) intel_pmu_disable_counter(&counter->hw, bit); } @@ -1230,12 +1233,16 @@ again: static int amd_pmu_handle_irq(struct pt_regs *regs) { - int cpu, idx, handled = 0; + struct perf_sample_data data; struct cpu_hw_counters *cpuc; struct perf_counter *counter; struct hw_perf_counter *hwc; + int cpu, idx, handled = 0; u64 val; + data.regs = regs; + data.addr = 0; + cpu = smp_processor_id(); cpuc = &per_cpu(cpu_hw_counters, cpu); @@ -1256,7 +1263,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs) if (!x86_perf_counter_set_period(counter, hwc, idx)) continue; - if (perf_counter_overflow(counter, 1, regs, 0)) + if (perf_counter_overflow(counter, 1, &data)) amd_pmu_disable_counter(hwc, idx); } diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 282d8cc4898..d8c0eb480f9 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -605,8 +605,14 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader, struct perf_counter_context *ctx, int cpu); extern void perf_counter_update_userpage(struct perf_counter *counter); -extern int perf_counter_overflow(struct perf_counter *counter, - int nmi, struct pt_regs *regs, u64 addr); +struct perf_sample_data { + struct pt_regs *regs; + u64 addr; +}; + +extern int perf_counter_overflow(struct perf_counter *counter, int nmi, + struct perf_sample_data 
*data); + /* * Return 1 for a software counter, 0 for a hardware counter */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index ae591a1275a..4fe85e804f4 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2378,8 +2378,8 @@ static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p) return task_pid_nr_ns(p, counter->ns); } -static void perf_counter_output(struct perf_counter *counter, - int nmi, struct pt_regs *regs, u64 addr) +static void perf_counter_output(struct perf_counter *counter, int nmi, + struct perf_sample_data *data) { int ret; u64 sample_type = counter->attr.sample_type; @@ -2404,10 +2404,10 @@ static void perf_counter_output(struct perf_counter *counter, header.size = sizeof(header); header.misc = PERF_EVENT_MISC_OVERFLOW; - header.misc |= perf_misc_flags(regs); + header.misc |= perf_misc_flags(data->regs); if (sample_type & PERF_SAMPLE_IP) { - ip = perf_instruction_pointer(regs); + ip = perf_instruction_pointer(data->regs); header.type |= PERF_SAMPLE_IP; header.size += sizeof(ip); } @@ -2460,7 +2460,7 @@ static void perf_counter_output(struct perf_counter *counter, } if (sample_type & PERF_SAMPLE_CALLCHAIN) { - callchain = perf_callchain(regs); + callchain = perf_callchain(data->regs); if (callchain) { callchain_size = (1 + callchain->nr) * sizeof(u64); @@ -2486,7 +2486,7 @@ static void perf_counter_output(struct perf_counter *counter, perf_output_put(&handle, time); if (sample_type & PERF_SAMPLE_ADDR) - perf_output_put(&handle, addr); + perf_output_put(&handle, data->addr); if (sample_type & PERF_SAMPLE_ID) perf_output_put(&handle, counter->id); @@ -2950,8 +2950,8 @@ static void perf_log_throttle(struct perf_counter *counter, int enable) * Generic counter overflow handling. */ -int perf_counter_overflow(struct perf_counter *counter, - int nmi, struct pt_regs *regs, u64 addr) +int perf_counter_overflow(struct perf_counter *counter, int nmi, + struct perf_sample_data *data) { int events = atomic_read(&counter->event_limit); int throttle = counter->pmu->unthrottle != NULL; @@ -3005,7 +3005,7 @@ int perf_counter_overflow(struct perf_counter *counter, perf_counter_disable(counter); } - perf_counter_output(counter, nmi, regs, addr); + perf_counter_output(counter, nmi, data); return ret; } @@ -3054,24 +3054,25 @@ static void perf_swcounter_set_period(struct perf_counter *counter) static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) { enum hrtimer_restart ret = HRTIMER_RESTART; + struct perf_sample_data data; struct perf_counter *counter; - struct pt_regs *regs; u64 period; counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); counter->pmu->read(counter); - regs = get_irq_regs(); + data.addr = 0; + data.regs = get_irq_regs(); /* * In case we exclude kernel IPs or are somehow not in interrupt * context, provide the next best thing, the user IP. 
*/ - if ((counter->attr.exclude_kernel || !regs) && + if ((counter->attr.exclude_kernel || !data.regs) && !counter->attr.exclude_user) - regs = task_pt_regs(current); + data.regs = task_pt_regs(current); - if (regs) { - if (perf_counter_overflow(counter, 0, regs, 0)) + if (data.regs) { + if (perf_counter_overflow(counter, 0, &data)) ret = HRTIMER_NORESTART; } @@ -3084,9 +3085,14 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) static void perf_swcounter_overflow(struct perf_counter *counter, int nmi, struct pt_regs *regs, u64 addr) { + struct perf_sample_data data = { + .regs = regs, + .addr = addr, + }; + perf_swcounter_update(counter); perf_swcounter_set_period(counter); - if (perf_counter_overflow(counter, nmi, regs, addr)) + if (perf_counter_overflow(counter, nmi, &data)) /* soft-disable the counter */ ; -- cgit v1.2.3 From 9e350de37ac9607012fcf9c5314a28fbddf8f43c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 10 Jun 2009 21:34:59 +0200 Subject: perf_counter: Accurate period data We currently log hw.sample_period for PERF_SAMPLE_PERIOD, however this is incorrect. When we adjust the period, it will only take effect the next cycle but report it for the current cycle. So when we adjust the period for every cycle, we're always wrong. Solve this by keeping track of the last_period. Signed-off-by: Peter Zijlstra Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 9 ++++++--- arch/x86/kernel/cpu/perf_counter.c | 15 ++++++++++++--- include/linux/perf_counter.h | 6 ++++-- kernel/perf_counter.c | 9 ++++++--- 4 files changed, 28 insertions(+), 11 deletions(-) diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 5e0bf399c43..4990ce2e5f0 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -767,6 +767,7 @@ static void power_pmu_unthrottle(struct perf_counter *counter) perf_disable(); power_pmu_read(counter); left = counter->hw.sample_period; + counter->hw.last_period = left; val = 0; if (left < 0x80000000L) val = 0x80000000L - left; @@ -937,7 +938,8 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter) counter->hw.config = events[n]; counter->hw.counter_base = cflags[n]; - atomic64_set(&counter->hw.period_left, counter->hw.sample_period); + counter->hw.last_period = counter->hw.sample_period; + atomic64_set(&counter->hw.period_left, counter->hw.last_period); /* * See if we need to reserve the PMU. 
@@ -1002,8 +1004,9 @@ static void record_and_restart(struct perf_counter *counter, long val, */ if (record) { struct perf_sample_data data = { - .regs = regs, - .addr = 0, + .regs = regs, + .addr = 0, + .period = counter->hw.last_period, }; if (counter->attr.sample_type & PERF_SAMPLE_ADDR) { diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 82a23d487f9..57ae1bec81b 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -698,6 +698,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter) if (!hwc->sample_period) { hwc->sample_period = x86_pmu.max_period; + hwc->last_period = hwc->sample_period; atomic64_set(&hwc->period_left, hwc->sample_period); } @@ -880,12 +881,14 @@ x86_perf_counter_set_period(struct perf_counter *counter, if (unlikely(left <= -period)) { left = period; atomic64_set(&hwc->period_left, left); + hwc->last_period = period; ret = 1; } if (unlikely(left <= 0)) { left += period; atomic64_set(&hwc->period_left, left); + hwc->last_period = period; ret = 1; } /* @@ -1257,9 +1260,12 @@ static int amd_pmu_handle_irq(struct pt_regs *regs) if (val & (1ULL << (x86_pmu.counter_bits - 1))) continue; - /* counter overflow */ - handled = 1; - inc_irq_stat(apic_perf_irqs); + /* + * counter overflow + */ + handled = 1; + data.period = counter->hw.last_period; + if (!x86_perf_counter_set_period(counter, hwc, idx)) continue; @@ -1267,6 +1273,9 @@ static int amd_pmu_handle_irq(struct pt_regs *regs) amd_pmu_disable_counter(hwc, idx); } + if (handled) + inc_irq_stat(apic_perf_irqs); + return handled; } diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index d8c0eb480f9..5b966472b45 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -366,6 +366,7 @@ struct hw_perf_counter { }; atomic64_t prev_count; u64 sample_period; + u64 last_period; atomic64_t period_left; u64 interrupts; @@ -606,8 +607,9 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader, extern void perf_counter_update_userpage(struct perf_counter *counter); struct perf_sample_data { - struct pt_regs *regs; - u64 addr; + struct pt_regs *regs; + u64 addr; + u64 period; }; extern int perf_counter_overflow(struct perf_counter *counter, int nmi, diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 4fe85e804f4..8b89b40bd0f 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2495,7 +2495,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, perf_output_put(&handle, cpu_entry); if (sample_type & PERF_SAMPLE_PERIOD) - perf_output_put(&handle, counter->hw.sample_period); + perf_output_put(&handle, data->period); /* * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult. 
@@ -3040,11 +3040,13 @@ static void perf_swcounter_set_period(struct perf_counter *counter) if (unlikely(left <= -period)) { left = period; atomic64_set(&hwc->period_left, left); + hwc->last_period = period; } if (unlikely(left <= 0)) { left += period; atomic64_add(period, &hwc->period_left); + hwc->last_period = period; } atomic64_set(&hwc->prev_count, -left); @@ -3086,8 +3088,9 @@ static void perf_swcounter_overflow(struct perf_counter *counter, int nmi, struct pt_regs *regs, u64 addr) { struct perf_sample_data data = { - .regs = regs, - .addr = addr, + .regs = regs, + .addr = addr, + .period = counter->hw.last_period, }; perf_swcounter_update(counter); -- cgit v1.2.3 From 4da52960fd1ae3ddd14901bc88b608cbeaa4b9a6 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 11 Jun 2009 14:54:01 +1000 Subject: perf_counters: powerpc: Add support for POWER7 processors This adds the back-end for the PMU on POWER7 processors. POWER7 has 4 fully-programmable counters and two fixed-function counters (which do respect the freeze conditions, can generate interrupts, and are writable, unlike PMC5/6 on POWER5+/6). Signed-off-by: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <18992.36329.189378.17992@drongo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/Makefile | 3 +- arch/powerpc/kernel/perf_counter.c | 4 + arch/powerpc/kernel/power7-pmu.c | 316 +++++++++++++++++++++++++++++++++++++ 3 files changed, 322 insertions(+), 1 deletion(-) create mode 100644 arch/powerpc/kernel/power7-pmu.c diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 9ba1bb731fc..a2c683403c2 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -95,7 +95,8 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o power4-pmu.o ppc970-pmu.o \ - power5-pmu.o power5+-pmu.o power6-pmu.o + power5-pmu.o power5+-pmu.o power6-pmu.o \ + power7-pmu.o obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 4990ce2e5f0..5d12e68aac1 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -1181,6 +1181,7 @@ extern struct power_pmu ppc970_pmu; extern struct power_pmu power5_pmu; extern struct power_pmu power5p_pmu; extern struct power_pmu power6_pmu; +extern struct power_pmu power7_pmu; static int init_perf_counters(void) { @@ -1207,6 +1208,9 @@ static int init_perf_counters(void) case 0x3e: ppmu = &power6_pmu; break; + case 0x3f: + ppmu = &power7_pmu; + break; } /* diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c new file mode 100644 index 00000000000..dfac48d8ff4 --- /dev/null +++ b/arch/powerpc/kernel/power7-pmu.c @@ -0,0 +1,316 @@ +/* + * Performance counter support for POWER7 processors. + * + * Copyright 2009 Paul Mackerras, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include +#include +#include + +/* + * Bits in event code for POWER7 + */ +#define PM_PMC_SH 16 /* PMC number (1-based) for direct events */ +#define PM_PMC_MSK 0xf +#define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) +#define PM_UNIT_SH 12 /* TTMMUX number and setting - unit select */ +#define PM_UNIT_MSK 0xf +#define PM_COMBINE_SH 11 /* Combined event bit */ +#define PM_COMBINE_MSK 1 +#define PM_COMBINE_MSKS 0x800 +#define PM_L2SEL_SH 8 /* L2 event select */ +#define PM_L2SEL_MSK 7 +#define PM_PMCSEL_MSK 0xff + +/* + * Bits in MMCR1 for POWER7 + */ +#define MMCR1_TTM0SEL_SH 60 +#define MMCR1_TTM1SEL_SH 56 +#define MMCR1_TTM2SEL_SH 52 +#define MMCR1_TTM3SEL_SH 48 +#define MMCR1_TTMSEL_MSK 0xf +#define MMCR1_L2SEL_SH 45 +#define MMCR1_L2SEL_MSK 7 +#define MMCR1_PMC1_COMBINE_SH 35 +#define MMCR1_PMC2_COMBINE_SH 34 +#define MMCR1_PMC3_COMBINE_SH 33 +#define MMCR1_PMC4_COMBINE_SH 32 +#define MMCR1_PMC1SEL_SH 24 +#define MMCR1_PMC2SEL_SH 16 +#define MMCR1_PMC3SEL_SH 8 +#define MMCR1_PMC4SEL_SH 0 +#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) +#define MMCR1_PMCSEL_MSK 0xff + +/* + * Bits in MMCRA + */ + +/* + * Layout of constraint bits: + * 6666555555555544444444443333333333222222222211111111110000000000 + * 3210987654321098765432109876543210987654321098765432109876543210 + * [ ><><><><><><> + * NC P6P5P4P3P2P1 + * + * NC - number of counters + * 15: NC error 0x8000 + * 12-14: number of events needing PMC1-4 0x7000 + * + * P6 + * 11: P6 error 0x800 + * 10-11: Count of events needing PMC6 + * + * P1..P5 + * 0-9: Count of events needing PMC1..PMC5 + */ + +static int power7_get_constraint(u64 event, u64 *maskp, u64 *valp) +{ + int pmc, sh; + u64 mask = 0, value = 0; + + pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; + if (pmc) { + if (pmc > 6) + return -1; + sh = (pmc - 1) * 2; + mask |= 2 << sh; + value |= 1 << sh; + if (pmc >= 5 && !(event == 0x500fa || event == 0x600f4)) + return -1; + } + if (pmc < 5) { + /* need a counter from PMC1-4 set */ + mask |= 0x8000; + value |= 0x1000; + } + *maskp = mask; + *valp = value; + return 0; +} + +#define MAX_ALT 2 /* at most 2 alternatives for any event */ + +static const unsigned int event_alternatives[][MAX_ALT] = { + { 0x200f2, 0x300f2 }, /* PM_INST_DISP */ + { 0x200f4, 0x600f4 }, /* PM_RUN_CYC */ + { 0x400fa, 0x500fa }, /* PM_RUN_INST_CMPL */ +}; + +/* + * Scan the alternatives table for a match and return the + * index into the alternatives table if found, else -1. 
+ */ +static int find_alternative(u64 event) +{ + int i, j; + + for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { + if (event < event_alternatives[i][0]) + break; + for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j) + if (event == event_alternatives[i][j]) + return i; + } + return -1; +} + +static s64 find_alternative_decode(u64 event) +{ + int pmc, psel; + + /* this only handles the 4x decode events */ + pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; + psel = event & PM_PMCSEL_MSK; + if ((pmc == 2 || pmc == 4) && (psel & ~7) == 0x40) + return event - (1 << PM_PMC_SH) + 8; + if ((pmc == 1 || pmc == 3) && (psel & ~7) == 0x48) + return event + (1 << PM_PMC_SH) - 8; + return -1; +} + +static int power7_get_alternatives(u64 event, unsigned int flags, u64 alt[]) +{ + int i, j, nalt = 1; + s64 ae; + + alt[0] = event; + nalt = 1; + i = find_alternative(event); + if (i >= 0) { + for (j = 0; j < MAX_ALT; ++j) { + ae = event_alternatives[i][j]; + if (ae && ae != event) + alt[nalt++] = ae; + } + } else { + ae = find_alternative_decode(event); + if (ae > 0) + alt[nalt++] = ae; + } + + if (flags & PPMU_ONLY_COUNT_RUN) { + /* + * We're only counting in RUN state, + * so PM_CYC is equivalent to PM_RUN_CYC + * and PM_INST_CMPL === PM_RUN_INST_CMPL. + * This doesn't include alternatives that don't provide + * any extra flexibility in assigning PMCs. + */ + j = nalt; + for (i = 0; i < nalt; ++i) { + switch (alt[i]) { + case 0x1e: /* PM_CYC */ + alt[j++] = 0x600f4; /* PM_RUN_CYC */ + break; + case 0x600f4: /* PM_RUN_CYC */ + alt[j++] = 0x1e; + break; + case 0x2: /* PM_PPC_CMPL */ + alt[j++] = 0x500fa; /* PM_RUN_INST_CMPL */ + break; + case 0x500fa: /* PM_RUN_INST_CMPL */ + alt[j++] = 0x2; /* PM_PPC_CMPL */ + break; + } + } + nalt = j; + } + + return nalt; +} + +/* + * Returns 1 if event counts things relating to marked instructions + * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. 
+ */ +static int power7_marked_instr_event(u64 event) +{ + int pmc, psel; + int unit; + + pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; + unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; + psel = event & PM_PMCSEL_MSK & ~1; /* trim off edge/level bit */ + if (pmc >= 5) + return 0; + + switch (psel >> 4) { + case 2: + return pmc == 2 || pmc == 4; + case 3: + if (psel == 0x3c) + return pmc == 1; + if (psel == 0x3e) + return pmc != 2; + return 1; + case 4: + case 5: + return unit == 0xd; + case 6: + if (psel == 0x64) + return pmc >= 3; + case 8: + return unit == 0xd; + } + return 0; +} + +static int power7_compute_mmcr(u64 event[], int n_ev, + unsigned int hwc[], u64 mmcr[]) +{ + u64 mmcr1 = 0; + u64 mmcra = 0; + unsigned int pmc, unit, combine, l2sel, psel; + unsigned int pmc_inuse = 0; + int i; + + /* First pass to count resource use */ + for (i = 0; i < n_ev; ++i) { + pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; + if (pmc) { + if (pmc > 6) + return -1; + if (pmc_inuse & (1 << (pmc - 1))) + return -1; + pmc_inuse |= 1 << (pmc - 1); + } + } + + /* Second pass: assign PMCs, set all MMCR1 fields */ + for (i = 0; i < n_ev; ++i) { + pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; + unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; + combine = (event[i] >> PM_COMBINE_SH) & PM_COMBINE_MSK; + l2sel = (event[i] >> PM_L2SEL_SH) & PM_L2SEL_MSK; + psel = event[i] & PM_PMCSEL_MSK; + if (!pmc) { + /* Bus event or any-PMC direct event */ + for (pmc = 0; pmc < 4; ++pmc) { + if (!(pmc_inuse & (1 << pmc))) + break; + } + if (pmc >= 4) + return -1; + pmc_inuse |= 1 << pmc; + } else { + /* Direct or decoded event */ + --pmc; + } + if (pmc <= 3) { + mmcr1 |= (u64) unit << (MMCR1_TTM0SEL_SH - 4 * pmc); + mmcr1 |= (u64) combine << (MMCR1_PMC1_COMBINE_SH - pmc); + mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc); + if (unit == 6) /* L2 events */ + mmcr1 |= (u64) l2sel << MMCR1_L2SEL_SH; + } + if (power7_marked_instr_event(event[i])) + mmcra |= MMCRA_SAMPLE_ENABLE; + hwc[i] = pmc; + } + + /* Return MMCRx values */ + mmcr[0] = 0; + if (pmc_inuse & 1) + mmcr[0] = MMCR0_PMC1CE; + if (pmc_inuse & 0x3e) + mmcr[0] |= MMCR0_PMCjCE; + mmcr[1] = mmcr1; + mmcr[2] = mmcra; + return 0; +} + +static void power7_disable_pmc(unsigned int pmc, u64 mmcr[]) +{ + if (pmc <= 3) + mmcr[1] &= ~(0xffULL << MMCR1_PMCSEL_SH(pmc)); +} + +static int power7_generic_events[] = { + [PERF_COUNT_CPU_CYCLES] = 0x1e, + [PERF_COUNT_INSTRUCTIONS] = 2, + [PERF_COUNT_CACHE_REFERENCES] = 0xc880, /* LD_REF_L1_LSU */ + [PERF_COUNT_CACHE_MISSES] = 0x400f0, /* LD_MISS_L1 */ + [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x10068, /* BRU_FIN */ + [PERF_COUNT_BRANCH_MISSES] = 0x400f6, /* BR_MPRED */ +}; + +struct power_pmu power7_pmu = { + .n_counter = 6, + .max_alternatives = MAX_ALT + 1, + .add_fields = 0x1555ull, + .test_adder = 0x3000ull, + .compute_mmcr = power7_compute_mmcr, + .get_constraint = power7_get_constraint, + .get_alternatives = power7_get_alternatives, + .disable_pmc = power7_disable_pmc, + .n_generic = ARRAY_SIZE(power7_generic_events), + .generic_events = power7_generic_events, +}; -- cgit v1.2.3 From 106b506c3a8b74daa5751e83ed3e46438fcf9a52 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 11 Jun 2009 14:55:42 +1000 Subject: perf_counter: powerpc: Implement generalized cache events for POWER processors This adds tables of event codes for the generalized cache events for all the currently supported powerpc processors: POWER{4,5,5+,6,7} and PPC970*, plus powerpc-specific code to use these tables when a generalized cache event is requested. 
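A generalized cache event is requested by packing three byte-wide fields into attr.config - cache type, operation and result - which the powerpc back-end decodes and looks up in the per-CPU tables below. For example, L1D read misses (sketch):

	u64 config = PERF_COUNT_HW_CACHE_L1D |
		     (PERF_COUNT_HW_CACHE_OP_READ << 8) |
		     (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);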
Signed-off-by: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <18992.36430.933526.742969@drongo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/perf_counter.h | 3 +++ arch/powerpc/kernel/perf_counter.c | 42 ++++++++++++++++++++++++++++-- arch/powerpc/kernel/power4-pmu.c | 41 +++++++++++++++++++++++++++++ arch/powerpc/kernel/power5+-pmu.c | 45 ++++++++++++++++++++++++++++++-- arch/powerpc/kernel/power5-pmu.c | 41 +++++++++++++++++++++++++++++ arch/powerpc/kernel/power6-pmu.c | 46 +++++++++++++++++++++++++++++++-- arch/powerpc/kernel/power7-pmu.c | 41 +++++++++++++++++++++++++++++ arch/powerpc/kernel/ppc970-pmu.c | 41 +++++++++++++++++++++++++++++ 8 files changed, 294 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index 1c60f0ca792..cc7c887705b 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -33,6 +33,9 @@ struct power_pmu { u32 flags; int n_generic; int *generic_events; + int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; }; extern struct power_pmu *ppmu; diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 5d12e68aac1..bb202388170 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -856,6 +856,36 @@ static void hw_perf_counter_destroy(struct perf_counter *counter) } } +/* + * Translate a generic cache event config to a raw event code. + */ +static int hw_perf_cache_event(u64 config, u64 *eventp) +{ + unsigned long type, op, result; + int ev; + + if (!ppmu->cache_events) + return -EINVAL; + + /* unpack config */ + type = config & 0xff; + op = (config >> 8) & 0xff; + result = (config >> 16) & 0xff; + + if (type >= PERF_COUNT_HW_CACHE_MAX || + op >= PERF_COUNT_HW_CACHE_OP_MAX || + result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + ev = (*ppmu->cache_events)[type][op][result]; + if (ev == 0) + return -EOPNOTSUPP; + if (ev == -1) + return -EINVAL; + *eventp = ev; + return 0; +} + const struct pmu *hw_perf_counter_init(struct perf_counter *counter) { u64 ev; @@ -868,13 +898,21 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter) if (!ppmu) return ERR_PTR(-ENXIO); - if (counter->attr.type != PERF_TYPE_RAW) { + switch (counter->attr.type) { + case PERF_TYPE_HARDWARE: ev = counter->attr.config; if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) return ERR_PTR(-EOPNOTSUPP); ev = ppmu->generic_events[ev]; - } else { + break; + case PERF_TYPE_HW_CACHE: + err = hw_perf_cache_event(counter->attr.config, &ev); + if (err) + return ERR_PTR(err); + break; + case PERF_TYPE_RAW: ev = counter->attr.config; + break; } counter->hw.config_base = ev; counter->hw.idx = 0; diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c index 836fa118eb1..0e94b685722 100644 --- a/arch/powerpc/kernel/power4-pmu.c +++ b/arch/powerpc/kernel/power4-pmu.c @@ -543,6 +543,46 @@ static int p4_generic_events[] = { [PERF_COUNT_BRANCH_MISSES] = 0x331, /* PM_BR_MPRED_CR */ }; +#define C(x) PERF_COUNT_HW_CACHE_##x + +/* + * Table of generalized cache-related events. + * 0 means not supported, -1 means nonsensical, other values + * are event codes. 
+ */ +static int power4_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { + [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0x8c10, 0x3c10 }, + [C(OP_WRITE)] = { 0x7c10, 0xc13 }, + [C(OP_PREFETCH)] = { 0xc35, 0 }, + }, + [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { 0, 0 }, + }, + [C(L2)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0 }, + [C(OP_WRITE)] = { 0, 0 }, + [C(OP_PREFETCH)] = { 0xc34, 0 }, + }, + [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0x904 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, + [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0x900 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, + [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0x330, 0x331 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, +}; + struct power_pmu power4_pmu = { .n_counter = 8, .max_alternatives = 5, @@ -554,4 +594,5 @@ struct power_pmu power4_pmu = { .disable_pmc = p4_disable_pmc, .n_generic = ARRAY_SIZE(p4_generic_events), .generic_events = p4_generic_events, + .cache_events = &power4_cache_events, }; diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c index 8471e3c2e46..bbf2cbb0738 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c @@ -614,6 +614,46 @@ static int power5p_generic_events[] = { [PERF_COUNT_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */ }; +#define C(x) PERF_COUNT_HW_CACHE_##x + +/* + * Table of generalized cache-related events. + * 0 means not supported, -1 means nonsensical, other values + * are event codes. + */ +static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { + [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0x1c10a8, 0x3c1088 }, + [C(OP_WRITE)] = { 0x2c10a8, 0xc10c3 }, + [C(OP_PREFETCH)] = { 0xc70e7, -1 }, + }, + [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { 0, 0 }, + }, + [C(L2)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0 }, + [C(OP_WRITE)] = { 0, 0 }, + [C(OP_PREFETCH)] = { 0xc50c3, 0 }, + }, + [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0xc20e4, 0x800c4 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, + [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0x800c0 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, + [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0x230e4, 0x230e5 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, +}; + struct power_pmu power5p_pmu = { .n_counter = 6, .max_alternatives = MAX_ALT, @@ -623,8 +663,9 @@ struct power_pmu power5p_pmu = { .get_constraint = power5p_get_constraint, .get_alternatives = power5p_get_alternatives, .disable_pmc = power5p_disable_pmc, + .limited_pmc_event = power5p_limited_pmc_event, + .flags = PPMU_LIMITED_PMC5_6, .n_generic = ARRAY_SIZE(power5p_generic_events), .generic_events = power5p_generic_events, - .flags = PPMU_LIMITED_PMC5_6, - .limited_pmc_event = power5p_limited_pmc_event, + .cache_events = &power5p_cache_events, }; diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c index 1b44c5fca18..670cf10b91e 100644 --- a/arch/powerpc/kernel/power5-pmu.c +++ b/arch/powerpc/kernel/power5-pmu.c @@ -556,6 +556,46 @@ static int power5_generic_events[] = { 
[PERF_COUNT_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */ }; +#define C(x) PERF_COUNT_HW_CACHE_##x + +/* + * Table of generalized cache-related events. + * 0 means not supported, -1 means nonsensical, other values + * are event codes. + */ +static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { + [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0x4c1090, 0x3c1088 }, + [C(OP_WRITE)] = { 0x3c1090, 0xc10c3 }, + [C(OP_PREFETCH)] = { 0xc70e7, 0 }, + }, + [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { 0, 0 }, + }, + [C(L2)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0x3c309b }, + [C(OP_WRITE)] = { 0, 0 }, + [C(OP_PREFETCH)] = { 0xc50c3, 0 }, + }, + [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0x2c4090, 0x800c4 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, + [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0x800c0 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, + [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0x230e4, 0x230e5 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, +}; + struct power_pmu power5_pmu = { .n_counter = 6, .max_alternatives = MAX_ALT, @@ -567,4 +607,5 @@ struct power_pmu power5_pmu = { .disable_pmc = power5_disable_pmc, .n_generic = ARRAY_SIZE(power5_generic_events), .generic_events = power5_generic_events, + .cache_events = &power5_cache_events, }; diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index cd4fbe06c35..4da70786609 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c @@ -474,6 +474,47 @@ static int power6_generic_events[] = { [PERF_COUNT_BRANCH_MISSES] = 0x400052, /* BR_MPRED */ }; +#define C(x) PERF_COUNT_HW_CACHE_##x + +/* + * Table of generalized cache-related events. + * 0 means not supported, -1 means nonsensical, other values + * are event codes. + * The "DTLB" and "ITLB" events relate to the DERAT and IERAT. 
+ */ +static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { + [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0x80082, 0x80080 }, + [C(OP_WRITE)] = { 0x80086, 0x80088 }, + [C(OP_PREFETCH)] = { 0x810a4, 0 }, + }, + [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0x100056 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { 0x4008c, 0 }, + }, + [C(L2)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0x150730, 0x250532 }, + [C(OP_WRITE)] = { 0x250432, 0x150432 }, + [C(OP_PREFETCH)] = { 0x810a6, 0 }, + }, + [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0x20000e }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, + [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0x420ce }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, + [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0x430e6, 0x400052 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, +}; + struct power_pmu power6_pmu = { .n_counter = 6, .max_alternatives = MAX_ALT, @@ -483,8 +524,9 @@ struct power_pmu power6_pmu = { .get_constraint = p6_get_constraint, .get_alternatives = p6_get_alternatives, .disable_pmc = p6_disable_pmc, + .limited_pmc_event = p6_limited_pmc_event, + .flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR, .n_generic = ARRAY_SIZE(power6_generic_events), .generic_events = power6_generic_events, - .flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR, - .limited_pmc_event = p6_limited_pmc_event, + .cache_events = &power6_cache_events, }; diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c index dfac48d8ff4..060e0deb399 100644 --- a/arch/powerpc/kernel/power7-pmu.c +++ b/arch/powerpc/kernel/power7-pmu.c @@ -302,6 +302,46 @@ static int power7_generic_events[] = { [PERF_COUNT_BRANCH_MISSES] = 0x400f6, /* BR_MPRED */ }; +#define C(x) PERF_COUNT_HW_CACHE_##x + +/* + * Table of generalized cache-related events. + * 0 means not supported, -1 means nonsensical, other values + * are event codes. 
+ */ +static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { + [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0x400f0, 0xc880 }, + [C(OP_WRITE)] = { 0, 0x300f0 }, + [C(OP_PREFETCH)] = { 0xd8b8, 0 }, + }, + [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0x200fc }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { 0x408a, 0 }, + }, + [C(L2)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0x6080, 0x6084 }, + [C(OP_WRITE)] = { 0x6082, 0x6086 }, + [C(OP_PREFETCH)] = { 0, 0 }, + }, + [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0x300fc }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, + [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0x400fc }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, + [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0x10068, 0x400f6 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, +}; + struct power_pmu power7_pmu = { .n_counter = 6, .max_alternatives = MAX_ALT + 1, @@ -313,4 +353,5 @@ struct power_pmu power7_pmu = { .disable_pmc = power7_disable_pmc, .n_generic = ARRAY_SIZE(power7_generic_events), .generic_events = power7_generic_events, + .cache_events = &power7_cache_events, }; diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c index eed47c4523f..336adf1736a 100644 --- a/arch/powerpc/kernel/ppc970-pmu.c +++ b/arch/powerpc/kernel/ppc970-pmu.c @@ -427,6 +427,46 @@ static int ppc970_generic_events[] = { [PERF_COUNT_BRANCH_MISSES] = 0x327, /* PM_GRP_BR_MPRED */ }; +#define C(x) PERF_COUNT_HW_CACHE_##x + +/* + * Table of generalized cache-related events. + * 0 means not supported, -1 means nonsensical, other values + * are event codes. 
+ */ +static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { + [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0x8810, 0x3810 }, + [C(OP_WRITE)] = { 0x7810, 0x813 }, + [C(OP_PREFETCH)] = { 0x731, 0 }, + }, + [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { 0, 0 }, + }, + [C(L2)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0 }, + [C(OP_WRITE)] = { 0, 0 }, + [C(OP_PREFETCH)] = { 0x733, 0 }, + }, + [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0x704 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, + [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0, 0x700 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, + [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { 0x431, 0x327 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, +}; + struct power_pmu ppc970_pmu = { .n_counter = 8, .max_alternatives = 2, @@ -438,4 +478,5 @@ struct power_pmu ppc970_pmu = { .disable_pmc = p970_disable_pmc, .n_generic = ARRAY_SIZE(ppc970_generic_events), .generic_events = ppc970_generic_events, + .cache_events = &ppc970_cache_events, }; -- cgit v1.2.3 From 0764771dab80d7b84b9a271bee7f1b21a04a3f0c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 11 Jun 2009 11:18:36 +0200 Subject: perf_counter: More paranoia settings Rename the perf_counter_priv knob to perf_counter_paranoid (because priv can be read as private, as opposed to privileged) and provide one more level: 0 - permissive 1 - restrict cpu counters to privileged contexts 2 - restrict kernel-mode code counting and profiling Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 2 +- kernel/perf_counter.c | 25 +++++++++++++++++++++++-- kernel/sysctl.c | 6 +++--- 3 files changed, 27 insertions(+), 6 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 5b966472b45..386be915baa 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -648,7 +648,7 @@ struct perf_callchain_entry { extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); -extern int sysctl_perf_counter_priv; +extern int sysctl_perf_counter_paranoid; extern int sysctl_perf_counter_mlock; extern int sysctl_perf_counter_limit; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 8b89b40bd0f..63f1987c1c1 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -43,7 +43,23 @@ static atomic_t nr_counters __read_mostly; static atomic_t nr_mmap_counters __read_mostly; static atomic_t nr_comm_counters __read_mostly; -int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */ +/* + * 0 - not paranoid + * 1 - disallow cpu counters to unpriv + * 2 - disallow kernel profiling to unpriv + */ +int sysctl_perf_counter_paranoid __read_mostly; /* do we need to be privileged */ + +static inline bool perf_paranoid_cpu(void) +{ + return sysctl_perf_counter_paranoid > 0; +} + +static inline bool perf_paranoid_kernel(void) +{ + return sysctl_perf_counter_paranoid > 1; +} + int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */ int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */ @@ -1385,7 +1401,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu) */ if (cpu != -1) { /* Must be root to operate on a
CPU counter: */ - if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN)) + if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) return ERR_PTR(-EACCES); if (cpu < 0 || cpu > num_possible_cpus()) @@ -3618,6 +3634,11 @@ SYSCALL_DEFINE5(perf_counter_open, if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0) return -EFAULT; + if (!attr.exclude_kernel) { + if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) + return -EACCES; + } + /* * Get the target context (task or percpu): */ diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 0c4bf863afa..344a65981de 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -916,9 +916,9 @@ static struct ctl_table kern_table[] = { #ifdef CONFIG_PERF_COUNTERS { .ctl_name = CTL_UNNUMBERED, - .procname = "perf_counter_privileged", - .data = &sysctl_perf_counter_priv, - .maxlen = sizeof(sysctl_perf_counter_priv), + .procname = "perf_counter_paranoid", + .data = &sysctl_perf_counter_paranoid, + .maxlen = sizeof(sysctl_perf_counter_paranoid), .mode = 0644, .proc_handler = &proc_dointvec, }, -- cgit v1.2.3 From df58ab24bf26b166874bfb18b3b5a2e0a8e63179 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 11 Jun 2009 11:25:05 +0200 Subject: perf_counter: Rename perf_counter_limit sysctl Rename perf_counter_limit to perf_counter_max_sample_rate and prohibit creation of counters with a known higher sample frequency. Signed-off-by: Peter Zijlstra Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 2 +- kernel/perf_counter.c | 27 +++++++++++++++++++-------- kernel/sysctl.c | 6 +++--- 3 files changed, 23 insertions(+), 12 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 386be915baa..95c797c480e 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -650,7 +650,7 @@ extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); extern int sysctl_perf_counter_paranoid; extern int sysctl_perf_counter_mlock; -extern int sysctl_perf_counter_limit; +extern int sysctl_perf_counter_sample_rate; extern void perf_counter_init(void); diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 63f1987c1c1..3b2829de559 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -44,11 +44,12 @@ static atomic_t nr_mmap_counters __read_mostly; static atomic_t nr_comm_counters __read_mostly; /* - * 0 - not paranoid - * 1 - disallow cpu counters to unpriv - * 2 - disallow kernel profiling to unpriv + * perf counter paranoia level: + * 0 - not paranoid + * 1 - disallow cpu counters to unpriv + * 2 - disallow kernel profiling to unpriv */ -int sysctl_perf_counter_paranoid __read_mostly; /* do we need to be privileged */ +int sysctl_perf_counter_paranoid __read_mostly; static inline bool perf_paranoid_cpu(void) { @@ -61,7 +62,11 @@ static inline bool perf_paranoid_kernel(void) } int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */ -int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */ + +/* + * max perf counter sample rate + */ +int sysctl_perf_counter_sample_rate __read_mostly = 100000; static atomic64_t perf_counter_id; @@ -1244,7 +1249,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx) if (interrupts == MAX_INTERRUPTS) { perf_log_throttle(counter, 1); counter->pmu->unthrottle(counter); - interrupts = 2*sysctl_perf_counter_limit/HZ; + interrupts = 2*sysctl_perf_counter_sample_rate/HZ; } if (!counter->attr.freq || 
!counter->attr.sample_freq) @@ -1682,7 +1687,7 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg) spin_lock_irq(&ctx->lock); if (counter->attr.freq) { - if (value > sysctl_perf_counter_limit) { + if (value > sysctl_perf_counter_sample_rate) { ret = -EINVAL; goto unlock; } @@ -2979,7 +2984,8 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi, } else { if (hwc->interrupts != MAX_INTERRUPTS) { hwc->interrupts++; - if (HZ * hwc->interrupts > (u64)sysctl_perf_counter_limit) { + if (HZ * hwc->interrupts > + (u64)sysctl_perf_counter_sample_rate) { hwc->interrupts = MAX_INTERRUPTS; perf_log_throttle(counter, 0); ret = 1; @@ -3639,6 +3645,11 @@ SYSCALL_DEFINE5(perf_counter_open, return -EACCES; } + if (attr.freq) { + if (attr.sample_freq > sysctl_perf_counter_sample_rate) + return -EINVAL; + } + /* * Get the target context (task or percpu): */ diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 344a65981de..9fd4e436b69 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -932,9 +932,9 @@ static struct ctl_table kern_table[] = { }, { .ctl_name = CTL_UNNUMBERED, - .procname = "perf_counter_int_limit", - .data = &sysctl_perf_counter_limit, - .maxlen = sizeof(sysctl_perf_counter_limit), + .procname = "perf_counter_max_sample_rate", + .data = &sysctl_perf_counter_sample_rate, + .maxlen = sizeof(sysctl_perf_counter_sample_rate), .mode = 0644, .proc_handler = &proc_dointvec, }, -- cgit v1.2.3 From 729ff5e2aaf181f5d3ab849337fce406cd19b1d9 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 11 Jun 2009 14:16:15 +0200 Subject: perf_counter tools: Clean up u64 usage MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A build error slipped in: builtin-report.c: In function ‘hist_entry__fprintf’: builtin-report.c:711: error: format ‘%12d’ expects type ‘int’, but argument 3 has type ‘uint64_t’ Because we got a bit sloppy with those types. uint64_t really sucks, because there's no printf format for it. So standardize on __u64 instead - for all types that go to or come from the ABI (which is __u64), or for values that need to be large enough even on 32-bit. 
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 32 ++++++++++++++++---------------- tools/perf/builtin-record.c | 4 ++-- tools/perf/builtin-report.c | 36 ++++++++++++++++++------------------ tools/perf/builtin-top.c | 8 ++++---- tools/perf/util/symbol.c | 10 +++++----- tools/perf/util/symbol.h | 4 ++-- 6 files changed, 47 insertions(+), 47 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 3334a8bb1d5..b1ed5f766cb 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -145,7 +145,7 @@ static void dsos__fprintf(FILE *fp) dso__fprintf(pos, fp); } -static struct symbol *vdso__find_symbol(struct dso *dso, uint64_t ip) +static struct symbol *vdso__find_symbol(struct dso *dso, __u64 ip) { return dso__find_symbol(kernel_dso, ip); } @@ -178,19 +178,19 @@ static int load_kernel(void) struct map { struct list_head node; - uint64_t start; - uint64_t end; - uint64_t pgoff; - uint64_t (*map_ip)(struct map *, uint64_t); + __u64 start; + __u64 end; + __u64 pgoff; + __u64 (*map_ip)(struct map *, __u64); struct dso *dso; }; -static uint64_t map__map_ip(struct map *map, uint64_t ip) +static __u64 map__map_ip(struct map *map, __u64 ip) { return ip - map->start + map->pgoff; } -static uint64_t vdso__map_ip(struct map *map, uint64_t ip) +static __u64 vdso__map_ip(struct map *map, __u64 ip) { return ip; } @@ -249,7 +249,7 @@ static int map__overlap(struct map *l, struct map *r) static size_t map__fprintf(struct map *self, FILE *fp) { - return fprintf(fp, " %"PRIx64"-%"PRIx64" %"PRIx64" %s\n", + return fprintf(fp, " %Lx-%Lx %Lx %s\n", self->start, self->end, self->pgoff, self->dso->name); } @@ -373,7 +373,7 @@ static int thread__fork(struct thread *self, struct thread *parent) return 0; } -static struct map *thread__find_map(struct thread *self, uint64_t ip) +static struct map *thread__find_map(struct thread *self, __u64 ip) { struct map *pos; @@ -414,7 +414,7 @@ struct hist_entry { struct map *map; struct dso *dso; struct symbol *sym; - uint64_t ip; + __u64 ip; char level; uint32_t count; @@ -533,7 +533,7 @@ static struct sort_entry sort_dso = { static int64_t sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) { - uint64_t ip_l, ip_r; + __u64 ip_l, ip_r; if (left->sym == right->sym) return 0; @@ -647,7 +647,7 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) /* * collect histogram counts */ -static void hist_hit(struct hist_entry *he, uint64_t ip) +static void hist_hit(struct hist_entry *he, __u64 ip) { unsigned int sym_size, offset; struct symbol *sym = he->sym; @@ -676,7 +676,7 @@ static void hist_hit(struct hist_entry *he, uint64_t ip) static int hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, - struct symbol *sym, uint64_t ip, char level) + struct symbol *sym, __u64 ip, char level) { struct rb_node **p = &hist.rb_node; struct rb_node *parent = NULL; @@ -848,7 +848,7 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head) int show = 0; struct dso *dso = NULL; struct thread *thread = threads__findnew(event->ip.pid); - uint64_t ip = event->ip.ip; + __u64 ip = event->ip.ip; struct map *map = NULL; dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", @@ -1031,7 +1031,7 @@ process_event(event_t *event, unsigned long offset, unsigned long head) } static int -parse_line(FILE *file, struct symbol *sym, uint64_t start, uint64_t len) 
+parse_line(FILE *file, struct symbol *sym, __u64 start, __u64 len) { char *line = NULL, *tmp, *tmp2; unsigned int offset; @@ -1112,7 +1112,7 @@ parse_line(FILE *file, struct symbol *sym, uint64_t start, uint64_t len) static void annotate_sym(struct dso *dso, struct symbol *sym) { char *filename = dso->name; - uint64_t start, end, len; + __u64 start, end, len; char command[PATH_MAX*2]; FILE *file; diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 919f23ca419..84cd336ae79 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -223,7 +223,7 @@ static void pid_synthesize_comm_event(pid_t pid, int full) comm_ev.pid = pid; comm_ev.header.type = PERF_EVENT_COMM; - size = ALIGN(size, sizeof(uint64_t)); + size = ALIGN(size, sizeof(__u64)); comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size); if (!full) { @@ -304,7 +304,7 @@ static void pid_synthesize_mmap_samples(pid_t pid) size = strlen(execname); execname[size - 1] = '\0'; /* Remove \n */ memcpy(mmap_ev.filename, execname, size); - size = ALIGN(size, sizeof(uint64_t)); + size = ALIGN(size, sizeof(__u64)); mmap_ev.len -= mmap_ev.start; mmap_ev.header.size = (sizeof(mmap_ev) - (sizeof(mmap_ev.filename) - size)); diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index f57fd5c5531..82fa93b4db9 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -146,7 +146,7 @@ static void dsos__fprintf(FILE *fp) dso__fprintf(pos, fp); } -static struct symbol *vdso__find_symbol(struct dso *dso, uint64_t ip) +static struct symbol *vdso__find_symbol(struct dso *dso, __u64 ip) { return dso__find_symbol(kernel_dso, ip); } @@ -193,19 +193,19 @@ static int strcommon(const char *pathname) struct map { struct list_head node; - uint64_t start; - uint64_t end; - uint64_t pgoff; - uint64_t (*map_ip)(struct map *, uint64_t); + __u64 start; + __u64 end; + __u64 pgoff; + __u64 (*map_ip)(struct map *, __u64); struct dso *dso; }; -static uint64_t map__map_ip(struct map *map, uint64_t ip) +static __u64 map__map_ip(struct map *map, __u64 ip) { return ip - map->start + map->pgoff; } -static uint64_t vdso__map_ip(struct map *map, uint64_t ip) +static __u64 vdso__map_ip(struct map *map, __u64 ip) { return ip; } @@ -288,7 +288,7 @@ static int map__overlap(struct map *l, struct map *r) static size_t map__fprintf(struct map *self, FILE *fp) { - return fprintf(fp, " %"PRIx64"-%"PRIx64" %"PRIx64" %s\n", + return fprintf(fp, " %Lx-%Lx %Lx %s\n", self->start, self->end, self->pgoff, self->dso->name); } @@ -412,7 +412,7 @@ static int thread__fork(struct thread *self, struct thread *parent) return 0; } -static struct map *thread__find_map(struct thread *self, uint64_t ip) +static struct map *thread__find_map(struct thread *self, __u64 ip) { struct map *pos; @@ -453,10 +453,10 @@ struct hist_entry { struct map *map; struct dso *dso; struct symbol *sym; - uint64_t ip; + __u64 ip; char level; - uint64_t count; + __u64 count; }; /* @@ -572,7 +572,7 @@ static struct sort_entry sort_dso = { static int64_t sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) { - uint64_t ip_l, ip_r; + __u64 ip_l, ip_r; if (left->sym == right->sym) return 0; @@ -684,7 +684,7 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) } static size_t -hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples) +hist_entry__fprintf(FILE *fp, struct hist_entry *self, __u64 total_samples) { struct sort_entry *se; size_t ret; @@ -708,7 +708,7 @@ 
hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples) ret = color_fprintf(fp, color, " %6.2f%%", (self->count * 100.0) / total_samples); } else - ret = fprintf(fp, "%12d ", self->count); + ret = fprintf(fp, "%12Ld ", self->count); list_for_each_entry(se, &hist_entry__sort_list, list) { fprintf(fp, " "); @@ -726,7 +726,7 @@ hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples) static int hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, - struct symbol *sym, uint64_t ip, char level, uint64_t count) + struct symbol *sym, __u64 ip, char level, __u64 count) { struct rb_node **p = &hist.rb_node; struct rb_node *parent = NULL; @@ -873,7 +873,7 @@ static void output__resort(void) } } -static size_t output__fprintf(FILE *fp, uint64_t total_samples) +static size_t output__fprintf(FILE *fp, __u64 total_samples) { struct hist_entry *pos; struct sort_entry *se; @@ -941,8 +941,8 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head) int show = 0; struct dso *dso = NULL; struct thread *thread = threads__findnew(event->ip.pid); - uint64_t ip = event->ip.ip; - uint64_t period = 1; + __u64 ip = event->ip.ip; + __u64 period = 1; struct map *map = NULL; if (event->header.type & PERF_SAMPLE_PERIOD) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 8ba24808a39..309dbc76ec8 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -79,8 +79,8 @@ static int dump_symtab; * Symbols */ -static uint64_t min_ip; -static uint64_t max_ip = -1ll; +static __u64 min_ip; +static __u64 max_ip = -1ll; struct sym_entry { struct rb_node rb_node; @@ -372,7 +372,7 @@ out_delete_dso: /* * Binary search in the histogram table and record the hit: */ -static void record_ip(uint64_t ip, int counter) +static void record_ip(__u64 ip, int counter) { struct symbol *sym = dso__find_symbol(kernel_dso, ip); @@ -392,7 +392,7 @@ static void record_ip(uint64_t ip, int counter) samples--; } -static void process_event(uint64_t ip, int counter) +static void process_event(__u64 ip, int counter) { samples++; diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 32dd47d60d9..49a55f81371 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -9,9 +9,9 @@ const char *sym_hist_filter; -static struct symbol *symbol__new(uint64_t start, uint64_t len, +static struct symbol *symbol__new(__u64 start, __u64 len, const char *name, unsigned int priv_size, - uint64_t obj_start, int verbose) + __u64 obj_start, int verbose) { size_t namelen = strlen(name) + 1; struct symbol *self = calloc(1, priv_size + sizeof(*self) + namelen); @@ -89,7 +89,7 @@ static void dso__insert_symbol(struct dso *self, struct symbol *sym) { struct rb_node **p = &self->syms.rb_node; struct rb_node *parent = NULL; - const uint64_t ip = sym->start; + const __u64 ip = sym->start; struct symbol *s; while (*p != NULL) { @@ -104,7 +104,7 @@ static void dso__insert_symbol(struct dso *self, struct symbol *sym) rb_insert_color(&sym->rb_node, &self->syms); } -struct symbol *dso__find_symbol(struct dso *self, uint64_t ip) +struct symbol *dso__find_symbol(struct dso *self, __u64 ip) { struct rb_node *n; @@ -523,7 +523,7 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, elf_symtab__for_each_symbol(syms, nr_syms, index, sym) { struct symbol *f; - uint64_t obj_start; + __u64 obj_start; if (!elf_sym__is_function(&sym)) continue; diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 
4839d68f14f..0d1292bd827 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -19,7 +19,7 @@ struct dso { struct list_head node; struct rb_root syms; unsigned int sym_priv_size; - struct symbol *(*find_symbol)(struct dso *, uint64_t ip); + struct symbol *(*find_symbol)(struct dso *, __u64 ip); char name[0]; }; @@ -35,7 +35,7 @@ static inline void *dso__sym_priv(struct dso *self, struct symbol *sym) return ((void *)sym) - self->sym_priv_size; } -struct symbol *dso__find_symbol(struct dso *self, uint64_t ip); +struct symbol *dso__find_symbol(struct dso *self, __u64 ip); int dso__load_kernel(struct dso *self, const char *vmlinux, symbol_filter_t filter, int verbose); -- cgit v1.2.3 From 1c432d899d32d36371ee4ee310fa3609cf0e5742 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 11 Jun 2009 13:19:29 +0200 Subject: perf_counter: Rename enums Rename the perf enums to be in the 'perf_' namespace and strictly enumerate the ABI bits. Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 53 +++++++++++++++++++++----------------------- kernel/perf_counter.c | 6 ++--- 2 files changed, 28 insertions(+), 31 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 95c797c480e..d5911b02bc8 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -24,24 +24,21 @@ /* * attr.type */ -enum perf_event_types { +enum perf_type_id { PERF_TYPE_HARDWARE = 0, PERF_TYPE_SOFTWARE = 1, PERF_TYPE_TRACEPOINT = 2, PERF_TYPE_HW_CACHE = 3, + PERF_TYPE_RAW = 4, - /* - * available TYPE space, raw is the max value. - */ - - PERF_TYPE_RAW = 128, + PERF_TYPE_MAX, /* non ABI */ }; /* * Generalized performance counter event types, used by the attr.event_id * parameter of the sys_perf_counter_open() syscall: */ -enum attr_ids { +enum perf_hw_id { /* * Common hardware events, generalized by the kernel: */ @@ -53,7 +50,7 @@ enum attr_ids { PERF_COUNT_BRANCH_MISSES = 5, PERF_COUNT_BUS_CYCLES = 6, - PERF_HW_EVENTS_MAX = 7, + PERF_HW_EVENTS_MAX, /* non ABI */ }; /* @@ -63,30 +60,30 @@ enum attr_ids { * { read, write, prefetch } x * { accesses, misses } */ -enum hw_cache_id { - PERF_COUNT_HW_CACHE_L1D, - PERF_COUNT_HW_CACHE_L1I, - PERF_COUNT_HW_CACHE_L2, - PERF_COUNT_HW_CACHE_DTLB, - PERF_COUNT_HW_CACHE_ITLB, - PERF_COUNT_HW_CACHE_BPU, - - PERF_COUNT_HW_CACHE_MAX, +enum perf_hw_cache_id { + PERF_COUNT_HW_CACHE_L1D = 0, + PERF_COUNT_HW_CACHE_L1I = 1, + PERF_COUNT_HW_CACHE_L2 = 2, + PERF_COUNT_HW_CACHE_DTLB = 3, + PERF_COUNT_HW_CACHE_ITLB = 4, + PERF_COUNT_HW_CACHE_BPU = 5, + + PERF_COUNT_HW_CACHE_MAX, /* non ABI */ }; -enum hw_cache_op_id { - PERF_COUNT_HW_CACHE_OP_READ, - PERF_COUNT_HW_CACHE_OP_WRITE, - PERF_COUNT_HW_CACHE_OP_PREFETCH, +enum perf_hw_cache_op_id { + PERF_COUNT_HW_CACHE_OP_READ = 0, + PERF_COUNT_HW_CACHE_OP_WRITE = 1, + PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, - PERF_COUNT_HW_CACHE_OP_MAX, + PERF_COUNT_HW_CACHE_OP_MAX, /* non ABI */ }; -enum hw_cache_op_result_id { - PERF_COUNT_HW_CACHE_RESULT_ACCESS, - PERF_COUNT_HW_CACHE_RESULT_MISS, +enum perf_hw_cache_op_result_id { + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, + PERF_COUNT_HW_CACHE_RESULT_MISS = 1, - PERF_COUNT_HW_CACHE_RESULT_MAX, + PERF_COUNT_HW_CACHE_RESULT_MAX, /* non ABI */ }; /* @@ -95,7 +92,7 @@ enum hw_cache_op_result_id { * physical and sw events of the kernel (and allow the profiling of them as * well): */ -enum sw_event_ids { +enum perf_sw_ids { PERF_COUNT_CPU_CLOCK 
= 0, PERF_COUNT_TASK_CLOCK = 1, PERF_COUNT_PAGE_FAULTS = 2, @@ -104,7 +101,7 @@ enum sw_event_ids { PERF_COUNT_PAGE_FAULTS_MIN = 5, PERF_COUNT_PAGE_FAULTS_MAJ = 6, - PERF_SW_EVENTS_MAX = 7, + PERF_SW_EVENTS_MAX, /* non ABI */ }; /* diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 3b2829de559..c02535bed26 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -3162,7 +3162,7 @@ static int perf_swcounter_is_counting(struct perf_counter *counter) } static int perf_swcounter_match(struct perf_counter *counter, - enum perf_event_types type, + enum perf_type_id type, u32 event, struct pt_regs *regs) { if (!perf_swcounter_is_counting(counter)) @@ -3194,7 +3194,7 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr, } static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, - enum perf_event_types type, u32 event, + enum perf_type_id type, u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr) { @@ -3225,7 +3225,7 @@ static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx) return &cpuctx->recursion[0]; } -static void __perf_swcounter_event(enum perf_event_types type, u32 event, +static void __perf_swcounter_event(enum perf_type_id type, u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr) { -- cgit v1.2.3 From f4dbfa8f3131a84257223393905f7efad0ca5996 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 11 Jun 2009 14:06:28 +0200 Subject: perf_counter: Standardize event names Pure renames only, to PERF_COUNT_HW_* and PERF_COUNT_SW_*. Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/power4-pmu.c | 12 +++++------ arch/powerpc/kernel/power5+-pmu.c | 12 +++++------ arch/powerpc/kernel/power5-pmu.c | 12 +++++------ arch/powerpc/kernel/power6-pmu.c | 12 +++++------ arch/powerpc/kernel/ppc970-pmu.c | 12 +++++------ arch/powerpc/mm/fault.c | 6 +++--- arch/x86/kernel/cpu/perf_counter.c | 32 +++++++++++++-------------- arch/x86/mm/fault.c | 6 +++--- include/linux/perf_counter.h | 36 +++++++++++++++---------------- kernel/perf_counter.c | 20 ++++++++--------- tools/perf/builtin-record.c | 4 ++-- tools/perf/builtin-stat.c | 31 ++++++++++++++------------- tools/perf/builtin-top.c | 4 ++-- tools/perf/design.txt | 28 ++++++++++++------------ tools/perf/util/parse-events.c | 44 +++++++++++++++++++------------------- 15 files changed, 136 insertions(+), 135 deletions(-) diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c index 0e94b685722..73956f084b2 100644 --- a/arch/powerpc/kernel/power4-pmu.c +++ b/arch/powerpc/kernel/power4-pmu.c @@ -535,12 +535,12 @@ static void p4_disable_pmc(unsigned int pmc, u64 mmcr[]) } static int p4_generic_events[] = { - [PERF_COUNT_CPU_CYCLES] = 7, - [PERF_COUNT_INSTRUCTIONS] = 0x1001, - [PERF_COUNT_CACHE_REFERENCES] = 0x8c10, /* PM_LD_REF_L1 */ - [PERF_COUNT_CACHE_MISSES] = 0x3c10, /* PM_LD_MISS_L1 */ - [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x330, /* PM_BR_ISSUED */ - [PERF_COUNT_BRANCH_MISSES] = 0x331, /* PM_BR_MPRED_CR */ + [PERF_COUNT_HW_CPU_CYCLES] = 7, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x1001, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x8c10, /* PM_LD_REF_L1 */ + [PERF_COUNT_HW_CACHE_MISSES] = 0x3c10, /* PM_LD_MISS_L1 */ + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x330, /* PM_BR_ISSUED */ + [PERF_COUNT_HW_BRANCH_MISSES] = 0x331, /* PM_BR_MPRED_CR */ }; #define C(x) PERF_COUNT_HW_CACHE_##x diff --git a/arch/powerpc/kernel/power5+-pmu.c 
b/arch/powerpc/kernel/power5+-pmu.c index bbf2cbb0738..5f8b7741e97 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c @@ -606,12 +606,12 @@ static void power5p_disable_pmc(unsigned int pmc, u64 mmcr[]) } static int power5p_generic_events[] = { - [PERF_COUNT_CPU_CYCLES] = 0xf, - [PERF_COUNT_INSTRUCTIONS] = 0x100009, - [PERF_COUNT_CACHE_REFERENCES] = 0x1c10a8, /* LD_REF_L1 */ - [PERF_COUNT_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */ - [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */ - [PERF_COUNT_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */ + [PERF_COUNT_HW_CPU_CYCLES] = 0xf, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x100009, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x1c10a8, /* LD_REF_L1 */ + [PERF_COUNT_HW_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */ + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */ + [PERF_COUNT_HW_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */ }; #define C(x) PERF_COUNT_HW_CACHE_##x diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c index 670cf10b91e..d54723ab627 100644 --- a/arch/powerpc/kernel/power5-pmu.c +++ b/arch/powerpc/kernel/power5-pmu.c @@ -548,12 +548,12 @@ static void power5_disable_pmc(unsigned int pmc, u64 mmcr[]) } static int power5_generic_events[] = { - [PERF_COUNT_CPU_CYCLES] = 0xf, - [PERF_COUNT_INSTRUCTIONS] = 0x100009, - [PERF_COUNT_CACHE_REFERENCES] = 0x4c1090, /* LD_REF_L1 */ - [PERF_COUNT_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */ - [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */ - [PERF_COUNT_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */ + [PERF_COUNT_HW_CPU_CYCLES] = 0xf, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x100009, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4c1090, /* LD_REF_L1 */ + [PERF_COUNT_HW_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */ + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */ + [PERF_COUNT_HW_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */ }; #define C(x) PERF_COUNT_HW_CACHE_##x diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index 4da70786609..0cd406ee765 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c @@ -466,12 +466,12 @@ static void p6_disable_pmc(unsigned int pmc, u64 mmcr[]) } static int power6_generic_events[] = { - [PERF_COUNT_CPU_CYCLES] = 0x1e, - [PERF_COUNT_INSTRUCTIONS] = 2, - [PERF_COUNT_CACHE_REFERENCES] = 0x280030, /* LD_REF_L1 */ - [PERF_COUNT_CACHE_MISSES] = 0x30000c, /* LD_MISS_L1 */ - [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x410a0, /* BR_PRED */ - [PERF_COUNT_BRANCH_MISSES] = 0x400052, /* BR_MPRED */ + [PERF_COUNT_HW_CPU_CYCLES] = 0x1e, + [PERF_COUNT_HW_INSTRUCTIONS] = 2, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x280030, /* LD_REF_L1 */ + [PERF_COUNT_HW_CACHE_MISSES] = 0x30000c, /* LD_MISS_L1 */ + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x410a0, /* BR_PRED */ + [PERF_COUNT_HW_BRANCH_MISSES] = 0x400052, /* BR_MPRED */ }; #define C(x) PERF_COUNT_HW_CACHE_##x diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c index 336adf1736a..46a20640942 100644 --- a/arch/powerpc/kernel/ppc970-pmu.c +++ b/arch/powerpc/kernel/ppc970-pmu.c @@ -419,12 +419,12 @@ static void p970_disable_pmc(unsigned int pmc, u64 mmcr[]) } static int ppc970_generic_events[] = { - [PERF_COUNT_CPU_CYCLES] = 7, - [PERF_COUNT_INSTRUCTIONS] = 1, - [PERF_COUNT_CACHE_REFERENCES] = 0x8810, /* PM_LD_REF_L1 */ - [PERF_COUNT_CACHE_MISSES] = 0x3810, /* PM_LD_MISS_L1 */ - [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x431, /* PM_BR_ISSUED */ - [PERF_COUNT_BRANCH_MISSES] = 0x327, /* PM_GRP_BR_MPRED 
*/ + [PERF_COUNT_HW_CPU_CYCLES] = 7, + [PERF_COUNT_HW_INSTRUCTIONS] = 1, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x8810, /* PM_LD_REF_L1 */ + [PERF_COUNT_HW_CACHE_MISSES] = 0x3810, /* PM_LD_MISS_L1 */ + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x431, /* PM_BR_ISSUED */ + [PERF_COUNT_HW_BRANCH_MISSES] = 0x327, /* PM_GRP_BR_MPRED */ }; #define C(x) PERF_COUNT_HW_CACHE_##x diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index ac0e112031b..5beffc8f481 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -171,7 +171,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, die("Weird page fault", regs, SIGSEGV); } - perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs, address); + perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); /* When running in the kernel we expect faults to occur only to * addresses in user space. All other faults represent errors in the @@ -312,7 +312,7 @@ good_area: } if (ret & VM_FAULT_MAJOR) { current->maj_flt++; - perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MAJ, 1, 0, + perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, address); #ifdef CONFIG_PPC_SMLPAR if (firmware_has_feature(FW_FEATURE_CMO)) { @@ -323,7 +323,7 @@ good_area: #endif } else { current->min_flt++; - perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MIN, 1, 0, + perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address); } up_read(&mm->mmap_sem); diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 57ae1bec81b..572fb434a66 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -69,13 +69,13 @@ static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { */ static const u64 intel_perfmon_event_map[] = { - [PERF_COUNT_CPU_CYCLES] = 0x003c, - [PERF_COUNT_INSTRUCTIONS] = 0x00c0, - [PERF_COUNT_CACHE_REFERENCES] = 0x4f2e, - [PERF_COUNT_CACHE_MISSES] = 0x412e, - [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x00c4, - [PERF_COUNT_BRANCH_MISSES] = 0x00c5, - [PERF_COUNT_BUS_CYCLES] = 0x013c, + [PERF_COUNT_HW_CPU_CYCLES] = 0x003c, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e, + [PERF_COUNT_HW_CACHE_MISSES] = 0x412e, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, + [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, }; static u64 intel_pmu_event_map(int event) @@ -485,12 +485,12 @@ static const u64 amd_0f_hw_cache_event_ids */ static const u64 amd_perfmon_event_map[] = { - [PERF_COUNT_CPU_CYCLES] = 0x0076, - [PERF_COUNT_INSTRUCTIONS] = 0x00c0, - [PERF_COUNT_CACHE_REFERENCES] = 0x0080, - [PERF_COUNT_CACHE_MISSES] = 0x0081, - [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x00c4, - [PERF_COUNT_BRANCH_MISSES] = 0x00c5, + [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, + [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, }; static u64 amd_pmu_event_map(int event) @@ -970,11 +970,11 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) event = hwc->config & ARCH_PERFMON_EVENT_MASK; - if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS))) + if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) return X86_PMC_IDX_FIXED_INSTRUCTIONS; - if (unlikely(event == x86_pmu.event_map(PERF_COUNT_CPU_CYCLES))) + if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) return X86_PMC_IDX_FIXED_CPU_CYCLES; - if 
(unlikely(event == x86_pmu.event_map(PERF_COUNT_BUS_CYCLES))) + if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES))) return X86_PMC_IDX_FIXED_BUS_CYCLES; return -1; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 6f9df2babe4..5c6d816f30b 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1045,7 +1045,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) if (unlikely(error_code & PF_RSVD)) pgtable_bad(regs, error_code, address); - perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs, address); + perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); /* * If we're in an interrupt, have no user context or are running @@ -1142,11 +1142,11 @@ good_area: if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MAJ, 1, 0, + perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, address); } else { tsk->min_flt++; - perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MIN, 1, 0, + perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address); } diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index d5911b02bc8..887df88a9c2 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -42,15 +42,15 @@ enum perf_hw_id { /* * Common hardware events, generalized by the kernel: */ - PERF_COUNT_CPU_CYCLES = 0, - PERF_COUNT_INSTRUCTIONS = 1, - PERF_COUNT_CACHE_REFERENCES = 2, - PERF_COUNT_CACHE_MISSES = 3, - PERF_COUNT_BRANCH_INSTRUCTIONS = 4, - PERF_COUNT_BRANCH_MISSES = 5, - PERF_COUNT_BUS_CYCLES = 6, - - PERF_HW_EVENTS_MAX, /* non ABI */ + PERF_COUNT_HW_CPU_CYCLES = 0, + PERF_COUNT_HW_INSTRUCTIONS = 1, + PERF_COUNT_HW_CACHE_REFERENCES = 2, + PERF_COUNT_HW_CACHE_MISSES = 3, + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_HW_BRANCH_MISSES = 5, + PERF_COUNT_HW_BUS_CYCLES = 6, + + PERF_COUNT_HW_MAX, /* non ABI */ }; /* @@ -93,15 +93,15 @@ enum perf_hw_cache_op_result_id { * well): */ enum perf_sw_ids { - PERF_COUNT_CPU_CLOCK = 0, - PERF_COUNT_TASK_CLOCK = 1, - PERF_COUNT_PAGE_FAULTS = 2, - PERF_COUNT_CONTEXT_SWITCHES = 3, - PERF_COUNT_CPU_MIGRATIONS = 4, - PERF_COUNT_PAGE_FAULTS_MIN = 5, - PERF_COUNT_PAGE_FAULTS_MAJ = 6, - - PERF_SW_EVENTS_MAX, /* non ABI */ + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + + PERF_COUNT_SW_MAX, /* non ABI */ }; /* diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index c02535bed26..8859b97390e 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -1024,7 +1024,7 @@ void perf_counter_task_sched_out(struct task_struct *task, int do_switch = 1; regs = task_pt_regs(task); - perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0); + perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0); if (likely(!ctx || !cpuctx->task_ctx)) return; @@ -3411,13 +3411,13 @@ void perf_counter_task_migration(struct task_struct *task, int cpu) struct perf_counter_context *ctx; perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE, - PERF_COUNT_CPU_MIGRATIONS, + PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0); ctx = perf_pin_task_context(task); if (ctx) { perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE, - PERF_COUNT_CPU_MIGRATIONS, + PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0); perf_unpin_context(ctx); } @@ -3475,11 +3475,11 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter 
*counter) * events. */ switch (counter->attr.config) { - case PERF_COUNT_CPU_CLOCK: + case PERF_COUNT_SW_CPU_CLOCK: pmu = &perf_ops_cpu_clock; break; - case PERF_COUNT_TASK_CLOCK: + case PERF_COUNT_SW_TASK_CLOCK: /* * If the user instantiates this as a per-cpu counter, * use the cpu_clock counter instead. @@ -3490,11 +3490,11 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) pmu = &perf_ops_cpu_clock; break; - case PERF_COUNT_PAGE_FAULTS: - case PERF_COUNT_PAGE_FAULTS_MIN: - case PERF_COUNT_PAGE_FAULTS_MAJ: - case PERF_COUNT_CONTEXT_SWITCHES: - case PERF_COUNT_CPU_MIGRATIONS: + case PERF_COUNT_SW_PAGE_FAULTS: + case PERF_COUNT_SW_PAGE_FAULTS_MIN: + case PERF_COUNT_SW_PAGE_FAULTS_MAJ: + case PERF_COUNT_SW_CONTEXT_SWITCHES: + case PERF_COUNT_SW_CPU_MIGRATIONS: pmu = &perf_ops_generic; break; } diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 84cd336ae79..29259e74dcf 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -378,12 +378,12 @@ try_again: * is always available even if no PMU support: */ if (attr->type == PERF_TYPE_HARDWARE - && attr->config == PERF_COUNT_CPU_CYCLES) { + && attr->config == PERF_COUNT_HW_CPU_CYCLES) { if (verbose) warning(" ... trying to fall back to cpu-clock-ticks\n"); attr->type = PERF_TYPE_SOFTWARE; - attr->config = PERF_COUNT_CPU_CLOCK; + attr->config = PERF_COUNT_SW_CPU_CLOCK; goto try_again; } printf("\n"); diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 6404906924f..c43e4a97dc4 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -46,15 +46,16 @@ static struct perf_counter_attr default_attrs[MAX_COUNTERS] = { - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_TASK_CLOCK }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_CONTEXT_SWITCHES }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_CPU_MIGRATIONS }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_PAGE_FAULTS }, - - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CPU_CYCLES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_INSTRUCTIONS }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CACHE_REFERENCES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CACHE_MISSES }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES}, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, + + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES}, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, + }; static int system_wide = 0; @@ -120,10 +121,10 @@ static inline int nsec_counter(int counter) if (attrs[counter].type != PERF_TYPE_SOFTWARE) return 0; - if (attrs[counter].config == PERF_COUNT_CPU_CLOCK) + if (attrs[counter].config == PERF_COUNT_SW_CPU_CLOCK) return 1; - if (attrs[counter].config == PERF_COUNT_TASK_CLOCK) + if (attrs[counter].config == PERF_COUNT_SW_TASK_CLOCK) return 1; return 0; @@ -176,10 +177,10 @@ static void read_counter(int counter) * Save the full runtime - to allow normalization during printout: */ if (attrs[counter].type == PERF_TYPE_SOFTWARE && - attrs[counter].config == PERF_COUNT_TASK_CLOCK) + attrs[counter].config == PERF_COUNT_SW_TASK_CLOCK) runtime_nsecs = count[0]; if 
(attrs[counter].type == PERF_TYPE_HARDWARE && - attrs[counter].config == PERF_COUNT_CPU_CYCLES) + attrs[counter].config == PERF_COUNT_HW_CPU_CYCLES) runtime_cycles = count[0]; } @@ -206,7 +207,7 @@ static void print_counter(int counter) fprintf(stderr, " %14.6f %-20s", msecs, event_name(counter)); if (attrs[counter].type == PERF_TYPE_SOFTWARE && - attrs[counter].config == PERF_COUNT_TASK_CLOCK) { + attrs[counter].config == PERF_COUNT_SW_TASK_CLOCK) { if (walltime_nsecs) fprintf(stderr, " # %11.3f CPU utilization factor", @@ -220,7 +221,7 @@ static void print_counter(int counter) (double)count[0]/runtime_nsecs*1000.0); if (runtime_cycles && attrs[counter].type == PERF_TYPE_HARDWARE && - attrs[counter].config == PERF_COUNT_INSTRUCTIONS) { + attrs[counter].config == PERF_COUNT_HW_INSTRUCTIONS) { fprintf(stderr, " # %1.3f per cycle", (double)count[0] / (double)runtime_cycles); diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 309dbc76ec8..fe338d3c5d7 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -562,13 +562,13 @@ try_again: * is always available even if no PMU support: */ if (attr->type == PERF_TYPE_HARDWARE - && attr->config == PERF_COUNT_CPU_CYCLES) { + && attr->config == PERF_COUNT_HW_CPU_CYCLES) { if (verbose) warning(" ... trying to fall back to cpu-clock-ticks\n"); attr->type = PERF_TYPE_SOFTWARE; - attr->config = PERF_COUNT_CPU_CLOCK; + attr->config = PERF_COUNT_SW_CPU_CLOCK; goto try_again; } printf("\n"); diff --git a/tools/perf/design.txt b/tools/perf/design.txt index d3250763dc9..860e116d979 100644 --- a/tools/perf/design.txt +++ b/tools/perf/design.txt @@ -99,13 +99,13 @@ enum hw_event_ids { /* * Common hardware events, generalized by the kernel: */ - PERF_COUNT_CPU_CYCLES = 0, - PERF_COUNT_INSTRUCTIONS = 1, - PERF_COUNT_CACHE_REFERENCES = 2, - PERF_COUNT_CACHE_MISSES = 3, - PERF_COUNT_BRANCH_INSTRUCTIONS = 4, - PERF_COUNT_BRANCH_MISSES = 5, - PERF_COUNT_BUS_CYCLES = 6, + PERF_COUNT_HW_CPU_CYCLES = 0, + PERF_COUNT_HW_INSTRUCTIONS = 1, + PERF_COUNT_HW_CACHE_REFERENCES = 2, + PERF_COUNT_HW_CACHE_MISSES = 3, + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_HW_BRANCH_MISSES = 5, + PERF_COUNT_HW_BUS_CYCLES = 6, }; These are standardized types of events that work relatively uniformly @@ -130,13 +130,13 @@ software events, selected by 'event_id': * well): */ enum sw_event_ids { - PERF_COUNT_CPU_CLOCK = 0, - PERF_COUNT_TASK_CLOCK = 1, - PERF_COUNT_PAGE_FAULTS = 2, - PERF_COUNT_CONTEXT_SWITCHES = 3, - PERF_COUNT_CPU_MIGRATIONS = 4, - PERF_COUNT_PAGE_FAULTS_MIN = 5, - PERF_COUNT_PAGE_FAULTS_MAJ = 6, + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, }; Counters of the type PERF_TYPE_TRACEPOINT are available when the ftrace event diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index f18a9a006e1..9d5f1ca50e6 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -22,26 +22,26 @@ struct event_symbol { #define CR(x, y) .type = PERF_TYPE_##x, .config = y static struct event_symbol event_symbols[] = { - { C(HARDWARE, CPU_CYCLES), "cpu-cycles", }, - { C(HARDWARE, CPU_CYCLES), "cycles", }, - { C(HARDWARE, INSTRUCTIONS), "instructions", }, - { C(HARDWARE, CACHE_REFERENCES), "cache-references", }, - { C(HARDWARE, CACHE_MISSES), "cache-misses", }, - { C(HARDWARE, BRANCH_INSTRUCTIONS), 
"branch-instructions", }, - { C(HARDWARE, BRANCH_INSTRUCTIONS), "branches", }, - { C(HARDWARE, BRANCH_MISSES), "branch-misses", }, - { C(HARDWARE, BUS_CYCLES), "bus-cycles", }, - - { C(SOFTWARE, CPU_CLOCK), "cpu-clock", }, - { C(SOFTWARE, TASK_CLOCK), "task-clock", }, - { C(SOFTWARE, PAGE_FAULTS), "page-faults", }, - { C(SOFTWARE, PAGE_FAULTS), "faults", }, - { C(SOFTWARE, PAGE_FAULTS_MIN), "minor-faults", }, - { C(SOFTWARE, PAGE_FAULTS_MAJ), "major-faults", }, - { C(SOFTWARE, CONTEXT_SWITCHES), "context-switches", }, - { C(SOFTWARE, CONTEXT_SWITCHES), "cs", }, - { C(SOFTWARE, CPU_MIGRATIONS), "cpu-migrations", }, - { C(SOFTWARE, CPU_MIGRATIONS), "migrations", }, + { C(HARDWARE, HW_CPU_CYCLES), "cpu-cycles", }, + { C(HARDWARE, HW_CPU_CYCLES), "cycles", }, + { C(HARDWARE, HW_INSTRUCTIONS), "instructions", }, + { C(HARDWARE, HW_CACHE_REFERENCES), "cache-references", }, + { C(HARDWARE, HW_CACHE_MISSES), "cache-misses", }, + { C(HARDWARE, HW_BRANCH_INSTRUCTIONS),"branch-instructions", }, + { C(HARDWARE, HW_BRANCH_INSTRUCTIONS),"branches", }, + { C(HARDWARE, HW_BRANCH_MISSES), "branch-misses", }, + { C(HARDWARE, HW_BUS_CYCLES), "bus-cycles", }, + + { C(SOFTWARE, SW_CPU_CLOCK), "cpu-clock", }, + { C(SOFTWARE, SW_TASK_CLOCK), "task-clock", }, + { C(SOFTWARE, SW_PAGE_FAULTS), "page-faults", }, + { C(SOFTWARE, SW_PAGE_FAULTS), "faults", }, + { C(SOFTWARE, SW_PAGE_FAULTS_MIN), "minor-faults", }, + { C(SOFTWARE, SW_PAGE_FAULTS_MAJ), "major-faults", }, + { C(SOFTWARE, SW_CONTEXT_SWITCHES), "context-switches", }, + { C(SOFTWARE, SW_CONTEXT_SWITCHES), "cs", }, + { C(SOFTWARE, SW_CPU_MIGRATIONS), "cpu-migrations", }, + { C(SOFTWARE, SW_CPU_MIGRATIONS), "migrations", }, }; #define __PERF_COUNTER_FIELD(config, name) \ @@ -107,7 +107,7 @@ char *event_name(int counter) switch (type) { case PERF_TYPE_HARDWARE: - if (config < PERF_HW_EVENTS_MAX) + if (config < PERF_COUNT_HW_MAX) return hw_event_names[config]; return "unknown-hardware"; @@ -136,7 +136,7 @@ char *event_name(int counter) } case PERF_TYPE_SOFTWARE: - if (config < PERF_SW_EVENTS_MAX) + if (config < PERF_COUNT_SW_MAX) return sw_event_names[config]; return "unknown-software"; -- cgit v1.2.3 From 8be6e8f3c3a13900169f1141870562d0c723b010 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 11 Jun 2009 14:19:11 +0200 Subject: perf_counter: Rename L2 to LL cache The top (fastest) and last level (biggest) caches are the most interesting ones, performance wise. 
Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: [ Fixed the Nehalem LL table to LLC Reference/Miss events ] Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/power4-pmu.c | 2 +- arch/powerpc/kernel/power5+-pmu.c | 2 +- arch/powerpc/kernel/power5-pmu.c | 2 +- arch/powerpc/kernel/power6-pmu.c | 2 +- arch/powerpc/kernel/power7-pmu.c | 2 +- arch/powerpc/kernel/ppc970-pmu.c | 2 +- arch/x86/kernel/cpu/perf_counter.c | 12 ++++++------ include/linux/perf_counter.h | 4 ++-- 8 files changed, 14 insertions(+), 14 deletions(-) diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c index 73956f084b2..07bd308a5fa 100644 --- a/arch/powerpc/kernel/power4-pmu.c +++ b/arch/powerpc/kernel/power4-pmu.c @@ -561,7 +561,7 @@ static int power4_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { 0, 0 }, }, - [C(L2)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0 }, [C(OP_WRITE)] = { 0, 0 }, [C(OP_PREFETCH)] = { 0xc34, 0 }, diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c index 5f8b7741e97..41e5d2d958d 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c @@ -632,7 +632,7 @@ static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { 0, 0 }, }, - [C(L2)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0 }, [C(OP_WRITE)] = { 0, 0 }, [C(OP_PREFETCH)] = { 0xc50c3, 0 }, diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c index d54723ab627..05600b66221 100644 --- a/arch/powerpc/kernel/power5-pmu.c +++ b/arch/powerpc/kernel/power5-pmu.c @@ -574,7 +574,7 @@ static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { 0, 0 }, }, - [C(L2)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0x3c309b }, [C(OP_WRITE)] = { 0, 0 }, [C(OP_PREFETCH)] = { 0xc50c3, 0 }, diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index 0cd406ee765..46f74bebcfd 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c @@ -493,7 +493,7 @@ static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { 0x4008c, 0 }, }, - [C(L2)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x150730, 0x250532 }, [C(OP_WRITE)] = { 0x250432, 0x150432 }, [C(OP_PREFETCH)] = { 0x810a6, 0 }, diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c index 060e0deb399..b3f7d1216ba 100644 --- a/arch/powerpc/kernel/power7-pmu.c +++ b/arch/powerpc/kernel/power7-pmu.c @@ -320,7 +320,7 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { 0x408a, 0 }, }, - [C(L2)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x6080, 0x6084 }, [C(OP_WRITE)] = { 0x6082, 0x6086 }, [C(OP_PREFETCH)] = { 0, 0 }, diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c index 46a20640942..ba0a357a89f 100644 --- a/arch/powerpc/kernel/ppc970-pmu.c +++ b/arch/powerpc/kernel/ppc970-pmu.c @@ -445,7 +445,7 @@ static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = 
{ [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { 0, 0 }, }, - [C(L2)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0 }, [C(OP_WRITE)] = { 0, 0 }, [C(OP_PREFETCH)] = { 0x733, 0 }, diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 572fb434a66..895c82e7845 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -131,7 +131,7 @@ static const u64 nehalem_hw_cache_event_ids [ C(RESULT_MISS) ] = 0x0, }, }, - [ C(L2 ) ] = { + [ C(LL ) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */ [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */ @@ -141,8 +141,8 @@ static const u64 nehalem_hw_cache_event_ids [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */ }, [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0xc024, /* L2_RQSTS.PREFETCHES */ - [ C(RESULT_MISS) ] = 0x8024, /* L2_RQSTS.PREFETCH_MISS */ + [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */ + [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */ }, }, [ C(DTLB) ] = { @@ -222,7 +222,7 @@ static const u64 core2_hw_cache_event_ids [ C(RESULT_MISS) ] = 0, }, }, - [ C(L2 ) ] = { + [ C(LL ) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */ [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */ @@ -313,7 +313,7 @@ static const u64 atom_hw_cache_event_ids [ C(RESULT_MISS) ] = 0, }, }, - [ C(L2 ) ] = { + [ C(LL ) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */ [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */ @@ -422,7 +422,7 @@ static const u64 amd_0f_hw_cache_event_ids [ C(RESULT_MISS) ] = 0, }, }, - [ C(L2 ) ] = { + [ C(LL ) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0, [ C(RESULT_MISS) ] = 0, diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 887df88a9c2..20cf5af27ad 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -56,14 +56,14 @@ enum perf_hw_id { /* * Generalized hardware cache counters: * - * { L1-D, L1-I, L2, LLC, ITLB, DTLB, BPU } x + * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x * { read, write, prefetch } x * { accesses, misses } */ enum perf_hw_cache_id { PERF_COUNT_HW_CACHE_L1D = 0, PERF_COUNT_HW_CACHE_L1I = 1, - PERF_COUNT_HW_CACHE_L2 = 2, + PERF_COUNT_HW_CACHE_LL = 2, PERF_COUNT_HW_CACHE_DTLB = 3, PERF_COUNT_HW_CACHE_ITLB = 4, PERF_COUNT_HW_CACHE_BPU = 5, -- cgit v1.2.3 From a308444ceb576d3089f9ca0dfd097eba6f1e623f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 11 Jun 2009 14:44:26 +0200 Subject: perf_counter: Better align code Whitespace and comment bits. Also update copyrights. [ Impact: cleanup ] Signed-off-by: Ingo Molnar Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: --- include/linux/perf_counter.h | 165 ++++++++++++++++++++++--------------------- 1 file changed, 85 insertions(+), 80 deletions(-) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 20cf5af27ad..1fa1a26cb1b 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -1,12 +1,13 @@ /* * Performance counters: * - * Copyright(C) 2008, Thomas Gleixner - * Copyright(C) 2008, Red Hat, Inc., Ingo Molnar + * Copyright (C) 2008-2009, Thomas Gleixner + * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar + * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra * * Data type definitions, declarations, prototypes. 
* - * Started by: Thomas Gleixner and Ingo Molnar + * Started by: Thomas Gleixner and Ingo Molnar * * For licencing details see kernel-base/COPYING */ @@ -25,18 +26,19 @@ * attr.type */ enum perf_type_id { - PERF_TYPE_HARDWARE = 0, - PERF_TYPE_SOFTWARE = 1, - PERF_TYPE_TRACEPOINT = 2, - PERF_TYPE_HW_CACHE = 3, - PERF_TYPE_RAW = 4, + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, + PERF_TYPE_TRACEPOINT = 2, + PERF_TYPE_HW_CACHE = 3, + PERF_TYPE_RAW = 4, - PERF_TYPE_MAX, /* non ABI */ + PERF_TYPE_MAX, /* non-ABI */ }; /* - * Generalized performance counter event types, used by the attr.event_id - * parameter of the sys_perf_counter_open() syscall: + * Generalized performance counter event types, used by the + * attr.event_id parameter of the sys_perf_counter_open() + * syscall: */ enum perf_hw_id { /* @@ -50,7 +52,7 @@ enum perf_hw_id { PERF_COUNT_HW_BRANCH_MISSES = 5, PERF_COUNT_HW_BUS_CYCLES = 6, - PERF_COUNT_HW_MAX, /* non ABI */ + PERF_COUNT_HW_MAX, /* non-ABI */ }; /* @@ -61,29 +63,29 @@ enum perf_hw_id { * { accesses, misses } */ enum perf_hw_cache_id { - PERF_COUNT_HW_CACHE_L1D = 0, - PERF_COUNT_HW_CACHE_L1I = 1, - PERF_COUNT_HW_CACHE_LL = 2, - PERF_COUNT_HW_CACHE_DTLB = 3, - PERF_COUNT_HW_CACHE_ITLB = 4, - PERF_COUNT_HW_CACHE_BPU = 5, - - PERF_COUNT_HW_CACHE_MAX, /* non ABI */ + PERF_COUNT_HW_CACHE_L1D = 0, + PERF_COUNT_HW_CACHE_L1I = 1, + PERF_COUNT_HW_CACHE_LL = 2, + PERF_COUNT_HW_CACHE_DTLB = 3, + PERF_COUNT_HW_CACHE_ITLB = 4, + PERF_COUNT_HW_CACHE_BPU = 5, + + PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ }; enum perf_hw_cache_op_id { - PERF_COUNT_HW_CACHE_OP_READ = 0, - PERF_COUNT_HW_CACHE_OP_WRITE = 1, - PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, + PERF_COUNT_HW_CACHE_OP_READ = 0, + PERF_COUNT_HW_CACHE_OP_WRITE = 1, + PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, - PERF_COUNT_HW_CACHE_OP_MAX, /* non ABI */ + PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ }; enum perf_hw_cache_op_result_id { PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, PERF_COUNT_HW_CACHE_RESULT_MISS = 1, - PERF_COUNT_HW_CACHE_RESULT_MAX, /* non ABI */ + PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ }; /* @@ -93,15 +95,15 @@ enum perf_hw_cache_op_result_id { * well): */ enum perf_sw_ids { - PERF_COUNT_SW_CPU_CLOCK = 0, - PERF_COUNT_SW_TASK_CLOCK = 1, - PERF_COUNT_SW_PAGE_FAULTS = 2, - PERF_COUNT_SW_CONTEXT_SWITCHES = 3, - PERF_COUNT_SW_CPU_MIGRATIONS = 4, - PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, - - PERF_COUNT_SW_MAX, /* non ABI */ + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + + PERF_COUNT_SW_MAX, /* non-ABI */ }; /* @@ -109,15 +111,15 @@ enum perf_sw_ids { * in the overflow packets. */ enum perf_counter_sample_format { - PERF_SAMPLE_IP = 1U << 0, - PERF_SAMPLE_TID = 1U << 1, - PERF_SAMPLE_TIME = 1U << 2, - PERF_SAMPLE_ADDR = 1U << 3, - PERF_SAMPLE_GROUP = 1U << 4, - PERF_SAMPLE_CALLCHAIN = 1U << 5, - PERF_SAMPLE_ID = 1U << 6, - PERF_SAMPLE_CPU = 1U << 7, - PERF_SAMPLE_PERIOD = 1U << 8, + PERF_SAMPLE_IP = 1U << 0, + PERF_SAMPLE_TID = 1U << 1, + PERF_SAMPLE_TIME = 1U << 2, + PERF_SAMPLE_ADDR = 1U << 3, + PERF_SAMPLE_GROUP = 1U << 4, + PERF_SAMPLE_CALLCHAIN = 1U << 5, + PERF_SAMPLE_ID = 1U << 6, + PERF_SAMPLE_CPU = 1U << 7, + PERF_SAMPLE_PERIOD = 1U << 8, }; /* @@ -126,9 +128,9 @@ enum perf_counter_sample_format { * in increasing order of bit value, after the counter value. 
  */
 enum perf_counter_read_format {
-	PERF_FORMAT_TOTAL_TIME_ENABLED	= 1U << 0,
-	PERF_FORMAT_TOTAL_TIME_RUNNING	= 1U << 1,
-	PERF_FORMAT_ID			= 1U << 2,
+	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
+	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
+	PERF_FORMAT_ID				= 1U << 2,
 };
 
 /*
@@ -229,12 +231,12 @@ struct perf_counter_mmap_page {
 	__u64 data_head;		/* head in the data section */
 };
 
-#define PERF_EVENT_MISC_CPUMODE_MASK	(3 << 0)
-#define PERF_EVENT_MISC_CPUMODE_UNKNOWN	(0 << 0)
-#define PERF_EVENT_MISC_KERNEL		(1 << 0)
-#define PERF_EVENT_MISC_USER		(2 << 0)
-#define PERF_EVENT_MISC_HYPERVISOR	(3 << 0)
-#define PERF_EVENT_MISC_OVERFLOW	(1 << 2)
+#define PERF_EVENT_MISC_CPUMODE_MASK		(3 << 0)
+#define PERF_EVENT_MISC_CPUMODE_UNKNOWN		(0 << 0)
+#define PERF_EVENT_MISC_KERNEL			(1 << 0)
+#define PERF_EVENT_MISC_USER			(2 << 0)
+#define PERF_EVENT_MISC_HYPERVISOR		(3 << 0)
+#define PERF_EVENT_MISC_OVERFLOW		(1 << 2)
 
 struct perf_event_header {
 	__u32 type;
@@ -351,14 +353,14 @@ struct hw_perf_counter {
 #ifdef CONFIG_PERF_COUNTERS
 	union {
 		struct { /* hardware */
-			u64 config;
-			unsigned long config_base;
-			unsigned long counter_base;
-			int idx;
+			u64		config;
+			unsigned long	config_base;
+			unsigned long	counter_base;
+			int		idx;
 		};
 		union { /* software */
-			atomic64_t count;
-			struct hrtimer hrtimer;
+			atomic64_t	count;
+			struct hrtimer	hrtimer;
 		};
 	};
 	atomic64_t prev_count;
@@ -523,37 +525,37 @@ struct perf_counter_context {
 	 * Protect the states of the counters in the list,
 	 * nr_active, and the list:
 	 */
-	spinlock_t lock;
+	spinlock_t		lock;
 	/*
 	 * Protect the list of counters. Locking either mutex or lock
 	 * is sufficient to ensure the list doesn't change; to change
 	 * the list you need to lock both the mutex and the spinlock.
 	 */
-	struct mutex mutex;
+	struct mutex		mutex;
 
-	struct list_head counter_list;
-	struct list_head event_list;
-	int nr_counters;
-	int nr_active;
-	int is_active;
-	atomic_t refcount;
-	struct task_struct *task;
+	struct list_head	counter_list;
+	struct list_head	event_list;
+	int			nr_counters;
+	int			nr_active;
+	int			is_active;
+	atomic_t		refcount;
+	struct task_struct	*task;
 
 	/*
 	 * Context clock, runs when context enabled.
 	 */
-	u64 time;
-	u64 timestamp;
+	u64			time;
+	u64			timestamp;
 
 	/*
 	 * These fields let us detect when two contexts have both
 	 * been cloned (inherited) from a common ancestor.
 	 */
-	struct perf_counter_context *parent_ctx;
-	u64 parent_gen;
-	u64 generation;
-	int pin_count;
-	struct rcu_head rcu_head;
+	struct perf_counter_context	*parent_ctx;
+	u64				parent_gen;
+	u64				generation;
+	int				pin_count;
+	struct rcu_head			rcu_head;
 };
 
 /**
@@ -604,9 +606,9 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
 
 extern void perf_counter_update_userpage(struct perf_counter *counter);
 
 struct perf_sample_data {
-	struct pt_regs *regs;
-	u64 addr;
-	u64 period;
+	struct pt_regs		*regs;
+	u64			addr;
+	u64			period;
 };
 
 extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
@@ -636,11 +638,14 @@ extern void perf_counter_fork(struct task_struct *tsk);
 
 extern void perf_counter_task_migration(struct task_struct *task, int cpu);
 
-#define MAX_STACK_DEPTH 255
+#define MAX_STACK_DEPTH		255
 
 struct perf_callchain_entry {
-	u16 nr, hv, kernel, user;
-	u64 ip[MAX_STACK_DEPTH];
+	u16	nr;
+	u16	hv;
+	u16	kernel;
+	u16	user;
+	u64	ip[MAX_STACK_DEPTH];
 };
 
 extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
-- 
cgit v1.2.3

From cca3f454a85ff42d426401bce7ac804541b2bd03 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 11 Jun 2009 14:57:55 +0200
Subject: perf_counter: Add counter->id to the throttle event

So as to be able to distinguish between multiple counters.

Signed-off-by: Peter Zijlstra
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Arnaldo Carvalho de Melo
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 include/linux/perf_counter.h | 1 +
 kernel/perf_counter.c        | 4 +++-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 1fa1a26cb1b..6e133954e2e 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -286,6 +286,7 @@ enum perf_event_type {
 	 * struct {
 	 *	struct perf_event_header	header;
 	 *	u64				time;
+	 *	u64				id;
 	 * };
 	 */
 	PERF_EVENT_THROTTLE		= 5,
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 8859b97390e..ef5d8a5b245 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2950,13 +2950,15 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
 	struct {
 		struct perf_event_header	header;
 		u64				time;
+		u64				id;
 	} throttle_event = {
 		.header = {
 			.type = PERF_EVENT_THROTTLE + 1,
 			.misc = 0,
 			.size = sizeof(throttle_event),
 		},
-		.time = sched_clock(),
+		.time	= sched_clock(),
+		.id	= counter->id,
 	};
 
 	ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
-- 
cgit v1.2.3

From 8dc8e5e8bc0ce00b0f656bf972f67cd8a72759e5 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Thu, 11 Jun 2009 16:13:24 +0200
Subject: perf_counter: Turn off by default

Perfcounters were enabled by default to help testing - but now that
the subsystem is being submitted upstream, make it default-disabled.

Cc: Peter Zijlstra
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Arnaldo Carvalho de Melo
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 init/Kconfig | 1 -
 1 file changed, 1 deletion(-)

diff --git a/init/Kconfig b/init/Kconfig
index 8158f1f4469..aef16f9b3d2 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -941,7 +941,6 @@ menu "Performance Counters"
 config PERF_COUNTERS
 	bool "Kernel Performance Counters"
 	depends on HAVE_PERF_COUNTERS
-	default y
 	select ANON_INODES
 	help
 	  Enable kernel support for performance counter hardware.
-- 
cgit v1.2.3
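
The throttle/unthrottle records above land in the counter's mmap()ed data
ring next to all other event records, and the new id field tells user space
which counter was throttled. The following is a minimal, illustrative
user-space sketch of decoding such a record, not part of the patch series:
it assumes the record layout introduced above (header, time, id), mirrors
the kernel's perf_event_header by hand, and hard-codes PERF_EVENT_THROTTLE
(5) together with an assumed companion PERF_EVENT_UNTHROTTLE value (6) that
is not shown in the hunks above. The user-side struct and function names
are hypothetical, not ABI:

    #include <stdint.h>
    #include <stdio.h>

    /* Hand-rolled mirror of the kernel's struct perf_event_header. */
    struct perf_event_header {
            uint32_t type;
            uint16_t misc;
            uint16_t size;
    };

    /* Throttle record layout after the patch: header, time, id. */
    struct perf_throttle_record {
            struct perf_event_header header;
            uint64_t time;
            uint64_t id;
    };

    #define PERF_EVENT_THROTTLE   5
    #define PERF_EVENT_UNTHROTTLE 6  /* assumed: defined next to THROTTLE */

    /* Called for each record pulled out of the mmap()ed data ring. */
    static void handle_record(const struct perf_event_header *hdr)
    {
            const struct perf_throttle_record *tr;

            if (hdr->type != PERF_EVENT_THROTTLE &&
                hdr->type != PERF_EVENT_UNTHROTTLE)
                    return;

            tr = (const struct perf_throttle_record *)hdr;
            printf("%s: counter id %llu at time %llu\n",
                   hdr->type == PERF_EVENT_THROTTLE ? "throttle" : "unthrottle",
                   (unsigned long long)tr->id,
                   (unsigned long long)tr->time);
    }

Matching on id lets a tool pair these records with the counter id it already
sees via PERF_FORMAT_ID or PERF_SAMPLE_ID, so a reader multiplexing several
counters over one ring buffer can attribute the throttle to the right
counter - exactly the ambiguity the patch above resolves.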