author    Robert Richter <robert.richter@amd.com>    2009-04-29 12:47:09 +0200
committer Ingo Molnar <mingo@elte.hu>                2009-04-29 14:51:06 +0200
commit    55de0f2e57994b525324bf0d04d242d9358a2417 (patch)
tree      2467fa8dba81135fed5430c488685a12789ccc29 /arch/x86/kernel
parent    26816c287e13eedc67bc4ed0cd40c138314b7c7d (diff)
perf_counter, x86: rename intel only functions
[ Impact: cleanup ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-13-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
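For reference, the renames applied at the call sites in this patch are summarized below. This is a sketch inferred from the diff hunks: the bodies and exact signatures of intel_pmu_save_disable_all() and intel_pmu_restore_all() are not part of this patch (they come from earlier patches in the series), so the prototypes here are assumptions based on how the functions are used at the call sites shown.

/*
 * Rename summary (prototypes assumed from usage in this diff):
 *
 *   perf_save_and_restart()  ->  intel_pmu_save_and_restart()
 *   hw_perf_save_disable()   ->  intel_pmu_save_disable_all()
 *   hw_perf_restore()        ->  intel_pmu_restore_all()
 */
static void intel_pmu_save_and_restart(struct perf_counter *counter);
static u64  intel_pmu_save_disable_all(void); /* return value is stored in cpuc->throttle_ctrl */
static void intel_pmu_restore_all(u64 ctrl);  /* takes the saved control word back */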
Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/cpu/perf_counter.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index fa6541d781b..5a52d73ccfa 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -725,7 +725,7 @@ static void x86_pmu_disable(struct perf_counter *counter)
  * Save and restart an expired counter. Called by NMI contexts,
  * so it has to be careful about preempting normal counter ops:
  */
-static void perf_save_and_restart(struct perf_counter *counter)
+static void intel_pmu_save_and_restart(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
 	int idx = hwc->idx;
@@ -753,7 +753,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
 	int ret = 0;
 
-	cpuc->throttle_ctrl = hw_perf_save_disable();
+	cpuc->throttle_ctrl = intel_pmu_save_disable_all();
 
 	status = intel_pmu_get_status(cpuc->throttle_ctrl);
 	if (!status)
@@ -770,7 +770,7 @@ again:
 		if (!counter)
 			continue;
 
-		perf_save_and_restart(counter);
+		intel_pmu_save_and_restart(counter);
 		if (perf_counter_overflow(counter, nmi, regs, 0))
 			__x86_pmu_disable(counter, &counter->hw, bit);
 	}
@@ -788,7 +788,7 @@ out:
 	 * Restore - do not reenable when global enable is off or throttled:
 	 */
 	if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
-		hw_perf_restore(cpuc->throttle_ctrl);
+		intel_pmu_restore_all(cpuc->throttle_ctrl);
 
 	return ret;
 }