Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/kernel/perf_counter.c	13
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	9
2 files changed, 9 insertions, 13 deletions
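
In short: both architectures stop reading the sample period out of counter->hw_event at the point of use and rely on the copy cached in counter->hw.irq_period instead, presumably so the period can later be changed on a live counter. powerpc drops its negative-period check at init; x86 no longer clamps hwc->irq_period at init and instead applies min(x86_pmu.max_period, hwc->irq_period) wherever the period is consumed.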
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index bb1b463c136..db8d5cafc15 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -534,7 +534,7 @@ void hw_perf_enable(void)
continue;
}
val = 0;
- if (counter->hw_event.irq_period) {
+ if (counter->hw.irq_period) {
left = atomic64_read(&counter->hw.period_left);
if (left < 0x80000000L)
val = 0x80000000L - left;
@@ -829,8 +829,6 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)

if (!ppmu)
return ERR_PTR(-ENXIO);
- if ((s64)counter->hw_event.irq_period < 0)
- return ERR_PTR(-EINVAL);
if (!perf_event_raw(&counter->hw_event)) {
ev = perf_event_id(&counter->hw_event);
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
@@ -901,7 +899,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)

counter->hw.config = events[n];
counter->hw.counter_base = cflags[n];
- atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period);
+ atomic64_set(&counter->hw.period_left, counter->hw.irq_period);

/*
* See if we need to reserve the PMU.
@@ -934,6 +932,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
static void record_and_restart(struct perf_counter *counter, long val,
struct pt_regs *regs, int nmi)
{
+ u64 period = counter->hw.irq_period;
s64 prev, delta, left;
int record = 0;

@@ -948,11 +947,11 @@ static void record_and_restart(struct perf_counter *counter, long val,
*/
val = 0;
left = atomic64_read(&counter->hw.period_left) - delta;
- if (counter->hw_event.irq_period) {
+ if (period) {
if (left <= 0) {
- left += counter->hw_event.irq_period;
+ left += period;
if (left <= 0)
- left = counter->hw_event.irq_period;
+ left = period;
record = 1;
}
if (left < 0x80000000L)
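
The powerpc hunk above now reads the period once into a local and reuses it for the reload. A minimal standalone sketch of that reload arithmetic (hypothetical names, plain C, not the kernel code itself):

#include <stdint.h>

typedef int64_t s64;
typedef uint64_t u64;

/*
 * 'left' counts events remaining until the next sample. On overflow it
 * is topped up by one period; if the counter overshot by more than a
 * full period, it is reset to exactly one period. Either way a sample
 * is due, which the caller signals via the return value.
 */
static int reload_left(s64 *left, u64 period)
{
	int record = 0;

	if (period && *left <= 0) {
		*left += (s64)period;
		if (*left <= 0)
			*left = (s64)period;
		record = 1;
	}
	return record;
}

The surrounding code then programs the hardware with 0x80000000L - left, so the 32-bit counter overflows, and raises its interrupt, after exactly 'left' more events.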
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 5a7f718eb1e..886dcf334bc 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -286,11 +286,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
hwc->nmi = 1;
}

- hwc->irq_period = hw_event->irq_period;
- if ((s64)hwc->irq_period <= 0 || hwc->irq_period > x86_pmu.max_period)
- hwc->irq_period = x86_pmu.max_period;
-
- atomic64_set(&hwc->period_left, hwc->irq_period);
+ atomic64_set(&hwc->period_left,
+ min(x86_pmu.max_period, hwc->irq_period));

/*
* Raw event type provide the config in the event structure
@@ -458,7 +455,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
struct hw_perf_counter *hwc, int idx)
{
s64 left = atomic64_read(&hwc->period_left);
- s64 period = hwc->irq_period;
+ s64 period = min(x86_pmu.max_period, hwc->irq_period);
int err;

/*
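
To close the loop on the x86 side: the requested period now stays verbatim in hwc->irq_period and is clamped to the PMU's hardware limit only where a counter is (re)programmed. A sketch of that clamp-at-use pattern, again with stand-in names rather than the kernel's:

#include <stdint.h>

typedef int64_t s64;
typedef uint64_t u64;

/*
 * Clamp the requested period to the hardware maximum at use time,
 * mirroring min(x86_pmu.max_period, hwc->irq_period) in the hunk above.
 */
static s64 effective_period(u64 irq_period, u64 max_period)
{
	return (s64)(irq_period < max_period ? irq_period : max_period);
}

One consequence of clamping at use rather than at init is that the stored irq_period remains exactly what the caller asked for, so a later change to the period does not have to contend with an earlier, already-applied clamp.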