Diffstat (limited to 'drivers/cpufreq')
 -rw-r--r--  drivers/cpufreq/Kconfig                 |  1
 -rw-r--r--  drivers/cpufreq/cpufreq.c               | 34
 -rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  |  7
 -rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c      | 28
4 files changed, 41 insertions, 29 deletions
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 2cc71b66231..491779af8d5 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -107,6 +107,7 @@ config CPU_FREQ_GOV_USERSPACE
 
 config CPU_FREQ_GOV_ONDEMAND
         tristate "'ondemand' cpufreq policy governor"
+        select CPU_FREQ_TABLE
         help
           'ondemand' - This driver adds a dynamic cpufreq policy governor.
           The governor does a periodic polling and
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d35a9f06ab7..47ab42db122 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
 
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
-static void handle_update(void *data);
+static void handle_update(struct work_struct *work);
 
 /**
  * Two notifier lists: the "policy" list is involved in the
@@ -52,8 +52,14 @@ static void handle_update(struct work_struct *work);
  * The mutex locks both lists.
  */
 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
-static BLOCKING_NOTIFIER_HEAD(cpufreq_transition_notifier_list);
+static struct srcu_notifier_head cpufreq_transition_notifier_list;
+static int __init init_cpufreq_transition_notifier_list(void)
+{
+        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
+        return 0;
+}
+pure_initcall(init_cpufreq_transition_notifier_list);
 
 static LIST_HEAD(cpufreq_governor_list);
 static DEFINE_MUTEX (cpufreq_governor_mutex);
@@ -262,14 +268,14 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
                                 freqs->old = policy->cur;
                         }
                 }
-                blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
+                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                 CPUFREQ_PRECHANGE, freqs);
                 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                 break;
 
         case CPUFREQ_POSTCHANGE:
                 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
-                blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
+                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                 CPUFREQ_POSTCHANGE, freqs);
                 if (likely(policy) && likely(policy->cpu == freqs->cpu))
                         policy->cur = freqs->new;
@@ -659,7 +665,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
         mutex_init(&policy->lock);
         mutex_lock(&policy->lock);
         init_completion(&policy->kobj_unregister);
-        INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);
+        INIT_WORK(&policy->update, handle_update);
 
         /* call driver. From then on the cpufreq must be able
          * to accept all calls to ->verify and ->setpolicy for this CPU
@@ -889,9 +895,11 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 }
 
 
-static void handle_update(void *data)
+static void handle_update(struct work_struct *work)
 {
-        unsigned int cpu = (unsigned int)(long)data;
+        struct cpufreq_policy *policy =
+                container_of(work, struct cpufreq_policy, update);
+        unsigned int cpu = policy->cpu;
         dprintk("handle_update for cpu %u called\n", cpu);
         cpufreq_update_policy(cpu);
 }
@@ -994,7 +1002,7 @@ static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
         unsigned int cur_freq = 0;
         struct cpufreq_policy *cpu_policy;
 
-        dprintk("resuming cpu %u\n", cpu);
+        dprintk("suspending cpu %u\n", cpu);
 
         if (!cpu_online(cpu))
                 return 0;
@@ -1049,7 +1057,7 @@ static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
                         freqs.old = cpu_policy->cur;
                         freqs.new = cur_freq;
 
-                        blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
+                        srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                             CPUFREQ_SUSPENDCHANGE, &freqs);
                         adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
@@ -1130,7 +1138,7 @@ static int cpufreq_resume(struct sys_device * sysdev)
                         freqs.old = cpu_policy->cur;
                         freqs.new = cur_freq;
 
-                        blocking_notifier_call_chain(
+                        srcu_notifier_call_chain(
                                         &cpufreq_transition_notifier_list,
                                         CPUFREQ_RESUMECHANGE, &freqs);
                         adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
@@ -1176,7 +1184,7 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
 
         switch (list) {
         case CPUFREQ_TRANSITION_NOTIFIER:
-                ret = blocking_notifier_chain_register(
+                ret = srcu_notifier_chain_register(
                                 &cpufreq_transition_notifier_list, nb);
                 break;
         case CPUFREQ_POLICY_NOTIFIER:
@@ -1208,7 +1216,7 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
 
         switch (list) {
         case CPUFREQ_TRANSITION_NOTIFIER:
-                ret = blocking_notifier_chain_unregister(
+                ret = srcu_notifier_chain_unregister(
                                 &cpufreq_transition_notifier_list, nb);
                 break;
         case CPUFREQ_POLICY_NOTIFIER:
@@ -1529,7 +1537,6 @@ int cpufreq_update_policy(unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int cpufreq_cpu_callback(struct notifier_block *nfb,
                                         unsigned long action, void *hcpu)
 {
@@ -1569,7 +1576,6 @@ static struct notifier_block __cpuinitdata cpufreq_cpu_notifier =
 {
         .notifier_call = cpufreq_cpu_callback,
 };
-#endif /* CONFIG_HOTPLUG_CPU */
 
 /*********************************************************************
  *              REGISTER / UNREGISTER CPUFREQ DRIVER                 *
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c4c578defab..5ef5ede5b88 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -59,7 +59,7 @@ static unsigned int def_sampling_rate;
 #define MAX_SAMPLING_DOWN_FACTOR        (10)
 #define TRANSITION_LATENCY_LIMIT        (10 * 1000)
 
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
 
 struct cpu_dbs_info_s {
         struct cpufreq_policy *cur_policy;
@@ -82,7 +82,7 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
  * is recursive for the same process. -Venki
  */
 static DEFINE_MUTEX (dbs_mutex);
-static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
+static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
 
 struct dbs_tuners {
         unsigned int sampling_rate;
@@ -420,7 +420,7 @@ static void dbs_check_cpu(int cpu)
         }
 }
 
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
 {
         int i;
         lock_cpu_hotplug();
@@ -435,7 +435,6 @@ static void do_dbs_timer(void *data)
 
 static inline void dbs_timer_init(void)
 {
-        INIT_WORK(&dbs_work, do_dbs_timer, NULL);
         schedule_delayed_work(&dbs_work,
                         usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
         return;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index bf8aa45d4f0..e1cc5113c2a 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -47,13 +47,17 @@ static unsigned int def_sampling_rate;
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER        (1000)
 #define TRANSITION_LATENCY_LIMIT                (10 * 1000)
 
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
+
+/* Sampling types */
+enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
 
 struct cpu_dbs_info_s {
         cputime64_t prev_cpu_idle;
         cputime64_t prev_cpu_wall;
         struct cpufreq_policy *cur_policy;
-        struct work_struct work;
+        struct delayed_work work;
+        enum dbs_sample sample_type;
         unsigned int enable;
         struct cpufreq_frequency_table *freq_table;
         unsigned int freq_lo;
@@ -407,30 +411,31 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
         }
 }
 
-/* Sampling types */
-enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
-
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
 {
         unsigned int cpu = smp_processor_id();
         struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+        enum dbs_sample sample_type = dbs_info->sample_type;
         /* We want all CPUs to do sampling nearly on same jiffy */
         int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+        /* Permit rescheduling of this work item */
+        work_release(work);
+
         delay -= jiffies % delay;
 
         if (!dbs_info->enable)
                 return;
         /* Common NORMAL_SAMPLE setup */
-        INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE);
+        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
         if (!dbs_tuners_ins.powersave_bias ||
-            (unsigned long) data == DBS_NORMAL_SAMPLE) {
+            sample_type == DBS_NORMAL_SAMPLE) {
                 lock_cpu_hotplug();
                 dbs_check_cpu(dbs_info);
                 unlock_cpu_hotplug();
                 if (dbs_info->freq_lo) {
                         /* Setup timer for SUB_SAMPLE */
-                        INIT_WORK(&dbs_info->work, do_dbs_timer,
-                                  (void *)DBS_SUB_SAMPLE);
+                        dbs_info->sample_type = DBS_SUB_SAMPLE;
                         delay = dbs_info->freq_hi_jiffies;
                 }
         } else {
@@ -449,7 +454,8 @@ static inline void dbs_timer_init(unsigned int cpu)
         delay -= jiffies % delay;
 
         ondemand_powersave_bias_init();
-        INIT_WORK(&dbs_info->work, do_dbs_timer, NULL);
+        INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
+        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
         queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
 }
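
For reference, below is a minimal, kernel-independent sketch of the work-handler pattern this patch converts cpufreq to: the callback receives only the work item and recovers its enclosing object with container_of(), as handle_update() now does with struct cpufreq_policy. The stub work_struct, the local container_of() macro and the main() harness are illustrative assumptions for a user-space build; in the kernel these come from <linux/workqueue.h> and <linux/kernel.h>, and the workqueue, not main(), invokes the handler.

/* Sketch only: shows how a work callback finds its containing structure. */
#include <stdio.h>
#include <stddef.h>

struct work_struct {
        void (*func)(struct work_struct *work);        /* stub of the kernel type */
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct cpufreq_policy {
        unsigned int cpu;
        struct work_struct update;        /* embedded work item, as in the diff */
};

/* New-style handler: no void *data argument; recover the policy from the work item. */
static void handle_update(struct work_struct *work)
{
        struct cpufreq_policy *policy =
                container_of(work, struct cpufreq_policy, update);
        printf("handle_update for cpu %u called\n", policy->cpu);
}

int main(void)
{
        struct cpufreq_policy policy = { .cpu = 2 };

        policy.update.func = handle_update;
        policy.update.func(&policy.update);        /* stands in for the workqueue dispatch */
        return 0;
}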