X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fcpufreq%2Fcpufreq_conservative.c;h=5d3a04ba6ad2c38aaf18d443b508511206bfac0c;hb=54d29ad33e3483bcc7ca433a21cf294854e5154a;hp=4bd33ce8a6f3398a0aa37995582e619c3b938590;hpb=37ca506adc395a028cd12760eca419dd0dc14b5c;p=linux-2.6

diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 4bd33ce8a6..5d3a04ba6a 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -37,17 +37,17 @@
 #define DEF_FREQUENCY_UP_THRESHOLD (80)
 #define DEF_FREQUENCY_DOWN_THRESHOLD (20)
 
-/*
- * The polling frequency of this governor depends on the capability of
+/*
+ * The polling frequency of this governor depends on the capability of
  * the processor. Default polling frequency is 1000 times the transition
- * latency of the processor. The governor will work on any processor with
- * transition latency <= 10mS, using appropriate sampling
+ * latency of the processor. The governor will work on any processor with
+ * transition latency <= 10mS, using appropriate sampling
  * rate.
  * For CPUs with transition latency > 10mS (mostly drivers
  * with CPUFREQ_ETERNAL), this governor will not work.
  * All times here are in uS.
  */
-static unsigned int def_sampling_rate;
+static unsigned int def_sampling_rate;
 #define MIN_SAMPLING_RATE_RATIO (2)
 /* for correct statistics, we need at least 10 ticks between each measure */
 #define MIN_STAT_SAMPLING_RATE \
@@ -63,12 +63,12 @@ static unsigned int def_sampling_rate;
 static void do_dbs_timer(struct work_struct *work);
 
 struct cpu_dbs_info_s {
-	struct cpufreq_policy *cur_policy;
-	unsigned int prev_cpu_idle_up;
-	unsigned int prev_cpu_idle_down;
-	unsigned int enable;
-	unsigned int down_skip;
-	unsigned int requested_freq;
+	struct cpufreq_policy *cur_policy;
+	unsigned int prev_cpu_idle_up;
+	unsigned int prev_cpu_idle_down;
+	unsigned int enable;
+	unsigned int down_skip;
+	unsigned int requested_freq;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
@@ -82,24 +82,24 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
  * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
  * is recursive for the same process. -Venki
  */
-static DEFINE_MUTEX (dbs_mutex);
+static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
 
 struct dbs_tuners {
-	unsigned int sampling_rate;
-	unsigned int sampling_down_factor;
-	unsigned int up_threshold;
-	unsigned int down_threshold;
-	unsigned int ignore_nice;
-	unsigned int freq_step;
+	unsigned int sampling_rate;
+	unsigned int sampling_down_factor;
+	unsigned int up_threshold;
+	unsigned int down_threshold;
+	unsigned int ignore_nice;
+	unsigned int freq_step;
 };
 
 static struct dbs_tuners dbs_tuners_ins = {
-	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
-	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
-	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
-	.ignore_nice = 0,
-	.freq_step = 5,
+	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
+	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
+	.ignore_nice = 0,
+	.freq_step = 5,
 };
 
 static inline unsigned int get_cpu_idle_time(unsigned int cpu)
@@ -109,13 +109,34 @@ static inline unsigned int get_cpu_idle_time(unsigned int cpu)
 	if (dbs_tuners_ins.ignore_nice)
 		add_nice = kstat_cpu(cpu).cpustat.nice;
 
-	ret = kstat_cpu(cpu).cpustat.idle +
+	ret = kstat_cpu(cpu).cpustat.idle +
 		kstat_cpu(cpu).cpustat.iowait +
 		add_nice;
 
 	return ret;
 }
 
+/* keep track of frequency transitions */
+static int
+dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+		     void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
+							freq->cpu);
+
+	if (!this_dbs_info->enable)
+		return 0;
+
+	this_dbs_info->requested_freq = freq->new;
+
+	return 0;
+}
+
+static struct notifier_block dbs_cpufreq_notifier_block = {
+	.notifier_call = dbs_cpufreq_notifier
+};
+
 /************************** sysfs interface ************************/
 static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
 {
@@ -127,8 +148,8 @@ static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
 	return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
 }
 
-#define define_one_ro(_name) \
-static struct freq_attr _name = \
+#define define_one_ro(_name) \
+static struct freq_attr _name = \
 __ATTR(_name, 0444, show_##_name, NULL)
 
 define_one_ro(sampling_rate_max);
@@ -148,7 +169,7 @@ show_one(down_threshold, down_threshold);
 show_one(ignore_nice_load, ignore_nice);
 show_one(freq_step, freq_step);
 
-static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
+static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -164,7 +185,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	return count;
 }
 
-static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
+static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -183,7 +204,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	return count;
 }
 
-static ssize_t store_up_threshold(struct cpufreq_policy *unused,
+static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -202,7 +223,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	return count;
 }
 
-static ssize_t store_down_threshold(struct cpufreq_policy *unused,
+static ssize_t store_down_threshold(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -228,16 +249,16 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	int ret;
 
 	unsigned int j;
-
-	ret = sscanf (buf, "%u", &input);
-	if ( ret != 1 )
+
+	ret = sscanf(buf, "%u", &input);
+	if (ret != 1)
 		return -EINVAL;
 
-	if ( input > 1 )
+	if (input > 1)
 		input = 1;
-
+
 	mutex_lock(&dbs_mutex);
-	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
+	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
 		mutex_unlock(&dbs_mutex);
 		return count;
 	}
@@ -261,14 +282,14 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
 	unsigned int input;
 	int ret;
 
-	ret = sscanf (buf, "%u", &input);
+	ret = sscanf(buf, "%u", &input);
 
-	if ( ret != 1 )
+	if (ret != 1)
 		return -EINVAL;
 
-	if ( input > 100 )
+	if (input > 100)
 		input = 100;
-
+
 	/* no need to test here if freq_step is zero as the user might actually
 	 * want this, they would be crazy though :) */
 	mutex_lock(&dbs_mutex);
@@ -322,18 +343,18 @@ static void dbs_check_cpu(int cpu)
 
 	policy = this_dbs_info->cur_policy;
 
-	/*
-	 * The default safe range is 20% to 80%
+	/*
+	 * The default safe range is 20% to 80%
 	 * Every sampling_rate, we check
-	 *	- If current idle time is less than 20%, then we try to
-	 *	  increase frequency
+	 *	- If current idle time is less than 20%, then we try to
+	 *	  increase frequency
 	 * Every sampling_rate*sampling_down_factor, we check
-	 *	- If current idle time is more than 80%, then we try to
-	 *	  decrease frequency
+	 *	- If current idle time is more than 80%, then we try to
+	 *	  decrease frequency
 	 *
-	 * Any frequency increase takes it to the maximum frequency.
-	 * Frequency reduction happens at minimum steps of
-	 * 5% (default) of max_frequency
+	 * Any frequency increase takes it to the maximum frequency.
+	 * Frequency reduction happens at minimum steps of
+	 * 5% (default) of max_frequency
 	 */
 
 	/* Check for frequency increase */
@@ -361,13 +382,13 @@ static void dbs_check_cpu(int cpu)
 		/* if we are already at full speed then break out early */
 		if (this_dbs_info->requested_freq == policy->max)
 			return;
-
+
 		freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
 
 		/* max freq cannot be less than 100. But who knows.... */
 		if (unlikely(freq_step == 0))
 			freq_step = 5;
-
+
 		this_dbs_info->requested_freq += freq_step;
 		if (this_dbs_info->requested_freq > policy->max)
 			this_dbs_info->requested_freq = policy->max;
@@ -427,15 +448,15 @@ static void dbs_check_cpu(int cpu)
 }
 
 static void do_dbs_timer(struct work_struct *work)
-{
+{
 	int i;
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
-	schedule_delayed_work(&dbs_work,
+	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
-}
+}
 
 static inline void dbs_timer_init(void)
 {
@@ -462,13 +483,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
-		if ((!cpu_online(cpu)) ||
-				(!policy->cur))
+		if ((!cpu_online(cpu)) || (!policy->cur))
 			return -EINVAL;
 
 		if (this_dbs_info->enable) /* Already enabled */
 			break;
-
+
 		mutex_lock(&dbs_mutex);
 
 		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
@@ -481,7 +501,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
-
+
 			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu);
 			j_dbs_info->prev_cpu_idle_down
 				= j_dbs_info->prev_cpu_idle_up;
@@ -511,8 +531,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
 			dbs_timer_init();
+			cpufreq_register_notifier(
+					&dbs_cpufreq_notifier_block,
+					CPUFREQ_TRANSITION_NOTIFIER);
 		}
-
+
 		mutex_unlock(&dbs_mutex);
 		break;
 
@@ -525,9 +548,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		 * Stop the timerschedule work, when this governor
 		 * is used for first time
 		 */
-		if (dbs_enable == 0)
+		if (dbs_enable == 0) {
 			dbs_timer_exit();
-
+			cpufreq_unregister_notifier(
+					&dbs_cpufreq_notifier_block,
+					CPUFREQ_TRANSITION_NOTIFIER);
+		}
+
 		mutex_unlock(&dbs_mutex);
 		break;
 
@@ -537,11 +564,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
-					policy->max, CPUFREQ_RELATION_H);
+					policy->max, CPUFREQ_RELATION_H);
 		else if (policy->min > this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
-					policy->min, CPUFREQ_RELATION_L);
+					policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
 		break;
 	}
@@ -576,5 +603,9 @@ MODULE_DESCRIPTION ("'cpufreq_conservative' - A dynamic cpufreq governor for "
 		"optimised for use in a battery environment");
 MODULE_LICENSE ("GPL");
 
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+fs_initcall(cpufreq_gov_dbs_init);
+#else
 module_init(cpufreq_gov_dbs_init);
+#endif
 module_exit(cpufreq_gov_dbs_exit);
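Beyond the whitespace and coding-style cleanup, the functional change in this diff is twofold: the conservative governor registers a CPUFREQ_TRANSITION_NOTIFIER so that its cached requested_freq follows every actual frequency transition, including ones it did not initiate, and it switches to fs_initcall() registration when built in as the default governor. The sketch below is an illustrative, standalone use of the same transition-notifier mechanism against the 2.6-era cpufreq API; it is not part of the patch, and the names cached_freq, freq_transition_notifier, freq_nb and freq_sketch_* are made up for the example.

/*
 * Illustrative sketch only: a standalone module using the same
 * CPUFREQ_TRANSITION_NOTIFIER mechanism that the patch above adds to
 * the conservative governor. Assumes a 2.6-era cpufreq API in which
 * struct cpufreq_freqs carries .cpu, .old and .new.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/notifier.h>

/* Stand-in for the governor's per-CPU requested_freq cache. */
static unsigned int cached_freq;

static int freq_transition_notifier(struct notifier_block *nb,
				    unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;

	/*
	 * Mirror the patch: remember the new frequency so that later
	 * decisions start from the frequency that was actually set,
	 * even when the change was initiated elsewhere.
	 */
	cached_freq = freq->new;

	if (val == CPUFREQ_POSTCHANGE)
		printk(KERN_DEBUG "cpu%u: %u -> %u kHz\n",
		       freq->cpu, freq->old, freq->new);

	return 0;
}

static struct notifier_block freq_nb = {
	.notifier_call = freq_transition_notifier,
};

static int __init freq_sketch_init(void)
{
	return cpufreq_register_notifier(&freq_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}

static void __exit freq_sketch_exit(void)
{
	cpufreq_unregister_notifier(&freq_nb,
				    CPUFREQ_TRANSITION_NOTIFIER);
}

module_init(freq_sketch_init);
module_exit(freq_sketch_exit);
MODULE_LICENSE("GPL");

The callback records freq->new on every notification, as the patch's dbs_cpufreq_notifier() does; the CPUFREQ_POSTCHANGE check is used here only to limit the debug message to completed transitions.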