2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
8 * Added handling for CPU hotplug
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
18 #include <linux/config.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/notifier.h>
23 #include <linux/cpufreq.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/spinlock.h>
27 #include <linux/device.h>
28 #include <linux/slab.h>
29 #include <linux/cpu.h>
30 #include <linux/completion.h>
31 #include <linux/mutex.h>
33 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg)
36 * The "cpufreq driver" - the arch- or hardware-dependent low
37 * level driver of CPUFreq support, and its spinlock. This lock
38 * also protects the cpufreq_cpu_data array.
40 static struct cpufreq_driver *cpufreq_driver;
41 static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
42 static DEFINE_SPINLOCK(cpufreq_driver_lock);
44 /* internal prototypes */
45 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
46 static void handle_update(void *data);
49 * Two notifier lists: the "policy" list is involved in the
50 * validation process for a new CPU frequency policy; the
51 * "transition" list for kernel code that needs to handle
52 * changes to devices when the CPU clock speed changes.
53 * Each list is protected by its blocking notifier head's rwsem.
55 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
56 static BLOCKING_NOTIFIER_HEAD(cpufreq_transition_notifier_list);
59 static LIST_HEAD(cpufreq_governor_list);
60 static DEFINE_MUTEX (cpufreq_governor_mutex);
62 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
64 struct cpufreq_policy *data;
70 /* get the cpufreq driver */
71 spin_lock_irqsave(&cpufreq_driver_lock, flags);
76 if (!try_module_get(cpufreq_driver->owner))
81 data = cpufreq_cpu_data[cpu];
84 goto err_out_put_module;
86 if (!kobject_get(&data->kobj))
87 goto err_out_put_module;
89 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
93 module_put(cpufreq_driver->owner);
95 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
99 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
102 void cpufreq_cpu_put(struct cpufreq_policy *data)
104 kobject_put(&data->kobj);
105 module_put(cpufreq_driver->owner);
107 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
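/* Illustrative sketch (assumption, not from this file): a typical caller
 * pairs cpufreq_cpu_get() with cpufreq_cpu_put(). The function name
 * my_use_policy below is hypothetical.
 *
 *	static int my_use_policy(unsigned int cpu)
 *	{
 *		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *		if (!policy)
 *			return -EINVAL;
 *		... read policy->min / policy->max while holding the reference ...
 *		cpufreq_cpu_put(policy);
 *		return 0;
 *	}
 */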
110 /*********************************************************************
111 * UNIFIED DEBUG HELPERS *
112 *********************************************************************/
113 #ifdef CONFIG_CPU_FREQ_DEBUG
115 /* what part(s) of the CPUfreq subsystem are debugged? */
116 static unsigned int debug;
118 /* is the debug output rate-limited using printk_ratelimit? Users can
119 * set or modify this value.
121 static unsigned int debug_ratelimit = 1;
123 /* is printk ratelimiting enabled? It is enabled after a cpufreq driver
124 * has been loaded successfully, temporarily disabled when a new policy
125 * is set, and disabled again upon cpufreq driver removal
127 static unsigned int disable_ratelimit = 1;
128 static DEFINE_SPINLOCK(disable_ratelimit_lock);
130 static void cpufreq_debug_enable_ratelimit(void)
134 spin_lock_irqsave(&disable_ratelimit_lock, flags);
135 if (disable_ratelimit)
137 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
140 static void cpufreq_debug_disable_ratelimit(void)
144 spin_lock_irqsave(&disable_ratelimit_lock, flags);
146 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
149 void cpufreq_debug_printk(unsigned int type, const char *prefix, const char *fmt, ...)
158 spin_lock_irqsave(&disable_ratelimit_lock, flags);
159 if (!disable_ratelimit && debug_ratelimit && !printk_ratelimit()) {
160 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
163 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
165 len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);
168 len += vsnprintf(&s[len], (256 - len), fmt, args);
176 EXPORT_SYMBOL(cpufreq_debug_printk);
179 module_param(debug, uint, 0644);
180 MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core, 2 to debug drivers, and 4 to debug governors.");
182 module_param(debug_ratelimit, uint, 0644);
183 MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging: set to 0 to disable ratelimiting.");
185 #else /* !CONFIG_CPU_FREQ_DEBUG */
187 static inline void cpufreq_debug_enable_ratelimit(void) { return; }
188 static inline void cpufreq_debug_disable_ratelimit(void) { return; }
190 #endif /* CONFIG_CPU_FREQ_DEBUG */
193 /*********************************************************************
194 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
195 *********************************************************************/
198 * adjust_jiffies - adjust the system "loops_per_jiffy"
200 * This function alters the system "loops_per_jiffy" for the clock
201 * speed change. Note that loops_per_jiffy cannot be updated on SMP
202 * systems as each CPU might be scaled differently. So, use the arch
203 * per-CPU loops_per_jiffy value wherever possible.
206 static unsigned long l_p_j_ref;
207 static unsigned int l_p_j_ref_freq;
209 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
211 if (ci->flags & CPUFREQ_CONST_LOOPS)
214 if (!l_p_j_ref_freq) {
215 l_p_j_ref = loops_per_jiffy;
216 l_p_j_ref_freq = ci->old;
217 dprintk("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
219 if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
220 (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
221 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
222 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new);
223 dprintk("scaling loops_per_jiffy to %lu for frequency %u kHz\n", loops_per_jiffy, ci->new);
227 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) { return; }
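/* Worked example (illustrative numbers only): cpufreq_scale(ref_lpj,
 * ref_freq, new_freq) rescales loops_per_jiffy proportionally to the
 * frequency, i.e. new_lpj = ref_lpj * new_freq / ref_freq. With a saved
 * reference of 4,000,000 loops_per_jiffy at 800000 kHz, a switch to
 * 1000000 kHz yields 4,000,000 * 1000000 / 800000 = 5,000,000.
 */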
232 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
233 * on frequency transition.
235 * This function calls the transition notifiers and the "adjust_jiffies"
236 * function. It is called twice on all CPU frequency changes that have external effects.
239 void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
241 struct cpufreq_policy *policy;
243 BUG_ON(irqs_disabled());
245 freqs->flags = cpufreq_driver->flags;
246 dprintk("notification %u of frequency transition to %u kHz\n",
249 policy = cpufreq_cpu_data[freqs->cpu];
252 case CPUFREQ_PRECHANGE:
253 /* detect if the driver reported a value as "old frequency"
254 * which is not equal to what the cpufreq core thinks is "old frequency". */
257 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
258 if ((policy) && (policy->cpu == freqs->cpu) &&
259 (policy->cur) && (policy->cur != freqs->old)) {
260 dprintk(KERN_WARNING "Warning: CPU frequency is"
261 " %u, cpufreq assumed %u kHz.\n",
262 freqs->old, policy->cur);
263 freqs->old = policy->cur;
266 blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
267 CPUFREQ_PRECHANGE, freqs);
268 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
271 case CPUFREQ_POSTCHANGE:
272 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
273 blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
274 CPUFREQ_POSTCHANGE, freqs);
275 if (likely(policy) && likely(policy->cpu == freqs->cpu))
276 policy->cur = freqs->new;
280 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
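/* Illustrative sketch (assumption, not from this file): a scaling driver's
 * ->target() callback is expected to bracket the actual hardware switch
 * with the two notifications. my_driver_set_speed() and new_khz are
 * hypothetical.
 *
 *	struct cpufreq_freqs freqs;
 *
 *	freqs.cpu = policy->cpu;
 *	freqs.old = policy->cur;
 *	freqs.new = new_khz;
 *
 *	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 *	my_driver_set_speed(new_khz);	(hardware-specific)
 *	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 */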
284 /*********************************************************************
286 *********************************************************************/
289 * cpufreq_parse_governor - parse a governor string
291 static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
292 struct cpufreq_governor **governor)
296 if (cpufreq_driver->setpolicy) {
297 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
298 *policy = CPUFREQ_POLICY_PERFORMANCE;
300 } else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
301 *policy = CPUFREQ_POLICY_POWERSAVE;
306 struct cpufreq_governor *t;
307 mutex_lock(&cpufreq_governor_mutex);
308 if (!cpufreq_driver || !cpufreq_driver->target)
310 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
311 if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) {
313 mutex_unlock(&cpufreq_governor_mutex);
318 mutex_unlock(&cpufreq_governor_mutex);
322 EXPORT_SYMBOL_GPL(cpufreq_parse_governor);
325 /* drivers/base/cpu.c */
326 extern struct sysdev_class cpu_sysdev_class;
330 * cpufreq_per_cpu_attr_read() / show_##file_name() - print out cpufreq information
332 * Write out information from cpufreq_driver->policy[cpu]; object must be "unsigned int".
336 #define show_one(file_name, object) \
337 static ssize_t show_##file_name \
338 (struct cpufreq_policy * policy, char *buf) \
340 return sprintf (buf, "%u\n", policy->object); \
343 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
344 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
345 show_one(scaling_min_freq, min);
346 show_one(scaling_max_freq, max);
347 show_one(scaling_cur_freq, cur);
349 static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy);
352 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
354 #define store_one(file_name, object) \
355 static ssize_t store_##file_name \
356 (struct cpufreq_policy * policy, const char *buf, size_t count) \
358 unsigned int ret = -EINVAL; \
359 struct cpufreq_policy new_policy; \
361 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
365 ret = sscanf (buf, "%u", &new_policy.object); \
369 mutex_lock(&policy->lock); \
370 ret = __cpufreq_set_policy(policy, &new_policy); \
371 policy->user_policy.object = policy->object; \
372 mutex_unlock(&policy->lock); \
374 return ret ? ret : count; \
377 store_one(scaling_min_freq,min);
378 store_one(scaling_max_freq,max);
381 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
383 static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, char *buf)
385 unsigned int cur_freq = cpufreq_get(policy->cpu);
387 return sprintf(buf, "<unknown>");
388 return sprintf(buf, "%u\n", cur_freq);
393 * show_scaling_governor - show the current policy for the specified CPU
395 static ssize_t show_scaling_governor (struct cpufreq_policy * policy, char *buf)
397 if(policy->policy == CPUFREQ_POLICY_POWERSAVE)
398 return sprintf(buf, "powersave\n");
399 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
400 return sprintf(buf, "performance\n");
401 else if (policy->governor)
402 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", policy->governor->name);
408 * store_scaling_governor - store policy for the specified CPU
410 static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
411 const char *buf, size_t count)
413 unsigned int ret = -EINVAL;
414 char str_governor[16];
415 struct cpufreq_policy new_policy;
417 ret = cpufreq_get_policy(&new_policy, policy->cpu);
421 ret = sscanf (buf, "%15s", str_governor);
425 if (cpufreq_parse_governor(str_governor, &new_policy.policy, &new_policy.governor))
428 /* Do not use cpufreq_set_policy here or the user_policy.max
429 will be wrongly overridden */
430 mutex_lock(&policy->lock);
431 ret = __cpufreq_set_policy(policy, &new_policy);
433 policy->user_policy.policy = policy->policy;
434 policy->user_policy.governor = policy->governor;
435 mutex_unlock(&policy->lock);
437 return ret ? ret : count;
441 * show_scaling_driver - show the cpufreq driver currently loaded
443 static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf)
445 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
449 * show_scaling_available_governors - show the available CPUfreq governors
451 static ssize_t show_scaling_available_governors (struct cpufreq_policy * policy,
455 struct cpufreq_governor *t;
457 if (!cpufreq_driver->target) {
458 i += sprintf(buf, "performance powersave");
462 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
463 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2)))
465 i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
468 i += sprintf(&buf[i], "\n");
472 * show_affected_cpus - show the CPUs affected by each transition
474 static ssize_t show_affected_cpus (struct cpufreq_policy * policy, char *buf)
479 for_each_cpu_mask(cpu, policy->cpus) {
481 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
482 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
483 if (i >= (PAGE_SIZE - 5))
486 i += sprintf(&buf[i], "\n");
491 #define define_one_ro(_name) \
492 static struct freq_attr _name = \
493 __ATTR(_name, 0444, show_##_name, NULL)
495 #define define_one_ro0400(_name) \
496 static struct freq_attr _name = \
497 __ATTR(_name, 0400, show_##_name, NULL)
499 #define define_one_rw(_name) \
500 static struct freq_attr _name = \
501 __ATTR(_name, 0644, show_##_name, store_##_name)
503 define_one_ro0400(cpuinfo_cur_freq);
504 define_one_ro(cpuinfo_min_freq);
505 define_one_ro(cpuinfo_max_freq);
506 define_one_ro(scaling_available_governors);
507 define_one_ro(scaling_driver);
508 define_one_ro(scaling_cur_freq);
509 define_one_ro(affected_cpus);
510 define_one_rw(scaling_min_freq);
511 define_one_rw(scaling_max_freq);
512 define_one_rw(scaling_governor);
514 static struct attribute * default_attrs[] = {
515 &cpuinfo_min_freq.attr,
516 &cpuinfo_max_freq.attr,
517 &scaling_min_freq.attr,
518 &scaling_max_freq.attr,
520 &scaling_governor.attr,
521 &scaling_driver.attr,
522 &scaling_available_governors.attr,
526 #define to_policy(k) container_of(k,struct cpufreq_policy,kobj)
527 #define to_attr(a) container_of(a,struct freq_attr,attr)
529 static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
531 struct cpufreq_policy * policy = to_policy(kobj);
532 struct freq_attr * fattr = to_attr(attr);
534 policy = cpufreq_cpu_get(policy->cpu);
537 ret = fattr->show ? fattr->show(policy,buf) : -EIO;
538 cpufreq_cpu_put(policy);
542 static ssize_t store(struct kobject * kobj, struct attribute * attr,
543 const char * buf, size_t count)
545 struct cpufreq_policy * policy = to_policy(kobj);
546 struct freq_attr * fattr = to_attr(attr);
548 policy = cpufreq_cpu_get(policy->cpu);
551 ret = fattr->store ? fattr->store(policy,buf,count) : -EIO;
552 cpufreq_cpu_put(policy);
556 static void cpufreq_sysfs_release(struct kobject * kobj)
558 struct cpufreq_policy * policy = to_policy(kobj);
559 dprintk("last reference is dropped\n");
560 complete(&policy->kobj_unregister);
563 static struct sysfs_ops sysfs_ops = {
568 static struct kobj_type ktype_cpufreq = {
569 .sysfs_ops = &sysfs_ops,
570 .default_attrs = default_attrs,
571 .release = cpufreq_sysfs_release,
576 * cpufreq_add_dev - add a CPU device
578 * Adds the cpufreq interface for a CPU device.
580 static int cpufreq_add_dev (struct sys_device * sys_dev)
582 unsigned int cpu = sys_dev->id;
584 struct cpufreq_policy new_policy;
585 struct cpufreq_policy *policy;
586 struct freq_attr **drv_attr;
587 struct sys_device *cpu_sys_dev;
591 struct cpufreq_policy *managed_policy;
594 if (cpu_is_offline(cpu))
597 cpufreq_debug_disable_ratelimit();
598 dprintk("adding CPU %u\n", cpu);
601 /* check whether a different CPU already registered this
602 * CPU because it is in the same boat. */
603 policy = cpufreq_cpu_get(cpu);
604 if (unlikely(policy)) {
605 cpufreq_cpu_put(policy);
606 cpufreq_debug_enable_ratelimit();
611 if (!try_module_get(cpufreq_driver->owner)) {
616 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
623 policy->cpus = cpumask_of_cpu(cpu);
625 mutex_init(&policy->lock);
626 mutex_lock(&policy->lock);
627 init_completion(&policy->kobj_unregister);
628 INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);
630 /* call driver. From then on the cpufreq must be able
631 * to accept all calls to ->verify and ->setpolicy for this CPU
633 ret = cpufreq_driver->init(policy);
635 dprintk("initialization failed\n");
636 mutex_unlock(&policy->lock);
641 for_each_cpu_mask(j, policy->cpus) {
645 /* check for existing affected CPUs. They may not be aware
646 * of it due to CPU Hotplug.
648 managed_policy = cpufreq_cpu_get(j);
649 if (unlikely(managed_policy)) {
650 spin_lock_irqsave(&cpufreq_driver_lock, flags);
651 managed_policy->cpus = policy->cpus;
652 cpufreq_cpu_data[cpu] = managed_policy;
653 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
655 dprintk("CPU already managed, adding link\n");
656 sysfs_create_link(&sys_dev->kobj,
657 &managed_policy->kobj, "cpufreq");
659 cpufreq_debug_enable_ratelimit();
660 mutex_unlock(&policy->lock);
662 goto err_out_driver_exit; /* call driver->exit() */
666 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
668 /* prepare interface data */
669 policy->kobj.parent = &sys_dev->kobj;
670 policy->kobj.ktype = &ktype_cpufreq;
671 strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN);
673 ret = kobject_register(&policy->kobj);
675 mutex_unlock(&policy->lock);
676 goto err_out_driver_exit;
678 /* set up files for this cpu device */
679 drv_attr = cpufreq_driver->attr;
680 while ((drv_attr) && (*drv_attr)) {
681 sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
684 if (cpufreq_driver->get)
685 sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
686 if (cpufreq_driver->target)
687 sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
689 spin_lock_irqsave(&cpufreq_driver_lock, flags);
690 for_each_cpu_mask(j, policy->cpus)
691 cpufreq_cpu_data[j] = policy;
692 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
694 /* symlink affected CPUs */
695 for_each_cpu_mask(j, policy->cpus) {
701 dprintk("CPU %u already managed, adding link\n", j);
702 cpufreq_cpu_get(cpu);
703 cpu_sys_dev = get_cpu_sysdev(j);
704 sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
708 policy->governor = NULL; /* to ensure that the starting sequence is
709 * run in cpufreq_set_policy */
710 mutex_unlock(&policy->lock);
712 /* set default policy */
713 ret = cpufreq_set_policy(&new_policy);
715 dprintk("setting policy failed\n");
716 goto err_out_unregister;
719 module_put(cpufreq_driver->owner);
720 dprintk("initialization complete\n");
721 cpufreq_debug_enable_ratelimit();
727 spin_lock_irqsave(&cpufreq_driver_lock, flags);
728 for_each_cpu_mask(j, policy->cpus)
729 cpufreq_cpu_data[j] = NULL;
730 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
732 kobject_unregister(&policy->kobj);
733 wait_for_completion(&policy->kobj_unregister);
736 if (cpufreq_driver->exit)
737 cpufreq_driver->exit(policy);
743 module_put(cpufreq_driver->owner);
745 cpufreq_debug_enable_ratelimit();
751 * cpufreq_remove_dev - remove a CPU device
753 * Removes the cpufreq interface for a CPU device.
755 static int cpufreq_remove_dev (struct sys_device * sys_dev)
757 unsigned int cpu = sys_dev->id;
759 struct cpufreq_policy *data;
761 struct sys_device *cpu_sys_dev;
765 cpufreq_debug_disable_ratelimit();
766 dprintk("unregistering CPU %u\n", cpu);
768 spin_lock_irqsave(&cpufreq_driver_lock, flags);
769 data = cpufreq_cpu_data[cpu];
772 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
773 cpufreq_debug_enable_ratelimit();
776 cpufreq_cpu_data[cpu] = NULL;
780 /* if this isn't the CPU which is the parent of the kobj, we
781 * only need to unlink, put and exit
783 if (unlikely(cpu != data->cpu)) {
784 dprintk("removing link\n");
785 cpu_clear(cpu, data->cpus);
786 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
787 sysfs_remove_link(&sys_dev->kobj, "cpufreq");
788 cpufreq_cpu_put(data);
789 cpufreq_debug_enable_ratelimit();
795 if (!kobject_get(&data->kobj)) {
796 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
797 cpufreq_debug_enable_ratelimit();
802 /* if we have other CPUs still registered, we need to unlink them,
803 * or else wait_for_completion below will lock up. Clean the
804 * cpufreq_cpu_data[] while holding the lock, and remove the sysfs links afterwards.
807 if (unlikely(cpus_weight(data->cpus) > 1)) {
808 for_each_cpu_mask(j, data->cpus) {
811 cpufreq_cpu_data[j] = NULL;
815 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
817 if (unlikely(cpus_weight(data->cpus) > 1)) {
818 for_each_cpu_mask(j, data->cpus) {
821 dprintk("removing link for cpu %u\n", j);
822 cpu_sys_dev = get_cpu_sysdev(j);
823 sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
824 cpufreq_cpu_put(data);
828 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
831 mutex_lock(&data->lock);
832 if (cpufreq_driver->target)
833 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
834 mutex_unlock(&data->lock);
836 kobject_unregister(&data->kobj);
838 kobject_put(&data->kobj);
840 /* we need to make sure that the underlying kobj is actually
841 * not referenced anymore by anybody before we proceed with
844 dprintk("waiting for dropping of refcount\n");
845 wait_for_completion(&data->kobj_unregister);
846 dprintk("wait complete\n");
848 if (cpufreq_driver->exit)
849 cpufreq_driver->exit(data);
853 cpufreq_debug_enable_ratelimit();
858 static void handle_update(void *data)
860 unsigned int cpu = (unsigned int)(long)data;
861 dprintk("handle_update for cpu %u called\n", cpu);
862 cpufreq_update_policy(cpu);
866 * cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're in deep trouble.
868 * @old_freq: CPU frequency the kernel thinks the CPU runs at
869 * @new_freq: CPU frequency the CPU actually runs at
871 * We adjust to current frequency first, and need to clean up later. So either call
872 * to cpufreq_update_policy() or schedule handle_update().
874 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigned int new_freq)
876 struct cpufreq_freqs freqs;
878 dprintk(KERN_WARNING "Warning: CPU frequency out of sync: cpufreq and timing "
879 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
882 freqs.old = old_freq;
883 freqs.new = new_freq;
884 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
885 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
890 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
893 * This is the last known freq, without actually getting it from the driver.
894 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
896 unsigned int cpufreq_quick_get(unsigned int cpu)
898 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
899 unsigned int ret = 0;
902 mutex_lock(&policy->lock);
904 mutex_unlock(&policy->lock);
905 cpufreq_cpu_put(policy);
910 EXPORT_SYMBOL(cpufreq_quick_get);
914 * cpufreq_get - get the current CPU frequency (in kHz)
917 * Get the current (static) frequency of the CPU
919 unsigned int cpufreq_get(unsigned int cpu)
921 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
922 unsigned int ret = 0;
927 if (!cpufreq_driver->get)
930 mutex_lock(&policy->lock);
932 ret = cpufreq_driver->get(cpu);
934 if (ret && policy->cur && !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
935 /* verify no discrepancy between actual and saved value exists */
936 if (unlikely(ret != policy->cur)) {
937 cpufreq_out_of_sync(cpu, policy->cur, ret);
938 schedule_work(&policy->update);
942 mutex_unlock(&policy->lock);
945 cpufreq_cpu_put(policy);
949 EXPORT_SYMBOL(cpufreq_get);
953 * cpufreq_suspend - let the low level driver prepare for suspend
956 static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
958 int cpu = sysdev->id;
959 unsigned int ret = 0;
960 unsigned int cur_freq = 0;
961 struct cpufreq_policy *cpu_policy;
963 dprintk("suspending cpu %u\n", cpu);
965 if (!cpu_online(cpu))
968 /* we may be lax here as interrupts are off. Nonetheless
969 * we need to grab the correct cpu policy, so we can check
970 * whether we really run on this CPU.
973 cpu_policy = cpufreq_cpu_get(cpu);
977 /* only handle each CPU group once */
978 if (unlikely(cpu_policy->cpu != cpu)) {
979 cpufreq_cpu_put(cpu_policy);
983 if (cpufreq_driver->suspend) {
984 ret = cpufreq_driver->suspend(cpu_policy, pmsg);
986 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
987 "step on CPU %u\n", cpu_policy->cpu);
988 cpufreq_cpu_put(cpu_policy);
994 if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
997 if (cpufreq_driver->get)
998 cur_freq = cpufreq_driver->get(cpu_policy->cpu);
1000 if (!cur_freq || !cpu_policy->cur) {
1001 printk(KERN_ERR "cpufreq: suspend failed to assert current "
1002 "frequency is what timing core thinks it is.\n");
1006 if (unlikely(cur_freq != cpu_policy->cur)) {
1007 struct cpufreq_freqs freqs;
1009 if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
1010 dprintk(KERN_DEBUG "Warning: CPU frequency is %u, "
1011 "cpufreq assumed %u kHz.\n",
1012 cur_freq, cpu_policy->cur);
1015 freqs.old = cpu_policy->cur;
1016 freqs.new = cur_freq;
1018 blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
1019 CPUFREQ_SUSPENDCHANGE, &freqs);
1020 adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
1022 cpu_policy->cur = cur_freq;
1026 cpufreq_cpu_put(cpu_policy);
1031 * cpufreq_resume - restore proper CPU frequency handling after resume
1033 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1034 * 2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
1035 * 3.) schedule a call to cpufreq_update_policy() ASAP as interrupts are restored.
1038 static int cpufreq_resume(struct sys_device * sysdev)
1040 int cpu = sysdev->id;
1041 unsigned int ret = 0;
1042 struct cpufreq_policy *cpu_policy;
1044 dprintk("resuming cpu %u\n", cpu);
1046 if (!cpu_online(cpu))
1049 /* we may be lax here as interrupts are off. Nonetheless
1050 * we need to grab the correct cpu policy, so we can check
1051 * whether we really run on this CPU.
1054 cpu_policy = cpufreq_cpu_get(cpu);
1058 /* only handle each CPU group once */
1059 if (unlikely(cpu_policy->cpu != cpu)) {
1060 cpufreq_cpu_put(cpu_policy);
1064 if (cpufreq_driver->resume) {
1065 ret = cpufreq_driver->resume(cpu_policy);
1067 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1068 "step on CPU %u\n", cpu_policy->cpu);
1069 cpufreq_cpu_put(cpu_policy);
1074 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1075 unsigned int cur_freq = 0;
1077 if (cpufreq_driver->get)
1078 cur_freq = cpufreq_driver->get(cpu_policy->cpu);
1080 if (!cur_freq || !cpu_policy->cur) {
1081 printk(KERN_ERR "cpufreq: resume failed to assert "
1082 "current frequency is what timing core thinks it is.\n");
1087 if (unlikely(cur_freq != cpu_policy->cur)) {
1088 struct cpufreq_freqs freqs;
1090 if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
1091 dprintk(KERN_WARNING "Warning: CPU frequency "
1092 "is %u, cpufreq assumed %u kHz.\n",
1093 cur_freq, cpu_policy->cur);
1096 freqs.old = cpu_policy->cur;
1097 freqs.new = cur_freq;
1099 blocking_notifier_call_chain(
1100 &cpufreq_transition_notifier_list,
1101 CPUFREQ_RESUMECHANGE, &freqs);
1102 adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
1104 cpu_policy->cur = cur_freq;
1109 schedule_work(&cpu_policy->update);
1110 cpufreq_cpu_put(cpu_policy);
1114 static struct sysdev_driver cpufreq_sysdev_driver = {
1115 .add = cpufreq_add_dev,
1116 .remove = cpufreq_remove_dev,
1117 .suspend = cpufreq_suspend,
1118 .resume = cpufreq_resume,
1122 /*********************************************************************
1123 * NOTIFIER LISTS INTERFACE *
1124 *********************************************************************/
1127 * cpufreq_register_notifier - register a driver with cpufreq
1128 * @nb: notifier function to register
1129 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1131 * Add a driver to one of two lists: either a list of drivers that
1132 * are notified about clock rate changes (once before and once after
1133 * the transition), or a list of drivers that are notified about
1134 * changes in cpufreq policy.
1136 * This function may sleep, and has the same return conditions as
1137 * blocking_notifier_chain_register.
1139 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1144 case CPUFREQ_TRANSITION_NOTIFIER:
1145 ret = blocking_notifier_chain_register(
1146 &cpufreq_transition_notifier_list, nb);
1148 case CPUFREQ_POLICY_NOTIFIER:
1149 ret = blocking_notifier_chain_register(
1150 &cpufreq_policy_notifier_list, nb);
1158 EXPORT_SYMBOL(cpufreq_register_notifier);
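/* Illustrative sketch (assumption, not from this file): registering a
 * transition notifier. The names my_transition_handler and my_nb are
 * hypothetical; the data argument is a struct cpufreq_freqs pointer.
 *
 *	static int my_transition_handler(struct notifier_block *nb,
 *					 unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			... react to freqs->new ...
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_transition_handler,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */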
1162 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1163 * @nb: notifier block to be unregistered
1164 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1166 * Remove a driver from the CPU frequency notifier list.
1168 * This function may sleep, and has the same return conditions as
1169 * blocking_notifier_chain_unregister.
1171 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1176 case CPUFREQ_TRANSITION_NOTIFIER:
1177 ret = blocking_notifier_chain_unregister(
1178 &cpufreq_transition_notifier_list, nb);
1180 case CPUFREQ_POLICY_NOTIFIER:
1181 ret = blocking_notifier_chain_unregister(
1182 &cpufreq_policy_notifier_list, nb);
1190 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1193 /*********************************************************************
1195 *********************************************************************/
1198 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1199 unsigned int target_freq,
1200 unsigned int relation)
1202 int retval = -EINVAL;
1205 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
1206 target_freq, relation);
1207 if (cpu_online(policy->cpu) && cpufreq_driver->target)
1208 retval = cpufreq_driver->target(policy, target_freq, relation);
1210 unlock_cpu_hotplug();
1214 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1216 int cpufreq_driver_target(struct cpufreq_policy *policy,
1217 unsigned int target_freq,
1218 unsigned int relation)
1222 policy = cpufreq_cpu_get(policy->cpu);
1226 mutex_lock(&policy->lock);
1228 ret = __cpufreq_driver_target(policy, target_freq, relation);
1230 mutex_unlock(&policy->lock);
1232 cpufreq_cpu_put(policy);
1235 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1238 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
1242 if (!try_module_get(policy->governor->owner))
1245 dprintk("__cpufreq_governor for CPU %u, event %u\n", policy->cpu, event);
1246 ret = policy->governor->governor(policy, event);
1248 /* we keep one module reference alive for each CPU governed by this CPU */
1249 if ((event != CPUFREQ_GOV_START) || ret)
1250 module_put(policy->governor->owner);
1251 if ((event == CPUFREQ_GOV_STOP) && !ret)
1252 module_put(policy->governor->owner);
1258 int cpufreq_governor(unsigned int cpu, unsigned int event)
1261 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1266 mutex_lock(&policy->lock);
1267 ret = __cpufreq_governor(policy, event);
1268 mutex_unlock(&policy->lock);
1270 cpufreq_cpu_put(policy);
1273 EXPORT_SYMBOL_GPL(cpufreq_governor);
1276 int cpufreq_register_governor(struct cpufreq_governor *governor)
1278 struct cpufreq_governor *t;
1283 mutex_lock(&cpufreq_governor_mutex);
1285 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
1286 if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) {
1287 mutex_unlock(&cpufreq_governor_mutex);
1291 list_add(&governor->governor_list, &cpufreq_governor_list);
1293 mutex_unlock(&cpufreq_governor_mutex);
1296 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
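/* Illustrative sketch (assumption, not from this file): a minimal governor
 * registers a struct cpufreq_governor and drives the frequency from its
 * ->governor() callback. The my_* names are hypothetical; the body mirrors
 * what a simple static governor would do.
 *
 *	static int my_governor_fn(struct cpufreq_policy *policy,
 *				  unsigned int event)
 *	{
 *		switch (event) {
 *		case CPUFREQ_GOV_START:
 *		case CPUFREQ_GOV_LIMITS:
 *			return __cpufreq_driver_target(policy, policy->max,
 *						       CPUFREQ_RELATION_H);
 *		}
 *		return 0;
 *	}
 *
 *	static struct cpufreq_governor my_governor = {
 *		.name		= "my_governor",
 *		.governor	= my_governor_fn,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	cpufreq_register_governor(&my_governor);
 */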
1299 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1304 mutex_lock(&cpufreq_governor_mutex);
1305 list_del(&governor->governor_list);
1306 mutex_unlock(&cpufreq_governor_mutex);
1309 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1313 /*********************************************************************
1314 * POLICY INTERFACE *
1315 *********************************************************************/
1318 * cpufreq_get_policy - get the current cpufreq_policy
1319 * @policy: struct cpufreq_policy into which the current cpufreq_policy is written
1321 * Reads the current cpufreq policy.
1323 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1325 struct cpufreq_policy *cpu_policy;
1329 cpu_policy = cpufreq_cpu_get(cpu);
1333 mutex_lock(&cpu_policy->lock);
1334 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1335 mutex_unlock(&cpu_policy->lock);
1337 cpufreq_cpu_put(cpu_policy);
1340 EXPORT_SYMBOL(cpufreq_get_policy);
1343 static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
1347 cpufreq_debug_disable_ratelimit();
1348 dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1349 policy->min, policy->max);
1351 memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo));
1353 /* verify the cpu speed can be set within this limit */
1354 ret = cpufreq_driver->verify(policy);
1358 /* adjust if necessary - all reasons */
1359 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1360 CPUFREQ_ADJUST, policy);
1362 /* adjust if necessary - hardware incompatibility*/
1363 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1364 CPUFREQ_INCOMPATIBLE, policy);
1366 /* verify the cpu speed can be set within this limit,
1367 which might be different to the first one */
1368 ret = cpufreq_driver->verify(policy);
1372 /* notification of the new policy */
1373 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1374 CPUFREQ_NOTIFY, policy);
1376 data->min = policy->min;
1377 data->max = policy->max;
1379 dprintk("new min and max freqs are %u - %u kHz\n", data->min, data->max);
1381 if (cpufreq_driver->setpolicy) {
1382 data->policy = policy->policy;
1383 dprintk("setting range\n");
1384 ret = cpufreq_driver->setpolicy(policy);
1386 if (policy->governor != data->governor) {
1387 /* save old, working values */
1388 struct cpufreq_governor *old_gov = data->governor;
1390 dprintk("governor switch\n");
1392 /* end old governor */
1394 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1396 /* start new governor */
1397 data->governor = policy->governor;
1398 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1399 /* new governor failed, so re-start old one */
1400 dprintk("starting governor %s failed\n", data->governor->name);
1402 data->governor = old_gov;
1403 __cpufreq_governor(data, CPUFREQ_GOV_START);
1408 /* might be a policy change, too, so fall through */
1410 dprintk("governor: change or update limits\n");
1411 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1415 cpufreq_debug_enable_ratelimit();
1420 * cpufreq_set_policy - set a new CPUFreq policy
1421 * @policy: policy to be set.
1423 * Sets a new CPU frequency and voltage scaling policy.
1425 int cpufreq_set_policy(struct cpufreq_policy *policy)
1428 struct cpufreq_policy *data;
1433 data = cpufreq_cpu_get(policy->cpu);
1438 mutex_lock(&data->lock);
1440 ret = __cpufreq_set_policy(data, policy);
1441 data->user_policy.min = data->min;
1442 data->user_policy.max = data->max;
1443 data->user_policy.policy = data->policy;
1444 data->user_policy.governor = data->governor;
1446 mutex_unlock(&data->lock);
1447 cpufreq_cpu_put(data);
1451 EXPORT_SYMBOL(cpufreq_set_policy);
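/* Illustrative sketch (assumption, not from this file): code outside the
 * core typically reads the current policy, adjusts the limits and feeds
 * the result back through cpufreq_set_policy(), much like the sysfs
 * store helpers above. The 800000 kHz value is an arbitrary example.
 *
 *	struct cpufreq_policy new_policy;
 *
 *	if (!cpufreq_get_policy(&new_policy, cpu)) {
 *		new_policy.max = 800000;
 *		cpufreq_set_policy(&new_policy);
 *	}
 */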
1455 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1456 * @cpu: CPU which shall be re-evaluated
1458 * Useful for policy notifiers which have different needs
1459 * at different times.
1461 int cpufreq_update_policy(unsigned int cpu)
1463 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1464 struct cpufreq_policy policy;
1470 mutex_lock(&data->lock);
1472 dprintk("updating policy for CPU %u\n", cpu);
1473 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1474 policy.min = data->user_policy.min;
1475 policy.max = data->user_policy.max;
1476 policy.policy = data->user_policy.policy;
1477 policy.governor = data->user_policy.governor;
1479 /* BIOS might change freq behind our back
1480 -> ask driver for current freq and notify governors about a change */
1481 if (cpufreq_driver->get) {
1482 policy.cur = cpufreq_driver->get(cpu);
1484 dprintk("Driver did not initialize current freq\n");
1485 data->cur = policy.cur;
1487 if (data->cur != policy.cur)
1488 cpufreq_out_of_sync(cpu, data->cur, policy.cur);
1492 ret = __cpufreq_set_policy(data, &policy);
1494 mutex_unlock(&data->lock);
1496 cpufreq_cpu_put(data);
1499 EXPORT_SYMBOL(cpufreq_update_policy);
1501 static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1502 unsigned long action, void *hcpu)
1504 unsigned int cpu = (unsigned long)hcpu;
1505 struct cpufreq_policy *policy;
1506 struct sys_device *sys_dev;
1508 sys_dev = get_cpu_sysdev(cpu);
1513 cpufreq_add_dev(sys_dev);
1515 case CPU_DOWN_PREPARE:
1517 * We attempt to put this cpu in lowest frequency
1518 * possible before going down. This will permit
1519 * hardware-managed P-State to switch other related
1520 * threads to min or higher speeds if possible.
1522 policy = cpufreq_cpu_data[cpu];
1524 cpufreq_driver_target(policy, policy->min,
1525 CPUFREQ_RELATION_H);
1529 cpufreq_remove_dev(sys_dev);
1536 static struct notifier_block cpufreq_cpu_notifier =
1538 .notifier_call = cpufreq_cpu_callback,
1541 /*********************************************************************
1542 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1543 *********************************************************************/
1546 * cpufreq_register_driver - register a CPU Frequency driver
1547 * @driver_data: A struct cpufreq_driver containing the values
1548 * submitted by the CPU Frequency driver.
1550 * Registers a CPU Frequency driver to this core code. This code
1551 * returns zero on success, -EBUSY when another driver got here first
1552 * (and isn't unregistered in the meantime).
1555 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1557 unsigned long flags;
1560 if (!driver_data || !driver_data->verify || !driver_data->init ||
1561 ((!driver_data->setpolicy) && (!driver_data->target)))
1564 dprintk("trying to register driver %s\n", driver_data->name);
1566 if (driver_data->setpolicy)
1567 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1569 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1570 if (cpufreq_driver) {
1571 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1574 cpufreq_driver = driver_data;
1575 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1577 ret = sysdev_driver_register(&cpu_sysdev_class,&cpufreq_sysdev_driver);
1579 if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1583 /* check for at least one working CPU */
1584 for (i=0; i<NR_CPUS; i++)
1585 if (cpufreq_cpu_data[i])
1588 /* if all ->init() calls failed, unregister */
1590 dprintk("no CPU initialized for driver %s\n", driver_data->name);
1591 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
1593 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1594 cpufreq_driver = NULL;
1595 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1600 register_cpu_notifier(&cpufreq_cpu_notifier);
1601 dprintk("driver %s up and running\n", driver_data->name);
1602 cpufreq_debug_enable_ratelimit();
1607 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
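/* Illustrative sketch (assumption, not from this file): the minimum a
 * scaling driver must provide before calling cpufreq_register_driver()
 * is ->init, ->verify and either ->target or ->setpolicy. The my_*
 * names are hypothetical.
 *
 *	static struct cpufreq_driver my_driver = {
 *		.name	= "my_driver",
 *		.owner	= THIS_MODULE,
 *		.init	= my_cpu_init,
 *		.verify	= my_verify_policy,
 *		.target	= my_target,
 *	};
 *
 *	static int __init my_driver_module_init(void)
 *	{
 *		return cpufreq_register_driver(&my_driver);
 *	}
 */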
1611 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1613 * Unregister the current CPUFreq driver. Only call this if you have
1614 * the right to do so, i.e. if you have succeeded in initialising before!
1615 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1616 * currently not initialised.
1618 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1620 unsigned long flags;
1622 cpufreq_debug_disable_ratelimit();
1624 if (!cpufreq_driver || (driver != cpufreq_driver)) {
1625 cpufreq_debug_enable_ratelimit();
1629 dprintk("unregistering driver %s\n", driver->name);
1631 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
1632 unregister_cpu_notifier(&cpufreq_cpu_notifier);
1634 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1635 cpufreq_driver = NULL;
1636 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1640 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);