X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fcpufreq%2Fcpufreq.c;h=0d054873da75cde59b2d3817845c32867c26ebfe;hb=c906049447019d69b9cc2d591a142af561afa7f9;hp=2f6a73c01b718318d583123f66aaa6c622dca6a0;hpb=22aadf8a07067644e101267ed5003043f2ad05bf;p=linux-2.6

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2f6a73c01b..0d054873da 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -287,7 +287,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 	if (!l_p_j_ref_freq) {
 		l_p_j_ref = loops_per_jiffy;
 		l_p_j_ref_freq = ci->old;
-		dprintk("saving %lu as reference value for loops_per_jiffy;"
+		dprintk("saving %lu as reference value for loops_per_jiffy; "
 			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
 	}
 	if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
@@ -295,7 +295,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
 		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
-		dprintk("scaling loops_per_jiffy to %lu"
+		dprintk("scaling loops_per_jiffy to %lu "
 			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
 	}
 }
@@ -601,6 +601,31 @@ static ssize_t show_affected_cpus (struct cpufreq_policy * policy, char *buf)
 	return i;
 }
 
+static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
+					const char *buf, size_t count)
+{
+	unsigned int freq = 0;
+	unsigned int ret;
+
+	if (!policy->governor->store_setspeed)
+		return -EINVAL;
+
+	ret = sscanf(buf, "%u", &freq);
+	if (ret != 1)
+		return -EINVAL;
+
+	policy->governor->store_setspeed(policy, freq);
+
+	return count;
+}
+
+static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
+{
+	if (!policy->governor->show_setspeed)
+		return sprintf(buf, "\n");
+
+	return policy->governor->show_setspeed(policy, buf);
+}
 
 #define define_one_ro(_name) \
 static struct freq_attr _name = \
@@ -624,6 +649,7 @@ define_one_ro(affected_cpus);
 define_one_rw(scaling_min_freq);
 define_one_rw(scaling_max_freq);
 define_one_rw(scaling_governor);
+define_one_rw(scaling_setspeed);
 
 static struct attribute * default_attrs[] = {
 	&cpuinfo_min_freq.attr,
@@ -634,6 +660,7 @@ static struct attribute * default_attrs[] = {
 	&scaling_governor.attr,
 	&scaling_driver.attr,
 	&scaling_available_governors.attr,
+	&scaling_setspeed.attr,
 	NULL
 };
 
@@ -644,13 +671,13 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
 {
 	struct cpufreq_policy * policy = to_policy(kobj);
 	struct freq_attr * fattr = to_attr(attr);
-	ssize_t ret;
+	ssize_t ret = -EINVAL;
 	policy = cpufreq_cpu_get(policy->cpu);
 	if (!policy)
-		return -EINVAL;
+		goto no_policy;
 
 	if (lock_policy_rwsem_read(policy->cpu) < 0)
-		return -EINVAL;
+		goto fail;
 
 	if (fattr->show)
 		ret = fattr->show(policy, buf);
@@ -658,8 +685,9 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
 		ret = -EIO;
 
 	unlock_policy_rwsem_read(policy->cpu);
-
+fail:
 	cpufreq_cpu_put(policy);
+no_policy:
 	return ret;
 }
 
@@ -668,13 +696,13 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr,
 {
 	struct cpufreq_policy * policy = to_policy(kobj);
 	struct freq_attr * fattr = to_attr(attr);
-	ssize_t ret;
+	ssize_t ret = -EINVAL;
 	policy = cpufreq_cpu_get(policy->cpu);
 	if (!policy)
-		return -EINVAL;
+		goto no_policy;
 
 	if (lock_policy_rwsem_write(policy->cpu) < 0)
-		return -EINVAL;
+		goto fail;
 
 	if (fattr->store)
 		ret = fattr->store(policy, buf, count);
@@ -682,8 +710,9 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr,
 		ret = -EIO;
 
 	unlock_policy_rwsem_write(policy->cpu);
-
+fail:
 	cpufreq_cpu_put(policy);
+no_policy:
 	return ret;
 }
 
@@ -763,6 +792,8 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	init_completion(&policy->kobj_unregister);
 	INIT_WORK(&policy->update, handle_update);
 
+	/* Set governor before ->init, so that driver could check it */
+	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 	/* call driver. From then on the cpufreq must be able
 	 * to accept all calls to ->verify and ->setpolicy for this CPU
 	 */
@@ -826,11 +857,8 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
 	/* prepare interface data */
-	policy->kobj.parent = &sys_dev->kobj;
-	policy->kobj.ktype = &ktype_cpufreq;
-	strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN);
-
-	ret = kobject_register(&policy->kobj);
+	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
+				   "cpufreq");
 	if (ret) {
 		unlock_policy_rwsem_write(cpu);
 		goto err_out_driver_exit;
 	}
@@ -839,19 +867,25 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	drv_attr = cpufreq_driver->attr;
 	while ((drv_attr) && (*drv_attr)) {
 		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
-		if (ret)
+		if (ret) {
+			unlock_policy_rwsem_write(cpu);
 			goto err_out_driver_exit;
+		}
 		drv_attr++;
 	}
 	if (cpufreq_driver->get){
 		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
-		if (ret)
+		if (ret) {
+			unlock_policy_rwsem_write(cpu);
 			goto err_out_driver_exit;
+		}
 	}
 	if (cpufreq_driver->target){
 		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
-		if (ret)
+		if (ret) {
+			unlock_policy_rwsem_write(cpu);
 			goto err_out_driver_exit;
+		}
 	}
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -894,6 +928,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 		goto err_out_unregister;
 	}
 
+	kobject_uevent(&policy->kobj, KOBJ_ADD);
 	module_put(cpufreq_driver->owner);
 	dprintk("initialization complete\n");
 	cpufreq_debug_enable_ratelimit();
@@ -907,7 +942,7 @@ err_out_unregister:
 		cpufreq_cpu_data[j] = NULL;
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	kobject_unregister(&policy->kobj);
+	kobject_put(&policy->kobj);
 	wait_for_completion(&policy->kobj_unregister);
 
 err_out_driver_exit:
@@ -973,14 +1008,6 @@ static int __cpufreq_remove_dev (struct sys_device * sys_dev)
 	}
 #endif
 
-
-	if (!kobject_get(&data->kobj)) {
-		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-		cpufreq_debug_enable_ratelimit();
-		unlock_policy_rwsem_write(cpu);
-		return -EFAULT;
-	}
-
 #ifdef CONFIG_SMP
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -1024,8 +1051,6 @@ static int __cpufreq_remove_dev (struct sys_device * sys_dev)
 
 	unlock_policy_rwsem_write(cpu);
 
-	kobject_unregister(&data->kobj);
-
 	kobject_put(&data->kobj);
 
 	/* we need to make sure that the underlying kobj is actually
@@ -1109,12 +1134,7 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
 	unsigned int ret_freq = 0;
 
 	if (policy) {
-		if (unlikely(lock_policy_rwsem_read(cpu)))
-			return ret_freq;
-
 		ret_freq = policy->cur;
-
-		unlock_policy_rwsem_read(cpu);
 		cpufreq_cpu_put(policy);
 	}
 
@@ -1201,22 +1221,18 @@ static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
 		return -EINVAL;
 
 	/* only handle each CPU group once */
-	if (unlikely(cpu_policy->cpu != cpu)) {
-		cpufreq_cpu_put(cpu_policy);
-		return 0;
-	}
+	if (unlikely(cpu_policy->cpu != cpu))
+		goto out;
 
 	if (cpufreq_driver->suspend) {
 		ret = cpufreq_driver->suspend(cpu_policy, pmsg);
 		if (ret) {
 			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
 					"step on CPU %u\n", cpu_policy->cpu);
-			cpufreq_cpu_put(cpu_policy);
-			return ret;
+			goto out;
 		}
 	}
 
-
 	if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
 		goto out;
 
@@ -1250,7 +1266,7 @@ static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
 
 out:
 	cpufreq_cpu_put(cpu_policy);
-	return 0;
+	return ret;
 }
 
 /**
@@ -1282,18 +1298,15 @@ static int cpufreq_resume(struct sys_device * sysdev)
 		return -EINVAL;
 
 	/* only handle each CPU group once */
-	if (unlikely(cpu_policy->cpu != cpu)) {
-		cpufreq_cpu_put(cpu_policy);
-		return 0;
-	}
+	if (unlikely(cpu_policy->cpu != cpu))
+		goto fail;
 
 	if (cpufreq_driver->resume) {
 		ret = cpufreq_driver->resume(cpu_policy);
 		if (ret) {
 			printk(KERN_ERR "cpufreq: resume failed in ->resume "
 					"step on CPU %u\n", cpu_policy->cpu);
-			cpufreq_cpu_put(cpu_policy);
-			return ret;
+			goto fail;
 		}
 	}
 
@@ -1314,7 +1327,7 @@ static int cpufreq_resume(struct sys_device * sysdev)
 			struct cpufreq_freqs freqs;
 
 			if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
-				dprintk("Warning: CPU frequency"
+				dprintk("Warning: CPU frequency "
 					"is %u, cpufreq assumed %u kHz.\n",
 					cur_freq, cpu_policy->cur);
 
@@ -1333,6 +1346,7 @@ static int cpufreq_resume(struct sys_device * sysdev)
 
 out:
 	schedule_work(&cpu_policy->update);
+fail:
 	cpufreq_cpu_put(cpu_policy);
 	return ret;
 }
@@ -1483,6 +1497,31 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 {
 	int ret;
 
+	/* Only must be defined when default governor is known to have latency
+	   restrictions, like e.g. conservative or ondemand.
+	   That this is the case is already ensured in Kconfig
+	*/
+#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
+	struct cpufreq_governor *gov = &cpufreq_gov_performance;
+#else
+	struct cpufreq_governor *gov = NULL;
+#endif
+
+	if (policy->governor->max_transition_latency &&
+	    policy->cpuinfo.transition_latency >
+	    policy->governor->max_transition_latency) {
+		if (!gov)
+			return -EINVAL;
+		else {
+			printk(KERN_WARNING "%s governor failed, too long"
+			       " transition latency of HW, fallback"
+			       " to %s governor\n",
+			       policy->governor->name,
+			       gov->name);
+			policy->governor = gov;
+		}
+	}
+
 	if (!try_module_get(policy->governor->owner))
 		return -EINVAL;
 
@@ -1580,7 +1619,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
 	memcpy(&policy->cpuinfo, &data->cpuinfo,
 				sizeof(struct cpufreq_cpuinfo));
 
-	if (policy->min > data->min && policy->min > policy->max) {
+	if (policy->min > data->max || policy->max < data->min) {
 		ret = -EINVAL;
 		goto error_out;
 	}
@@ -1703,7 +1742,7 @@ int cpufreq_update_policy(unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
-static int cpufreq_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
@@ -1732,7 +1771,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata cpufreq_cpu_notifier =
+static struct notifier_block __refdata cpufreq_cpu_notifier =
 {
     .notifier_call = cpufreq_cpu_callback,
 };
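
Note (editor's illustration, not part of the diff above): the new scaling_setspeed attribute only does something for governors that supply the hooks the sysfs handlers forward to; otherwise the store returns -EINVAL and the read yields an empty line, as the added handlers show. The sketch below assumes the companion include/linux/cpufreq.h change in this series adds store_setspeed/show_setspeed members to struct cpufreq_governor with roughly the prototypes implied by their use in this diff. The "example" governor name, the clamping, and the call to __cpufreq_driver_target() are illustrative choices, not taken from this patch; a real governor also needs a ->governor START/STOP handler, omitted here for brevity.

/*
 * Hypothetical setspeed-style governor wiring up the hooks that
 * store_scaling_setspeed()/show_scaling_setspeed() forward to.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufreq.h>

static int example_store_setspeed(struct cpufreq_policy *policy,
				  unsigned int freq)
{
	/* Clamp the request to the current policy limits ... */
	if (freq < policy->min)
		freq = policy->min;
	if (freq > policy->max)
		freq = policy->max;

	/*
	 * ... and program the hardware.  The sysfs store path in cpufreq.c
	 * already holds the policy rwsem, so the unlocked helper is used.
	 */
	return __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
}

static ssize_t example_show_setspeed(struct cpufreq_policy *policy, char *buf)
{
	/* Report the frequency currently set for this policy. */
	return sprintf(buf, "%u\n", policy->cur);
}

static struct cpufreq_governor cpufreq_gov_example = {
	.name		= "example",
	/* .governor	= example_governor_event_handler, (omitted) */
	.store_setspeed	= example_store_setspeed,
	.show_setspeed	= example_show_setspeed,
	.owner		= THIS_MODULE,
};

static int __init cpufreq_gov_example_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_example);
}

static void __exit cpufreq_gov_example_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_example);
}

module_init(cpufreq_gov_example_init);
module_exit(cpufreq_gov_example_exit);
MODULE_LICENSE("GPL");

With such a governor selected, writing a kHz value to /sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed ends up in example_store_setspeed(), and reading the file goes through example_show_setspeed().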