/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * 			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 * 			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */

#include <asm/io.h>		/* inl()/inb()/inb_p() */
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT	0x01000000
#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_DRIVER_NAME	"ACPI Processor Driver"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("acpi_processor")
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD			4	/* ~1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* ~1us (3.579 ticks per us) */
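
/*
 * For reference: the ACPI PM timer runs at PM_TIMER_FREQUENCY (3579545 Hz),
 * i.e. ~3.58 ticks per microsecond -- the "3.579 ticks per us" above.
 * Worked example of the conversion macro:
 *	US_TO_PM_TIMER_TICKS(100) = (100 * (3579545/1000)) / 1000
 *				  = (100 * 3579) / 1000 = 357 ticks
 * and the 4-tick C2/C3 overhead constants correspond to the ~1.1us
 * spent in the PM timer reads that bracket each C2/C3 entry.
 */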

static void (*pm_idle_save) (void);
module_param(max_cstate, uint, 0644);

static unsigned int nocst = 0;
module_param(nocst, uint, 0000);

/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
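
/*
 * Example (illustrative): with HZ=250 the default evaluates to
 * (1U << (250 / 25)) - 1 = (1U << 10) - 1 = 0x3FF, i.e. a 10-jiffy
 * (40ms) window of bus-master history.  Loading the module with a
 * narrower bm_history mask makes a single DMA burst block C3 entry
 * for a shorter time, hence more aggressive C3 use.
 */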

/* --------------------------------------------------------------------------
                                Power Management
   -------------------------------------------------------------------------- */

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

static struct dmi_system_id __initdata processor_power_dmi_table[] = {
	{set_max_cstate, "IBM ThinkPad R40e", {
					       DMI_MATCH(DMI_BIOS_VENDOR,
							 "IBM"),
					       DMI_MATCH(DMI_BIOS_VERSION,
							 "1SET60WW")},
	 (void *)1},
	{set_max_cstate, "Medion 41700", {
					  DMI_MATCH(DMI_BIOS_VENDOR,
						    "Phoenix Technologies LTD"),
					  DMI_MATCH(DMI_BIOS_VERSION,
						    "R01-A1J")}, (void *)1},
	{set_max_cstate, "Clevo 5600D", {
					 DMI_MATCH(DMI_BIOS_VENDOR,
						   "Phoenix Technologies LTD"),
					 DMI_MATCH(DMI_BIOS_VERSION,
						   "SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};

static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!acpi_fadt.tmr_val_ext)
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}
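
/*
 * Wraparound example (illustrative): with a 24-bit PM timer
 * (acpi_fadt.tmr_val_ext == 0), t1 = 0x00FFFFF0 and t2 = 0x00000010
 * yield ((0x00FFFFFF - 0x00FFFFF0) + 0x00000010) & 0x00FFFFFF = 0x1F,
 * i.e. 31 ticks, instead of the bogus value naive subtraction gives.
 */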

static void
acpi_processor_power_activate(struct acpi_processor *pr,
			      struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;
	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state: disable bus master reload when
	 * leaving C3 for a non-C3 state. */
	if (old && old->type == ACPI_STATE_C3 &&
	    new->type != ACPI_STATE_C3 && pr->flags.bm_check)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0,
				  ACPI_MTX_DO_NOT_LOCK);

	/* Prepare to use new state: enable bus master reload when
	 * entering C3 from a non-C3 state. */
	if (new->type == ACPI_STATE_C3 &&
	    (!old || old->type != ACPI_STATE_C3) && pr->flags.bm_check)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1,
				  ACPI_MTX_DO_NOT_LOCK);

	pr->power.state = new;

	return;
}
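
/*
 * Fall-back C1 entry via HLT.  TIF_POLLING_NRFLAG tells the scheduler
 * that the idle task polls need_resched() and needs no reschedule IPI;
 * it must be cleared (with a memory barrier so other CPUs see it)
 * before halting, so that a remote wakeup arrives as a real interrupt
 * and breaks us out of the halt.
 */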
static void acpi_safe_halt(void)
{
	int polling = test_thread_flag(TIF_POLLING_NRFLAG);

	if (polling) {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();
	}
	if (!need_resched())
		safe_halt();
	if (polling)
		set_thread_flag(TIF_POLLING_NRFLAG);
}

static atomic_t c3_cpu_count;

static void acpi_processor_idle(void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	pr = processors[smp_processor_id()];
	if (!pr)
		return;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;

	/*
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 32)
			diff = 32;

		while (diff) {
			/* if we didn't get called, assume there was busmaster activity */
			diff--;
			if (diff)
				pr->power.bm_activity |= 0x1;
			pr->power.bm_activity <<= 1;
		}

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS,
				  &bm_status, ACPI_MTX_DO_NOT_LOCK);
		if (bm_status) {
			pr->power.bm_activity++;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS,
					  1, ACPI_MTX_DO_NOT_LOCK);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity++;
		}

		pr->power.bm_check_timestamp = jiffies;
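
		/*
		 * Illustration: bm_activity is a sliding window of
		 * bus-master activity, one bit per jiffy, shifted left
		 * each jiffy; bit 0 is set when BM_STS (or the PIIX4
		 * BMIDEA probe above) reported DMA.  A value of ...0101
		 * means activity one and three jiffies ago.  The demotion
		 * check below simply ANDs this window against
		 * cx->demotion.threshold.bm.
		 */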

		/*
		 * Apply bus mastering demotion policy.  Automatically demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fallback to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if (pr->power.bm_activity & cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !acpi_fadt.plvl2_up)
		cx = &pr->power.states[ACPI_STATE_C1];
#endif

	cx->usage++;

	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 */
		sleep_ticks = 0xFFFFFFFF;
		break;

	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Invoke C2 */
		inb(cx->address);
		/* Dummy op - must do something useless after P_LVL2 read */
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Get end time (ticks) */
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Re-enable interrupts */
		local_irq_enable();
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks =
		    ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
		break;

	case ACPI_STATE_C3:
		if (pr->flags.bm_check) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
						  ACPI_MTX_DO_NOT_LOCK);
			}
		} else {
			/* SMP with no shared cache... Invalidate cache */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Invoke C3 */
		inb(cx->address);
		/* Dummy op - must do something useless after P_LVL3 read */
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Get end time (ticks) */
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
		if (pr->flags.bm_check) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
					  ACPI_MTX_DO_NOT_LOCK);
		}

		/* Re-enable interrupts */
		local_irq_enable();
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks =
		    ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
		break;

	default:
		local_irq_enable();
		return;
	}

	next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !acpi_fadt.plvl2_up) {
		next_state = cx;
		goto end;
	}
#endif

	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!(pr->power.bm_activity & cx->
					      promotion.threshold.bm)) {
						next_state =
						    cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}

	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}

      end:
	/*
	 * Demote if current state exceeds max_cstate
	 */
	if ((pr->power.state - pr->power.states) > max_cstate) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);

	return;
}

static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy");

	if (!pr)
		return_VALUE(-EINVAL);

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return_VALUE(-ENODEV);

	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return_VALUE(0);
}
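
/*
 * Resulting default policy (illustrative), for a processor with valid
 * C1/C2/C3 states:
 *
 *	state	demotion.state	promotion.state	promotion.count
 *	 C1	      --	      C2	      10
 *	 C2	      C1	      C3	       4
 *	 C3	      C2	      --	      --
 *
 * i.e. ten long-enough sleeps promote C1->C2, four promote C2->C3,
 * while a single short sleep (demotion.threshold.count == 1) or
 * bus-master activity within bm_history demotes one level.
 */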

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_fadt");

	if (!pr)
		return_VALUE(-EINVAL);

	if (!pr->pblk)
		return_VALUE(-ENODEV);

	memset(pr->power.states, 0, sizeof(pr->power.states));

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

	/* the C0 state only exists as a filler in our array,
	 * and all processors need to support C1 */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	pr->power.states[ACPI_STATE_C1].valid = 1;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) && !acpi_fadt.plvl2_up)
		return_VALUE(-ENODEV);
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat;
	pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return_VALUE(0);
}

static int acpi_processor_get_power_info_default_c1(struct acpi_processor *pr)
{
	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_default_c1");

	memset(pr->power.states, 0, sizeof(pr->power.states));

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

	/* the C0 state only exists as a filler in our array,
	 * and all processors need to support C1 */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	pr->power.states[ACPI_STATE_C1].valid = 1;

	return_VALUE(0);
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_cst");

	if (nocst)
		return_VALUE(-ENODEV);

	pr->power.count = 0;
	for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++)
		memset(&(pr->power.states[i]), 0,
		       sizeof(struct acpi_processor_cx));

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return_VALUE(-ENODEV);
	}

	cst = (union acpi_object *)buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
				  "not enough elements in _CST\n"));
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
				  "count given by _CST is not valid\n"));
		status = -EFAULT;
		goto end;
	}

	/* We support up to ACPI_PROCESSOR_MAX_POWER. */
	if (count > ACPI_PROCESSOR_MAX_POWER) {
		printk(KERN_WARNING
		       "Limiting number of power states to max (%d)\n",
		       ACPI_PROCESSOR_MAX_POWER);
		printk(KERN_WARNING
		       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
		count = ACPI_PROCESSOR_MAX_POWER;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;
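
	/*
	 * For reference (from the ACPI spec, not this file): each _CST
	 * entry parsed below is a 4-element package, e.g.
	 *
	 *	Package {
	 *		ResourceTemplate() { Register(...) },	// entry method
	 *		2,		// type (1=C1, 2=C2, 3=C3)
	 *		100,		// worst-case latency, us
	 *		500		// typical power consumption, mW
	 *	}
	 */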

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = (union acpi_object *)&(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = (union acpi_object *)&(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		cx.address = (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) ?
		    0 : reg->address;

		/* There should be an easy way to extract an integer... */
		obj = (union acpi_object *)&(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;

		if ((cx.type != ACPI_STATE_C1) &&
		    (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO))
			continue;

		if ((cx.type < ACPI_STATE_C1) || (cx.type > ACPI_STATE_C3))
			continue;

		obj = (union acpi_object *)&(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = (union acpi_object *)&(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		(pr->power.count)++;
		memcpy(&(pr->power.states[pr->power.count]), &cx, sizeof(cx));
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  pr->power.count));

	/* Validate number of power states discovered */
	if (pr->power.count < 2)
		status = -EFAULT;

      end:
	acpi_os_free(buffer.pointer);

	return_VALUE(status);
}

static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
	ACPI_FUNCTION_TRACE("acpi_processor_power_verify_c2");

	if (!cx->address)
		return_VOID;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return_VOID;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

	return_VOID;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;

	ACPI_FUNCTION_TRACE("acpi_processor_power_verify_c3");

	if (!cx->address)
		return_VOID;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return_VOID;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return_VOID;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else
		pr->flags.bm_check = bm_check_flag;

	if (pr->flags.bm_check) {
		/* bus mastering control is necessary */
		if (!pr->flags.bm_control) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "C3 support requires bus mastering control\n"));
			return_VOID;
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported when bm_check is not required.
		 */
		if (acpi_fadt.wb_invd != 1) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return_VOID;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD,
				  0, ACPI_MTX_DO_NOT_LOCK);
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

	return_VOID;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;
		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			break;
		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	return (working);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info");

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if ((result) || (acpi_processor_power_verify(pr) < 2))
		result = acpi_processor_get_power_info_default_c1(pr);

	/*
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return_VALUE(result);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return_VALUE(0);
}

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;

	ACPI_FUNCTION_TRACE("acpi_processor_cst_has_changed");

	if (!pr)
		return_VALUE(-EINVAL);

	if (nocst)
		return_VALUE(-ENODEV);

	if (!pr->flags.power_setup_done)
		return_VALUE(-ENODEV);

	/* Fall back to the default idle loop */
	pm_idle = pm_idle_save;
	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return_VALUE(result);
}

static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = (struct acpi_processor *)seq->private;
	unsigned int i;

	ACPI_FUNCTION_TRACE("acpi_processor_power_seq_show");

	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity);

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage);
	}

      end:
	return_VALUE(0);
}
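
/*
 * Sample /proc/acpi/processor/CPU0/power output (illustrative):
 *
 *	active state:            C2
 *	max_cstate:              C8
 *	bus master activity:     00000000
 *	states:
 *	    C1:                  type[C1] promotion[C2] demotion[--] latency[000] usage[00000020]
 *	   *C2:                  type[C2] promotion[C3] demotion[C1] latency[001] usage[10000000]
 *	    C3:                  type[C3] promotion[--] demotion[C2] latency[085] usage[05000000]
 */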

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static struct file_operations acpi_processor_power_fops = {
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

int acpi_processor_power_init(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run = 0;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	ACPI_FUNCTION_TRACE("acpi_processor_power_init");

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
	}

	if (!pr)
		return_VALUE(-EINVAL);

	if (acpi_fadt.cst_cnt && !nocst) {
		status =
		    acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
					  "Notifying BIOS of _CST ability failed\n"));
		}
	}

	acpi_processor_power_init_pdc(&(pr->power), pr->id);
	acpi_processor_set_pdc(pr, pr->power.pdc);
	acpi_processor_get_power_info(pr);

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on platforms
	 * that only support C1.
	 */
	if ((pr->flags.power) && (!boot_option_idle_override)) {
		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");

		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
	}

	/* 'power' [R] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  S_IRUGO, acpi_device_dir(device));
	if (!entry)
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
				  "Unable to create '%s' fs entry\n",
				  ACPI_PROCESSOR_FILE_POWER));
	else {
		entry->proc_fops = &acpi_processor_power_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	pr->flags.power_setup_done = 1;

	return_VALUE(0);
}

int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	ACPI_FUNCTION_TRACE("acpi_processor_power_exit");

	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle).  Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
	}

	return_VALUE(0);
}