/*
 * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  - Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * TBD:
 *	1. Make # power states dynamic.
 *	2. Support duty_cycle values that span bit 4.
 *	3. Optimize by having the scheduler determine busyness instead of
 *	   having us try to calculate it here.
 *	4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>

#include <asm/system.h>
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>

#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>
#define ACPI_PROCESSOR_COMPONENT	0x01000000
#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_DEVICE_NAME	"Processor"
#define ACPI_PROCESSOR_FILE_INFO	"info"
#define ACPI_PROCESSOR_FILE_THROTTLING	"throttling"
#define ACPI_PROCESSOR_FILE_LIMIT	"limit"
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER	0x81
#define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82

#define ACPI_PROCESSOR_LIMIT_USER	0
#define ACPI_PROCESSOR_LIMIT_THERMAL	1

#define _COMPONENT	ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");

MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");
static int acpi_processor_add(struct acpi_device *device);
static int acpi_processor_start(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
static void acpi_processor_notify(acpi_handle handle, u32 event, void *data);
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
static int acpi_processor_handle_eject(struct acpi_processor *pr);
extern int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_HID, 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

static struct acpi_driver acpi_processor_driver = {
	.class = ACPI_PROCESSOR_CLASS,
	.ids = processor_device_ids,
	.ops = {
		.add = acpi_processor_add,
		.remove = acpi_processor_remove,
		.start = acpi_processor_start,
		.suspend = acpi_processor_suspend,
		.resume = acpi_processor_resume,
		},
};

#define INSTALL_NOTIFY_HANDLER		1
#define UNINSTALL_NOTIFY_HANDLER	2
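
/*
 * Action codes passed (via a pointer) to processor_walk_namespace_cb()
 * below, telling it whether to install or remove the hotplug notify
 * handler on each Processor object it visits.
 */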
static const struct file_operations acpi_processor_info_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_info_open_fs,
	.release = single_release,
};

struct acpi_processor *processors[NR_CPUS];
struct acpi_processor_errata errata __read_mostly;
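
/*
 * processors[] is indexed by logical CPU id (pr->id); it is filled in by
 * acpi_processor_start() and consulted by acpi_cpu_soft_notify() below.
 * errata records the chipset quirks (PIIX4 throttling, BM-IDE status port,
 * Type-F DMA livelock) detected by acpi_processor_errata().
 */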
/* --------------------------------------------------------------------------
   -------------------------------------------------------------------------- */

static int acpi_processor_errata_piix4(struct pci_dev *dev)
	/*
	 * Note that 'dev' references the PIIX4 ACPI Controller.
	 */

	switch (dev->revision) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));

	switch (dev->revision) {

	case 0:		/* PIIX4 A-step */
	case 1:		/* PIIX4 B-step */
		/*
		 * See specification changes #13 ("Manual Throttle Duty Cycle")
		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
		 * erratum #5 ("STPCLK# Deassertion Time") from the January
		 * 2002 PIIX4 specification update.  Applies to only older
		 * PIIX4 models.
		 */
		errata.piix4.throttle = 1;

		/*
		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
		 * Livelock") from the January 2002 PIIX4 specification update.
		 * Applies to all PIIX4 models.
		 */

		/*
		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
		 * Status register address.  We'll use this later to read
		 * each IDE controller's DMA status to make sure we catch all
		 * false (and true) positives.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		errata.piix4.bmisx = pci_resource_start(dev, 4);

		/*
		 * Find the PIIX4 ISA Controller and read the Motherboard
		 * DMA controller's status to see if Type-F (Fast) DMA mode
		 * is enabled (bit 7) on either channel.  Note that we'll
		 * disable C3 support if this is enabled, as some legacy
		 * devices won't operate well if fast DMA is disabled.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB_0,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		pci_read_config_byte(dev, 0x76, &value1);
		pci_read_config_byte(dev, 0x77, &value2);
		if ((value1 & 0x80) || (value2 & 0x80))
			errata.piix4.fdma = 1;

	if (errata.piix4.bmisx)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus master activity detection (BM-IDE) erratum enabled\n"));
	if (errata.piix4.fdma)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Type-F DMA livelock erratum (C3 disabled)\n"));
static int acpi_processor_errata(struct acpi_processor *pr)
	struct pci_dev *dev = NULL;

	dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
			     PCI_ANY_ID, NULL);
	result = acpi_processor_errata_piix4(dev);

/* --------------------------------------------------------------------------
                        Common ACPI processor functions
   -------------------------------------------------------------------------- */

/*
 * _PDC is required for a BIOS-OS handshake for most of the newer
 * ACPI processor features.
 */
static int acpi_processor_set_pdc(struct acpi_processor *pr)
	struct acpi_object_list *pdc_in = pr->pdc;
	acpi_status status = AE_OK;

	status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL);

	if (ACPI_FAILURE(status))
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Could not evaluate _PDC, using legacy perf. control...\n"));
/* --------------------------------------------------------------------------
   -------------------------------------------------------------------------- */

static struct proc_dir_entry *acpi_processor_dir = NULL;

static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
	struct acpi_processor *pr = seq->private;

	seq_printf(seq, "processor id: %d\n"
		   "bus mastering control: %s\n"
		   "power management: %s\n"
		   "throttling control: %s\n"
		   "limit interface: %s\n",
		   pr->flags.bm_control ? "yes" : "no",
		   pr->flags.power ? "yes" : "no",
		   pr->flags.throttling ? "yes" : "no",
		   pr->flags.limit ? "yes" : "no");

static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
	return single_open(file, acpi_processor_info_seq_show,

static int acpi_processor_add_fs(struct acpi_device *device)
	struct proc_dir_entry *entry = NULL;

	if (!acpi_device_dir(device)) {
		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
		if (!acpi_device_dir(device))
	acpi_device_dir(device)->owner = THIS_MODULE;

	entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO,
				 S_IRUGO, acpi_device_dir(device),
				 &acpi_processor_info_fops,
				 acpi_driver_data(device));

	/* 'throttling' [R/W] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
				 S_IFREG | S_IRUGO | S_IWUSR,
				 acpi_device_dir(device),
				 &acpi_processor_throttling_fops,
				 acpi_driver_data(device));

	entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT,
				 S_IFREG | S_IRUGO | S_IWUSR,
				 acpi_device_dir(device),
				 &acpi_processor_limit_fops,
				 acpi_driver_data(device));

static int acpi_processor_remove_fs(struct acpi_device *device)

	if (acpi_device_dir(device)) {
		remove_proc_entry(ACPI_PROCESSOR_FILE_INFO,
				  acpi_device_dir(device));
		remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
				  acpi_device_dir(device));
		remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
				  acpi_device_dir(device));
		remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
		acpi_device_dir(device) = NULL;
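
/*
 * The /proc interface above creates one directory per processor object,
 * named after its ACPI bus id and containing the 'info', 'throttling' and
 * 'limit' files.  Illustrative example (the directory name is the
 * BIOS-provided bid, assumed here to be "CPU0", and acpi_root_dir is
 * normally /proc/acpi):
 *
 *	$ cat /proc/acpi/processor/CPU0/info
 *	processor id: 0
 *	bus mastering control: yes
 *	power management: yes
 *	throttling control: yes
 *	limit interface: yes
 */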
/* Use the ACPI id in the MADT to map CPUs in case of SMP */

static int get_cpu_id(acpi_handle handle, u32 acpi_id) { return -1; }

static struct acpi_table_madt *madt;

static int map_lapic_id(struct acpi_subtable_header *entry,
			u32 acpi_id, int *apic_id)
	struct acpi_madt_local_apic *lapic =
		(struct acpi_madt_local_apic *)entry;
	if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
	    lapic->processor_id == acpi_id) {
		*apic_id = lapic->id;

static int map_lsapic_id(struct acpi_subtable_header *entry,
			 u32 acpi_id, int *apic_id)
	struct acpi_madt_local_sapic *lsapic =
		(struct acpi_madt_local_sapic *)entry;
	/* Only check enabled APICs */
	if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
		/* First check against id */
		if (lsapic->processor_id == acpi_id) {
			*apic_id = (lsapic->id << 8) | lsapic->eid;
		/* Check against optional uid */
		} else if (entry->length >= 16 &&
			   lsapic->uid == acpi_id) {
			*apic_id = lsapic->uid;

static int map_madt_entry(u32 acpi_id)
	unsigned long madt_end, entry;

	entry = (unsigned long)madt;
	madt_end = entry + madt->header.length;

	/* Parse all entries looking for a match. */

	entry += sizeof(struct acpi_table_madt);
	while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
		struct acpi_subtable_header *header =
			(struct acpi_subtable_header *)entry;
		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
			if (map_lapic_id(header, acpi_id, &apic_id))
		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
			if (map_lsapic_id(header, acpi_id, &apic_id))
		entry += header->length;

static int map_mat_entry(acpi_handle handle, u32 acpi_id)
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	struct acpi_subtable_header *header;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))

	if (!buffer.length || !buffer.pointer)

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length < sizeof(struct acpi_subtable_header)) {

	header = (struct acpi_subtable_header *)obj->buffer.pointer;
	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
		map_lapic_id(header, acpi_id, &apic_id);
	} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
		map_lsapic_id(header, acpi_id, &apic_id);
	}

	kfree(buffer.pointer);
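
/*
 * get_cpu_id() below maps an ACPI processor id to a logical CPU: it first
 * tries the processor's _MAT method (map_mat_entry), falls back to scanning
 * the static MADT (map_madt_entry), and then matches the resulting APIC id
 * against cpu_physical_id() for each possible CPU.
 */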
static int get_cpu_id(acpi_handle handle, u32 acpi_id)

	apic_id = map_mat_entry(handle, acpi_id);
	apic_id = map_madt_entry(acpi_id);

	for_each_possible_cpu(i) {
		if (cpu_physical_id(i) == apic_id)

/* --------------------------------------------------------------------------
   -------------------------------------------------------------------------- */

static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
	acpi_status status = 0;
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
	static int cpu0_initialized;

	if (num_online_cpus() > 1)

	acpi_processor_errata(pr);

	/*
	 * Check to see if we have bus mastering arbitration control.  This
	 * is required for proper C3 usage (to maintain cache coherency).
	 */
	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
		pr->flags.bm_control = 1;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus mastering arbitration control present\n"));
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "No bus mastering arbitration control\n"));

	/* Check if it is a Device with HID and UID */
	status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Evaluating processor _UID\n");

	/*
	 * Evaluate the processor object.  Note that it is common on SMP to
	 * have the first (boot) processor with a valid PBLK address while
	 * all others have a NULL address.
	 */
	status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Evaluating processor object\n");

	/*
	 * TBD: Sync processor ID (via LAPIC/LSAPIC structures) on SMP.
	 *	>>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c
	 */
	pr->acpi_id = object.processor.proc_id;

	cpu_index = get_cpu_id(pr->handle, pr->acpi_id);

	/* Handle UP system running SMP kernel, with no LAPIC in MADT */
	if (!cpu0_initialized && (cpu_index == -1) &&
	    (num_online_cpus() == 1)) {
		cpu0_initialized = 1;

	/*
	 * Extra Processor objects may be enumerated on MP systems with
	 * less than the max # of CPUs.  They should be ignored iff
	 * they are physically not present.
	 */
	    (acpi_processor_hotadd_init(pr->handle, &pr->id))) {

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,

	if (!object.processor.pblk_address)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
	else if (object.processor.pblk_length != 6)
		printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n",
		       object.processor.pblk_length);

	pr->throttling.address = object.processor.pblk_address;
	pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
	pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

	pr->pblk = object.processor.pblk_address;

	/*
	 * We don't care about error returns - we just try to mark
	 * these reserved so that nobody else is confused into thinking
	 * that this region might be unused.
	 *
	 * (In particular, allocating the IO range for Cardbus.)
	 */
	request_region(pr->throttling.address, 6, "ACPI CPU throttle");

	/*
	 * If ACPI describes a slot number for this CPU, we can use it to
	 * ensure we get the right value in the "physical id" field.
	 */
	status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
	if (ACPI_SUCCESS(status))
		arch_fix_phys_package_id(pr->id, object.integer.value);
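
/*
 * On success, acpi_processor_get_info() has established pr->id (the logical
 * CPU, via get_cpu_id() or hot-add init) and pr->acpi_id, copied the PBLK
 * address and the FADT duty_offset/duty_width into pr->throttling, and
 * reserved the 6-byte throttling I/O range.
 */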
static void *processor_device_array[NR_CPUS];

static int __cpuinit acpi_processor_start(struct acpi_device *device)
	acpi_status status = AE_OK;
	struct acpi_processor *pr;

	pr = acpi_driver_data(device);

	result = acpi_processor_get_info(pr, device->flags.unique_id);
	/* Processor is physically not present */

	BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));

	/*
	 * ACPI id of processors can be reported wrongly by the BIOS.
	 * Don't trust it blindly.
	 */
	if (processor_device_array[pr->id] != NULL &&
	    processor_device_array[pr->id] != device) {
		printk(KERN_WARNING "BIOS reported wrong ACPI id "
		       "for the processor\n");
	processor_device_array[pr->id] = device;

	processors[pr->id] = pr;

	result = acpi_processor_add_fs(device);

	status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
					     acpi_processor_notify, pr);

	/* _PDC call should be done before doing anything else (if reqd.). */
	arch_acpi_processor_init_pdc(pr);
	acpi_processor_set_pdc(pr);
#ifdef CONFIG_CPU_FREQ
	acpi_processor_ppc_has_changed(pr);
#endif
	acpi_processor_get_throttling_info(pr);
	acpi_processor_get_limit_info(pr);

	acpi_processor_power_init(pr, device);

	pr->cdev = thermal_cooling_device_register("Processor", device,
						   &processor_cooling_ops);
	if (IS_ERR(pr->cdev)) {
		result = PTR_ERR(pr->cdev);

	printk(KERN_INFO PREFIX
	       "%s is registered as cooling_device%d\n",
	       device->dev.bus_id, pr->cdev->id);

	result = sysfs_create_link(&device->dev.kobj,
				   &pr->cdev->device.kobj,

	result = sysfs_create_link(&pr->cdev->device.kobj,

	if (pr->flags.throttling) {
		printk(KERN_INFO PREFIX "%s [%s] (supports",
		       acpi_device_name(device), acpi_device_bid(device));
		printk(" %d throttling states", pr->throttling.state_count);
static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
	struct acpi_processor *pr = data;
	struct acpi_device *device = NULL;

	if (acpi_bus_get_device(pr->handle, &device))

	case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
		saved = pr->performance_platform_limit;
		acpi_processor_ppc_has_changed(pr);
		if (saved == pr->performance_platform_limit)
		acpi_bus_generate_proc_event(device, event,
					     pr->performance_platform_limit);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						device->dev.bus_id, event,
						pr->performance_platform_limit);
	case ACPI_PROCESSOR_NOTIFY_POWER:
		acpi_processor_cst_has_changed(pr);
		acpi_bus_generate_proc_event(device, event, 0);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						device->dev.bus_id, event, 0);
	case ACPI_PROCESSOR_NOTIFY_THROTTLING:
		acpi_processor_tstate_has_changed(pr);
		acpi_bus_generate_proc_event(device, event, 0);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						device->dev.bus_id, event, 0);
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Unsupported event [0x%x]\n", event));

static int acpi_cpu_soft_notify(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
	unsigned int cpu = (unsigned long)hcpu;
	struct acpi_processor *pr = processors[cpu];

	if (action == CPU_ONLINE && pr) {
		acpi_processor_ppc_has_changed(pr);
		acpi_processor_cst_has_changed(pr);
		acpi_processor_tstate_has_changed(pr);

static struct notifier_block acpi_cpu_notifier =
{
	.notifier_call = acpi_cpu_soft_notify,
};
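
/*
 * When a CPU comes back online, acpi_cpu_soft_notify() re-evaluates the
 * platform performance limit, C-states and throttling states for that
 * processor so that stale cached data is not reused.
 */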
static int acpi_processor_add(struct acpi_device *device)
	struct acpi_processor *pr = NULL;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	acpi_driver_data(device) = pr;

static int acpi_processor_remove(struct acpi_device *device, int type)
	acpi_status status = AE_OK;
	struct acpi_processor *pr = NULL;

	if (!device || !acpi_driver_data(device))

	pr = acpi_driver_data(device);

	if (pr->id >= nr_cpu_ids) {

	if (type == ACPI_BUS_REMOVAL_EJECT) {
		if (acpi_processor_handle_eject(pr))

	acpi_processor_power_exit(pr, device);

	status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
					    acpi_processor_notify);

	acpi_processor_remove_fs(device);

	sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
	sysfs_remove_link(&pr->cdev->device.kobj, "device");
	thermal_cooling_device_unregister(pr->cdev);

	processors[pr->id] = NULL;
	processor_device_array[pr->id] = NULL;
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/****************************************************************************
 *	ACPI processor hotplug support					    *
 ****************************************************************************/

static int is_processor_present(acpi_handle handle)
	unsigned long sta = 0;

	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);

	if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))

	/*
	 * _STA is mandatory for a processor that supports hot plug.
	 */
	if (status == AE_NOT_FOUND)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Processor does not support hot plug\n"));
	ACPI_EXCEPTION((AE_INFO, status,
			"Processor Device is not present"));
int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
	struct acpi_device *pdev;
	struct acpi_processor *pr;

	if (acpi_get_parent(handle, &phandle)) {

	if (acpi_bus_get_device(phandle, &pdev)) {

	if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {

	acpi_bus_start(*device);

	pr = acpi_driver_data(*device);

	if ((pr->id >= 0) && (pr->id < nr_cpu_ids)) {
		kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);

static void __ref acpi_processor_hotplug_notify(acpi_handle handle,
						u32 event, void *data)
	struct acpi_processor *pr;
	struct acpi_device *device = NULL;

	case ACPI_NOTIFY_BUS_CHECK:
	case ACPI_NOTIFY_DEVICE_CHECK:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Processor driver received %s event\n",
				  (event == ACPI_NOTIFY_BUS_CHECK) ?
				  "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));

		if (!is_processor_present(handle))

		if (acpi_bus_get_device(handle, &device)) {
			result = acpi_processor_device_add(handle, &device);
				printk(KERN_ERR PREFIX
				       "Unable to add the device\n");

		pr = acpi_driver_data(device);
			printk(KERN_ERR PREFIX "Driver data is NULL\n");

		if (pr->id >= 0 && (pr->id < nr_cpu_ids)) {
			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);

		result = acpi_processor_start(device);
		if ((!result) && ((pr->id >= 0) && (pr->id < nr_cpu_ids))) {
			kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
			printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
			       acpi_device_bid(device));

	case ACPI_NOTIFY_EJECT_REQUEST:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "received ACPI_NOTIFY_EJECT_REQUEST\n"));

		if (acpi_bus_get_device(handle, &device)) {
			printk(KERN_ERR PREFIX
			       "Device doesn't exist, dropping EJECT\n");
		pr = acpi_driver_data(device);
			printk(KERN_ERR PREFIX
			       "Driver data is NULL, dropping EJECT\n");

		if ((pr->id < nr_cpu_ids) && (cpu_present(pr->id)))
			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Unsupported event [0x%x]\n", event));
processor_walk_namespace_cb(acpi_handle handle,
			    u32 lvl, void *context, void **rv)
	int *action = context;
	acpi_object_type type = 0;

	status = acpi_get_type(handle, &type);
	if (ACPI_FAILURE(status))

	if (type != ACPI_TYPE_PROCESSOR)

	case INSTALL_NOTIFY_HANDLER:
		acpi_install_notify_handler(handle,
					    acpi_processor_hotplug_notify,
	case UNINSTALL_NOTIFY_HANDLER:
		acpi_remove_notify_handler(handle,
					   acpi_processor_hotplug_notify);

static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
	if (!is_processor_present(handle)) {

	if (acpi_map_lsapic(handle, p_cpu))

	if (arch_register_cpu(*p_cpu)) {
		acpi_unmap_lsapic(*p_cpu);

static int acpi_processor_handle_eject(struct acpi_processor *pr)
	if (cpu_online(pr->id)) {

	arch_unregister_cpu(pr->id);
	acpi_unmap_lsapic(pr->id);
#else
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
static int acpi_processor_handle_eject(struct acpi_processor *pr)
#endif /* CONFIG_ACPI_HOTPLUG_CPU */

void acpi_processor_install_hotplug_notify(void)
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	int action = INSTALL_NOTIFY_HANDLER;
	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
			    processor_walk_namespace_cb, &action, NULL);
#endif
	register_hotcpu_notifier(&acpi_cpu_notifier);

void acpi_processor_uninstall_hotplug_notify(void)
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	int action = UNINSTALL_NOTIFY_HANDLER;
	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
			    processor_walk_namespace_cb, &action, NULL);
#endif
	unregister_hotcpu_notifier(&acpi_cpu_notifier);
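
/*
 * The two helpers above walk the ACPI namespace for Processor objects,
 * passing INSTALL_NOTIFY_HANDLER or UNINSTALL_NOTIFY_HANDLER to
 * processor_walk_namespace_cb(), and register/unregister the CPU hotplug
 * notifier used by acpi_cpu_soft_notify().
 */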
/*
 * We keep the driver loaded even when ACPI is not running.
 * This is needed for the powernow-k8 driver, which works even without
 * ACPI but needs symbols from this driver.
 */

static int __init acpi_processor_init(void)
	memset(&processors, 0, sizeof(processors));
	memset(&errata, 0, sizeof(errata));

	if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
					(struct acpi_table_header **)&madt)))

	acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
	if (!acpi_processor_dir)
	acpi_processor_dir->owner = THIS_MODULE;

	result = cpuidle_register_driver(&acpi_idle_driver);

	result = acpi_bus_register_driver(&acpi_processor_driver);

	acpi_processor_install_hotplug_notify();

	acpi_thermal_cpufreq_init();

	acpi_processor_ppc_init();

	acpi_processor_throttling_init();

	cpuidle_unregister_driver(&acpi_idle_driver);

	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
static void __exit acpi_processor_exit(void)
	acpi_processor_ppc_exit();

	acpi_thermal_cpufreq_exit();

	acpi_processor_uninstall_hotplug_notify();

	acpi_bus_unregister_driver(&acpi_processor_driver);

	cpuidle_unregister_driver(&acpi_idle_driver);

	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);

module_init(acpi_processor_init);
module_exit(acpi_processor_exit);

EXPORT_SYMBOL(acpi_processor_set_thermal_limit);

MODULE_ALIAS("processor");