/*
 * stash override to indicate we've been here
- * and for later update of acpi_fadt
+ * and for later update of acpi_gbl_FADT
*/
acpi_sci_override_gsi = gsi;
return;
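
The stash-then-apply flow is easy to miss across the hunks: the MADT parser records the override GSI in acpi_sci_override_gsi here, and a later hunk writes it back into acpi_gbl_FADT.sci_interrupt once the table is fully parsed. A minimal standalone sketch of that flow, with a hypothetical fake_fadt standing in for the real global:

#include <stdio.h>

struct fake_fadt { unsigned int sci_interrupt; };     /* stand-in for acpi_gbl_FADT */
static struct fake_fadt fadt = { .sci_interrupt = 9 };
static unsigned int sci_override_gsi;                 /* 0 = no override seen */

static void parse_int_src_override(unsigned int bus_irq, unsigned int gsi)
{
        if (bus_irq == fadt.sci_interrupt)
                sci_override_gsi = gsi;   /* stash; applied after parsing finishes */
}

int main(void)
{
        parse_int_src_override(9, 20);    /* MADT says the SCI really lives on GSI 20 */
        if (sci_override_gsi)
                fadt.sci_interrupt = sci_override_gsi;
        printf("SCI on GSI %u\n", fadt.sci_interrupt);
        return 0;
}
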
acpi_table_print_madt_entry(header);
- if (intsrc->bus_irq == acpi_fadt.sci_int) {
+ if (intsrc->bus_irq == acpi_gbl_FADT.sci_interrupt) {
acpi_sci_ioapic_setup(intsrc->global_irq,
intsrc->flags.polarity,
intsrc->flags.trigger);
static int __init acpi_parse_fadt(struct acpi_table_header *header)
{
- struct fadt_descriptor *fadt = NULL;
+ struct acpi_table_fadt *fadt = NULL;
- fadt = (struct fadt_descriptor *)header;
+ fadt = (struct acpi_table_fadt *)header;
if (!fadt) {
printk(KERN_WARNING PREFIX "Unable to map FADT\n");
return 0;
* pretend we got one so we can set the SCI flags.
*/
if (!acpi_sci_override_gsi)
- acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
+ acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);
/* Fill in identity legacy mappings where no override */
mp_config_acpi_legacy_irqs();
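
The casts in acpi_parse_fadt work because every ACPI table begins with the common header, so a struct acpi_table_header pointer can be reinterpreted as the table-specific type. A compilable sketch using illustrative layouts (the real ones live in ACPICA's actbl.h):

#include <stdio.h>
#include <stdint.h>

struct hdr  { char signature[4]; uint32_t length; };   /* illustrative only */
struct fadt { struct hdr header; uint32_t sci_interrupt; };

static int parse_fadt(struct hdr *h)
{
        /* Safe because struct fadt begins with struct hdr. */
        struct fadt *f = (struct fadt *)h;
        printf("SCI interrupt: %u\n", f->sci_interrupt);
        return 0;
}

int main(void)
{
        struct fadt t = { { "FACP", sizeof(t) }, 9 };
        return parse_fadt(&t.header);
}
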
/* Invoke C3 */
inb(cx_address);
/* Dummy op - must do something useless after P_LVL3 read */
- t = inl(acpi_fadt.xpm_tmr_blk.address);
+ t = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
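
The dummy timer read deserves a note: the inb() of the P_LVL3 port is itself what enters C3, and the follow-up inl() of the PM timer exists only to keep the CPU occupied while STPCLK# asserts. A sketch of the idiom with stubbed port I/O and hypothetical port addresses, since real inb()/inl() need kernel context:

#include <stdint.h>

/* Stubs: real code does port I/O on addresses taken from the FADT. */
static uint8_t  io_read8(uint16_t port)  { (void)port; return 0; }
static uint32_t io_read32(uint32_t addr) { (void)addr; return 0; }

static void enter_c3(uint16_t p_lvl3, uint32_t pm_timer)
{
        volatile uint32_t t;

        io_read8(p_lvl3);       /* the read itself triggers C3 entry */
        /* Dummy op: a useless second read gives the chipset time to
         * assert STPCLK# before execution continues. */
        t = io_read32(pm_timer);
        (void)t;
}

int main(void) { enter_c3(0x1015, 0x1008); return 0; }
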
/* Disable bus ratio bit */
local_irq_disable();
outb(3, 0x22);
} else if ((pr != NULL) && pr->flags.bm_control) {
/* Disable bus master arbitration */
- acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
- ACPI_MTX_DO_NOT_LOCK);
+ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
}
switch (longhaul_version) {
case TYPE_POWERSAVER:
if (longhaul_flags & USE_ACPI_C3) {
/* Don't allow wakeup */
- acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0,
- ACPI_MTX_DO_NOT_LOCK);
+ acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
do_powersaver(cx->address, clock_ratio_index);
} else {
do_powersaver(0, clock_ratio_index);
outb(0, 0x22);
} else if ((pr != NULL) && pr->flags.bm_control) {
/* Enable bus master arbitration */
- acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
- ACPI_MTX_DO_NOT_LOCK);
+ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
}
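
Across these hunks the only API change is that acpi_set_register() loses its ACPI_MTX_DO_NOT_LOCK argument; locking moves inside ACPICA. The call sites still bracket C3 entry with arbitration off/on, as this stubbed sketch illustrates (set_register is a stand-in, not the real API):

#include <stdio.h>

#define ARB_DISABLE 1   /* stand-in for ACPI_BITREG_ARB_DISABLE */

/* Stub with the new two-argument shape; no lock flag. */
static void set_register(int reg, int value)
{
        printf("reg %d <- %d\n", reg, value);
}

static void c3_with_arbitration_off(void (*enter_c3)(void))
{
        set_register(ARB_DISABLE, 1);   /* keep bus masters out of the way */
        enter_c3();
        set_register(ARB_DISABLE, 0);   /* restore arbitration */
}

static void fake_c3(void) { puts("in C3"); }

int main(void) { c3_with_arbitration_off(fake_c3); return 0; }
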
outb(pic2_mask,0xA1); /* restore mask */
outb(pic1_mask,0x21);
highest_speed = calc_speed(maxmult);
lowest_speed = calc_speed(minmult);
dprintk ("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb,
- print_speed(lowest_speed/1000),
+ print_speed(lowest_speed/1000),
print_speed(highest_speed/1000));
if (lowest_speed == highest_speed) {
maxvid.mV/1000, maxvid.mV%1000,
minvid.mV/1000, minvid.mV%1000,
numvscales);
-
+
j = 0;
while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) {
speed = longhaul_table[j].frequency;
static int gsi_to_irq[MAX_GSI_NUM];
/* Don't set up the ACPI SCI because it's already set up */
- if (acpi_fadt.sci_int == gsi)
+ if (acpi_gbl_FADT.sci_interrupt == gsi)
return gsi;
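
Both GSI hunks enforce the same rule: the SCI's GSI is never handed out or remapped, because it was already set up during MADT parsing. A toy allocator showing just that skip, with hypothetical names:

#include <stdio.h>

static unsigned int sci_gsi = 9;        /* from the FADT */
static unsigned int next_pci_gsi = 9;   /* hypothetical allocator cursor */

static unsigned int alloc_gsi(void)
{
        unsigned int gsi = next_pci_gsi++;
        if (gsi == sci_gsi)             /* never hand out the SCI's GSI */
                gsi = next_pci_gsi++;
        return gsi;
}

int main(void)
{
        /* prints "10 11": GSI 9 was skipped */
        printf("%u %u\n", alloc_gsi(), alloc_gsi());
        return 0;
}
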
ioapic = mp_find_ioapic(gsi);
/*
* Don't assign IRQ used by ACPI SCI
*/
- if (gsi == acpi_fadt.sci_int)
+ if (gsi == acpi_gbl_FADT.sci_interrupt)
gsi = pci_irq++;
gsi_to_irq[irq] = gsi;
} else {
static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size)
{
struct acpi_table_header *fadt_header;
- struct fadt_descriptor *fadt;
+ struct acpi_table_fadt *fadt;
if (!phys_addr || !size)
return -EINVAL;
if (fadt_header->revision != 3)
return -ENODEV; /* Only deal with ACPI 2.0 FADT */
- fadt = (struct fadt_descriptor *)fadt_header;
+ fadt = (struct acpi_table_fadt *)fadt_header;
- acpi_register_gsi(fadt->sci_int, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
+ acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
return 0;
}
* Some x86_64 machines use physical APIC mode regardless of how many
* procs/clusters are present (x86_64 ES7000 is an example).
*/
- if (acpi_fadt.revision > FADT2_REVISION_ID)
- if (acpi_fadt.force_apic_physical_destination_mode) {
+ if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID)
+ if (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) {
genapic = &apic_cluster;
goto print;
}
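
This hunk shows the shape of the header change: the old struct exposed force_apic_physical_destination_mode as its own field, while the new acpi_gbl_FADT packs feature bits into a single flags word tested with masks. A small demo of the mask test; the bit value follows ACPICA's definition as of this patch but should be treated as illustrative:

#include <stdio.h>
#include <stdint.h>

#define FADT_APIC_PHYSICAL (1u << 19)   /* as in actbl.h, illustrative here */

struct demo_fadt { uint32_t flags; };

int main(void)
{
        struct demo_fadt f = { .flags = FADT_APIC_PHYSICAL };
        if (f.flags & FADT_APIC_PHYSICAL)
                puts("force physical APIC destination mode");
        return 0;
}
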
return gsi;
/* Don't set up the ACPI SCI because it's already set up */
- if (acpi_fadt.sci_int == gsi)
+ if (acpi_gbl_FADT.sci_interrupt == gsi)
return gsi;
ioapic = mp_find_ioapic(gsi);
acpi_sci_flags.trigger = 3;
/* Set PIC-mode SCI trigger type */
- acpi_pic_sci_set_trigger(acpi_fadt.sci_int,
+ acpi_pic_sci_set_trigger(acpi_gbl_FADT.sci_interrupt,
acpi_sci_flags.trigger);
} else {
extern int acpi_sci_override_gsi;
/*
- * now that acpi_fadt is initialized,
+ * now that acpi_gbl_FADT is initialized,
* update it with result from INT_SRC_OVR parsing
*/
- acpi_fadt.sci_int = acpi_sci_override_gsi;
+ acpi_gbl_FADT.sci_interrupt = acpi_sci_override_gsi;
}
#endif
* FADT. It may not be the same if an interrupt source override exists
* for the SCI.
*/
- gsi = acpi_fadt.sci_int;
+ gsi = acpi_gbl_FADT.sci_interrupt;
if (acpi_gsi_to_irq(gsi, &irq) < 0) {
printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
gsi);
}
}
/* Add a penalty for the SCI */
- acpi_irq_penalty[acpi_fadt.sci_int] += PIRQ_PENALTY_PCI_USING;
+ acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING;
return 0;
}
* Check to see if we have bus mastering arbitration control. This
* is required for proper C3 usage (to maintain cache coherency).
*/
- if (acpi_fadt.pm2_control_block && acpi_fadt.pm2_control_length) {
+ if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
pr->flags.bm_control = 1;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Bus mastering arbitration control present\n"));
object.processor.pblk_length);
else {
pr->throttling.address = object.processor.pblk_address;
- pr->throttling.duty_offset = acpi_fadt.duty_offset;
- pr->throttling.duty_width = acpi_fadt.duty_width;
+ pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
+ pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
pr->pblk = object.processor.pblk_address;
{
if (t2 >= t1)
return (t2 - t1);
- else if (!(acpi_fadt.flags & ACPI_FADT_32BIT_TIMER))
+ else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
else
return ((0xFFFFFFFF - t1) + t2);
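
The wraparound logic above is self-contained enough to lift out and test: the ACPI PM timer is free-running and either 24 or 32 bits wide (signalled by ACPI_FADT_32BIT_TIMER), so a reading that wrapped must be unfolded modulo the counter width. A standalone version, faithful to the kernel arithmetic; the flag's bit position is taken from ACPICA's actbl.h:

#include <stdio.h>
#include <stdint.h>

#define ACPI_FADT_32BIT_TIMER (1u << 8)

static uint32_t ticks_elapsed(uint32_t t1, uint32_t t2, uint32_t fadt_flags)
{
        if (t2 >= t1)
                return t2 - t1;
        else if (!(fadt_flags & ACPI_FADT_32BIT_TIMER))
                return ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF;  /* 24-bit wrap */
        else
                return (0xFFFFFFFF - t1) + t2;                 /* 32-bit wrap */
}

int main(void)
{
        /* Unfold a wrapped 24-bit reading: 0x00FFFFF0 -> 0x00000010 */
        printf("%u ticks\n", ticks_elapsed(0x00FFFFF0, 0x10, 0));
        return 0;
}

The C2 and C3 paths below use exactly this helper around state entry: read t1, enter the state, read t2, then account the tick delta as sleep time.
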
/* Dummy wait op - must do something useless after P_LVL2 read
because chipsets cannot guarantee that STPCLK# signal
gets asserted in time to freeze execution properly. */
- unused = inl(acpi_fadt.xpm_timer_block.address);
+ unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
}
* detection phase, to work cleanly with logical CPU hotplug.
*/
if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
- !pr->flags.has_cst && !(acpi_fadt.flags & ACPI_FADT_C2_MP_SUPPORTED))
+ !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
cx = &pr->power.states[ACPI_STATE_C1];
#endif
case ACPI_STATE_C2:
/* Get start time (ticks) */
- t1 = inl(acpi_fadt.xpm_timer_block.address);
+ t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
/* Invoke C2 */
acpi_cstate_enter(cx);
/* Get end time (ticks) */
- t2 = inl(acpi_fadt.xpm_timer_block.address);
+ t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
#ifdef CONFIG_GENERIC_TIME
/* TSC halts in C2, so notify users */
}
/* Get start time (ticks) */
- t1 = inl(acpi_fadt.xpm_timer_block.address);
+ t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
/* Invoke C3 */
acpi_cstate_enter(cx);
/* Get end time (ticks) */
- t2 = inl(acpi_fadt.xpm_timer_block.address);
+ t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
if (pr->flags.bm_check) {
/* Enable bus master arbitration */
atomic_dec(&c3_cpu_count);
#ifdef CONFIG_HOTPLUG_CPU
/* Don't do promotion/demotion */
if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
- !pr->flags.has_cst && !(acpi_fadt.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
+ !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
next_state = cx;
goto end;
}
* an SMP system.
*/
if ((num_online_cpus() > 1) &&
- !(acpi_fadt.flags & ACPI_FADT_C2_MP_SUPPORTED))
+ !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
return -ENODEV;
#endif
pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
/* determine latencies from FADT */
- pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.C2latency;
- pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.C3latency;
+ pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
+ pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"lvl2[0x%08x] lvl3[0x%08x]\n",
 * WBINVD should be set in the FADT for the C3 state to be
 * supported when bm_check is not required.
*/
- if (!(acpi_fadt.flags & ACPI_FADT_WBINVD)) {
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Cache invalidation should work properly"
" for C3 to be enabled on SMP systems\n"));
if (!pr)
return -EINVAL;
- if (acpi_fadt.cst_control && !nocst) {
+ if (acpi_gbl_FADT.cst_control && !nocst) {
status =
- acpi_os_write_port(acpi_fadt.smi_command, acpi_fadt.cst_control, 8);
+ acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Notifying BIOS of _CST ability failed"));
is_done = -EIO;
/* Can't write pstate_control to smi_command if either value is zero */
- if ((!acpi_fadt.smi_command) || (!acpi_fadt.pstate_control)) {
+ if ((!acpi_gbl_FADT.smi_command) || (!acpi_gbl_FADT.pstate_control)) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
module_put(calling_module);
return 0;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Writing pstate_control [0x%x] to smi_command [0x%x]\n",
- acpi_fadt.pstate_control, acpi_fadt.smi_command));
+ acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
- status = acpi_os_write_port(acpi_fadt.smi_command,
- (u32) acpi_fadt.pstate_control, 8);
+ status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+ (u32) acpi_gbl_FADT.pstate_control, 8);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Failed to write pstate_control [0x%x] to "
- "smi_command [0x%x]", acpi_fadt.pstate_control,
- acpi_fadt.smi_command));
+ "smi_command [0x%x]", acpi_gbl_FADT.pstate_control,
+ acpi_gbl_FADT.smi_command));
module_put(calling_module);
return status;
}
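
Both SMI notifications (the _CST capability write above and pstate_control here) follow one shape: bail out if either the smi_command port or the command value is zero, then write a single byte to the port. A stubbed sketch of that guard (write_port stands in for acpi_os_write_port; the port/value pair is hypothetical):

#include <stdio.h>
#include <stdint.h>

typedef int status_t;           /* stand-in for acpi_status */
#define OK   0
#define ERR -1

/* Stub; real code performs a port I/O write of the given bit width. */
static status_t write_port(uint32_t port, uint32_t value, int width)
{
        printf("port 0x%x <- 0x%x (width %d)\n", port, value, width);
        return OK;
}

static status_t notify_bios(uint32_t smi_command, uint32_t command)
{
        if (!smi_command || !command)   /* nothing to write, or nowhere to */
                return ERR;
        return write_port(smi_command, command, 8);
}

int main(void)
{
        notify_bios(0xb2, 0x55);        /* hypothetical port/value pair */
        return 0;
}
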
/* Used to clear all duty_value bits */
duty_mask = pr->throttling.state_count - 1;
- duty_mask <<= acpi_fadt.duty_offset;
+ duty_mask <<= acpi_gbl_FADT.duty_offset;
duty_mask = ~duty_mask;
}
return 0;
}
- pr->throttling.state_count = 1 << acpi_fadt.duty_width;
+ pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
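
The duty-cycle arithmetic is compact enough to verify by hand: duty_width gives the number of throttling states as a power of two, and duty_offset positions the duty-value field within the P_CNT register, so the mask built earlier clears exactly that field. A worked example with assumed FADT values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int duty_width  = 3;   /* example values; the real ones */
        unsigned int duty_offset = 1;   /* come from the FADT            */

        unsigned int state_count = 1u << duty_width;          /* 8 states */
        uint32_t duty_mask = (uint32_t)(state_count - 1) << duty_offset;

        /* ANDing P_CNT with ~duty_mask clears all duty-value bits. */
        printf("states=%u mask=0x%x clear=0x%x\n",
               state_count, duty_mask, ~duty_mask);
        return 0;
}
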
/*
* Compute state values. Note that throttling displays a linear power/
/*
* Enumerate all fixed-feature devices.
*/
- if ((acpi_fadt.flags & ACPI_FADT_POWER_BUTTON) == 0) {
+ if ((acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0) {
result = acpi_add_single_object(&device, acpi_root,
NULL,
ACPI_BUS_TYPE_POWER_BUTTON);
result = acpi_start_single_object(device);
}
- if ((acpi_fadt.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
+ if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
result = acpi_add_single_object(&device, acpi_root,
NULL,
ACPI_BUS_TYPE_SLEEP_BUTTON);
*/
ACPI_EXTERN struct acpi_internal_rsdt acpi_gbl_root_table_list;
ACPI_EXTERN struct acpi_table_fadt acpi_gbl_FADT;
-#define acpi_fadt acpi_gbl_FADT
extern acpi_native_uint acpi_gbl_permanent_mmap;
/* These addresses are calculated from FADT address values */
struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */
};
-#define fadt_descriptor acpi_table_fadt
-#define sci_int sci_interrupt
-
/* FADT flags */
#define ACPI_FADT_WBINVD (1) /* 00: The wbinvd instruction works properly */
#define ACPI_SRAT_MEM_HOT_PLUGGABLE (1<<1) /* 01: Memory region is hot pluggable */
#define ACPI_SRAT_MEM_NON_VOLATILE (1<<2) /* 02: Memory region is non-volatile */
-/* Memory types */
-
-enum acpi_address_range_id {
- ACPI_ADDRESS_RANGE_MEMORY = 1,
- ACPI_ADDRESS_RANGE_RESERVED = 2,
- ACPI_ADDRESS_RANGE_ACPI = 3,
- ACPI_ADDRESS_RANGE_NVS = 4,
- ACPI_ADDRESS_RANGE_COUNT = 5
-};
-
/*******************************************************************************
*
* TCPA - Trusted Computing Platform Alliance table