#include "op_x86_model.h"
static struct op_x86_model_spec const *model;
-static struct op_msrs cpu_msrs[NR_CPUS];
-static unsigned long saved_lvtpc[NR_CPUS];
+static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
+static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
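/*
 * A minimal sketch of the per-CPU accessors this patch converts to; the
 * variable and function below are hypothetical, for illustration only,
 * and assume <linux/percpu.h> is already pulled in by the headers above.
 */
static DEFINE_PER_CPU(unsigned long, example_count);	/* hypothetical */

static void example_percpu_usage(int cpu)
{
	/* remote access by CPU number, as in per_cpu(cpu_msrs, cpu) */
	per_cpu(example_count, cpu) = 0;

	/* local access with preemption already off (IPI/NMI context),
	 * as in __get_cpu_var(cpu_msrs) */
	__get_cpu_var(example_count)++;

	/* preemption-safe local access pair, as in nmi_shutdown() */
	get_cpu_var(example_count)++;
	put_cpu_var(example_count);
}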
static int nmi_start(void);
static void nmi_stop(void);
switch (val) {
case DIE_NMI:
- if (model->check_ctrs(args->regs, &cpu_msrs[cpu]))
+ if (model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu)))
ret = NOTIFY_STOP;
break;
default:
static void nmi_save_registers(void *dummy)
{
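/* Runs as an on_each_cpu() callback (see nmi_setup() below), so this
 * executes on every CPU with preemption off and smp_processor_id()
 * is stable here. */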
int cpu = smp_processor_id();
- struct op_msrs *msrs = &cpu_msrs[cpu];
+ struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
nmi_cpu_save_registers(msrs);
}
static void free_msrs(void)
{
int i;
for_each_possible_cpu(i) {
- kfree(cpu_msrs[i].counters);
- cpu_msrs[i].counters = NULL;
- kfree(cpu_msrs[i].controls);
- cpu_msrs[i].controls = NULL;
+ kfree(per_cpu(cpu_msrs, i).counters);
+ per_cpu(cpu_msrs, i).counters = NULL;
+ kfree(per_cpu(cpu_msrs, i).controls);
+ per_cpu(cpu_msrs, i).controls = NULL;
}
}
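/* Note: static per-CPU storage starts out zeroed and kfree(NULL) is a
 * no-op, so free_msrs() above stays safe even when allocate_msrs() below
 * fails part-way through. */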
static int allocate_msrs(void)
{
int success = 1;
size_t controls_size = sizeof(struct op_msr) * model->num_controls;
size_t counters_size = sizeof(struct op_msr) * model->num_counters;
int i;
for_each_possible_cpu(i) {
- cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL);
- if (!cpu_msrs[i].counters) {
+ per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
+ GFP_KERNEL);
+ if (!per_cpu(cpu_msrs, i).counters) {
success = 0;
break;
}
- cpu_msrs[i].controls = kmalloc(controls_size, GFP_KERNEL);
- if (!cpu_msrs[i].controls) {
+ per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
+ GFP_KERNEL);
+ if (!per_cpu(cpu_msrs, i).controls) {
success = 0;
break;
}
static void nmi_cpu_setup(void *dummy)
{
int cpu = smp_processor_id();
- struct op_msrs *msrs = &cpu_msrs[cpu];
+ struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
spin_lock(&oprofilefs_lock);
model->setup_ctrs(msrs);
spin_unlock(&oprofilefs_lock);
- saved_lvtpc[cpu] = apic_read(APIC_LVTPC);
+ per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
/* We need to serialize save and setup for HT because the subset
 * of msrs are distinct for save and setup operations
 */
/* Assume saved/restored counters are the same on all CPUs */
- model->fill_in_addresses(&cpu_msrs[0]);
+ model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
for_each_possible_cpu(cpu) {
if (cpu != 0) {
- memcpy(cpu_msrs[cpu].counters, cpu_msrs[0].counters,
+ memcpy(per_cpu(cpu_msrs, cpu).counters,
+ per_cpu(cpu_msrs, 0).counters,
sizeof(struct op_msr) * model->num_counters);
- memcpy(cpu_msrs[cpu].controls, cpu_msrs[0].controls,
+ memcpy(per_cpu(cpu_msrs, cpu).controls,
+ per_cpu(cpu_msrs, 0).controls,
sizeof(struct op_msr) * model->num_controls);
}
}
- on_each_cpu(nmi_save_registers, NULL, 0, 1);
- on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
+ on_each_cpu(nmi_save_registers, NULL, 1);
+ on_each_cpu(nmi_cpu_setup, NULL, 1);
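/*
 * The dropped "0" argument above was on_each_cpu()'s old retry/nonatomic
 * parameter, since removed from the API; the remaining "1" is the wait
 * flag, so both calls still block until every CPU has run the callback.
 */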
nmi_enabled = 1;
return 0;
}
static void nmi_cpu_shutdown(void *dummy)
{
unsigned int v;
int cpu = smp_processor_id();
- struct op_msrs *msrs = &cpu_msrs[cpu];
+ struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);
/* restoring APIC_LVTPC can trigger an apic error because the delivery
 * mode and vector nr combination can be illegal. That's by design: on
 * power on apic lvt contain a zero vector nr which are legal only for
 * NMI delivery mode. So inhibit apic err before restoring lvtpc
 */
v = apic_read(APIC_LVTERR);
apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
- apic_write(APIC_LVTPC, saved_lvtpc[cpu]);
+ apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
apic_write(APIC_LVTERR, v);
nmi_restore_registers(msrs);
}
static void nmi_shutdown(void)
{
+ struct op_msrs *msrs = &get_cpu_var(cpu_msrs);
nmi_enabled = 0;
- on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
+ on_each_cpu(nmi_cpu_shutdown, NULL, 1);
unregister_die_notifier(&profile_exceptions_nb);
- model->shutdown(cpu_msrs);
+ model->shutdown(msrs);
free_msrs();
+ put_cpu_var(cpu_msrs);
}
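/* Unlike the IPI callbacks above, nmi_shutdown() runs in process context,
 * which is why it uses the get_cpu_var()/put_cpu_var() pair: that keeps
 * preemption disabled while the local cpu_msrs pointer is in use. */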
static void nmi_cpu_start(void *dummy)
{
- struct op_msrs const *msrs = &cpu_msrs[smp_processor_id()];
+ struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
model->start(msrs);
}
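/* __get_cpu_var(cpu_msrs) is this CPU's instance, equivalent to
 * per_cpu(cpu_msrs, smp_processor_id()) but cheaper; it is safe in
 * nmi_cpu_start() above and nmi_cpu_stop() below because both run as
 * on_each_cpu() callbacks with preemption disabled. */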
static int nmi_start(void)
{
- on_each_cpu(nmi_cpu_start, NULL, 0, 1);
+ on_each_cpu(nmi_cpu_start, NULL, 1);
return 0;
}
static void nmi_cpu_stop(void *dummy)
{
- struct op_msrs const *msrs = &cpu_msrs[smp_processor_id()];
+ struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
model->stop(msrs);
}
static void nmi_stop(void)
{
- on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
+ on_each_cpu(nmi_cpu_stop, NULL, 1);
}
struct op_counter_config counter_config[OP_MAX_COUNTER];
static int __init ppro_init(char **cpu_type)
{
__u8 cpu_model = boot_cpu_data.x86_model;
- if (cpu_model == 14)
+ switch (cpu_model) {
+ case 0 ... 2:
+ *cpu_type = "i386/ppro";
+ break;
+ case 3 ... 5:
+ *cpu_type = "i386/pii";
+ break;
+ case 6 ... 8:
+ *cpu_type = "i386/piii";
+ break;
+ case 9:
+ *cpu_type = "i386/p6_mobile";
+ break;
+ case 10 ... 13:
+ *cpu_type = "i386/p6";
+ break;
+ case 14:
*cpu_type = "i386/core";
- else if (cpu_model == 15 || cpu_model == 23)
+ break;
+ case 15: case 23:
+ *cpu_type = "i386/core_2";
+ break;
+ case 26:
*cpu_type = "i386/core_2";
- else if (cpu_model > 0xd)
+ break;
+ default:
+ /* Unknown */
return 0;
- else if (cpu_model == 9) {
- *cpu_type = "i386/p6_mobile";
- } else if (cpu_model > 5) {
- *cpu_type = "i386/piii";
- } else if (cpu_model > 2) {
- *cpu_type = "i386/pii";
- } else {
- *cpu_type = "i386/ppro";
}
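/* The "0 ... 2" style labels above use the GCC case-range extension,
 * which is standard practice in kernel code. */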
model = &op_ppro_spec;