Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 8256546d49bf6826d33a61bda8fd88bc135e1074..635b50e85581de29bc4a9d32ffbd82da6612838c 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -24,7 +24,7 @@
 #include <linux/module.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <linux/version.h>
 #include <linux/kallsyms.h>
 #include <asm/pgtable.h>
@@ -32,6 +32,7 @@
 #include <asm/e820.h> /* for ISA_START_ADDRESS */
 #include <asm/atomic.h>
 #include <linux/percpu.h>
+#include <linux/cpu.h>
 
 #include "pf_in.h"
 
@@ -47,7 +48,7 @@ struct trap_reason {
 struct remap_trace {
        struct list_head list;
        struct kmmio_probe probe;
-       unsigned long phys;
+       resource_size_t phys;
        unsigned long id;
 };
 
@@ -81,17 +82,14 @@ static LIST_HEAD(trace_list);               /* struct remap_trace */
 /* module parameters */
 static unsigned long   filter_offset;
 static int             nommiotrace;
-static int             ISA_trace;
 static int             trace_pc;
 
 module_param(filter_offset, ulong, 0);
 module_param(nommiotrace, bool, 0);
-module_param(ISA_trace, bool, 0);
 module_param(trace_pc, bool, 0);
 
 MODULE_PARM_DESC(filter_offset, "Start address of traced mappings.");
 MODULE_PARM_DESC(nommiotrace, "Disable actual MMIO tracing.");
-MODULE_PARM_DESC(ISA_trace, "Do not exclude the low ISA range.");
 MODULE_PARM_DESC(trace_pc, "Record address of faulting instructions.");
 
 static bool is_enabled(void)
@@ -139,7 +137,7 @@ static ssize_t write_marker(struct file *file, const char __user *buffer,
 
 static void print_pte(unsigned long address)
 {
-       int level;
+       unsigned int level;
        pte_t *pte = lookup_address(address, &level);
 
        if (!pte) {
@@ -153,8 +151,9 @@ static void print_pte(unsigned long address)
                                                        "0x%08lx\n", address);
                BUG();
        }
-       pr_info(NAME "pte for 0x%lx: 0x%lx 0x%lx\n", address, pte_val(*pte),
-                                               pte_val(*pte) & _PAGE_PRESENT);
+       pr_info(NAME "pte for 0x%lx: 0x%llx 0x%llx\n", address,
+               (unsigned long long)pte_val(*pte),
+               (unsigned long long)pte_val(*pte) & _PAGE_PRESENT);
 }
 
 /*
@@ -192,7 +191,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
        struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);
        const unsigned long instptr = instruction_pointer(regs);
        const enum reason_type type = get_ins_type(instptr);
-       struct remap_trace *trace = p->user_data;
+       struct remap_trace *trace = p->private;
 
        /* it doesn't make sense to have more than one active trace per cpu */
        if (my_reason->active_traces)
@@ -276,11 +275,12 @@ static void post(struct kmmio_probe *p, unsigned long condition,
        put_cpu_var(pf_reason);
 }
 
-static void ioremap_trace_core(unsigned long offset, unsigned long size,
+static void ioremap_trace_core(resource_size_t offset, unsigned long size,
                                                        void __iomem *addr)
 {
        static atomic_t next_id;
        struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
+       /* These are page-unaligned. */
        struct mmiotrace_map map = {
                .phys = offset,
                .virt = (unsigned long)addr,
@@ -299,7 +299,7 @@ static void ioremap_trace_core(unsigned long offset, unsigned long size,
                        .len = size,
                        .pre_handler = pre,
                        .post_handler = post,
-                       .user_data = trace
+                       .private = trace
                },
                .phys = offset,
                .id = atomic_inc_return(&next_id)
@@ -319,13 +319,14 @@ not_enabled:
        spin_unlock_irq(&trace_lock);
 }
 
-void
-mmiotrace_ioremap(unsigned long offset, unsigned long size, void __iomem *addr)
+void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
+                                               void __iomem *addr)
 {
        if (!is_enabled()) /* recheck and proper locking in *_core() */
                return;
 
-       pr_debug(NAME "ioremap_*(0x%lx, 0x%lx) = %p\n", offset, size, addr);
+       pr_debug(NAME "ioremap_*(0x%llx, 0x%lx) = %p\n",
+                               (unsigned long long)offset, size, addr);
        if ((filter_offset) && (offset != filter_offset))
                return;
        ioremap_trace_core(offset, size, addr);
@@ -402,6 +403,65 @@ static void clear_trace_list(void)
        }
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static cpumask_t downed_cpus;
+
+static void enter_uniprocessor(void)
+{
+       int cpu;
+       int err;
+
+       get_online_cpus();
+       downed_cpus = cpu_online_map;
+       cpu_clear(first_cpu(cpu_online_map), downed_cpus);
+       if (num_online_cpus() > 1)
+               pr_notice(NAME "Disabling non-boot CPUs...\n");
+       put_online_cpus();
+
+       for_each_cpu_mask(cpu, downed_cpus) {
+               err = cpu_down(cpu);
+               if (!err)
+                       pr_info(NAME "CPU%d is down.\n", cpu);
+               else
+                       pr_err(NAME "Error taking CPU%d down: %d\n", cpu, err);
+       }
+       if (num_online_cpus() > 1)
+               pr_warning(NAME "multiple CPUs still online, "
+                                               "may miss events.\n");
+}
+
+/* __ref because leave_uniprocessor calls cpu_up which is __cpuinit,
+   but this whole function is ifdefed CONFIG_HOTPLUG_CPU */
+static void __ref leave_uniprocessor(void)
+{
+       int cpu;
+       int err;
+
+       if (cpus_weight(downed_cpus) == 0)
+               return;
+       pr_notice(NAME "Re-enabling CPUs...\n");
+       for_each_cpu_mask(cpu, downed_cpus) {
+               err = cpu_up(cpu);
+               if (!err)
+                       pr_info(NAME "enabled CPU%d.\n", cpu);
+               else
+                       pr_err(NAME "cannot re-enable CPU%d: %d\n", cpu, err);
+       }
+}
+
+#else /* !CONFIG_HOTPLUG_CPU */
+static void enter_uniprocessor(void)
+{
+       if (num_online_cpus() > 1)
+               pr_warning(NAME "multiple CPUs are online, may miss events. "
+                       "Suggest booting with maxcpus=1 kernel argument.\n");
+}
+
+static void leave_uniprocessor(void)
+{
+}
+#endif
+
 #if 0 /* XXX: out of order */
 static struct file_operations fops_marker = {
        .owner =        THIS_MODULE,
@@ -424,8 +484,7 @@ void enable_mmiotrace(void)
 
        if (nommiotrace)
                pr_info(NAME "MMIO tracing disabled.\n");
-       if (ISA_trace)
-               pr_warning(NAME "Warning! low ISA range will be traced.\n");
+       enter_uniprocessor();
        spin_lock_irq(&trace_lock);
        atomic_inc(&mmiotrace_enabled);
        spin_unlock_irq(&trace_lock);
@@ -446,6 +505,7 @@ void disable_mmiotrace(void)
        spin_unlock_irq(&trace_lock);
 
        clear_trace_list(); /* guarantees: no more kmmio callbacks */
+       leave_uniprocessor();
        if (marker_file) {
                debugfs_remove(marker_file);
                marker_file = NULL;