x86 mmiotrace: use resource_size_t for phys addresses
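For illustration only, not part of the patch: a minimal standalone sketch of the printk pattern this diff switches to. On configurations where resource_size_t is wider than unsigned long (e.g. 32-bit x86 with PAE), a plain %lx format would truncate the physical address, so the value is cast to unsigned long long and printed with %llx, as in the mmiotrace_ioremap() hunk below. The helper name example_print_phys is hypothetical.

/*
 * Hypothetical helper, not from the patch: the portable way to print a
 * resource_size_t, which may be 64 bits even on a 32-bit kernel.
 */
#include <linux/kernel.h>
#include <linux/types.h>

static void example_print_phys(resource_size_t phys)
{
	pr_debug("phys addr = 0x%llx\n", (unsigned long long)phys);
}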
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 6d6cac84c0453a75f6ea7852fc0cfcdb2d1dd948..3b04a01261216489993283f54a8aa308d1c14ada 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -24,7 +24,7 @@
 #include <linux/module.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <linux/version.h>
 #include <linux/kallsyms.h>
 #include <asm/pgtable.h>
@@ -32,6 +32,7 @@
 #include <asm/e820.h> /* for ISA_START_ADDRESS */
 #include <asm/atomic.h>
 #include <linux/percpu.h>
+#include <linux/cpu.h>
 
 #include "pf_in.h"
 
@@ -47,7 +48,7 @@ struct trap_reason {
 struct remap_trace {
        struct list_head list;
        struct kmmio_probe probe;
-       unsigned long phys;
+       resource_size_t phys;
        unsigned long id;
 };
 
@@ -274,11 +275,12 @@ static void post(struct kmmio_probe *p, unsigned long condition,
        put_cpu_var(pf_reason);
 }
 
-static void ioremap_trace_core(unsigned long offset, unsigned long size,
+static void ioremap_trace_core(resource_size_t offset, unsigned long size,
                                                        void __iomem *addr)
 {
        static atomic_t next_id;
        struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
+       /* These are page-unaligned. */
        struct mmiotrace_map map = {
                .phys = offset,
                .virt = (unsigned long)addr,
@@ -317,13 +319,14 @@ not_enabled:
        spin_unlock_irq(&trace_lock);
 }
 
-void
-mmiotrace_ioremap(unsigned long offset, unsigned long size, void __iomem *addr)
+void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
+                                               void __iomem *addr)
 {
        if (!is_enabled()) /* recheck and proper locking in *_core() */
                return;
 
-       pr_debug(NAME "ioremap_*(0x%lx, 0x%lx) = %p\n", offset, size, addr);
+       pr_debug(NAME "ioremap_*(0x%llx, 0x%lx) = %p\n",
+                               (unsigned long long)offset, size, addr);
        if ((filter_offset) && (offset != filter_offset))
                return;
        ioremap_trace_core(offset, size, addr);
@@ -400,6 +403,63 @@ static void clear_trace_list(void)
        }
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static cpumask_t downed_cpus;
+
+static void enter_uniprocessor(void)
+{
+       int cpu;
+       int err;
+
+       get_online_cpus();
+       downed_cpus = cpu_online_map;
+       cpu_clear(first_cpu(cpu_online_map), downed_cpus);
+       if (num_online_cpus() > 1)
+               pr_notice(NAME "Disabling non-boot CPUs...\n");
+       put_online_cpus();
+
+       for_each_cpu_mask(cpu, downed_cpus) {
+               err = cpu_down(cpu);
+               if (!err)
+                       pr_info(NAME "CPU%d is down.\n", cpu);
+               else
+                       pr_err(NAME "Error taking CPU%d down: %d\n", cpu, err);
+       }
+       if (num_online_cpus() > 1)
+               pr_warning(NAME "multiple CPUs still online, "
+                                               "may miss events.\n");
+}
+
+static void leave_uniprocessor(void)
+{
+       int cpu;
+       int err;
+
+       if (cpus_weight(downed_cpus) == 0)
+               return;
+       pr_notice(NAME "Re-enabling CPUs...\n");
+       for_each_cpu_mask(cpu, downed_cpus) {
+               err = cpu_up(cpu);
+               if (!err)
+                       pr_info(NAME "enabled CPU%d.\n", cpu);
+               else
+                       pr_err(NAME "cannot re-enable CPU%d: %d\n", cpu, err);
+       }
+}
+
+#else /* !CONFIG_HOTPLUG_CPU */
+static void enter_uniprocessor(void)
+{
+       if (num_online_cpus() > 1)
+               pr_warning(NAME "multiple CPUs are online, may miss events. "
+                       "Suggest booting with maxcpus=1 kernel argument.\n");
+}
+
+static void leave_uniprocessor(void)
+{
+}
+#endif
+
 #if 0 /* XXX: out of order */
 static struct file_operations fops_marker = {
        .owner =        THIS_MODULE,
@@ -422,6 +482,7 @@ void enable_mmiotrace(void)
 
        if (nommiotrace)
                pr_info(NAME "MMIO tracing disabled.\n");
+       enter_uniprocessor();
        spin_lock_irq(&trace_lock);
        atomic_inc(&mmiotrace_enabled);
        spin_unlock_irq(&trace_lock);
@@ -442,6 +503,7 @@ void disable_mmiotrace(void)
        spin_unlock_irq(&trace_lock);
 
        clear_trace_list(); /* guarantees: no more kmmio callbacks */
+       leave_uniprocessor();
        if (marker_file) {
                debugfs_remove(marker_file);
                marker_file = NULL;