[SPARC64]: Proper multi-core scheduling support.
author     David S. Miller <davem@hutch.davemloft.net>
           Tue, 5 Jun 2007 00:01:39 +0000 (17:01 -0700)
committer  David S. Miller <davem@sunset.davemloft.net>
           Tue, 5 Jun 2007 04:50:00 +0000 (21:50 -0700)
The scheduling domain hierarchy is:

   all cpus -->
      cpus that share an instruction cache -->
          cpus that share an integer execution unit

Signed-off-by: David S. Miller <davem@davemloft.net>
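
For illustration only (not part of the commit): the two per-cpu fields this patch fills in line up with the lower two domain levels, core_id for cpus sharing an instruction cache and proc_id for cpus sharing an integer execution unit. A hypothetical kernel-context debug helper built on the topology macros added below might look like this (the helper name and printout are made up):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

static void __init dump_cpu_topology(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		/* core_id groups cpus that share an instruction cache,
		 * proc_id groups cpus that share an integer execution unit.
		 */
		printk(KERN_INFO "cpu%d: core_id=%d proc_id=%d\n",
		       cpu,
		       topology_core_id(cpu),
		       topology_physical_package_id(cpu));
	}
}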
arch/sparc64/Kconfig
arch/sparc64/kernel/mdesc.c
arch/sparc64/kernel/prom.c
arch/sparc64/kernel/smp.c
include/asm-sparc64/cpudata.h
include/asm-sparc64/smp.h
include/asm-sparc64/topology.h

diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index bd00f89eed1ed3c4dcaa7a5cb39cd6ef1a132505..89a1b469b93df149b21c1c65c5d1723d36e97e09 100644
@@ -396,6 +396,15 @@ config SCHED_SMT
          when dealing with UltraSPARC cpus at a cost of slightly increased
          overhead in some places. If unsure say N here.
 
+config SCHED_MC
+       bool "Multi-core scheduler support"
+       depends on SMP
+       default y
+       help
+         Multi-core scheduler support improves the CPU scheduler's decision
+         making when dealing with multi-core CPU chips at a cost of slightly
+         increased overhead in some places. If unsure say N here.
+
 source "kernel/Kconfig.preempt"
 
 config CMDLINE_BOOL
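
Usage note, not part of the diff: since the new option depends on SMP and defaults to y, a typical sparc64 SMP configuration simply carries both scheduler options; an illustrative .config fragment:

# illustrative fragment only
CONFIG_SMP=y
CONFIG_SCHED_SMT=y
CONFIG_SCHED_MC=y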
diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c
index 9246c2cf95747685fba8106d7cb83dcd99d6a60b..1b5db4bc6b34ae05497a18b7649e934635d304ca 100644
@@ -473,6 +473,53 @@ static void __init set_core_ids(void)
        }
 }
 
+static void __init mark_proc_ids(struct mdesc_node *mp, int proc_id)
+{
+       int i;
+
+       for (i = 0; i < mp->num_arcs; i++) {
+               struct mdesc_node *t = mp->arcs[i].arc;
+               const u64 *id;
+
+               if (strcmp(mp->arcs[i].name, "back"))
+                       continue;
+
+               if (strcmp(t->name, "cpu"))
+                       continue;
+
+               id = md_get_property(t, "id", NULL);
+               if (*id < NR_CPUS)
+                       cpu_data(*id).proc_id = proc_id;
+       }
+}
+
+static void __init __set_proc_ids(const char *exec_unit_name)
+{
+       struct mdesc_node *mp;
+       int idx;
+
+       idx = 0;
+       md_for_each_node_by_name(mp, exec_unit_name) {
+               const char *type;
+               int len;
+
+               type = md_get_property(mp, "type", &len);
+               if (!find_in_proplist(type, "int", len) &&
+                   !find_in_proplist(type, "integer", len))
+                       continue;
+
+               mark_proc_ids(mp, idx);
+
+               idx++;
+       }
+}
+
+static void __init set_proc_ids(void)
+{
+       __set_proc_ids("exec_unit");
+       __set_proc_ids("exec-unit");
+}
+
 static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def)
 {
        u64 val;
@@ -574,9 +621,11 @@ static void __init mdesc_fill_in_cpu_data(void)
 #endif
 
                c->core_id = 0;
+               c->proc_id = -1;
        }
 
        set_core_ids();
+       set_proc_ids();
 
        smp_fill_in_sib_core_maps();
 }
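
A rough standalone sketch of what the new mdesc walk above computes (toy types and a made-up layout, not the kernel's mdesc API; find_in_proplist() is reduced to a plain strcmp() here): every "exec_unit"/"exec-unit" node whose "type" marks it as an integer unit gets a running index, and each "cpu" node reached over a "back" arc inherits that index as its proc_id.

#include <stdio.h>
#include <string.h>

struct toy_node;

struct toy_arc {
	const char *name;		/* "fwd" or "back" */
	struct toy_node *target;
};

struct toy_node {
	const char *name;		/* "exec_unit", "cpu", ... */
	const char *type;		/* "integer" for the units we want */
	int id;				/* cpu id, valid for "cpu" nodes */
	int num_arcs;
	struct toy_arc arcs[4];
};

static int proc_id_of[4] = { -1, -1, -1, -1 };

/* Counterpart of mark_proc_ids(): follow "back" arcs to cpu nodes. */
static void toy_mark_proc_ids(struct toy_node *unit, int proc_id)
{
	int i;

	for (i = 0; i < unit->num_arcs; i++) {
		struct toy_node *t = unit->arcs[i].target;

		if (strcmp(unit->arcs[i].name, "back"))
			continue;
		if (strcmp(t->name, "cpu"))
			continue;
		proc_id_of[t->id] = proc_id;
	}
}

int main(void)
{
	/* Toy layout: cpus 0/1 hang off one integer unit, cpus 2/3 off another. */
	struct toy_node cpu[4] = {
		{ .name = "cpu", .id = 0 }, { .name = "cpu", .id = 1 },
		{ .name = "cpu", .id = 2 }, { .name = "cpu", .id = 3 },
	};
	struct toy_node eu[2] = {
		{ .name = "exec_unit", .type = "integer", .num_arcs = 2,
		  .arcs = { { "back", &cpu[0] }, { "back", &cpu[1] } } },
		{ .name = "exec_unit", .type = "integer", .num_arcs = 2,
		  .arcs = { { "back", &cpu[2] }, { "back", &cpu[3] } } },
	};
	int idx = 0, i;

	/* Counterpart of __set_proc_ids(): number qualifying units in order. */
	for (i = 0; i < 2; i++) {
		if (strcmp(eu[i].type, "int") && strcmp(eu[i].type, "integer"))
			continue;
		toy_mark_proc_ids(&eu[i], idx++);
	}

	for (i = 0; i < 4; i++)
		printf("cpu%d: proc_id=%d\n", i, proc_id_of[i]);
	return 0;
}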
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index dad4b3ba705f5aaab2e354692db7eb229a490a4c..928aba3d0db371dacd1b762a1aee372dc7d69d0c 100644
@@ -1800,6 +1800,7 @@ static void __init of_fill_in_cpu_data(void)
 
                        cpu_data(cpuid).core_id = 0;
                }
+               cpu_data(cpuid).proc_id = -1;
 
 #ifdef CONFIG_SMP
                cpu_set(cpuid, cpu_present_map);
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index c550bba3490a528fb240f3dc4f5b565a0f59fdc4..68a45ac933757c04ed74db9f02f89ff3de9d63ef 100644
@@ -51,6 +51,8 @@ cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
        { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
+       { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 static cpumask_t smp_commenced_mask;
 static cpumask_t cpu_callout_map;
 
@@ -1217,13 +1219,28 @@ void __devinit smp_fill_in_sib_core_maps(void)
                unsigned int j;
 
                if (cpu_data(i).core_id == 0) {
-                       cpu_set(i, cpu_sibling_map[i]);
+                       cpu_set(i, cpu_core_map[i]);
                        continue;
                }
 
                for_each_possible_cpu(j) {
                        if (cpu_data(i).core_id ==
                            cpu_data(j).core_id)
+                               cpu_set(j, cpu_core_map[i]);
+               }
+       }
+
+       for_each_possible_cpu(i) {
+               unsigned int j;
+
+               if (cpu_data(i).proc_id == -1) {
+                       cpu_set(i, cpu_sibling_map[i]);
+                       continue;
+               }
+
+               for_each_possible_cpu(j) {
+                       if (cpu_data(i).proc_id ==
+                           cpu_data(j).proc_id)
                                cpu_set(j, cpu_sibling_map[i]);
                }
        }
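
Again purely illustrative (a made-up 8-cpu layout and plain bitmasks instead of cpumask_t): the two loops above yield one map keyed on core_id (shared instruction cache) and one keyed on proc_id (shared integer execution unit); the kernel code additionally special-cases core_id 0 and proc_id -1 to mean "just this cpu".

#include <stdio.h>

#define NCPUS 8

/* Made-up topology: two cores of four threads, each core split
 * into two integer execution units of two threads.
 */
static const int core_id[NCPUS] = { 1, 1, 1, 1, 2, 2, 2, 2 };
static const int proc_id[NCPUS] = { 0, 0, 1, 1, 2, 2, 3, 3 };

int main(void)
{
	unsigned int core_map[NCPUS] = { 0 };
	unsigned int sibling_map[NCPUS] = { 0 };
	int i, j;

	for (i = 0; i < NCPUS; i++) {
		for (j = 0; j < NCPUS; j++) {
			if (core_id[i] == core_id[j])
				core_map[i] |= 1u << j;		/* shared icache */
			if (proc_id[i] == proc_id[j])
				sibling_map[i] |= 1u << j;	/* shared int unit */
		}
	}

	for (i = 0; i < NCPUS; i++)
		printf("cpu%d: core_map=0x%02x sibling_map=0x%02x\n",
		       i, core_map[i], sibling_map[i]);
	return 0;
}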
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 03c385de7619e517373fd9216670761354938473..445026fbec35e4619ca3b5a1b005a88f9841d5d6 100644
@@ -31,7 +31,7 @@ typedef struct {
        unsigned int    ecache_size;
        unsigned int    ecache_line_size;
        int             core_id;
-       unsigned int    __pad3;
+       int             proc_id;
 } cpuinfo_sparc;
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index f76e1492add5f34aebfb331a86de57aef81083ef..540021268684778a380b73f2f981d6414cf5ef43 100644
@@ -33,6 +33,7 @@ extern cpumask_t phys_cpu_present_map;
 #define cpu_possible_map phys_cpu_present_map
 
 extern cpumask_t cpu_sibling_map[NR_CPUS];
+extern cpumask_t cpu_core_map[NR_CPUS];
 
 /*
  *     General functions that each host system must provide.
diff --git a/include/asm-sparc64/topology.h b/include/asm-sparc64/topology.h
index e0d450d600ec2a7890cc7bca91572f8e56fd472a..4880f7ca0b608d300a05d4c9c299aa70cb0a4280 100644
@@ -1,12 +1,19 @@
 #ifndef _ASM_SPARC64_TOPOLOGY_H
 #define _ASM_SPARC64_TOPOLOGY_H
 
+#ifdef CONFIG_SMP
 #include <asm/spitfire.h>
-#define smt_capable()  (tlb_type == hypervisor)
-
-#include <asm-generic/topology.h>
 
+#define topology_physical_package_id(cpu)      (cpu_data(cpu).proc_id)
 #define topology_core_id(cpu)                  (cpu_data(cpu).core_id)
+#define topology_core_siblings(cpu)            (cpu_core_map[cpu])
 #define topology_thread_siblings(cpu)          (cpu_sibling_map[cpu])
+#define mc_capable()                           (tlb_type == hypervisor)
+#define smt_capable()                          (tlb_type == hypervisor)
+#endif /* CONFIG_SMP */
+
+#include <asm-generic/topology.h>
+
+#define cpu_coregroup_map(cpu)                 (cpu_core_map[cpu])
 
 #endif /* _ASM_SPARC64_TOPOLOGY_H */
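
Finally, an illustrative kernel-context snippet (not part of the commit, helper name made up) showing how the new core-sibling macro is meant to be consumed:

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

/* Hypothetical helper: list the cpus that share an instruction cache
 * (i.e. sit in the same core) with the given cpu.
 */
static void print_core_siblings(int cpu)
{
	int i;

	for_each_cpu_mask(i, topology_core_siblings(cpu))
		printk(KERN_INFO "cpu%d shares an instruction cache with cpu%d\n",
		       cpu, i);
}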