DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
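+
+/*
+ * Use 1GB pages for the kernel direct mapping when the hardware and
+ * configuration allow it; the "gbpages"/"nogbpages" boot parameters
+ * below override the CONFIG_DIRECT_GBPAGES default.
+ */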
+int direct_gbpages __meminitdata
+#ifdef CONFIG_DIRECT_GBPAGES
+ = 1
+#endif
+;
+
+static int __init parse_direct_gbpages_off(char *arg)
+{
+ direct_gbpages = 0;
+ return 0;
+}
+early_param("nogbpages", parse_direct_gbpages_off);
+
+static int __init parse_direct_gbpages_on(char *arg)
+{
+ direct_gbpages = 1;
+ return 0;
+}
+early_param("gbpages", parse_direct_gbpages_on);
+
/*
* NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
* physical space so we can cache the place of the first one and move
printk(KERN_INFO "Mem-info:\n");
show_free_areas();
- printk(KERN_INFO "Free swap: %6ldkB\n",
- nr_swap_pages << (PAGE_SHIFT-10));
-
for_each_online_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; ++i) {
/*
}
if (pud_val(*pud)) {
- phys_pmd_update(pud, addr, end);
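+ /* already mapped: only descend to the pmd level if it is not a 1GB page */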
+ if (!pud_large(*pud))
+ phys_pmd_update(pud, addr, end);
+ continue;
+ }
+
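+ /* nothing mapped here yet: cover the whole pud entry with one 1GB page */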
+ if (direct_gbpages) {
+ set_pte((pte_t *)pud,
+ pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
continue;
}
unsigned long puds, pmds, tables, start;
puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
- pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
- tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
- round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
+ tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
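+ /* with 1GB pages the direct mapping needs no pmd page tables */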
+ if (!direct_gbpages) {
+ pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+ tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
+ }
/*
* RED-PEN putting page tables only on node 0 could
(table_start << PAGE_SHIFT) + tables);
}
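+
+/*
+ * Keep direct_gbpages only when the CPU advertises 1GB page support
+ * (cpu_has_gbpages); otherwise fall back to 2MB mappings.
+ */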
+static void __init init_gbpages(void)
+{
+ if (direct_gbpages && cpu_has_gbpages)
+ printk(KERN_INFO "Using GB pages for direct mapping\n");
+ else
+ direct_gbpages = 0;
+}
+
+#ifdef CONFIG_MEMTEST_BOOTPARAM
+
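+/*
+ * Write a test pattern over a physical range, read it back and reserve
+ * any mismatching words early so they never reach the bootmem allocator.
+ */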
+static void __init memtest(unsigned long start_phys, unsigned long size,
+ unsigned pattern)
+{
+ unsigned long i;
+ unsigned long *start;
+ unsigned long start_bad;
+ unsigned long last_bad;
+ unsigned long val;
+ unsigned long start_phys_aligned;
+ unsigned long count;
+ unsigned long incr;
+
+ switch (pattern) {
+ case 0:
+ val = 0UL;
+ break;
+ case 1:
+ val = -1UL;
+ break;
+ case 2:
+ val = 0x5555555555555555UL;
+ break;
+ case 3:
+ val = 0xaaaaaaaaaaaaaaaaUL;
+ break;
+ default:
+ return;
+ }
+
+ incr = sizeof(unsigned long);
+ start_phys_aligned = ALIGN(start_phys, incr);
+ count = (size - (start_phys_aligned - start_phys)) / incr;
+ start = __va(start_phys_aligned);
+ start_bad = 0;
+ last_bad = 0;
+
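+ /* first pass: fill the whole range with the pattern */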
+ for (i = 0; i < count; i++)
+ start[i] = val;
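+ /* second pass: verify, coalescing consecutive bad words into ranges */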
+ for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
+ if (*start != val) {
+ if (start_phys_aligned == last_bad + incr) {
+ last_bad += incr;
+ } else {
+ if (start_bad) {
+ printk(KERN_CONT "\n %016lx bad mem addr %016lx - %016lx reserved",
+ val, start_bad, last_bad + incr);
+ reserve_early(start_bad, last_bad + incr, "BAD RAM");
+ }
+ start_bad = last_bad = start_phys_aligned;
+ }
+ }
+ }
+ if (start_bad) {
+ printk(KERN_CONT "\n %016lx bad mem addr %016lx - %016lx reserved",
+ val, start_bad, last_bad + incr);
+ reserve_early(start_bad, last_bad + incr, "BAD RAM");
+ }
+}
+
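+/* number of test patterns to run; 0 disables the early memtest */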
+static int memtest_pattern __initdata = CONFIG_MEMTEST_BOOTPARAM_VALUE;
+
+static int __init parse_memtest(char *arg)
+{
+ if (arg)
+ memtest_pattern = simple_strtoul(arg, NULL, 0);
+ return 0;
+}
+
+early_param("memtest", parse_memtest);
+
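+/*
+ * Walk the free e820 RAM between start and end and run every requested
+ * pattern over each chunk before bootmem takes the memory over.
+ */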
+static void __init early_memtest(unsigned long start, unsigned long end)
+{
+ unsigned long t_start, t_size;
+ unsigned pattern;
+
+ if (!memtest_pattern)
+ return;
+
+ printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern);
+ for (pattern = 0; pattern < memtest_pattern; pattern++) {
+ t_start = start;
+ t_size = 0;
+ while (t_start < end) {
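+ /* next free e820 RAM chunk at or above t_start; t_size gets its length */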
+ t_start = find_e820_area_size(t_start, &t_size, 1);
+
+ /* done ? */
+ if (t_start >= end)
+ break;
+ if (t_start + t_size > end)
+ t_size = end - t_start;
+
+ printk(KERN_CONT "\n %016lx - %016lx pattern %d",
+ t_start, t_start + t_size, pattern);
+
+ memtest(t_start, t_size, pattern);
+
+ t_start += t_size;
+ }
+ }
+ printk(KERN_CONT "\n");
+}
+#else
+static void __init early_memtest(unsigned long start, unsigned long end)
+{
+}
+#endif
+
/*
* Setup the direct mapping of the physical memory at PAGE_OFFSET.
* This runs before bootmem is initialized and gets pages directly from
void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
{
unsigned long next;
+ unsigned long start_phys = start, end_phys = end;
- pr_debug("init_memory_mapping\n");
+ printk(KERN_INFO "init_memory_mapping\n");
/*
* Find space for the kernel direct mapping tables.
* memory mapped. Unfortunately this is done currently before the
* nodes are discovered.
*/
- if (!after_bootmem)
+ if (!after_bootmem) {
+ init_gbpages();
find_early_table_space(end);
+ }
start = (unsigned long)__va(start);
end = (unsigned long)__va(end);
if (!after_bootmem)
reserve_early(table_start << PAGE_SHIFT,
table_end << PAGE_SHIFT, "PGTABLE");
+
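+ /* scan the just-mapped RAM for bad bits before bootmem can hand it out */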
+ if (!after_bootmem)
+ early_memtest(start_phys, end_phys);
}
#ifndef CONFIG_NUMA
void mark_rodata_ro(void)
{
- unsigned long start = (unsigned long)_stext, end;
-
-#ifdef CONFIG_HOTPLUG_CPU
- /* It must still be possible to apply SMP alternatives. */
- if (num_possible_cpus() > 1)
- start = (unsigned long)_etext;
-#endif
-
-#ifdef CONFIG_KPROBES
- start = (unsigned long)__start_rodata;
-#endif
-
- end = (unsigned long)__end_rodata;
- start = (start + PAGE_SIZE - 1) & PAGE_MASK;
- end &= PAGE_MASK;
- if (end <= start)
- return;
-
+ unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
(end - start) >> 10);
set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
}
+
#endif
#ifdef CONFIG_BLK_DEV_INITRD