/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>

#define CPOLICY_UNCACHED        0
#define CPOLICY_BUFFERED        1
#define CPOLICY_WRITETHROUGH    2
#define CPOLICY_WRITEBACK       3
#define CPOLICY_WRITEALLOC      4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
        const char      policy[16];
        unsigned int    cr_mask;
        unsigned int    pmd;
        unsigned int    pte;
};

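/*
 * Table of the supported cache policies, selectable at boot time with the
 * "cachepolicy=" parameter.  Each entry gives the control register bits to
 * clear and the PMD/PTE cacheability bits to use for that policy.
 */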
static struct cachepolicy cache_policies[] __initdata = {
        {
                .policy         = "uncached",
                .cr_mask        = CR_W|CR_C,
                .pmd            = PMD_SECT_UNCACHED,
                .pte            = 0,
        }, {
                .policy         = "buffered",
                .cr_mask        = CR_C,
                .pmd            = PMD_SECT_BUFFERED,
                .pte            = PTE_BUFFERABLE,
        }, {
                .policy         = "writethrough",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WT,
                .pte            = PTE_CACHEABLE,
        }, {
                .policy         = "writeback",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WB,
                .pte            = PTE_BUFFERABLE|PTE_CACHEABLE,
        }, {
                .policy         = "writealloc",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WBWA,
                .pte            = PTE_BUFFERABLE|PTE_CACHEABLE,
        }
};

/*
 * These are useful for identifying cache coherency problems by allowing
 * the cache or the cache and write buffer to be turned off.  (Note: the
 * write buffer should not be on with the cache off.)
 */
static void __init early_cachepolicy(char **p)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
                int len = strlen(cache_policies[i].policy);

                if (memcmp(*p, cache_policies[i].policy, len) == 0) {
                        cachepolicy = i;
                        cr_alignment &= ~cache_policies[i].cr_mask;
                        cr_no_alignment &= ~cache_policies[i].cr_mask;
                        *p += len;
                        break;
                }
        }
        if (i == ARRAY_SIZE(cache_policies))
                printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
        flush_cache_all();
        set_cr(cr_alignment);
}

static void __init early_nocache(char **__unused)
{
        char *p = "buffered";
        printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(&p);
}

static void __init early_nowrite(char **__unused)
{
        char *p = "uncached";
        printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(&p);
}

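/*
 * Parse the "ecc=" early parameter: "on" enables the level 1 protection
 * (ECC) bit for subsequent page table entries, "off" disables it.
 */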
static void __init early_ecc(char **p)
{
        if (memcmp(*p, "on", 2) == 0) {
                ecc_mask = PMD_PROTECTION;
                *p += 2;
        } else if (memcmp(*p, "off", 3) == 0) {
                ecc_mask = 0;
                *p += 3;
        }
}

__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);

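/*
 * "noalign" kernel parameter: clear the alignment fault enable bit (CR_A)
 * in the CPU control register.
 */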
static int __init noalign_setup(char *__unused)
{
        cr_alignment &= ~CR_A;
        cr_no_alignment &= ~CR_A;
        set_cr(cr_alignment);
        return 1;
}

__setup("noalign", noalign_setup);

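/* Index of the first level 1 entry covering kernel (non-user) space. */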
#define FIRST_KERNEL_PGD_NR     (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
        pgd_t *new_pgd, *init_pgd;
        pmd_t *new_pmd, *init_pmd;
        pte_t *new_pte, *init_pte;

        new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
        if (!new_pgd)
                goto no_pgd;

        memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

        init_pgd = pgd_offset_k(0);

        if (!vectors_high()) {
                /*
                 * This lock is here just to satisfy pmd_alloc and pte_lock
                 */
                spin_lock(&mm->page_table_lock);

                /*
                 * On ARM, first page must always be allocated since it
                 * contains the machine vectors.
                 */
                new_pmd = pmd_alloc(mm, new_pgd, 0);
                if (!new_pmd)
                        goto no_pmd;

                new_pte = pte_alloc_map(mm, new_pmd, 0);
                if (!new_pte)
                        goto no_pte;

                init_pmd = pmd_offset(init_pgd, 0);
                init_pte = pte_offset_map_nested(init_pmd, 0);
                set_pte(new_pte, *init_pte);
                pte_unmap_nested(init_pte);
                pte_unmap(new_pte);

                spin_unlock(&mm->page_table_lock);
        }

        /*
         * Copy over the kernel and IO PGD entries
         */
        memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
                       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

        clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

        return new_pgd;

no_pte:
        spin_unlock(&mm->page_table_lock);
        pmd_free(new_pmd);
        free_pages((unsigned long)new_pgd, 2);
        return NULL;

no_pmd:
        spin_unlock(&mm->page_table_lock);
        free_pages((unsigned long)new_pgd, 2);
        return NULL;

no_pgd:
        return NULL;
}

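/*
 * Free a level 1 page table allocated by get_pgd_slow(), along with the
 * pte table backing the vector page, if one was allocated for it.
 */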
void free_pgd_slow(pgd_t *pgd)
{
        pmd_t *pmd;
        struct page *pte;

        if (!pgd)
                return;

        /* pgd is always present and good */
        pmd = (pmd_t *)pgd;
        if (pmd_none(*pmd))
                goto free;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                goto free;
        }

        pte = pmd_page(*pmd);
        pmd_clear(pmd);
        dec_page_state(nr_page_table_pages);
        pte_free(pte);
        pmd_free(pmd);
free:
        free_pages((unsigned long) pgd, 2);
}

/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  This operates on half-
 * pgdir entry increments.
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
        pmd_t *pmdp;

        pmdp = pmd_offset(pgd_offset_k(virt), virt);
        if (virt & (1 << 20))
                pmdp++;

        *pmdp = __pmd(phys | prot);
        flush_pmd_entry(pmdp);
}

/*
 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
 */
static inline void
alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
{
        int i;

        for (i = 0; i < 16; i += 1) {
                alloc_init_section(virt, phys & SUPERSECTION_MASK,
                                   prot | PMD_SECT_SUPER);

                virt += (PGDIR_SIZE / 2);
                phys += (PGDIR_SIZE / 2);
        }
}

/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
        pmd_t *pmdp;
        pte_t *ptep;

        pmdp = pmd_offset(pgd_offset_k(virt), virt);

        if (pmd_none(*pmdp)) {
                unsigned long pmdval;
                ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
                                               sizeof(pte_t));

                pmdval = __pa(ptep) | prot_l1;
                pmdp[0] = __pmd(pmdval);
                pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
                flush_pmd_entry(pmdp);
        }
        ptep = pte_offset_kernel(pmdp, virt);

        set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}

/*
 * Clear any PGD mapping.  On a two-level page table system,
 * the clearance is done by the middle-level functions (pmd)
 * rather than the top-level (pgd) functions.
 */
static inline void clear_mapping(unsigned long virt)
{
        pmd_clear(pmd_offset(pgd_offset_k(virt), virt));
}

struct mem_types {
        unsigned int    prot_pte;
        unsigned int    prot_l1;
        unsigned int    prot_sect;
        unsigned int    domain;
};

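/*
 * Per-mapping-type protection and domain settings used by create_mapping().
 * The entries are adjusted for the running CPU by build_mem_type_table().
 */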
static struct mem_types mem_types[] __initdata = {
        [MT_DEVICE] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_WRITE,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
                                PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_IO,
        },
        [MT_CACHECLEAN] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MINICLEAN] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_LOW_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
        [MT_HIGH_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_USER | L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
        [MT_MEMORY] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_ROM] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_WRITE,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
                                PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
                                PMD_SECT_TEX(1),
                .domain    = DOMAIN_IO,
        }
};

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
        struct cachepolicy *cp;
        unsigned int cr = get_cr();
        int cpu_arch = cpu_architecture();
        int i;

#if defined(CONFIG_CPU_DCACHE_DISABLE)
        if (cachepolicy > CPOLICY_BUFFERED)
                cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
        if (cachepolicy > CPOLICY_WRITETHROUGH)
                cachepolicy = CPOLICY_WRITETHROUGH;
#endif
        if (cpu_arch < CPU_ARCH_ARMv5) {
                if (cachepolicy >= CPOLICY_WRITEALLOC)
                        cachepolicy = CPOLICY_WRITEBACK;
                ecc_mask = 0;
        }

        if (cpu_arch <= CPU_ARCH_ARMv5) {
                for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        if (mem_types[i].prot_l1)
                                mem_types[i].prot_l1 |= PMD_BIT4;
                        if (mem_types[i].prot_sect)
                                mem_types[i].prot_sect |= PMD_BIT4;
                }
        }

        /*
         * ARMv6 and above have extended page tables.
         */
        if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
                /*
                 * bit 4 becomes XN which we must clear for the
                 * kernel memory mapping.
                 */
                mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
                mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
                /*
                 * Mark cache clean areas and XIP ROM read only
                 * from SVC mode and no access from userspace.
                 */
                mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
        }

        cp = &cache_policies[cachepolicy];

        if (cpu_arch >= CPU_ARCH_ARMv5) {
                mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
                mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
        } else {
                mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
                mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
                mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
        }

        mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
        mem_types[MT_ROM].prot_sect |= cp->pmd;

        for (i = 0; i < 16; i++) {
                unsigned long v = pgprot_val(protection_map[i]);
                v &= (~(PTE_BUFFERABLE|PTE_CACHEABLE)) | cp->pte;
                protection_map[i] = __pgprot(v);
        }

        pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
                                 L_PTE_DIRTY | L_PTE_WRITE |
                                 L_PTE_EXEC | cp->pte);

        switch (cp->pmd) {
        case PMD_SECT_WT:
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
                break;
        case PMD_SECT_WB:
        case PMD_SECT_WBWA:
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
                break;
        }
        printk("Memory policy: ECC %sabled, Data cache %s\n",
                ecc_mask ? "en" : "dis", cp->policy);
}

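/* Virtual base of the CPU vector page: 0xffff0000 with high vectors, 0 otherwise. */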
#define vectors_base()  (vectors_high() ? 0xffff0000 : 0)

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
        unsigned long virt, length;
        int prot_sect, prot_l1, domain;
        pgprot_t prot_pte;
        long off;

        if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
                printk(KERN_WARNING "BUG: not creating mapping for "
                       "0x%08lx at 0x%08lx in user region\n",
                       md->physical, md->virtual);
                return;
        }

        if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
            md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
                printk(KERN_WARNING "BUG: mapping for 0x%08lx at 0x%08lx "
                       "overlaps vmalloc space\n",
                       md->physical, md->virtual);
        }

        domain    = mem_types[md->type].domain;
        prot_pte  = __pgprot(mem_types[md->type].prot_pte);
        prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
        prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);

        virt   = md->virtual;
        off    = md->physical - virt;
        length = md->length;

        if (mem_types[md->type].prot_l1 == 0 &&
            (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
                printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
                       "be mapped using pages, ignoring.\n",
                       md->physical, md->virtual);
                return;
        }

        while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
                alloc_init_page(virt, virt + off, prot_l1, prot_pte);

                virt   += PAGE_SIZE;
                length -= PAGE_SIZE;
        }

        /* N.B. ARMv6 supersections are only defined to work with domain 0.
         *      Since domain assignments can in fact be arbitrary, the
         *      'domain == 0' check below is required to ensure that ARMv6
         *      supersections are only allocated for domain 0 regardless
         *      of the actual domain assignments in use.
         */
        if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
                /* Align to supersection boundary */
                while ((virt & ~SUPERSECTION_MASK || (virt + off) &
                        ~SUPERSECTION_MASK) && length >= (PGDIR_SIZE / 2)) {
                        alloc_init_section(virt, virt + off, prot_sect);

                        virt   += (PGDIR_SIZE / 2);
                        length -= (PGDIR_SIZE / 2);
                }

                while (length >= SUPERSECTION_SIZE) {
                        alloc_init_supersection(virt, virt + off, prot_sect);

                        virt   += SUPERSECTION_SIZE;
                        length -= SUPERSECTION_SIZE;
                }
        }

        /*
         * A section mapping covers half a "pgdir" entry.
         */
        while (length >= (PGDIR_SIZE / 2)) {
                alloc_init_section(virt, virt + off, prot_sect);

                virt   += (PGDIR_SIZE / 2);
                length -= (PGDIR_SIZE / 2);
        }

        while (length >= PAGE_SIZE) {
                alloc_init_page(virt, virt + off, prot_l1, prot_pte);

                virt   += PAGE_SIZE;
                length -= PAGE_SIZE;
        }
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the MMU off.
 */
void setup_mm_for_reboot(char mode)
{
        unsigned long pmdval;
        pgd_t *pgd;
        pmd_t *pmd;
        int i;
        int cpu_arch = cpu_architecture();

        if (current->mm && current->mm->pgd)
                pgd = current->mm->pgd;
        else
                pgd = init_mm.pgd;

        for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++) {
                pmdval = (i << PGDIR_SHIFT) |
                         PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
                         PMD_TYPE_SECT;
                if (cpu_arch <= CPU_ARCH_ARMv5)
                        pmdval |= PMD_BIT4;
                pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT);
                pmd[0] = __pmd(pmdval);
                pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
                flush_pmd_entry(pmd);
        }
}

extern void _stext, _etext;

/*
 * Set up the initial mappings.  We use the page we allocated for the zero
 * page to hold the mappings, which will get overwritten by the vectors in
 * traps_init().  The mappings must be in virtual address order.
 */
void __init memtable_init(struct meminfo *mi)
{
        struct map_desc *init_maps, *p, *q;
        unsigned long address = 0;
        int i;

        build_mem_type_table();

        init_maps = p = alloc_bootmem_low_pages(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
        p->physical   = CONFIG_XIP_PHYS_ADDR & PMD_MASK;
        p->virtual    = (unsigned long)&_stext & PMD_MASK;
        p->length     = ((unsigned long)&_etext - p->virtual + ~PMD_MASK) & PMD_MASK;
        p->type       = MT_ROM;
        p ++;
#endif

        for (i = 0; i < mi->nr_banks; i++) {
                if (mi->bank[i].size == 0)
                        continue;

                p->physical   = mi->bank[i].start;
                p->virtual    = __phys_to_virt(p->physical);
                p->length     = mi->bank[i].size;
                p->type       = MT_MEMORY;
                p ++;
        }

#ifdef FLUSH_BASE
        p->physical   = FLUSH_BASE_PHYS;
        p->virtual    = FLUSH_BASE;
        p->length     = PGDIR_SIZE;
        p->type       = MT_CACHECLEAN;
        p ++;
#endif

#ifdef FLUSH_BASE_MINICACHE
        p->physical   = FLUSH_BASE_PHYS + PGDIR_SIZE;
        p->virtual    = FLUSH_BASE_MINICACHE;
        p->length     = PGDIR_SIZE;
        p->type       = MT_MINICLEAN;
        p ++;
#endif

        /*
         * Go through the initial mappings, but clear out any
         * pgdir entries that are not in the description.
         */
        q = init_maps;
        do {
                if (address < q->virtual || q == p) {
                        clear_mapping(address);
                        address += PGDIR_SIZE;
                } else {
                        create_mapping(q);

                        address = q->virtual + q->length;
                        address = (address + PGDIR_SIZE - 1) & PGDIR_MASK;

                        q ++;
                }
        } while (address != 0);

        /*
         * Create a mapping for the machine vectors at the high-vectors
         * location (0xffff0000).  If we aren't using high-vectors, also
         * create a mapping at the low-vectors virtual address.
         */
        init_maps->physical   = virt_to_phys(init_maps);
        init_maps->virtual    = 0xffff0000;
        init_maps->length     = PAGE_SIZE;
        init_maps->type       = MT_HIGH_VECTORS;
        create_mapping(init_maps);

        if (!vectors_high()) {
                init_maps->virtual = 0;
                init_maps->type = MT_LOW_VECTORS;
                create_mapping(init_maps);
        }

        flush_cache_all();
        flush_tlb_all();
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
        int i;

        for (i = 0; i < nr; i++)
                create_mapping(io_desc + i);
}

static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        unsigned long pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn);
        end_pg = pfn_to_page(end_pfn);

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = PAGE_ALIGN(__pa(start_pg));
        pgend = __pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}

static inline void free_unused_memmap_node(int node, struct meminfo *mi)
{
        unsigned long bank_start, prev_bank_end = 0;
        unsigned int i;

        /*
         * [FIXME] This relies on each bank being in address order.  This
         * may not be the case, especially if the user has provided the
         * information on the command line.
         */
        for (i = 0; i < mi->nr_banks; i++) {
                if (mi->bank[i].size == 0 || mi->bank[i].node != node)
                        continue;

                bank_start = mi->bank[i].start >> PAGE_SHIFT;
                if (bank_start < prev_bank_end) {
                        printk(KERN_ERR "MEM: unordered memory banks.  "
                                "Not freeing memmap.\n");
                        break;
                }

                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_bank_end && prev_bank_end != bank_start)
                        free_memmap(node, prev_bank_end, bank_start);

                prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
                                           mi->bank[i].size) >> PAGE_SHIFT;
        }
}

/*
 * The mem_map array can get very big.  Free
 * the unused area of the memory map.
 */
void __init create_memmap_holes(struct meminfo *mi)
{
        int node;

        for_each_online_node(node)
                free_unused_memmap_node(node, mi);
}