/*
 *  linux-2.6/arch/x86/kernel/e820_32.c
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/efi.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/setup.h>

struct e820map e820;
struct change_member {
        struct e820entry *pbios; /* pointer to original bios entry */
        unsigned long long addr; /* address for this change point */
};
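/*
 * Scratch space used by sanitize_e820_map() below: the change points are
 * the start/end addresses of every BIOS entry, overlap_list tracks the
 * entries covering the current change point, and new_bios receives the
 * rebuilt map.
 */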
static struct change_member change_point_list[2*E820MAX] __initdata;
static struct change_member *change_point[2*E820MAX] __initdata;
static struct e820entry *overlap_list[E820MAX] __initdata;
static struct e820entry new_bios[E820MAX] __initdata;
/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0x10000000;
#ifdef CONFIG_PCI
EXPORT_SYMBOL(pci_mem_start);
#endif
extern int user_defined_memmap;

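/*
 * Legacy ROM windows in the first megabyte.  probe_roms() below scans
 * these ranges and registers whatever it finds; the Adapter ROM entries
 * start out empty and are filled in as ROMs are discovered.
 */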
static struct resource system_rom_resource = {
        .name   = "System ROM",
        .start  = 0xf0000,
        .end    = 0xfffff,
        .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
};

static struct resource extension_rom_resource = {
        .name   = "Extension ROM",
        .start  = 0xe0000,
        .end    = 0xeffff,
        .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
};

static struct resource adapter_rom_resources[] = { {
        .name   = "Adapter ROM",
        .start  = 0xc8000,
        .end    = 0,
        .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
}, {
        .name   = "Adapter ROM",
        .start  = 0,
        .end    = 0,
        .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
}, {
        .name   = "Adapter ROM",
        .start  = 0,
        .end    = 0,
        .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
}, {
        .name   = "Adapter ROM",
        .start  = 0,
        .end    = 0,
        .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
}, {
        .name   = "Adapter ROM",
        .start  = 0,
        .end    = 0,
        .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
}, {
        .name   = "Adapter ROM",
        .start  = 0,
        .end    = 0,
        .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
} };

static struct resource video_rom_resource = {
        .name   = "Video ROM",
        .start  = 0xc0000,
        .end    = 0xc7fff,
        .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
};

#define ROMSIGNATURE 0xaa55

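/*
 * Check for the 0xaa55 signature at the start of a candidate ROM image.
 * probe_kernel_address() is used so that a faulting read is handled
 * gracefully instead of oopsing.
 */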
static int __init romsignature(const unsigned char *rom)
{
        const unsigned short * const ptr = (const unsigned short *)rom;
        unsigned short sig;

        return probe_kernel_address(ptr, sig) == 0 && sig == ROMSIGNATURE;
}

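/*
 * Verify a ROM image: every byte over the claimed length must be readable
 * and the bytes must sum to zero (mod 256).
 */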
static int __init romchecksum(const unsigned char *rom, unsigned long length)
{
        unsigned char sum, c;

        for (sum = 0; length && probe_kernel_address(rom++, c) == 0; length--)
                sum += c;
        return !length && !sum;
}

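/*
 * Scan the legacy ROM area (0xc0000-0xfffff) for the video BIOS, the
 * system BIOS, an optional extension ROM and any adapter ROMs, and
 * register whatever is found in the iomem resource tree.
 */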
static void __init probe_roms(void)
{
        const unsigned char *rom;
        unsigned long start, length, upper;
        unsigned char c;
        int i;

        /* video rom */
        upper = adapter_rom_resources[0].start;
        for (start = video_rom_resource.start; start < upper; start += 2048) {
                rom = isa_bus_to_virt(start);
                if (!romsignature(rom))
                        continue;

                video_rom_resource.start = start;

                if (probe_kernel_address(rom + 2, c) != 0)
                        continue;

                /* 0 < length <= 0x7f * 512, historically */
                length = c * 512;

                /* if checksum okay, trust length byte */
                if (length && romchecksum(rom, length))
                        video_rom_resource.end = start + length - 1;

                request_resource(&iomem_resource, &video_rom_resource);
                break;
        }

        start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
        if (start < upper)
                start = upper;

        /* system rom */
        request_resource(&iomem_resource, &system_rom_resource);
        upper = system_rom_resource.start;

        /* check for extension rom (ignore length byte!) */
        rom = isa_bus_to_virt(extension_rom_resource.start);
        if (romsignature(rom)) {
                length = extension_rom_resource.end - extension_rom_resource.start + 1;
                if (romchecksum(rom, length)) {
                        request_resource(&iomem_resource, &extension_rom_resource);
                        upper = extension_rom_resource.start;
                }
        }

        /* check for adapter roms on 2k boundaries */
        for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper; start += 2048) {
                rom = isa_bus_to_virt(start);
                if (!romsignature(rom))
                        continue;

                if (probe_kernel_address(rom + 2, c) != 0)
                        continue;

                /* 0 < length <= 0x7f * 512, historically */
                length = c * 512;

                /* but accept any length that fits if checksum okay */
                if (!length || start + length > upper || !romchecksum(rom, length))
                        continue;

                adapter_rom_resources[i].start = start;
                adapter_rom_resources[i].end = start + length - 1;
                request_resource(&iomem_resource, &adapter_rom_resources[i]);

                start = adapter_rom_resources[i++].end & ~2047UL;
        }
}

/*
 * Request address space for all standard RAM and ROM resources
 * and also for regions reported as reserved by the e820.
 */
void __init legacy_init_iomem_resources(struct resource *code_resource,
                struct resource *data_resource,
                struct resource *bss_resource)
{
        int i;

        probe_roms();
        for (i = 0; i < e820.nr_map; i++) {
                struct resource *res;
#ifndef CONFIG_RESOURCES_64BIT
                if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
                        continue;
#endif
                res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
                switch (e820.map[i].type) {
                case E820_RAM:  res->name = "System RAM"; break;
                case E820_ACPI: res->name = "ACPI Tables"; break;
                case E820_NVS:  res->name = "ACPI Non-volatile Storage"; break;
                default:        res->name = "reserved";
                }
                res->start = e820.map[i].addr;
                res->end = res->start + e820.map[i].size - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                if (request_resource(&iomem_resource, res)) {
                        kfree(res);
                        continue;
                }
                if (e820.map[i].type == E820_RAM) {
                        /*
                         *  We don't know which RAM region contains kernel data,
                         *  so we try it repeatedly and let the resource manager
                         *  test it.
                         */
                        request_resource(res, code_resource);
                        request_resource(res, data_resource);
                        request_resource(res, bss_resource);
#ifdef CONFIG_KEXEC
                        if (crashk_res.start != crashk_res.end)
                                request_resource(res, &crashk_res);
#endif
                }
        }
}

#if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION)
/**
 * e820_mark_nosave_regions - Find the ranges of physical addresses that do not
 * correspond to e820 RAM areas and mark the corresponding pages as nosave for
 * hibernation.
 *
 * This function requires the e820 map to be sorted and without any
 * overlapping entries and assumes the first e820 area to be RAM.
 */
void __init e820_mark_nosave_regions(void)
{
        int i;
        unsigned long pfn;

        pfn = PFN_DOWN(e820.map[0].addr + e820.map[0].size);
        for (i = 1; i < e820.nr_map; i++) {
                struct e820entry *ei = &e820.map[i];

                if (pfn < PFN_UP(ei->addr))
                        register_nosave_region(pfn, PFN_UP(ei->addr));

                pfn = PFN_DOWN(ei->addr + ei->size);
                if (ei->type != E820_RAM)
                        register_nosave_region(PFN_UP(ei->addr), pfn);

                if (pfn >= max_low_pfn)
                        break;
        }
}
#endif

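/*
 * Append an entry to the e820 map.  When EFI is enabled the firmware
 * memory map is used instead, so the e820 table is left untouched.
 */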
void __init add_memory_region(unsigned long long start,
                              unsigned long long size, int type)
{
        int x;

        if (!efi_enabled) {
                x = e820.nr_map;

                if (x == E820MAX) {
                        printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
                        return;
                }

                e820.map[x].addr = start;
                e820.map[x].size = size;
                e820.map[x].type = type;
                e820.nr_map++;
        }
} /* add_memory_region */

/*
 * Sanitize the BIOS e820 map.
 *
 * Some e820 responses include overlapping entries.  The following
 * replaces the original e820 map with a new one, removing overlaps.
 *
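 * Returns 0 and updates *pnr_map on success, or -1 if the map is left
 * untouched (fewer than two entries, or an entry that wraps past the
 * end of the address space).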
 */
int __init sanitize_e820_map(struct e820entry *biosmap, char *pnr_map)
{
        struct change_member *change_tmp;
        unsigned long current_type, last_type;
        unsigned long long last_addr;
        int chgidx, still_changing;
        int overlap_entries;
        int new_bios_entry;
        int old_nr, new_nr, chg_nr;
        int i;

        /*
                Visually we're performing the following (1,2,3,4 = memory types)...

                Sample memory map (w/overlaps):
                   ____22__________________
                   ______________________4_
                   ____1111________________
                   _44_____________________
                   11111111________________
                   ____________________33__
                   ___________44___________
                   __________33333_________
                   ______________22________
                   ___________________2222_
                   _________111111111______
                   _____________________11_
                   _________________4______

                Sanitized equivalent (no overlap):
                   1_______________________
                   _44_____________________
                   ___1____________________
                   ____22__________________
                   ______11________________
                   _________1______________
                   __________3_____________
                   ___________44___________
                   _____________33_________
                   _______________2________
                   ________________1_______
                   _________________4______
                   ___________________2____
                   ____________________33__
                   ______________________4_
        */
        /* if there's only one memory region, don't bother */
        if (*pnr_map < 2)
                return -1;

        old_nr = *pnr_map;

        /* bail out if we find any unreasonable addresses in bios map */
        for (i = 0; i < old_nr; i++)
                if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
                        return -1;

        /* create pointers for initial change-point information (for sorting) */
        for (i = 0; i < 2*old_nr; i++)
                change_point[i] = &change_point_list[i];

        /* record all known change-points (starting and ending addresses),
           omitting those that are for empty memory regions */
        chgidx = 0;
        for (i = 0; i < old_nr; i++) {
                if (biosmap[i].size != 0) {
                        change_point[chgidx]->addr = biosmap[i].addr;
                        change_point[chgidx++]->pbios = &biosmap[i];
                        change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
                        change_point[chgidx++]->pbios = &biosmap[i];
                }
        }
        chg_nr = chgidx;        /* true number of change-points */

        /* sort change-point list by memory addresses (low -> high) */
        still_changing = 1;
        while (still_changing) {
                still_changing = 0;
                for (i = 1; i < chg_nr; i++) {
                        /* if <current_addr> > <last_addr>, swap */
                        /* or, if current=<start_addr> & last=<end_addr>, swap */
                        if ((change_point[i]->addr < change_point[i-1]->addr) ||
                                ((change_point[i]->addr == change_point[i-1]->addr) &&
                                 (change_point[i]->addr == change_point[i]->pbios->addr) &&
                                 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
                           ) {
                                change_tmp = change_point[i];
                                change_point[i] = change_point[i-1];
                                change_point[i-1] = change_tmp;
                                still_changing = 1;
                        }
                }
        }

        /* create a new bios memory map, removing overlaps */
        overlap_entries = 0;     /* number of entries in the overlap table */
        new_bios_entry = 0;      /* index for creating new bios map entries */
        last_type = 0;           /* start with undefined memory type */
        last_addr = 0;           /* start with 0 as last starting address */
        /* loop through change-points, determining effect on the new bios map */
        for (chgidx = 0; chgidx < chg_nr; chgidx++) {
                /* keep track of all overlapping bios entries */
                if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr) {
                        /* add map entry to overlap list (> 1 entry implies an overlap) */
                        overlap_list[overlap_entries++] = change_point[chgidx]->pbios;
                } else {
                        /* remove entry from list (order independent, so swap with last) */
                        for (i = 0; i < overlap_entries; i++) {
                                if (overlap_list[i] == change_point[chgidx]->pbios)
                                        overlap_list[i] = overlap_list[overlap_entries-1];
                        }
                        overlap_entries--;
                }
                /* if there are overlapping entries, decide which "type" to use */
                /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
                current_type = 0;
                for (i = 0; i < overlap_entries; i++)
                        if (overlap_list[i]->type > current_type)
                                current_type = overlap_list[i]->type;
                /* continue building up new bios map based on this information */
                if (current_type != last_type) {
                        if (last_type != 0) {
                                new_bios[new_bios_entry].size =
                                        change_point[chgidx]->addr - last_addr;
                                /* move forward only if the new size was non-zero */
                                if (new_bios[new_bios_entry].size != 0)
                                        if (++new_bios_entry >= E820MAX)
                                                break;  /* no more space left for new bios entries */
                        }
                        if (current_type != 0) {
                                new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
                                new_bios[new_bios_entry].type = current_type;
                                last_addr = change_point[chgidx]->addr;
                        }
                        last_type = current_type;
                }
        }
        new_nr = new_bios_entry;   /* retain count for new bios entries */

        /* copy new bios mapping into original location */
        memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
        *pnr_map = new_nr;

        return 0;
}

/*
 * Copy the BIOS e820 map into a safe place.
 *
 * Sanity-check it while we're at it..
 *
 * If we're lucky and live on a modern system, the setup code
 * will have given us a memory map that we can use to properly
 * set up memory.  If we aren't, we'll fake a memory map.
 *
 * We check to see that the memory map contains at least 2 elements
 * before we'll use it, because the detection code in setup.S may
 * not be perfect and most every PC known to man has two memory
 * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
 * thinkpad 560x, for example, does not cooperate with the memory
 * detection code.)
 */
int __init copy_e820_map(struct e820entry *biosmap, int nr_map)
{
        /* Only one memory region (or negative)? Ignore it */
        if (nr_map < 2)
                return -1;

        do {
                unsigned long long start = biosmap->addr;
                unsigned long long size = biosmap->size;
                unsigned long long end = start + size;
                unsigned long type = biosmap->type;

                /* Overflow in 64 bits? Ignore the memory map. */
                if (start > end)
                        return -1;

                /*
                 * Some BIOSes claim RAM in the 640k - 1M region.
                 * Not right. Fix it up.
                 */
                if (type == E820_RAM) {
                        if (start < 0x100000ULL && end > 0xA0000ULL) {
                                if (start < 0xA0000ULL)
                                        add_memory_region(start, 0xA0000ULL-start, type);
                                if (end <= 0x100000ULL)
                                        continue;
                                start = 0x100000ULL;
                                size = end - start;
                        }
                }
                add_memory_region(start, size, type);
        } while (biosmap++, --nr_map);
        return 0;
}

/*
 * Callback for efi_memory_walk.
 */
static int __init
efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
{
        unsigned long *max_pfn = arg, pfn;

        if (start < end) {
                pfn = PFN_UP(end - 1);
                if (pfn > *max_pfn)
                        *max_pfn = pfn;
        }
        return 0;
}

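/*
 * Callback for efi_memory_walk: tell the memory model which page frames
 * are present (node 0).
 */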
static int __init
efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
{
        memory_present(0, PFN_UP(start), PFN_DOWN(end));
        return 0;
}

/*
 * Find the highest page frame number we have available
 */
void __init find_max_pfn(void)
{
        int i;

        max_pfn = 0;
        if (efi_enabled) {
                efi_memmap_walk(efi_find_max_pfn, &max_pfn);
                efi_memmap_walk(efi_memory_present_wrapper, NULL);
                return;
        }

        for (i = 0; i < e820.nr_map; i++) {
                unsigned long start, end;
                /* RAM? */
                if (e820.map[i].type != E820_RAM)
                        continue;
                start = PFN_UP(e820.map[i].addr);
                end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
                if (start >= end)
                        continue;
                if (end > max_pfn)
                        max_pfn = end;
                memory_present(0, start, end);
        }
}

/*
 * Free all available memory for boot time allocation.  Used
 * as a callback function by efi_memory_walk()
 */

static int __init
free_available_memory(unsigned long start, unsigned long end, void *arg)
{
        /* check max_low_pfn */
        if (start >= (max_low_pfn << PAGE_SHIFT))
                return 0;
        if (end >= (max_low_pfn << PAGE_SHIFT))
                end = max_low_pfn << PAGE_SHIFT;
        if (start < end)
                free_bootmem(start, end - start);

        return 0;
}
/*
 * Register fully available low RAM pages with the bootmem allocator.
 */
void __init register_bootmem_low_pages(unsigned long max_low_pfn)
{
        int i;

        if (efi_enabled) {
                efi_memmap_walk(free_available_memory, NULL);
                return;
        }
        for (i = 0; i < e820.nr_map; i++) {
                unsigned long curr_pfn, last_pfn, size;
                /*
                 * Reserve usable low memory
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                /*
                 * We are rounding up the start address of usable memory:
                 */
                curr_pfn = PFN_UP(e820.map[i].addr);
                if (curr_pfn >= max_low_pfn)
                        continue;
                /*
                 * ... and at the end of the usable range downwards:
                 */
                last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);

                if (last_pfn > max_low_pfn)
                        last_pfn = max_low_pfn;

                /*
                 * .. finally, did all the rounding and playing
                 * around just make the area go away?
                 */
                if (last_pfn <= curr_pfn)
                        continue;

                size = last_pfn - curr_pfn;
                free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
        }
}

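/*
 * Find the largest gap below 4GB in the e820 map and use it to place the
 * PCI MMIO window: pci_mem_start is set to the (rounded-up) start of
 * that gap.
 */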
void __init e820_register_memory(void)
{
        unsigned long gapstart, gapsize, round;
        unsigned long long last;
        int i;

        /*
         * Search for the biggest gap in the low 32 bits of the e820
         * memory space.
         */
        last = 0x100000000ull;
        gapstart = 0x10000000;
        gapsize = 0x400000;
        i = e820.nr_map;
        while (--i >= 0) {
                unsigned long long start = e820.map[i].addr;
                unsigned long long end = start + e820.map[i].size;

                /*
                 * Since "last" is at most 4GB, we know we'll
                 * fit in 32 bits if this condition is true
                 */
                if (last > end) {
                        unsigned long gap = last - end;

                        if (gap > gapsize) {
                                gapsize = gap;
                                gapstart = end;
                        }
                }
                if (start < last)
                        last = start;
        }

        /*
         * See how much we want to round up: start off with
         * rounding to the next 1MB area.
         */
        round = 0x100000;
        while ((gapsize >> 4) > round)
                round += round;
        /* Fun with two's complement */
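        /*
         * (gapstart + round) & -round advances gapstart to the next
         * multiple of round, e.g. gapstart 0xdff00000 with round
         * 0x2000000 yields 0xe0000000.
         */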
        pci_mem_start = (gapstart + round) & -round;

        printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
                pci_mem_start, gapstart, gapsize);
}

void __init print_memory_map(char *who)
{
        int i;

        for (i = 0; i < e820.nr_map; i++) {
                printk(" %s: %016Lx - %016Lx ", who,
                        e820.map[i].addr,
                        e820.map[i].addr + e820.map[i].size);
                switch (e820.map[i].type) {
                case E820_RAM:  printk("(usable)\n");
                                break;
                case E820_RESERVED:
                                printk("(reserved)\n");
                                break;
                case E820_ACPI:
                                printk("(ACPI data)\n");
                                break;
                case E820_NVS:
                                printk("(ACPI NVS)\n");
                                break;
                default:        printk("type %u\n", e820.map[i].type);
                                break;
                }
        }
}

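/*
 * Trim the EFI memory map so that no available-memory descriptor extends
 * past 'size' bytes.  Runtime-service regions are kept regardless, since
 * they are needed for EFI runtime calls.
 */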
static __init __always_inline void efi_limit_regions(unsigned long long size)
{
        unsigned long long current_addr = 0;
        efi_memory_desc_t *md, *next_md;
        void *p, *p1;
        int i, j;

        j = 0;
        p1 = memmap.map;
        for (p = p1, i = 0; p < memmap.map_end; p += memmap.desc_size, i++) {
                md = p;
                next_md = p1;
                current_addr = md->phys_addr +
                        PFN_PHYS(md->num_pages);
                if (is_available_memory(md)) {
                        if (md->phys_addr >= size)
                                continue;
                        memcpy(next_md, md, memmap.desc_size);
                        if (current_addr >= size) {
                                next_md->num_pages -=
                                        PFN_UP(current_addr - size);
                        }
                        p1 += memmap.desc_size;
                        next_md = p1;
                        j++;
                } else if ((md->attribute & EFI_MEMORY_RUNTIME) ==
                           EFI_MEMORY_RUNTIME) {
                        /* In order to make runtime services
                         * available we have to include runtime
                         * memory regions in the memory map */
                        memcpy(next_md, md, memmap.desc_size);
                        p1 += memmap.desc_size;
                        next_md = p1;
                        j++;
                }
        }
        memmap.nr_map = j;
        memmap.map_end = memmap.map +
                (memmap.nr_map * memmap.desc_size);
}

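/*
 * Truncate the memory map (EFI or e820) so that usable RAM ends at 'size'
 * bytes; RAM entries entirely above the limit are dropped.
 */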
void __init limit_regions(unsigned long long size)
{
        unsigned long long current_addr;
        int i;

        print_memory_map("limit_regions start");
        if (efi_enabled) {
                efi_limit_regions(size);
                return;
        }
        for (i = 0; i < e820.nr_map; i++) {
                current_addr = e820.map[i].addr + e820.map[i].size;
                if (current_addr < size)
                        continue;

                if (e820.map[i].type != E820_RAM)
                        continue;

                if (e820.map[i].addr >= size) {
                        /*
                         * This region starts past the end of the
                         * requested size, skip it completely.
                         */
                        e820.nr_map = i;
                } else {
                        e820.nr_map = i + 1;
                        e820.map[i].size -= current_addr - size;
                }
                print_memory_map("limit_regions endfor");
                return;
        }
        print_memory_map("limit_regions endfunc");
}

/*
 * This function checks if any part of the range <start,end> is mapped
 * with type.
 */
int
e820_any_mapped(u64 start, u64 end, unsigned type)
{
        int i;
        for (i = 0; i < e820.nr_map; i++) {
                const struct e820entry *ei = &e820.map[i];
                if (type && ei->type != type)
                        continue;
                if (ei->addr >= end || ei->addr + ei->size <= start)
                        continue;
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(e820_any_mapped);

/*
 * This function checks if the entire range <start,end> is mapped with type.
 *
 * Note: this function only works correctly if the e820 table is sorted and
 * non-overlapping, which is the case after sanitize_e820_map() has run.
 */
int __init
e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
{
        u64 start = s;
        u64 end = e;
        int i;
        for (i = 0; i < e820.nr_map; i++) {
                struct e820entry *ei = &e820.map[i];
                if (type && ei->type != type)
                        continue;
                /* does this entry overlap the range <start,end> at all? */
                if (ei->addr >= end || ei->addr + ei->size <= start)
                        continue;
                /* if the region is at the beginning of <start,end> we move
                 * start to the end of the region since it's ok until there
                 */
                if (ei->addr <= start)
                        start = ei->addr + ei->size;
                /* if start is now at or beyond end, we're done, full
                 * coverage */
                if (start >= end)
                        return 1; /* we're done */
        }
        return 0;
}

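/*
 * Handle the memmap= boot option: "exactmap" throws away the BIOS map so
 * a fresh one can be built from further memmap= options; nn@ss, nn#ss and
 * nn$ss add RAM, ACPI and reserved regions respectively; a plain size
 * simply trims the existing map.
 */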
static int __init parse_memmap(char *arg)
{
        if (!arg)
                return -EINVAL;

        if (strcmp(arg, "exactmap") == 0) {
#ifdef CONFIG_CRASH_DUMP
                /* If we are doing a crash dump, we
                 * still need to know the real mem
                 * size before original memory map is
                 * reset.
                 */
                find_max_pfn();
                saved_max_pfn = max_pfn;
#endif
                e820.nr_map = 0;
                user_defined_memmap = 1;
        } else {
                /* If the user specifies memory size, we
                 * limit the BIOS-provided memory map to
                 * that size. exactmap can be used to specify
                 * the exact map. mem=number can be used to
                 * trim the existing memory map.
                 */
                unsigned long long start_at, mem_size;

                mem_size = memparse(arg, &arg);
                if (*arg == '@') {
                        start_at = memparse(arg+1, &arg);
                        add_memory_region(start_at, mem_size, E820_RAM);
                } else if (*arg == '#') {
                        start_at = memparse(arg+1, &arg);
                        add_memory_region(start_at, mem_size, E820_ACPI);
                } else if (*arg == '$') {
                        start_at = memparse(arg+1, &arg);
                        add_memory_region(start_at, mem_size, E820_RESERVED);
                } else {
                        limit_regions(mem_size);
                        user_defined_memmap = 1;
                }
        }
        return 0;
}
early_param("memmap", parse_memmap);