/* Generic MTRR (Memory Type Range Register) driver.

   Copyright (C) 1997-2000 Richard Gooch
   Copyright (c) 2002 Patrick Mochel

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with this library; if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

   Richard Gooch may be reached by email at rgooch@atnf.csiro.au
   The postal address is:
   Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

   Source: "Pentium Pro Family Developer's Manual, Volume 3:
   Operating System Writer's Guide" (Intel document number 242692),

   This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>

   Source: Intel Architecture Software Developers Manual, Volume 3:
   System Programming Guide; Section 9.11. (1997 edition - PPro).
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/kvm_para.h>
u32 num_var_ranges = 0;

unsigned int mtrr_usage_table[MAX_VAR_RANGES];
static DEFINE_MUTEX(mtrr_mutex);

u64 size_or_mask, size_and_mask;

static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] = {};

struct mtrr_ops *mtrr_if = NULL;

static void set_mtrr(unsigned int reg, unsigned long base,
                     unsigned long size, mtrr_type type);

void set_mtrr_ops(struct mtrr_ops *ops)
{
    if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
        mtrr_ops[ops->vendor] = ops;
}
/* Returns non-zero if we have the write-combining memory type */
static int have_wrcomb(void)
{
    struct pci_dev *dev;
    u8 rev;

    if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
        /* ServerWorks LE chipsets < rev 6 have problems with
           write-combining. Don't allow it and leave room for other
           chipsets to be tagged. */
        if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
            dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
            pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
            if (rev <= 5) {
                printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
                pci_dev_put(dev);
                return 0;
            }
        }
        /* Intel 450NX erratum #23: non-ascending cache line evictions to
           write-combining memory may result in data corruption. */
        if (dev->vendor == PCI_VENDOR_ID_INTEL &&
            dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
            printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
            pci_dev_put(dev);
            return 0;
        }
        pci_dev_put(dev);
    }
    return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
}
/* This function determines the number of variable MTRRs */
static void __init set_num_var_ranges(void)
{
    unsigned long config = 0, dummy;

    if (use_intel()) {
        rdmsr(MTRRcap_MSR, config, dummy);
    } else if (is_cpu(AMD))
        config = 2;
    else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
        config = 8;

    num_var_ranges = config & 0xff;
}
static void __init init_table(void)
{
    int i, max;

    max = num_var_ranges;
    for (i = 0; i < max; i++)
        mtrr_usage_table[i] = 1;
}
struct set_mtrr_data {
    atomic_t count;
    atomic_t gate;
    unsigned long smp_base;
    unsigned long smp_size;
    unsigned int smp_reg;
    mtrr_type smp_type;
};
static void ipi_handler(void *info)
/* [SUMMARY] Synchronisation handler. Executed by "other" CPUs. */
{
    struct set_mtrr_data *data = info;
    unsigned long flags;

    local_irq_save(flags);

    atomic_dec(&data->count);
    while (!atomic_read(&data->gate))
        cpu_relax();

    /* The master has cleared me to execute */
    if (data->smp_reg != ~0U)
        mtrr_if->set(data->smp_reg, data->smp_base,
                     data->smp_size, data->smp_type);

    atomic_dec(&data->count);
    while (atomic_read(&data->gate))
        cpu_relax();

    atomic_dec(&data->count);
    local_irq_restore(flags);
}
static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
    return type1 == MTRR_TYPE_UNCACHABLE ||
           type2 == MTRR_TYPE_UNCACHABLE ||
           (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
           (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
}
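/*
 * Illustrative note (annotation, not in the original source): overlaps are
 * tolerated when the effective memory type is unambiguous. UC combined with
 * anything yields UC, and a WT/WB overlap resolves to WT, so for example:
 *
 *   types_compatible(MTRR_TYPE_UNCACHABLE, MTRR_TYPE_WRCOMB)  -> 1
 *   types_compatible(MTRR_TYPE_WRTHROUGH,  MTRR_TYPE_WRBACK)  -> 1
 *   types_compatible(MTRR_TYPE_WRCOMB,     MTRR_TYPE_WRBACK)  -> 0
 */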
/**
 * set_mtrr - update mtrrs on all processors
 * @reg: mtrr in question
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Send IPI to do the following:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
 * Meanwhile, they are waiting for that flag to be set. Once it's set, each
 * CPU goes through the transition of updating MTRRs. The CPU vendors may
 * each do it differently, so we call the mtrr_if->set() callback and let
 * them take care of it. When they're done, they again decrement data->count
 * and wait for data.gate to be reset.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate
 * flag. Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * simply becomes no-ops.
 */
static void set_mtrr(unsigned int reg, unsigned long base,
                     unsigned long size, mtrr_type type)
{
    struct set_mtrr_data data;
    unsigned long flags;

    data.smp_reg = reg;
    data.smp_base = base;
    data.smp_size = size;
    data.smp_type = type;
    atomic_set(&data.count, num_booting_cpus() - 1);
    /* make sure data.count is visible before unleashing other CPUs */
    smp_wmb();
    atomic_set(&data.gate, 0);

    /* Start the ball rolling on other CPUs */
    if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
        panic("mtrr: timed out waiting for other CPUs\n");

    local_irq_save(flags);

    while (atomic_read(&data.count))
        cpu_relax();

    /* OK, reset count and toggle gate */
    atomic_set(&data.count, num_booting_cpus() - 1);
    smp_wmb();
    atomic_set(&data.gate, 1);

    /* Do our MTRR business */

    /*
     * We use this same function to initialize the mtrrs on boot.
     * The state of the boot cpu's mtrrs has been saved, and we want
     * to replicate it across all the APs.
     * If we're doing that, @reg is set to something special...
     */
    mtrr_if->set(reg, base, size, type);

    /* Wait for the others */
    while (atomic_read(&data.count))
        cpu_relax();

    atomic_set(&data.count, num_booting_cpus() - 1);
    smp_wmb();
    atomic_set(&data.gate, 0);

    /*
     * Wait here for everyone to have seen the gate change,
     * so we're the last ones to touch 'data'.
     */
    while (atomic_read(&data.count))
        cpu_relax();

    local_irq_restore(flags);
}
/**
 * mtrr_add_page - Add a memory type region
 * @base: Physical base address of region in pages (in units of 4 kB!)
 * @size: Physical size of region in pages (4 kB)
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non-Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added, either because all regions are in use
 * or the CPU cannot support it, a negative value is returned. On success
 * the register number for this entry is returned, but it should be
 * treated as opaque.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add_page(unsigned long base, unsigned long size,
                  unsigned int type, bool increment)
{
    int i, replace, error;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    if ((error = mtrr_if->validate_add_page(base, size, type)))
        return error;

    if (type >= MTRR_NUM_TYPES) {
        printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
        return -EINVAL;
    }

    /* If the type is WC, check that this processor supports it */
    if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
        printk(KERN_WARNING
               "mtrr: your processor doesn't support write-combining\n");
        return -ENOSYS;
    }

    if (!size) {
        printk(KERN_WARNING "mtrr: zero sized request\n");
        return -EINVAL;
    }

    if (base & size_or_mask || size & size_or_mask) {
        printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
        return -EINVAL;
    }

    error = -EINVAL;
    replace = -1;

    /* No CPU hotplug when we change MTRR entries */

    /* Search for an existing MTRR */
    mutex_lock(&mtrr_mutex);
    for (i = 0; i < num_var_ranges; ++i) {
        mtrr_if->get(i, &lbase, &lsize, &ltype);
        if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
            continue;
        /* At this point we know there is some kind of overlap/enclosure */
        if (base < lbase || base + size - 1 > lbase + lsize - 1) {
            if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
                /* New region encloses an existing region */
                if (type == ltype) {
                    replace = replace == -1 ? i : -2;
                    continue;
                }
                else if (types_compatible(type, ltype))
                    continue;
            }
            printk(KERN_WARNING
                   "mtrr: 0x%lx000,0x%lx000 overlaps existing"
                   " 0x%lx000,0x%lx000\n", base, size, lbase,
                   lsize);
            goto out;
        }
        /* New region is enclosed by an existing region */
        if (ltype != type) {
            if (types_compatible(type, ltype))
                continue;
            printk(KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
                   base, size, mtrr_attrib_to_str(ltype),
                   mtrr_attrib_to_str(type));
            goto out;
        }
        if (increment)
            ++mtrr_usage_table[i];
        error = i;
        goto out;
    }
    /* Search for an empty MTRR */
    i = mtrr_if->get_free_region(base, size, replace);
    if (i >= 0) {
        set_mtrr(i, base, size, type);
        if (likely(replace < 0)) {
            mtrr_usage_table[i] = 1;
        } else {
            mtrr_usage_table[i] = mtrr_usage_table[replace];
            if (increment)
                mtrr_usage_table[i]++;
            if (unlikely(replace != i)) {
                set_mtrr(replace, 0, 0, 0);
                mtrr_usage_table[replace] = 0;
            }
        }
    } else
        printk(KERN_INFO "mtrr: no more MTRRs available\n");
    error = i;
 out:
    mutex_unlock(&mtrr_mutex);
    return error;
}
static int mtrr_check(unsigned long base, unsigned long size)
{
    if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
        printk(KERN_WARNING
               "mtrr: size and base must be multiples of 4 kiB\n");
        printk(KERN_DEBUG
               "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
        return -1;
    }
    return 0;
}

/**
 * mtrr_add - Add a memory type region
 * @base: Physical base address of region
 * @size: Physical size of region
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non-Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added, either because all regions are in use
 * or the CPU cannot support it, a negative value is returned. On success
 * the register number for this entry is returned, but it should be
 * treated as opaque.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int
mtrr_add(unsigned long base, unsigned long size, unsigned int type,
         bool increment)
{
    if (mtrr_check(base, size))
        return -EINVAL;
    return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
                         increment);
}
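/*
 * Illustrative usage sketch (annotation, not part of the original file):
 * a typical caller is a video driver mapping its framebuffer as
 * write-combining. The my_fb_base and my_fb_size names below are
 * hypothetical, for the example only.
 *
 *   int wc_cookie = mtrr_add(my_fb_base, my_fb_size, MTRR_TYPE_WRCOMB, true);
 *   if (wc_cookie < 0)
 *       ;  // not fatal: the driver still works, just without WC
 *   ...
 *   if (wc_cookie >= 0)
 *       mtrr_del(wc_cookie, my_fb_base, my_fb_size);
 */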
/**
 * mtrr_del_page - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code is returned.
 */
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize;
    int error = -EINVAL;

    max = num_var_ranges;
    /* No CPU hotplug when we change MTRR entries */
    mutex_lock(&mtrr_mutex);
    if (reg < 0) {
        /* Search for an existing MTRR */
        for (i = 0; i < max; ++i) {
            mtrr_if->get(i, &lbase, &lsize, &ltype);
            if (lbase == base && lsize == size) {
                reg = i;
                break;
            }
        }
        if (reg < 0) {
            printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
                   size);
            goto out;
        }
    }
    if (reg >= max) {
        printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
        goto out;
    }
    mtrr_if->get(reg, &lbase, &lsize, &ltype);
    if (lsize < 1) {
        printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
        goto out;
    }
    if (mtrr_usage_table[reg] < 1) {
        printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
        goto out;
    }
    if (--mtrr_usage_table[reg] < 1)
        set_mtrr(reg, 0, 0, 0);
    error = reg;
 out:
    mutex_unlock(&mtrr_mutex);
    return error;
}
/**
 * mtrr_del - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code is returned.
 */
int
mtrr_del(int reg, unsigned long base, unsigned long size)
{
    if (mtrr_check(base, size))
        return -EINVAL;
    return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}

EXPORT_SYMBOL(mtrr_add);
EXPORT_SYMBOL(mtrr_del);

/*
 * These should be called implicitly, but we can't yet until all the initcall
 * stuff is done...
 */
static void __init init_ifs(void)
{
#ifndef CONFIG_X86_64

/*
 * The suspend/resume methods are only for CPUs without MTRRs. CPUs using
 * the generic MTRR driver don't require this.
 */
static struct mtrr_value mtrr_state[MAX_VAR_RANGES];
static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
{
    int i;

    for (i = 0; i < num_var_ranges; i++) {
        mtrr_if->get(i,
                     &mtrr_state[i].lbase,
                     &mtrr_state[i].lsize,
                     &mtrr_state[i].ltype);
    }
    return 0;
}

static int mtrr_restore(struct sys_device *sysdev)
{
    int i;

    for (i = 0; i < num_var_ranges; i++) {
        if (mtrr_state[i].lsize)
            set_mtrr(i,
                     mtrr_state[i].lbase,
                     mtrr_state[i].lsize,
                     mtrr_state[i].ltype);
    }
    return 0;
}

static struct sysdev_driver mtrr_sysdev_driver = {
    .suspend = mtrr_save,
    .resume  = mtrr_restore,
};

/* RANGE_NUM should be related to the number of variable MTRRs */
#define RANGE_NUM 256

static int __init
add_range(struct res_range *range, int nr_range, unsigned long start,
          unsigned long end)
{
    if (nr_range >= RANGE_NUM)
        return nr_range;
    range[nr_range].start = start;
    range[nr_range].end = end;
    return nr_range + 1;
}
static int __init
add_range_with_merge(struct res_range *range, int nr_range, unsigned long start,
                     unsigned long end)
{
    int i;

    /* try to merge with an existing range */
    for (i = 0; i < nr_range; i++) {
        unsigned long final_start, final_end;
        unsigned long common_start, common_end;

        common_start = max(range[i].start, start);
        common_end = min(range[i].end, end);
        if (common_start > common_end + 1)
            continue;

        final_start = min(range[i].start, start);
        final_end = max(range[i].end, end);

        range[i].start = final_start;
        range[i].end = final_end;
        return nr_range;
    }

    /* no merge was possible; add a new entry */
    return add_range(range, nr_range, start, end);
}
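/*
 * Worked example (annotation, not in the original): with an existing
 * entry [0x00000, 0x7FFFF], adding [0x80000, 0x9FFFF] merges, because
 * common_start (0x80000) equals common_end + 1 (0x7FFFF + 1): the two
 * ranges are exactly adjacent, and the entry becomes [0x00000, 0x9FFFF].
 * Adding a range that starts at 0xA0001 or later would leave a gap and
 * fall through to add_range() instead.
 */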
static void __init
subtract_range(struct res_range *range, unsigned long start, unsigned long end)
{
    int i, j;

    for (j = 0; j < RANGE_NUM; j++) {
        /* the subtracted span covers this range completely: drop it */
        if (start <= range[j].start && end >= range[j].end) {
            range[j].start = 0;
            range[j].end = 0;
            continue;
        }

        /* the subtracted span clips the front of this range */
        if (start <= range[j].start && end < range[j].end &&
            range[j].start < end + 1) {
            range[j].start = end + 1;
            continue;
        }

        /* the subtracted span clips the tail of this range */
        if (start > range[j].start && end >= range[j].end &&
            range[j].end > start - 1) {
            range[j].end = start - 1;
            continue;
        }

        /* the subtracted span punches a hole in the middle: split */
        if (start > range[j].start && end < range[j].end) {
            /* find a spare slot for the upper piece */
            for (i = 0; i < RANGE_NUM; i++) {
                if (range[i].end == 0)
                    break;
            }
            if (i < RANGE_NUM) {
                range[i].end = range[j].end;
                range[i].start = end + 1;
            } else {
                printk(KERN_ERR "out of slots in ranges\n");
            }
            range[j].end = start - 1;
        }
    }
}
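/*
 * Worked example (annotation, not in the original): subtracting
 * [0x400, 0x4FF] from a single entry [0x000, 0xFFF] hits the split case:
 * a spare slot becomes [0x500, 0xFFF] and the original entry is clipped
 * to [0x000, 0x3FF].
 */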
static int __init cmp_range(const void *x1, const void *x2)
{
    const struct res_range *r1 = x1;
    const struct res_range *r2 = x2;
    long start1, start2;

    start1 = r1->start;
    start2 = r2->start;

    return start1 - start2;
}

struct var_mtrr_range_state {
    unsigned long base_pfn;
    unsigned long size_pfn;
    mtrr_type type;
};

static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];

static int __init
x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
                       unsigned long extra_remove_base,
                       unsigned long extra_remove_size)
{
    unsigned long i, base, size;
    mtrr_type type;

    for (i = 0; i < num_var_ranges; i++) {
        type = range_state[i].type;
        if (type != MTRR_TYPE_WRBACK)
            continue;
        base = range_state[i].base_pfn;
        size = range_state[i].size_pfn;
        nr_range = add_range_with_merge(range, nr_range, base,
                                        base + size - 1);
    }
    printk(KERN_DEBUG "After WB checking\n");
    for (i = 0; i < nr_range; i++)
        printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n",
               range[i].start, range[i].end + 1);

    /* take out UC ranges */
    for (i = 0; i < num_var_ranges; i++) {
        type = range_state[i].type;
        if (type != MTRR_TYPE_UNCACHABLE)
            continue;
        size = range_state[i].size_pfn;
        if (!size)
            continue;
        base = range_state[i].base_pfn;
        subtract_range(range, base, base + size - 1);
    }
    if (extra_remove_size)
        subtract_range(range, extra_remove_base,
                       extra_remove_base + extra_remove_size - 1);

    /* get the new number of ranges */
    nr_range = 0;
    for (i = 0; i < RANGE_NUM; i++) {
        if (!range[i].end)
            continue;
        nr_range++;
    }

    printk(KERN_DEBUG "After UC checking\n");
    for (i = 0; i < nr_range; i++)
        printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n",
               range[i].start, range[i].end + 1);

    /* sort the ranges */
    sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
    printk(KERN_DEBUG "After sorting\n");
    for (i = 0; i < nr_range; i++)
        printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n",
               range[i].start, range[i].end + 1);

    /* clear the entries that are not used */
    for (i = nr_range; i < RANGE_NUM; i++)
        memset(&range[i], 0, sizeof(range[i]));

    return nr_range;
}
static struct res_range __initdata range[RANGE_NUM];

#ifdef CONFIG_MTRR_SANITIZER

static unsigned long __init sum_ranges(struct res_range *range, int nr_range)
{
    unsigned long sum = 0;
    int i;

    for (i = 0; i < nr_range; i++)
        sum += range[i].end + 1 - range[i].start;

    return sum;
}

static int enable_mtrr_cleanup __initdata =
    CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT;

static int __init disable_mtrr_cleanup_setup(char *str)
{
    if (enable_mtrr_cleanup != -1)
        enable_mtrr_cleanup = 0;
    return 0;
}
early_param("disable_mtrr_cleanup", disable_mtrr_cleanup_setup);

static int __init enable_mtrr_cleanup_setup(char *str)
{
    if (enable_mtrr_cleanup != -1)
        enable_mtrr_cleanup = 1;
    return 0;
}
early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);
struct var_mtrr_state {
    unsigned long range_startk;
    unsigned long range_sizek;
    unsigned long chunk_sizek;
    unsigned long gran_sizek;
    unsigned int reg;
};
static void __init
set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
             unsigned char type, unsigned int address_bits)
{
    u32 base_lo, base_hi, mask_lo, mask_hi;
    u64 base, mask;

    if (!sizek) {
        fill_mtrr_var_range(reg, 0, 0, 0, 0);
        return;
    }

    mask = (1ULL << address_bits) - 1;
    mask &= ~((((u64)sizek) << 10) - 1);

    base = ((u64)basek) << 10;

    base_lo = base & ((1ULL<<32) - 1);
    base_hi = base >> 32;

    mask_lo = mask & ((1ULL<<32) - 1);
    mask_hi = mask >> 32;

    fill_mtrr_var_range(reg, base_lo, base_hi, mask_lo, mask_hi);
}
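/*
 * Worked example (annotation, not in the original): with address_bits = 36,
 * basek = 4*1024*1024 (a 4 GB base) and sizek = 1024*1024 (a 1 GB size):
 *
 *   mask = 0xFFFFFFFFF & ~0x3FFFFFFF = 0xFC0000000
 *   base = 0x100000000
 *
 * so base_hi:base_lo = 0x1:0x00000000 and mask_hi:mask_lo = 0xF:0xC0000000.
 */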
static void __init
save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
              unsigned char type)
{
    range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10);
    range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10);
    range_state[reg].type = type;
}

static void __init
set_var_mtrr_all(unsigned int address_bits)
{
    unsigned long basek, sizek;
    unsigned char type;
    unsigned int reg;

    for (reg = 0; reg < num_var_ranges; reg++) {
        basek = range_state[reg].base_pfn << (PAGE_SHIFT - 10);
        sizek = range_state[reg].size_pfn << (PAGE_SHIFT - 10);
        type = range_state[reg].type;

        set_var_mtrr(reg, basek, sizek, type, address_bits);
    }
}
static unsigned int __init
range_to_mtrr(unsigned int reg, unsigned long range_startk,
              unsigned long range_sizek, unsigned char type)
{
    if (!range_sizek || (reg >= num_var_ranges))
        return reg;

    while (range_sizek) {
        unsigned long max_align, align;
        unsigned long sizek;

        /* Compute the maximum size I can make a range */
        if (range_startk)
            max_align = ffs(range_startk) - 1;
        else
            max_align = 32;
        align = fls(range_sizek) - 1;
        if (align > max_align)
            align = max_align;

        sizek = 1 << align;
        printk(KERN_DEBUG "Setting variable MTRR %d, base: %ldMB, "
               "range: %ldMB, type %s\n",
               reg, range_startk >> 10, sizek >> 10,
               (type == MTRR_TYPE_UNCACHABLE) ? "UC" :
               ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"));
        save_var_mtrr(reg++, range_startk, sizek, type);
        range_startk += sizek;
        range_sizek -= sizek;
        if (reg >= num_var_ranges)
            break;
    }
    return reg;
}
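/*
 * Worked example (annotation, not in the original): a WB range starting
 * at 1 GB (range_startk = 0x100000) with size 3 GB (range_sizek = 0x300000)
 * is carved greedily into aligned power-of-two pieces: first 1 GB at 1 GB
 * (the alignment of the start limits the piece size), then 2 GB at 2 GB,
 * consuming two variable MTRRs in total.
 */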
930 static unsigned __init
931 range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
934 unsigned long hole_basek, hole_sizek;
935 unsigned long second_basek, second_sizek;
936 unsigned long range0_basek, range0_sizek;
937 unsigned long range_basek, range_sizek;
938 unsigned long chunk_sizek;
939 unsigned long gran_sizek;
945 chunk_sizek = state->chunk_sizek;
946 gran_sizek = state->gran_sizek;
948 /* align with gran size, prevent small block used up MTRRs */
949 range_basek = ALIGN(state->range_startk, gran_sizek);
950 if ((range_basek > basek) && basek)
952 state->range_sizek -= (range_basek - state->range_startk);
953 range_sizek = ALIGN(state->range_sizek, gran_sizek);
955 while (range_sizek > state->range_sizek) {
956 range_sizek -= gran_sizek;
960 state->range_sizek = range_sizek;
962 /* try to append some small hole */
963 range0_basek = state->range_startk;
964 range0_sizek = ALIGN(state->range_sizek, chunk_sizek);
965 if (range0_sizek == state->range_sizek) {
966 printk(KERN_DEBUG "rangeX: %016lx - %016lx\n", range0_basek<<10,
967 (range0_basek + state->range_sizek)<<10);
968 state->reg = range_to_mtrr(state->reg, range0_basek,
969 state->range_sizek, MTRR_TYPE_WRBACK);
973 range0_sizek -= chunk_sizek;
974 if (range0_sizek && sizek) {
975 while (range0_basek + range0_sizek > (basek + sizek)) {
976 range0_sizek -= chunk_sizek;
983 printk(KERN_DEBUG "range0: %016lx - %016lx\n", range0_basek<<10,
984 (range0_basek + range0_sizek)<<10);
985 state->reg = range_to_mtrr(state->reg, range0_basek,
986 range0_sizek, MTRR_TYPE_WRBACK);
990 range_basek = range0_basek + range0_sizek;
991 range_sizek = chunk_sizek;
993 if (range_basek + range_sizek > basek &&
994 range_basek + range_sizek <= (basek + sizek)) {
996 second_basek = basek;
997 second_sizek = range_basek + range_sizek - basek;
1000 /* if last piece, only could one hole near end */
1001 if ((second_basek || !basek) &&
1002 range_sizek - (state->range_sizek - range0_sizek) - second_sizek <
1003 (chunk_sizek >> 1)) {
1005 * one hole in middle (second_sizek is 0) or at end
1006 * (second_sizek is 0 )
1008 hole_sizek = range_sizek - (state->range_sizek - range0_sizek)
1010 hole_basek = range_basek + range_sizek - hole_sizek
1013 /* fallback for big hole, or several holes */
1014 range_sizek = state->range_sizek - range0_sizek;
1019 printk(KERN_DEBUG "range: %016lx - %016lx\n", range_basek<<10,
1020 (range_basek + range_sizek)<<10);
1021 state->reg = range_to_mtrr(state->reg, range_basek, range_sizek,
1024 printk(KERN_DEBUG "hole: %016lx - %016lx\n", hole_basek<<10,
1025 (hole_basek + hole_sizek)<<10);
1026 state->reg = range_to_mtrr(state->reg, hole_basek, hole_sizek,
1027 MTRR_TYPE_UNCACHABLE);
1031 return second_sizek;
static void __init
set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn,
                   unsigned long size_pfn)
{
    unsigned long basek, sizek;
    unsigned long second_sizek = 0;

    if (state->reg >= num_var_ranges)
        return;

    basek = base_pfn << (PAGE_SHIFT - 10);
    sizek = size_pfn << (PAGE_SHIFT - 10);

    /* See if I can merge with the last range */
    if ((basek <= 1024) ||
        (state->range_startk + state->range_sizek == basek)) {
        unsigned long endk = basek + sizek;
        state->range_sizek = endk - state->range_startk;
        return;
    }
    /* Write the range mtrrs */
    if (state->range_sizek != 0)
        second_sizek = range_to_mtrr_with_hole(state, basek, sizek);

    /* Allocate an msr */
    state->range_startk = basek + second_sizek;
    state->range_sizek = sizek - second_sizek;
}
/* minimum size of an MTRR block that can take a hole */
static u64 mtrr_chunk_size __initdata = (256ULL<<20);

static int __init parse_mtrr_chunk_size_opt(char *p)
{
    if (!p)
        return -EINVAL;
    mtrr_chunk_size = memparse(p, &p);
    return 0;
}
early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt);

/* granularity of an MTRR block */
static u64 mtrr_gran_size __initdata;

static int __init parse_mtrr_gran_size_opt(char *p)
{
    if (!p)
        return -EINVAL;
    mtrr_gran_size = memparse(p, &p);
    return 0;
}
early_param("mtrr_gran_size", parse_mtrr_gran_size_opt);

static int nr_mtrr_spare_reg __initdata =
    CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT;

static int __init parse_mtrr_spare_reg(char *arg)
{
    if (arg)
        nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0);
    return 0;
}
early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg);
static int __init
x86_setup_var_mtrrs(struct res_range *range, int nr_range,
                    u64 chunk_size, u64 gran_size)
{
    struct var_mtrr_state var_state;
    int i;
    int num_reg;

    var_state.range_startk = 0;
    var_state.range_sizek = 0;
    var_state.reg = 0;
    var_state.chunk_sizek = chunk_size >> 10;
    var_state.gran_sizek = gran_size >> 10;

    memset(range_state, 0, sizeof(range_state));

    /* Write the ranges */
    for (i = 0; i < nr_range; i++)
        set_var_mtrr_range(&var_state, range[i].start,
                           range[i].end - range[i].start + 1);

    /* Write the last range */
    if (var_state.range_sizek != 0)
        range_to_mtrr_with_hole(&var_state, 0, 0);
    printk(KERN_DEBUG "DONE variable MTRRs\n");

    num_reg = var_state.reg;
    /* Clear out the extra MTRRs */
    while (var_state.reg < num_var_ranges) {
        save_var_mtrr(var_state.reg, 0, 0, 0);
        var_state.reg++;
    }

    return num_reg;
}
struct mtrr_cleanup_result {
    unsigned long gran_sizek;
    unsigned long chunk_sizek;
    unsigned long lose_cover_sizek;
    unsigned int num_reg;
    int bad;
};

/*
 * gran_size: 1M, 2M, ..., 2G (12 possible values)
 * chunk_size: gran_size, ..., 4G (from 2 up to 13 values per gran_size)
 * so we need the arithmetic series sum (2 + 13) * 12 / 2 = 90 slots
 */
#define NUM_RESULT 90
#define PSHIFT (PAGE_SHIFT - 10)
static struct mtrr_cleanup_result __initdata result[NUM_RESULT];
static struct res_range __initdata range_new[RANGE_NUM];
static unsigned long __initdata min_loss_pfn[RANGE_NUM];
static int __init mtrr_cleanup(unsigned address_bits)
{
    unsigned long extra_remove_base, extra_remove_size;
    unsigned long i, base, size, def, dummy;
    mtrr_type type;
    int nr_range, nr_range_new;
    u64 chunk_size, gran_size;
    unsigned long range_sums, range_sums_new;
    int index_good;
    int num_reg_good;

    /* extra one for all 0 */
    int num[MTRR_NUM_TYPES + 1];

    if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
        return 0;
    rdmsr(MTRRdefType_MSR, def, dummy);
    def &= 0xff;
    if (def != MTRR_TYPE_UNCACHABLE)
        return 0;

    /* get it and store it aside */
    memset(range_state, 0, sizeof(range_state));
    for (i = 0; i < num_var_ranges; i++) {
        mtrr_if->get(i, &base, &size, &type);
        range_state[i].base_pfn = base;
        range_state[i].size_pfn = size;
        range_state[i].type = type;
    }

    /* count the entries of each type */
    memset(num, 0, sizeof(num));
    for (i = 0; i < num_var_ranges; i++) {
        type = range_state[i].type;
        size = range_state[i].size_pfn;
        if (type >= MTRR_NUM_TYPES)
            continue;
        if (!size)
            type = MTRR_NUM_TYPES;
        num[type]++;
    }

    /* check if we got UC entries */
    if (!num[MTRR_TYPE_UNCACHABLE])
        return 0;

    /* check if we only had WB and UC */
    if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
        num_var_ranges - num[MTRR_NUM_TYPES])
        return 0;

    memset(range, 0, sizeof(range));
    extra_remove_size = 0;
    if (mtrr_tom2) {
        extra_remove_base = 1 << (32 - PAGE_SHIFT);
        extra_remove_size =
            (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base;
    }
    nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base,
                                      extra_remove_size);
    range_sums = sum_ranges(range, nr_range);
    printk(KERN_INFO "total RAM covered: %ldM\n",
           range_sums >> (20 - PAGE_SHIFT));
    if (mtrr_chunk_size && mtrr_gran_size) {
        int num_reg;

        /* convert ranges to var ranges state */
        num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size,
                                      mtrr_gran_size);

        /* we got a new setting in range_state; check it */
        memset(range_new, 0, sizeof(range_new));
        nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
                                              extra_remove_base,
                                              extra_remove_size);
        range_sums_new = sum_ranges(range_new, nr_range_new);

        i = 0;
        result[i].chunk_sizek = mtrr_chunk_size >> 10;
        result[i].gran_sizek = mtrr_gran_size >> 10;
        result[i].num_reg = num_reg;
        if (range_sums < range_sums_new) {
            result[i].lose_cover_sizek =
                (range_sums_new - range_sums) << PSHIFT;
            result[i].bad = 1;
        } else
            result[i].lose_cover_sizek =
                (range_sums - range_sums_new) << PSHIFT;

        printk(KERN_INFO " %sgran_size: %ldM \tchunk_size: %ldM \t",
               result[i].bad ? " BAD " : "", result[i].gran_sizek >> 10,
               result[i].chunk_sizek >> 10);
        printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ldM\n",
               result[i].num_reg, result[i].bad ? "-" : "",
               result[i].lose_cover_sizek >> 10);
        if (!result[i].bad) {
            set_var_mtrr_all(address_bits);
            return 1;
        }
        printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
               "will find optimal one\n");
        memset(result, 0, sizeof(result[0]));
    }
    memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn));
    memset(result, 0, sizeof(result));
    i = 0;
    for (gran_size = (1ULL<<20); gran_size < (1ULL<<32); gran_size <<= 1) {
        for (chunk_size = gran_size; chunk_size < (1ULL<<33);
             chunk_size <<= 1) {
            int num_reg;

            printk(KERN_INFO
                   "\ngran_size: %lldM chunk_size: %lldM\n",
                   gran_size >> 20, chunk_size >> 20);
            if (i >= NUM_RESULT)
                continue;

            /* convert ranges to var ranges state */
            num_reg = x86_setup_var_mtrrs(range, nr_range,
                                          chunk_size, gran_size);

            /* we got a new setting in range_state; check it */
            memset(range_new, 0, sizeof(range_new));
            nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
                                                  extra_remove_base,
                                                  extra_remove_size);
            range_sums_new = sum_ranges(range_new, nr_range_new);

            result[i].chunk_sizek = chunk_size >> 10;
            result[i].gran_sizek = gran_size >> 10;
            result[i].num_reg = num_reg;
            if (range_sums < range_sums_new) {
                result[i].lose_cover_sizek =
                    (range_sums_new - range_sums) << PSHIFT;
                result[i].bad = 1;
            } else
                result[i].lose_cover_sizek =
                    (range_sums - range_sums_new) << PSHIFT;

            /* double check it */
            if (!result[i].bad && !result[i].lose_cover_sizek) {
                if (nr_range_new != nr_range ||
                    memcmp(range, range_new, sizeof(range)))
                    result[i].bad = 1;
            }

            if (!result[i].bad && (range_sums - range_sums_new <
                                   min_loss_pfn[num_reg])) {
                min_loss_pfn[num_reg] =
                    range_sums - range_sums_new;
            }
            i++;
        }
    }
    for (i = 0; i < NUM_RESULT; i++) {
        printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t",
               result[i].bad ? "*BAD* " : " ", result[i].gran_sizek >> 10,
               result[i].chunk_sizek >> 10);
        printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ldM\n",
               result[i].num_reg, result[i].bad ? "-" : "",
               result[i].lose_cover_sizek >> 10);
    }

    /* try to find the optimal index */
    index_good = -1;
    if (nr_mtrr_spare_reg >= num_var_ranges)
        nr_mtrr_spare_reg = num_var_ranges - 1;
    num_reg_good = -1;
    for (i = 1; i < num_var_ranges + 1 - nr_mtrr_spare_reg; i++) {
        if (!min_loss_pfn[i]) {
            num_reg_good = i;
            break;
        }
    }

    if (num_reg_good != -1) {
        for (i = 0; i < NUM_RESULT; i++) {
            if (!result[i].bad &&
                result[i].num_reg == num_reg_good &&
                !result[i].lose_cover_sizek) {
                index_good = i;
                break;
            }
        }
    }

    if (index_good != -1) {
        printk(KERN_INFO "Found optimal setting for mtrr clean up\n");
        i = index_good;
        printk(KERN_INFO "gran_size: %ldM \tchunk_size: %ldM \t",
               result[i].gran_sizek >> 10,
               result[i].chunk_sizek >> 10);
        printk(KERN_CONT "num_reg: %d \tlose cover RAM: %ldM\n",
               result[i].num_reg,
               result[i].lose_cover_sizek >> 10);
        /* convert ranges to var ranges state */
        chunk_size = result[i].chunk_sizek;
        chunk_size <<= 10;
        gran_size = result[i].gran_sizek;
        gran_size <<= 10;
        x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
        set_var_mtrr_all(address_bits);
        return 1;
    }

    printk(KERN_INFO "mtrr_cleanup: cannot find optimal value\n");
    printk(KERN_INFO "please specify mtrr_gran_size/mtrr_chunk_size\n");

    return 0;
}
#else
static int __init mtrr_cleanup(unsigned address_bits)
{
    return 0;
}
#endif

static int __initdata changed_by_mtrr_cleanup;

static int disable_mtrr_trim;

static int __init disable_mtrr_trim_setup(char *str)
{
    disable_mtrr_trim = 1;
    return 0;
}
early_param("disable_mtrr_trim", disable_mtrr_trim_setup);

/*
 * Newer AMD K8s and later CPUs have a special magic MSR way to force WB
 * for memory >4GB. Check for that here.
 * Note this won't check whether the MTRRs < 4GB (where the magic bit
 * doesn't apply) are wrong, but so far we don't know of any such case
 * in the wild.
 */
#define Tom2Enabled (1U << 21)
#define Tom2ForceMemTypeWB (1U << 22)

int __init amd_special_default_mtrr(void)
{
    u32 l, h;

    if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
        return 0;
    if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
        return 0;
    /* In case some hypervisor doesn't pass SYSCFG through */
    if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
        return 0;
    /*
     * Memory between 4GB and top of mem is forced WB by this magic bit.
     * Reserved before K8RevF, but should be zero there.
     */
    if ((l & (Tom2Enabled | Tom2ForceMemTypeWB)) ==
        (Tom2Enabled | Tom2ForceMemTypeWB))
        return 1;
    return 0;
}
static u64 __init real_trim_memory(unsigned long start_pfn,
                                   unsigned long limit_pfn)
{
    u64 trim_start, trim_size;

    trim_start = start_pfn;
    trim_start <<= PAGE_SHIFT;
    trim_size = limit_pfn;
    trim_size <<= PAGE_SHIFT;
    trim_size -= trim_start;

    return update_memory_range(trim_start, trim_size, E820_RAM,
                               E820_RESERVED);
}
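/*
 * Worked example (annotation, not in the original): with 4 kB pages,
 * real_trim_memory(0x80000, 0x100000) converts pfns to byte addresses
 * (0x80000 << 12 = 2 GB, 0x100000 << 12 = 4 GB) and reclassifies that
 * 2 GB span from E820_RAM to E820_RESERVED.
 */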
/**
 * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
 * @end_pfn: ending page frame number
 *
 * Some buggy BIOSes don't set up the MTRRs properly for systems with certain
 * memory configurations. This routine checks that the highest MTRR matches
 * the end of memory, to make sure the MTRRs having a write back type cover
 * all of the memory the kernel is intending to use. If not, it'll trim any
 * memory off the end by adjusting end_pfn, removing it from the kernel's
 * allocation pools, warning the user with an obnoxious message.
 */
int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
{
    unsigned long i, base, size, highest_pfn = 0, def, dummy;
    mtrr_type type;
    int nr_range;
    u64 total_trim_size;

    /* extra one for all 0 */
    int num[MTRR_NUM_TYPES + 1];

    /*
     * Make sure we only trim uncachable memory on machines that
     * support the Intel MTRR architecture:
     */
    if (!is_cpu(INTEL) || disable_mtrr_trim)
        return 0;
    rdmsr(MTRRdefType_MSR, def, dummy);
    def &= 0xff;
    if (def != MTRR_TYPE_UNCACHABLE)
        return 0;

    /* get it and store it aside */
    memset(range_state, 0, sizeof(range_state));
    for (i = 0; i < num_var_ranges; i++) {
        mtrr_if->get(i, &base, &size, &type);
        range_state[i].base_pfn = base;
        range_state[i].size_pfn = size;
        range_state[i].type = type;
    }

    /* Find the highest cached pfn */
    for (i = 0; i < num_var_ranges; i++) {
        type = range_state[i].type;
        if (type != MTRR_TYPE_WRBACK)
            continue;
        base = range_state[i].base_pfn;
        size = range_state[i].size_pfn;
        if (highest_pfn < base + size)
            highest_pfn = base + size;
    }

    /* kvm/qemu doesn't have mtrr set right, don't trim them all */
    if (!highest_pfn) {
        if (!kvm_para_available()) {
            printk(KERN_WARNING
                   "WARNING: strange, CPU MTRRs all blank?\n");
            WARN_ON(1);
        }
        return 0;
    }

    /* count the entries of each type */
    memset(num, 0, sizeof(num));
    for (i = 0; i < num_var_ranges; i++) {
        type = range_state[i].type;
        if (type >= MTRR_NUM_TYPES)
            continue;
        size = range_state[i].size_pfn;
        if (!size)
            type = MTRR_NUM_TYPES;
        num[type]++;
    }

    /* no entry for WB? */
    if (!num[MTRR_TYPE_WRBACK])
        return 0;

    /* check if we only had WB and UC */
    if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
        num_var_ranges - num[MTRR_NUM_TYPES])
        return 0;

    memset(range, 0, sizeof(range));
    nr_range = 0;
    if (mtrr_tom2) {
        range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT));
        range[nr_range].end = (mtrr_tom2 >> PAGE_SHIFT) - 1;
        if (highest_pfn < range[nr_range].end + 1)
            highest_pfn = range[nr_range].end + 1;
        nr_range++;
    }
    nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);

    total_trim_size = 0;
    /* check the head */
    if (range[0].start)
        total_trim_size += real_trim_memory(0, range[0].start);
    /* check the holes */
    for (i = 0; i < nr_range - 1; i++) {
        if (range[i].end + 1 < range[i+1].start)
            total_trim_size += real_trim_memory(range[i].end + 1,
                                                range[i+1].start);
    }
    /* check the top */
    i = nr_range - 1;
    if (range[i].end + 1 < end_pfn)
        total_trim_size += real_trim_memory(range[i].end + 1,
                                            end_pfn);

    if (total_trim_size) {
        printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover"
               " all of memory, losing %lluMB of RAM.\n",
               total_trim_size >> 20);

        if (!changed_by_mtrr_cleanup)
            WARN_ON(1);

        printk(KERN_INFO "update e820 for mtrr\n");
        update_e820();

        return 1;
    }

    return 0;
}
/**
 * mtrr_bp_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 */
void __init mtrr_bp_init(void)
{
    u32 phys_addr;

    init_ifs();

    phys_addr = 32;

    if (cpu_has_mtrr) {
        mtrr_if = &generic_mtrr_ops;
        size_or_mask = 0xff000000;   /* 36 bits */
        size_and_mask = 0x00f00000;
        phys_addr = 36;

        /* This is an AMD specific MSR, but we assume(hope?) that
           Intel will implement it too when they extend the address
           bus. */
        if (cpuid_eax(0x80000000) >= 0x80000008) {
            phys_addr = cpuid_eax(0x80000008) & 0xff;
            /* CPUID workaround for Intel 0F33/0F34 CPU */
            if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
                boot_cpu_data.x86 == 0xF &&
                boot_cpu_data.x86_model == 0x3 &&
                (boot_cpu_data.x86_mask == 0x3 ||
                 boot_cpu_data.x86_mask == 0x4))
                phys_addr = 36;

            size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
            size_and_mask = ~size_or_mask & 0xfffff00000ULL;
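            /*
             * Worked example (annotation, not in the original): with
             * phys_addr = 40 and PAGE_SHIFT = 12:
             *
             *   size_or_mask  = ~((1ULL << 28) - 1) = 0xfffffffff0000000
             *   size_and_mask = ~size_or_mask & 0xfffff00000ULL = 0x0ff00000
             *
             * mtrr_add_page() then rejects any base or size pfn with bits
             * set in size_or_mask, i.e. anything at or above 2^40 bytes.
             */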
        } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
                   boot_cpu_data.x86 == 6) {
            /* The VIA C* family has Intel style MTRRs, but
               doesn't support PAE */
            size_or_mask = 0xfff00000;   /* 32 bits */
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
            if (cpu_has_k6_mtrr) {
                /* Pre-Athlon (K6) AMD CPU MTRRs */
                mtrr_if = mtrr_ops[X86_VENDOR_AMD];
                size_or_mask = 0xfff00000;   /* 32 bits */
                size_and_mask = 0;
            }
            break;
        case X86_VENDOR_CENTAUR:
            if (cpu_has_centaur_mcr) {
                mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
                size_or_mask = 0xfff00000;   /* 32 bits */
                size_and_mask = 0;
            }
            break;
        case X86_VENDOR_CYRIX:
            if (cpu_has_cyrix_arr) {
                mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
                size_or_mask = 0xfff00000;   /* 32 bits */
                size_and_mask = 0;
            }
            break;
        default:
            break;
        }

    if (mtrr_if) {
        set_num_var_ranges();
        init_table();
        if (use_intel()) {
            get_mtrr_state();

            if (mtrr_cleanup(phys_addr)) {
                changed_by_mtrr_cleanup = 1;
void mtrr_ap_init(void)
{
    unsigned long flags;

    if (!mtrr_if || !use_intel())
        return;
    /*
     * Ideally we should hold mtrr_mutex here to avoid MTRR entries being
     * changed, but this routine is called at CPU boot time and holding the
     * lock there breaks it. This routine is called in two cases:
     *   1. very early in software resume, when there absolutely are no
     *      MTRR entry changes;
     *   2. CPU hot-add time. We let mtrr_add/del_page hold the cpuhotplug
     *      lock to prevent MTRR entry changes.
     */
    local_irq_save(flags);
    mtrr_if->set_all();
    local_irq_restore(flags);
}

/*
 * Save current fixed-range MTRR state of the BSP
 */
void mtrr_save_state(void)
{
    smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
}
static int __init mtrr_init_finalize(void)
{
    if (!mtrr_if)
        return 0;
    if (use_intel()) {
        if (!changed_by_mtrr_cleanup)
            mtrr_state_warn();
    } else {
        /*
         * These CPUs have no MTRRs and seem not to support SMP. They
         * have specific drivers; we use a tricky method to support
         * suspend/resume for them.
         * TBD: is there any system with such a CPU that supports
         * suspend/resume? If not, we should remove the code.
         */
        sysdev_driver_register(&cpu_sysdev_class,
                               &mtrr_sysdev_driver);
    }
    return 0;
}
subsys_initcall(mtrr_init_finalize);