2 * Self test for change_page_attr.
4 * Clears the global bit on random pages in the direct mapping, then reverts
5 * and compares the page tables before and afterwards.
7 #include <linux/bootmem.h>
8 #include <linux/random.h>
9 #include <linux/kernel.h>
10 #include <linux/init.h>
13 #include <asm/cacheflush.h>
14 #include <asm/pgtable.h>
15 #include <asm/kdebug.h>
/*
 * NOTE(review): fragmentary chunk — the opening of the enum, the #if that
 * selects between the branches below, and the struct split_state header all
 * fall outside the visible lines.
 */
/* Large-page size: one PMD-sized mapping (same expression in both configs). */
21 LPS = (1 << PMD_SHIFT),
22 #elif defined(CONFIG_X86_PAE)
24 LPS = (1 << PMD_SHIFT),
/* Page-table level at which lookup_address() reports a 4k pte in this config. */
26 LOWEST_LEVEL = 3, /* lookup_address lies here */
33 # include <asm/proto.h>
/* Highest mapped pfn of the kernel direct mapping, per configuration. */
34 # define max_mapped end_pfn_map
36 # define max_mapped max_low_pfn
/* split_state counters filled in by print_split():
 * lpg = large pages, gpg = gigantic pages, spg = 4k ("small") pages,
 * exec = executable pages. */
40 long lpg, gpg, spg, exec;
/* Lowest/highest executable (non-NX) address encountered. */
41 long min_exec, max_exec;
/*
 * print_split - audit the direct mapping's page-table state.
 *
 * Walks pfn 0..max_mapped through the direct mapping (__va), classifies each
 * mapping by the level lookup_address() reports, and accumulates counters in
 * *s: spg (4k), lpg (large), gpg (gigantic), exec (pages with _PAGE_NX
 * clear) plus the [min_exec, max_exec] executable address range.  Prints a
 * summary and cross-checks the accumulated page count against max_mapped.
 *
 * NOTE(review): several body lines are missing from this chunk (the loop
 * increment, counter increments, error-path bodies, the final return) — the
 * annotations below cover only what is visible; return semantics TODO
 * confirm against the full file.
 */
44 static __init int print_split(struct split_state *s)
46 long i, expected, missed = 0;
50 s->lpg = s->gpg = s->spg = s->exec = 0;
/* No increment clause: i is advanced inside the (not visible) loop body,
 * presumably by the number of 4k pages the current mapping covers. */
53 for (i = 0; i < max_mapped; ) {
54 unsigned long addr = (unsigned long)__va(i << PAGE_SHIFT);
58 pte = lookup_address(addr, &level);
/* Hole in the direct mapping: no pte for this address. */
62 printk(KERN_INFO "CPA %lx no pte level %d\n",
/* Level 2 on 64-bit (sizeof(long) == 8): gigantic (GPS-sized) mapping —
 * presumably counted via s->gpg in the missing lines; TODO confirm. */
71 if (level == 2 && sizeof(long) == 8) {
/* Not the lowest level, so this must be a large mapping: its pte is
 * required to carry _PAGE_PSE, otherwise the tables are inconsistent. */
74 } else if (level != LOWEST_LEVEL) {
75 if (!(pte_val(*pte) & _PAGE_PSE)) {
77 "%lx level %d but not PSE %Lx\n",
78 addr, level, (u64)pte_val(*pte));
/* _PAGE_NX clear => page is executable: widen the [min_exec, max_exec]
 * executable range accordingly. */
87 if (!(pte_val(*pte) & _PAGE_NX)) {
89 if (addr < s->min_exec)
91 if (addr > s->max_exec)
/* Summary: 4k / large / gigantic / executable counts, the executable
 * address range, and how many pfns had no mapping at all. */
96 "CPA mapping 4k %lu large %lu gb %lu x %lu[%lx-%lx] miss %lu\n",
97 s->spg, s->lpg, s->gpg, s->exec,
98 s->min_exec != ~0UL ? s->min_exec : 0, s->max_exec, missed);
/* Consistency check: all counted mappings plus the misses must add up
 * to exactly max_mapped pages. */
100 expected = (s->gpg*GPS + s->lpg*LPS)/PAGE_SIZE + s->spg + missed;
102 printk(KERN_ERR "CPA max_mapped %lu but expected %lu\n",
103 max_mapped, expected);
/*
 * state_same - compare two print_split() snapshots.
 *
 * The self-test passes only if the page-table population after the revert
 * matches the initial snapshot.  (The tail of the comparison expression —
 * presumably the exec counter — lies outside the visible lines.)
 */
109 static __init int state_same(struct split_state *a, struct split_state *b)
111 return a->lpg == b->lpg && a->gpg == b->gpg && a->spg == b->spg &&
/* Per-test-case start address and length for the NTEST random ranges.
 * Filled in during exercise_pageattr()'s first pass and reused verbatim by
 * the revert pass.  len[] is in pages (see the change_page_attr_addr
 * calls); __initdata because the test only runs at init time. */
115 static unsigned long __initdata addr[NTEST];
116 static unsigned int __initdata len[NTEST];
/*
 * exercise_pageattr - the actual self-test, three phases bracketed by
 * print_split() audits (snapshots sa, sb, sc):
 *
 *  1. Pick NTEST random page ranges in the direct mapping (a vmalloc'ed
 *     bitmap rejects overlap between test cases) and clear their
 *     global/huge bits via change_page_attr_addr().
 *  2. Verify each range really became non-global, non-huge, LOWEST_LEVEL
 *     (4k) mappings.
 *  3. Revert every range to global and verify the page tables returned to
 *     the initial state (state_same(&sa, &sc)).
 *
 * NOTE(review): many lines are missing from this chunk (declarations of
 * bm/i/k/err/level/failed, most closing braces, `continue` statements in
 * the error paths, the final return) — the annotations cover only what is
 * visible.
 */
118 /* Change the global bit on random pages in the direct mapping */
119 static __init int exercise_pageattr(void)
121 struct split_state sa, sb, sc;
129 printk(KERN_INFO "CPA exercising pageattr\n");
/* One bit per pfn of the direct mapping, used to detect range overlap. */
131 bm = vmalloc((max_mapped + 7) / 8);
133 printk(KERN_ERR "CPA Cannot vmalloc bitmap\n");
136 memset(bm, 0, (max_mapped + 7) / 8);
/* Snapshot sa: baseline state before any attribute changes. */
138 failed += print_split(&sa);
/* Phase 1: pick ranges and clear their global bits. */
141 for (i = 0; i < NTEST; i++) {
142 unsigned long pfn = random32() % max_mapped;
144 addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
/* Random length of 0..99 pages, clamped so the range stays inside
 * the direct mapping. */
145 len[i] = random32() % 100;
146 len[i] = min_t(unsigned long, len[i], max_mapped - pfn - 1);
152 pte0 = pfn_pte(0, __pgprot(0)); /* shut gcc up */
/* Pre-scan the range: skip test cases that hit unmapped holes, mixed
 * protections, or pages already claimed by an earlier test case. */
154 for (k = 0; k < len[i]; k++) {
155 pte = lookup_address(addr[i] + k*PAGE_SIZE, &level);
156 if (!pte || pgprot_val(pte_pgprot(*pte)) == 0) {
/* All pages in one range must share the first page's protections. */
163 if (pgprot_val(pte_pgprot(*pte)) !=
164 pgprot_val(pte_pgprot(pte0))) {
/* Page already used by a previous test case: reject the range. */
169 if (test_bit(pfn + k, bm)) {
173 __set_bit(pfn + k, bm);
/* Degenerate case (NULL address, missing pte, or zero-length range):
 * nothing usable to test here. */
175 if (!addr[i] || !pte || !k) {
/* Clear the global and huge bits; the checks below expect the range to
 * end up as non-global 4k (LOWEST_LEVEL) ptes afterwards. */
180 err = change_page_attr_addr(addr[i], len[i],
181 pte_pgprot(pte_clrhuge(pte_clrglobal(pte0))));
183 printk(KERN_ERR "CPA %d failed %d\n", i, err);
/* Phase 2 (inline): verify the change took effect on the first page. */
187 pte = lookup_address(addr[i], &level);
188 if (!pte || pte_global(*pte) || pte_huge(*pte)) {
189 printk(KERN_ERR "CPA %lx: bad pte %Lx\n", addr[i],
190 pte ? (u64)pte_val(*pte) : 0ULL);
/* The range must have been split down to 4k mappings. */
193 if (level != LOWEST_LEVEL) {
194 printk(KERN_ERR "CPA %lx: unexpected level %d\n",
/* Snapshot sb: state with the global bits cleared. */
203 failed += print_split(&sb);
/* Phase 3: restore the global bit on every tested range. */
205 printk(KERN_INFO "CPA reverting everything\n");
206 for (i = 0; i < NTEST; i++) {
209 pte = lookup_address(addr[i], &level);
211 printk(KERN_ERR "CPA lookup of %lx failed\n", addr[i]);
215 err = change_page_attr_addr(addr[i], len[i],
216 pte_pgprot(pte_mkglobal(*pte)));
218 printk(KERN_ERR "CPA reverting failed: %d\n", err);
/* Verify the revert: the pte must exist and be global again. */
221 pte = lookup_address(addr[i], &level);
222 if (!pte || !pte_global(*pte)) {
223 printk(KERN_ERR "CPA %lx: bad pte after revert %Lx\n",
224 addr[i], pte ? (u64)pte_val(*pte) : 0ULL);
/* Snapshot sc: after the revert it must match the baseline sa. */
231 failed += print_split(&sc);
232 if (!state_same(&sa, &sc))
236 printk(KERN_ERR "CPA selftests NOT PASSED. Please report.\n");
238 printk(KERN_INFO "CPA selftests PASSED\n");
/* Run the self-test at init/module-load time. */
242 module_init(exercise_pageattr);