/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/init.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#define HPAGE_SHIFT_64K	16
#define HPAGE_SHIFT_16M	24

#define NUM_LOW_AREAS		(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS		(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
#define MAX_NUMBER_GPAGES	1024
/* Tracks the 16G pages after the device tree is scanned and before the
 * huge_boot_pages list is ready. */
static unsigned long gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;

unsigned int hugepte_shift;
#define PTRS_PER_HUGEPTE	(1 << hugepte_shift)
#define HUGEPTE_TABLE_SIZE	(sizeof(pte_t) << hugepte_shift)
#define HUGEPD_SHIFT		(HPAGE_SHIFT + hugepte_shift)
#define HUGEPD_SIZE		(1UL << HUGEPD_SHIFT)
#define HUGEPD_MASK		(~(HUGEPD_SIZE-1))

#define huge_pgtable_cache	(pgtable_cache[HUGEPTE_CACHE_NUM])
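/*
 * Layout implied by the definitions above: a huge page directory (hugepd)
 * entry points at a table of PTRS_PER_HUGEPTE hugeptes, each mapping one
 * HPAGE_SIZE page, so a single hugepd covers HUGEPD_SIZE of address space.
 * The hugepte tables themselves come from huge_pgtable_cache.
 */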
/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */
#define HUGEPD_OK	0x1

typedef struct { unsigned long pd; } hugepd_t;

#define hugepd_none(hpd)	((hpd).pd == 0)
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!(hpd.pd & HUGEPD_OK));
	return (pte_t *)(hpd.pd & ~HUGEPD_OK);
}

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr)
{
	unsigned long idx = ((addr >> HPAGE_SHIFT) & (PTRS_PER_HUGEPTE-1));
	pte_t *dir = hugepd_page(*hpdp);
	return dir + idx;
}
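/*
 * Allocate a fresh hugepte table and hook it into the given hugepd slot.
 * The table is allocated outside the lock and the slot is re-checked under
 * mm->page_table_lock, so if another thread populated it first we simply
 * free our table again.
 */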
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address)
{
	pte_t *new = kmem_cache_alloc(huge_pgtable_cache,
				      GFP_KERNEL|__GFP_REPEAT);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (!hugepd_none(*hpdp))
		kmem_cache_free(huge_pgtable_cache, new);
	else
		hpdp->pd = (unsigned long)new | HUGEPD_OK;
	spin_unlock(&mm->page_table_lock);
	return 0;
}
/* Base page size affects how we walk hugetlb page tables */
#ifdef CONFIG_PPC_64K_PAGES
#define hpmd_offset(pud, addr)		pmd_offset(pud, addr)
#define hpmd_alloc(mm, pud, addr)	pmd_alloc(mm, pud, addr)
#else
static inline pmd_t *hpmd_offset(pud_t *pud, unsigned long addr)
{
	if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
		return pmd_offset(pud, addr);
	else
		return (pmd_t *) pud;
}

static inline pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud,
				unsigned long addr)
{
	if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
		return pmd_alloc(mm, pud, addr);
	else
		return (pmd_t *) pud;
}
#endif
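/*
 * Note: with a 64K base page size the hugepd always hangs off a PMD entry.
 * With a 4K base page size, 64K huge pages also live at the PMD level, but
 * 16M huge pages attach at the PUD level, so the PMD step collapses and
 * hpmd_offset()/hpmd_alloc() simply hand the PUD back (see set_huge_psize()
 * below for where hugepte_shift is chosen accordingly).
 */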
/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list. */
int alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;

	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}
/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
	pg = pgd_offset(mm, addr);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, addr);
		if (!pud_none(*pu)) {
			pm = hpmd_offset(pu, addr);
			if (!pmd_none(*pm))
				return hugepte_offset((hugepd_t *)pm, addr);
		}
	}
	return NULL;
}
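/*
 * Like huge_pte_offset() above, but allocates any missing intermediate
 * levels (pud, pmd and the hugepte table itself) on the way down.
 */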
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pg; pud_t *pu; pmd_t *pm;
	hugepd_t *hpdp = NULL;
	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
	pg = pgd_offset(mm, addr);
	pu = pud_alloc(mm, pg, addr);
	if (pu) {
		pm = hpmd_alloc(mm, pu, addr);
		if (pm)
			hpdp = (hugepd_t *)pm;
	}
	if (!hpdp)
		return NULL;
	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr))
		return NULL;
	return hugepte_offset(hpdp, addr);
}
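/*
 * huge_pmd_unshare() only exists to satisfy the generic hugetlb interface;
 * powerpc does not implement huge PMD sharing.
 */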
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
{
	pte_t *hugepte = hugepd_page(*hpdp);

	pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM,
						 HUGEPTE_TABLE_SIZE-1));
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		free_hugepte_range(tlb, (hugepd_t *)pmd);
	} while (pmd++, addr = next, addr != end);
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pmd_free_tlb(tlb, pmd);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
#ifdef CONFIG_PPC_64K_PAGES
		if (pud_none_or_clear_bad(pud))
			continue;
		hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
#else
		if (HPAGE_SHIFT == HPAGE_SHIFT_64K) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
		} else {
			free_hugepte_range(tlb, (hugepd_t *)pud);
		}
#endif
	} while (pud++, addr = next, addr != end);
	ceiling &= PGDIR_MASK;

	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pud_free_tlb(tlb, pud);
}
/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	/*
	 * Comments below taken from the normal free_pgd_range().  They
	 * apply here too.  The tests against HUGEPD_MASK below are
	 * essential, because we *don't* test for this at the bottom
	 * level.  Without them we'll attempt to free a hugepte table
	 * when we unmap just part of it, even if there are other
	 * active mappings using it.
	 *
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing HUGEPD* at this top level?  Because
	 * often there will be no work to do at all, and we'd prefer
	 * not to go all the way down to the bottom just to discover
	 * that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we
	 * must be careful to reject "the opposite 0" before it
	 * confuses the subsequent tests.  But what about where end is
	 * brought down by HUGEPD_SIZE below?  no, end can't go down to
	 * 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */
	ceiling &= HUGEPD_MASK;

	if (end - 1 > ceiling - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	do {
		BUG_ON(get_slice_psize(tlb->mm, addr) != mmu_huge_psize);
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		/* We open-code pte_clear because we need to pass the right
		 * argument to hpte_need_flush (huge / !huge).  Might not be
		 * necessary anymore if we make hpte_need_flush() get the
		 * page size from the slices.
		 */
		pte_update(mm, addr & HPAGE_MASK, ptep, ~0UL, 1);
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}
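/*
 * _PAGE_HPTEFLAGS is cleared above because a freshly installed mapping has
 * no hash table entry yet; hash_huge_page() fills those bits in when the
 * HPTE is actually created.
 */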
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
	return __pte(old);
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	if (get_slice_psize(mm, address) != mmu_huge_psize)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, address);
	page = pte_page(*ptep);
	page += (address % HPAGE_SIZE) / PAGE_SIZE;
	return page;
}
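/*
 * Generic mm code uses pmd_huge()/pud_huge() to ask whether a pmd/pud entry
 * itself maps a huge page.  Huge pages here are always reached through
 * hugepte tables hanging off a hugepd, so these helpers and
 * follow_huge_pmd() are effectively stubs on this platform.
 */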
int pmd_huge(pmd_t pmd)

int pud_huge(pud_t pud)

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       mmu_huge_psize, 1, 0);
}
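/*
 * Address selection for huge-page mappings is delegated to the powerpc
 * slice code, keyed by mmu_huge_psize, rather than the generic
 * get_unmapped_area() path.
 */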
/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
						  pte_t pte, int trap)
{
	if (!pfn_valid(pte_pfn(pte)))
		return rflags;
	page = pte_page(pte);
	/* PG_arch_1 set means the icache is already clean for this page */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
			__flush_dcache_icache(page_address(page+i));
		set_bit(PG_arch_1, &page->flags);
	}
	return rflags;
}
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local,
		   unsigned long trap)
	unsigned long old_pte, new_pte;
	unsigned long va, rflags, pa;
	int ssize = user_segment_size(ea);

	ptep = huge_pte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = hpt_va(ea, vsid, ssize);

	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault.
	 */
	if (unlikely(!ptep || pte_none(*ptep)))

	/*
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE.  There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE.  The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */

	old_pte = pte_val(*ptep);
	if (old_pte & _PAGE_BUSY)
	new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
					  old_pte, new_pte));
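	/* The cmpxchg loop above atomically marks the pte _PAGE_BUSY (a
	 * software lock bit) and _PAGE_ACCESSED, retrying if the pte changed
	 * under us; _PAGE_BUSY is cleared again once the HPTE has been set
	 * up (see the end of this function). */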
	rflags = 0x2 | (!(new_pte & _PAGE_RW));
	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no-execute, so we
		 * don't need to worry about that case. */
		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
						       trap);
	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(va, HPAGE_SHIFT, ssize);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		/* If the existing HPTE cannot be updated it is stale; forget
		 * it and fall through to insert a fresh one below. */
		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
					 ssize, local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}
	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(va, HPAGE_SHIFT, ssize);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* clear HPTE slot information in new PTE */
#ifdef CONFIG_PPC_64K_PAGES
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
#else
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
#endif
		/* Add in WIMG bits */
		rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
				      _PAGE_COHERENT | _PAGE_GUARDED));
		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
					  mmu_huge_psize, ssize);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_huge_psize, ssize);

			hpte_group = ((hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			ppc_md.hpte_remove(hpte_group);
		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
	}

	/*
	 * No need to use ldarx/stdcx here
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);
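	/*
	 * The slot that the HPTE went into is remembered in the software pte
	 * bits (_PAGE_F_SECOND/_PAGE_F_GIX) so it can be located again later,
	 * and clearing _PAGE_BUSY releases the software lock taken by the
	 * cmpxchg loop above.
	 */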
void set_huge_psize(int psize)
{
	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable limits. */
	if (mmu_psize_defs[psize].shift &&
	    mmu_psize_defs[psize].shift < SID_SHIFT &&
	    (mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
	     mmu_psize_defs[psize].shift == HPAGE_SHIFT_64K)) {
		HPAGE_SHIFT = mmu_psize_defs[psize].shift;
		mmu_huge_psize = psize;
#ifdef CONFIG_PPC_64K_PAGES
		hugepte_shift = (PMD_SHIFT-HPAGE_SHIFT);
#else
		if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
			hugepte_shift = (PMD_SHIFT-HPAGE_SHIFT);
		else
			hugepte_shift = (PUD_SHIFT-HPAGE_SHIFT);
#endif
	}
}
static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;
	int mmu_psize = -1;

	size = memparse(str, &str);

	switch (__ffs(size)) {
#ifndef CONFIG_PPC_64K_PAGES
	case HPAGE_SHIFT_64K:
		mmu_psize = MMU_PAGE_64K;
		break;
#endif
	case HPAGE_SHIFT_16M:
		mmu_psize = MMU_PAGE_16M;
		break;
	}

	if (mmu_psize >= 0 && mmu_psize_defs[mmu_psize].shift)
		set_huge_psize(mmu_psize);
	else
		printk(KERN_WARNING "Invalid huge page size specified (%llu)\n",
		       size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
static void zero_ctor(struct kmem_cache *cache, void *addr)
{
	memset(addr, 0, kmem_cache_size(cache));
}

static int __init hugetlbpage_init(void)
{
	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -ENODEV;
	huge_pgtable_cache = kmem_cache_create("hugepte_cache",
					       HUGEPTE_TABLE_SIZE,
					       HUGEPTE_TABLE_SIZE, 0, zero_ctor);
	if (!huge_pgtable_cache)
		panic("hugetlbpage_init(): could not create hugepte cache\n");
	return 0;
}

module_init(hugetlbpage_init);