/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/spu.h>
#define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
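
/* Note: the user address space is carved into hugepage "areas": 256MB
 * segments below 4GB (SID_SHIFT == 28, so NUM_LOW_AREAS == 4GB >> 28
 * == 16, tracked in a 16-bit mask) and larger, HTLB_AREA_SHIFT-sized
 * areas above 4GB.  A segment/area must be "opened" for hugepages
 * before a hugepage mapping may be placed in it, and ordinary mmap()s
 * are steered away from opened areas. */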
#ifdef CONFIG_PPC_64K_PAGES
#define HUGEPTE_INDEX_SIZE	(PMD_SHIFT-HPAGE_SHIFT)
#else
#define HUGEPTE_INDEX_SIZE	(PUD_SHIFT-HPAGE_SHIFT)
#endif
#define PTRS_PER_HUGEPTE	(1 << HUGEPTE_INDEX_SIZE)
#define HUGEPTE_TABLE_SIZE	(sizeof(pte_t) << HUGEPTE_INDEX_SIZE)

#define HUGEPD_SHIFT		(HPAGE_SHIFT + HUGEPTE_INDEX_SIZE)
#define HUGEPD_SIZE		(1UL << HUGEPD_SHIFT)
#define HUGEPD_MASK		(~(HUGEPD_SIZE-1))

#define huge_pgtable_cache	(pgtable_cache[HUGEPTE_CACHE_NUM])
/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */
#define HUGEPD_OK	0x1

typedef struct { unsigned long pd; } hugepd_t;
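
/* Note: a hugepd entry packs the kernel pointer to a hugepte table
 * together with the HUGEPD_OK tag in its low bit; hugepd_page() below
 * strips the tag to recover the table address. */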
#define hugepd_none(hpd)	((hpd).pd == 0)
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!(hpd.pd & HUGEPD_OK));
	return (pte_t *)(hpd.pd & ~HUGEPD_OK);
}
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr)
{
	unsigned long idx = ((addr >> HPAGE_SHIFT) & (PTRS_PER_HUGEPTE-1));
	pte_t *dir = hugepd_page(*hpdp);

	return dir + idx;
}
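
/* Note: the hugepte table is allocated outside the page table lock;
 * the lock is only taken to install the new table.  If another thread
 * raced us and already installed one, ours is simply freed again. */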
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address)
{
	pte_t *new = kmem_cache_alloc(huge_pgtable_cache,
				      GFP_KERNEL|__GFP_REPEAT);

	if (! new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (!hugepd_none(*hpdp))
		kmem_cache_free(huge_pgtable_cache, new);
	else
		hpdp->pd = (unsigned long)new | HUGEPD_OK;
	spin_unlock(&mm->page_table_lock);
	return 0;
}
/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;

	BUG_ON(! in_hugepage_area(mm->context, addr));

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, addr);
		if (!pud_none(*pu)) {
#ifdef CONFIG_PPC_64K_PAGES
			pm = pmd_offset(pu, addr);
			if (!pmd_none(*pm))
				return hugepte_offset((hugepd_t *)pm, addr);
#else
			return hugepte_offset((hugepd_t *)pu, addr);
#endif
		}
	}

	return NULL;
}
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;

	BUG_ON(! in_hugepage_area(mm->context, addr));

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	pu = pud_alloc(mm, pg, addr);

	if (pu) {
#ifdef CONFIG_PPC_64K_PAGES
		pm = pmd_alloc(mm, pu, addr);
		if (pm)
			hpdp = (hugepd_t *)pm;
#else
		hpdp = (hugepd_t *)pu;
#endif
	}

	if (! hpdp)
		return NULL;

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr))
		return NULL;

	return hugepte_offset(hpdp, addr);
}
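
/* Note: hugepage PMD sharing (as done on some other architectures) is
 * not implemented here, so there is never anything to unshare. */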
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
{
	pte_t *hugepte = hugepd_page(*hpdp);

	hpdp->pd = 0;
	tlb->need_flush = 1;
	pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM,
						 PGF_CACHENUM_MASK));
}
#ifdef CONFIG_PPC_64K_PAGES
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		free_hugepte_range(tlb, (hugepd_t *)pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}
#endif
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
#ifdef CONFIG_PPC_64K_PAGES
		if (pud_none_or_clear_bad(pud))
			continue;
		hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
#else
		if (pud_none(*pud))
			continue;
		free_hugepte_range(tlb, (hugepd_t *)pud);
#endif
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}
/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather **tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Comments below taken from the normal free_pgd_range().  They
	 * apply here too.  The tests against HUGEPD_MASK below are
	 * essential, because we *don't* test for this at the bottom
	 * level.  Without them we'll attempt to free a hugepte table
	 * when we unmap just part of it, even if there are other
	 * active mappings using it.
	 *
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing HUGEPD* at this top level?  Because
	 * often there will be no work to do at all, and we'd prefer
	 * not to go all the way down to the bottom just to discover
	 * that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we
	 * must be careful to reject "the opposite 0" before it
	 * confuses the subsequent tests.  But what about where end is
	 * brought down by HUGEPD_SIZE below? no, end can't go down to
	 * 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= HUGEPD_MASK;
	if (addr < floor) {
		addr += HUGEPD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= HUGEPD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= HUGEPD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset((*tlb)->mm, addr);
	do {
		BUG_ON(! in_hugepage_area((*tlb)->mm->context, addr));
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		hugetlb_free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		/* We open-code pte_clear because we need to pass the right
		 * argument to hpte_need_flush (huge / !huge). Might not be
		 * necessary anymore if we make hpte_need_flush() get the
		 * page size from the slices */
		pte_update(mm, addr & HPAGE_MASK, ptep, ~0UL, 1);
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
	return __pte(old);
}
struct slb_flush_info {
	struct mm_struct *mm;
	u16 newareas;
};
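
/* Note: opening a new hugepage area changes the page-size class of its
 * segments, so any stale SLB entries for those segments must be thrown
 * out with slbie on every CPU that is currently running this mm.  The
 * two helpers below are run via on_each_cpu(). */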
static void flush_low_segments(void *parm)
{
	struct slb_flush_info *fi = parm;
	unsigned long i;

	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);

	if (current->active_mm != fi->mm)
		return;

	/* Only need to do anything if this CPU is working in the same
	 * mm as the one which has changed */

	/* update the paca copy of the context struct */
	get_paca()->context = current->active_mm->context;

	asm volatile("isync" : : : "memory");
	for (i = 0; i < NUM_LOW_AREAS; i++) {
		if (! (fi->newareas & (1U << i)))
			continue;
		asm volatile("slbie %0"
			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
	}
	asm volatile("isync" : : : "memory");
}
static void flush_high_segments(void *parm)
{
	struct slb_flush_info *fi = parm;
	unsigned long i, j;

	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);

	if (current->active_mm != fi->mm)
		return;

	/* Only need to do anything if this CPU is working in the same
	 * mm as the one which has changed */

	/* update the paca copy of the context struct */
	get_paca()->context = current->active_mm->context;

	asm volatile("isync" : : : "memory");
	for (i = 0; i < NUM_HIGH_AREAS; i++) {
		if (! (fi->newareas & (1U << i)))
			continue;
		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
			asm volatile("slbie %0"
				     :: "r" (((i << HTLB_AREA_SHIFT)
					      + (j << SID_SHIFT)) | SLBIE_C));
	}
	asm volatile("isync" : : : "memory");
}
static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
	unsigned long start = area << SID_SHIFT;
	unsigned long end = (area+1) << SID_SHIFT;
	struct vm_area_struct *vma;

	BUG_ON(area >= NUM_LOW_AREAS);

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	return 0;
}
static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
	unsigned long start = area << HTLB_AREA_SHIFT;
	unsigned long end = (area+1) << HTLB_AREA_SHIFT;
	struct vm_area_struct *vma;

	BUG_ON(area >= NUM_HIGH_AREAS);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = 0x100000000UL;

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	return 0;
}
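
/* Note: "opening" an area for hugepages is a three step dance: check
 * that no normal VMAs already live there, set the corresponding bit in
 * the mm context, then (after a memory barrier so SLB miss handlers
 * see the new bitmap) flush stale SLB entries on all CPUs using the
 * mm. */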
static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
{
	unsigned long i;
	struct slb_flush_info fi;

	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
	BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);

	newareas &= ~(mm->context.low_htlb_areas);
	if (! newareas)
		return 0; /* The segments we want are already open */

	for (i = 0; i < NUM_LOW_AREAS; i++)
		if ((1 << i) & newareas)
			if (prepare_low_area_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.low_htlb_areas |= newareas;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();

	fi.mm = mm;
	fi.newareas = newareas;
	on_each_cpu(flush_low_segments, &fi, 0, 1);

	return 0;
}
static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
{
	unsigned long i;
	struct slb_flush_info fi;

	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
	BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
		     != NUM_HIGH_AREAS);

	newareas &= ~(mm->context.high_htlb_areas);
	if (! newareas)
		return 0; /* The areas we want are already open */

	for (i = 0; i < NUM_HIGH_AREAS; i++)
		if ((1 << i) & newareas)
			if (prepare_high_area_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.high_htlb_areas |= newareas;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();

	fi.mm = mm;
	fi.newareas = newareas;
	on_each_cpu(flush_high_segments, &fi, 0, 1);

	return 0;
}
int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
{
	int err = 0;

	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
		return -EINVAL;
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;

	if (addr < 0x100000000UL)
		err = open_low_hpage_areas(current->mm,
					   LOW_ESID_MASK(addr, len));
	if ((addr + len) > 0x100000000UL)
		err = open_high_hpage_areas(current->mm,
					    HTLB_AREA_MASK(addr, len));
#ifdef CONFIG_SPE_BASE
	spu_flush_all_slbs(current->mm);
#endif
	if (err) {
		printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
		       " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
		       addr, len,
		       LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
		return err;
	}

	return 0;
}
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;

	if (! in_hugepage_area(mm->context, address))
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, address);
	page = pte_page(*ptep);
	if (page)
		page += (address % HPAGE_SIZE) / PAGE_SIZE;

	return page;
}
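
/* Note: on ppc64 hugepage translations are found through
 * follow_huge_addr() above rather than through huge entries at the pmd
 * level, so pmd_huge() is always false and follow_huge_pmd() should
 * never be reached. */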
int pmd_huge(pmd_t pmd)
{
	return 0;
}
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}
/* Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions. */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* handle fixed mapping: prevent overlap with huge pages */
	if (flags & MAP_FIXED) {
		if (is_hugepage_only_range(mm, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (((TASK_SIZE - len) >= addr)
		    && (!vma || (addr+len) <= vma->vm_start)
		    && !is_hugepage_only_range(mm, addr,len))
			return addr;
	}
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	vma = find_vma(mm, addr);
	while (TASK_SIZE - len >= addr) {
		BUG_ON(vma && (addr >= vma->vm_end));

		if (touches_hugepage_low_range(mm, addr, len)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (touches_hugepage_high_range(mm, addr, len)) {
			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		vma = vma->vm_next;
	}

	/* Make sure we didn't miss any holes */
	if (start_addr != TASK_UNMAPPED_BASE) {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
		goto full_search;
	}
	return -ENOMEM;
}
/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 *
 * Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* handle fixed mapping: prevent overlap with huge pages */
	if (flags & MAP_FIXED) {
		if (is_hugepage_only_range(mm, addr, len))
			return -EINVAL;
		return addr;
	}

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start)
		    && !is_hugepage_only_range(mm, addr,len))
			return addr;
	}

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & PAGE_MASK;
	do {
hugepage_recheck:
		if (touches_hugepage_low_range(mm, addr, len)) {
			addr = (addr & ((~0) << SID_SHIFT)) - len;
			goto hugepage_recheck;
		} else if (touches_hugepage_high_range(mm, addr, len)) {
			addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
			goto hugepage_recheck;
		}

		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr+len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
static int htlb_check_hinted_area(unsigned long addr, unsigned long len)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	if (TASK_SIZE - len >= addr &&
	    (!vma || ((addr + len) <= vma->vm_start)))
		return 0;

	return -ENOMEM;
}
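
/* Note: the helpers below scan the low (below 4GB) and high (4GB and
 * up) parts of the address space for a stretch of free addresses that
 * lies entirely within the allowed set of hugepage areas, skipping
 * forward by a whole segment/area whenever the candidate range falls
 * outside that set. */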
static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
{
	unsigned long addr = 0;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= 0x100000000UL) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (! __within_hugepage_low_range(addr, len, segmask)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on segmask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}
static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
{
	unsigned long addr = 0x100000000UL;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= TASK_SIZE_USER64) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (! __within_hugepage_high_range(addr, len, areamask)) {
			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on areamask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	int lastshift;
	u16 areamask, curareas;

	if (HPAGE_SHIFT == 0)
		return -EINVAL;
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -EINVAL;

	/* Paranoia, caller should have dealt with this */
	BUG_ON((addr + len) < addr);

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(addr, len, pgoff))
			return -EINVAL;
		return addr;
	}

	if (test_thread_flag(TIF_32BIT)) {
		curareas = current->mm->context.low_htlb_areas;

		/* First see if we can use the hint address */
		if (addr && (htlb_check_hinted_area(addr, len) == 0)) {
			areamask = LOW_ESID_MASK(addr, len);
			if (open_low_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}

		/* Next see if we can map in the existing low areas */
		addr = htlb_get_low_area(len, curareas);
		if (addr != -ENOMEM)
			return addr;

		/* Finally go looking for areas to open */
		lastshift = 0;
		for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
		     ! lastshift; areamask >>=1) {
			if (areamask & 1)
				lastshift = 1;

			addr = htlb_get_low_area(len, curareas | areamask);
			if ((addr != -ENOMEM)
			    && open_low_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}
	} else {
		curareas = current->mm->context.high_htlb_areas;

		/* First see if we can use the hint address */
		/* We discourage 64-bit processes from doing hugepage
		 * mappings below 4GB (must use MAP_FIXED) */
		if ((addr >= 0x100000000UL)
		    && (htlb_check_hinted_area(addr, len) == 0)) {
			areamask = HTLB_AREA_MASK(addr, len);
			if (open_high_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}

		/* Next see if we can map in the existing high areas */
		addr = htlb_get_high_area(len, curareas);
		if (addr != -ENOMEM)
			return addr;

		/* Finally go looking for areas to open */
		lastshift = 0;
		for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
		     ! lastshift; areamask >>=1) {
			if (areamask & 1)
				lastshift = 1;

			addr = htlb_get_high_area(len, curareas | areamask);
			if ((addr != -ENOMEM)
			    && open_high_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}
	}
	printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
	       " enough areas\n");
	return -ENOMEM;
}
/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
						  pte_t pte, int trap)
{
	struct page *page;
	int i;

	if (!pfn_valid(pte_pfn(pte)))
		return rflags;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
				__flush_dcache_icache(page_address(page+i));
			set_bit(PG_arch_1, &page->flags);
		} else {
			rflags |= HPTE_R_N;
		}
	}
	return rflags;
}
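
/* Note: the hash fault path below mirrors the normal-page hash_page()
 * logic: take a software lock on the linux pte by setting _PAGE_BUSY
 * with cmpxchg, work out the HPTE protection bits, then either update
 * the existing HPTE in place or insert a new one (trying the primary
 * hash bucket first, then the secondary), and finally record the slot
 * in the pte and drop _PAGE_BUSY. */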
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local,
		   unsigned long trap)
{
	pte_t *ptep;
	unsigned long old_pte, new_pte;
	unsigned long va, rflags, pa;
	long slot;
	int err = 1;

	ptep = huge_pte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = (vsid << 28) | (ea & 0x0fffffff);

	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || pte_none(*ptep)))
		goto out;

	/*
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
		goto out;
	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */

	do {
		old_pte = pte_val(*ptep);
		if (old_pte & _PAGE_BUSY)
			goto out;
		new_pte = old_pte | _PAGE_BUSY |
			_PAGE_ACCESSED | _PAGE_HASHPTE;
	} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
					 old_pte, new_pte));

	rflags = 0x2 | (!(new_pte & _PAGE_RW));
	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no-execute, so we
		 * don't need to worry about that case */
		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
						       trap);

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(va, HPAGE_SHIFT);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
					 local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(va, HPAGE_SHIFT);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* clear HPTE slot information in new PTE */
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;

		/* Add in WIMG bits */
		/* XXX We should store these in the pte */
		/* --BenH: I think they are ... */
		rflags |= _PAGE_COHERENT;

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
					  mmu_huge_psize);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_huge_psize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP)&~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
	}

	/*
	 * No need to use ldarx/stdcx here
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);

	err = 0;

 out:
	return err;
}
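
/* The hugepte tables come from their own kmem cache; the constructor
 * below zeroes each object, so a freshly allocated table starts out
 * with all entries empty (pte_none). */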
static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
	memset(addr, 0, kmem_cache_size(cache));
}
static int __init hugetlbpage_init(void)
{
	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -ENODEV;

	huge_pgtable_cache = kmem_cache_create("hugepte_cache",
					       HUGEPTE_TABLE_SIZE,
					       HUGEPTE_TABLE_SIZE,
					       0,
					       zero_ctor, NULL);
	if (! huge_pgtable_cache)
		panic("hugetlbpage_init(): could not create hugepte cache\n");

	return 0;
}

module_init(hugetlbpage_init);