/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/pgtable.h>

#include <linux/hugetlb.h>

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
static unsigned long nr_overcommit_huge_pages;
unsigned long max_huge_pages;
unsigned long sysctl_overcommit_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
static int hugetlb_next_nid;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
        for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
                clear_user_highpage(page + i, addr + i * PAGE_SIZE);

static void copy_huge_page(struct page *dst, struct page *src,
                           unsigned long addr, struct vm_area_struct *vma)
        for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
                copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
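/*
 * Note on the two loops above: each walks the huge page one base page at
 * a time, HPAGE_SIZE/PAGE_SIZE iterations in total.  Purely as an
 * illustration (the ratio is architecture-dependent), 2 MiB huge pages
 * over 4 KiB base pages mean 512 clear/copy operations per huge page.
 */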
static void enqueue_huge_page(struct page *page)
        int nid = page_to_nid(page);
        list_add(&page->lru, &hugepage_freelists[nid]);
        free_huge_pages_node[nid]++;

static struct page *dequeue_huge_page(void)
        struct page *page = NULL;

        for (nid = 0; nid < MAX_NUMNODES; ++nid) {
                if (!list_empty(&hugepage_freelists[nid])) {
                        page = list_entry(hugepage_freelists[nid].next,
                        free_huge_pages_node[nid]--;

static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
                                unsigned long address)
        struct page *page = NULL;
        struct mempolicy *mpol;
        struct zonelist *zonelist = huge_zonelist(vma, address,
                                        htlb_alloc_mask, &mpol);

        for (z = zonelist->zones; *z; z++) {
                nid = zone_to_nid(*z);
                if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
                    !list_empty(&hugepage_freelists[nid])) {
                        page = list_entry(hugepage_freelists[nid].next,
                        list_del(&page->lru);
                        free_huge_pages_node[nid]--;
                        if (vma && vma->vm_flags & VM_MAYSHARE)
        mpol_free(mpol);        /* unref if mpol !NULL */

static void update_and_free_page(struct page *page)
        nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
                page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
                                1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1 << PG_writeback);
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
        __free_pages(page, HUGETLB_PAGE_ORDER);

static void free_huge_page(struct page *page)
        int nid = page_to_nid(page);
        struct address_space *mapping;

        mapping = (struct address_space *) page_private(page);
        set_page_private(page, 0);
        BUG_ON(page_count(page));
        INIT_LIST_HEAD(&page->lru);

        spin_lock(&hugetlb_lock);
        if (surplus_huge_pages_node[nid]) {
                update_and_free_page(page);
                surplus_huge_pages--;
                surplus_huge_pages_node[nid]--;
                enqueue_huge_page(page);
        spin_unlock(&hugetlb_lock);
                hugetlb_put_quota(mapping, 1);

/*
 * Increment or decrement surplus_huge_pages. Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
        VM_BUG_ON(delta != -1 && delta != 1);

                nid = next_node(nid, node_online_map);
                if (nid == MAX_NUMNODES)
                        nid = first_node(node_online_map);

                /* To shrink on this node, there must be a surplus page */
                if (delta < 0 && !surplus_huge_pages_node[nid])
                /* Surplus cannot exceed the total number of pages */
                if (delta > 0 && surplus_huge_pages_node[nid] >=
                                                nr_huge_pages_node[nid])

                surplus_huge_pages += delta;
                surplus_huge_pages_node[nid] += delta;
        } while (nid != prev_nid);
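/*
 * Illustrative sketch (not kernel code) of the round-robin node walk used
 * by adjust_pool_surplus() above: advance from the previous node, wrap at
 * the end of the map, and stop after one full pass.  The names
 * pick_next_node(), online[] and nnodes are hypothetical stand-ins for
 * node_online_map and its iterators.
 */
static int pick_next_node(int prev, const int *online, int nnodes)
{
        int nid = prev;

        do {
                nid++;                          /* next_node() */
                if (nid == nnodes)
                        nid = 0;                /* wrap: first_node() */
                if (online[nid])
                        return nid;
        } while (nid != prev);

        return prev;                            /* only one node to choose */
}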
static struct page *alloc_fresh_huge_page_node(int nid)
        page = alloc_pages_node(nid,
                htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
                set_compound_page_dtor(page, free_huge_page);
                spin_lock(&hugetlb_lock);
                nr_huge_pages_node[nid]++;
                spin_unlock(&hugetlb_lock);
                put_page(page); /* free it into the hugepage allocator */

static int alloc_fresh_huge_page(void)
        start_nid = hugetlb_next_nid;

                page = alloc_fresh_huge_page_node(hugetlb_next_nid);
                /*
                 * Use a helper variable to find the next node and then
                 * copy it back to hugetlb_next_nid afterwards:
                 * otherwise there's a window in which a racer might
                 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
                 * But we don't need to use a spin_lock here: it really
                 * doesn't matter if occasionally a racer chooses the
                 * same nid as we do. Move nid forward in the mask even
                 * if we just successfully allocated a hugepage so that
                 * the next caller gets hugepages on the next node.
                 */
                next_nid = next_node(hugetlb_next_nid, node_online_map);
                if (next_nid == MAX_NUMNODES)
                        next_nid = first_node(node_online_map);
                hugetlb_next_nid = next_nid;
        } while (!page && hugetlb_next_nid != start_nid);

static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
                                unsigned long address)
        /*
         * Assume we will successfully allocate the surplus page to
         * prevent racing processes from causing the surplus to exceed
         *
         * This however introduces a different race, where a process B
         * tries to grow the static hugepage pool while alloc_pages() is
         * called by process A. B will only examine the per-node
         * counters in determining if surplus huge pages can be
         * converted to normal huge pages in adjust_pool_surplus(). A
         * won't be able to increment the per-node counter, until the
         * lock is dropped by B, but B doesn't drop hugetlb_lock until
         * no more huge pages can be converted from surplus to normal
         * state (and doesn't try to convert again). Thus, we have a
         * case where a surplus huge page exists, the pool is grown, and
         * the surplus huge page still exists after, even though it
         * should just have been converted to a normal huge page. This
         * does not leak memory, though, as the hugepage will be freed
         * once it is out of use. It also does not allow the counters to
         * go out of whack in adjust_pool_surplus() as we don't modify
         * the node values until we've gotten the hugepage and only the
         * per-node value is checked there.
         */
        spin_lock(&hugetlb_lock);
        if (surplus_huge_pages >= nr_overcommit_huge_pages) {
                spin_unlock(&hugetlb_lock);
                surplus_huge_pages++;
        spin_unlock(&hugetlb_lock);

        page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,

        spin_lock(&hugetlb_lock);
                /*
                 * This page is now managed by the hugetlb allocator and has
                 * no users -- drop the buddy allocator's reference.
                 */
                put_page_testzero(page);
                VM_BUG_ON(page_count(page));
                nid = page_to_nid(page);
                set_compound_page_dtor(page, free_huge_page);
                /*
                 * We incremented the global counters already
                 */
                nr_huge_pages_node[nid]++;
                surplus_huge_pages_node[nid]++;
                surplus_huge_pages--;
        spin_unlock(&hugetlb_lock);
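/*
 * Userspace sketch (illustrative only, not kernel code) of the pattern
 * alloc_buddy_huge_page() relies on: charge the shared counter under the
 * lock before allocating, perform the allocation with the lock dropped,
 * and undo the charge if the allocation fails.  All names below
 * (pool_lock, surplus, overcommit_limit, alloc_surplus_object) are
 * hypothetical.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long surplus;
static unsigned long overcommit_limit = 8;

static void *alloc_surplus_object(size_t size)
{
        void *obj;

        pthread_mutex_lock(&pool_lock);
        if (surplus >= overcommit_limit) {
                pthread_mutex_unlock(&pool_lock);
                return NULL;
        }
        surplus++;                      /* optimistic charge */
        pthread_mutex_unlock(&pool_lock);

        obj = malloc(size);             /* may fail; lock is not held */

        pthread_mutex_lock(&pool_lock);
        if (!obj)
                surplus--;              /* undo the charge on failure */
        pthread_mutex_unlock(&pool_lock);

        return obj;
}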
/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 */
static int gather_surplus_pages(int delta)
        struct list_head surplus_list;
        struct page *page, *tmp;
        int needed, allocated;

        needed = (resv_huge_pages + delta) - free_huge_pages;
                resv_huge_pages += delta;

        INIT_LIST_HEAD(&surplus_list);

        spin_unlock(&hugetlb_lock);
        for (i = 0; i < needed; i++) {
                page = alloc_buddy_huge_page(NULL, 0);
                        /*
                         * We were not able to allocate enough pages to
                         * satisfy the entire reservation so we free what
                         * we've allocated so far.
                         */
                        spin_lock(&hugetlb_lock);

                list_add(&page->lru, &surplus_list);

        /*
         * After retaking hugetlb_lock, we need to recalculate 'needed'
         * because either resv_huge_pages or free_huge_pages may have changed.
         */
        spin_lock(&hugetlb_lock);
        needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);

        /*
         * The surplus_list now contains _at_least_ the number of extra pages
         * needed to accommodate the reservation. Add the appropriate number
         * of pages to the hugetlb pool and free the extras back to the buddy
         * allocator. Commit the entire reservation here to prevent another
         * process from stealing the pages as they are added to the pool but
         * before they are reserved.
         */
        resv_huge_pages += delta;

        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                list_del(&page->lru);
                        enqueue_huge_page(page);
                        /*
                         * The page has a reference count of zero already, so
                         * call free_huge_page directly instead of using
                         * put_page. This must be done with hugetlb_lock
                         * unlocked which is safe because free_huge_page takes
                         * hugetlb_lock before deciding how to free the page.
                         */
                        spin_unlock(&hugetlb_lock);
                        free_huge_page(page);
                        spin_lock(&hugetlb_lock);
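/*
 * Worked example of the arithmetic above, as a hypothetical helper
 * (illustration only): how many surplus pages a reservation of 'delta'
 * huge pages still requires given the current pool state.  With
 * resv = 10, free = 12 and delta = 5, (10 + 5) - 12 = 3 pages must be
 * allocated from the buddy allocator before the reservation can be
 * committed; a non-positive result means it can be committed at once.
 */
static long surplus_needed(long resv, long free, long delta)
{
        long needed = (resv + delta) - free;

        return needed > 0 ? needed : 0;
}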
/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
static void return_unused_surplus_pages(unsigned long unused_resv_pages)
        unsigned long nr_pages;

        /* Uncommit the reservation */
        resv_huge_pages -= unused_resv_pages;

        nr_pages = min(unused_resv_pages, surplus_huge_pages);

                nid = next_node(nid, node_online_map);
                if (nid == MAX_NUMNODES)
                        nid = first_node(node_online_map);

                if (!surplus_huge_pages_node[nid])

                if (!list_empty(&hugepage_freelists[nid])) {
                        page = list_entry(hugepage_freelists[nid].next,
                        list_del(&page->lru);
                        update_and_free_page(page);
                        free_huge_pages_node[nid]--;
                        surplus_huge_pages--;
                        surplus_huge_pages_node[nid]--;

static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_vma(vma, addr);
        spin_unlock(&hugetlb_lock);
        return page ? page : ERR_PTR(-VM_FAULT_OOM);

static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
        struct page *page = NULL;

        if (hugetlb_get_quota(vma->vm_file->f_mapping, 1))
                return ERR_PTR(-VM_FAULT_SIGBUS);

        spin_lock(&hugetlb_lock);
        if (free_huge_pages > resv_huge_pages)
                page = dequeue_huge_page_vma(vma, addr);
        spin_unlock(&hugetlb_lock);
                page = alloc_buddy_huge_page(vma, addr);
                        hugetlb_put_quota(vma->vm_file->f_mapping, 1);
                        return ERR_PTR(-VM_FAULT_OOM);

static struct page *alloc_huge_page(struct vm_area_struct *vma,
        struct address_space *mapping = vma->vm_file->f_mapping;

        if (vma->vm_flags & VM_MAYSHARE)
                page = alloc_huge_page_shared(vma, addr);
                page = alloc_huge_page_private(vma, addr);
                set_page_refcounted(page);
                set_page_private(page, (unsigned long) mapping);

static int __init hugetlb_init(void)
        if (HPAGE_SHIFT == 0)

        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&hugepage_freelists[i]);

        hugetlb_next_nid = first_node(node_online_map);

        for (i = 0; i < max_huge_pages; ++i) {
                if (!alloc_fresh_huge_page())
        max_huge_pages = free_huge_pages = nr_huge_pages = i;
        printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
        if (sscanf(s, "%lu", &max_huge_pages) <= 0)
__setup("hugepages=", hugetlb_setup);

static unsigned int cpuset_mems_nr(unsigned int *array)
        for_each_node_mask(node, cpuset_current_mems_allowed)

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
        for (i = 0; i < MAX_NUMNODES; ++i) {
                struct page *page, *next;
                list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
                        if (count >= nr_huge_pages)
                        if (PageHighMem(page))
                        list_del(&page->lru);
                        update_and_free_page(page);
                        free_huge_pages_node[page_to_nid(page)]--;

static inline void try_to_free_low(unsigned long count)

#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
static unsigned long set_max_huge_pages(unsigned long count)
        unsigned long min_count, ret;

        /*
         * Increase the pool size
         * First take pages out of surplus state. Then make up the
         * remaining difference by allocating fresh huge pages.
         *
         * We might race with alloc_buddy_huge_page() here and be unable
         * to convert a surplus huge page to a normal huge page. That is
         * not critical, though, it just means the overall size of the
         * pool might be one hugepage larger than it needs to be, but
         * within all the constraints specified by the sysctls.
         */
        spin_lock(&hugetlb_lock);
        while (surplus_huge_pages && count > persistent_huge_pages) {
                if (!adjust_pool_surplus(-1))

        while (count > persistent_huge_pages) {
                /*
                 * If this allocation races such that we no longer need the
                 * page, free_huge_page will handle it by freeing the page
                 * and reducing the surplus.
                 */
                spin_unlock(&hugetlb_lock);
                ret = alloc_fresh_huge_page();
                spin_lock(&hugetlb_lock);

        /*
         * Decrease the pool size
         * First return free pages to the buddy allocator (being careful
         * to keep enough around to satisfy reservations). Then place
         * pages into surplus state as needed so the pool will shrink
         * to the desired size as pages become free.
         *
         * By placing pages into the surplus state independent of the
         * overcommit value, we are allowing the surplus pool size to
         * exceed overcommit. There are few sane options here. Since
         * alloc_buddy_huge_page() is checking the global counter,
         * though, we'll note that we're not allowed to exceed surplus
         * and won't grow the pool anywhere else. Not until one of the
         * sysctls is changed, or the surplus pages go out of use.
         */
        min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
        min_count = max(count, min_count);
        try_to_free_low(min_count);
        while (min_count < persistent_huge_pages) {
                struct page *page = dequeue_huge_page();
                update_and_free_page(page);
        while (count < persistent_huge_pages) {
                if (!adjust_pool_surplus(1))
        ret = persistent_huge_pages;
        spin_unlock(&hugetlb_lock);
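/*
 * Illustration only (hypothetical helper): the floor used by the shrink
 * path above.  Pages that are currently in use (nr - free) or reserved
 * must be kept, and the pool is never shrunk below the requested count.
 * For example, with resv = 2, nr = 10 and free = 4, the pool cannot drop
 * below 8 huge pages even if a smaller count is requested.
 */
static unsigned long min_pool_size(unsigned long count, unsigned long resv,
                                   unsigned long nr, unsigned long free)
{
        unsigned long min_count = resv + nr - free;

        return count > min_count ? count : min_count;
}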
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
                           struct file *file, void __user *buffer,
                           size_t *length, loff_t *ppos)
        proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
        max_huge_pages = set_max_huge_pages(max_huge_pages);

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
                                  struct file *file, void __user *buffer,
                                  size_t *length, loff_t *ppos)
        proc_dointvec(table, write, file, buffer, length, ppos);
        if (hugepages_treat_as_movable)
                htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
                htlb_alloc_mask = GFP_HIGHUSER;

int hugetlb_overcommit_handler(struct ctl_table *table, int write,
                               struct file *file, void __user *buffer,
                               size_t *length, loff_t *ppos)
        proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
        spin_lock(&hugetlb_lock);
        nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
        spin_unlock(&hugetlb_lock);

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
                        "HugePages_Total: %5lu\n"
                        "HugePages_Free:  %5lu\n"
                        "HugePages_Rsvd:  %5lu\n"
                        "HugePages_Surp:  %5lu\n"
                        "Hugepagesize:    %5lu kB\n",

int hugetlb_report_node_meminfo(int nid, char *buf)
                "Node %d HugePages_Total: %5u\n"
                "Node %d HugePages_Free:  %5u\n"
                "Node %d HugePages_Surp:  %5u\n",
                nid, nr_huge_pages_node[nid],
                nid, free_huge_pages_node[nid],
                nid, surplus_huge_pages_node[nid]);

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
        return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
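/*
 * Example for the conversion above (illustrative, architecture-dependent):
 * with 4 KiB base pages and 2 MiB huge pages, HPAGE_SIZE / PAGE_SIZE is
 * 512, so a pool of 10 huge pages is reported as 5120 base-page-sized
 * units here.
 */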
/*
 * We cannot handle pagefaults against hugetlb pages at all. They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
 * here.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)

struct vm_operations_struct hugetlb_vm_ops = {
        .fault = hugetlb_vm_op_fault,

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
                pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        entry = pte_mkyoung(entry);
        entry = pte_mkhuge(entry);

static void set_huge_ptep_writable(struct vm_area_struct *vma,
                                   unsigned long address, pte_t *ptep)
        entry = pte_mkwrite(pte_mkdirty(*ptep));
        if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
                update_mmu_cache(vma, address, entry);

int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;

        cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                src_pte = huge_pte_offset(src, addr);
                dst_pte = huge_pte_alloc(dst, addr);

                /* If the pagetables are shared don't copy or take references */
                if (dst_pte == src_pte)

                spin_lock(&dst->page_table_lock);
                spin_lock(&src->page_table_lock);
                if (!pte_none(*src_pte)) {
                                ptep_set_wrprotect(src, addr, src_pte);
                        ptepage = pte_page(entry);
                        set_huge_pte_at(dst, addr, dst_pte, entry);
                spin_unlock(&src->page_table_lock);
                spin_unlock(&dst->page_table_lock);

void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        /*
         * A page gathering list, protected by per file i_mmap_lock. The
         * lock is used to avoid list corruption from multiple unmapping
         * of the same page since we are using page->lru.
         */
        LIST_HEAD(page_list);

        WARN_ON(!is_vm_hugetlb_page(vma));
        BUG_ON(start & ~HPAGE_MASK);
        BUG_ON(end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (address = start; address < end; address += HPAGE_SIZE) {
                ptep = huge_pte_offset(mm, address);
                if (huge_pmd_unshare(mm, &address, ptep))
                pte = huge_ptep_get_and_clear(mm, address, ptep);
                page = pte_page(pte);
                        set_page_dirty(page);
                list_add(&page->lru, &page_list);
        spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, start, end);
        list_for_each_entry_safe(page, tmp, &page_list, lru) {
                list_del(&page->lru);

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        /*
         * It is undesirable to test vma->vm_file as it should be non-null
         * for valid hugetlb area. However, vm_file will be NULL in the error
         * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
         * do_mmap_pgoff() nullifies vma->vm_file before calling this function
         * to clean up. Since no pte has actually been set up, it is safe to
         * do nothing in this case.
         */
                spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
                __unmap_hugepage_range(vma, start, end);
                spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                       unsigned long address, pte_t *ptep, pte_t pte)
        struct page *old_page, *new_page;

        old_page = pte_page(pte);

        /* If no-one else is actually using this page, avoid the copy
         * and just make the page writable */
        avoidcopy = (page_count(old_page) == 1);
                set_huge_ptep_writable(vma, address, ptep);

        page_cache_get(old_page);
        new_page = alloc_huge_page(vma, address);

        if (IS_ERR(new_page)) {
                page_cache_release(old_page);
                return -PTR_ERR(new_page);

        spin_unlock(&mm->page_table_lock);
        copy_huge_page(new_page, old_page, address, vma);
        __SetPageUptodate(new_page);
        spin_lock(&mm->page_table_lock);

        ptep = huge_pte_offset(mm, address & HPAGE_MASK);
        if (likely(pte_same(*ptep, pte))) {
                set_huge_pte_at(mm, address, ptep,
                                make_huge_pte(vma, new_page, 1));
                /* Make the old page be freed below */
        page_cache_release(new_page);
        page_cache_release(old_page);

static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep, int write_access)
        int ret = VM_FAULT_SIGBUS;
        struct address_space *mapping;

        mapping = vma->vm_file->f_mapping;
        idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
                + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

        /*
         * Use page lock to guard against racing truncation
         * before we get page_table_lock.
         */
        page = find_lock_page(mapping, idx);
                size = i_size_read(mapping->host) >> HPAGE_SHIFT;
                page = alloc_huge_page(vma, address);
                        ret = -PTR_ERR(page);
                clear_huge_page(page, address);
                __SetPageUptodate(page);

                if (vma->vm_flags & VM_SHARED) {
                        struct inode *inode = mapping->host;

                        err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);

                        spin_lock(&inode->i_lock);
                        inode->i_blocks += BLOCKS_PER_HUGEPAGE;
                        spin_unlock(&inode->i_lock);

        spin_lock(&mm->page_table_lock);
        size = i_size_read(mapping->host) >> HPAGE_SHIFT;
        if (!pte_none(*ptep))

        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
                                && (vma->vm_flags & VM_SHARED)));
        set_huge_pte_at(mm, address, ptep, new_pte);

        if (write_access && !(vma->vm_flags & VM_SHARED)) {
                /* Optimization, do the COW without a second fault */
                ret = hugetlb_cow(mm, vma, address, ptep, new_pte);

        spin_unlock(&mm->page_table_lock);

        spin_unlock(&mm->page_table_lock);
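/*
 * Illustration only (hypothetical helper): the index computed near the top
 * of hugetlb_no_page() is the faulting address's offset into the file,
 * expressed in huge-page-sized units.  vm_pgoff is kept in PAGE_SIZE
 * units, hence the extra shift by HPAGE_SHIFT - PAGE_SHIFT.
 */
static unsigned long huge_page_index(unsigned long address,
                                     unsigned long vm_start,
                                     unsigned long vm_pgoff)
{
        return ((address - vm_start) >> HPAGE_SHIFT)
                + (vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
}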
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                  unsigned long address, int write_access)
        static DEFINE_MUTEX(hugetlb_instantiation_mutex);

        ptep = huge_pte_alloc(mm, address);

        /*
         * Serialize hugepage allocation and instantiation, so that we don't
         * get spurious allocation failures if two CPUs race to instantiate
         * the same page in the page cache.
         */
        mutex_lock(&hugetlb_instantiation_mutex);
        if (pte_none(entry)) {
                ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
                mutex_unlock(&hugetlb_instantiation_mutex);

        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
        if (likely(pte_same(entry, *ptep)))
                if (write_access && !pte_write(entry))
                        ret = hugetlb_cow(mm, vma, address, ptep, entry);
        spin_unlock(&mm->page_table_lock);
        mutex_unlock(&hugetlb_instantiation_mutex);

int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
                        unsigned long *position, int *length, int i,
        unsigned long pfn_offset;
        unsigned long vaddr = *position;
        int remainder = *length;

        spin_lock(&mm->page_table_lock);
        while (vaddr < vma->vm_end && remainder) {
                /*
                 * Some archs (sparc64, sh*) have multiple pte_ts to
                 * each hugepage. We have to make sure we get the
                 * first, for the page indexing below to work.
                 */
                pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

                if (!pte || pte_none(*pte) || (write && !pte_write(*pte))) {
                        spin_unlock(&mm->page_table_lock);
                        ret = hugetlb_fault(mm, vma, vaddr, write);
                        spin_lock(&mm->page_table_lock);
                        if (!(ret & VM_FAULT_ERROR))

                pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
                page = pte_page(*pte);
                        pages[i] = page + pfn_offset;

                if (vaddr < vma->vm_end && remainder &&
                    pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
                        /*
                         * We use pfn_offset to avoid touching the pageframes
                         * of this compound page.
                         */
        spin_unlock(&mm->page_table_lock);
        *length = remainder;
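/*
 * Note on pfn_offset above (illustrative, architecture-dependent numbers):
 * it selects which base page within the huge page backs 'vaddr'.  With
 * 2 MiB huge pages, a vaddr 0x7000 bytes into the huge page gives
 * pfn_offset 7, so pages[i] points at the eighth base page of the
 * compound page.
 */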
void hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
        struct mm_struct *mm = vma->vm_mm;
        unsigned long start = address;

        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);

        spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
        spin_lock(&mm->page_table_lock);
        for (; address < end; address += HPAGE_SIZE) {
                ptep = huge_pte_offset(mm, address);
                if (huge_pmd_unshare(mm, &address, ptep))
                if (!pte_none(*ptep)) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(pte_modify(pte, newprot));
                        set_huge_pte_at(mm, address, ptep, pte);
        spin_unlock(&mm->page_table_lock);
        spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

        flush_tlb_range(vma, start, end);

struct file_region {
        struct list_head link;

static long region_add(struct list_head *head, long f, long t)
        struct file_region *rg, *nrg, *trg;

        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)

        /* Round our left edge to the current segment if it encloses us. */

        /* Check for and consume any regions we now overlap with. */
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)

                /* If this area reaches higher, then extend our area to
                 * include it completely. If this is not the first area
                 * which we intend to reuse, free it. */
                        list_del(&rg->link);

static long region_chg(struct list_head *head, long f, long t)
        struct file_region *rg, *nrg;

        /* Locate the region we are before or in. */
        list_for_each_entry(rg, head, link)

        /* If we are below the current region then a new region is required.
         * Subtle: allocate a new region at the position but make it zero
         * size such that we can guarantee to record the reservation. */
        if (&rg->link == head || t < rg->from) {
                nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
                INIT_LIST_HEAD(&nrg->link);
                list_add(&nrg->link, rg->link.prev);

        /* Round our left edge to the current segment if it encloses us. */

        /* Check for and consume any regions we now overlap with. */
        list_for_each_entry(rg, rg->link.prev, link) {
                if (&rg->link == head)

                /* We overlap with this area; if it extends further than
                 * us then we must extend ourselves. Account for its
                 * existing reservation. */
                chg -= rg->to - rg->from;

static long region_truncate(struct list_head *head, long end)
        struct file_region *rg, *trg;

        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
        if (&rg->link == head)

        /* If we are in the middle of a region then adjust it. */
        if (end > rg->from) {
                rg = list_entry(rg->link.next, typeof(*rg), link);

        /* Drop any remaining regions. */
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                chg += rg->to - rg->from;
                list_del(&rg->link);
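/*
 * Worked example of the reservation map (illustration only): with a single
 * existing region [0, 3), region_chg(head, 2, 6) returns 3, because the
 * pages at offsets 3, 4 and 5 are not yet reserved.  A subsequent
 * region_add(head, 2, 6) merges the ranges into one region [0, 6).
 * region_truncate(head, 4) then trims that region back to [0, 4) and
 * returns 2, the number of reservations released.
 */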
static int hugetlb_acct_memory(long delta)
        spin_lock(&hugetlb_lock);
        /*
         * When cpuset is configured, it breaks the strict hugetlb page
         * reservation as the accounting is done on a global variable. Such
         * a reservation is completely rubbish in the presence of cpusets
         * because it is not checked against page availability for the
         * current cpuset: the application can still be OOM-killed by the
         * kernel for lack of free hugetlb pages in the cpuset the task is
         * in. Enforcing strict accounting with cpusets is almost impossible
         * (or too ugly) because cpusets are so fluid that tasks and memory
         * nodes can be moved between them dynamically.
         *
         * Changing the semantics of shared hugetlb mappings under cpusets is
         * undesirable. However, in order to preserve some of those semantics,
         * we fall back to checking against the current free page availability
         * as a best attempt, hopefully minimizing the impact of the semantic
         * change that cpusets introduce.
         */
                if (gather_surplus_pages(delta) < 0)

                if (delta > cpuset_mems_nr(free_huge_pages_node)) {
                        return_unused_surplus_pages(delta);

                return_unused_surplus_pages((unsigned long) -delta);

        spin_unlock(&hugetlb_lock);

int hugetlb_reserve_pages(struct inode *inode, long from, long to)
        chg = region_chg(&inode->i_mapping->private_list, from, to);

        if (hugetlb_get_quota(inode->i_mapping, chg))
        ret = hugetlb_acct_memory(chg);
                hugetlb_put_quota(inode->i_mapping, chg);
        region_add(&inode->i_mapping->private_list, from, to);
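/*
 * Illustration of the flow above (hypothetical numbers): an mmap() of four
 * huge pages over a hole in the file makes region_chg() report chg = 4, so
 * four units of hugetlbfs quota and four huge pages of reservation are
 * charged before region_add() records [from, to) in the mapping's
 * private_list.
 */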
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
        long chg = region_truncate(&inode->i_mapping->private_list, offset);

        spin_lock(&inode->i_lock);
        inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed;
        spin_unlock(&inode->i_lock);

        hugetlb_put_quota(inode->i_mapping, (chg - freed));
        hugetlb_acct_memory(-(chg - freed));