#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
+#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/io.h>
#include <linux/hugetlb.h>
#include "internal.h"
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
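+/* huge pages handed out by the bootmem allocator, registered with their hstate during init */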
+__initdata LIST_HEAD(huge_boot_pages);
+
/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
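+/* size requested via "default_hugepagesz=", 0 if unset */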
+static unsigned long __initdata default_hstate_size;
#define for_each_hstate(h) \
for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
}
/* Returns true if the VMA has associated reserve pages */
-static int vma_has_private_reserves(struct vm_area_struct *vma)
+static int vma_has_reserves(struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_SHARED)
- return 0;
- if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER))
- return 0;
- return 1;
+ return 1;
+ if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+ return 1;
+ return 0;
}
static void clear_huge_page(struct page *page,
* have no page reserves. This check ensures that reservations are
* not "stolen". The child may still get SIGKILLed
*/
- if (!vma_has_private_reserves(vma) &&
+ if (!vma_has_reserves(vma) &&
h->free_huge_pages - h->resv_huge_pages == 0)
return NULL;
huge_page_order(h));
if (page) {
if (arch_prepare_hugepage(page)) {
- __free_pages(page, HUGETLB_PAGE_ORDER);
+ __free_pages(page, huge_page_order(h));
return NULL;
}
prep_new_huge_page(h, page, nid);
__GFP_REPEAT|__GFP_NOWARN,
huge_page_order(h));
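+ /* back out now, outside hugetlb_lock, if the architecture rejects the page */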
+ if (page && arch_prepare_hugepage(page)) {
+ __free_pages(page, huge_page_order(h));
+ return NULL;
+ }
+
spin_lock(&hugetlb_lock);
if (page) {
/*
return page;
}
-static __initdata LIST_HEAD(huge_boot_pages);
-
-struct huge_bootmem_page {
- struct list_head list;
- struct hstate *hstate;
-};
-
-static int __init alloc_bootmem_huge_page(struct hstate *h)
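+/* Can be overridden by architectures */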
+__attribute__((weak)) int alloc_bootmem_huge_page(struct hstate *h)
{
struct huge_bootmem_page *m;
int nr_nodes = nodes_weight(node_online_map);
}
}
-#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count)
{
static int __init hugetlb_init(void)
{
- BUILD_BUG_ON(HPAGE_SHIFT == 0);
+ /* Some platforms decide whether they support huge pages at boot
+ * time. On those, such as powerpc, HPAGE_SHIFT is set to 0 when
+ * there is no such support.
+ */
+ if (HPAGE_SHIFT == 0)
+ return 0;
- if (!size_to_hstate(HPAGE_SIZE)) {
- hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
- parsed_hstate->max_huge_pages = default_hstate_max_huge_pages;
+ if (!size_to_hstate(default_hstate_size)) {
+ default_hstate_size = HPAGE_SIZE;
+ if (!size_to_hstate(default_hstate_size))
+ hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
}
- default_hstate_idx = size_to_hstate(HPAGE_SIZE) - hstates;
+ default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
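+ /* a "hugepages=" that appeared before any "hugepagesz=" sizes the default hstate */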
+ if (default_hstate_max_huge_pages)
+ default_hstate.max_huge_pages = default_hstate_max_huge_pages;
hugetlb_init_hstates();
parsed_hstate = h;
}
-static int __init hugetlb_setup(char *s)
+static int __init hugetlb_nrpages_setup(char *s)
{
unsigned long *mhp;
static unsigned long *last_mhp;
return 1;
}
-__setup("hugepages=", hugetlb_setup);
+__setup("hugepages=", hugetlb_nrpages_setup);
+
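+/*
+ * "default_hugepagesz=<size>" selects which hstate backs the default
+ * pool (e.g. "default_hugepagesz=64K hugepages=128" where a 64K hstate
+ * exists); hugetlb_init() falls back to HPAGE_SIZE for unknown sizes.
+ */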
+static int __init hugetlb_default_setup(char *s)
+{
+ default_hstate_size = memparse(s, &s);
+ return 1;
+}
+__setup("default_hugepagesz=", hugetlb_default_setup);
static unsigned int cpuset_mems_nr(unsigned int *array)
{
return nr;
}
+#ifdef CONFIG_SYSCTL
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
struct file *file, void __user *buffer,
size_t *length, loff_t *ppos)
kref_put(&reservations->refs, resv_map_release);
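+ /* release the quota taken by hugetlb_reserve_pages() along with the unused reservation */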
- if (reserve)
+ if (reserve) {
hugetlb_acct_memory(h, -reserve);
+ hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
+ }
}
}
BUG_ON(start & ~huge_page_mask(h));
BUG_ON(end & ~huge_page_mask(h));
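+ /* notify secondary MMUs (e.g. KVM) before the huge PTEs disappear */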
+ mmu_notifier_invalidate_range_start(mm, start, end);
spin_lock(&mm->page_table_lock);
for (address = start; address < end; address += sz) {
ptep = huge_pte_offset(mm, address);
}
spin_unlock(&mm->page_table_lock);
flush_tlb_range(vma, start, end);
+ mmu_notifier_invalidate_range_end(mm, start, end);
list_for_each_entry_safe(page, tmp, &page_list, lru) {
list_del(&page->lru);
put_page(page);
lock_page(page);
}
+ /*
+ * If we are going to COW a private mapping later, we examine the
+ * pending reservations for this page now. This will ensure that
+ * any allocations necessary to record that reservation occur outside
+ * the spinlock.
+ */
+ if (write_access && !(vma->vm_flags & VM_SHARED))
+ if (vma_needs_reservation(h, vma, address) < 0) {
+ ret = VM_FAULT_OOM;
+ goto backout_unlocked;
+ }
+
spin_lock(&mm->page_table_lock);
size = i_size_read(mapping->host) >> huge_page_shift(h);
if (idx >= size)
backout:
spin_unlock(&mm->page_table_lock);
+backout_unlocked:
unlock_page(page);
put_page(page);
goto out;
pte_t *ptep;
pte_t entry;
int ret;
+ struct page *pagecache_page = NULL;
static DEFINE_MUTEX(hugetlb_instantiation_mutex);
struct hstate *h = hstate_vma(vma);
entry = huge_ptep_get(ptep);
if (huge_pte_none(entry)) {
ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
- mutex_unlock(&hugetlb_instantiation_mutex);
- return ret;
+ goto out_unlock;
}
ret = 0;
+ /*
+ * If we are going to COW the mapping later, we examine the pending
+ * reservations for this page now. This will ensure that any
+ * allocations necessary to record that reservation occur outside the
+ * spinlock. For private mappings, we also lookup the pagecache
+ * page now as it is used to determine if a reservation has been
+ * consumed.
+ */
+ if (write_access && !pte_write(entry)) {
+ if (vma_needs_reservation(h, vma, address) < 0) {
+ ret = VM_FAULT_OOM;
+ goto out_unlock;
+ }
+
+ if (!(vma->vm_flags & VM_SHARED))
+ pagecache_page = hugetlbfs_pagecache_page(h,
+ vma, address);
+ }
+
spin_lock(&mm->page_table_lock);
/* Check for a racing update before calling hugetlb_cow */
if (likely(pte_same(entry, huge_ptep_get(ptep))))
- if (write_access && !pte_write(entry)) {
- struct page *page;
- page = hugetlbfs_pagecache_page(h, vma, address);
- ret = hugetlb_cow(mm, vma, address, ptep, entry, page);
- if (page) {
- unlock_page(page);
- put_page(page);
- }
- }
+ if (write_access && !pte_write(entry))
+ ret = hugetlb_cow(mm, vma, address, ptep, entry,
+ pagecache_page);
spin_unlock(&mm->page_table_lock);
+
+ if (pagecache_page) {
+ unlock_page(pagecache_page);
+ put_page(pagecache_page);
+ }
+
+out_unlock:
mutex_unlock(&hugetlb_instantiation_mutex);
return ret;
}
+/* Can be overridden by architectures */
+__attribute__((weak)) struct page *
+follow_huge_pud(struct mm_struct *mm, unsigned long address,
+ pud_t *pud, int write)
+{
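+ /* generic stub: architectures mapping huge pages at the PUD level must override this */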
+ BUG();
+ return NULL;
+}
+
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page **pages, struct vm_area_struct **vmas,
unsigned long *position, int *length, int i,