#include <linux/prio_tree.h>
#include <linux/fs.h>
#include <linux/mutex.h>
+#include <linux/debug_locks.h>
struct mempolicy;
struct anon_vma;
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
-#include <asm/atomic.h>
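/* The struct page that lies @n pages after @page in a physically contiguous range. */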
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
#define VM_GROWSUP 0x00000200
-#define VM_SHM 0x00000000 /* Means nothing: delete it later */
#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
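+	/*
+	 * Optional per-mapping hook for migrating the vma's pages from the
+	 * @from node mask to the @to node mask.
+	 */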
+	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
+			const nodemask_t *to, unsigned long flags);
#endif
};
}
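/* Drop a reference on @page; the page is freed once its count reaches zero. */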
void put_page(struct page *page);
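+/* Release every page on @pages, a list linked through page->lru. */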
+void put_pages_list(struct list_head *pages);
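/* Split a non-compound higher-order page into 1 << @order separately freeable 0-order pages. */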
void split_page(struct page *page, unsigned int order);
set_page_section(page, pfn_to_section_nr(pfn));
}
+/*
+ * Some inline functions in vmstat.h depend on page_zone()
+ */
+#include <linux/vmstat.h>
+
#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
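+/* Translate a vma's VM_* protection flags into the pgprot_t used for its page table entries. */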
+pgprot_t vm_get_page_prot(unsigned long vm_flags);
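/* Find the vma covering @addr, growing an adjacent stack vma down to reach it if possible. */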
struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
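/* Map a vmalloc()/vmap() address back to its backing struct page or page frame number. */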
struct page *vmalloc_to_page(void *addr);
unsigned long vmalloc_to_pfn(void *addr);
kernel_map_pages(struct page *page, int numpages, int enable)
{
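	/* Pages are being freed (!enable): warn if a still-held lock sits inside them. */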
	if (!PageHighMem(page) && !enable)
-		mutex_debug_check_no_locks_freed(page_address(page),
-						numpages * PAGE_SIZE);
+		debug_check_no_locks_freed(page_address(page),
+						numpages * PAGE_SIZE);
}
#endif
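/* Sysctl toggle for address-space layout randomisation of userspace mappings. */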
extern int randomize_va_space;
#endif
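+/* Architecture-supplied name for a special mapping (e.g. "[vdso]"), or NULL. */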
+const char *arch_vma_name(struct vm_area_struct *vma);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */