X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Flinux%2Fmm.h;h=1850cf8bad64f9e133c3b7f498815acb9ef23e40;hb=69e05944af39fc6c97b09380c8721e38433bd828;hp=75e9f0724997e631806ceb7dd3733dfd7b2d8cc1;hpb=0bdd340c092b0936f78a54bdbd3927463ed4fca3;p=linux-2.6

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 75e9f07249..1850cf8bad 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -286,34 +286,28 @@ struct page {
  *
  * Also, many kernel routines increase the page count before a critical
  * routine so they can be sure the page doesn't go away from under them.
- *
- * Since 2.6.6 (approx), a free page has ->_count = -1. This is so that we
- * can use atomic_add_negative(-1, page->_count) to detect when the page
- * becomes free and so that we can also use atomic_inc_and_test to atomically
- * detect when we just tried to grab a ref on a page which some other CPU has
- * already deemed to be freeable.
- *
- * NO code should make assumptions about this internal detail! Use the provided
- * macros which retain the old rules: page_count(page) == 0 is a free page.
  */
 
 /*
  * Drop a ref, return true if the logical refcount fell to zero (the page has
  * no users)
  */
-#define put_page_testzero(p)				\
-	({						\
-		BUG_ON(atomic_read(&(p)->_count) == -1);\
-		atomic_add_negative(-1, &(p)->_count);	\
-	})
+static inline int put_page_testzero(struct page *page)
+{
+	BUG_ON(atomic_read(&page->_count) == 0);
+	return atomic_dec_and_test(&page->_count);
+}
 
 /*
- * Grab a ref, return true if the page previously had a logical refcount of
- * zero. ie: returns true if we just grabbed an already-deemed-to-be-free page
+ * Try to grab a ref unless the page has a refcount of zero, return false if
+ * that is the case.
  */
-#define get_page_testone(p)	atomic_inc_and_test(&(p)->_count)
+static inline int get_page_unless_zero(struct page *page)
+{
+	return atomic_inc_not_zero(&page->_count);
+}
 
-#define set_page_count(p,v)	atomic_set(&(p)->_count, (v) - 1)
+#define set_page_count(p,v)	atomic_set(&(p)->_count, (v))
 #define __put_page(p)		atomic_dec(&(p)->_count)
 
 extern void FASTCALL(__page_cache_release(struct page *));
@@ -322,7 +316,7 @@ static inline int page_count(struct page *page)
 {
 	if (PageCompound(page))
 		page = (struct page *)page_private(page);
-	return atomic_read(&page->_count) + 1;
+	return atomic_read(&page->_count);
 }
 
 static inline void get_page(struct page *page)
@@ -334,6 +328,12 @@ static inline void get_page(struct page *page)
 
 void put_page(struct page *page);
 
+#ifdef CONFIG_MMU
+void split_page(struct page *page, unsigned int order);
+#else
+static inline void split_page(struct page *page, unsigned int order) {}
+#endif
+
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of
@@ -1046,10 +1046,16 @@ int in_gate_area_no_task(unsigned long addr);
 
 int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
 					void __user *, size_t *, loff_t *);
-int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 			unsigned long lru_pages);
 void drop_pagecache(void);
 void drop_slab(void);
 
+#ifndef CONFIG_MMU
+#define randomize_va_space 0
+#else
+extern int randomize_va_space;
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
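
The refcounting hunks above drop the old offset-by-one scheme: a free page now
has _count == 0, page_count() reads the counter directly, set_page_count()
loses its "(v) - 1" adjustment, and the speculative-grab primitive becomes
get_page_unless_zero(). A minimal caller sketch follows; it is not part of the
patch, and try_to_pin()/release_pin() are hypothetical names. It assumes a
page that another CPU may be freeing concurrently.

#include <linux/mm.h>

/*
 * Hypothetical helper, not from the patch: take a reference on a page
 * that may be freed under us.  With _count == 0 meaning "free",
 * atomic_inc_not_zero() simply refuses to resurrect a dead page,
 * replacing the old get_page_testone() after-the-fact check.
 */
static struct page *try_to_pin(struct page *page)
{
	if (!get_page_unless_zero(page))
		return NULL;		/* lost the race: page was free */
	return page;
}

/*
 * Hypothetical counterpart: put_page_testzero() is now a plain
 * atomic_dec_and_test(), so a true return means the caller dropped
 * the last reference; put_page() performs that test and hands the
 * page back to the allocator for normal users.
 */
static void release_pin(struct page *page)
{
	put_page(page);
}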
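
The new split_page() lets a caller break a high-order allocation into
independently refcounted order-0 pages (it is a stub on nommu kernels). A
hedged usage sketch, not from the patch; alloc_split_block() is a hypothetical
name:

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical example: allocate a physically contiguous block, then
 * split it so each of the 1 << order constituent pages carries its
 * own _count and can be released one at a time with __free_page().
 */
static struct page *alloc_split_block(unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL, order);

	if (!page)
		return NULL;
	split_page(page, order);
	return page;
}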
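
shrink_slab()'s return type widens from int to unsigned long so the count of
shrunk slab objects cannot overflow on large 64-bit machines; callers should
store the result in a matching type. A sketch under that assumption
(reclaim_metadata() is a hypothetical wrapper):

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical wrapper: the widened unsigned long return now matches
 * the unsigned long scanned/lru_pages arguments.
 */
static unsigned long reclaim_metadata(unsigned long scanned,
				      unsigned long lru_pages)
{
	return shrink_slab(scanned, GFP_KERNEL, lru_pages);
}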
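
Finally, randomize_va_space becomes a compile-time 0 on nommu kernels, where
address-space randomization is meaningless, and an ordinary tunable integer
(/proc/sys/kernel/randomize_va_space) elsewhere, so nommu branches fold away
at compile time. A hypothetical consumer sketch, not from the patch:

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Hypothetical check combining the per-process PF_RANDOMIZE flag with
 * the global knob; on nommu the whole expression constant-folds to 0
 * because randomize_va_space is #defined to 0 there.
 */
static int should_randomize_layout(struct task_struct *tsk)
{
	return (tsk->flags & PF_RANDOMIZE) && randomize_va_space;
}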