[ALSA] semaphore -> mutex (ISA part)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 83c651f25188e6671307e6886611563d4d17aa71..498ff8778fb6d6c39ef8fb8472f32d03cc33952f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3,6 +3,7 @@
 
 #include <linux/sched.h>
 #include <linux/errno.h>
+#include <linux/capability.h>
 
 #ifdef __KERNEL__
 
@@ -13,6 +14,7 @@
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
 #include <linux/fs.h>
+#include <linux/mutex.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -223,24 +225,27 @@ struct page {
                                         * & limit reverse map searches.
                                         */
        union {
-               unsigned long private;  /* Mapping-private opaque data:
-                                        * usually used for buffer_heads
-                                        * if PagePrivate set; used for
-                                        * swp_entry_t if PageSwapCache
-                                        * When page is free, this indicates
-                                        * order in the buddy system.
-                                        */
+           struct {
+               unsigned long private;          /* Mapping-private opaque data:
+                                                * usually used for buffer_heads
+                                                * if PagePrivate set; used for
+                                                * swp_entry_t if PageSwapCache.
+                                                * When page is free, this
+                                                * indicates order in the buddy
+                                                * system.
+                                                */
+               struct address_space *mapping;  /* If low bit clear, points to
+                                                * inode address_space, or NULL.
+                                                * If page mapped as anonymous
+                                                * memory, low bit is set, and
+                                                * it points to anon_vma object:
+                                                * see PAGE_MAPPING_ANON below.
+                                                */
+           };
 #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
-               spinlock_t ptl;
+           spinlock_t ptl;
 #endif
-       } u;
-       struct address_space *mapping;  /* If low bit clear, points to
-                                        * inode address_space, or NULL.
-                                        * If page mapped as anonymous
-                                        * memory, low bit is set, and
-                                        * it points to anon_vma object:
-                                        * see PAGE_MAPPING_ANON below.
-                                        */
+       };
        pgoff_t index;                  /* Our offset within mapping. */
        struct list_head lru;           /* Pageout list, eg. active_list
                                         * protected by zone->lru_lock !
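
The interesting part of this reshuffle is that mapping moves inside the union but keeps its low-bit encoding: anon_vma pointers are word-aligned, so bit 0 is free to flag an anonymous mapping (PAGE_MAPPING_ANON, defined further down in this header). A minimal user-space model of that tagging, with hypothetical set_anon()/page_anon_vma() helpers standing in for the real rmap code:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_MAPPING_ANON 1UL		/* low bit marks an anon_vma pointer */

struct anon_vma { int dummy; };
struct page { void *mapping; };		/* stand-in for the union member */

/* hypothetical helper: store an anon_vma with the tag bit set */
static void set_anon(struct page *page, struct anon_vma *av)
{
	page->mapping = (void *)((uintptr_t)av | PAGE_MAPPING_ANON);
}

static int page_is_anon(const struct page *page)
{
	return ((uintptr_t)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/* hypothetical helper: mask the tag off to recover the pointer */
static struct anon_vma *page_anon_vma(const struct page *page)
{
	return (struct anon_vma *)((uintptr_t)page->mapping & ~PAGE_MAPPING_ANON);
}

int main(void)
{
	struct anon_vma av;
	struct page page;

	set_anon(&page, &av);
	assert(page_is_anon(&page));
	assert(page_anon_vma(&page) == &av);
	printf("low-bit anon tagging decodes correctly\n");
	return 0;
}
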
@@ -261,8 +266,8 @@ struct page {
 #endif /* WANT_PAGE_VIRTUAL */
 };
 
-#define page_private(page)             ((page)->u.private)
-#define set_page_private(page, v)      ((page)->u.private = (v))
+#define page_private(page)             ((page)->private)
+#define set_page_private(page, v)      ((page)->private = (v))
 
 /*
  * FIXME: take this include out, include page-flags.h in
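
With the union now anonymous, the accessors reach private directly instead of through the old u member. This leans on GCC's anonymous struct/union extension (later standardized in C11). A small user-space model of the layout and the rewritten macros, with a plain int standing in for spinlock_t:

#include <assert.h>
#include <stdio.h>

struct page {
	union {
		struct {
			unsigned long private;
			void *mapping;
		};
		int ptl;	/* stand-in for spinlock_t */
	};
};

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

int main(void)
{
	struct page page = { .private = 0 };

	set_page_private(&page, 3);	/* e.g. buddy order of a free block */
	assert(page_private(&page) == 3);
	printf("private = %lu\n", page_private(&page));
	return 0;
}
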
@@ -298,7 +303,7 @@ struct page {
  */
 #define put_page_testzero(p)                           \
        ({                                              \
-               BUG_ON(page_count(p) == 0);             \
+               BUG_ON(atomic_read(&(p)->_count) == -1);\
                atomic_add_negative(-1, &(p)->_count);  \
        })
 
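
_count is stored biased by one: a page with N users keeps N - 1 in _count, so the final put is the decrement that goes negative. The rewritten BUG_ON tests the same "count is already zero" condition as before, just expressed on the raw atomic (page_count(p) == 0 is exactly atomic_read(&(p)->_count) == -1). A user-space model of the bias, with a plain int in place of atomic_t:

#include <assert.h>
#include <stdio.h>

static int _count;			/* stand-in for atomic_t _count */

#define set_page_count(v)	(_count = (v) - 1)
#define page_count()		(_count + 1)
/* models atomic_add_negative(-1, &_count): decrement, report "went negative" */
#define put_page_testzero()	(--_count < 0)

int main(void)
{
	set_page_count(2);		/* two references; stores 1 */
	assert(page_count() == 2);
	assert(!put_page_testzero());	/* 2 -> 1: not the last put */
	assert(put_page_testzero());	/* 1 -> 0: last put, page can be freed */
	printf("biased refcount behaves as expected\n");
	return 0;
}
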
@@ -308,7 +313,7 @@ struct page {
  */
 #define get_page_testone(p)    atomic_inc_and_test(&(p)->_count)
 
-#define set_page_count(p,v)    atomic_set(&(p)->_count, v - 1)
+#define set_page_count(p,v)    atomic_set(&(p)->_count, (v) - 1)
 #define __put_page(p)          atomic_dec(&(p)->_count)
 
 extern void FASTCALL(__page_cache_release(struct page *));
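
The set_page_count() change is a classic macro-hygiene fix: without parentheses around v, an argument containing a lower-precedence operator regroups under the trailing - 1. A two-line demonstration:

#include <assert.h>

#define BAD_DEC(v)	(v - 1)		/* old form: argument unparenthesized */
#define GOOD_DEC(v)	((v) - 1)	/* new form: safe for any expression */

int main(void)
{
	/* BAD_DEC(1 | 2) expands to (1 | 2 - 1) == (1 | 1) == 1, not 2 */
	assert(BAD_DEC(1 | 2) == 1);
	assert(GOOD_DEC(1 | 2) == 2);
	return 0;
}
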
@@ -507,7 +512,7 @@ static inline void set_page_links(struct page *page, unsigned long zone,
 extern struct page *mem_map;
 #endif
 
-static inline void *lowmem_page_address(struct page *page)
+static __always_inline void *lowmem_page_address(struct page *page)
 {
        return __va(page_to_pfn(page) << PAGE_SHIFT);
 }
@@ -815,7 +820,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
  * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
  * When freeing, reset page->mapping so free_pages_check won't complain.
  */
-#define __pte_lockptr(page)    &((page)->u.ptl)
+#define __pte_lockptr(page)    &((page)->ptl)
 #define pte_lock_init(_page)   do {                                    \
        spin_lock_init(__pte_lockptr(_page));                           \
 } while (0)
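
Since ptl now overlays mapping (not just private) in the anonymous union, a page-table page carries lock state where free_pages_check() expects a mapping pointer. That is what the "reset page->mapping" remark above alludes to; the companion teardown macro elsewhere in this header does it, roughly:

/* reset the overlaid mapping before the page goes back to the allocator */
#define pte_lock_deinit(page)	((page)->mapping = NULL)
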
@@ -1021,6 +1026,9 @@ static inline void vm_stat_account(struct mm_struct *mm,
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable)
 {
+       if (!PageHighMem(page) && !enable)
+               mutex_debug_check_no_locks_freed(page_address(page),
+                                                numpages * PAGE_SIZE);
 }
 #endif
 
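
Even without CONFIG_DEBUG_PAGEALLOC, the stub now gives the mutex debugging code a hook at page-free time: enable == 0 means the pages are being unmapped/freed, and highmem pages are skipped because page_address() only yields a directly usable address for lowmem here. A simplified, hypothetical sketch of the caller side (free_the_block() is an invented name, modeled on how the page allocator invokes the hook when freeing):

/* Hypothetical, simplified caller: the allocator marks a block as
 * "unmapped" (enable == 0) before handing it back to the buddy lists,
 * which is the moment the held-mutex check above fires. */
static void free_the_block(struct page *page, unsigned int order)
{
	kernel_map_pages(page, 1 << order, 0);
	/* ... merge with buddies and queue on the free lists ... */
}
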
@@ -1043,5 +1051,11 @@ int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 void drop_pagecache(void);
 void drop_slab(void);
 
+#ifndef CONFIG_MMU
+#define randomize_va_space 0
+#else
+extern int randomize_va_space;
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
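
Finally, randomize_va_space is the integer behind /proc/sys/kernel/randomize_va_space. On MMU-less configurations there is no address-space layout to randomize, so the knob becomes a compile-time 0 and any code guarded by it drops out entirely. A sketch of consumer-side code, modeled loosely on the i386 arch_align_stack() of this era (a sketch under that assumption, not verbatim kernel source):

/* Sketch: jitter the initial stack pointer when randomization is on.
 * With !CONFIG_MMU, randomize_va_space is the constant 0 and the
 * compiler eliminates both the branch and the get_random_int() call. */
unsigned long arch_align_stack(unsigned long sp)
{
	if (randomize_va_space)
		sp -= get_random_int() % 8192;	/* up to 8KB of jitter */
	return sp & ~0xf;			/* keep 16-byte alignment */
}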