X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Flinux%2Fgfp.h;h=b414be387180f1db79c90977921296640e15f5e9;hb=13c48c490208d9e70d8d66d56f96c5054db69af7;hp=164be9da3c1b5157c32a7d5125eb1b684267abec;hpb=07ce198a1eb3431d04a6d59ea9fb7b71f21e33b1;p=linux-2.6

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 164be9da3c..b414be3871 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -40,9 +40,9 @@ struct vm_area_struct;
 #define __GFP_FS	((__force gfp_t)0x80u)	/* Can call down to low-level FS? */
 #define __GFP_COLD	((__force gfp_t)0x100u)	/* Cache-cold page required */
 #define __GFP_NOWARN	((__force gfp_t)0x200u)	/* Suppress page allocation failure warning */
-#define __GFP_REPEAT	((__force gfp_t)0x400u)	/* Retry the allocation.  Might fail */
-#define __GFP_NOFAIL	((__force gfp_t)0x800u)	/* Retry for ever.  Cannot fail */
-#define __GFP_NORETRY	((__force gfp_t)0x1000u)/* Do not retry.  Might fail */
+#define __GFP_REPEAT	((__force gfp_t)0x400u)	/* See above */
+#define __GFP_NOFAIL	((__force gfp_t)0x800u)	/* See above */
+#define __GFP_NORETRY	((__force gfp_t)0x1000u)/* See above */
 #define __GFP_COMP	((__force gfp_t)0x4000u)/* Add compound page metadata */
 #define __GFP_ZERO	((__force gfp_t)0x8000u)/* Return zeroed page on success */
 #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
@@ -119,35 +119,22 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
 
 static inline enum zone_type gfp_zone(gfp_t flags)
 {
-	int base = 0;
-
-#ifdef CONFIG_NUMA
-	if (flags & __GFP_THISNODE)
-		base = MAX_NR_ZONES;
-#endif
-
 #ifdef CONFIG_ZONE_DMA
 	if (flags & __GFP_DMA)
-		return base + ZONE_DMA;
+		return ZONE_DMA;
 #endif
 #ifdef CONFIG_ZONE_DMA32
 	if (flags & __GFP_DMA32)
-		return base + ZONE_DMA32;
+		return ZONE_DMA32;
 #endif
 	if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
 			(__GFP_HIGHMEM | __GFP_MOVABLE))
-		return base + ZONE_MOVABLE;
+		return ZONE_MOVABLE;
 #ifdef CONFIG_HIGHMEM
 	if (flags & __GFP_HIGHMEM)
-		return base + ZONE_HIGHMEM;
+		return ZONE_HIGHMEM;
 #endif
-	return base + ZONE_NORMAL;
-}
-
-static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
-{
-	BUG_ON((gfp & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
-	return (gfp & ~(GFP_MOVABLE_MASK)) | migrate_flags;
+	return ZONE_NORMAL;
 }
 
 /*
@@ -157,13 +144,27 @@ static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
  * virtual kernel addresses to the allocated page(s).
  */
 
+static inline int gfp_zonelist(gfp_t flags)
+{
+	if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
+		return 1;
+
+	return 0;
+}
+
 /*
  * We get the zone list from the current node and the gfp_mask.
  * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
+ * There are two zonelists per node, one for all zones with memory and
+ * one containing just zones from the node the zonelist belongs to.
  *
  * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
  * optimized to &contig_page_data at compile-time.
  */
+static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
+{
+	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
+}
 
 #ifndef HAVE_ARCH_FREE_PAGE
 static inline void arch_free_page(struct page *page, int order) { }
@@ -174,6 +175,10 @@ static inline void arch_alloc_page(struct page *page, int order) { }
 
 extern struct page *__alloc_pages(gfp_t, unsigned int, struct zonelist *);
 
+extern struct page *
+__alloc_pages_nodemask(gfp_t, unsigned int,
+		struct zonelist *, nodemask_t *nodemask);
+
 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 						unsigned int order)
 {
@@ -184,8 +189,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 	if (nid < 0)
 		nid = numa_node_id();
 
-	return __alloc_pages(gfp_mask, order,
-		NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask));
+	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
 }
 
 #ifdef CONFIG_NUMA
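For readers following the interface change: the patch stops encoding __GFP_THISNODE as a MAX_NR_ZONES offset inside gfp_zone() and instead gives every node two zonelists. gfp_zonelist() selects index 1 (the zonelist containing only that node's zones) when __GFP_THISNODE is set and index 0 (the full fallback list) otherwise, and node_zonelist() resolves a (nid, gfp flags) pair to the right list. The sketch below is a hypothetical caller, not part of the patch; alloc_page_thisnode_example() is an invented name, and it relies only on __alloc_pages(), node_zonelist(), GFP_KERNEL and __GFP_THISNODE, all of which the header shown here provides.

#include <linux/gfp.h>

/*
 * Hypothetical helper (illustration only, not from this patch): allocate a
 * single order-0 page that must come from node `nid`.  Because
 * __GFP_THISNODE is set, gfp_zonelist() returns 1 and node_zonelist()
 * hands back the node-local zonelist rather than the fallback list at
 * index 0, so the allocation cannot spill onto other nodes.
 */
static struct page *alloc_page_thisnode_example(int nid)
{
	gfp_t gfp_mask = GFP_KERNEL | __GFP_THISNODE;

	return __alloc_pages(gfp_mask, 0, node_zonelist(nid, gfp_mask));
}

The newly declared __alloc_pages_nodemask() presumably exists so that callers (such as the memory-policy code) can additionally restrict which nodes of the chosen zonelist are eligible by passing a nodemask_t, with __alloc_pages() remaining the unrestricted case.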