diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index b262f47961..3ea68cd3b6 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -24,6 +24,14 @@
 #endif
 #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
 
+/*
+ * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
+ * costly to service. That is the boundary between allocation orders
+ * which should coalesce naturally under reasonable reclaim pressure
+ * and those which will not.
+ */
+#define PAGE_ALLOC_COSTLY_ORDER 3
+
 struct free_area {
 	struct list_head	free_list;
 	unsigned long		nr_free;
@@ -47,15 +55,20 @@ struct zone_padding {
 #endif
 
 enum zone_stat_item {
+	/* First 128 byte cacheline (assuming 64 bit words) */
+	NR_FREE_PAGES,
+	NR_INACTIVE,
+	NR_ACTIVE,
 	NR_ANON_PAGES,	/* Mapped anonymous pages */
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
 	NR_FILE_PAGES,
-	NR_SLAB_RECLAIMABLE,
-	NR_SLAB_UNRECLAIMABLE,
-	NR_PAGETABLE,	/* used for pagetables */
 	NR_FILE_DIRTY,
 	NR_WRITEBACK,
+	/* Second 128 byte cacheline */
+	NR_SLAB_RECLAIMABLE,
+	NR_SLAB_UNRECLAIMABLE,
+	NR_PAGETABLE,	/* used for pagetables */
 	NR_UNSTABLE_NFS,	/* NFS unstable pages */
 	NR_BOUNCE,
 	NR_VMSCAN_WRITE,
@@ -78,6 +91,9 @@ struct per_cpu_pages {
 
 struct per_cpu_pageset {
 	struct per_cpu_pages pcp[2];	/* 0: hot.  1: cold */
+#ifdef CONFIG_NUMA
+	s8 expire;
+#endif
 #ifdef CONFIG_SMP
 	s8 stat_threshold;
 	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
@@ -91,6 +107,7 @@ struct per_cpu_pageset {
 #endif
 
 enum zone_type {
+#ifdef CONFIG_ZONE_DMA
 	/*
 	 * ZONE_DMA is used when there are devices that are not able
 	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
@@ -103,7 +120,6 @@ enum zone_type {
 	 * ---------------------------
 	 * parisc, ia64, sparc	<4G
 	 * s390			<2G
-	 * arm26		<48M
 	 * arm			Various
 	 * alpha		Unlimited or 0-16MB.
	 *
@@ -111,6 +127,7 @@
 	 * <16M.
 	 */
 	ZONE_DMA,
+#endif
 #ifdef CONFIG_ZONE_DMA32
 	/*
 	 * x86_64 needs two ZONE_DMAs because it supports devices that are
@@ -136,6 +153,7 @@ enum zone_type {
 	 */
 	ZONE_HIGHMEM,
 #endif
+	ZONE_MOVABLE,
 	MAX_NR_ZONES
 };
 
@@ -147,15 +165,31 @@ enum zone_type {
  * match the requested limits. See gfp_zone() in include/linux/gfp.h
 */
 
-#if !defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_HIGHMEM)
+/*
+ * Count the active zones. Note that the behaviour of defined(X)
+ * outside of #if and friends is not well defined, so __ZONE_COUNT is
+ * #undef'd below; it exists only to work out how many shift bits we need.
+ */
+#define __ZONE_COUNT (			\
+	  defined(CONFIG_ZONE_DMA)	\
+	+ defined(CONFIG_ZONE_DMA32)	\
+	+ 1				\
+	+ defined(CONFIG_HIGHMEM)	\
+	+ 1				\
+)
+#if __ZONE_COUNT < 2
+#define ZONES_SHIFT 0
+#elif __ZONE_COUNT <= 2
 #define ZONES_SHIFT 1
-#else
+#elif __ZONE_COUNT <= 4
 #define ZONES_SHIFT 2
+#else
+#error ZONES_SHIFT -- too many zones configured, adjust calculation
 #endif
+#undef __ZONE_COUNT
 
 struct zone {
 	/* Fields commonly accessed by the page allocator */
-	unsigned long		free_pages;
 	unsigned long		pages_min, pages_low, pages_high;
 	/*
 	 * We don't know if the memory that we're going to allocate will be freeable
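The __ZONE_COUNT trick above leans on defined(CONFIG_x) evaluating to 1 or 0 when the macro is later expanded inside an #if expression: the macro counts DMA + DMA32 + NORMAL + HIGHMEM + MOVABLE, and the #if ladder picks the smallest shift that can encode that many zone indices. A stand-alone sketch of the same calculation (illustrative only, not part of the patch; the HAS_* macros are stand-ins for the kconfig symbols):

#include <stdio.h>

/* Stand-ins for the kconfig symbols; a real build gets these from .config. */
#define HAS_ZONE_DMA	1
#define HAS_ZONE_DMA32	0
#define HAS_HIGHMEM	1

/* DMA? + DMA32? + NORMAL + HIGHMEM? + MOVABLE, mirroring __ZONE_COUNT */
#define ZONE_COUNT (HAS_ZONE_DMA + HAS_ZONE_DMA32 + 1 + HAS_HIGHMEM + 1)

int main(void)
{
	int shift = 0;
	int n = ZONE_COUNT - 1;	/* highest zone index to encode */

	while (n > 0) {		/* bits needed for indices 0..ZONE_COUNT-1 */
		shift++;
		n >>= 1;
	}
	/* Prints: 4 zones need 2 zone bits (matching ZONES_SHIFT == 2) */
	printf("%d zones need %d zone bits\n", ZONE_COUNT, shift);
	return 0;
}

Dropping HIGHMEM gives a count of 3 and still 2 bits, matching the __ZONE_COUNT <= 4 branch; only a single-zone configuration reaches ZONES_SHIFT 0.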
@@ -197,8 +231,6 @@ struct zone {
 	struct list_head	inactive_list;
 	unsigned long		nr_scan_active;
 	unsigned long		nr_scan_inactive;
-	unsigned long		nr_active;
-	unsigned long		nr_inactive;
 	unsigned long		pages_scanned;	   /* since last reclaim */
 	int			all_unreclaimable; /* All pages pinned */
 
@@ -442,8 +474,6 @@ typedef struct pglist_data {
 
 #include <linux/memory_hotplug.h>
 
-void __get_zone_counts(unsigned long *active, unsigned long *inactive,
-			unsigned long *free, struct pglist_data *pgdat);
 void get_zone_counts(unsigned long *active, unsigned long *inactive,
 			unsigned long *free);
 void build_all_zonelists(void);
@@ -478,10 +508,22 @@ static inline int populated_zone(struct zone *zone)
 	return (!!zone->present_pages);
 }
 
+extern int movable_zone;
+
+static inline int zone_movable_is_highmem(void)
+{
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+	return movable_zone == ZONE_HIGHMEM;
+#else
+	return 0;
+#endif
+}
+
 static inline int is_highmem_idx(enum zone_type idx)
 {
 #ifdef CONFIG_HIGHMEM
-	return (idx == ZONE_HIGHMEM);
+	return (idx == ZONE_HIGHMEM ||
+		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
 #else
 	return 0;
 #endif
@@ -501,7 +543,9 @@ static inline int is_normal_idx(enum zone_type idx)
 static inline int is_highmem(struct zone *zone)
 {
 #ifdef CONFIG_HIGHMEM
-	return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
+	int zone_idx = zone - zone->zone_pgdat->node_zones;
+	return zone_idx == ZONE_HIGHMEM ||
+		(zone_idx == ZONE_MOVABLE && zone_movable_is_highmem());
 #else
 	return 0;
 #endif
@@ -523,7 +567,11 @@ static inline int is_dma32(struct zone *zone)
 
 static inline int is_dma(struct zone *zone)
 {
+#ifdef CONFIG_ZONE_DMA
 	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
+#else
+	return 0;
+#endif
 }
 
 /* These two functions are used to setup the per zone pages min values */
@@ -541,6 +589,11 @@ int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
 			struct file *, void __user *, size_t *, loff_t *);
 
+extern int numa_zonelist_order_handler(struct ctl_table *, int,
+			struct file *, void __user *, size_t *, loff_t *);
+extern char numa_zonelist_order[];
+#define NUMA_ZONELIST_ORDER_LEN 16	/* string buffer size */
+
 #include <linux/topology.h>
 /* Returns the number of the current Node. */
 #ifndef numa_node_id
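With zone->free_pages, nr_active and nr_inactive gone, their consumers are expected to read the NR_FREE_PAGES/NR_ACTIVE/NR_INACTIVE items added to enum zone_stat_item in the first hunk. A minimal sketch of the new access pattern, assuming the zone_page_state() accessor from <linux/vmstat.h>; report_zone_counts() itself is hypothetical:

#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/vmstat.h>

/*
 * Hypothetical helper: per-zone counts are now read through the
 * vmstat items rather than dedicated struct zone fields.
 */
static void report_zone_counts(struct zone *zone)
{
	unsigned long free     = zone_page_state(zone, NR_FREE_PAGES);
	unsigned long active   = zone_page_state(zone, NR_ACTIVE);
	unsigned long inactive = zone_page_state(zone, NR_INACTIVE);

	printk(KERN_DEBUG "%s: free %lu active %lu inactive %lu\n",
	       zone->name, free, active, inactive);
}

Moving these counters into the vmstat machinery lets them share the per-cpu differential batching that the other zone statistics already use, which is why the enum is also reordered around cacheline boundaries above.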
@@ -762,6 +815,18 @@ void sparse_init(void);
 void memory_present(int nid, unsigned long start, unsigned long end);
 unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
 
+/*
+ * If it is possible to have holes within a MAX_ORDER_NR_PAGES block,
+ * then we need to check pfn validity within that block.
+ * pfn_valid_within() should be used in this case; we optimise this away
+ * when there are no holes within a MAX_ORDER_NR_PAGES block.
+ */
+#ifdef CONFIG_HOLES_IN_ZONE
+#define pfn_valid_within(pfn) pfn_valid(pfn)
+#else
+#define pfn_valid_within(pfn) (1)
+#endif
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MMZONE_H */
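A usage sketch for pfn_valid_within() (the helper below is hypothetical, not from the patch): a walker that has already validated the first pfn of a MAX_ORDER-aligned block only needs the cheap per-pfn check inside that block, and when CONFIG_HOLES_IN_ZONE is not set the check constant-folds to (1) so the loop carries no extra cost:

#include <linux/mmzone.h>

/*
 * Illustrative only: count present pages in one MAX_ORDER-aligned
 * block whose first pfn has already passed a full pfn_valid() check.
 */
static unsigned long count_block_pages(unsigned long start_pfn)
{
	unsigned long pfn, present = 0;

	for (pfn = start_pfn; pfn < start_pfn + MAX_ORDER_NR_PAGES; pfn++) {
		if (!pfn_valid_within(pfn))
			continue;	/* hole inside the block */
		present++;
	}
	return present;
}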