X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fpage_alloc.c;h=43f757fcf30f6b1dc61cffd2c824e791db28c418;hb=5b4db0c2f25925fcfc17fa7233b7b90dc023d207;hp=332f46d70d2d75b6750777fd0ce9af572a6820a4;hpb=835c134ec4dd755e5c4470af566db226d1e96742;p=linux-2.6 diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 332f46d70d..43f757fcf3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -41,6 +42,7 @@ #include #include #include +#include #include #include @@ -67,6 +69,10 @@ unsigned long totalreserve_pages __read_mostly; long nr_swap_pages; int percpu_pagelist_fraction; +#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE +int pageblock_order __read_mostly; +#endif + static void __free_pages_ok(struct page *page, unsigned int order); /* @@ -145,7 +151,7 @@ static unsigned long __meminitdata dma_reserve; static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES]; #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ unsigned long __initdata required_kernelcore; - unsigned long __initdata required_movablecore; + static unsigned long __initdata required_movablecore; unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ @@ -158,6 +164,14 @@ int nr_node_ids __read_mostly = MAX_NUMNODES; EXPORT_SYMBOL(nr_node_ids); #endif +int page_group_by_mobility_disabled __read_mostly; + +static void set_pageblock_migratetype(struct page *page, int migratetype) +{ + set_pageblock_flags_group(page, (unsigned long)migratetype, + PB_migrate, PB_migrate_end); +} + #ifdef CONFIG_DEBUG_VM static int page_outside_zone_boundaries(struct zone *zone, struct page *page) { @@ -301,16 +315,6 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) clear_highpage(page + i); } -/* - * function for dealing with page's order in buddy system. - * zone->lock is already acquired when we use these. - * So, we don't need atomic page->flags operations here. - */ -static inline unsigned long page_order(struct page *page) -{ - return page_private(page); -} - static inline void set_page_order(struct page *page, int order) { set_page_private(page, order); @@ -412,6 +416,7 @@ static inline void __free_one_page(struct page *page, { unsigned long page_idx; int order_size = 1 << order; + int migratetype = get_pageblock_migratetype(page); if (unlikely(PageCompound(page))) destroy_compound_page(page, order); @@ -424,7 +429,6 @@ static inline void __free_one_page(struct page *page, __mod_zone_page_state(zone, NR_FREE_PAGES, order_size); while (order < MAX_ORDER-1) { unsigned long combined_idx; - struct free_area *area; struct page *buddy; buddy = __page_find_buddy(page, page_idx, order); @@ -432,8 +436,7 @@ static inline void __free_one_page(struct page *page, break; /* Move the buddy up one level. 
*/ list_del(&buddy->lru); - area = zone->free_area + order; - area->nr_free--; + zone->free_area[order].nr_free--; rmv_page_order(buddy); combined_idx = __find_combined_index(page_idx, order); page = page + (combined_idx - page_idx); @@ -441,7 +444,8 @@ static inline void __free_one_page(struct page *page, order++; } set_page_order(page, order); - list_add(&page->lru, &zone->free_area[order].free_list); + list_add(&page->lru, + &zone->free_area[order].free_list[migratetype]); zone->free_area[order].nr_free++; } @@ -486,7 +490,7 @@ static void free_pages_bulk(struct zone *zone, int count, struct list_head *list, int order) { spin_lock(&zone->lock); - zone->all_unreclaimable = 0; + zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); zone->pages_scanned = 0; while (count--) { struct page *page; @@ -503,7 +507,7 @@ static void free_pages_bulk(struct zone *zone, int count, static void free_one_page(struct zone *zone, struct page *page, int order) { spin_lock(&zone->lock); - zone->all_unreclaimable = 0; + zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); zone->pages_scanned = 0; __free_one_page(page, zone, order); spin_unlock(&zone->lock); @@ -575,7 +579,8 @@ void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order) * -- wli */ static inline void expand(struct zone *zone, struct page *page, - int low, int high, struct free_area *area) + int low, int high, struct free_area *area, + int migratetype) { unsigned long size = 1 << high; @@ -584,7 +589,7 @@ static inline void expand(struct zone *zone, struct page *page, high--; size >>= 1; VM_BUG_ON(bad_range(zone, &page[size])); - list_add(&page[size].lru, &area->free_list); + list_add(&page[size].lru, &area->free_list[migratetype]); area->nr_free++; set_page_order(&page[size], high); } @@ -636,49 +641,235 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) return 0; } -/* - * Do the hard work of removing an element from the buddy allocator. - * Call me with the zone->lock already held. 
+/* + * Go through the free lists for the given migratetype and remove + * the smallest available page from the freelists */ -static struct page *__rmqueue(struct zone *zone, unsigned int order) +static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, + int migratetype) { - struct free_area * area; unsigned int current_order; + struct free_area * area; struct page *page; + /* Find a page of the appropriate size in the preferred list */ for (current_order = order; current_order < MAX_ORDER; ++current_order) { - area = zone->free_area + current_order; - if (list_empty(&area->free_list)) + area = &(zone->free_area[current_order]); + if (list_empty(&area->free_list[migratetype])) continue; - page = list_entry(area->free_list.next, struct page, lru); + page = list_entry(area->free_list[migratetype].next, + struct page, lru); list_del(&page->lru); rmv_page_order(page); area->nr_free--; __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order)); - expand(zone, page, order, current_order, area); + expand(zone, page, order, current_order, area, migratetype); return page; } return NULL; } + +/* + * This array describes the order lists are fallen back to when + * the free lists for the desirable migrate type are depleted + */ +static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = { + [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, + [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, + [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE }, + [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */ +}; + +/* + * Move the free pages in a range to the free lists of the requested type. + * Note that start_page and end_pages are not aligned on a pageblock + * boundary. If alignment is required, use move_freepages_block() + */ +int move_freepages(struct zone *zone, + struct page *start_page, struct page *end_page, + int migratetype) +{ + struct page *page; + unsigned long order; + int pages_moved = 0; + +#ifndef CONFIG_HOLES_IN_ZONE + /* + * page_zone is not safe to call in this context when + * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant + * anyway as we check zone boundaries in move_freepages_block(). 
+ * Remove at a later date when no bug reports exist related to + * grouping pages by mobility + */ + BUG_ON(page_zone(start_page) != page_zone(end_page)); +#endif + + for (page = start_page; page <= end_page;) { + if (!pfn_valid_within(page_to_pfn(page))) { + page++; + continue; + } + + if (!PageBuddy(page)) { + page++; + continue; + } + + order = page_order(page); + list_del(&page->lru); + list_add(&page->lru, + &zone->free_area[order].free_list[migratetype]); + page += 1 << order; + pages_moved += 1 << order; + } + + return pages_moved; +} + +int move_freepages_block(struct zone *zone, struct page *page, int migratetype) +{ + unsigned long start_pfn, end_pfn; + struct page *start_page, *end_page; + + start_pfn = page_to_pfn(page); + start_pfn = start_pfn & ~(pageblock_nr_pages-1); + start_page = pfn_to_page(start_pfn); + end_page = start_page + pageblock_nr_pages - 1; + end_pfn = start_pfn + pageblock_nr_pages - 1; + + /* Do not cross zone boundaries */ + if (start_pfn < zone->zone_start_pfn) + start_page = page; + if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages) + return 0; + + return move_freepages(zone, start_page, end_page, migratetype); +} + +/* Return the page with the lowest PFN in the list */ +static struct page *min_page(struct list_head *list) +{ + unsigned long min_pfn = -1UL; + struct page *min_page = NULL, *page;; + + list_for_each_entry(page, list, lru) { + unsigned long pfn = page_to_pfn(page); + if (pfn < min_pfn) { + min_pfn = pfn; + min_page = page; + } + } + + return min_page; +} + +/* Remove an element from the buddy allocator from the fallback list */ +static struct page *__rmqueue_fallback(struct zone *zone, int order, + int start_migratetype) +{ + struct free_area * area; + int current_order; + struct page *page; + int migratetype, i; + + /* Find the largest possible block of pages in the other list */ + for (current_order = MAX_ORDER-1; current_order >= order; + --current_order) { + for (i = 0; i < MIGRATE_TYPES - 1; i++) { + migratetype = fallbacks[start_migratetype][i]; + + /* MIGRATE_RESERVE handled later if necessary */ + if (migratetype == MIGRATE_RESERVE) + continue; + + area = &(zone->free_area[current_order]); + if (list_empty(&area->free_list[migratetype])) + continue; + + /* Bias kernel allocations towards low pfns */ + page = list_entry(area->free_list[migratetype].next, + struct page, lru); + if (unlikely(start_migratetype != MIGRATE_MOVABLE)) + page = min_page(&area->free_list[migratetype]); + area->nr_free--; + + /* + * If breaking a large block of pages, move all free + * pages to the preferred allocation list. 
If falling + * back for a reclaimable kernel allocation, be more + * agressive about taking ownership of free pages + */ + if (unlikely(current_order >= (pageblock_order >> 1)) || + start_migratetype == MIGRATE_RECLAIMABLE) { + unsigned long pages; + pages = move_freepages_block(zone, page, + start_migratetype); + + /* Claim the whole block if over half of it is free */ + if (pages >= (1 << (pageblock_order-1))) + set_pageblock_migratetype(page, + start_migratetype); + + migratetype = start_migratetype; + } + + /* Remove the page from the freelists */ + list_del(&page->lru); + rmv_page_order(page); + __mod_zone_page_state(zone, NR_FREE_PAGES, + -(1UL << order)); + + if (current_order == pageblock_order) + set_pageblock_migratetype(page, + start_migratetype); + + expand(zone, page, order, current_order, area, migratetype); + return page; + } + } + + /* Use MIGRATE_RESERVE rather than fail an allocation */ + return __rmqueue_smallest(zone, order, MIGRATE_RESERVE); +} + +/* + * Do the hard work of removing an element from the buddy allocator. + * Call me with the zone->lock already held. + */ +static struct page *__rmqueue(struct zone *zone, unsigned int order, + int migratetype) +{ + struct page *page; + + page = __rmqueue_smallest(zone, order, migratetype); + + if (unlikely(!page)) + page = __rmqueue_fallback(zone, order, migratetype); + + return page; +} + /* * Obtain a specified number of elements from the buddy allocator, all under * a single hold of the lock, for efficiency. Add them to the supplied list. * Returns the number of new pages which were placed at *list. */ static int rmqueue_bulk(struct zone *zone, unsigned int order, - unsigned long count, struct list_head *list) + unsigned long count, struct list_head *list, + int migratetype) { int i; spin_lock(&zone->lock); for (i = 0; i < count; ++i) { - struct page *page = __rmqueue(zone, order); + struct page *page = __rmqueue(zone, order, migratetype); if (unlikely(page == NULL)) break; - list_add_tail(&page->lru, list); + list_add(&page->lru, list); + set_page_private(page, migratetype); } spin_unlock(&zone->lock); return i; @@ -740,7 +931,7 @@ void mark_free_pages(struct zone *zone) { unsigned long pfn, max_zone_pfn; unsigned long flags; - int order; + int order, t; struct list_head *curr; if (!zone->spanned_pages) @@ -757,17 +948,18 @@ void mark_free_pages(struct zone *zone) swsusp_unset_page_free(page); } - for (order = MAX_ORDER - 1; order >= 0; --order) - list_for_each(curr, &zone->free_area[order].free_list) { + for_each_migratetype_order(order, t) { + list_for_each(curr, &zone->free_area[order].free_list[t]) { unsigned long i; pfn = page_to_pfn(list_entry(curr, struct page, lru)); for (i = 0; i < (1UL << order); i++) swsusp_set_page_free(pfn_to_page(pfn + i)); } - + } spin_unlock_irqrestore(&zone->lock, flags); } +#endif /* CONFIG_PM */ /* * Spill all of this CPU's per-cpu pages back into the buddy allocator. 
@@ -780,7 +972,25 @@ void drain_local_pages(void) __drain_pages(smp_processor_id()); local_irq_restore(flags); } -#endif /* CONFIG_HIBERNATION */ + +void smp_drain_local_pages(void *arg) +{ + drain_local_pages(); +} + +/* + * Spill all the per-cpu pages from all CPUs back into the buddy allocator + */ +void drain_all_local_pages(void) +{ + unsigned long flags; + + local_irq_save(flags); + __drain_pages(smp_processor_id()); + local_irq_restore(flags); + + smp_call_function(smp_drain_local_pages, NULL, 0, 1); +} /* * Free a 0-order page @@ -805,6 +1015,7 @@ static void fastcall free_hot_cold_page(struct page *page, int cold) local_irq_save(flags); __count_vm_event(PGFREE); list_add(&page->lru, &pcp->list); + set_page_private(page, get_pageblock_migratetype(page)); pcp->count++; if (pcp->count >= pcp->high) { free_pages_bulk(zone, pcp->batch, &pcp->list, 0); @@ -854,6 +1065,7 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist, struct page *page; int cold = !!(gfp_flags & __GFP_COLD); int cpu; + int migratetype = allocflags_to_migratetype(gfp_flags); again: cpu = get_cpu(); @@ -864,16 +1076,28 @@ again: local_irq_save(flags); if (!pcp->count) { pcp->count = rmqueue_bulk(zone, 0, - pcp->batch, &pcp->list); + pcp->batch, &pcp->list, migratetype); if (unlikely(!pcp->count)) goto failed; } - page = list_entry(pcp->list.next, struct page, lru); + + /* Find a page of the appropriate migrate type */ + list_for_each_entry(page, &pcp->list, lru) + if (page_private(page) == migratetype) + break; + + /* Allocate more to the pcp list if necessary */ + if (unlikely(&page->lru == &pcp->list)) { + pcp->count += rmqueue_bulk(zone, 0, + pcp->batch, &pcp->list, migratetype); + page = list_entry(pcp->list.next, struct page, lru); + } + list_del(&page->lru); pcp->count--; } else { spin_lock_irqsave(&zone->lock, flags); - page = __rmqueue(zone, order); + page = __rmqueue(zone, order, migratetype); spin_unlock(&zone->lock); if (!page) goto failed; @@ -1354,12 +1578,20 @@ nofail_alloc: cond_resched(); + if (order != 0) + drain_all_local_pages(); + if (likely(did_some_progress)) { page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags); if (page) goto got_pg; } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { + if (!try_set_zone_oom(zonelist)) { + schedule_timeout_uninterruptible(1); + goto restart; + } + /* * Go through the zonelist yet one more time, keep * very high watermark here, this is only to catch @@ -1368,14 +1600,19 @@ nofail_alloc: */ page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET); - if (page) + if (page) { + clear_zonelist_oom(zonelist); goto got_pg; + } /* The OOM killer will not help higher order allocs so fail */ - if (order > PAGE_ALLOC_COSTLY_ORDER) + if (order > PAGE_ALLOC_COSTLY_ORDER) { + clear_zonelist_oom(zonelist); goto nopage; + } out_of_memory(zonelist, gfp_mask, order); + clear_zonelist_oom(zonelist); goto restart; } @@ -1624,7 +1861,7 @@ void show_free_areas(void) K(zone_page_state(zone, NR_INACTIVE)), K(zone->present_pages), zone->pages_scanned, - (zone->all_unreclaimable ? "yes" : "no") + (zone_is_all_unreclaimable(zone) ? "yes" : "no") ); printk("lowmem_reserve[]:"); for (i = 0; i < MAX_NR_ZONES; i++) @@ -2127,9 +2364,23 @@ void build_all_zonelists(void) /* cpuset refresh routine should be here */ } vm_total_pages = nr_free_pagecache_pages(); - printk("Built %i zonelists in %s order. 
Total pages: %ld\n", + /* + * Disable grouping by mobility if the number of pages in the + * system is too low to allow the mechanism to work. It would be + * more accurate, but expensive to check per-zone. This check is + * made on memory-hotadd so a system can start with mobility + * disabled and enable it later + */ + if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) + page_group_by_mobility_disabled = 1; + else + page_group_by_mobility_disabled = 0; + + printk("Built %i zonelists in %s order, mobility grouping %s. " + "Total pages: %ld\n", num_online_nodes(), zonelist_order_name[current_zonelist_order], + page_group_by_mobility_disabled ? "off" : "on", vm_total_pages); #ifdef CONFIG_NUMA printk("Policy zone: %s\n", zone_names[policy_zone]); @@ -2204,6 +2455,61 @@ static inline unsigned long wait_table_bits(unsigned long size) #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) +/* + * Mark a number of pageblocks as MIGRATE_RESERVE. The number + * of blocks reserved is based on zone->pages_min. The memory within the + * reserve will tend to store contiguous free pages. Setting min_free_kbytes + * higher will lead to a bigger reserve which will get freed as contiguous + * blocks as reclaim kicks in + */ +static void setup_zone_migrate_reserve(struct zone *zone) +{ + unsigned long start_pfn, pfn, end_pfn; + struct page *page; + unsigned long reserve, block_migratetype; + + /* Get the start pfn, end pfn and the number of blocks to reserve */ + start_pfn = zone->zone_start_pfn; + end_pfn = start_pfn + zone->spanned_pages; + reserve = roundup(zone->pages_min, pageblock_nr_pages) >> + pageblock_order; + + for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { + if (!pfn_valid(pfn)) + continue; + page = pfn_to_page(pfn); + + /* Blocks with reserved pages will never free, skip them. */ + if (PageReserved(page)) + continue; + + block_migratetype = get_pageblock_migratetype(page); + + /* If this block is reserved, account for it */ + if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) { + reserve--; + continue; + } + + /* Suitable for reserving if this block is movable */ + if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) { + set_pageblock_migratetype(page, MIGRATE_RESERVE); + move_freepages_block(zone, page, MIGRATE_RESERVE); + reserve--; + continue; + } + + /* + * If the reserve is met and this is a previous reserved block, + * take it back + */ + if (block_migratetype == MIGRATE_RESERVE) { + set_pageblock_migratetype(page, MIGRATE_MOVABLE); + move_freepages_block(zone, page, MIGRATE_MOVABLE); + } + } +} + /* * Initially all pages are reserved - free ones are freed * up by free_all_bootmem() once the early boot process is @@ -2233,6 +2539,19 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, init_page_count(page); reset_page_mapcount(page); SetPageReserved(page); + + /* + * Mark the block movable so that blocks are reserved for + * movable at startup. This will force kernel allocations + * to reserve their blocks rather than leaking throughout + * the address space during boot when many long-lived + * kernel allocations are made. Later some blocks near + * the start are marked MIGRATE_RESERVE by + * setup_zone_migrate_reserve() + */ + if ((pfn & (pageblock_nr_pages-1))) + set_pageblock_migratetype(page, MIGRATE_MOVABLE); + INIT_LIST_HEAD(&page->lru); #ifdef WANT_PAGE_VIRTUAL /* The shift won't overflow because ZONE_NORMAL is below 4G. 
*/ @@ -2245,9 +2564,9 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, static void __meminit zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, unsigned long size) { - int order; - for (order = 0; order < MAX_ORDER ; order++) { - INIT_LIST_HEAD(&zone->free_area[order].free_list); + int order, t; + for_each_migratetype_order(order, t) { + INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); zone->free_area[order].nr_free = 0; } } @@ -2934,8 +3253,8 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, #ifndef CONFIG_SPARSEMEM /* * Calculate the size of the zone->blockflags rounded to an unsigned long - * Start by making sure zonesize is a multiple of MAX_ORDER-1 by rounding up - * Then figure 1 NR_PAGEBLOCK_BITS worth of bits per MAX_ORDER-1, finally + * Start by making sure zonesize is a multiple of pageblock_order by rounding + * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally * round what is now in bits to nearest long in bits, then return it in * bytes. */ @@ -2943,8 +3262,8 @@ static unsigned long __init usemap_size(unsigned long zonesize) { unsigned long usemapsize; - usemapsize = roundup(zonesize, MAX_ORDER_NR_PAGES); - usemapsize = usemapsize >> (MAX_ORDER-1); + usemapsize = roundup(zonesize, pageblock_nr_pages); + usemapsize = usemapsize >> pageblock_order; usemapsize *= NR_PAGEBLOCK_BITS; usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); @@ -2966,6 +3285,27 @@ static void inline setup_usemap(struct pglist_data *pgdat, struct zone *zone, unsigned long zonesize) {} #endif /* CONFIG_SPARSEMEM */ +#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE +/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ +static inline void __init set_pageblock_order(unsigned int order) +{ + /* Check that pageblock_nr_pages has not already been setup */ + if (pageblock_order) + return; + + /* + * Assume the largest contiguous order of interest is a huge page. + * This value may be variable depending on boot parameters on IA64 + */ + pageblock_order = order; +} +#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ + +/* Defined this way to avoid accidently referencing HUGETLB_PAGE_ORDER */ +#define set_pageblock_order(x) do {} while (0) + +#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ + /* * Set up the zone data structures: * - mark all pages reserved @@ -3042,10 +3382,11 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat, zone->nr_scan_active = 0; zone->nr_scan_inactive = 0; zap_zone_vm_stats(zone); - atomic_set(&zone->reclaim_in_progress, 0); + zone->flags = 0; if (!size) continue; + set_pageblock_order(HUGETLB_PAGE_ORDER); setup_usemap(pgdat, zone, size); ret = init_currently_empty_zone(zone, zone_start_pfn, size, MEMMAP_EARLY); @@ -3305,7 +3646,7 @@ unsigned long __init find_max_pfn_with_active_regions(void) * Sum pages in active regions for movable zone. * Populate N_HIGH_MEMORY for calculating usable_nodes. 
*/ -unsigned long __init early_calculate_totalpages(void) +static unsigned long __init early_calculate_totalpages(void) { int i; unsigned long totalpages = 0; @@ -3766,6 +4107,7 @@ void setup_per_zone_pages_min(void) zone->pages_low = zone->pages_min + (tmp >> 2); zone->pages_high = zone->pages_min + (tmp >> 1); + setup_zone_migrate_reserve(zone); spin_unlock_irqrestore(&zone->lru_lock, flags); } @@ -4042,15 +4384,15 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) { #ifdef CONFIG_SPARSEMEM pfn &= (PAGES_PER_SECTION-1); - return (pfn >> (MAX_ORDER-1)) * NR_PAGEBLOCK_BITS; + return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; #else pfn = pfn - zone->zone_start_pfn; - return (pfn >> (MAX_ORDER-1)) * NR_PAGEBLOCK_BITS; + return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; #endif /* CONFIG_SPARSEMEM */ } /** - * get_pageblock_flags_group - Return the requested group of flags for the MAX_ORDER_NR_PAGES block of pages + * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages * @page: The page within the block of interest * @start_bitidx: The first bit of interest to retrieve * @end_bitidx: The last bit of interest @@ -4078,7 +4420,7 @@ unsigned long get_pageblock_flags_group(struct page *page, } /** - * set_pageblock_flags_group - Set the requested group of flags for a MAX_ORDER_NR_PAGES block of pages + * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages * @page: The page within the block of interest * @start_bitidx: The first bit of interest * @end_bitidx: The last bit of interest @@ -4103,3 +4445,93 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags, else __clear_bit(bitidx + start_bitidx, bitmap); } + +/* + * This is designed as sub function...plz see page_isolation.c also. + * set/clear page block's type to be ISOLATE. + * page allocater never alloc memory from ISOLATE block. + */ + +int set_migratetype_isolate(struct page *page) +{ + struct zone *zone; + unsigned long flags; + int ret = -EBUSY; + + zone = page_zone(page); + spin_lock_irqsave(&zone->lock, flags); + /* + * In future, more migrate types will be able to be isolation target. + */ + if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE) + goto out; + set_pageblock_migratetype(page, MIGRATE_ISOLATE); + move_freepages_block(zone, page, MIGRATE_ISOLATE); + ret = 0; +out: + spin_unlock_irqrestore(&zone->lock, flags); + if (!ret) + drain_all_local_pages(); + return ret; +} + +void unset_migratetype_isolate(struct page *page) +{ + struct zone *zone; + unsigned long flags; + zone = page_zone(page); + spin_lock_irqsave(&zone->lock, flags); + if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) + goto out; + set_pageblock_migratetype(page, MIGRATE_MOVABLE); + move_freepages_block(zone, page, MIGRATE_MOVABLE); +out: + spin_unlock_irqrestore(&zone->lock, flags); +} + +#ifdef CONFIG_MEMORY_HOTREMOVE +/* + * All pages in the range must be isolated before calling this. 
+ */ +void +__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) +{ + struct page *page; + struct zone *zone; + int order, i; + unsigned long pfn; + unsigned long flags; + /* find the first valid pfn */ + for (pfn = start_pfn; pfn < end_pfn; pfn++) + if (pfn_valid(pfn)) + break; + if (pfn == end_pfn) + return; + zone = page_zone(pfn_to_page(pfn)); + spin_lock_irqsave(&zone->lock, flags); + pfn = start_pfn; + while (pfn < end_pfn) { + if (!pfn_valid(pfn)) { + pfn++; + continue; + } + page = pfn_to_page(pfn); + BUG_ON(page_count(page)); + BUG_ON(!PageBuddy(page)); + order = page_order(page); +#ifdef CONFIG_DEBUG_VM + printk(KERN_INFO "remove from free list %lx %d %lx\n", + pfn, 1 << order, end_pfn); +#endif + list_del(&page->lru); + rmv_page_order(page); + zone->free_area[order].nr_free--; + __mod_zone_page_state(zone, NR_FREE_PAGES, + - (1UL << order)); + for (i = 0; i < (1 << order); i++) + SetPageReserved((page+i)); + pfn += (1 << order); + } + spin_unlock_irqrestore(&zone->lock, flags); +} +#endif