X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fpage_alloc.c;h=b06a9636d971d2a3e89dcab022047b24753d0479;hb=babf68de5848f7e4b97c9c4a2d5e30598b825387;hp=7ee675ad101eb579375050fb8ff503f6fbc4cfd1;hpb=29751f6991e845f7d002a6ae520bf996b38c8dcd;p=linux-2.6

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7ee675ad10..b06a9636d9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -329,7 +329,7 @@ static inline void free_pages_check(const char *function, struct page *page)
 			1 << PG_writeback )))
 		bad_page(function, page);
 	if (PageDirty(page))
-		ClearPageDirty(page);
+		__ClearPageDirty(page);
 }
 
 /*
@@ -897,12 +897,6 @@ rebalance:
 	cond_resched();
 
 	if (likely(did_some_progress)) {
-		/*
-		 * Go through the zonelist yet one more time, keep
-		 * very high watermark here, this is only to catch
-		 * a parallel oom killing, we must fail if we're still
-		 * under heavy pressure.
-		 */
 		for (i = 0; (z = zones[i]) != NULL; i++) {
 			if (!zone_watermark_ok(z, order, z->pages_min,
 					       classzone_idx, can_try_harder,
@@ -936,7 +930,7 @@ rebalance:
 		goto got_pg;
 	}
 
-	out_of_memory(gfp_mask);
+	out_of_memory(gfp_mask, order);
 	goto restart;
 }
 
@@ -1067,20 +1061,19 @@ unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
 
 static unsigned int nr_free_zone_pages(int offset)
 {
-	pg_data_t *pgdat;
+	/* Just pick one node, since fallback list is circular */
+	pg_data_t *pgdat = NODE_DATA(numa_node_id());
 	unsigned int sum = 0;
 
-	for_each_pgdat(pgdat) {
-		struct zonelist *zonelist = pgdat->node_zonelists + offset;
-		struct zone **zonep = zonelist->zones;
-		struct zone *zone;
+	struct zonelist *zonelist = pgdat->node_zonelists + offset;
+	struct zone **zonep = zonelist->zones;
+	struct zone *zone;
 
-		for (zone = *zonep++; zone; zone = *zonep++) {
-			unsigned long size = zone->present_pages;
-			unsigned long high = zone->pages_high;
-			if (size > high)
-				sum += size - high;
-		}
+	for (zone = *zonep++; zone; zone = *zonep++) {
+		unsigned long size = zone->present_pages;
+		unsigned long high = zone->pages_high;
+		if (size > high)
+			sum += size - high;
 	}
 
 	return sum;
@@ -1137,19 +1130,20 @@ EXPORT_SYMBOL(nr_pagecache);
 DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
 #endif
 
-void __get_page_state(struct page_state *ret, int nr)
+void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
 {
 	int cpu = 0;
 
 	memset(ret, 0, sizeof(*ret));
+	cpus_and(*cpumask, *cpumask, cpu_online_map);
 
-	cpu = first_cpu(cpu_online_map);
+	cpu = first_cpu(*cpumask);
 	while (cpu < NR_CPUS) {
 		unsigned long *in, *out, off;
 
 		in = (unsigned long *)&per_cpu(page_states, cpu);
 
-		cpu = next_cpu(cpu, cpu_online_map);
+		cpu = next_cpu(cpu, *cpumask);
 
 		if (cpu < NR_CPUS)
 			prefetch(&per_cpu(page_states, cpu));
@@ -1160,19 +1154,33 @@ void __get_page_state(struct page_state *ret, int nr)
 	}
 }
 
+void get_page_state_node(struct page_state *ret, int node)
+{
+	int nr;
+	cpumask_t mask = node_to_cpumask(node);
+
+	nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
+	nr /= sizeof(unsigned long);
+
+	__get_page_state(ret, nr+1, &mask);
+}
+
 void get_page_state(struct page_state *ret)
 {
 	int nr;
+	cpumask_t mask = CPU_MASK_ALL;
 
 	nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
 	nr /= sizeof(unsigned long);
 
-	__get_page_state(ret, nr + 1);
+	__get_page_state(ret, nr + 1, &mask);
 }
 
 void get_full_page_state(struct page_state *ret)
 {
-	__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long));
+	cpumask_t mask = CPU_MASK_ALL;
+
+	__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
 }
 
 unsigned long __read_page_state(unsigned long offset)
@@ -1667,9 +1675,8 @@ void __init memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 #ifdef WANT_PAGE_VIRTUAL
 	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
 	if (!is_highmem_idx(zone))
-		set_page_address(page, __va(start_pfn << PAGE_SHIFT));
+		set_page_address(page, __va(pfn << PAGE_SHIFT));
 #endif
-		start_pfn++;
 	}
 }
 
@@ -1868,7 +1875,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long *zholes_size)
 {
 	unsigned long i, j;
-	const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
 	int cpu, nid = pgdat->node_id;
 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
 
@@ -1918,7 +1924,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 		zone->nr_scan_inactive = 0;
 		zone->nr_active = 0;
 		zone->nr_inactive = 0;
-		atomic_set(&zone->reclaim_in_progress, -1);
+		atomic_set(&zone->reclaim_in_progress, 0);
 		if (!size)
 			continue;
 
@@ -1941,9 +1947,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 		zone->zone_mem_map = pfn_to_page(zone_start_pfn);
 		zone->zone_start_pfn = zone_start_pfn;
 
-		if ((zone_start_pfn) & (zone_required_alignment-1))
-			printk(KERN_CRIT "BUG: wrong zone alignment, it will crash\n");
-
 		memmap_init(size, nid, j, zone_start_pfn);
 
 		zonetable_add(zone, nid, j, zone_start_pfn, size);