X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fvmscan.c;h=a740778f688da277d35eb0d15a60168257dbe42e;hb=aa7c3a119a6202b758a669cde9e3829ced64a13f;hp=7da846960d8a7b2b4773758adb6ede0c7d2bc712;hpb=753ee728964e5afb80c17659cc6c3a6fd0a42fe0;p=linux-2.6

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7da846960d..a740778f68 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -822,6 +822,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
 	unsigned long nr_active;
 	unsigned long nr_inactive;
 
+	atomic_inc(&zone->reclaim_in_progress);
+
 	/*
 	 * Add one to `nr_to_scan' just to make sure that the kernel will
 	 * slowly sift through the active list.
@@ -861,6 +863,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
 	}
 
 	throttle_vm_writeout();
+
+	atomic_dec(&zone->reclaim_in_progress);
 }
 
 /*
@@ -890,7 +894,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
 		if (zone->present_pages == 0)
 			continue;
 
-		if (!cpuset_zone_allowed(zone))
+		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
 			continue;
 
 		zone->temp_priority = sc->priority;
@@ -917,8 +921,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-int try_to_free_pages(struct zone **zones,
-		unsigned int gfp_mask, unsigned int order)
+int try_to_free_pages(struct zone **zones, unsigned int gfp_mask)
 {
 	int priority;
 	int ret = 0;
@@ -937,7 +940,7 @@ int try_to_free_pages(struct zone **zones,
 	for (i = 0; zones[i] != NULL; i++) {
 		struct zone *zone = zones[i];
 
-		if (!cpuset_zone_allowed(zone))
+		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
 			continue;
 
 		zone->temp_priority = DEF_PRIORITY;
@@ -971,7 +974,7 @@ int try_to_free_pages(struct zone **zones,
 		 * writeout.  So in laptop mode, write out the whole world.
 		 */
 		if (total_scanned > sc.swap_cluster_max + sc.swap_cluster_max/2) {
-			wakeup_bdflush(laptop_mode ? 0 : total_scanned);
+			wakeup_pdflush(laptop_mode ? 0 : total_scanned);
 			sc.may_writepage = 1;
 		}
 
@@ -983,7 +986,7 @@ out:
 	for (i = 0; zones[i] != 0; i++) {
 		struct zone *zone = zones[i];
 
-		if (!cpuset_zone_allowed(zone))
+		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
 			continue;
 
 		zone->prev_priority = zone->temp_priority;
@@ -1111,7 +1114,9 @@ scan:
 			sc.nr_reclaimed = 0;
 			sc.priority = priority;
 			sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX;
+			atomic_inc(&zone->reclaim_in_progress);
 			shrink_zone(zone, &sc);
+			atomic_dec(&zone->reclaim_in_progress);
 			reclaim_state->reclaimed_slab = 0;
 			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
 						lru_pages);
@@ -1213,8 +1218,8 @@ static int kswapd(void *p)
 	order = 0;
 	for ( ; ; ) {
 		unsigned long new_order;
-		if (current->flags & PF_FREEZE)
-			refrigerator(PF_FREEZE);
+
+		try_to_freeze();
 
 		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
 		new_order = pgdat->kswapd_max_order;
@@ -1251,7 +1256,7 @@ void wakeup_kswapd(struct zone *zone, int order)
 		return;
 	if (pgdat->kswapd_max_order < order)
 		pgdat->kswapd_max_order = order;
-	if (!cpuset_zone_allowed(zone))
+	if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
 		return;
 	if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
 		return;
@@ -1354,9 +1359,14 @@ int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
 	else
 		sc.swap_cluster_max = SWAP_CLUSTER_MAX;
 
+	/* Don't reclaim the zone if there are other reclaimers active */
+	if (atomic_read(&zone->reclaim_in_progress) > 0)
+		goto out;
+
 	shrink_zone(zone, &sc);
 	total_reclaimed = sc.nr_reclaimed;
 
+ out:
 	return total_reclaimed;
 }
 
@@ -1366,6 +1376,9 @@ asmlinkage long sys_set_zone_reclaim(unsigned int node, unsigned int zone,
 	struct zone *z;
 	int i;
 
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
 	if (node >= MAX_NUMNODES || !node_online(node))
 		return -EINVAL;
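
The functional core of this patch is the per-zone reclaim_in_progress counter:
shrink_zone() and the balance_pgdat() scan loop bracket their scanning with
atomic_inc()/atomic_dec(), and zone_reclaim() backs off when the counter is
nonzero, so a direct reclaimer does not pile onto a zone that kswapd (or
another task) is already reclaiming from.

Below is a minimal userspace sketch of that guard pattern, using C11 atomics
in place of the kernel's atomic_t. The fake_zone, shrink_fake_zone() and
fake_zone_reclaim() names are illustrative stand-ins, not kernel symbols.

	#include <stdatomic.h>
	#include <stdio.h>

	struct fake_zone {
		atomic_int reclaim_in_progress;	/* stands in for zone->reclaim_in_progress */
	};

	/* Analogue of shrink_zone(): advertise that a scan is running for
	 * its whole duration, as the atomic_inc()/atomic_dec() pair in the
	 * patch does. */
	static void shrink_fake_zone(struct fake_zone *z)
	{
		atomic_fetch_add(&z->reclaim_in_progress, 1);
		/* ... scan the LRU lists and reclaim pages here ... */
		atomic_fetch_sub(&z->reclaim_in_progress, 1);
	}

	/* Analogue of zone_reclaim(): skip the zone entirely if someone
	 * else is already reclaiming from it, rather than doing redundant
	 * work. */
	static int fake_zone_reclaim(struct fake_zone *z)
	{
		if (atomic_load(&z->reclaim_in_progress) > 0)
			return 0;	/* another reclaimer is active; back off */
		shrink_fake_zone(z);
		return 1;
	}

	int main(void)
	{
		struct fake_zone z = { 0 };

		printf("reclaimed: %d\n", fake_zone_reclaim(&z));
		return 0;
	}

Note that the check-then-increment is deliberately racy: two reclaimers can
both pass the read before either increments the counter. That is acceptable
here because the counter only suppresses redundant scanning; it is an
optimization hint, not a lock providing mutual exclusion.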