X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fvmscan.c;h=0095533cdde963143f7988c048a110519cc50a31;hb=d44ed4f86892e350f4b16a3489b7e7c1a9bb7ead;hp=6379ddbffd9b9b5fd81b3c7909ea24f709a60128;hpb=bfbb38fb808ac23ef44472d05d9bb36edfb49ed0;p=linux-2.6

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6379ddbffd..0095533cdd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -822,6 +822,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
 	unsigned long nr_active;
 	unsigned long nr_inactive;
 
+	atomic_inc(&zone->reclaim_in_progress);
+
 	/*
 	 * Add one to `nr_to_scan' just to make sure that the kernel will
 	 * slowly sift through the active list.
@@ -861,6 +863,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
 	}
 
 	throttle_vm_writeout();
+
+	atomic_dec(&zone->reclaim_in_progress);
 }
 
 /*
@@ -917,8 +921,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-int try_to_free_pages(struct zone **zones,
-		unsigned int gfp_mask, unsigned int order)
+int try_to_free_pages(struct zone **zones, unsigned int gfp_mask)
 {
 	int priority;
 	int ret = 0;
@@ -971,7 +974,7 @@ int try_to_free_pages(struct zone **zones,
 		 * writeout.  So in laptop mode, write out the whole world.
 		 */
 		if (total_scanned > sc.swap_cluster_max + sc.swap_cluster_max/2) {
-			wakeup_bdflush(laptop_mode ? 0 : total_scanned);
+			wakeup_pdflush(laptop_mode ? 0 : total_scanned);
 			sc.may_writepage = 1;
 		}
 
@@ -1111,7 +1114,9 @@ scan:
 			sc.nr_reclaimed = 0;
 			sc.priority = priority;
 			sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX;
+			atomic_inc(&zone->reclaim_in_progress);
 			shrink_zone(zone, &sc);
+			atomic_dec(&zone->reclaim_in_progress);
 			reclaim_state->reclaimed_slab = 0;
 			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
 						lru_pages);
@@ -1213,8 +1218,8 @@ static int kswapd(void *p)
 	order = 0;
 	for ( ; ; ) {
 		unsigned long new_order;
-		if (current->flags & PF_FREEZE)
-			refrigerator(PF_FREEZE);
+
+		try_to_freeze();
 
 		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
 		new_order = pgdat->kswapd_max_order;
@@ -1323,3 +1328,75 @@ static int __init kswapd_init(void)
 }
 
 module_init(kswapd_init)
+
+
+/*
+ * Try to free up some pages from this zone through reclaim.
+ */
+int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
+{
+	struct scan_control sc;
+	int nr_pages = 1 << order;
+	int total_reclaimed = 0;
+
+	/* The reclaim may sleep, so don't do it if sleep isn't allowed */
+	if (!(gfp_mask & __GFP_WAIT))
+		return 0;
+	if (zone->all_unreclaimable)
+		return 0;
+
+	sc.gfp_mask = gfp_mask;
+	sc.may_writepage = 0;
+	sc.may_swap = 0;
+	sc.nr_mapped = read_page_state(nr_mapped);
+	sc.nr_scanned = 0;
+	sc.nr_reclaimed = 0;
+	/* scan at the highest priority */
+	sc.priority = 0;
+
+	if (nr_pages > SWAP_CLUSTER_MAX)
+		sc.swap_cluster_max = nr_pages;
+	else
+		sc.swap_cluster_max = SWAP_CLUSTER_MAX;
+
+	/* Don't reclaim the zone if there are other reclaimers active */
+	if (atomic_read(&zone->reclaim_in_progress) > 0)
+		goto out;
+
+	shrink_zone(zone, &sc);
+	total_reclaimed = sc.nr_reclaimed;
+
+ out:
+	return total_reclaimed;
+}
+
+asmlinkage long sys_set_zone_reclaim(unsigned int node, unsigned int zone,
+				unsigned int state)
+{
+	struct zone *z;
+	int i;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	if (node >= MAX_NUMNODES || !node_online(node))
+		return -EINVAL;
+
+	/* This will break if we ever add more zones */
+	if (!(zone & (1<<ZONE_DMA|1<<ZONE_NORMAL|1<<ZONE_HIGHMEM)))
+		return -EINVAL;
+
+	for (i = 0; i < MAX_NR_ZONES; i++) {
+		if (!(zone & 1<<i))
+			continue;
+
+		z = &NODE_DATA(node)->node_zones[i];
+
+		if (state)
+			z->reclaim_pages = 1;
+		else
+			z->reclaim_pages = 0;
+	}
+
+	return 0;
+}
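
Note (illustrative sketch, not part of the patch above): sys_set_zone_reclaim() takes
(node, zone bitmask, state) and sets zone->reclaim_pages for each selected zone on
that node.  Assuming the target architecture wires the call into its syscall table
and exposes __NR_set_zone_reclaim through the usual headers (that constant is an
assumption here, not guaranteed by this diff), a minimal userspace sketch to enable
early reclaim for ZONE_NORMAL (bit 1) on node 0 might look like this:

	/*
	 * Hypothetical usage sketch -- depends on __NR_set_zone_reclaim being
	 * defined for this architecture; not part of mm/vmscan.c.
	 */
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
	#ifdef __NR_set_zone_reclaim
		/* args: node 0, zone bitmask (1 << ZONE_NORMAL), state = enable */
		long ret = syscall(__NR_set_zone_reclaim, 0, 1 << 1, 1);

		if (ret < 0) {
			perror("set_zone_reclaim");
			return 1;
		}
		return 0;
	#else
		fprintf(stderr, "set_zone_reclaim: syscall number not defined\n");
		return 1;
	#endif
	}

The zone bits in the sketch mirror the (1<<ZONE_DMA|1<<ZONE_NORMAL|1<<ZONE_HIGHMEM)
mask checked in sys_set_zone_reclaim(); passing state = 0 would disable early reclaim
for the selected zones again.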