err.no Git - linux-2.6/commitdiff
[PATCH] Use min of two prio settings in calculating distress for reclaim
author     Martin Bligh <mbligh@google.com>
           Sat, 28 Oct 2006 17:38:25 +0000 (10:38 -0700)
committer  Linus Torvalds <torvalds@g5.osdl.org>
           Sat, 28 Oct 2006 18:30:51 +0000 (11:30 -0700)
If try_to_free_pages / balance_pgdat are called with a gfp_mask specifying
GFP_IO and/or GFP_FS, they will reclaim the requisite number of pages and then
reset prev_priority to DEF_PRIORITY (or to some other high (i.e. non-urgent)
value).

However, another reclaimer without those gfp_mask flags set (say, GFP_NOIO)
may still be struggling to reclaim pages.  The concurrent overwrite of
zone->prev_priority will cause this GFP_NOIO thread to unexpectedly cease
deactivating mapped pages, thus causing reclaim difficulties.

Fix this by keying the distress calculation not off zone->prev_priority alone,
but also taking the local caller's priority into account, using
min(zone->prev_priority, priority).

Signed-off-by: Martin J. Bligh <mbligh@google.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b32560ead5c0c518e0b3db5c2c1bbe802c1c441c..518540a4a2a66a1ade20312d1d655ba6923e05b5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -760,7 +760,7 @@ static inline int zone_is_near_oom(struct zone *zone)
  * But we had to alter page->flags anyway.
  */
 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
-                               struct scan_control *sc)
+                               struct scan_control *sc, int priority)
 {
        unsigned long pgmoved;
        int pgdeactivate = 0;
@@ -784,7 +784,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                 * `distress' is a measure of how much trouble we're having
                 * reclaiming pages.  0 -> no problems.  100 -> great trouble.
                 */
-               distress = 100 >> zone->prev_priority;
+               distress = 100 >> min(zone->prev_priority, priority);
 
                /*
                 * The point of this algorithm is to decide when to start
@@ -936,7 +936,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
                        nr_to_scan = min(nr_active,
                                        (unsigned long)sc->swap_cluster_max);
                        nr_active -= nr_to_scan;
-                       shrink_active_list(nr_to_scan, zone, sc);
+                       shrink_active_list(nr_to_scan, zone, sc, priority);
                }
 
                if (nr_inactive) {
@@ -1384,7 +1384,7 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
                        if (zone->nr_scan_active >= nr_pages || pass > 3) {
                                zone->nr_scan_active = 0;
                                nr_to_scan = min(nr_pages, zone->nr_active);
-                               shrink_active_list(nr_to_scan, zone, sc);
+                               shrink_active_list(nr_to_scan, zone, sc, prio);
                        }
                }