X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fpage-writeback.c;h=d821321326e3f6fcff609d7f87ecb5c0890213c9;hb=6af2acb6619688046039234f716fd003e6ed2b3f;hp=ea9da3bed3e908db7ff42dfd5460250d70b2edbf;hpb=14dc5249728ff699b1ca4dac01ad416a350a147a;p=linux-2.6

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index ea9da3bed3..d821321326 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -126,7 +126,7 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 	int node;
 	unsigned long x = 0;
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_HIGH_MEMORY) {
 		struct zone *z =
 			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
@@ -274,9 +274,9 @@ static void balance_dirty_pages(struct address_space *mapping)
 		pdflush_operation(background_writeout, 0);
 }
 
-void set_page_dirty_balance(struct page *page)
+void set_page_dirty_balance(struct page *page, int page_mkwrite)
 {
-	if (set_page_dirty(page)) {
+	if (set_page_dirty(page) || page_mkwrite) {
 		struct address_space *mapping = page_mapping(page);
 
 		if (mapping)
@@ -824,6 +824,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 		mapping2 = page_mapping(page);
 		if (mapping2) { /* Race with truncate? */
 			BUG_ON(mapping2 != mapping);
+			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
 			if (mapping_cap_account_dirty(mapping)) {
 				__inc_zone_page_state(page, NR_FILE_DIRTY);
 				task_io_account_write(PAGE_CACHE_SIZE);
@@ -917,6 +918,9 @@ int clear_page_dirty_for_io(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
+	BUG_ON(!PageLocked(page));
+
+	ClearPageReclaim(page);
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		/*
 		 * Yes, Virginia, this is indeed insane.
@@ -942,14 +946,19 @@ int clear_page_dirty_for_io(struct page *page)
 		 * We basically use the page "master dirty bit"
 		 * as a serialization point for all the different
 		 * threads doing their things.
-		 *
-		 * FIXME! We still have a race here: if somebody
-		 * adds the page back to the page tables in
-		 * between the "page_mkclean()" and the "TestClearPageDirty()",
-		 * we might have it mapped without the dirty bit set.
 		 */
 		if (page_mkclean(page))
 			set_page_dirty(page);
+		/*
+		 * We carefully synchronise fault handlers against
+		 * installing a dirty pte and marking the page dirty
+		 * at this point. We do this by having them hold the
+		 * page lock at some point after installing their
+		 * pte, but before marking the page dirty.
+		 * Pages are always locked coming in here, so we get
+		 * the desired exclusion. See mm/memory.c:do_wp_page()
+		 * for more comments.
+		 */
 		if (TestClearPageDirty(page)) {
 			dec_zone_page_state(page, NR_FILE_DIRTY);
 			return 1;
@@ -978,6 +987,8 @@ int test_clear_page_writeback(struct page *page)
 	} else {
 		ret = TestClearPageWriteback(page);
 	}
+	if (ret)
+		dec_zone_page_state(page, NR_WRITEBACK);
 	return ret;
 }
 
@@ -1003,23 +1014,23 @@ int test_set_page_writeback(struct page *page)
 	} else {
 		ret = TestSetPageWriteback(page);
 	}
+	if (!ret)
+		inc_zone_page_state(page, NR_WRITEBACK);
 	return ret;
 
 }
 EXPORT_SYMBOL(test_set_page_writeback);
 
 /*
- * Return true if any of the pages in the mapping are marged with the
+ * Return true if any of the pages in the mapping are marked with the
  * passed tag.
  */
 int mapping_tagged(struct address_space *mapping, int tag)
 {
-	unsigned long flags;
 	int ret;
-
-	read_lock_irqsave(&mapping->tree_lock, flags);
+	rcu_read_lock();
 	ret = radix_tree_tagged(&mapping->page_tree, tag);
-	read_unlock_irqrestore(&mapping->tree_lock, flags);
+	rcu_read_unlock();
 	return ret;
 }
 EXPORT_SYMBOL(mapping_tagged);
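
Note on the clear_page_dirty_for_io() hunk: the new comment replaces the old
FIXME because the race it described is closed by lock ordering, not by new
code in this function. Below is a minimal userspace sketch of that ordering,
with a pthread mutex standing in for the page lock and atomic flags for the
pte dirty bit and the "master dirty bit". Every name in it (page_t,
fault_handler, clear_dirty_for_io) is an illustrative invention for this
sketch, not kernel API.

/*
 * Hypothetical userspace model of the exclusion described in
 * clear_page_dirty_for_io(): fault handlers take the "page lock" after
 * installing their pte but before marking the page dirty, so a cleaner
 * holding the lock can do page_mkclean() + TestClearPageDirty() without
 * a dirty pte slipping in between. Build with: cc -pthread model.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
	pthread_mutex_t lock;	/* stands in for the page lock */
	atomic_bool pte_dirty;	/* hardware dirty bit in the pte */
	atomic_bool dirty;	/* the "master dirty bit" (PG_dirty) */
} page_t;

/* Fault-handler side: install a writable pte, then mark the page dirty
 * while holding the page lock, mirroring the ordering in do_wp_page(). */
static void *fault_handler(void *arg)
{
	page_t *page = arg;

	atomic_store(&page->pte_dirty, true);	/* pte installed dirty */
	pthread_mutex_lock(&page->lock);	/* page lock, before dirtying */
	atomic_store(&page->dirty, true);	/* set_page_dirty() */
	pthread_mutex_unlock(&page->lock);
	return NULL;
}

/* Writeback side: the caller holds the page lock (the BUG_ON in the
 * patch enforces this), so transferring the pte bit and test-and-clearing
 * the master bit cannot interleave with the dirty marking above. */
static bool clear_dirty_for_io(page_t *page)
{
	bool was_dirty;

	pthread_mutex_lock(&page->lock);
	/* page_mkclean(): move the pte dirty bit into the master bit */
	if (atomic_exchange(&page->pte_dirty, false))
		atomic_store(&page->dirty, true);
	/* TestClearPageDirty() */
	was_dirty = atomic_exchange(&page->dirty, false);
	pthread_mutex_unlock(&page->lock);
	return was_dirty;
}

int main(void)
{
	page_t page = { .lock = PTHREAD_MUTEX_INITIALIZER };
	pthread_t t;

	pthread_create(&t, NULL, fault_handler, &page);
	if (clear_dirty_for_io(&page))
		puts("page was dirty: write it back");
	pthread_join(&t, NULL);
	/* a dirtying that lost the race is re-recorded, never dropped */
	printf("dirty after io: %d\n", atomic_load(&page.dirty));
	return 0;
}

Because the transfer and the test-and-clear happen in one locked section, a
fault handler that loses the race simply re-marks the page dirty once it gets
the lock: the dirty bit is deferred to the next writeback pass, never lost.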