X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Frmap.c;h=059774712c0889fbe1ff08aabc43f7593760baec;hb=ff9bc512f198eb47204f55b24c6fe3d36ed89592;hp=99bc3f9cd796f34a58ec1d61798c27c583a0eaf3;hpb=e93dc4891df93d7efa59d861fdcbb529a1819343;p=linux-2.6

diff --git a/mm/rmap.c b/mm/rmap.c
index 99bc3f9cd7..059774712c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -422,7 +422,7 @@ int page_referenced(struct page *page, int is_locked,