Don't unmap the page before starting pageout. This should reduce the number of minor page faults. shmem_writepage() wants the page to be unmapped (see the comment there for an explanation), so unmap it on entry. mm/shmem.c | 14 +++++++++++++- mm/vmscan.c | 38 +++++++++++++++++++++++++++++--------- 2 files changed, 42 insertions(+), 10 deletions(-) diff -puN mm/vmscan.c~dont-unmap-on-pageout mm/vmscan.c --- bk-linux/mm/vmscan.c~dont-unmap-on-pageout 2004-11-08 15:08:25.246204576 +0300 +++ bk-linux-nikita/mm/vmscan.c 2004-11-08 15:08:25.255203208 +0300 @@ -240,9 +240,13 @@ static inline int page_mapping_inuse(str return mapping_mapped(mapping); } +/* + * page is freeable if nothing but user mappings use it + */ static inline int is_page_cache_freeable(struct page *page) { - return page_count(page) - !!PagePrivate(page) == 2; + return page_count(page) - + page_mapcount(page) - !!PagePrivate(page) == 2; } static int may_write_to_queue(struct backing_dev_info *bdi) @@ -388,6 +392,7 @@ static int shrink_list(struct list_head struct page *page; int may_enter_fs; int referenced; + int dirty; page = lru_to_page(page_list); list_del(&page->lru); @@ -425,22 +430,37 @@ static int shrink_list(struct list_head may_enter_fs = (sc->gfp_mask & __GFP_FS) || (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); + dirty = PageDirty(page); /* * The page is mapped into the page tables of one or more * processes. Try to unmap it here. */ if (page_mapped(page) && mapping) { - switch (try_to_unmap(page)) { - case SWAP_FAIL: - goto activate_locked; - case SWAP_AGAIN: - goto keep_locked; - case SWAP_SUCCESS: - ; /* try to free the page below */ + /* + * if the page is dirty (either "explicitly", or through + * pte), start pageout immediately (without unmapping + * the page). This way pageout is done on mapped pages, + * which, presumably, reduces the number of minor page + * faults. Only unmap the page when it's not dirty. + */ + if (!dirty) { + /* + * The page is mapped into the page tables of + * one or more processes. Try to unmap it + * here. + */ + switch (try_to_unmap(page)) { + case SWAP_FAIL: + goto activate_locked; + case SWAP_AGAIN: + goto keep_locked; + case SWAP_SUCCESS: + ; /* try to free the page below */ + } } } - if (PageDirty(page)) { + if (dirty) { if (referenced) goto keep_locked; if (!may_enter_fs) diff -puN mm/shmem.c~dont-unmap-on-pageout mm/shmem.c --- bk-linux/mm/shmem.c~dont-unmap-on-pageout 2004-11-08 15:08:25.250203968 +0300 +++ bk-linux-nikita/mm/shmem.c 2004-11-08 15:08:25.265201688 +0300 @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include @@ -761,7 +762,18 @@ static int shmem_writepage(struct page * struct inode *inode; BUG_ON(!PageLocked(page)); - BUG_ON(page_mapped(page)); + + /* + * If shmem_writepage() is called on a mapped page, a problem arises + * for a tmpfs file mapped shared into different mms. Viz. + * shmem_writepage changes the tmpfs-file identity of the page to swap + * identity: so if it's unmapped later, the instances would then become + * private (to be COWed) instead of shared. + * + * Just unmap the page. + */ + if (page_mapped(page) && try_to_unmap(page) != SWAP_SUCCESS) + goto redirty; mapping = page->mapping; index = page->index; _