 mm/vmscan.c |   17 ++++++++++++++---
 1 files changed, 14 insertions(+), 3 deletions(-)

diff -puN mm/vmscan.c~ignore-page_referenced mm/vmscan.c
--- bk-linux/mm/vmscan.c~ignore-page_referenced	2004-11-06 20:28:28.838538464 +0300
+++ bk-linux-nikita/mm/vmscan.c	2004-11-06 20:28:28.841538008 +0300
@@ -234,11 +234,17 @@ static int shrink_slab(unsigned long sca
 	return 0;
 }
 
+#define IGNORE_REF_PRIORITY (2)
+
 /* Called without lock on whether page is mapped, so answer is unstable */
 static inline int page_mapping_inuse(struct page *page)
 {
 	struct address_space *mapping;
 
+	/* zone is under real stress, reclaim everything */
+	if (page_zone(page)->prev_priority <= IGNORE_REF_PRIORITY)
+		return 0;
+
 	/* Page is in somebody's page tables. */
 	if (page_mapped(page))
 		return 1;
@@ -762,7 +768,6 @@ spill_on_spot(struct zone *zone,
 }
 
 
-
 /*
  * This moves pages from the active list to the inactive list.
  *
@@ -799,12 +804,13 @@ refill_inactive_zone(struct zone *zone,
 	long mapped_ratio;
 	long distress;
 	long swap_tendency;
+	int priority = zone->prev_priority;
 
 	/*
 	 * `distress' is a measure of how much trouble we're having reclaiming
 	 * pages.  0 -> no problems.  100 -> great trouble.
 	 */
-	distress = 100 >> zone->prev_priority;
+	distress = 100 >> priority;
 
 	/*
 	 * The point of this algorithm is to decide when to start reclaiming
@@ -885,7 +891,12 @@ refill_inactive_zone(struct zone *zone,
 		if (page_mapped(page)) {
 			if (!reclaim_mapped ||
 			    (total_swap_pages == 0 && PageAnon(page)) ||
-			    page_referenced(page, 0)) {
+			    /*
+			     * stop honoring referenced bit when we are really
+			     * tight on memory.
+			     */
+			    (priority > IGNORE_REF_PRIORITY &&
+			     page_referenced(page, 0))) {
 				list_add(&page->lru, &l_ignore);
 				continue;
 			}
_
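
For reference, a minimal standalone sketch (not part of the patch, and not kernel code) of how the scan priority drives both checks above: the existing distress formula `distress = 100 >> priority` and the new IGNORE_REF_PRIORITY cutoff. It assumes a DEF_PRIORITY of 12, as in 2.6 kernels of that era; everything else here is hypothetical user-space illustration.

	/*
	 * Illustration only: print, for each scan priority, the resulting
	 * `distress' value and whether the patched code would still honor
	 * the referenced bit.  DEF_PRIORITY of 12 is an assumption.
	 */
	#include <stdio.h>

	#define DEF_PRIORITY		12	/* assumed, as in 2.6-era kernels */
	#define IGNORE_REF_PRIORITY	(2)	/* from the patch above */

	int main(void)
	{
		int priority;

		for (priority = DEF_PRIORITY; priority >= 0; priority--) {
			int distress = 100 >> priority;	/* same formula as the patch */
			int ignore_ref = priority <= IGNORE_REF_PRIORITY;

			printf("priority %2d: distress %3d, honor referenced bit: %s\n",
			       priority, distress, ignore_ref ? "no" : "yes");
		}
		return 0;
	}

Under that assumption, only at priority 2 and below (distress of 25 or more) does reclaim stop honoring the referenced bit, which is the same threshold the `prev_priority <= IGNORE_REF_PRIORITY` test applies in page_mapping_inuse().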