--- include/linux/swap.h.~1~	Mon Dec  7 12:05:54 1998
+++ include/linux/swap.h	Mon Dec  7 18:55:55 1998
@@ -90,6 +90,7 @@
 extern struct page * read_swap_cache_async(unsigned long, int);
 #define read_swap_cache(entry) read_swap_cache_async(entry, 1);
 extern int FASTCALL(swap_count(unsigned long));
+extern struct page * lookup_swap_cache(unsigned long);
 /*
  * Make these inline later once they are working properly.
  */
--- mm/page_alloc.c.~1~	Fri Nov 27 12:36:42 1998
+++ mm/page_alloc.c	Mon Dec  7 20:42:36 1998
@@ -360,6 +360,35 @@
 }
 
 /*
+ * Primitive swap readahead code. We simply read the
+ * next 16 entries in the swap area. This method is
+ * chosen because it doesn't cost us any seek time.
+ * We also make sure to queue the 'original' request
+ * together with the readahead ones...
+ */
+void swapin_readahead(unsigned long entry) {
+	int i;
+	struct page *new_page;
+	unsigned long offset = SWP_OFFSET(entry);
+	struct swap_info_struct *swapdev = SWP_TYPE(entry) + swap_info;
+
+	for (i = 0; i < 16; i++) {
+		if (offset >= swapdev->max
+		    || nr_free_pages - atomic_read(&nr_async_pages) <
+			(freepages.high + freepages.low)/2)
+			return;
+		if (!swapdev->swap_map[offset] ||
+		    test_bit(offset, swapdev->swap_lockmap))
+			continue;
+		new_page = read_swap_cache_async(SWP_ENTRY(SWP_TYPE(entry), offset), 0);
+		if (new_page != NULL)
+			__free_page(new_page);
+		offset++;
+	}
+	return;
+}
+
+/*
  * The tests may look silly, but it essentially makes sure that
  * no other process did a swap-in on us just as we were waiting.
  *
@@ -370,10 +399,12 @@
 	pte_t * page_table, unsigned long entry, int write_access)
 {
 	unsigned long page;
-	struct page *page_map;
-
-	page_map = read_swap_cache(entry);
+	struct page *page_map = lookup_swap_cache(entry);
 
+	if (!page_map) {
+		swapin_readahead(entry);
+		page_map = read_swap_cache(entry);
+	}
 	if (pte_val(*page_table) != entry) {
 		if (page_map)
			free_page_and_swap_cache(page_address(page_map));
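The window scan above is easy to check in isolation. Here is a small
userspace sketch of the same logic, with the kernel structures stubbed
out: map[], locked[], max_slots, WINDOW and the threshold argument are
stand-ins for swapdev->swap_map, the swap lockmap, swapdev->max, the
16-entry window and the freepages test, and all the numbers are made up.
One detail worth seeing in action: the skip path does not advance
offset, so the scan effectively stops at the first free or locked slot
in the window.

#include <stdio.h>

#define WINDOW	16

static unsigned char map[64];		/* nonzero: slot holds swapped data */
static unsigned char locked[64];	/* nonzero: read already in flight */
static unsigned long max_slots = 64;	/* size of the stub swap area */

static void readahead(unsigned long offset, long free, long async, long thresh)
{
	int i;

	for (i = 0; i < WINDOW; i++) {
		/* stop at the device end, or when memory gets tight */
		if (offset >= max_slots || free - async < thresh)
			return;
		/*
		 * Skip free/locked slots.  As in the patch, offset is
		 * not advanced here, so the scan effectively ends at
		 * the first hole in the cluster.
		 */
		if (!map[offset] || locked[offset])
			continue;
		printf("queue async read for slot %lu\n", offset);
		offset++;
	}
}

int main(void)
{
	unsigned long i;

	for (i = 10; i < 20; i++)	/* a ten-slot contiguous run */
		map[i] = 1;
	locked[13] = 1;			/* one read already queued */

	readahead(10, 200, 8, 64);	/* fault at slot 10 */
	return 0;
}

Compiled and run, this queues reads for slots 10-12 and then stops
producing reads at the locked slot 13, which is consistent with the
comment's goal: one contiguous run of pages, with no extra seek cost.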
--- mm/page_io.c.~1~	Fri Nov 27 12:36:42 1998
+++ mm/page_io.c	Mon Dec  7 18:55:55 1998
@@ -60,7 +60,7 @@
 	}
 
 	/* Don't allow too many pending pages in flight.. */
-	if (atomic_read(&nr_async_pages) > SWAP_CLUSTER_MAX)
+	if (atomic_read(&nr_async_pages) > pager_daemon.swap_cluster)
 		wait = 1;
 
 	p = &swap_info[type];
--- mm/swap.c.~1~	Mon Dec  7 12:05:54 1998
+++ mm/swap.c	Mon Dec  7 18:55:55 1998
@@ -61,14 +61,14 @@
 swapstat_t swapstats = {0};
 
 buffer_mem_t buffer_mem = {
-	5,	/* minimum percent buffer */
-	10,	/* borrow percent buffer */
+	1,	/* minimum percent buffer */
+	20,	/* borrow percent buffer */
 	60	/* maximum percent buffer */
 };
 
 buffer_mem_t page_cache = {
-	5,	/* minimum percent page cache */
-	15,	/* borrow percent page cache */
+	1,	/* minimum percent page cache */
+	30,	/* borrow percent page cache */
 	75	/* maximum */
 };
 
--- mm/swap_state.c.~1~	Fri Nov 27 12:36:42 1998
+++ mm/swap_state.c	Mon Dec  7 18:55:55 1998
@@ -258,9 +258,10 @@
  * incremented.
  */
 
-static struct page * lookup_swap_cache(unsigned long entry)
+struct page * lookup_swap_cache(unsigned long entry)
 {
 	struct page *found;
 
+	swap_cache_find_total++;
 	while (1) {
 		found = find_page(&swapper_inode, entry);
@@ -268,8 +269,10 @@
 			return 0;
 		if (found->inode != &swapper_inode || !PageSwapCache(found))
 			goto out_bad;
-		if (!PageLocked(found))
+		if (!PageLocked(found)) {
+			swap_cache_find_success++;
 			return found;
+		}
 		__free_page(found);
 		__wait_on_page(found);
 	}
--- mm/vmscan.c.~1~	Mon Dec  7 12:05:54 1998
+++ mm/vmscan.c	Mon Dec  7 18:55:55 1998
@@ -432,6 +432,8 @@
 	if (buffer_over_borrow() || pgcache_over_borrow())
 		state = 0;
 
+	if (atomic_read(&nr_async_pages) > pager_daemon.swap_cluster / 2)
+		shrink_mmap(i, gfp_mask);
 	switch (state) {
 		do {
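As a closing note, the two counters added to lookup_swap_cache() are
there so the readahead can be judged by its swap cache hit rate. A
hypothetical reporting helper (not part of this patch; the function
name and the extern declarations are made up, assuming the counters are
exported as plain unsigned longs) could be as simple as:

extern unsigned long swap_cache_find_total;
extern unsigned long swap_cache_find_success;

/* percentage of lookups that found the page already in the swap cache */
static unsigned long swap_cache_hit_percent(void)
{
	if (!swap_cache_find_total)
		return 0;
	return swap_cache_find_success * 100 / swap_cache_find_total;
}

A hit rate that climbs after applying the patch is the cheapest
evidence that the 16-entry window is actually absorbing the faults
that follow the first one.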