author    | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-16 19:50:13 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-16 19:50:13 -0700
commit    | 517d08699b250021303f9a7cf0d758b6dc0748ed (patch)
tree      | 5e5b0134c3fffb78fe9d8b1641a64ff28fdd7bbc /mm/readahead.c
parent    | 8eeee4e2f04fc551f50c9d9847da2d73d7d33728 (diff)
parent    | a34601c5d84134055782ee031d58d82f5440e918 (diff)
Merge branch 'akpm'
* akpm: (182 commits)
fbdev: bf54x-lq043fb: use kzalloc over kmalloc/memset
fbdev: *bfin*: fix __dev{init,exit} markings
fbdev: *bfin*: drop unnecessary calls to memset
fbdev: bfin-t350mcqb-fb: drop unused local variables
fbdev: blackfin has __raw I/O accessors, so use them in fb.h
fbdev: s1d13xxxfb: add accelerated bitblt functions
tcx: use standard fields for framebuffer physical address and length
fbdev: add support for handoff from firmware to hw framebuffers
intelfb: fix a bug when changing video timing
fbdev: use framebuffer_release() for freeing fb_info structures
radeon: P2G2CLK_ALWAYS_ONb tested twice, should 2nd be P2G2CLK_DAC_ALWAYS_ONb?
s3c-fb: CPUFREQ frequency scaling support
s3c-fb: fix resource releasing on error during probing
carminefb: fix possible access beyond end of carmine_modedb[]
acornfb: remove fb_mmap function
mb862xxfb: use CONFIG_OF instead of CONFIG_PPC_OF
mb862xxfb: restrict compilation of platform driver to PPC
Samsung SoC Framebuffer driver: add Alpha Channel support
atmel-lcdc: fix pixclock upper bound detection
offb: use framebuffer_alloc() to allocate fb_info struct
...
Manually fix up conflicts due to kmemcheck in mm/slab.c
Diffstat (limited to 'mm/readahead.c')
-rw-r--r-- | mm/readahead.c | 145
1 files changed, 101 insertions, 44 deletions
diff --git a/mm/readahead.c b/mm/readahead.c
index 133b6d52551..aa1aa234523 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -133,15 +133,12 @@ out:
 }
 
 /*
- * do_page_cache_readahead actually reads a chunk of disk. It allocates all
+ * __do_page_cache_readahead() actually reads a chunk of disk. It allocates all
  * the pages first, then submits them all for I/O. This avoids the very bad
  * behaviour which would occur if page allocations are causing VM writeback.
  * We really don't want to intermingle reads and writes like that.
  *
  * Returns the number of pages requested, or the maximum amount of I/O allowed.
- *
- * do_page_cache_readahead() returns -1 if it encountered request queue
- * congestion.
  */
 static int
 __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
@@ -210,6 +207,7 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
         if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
                 return -EINVAL;
 
+        nr_to_read = max_sane_readahead(nr_to_read);
         while (nr_to_read) {
                 int err;
 
@@ -231,22 +229,6 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 }
 
 /*
- * This version skips the IO if the queue is read-congested, and will tell the
- * block layer to abandon the readahead if request allocation would block.
- *
- * force_page_cache_readahead() will ignore queue congestion and will block on
- * request queues.
- */
-int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-                        pgoff_t offset, unsigned long nr_to_read)
-{
-        if (bdi_read_congested(mapping->backing_dev_info))
-                return -1;
-
-        return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
-}
-
-/*
  * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
  * sensible upper limit.
  */
@@ -259,7 +241,7 @@ unsigned long max_sane_readahead(unsigned long nr)
 /*
  * Submit IO for the read-ahead request in file_ra_state.
  */
-static unsigned long ra_submit(struct file_ra_state *ra,
+unsigned long ra_submit(struct file_ra_state *ra,
                        struct address_space *mapping, struct file *filp)
 {
         int actual;
@@ -348,6 +330,59 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
  */
 
 /*
+ * Count contiguously cached pages from @offset-1 to @offset-@max,
+ * this count is a conservative estimation of
+ *      - length of the sequential read sequence, or
+ *      - thrashing threshold in memory tight systems
+ */
+static pgoff_t count_history_pages(struct address_space *mapping,
+                                   struct file_ra_state *ra,
+                                   pgoff_t offset, unsigned long max)
+{
+        pgoff_t head;
+
+        rcu_read_lock();
+        head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
+        rcu_read_unlock();
+
+        return offset - 1 - head;
+}
+
+/*
+ * page cache context based read-ahead
+ */
+static int try_context_readahead(struct address_space *mapping,
+                                 struct file_ra_state *ra,
+                                 pgoff_t offset,
+                                 unsigned long req_size,
+                                 unsigned long max)
+{
+        pgoff_t size;
+
+        size = count_history_pages(mapping, ra, offset, max);
+
+        /*
+         * no history pages:
+         * it could be a random read
+         */
+        if (!size)
+                return 0;
+
+        /*
+         * starts from beginning of file:
+         * it is a strong indication of long-run stream (or whole-file-read)
+         */
+        if (size >= offset)
+                size *= 2;
+
+        ra->start = offset;
+        ra->size = get_init_ra_size(size + req_size, max);
+        ra->async_size = ra->size;
+
+        return 1;
+}
+
+/*
  * A minimal readahead algorithm for trivial sequential/random reads.
  */
 static unsigned long
@@ -356,34 +391,26 @@ ondemand_readahead(struct address_space *mapping,
                    bool hit_readahead_marker, pgoff_t offset,
                    unsigned long req_size)
 {
-        int max = ra->ra_pages;        /* max readahead pages */
-        pgoff_t prev_offset;
-        int sequential;
+        unsigned long max = max_sane_readahead(ra->ra_pages);
+
+        /*
+         * start of file
+         */
+        if (!offset)
+                goto initial_readahead;
 
         /*
          * It's the expected callback offset, assume sequential access.
          * Ramp up sizes, and push forward the readahead window.
          */
-        if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
-                        offset == (ra->start + ra->size))) {
+        if ((offset == (ra->start + ra->size - ra->async_size) ||
+             offset == (ra->start + ra->size))) {
                 ra->start += ra->size;
                 ra->size = get_next_ra_size(ra, max);
                 ra->async_size = ra->size;
                 goto readit;
         }
 
-        prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
-        sequential = offset - prev_offset <= 1UL || req_size > max;
-
-        /*
-         * Standalone, small read.
-         * Read as is, and do not pollute the readahead state.
-         */
-        if (!hit_readahead_marker && !sequential) {
-                return __do_page_cache_readahead(mapping, filp,
-                                                offset, req_size, 0);
-        }
-
         /*
          * Hit a marked page without valid readahead state.
          * E.g. interleaved reads.
@@ -394,7 +421,7 @@ ondemand_readahead(struct address_space *mapping,
                 pgoff_t start;
 
                 rcu_read_lock();
-                start = radix_tree_next_hole(&mapping->page_tree, offset,max+1);
+                start = radix_tree_next_hole(&mapping->page_tree, offset+1,max);
                 rcu_read_unlock();
 
                 if (!start || start - offset > max)
@@ -402,23 +429,53 @@ ondemand_readahead(struct address_space *mapping,
                 ra->start = start;
                 ra->size = start - offset;        /* old async_size */
+                ra->size += req_size;
                 ra->size = get_next_ra_size(ra, max);
                 ra->async_size = ra->size;
                 goto readit;
         }
 
         /*
-         * It may be one of
-         *   - first read on start of file
-         *   - sequential cache miss
-         *   - oversize random read
-         * Start readahead for it.
+         * oversize read
+         */
+        if (req_size > max)
+                goto initial_readahead;
+
+        /*
+         * sequential cache miss
+         */
+        if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
+                goto initial_readahead;
+
+        /*
+         * Query the page cache and look for the traces(cached history pages)
+         * that a sequential stream would leave behind.
+         */
+        if (try_context_readahead(mapping, ra, offset, req_size, max))
+                goto readit;
+
+        /*
+         * standalone, small random read
+         * Read as is, and do not pollute the readahead state.
          */
+        return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);
+
+initial_readahead:
         ra->start = offset;
         ra->size = get_init_ra_size(req_size, max);
         ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
 
 readit:
+        /*
+         * Will this read hit the readahead marker made by itself?
+         * If so, trigger the readahead marker hit now, and merge
+         * the resulted next readahead window into the current one.
+         */
+        if (offset == ra->start && ra->size == ra->async_size) {
+                ra->async_size = get_next_ra_size(ra, max);
+                ra->size += ra->async_size;
+        }
+
         return ra_submit(ra, mapping, filp);
 }
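As a reading aid, here is a minimal userspace sketch of the context readahead heuristic this diff introduces (count_history_pages() plus try_context_readahead()). It is not the kernel implementation: the page cache is modelled as a plain boolean array rather than the radix tree walked under RCU, the window ramp-up is a crude stand-in for get_init_ra_size(), and the simplified signatures and the demo scenario in main() are invented for illustration only.

#include <stdbool.h>
#include <stdio.h>

#define MAX_PAGES 1024

static bool cache[MAX_PAGES];   /* toy stand-in for the page cache */

/* Count how many pages immediately before @offset are cached, up to @max. */
static unsigned long count_history_pages(unsigned long offset, unsigned long max)
{
        unsigned long count = 0;

        while (count < max && offset >= count + 1 && cache[offset - 1 - count])
                count++;
        return count;
}

/* Crude ramp-up and clamp; the kernel's get_init_ra_size() uses a finer table. */
static unsigned long init_ra_size(unsigned long size, unsigned long max)
{
        unsigned long newsize = size * 4;

        return newsize < max ? newsize : max;
}

/*
 * Decide a readahead window from cached history, mirroring the logic of
 * try_context_readahead(): no history means a likely random read (return 0);
 * a history run that reaches the start of the file suggests a long stream,
 * so it is weighted double before sizing the window.
 */
static int try_context_readahead(unsigned long offset, unsigned long req_size,
                                 unsigned long max,
                                 unsigned long *ra_start, unsigned long *ra_size)
{
        unsigned long size = count_history_pages(offset, max);

        if (!size)
                return 0;

        if (size >= offset)
                size *= 2;

        *ra_start = offset;
        *ra_size = init_ra_size(size + req_size, max);
        return 1;
}

int main(void)
{
        unsigned long start, size;

        /* Simulate an interleaved sequential stream: pages 0..7 already cached. */
        for (int i = 0; i < 8; i++)
                cache[i] = true;

        if (try_context_readahead(8, 2, 32, &start, &size))
                printf("context readahead: start=%lu size=%lu\n", start, size);
        else
                printf("treated as random read\n");
        return 0;
}

In this toy run the eight cached history pages reach the start of the file, so the estimate is doubled and the window is clamped to the 32-page maximum, which is the same behaviour the new ondemand_readahead() relies on when a cache-missed sequential stream is detected.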