author | Ingo Molnar <mingo@elte.hu> | 2009-08-31 17:54:18 +0200
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2009-08-31 18:05:25 +0200
commit | bbe69aa57a7374b51242b95a54eefcf0d0393b7e (patch)
tree | c45e48d11cc9cb81a57c8c27f7243863b117cec8 /block/blk-settings.c
parent | a417887637e862b434b293404f2a31ad1f282a58 (diff)
parent | 326ba5010a5429a5a528b268b36a5900d4ab0eba (diff)
Merge commit 'v2.6.31-rc8' into core/locking
Merge reason: we were on -rc4, move to -rc8 before applying
a new batch of locking infrastructure changes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'block/blk-settings.c')
-rw-r--r-- | block/blk-settings.c | 84
1 file changed, 53 insertions, 31 deletions
diff --git a/block/blk-settings.c b/block/blk-settings.c
index bd582a7f531..476d8706507 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -7,6 +7,7 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
+#include <linux/gcd.h>
 
 #include "blk.h"
 
@@ -165,6 +166,13 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	blk_set_default_limits(&q->limits);
 
 	/*
+	 * If the caller didn't supply a lock, fall back to our embedded
+	 * per-queue locks
+	 */
+	if (!q->queue_lock)
+		q->queue_lock = &q->__queue_lock;
+
+	/*
 	 * by default assume old behaviour and bounce for any highmem page
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
@@ -377,8 +385,8 @@ void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
 EXPORT_SYMBOL(blk_queue_alignment_offset);
 
 /**
- * blk_queue_io_min - set minimum request size for the queue
- * @q: the request queue for the device
+ * blk_limits_io_min - set minimum request size for a device
+ * @limits: the queue limits
  * @min: smallest I/O size in bytes
  *
  * Description:
@@ -387,15 +395,35 @@ EXPORT_SYMBOL(blk_queue_alignment_offset);
  *   smallest I/O the device can perform without incurring a performance
  *   penalty.
  */
-void blk_queue_io_min(struct request_queue *q, unsigned int min)
+void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
 {
-	q->limits.io_min = min;
+	limits->io_min = min;
 
-	if (q->limits.io_min < q->limits.logical_block_size)
-		q->limits.io_min = q->limits.logical_block_size;
+	if (limits->io_min < limits->logical_block_size)
+		limits->io_min = limits->logical_block_size;
 
-	if (q->limits.io_min < q->limits.physical_block_size)
-		q->limits.io_min = q->limits.physical_block_size;
+	if (limits->io_min < limits->physical_block_size)
+		limits->io_min = limits->physical_block_size;
+}
+EXPORT_SYMBOL(blk_limits_io_min);
+
+/**
+ * blk_queue_io_min - set minimum request size for the queue
+ * @q: the request queue for the device
+ * @min: smallest I/O size in bytes
+ *
+ * Description:
+ *   Storage devices may report a granularity or preferred minimum I/O
+ *   size which is the smallest request the device can perform without
+ *   incurring a performance penalty.  For disk drives this is often the
+ *   physical block size.  For RAID arrays it is often the stripe chunk
+ *   size.  A properly aligned multiple of minimum_io_size is the
+ *   preferred request size for workloads where a high number of I/O
+ *   operations is desired.
+ */
+void blk_queue_io_min(struct request_queue *q, unsigned int min)
+{
+	blk_limits_io_min(&q->limits, min);
 }
 EXPORT_SYMBOL(blk_queue_io_min);
 
@@ -405,8 +433,12 @@ EXPORT_SYMBOL(blk_queue_io_min);
  * @opt: optimal request size in bytes
  *
  * Description:
- *   Drivers can call this function to set the preferred I/O request
- *   size for devices that report such a value.
+ *   Storage devices may report an optimal I/O size, which is the
+ *   device's preferred unit for sustained I/O.  This is rarely reported
+ *   for disk drives.  For RAID arrays it is usually the stripe width or
+ *   the internal track size.  A properly aligned multiple of
+ *   optimal_io_size is the preferred request size for workloads where
+ *   sustained throughput is desired.
  */
 void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
 {
@@ -426,27 +458,7 @@ EXPORT_SYMBOL(blk_queue_io_opt);
 **/
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
-	/* zero is "infinity" */
-	t->limits.max_sectors = min_not_zero(queue_max_sectors(t),
-					     queue_max_sectors(b));
-
-	t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t),
-						queue_max_hw_sectors(b));
-
-	t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t),
-						   queue_segment_boundary(b));
-
-	t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t),
-						   queue_max_phys_segments(b));
-
-	t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t),
-						 queue_max_hw_segments(b));
-
-	t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t),
-						  queue_max_segment_size(b));
-
-	t->limits.logical_block_size = max(queue_logical_block_size(t),
-					   queue_logical_block_size(b));
+	blk_stack_limits(&t->limits, &b->limits, 0);
 
 	if (!t->queue_lock)
 		WARN_ON_ONCE(1);
@@ -516,6 +528,16 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		return -1;
 	}
 
+	/* Find lcm() of optimal I/O size */
+	if (t->io_opt && b->io_opt)
+		t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt);
+	else if (b->io_opt)
+		t->io_opt = b->io_opt;
+
+	/* Verify that optimal I/O size is a multiple of io_min */
+	if (t->io_min && t->io_opt % t->io_min)
+		return -1;
+
 	return 0;
 }
 EXPORT_SYMBOL(blk_stack_limits);
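The most interesting new logic above is the io_opt stacking at the bottom of blk_stack_limits(): when two queues are stacked, the combined optimal I/O size is the least common multiple of the two values, computed as a * b / gcd(a, b) using the gcd() helper pulled in via the new <linux/gcd.h> include. The following is a minimal userspace sketch of that rule; struct limits, stack_io_opt() and the local gcd() are illustrative stand-ins for the kernel's struct queue_limits and <linux/gcd.h>, not actual kernel API.

```c
#include <stdio.h>

/* Stripped-down stand-in for the io_* fields of struct queue_limits */
struct limits {
	unsigned int io_min;	/* minimum I/O size, bytes */
	unsigned int io_opt;	/* optimal I/O size, bytes */
};

/* Euclid's algorithm; stands in for gcd() from <linux/gcd.h> */
static unsigned int gcd(unsigned int a, unsigned int b)
{
	while (b) {
		unsigned int r = a % b;
		a = b;
		b = r;
	}
	return a;
}

/* Mirrors the io_opt handling added to blk_stack_limits() above */
static int stack_io_opt(struct limits *t, const struct limits *b)
{
	/* Combined optimum is lcm(a, b) = a * b / gcd(a, b) */
	if (t->io_opt && b->io_opt)
		t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt);
	else if (b->io_opt)
		t->io_opt = b->io_opt;	/* only the bottom device reports one */

	/* Reject a combination whose optimum is not a multiple of io_min */
	if (t->io_min && t->io_opt % t->io_min)
		return -1;

	return 0;
}

int main(void)
{
	/* e.g. a 64 KiB stripe stacked on a 48 KiB stripe, 4 KiB io_min */
	struct limits top = { .io_min = 4096, .io_opt = 65536 };
	struct limits bottom = { .io_min = 4096, .io_opt = 49152 };

	if (stack_io_opt(&top, &bottom) == 0)
		printf("stacked io_opt = %u bytes\n", top.io_opt);
	else
		printf("optimal I/O size conflicts with io_min\n");

	return 0;
}
```

Here lcm(65536, 49152) = 196608 bytes (192 KiB), so the top queue advertises an optimum that keeps both stripe layouts aligned. Note that, as in the kernel code, the intermediate product must fit in the unsigned type, which it does for values of this magnitude.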