author    Ingo Molnar <mingo@elte.hu>    2009-04-07 13:47:33 +0200
committer Ingo Molnar <mingo@elte.hu>    2009-04-07 13:47:45 +0200
commit    93776a8ec746cf9d32c36e5a5b23d28d8be28826 (patch)
tree      6c472ae9f709246ee5268e1d71559d07839fb965 /block
parent    34886c8bc590f078d4c0b88f50d061326639198d (diff)
parent    d508afb437daee7cf07da085b635c44a4ebf9b38 (diff)
Merge branch 'linus' into tracing/core
Merge reason: update to upstream tracing facilities

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c    | 98
-rw-r--r--  block/blk-merge.c   |  2
-rw-r--r--  block/blk-softirq.c |  2
-rw-r--r--  block/blk-sysfs.c   | 40
-rw-r--r--  block/blk.h         |  2
-rw-r--r--  block/bsg.c         | 12
-rw-r--r--  block/cfq-iosched.c |  4
-rw-r--r--  block/cmd-filter.c  |  1
-rw-r--r--  block/elevator.c    |  2
-rw-r--r--  block/scsi_ioctl.c  | 21
10 files changed, 94 insertions, 90 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 29bcfac6c68..25572802dac 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -484,11 +484,11 @@ static int blk_init_free_list(struct request_queue *q)
{
struct request_list *rl = &q->rq;
- rl->count[READ] = rl->count[WRITE] = 0;
- rl->starved[READ] = rl->starved[WRITE] = 0;
+ rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
+ rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
rl->elvpriv = 0;
- init_waitqueue_head(&rl->wait[READ]);
- init_waitqueue_head(&rl->wait[WRITE]);
+ init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
+ init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
mempool_free_slab, request_cachep, q->node);
@@ -603,13 +603,10 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
q->queue_flags = QUEUE_FLAG_DEFAULT;
q->queue_lock = lock;
- blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
-
+ /*
+ * This also sets hw/phys segments, boundary and size
+ */
blk_queue_make_request(q, __make_request);
- blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
-
- blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
- blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
q->sg_reserved_size = INT_MAX;
@@ -702,18 +699,18 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
ioc->last_waited = jiffies;
}
-static void __freed_request(struct request_queue *q, int rw)
+static void __freed_request(struct request_queue *q, int sync)
{
struct request_list *rl = &q->rq;
- if (rl->count[rw] < queue_congestion_off_threshold(q))
- blk_clear_queue_congested(q, rw);
+ if (rl->count[sync] < queue_congestion_off_threshold(q))
+ blk_clear_queue_congested(q, sync);
- if (rl->count[rw] + 1 <= q->nr_requests) {
- if (waitqueue_active(&rl->wait[rw]))
- wake_up(&rl->wait[rw]);
+ if (rl->count[sync] + 1 <= q->nr_requests) {
+ if (waitqueue_active(&rl->wait[sync]))
+ wake_up(&rl->wait[sync]);
- blk_clear_queue_full(q, rw);
+ blk_clear_queue_full(q, sync);
}
}
@@ -721,21 +718,20 @@ static void __freed_request(struct request_queue *q, int rw)
* A request has just been released. Account for it, update the full and
* congestion status, wake up any waiters. Called under q->queue_lock.
*/
-static void freed_request(struct request_queue *q, int rw, int priv)
+static void freed_request(struct request_queue *q, int sync, int priv)
{
struct request_list *rl = &q->rq;
- rl->count[rw]--;
+ rl->count[sync]--;
if (priv)
rl->elvpriv--;
- __freed_request(q, rw);
+ __freed_request(q, sync);
- if (unlikely(rl->starved[rw ^ 1]))
- __freed_request(q, rw ^ 1);
+ if (unlikely(rl->starved[sync ^ 1]))
+ __freed_request(q, sync ^ 1);
}
-#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
/*
* Get a free request, queue_lock must be held.
* Returns NULL on failure, with queue_lock held.
@@ -747,15 +743,15 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
struct request *rq = NULL;
struct request_list *rl = &q->rq;
struct io_context *ioc = NULL;
- const int rw = rw_flags & 0x01;
+ const bool is_sync = rw_is_sync(rw_flags) != 0;
int may_queue, priv;
may_queue = elv_may_queue(q, rw_flags);
if (may_queue == ELV_MQUEUE_NO)
goto rq_starved;
- if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
- if (rl->count[rw]+1 >= q->nr_requests) {
+ if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
+ if (rl->count[is_sync]+1 >= q->nr_requests) {
ioc = current_io_context(GFP_ATOMIC, q->node);
/*
* The queue will fill after this allocation, so set
@@ -763,9 +759,9 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
* This process will be allowed to complete a batch of
* requests, others will be blocked.
*/
- if (!blk_queue_full(q, rw)) {
+ if (!blk_queue_full(q, is_sync)) {
ioc_set_batching(q, ioc);
- blk_set_queue_full(q, rw);
+ blk_set_queue_full(q, is_sync);
} else {
if (may_queue != ELV_MQUEUE_MUST
&& !ioc_batching(q, ioc)) {
@@ -778,7 +774,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
}
}
}
- blk_set_queue_congested(q, rw);
+ blk_set_queue_congested(q, is_sync);
}
/*
@@ -786,11 +782,11 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
* limit of requests, otherwise we could have thousands of requests
* allocated with any setting of ->nr_requests
*/
- if (rl->count[rw] >= (3 * q->nr_requests / 2))
+ if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
goto out;
- rl->count[rw]++;
- rl->starved[rw] = 0;
+ rl->count[is_sync]++;
+ rl->starved[is_sync] = 0;
priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
if (priv)
@@ -808,7 +804,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
* wait queue, but this is pretty rare.
*/
spin_lock_irq(q->queue_lock);
- freed_request(q, rw, priv);
+ freed_request(q, is_sync, priv);
/*
* in the very unlikely event that allocation failed and no
@@ -818,8 +814,8 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
* rq mempool into READ and WRITE
*/
rq_starved:
- if (unlikely(rl->count[rw] == 0))
- rl->starved[rw] = 1;
+ if (unlikely(rl->count[is_sync] == 0))
+ rl->starved[is_sync] = 1;
goto out;
}
@@ -833,7 +829,7 @@ rq_starved:
if (ioc_batching(q, ioc))
ioc->nr_batch_requests--;
- trace_block_getrq(q, bio, rw);
+ trace_block_getrq(q, bio, rw_flags & 1);
out:
return rq;
}
@@ -847,7 +843,7 @@ out:
static struct request *get_request_wait(struct request_queue *q, int rw_flags,
struct bio *bio)
{
- const int rw = rw_flags & 0x01;
+ const bool is_sync = rw_is_sync(rw_flags) != 0;
struct request *rq;
rq = get_request(q, rw_flags, bio, GFP_NOIO);
@@ -856,10 +852,10 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
struct io_context *ioc;
struct request_list *rl = &q->rq;
- prepare_to_wait_exclusive(&rl->wait[rw], &wait,
+ prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
TASK_UNINTERRUPTIBLE);
- trace_block_sleeprq(q, bio, rw);
+ trace_block_sleeprq(q, bio, rw_flags & 1);
__generic_unplug_device(q);
spin_unlock_irq(q->queue_lock);
@@ -875,7 +871,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
ioc_set_batching(q, ioc);
spin_lock_irq(q->queue_lock);
- finish_wait(&rl->wait[rw], &wait);
+ finish_wait(&rl->wait[is_sync], &wait);
rq = get_request(q, rw_flags, bio, GFP_NOIO);
};
@@ -1066,19 +1062,22 @@ void __blk_put_request(struct request_queue *q, struct request *req)
elv_completed_request(q, req);
+ /* this is a bio leak */
+ WARN_ON(req->bio != NULL);
+
/*
* Request may not have originated from ll_rw_blk. if not,
* it didn't come out of our reserved rq pools
*/
if (req->cmd_flags & REQ_ALLOCED) {
- int rw = rq_data_dir(req);
+ int is_sync = rq_is_sync(req) != 0;
int priv = req->cmd_flags & REQ_ELVPRIV;
BUG_ON(!list_empty(&req->queuelist));
BUG_ON(!hlist_unhashed(&req->hash));
blk_free_request(q, req);
- freed_request(q, rw, priv);
+ freed_request(q, is_sync, priv);
}
}
EXPORT_SYMBOL_GPL(__blk_put_request);
@@ -1129,6 +1128,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
req->cmd_flags |= REQ_UNPLUG;
if (bio_rw_meta(bio))
req->cmd_flags |= REQ_RW_META;
+ if (bio_noidle(bio))
+ req->cmd_flags |= REQ_NOIDLE;
req->errors = 0;
req->hard_sector = req->sector = bio->bi_sector;
@@ -1137,6 +1138,15 @@ void init_request_from_bio(struct request *req, struct bio *bio)
blk_rq_bio_prep(req->q, req, bio);
}
+/*
+ * Only disabling plugging for non-rotational devices if it does tagging
+ * as well, otherwise we do need the proper merging
+ */
+static inline bool queue_should_plug(struct request_queue *q)
+{
+ return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
+}
+
static int __make_request(struct request_queue *q, struct bio *bio)
{
struct request *req;
@@ -1243,11 +1253,11 @@ get_rq:
if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
bio_flagged(bio, BIO_CPU_AFFINE))
req->cpu = blk_cpu_to_group(smp_processor_id());
- if (!blk_queue_nonrot(q) && elv_queue_empty(q))
+ if (queue_should_plug(q) && elv_queue_empty(q))
blk_plug_device(q);
add_request(q, req);
out:
- if (unplug || blk_queue_nonrot(q))
+ if (unplug || !queue_should_plug(q))
__generic_unplug_device(q);
spin_unlock_irq(q->queue_lock);
return 0;
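
[Editor's note] The blk-core.c hunks above re-key the per-queue request_list from data direction (READ/WRITE) to request class (BLK_RW_SYNC/BLK_RW_ASYNC) and index it with a boolean "is_sync". A minimal sketch of that indexing pattern, using toy stand-in types rather than the real kernel structures (illustrative only, not part of the patch; the enum values are assumed to match the bool indexing used above):

    #include <stdbool.h>
    #include <stdio.h>

    enum { BLK_RW_ASYNC = 0, BLK_RW_SYNC = 1 };

    struct toy_request_list {
            int count[2];   /* allocated requests per class */
            int starved[2]; /* set when a class failed to allocate */
    };

    /* loosely mirrors freed_request() above: account, then unstarve the peer class */
    static void toy_freed_request(struct toy_request_list *rl, bool is_sync)
    {
            rl->count[is_sync]--;

            /* the kernel wakes the peer's waitqueue; here we just clear the flag */
            if (rl->starved[!is_sync])
                    rl->starved[!is_sync] = 0;
    }

    int main(void)
    {
            struct toy_request_list rl = {
                    .count   = { [BLK_RW_SYNC] = 3, [BLK_RW_ASYNC] = 1 },
                    .starved = { [BLK_RW_ASYNC] = 1 },
            };

            toy_freed_request(&rl, true);   /* a sync request completes */
            printf("sync=%d async=%d async starved=%d\n",
                   rl.count[BLK_RW_SYNC], rl.count[BLK_RW_ASYNC],
                   rl.starved[BLK_RW_ASYNC]);
            return 0;
    }
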
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5a244f05360..e39cb24b767 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -403,6 +403,8 @@ static int attempt_merge(struct request_queue *q, struct request *req,
if (blk_rq_cpu_valid(next))
req->cpu = next->cpu;
+ /* owner-ship of bio passed from next to req */
+ next->bio = NULL;
__blk_put_request(q, next);
return 1;
}
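
[Editor's note] The "next->bio = NULL" above pairs with the new WARN_ON(req->bio != NULL) in __blk_put_request(): once the merged request hands its bio over, it must no longer claim ownership, or the free path flags a leak. A hedged, self-contained sketch of that hand-off pattern with toy types (not the kernel's struct request/struct bio):

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_bio { int sectors; };

    struct toy_request {
            struct toy_bio *bio;
    };

    /* stands in for __blk_put_request(): complain if a bio would be leaked */
    static void toy_put_request(struct toy_request *req)
    {
            if (req->bio != NULL)
                    fprintf(stderr, "warning: request freed while still owning a bio\n");
            free(req);
    }

    /* stands in for the merge path: req takes over next's bio, next gives it up */
    static void toy_take_bio(struct toy_request *req, struct toy_request *next)
    {
            req->bio = next->bio;
            next->bio = NULL;       /* ownership passed from next to req */
            toy_put_request(next);  /* no warning fires */
    }

    int main(void)
    {
            struct toy_request *req = calloc(1, sizeof(*req));
            struct toy_request *next = calloc(1, sizeof(*next));
            static struct toy_bio bio = { .sectors = 8 };

            next->bio = &bio;
            toy_take_bio(req, next);
            printf("req now owns a bio of %d sectors\n", req->bio->sectors);
            free(req);
            return 0;
    }
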
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index ce0efc6b26d..ee9c2160222 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -64,7 +64,7 @@ static int raise_blk_irq(int cpu, struct request *rq)
data->info = rq;
data->flags = 0;
- __smp_call_function_single(cpu, data);
+ __smp_call_function_single(cpu, data, 0);
return 0;
}
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e29ddfc73cf..3ff9bba3379 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -48,28 +48,28 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
q->nr_requests = nr;
blk_queue_congestion_threshold(q);
- if (rl->count[READ] >= queue_congestion_on_threshold(q))
- blk_set_queue_congested(q, READ);
- else if (rl->count[READ] < queue_congestion_off_threshold(q))
- blk_clear_queue_congested(q, READ);
-
- if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
- blk_set_queue_congested(q, WRITE);
- else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
- blk_clear_queue_congested(q, WRITE);
-
- if (rl->count[READ] >= q->nr_requests) {
- blk_set_queue_full(q, READ);
- } else if (rl->count[READ]+1 <= q->nr_requests) {
- blk_clear_queue_full(q, READ);
- wake_up(&rl->wait[READ]);
+ if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
+ blk_set_queue_congested(q, BLK_RW_SYNC);
+ else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
+ blk_clear_queue_congested(q, BLK_RW_SYNC);
+
+ if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
+ blk_set_queue_congested(q, BLK_RW_ASYNC);
+ else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
+ blk_clear_queue_congested(q, BLK_RW_ASYNC);
+
+ if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
+ blk_set_queue_full(q, BLK_RW_SYNC);
+ } else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+ blk_clear_queue_full(q, BLK_RW_SYNC);
+ wake_up(&rl->wait[BLK_RW_SYNC]);
}
- if (rl->count[WRITE] >= q->nr_requests) {
- blk_set_queue_full(q, WRITE);
- } else if (rl->count[WRITE]+1 <= q->nr_requests) {
- blk_clear_queue_full(q, WRITE);
- wake_up(&rl->wait[WRITE]);
+ if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
+ blk_set_queue_full(q, BLK_RW_ASYNC);
+ } else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+ blk_clear_queue_full(q, BLK_RW_ASYNC);
+ wake_up(&rl->wait[BLK_RW_ASYNC]);
}
spin_unlock_irq(q->queue_lock);
return ret;
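
[Editor's note] The queue_requests_store() hunk above re-applies the congestion and "queue full" state per class whenever nr_requests is retuned, using separate on/off thresholds. A rough illustration of that hysteresis with made-up threshold numbers (the kernel derives its own via blk_queue_congestion_threshold()):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_congestion {
            int on_threshold;   /* mark congested at or above this count */
            int off_threshold;  /* clear congestion only below this count */
            bool congested;
    };

    static void toy_update(struct toy_congestion *c, int count)
    {
            if (count >= c->on_threshold)
                    c->congested = true;
            else if (count < c->off_threshold)
                    c->congested = false;
            /* between off and on: keep the previous state (hysteresis) */
    }

    int main(void)
    {
            struct toy_congestion c = { .on_threshold = 113, .off_threshold = 103 };

            toy_update(&c, 120);
            printf("at 120: %s\n", c.congested ? "congested" : "ok");
            toy_update(&c, 108);
            printf("at 108: %s\n", c.congested ? "congested" : "ok"); /* unchanged */
            toy_update(&c, 90);
            printf("at  90: %s\n", c.congested ? "congested" : "ok");
            return 0;
    }
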
diff --git a/block/blk.h b/block/blk.h
index 0dce92c3749..3ee94358b43 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -102,7 +102,7 @@ static inline int blk_cpu_to_group(int cpu)
const struct cpumask *mask = cpu_coregroup_mask(cpu);
return cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
- return first_cpu(per_cpu(cpu_sibling_map, cpu));
+ return cpumask_first(topology_thread_cpumask(cpu));
#else
return cpu;
#endif
diff --git a/block/bsg.c b/block/bsg.c
index 0ce8806dd0c..206060e795d 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -218,9 +218,6 @@ bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
if (hdr->guard != 'Q')
return -EINVAL;
- if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
- hdr->din_xfer_len > (q->max_sectors << 9))
- return -EIO;
switch (hdr->protocol) {
case BSG_PROTOCOL_SCSI:
@@ -353,6 +350,8 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
struct bsg_command *bc, struct request *rq)
{
+ int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));
+
/*
* add bc command to busy queue and submit rq for io
*/
@@ -368,7 +367,7 @@ static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);
rq->end_io_data = bc;
- blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
+ blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}
static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
@@ -924,6 +923,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct request *rq;
struct bio *bio, *bidi_bio = NULL;
struct sg_io_v4 hdr;
+ int at_head;
u8 sense[SCSI_SENSE_BUFFERSIZE];
if (copy_from_user(&hdr, uarg, sizeof(hdr)))
@@ -936,7 +936,9 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
bio = rq->bio;
if (rq->next_rq)
bidi_bio = rq->next_rq->bio;
- blk_execute_rq(bd->queue, NULL, rq, 0);
+
+ at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
+ blk_execute_rq(bd->queue, NULL, rq, at_head);
ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
if (copy_to_user(uarg, &hdr, sizeof(hdr)))
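
[Editor's note] The two bsg.c call sites above now derive at_head from the BSG_FLAG_Q_AT_TAIL bit instead of hard-coding it, so userspace can ask for tail insertion. A tiny sketch of that mapping (the flag value below is assumed for illustration, not taken from <linux/bsg.h>):

    #include <stdio.h>

    #define BSG_FLAG_Q_AT_TAIL 0x10   /* assumed value for this sketch */

    static int at_head_from_flags(unsigned int flags)
    {
            /* queue at the head unless userspace explicitly asked for the tail */
            return !(flags & BSG_FLAG_Q_AT_TAIL);
    }

    int main(void)
    {
            printf("default:   at_head=%d\n", at_head_from_flags(0));
            printf("Q_AT_TAIL: at_head=%d\n", at_head_from_flags(BSG_FLAG_Q_AT_TAIL));
            return 0;
    }
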
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 664ebfd092e..9e809345f71 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1992,8 +1992,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
}
if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
cfq_slice_expired(cfqd, 1);
- else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
+ else if (sync && !rq_noidle(rq) &&
+ RB_EMPTY_ROOT(&cfqq->sort_list)) {
cfq_arm_slice_timer(cfqd);
+ }
}
if (!cfqd->rq_in_driver)
diff --git a/block/cmd-filter.c b/block/cmd-filter.c
index 504b275e1b9..572bbc2f900 100644
--- a/block/cmd-filter.c
+++ b/block/cmd-filter.c
@@ -22,6 +22,7 @@
#include <linux/spinlock.h>
#include <linux/capability.h>
#include <linux/bitops.h>
+#include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <linux/cdrom.h>
diff --git a/block/elevator.c b/block/elevator.c
index 98259eda0ef..ca6788a0195 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -677,7 +677,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
}
if (unplug_it && blk_queue_plugged(q)) {
- int nrq = q->rq.count[READ] + q->rq.count[WRITE]
+ int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
- q->in_flight;
if (nrq >= q->unplug_thresh)
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index ee9c67d7e1b..626ee274c5c 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -214,21 +214,10 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
return 0;
}
-/*
- * unmap a request that was previously mapped to this sg_io_hdr. handles
- * both sg and non-sg sg_io_hdr.
- */
-static int blk_unmap_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr)
-{
- blk_rq_unmap_user(rq->bio);
- blk_put_request(rq);
- return 0;
-}
-
static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
struct bio *bio)
{
- int r, ret = 0;
+ int ret = 0;
/*
* fill in all the output members
@@ -253,12 +242,10 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
ret = -EFAULT;
}
- rq->bio = bio;
- r = blk_unmap_sghdr_rq(rq, hdr);
- if (ret)
- r = ret;
+ blk_rq_unmap_user(bio);
+ blk_put_request(rq);
- return r;
+ return ret;
}
static int sg_io(struct request_queue *q, struct gendisk *bd_disk,