author		Dmitry Torokhov <dtor@insightbb.com>	2007-02-10 01:26:32 -0500
committer	Dmitry Torokhov <dtor@insightbb.com>	2007-02-10 01:26:32 -0500
commit		b22364c8eec89e6b0c081a237f3b6348df87796f (patch)
tree		233a923281fb640106465d076997ff511efb6edf /block
parent		2c8dc071517ec2843869024dc82be2e246f41064 (diff)
parent		66efc5a7e3061c3597ac43a8bb1026488d57e66b (diff)
Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'block')
-rw-r--r--	block/as-iosched.c	|  15
-rw-r--r--	block/cfq-iosched.c	|  46
-rw-r--r--	block/elevator.c	|  37
-rw-r--r--	block/genhd.c		|  31
-rw-r--r--	block/ioctl.c		|   6
-rw-r--r--	block/ll_rw_blk.c	| 157
-rw-r--r--	block/scsi_ioctl.c	|  16
7 files changed, 207 insertions, 101 deletions
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 5934c4bfd52..ef126277b4b 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1462,20 +1462,7 @@ static struct elevator_type iosched_as = {
 
 static int __init as_init(void)
 {
-	int ret;
-
-	ret = elv_register(&iosched_as);
-	if (!ret) {
-		/*
-		 * don't allow AS to get unregistered, since we would have
-		 * to browse all tasks in the system and release their
-		 * as_io_context first
-		 */
-		__module_get(THIS_MODULE);
-		return 0;
-	}
-
-	return ret;
+	return elv_register(&iosched_as);
 }
 
 static void __exit as_exit(void)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 78c6b312bd3..07b70624377 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -219,9 +219,12 @@ static int cfq_queue_empty(request_queue_t *q)
 	return !cfqd->busy_queues;
 }
 
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
 {
-	if (rw == READ || rw == WRITE_SYNC)
+	/*
+	 * Use the per-process queue, for read requests and syncronous writes
+	 */
+	if (!(rw & REQ_RW) || is_sync)
 		return task->pid;
 
 	return CFQ_KEY_ASYNC;
@@ -473,7 +476,7 @@ static struct request *
 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 {
 	struct task_struct *tsk = current;
-	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
+	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
 	struct cfq_queue *cfqq;
 
 	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
@@ -565,6 +568,33 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
 	cfq_remove_request(next);
 }
 
+static int cfq_allow_merge(request_queue_t *q, struct request *rq,
+			   struct bio *bio)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	const int rw = bio_data_dir(bio);
+	struct cfq_queue *cfqq;
+	pid_t key;
+
+	/*
+	 * Disallow merge of a sync bio into an async request.
+	 */
+	if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
+		return 0;
+
+	/*
+	 * Lookup the cfqq that this bio will be queued with. Allow
+	 * merge only if rq is queued there.
+	 */
+	key = cfq_queue_pid(current, rw, bio_sync(bio));
+	cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
+
+	if (cfqq == RQ_CFQQ(rq))
+		return 1;
+
+	return 0;
+}
+
 static inline void
 __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
@@ -1748,6 +1778,9 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct task_struct *tsk = current;
 	struct cfq_queue *cfqq;
+	unsigned int key;
+
+	key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
 
 	/*
 	 * don't force setup of a queue from here, as a call to may_queue
@@ -1755,7 +1788,7 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 	 * so just lookup a possibly existing queue, or return 'may queue'
 	 * if that fails
 	 */
-	cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
+	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
 	if (cfqq) {
 		cfq_init_prio_data(cfqq);
 		cfq_prio_boost(cfqq);
@@ -1798,10 +1831,10 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
 	struct task_struct *tsk = current;
 	struct cfq_io_context *cic;
 	const int rw = rq_data_dir(rq);
-	pid_t key = cfq_queue_pid(tsk, rw);
+	const int is_sync = rq_is_sync(rq);
+	pid_t key = cfq_queue_pid(tsk, rw, is_sync);
 	struct cfq_queue *cfqq;
 	unsigned long flags;
-	int is_sync = key != CFQ_KEY_ASYNC;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
@@ -2119,6 +2152,7 @@ static struct elevator_type iosched_cfq = {
 		.elevator_merge_fn =		cfq_merge,
 		.elevator_merged_fn =		cfq_merged_request,
 		.elevator_merge_req_fn =	cfq_merged_requests,
+		.elevator_allow_merge_fn =	cfq_allow_merge,
 		.elevator_dispatch_fn =		cfq_dispatch_requests,
 		.elevator_add_req_fn =		cfq_insert_request,
 		.elevator_activate_req_fn =	cfq_activate_request,
diff --git a/block/elevator.c b/block/elevator.c
index c0063f345c5..f6dafa8c7c4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -51,6 +51,21 @@ static const int elv_hash_shift = 6;
 #define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
 
 /*
+ * Query io scheduler to see if the current process issuing bio may be
+ * merged with rq.
+ */
+static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
+{
+	request_queue_t *q = rq->q;
+	elevator_t *e = q->elevator;
+
+	if (e->ops->elevator_allow_merge_fn)
+		return e->ops->elevator_allow_merge_fn(q, rq, bio);
+
+	return 1;
+}
+
+/*
  * can we safely merge with this request?
  */
 inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
@@ -65,12 +80,15 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 		return 0;
 
 	/*
-	 * same device and no special stuff set, merge is ok
+	 * must be same device and not a special request
 	 */
-	if (rq->rq_disk == bio->bi_bdev->bd_disk && !rq->special)
-		return 1;
+	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+		return 0;
 
-	return 0;
+	if (!elv_iosched_allow_merge(rq, bio))
+		return 0;
+
+	return 1;
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
@@ -572,6 +590,12 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 		 */
 		rq->cmd_flags |= REQ_SOFTBARRIER;
 
+		/*
+		 * Most requeues happen because of a busy condition,
+		 * don't force unplug of the queue for that case.
+		 */
+		unplug_it = 0;
+
 		if (q->ordseq == 0) {
 			list_add(&rq->queuelist, &q->queue_head);
 			break;
@@ -586,11 +610,6 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 		}
 
 		list_add_tail(&rq->queuelist, pos);
-		/*
-		 * most requeues happen because of a busy condition, don't
-		 * force unplug of the queue for that case.
-		 */
-		unplug_it = 0;
 		break;
 
 	default:
diff --git a/block/genhd.c b/block/genhd.c
index 653919d50cd..457fdac4c17 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -417,6 +417,34 @@ static struct disk_attribute disk_attr_stat = {
 	.show	= disk_stats_read
 };
 
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+
+static ssize_t disk_fail_store(struct gendisk * disk,
+			       const char *buf, size_t count)
+{
+	int i;
+
+	if (count > 0 && sscanf(buf, "%d", &i) > 0) {
+		if (i == 0)
+			disk->flags &= ~GENHD_FL_FAIL;
+		else
+			disk->flags |= GENHD_FL_FAIL;
+	}
+
+	return count;
+}
+static ssize_t disk_fail_read(struct gendisk * disk, char *page)
+{
+	return sprintf(page, "%d\n", disk->flags & GENHD_FL_FAIL ? 1 : 0);
+}
+static struct disk_attribute disk_attr_fail = {
+	.attr = {.name = "make-it-fail", .mode = S_IRUGO | S_IWUSR },
+	.store	= disk_fail_store,
+	.show	= disk_fail_read
+};
+
+#endif
+
 static struct attribute * default_attrs[] = {
 	&disk_attr_uevent.attr,
 	&disk_attr_dev.attr,
@@ -424,6 +452,9 @@ static struct attribute * default_attrs[] = {
 	&disk_attr_removable.attr,
 	&disk_attr_size.attr,
 	&disk_attr_stat.attr,
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+	&disk_attr_fail.attr,
+#endif
 	NULL,
 };
diff --git a/block/ioctl.c b/block/ioctl.c
index 58aab630dfc..f6962b64660 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -72,7 +72,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
 			bdevp = bdget_disk(disk, part);
 			if (!bdevp)
 				return -ENOMEM;
-			mutex_lock_nested(&bdevp->bd_mutex, BD_MUTEX_PARTITION);
+			mutex_lock(&bdevp->bd_mutex);
 			if (bdevp->bd_openers) {
 				mutex_unlock(&bdevp->bd_mutex);
 				bdput(bdevp);
@@ -82,7 +82,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
 			fsync_bdev(bdevp);
 			invalidate_bdev(bdevp, 0);
 
-			mutex_lock_nested(&bdev->bd_mutex, BD_MUTEX_WHOLE);
+			mutex_lock(&bdev->bd_mutex);
 			delete_partition(disk, part);
 			mutex_unlock(&bdev->bd_mutex);
 			mutex_unlock(&bdevp->bd_mutex);
@@ -290,7 +290,7 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
    ENOIOCTLCMD for unknown ioctls. */
 long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 {
-	struct block_device *bdev = file->f_dentry->d_inode->i_bdev;
+	struct block_device *bdev = file->f_path.dentry->d_inode->i_bdev;
 	struct gendisk *disk = bdev->bd_disk;
 	int ret = -ENOIOCTLCMD;
 	if (disk->fops->compat_ioctl) {
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 31512cd9f3a..38c293b987b 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -25,9 +25,11 @@
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
+#include <linux/task_io_accounting_ops.h>
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
 #include <linux/blktrace_api.h>
+#include <linux/fault-inject.h>
 
 /*
  * for max sense size
@@ -127,13 +129,6 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
-void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
-{
-	q->activity_fn = fn;
-	q->activity_data = data;
-}
-EXPORT_SYMBOL(blk_queue_activity_fn);
-
 /**
  * blk_queue_prep_rq - set a prepare_request function for queue
  * @q: queue
@@ -236,8 +231,6 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
 	 * by default assume old behaviour and bounce for any highmem page
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-
-	blk_queue_activity_fn(q, NULL, NULL);
 }
 
 EXPORT_SYMBOL(blk_queue_make_request);
@@ -1271,7 +1264,7 @@ new_hw_segment:
 		bio->bi_hw_segments = nr_hw_segs;
 	bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
-
+EXPORT_SYMBOL(blk_recount_segments);
 
 static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
				   struct bio *nxt)
@@ -1412,8 +1405,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 	return 1;
 }
 
-static int ll_back_merge_fn(request_queue_t *q, struct request *req,
-			    struct bio *bio)
+int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
 {
 	unsigned short max_sectors;
 	int len;
@@ -1449,6 +1441,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 
 	return ll_new_hw_segment(q, req, bio);
 }
+EXPORT_SYMBOL(ll_back_merge_fn);
 
 static int ll_front_merge_fn(request_queue_t *q, struct request *req,
			     struct bio *bio)
@@ -1919,9 +1912,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	}
 
 	q->request_fn		= rfn;
-	q->back_merge_fn	= ll_back_merge_fn;
-	q->front_merge_fn	= ll_front_merge_fn;
-	q->merge_requests_fn	= ll_merge_requests_fn;
 	q->prep_rq_fn		= NULL;
 	q->unplug_fn		= generic_unplug_device;
 	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
@@ -2065,15 +2055,16 @@ static void freed_request(request_queue_t *q, int rw, int priv)
 * Returns NULL on failure, with queue_lock held.
 * Returns !NULL on success, with queue_lock *not held*.
 */
-static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
-				   gfp_t gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw_flags,
+				   struct bio *bio, gfp_t gfp_mask)
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
+	const int rw = rw_flags & 0x01;
 	int may_queue, priv;
 
-	may_queue = elv_may_queue(q, rw);
+	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
 
@@ -2121,7 +2112,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 
 	spin_unlock_irq(q->queue_lock);
 
-	rq = blk_alloc_request(q, rw, priv, gfp_mask);
+	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
 	if (unlikely(!rq)) {
 		/*
		 * Allocation failed presumably due to memory. Undo anything
@@ -2169,12 +2160,13 @@ out:
 *
 * Called with q->queue_lock held, and returns with it unlocked.
 */
-static struct request *get_request_wait(request_queue_t *q, int rw,
+static struct request *get_request_wait(request_queue_t *q, int rw_flags,
					struct bio *bio)
 {
+	const int rw = rw_flags & 0x01;
 	struct request *rq;
 
-	rq = get_request(q, rw, bio, GFP_NOIO);
+	rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	while (!rq) {
 		DEFINE_WAIT(wait);
 		struct request_list *rl = &q->rq;
@@ -2182,7 +2174,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
				TASK_UNINTERRUPTIBLE);
 
-		rq = get_request(q, rw, bio, GFP_NOIO);
+		rq = get_request(q, rw_flags, bio, GFP_NOIO);
 
 		if (!rq) {
 			struct io_context *ioc;
@@ -2355,40 +2347,29 @@ static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
 
-	if (IS_ERR(bio)) {
+	if (IS_ERR(bio))
 		return PTR_ERR(bio);
-	}
 
 	orig_bio = bio;
 	blk_queue_bounce(q, &bio);
+
 	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
 	bio_get(bio);
 
-	/*
-	 * for most (all? don't know of any) queues we could
-	 * skip grabbing the queue lock here. only drivers with
-	 * funky private ->back_merge_fn() function could be
-	 * problematic.
-	 */
-	spin_lock_irq(q->queue_lock);
 	if (!rq->bio)
 		blk_rq_bio_prep(q, rq, bio);
-	else if (!q->back_merge_fn(q, rq, bio)) {
+	else if (!ll_back_merge_fn(q, rq, bio)) {
 		ret = -EINVAL;
-		spin_unlock_irq(q->queue_lock);
 		goto unmap_bio;
 	} else {
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
-
-		rq->nr_sectors += bio_sectors(bio);
-		rq->hard_nr_sectors = rq->nr_sectors;
 		rq->data_len += bio->bi_size;
 	}
-	spin_unlock_irq(q->queue_lock);
 
 	return bio->bi_size;
 
@@ -2424,6 +2405,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
		    unsigned long len)
 {
 	unsigned long bytes_read = 0;
+	struct bio *bio = NULL;
 	int ret;
 
 	if (len > (q->max_hw_sectors << 9))
@@ -2450,6 +2432,8 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
 		if (ret < 0)
 			goto unmap_rq;
+		if (!bio)
+			bio = rq->bio;
 		bytes_read += ret;
 		ubuf += ret;
 	}
@@ -2457,7 +2441,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 	rq->buffer = rq->data = NULL;
 	return 0;
 unmap_rq:
-	blk_rq_unmap_user(rq);
+	blk_rq_unmap_user(bio);
 	return ret;
 }
 
@@ -2469,6 +2453,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
 * @rq: request to map data to
 * @iov: pointer to the iovec
 * @iov_count: number of elements in the iovec
+ * @len: I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
@@ -2514,27 +2499,33 @@ EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
 * blk_rq_unmap_user - unmap a request with user data
- * @rq:	rq to be unmapped
+ * @bio:	start of bio list
 *
 * Description:
- *    Unmap a rq previously mapped by blk_rq_map_user().
- *    rq->bio must be set to the original head of the request.
+ *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
+ *    supply the original rq->bio from the blk_rq_map_user() return, since
+ *    the io completion may have changed rq->bio.
 */
-int blk_rq_unmap_user(struct request *rq)
+int blk_rq_unmap_user(struct bio *bio)
 {
-	struct bio *bio, *mapped_bio;
+	struct bio *mapped_bio;
+	int ret = 0, ret2;
 
-	while ((bio = rq->bio)) {
-		if (bio_flagged(bio, BIO_BOUNCED))
+	while (bio) {
+		mapped_bio = bio;
+		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;
-		else
-			mapped_bio = bio;
 
-		__blk_rq_unmap_user(mapped_bio);
-		rq->bio = bio->bi_next;
-		bio_put(bio);
+		ret2 = __blk_rq_unmap_user(mapped_bio);
+		if (ret2 && !ret)
+			ret = ret2;
+
+		mapped_bio = bio;
+		bio = bio->bi_next;
+		bio_put(mapped_bio);
 	}
-	return 0;
+
+	return ret;
 }
 
 EXPORT_SYMBOL(blk_rq_unmap_user);
@@ -2694,9 +2685,6 @@ static inline void add_request(request_queue_t * q, struct request * req)
 {
 	drive_stat_acct(req, req->nr_sectors, 1);
 
-	if (q->activity_fn)
-		q->activity_fn(q->activity_data, rq_data_dir(req));
-
 	/*
	 * elevator indicated where it wants this request to be
	 * inserted at elevator_merge time
@@ -2830,7 +2818,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
	 * will have updated segment counts, update sector
	 * counts here.
	 */
-	if (!q->merge_requests_fn(q, req, next))
+	if (!ll_merge_requests_fn(q, req, next))
		return 0;
 
	/*
@@ -2920,6 +2908,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	int el_ret, nr_sectors, barrier, err;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
+	int rw_flags;
 
 	nr_sectors = bio_sectors(bio);
 
@@ -2946,7 +2935,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_BACK_MERGE:
			BUG_ON(!rq_mergeable(req));
 
-			if (!q->back_merge_fn(q, req, bio))
+			if (!ll_back_merge_fn(q, req, bio))
				break;
 
			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
@@ -2963,7 +2952,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_FRONT_MERGE:
			BUG_ON(!rq_mergeable(req));
 
-			if (!q->front_merge_fn(q, req, bio))
+			if (!ll_front_merge_fn(q, req, bio))
				break;
 
			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
@@ -2994,10 +2983,19 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 
 get_rq:
	/*
+	 * This sync check and mask will be re-done in init_request_from_bio(),
+	 * but we need to set it earlier to expose the sync flag to the
+	 * rq allocator and io schedulers.
+	 */
+	rw_flags = bio_data_dir(bio);
+	if (sync)
+		rw_flags |= REQ_RW_SYNC;
+
+	/*
	 * Grab a free request. This is might sleep but can not fail.
	 * Returns with the queue unlocked.
	 */
-	req = get_request_wait(q, bio_data_dir(bio), bio);
+	req = get_request_wait(q, rw_flags, bio);
 
	/*
	 * After dropping the lock and possibly sleeping here, our request
@@ -3056,6 +3054,42 @@ static void handle_bad_sector(struct bio *bio)
	set_bit(BIO_EOF, &bio->bi_flags);
 }
 
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+
+static DECLARE_FAULT_ATTR(fail_make_request);
+
+static int __init setup_fail_make_request(char *str)
+{
+	return setup_fault_attr(&fail_make_request, str);
+}
+__setup("fail_make_request=", setup_fail_make_request);
+
+static int should_fail_request(struct bio *bio)
+{
+	if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
+	    (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
+		return should_fail(&fail_make_request, bio->bi_size);
+
+	return 0;
+}
+
+static int __init fail_make_request_debugfs(void)
+{
+	return init_fault_attr_dentries(&fail_make_request,
+					"fail_make_request");
+}
+
+late_initcall(fail_make_request_debugfs);
+
+#else /* CONFIG_FAIL_MAKE_REQUEST */
+
+static inline int should_fail_request(struct bio *bio)
+{
+	return 0;
+}
+
+#endif /* CONFIG_FAIL_MAKE_REQUEST */
+
 /**
 * generic_make_request: hand a buffer to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
@@ -3141,6 +3175,9 @@ end_io:
		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
			goto end_io;
 
+		if (should_fail_request(bio))
+			goto end_io;
+
		/*
		 * If this device has partitions, remap block n
		 * of partition p to block n+start(p) of the disk.
@@ -3195,10 +3232,12 @@ void submit_bio(int rw, struct bio *bio)
	BIO_BUG_ON(!bio->bi_size);
	BIO_BUG_ON(!bio->bi_io_vec);
	bio->bi_rw |= rw;
-	if (rw & WRITE)
+	if (rw & WRITE) {
		count_vm_events(PGPGOUT, count);
-	else
+	} else {
+		task_io_account_read(bio->bi_size);
		count_vm_events(PGPGIN, count);
+	}
 
	if (unlikely(block_dump)) {
		char b[BDEVNAME_SIZE];
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index b3e210723a7..65c6a3cba6d 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -223,11 +223,12 @@ static int verify_command(struct file *file, unsigned char *cmd)
 static int sg_io(struct file *file, request_queue_t *q,
		struct gendisk *bd_disk, struct sg_io_hdr *hdr)
 {
-	unsigned long start_time;
+	unsigned long start_time, timeout;
 	int writing = 0, ret = 0;
 	struct request *rq;
 	char sense[SCSI_SENSE_BUFFERSIZE];
 	unsigned char cmd[BLK_MAX_CDB];
+	struct bio *bio;
 
 	if (hdr->interface_id != 'S')
 		return -EINVAL;
@@ -270,14 +271,8 @@ static int sg_io(struct file *file, request_queue_t *q,
 
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 
-	/*
-	 * bounce this after holding a reference to the original bio, it's
-	 * needed for proper unmapping
-	 */
-	if (rq->bio)
-		blk_queue_bounce(q, &rq->bio);
-
-	rq->timeout = jiffies_to_msecs(hdr->timeout);
+	timeout = msecs_to_jiffies(hdr->timeout);
+	rq->timeout = (timeout < INT_MAX) ? timeout : INT_MAX;
 	if (!rq->timeout)
 		rq->timeout = q->sg_timeout;
 	if (!rq->timeout)
@@ -308,6 +303,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 	if (ret)
		goto out;
 
+	bio = rq->bio;
 	rq->retries = 0;
 
 	start_time = jiffies;
@@ -338,7 +334,7 @@ static int sg_io(struct file *file, request_queue_t *q,
		hdr->sb_len_wr = len;
	}
 
-	if (blk_rq_unmap_user(rq))
+	if (blk_rq_unmap_user(bio))
		ret = -EFAULT;
 
	/* may not have succeeded, but output values written to control