author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-03-04 08:08:05 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-03-04 08:08:05 -0800
commit     ce932967b9f77c130d4936d1e20d619a628ae08f (patch)
tree       af0274e83664d9563468f0647b178427d3b18560 /block
parent     8727e28ddebb031d80b5e261c98c24f1dcb9a82f (diff)
parent     cc66b4512cae8df4ed1635483210aabf7690ec27 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: fix blkdev_issue_flush() not detecting and passing EOPNOTSUPP back
  block: fix shadowed variable warning in blk-map.c
  block: remove extern on function definition
  cciss: remove READ_AHEAD define and use block layer defaults
  make cdrom.c:check_for_audio_disc() static
  block/genhd.c: proper externs
  unexport blk_rq_map_user_iov
  unexport blk_{get,put}_queue
  block/genhd.c: cleanups
  proper prototype for blk_dev_init()
  block/blk-tag.c should #include "blk.h"
  Fix DMA access of block device in 64-bit kernel on some non-x86 systems with 4GB or upper 4GB memory
  block: separate out padding from alignment
  block: restore the meaning of rq->data_len to the true data length
  resubmit: cciss: procfs updates to display info about many
  splice: only return -EAGAIN if there's hope of more data
  block: fix kernel-docbook parameters and files
Diffstat (limited to 'block')
-rw-r--r--    block/blk-barrier.c     9
-rw-r--r--    block/blk-core.c        7
-rw-r--r--    block/blk-map.c        27
-rw-r--r--    block/blk-merge.c       2
-rw-r--r--    block/blk-settings.c   22
-rw-r--r--    block/blk-tag.c         2
-rw-r--r--    block/blk.h             2
-rw-r--r--    block/bsg.c             8
-rw-r--r--    block/genhd.c          10
-rw-r--r--    block/scsi_ioctl.c      4
10 files changed, 61 insertions, 32 deletions
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 6901eedeffc..55c5f1fc4f1 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -259,8 +259,11 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
static void bio_end_empty_barrier(struct bio *bio, int err)
{
- if (err)
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ }
complete(bio->bi_private);
}
@@ -309,7 +312,9 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
*error_sector = bio->bi_sector;
ret = 0;
- if (!bio_flagged(bio, BIO_UPTODATE))
+ if (bio_flagged(bio, BIO_EOPNOTSUPP))
+ ret = -EOPNOTSUPP;
+ else if (!bio_flagged(bio, BIO_UPTODATE))
ret = -EIO;
bio_put(bio);
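With the hunks above, blkdev_issue_flush() can now return -EOPNOTSUPP instead of folding it into -EIO. A minimal caller-side sketch of how that might be consumed (the function name and the policy of treating an unsupported flush as success are illustrative assumptions, not part of this patch):

/*
 * Hypothetical caller: flush the device's write cache, but treat
 * "flush not supported" as success rather than as an I/O error.
 */
static int example_flush_dev(struct block_device *bdev)
{
	sector_t error_sector;
	int ret;

	ret = blkdev_issue_flush(bdev, &error_sector);
	if (ret == -EOPNOTSUPP)
		return 0;	/* device has no cache flush; nothing to do */
	return ret;		/* 0 on success, -EIO on a real failure */
}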
diff --git a/block/blk-core.c b/block/blk-core.c
index 775c8516abf..2a438a93f72 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -127,7 +127,6 @@ void rq_init(struct request_queue *q, struct request *rq)
rq->nr_hw_segments = 0;
rq->ioprio = 0;
rq->special = NULL;
- rq->raw_data_len = 0;
rq->buffer = NULL;
rq->tag = -1;
rq->errors = 0;
@@ -135,6 +134,7 @@ void rq_init(struct request_queue *q, struct request *rq)
rq->cmd_len = 0;
memset(rq->cmd, 0, sizeof(rq->cmd));
rq->data_len = 0;
+ rq->extra_len = 0;
rq->sense_len = 0;
rq->data = NULL;
rq->sense = NULL;
@@ -424,7 +424,6 @@ void blk_put_queue(struct request_queue *q)
{
kobject_put(&q->kobj);
}
-EXPORT_SYMBOL(blk_put_queue);
void blk_cleanup_queue(struct request_queue *q)
{
@@ -592,7 +591,6 @@ int blk_get_queue(struct request_queue *q)
return 1;
}
-EXPORT_SYMBOL(blk_get_queue);
static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
@@ -1768,6 +1766,7 @@ static inline void __end_request(struct request *rq, int uptodate,
/**
* blk_rq_bytes - Returns bytes left to complete in the entire request
+ * @rq: the request being processed
**/
unsigned int blk_rq_bytes(struct request *rq)
{
@@ -1780,6 +1779,7 @@ EXPORT_SYMBOL_GPL(blk_rq_bytes);
/**
* blk_rq_cur_bytes - Returns bytes left to complete in the current segment
+ * @rq: the request being processed
**/
unsigned int blk_rq_cur_bytes(struct request *rq)
{
@@ -2016,7 +2016,6 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
rq->hard_cur_sectors = rq->current_nr_sectors;
rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
rq->buffer = bio_data(bio);
- rq->raw_data_len = bio->bi_size;
rq->data_len = bio->bi_size;
rq->bio = rq->biotail = bio;
diff --git a/block/blk-map.c b/block/blk-map.c
index 09f7fd0bcb7..c07d9c8317f 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -19,7 +19,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
rq->biotail->bi_next = bio;
rq->biotail = bio;
- rq->raw_data_len += bio->bi_size;
rq->data_len += bio->bi_size;
}
return 0;
@@ -44,6 +43,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
void __user *ubuf, unsigned int len)
{
unsigned long uaddr;
+ unsigned int alignment;
struct bio *bio, *orig_bio;
int reading, ret;
@@ -54,8 +54,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
* direct dma. else, set up kernel bounce buffers
*/
uaddr = (unsigned long) ubuf;
- if (!(uaddr & queue_dma_alignment(q)) &&
- !(len & queue_dma_alignment(q)))
+ alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+ if (!(uaddr & alignment) && !(len & alignment))
bio = bio_map_user(q, NULL, uaddr, len, reading);
else
bio = bio_copy_user(q, uaddr, len, reading);
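For illustration (mask values invented for the example, not taken from the patch): with queue_dma_alignment(q) = 0x3 and q->dma_pad_mask = 0x7, the combined mask is 0x7, so only a buffer whose address and length are both multiples of 8 is mapped directly with bio_map_user(); anything else falls back to the bounce path via bio_copy_user().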
@@ -142,20 +142,22 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
/*
* __blk_rq_map_user() copies the buffers if starting address
- * or length isn't aligned. As the copied buffer is always
- * page aligned, we know that there's enough room for padding.
- * Extend the last bio and update rq->data_len accordingly.
+ * or length isn't aligned to dma_pad_mask. As the copied
+ * buffer is always page aligned, we know that there's enough
+ * room for padding. Extend the last bio and update
+ * rq->data_len accordingly.
*
* On unmap, bio_uncopy_user() will use unmodified
* bio_map_data pointed to by bio->bi_private.
*/
- if (len & queue_dma_alignment(q)) {
- unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
- struct bio *bio = rq->biotail;
+ if (len & q->dma_pad_mask) {
+ unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
+ struct bio *tail = rq->biotail;
- bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
- bio->bi_size += pad_len;
- rq->data_len += pad_len;
+ tail->bi_io_vec[tail->bi_vcnt - 1].bv_len += pad_len;
+ tail->bi_size += pad_len;
+
+ rq->extra_len += pad_len;
}
rq->buffer = rq->data = NULL;
@@ -215,7 +217,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
rq->buffer = rq->data = NULL;
return 0;
}
-EXPORT_SYMBOL(blk_rq_map_user_iov);
/**
* blk_rq_unmap_user - unmap a request with user data
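The pad_len computation in the hunk above rounds len up to the next (dma_pad_mask + 1) boundary. A small standalone illustration of that arithmetic (the helper and the sample values are invented for the example; they are not part of the kernel code):

#include <assert.h>

/* Bytes needed to pad `len` up to the boundary implied by `pad_mask`
 * (pad_mask is boundary - 1), mirroring (q->dma_pad_mask & ~len) + 1. */
static unsigned int pad_bytes(unsigned int len, unsigned int pad_mask)
{
	if (!(len & pad_mask))
		return 0;			/* already aligned, branch not taken */
	return (pad_mask & ~len) + 1;
}

int main(void)
{
	assert(pad_bytes(13, 3) == 3);		/* 13 -> 16 with a 4-byte pad mask */
	assert(pad_bytes(16, 3) == 0);		/* already a multiple of 4 */
	assert(pad_bytes(510, 511) == 2);	/* 510 -> 512 with a 512-byte pad mask */
	return 0;
}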
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 7506c4fe026..0f58616bcd7 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -231,7 +231,7 @@ new_segment:
((unsigned long)q->dma_drain_buffer) &
(PAGE_SIZE - 1));
nsegs++;
- rq->data_len += q->dma_drain_size;
+ rq->extra_len += q->dma_drain_size;
}
if (sg)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 9a8ffdd0ce3..1344a0ea5cc 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -140,7 +140,7 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
/* Assume anything <= 4GB can be handled by IOMMU.
Actually some IOMMUs can handle everything, but I don't
know of a way to test this here. */
- if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+ if (b_pfn <= (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
dma = 1;
q->bounce_pfn = max_low_pfn;
#else
@@ -293,8 +293,24 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
EXPORT_SYMBOL(blk_queue_stack_limits);
/**
- * blk_queue_dma_drain - Set up a drain buffer for excess dma.
+ * blk_queue_dma_pad - set pad mask
+ * @q: the request queue for the device
+ * @mask: pad mask
+ *
+ * Set pad mask. Direct IO requests are padded to the mask specified.
*
+ * Appending pad buffer to a request modifies ->data_len such that it
+ * includes the pad buffer. The original requested data length can be
+ * obtained using blk_rq_raw_data_len().
+ **/
+void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
+{
+ q->dma_pad_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_dma_pad);
+
+/**
+ * blk_queue_dma_drain - Set up a drain buffer for excess dma.
* @q: the request queue for the device
* @dma_drain_needed: fn which returns non-zero if drain is necessary
* @buf: physically contiguous buffer
@@ -316,7 +332,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
* device can support otherwise there won't be room for the drain
* buffer.
*/
-extern int blk_queue_dma_drain(struct request_queue *q,
+int blk_queue_dma_drain(struct request_queue *q,
dma_drain_needed_fn *dma_drain_needed,
void *buf, unsigned int size)
{
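The newly added blk_queue_dma_pad() is meant to be called once while a driver configures its queue. A hedged usage sketch (the function name and the 4-byte boundary are arbitrary choices for the example, not taken from this patch):

/* Hypothetical queue setup: ask the block layer to pad direct-IO
 * transfers on this queue out to a 4-byte boundary. */
static void example_init_queue(struct request_queue *q)
{
	blk_queue_dma_pad(q, 4 - 1);		/* mask 0x3 -> pad to 4 bytes */
}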
diff --git a/block/blk-tag.c b/block/blk-tag.c
index a8c37d4bbb3..4780a46ce23 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -6,6 +6,8 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include "blk.h"
+
/**
* blk_queue_find_tag - find a request by its tag and queue
* @q: The request queue for the device
diff --git a/block/blk.h b/block/blk.h
index ec898dd0c65..ec9120fb789 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -32,6 +32,8 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect);
void blk_queue_congestion_threshold(struct request_queue *q);
+int blk_dev_init(void);
+
/*
* Return the threshold (number of used requests) at which the queue is
* considered to be congested. It include a little hysteresis to keep the
diff --git a/block/bsg.c b/block/bsg.c
index 7f3c09549e4..8917c5174dc 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -437,14 +437,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
}
if (rq->next_rq) {
- hdr->dout_resid = rq->raw_data_len;
- hdr->din_resid = rq->next_rq->raw_data_len;
+ hdr->dout_resid = rq->data_len;
+ hdr->din_resid = rq->next_rq->data_len;
blk_rq_unmap_user(bidi_bio);
blk_put_request(rq->next_rq);
} else if (rq_data_dir(rq) == READ)
- hdr->din_resid = rq->raw_data_len;
+ hdr->din_resid = rq->data_len;
else
- hdr->dout_resid = rq->raw_data_len;
+ hdr->dout_resid = rq->data_len;
/*
* If the request generated a negative error number, return it
diff --git a/block/genhd.c b/block/genhd.c
index 53f2238e69c..c44527d16c5 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -17,11 +17,15 @@
#include <linux/buffer_head.h>
#include <linux/mutex.h>
+#include "blk.h"
+
static DEFINE_MUTEX(block_class_lock);
#ifndef CONFIG_SYSFS_DEPRECATED
struct kobject *block_depr;
#endif
+static struct device_type disk_type;
+
/*
* Can be deleted altogether. Later.
*
@@ -346,8 +350,6 @@ const struct seq_operations partitions_op = {
#endif
-extern int blk_dev_init(void);
-
static struct kobject *base_probe(dev_t devt, int *part, void *data)
{
if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
@@ -502,7 +504,7 @@ struct class block_class = {
.name = "block",
};
-struct device_type disk_type = {
+static struct device_type disk_type = {
.name = "disk",
.groups = disk_attr_groups,
.release = disk_release,
@@ -632,12 +634,14 @@ static void media_change_notify_thread(struct work_struct *work)
put_device(gd->driverfs_dev);
}
+#if 0
void genhd_media_change_notify(struct gendisk *disk)
{
get_device(disk->driverfs_dev);
schedule_work(&disk->async_notify);
}
EXPORT_SYMBOL_GPL(genhd_media_change_notify);
+#endif /* 0 */
dev_t blk_lookup_devt(const char *name)
{
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index e993cac4911..a2c3a936ebf 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -266,7 +266,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
hdr->info = 0;
if (hdr->masked_status || hdr->host_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK;
- hdr->resid = rq->raw_data_len;
+ hdr->resid = rq->data_len;
hdr->sb_len_wr = 0;
if (rq->sense_len && hdr->sbp) {
@@ -528,8 +528,8 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
rq = blk_get_request(q, WRITE, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->data = NULL;
- rq->raw_data_len = 0;
rq->data_len = 0;
+ rq->extra_len = 0;
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
memset(rq->cmd, 0, sizeof(rq->cmd));
rq->cmd[0] = cmd;