path: root/drivers
author    Linus Torvalds <torvalds@linux-foundation.org> 2009-02-26 10:36:35 -0800
committer Linus Torvalds <torvalds@linux-foundation.org> 2009-02-26 10:36:35 -0800
commit    babb29b0a33db53704594f63f271ca6c049eddc6 (patch)
tree      6d03412fc9315dd1bd221ee1ab4e5f67768a9904 /drivers
parent    6fc79d40d30b1595261607a155f039fb7b2d5606 (diff)
parent    9e973e64ac6dc504e6447d52193d4fff1a670156 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  xen/blkfront: use blk_rq_map_sg to generate ring entries
  block: reduce stack footprint of blk_recount_segments()
  cciss: shorten 30s timeout on controller reset
  block: add documentation for register_blkdev()
  block: fix bogus gcc warning for uninitialized var usage
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/cciss.c        | 10
-rw-r--r--  drivers/block/xen-blkfront.c | 30
2 files changed, 22 insertions(+), 18 deletions(-)
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index d2cb67b6117..b5a06111463 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3611,11 +3611,15 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
schedule_timeout_uninterruptible(30*HZ);
/* Now try to get the controller to respond to a no-op */
- for (i=0; i<12; i++) {
+ for (i=0; i<30; i++) {
if (cciss_noop(pdev) == 0)
break;
- else
- printk("cciss: no-op failed%s\n", (i < 11 ? "; re-trying" : ""));
+
+ schedule_timeout_uninterruptible(HZ);
+ }
+ if (i == 30) {
+ printk(KERN_ERR "cciss: controller seems dead\n");
+ return -EBUSY;
}
}
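
The cciss hunk above replaces twelve back-to-back no-op attempts with a once-per-second poll for up to 30 seconds, failing the probe with -EBUSY if the controller never responds. Below is a minimal, self-contained userspace sketch of that retry pattern, not the driver code itself: device_noop() and sleep(1) are hypothetical stand-ins for cciss_noop(pdev) and schedule_timeout_uninterruptible(HZ).

/*
 * Sketch of the "poll once per second, give up after 30 tries" pattern.
 * device_noop() simulates a controller that starts answering after a
 * few seconds; a real driver would issue a no-op command instead.
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int device_noop(void)
{
	static int calls;

	return (++calls >= 3) ? 0 : -1;	/* "responds" on the third poll */
}

static int wait_for_controller(void)
{
	int i;

	for (i = 0; i < 30; i++) {
		if (device_noop() == 0)
			return 0;	/* controller responded */
		sleep(1);		/* back off for one second */
	}
	fprintf(stderr, "controller seems dead\n");
	return -EBUSY;
}

int main(void)
{
	return wait_for_controller() ? 1 : 0;
}
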
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 918ef725de4..b6c8ce25435 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -40,6 +40,7 @@
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
+#include <linux/scatterlist.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
@@ -82,6 +83,7 @@ struct blkfront_info
enum blkif_state connected;
int ring_ref;
struct blkif_front_ring ring;
+ struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
unsigned int evtchn, irq;
struct request_queue *rq;
struct work_struct work;
@@ -204,12 +206,11 @@ static int blkif_queue_request(struct request *req)
struct blkfront_info *info = req->rq_disk->private_data;
unsigned long buffer_mfn;
struct blkif_request *ring_req;
- struct req_iterator iter;
- struct bio_vec *bvec;
unsigned long id;
unsigned int fsect, lsect;
- int ref;
+ int i, ref;
grant_ref_t gref_head;
+ struct scatterlist *sg;
if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
return 1;
@@ -238,12 +239,13 @@ static int blkif_queue_request(struct request *req)
if (blk_barrier_rq(req))
ring_req->operation = BLKIF_OP_WRITE_BARRIER;
- ring_req->nr_segments = 0;
- rq_for_each_segment(bvec, req, iter) {
- BUG_ON(ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST);
- buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
- fsect = bvec->bv_offset >> 9;
- lsect = fsect + (bvec->bv_len >> 9) - 1;
+ ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
+ BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
+ for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
+ buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
+ fsect = sg->offset >> 9;
+ lsect = fsect + (sg->length >> 9) - 1;
/* install a grant reference. */
ref = gnttab_claim_grant_reference(&gref_head);
BUG_ON(ref == -ENOSPC);
@@ -254,16 +256,12 @@ static int blkif_queue_request(struct request *req)
buffer_mfn,
rq_data_dir(req) );
- info->shadow[id].frame[ring_req->nr_segments] =
- mfn_to_pfn(buffer_mfn);
-
- ring_req->seg[ring_req->nr_segments] =
+ info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
+ ring_req->seg[i] =
(struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
.last_sect = lsect };
-
- ring_req->nr_segments++;
}
info->ring.req_prod_pvt++;
@@ -622,6 +620,8 @@ static int setup_blkring(struct xenbus_device *dev,
SHARED_RING_INIT(sring);
FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
+ sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
if (err < 0) {
free_page((unsigned long)sring);
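
The xen-blkfront change drops the open-coded rq_for_each_segment() walk, which counted segments by hand into ring_req->nr_segments, in favour of mapping the whole request onto a per-device scatterlist with blk_rq_map_sg() and iterating the coalesced segments with for_each_sg(). Below is a hedged, generic sketch of that pattern; my_dev, MY_MAX_SEGMENTS and my_queue_request() are illustrative placeholders rather than xen-blkfront symbols, and the three-argument blk_rq_map_sg() form follows the usage shown in the diff.

/*
 * Generic sketch: map a struct request onto a preallocated scatterlist
 * and walk the resulting hardware segments.  Keeping the scatterlist in
 * the per-device structure (as blkfront_info now does) avoids a large
 * on-stack array in the request path.
 */
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#define MY_MAX_SEGMENTS 11	/* cf. BLKIF_MAX_SEGMENTS_PER_REQUEST */

struct my_dev {
	struct request_queue *q;
	struct scatterlist sg[MY_MAX_SEGMENTS];	/* reused for every request */
};

void my_dev_init(struct my_dev *dev)
{
	/* one-time initialisation, mirroring the sg_init_table() call
	 * added to setup_blkring() */
	sg_init_table(dev->sg, MY_MAX_SEGMENTS);
}

int my_queue_request(struct my_dev *dev, struct request *req)
{
	struct scatterlist *sg;
	unsigned int fsect, lsect, total_sects = 0;
	int i, nsegs;

	/* coalesce the request's bio_vecs into hardware segments */
	nsegs = blk_rq_map_sg(dev->q, req, dev->sg);
	if (nsegs > MY_MAX_SEGMENTS)
		return -EINVAL;

	for_each_sg(dev->sg, sg, nsegs, i) {
		/* first and last 512-byte sector of this segment
		 * within its page */
		fsect = sg->offset >> 9;
		lsect = fsect + (sg->length >> 9) - 1;
		total_sects += lsect - fsect + 1;
		/* a real driver would hand sg_page(sg), fsect and
		 * lsect to the hardware descriptor for segment i */
	}

	return total_sects;	/* 512-byte sectors mapped */
}
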