From 07359fc61bb8ed786f96a1c24cca6f94dd17e329 Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Thu, 26 Jun 2008 19:39:23 +0200
Subject: block: add bounce support to blk_rq_map_user_iov

blk_rq_map_user_iov can't handle bounce buffers (meaning the
bio_map_user_iov path doesn't work with an LLD that needs GFP_DMA).
This patch fixes blk_rq_map_user_iov to support bounce buffers.

Signed-off-by: FUJITA Tomonori
Cc: Mike Christie
Signed-off-by: Jens Axboe
---
 block/blk-map.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/block/blk-map.c b/block/blk-map.c
index 0b1af5a3537..813011ef827 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -210,6 +210,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	if (!bio_flagged(bio, BIO_USER_MAPPED))
 		rq->cmd_flags |= REQ_COPY_USER;
 
+	blk_queue_bounce(q, &bio);
 	bio_get(bio);
 	blk_rq_bio_prep(q, rq, bio);
 	rq->buffer = rq->data = NULL;
-- 
cgit v1.2.3


From 30c00eda73d5db5bd64dd0c370161abd8df5ba4a Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Fri, 4 Jul 2008 09:31:11 +0200
Subject: block: blk_rq_map_kern uses the bounce buffers for stack buffers

blk_rq_map_kern is used for kernel-internal I/Os. Some callers pass
stack buffers to it, but DMA to/from a stack buffer leads to memory
corruption on non-coherent platforms. This patch makes blk_rq_map_kern
use a bounce buffer whenever a caller passes a stack buffer (on all
platforms, for simplicity).

Signed-off-by: FUJITA Tomonori
Cc: Bartlomiej Zolnierkiewicz
Cc: Thomas Bogendoerfer
Cc: Tejun Heo
Cc: James Bottomley
Signed-off-by: Andrew Morton
Signed-off-by: Jens Axboe
---
 block/blk-map.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/block/blk-map.c b/block/blk-map.c
index 813011ef827..ddd96fb11a7 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -269,6 +269,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	int reading = rq_data_dir(rq) == READ;
 	int do_copy = 0;
 	struct bio *bio;
+	unsigned long stack_mask = ~(THREAD_SIZE - 1);
 
 	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
@@ -279,6 +280,10 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
 	do_copy = ((kaddr & alignment) || (len & alignment));
 
+	if (!((kaddr & stack_mask) ^
+	      ((unsigned long)current->stack & stack_mask)))
+		do_copy = 1;
+
 	if (do_copy)
 		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
 	else
-- 
cgit v1.2.3
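
For context on the first patch, here is a minimal sketch, not taken from the
kernel tree, of the kind of low-level driver (LLD) the changelog alludes to.
The my_lld_* function names are hypothetical; blk_queue_bounce_limit(),
BLK_BOUNCE_ISA and blk_rq_map_user_iov() are the real block-layer API of
this era.

#include <linux/blkdev.h>
#include <scsi/sg.h>		/* struct sg_iovec */

/*
 * Hypothetical driver init: the device can only DMA below the 16MB
 * ISA limit, so the queue is told to bounce anything above it.
 */
static void my_lld_init_queue(struct request_queue *q)
{
	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
}

/*
 * Hypothetical ioctl path mapping a user iovec into a request.  With
 * the patch, blk_rq_map_user_iov() itself calls blk_queue_bounce(),
 * so user pages above the queue's bounce limit are copied into low
 * (GFP_DMA) bounce pages before the driver ever sees the bio.
 */
static int my_lld_map_user(struct request_queue *q, struct request *rq,
			   struct sg_iovec *iov, int iov_count,
			   unsigned int len)
{
	return blk_rq_map_user_iov(q, rq, iov, iov_count, len);
}

Before the patch, the bio_map_user_iov path pinned the user pages as-is and
handed them straight to the driver, ignoring the bounce limit.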
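
The stack test in the second patch works because, in kernels of this
vintage (before virtually mapped stacks), every thread stack is a
THREAD_SIZE-sized, THREAD_SIZE-aligned block, so masking any address with
~(THREAD_SIZE - 1) yields the base of its block.  A buffer is on the
current task's stack exactly when its masked address matches the masked
value of current->stack.  A minimal sketch of that predicate as a
standalone helper (the helper name is ours, not the kernel's):

#include <linux/sched.h>	/* current, task_struct::stack */
#include <linux/thread_info.h>	/* THREAD_SIZE */

/*
 * Hypothetical helper restating the patch's inline test: two addresses
 * fall in the same THREAD_SIZE-aligned block iff masking both with
 * ~(THREAD_SIZE - 1) gives the same base address.
 */
static int kbuf_on_current_stack(const void *kbuf)
{
	unsigned long stack_mask = ~(THREAD_SIZE - 1);
	unsigned long kaddr = (unsigned long)kbuf;

	return !((kaddr & stack_mask) ^
		 ((unsigned long)current->stack & stack_mask));
}

The XOR is zero exactly when the two bases coincide; in that case the
patch forces do_copy, so bio_copy_kern() allocates a separate, DMA-safe
buffer instead of letting the device DMA straight to the stack.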