From c1c201200a359cf3b6e2e36a4236cdca77a3cd8e Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Tue, 3 Feb 2009 07:47:29 +0100 Subject: bsg: Fix sense buffer bug in SG_IO When submitting requests via SG_IO, which does synchronous I/O, no bsg_command is allocated, so an in-kernel sense buffer was never set. However, when blk_execute_rq() is called without a sense buffer, one is provided from the stack. Later, blk_complete_sgv4_hdr_rq() would check rq->sense_len and, if sense data was requested via sg_io_v4, copy rq->sense back to user space; but by then it was already mangled stack memory. I have fixed that by forcing a sense buffer when calling bsg_map_hdr(): bsg_command->sense is provided in the write/read path like before, and an on-the-stack buffer is provided when doing SG_IO. I have also fixed a dprintk message to print rq->errors in hex, because of the SCSI bit-field use of this member; for other block devices it does not matter anyway. Signed-off-by: Boaz Harrosh Acked-by: FUJITA Tomonori Signed-off-by: Jens Axboe --- block/bsg.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/block/bsg.c b/block/bsg.c index d414bb5607e..0ce8806dd0c 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -244,7 +244,8 @@ bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw) * map sg_io_v4 to a request. */ static struct request * -bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm) +bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm, + u8 *sense) { struct request_queue *q = bd->queue; struct request *rq, *next_rq = NULL; @@ -306,6 +307,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm) if (ret) goto out; } + + rq->sense = sense; + rq->sense_len = 0; + return rq; out: if (rq->cmd != rq->__cmd) @@ -348,9 +353,6 @@ static void bsg_rq_end_io(struct request *rq, int uptodate) static void bsg_add_command(struct bsg_device *bd, struct request_queue *q, struct bsg_command *bc, struct request *rq) { - rq->sense = bc->sense; - rq->sense_len = 0; - /* * add bc command to busy queue and submit rq for io */ @@ -419,7 +421,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, { int ret = 0; - dprintk("rq %p bio %p %u\n", rq, bio, rq->errors); + dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors); /* * fill in all the output members */ @@ -635,7 +637,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf, /* * get a request, fill in the blanks, and add to request queue */ - rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm); + rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense); if (IS_ERR(rq)) { ret = PTR_ERR(rq); rq = NULL; @@ -922,11 +924,12 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct request *rq; struct bio *bio, *bidi_bio = NULL; struct sg_io_v4 hdr; + u8 sense[SCSI_SENSE_BUFFERSIZE]; if (copy_from_user(&hdr, uarg, sizeof(hdr))) return -EFAULT; - rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE); + rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense); if (IS_ERR(rq)) return PTR_ERR(rq); -- cgit v1.2.3 From 93dbb393503d53cd226e5e1f0088fe8f4dbaa2b8 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 16 Feb 2009 10:25:40 +0100 Subject: block: fix bad definition of BIO_RW_SYNC We can't OR together bit numbers that are meant to be shifted, so get rid of BIO_RW_SYNC and use BIO_RW_SYNCIO and BIO_RW_UNPLUG explicitly. This brings back the behaviour from before 213d9417fec62ef4c3675621b9364a667954d4dd.
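As a minimal user-space sketch of the bug (the bit numbers below are assumed for illustration only, not taken from the kernel headers): BIO_RW_SYNCIO and BIO_RW_UNPLUG are bit numbers, not masks, so OR-ing them together and then shifting the result sets one unrelated bit instead of the two intended ones.

#include <stdio.h>

/* Bit numbers assumed for this sketch only. */
#define BIO_RW_SYNCIO	3
#define BIO_RW_UNPLUG	4
/* The old composite define ORed two bit numbers, yielding 7. */
#define BIO_RW_SYNC	(BIO_RW_SYNCIO | BIO_RW_UNPLUG)

int main(void)
{
	printf("broken:   0x%x\n", 1 << BIO_RW_SYNC);		/* 0x80: one wrong bit */
	printf("intended: 0x%x\n",
	       (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG));	/* 0x18: both bits */
	return 0;
}

That is why every (1 << BIO_RW_SYNC) user below is converted to set the two bits explicitly.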
Signed-off-by: Jens Axboe --- block/blktrace.c | 2 +- drivers/md/dm-io.c | 2 +- drivers/md/dm-kcopyd.c | 2 +- drivers/md/md.c | 4 ++-- include/linux/bio.h | 2 -- include/linux/blktrace_api.h | 1 + include/linux/fs.h | 6 +++--- kernel/power/swap.c | 5 +++-- mm/page_io.c | 2 +- 9 files changed, 13 insertions(+), 13 deletions(-) diff --git a/block/blktrace.c b/block/blktrace.c index 39cc3bfe56e..7cf9d1ff45a 100644 --- a/block/blktrace.c +++ b/block/blktrace.c @@ -142,7 +142,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, what |= ddir_act[rw & WRITE]; what |= MASK_TC_BIT(rw, BARRIER); - what |= MASK_TC_BIT(rw, SYNC); + what |= MASK_TC_BIT(rw, SYNCIO); what |= MASK_TC_BIT(rw, AHEAD); what |= MASK_TC_BIT(rw, META); what |= MASK_TC_BIT(rw, DISCARD); diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index a34338567a2..f14813be4ef 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -328,7 +328,7 @@ static void dispatch_io(int rw, unsigned int num_regions, struct dpages old_pages = *dp; if (sync) - rw |= (1 << BIO_RW_SYNC); + rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); /* * For multiple regions we need to be careful to rewind diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index 3073618269e..0a225da2127 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -344,7 +344,7 @@ static int run_io_job(struct kcopyd_job *job) { int r; struct dm_io_request io_req = { - .bi_rw = job->rw | (1 << BIO_RW_SYNC), + .bi_rw = job->rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG), .mem.type = DM_IO_PAGE_LIST, .mem.ptr.pl = job->pages, .mem.offset = job->offset, diff --git a/drivers/md/md.c b/drivers/md/md.c index 4495104f6c9..03b4cd0a634 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -474,7 +474,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, * causes ENOTSUPP, we allocate a spare bio... 
*/ struct bio *bio = bio_alloc(GFP_NOIO, 1); - int rw = (1<bi_bdev = rdev->bdev; bio->bi_sector = sector; @@ -531,7 +531,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size, struct completion event; int ret; - rw |= (1 << BIO_RW_SYNC); + rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); bio->bi_bdev = bdev; bio->bi_sector = sector; diff --git a/include/linux/bio.h b/include/linux/bio.h index 2aa283ab062..1b16108a541 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -171,8 +171,6 @@ struct bio { #define BIO_RW_FAILFAST_TRANSPORT 8 #define BIO_RW_FAILFAST_DRIVER 9 -#define BIO_RW_SYNC (BIO_RW_SYNCIO | BIO_RW_UNPLUG) - #define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag))) /* diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 25379cba237..6e915878e88 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -15,6 +15,7 @@ enum blktrace_cat { BLK_TC_WRITE = 1 << 1, /* writes */ BLK_TC_BARRIER = 1 << 2, /* barrier */ BLK_TC_SYNC = 1 << 3, /* sync IO */ + BLK_TC_SYNCIO = BLK_TC_SYNC, BLK_TC_QUEUE = 1 << 4, /* queueing/merging */ BLK_TC_REQUEUE = 1 << 5, /* requeueing */ BLK_TC_ISSUE = 1 << 6, /* issue */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 6022f44043f..67857dc1636 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -87,10 +87,10 @@ struct inodes_stat_t { #define WRITE 1 #define READA 2 /* read-ahead - don't block if no resources */ #define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */ -#define READ_SYNC (READ | (1 << BIO_RW_SYNC)) +#define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) #define READ_META (READ | (1 << BIO_RW_META)) -#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC)) -#define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNC)) +#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) +#define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) #define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER)) #define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD) #define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER)) diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 6da14358537..505f319e489 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -60,6 +60,7 @@ static struct block_device *resume_bdev; static int submit(int rw, pgoff_t page_off, struct page *page, struct bio **bio_chain) { + const int bio_rw = rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); struct bio *bio; bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); @@ -80,7 +81,7 @@ static int submit(int rw, pgoff_t page_off, struct page *page, bio_get(bio); if (bio_chain == NULL) { - submit_bio(rw | (1 << BIO_RW_SYNC), bio); + submit_bio(bio_rw, bio); wait_on_page_locked(page); if (rw == READ) bio_set_pages_dirty(bio); @@ -90,7 +91,7 @@ static int submit(int rw, pgoff_t page_off, struct page *page, get_page(page); /* These pages are freed later */ bio->bi_private = *bio_chain; *bio_chain = bio; - submit_bio(rw | (1 << BIO_RW_SYNC), bio); + submit_bio(bio_rw, bio); } return 0; } diff --git a/mm/page_io.c b/mm/page_io.c index dc6ce0afbde..3023c475e04 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -111,7 +111,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) goto out; } if (wbc->sync_mode == WB_SYNC_ALL) - rw |= (1 << BIO_RW_SYNC); + rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); count_vm_event(PSWPOUT); set_page_writeback(page); unlock_page(page); -- cgit v1.2.3 From a60e78e57a17d55bbd5a96da16fe9649d364b987 Mon Sep 17 
00:00:00 2001 From: Subhash Peddamallu Date: Mon, 16 Feb 2009 10:27:07 +0100 Subject: fs/bio: bio_alloc_bioset: pass right object ptr to mempool_free When freeing from bio pool use right ptr to account for bs->front_pad, instead of bio ptr, Signed-off-by: Subhash Peddamallu Signed-off-by: Jens Axboe --- fs/bio.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/bio.c b/fs/bio.c index 062299acbcc..72ab251cdb9 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -302,9 +302,10 @@ void bio_init(struct bio *bio) struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) { struct bio *bio = NULL; + void *p; if (bs) { - void *p = mempool_alloc(bs->bio_pool, gfp_mask); + p = mempool_alloc(bs->bio_pool, gfp_mask); if (p) bio = p + bs->front_pad; @@ -329,7 +330,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) } if (unlikely(!bvl)) { if (bs) - mempool_free(bio, bs->bio_pool); + mempool_free(p, bs->bio_pool); else kfree(bio); bio = NULL; -- cgit v1.2.3 From c8cbec6bdf6329279fd14696020f6b59d1d3124d Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Mon, 16 Feb 2009 13:11:55 +0100 Subject: paride/pg.c: xs(): &&/|| confusion &&/|| confusion Signed-off-by: Roel Kluin Cc: Bartlomiej Zolnierkiewicz Signed-off-by: Andrew Morton Signed-off-by: Jens Axboe --- drivers/block/paride/pg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c index 9dfa2716300..c397b3ddba9 100644 --- a/drivers/block/paride/pg.c +++ b/drivers/block/paride/pg.c @@ -422,7 +422,7 @@ static void xs(char *buf, char *targ, int len) for (k = 0; k < len; k++) { char c = *buf++; - if (c != ' ' || c != l) + if (c != ' ' && c != l) l = *targ++ = c; } if (l == ' ') -- cgit v1.2.3 From 82eb03cfd862a65363fa2826de0dbd5474cfe5e2 Mon Sep 17 00:00:00 2001 From: Chip Coldwell Date: Mon, 16 Feb 2009 13:11:56 +0100 Subject: cciss: PCI power management reset for kexec The kexec kernel resets the CCISS hardware in three steps: 1. Use PCI power management states to reset the controller in the kexec kernel. 2. Clear the MSI/MSI-X bits in PCI configuration space so that MSI initialization in the kexec kernel doesn't fail. 3. Use the CCISS "No-op" message to determine when the controller firmware has recovered from the PCI PM reset. [akpm@linux-foundation.org: cleanups] Signed-off-by: Mike Miller Cc: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Jens Axboe --- drivers/block/cciss.c | 215 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 215 insertions(+) diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 01e69383d9c..d2cb67b6117 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -3390,6 +3390,203 @@ static void free_hba(int i) kfree(p); } +/* Send a message CDB to the firmware. */ +static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, unsigned char type) +{ + typedef struct { + CommandListHeader_struct CommandHeader; + RequestBlock_struct Request; + ErrDescriptor_struct ErrorDescriptor; + } Command; + static const size_t cmd_sz = sizeof(Command) + sizeof(ErrorInfo_struct); + Command *cmd; + dma_addr_t paddr64; + uint32_t paddr32, tag; + void __iomem *vaddr; + int i, err; + + vaddr = ioremap_nocache(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); + if (vaddr == NULL) + return -ENOMEM; + + /* The Inbound Post Queue only accepts 32-bit physical addresses for the + CCISS commands, so they must be allocated from the lower 4GiB of + memory. 
*/ + err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + iounmap(vaddr); + return -ENOMEM; + } + + cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); + if (cmd == NULL) { + iounmap(vaddr); + return -ENOMEM; + } + + /* This must fit, because of the 32-bit consistent DMA mask. Also, + although there's no guarantee, we assume that the address is at + least 4-byte aligned (most likely, it's page-aligned). */ + paddr32 = paddr64; + + cmd->CommandHeader.ReplyQueue = 0; + cmd->CommandHeader.SGList = 0; + cmd->CommandHeader.SGTotal = 0; + cmd->CommandHeader.Tag.lower = paddr32; + cmd->CommandHeader.Tag.upper = 0; + memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); + + cmd->Request.CDBLen = 16; + cmd->Request.Type.Type = TYPE_MSG; + cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; + cmd->Request.Type.Direction = XFER_NONE; + cmd->Request.Timeout = 0; /* Don't time out */ + cmd->Request.CDB[0] = opcode; + cmd->Request.CDB[1] = type; + memset(&cmd->Request.CDB[2], 0, 14); /* the rest of the CDB is reserved */ + + cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(Command); + cmd->ErrorDescriptor.Addr.upper = 0; + cmd->ErrorDescriptor.Len = sizeof(ErrorInfo_struct); + + writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); + + for (i = 0; i < 10; i++) { + tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); + if ((tag & ~3) == paddr32) + break; + schedule_timeout_uninterruptible(HZ); + } + + iounmap(vaddr); + + /* we leak the DMA buffer here ... no choice since the controller could + still complete the command. */ + if (i == 10) { + printk(KERN_ERR "cciss: controller message %02x:%02x timed out\n", + opcode, type); + return -ETIMEDOUT; + } + + pci_free_consistent(pdev, cmd_sz, cmd, paddr64); + + if (tag & 2) { + printk(KERN_ERR "cciss: controller message %02x:%02x failed\n", + opcode, type); + return -EIO; + } + + printk(KERN_INFO "cciss: controller message %02x:%02x succeeded\n", + opcode, type); + return 0; +} + +#define cciss_soft_reset_controller(p) cciss_message(p, 1, 0) +#define cciss_noop(p) cciss_message(p, 3, 0) + +static __devinit int cciss_reset_msi(struct pci_dev *pdev) +{ +/* the #defines are stolen from drivers/pci/msi.h. */ +#define msi_control_reg(base) (base + PCI_MSI_FLAGS) +#define PCI_MSIX_FLAGS_ENABLE (1 << 15) + + int pos; + u16 control = 0; + + pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); + if (pos) { + pci_read_config_word(pdev, msi_control_reg(pos), &control); + if (control & PCI_MSI_FLAGS_ENABLE) { + printk(KERN_INFO "cciss: resetting MSI\n"); + pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE); + } + } + + pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + if (pos) { + pci_read_config_word(pdev, msi_control_reg(pos), &control); + if (control & PCI_MSIX_FLAGS_ENABLE) { + printk(KERN_INFO "cciss: resetting MSI-X\n"); + pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE); + } + } + + return 0; +} + +/* This does a hard reset of the controller using PCI power management + * states. 
*/ +static __devinit int cciss_hard_reset_controller(struct pci_dev *pdev) +{ + u16 pmcsr, saved_config_space[32]; + int i, pos; + + printk(KERN_INFO "cciss: using PCI PM to reset controller\n"); + + /* This is very nearly the same thing as + + pci_save_state(pci_dev); + pci_set_power_state(pci_dev, PCI_D3hot); + pci_set_power_state(pci_dev, PCI_D0); + pci_restore_state(pci_dev); + + but we can't use these nice canned kernel routines on + kexec, because they also check the MSI/MSI-X state in PCI + configuration space and do the wrong thing when it is + set/cleared. Also, the pci_save/restore_state functions + violate the ordering requirements for restoring the + configuration space from the CCISS document (see the + comment below). So we roll our own .... */ + + for (i = 0; i < 32; i++) + pci_read_config_word(pdev, 2*i, &saved_config_space[i]); + + pos = pci_find_capability(pdev, PCI_CAP_ID_PM); + if (pos == 0) { + printk(KERN_ERR "cciss_reset_controller: PCI PM not supported\n"); + return -ENODEV; + } + + /* Quoting from the Open CISS Specification: "The Power + * Management Control/Status Register (CSR) controls the power + * state of the device. The normal operating state is D0, + * CSR=00h. The software off state is D3, CSR=03h. To reset + * the controller, place the interface device in D3 then to + * D0, this causes a secondary PCI reset which will reset the + * controller." */ + + /* enter the D3hot power management state */ + pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); + pmcsr &= ~PCI_PM_CTRL_STATE_MASK; + pmcsr |= PCI_D3hot; + pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); + + schedule_timeout_uninterruptible(HZ >> 1); + + /* enter the D0 power management state */ + pmcsr &= ~PCI_PM_CTRL_STATE_MASK; + pmcsr |= PCI_D0; + pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); + + schedule_timeout_uninterruptible(HZ >> 1); + + /* Restore the PCI configuration space. The Open CISS + * Specification says, "Restore the PCI Configuration + * Registers, offsets 00h through 60h. It is important to + * restore the command register, 16-bits at offset 04h, + * last. Do not restore the configuration status register, + * 16-bits at offset 06h." Note that the offset is 2*i. */ + for (i = 0; i < 32; i++) { + if (i == 2 || i == 3) + continue; + pci_write_config_word(pdev, 2*i, saved_config_space[i]); + } + wmb(); + pci_write_config_word(pdev, 4, saved_config_space[2]); + + return 0; +} + /* * This is it. Find all the controllers and register them. I really hate * stealing all these major device numbers. @@ -3404,6 +3601,24 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, int dac, return_code; InquiryData_struct *inq_buff = NULL; + if (reset_devices) { + /* Reset the controller with a PCI power-cycle */ + if (cciss_hard_reset_controller(pdev) || cciss_reset_msi(pdev)) + return -ENODEV; + + /* Some devices (notably the HP Smart Array 5i Controller) + need a little pause here */ + schedule_timeout_uninterruptible(30*HZ); + + /* Now try to get the controller to respond to a no-op */ + for (i=0; i<12; i++) { + if (cciss_noop(pdev) == 0) + break; + else + printk("cciss: no-op failed%s\n", (i < 11 ? 
"; re-trying" : "")); + } + } + i = alloc_cciss_hba(); if (i < 0) return -1; -- cgit v1.2.3 From 78f707bfc723552e8309b7c38a8d0cc51012e813 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 17 Feb 2009 13:59:08 +0100 Subject: block: revert part of 18ce3751ccd488c78d3827e9f6bf54e6322676fb The above commit added WRITE_SYNC and switched various places to using that for committing writes that will be waited upon immediately after submission. However, this causes a performance regression with AS and CFQ for ext3 at least, since sync_dirty_buffer() will submit some writes with WRITE_SYNC while ext3 has sumitted others dependent writes without the sync flag set. This causes excessive anticipation/idling in the IO scheduler because sync and async writes get interleaved, causing a big performance regression for the below test case (which is meant to simulate sqlite like behaviour). ---- test case ---- int main(int argc, char **argv) { int fdes, i; FILE *fp; struct timeval start; struct timeval end; struct timeval res; gettimeofday(&start, NULL); for (i=0; i --- fs/buffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/buffer.c b/fs/buffer.c index 665d446b25b..62b57e330b6 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -3108,7 +3108,7 @@ int sync_dirty_buffer(struct buffer_head *bh) if (test_clear_buffer_dirty(bh)) { get_bh(bh); bh->b_end_io = end_buffer_write_sync; - ret = submit_bh(WRITE_SYNC, bh); + ret = submit_bh(WRITE, bh); wait_on_buffer(bh); if (buffer_eopnotsupp(bh)) { clear_buffer_eopnotsupp(bh); -- cgit v1.2.3 From 41b8c853a495438208faa5be03bbb0050859163b Mon Sep 17 00:00:00 2001 From: Neil Brown Date: Wed, 18 Feb 2009 10:33:59 +0100 Subject: block: fix booting from partitioned md array Hi Tejun, it looks like your commit: block: don't depend on consecutive minor space f331c0296f2a9fee0d396a70598b954062603015 broke a particular case for booting from partitioned md/raid devices. That is the second time this has been broken recently. The previous time was fixed by block: do_mounts - accept root= 30f2f0eb4bd2c43d10a8b0d872c6e5ad8f31c9a0 Because the data isn't available when an md device is first created (we add disks and set it up after creation), the initial partition scan finds nothing. It is not until the device is opened that another partition scan happens and finds something. So at the point where the kernel parameter "root=/dev/md_d0p1" is being parsed, md_d0 exists, but md_d0p1 does not. However if we let blk_lookup_devt return the correct device number even though the device doesn't exist, then the attempt to mount it will successfully find the partition. I have tried in the past to find a way to get the partition table to be read as soon as the array is assembled but that proved impossible (at the time). I don't remember the details, and could possibly revisit it. However it would be really nice if blk_lookup_devt could be adjusted to again accept non existant partitions. Signed-off-by: Jens Axboe --- block/genhd.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/block/genhd.c b/block/genhd.c index 397960cf26a..e1eadcc9546 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1087,6 +1087,14 @@ dev_t blk_lookup_devt(const char *name, int partno) if (strcmp(dev_name(dev), name)) continue; + if (partno < disk->minors) { + /* We need to return the right devno, even + * if the partition doesn't exist yet. 
+ */ + devt = MKDEV(MAJOR(dev->devt), + MINOR(dev->devt) + partno); + break; + } part = disk_get_part(disk, partno); if (part) { devt = part_devt(part); -- cgit v1.2.3 From be987fdb55a4726e2fcbab7501f89276bdb57288 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Wed, 18 Feb 2009 10:30:15 +0100 Subject: block: fix deadlock in blk_abort_queue() for drivers that readd to timeout list blk_abort_queue() iterates the timeout list and aborts each request on the list, but if the driver error handling readds a request to the timeout list during this processing, we could be looping forever. Fix this by splicing current entries to a local list and run over that list instead. Signed-off-by: Jens Axboe --- block/blk-timeout.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/block/blk-timeout.c b/block/blk-timeout.c index a09535377a9..bbbdc4b8ccf 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -209,12 +209,19 @@ void blk_abort_queue(struct request_queue *q) { unsigned long flags; struct request *rq, *tmp; + LIST_HEAD(list); spin_lock_irqsave(q->queue_lock, flags); elv_abort_queue(q); - list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) + /* + * Splice entries to local list, to avoid deadlocking if entries + * get readded to the timeout list by error handling + */ + list_splice_init(&q->timeout_list, &list); + + list_for_each_entry_safe(rq, tmp, &list, timeout_list) blk_abort_request(rq); spin_unlock_irqrestore(q->queue_lock, flags); -- cgit v1.2.3
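The splice in blk_abort_queue() above is what breaks the potential infinite loop. As a minimal user-space sketch of the same pattern (the request struct and list handling below are simplified assumptions, not the kernel's implementation): requests re-added by the abort handler land on the now-empty global list, so the walk over the private copy always terminates.

#include <stdio.h>
#include <stdlib.h>

struct request {
	int id;
	struct request *next;		/* simplified stand-in for the timeout list linkage */
};

static struct request *timeout_list;	/* stands in for q->timeout_list */

/* Error handling that re-adds the request, as some drivers do. */
static void abort_request(struct request *rq)
{
	printf("aborting request %d\n", rq->id);
	rq->next = timeout_list;
	timeout_list = rq;		/* a naive in-place walk would revisit this */
}

static void abort_queue(void)
{
	/* Splice entries to a local list first; anything re-added by
	 * abort_request() goes onto the (now empty) global list and is
	 * not visited again in this pass. */
	struct request *local = timeout_list;

	timeout_list = NULL;
	while (local) {
		struct request *rq = local;

		local = local->next;
		abort_request(rq);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		struct request *rq = malloc(sizeof(*rq));

		if (!rq)
			return 1;
		rq->id = i;
		rq->next = timeout_list;
		timeout_list = rq;
	}
	abort_queue();	/* terminates even though every request is re-added */
	return 0;
}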