Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/as-iosched.c  |  10
-rw-r--r--  drivers/block/cciss.c       |  49
-rw-r--r--  drivers/block/cciss.h       |   4
-rw-r--r--  drivers/block/cfq-iosched.c |   1
-rw-r--r--  drivers/block/ll_rw_blk.c   |  18
-rw-r--r--  drivers/block/sx8.c         |   4
-rw-r--r--  drivers/block/ub.c          | 211
7 files changed, 217 insertions(+), 80 deletions(-)
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
index 91aeb678135..95c0a3690b0 100644
--- a/drivers/block/as-iosched.c
+++ b/drivers/block/as-iosched.c
@@ -1935,23 +1935,15 @@ struct as_fs_entry {
 static ssize_t
 as_var_show(unsigned int var, char *page)
 {
-	var = (var * 1000) / HZ;
 	return sprintf(page, "%d\n", var);
 }
 
 static ssize_t
 as_var_store(unsigned long *var, const char *page, size_t count)
 {
-	unsigned long tmp;
 	char *p = (char *) page;
 
-	tmp = simple_strtoul(p, &p, 10);
-	if (tmp != 0) {
-		tmp = (tmp * HZ) / 1000;
-		if (tmp == 0)
-			tmp = 1;
-	}
-	*var = tmp;
+	*var = simple_strtoul(p, &p, 10);
 	return count;
 }
 
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 3e9fb6e4a52..418b1469d75 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1135,7 +1135,7 @@ static int revalidate_allvol(ctlr_info_t *host)
 		/* this is for the online array utilities */
 		if (!drv->heads && i)
 			continue;
-		blk_queue_hardsect_size(host->queue, drv->block_size);
+		blk_queue_hardsect_size(drv->queue, drv->block_size);
 		set_capacity(disk, drv->nr_blocks);
 		add_disk(disk);
 	}
@@ -1691,7 +1691,7 @@ static int cciss_revalidate(struct gendisk *disk)
 	cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size, &block_size);
 	cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
 			inq_buff, drv);
-	blk_queue_hardsect_size(h->queue, drv->block_size);
+	blk_queue_hardsect_size(drv->queue, drv->block_size);
 	set_capacity(disk, drv->nr_blocks);
 
 	kfree(size_buff);
@@ -2248,12 +2248,12 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
 	 * them up.  We will also keep track of the next queue to run so
 	 * that every queue gets a chance to be started first.
 	*/
-	for (j=0; j < NWD; j++){
-		int curr_queue = (start_queue + j) % NWD;
+	for (j=0; j < h->highest_lun + 1; j++){
+		int curr_queue = (start_queue + j) % (h->highest_lun + 1);
 		/* make sure the disk has been added and the drive is real
 		 * because this can be called from the middle of init_one.
 		*/
-		if(!(h->gendisk[curr_queue]->queue) ||
+		if(!(h->drv[curr_queue].queue) ||
 			!(h->drv[curr_queue].heads))
 			continue;
 		blk_start_queue(h->gendisk[curr_queue]->queue);
@@ -2264,14 +2264,14 @@
 		if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
 		{
 			if (curr_queue == start_queue){
-				h->next_to_run = (start_queue + 1) % NWD;
+				h->next_to_run = (start_queue + 1) % (h->highest_lun + 1);
 				goto cleanup;
 			} else {
 				h->next_to_run = curr_queue;
 				goto cleanup;
 			}
 		} else {
-			curr_queue = (curr_queue + 1) % NWD;
+			curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
 		}
 	}
 
@@ -2279,7 +2279,6 @@ cleanup:
 	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
 	return IRQ_HANDLED;
 }
-
 /*
  * We cannot read the structure directly, for portablity we must use
  *   the io functions.
@@ -2789,13 +2788,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 	}
 	spin_lock_init(&hba[i]->lock);
 
-	q = blk_init_queue(do_cciss_request, &hba[i]->lock);
-	if (!q)
-		goto clean4;
-
-	q->backing_dev_info.ra_pages = READ_AHEAD;
-	hba[i]->queue = q;
-	q->queuedata = hba[i];
 	/* Initialize the pdev driver private data.
 		have it point to hba[i].
 	*/
@@ -2817,6 +2809,20 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 
 	cciss_procinit(i);
 
+	for(j=0; j < NWD; j++) { /* mfm */
+		drive_info_struct *drv = &(hba[i]->drv[j]);
+		struct gendisk *disk = hba[i]->gendisk[j];
+
+		q = blk_init_queue(do_cciss_request, &hba[i]->lock);
+		if (!q) {
+			printk(KERN_ERR
+				"cciss:  unable to allocate queue for disk %d\n",
+				j);
+			break;
+		}
+		drv->queue = q;
+
+		q->backing_dev_info.ra_pages = READ_AHEAD;
 	blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
 
 	/* This is a hardware imposed limit. */
@@ -2827,26 +2833,23 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 
 	blk_queue_max_sectors(q, 512);
 
-
-	for(j=0; j<NWD; j++) {
-		drive_info_struct *drv = &(hba[i]->drv[j]);
-		struct gendisk *disk = hba[i]->gendisk[j];
-
+		q->queuedata = hba[i];
 		sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
 		sprintf(disk->devfs_name, "cciss/host%d/target%d", i, j);
 		disk->major = hba[i]->major;
 		disk->first_minor = j << NWD_SHIFT;
 		disk->fops = &cciss_fops;
-		disk->queue = hba[i]->queue;
+		disk->queue = q;
 		disk->private_data = drv;
 
 		/* we must register the controller even if no disks exist */
 		/* this is for the online array utilities */
 		if(!drv->heads && j)
 			continue;
-		blk_queue_hardsect_size(hba[i]->queue, drv->block_size);
+		blk_queue_hardsect_size(q, drv->block_size);
 		set_capacity(disk, drv->nr_blocks);
 		add_disk(disk);
 	}
+	return(1);
 
 clean4:
@@ -2912,10 +2915,10 @@ static void __devexit cciss_remove_one (struct pci_dev *pdev)
 	for (j = 0; j < NWD; j++) {
 		struct gendisk *disk = hba[i]->gendisk[j];
 		if (disk->flags & GENHD_FL_UP)
+			blk_cleanup_queue(disk->queue);
 			del_gendisk(disk);
 	}
 
-	blk_cleanup_queue(hba[i]->queue);
 	pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
 		hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
 	pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
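The cciss hunks above move from one request queue per controller to one per logical drive (drv->queue), and the interrupt handler now round-robins queue restarts over h->highest_lun + 1 drives instead of the fixed NWD. The following is a minimal userspace sketch of that round-robin selection only; it is illustrative, and the struct drive type and pick_next_queue() helper are invented here rather than taken from cciss.c.

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_DRIVES 16

    struct drive {
            bool present;   /* models h->drv[i].queue != NULL && h->drv[i].heads != 0 */
    };

    /*
     * Sketch of the restart loop in do_cciss_intr() after this patch:
     * iteration is bounded by highest_lun + 1 (drives that actually exist)
     * rather than the compile-time NWD, and the scan starts at next_to_run
     * so every queue gets a turn at being started first.
     */
    static int pick_next_queue(const struct drive *drv, int highest_lun, int start_queue)
    {
            int n = highest_lun + 1;
            int j;

            for (j = 0; j < n; j++) {
                    int curr = (start_queue + j) % n;

                    if (!drv[curr].present)
                            continue;
                    return curr;    /* this queue would be blk_start_queue()'d */
            }
            return -1;              /* nothing runnable */
    }

    int main(void)
    {
            struct drive drv[MAX_DRIVES] = { [0].present = true, [2].present = true };
            int highest_lun = 2;
            int start = 1;

            printf("start at %d -> run queue %d\n",
                   start, pick_next_queue(drv, highest_lun, start));
            return 0;
    }

Bounding the scan by highest_lun + 1 keeps the fairness property of the original loop while skipping slots for drives that were never configured.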
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 8fb19206edd..566587d0a50 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -29,6 +29,7 @@ typedef struct _drive_info_struct
 {
 	__u32	LunID;
 	int	usage_count;
+	struct request_queue *queue;
 	sector_t nr_blocks;
 	int	block_size;
 	int	heads;
@@ -72,7 +73,6 @@ struct ctlr_info
 	unsigned int maxQsinceinit;
 	unsigned int maxSG;
 	spinlock_t lock;
-	struct request_queue *queue;
 
 	//* pointers to command and error info pool */
 	CommandList_struct	*cmd_pool;
@@ -260,7 +260,7 @@ struct board_type {
 	struct access_method *access;
 };
 
-#define CCISS_LOCK(i)	(hba[i]->queue->queue_lock)
+#define CCISS_LOCK(i)	(&hba[i]->lock)
 
 #endif /* CCISS_H */
 
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index de5746e38af..2435a7c99b2 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -1281,6 +1281,7 @@ dispatch:
 	 */
 	if (!cfq_crq_in_driver(crq) &&
 	    !cfq_cfqq_idle_window(cfqq) &&
+	    !blk_barrier_rq(rq) &&
 	    cfqd->rq_in_driver >= cfqd->cfq_max_depth)
 		return NULL;
 
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 692a5fced76..3c818544475 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -719,7 +719,7 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
 
-	if (unlikely(bqt == NULL || tag >= bqt->max_depth))
+	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
 		return NULL;
 
 	return bqt->tag_index[tag];
@@ -798,6 +798,7 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 	memset(tag_index, 0, depth * sizeof(struct request *));
 	memset(tag_map, 0, nr_ulongs *
 					sizeof(unsigned long));
+	tags->real_max_depth = depth;
 	tags->max_depth = depth;
 	tags->tag_index = tag_index;
 	tags->tag_map = tag_map;
@@ -872,11 +873,22 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth)
 		return -ENXIO;
 
 	/*
+	 * if we already have large enough real_max_depth.  just
+	 * adjust max_depth.  *NOTE* as requests with tag value
+	 * between new_depth and real_max_depth can be in-flight, tag
+	 * map can not be shrunk blindly here.
+	 */
+	if (new_depth <= bqt->real_max_depth) {
+		bqt->max_depth = new_depth;
+		return 0;
+	}
+
+	/*
 	 * save the old state info, so we can copy it back
 	 */
 	tag_index = bqt->tag_index;
 	tag_map = bqt->tag_map;
-	max_depth = bqt->max_depth;
+	max_depth = bqt->real_max_depth;
 
 	if (init_tag_map(q, bqt, new_depth))
 		return -ENOMEM;
@@ -913,7 +925,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
 
 	BUG_ON(tag == -1);
 
-	if (unlikely(tag >= bqt->max_depth))
+	if (unlikely(tag >= bqt->real_max_depth))
 		/*
 		 * This can happen after tag depth has been reduced.
 		 * FIXME: how about a warning or info message here?
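The blk_queue_resize_tags() change above splits the tag bookkeeping into max_depth (the soft limit used when handing out new tags) and real_max_depth (the allocated map size), because requests tagged between the two values may still be in flight when the queue is shrunk. A toy model of that invariant follows; the names (struct tag_state, resize_tags(), tag_valid()) are invented for the sketch and none of the kernel's locking or bitmap handling is shown.

    #include <stdio.h>

    struct tag_state {
            int max_depth;       /* soft limit for handing out new tags */
            int real_max_depth;  /* actual size of the tag map */
    };

    static int resize_tags(struct tag_state *t, int new_depth)
    {
            if (new_depth <= t->real_max_depth) {
                    t->max_depth = new_depth;  /* shrink (or grow within the map) cheaply */
                    return 0;
            }
            /* growing past the map would reallocate it, as init_tag_map() does */
            t->real_max_depth = new_depth;
            t->max_depth = new_depth;
            return 0;
    }

    static int tag_valid(const struct tag_state *t, int tag)
    {
            /* lookups and completions are bounded by real_max_depth, not max_depth */
            return tag >= 0 && tag < t->real_max_depth;
    }

    int main(void)
    {
            struct tag_state t = { .max_depth = 64, .real_max_depth = 64 };

            resize_tags(&t, 32);               /* queue shrunk while tag 40 is in flight */
            printf("tag 40 still valid: %d\n", tag_valid(&t, 40));  /* prints 1 */
            printf("new tags limited to < %d\n", t.max_depth);
            return 0;
    }

This is why blk_queue_find_tag() and blk_queue_end_tag() in the hunks above compare against real_max_depth rather than max_depth.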
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 9db0a9e3e59..d57007b92f7 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1582,7 +1582,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto err_out;
 
-#if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
+#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
 	rc = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
 	if (!rc) {
 		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
@@ -1601,7 +1601,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 			goto err_out_regions;
 		}
 		pci_dac = 0;
-#if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
+#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
 	}
 #endif
 
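The sx8.c hunks only switch the guard from #if to #ifdef. The two are not interchangeable: #ifdef tests whether the macro is defined at all, while #if evaluates it as an expression, silently treating an undefined name as 0 and failing to preprocess if the macro is defined to nothing. A small standalone example of the difference (the macro names below are made up for the example and are not from sx8.c):

    #include <stdio.h>

    /* Defined with no value, the usual shape of a feature switch
     * that might otherwise come from the Makefile as -DFEATURE. */
    #define FEATURE_DEFINED_EMPTY

    int main(void)
    {
    #ifdef FEATURE_DEFINED_EMPTY
            puts("#ifdef: defined (even with no value), block compiles in");
    #endif

    #ifdef FEATURE_NEVER_DEFINED
            puts("never printed");
    #else
            puts("#ifdef: not defined, block compiled out");
    #endif

    #if FEATURE_NEVER_DEFINED
            /* An undefined identifier in #if silently evaluates to 0, so this
             * branch is dropped without any diagnostic -- and
             * '#if FEATURE_DEFINED_EMPTY' would not even preprocess, because
             * the macro expands to an empty expression. */
            puts("never printed either");
    #else
            puts("#if: treats the undefined name as 0");
    #endif
            return 0;
    }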
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 685f061e69b..a026567f5d1 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -23,6 +23,7 @@
  * -- Exterminate P3 printks
  * -- Resove XXX's
  * -- Redo "benh's retries", perhaps have spin-up code to handle them. V:D=?
+ * -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -38,6 +39,73 @@
 #define UB_MAJOR 180
 
 /*
+ * The command state machine is the key model for understanding of this driver.
+ *
+ * The general rule is that all transitions are done towards the bottom
+ * of the diagram, thus preventing any loops.
+ *
+ * An exception to that is how the STAT state is handled. A counter allows it
+ * to be re-entered along the path marked with [C].
+ *
+ *       +--------+
+ *       ! INIT   !
+ *       +--------+
+ *           !
+ *        ub_scsi_cmd_start fails ->--------------------------------------\
+ *           !                                                            !
+ *           V                                                            !
+ *       +--------+                                                       !
+ *       ! CMD    !                                                       !
+ *       +--------+                                                       !
+ *           !                                   +--------+               !
+ *           was -EPIPE -->-------------------------------->! CLEAR  !    !
+ *           !                                   +--------+               !
+ *           !                                       !                    !
+ *           was error -->------------------------------------- ! --------->\
+ *           !                                                            !
+ *  /--<-- cmd->dir == NONE ?                                             !
+ *  !        !                                                            !
+ *  !        V                                                            !
+ *  !    +--------+                                                       !
+ *  !    ! DATA   !                                                       !
+ *  !    +--------+                                                       !
+ *  !        !                   +---------+                              !
+ *  !        was -EPIPE -->--------------->! CLR2STS !                    !
+ *  !        !                   +---------+                              !
+ *  !        !                        !                                   !
+ *  !        !                        was error -->---- ! --------->\
+ *  !        was error -->--------------------- ! ------------- ! --------->\
+ *  !        !                        !               !                   !
+ *  !        V                        !               !                   !
+ *  \--->+--------+                   !               !                   !
+ *       ! STAT   !<--------------------------/       !                   !
+ *  /--->+--------+                                   !                   !
+ *  !        !                                        !                   !
+ * [C]  was -EPIPE -->-----------\                    !                   !
+ *  !        !                   !                    !                   !
+ *  +<---- len == 0              !                    !                   !
+ *  !        !                   !                    !                   !
+ *  !   was error -->--------------------------------------!---------->\
+ *  !        !                   !                    !                   !
+ *  +<---- bad CSW               !                    !                   !
+ *  +<---- bad tag               !                    !                   !
+ *  !        !                   V                    !                   !
+ *  !        !               +--------+               !                   !
+ *  !        !               ! CLRRS  !               !                   !
+ *  !        !               +--------+               !                   !
+ *  !        !                   !                    !                   !
+ *  \------- ! --------------------[C]--------\       !                   !
+ *           !                                !       !                   !
+ *  cmd->error---\            +--------+      !       !                   !
+ *      !        +--------------->! SENSE  !<----------/                  !
+ *  STAT_FAIL----/            +--------+                                  !
+ *           !                    !                                       V
+ *           !                    V                              +--------+
+ *           \--------------------------------\--------------------->! DONE   !
+ *                                                                +--------+
+ */
+
+/*
  * Definitions which have to be scattered once we understand the layout better.
  */
 
@@ -91,8 +159,6 @@ struct bulk_cs_wrap {
 
 #define US_BULK_CS_WRAP_LEN	13
 #define US_BULK_CS_SIGN		0x53425355	/* spells out 'USBS' */
-/* This is for Olympus Camedia digital cameras */
-#define US_BULK_CS_OLYMPUS_SIGN	0x55425355	/* spells out 'USBU' */
 #define US_BULK_STAT_OK		0
 #define US_BULK_STAT_FAIL	1
 #define US_BULK_STAT_PHASE	2
@@ -135,6 +201,7 @@ enum ub_scsi_cmd_state {
 	UB_CMDST_CLR2STS,		/* Clearing before requesting status */
 	UB_CMDST_STAT,			/* Status phase */
 	UB_CMDST_CLEAR,			/* Clearing a stall (halt, actually) */
+	UB_CMDST_CLRRS,			/* Clearing before retrying status */
 	UB_CMDST_SENSE,			/* Sending Request Sense */
 	UB_CMDST_DONE			/* Final state */
 };
@@ -146,6 +213,7 @@ static char *ub_scsi_cmd_stname[] = {
 	"c2s",
 	"sts",
 	"clr",
+	"crs",
 	"Sen",
 	"fin"
 };
@@ -316,6 +384,7 @@ struct ub_dev {
 	struct urb work_urb;
 	struct timer_list work_timer;
 	int last_pipe;			/* What might need clearing */
+	__le32 signature;		/* Learned signature */
 	struct bulk_cb_wrap work_bcb;
 	struct bulk_cs_wrap work_bcs;
 	struct usb_ctrlrequest work_cr;
@@ -339,8 +408,9 @@ static void ub_scsi_action(unsigned long _dev);
 static void ub_scsi_dispatch(struct ub_dev *sc);
 static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
-static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
+static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
+static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
     int stalled_pipe);
@@ -1085,6 +1155,28 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 
 		ub_state_stat(sc, cmd);
 
+	} else if (cmd->state == UB_CMDST_CLRRS) {
+		if (urb->status == -EPIPE) {
+			/*
+			 * STALL while clearning STALL.
+			 * The control pipe clears itself - nothing to do.
+			 * XXX Might try to reset the device here and retry.
+			 */
+			printk(KERN_NOTICE "%s: stall on control pipe\n",
+			    sc->name);
+			goto Bad_End;
+		}
+
+		/*
+		 * We ignore the result for the halt clear.
+		 */
+
+		/* reset the endpoint toggle */
+		usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
+			usb_pipeout(sc->last_pipe), 0);
+
+		ub_state_stat_counted(sc, cmd);
+
 	} else if (cmd->state == UB_CMDST_CMD) {
 		if (urb->status == -EPIPE) {
 			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
@@ -1190,52 +1282,57 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 			 */
 			goto Bad_End;
 		}
-		cmd->state = UB_CMDST_CLEAR;
+
+		/*
+		 * Having a stall when getting CSW is an error, so
+		 * make sure uppper levels are not oblivious to it.
+		 */
+		cmd->error = -EIO;		/* A cheap trick... */
+
+		cmd->state = UB_CMDST_CLRRS;
 		ub_cmdtr_state(sc, cmd);
 		return;
 	}
+	if (urb->status == -EOVERFLOW) {
+		/*
+		 * XXX We are screwed here. Retrying is pointless,
+		 * because the pipelined data will not get in until
+		 * we read with a big enough buffer. We must reset XXX.
+		 */
+		goto Bad_End;
+	}
 	if (urb->status != 0)
 		goto Bad_End;
 
 	if (urb->actual_length == 0) {
-		/*
-		 * Some broken devices add unnecessary zero-length
-		 * packets to the end of their data transfers.
-		 * Such packets show up as 0-length CSWs. If we
-		 * encounter such a thing, try to read the CSW again.
-		 */
-		if (++cmd->stat_count >= 4) {
-			printk(KERN_NOTICE "%s: unable to get CSW\n",
-			    sc->name);
-			goto Bad_End;
-		}
-		__ub_state_stat(sc, cmd);
+		ub_state_stat_counted(sc, cmd);
 		return;
 	}
 
 	/*
 	 * Check the returned Bulk protocol status.
+	 * The status block has to be validated first.
 	 */
 
 	bcs = &sc->work_bcs;
-	rc = le32_to_cpu(bcs->Residue);
-	if (rc != cmd->len - cmd->act_len) {
+
+	if (sc->signature == cpu_to_le32(0)) {
 		/*
-		 * It is all right to transfer less, the caller has
-		 * to check. But it's not all right if the device
-		 * counts disagree with our counts.
+		 * This is the first reply, so do not perform the check.
+		 * Instead, remember the signature the device uses
+		 * for future checks. But do not allow a nul.
 		 */
-		/* P3 */ printk("%s: resid %d len %d act %d\n",
-		    sc->name, rc, cmd->len, cmd->act_len);
-		goto Bad_End;
-	}
-
-#if 0
-	if (bcs->Signature != cpu_to_le32(US_BULK_CS_SIGN) &&
-	    bcs->Signature != cpu_to_le32(US_BULK_CS_OLYMPUS_SIGN)) {
-		/* Windows ignores signatures, so do we. */
+		sc->signature = bcs->Signature;
+		if (sc->signature == cpu_to_le32(0)) {
+			ub_state_stat_counted(sc, cmd);
+			return;
+		}
+	} else {
+		if (bcs->Signature != sc->signature) {
+			ub_state_stat_counted(sc, cmd);
+			return;
+		}
 	}
-#endif
 
 	if (bcs->Tag != cmd->tag) {
 		/*
@@ -1245,16 +1342,22 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 		 * commands and reply at commands we timed out before.
		 * Without flushing these replies we loop forever.
		 */
-		if (++cmd->stat_count >= 4) {
-			printk(KERN_NOTICE "%s: "
-			    "tag mismatch orig 0x%x reply 0x%x\n",
-			    sc->name, cmd->tag, bcs->Tag);
-			goto Bad_End;
-		}
-		__ub_state_stat(sc, cmd);
+		ub_state_stat_counted(sc, cmd);
 		return;
 	}
 
+	rc = le32_to_cpu(bcs->Residue);
+	if (rc != cmd->len - cmd->act_len) {
+		/*
+		 * It is all right to transfer less, the caller has
+		 * to check. But it's not all right if the device
+		 * counts disagree with our counts.
+		 */
+		/* P3 */ printk("%s: resid %d len %d act %d\n",
+		    sc->name, rc, cmd->len, cmd->act_len);
+		goto Bad_End;
+	}
+
 	switch (bcs->Status) {
 	case US_BULK_STAT_OK:
 		break;
@@ -1272,6 +1375,10 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 	}
 
 	/* Not zeroing error to preserve a babble indicator */
+	if (cmd->error != 0) {
+		ub_state_sense(sc, cmd);
+		return;
+	}
 	cmd->state = UB_CMDST_DONE;
 	ub_cmdtr_state(sc, cmd);
 	ub_cmdq_pop(sc);
@@ -1310,7 +1417,7 @@ static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
  * Factorization helper for the command state machine:
  * Submit a CSW read.
  */
-static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
 	int rc;
 
@@ -1328,11 +1435,12 @@ static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 		/* XXX Clear stalls */
 		ub_complete(&sc->work_done);
 		ub_state_done(sc, cmd, rc);
-		return;
+		return -1;
 	}
 
 	sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
 	add_timer(&sc->work_timer);
+	return 0;
 }
 
 
@@ -1341,7 +1449,9 @@ static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 */
 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
-	__ub_state_stat(sc, cmd);
+
+	if (__ub_state_stat(sc, cmd) != 0)
+		return;
 
 	cmd->stat_count = 0;
 	cmd->state = UB_CMDST_STAT;
@@ -1350,6 +1460,25 @@ static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 
 /*
  * Factorization helper for the command state machine:
+ * Submit a CSW read and go to STAT state with counter (along [C] path).
+ */
+static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+
+	if (++cmd->stat_count >= 4) {
+		ub_state_sense(sc, cmd);
+		return;
+	}
+
+	if (__ub_state_stat(sc, cmd) != 0)
+		return;
+
+	cmd->state = UB_CMDST_STAT;
+	ub_cmdtr_state(sc, cmd);
+}
+
+/*
+ * Factorization helper for the command state machine:
  * Submit a REQUEST SENSE and go to SENSE state.
 */
 static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
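The ub.c changes above validate the CSW before trusting its Residue field: the first non-zero Signature a device returns is remembered in sc->signature, later CSWs must match it, and any suspect CSW (zero length, bad signature, stale tag) is re-read via ub_state_stat_counted(), which gives up after four attempts and falls through to sense handling. The following is a toy userspace model of that policy, not the driver code; struct csw_check, check_csw() and the verdict enum are invented for the sketch.

    #include <stdint.h>
    #include <stdio.h>

    struct csw_check {
            uint32_t signature;   /* 0 until learned from the first reply */
            int stat_count;       /* retries along the [C] path */
    };

    enum csw_verdict { CSW_ACCEPT, CSW_RETRY, CSW_FAIL };

    static enum csw_verdict check_csw(struct csw_check *c, uint32_t sig,
                                      uint32_t tag, uint32_t expected_tag)
    {
            if (c->stat_count >= 4)
                    return CSW_FAIL;            /* give up, fall back to sense */

            if (c->signature == 0) {
                    if (sig == 0) {             /* do not learn a nul signature */
                            c->stat_count++;
                            return CSW_RETRY;
                    }
                    c->signature = sig;         /* first reply: learn, don't check */
            } else if (sig != c->signature) {
                    c->stat_count++;
                    return CSW_RETRY;           /* mismatched signature: re-read CSW */
            }

            if (tag != expected_tag) {
                    c->stat_count++;
                    return CSW_RETRY;           /* stale reply for an older command */
            }
            return CSW_ACCEPT;                  /* only now is Residue worth checking */
    }

    int main(void)
    {
            struct csw_check c = { 0, 0 };

            printf("%d\n", check_csw(&c, 0x53425355, 7, 7));  /* learns 'USBS', accepts */
            printf("%d\n", check_csw(&c, 0x55425355, 8, 8));  /* 'USBU' mismatch -> retry */
            return 0;
    }

In the driver the counter lives in the command (cmd->stat_count) and a retry re-submits the CSW URB along the [C] path of the state diagram; the model only captures the accept/retry/fail decision.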