| author | David Woodhouse <dwmw2@infradead.org> | 2006-05-24 09:22:21 +0100 |
|---|---|---|
| committer | David Woodhouse <dwmw2@infradead.org> | 2006-05-24 09:22:21 +0100 |
| commit | 66643de455c27973ac31ad6de9f859d399916842 (patch) | |
| tree | 7ebed7f051879007d4b11d6aaa9e65a1bcb0b08f /drivers/scsi | |
| parent | 2c23d62abb820e19c54012520f08a198c2233a85 (diff) | |
| parent | 387e2b0439026aa738a9edca15a57e5c0bcb4dfc (diff) | |
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
include/asm-powerpc/unistd.h
include/asm-sparc/unistd.h
include/asm-sparc64/unistd.h
Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Diffstat (limited to 'drivers/scsi')
24 files changed, 386 insertions, 299 deletions
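Several of the ibmvscsi hunks below share one pattern: keep the `dma_addr_t` returned by `dma_map_single()` in a local variable so the mapping can be torn down on the error path, instead of reading the handle back out of a request structure that may already belong to the firmware. The following is a minimal userspace sketch of that error-path discipline; the stub functions stand in for the kernel DMA API and are hypothetical names, not the driver's code.

```c
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef uint64_t dma_addr_t;

/* Stubs standing in for dma_map_single()/dma_unmap_single() (assumed names). */
static dma_addr_t map_buffer(void *buf, size_t len)
{
	(void)len;
	return (dma_addr_t)(uintptr_t)buf;
}

static void unmap_buffer(dma_addr_t addr, size_t len)
{
	(void)addr;
	(void)len;
}

static int mapping_error(dma_addr_t addr)
{
	return addr == 0;
}

static int send_request(dma_addr_t addr, size_t len)
{
	(void)addr;
	(void)len;
	return -1;	/* simulate a send failure to exercise the cleanup path */
}

static int send_adapter_info(void *info, size_t len)
{
	dma_addr_t addr;	/* handle kept locally for the error path */

	addr = map_buffer(info, len);
	if (mapping_error(addr))
		return -1;

	if (send_request(addr, len)) {
		/* send failed: undo the mapping before bailing out */
		unmap_buffer(addr, len);
		return -1;
	}
	return 0;
}

int main(void)
{
	char info[64];

	printf("send_adapter_info: %d\n", send_adapter_info(info, sizeof(info)));
	return 0;
}
```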
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c index cb30d9c1153..0c9c2f400bf 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c @@ -219,6 +219,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ahc->flags |= AHC_39BIT_ADDRESSING; } else { if (dma_set_mask(dev, DMA_32BIT_MASK)) { + ahc_free(ahc); printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n"); return (-ENODEV); } diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c index 5f586140e05..3adecef2178 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_pci.c +++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c @@ -2036,12 +2036,12 @@ ahc_pci_resume(struct ahc_softc *ahc) * that the OS doesn't know about and rely on our chip * reset handler to handle the rest. */ - ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4, - ahc->bus_softc.pci_softc.devconfig); - ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1, - ahc->bus_softc.pci_softc.command); - ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1, - ahc->bus_softc.pci_softc.csize_lattime); + ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, + ahc->bus_softc.pci_softc.devconfig, /*bytes*/4); + ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, + ahc->bus_softc.pci_softc.command, /*bytes*/1); + ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, + ahc->bus_softc.pci_softc.csize_lattime, /*bytes*/1); if ((ahc->flags & AHC_HAS_TERM_LOGIC) != 0) { struct seeprom_descriptor sd; u_int sxfrctl1; diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 0a8ad37ae89..2e9be83a697 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -739,7 +739,8 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) { struct viosrp_adapter_info *req; struct srp_event_struct *evt_struct; - + dma_addr_t addr; + evt_struct = get_event_struct(&hostdata->pool); if (!evt_struct) { printk(KERN_ERR "ibmvscsi: couldn't allocate an event " @@ -757,10 +758,10 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) req->common.type = VIOSRP_ADAPTER_INFO_TYPE; req->common.length = sizeof(hostdata->madapter_info); - req->buffer = dma_map_single(hostdata->dev, - &hostdata->madapter_info, - sizeof(hostdata->madapter_info), - DMA_BIDIRECTIONAL); + req->buffer = addr = dma_map_single(hostdata->dev, + &hostdata->madapter_info, + sizeof(hostdata->madapter_info), + DMA_BIDIRECTIONAL); if (dma_mapping_error(req->buffer)) { printk(KERN_ERR @@ -770,8 +771,13 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) return; } - if (ibmvscsi_send_srp_event(evt_struct, hostdata)) + if (ibmvscsi_send_srp_event(evt_struct, hostdata)) { printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n"); + dma_unmap_single(hostdata->dev, + addr, + sizeof(hostdata->madapter_info), + DMA_BIDIRECTIONAL); + } }; /** @@ -1259,6 +1265,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, { struct viosrp_host_config *host_config; struct srp_event_struct *evt_struct; + dma_addr_t addr; int rc; evt_struct = get_event_struct(&hostdata->pool); @@ -1279,8 +1286,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, memset(host_config, 0x00, sizeof(*host_config)); host_config->common.type = VIOSRP_HOST_CONFIG_TYPE; host_config->common.length = length; - host_config->buffer = dma_map_single(hostdata->dev, buffer, length, - 
DMA_BIDIRECTIONAL); + host_config->buffer = addr = dma_map_single(hostdata->dev, buffer, + length, + DMA_BIDIRECTIONAL); if (dma_mapping_error(host_config->buffer)) { printk(KERN_ERR @@ -1291,11 +1299,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, init_completion(&evt_struct->comp); rc = ibmvscsi_send_srp_event(evt_struct, hostdata); - if (rc == 0) { + if (rc == 0) wait_for_completion(&evt_struct->comp); - dma_unmap_single(hostdata->dev, host_config->buffer, - length, DMA_BIDIRECTIONAL); - } + dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL); return rc; } diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index bd147207f25..823dfa78c0b 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c @@ -864,6 +864,9 @@ static unsigned int ata_id_xfermask(const u16 *id) /** * ata_port_queue_task - Queue port_task * @ap: The ata_port to queue port_task for + * @fn: workqueue function to be scheduled + * @data: data value to pass to workqueue function + * @delay: delay time for workqueue function * * Schedule @fn(@data) for execution after @delay jiffies using * port_task. There is one port_task per port and it's the @@ -2739,6 +2742,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap, * ata_dev_init_params - Issue INIT DEV PARAMS command * @ap: Port associated with device @dev * @dev: Device to which command will be sent + * @heads: Number of heads (taskfile parameter) + * @sectors: Number of sectors (taskfile parameter) * * LOCKING: * Kernel thread context (may sleep) @@ -4302,6 +4307,7 @@ int ata_device_resume(struct ata_port *ap, struct ata_device *dev) * ata_device_suspend - prepare a device for suspend * @ap: port the device is connected to * @dev: the device to suspend + * @state: target power management state * * Flush the cache on the drive, if appropriate, then issue a * standbynow command. 
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index fad607b2e6f..ee22173fce4 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -27,7 +27,6 @@ void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); -void lpfc_set_slim(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *, uint32_t); void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *); diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 8932b1be2b6..41cf5d3ea6c 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -113,6 +113,7 @@ struct lpfc_nodelist { #define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from NPR list */ #define NLP_DELAY_REMOVE 0x4000000 /* Defer removal till end of DSM */ +#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */ /* Defines for list searchs */ #define NLP_SEARCH_MAPPED 0x1 /* search mapped */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 4813beaaca8..283b7d824c3 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -302,10 +302,6 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0)) goto fail_free_mbox; - /* - * set_slim mailbox command needs to execute first, - * queue this command to be processed later. - */ mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; mbox->context2 = ndlp; @@ -781,25 +777,26 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, if (disc && phba->num_disc_nodes) { /* Check to see if there are more PLOGIs to be sent */ lpfc_more_plogi(phba); - } - if (phba->num_disc_nodes == 0) { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_NDISC_ACTIVE; - spin_unlock_irq(phba->host->host_lock); + if (phba->num_disc_nodes == 0) { + spin_lock_irq(phba->host->host_lock); + phba->fc_flag &= ~FC_NDISC_ACTIVE; + spin_unlock_irq(phba->host->host_lock); - lpfc_can_disctmo(phba); - if (phba->fc_flag & FC_RSCN_MODE) { - /* Check to see if more RSCNs came in while we were - * processing this one. - */ - if ((phba->fc_rscn_id_cnt == 0) && - (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_RSCN_MODE; - spin_unlock_irq(phba->host->host_lock); - } else { - lpfc_els_handle_rscn(phba); + lpfc_can_disctmo(phba); + if (phba->fc_flag & FC_RSCN_MODE) { + /* + * Check to see if more RSCNs came in while + * we were processing this one. 
+ */ + if ((phba->fc_rscn_id_cnt == 0) && + (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { + spin_lock_irq(phba->host->host_lock); + phba->fc_flag &= ~FC_RSCN_MODE; + spin_unlock_irq(phba->host->host_lock); + } else { + lpfc_els_handle_rscn(phba); + } } } } @@ -1263,7 +1260,7 @@ lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, psli = &phba->sli; pring = &psli->ring[LPFC_ELS_RING]; - cmdsize = 2 * (sizeof (uint32_t) + sizeof (struct lpfc_name)); + cmdsize = (2 * sizeof (uint32_t)) + sizeof (struct lpfc_name); elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_LOGO); if (!elsiocb) @@ -1451,22 +1448,23 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp) * PLOGIs to be sent */ lpfc_more_plogi(phba); - } - if (phba->num_disc_nodes == 0) { - phba->fc_flag &= ~FC_NDISC_ACTIVE; - lpfc_can_disctmo(phba); - if (phba->fc_flag & FC_RSCN_MODE) { - /* Check to see if more RSCNs - * came in while we were - * processing this one. - */ - if((phba->fc_rscn_id_cnt==0) && - (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { - phba->fc_flag &= ~FC_RSCN_MODE; - } - else { - lpfc_els_handle_rscn(phba); + if (phba->num_disc_nodes == 0) { + phba->fc_flag &= ~FC_NDISC_ACTIVE; + lpfc_can_disctmo(phba); + if (phba->fc_flag & FC_RSCN_MODE) { + /* + * Check to see if more RSCNs + * came in while we were + * processing this one. + */ + if((phba->fc_rscn_id_cnt==0) && + !(phba->fc_flag & FC_RSCN_DISCOVERY)) { + phba->fc_flag &= ~FC_RSCN_MODE; + } + else { + lpfc_els_handle_rscn(phba); + } } } } @@ -1872,9 +1870,6 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, if (mbox) { if ((rspiocb->iocb.ulpStatus == 0) && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { - /* set_slim mailbox command needs to execute first, - * queue this command to be processed later. - */ lpfc_unreg_rpi(phba, ndlp); mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; mbox->context2 = ndlp; @@ -1920,6 +1915,7 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag, uint8_t *pcmd; uint16_t cmdsize; int rc; + ELS_PKT *els_pkt_ptr; psli = &phba->sli; pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ @@ -1958,6 +1954,23 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag, pcmd += sizeof (uint32_t); memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm)); break; + case ELS_CMD_PRLO: + cmdsize = sizeof (uint32_t) + sizeof (PRLO); + elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, + ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); + if (!elsiocb) + return 1; + + icmd = &elsiocb->iocb; + icmd->ulpContext = oldcmd->ulpContext; /* Xri */ + pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); + + memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, + sizeof (uint32_t) + sizeof (PRLO)); + *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; + els_pkt_ptr = (ELS_PKT *) pcmd; + els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; + break; default: return 1; } @@ -2498,7 +2511,7 @@ lpfc_els_rcv_rscn(struct lpfc_hba * phba, /* If we are about to begin discovery, just ACC the RSCN. * Discovery processing will satisfy it. 
*/ - if (phba->hba_state < LPFC_NS_QRY) { + if (phba->hba_state <= LPFC_NS_QRY) { lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode); return 0; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 6721e679df6..adb086009ae 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -311,8 +311,8 @@ lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2, evtp->evt_arg2 = arg2; evtp->evt = evt; - list_add_tail(&evtp->evt_listp, &phba->work_list); spin_lock_irq(phba->host->host_lock); + list_add_tail(&evtp->evt_listp, &phba->work_list); if (phba->work_wait) wake_up(phba->work_wait); spin_unlock_irq(phba->host->host_lock); @@ -1071,10 +1071,6 @@ lpfc_register_remote_port(struct lpfc_hba * phba, /* initialize static port data */ rport->maxframe_size = ndlp->nlp_maxframe; rport->supported_classes = ndlp->nlp_class_sup; - if ((rport->scsi_target_id != -1) && - (rport->scsi_target_id < MAX_FCP_TARGET)) { - ndlp->nlp_sid = rport->scsi_target_id; - } rdata = rport->dd_data; rdata->pnode = ndlp; @@ -1087,6 +1083,10 @@ lpfc_register_remote_port(struct lpfc_hba * phba, if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) fc_remote_port_rolechg(rport, rport_ids.roles); + if ((rport->scsi_target_id != -1) && + (rport->scsi_target_id < MAX_FCP_TARGET)) { + ndlp->nlp_sid = rport->scsi_target_id; + } return; } @@ -1238,6 +1238,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list) evt_listp); } + nlp->nlp_flag &= ~NLP_NODEV_REMOVE; nlp->nlp_type |= NLP_FC_NODE; break; case NLP_MAPPED_LIST: @@ -1258,6 +1259,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list) evt_listp); } + nlp->nlp_flag &= ~NLP_NODEV_REMOVE; break; case NLP_NPR_LIST: nlp->nlp_flag |= list; @@ -1402,6 +1404,8 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba, if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) return 1; case CMD_ELS_REQUEST64_CR: + if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID) + return 1; case CMD_XMIT_ELS_RSP64_CX: if (iocb->context1 == (uint8_t *) ndlp) return 1; @@ -1901,10 +1905,8 @@ lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did) */ if (ndlp->nlp_flag & NLP_DELAY_TMO) lpfc_cancel_retry_delay_tmo(phba, ndlp); - } else { - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + } else ndlp = NULL; - } } else { flg = ndlp->nlp_flag & NLP_LIST_MASK; if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST)) diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 54d04188f7c..eedf9880136 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -449,6 +449,7 @@ struct serv_parm { /* Structure is in Big Endian format */ #define ELS_CMD_RRQ 0x12000000 #define ELS_CMD_PRLI 0x20100014 #define ELS_CMD_PRLO 0x21100014 +#define ELS_CMD_PRLO_ACC 0x02100014 #define ELS_CMD_PDISC 0x50000000 #define ELS_CMD_FDISC 0x51000000 #define ELS_CMD_ADISC 0x52000000 @@ -484,6 +485,7 @@ struct serv_parm { /* Structure is in Big Endian format */ #define ELS_CMD_RRQ 0x12 #define ELS_CMD_PRLI 0x14001020 #define ELS_CMD_PRLO 0x14001021 +#define ELS_CMD_PRLO_ACC 0x14001002 #define ELS_CMD_PDISC 0x50 #define ELS_CMD_FDISC 0x51 #define ELS_CMD_ADISC 0x52 @@ -1539,6 +1541,7 @@ typedef struct { #define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */ #define FLAGS_LINK_SPEED 0x0800 /* Bit 11 */ +#define FLAGS_IMED_ABORT 0x04000 /* Bit 14 */ uint32_t link_speed; #define LINK_SPEED_AUTO 0 /* Auto selection */ diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 
66d5d003555..908d0f27706 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -294,15 +294,6 @@ lpfc_config_port_post(struct lpfc_hba * phba) } } - /* This should turn on DELAYED ABTS for ELS timeouts */ - lpfc_set_slim(phba, pmb, 0x052198, 0x1); - if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { - phba->hba_state = LPFC_HBA_ERROR; - mempool_free( pmb, phba->mbox_mem_pool); - return -EIO; - } - - lpfc_read_config(phba, pmb); if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { lpfc_printf_log(phba, @@ -804,7 +795,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp) int max_speed; char * ports; char * bus; - } m; + } m = {"<Unknown>", 0, "", ""}; pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); ports = (hdrtype == 0x80) ? "2-port " : ""; @@ -1627,7 +1618,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) error = lpfc_alloc_sysfs_attr(phba); if (error) - goto out_kthread_stop; + goto out_remove_host; error = request_irq(phba->pcidev->irq, lpfc_intr_handler, SA_SHIRQ, LPFC_DRIVER_NAME, phba); @@ -1644,8 +1635,10 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; error = lpfc_sli_hba_setup(phba); - if (error) + if (error) { + error = -ENODEV; goto out_free_irq; + } if (phba->cfg_poll & DISABLE_FCP_RING_INT) { spin_lock_irq(phba->host->host_lock); @@ -1700,6 +1693,9 @@ out_free_irq: free_irq(phba->pcidev->irq, phba); out_free_sysfs_attr: lpfc_free_sysfs_attr(phba); +out_remove_host: + fc_remove_host(phba->host); + scsi_remove_host(phba->host); out_kthread_stop: kthread_stop(phba->worker_thread); out_free_iocbq: @@ -1721,12 +1717,14 @@ out_iounmap_slim: out_idr_remove: idr_remove(&lpfc_hba_index, phba->brd_no); out_put_host: + phba->host = NULL; scsi_host_put(host); out_release_regions: pci_release_regions(pdev); out_disable_device: pci_disable_device(pdev); out: + pci_set_drvdata(pdev, NULL); return error; } diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index c585e2b2e58..e42f22aaf71 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -200,6 +200,9 @@ lpfc_init_link(struct lpfc_hba * phba, break; } + /* Enable asynchronous ABTS responses from firmware */ + mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT; + /* NEW_FEATURE * Setting up the link speed */ @@ -292,36 +295,6 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb) return; } -/***********************************************/ - -/* command to write slim */ -/***********************************************/ -void -lpfc_set_slim(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t addr, - uint32_t value) -{ - MAILBOX_t *mb; - - mb = &pmb->mb; - memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); - - /* addr = 0x090597 is AUTO ABTS disable for ELS commands */ - /* addr = 0x052198 is DELAYED ABTS enable for ELS commands */ - - /* - * Always turn on DELAYED ABTS for ELS timeouts - */ - if ((addr == 0x052198) && (value == 0)) - value = 1; - - mb->un.varWords[0] = addr; - mb->un.varWords[1] = value; - - mb->mbxCommand = MBX_SET_SLIM; - mb->mbxOwner = OWN_HOST; - return; -} - /**********************************************/ /* lpfc_read_nv Issue a READ CONFIG */ /* mailbox command */ diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 3d77bd999b7..27d60ad897c 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ 
b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -465,14 +465,18 @@ lpfc_rcv_padisc(struct lpfc_hba * phba, static int lpfc_rcv_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, - struct lpfc_iocbq *cmdiocb) + struct lpfc_iocbq *cmdiocb, + uint32_t els_cmd) { /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */ /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary * PLOGIs during LOGO storms from a device. */ ndlp->nlp_flag |= NLP_LOGO_ACC; - lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); + if (els_cmd == ELS_CMD_PRLO) + lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); + else + lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); if (!(ndlp->nlp_type & NLP_FABRIC) || (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { @@ -681,7 +685,7 @@ lpfc_rcv_logo_plogi_issue(struct lpfc_hba * phba, /* software abort outstanding PLOGI */ lpfc_els_abort(phba, ndlp, 1); - lpfc_rcv_logo(phba, ndlp, cmdiocb); + lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } @@ -788,10 +792,6 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba, if (lpfc_reg_login (phba, irsp->un.elsreq64.remoteID, (uint8_t *) sp, mbox, 0) == 0) { - /* set_slim mailbox command needs to - * execute first, queue this command to - * be processed later. - */ switch (ndlp->nlp_DID) { case NameServer_DID: mbox->mbox_cmpl = @@ -832,11 +832,17 @@ static uint32_t lpfc_device_rm_plogi_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { - /* software abort outstanding PLOGI */ - lpfc_els_abort(phba, ndlp, 1); + if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { + ndlp->nlp_flag |= NLP_NODEV_REMOVE; + return ndlp->nlp_state; + } + else { + /* software abort outstanding PLOGI */ + lpfc_els_abort(phba, ndlp, 1); - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - return NLP_STE_FREED_NODE; + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; + } } static uint32_t @@ -851,7 +857,7 @@ lpfc_device_recov_plogi_issue(struct lpfc_hba * phba, ndlp->nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(phba->host->host_lock); return ndlp->nlp_state; @@ -905,7 +911,7 @@ lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba, /* software abort outstanding ADISC */ lpfc_els_abort(phba, ndlp, 0); - lpfc_rcv_logo(phba, ndlp, cmdiocb); + lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } @@ -932,7 +938,7 @@ lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; /* Treat like rcv logo */ - lpfc_rcv_logo(phba, ndlp, cmdiocb); + lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO); return ndlp->nlp_state; } @@ -987,11 +993,17 @@ lpfc_device_rm_adisc_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { - /* software abort outstanding ADISC */ - lpfc_els_abort(phba, ndlp, 1); + if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { + ndlp->nlp_flag |= NLP_NODEV_REMOVE; + return ndlp->nlp_state; + } + else { + /* software abort outstanding ADISC */ + lpfc_els_abort(phba, ndlp, 1); - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - return NLP_STE_FREED_NODE; + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; + } } static uint32_t @@ -1006,7 +1018,7 @@ lpfc_device_recov_adisc_issue(struct lpfc_hba * phba, ndlp->nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); spin_lock_irq(phba->host->host_lock); - 
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_flag |= NLP_NPR_ADISC; spin_unlock_irq(phba->host->host_lock); @@ -1048,7 +1060,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_logo(phba, ndlp, cmdiocb); + lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } @@ -1073,7 +1085,7 @@ lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba, struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); + lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); return ndlp->nlp_state; } @@ -1133,8 +1145,14 @@ lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - return NLP_STE_FREED_NODE; + if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { + ndlp->nlp_flag |= NLP_NODEV_REMOVE; + return ndlp->nlp_state; + } + else { + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; + } } static uint32_t @@ -1146,7 +1164,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba, ndlp->nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(phba->host->host_lock); return ndlp->nlp_state; } @@ -1186,7 +1204,7 @@ lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba, /* Software abort outstanding PRLI before sending acc */ lpfc_els_abort(phba, ndlp, 1); - lpfc_rcv_logo(phba, ndlp, cmdiocb); + lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } @@ -1214,7 +1232,7 @@ lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba, struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); + lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); return ndlp->nlp_state; } @@ -1278,11 +1296,17 @@ static uint32_t lpfc_device_rm_prli_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { - /* software abort outstanding PRLI */ - lpfc_els_abort(phba, ndlp, 1); + if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { + ndlp->nlp_flag |= NLP_NODEV_REMOVE; + return ndlp->nlp_state; + } + else { + /* software abort outstanding PLOGI */ + lpfc_els_abort(phba, ndlp, 1); - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - return NLP_STE_FREED_NODE; + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; + } } @@ -1313,7 +1337,7 @@ lpfc_device_recov_prli_issue(struct lpfc_hba * phba, ndlp->nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(phba->host->host_lock); return ndlp->nlp_state; } @@ -1351,7 +1375,7 @@ lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_logo(phba, ndlp, cmdiocb); + lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } @@ -1375,7 +1399,7 @@ lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); + lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); return ndlp->nlp_state; } @@ -1386,7 +1410,7 @@ lpfc_device_recov_unmap_node(struct lpfc_hba * phba, ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; 
ndlp->nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); lpfc_disc_set_adisc(phba, ndlp); return ndlp->nlp_state; @@ -1424,7 +1448,7 @@ lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_logo(phba, ndlp, cmdiocb); + lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } @@ -1456,7 +1480,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba, spin_unlock_irq(phba->host->host_lock); /* Treat like rcv logo */ - lpfc_rcv_logo(phba, ndlp, cmdiocb); + lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO); return ndlp->nlp_state; } @@ -1469,7 +1493,7 @@ lpfc_device_recov_mapped_node(struct lpfc_hba * phba, ndlp->nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(phba->host->host_lock); lpfc_disc_set_adisc(phba, ndlp); return ndlp->nlp_state; @@ -1551,7 +1575,7 @@ lpfc_rcv_logo_npr_node(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_logo(phba, ndlp, cmdiocb); + lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } @@ -1617,9 +1641,16 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb, *rspiocb; + IOCB_t *irsp; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; + + irsp = &rspiocb->iocb; + if (irsp->ulpStatus) { + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; + } return ndlp->nlp_state; } @@ -1628,9 +1659,16 @@ lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb, *rspiocb; + IOCB_t *irsp; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; + + irsp = &rspiocb->iocb; + if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; + } return ndlp->nlp_state; } @@ -1649,9 +1687,16 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba, uint32_t evt) { struct lpfc_iocbq *cmdiocb, *rspiocb; + IOCB_t *irsp; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; + + irsp = &rspiocb->iocb; + if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; + } return ndlp->nlp_state; } @@ -1668,7 +1713,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba, if (!mb->mbxStatus) ndlp->nlp_rpi = mb->un.varWords[0]; - + else { + if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; + } + } return ndlp->nlp_state; } @@ -1677,6 +1727,10 @@ lpfc_device_rm_npr_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { + if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { + ndlp->nlp_flag |= NLP_NODEV_REMOVE; + return ndlp->nlp_state; + } lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); return NLP_STE_FREED_NODE; } @@ -1687,7 +1741,7 @@ lpfc_device_recov_npr_node(struct lpfc_hba * phba, uint32_t evt) { spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(phba->host->host_lock); if (ndlp->nlp_flag & NLP_DELAY_TMO) { lpfc_cancel_retry_delay_tmo(phba, ndlp); diff --git 
a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index f9379987372..7dc4c2e6bed 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -629,8 +629,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq; IOCB_t *piocb; struct fcp_cmnd *fcp_cmnd; - struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device; - struct lpfc_rport_data *rdata = scsi_dev->hostdata; + struct lpfc_rport_data *rdata = lpfc_cmd->rdata; struct lpfc_nodelist *ndlp = rdata->pnode; if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { @@ -665,56 +664,18 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, piocb->ulpTimeout = lpfc_cmd->timeout; } - lpfc_cmd->rdata = rdata; - - switch (task_mgmt_cmd) { - case FCP_LUN_RESET: - /* Issue LUN Reset to TGT <num> LUN <num> */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_FCP, - "%d:0703 Issue LUN Reset to TGT %d LUN %d " - "Data: x%x x%x\n", - phba->brd_no, - scsi_dev->id, scsi_dev->lun, - ndlp->nlp_rpi, ndlp->nlp_flag); - - break; - case FCP_ABORT_TASK_SET: - /* Issue Abort Task Set to TGT <num> LUN <num> */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_FCP, - "%d:0701 Issue Abort Task Set to TGT %d LUN %d " - "Data: x%x x%x\n", - phba->brd_no, - scsi_dev->id, scsi_dev->lun, - ndlp->nlp_rpi, ndlp->nlp_flag); - - break; - case FCP_TARGET_RESET: - /* Issue Target Reset to TGT <num> */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_FCP, - "%d:0702 Issue Target Reset to TGT %d " - "Data: x%x x%x\n", - phba->brd_no, - scsi_dev->id, ndlp->nlp_rpi, - ndlp->nlp_flag); - break; - } - return (1); } static int -lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba) +lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, + unsigned tgt_id, struct lpfc_rport_data *rdata) { struct lpfc_iocbq *iocbq; struct lpfc_iocbq *iocbqrsp; int ret; + lpfc_cmd->rdata = rdata; ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET); if (!ret) return FAILED; @@ -726,6 +687,13 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba) if (!iocbqrsp) return FAILED; + /* Issue Target Reset to TGT <num> */ + lpfc_printf_log(phba, KERN_INFO, LOG_FCP, + "%d:0702 Issue Target Reset to TGT %d " + "Data: x%x x%x\n", + phba->brd_no, tgt_id, rdata->pnode->nlp_rpi, + rdata->pnode->nlp_flag); + ret = lpfc_sli_issue_iocb_wait(phba, &phba->sli.ring[phba->sli.fcp_ring], iocbq, iocbqrsp, lpfc_cmd->timeout); @@ -1021,6 +989,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) lpfc_cmd->pCmd = cmnd; lpfc_cmd->timeout = 60; lpfc_cmd->scsi_hba = phba; + lpfc_cmd->rdata = rdata; ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET); if (!ret) @@ -1033,6 +1002,11 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) if (iocbqrsp == NULL) goto out_free_scsi_buf; + lpfc_printf_log(phba, KERN_INFO, LOG_FCP, + "%d:0703 Issue LUN Reset to TGT %d LUN %d " + "Data: x%x x%x\n", phba->brd_no, cmnd->device->id, + cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); + ret = lpfc_sli_issue_iocb_wait(phba, &phba->sli.ring[phba->sli.fcp_ring], iocbq, iocbqrsp, lpfc_cmd->timeout); @@ -1104,7 +1078,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) int match; int ret = FAILED, i, err_count = 0; int cnt, loopcnt; - unsigned int midlayer_id = 0; struct lpfc_scsi_buf * lpfc_cmd; lpfc_block_requests(phba); @@ -1124,7 +1097,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) * targets known to the driver. Should any target reset * fail, this routine returns failure to the midlayer. 
*/ - midlayer_id = cmnd->device->id; for (i = 0; i < MAX_FCP_TARGET; i++) { /* Search the mapped list for this target ID */ match = 0; @@ -1137,9 +1109,8 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) if (!match) continue; - lpfc_cmd->pCmd->device->id = i; - lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data; - ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba); + ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, + i, ndlp->rport->dd_data); if (ret != SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "%d:0713 Bus Reset on target %d failed\n", @@ -1158,7 +1129,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) * the targets. Unfortunately, some targets do not abide by * this forcing the driver to double check. */ - cmnd->device->id = midlayer_id; cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], 0, 0, LPFC_CTX_HOST); if (cnt) diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 4cf1366108b..6b737568b83 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.1.4" +#define LPFC_DRIVER_VERSION "8.1.6" #define LPFC_DRIVER_NAME "lpfc" diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 80b68a2481b..de35ffe2f79 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -4471,7 +4471,6 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru) { Scsi_Cmnd *scmd; struct scsi_device *sdev; - unsigned long flags = 0; scb_t *scb; int rval; diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index c11e5ce6865..bec1424eda8 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. 
* * FILE : megaraid_mbox.c - * Version : v2.20.4.7 (Nov 14 2005) + * Version : v2.20.4.8 (Apr 11 2006) * * Authors: * Atul Mukker <Atul.Mukker@lsil.com> @@ -2278,6 +2278,7 @@ megaraid_mbox_dpc(unsigned long devp) unsigned long flags; uint8_t c; int status; + uioc_t *kioc; if (!adapter) return; @@ -2320,6 +2321,9 @@ megaraid_mbox_dpc(unsigned long devp) // remove from local clist list_del_init(&scb->list); + kioc = (uioc_t *)scb->gp; + kioc->status = 0; + megaraid_mbox_mm_done(adapter, scb); continue; @@ -2636,6 +2640,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp) int recovery_window; int recovering; int i; + uioc_t *kioc; adapter = SCP2ADAPTER(scp); raid_dev = ADAP2RAIDDEV(adapter); @@ -2655,32 +2660,51 @@ megaraid_reset_handler(struct scsi_cmnd *scp) // Also, reset all the commands currently owned by the driver spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) { - list_del_init(&scb->list); // from pending list - con_log(CL_ANN, (KERN_WARNING - "megaraid: %ld:%d[%d:%d], reset from pending list\n", - scp->serial_number, scb->sno, - scb->dev_channel, scb->dev_target)); + if (scb->sno >= MBOX_MAX_SCSI_CMDS) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: IOCTL packet with %d[%d:%d] being reset\n", + scb->sno, scb->dev_channel, scb->dev_target)); - scp->result = (DID_RESET << 16); - scp->scsi_done(scp); + scb->status = -1; - megaraid_dealloc_scb(adapter, scb); + kioc = (uioc_t *)scb->gp; + kioc->status = -EFAULT; + + megaraid_mbox_mm_done(adapter, scb); + } else { + if (scb->scp == scp) { // Found command + con_log(CL_ANN, (KERN_WARNING + "megaraid: %ld:%d[%d:%d], reset from pending list\n", + scp->serial_number, scb->sno, + scb->dev_channel, scb->dev_target)); + } else { + con_log(CL_ANN, (KERN_WARNING + "megaraid: IO packet with %d[%d:%d] being reset\n", + scb->sno, scb->dev_channel, scb->dev_target)); + } + + scb->scp->result = (DID_RESET << 16); + scb->scp->scsi_done(scb->scp); + + megaraid_dealloc_scb(adapter, scb); + } } spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); if (adapter->outstanding_cmds) { con_log(CL_ANN, (KERN_NOTICE "megaraid: %d outstanding commands. 
Max wait %d sec\n", - adapter->outstanding_cmds, MBOX_RESET_WAIT)); + adapter->outstanding_cmds, + (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT))); } recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT; recovering = adapter->outstanding_cmds; - for (i = 0; i < recovery_window && adapter->outstanding_cmds; i++) { + for (i = 0; i < recovery_window; i++) { megaraid_ack_sequence(adapter); @@ -2689,12 +2713,11 @@ megaraid_reset_handler(struct scsi_cmnd *scp) con_log(CL_ANN, ( "megaraid mbox: Wait for %d commands to complete:%d\n", adapter->outstanding_cmds, - MBOX_RESET_WAIT - i)); + (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i)); } // bailout if no recovery happended in reset time - if ((i == MBOX_RESET_WAIT) && - (recovering == adapter->outstanding_cmds)) { + if (adapter->outstanding_cmds == 0) { break; } @@ -2918,12 +2941,13 @@ mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[]) wmb(); WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); - for (i = 0; i < 0xFFFFF; i++) { + for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) { if (mbox->numstatus != 0xFF) break; rmb(); + udelay(MBOX_SYNC_DELAY_200); } - if (i == 0xFFFFF) { + if (i == MBOX_SYNC_WAIT_CNT) { // We may need to re-calibrate the counter con_log(CL_ANN, (KERN_CRIT "megaraid: fast sync command timed out\n")); @@ -3475,7 +3499,7 @@ megaraid_cmm_register(adapter_t *adapter) adp.drvr_data = (unsigned long)adapter; adp.pdev = adapter->pdev; adp.issue_uioc = megaraid_mbox_mm_handler; - adp.timeout = 300; + adp.timeout = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT; adp.max_kioc = MBOX_MAX_USER_CMDS; if ((rval = mraid_mm_register_adp(&adp)) != 0) { @@ -3702,7 +3726,6 @@ megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb) unsigned long flags; kioc = (uioc_t *)scb->gp; - kioc->status = 0; mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf; mbox64->mbox32.status = scb->status; raw_mbox = (uint8_t *)&mbox64->mbox32; diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h index 882fb1a0b57..868fb0ec93e 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.h +++ b/drivers/scsi/megaraid/megaraid_mbox.h @@ -21,8 +21,8 @@ #include "megaraid_ioctl.h" -#define MEGARAID_VERSION "2.20.4.7" -#define MEGARAID_EXT_VERSION "(Release Date: Mon Nov 14 12:27:22 EST 2005)" +#define MEGARAID_VERSION "2.20.4.8" +#define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)" /* @@ -100,6 +100,9 @@ #define MBOX_BUSY_WAIT 10 // max usec to wait for busy mailbox #define MBOX_RESET_WAIT 180 // wait these many seconds in reset #define MBOX_RESET_EXT_WAIT 120 // extended wait reset +#define MBOX_SYNC_WAIT_CNT 0xFFFF // wait loop index for synchronous mode + +#define MBOX_SYNC_DELAY_200 200 // 200 micro-seconds /* * maximum transfer that can happen through the firmware commands issued diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index 8f3ce043229..e8f534fb336 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -898,10 +898,8 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp) adapter = kmalloc(sizeof(mraid_mmadp_t), GFP_KERNEL); - if (!adapter) { - rval = -ENOMEM; - goto memalloc_error; - } + if (!adapter) + return -ENOMEM; memset(adapter, 0, sizeof(mraid_mmadp_t)); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 017729c59a4..584fe5d8e50 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -599,6 +599,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha) * Either SUCCESS or FAILED. 
* * Note: +* Only return FAILED if command not returned by firmware. **************************************************************************/ int qla2xxx_eh_abort(struct scsi_cmnd *cmd) @@ -609,11 +610,12 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) unsigned int id, lun; unsigned long serial; unsigned long flags; + int wait = 0; if (!CMD_SP(cmd)) - return FAILED; + return SUCCESS; - ret = FAILED; + ret = SUCCESS; id = cmd->device->id; lun = cmd->device->lun; @@ -642,7 +644,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) } else { DEBUG3(printk("%s(%ld): abort_command " "mbx success.\n", __func__, ha->host_no)); - ret = SUCCESS; + wait = 1; } spin_lock_irqsave(&ha->hardware_lock, flags); @@ -651,17 +653,18 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Wait for the command to be returned. */ - if (ret == SUCCESS) { + if (wait) { if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) { qla_printk(KERN_ERR, ha, "scsi(%ld:%d:%d): Abort handler timed out -- %lx " "%x.\n", ha->host_no, id, lun, serial, ret); + ret = FAILED; } } qla_printk(KERN_INFO, ha, - "scsi(%ld:%d:%d): Abort command issued -- %lx %x.\n", ha->host_no, - id, lun, serial, ret); + "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n", + ha->host_no, id, lun, wait, serial, ret); return ret; } @@ -1700,8 +1703,8 @@ qla2x00_free_device(scsi_qla_host_t *ha) ha->flags.online = 0; /* Detach interrupts */ - if (ha->pdev->irq) - free_irq(ha->pdev->irq, ha); + if (ha->host->irq) + free_irq(ha->host->irq, ha); /* release io space registers */ if (ha->iobase) diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c index d5fdcb9a884..9b8bca1ac1f 100644 --- a/drivers/scsi/sata_mv.c +++ b/drivers/scsi/sata_mv.c @@ -37,7 +37,7 @@ #include <asm/io.h> #define DRV_NAME "sata_mv" -#define DRV_VERSION "0.6" +#define DRV_VERSION "0.7" enum { /* BAR's are enumerated in terms of pci_resource_start() terms */ @@ -50,6 +50,12 @@ enum { MV_PCI_REG_BASE = 0, MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */ + MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08), + MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88), + MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c), + MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc), + MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0), + MV_SATAHC0_REG_BASE = 0x20000, MV_FLASH_CTL = 0x1046c, MV_GPIO_PORT_CTL = 0x104f0, @@ -302,9 +308,6 @@ struct mv_port_priv { dma_addr_t crpb_dma; struct mv_sg *sg_tbl; dma_addr_t sg_tbl_dma; - - unsigned req_producer; /* cp of req_in_ptr */ - unsigned rsp_consumer; /* cp of rsp_out_ptr */ u32 pp_flags; }; @@ -937,8 +940,6 @@ static int mv_port_start(struct ata_port *ap) writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); - pp->req_producer = pp->rsp_consumer = 0; - /* Don't turn on EDMA here...do it before DMA commands only. Else * we'll be unable to send non-data, PIO, etc due to restricted access * to shadow regs. @@ -1022,16 +1023,16 @@ static void mv_fill_sg(struct ata_queued_cmd *qc) } } -static inline unsigned mv_inc_q_index(unsigned *index) +static inline unsigned mv_inc_q_index(unsigned index) { - *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK; - return *index; + return (index + 1) & MV_MAX_Q_DEPTH_MASK; } static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last) { - *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | + u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | (last ? 
CRQB_CMD_LAST : 0); + *cmdw = cpu_to_le16(tmp); } /** @@ -1053,15 +1054,11 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) u16 *cw; struct ata_taskfile *tf; u16 flags = 0; + unsigned in_index; if (ATA_PROT_DMA != qc->tf.protocol) return; - /* the req producer index should be the same as we remember it */ - WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> - EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != - pp->req_producer); - /* Fill in command request block */ if (!(qc->tf.flags & ATA_TFLAG_WRITE)) @@ -1069,13 +1066,17 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); flags |= qc->tag << CRQB_TAG_SHIFT; - pp->crqb[pp->req_producer].sg_addr = + /* get current queue index from hardware */ + in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS) + >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; + + pp->crqb[in_index].sg_addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); - pp->crqb[pp->req_producer].sg_addr_hi = + pp->crqb[in_index].sg_addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); - pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags); + pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); - cw = &pp->crqb[pp->req_producer].ata_cmd[0]; + cw = &pp->crqb[in_index].ata_cmd[0]; tf = &qc->tf; /* Sadly, the CRQB cannot accomodate all registers--there are @@ -1144,16 +1145,12 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) struct mv_port_priv *pp = ap->private_data; struct mv_crqb_iie *crqb; struct ata_taskfile *tf; + unsigned in_index; u32 flags = 0; if (ATA_PROT_DMA != qc->tf.protocol) return; - /* the req producer index should be the same as we remember it */ - WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> - EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != - pp->req_producer); - /* Fill in Gen IIE command request block */ if (!(qc->tf.flags & ATA_TFLAG_WRITE)) @@ -1162,7 +1159,11 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); flags |= qc->tag << CRQB_TAG_SHIFT; - crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer]; + /* get current queue index from hardware */ + in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS) + >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; + + crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); crqb->flags = cpu_to_le32(flags); @@ -1210,6 +1211,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) { void __iomem *port_mmio = mv_ap_base(qc->ap); struct mv_port_priv *pp = qc->ap->private_data; + unsigned in_index; u32 in_ptr; if (ATA_PROT_DMA != qc->tf.protocol) { @@ -1221,23 +1223,20 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) return ata_qc_issue_prot(qc); } - in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); + in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); + in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; - /* the req producer index should be the same as we remember it */ - WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != - pp->req_producer); /* until we do queuing, the queue should be empty at this point */ - WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != - ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> - EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); + WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) + >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); - mv_inc_q_index(&pp->req_producer); /* now incr 
producer index */ + in_index = mv_inc_q_index(in_index); /* now incr producer index */ mv_start_dma(port_mmio, pp); /* and write the request in pointer to kick the EDMA to life */ in_ptr &= EDMA_REQ_Q_BASE_LO_MASK; - in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT; + in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT; writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS); return 0; @@ -1260,28 +1259,26 @@ static u8 mv_get_crpb_status(struct ata_port *ap) { void __iomem *port_mmio = mv_ap_base(ap); struct mv_port_priv *pp = ap->private_data; + unsigned out_index; u32 out_ptr; u8 ata_status; - out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); + out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); + out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; - /* the response consumer index should be the same as we remember it */ - WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != - pp->rsp_consumer); - - ata_status = pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT; + ata_status = le16_to_cpu(pp->crpb[out_index].flags) + >> CRPB_FLAG_STATUS_SHIFT; /* increment our consumer index... */ - pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer); + out_index = mv_inc_q_index(out_index); /* and, until we do NCQ, there should only be 1 CRPB waiting */ - WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >> - EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != - pp->rsp_consumer); + WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) + >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); /* write out our inc'd consumer index so EDMA knows we're caught up */ out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; - out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT; + out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT; writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); /* Return ATA status register for completed CRPB */ @@ -1291,6 +1288,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap) /** * mv_err_intr - Handle error interrupts on the port * @ap: ATA channel to manipulate + * @reset_allowed: bool: 0 == don't trigger from reset here * * In most cases, just clear the interrupt and move on. However, * some cases require an eDMA reset, which is done right before @@ -1301,7 +1299,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap) * LOCKING: * Inherited from caller. */ -static void mv_err_intr(struct ata_port *ap) +static void mv_err_intr(struct ata_port *ap, int reset_allowed) { void __iomem *port_mmio = mv_ap_base(ap); u32 edma_err_cause, serr = 0; @@ -1323,9 +1321,8 @@ static void mv_err_intr(struct ata_port *ap) writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); /* check for fatal here and recover if needed */ - if (EDMA_ERR_FATAL & edma_err_cause) { + if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause)) mv_stop_and_reset(ap); - } } /** @@ -1374,12 +1371,12 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, struct ata_port *ap = host_set->ports[port]; struct mv_port_priv *pp = ap->private_data; - hard_port = port & MV_PORT_MASK; /* range 0-3 */ + hard_port = mv_hardport_from_port(port); /* range 0..3 */ handled = 0; /* ensure ata_status is set if handled++ */ /* Note that DEV_IRQ might happen spuriously during EDMA, - * and should be ignored in such cases. We could mask it, - * but it's pretty rare and may not be worth the overhead. + * and should be ignored in such cases. + * The cause of this is still under investigation. 
*/ if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { /* EDMA: check for response queue interrupt */ @@ -1393,6 +1390,11 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, ata_status = readb((void __iomem *) ap->ioaddr.status_addr); handled = 1; + /* ignore spurious intr if drive still BUSY */ + if (ata_status & ATA_BUSY) { + ata_status = 0; + handled = 0; + } } } @@ -1406,7 +1408,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, shift++; /* skip bit 8 in the HC Main IRQ reg */ } if ((PORT0_ERR << shift) & relevant) { - mv_err_intr(ap); + mv_err_intr(ap, 1); err_mask |= AC_ERR_OTHER; handled = 1; } @@ -1448,6 +1450,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance, struct ata_host_set *host_set = dev_instance; unsigned int hc, handled = 0, n_hcs; void __iomem *mmio = host_set->mmio_base; + struct mv_host_priv *hpriv; u32 irq_stat; irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); @@ -1469,6 +1472,17 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance, handled++; } } + + hpriv = host_set->private_data; + if (IS_60XX(hpriv)) { + /* deal with the interrupt coalescing bits */ + if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) { + writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO); + writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI); + writelfl(0, mmio + MV_IRQ_COAL_CAUSE); + } + } + if (PCI_ERR & irq_stat) { printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n", readl(mmio + PCI_IRQ_CAUSE_OFS)); @@ -1867,7 +1881,8 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, if (IS_60XX(hpriv)) { u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); - ifctl |= (1 << 12) | (1 << 7); + ifctl |= (1 << 7); /* enable gen2i speed */ + ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); } @@ -2031,11 +2046,14 @@ static void mv_eng_timeout(struct ata_port *ap) ap->host_set->mmio_base, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd); - mv_err_intr(ap); + mv_err_intr(ap, 0); mv_stop_and_reset(ap); - qc->err_mask |= AC_ERR_TIMEOUT; - ata_eh_qc_complete(qc); + WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); + if (qc->flags & ATA_QCFLAG_ACTIVE) { + qc->err_mask |= AC_ERR_TIMEOUT; + ata_eh_qc_complete(qc); + } } /** @@ -2229,7 +2247,8 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent, void __iomem *port_mmio = mv_port_base(mmio, port); u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); - ifctl |= (1 << 12); + ifctl |= (1 << 7); /* enable gen2i speed */ + ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); } @@ -2330,6 +2349,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) { return rc; } + pci_set_master(pdev); rc = pci_request_regions(pdev, DRV_NAME); if (rc) { diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index c750d3399a9..941c1e15c89 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -56,6 +56,8 @@ static struct { {"DENON", "DRD-25X", "V", BLIST_NOLUN}, /* locks up */ {"HITACHI", "DK312C", "CM81", BLIST_NOLUN}, /* responds to all lun */ {"HITACHI", "DK314C", "CR21", BLIST_NOLUN}, /* responds to all lun */ + {"IBM", "2104-DU3", NULL, BLIST_NOLUN}, /* locks up */ + {"IBM", "2104-TU3", NULL, BLIST_NOLUN}, /* locks up */ {"IMS", "CDD521/10", "2.06", BLIST_NOLUN}, /* locks up */ {"MAXTOR", "XT-3280", "PR02", BLIST_NOLUN}, /* locks up */ {"MAXTOR", "XT-4380S", "B3C", BLIST_NOLUN}, /* locks up */ diff --git 
a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 7b0f9a3810d..764a8b375ea 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1067,16 +1067,29 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes, break; case NOT_READY: /* - * If the device is in the process of becoming ready, - * retry. + * If the device is in the process of becoming + * ready, or has a temporary blockage, retry. */ - if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) { - scsi_requeue_command(q, cmd); - return; + if (sshdr.asc == 0x04) { + switch (sshdr.ascq) { + case 0x01: /* becoming ready */ + case 0x04: /* format in progress */ + case 0x05: /* rebuild in progress */ + case 0x06: /* recalculation in progress */ + case 0x07: /* operation in progress */ + case 0x08: /* Long write in progress */ + case 0x09: /* self test in progress */ + scsi_requeue_command(q, cmd); + return; + default: + break; + } } - if (!(req->flags & REQ_QUIET)) + if (!(req->flags & REQ_QUIET)) { scmd_printk(KERN_INFO, cmd, - "Device not ready.\n"); + "Device not ready: "); + scsi_print_sense_hdr("", &sshdr); + } scsi_end_request(cmd, 0, this_count, 1); return; case VOLUME_OVERFLOW: diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c index 3274ab76c8d..255886a9ac5 100644 --- a/drivers/scsi/sim710.c +++ b/drivers/scsi/sim710.c @@ -75,7 +75,7 @@ param_setup(char *str) else if(!strncmp(pos, "id:", 3)) { if(slot == -1) { printk(KERN_WARNING "sim710: Must specify slot for id parameter\n"); - } else if(slot > MAX_SLOTS) { + } else if(slot >= MAX_SLOTS) { printk(KERN_WARNING "sim710: Illegal slot %d for id %d\n", slot, val); } else { id_array[slot] = val; diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index d40e7c871c3..56cb4900611 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -4054,7 +4054,7 @@ static int st_probe(struct device *dev) } sdev_printk(KERN_WARNING, SDp, - "Attached scsi tape %s", tape_name(tpnt)); + "Attached scsi tape %s\n", tape_name(tpnt)); printk(KERN_WARNING "%s: try direct i/o: %s (alignment %d B)\n", tape_name(tpnt), tpnt->try_dio ? "yes" : "no", queue_dma_alignment(SDp->request_queue) + 1); |
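The sata_mv rework above drops the driver's cached `req_producer`/`rsp_consumer` copies and instead re-reads the queue index from the hardware pointer register each time a command is prepared, issued, or completed. A minimal sketch of that idea follows; a plain variable stands in for the memory-mapped pointer register, and the field layout (shift and depth) is an assumption for illustration, not the controller's documented register format.

```c
#include <stdio.h>
#include <stdint.h>

#define Q_DEPTH       32
#define Q_DEPTH_MASK  (Q_DEPTH - 1)
#define Q_PTR_SHIFT   5	/* assumed position of the index field */

static uint32_t in_ptr_reg;	/* stands in for the EDMA request-queue in-pointer */

/* Derive the current slot from the register instead of a cached copy. */
static unsigned current_index(void)
{
	return (in_ptr_reg >> Q_PTR_SHIFT) & Q_DEPTH_MASK;
}

static unsigned advance(unsigned index)
{
	return (index + 1) & Q_DEPTH_MASK;
}

int main(void)
{
	unsigned in_index = current_index();	/* read the live index ... */

	in_index = advance(in_index);		/* ... bump it ... */

	/* ... and write it back, leaving the other register bits untouched. */
	in_ptr_reg = (in_ptr_reg & ~((uint32_t)Q_DEPTH_MASK << Q_PTR_SHIFT))
		     | ((uint32_t)in_index << Q_PTR_SHIFT);

	printf("next slot: %u\n", current_index());
	return 0;
}
```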