Diffstat (limited to 'drivers/scsi/lpfc')
 drivers/scsi/lpfc/lpfc.h           |  53
 drivers/scsi/lpfc/lpfc_attr.c      | 217
 drivers/scsi/lpfc/lpfc_crtn.h      |  33
 drivers/scsi/lpfc/lpfc_ct.c        | 304
 drivers/scsi/lpfc/lpfc_debugfs.c   | 157
 drivers/scsi/lpfc/lpfc_disc.h      |   2
 drivers/scsi/lpfc/lpfc_els.c       | 623
 drivers/scsi/lpfc/lpfc_hbadisc.c   | 372
 drivers/scsi/lpfc/lpfc_hw.h        | 112
 drivers/scsi/lpfc/lpfc_init.c      | 429
 drivers/scsi/lpfc/lpfc_logmsg.h    |   1
 drivers/scsi/lpfc/lpfc_mbox.c      |  32
 drivers/scsi/lpfc/lpfc_mem.c       |   1
 drivers/scsi/lpfc/lpfc_nportdisc.c | 160
 drivers/scsi/lpfc/lpfc_scsi.c      |  56
 drivers/scsi/lpfc/lpfc_scsi.h      |   1
 drivers/scsi/lpfc/lpfc_sli.c       | 534
 drivers/scsi/lpfc/lpfc_sli.h       |  12
 drivers/scsi/lpfc/lpfc_version.h   |   6
 drivers/scsi/lpfc/lpfc_vport.c     |  93
 drivers/scsi/lpfc/lpfc_vport.h     |   2
21 files changed, 2318 insertions, 882 deletions
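
For reference, a minimal standalone sketch of the scatter-gather sizing this patch introduces: lpfc.h replaces the fixed LPFC_SG_SEG_CNT with LPFC_DEFAULT_SG_SEG_CNT (64) and LPFC_MAX_SG_SEG_CNT (256), and lpfc_get_cfgparam() in lpfc_attr.c sizes the sg_dma_buf_pool as an FCP command plus an FCP response plus (cfg_sg_seg_cnt + 2) BDEs, the two extra BDEs covering the IOCB command and response. This sketch is not part of the patch, and the struct sizes used here are placeholder values rather than the real definitions from lpfc_hw.h.

#include <stdio.h>

/* Placeholder sizes; the driver uses sizeof(struct fcp_cmnd),
 * sizeof(struct fcp_rsp) and sizeof(struct ulp_bde64) from lpfc_hw.h.
 */
#define FCP_CMND_SZ              32
#define FCP_RSP_SZ               96
#define BDE64_SZ                 12

#define LPFC_DEFAULT_SG_SEG_CNT  64	/* new default in lpfc.h */
#define LPFC_MAX_SG_SEG_CNT     256	/* new upper bound in lpfc.h */

/* Mirrors the lpfc_get_cfgparam() calculation: two extra BDEs are
 * reserved for the FCP command and response.
 */
static unsigned int sg_dma_buf_size(unsigned int sg_seg_cnt)
{
	return FCP_CMND_SZ + FCP_RSP_SZ + (sg_seg_cnt + 2) * BDE64_SZ;
}

int main(void)
{
	unsigned int cnt;

	for (cnt = LPFC_DEFAULT_SG_SEG_CNT; cnt <= LPFC_MAX_SG_SEG_CNT; cnt *= 2)
		printf("sg_seg_cnt=%3u -> sg_dma_buf_size=%u bytes\n",
		       cnt, sg_dma_buf_size(cnt));
	return 0;
}
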
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index ba3ecab9baf..f26b9538aff 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -29,7 +29,8 @@ struct lpfc_sli2_slim; #define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact the NameServer before giving up. */ #define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */ -#define LPFC_SG_SEG_CNT 64 /* sg element count per scsi cmnd */ +#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */ +#define LPFC_MAX_SG_SEG_CNT 256 /* sg element count per scsi cmnd */ #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ #define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ @@ -68,6 +69,7 @@ struct lpfc_dmabuf { struct list_head list; void *virt; /* virtual address ptr */ dma_addr_t phys; /* mapped address */ + uint32_t buffer_tag; /* used for tagged queue ring */ }; struct lpfc_dma_pool { @@ -272,10 +274,16 @@ struct lpfc_vport { #define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ #define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */ #define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */ -#define FC_RFF_NOT_SUPPORTED 0x40000 /* RFF_ID was rejected by switch */ #define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */ #define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */ + uint32_t ct_flags; +#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */ +#define FC_CT_RNN_ID 0x2 /* RNN_ID accepted by switch */ +#define FC_CT_RSNN_NN 0x4 /* RSNN_NN accepted by switch */ +#define FC_CT_RSPN_ID 0x8 /* RSPN_ID accepted by switch */ +#define FC_CT_RFT_ID 0x10 /* RFT_ID accepted by switch */ + struct list_head fc_nodes; /* Keep counters for the number of entries in each list. 
*/ @@ -344,6 +352,7 @@ struct lpfc_vport { uint32_t cfg_discovery_threads; uint32_t cfg_log_verbose; uint32_t cfg_max_luns; + uint32_t cfg_enable_da_id; uint32_t dev_loss_tmo_changed; @@ -360,6 +369,7 @@ struct lpfc_vport { struct hbq_s { uint16_t entry_count; /* Current number of HBQ slots */ + uint16_t buffer_count; /* Current number of buffers posted */ uint32_t next_hbqPutIdx; /* Index to next HBQ slot to use */ uint32_t hbqPutIdx; /* HBQ slot to use */ uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */ @@ -377,6 +387,11 @@ struct hbq_s { #define LPFC_ELS_HBQ 0 #define LPFC_EXTRA_HBQ 1 +enum hba_temp_state { + HBA_NORMAL_TEMP, + HBA_OVER_TEMP +}; + struct lpfc_hba { struct lpfc_sli sli; uint32_t sli_rev; /* SLI2 or SLI3 */ @@ -457,7 +472,8 @@ struct lpfc_hba { uint64_t cfg_soft_wwnn; uint64_t cfg_soft_wwpn; uint32_t cfg_hba_queue_depth; - + uint32_t cfg_enable_hba_reset; + uint32_t cfg_enable_hba_heartbeat; lpfc_vpd_t vpd; /* vital product data */ @@ -544,8 +560,7 @@ struct lpfc_hba { struct list_head port_list; struct lpfc_vport *pport; /* physical lpfc_vport pointer */ uint16_t max_vpi; /* Maximum virtual nports */ -#define LPFC_MAX_VPI 100 /* Max number of VPI supported */ -#define LPFC_MAX_VPORTS (LPFC_MAX_VPI+1)/* Max number of VPorts supported */ +#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */ unsigned long *vpi_bmask; /* vpi allocation table */ /* Data structure used by fabric iocb scheduler */ @@ -563,16 +578,30 @@ struct lpfc_hba { struct dentry *hba_debugfs_root; atomic_t debugfs_vport_count; struct dentry *debug_hbqinfo; - struct dentry *debug_dumpslim; + struct dentry *debug_dumpHostSlim; + struct dentry *debug_dumpHBASlim; struct dentry *debug_slow_ring_trc; struct lpfc_debugfs_trc *slow_ring_trc; atomic_t slow_ring_trc_cnt; #endif + /* Used for deferred freeing of ELS data buffers */ + struct list_head elsbuf; + int elsbuf_cnt; + int elsbuf_prev_cnt; + + uint8_t temp_sensor_support; /* Fields used for heart beat. */ unsigned long last_completion_time; struct timer_list hb_tmofunc; uint8_t hb_outstanding; + /* + * Following bit will be set for all buffer tags which are not + * associated with any HBQ. + */ +#define QUE_BUFTAG_BIT (1<<31) + uint32_t buffer_tag_count; + enum hba_temp_state over_temp_state; }; static inline struct Scsi_Host * @@ -598,5 +627,15 @@ lpfc_is_link_up(struct lpfc_hba *phba) phba->link_state == LPFC_HBA_READY; } -#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */ +#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */ +#define FC_REG_TEMPERATURE_EVENT 0x20 /* Register for temperature + event */ +struct temp_event { + uint32_t event_type; + uint32_t event_code; + uint32_t data; +}; +#define LPFC_CRIT_TEMP 0x1 +#define LPFC_THRESHOLD_TEMP 0x2 +#define LPFC_NORMAL_TEMP 0x3 diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 80a11218b9b..4bae4a2ed2f 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2007 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -45,6 +45,10 @@ #define LPFC_MIN_DEVLOSS_TMO 1 #define LPFC_MAX_DEVLOSS_TMO 255 +#define LPFC_MAX_LINK_SPEED 8 +#define LPFC_LINK_SPEED_BITMAP 0x00000117 +#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8" + static void lpfc_jedec_to_ascii(int incr, char hdw[]) { @@ -86,6 +90,15 @@ lpfc_serialnum_show(struct class_device *cdev, char *buf) } static ssize_t +lpfc_temp_sensor_show(struct class_device *cdev, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support); +} + +static ssize_t lpfc_modeldesc_show(struct class_device *cdev, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); @@ -178,12 +191,9 @@ lpfc_state_show(struct class_device *cdev, char *buf) case LPFC_LINK_UP: case LPFC_CLEAR_LA: case LPFC_HBA_READY: - len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - \n"); + len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - "); switch (vport->port_state) { - len += snprintf(buf + len, PAGE_SIZE-len, - "initializing\n"); - break; case LPFC_LOCAL_CFG_LINK: len += snprintf(buf + len, PAGE_SIZE-len, "Configuring Link\n"); @@ -252,8 +262,7 @@ lpfc_issue_lip(struct Scsi_Host *shost) int mbxstatus = MBXERR_ERROR; if ((vport->fc_flag & FC_OFFLINE_MODE) || - (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) || - (vport->port_state != LPFC_VPORT_READY)) + (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)) return -EPERM; pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); @@ -305,12 +314,14 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) psli = &phba->sli; + /* Wait a little for things to settle down, but not + * long enough for dev loss timeout to expire. + */ for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; - /* The linkdown event takes 30 seconds to timeout. 
*/ while (pring->txcmplq_cnt) { msleep(10); - if (cnt++ > 3000) { + if (cnt++ > 500) { /* 5 secs */ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0466 Outstanding IO when " @@ -336,6 +347,9 @@ lpfc_selective_reset(struct lpfc_hba *phba) struct completion online_compl; int status = 0; + if (!phba->cfg_enable_hba_reset) + return -EIO; + status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); if (status != 0) @@ -409,6 +423,8 @@ lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count) struct completion online_compl; int status=0; + if (!phba->cfg_enable_hba_reset) + return -EACCES; init_completion(&online_compl); if(strncmp(buf, "online", sizeof("online") - 1) == 0) { @@ -908,6 +924,8 @@ static CLASS_DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL); static CLASS_DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL); static CLASS_DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL); static CLASS_DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL); +static CLASS_DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, + NULL); static char *lpfc_soft_wwn_key = "C99G71SL8032A"; @@ -971,6 +989,14 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count) unsigned int i, j, cnt=count; u8 wwpn[8]; + if (!phba->cfg_enable_hba_reset) + return -EACCES; + spin_lock_irq(&phba->hbalock); + if (phba->over_temp_state == HBA_OVER_TEMP) { + spin_unlock_irq(&phba->hbalock); + return -EACCES; + } + spin_unlock_irq(&phba->hbalock); /* count may include a LF at end of string */ if (buf[cnt-1] == '\n') cnt--; @@ -1102,7 +1128,13 @@ MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:" " 2 - select SLI-2 even on SLI-3 capable HBAs," " 3 - select SLI-3"); -LPFC_ATTR_R(enable_npiv, 0, 0, 1, "Enable NPIV functionality"); +int lpfc_enable_npiv = 0; +module_param(lpfc_enable_npiv, int, 0); +MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality"); +lpfc_param_show(enable_npiv); +lpfc_param_init(enable_npiv, 0, 0, 1); +static CLASS_DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, + lpfc_enable_npiv_show, NULL); /* # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear @@ -1248,6 +1280,13 @@ LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff, "Verbose logging bit-mask"); /* +# lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters +# objects that have been registered with the nameserver after login. +*/ +LPFC_VPORT_ATTR_R(enable_da_id, 0, 0, 1, + "Deregister nameserver objects before LOGO"); + +/* # lun_queue_depth: This parameter is used to limit the number of outstanding # commands per FCP LUN. Value range is [1,128]. Default value is 30. */ @@ -1369,7 +1408,33 @@ LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1, # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6]. # Default value is 0. 
*/ -LPFC_ATTR_RW(topology, 0, 0, 6, "Select Fibre Channel topology"); +static int +lpfc_topology_set(struct lpfc_hba *phba, int val) +{ + int err; + uint32_t prev_val; + if (val >= 0 && val <= 6) { + prev_val = phba->cfg_topology; + phba->cfg_topology = val; + err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); + if (err) + phba->cfg_topology = prev_val; + return err; + } + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "%d:0467 lpfc_topology attribute cannot be set to %d, " + "allowed range is [0, 6]\n", + phba->brd_no, val); + return -EINVAL; +} +static int lpfc_topology = 0; +module_param(lpfc_topology, int, 0); +MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology"); +lpfc_param_show(topology) +lpfc_param_init(topology, 0, 0, 6) +lpfc_param_store(topology) +static CLASS_DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR, + lpfc_topology_show, lpfc_topology_store); /* # lpfc_link_speed: Link speed selection for initializing the Fibre Channel @@ -1381,7 +1446,59 @@ LPFC_ATTR_RW(topology, 0, 0, 6, "Select Fibre Channel topology"); # 8 = 8 Gigabaud # Value range is [0,8]. Default value is 0. */ -LPFC_ATTR_R(link_speed, 0, 0, 8, "Select link speed"); +static int +lpfc_link_speed_set(struct lpfc_hba *phba, int val) +{ + int err; + uint32_t prev_val; + + if (((val == LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || + ((val == LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || + ((val == LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || + ((val == LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) || + ((val == LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb))) + return -EINVAL; + + if ((val >= 0 && val <= LPFC_MAX_LINK_SPEED) + && (LPFC_LINK_SPEED_BITMAP & (1 << val))) { + prev_val = phba->cfg_link_speed; + phba->cfg_link_speed = val; + err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); + if (err) + phba->cfg_link_speed = prev_val; + return err; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "%d:0469 lpfc_link_speed attribute cannot be set to %d, " + "allowed range is [0, 8]\n", + phba->brd_no, val); + return -EINVAL; +} + +static int lpfc_link_speed = 0; +module_param(lpfc_link_speed, int, 0); +MODULE_PARM_DESC(lpfc_link_speed, "Select link speed"); +lpfc_param_show(link_speed) +static int +lpfc_link_speed_init(struct lpfc_hba *phba, int val) +{ + if ((val >= 0 && val <= LPFC_MAX_LINK_SPEED) + && (LPFC_LINK_SPEED_BITMAP & (1 << val))) { + phba->cfg_link_speed = val; + return 0; + } + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0454 lpfc_link_speed attribute cannot " + "be set to %d, allowed values are " + "["LPFC_LINK_SPEED_STRING"]\n", val); + phba->cfg_link_speed = 0; + return -EINVAL; +} + +lpfc_param_store(link_speed) +static CLASS_DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR, + lpfc_link_speed_show, lpfc_link_speed_store); /* # lpfc_fcp_class: Determines FC class to use for the FCP protocol. @@ -1479,7 +1596,30 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, */ LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible"); +/* +# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. +# 0 = HBA resets disabled +# 1 = HBA resets enabled (default) +# Value range is [0,1]. Default value is 1. +*/ +LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver."); + +/* +# lpfc_enable_hba_heartbeat: Enable HBA heartbeat timer.. +# 0 = HBA Heartbeat disabled +# 1 = HBA Heartbeat enabled (default) +# Value range is [0,1]. Default value is 1. 
+*/ +LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat."); +/* + * lpfc_sg_seg_cnt: Initial Maximum DMA Segment Count + * This value can be set to values between 64 and 256. The default value is + * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer + * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE). + */ +LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, + LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count"); struct class_device_attribute *lpfc_hba_attrs[] = { &class_device_attr_info, @@ -1494,6 +1634,7 @@ struct class_device_attribute *lpfc_hba_attrs[] = { &class_device_attr_state, &class_device_attr_num_discovered_ports, &class_device_attr_lpfc_drvr_version, + &class_device_attr_lpfc_temp_sensor, &class_device_attr_lpfc_log_verbose, &class_device_attr_lpfc_lun_queue_depth, &class_device_attr_lpfc_hba_queue_depth, @@ -1530,6 +1671,9 @@ struct class_device_attribute *lpfc_hba_attrs[] = { &class_device_attr_lpfc_soft_wwnn, &class_device_attr_lpfc_soft_wwpn, &class_device_attr_lpfc_soft_wwn_enable, + &class_device_attr_lpfc_enable_hba_reset, + &class_device_attr_lpfc_enable_hba_heartbeat, + &class_device_attr_lpfc_sg_seg_cnt, NULL, }; @@ -1552,6 +1696,7 @@ struct class_device_attribute *lpfc_vport_attrs[] = { &class_device_attr_lpfc_max_luns, &class_device_attr_nport_evt_cnt, &class_device_attr_npiv_info, + &class_device_attr_lpfc_enable_da_id, NULL, }; @@ -1727,13 +1872,18 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, spin_lock_irq(&phba->hbalock); + if (phba->over_temp_state == HBA_OVER_TEMP) { + sysfs_mbox_idle(phba); + spin_unlock_irq(&phba->hbalock); + return -EACCES; + } + if (off == 0 && phba->sysfs_mbox.state == SMBOX_WRITING && phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) { switch (phba->sysfs_mbox.mbox->mb.mbxCommand) { /* Offline only */ - case MBX_WRITE_NV: case MBX_INIT_LINK: case MBX_DOWN_LINK: case MBX_CONFIG_LINK: @@ -1744,9 +1894,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, case MBX_DUMP_CONTEXT: case MBX_RUN_DIAGS: case MBX_RESTART: - case MBX_FLASH_WR_ULA: case MBX_SET_MASK: - case MBX_SET_SLIM: case MBX_SET_DEBUG: if (!(vport->fc_flag & FC_OFFLINE_MODE)) { printk(KERN_WARNING "mbox_read:Command 0x%x " @@ -1756,6 +1904,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, spin_unlock_irq(&phba->hbalock); return -EPERM; } + case MBX_WRITE_NV: + case MBX_WRITE_VPARMS: case MBX_LOAD_SM: case MBX_READ_NV: case MBX_READ_CONFIG: @@ -1772,6 +1922,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, case MBX_LOAD_EXP_ROM: case MBX_BEACON: case MBX_DEL_LD_ENTRY: + case MBX_SET_VARIABLE: + case MBX_WRITE_WWN: break; case MBX_READ_SPARM64: case MBX_READ_LA: @@ -1793,6 +1945,17 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, return -EPERM; } + /* If HBA encountered an error attention, allow only DUMP + * mailbox command until the HBA is restarted. 
+ */ + if ((phba->pport->stopped) && + (phba->sysfs_mbox.mbox->mb.mbxCommand + != MBX_DUMP_MEMORY)) { + sysfs_mbox_idle(phba); + spin_unlock_irq(&phba->hbalock); + return -EPERM; + } + phba->sysfs_mbox.mbox->vport = vport; if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { @@ -1993,7 +2156,8 @@ lpfc_get_host_speed(struct Scsi_Host *shost) fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } - } + } else + fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; spin_unlock_irq(shost->host_lock); } @@ -2013,7 +2177,7 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost) node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); else /* fabric is local port if there is no F/FL_Port */ - node_name = wwn_to_u64(vport->fc_nodename.u.wwn); + node_name = 0; spin_unlock_irq(shost->host_lock); @@ -2337,8 +2501,6 @@ struct fc_function_template lpfc_transport_functions = { .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, .terminate_rport_io = lpfc_terminate_rport_io, - .vport_create = lpfc_vport_create, - .vport_delete = lpfc_vport_delete, .dd_fcvport_size = sizeof(struct lpfc_vport *), }; @@ -2414,21 +2576,23 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_poll_tmo_init(phba, lpfc_poll_tmo); lpfc_enable_npiv_init(phba, lpfc_enable_npiv); lpfc_use_msi_init(phba, lpfc_use_msi); + lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); + lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); phba->cfg_poll = lpfc_poll; phba->cfg_soft_wwnn = 0L; phba->cfg_soft_wwpn = 0L; - /* - * The total number of segments is the configuration value plus 2 - * since the IOCB need a command and response bde. - */ - phba->cfg_sg_seg_cnt = LPFC_SG_SEG_CNT + 2; + lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); + /* Also reinitialize the host templates with new values. */ + lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; /* * Since the sg_tablesize is module parameter, the sg_dma_buf_size - * used to create the sg_dma_buf_pool must be dynamically calculated + * used to create the sg_dma_buf_pool must be dynamically calculated. + * 2 segments are added since the IOCB needs a command and response bde. 
*/ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + - (phba->cfg_sg_seg_cnt * sizeof(struct ulp_bde64)); + ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); return; } @@ -2448,5 +2612,6 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport) lpfc_discovery_threads_init(vport, lpfc_discovery_threads); lpfc_max_luns_init(vport, lpfc_max_luns); lpfc_scan_down_init(vport, lpfc_scan_down); + lpfc_enable_da_id_init(vport, lpfc_enable_da_id); return; } diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index a599e151071..50fcb7c930b 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -23,6 +23,8 @@ typedef int (*node_filter)(struct lpfc_nodelist *ndlp, void *param); struct fc_rport; void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); + void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp); @@ -43,9 +45,9 @@ void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove); int lpfc_linkdown(struct lpfc_hba *); +void lpfc_port_link_failure(struct lpfc_vport *); void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); -void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); @@ -66,15 +68,15 @@ int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *, void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t); struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *); int lpfc_nlp_put(struct lpfc_nodelist *); +int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp); struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t); void lpfc_disc_list_loopmap(struct lpfc_vport *); void lpfc_disc_start(struct lpfc_vport *); -void lpfc_disc_flush_list(struct lpfc_vport *); void lpfc_cleanup_discovery_resources(struct lpfc_vport *); +void lpfc_cleanup(struct lpfc_vport *); void lpfc_disc_timeout(unsigned long); struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); -struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); void lpfc_worker_wake_up(struct lpfc_hba *); int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t); @@ -82,17 +84,17 @@ int lpfc_do_work(void *); int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t); -void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *, - struct lpfc_nodelist *); void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *); int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *, struct serv_parm *, uint32_t); int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *); +void lpfc_more_plogi(struct lpfc_vport *); +void lpfc_more_adisc(struct lpfc_vport *); +void lpfc_end_rscn(struct lpfc_vport *); int lpfc_els_chk_latt(struct lpfc_vport *); int lpfc_els_abort_flogi(struct lpfc_hba *); int lpfc_initial_flogi(struct lpfc_vport *); int lpfc_initial_fdisc(struct lpfc_vport *); -int lpfc_issue_els_fdisc(struct 
lpfc_vport *, struct lpfc_nodelist *, uint8_t); int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t); int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); @@ -112,7 +114,6 @@ int lpfc_els_rsp_prli_acc(struct lpfc_vport *, struct lpfc_iocbq *, void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *); void lpfc_els_retry_delay(unsigned long); void lpfc_els_retry_delay_handler(struct lpfc_nodelist *); -void lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *); void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_iocbq *); int lpfc_els_handle_rscn(struct lpfc_vport *); @@ -124,7 +125,6 @@ int lpfc_els_disc_adisc(struct lpfc_vport *); int lpfc_els_disc_plogi(struct lpfc_vport *); void lpfc_els_timeout(unsigned long); void lpfc_els_timeout_handler(struct lpfc_vport *); -void lpfc_hb_timeout(unsigned long); void lpfc_hb_timeout_handler(struct lpfc_hba *); void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, @@ -142,7 +142,6 @@ void lpfc_hba_init(struct lpfc_hba *, uint32_t *); int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int); void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int); int lpfc_online(struct lpfc_hba *); -void lpfc_block_mgmt_io(struct lpfc_hba *); void lpfc_unblock_mgmt_io(struct lpfc_hba *); void lpfc_offline_prep(struct lpfc_hba *); void lpfc_offline(struct lpfc_hba *); @@ -165,7 +164,6 @@ int lpfc_mbox_tmo_val(struct lpfc_hba *, int); void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, uint32_t , LPFC_MBOXQ_t *); -struct lpfc_hbq_entry * lpfc_sli_next_hbq_slot(struct lpfc_hba *, uint32_t); struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *); void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *); @@ -178,7 +176,6 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba); void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba); struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *); void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb); -void __lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb); uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb); void lpfc_reset_barrier(struct lpfc_hba * phba); @@ -204,11 +201,14 @@ int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *, struct lpfc_sli_ring *, dma_addr_t); + +uint32_t lpfc_sli_get_buffer_tag(struct lpfc_hba *); +struct lpfc_dmabuf * lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *, + struct lpfc_sli_ring *, uint32_t ); + int lpfc_sli_hbq_count(void); -int lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *, uint32_t); int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t); void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *); -struct hbq_dmabuf *lpfc_sli_hbqbuf_find(struct lpfc_hba *, uint32_t); int lpfc_sli_hbq_size(void); int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_iocbq *); @@ -219,9 +219,6 @@ int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t, void lpfc_mbox_timeout(unsigned long); void lpfc_mbox_timeout_handler(struct lpfc_hba *); -struct lpfc_nodelist *__lpfc_find_node(struct lpfc_vport *, node_filter, - void *); -struct lpfc_nodelist *lpfc_find_node(struct lpfc_vport *, node_filter, void *); struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t); struct 
lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *, struct lpfc_name *); @@ -260,6 +257,7 @@ extern struct scsi_host_template lpfc_vport_template; extern struct fc_function_template lpfc_transport_functions; extern struct fc_function_template lpfc_vport_transport_functions; extern int lpfc_sli_mode; +extern int lpfc_enable_npiv; int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t); void lpfc_terminate_rport_io(struct fc_rport *); @@ -281,11 +279,8 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t, extern struct lpfc_hbq_init *lpfc_hbq_defs[]; /* Interface exported by fabric iocb scheduler */ -int lpfc_issue_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *); -void lpfc_fabric_abort_vport(struct lpfc_vport *); void lpfc_fabric_abort_nport(struct lpfc_nodelist *); void lpfc_fabric_abort_hba(struct lpfc_hba *); -void lpfc_fabric_abort_flogi(struct lpfc_hba *); void lpfc_fabric_block_timeout(unsigned long); void lpfc_unblock_fabric_iocbs(struct lpfc_hba *); void lpfc_adjust_queue_depth(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index c701e4d611a..92441ce610e 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -19,7 +19,7 @@ *******************************************************************/ /* - * Fibre Channel SCSI LAN Device Driver CT support + * Fibre Channel SCSI LAN Device Driver CT support: FC Generic Services FC-GS */ #include <linux/blkdev.h> @@ -57,45 +57,27 @@ static char *lpfc_release_version = LPFC_DRIVER_VERSION; -/* - * lpfc_ct_unsol_event - */ static void -lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, - struct lpfc_dmabuf *mp, uint32_t size) +lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, + struct lpfc_dmabuf *mp, uint32_t size) { if (!mp) { - printk(KERN_ERR "%s (%d): Unsolited CT, no buffer, " - "piocbq = %p, status = x%x, mp = %p, size = %d\n", - __FUNCTION__, __LINE__, - piocbq, piocbq->iocb.ulpStatus, mp, size); + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "0146 Ignoring unsolicted CT No HBQ " + "status = x%x\n", + piocbq->iocb.ulpStatus); } - - printk(KERN_ERR "%s (%d): Ignoring unsolicted CT piocbq = %p, " - "buffer = %p, size = %d, status = x%x\n", - __FUNCTION__, __LINE__, - piocbq, mp, size, - piocbq->iocb.ulpStatus); - + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "0145 Ignoring unsolicted CT HBQ Size:%d " + "status = x%x\n", + size, piocbq->iocb.ulpStatus); } static void -lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, - struct lpfc_dmabuf *mp, uint32_t size) +lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, + struct lpfc_dmabuf *mp, uint32_t size) { - if (!mp) { - printk(KERN_ERR "%s (%d): Unsolited CT, no " - "HBQ buffer, piocbq = %p, status = x%x\n", - __FUNCTION__, __LINE__, - piocbq, piocbq->iocb.ulpStatus); - } else { - lpfc_ct_unsol_buffer(phba, piocbq, mp, size); - printk(KERN_ERR "%s (%d): Ignoring unsolicted CT " - "piocbq = %p, buffer = %p, size = %d, " - "status = x%x\n", - __FUNCTION__, __LINE__, - piocbq, mp, size, piocbq->iocb.ulpStatus); - } + lpfc_ct_ignore_hbq_buffer(phba, piocbq, mp, size); } void @@ -109,11 +91,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq; dma_addr_t paddr; uint32_t size; - struct lpfc_dmabuf *bdeBuf1 = piocbq->context2; - struct lpfc_dmabuf *bdeBuf2 = piocbq->context3; - - piocbq->context2 = NULL; - piocbq->context3 = NULL; + struct list_head head; 
+ struct lpfc_dmabuf *bdeBuf; if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) { lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); @@ -122,7 +101,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* Not enough posted buffers; Try posting more buffers */ phba->fc_stat.NoRcvBuf++; if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) - lpfc_post_buffer(phba, pring, 0, 1); + lpfc_post_buffer(phba, pring, 2, 1); return; } @@ -133,38 +112,34 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, return; if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { - list_for_each_entry(iocbq, &piocbq->list, list) { + INIT_LIST_HEAD(&head); + list_add_tail(&head, &piocbq->list); + list_for_each_entry(iocbq, &head, list) { icmd = &iocbq->iocb; - if (icmd->ulpBdeCount == 0) { - printk(KERN_ERR "%s (%d): Unsolited CT, no " - "BDE, iocbq = %p, status = x%x\n", - __FUNCTION__, __LINE__, - iocbq, iocbq->iocb.ulpStatus); + if (icmd->ulpBdeCount == 0) continue; - } - + bdeBuf = iocbq->context2; + iocbq->context2 = NULL; size = icmd->un.cont64[0].tus.f.bdeSize; - lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf1, size); - lpfc_in_buf_free(phba, bdeBuf1); + lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size); + lpfc_in_buf_free(phba, bdeBuf); if (icmd->ulpBdeCount == 2) { - lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf2, - size); - lpfc_in_buf_free(phba, bdeBuf2); + bdeBuf = iocbq->context3; + iocbq->context3 = NULL; + size = icmd->unsli3.rcvsli3.bde2.tus.f.bdeSize; + lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, + size); + lpfc_in_buf_free(phba, bdeBuf); } } + list_del(&head); } else { struct lpfc_iocbq *next; list_for_each_entry_safe(iocbq, next, &piocbq->list, list) { icmd = &iocbq->iocb; - if (icmd->ulpBdeCount == 0) { - printk(KERN_ERR "%s (%d): Unsolited CT, no " - "BDE, iocbq = %p, status = x%x\n", - __FUNCTION__, __LINE__, - iocbq, iocbq->iocb.ulpStatus); - continue; - } - + if (icmd->ulpBdeCount == 0) + lpfc_ct_unsol_buffer(phba, piocbq, NULL, 0); for (i = 0; i < icmd->ulpBdeCount; i++) { paddr = getPaddr(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow); @@ -176,6 +151,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } list_del(&iocbq->list); lpfc_sli_release_iocbq(phba, iocbq); + lpfc_post_buffer(phba, pring, i, 1); } } } @@ -203,7 +179,7 @@ lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl, struct lpfc_dmabuf *mp; int cnt, i = 0; - /* We get chucks of FCELSSIZE */ + /* We get chunks of FCELSSIZE */ cnt = size > FCELSSIZE ? FCELSSIZE: size; while (size) { @@ -426,6 +402,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) lpfc_set_disctmo(vport); vport->num_disc_nodes = 0; + vport->fc_ns_retry = 0; list_add_tail(&head, &mp->list); @@ -458,7 +435,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) ((lpfc_find_vport_by_did(phba, Did) == NULL) || vport->cfg_peer_port_login)) { if ((vport->port_type != LPFC_NPIV_PORT) || - (vport->fc_flag & FC_RFF_NOT_SUPPORTED) || + (!(vport->ct_flags & FC_CT_RFF_ID)) || (!vport->cfg_restrict_login)) { ndlp = lpfc_setup_disc_node(vport, Did); if (ndlp) { @@ -506,7 +483,17 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) Did, vport->fc_flag, vport->fc_rscn_id_cnt); - if (lpfc_ns_cmd(vport, + /* This NPortID was previously + * a FCP target, * Don't even + * bother to send GFF_ID. 
+ */ + ndlp = lpfc_findnode_did(vport, + Did); + if (ndlp && (ndlp->nlp_type & + NLP_FCP_TARGET)) + lpfc_setup_disc_node + (vport, Did); + else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID, 0, Did) == 0) vport->num_disc_nodes++; @@ -554,7 +541,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_dmabuf *outp; struct lpfc_sli_ct_request *CTrsp; struct lpfc_nodelist *ndlp; - int rc; + int rc, retry; /* First save ndlp, before we overwrite it */ ndlp = cmdiocb->context_un.ndlp; @@ -574,7 +561,6 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (vport->load_flag & FC_UNLOADING) goto out; - if (lpfc_els_chk_latt(vport) || lpfc_error_lost_link(irsp)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0216 Link event during NS query\n"); @@ -585,14 +571,35 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (irsp->ulpStatus) { /* Check for retry */ if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { - if ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || - (irsp->un.ulpWord[4] != IOERR_NO_RESOURCES)) + retry = 1; + if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { + switch (irsp->un.ulpWord[4]) { + case IOERR_NO_RESOURCES: + /* We don't increment the retry + * count for this case. + */ + break; + case IOERR_LINK_DOWN: + case IOERR_SLI_ABORTED: + case IOERR_SLI_DOWN: + retry = 0; + break; + default: + vport->fc_ns_retry++; + } + } + else vport->fc_ns_retry++; - /* CT command is being retried */ - rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, + + if (retry) { + /* CT command is being retried */ + rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, vport->fc_ns_retry, 0); - if (rc == 0) - goto out; + if (rc == 0) { + /* success */ + goto out; + } + } } lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, @@ -698,7 +705,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1; struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2; struct lpfc_sli_ct_request *CTrsp; - int did; + int did, rc, retry; uint8_t fbits; struct lpfc_nodelist *ndlp; @@ -729,6 +736,39 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } } else { + /* Check for retry */ + if (cmdiocb->retry < LPFC_MAX_NS_RETRY) { + retry = 1; + if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { + switch (irsp->un.ulpWord[4]) { + case IOERR_NO_RESOURCES: + /* We don't increment the retry + * count for this case. 
+ */ + break; + case IOERR_LINK_DOWN: + case IOERR_SLI_ABORTED: + case IOERR_SLI_DOWN: + retry = 0; + break; + default: + cmdiocb->retry++; + } + } + else + cmdiocb->retry++; + + if (retry) { + /* CT command is being retried */ + rc = lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID, + cmdiocb->retry, did); + if (rc == 0) { + /* success */ + lpfc_ct_free_iocb(phba, cmdiocb); + return; + } + } + } lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0267 NameServer GFF Rsp " "x%x Error (%d %d) Data: x%x x%x\n", @@ -778,8 +818,8 @@ out: static void -lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, - struct lpfc_iocbq *rspiocb) +lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_dmabuf *inp; @@ -809,7 +849,7 @@ lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, - "0209 RFT request completes, latt %d, " + "0209 CT Request completes, latt %d, " "ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n", latt, irsp->ulpStatus, CTrsp->CommandResponse.bits.CmdRsp, @@ -848,10 +888,44 @@ out: } static void +lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + IOCB_t *irsp = &rspiocb->iocb; + struct lpfc_vport *vport = cmdiocb->vport; + + if (irsp->ulpStatus == IOSTAT_SUCCESS) { + struct lpfc_dmabuf *outp; + struct lpfc_sli_ct_request *CTrsp; + + outp = (struct lpfc_dmabuf *) cmdiocb->context2; + CTrsp = (struct lpfc_sli_ct_request *) outp->virt; + if (CTrsp->CommandResponse.bits.CmdRsp == + be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) + vport->ct_flags |= FC_CT_RFT_ID; + } + lpfc_cmpl_ct(phba, cmdiocb, rspiocb); + return; +} + +static void lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { - lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); + IOCB_t *irsp = &rspiocb->iocb; + struct lpfc_vport *vport = cmdiocb->vport; + + if (irsp->ulpStatus == IOSTAT_SUCCESS) { + struct lpfc_dmabuf *outp; + struct lpfc_sli_ct_request *CTrsp; + + outp = (struct lpfc_dmabuf *) cmdiocb->context2; + CTrsp = (struct lpfc_sli_ct_request *) outp->virt; + if (CTrsp->CommandResponse.bits.CmdRsp == + be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) + vport->ct_flags |= FC_CT_RNN_ID; + } + lpfc_cmpl_ct(phba, cmdiocb, rspiocb); return; } @@ -859,7 +933,20 @@ static void lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { - lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); + IOCB_t *irsp = &rspiocb->iocb; + struct lpfc_vport *vport = cmdiocb->vport; + + if (irsp->ulpStatus == IOSTAT_SUCCESS) { + struct lpfc_dmabuf *outp; + struct lpfc_sli_ct_request *CTrsp; + + outp = (struct lpfc_dmabuf *) cmdiocb->context2; + CTrsp = (struct lpfc_sli_ct_request *) outp->virt; + if (CTrsp->CommandResponse.bits.CmdRsp == + be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) + vport->ct_flags |= FC_CT_RSPN_ID; + } + lpfc_cmpl_ct(phba, cmdiocb, rspiocb); return; } @@ -867,7 +954,32 @@ static void lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { - lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); + IOCB_t *irsp = &rspiocb->iocb; + struct lpfc_vport *vport = cmdiocb->vport; + + if (irsp->ulpStatus == IOSTAT_SUCCESS) { + struct lpfc_dmabuf *outp; + struct lpfc_sli_ct_request *CTrsp; + + outp = (struct lpfc_dmabuf *) cmdiocb->context2; + 
CTrsp = (struct lpfc_sli_ct_request *) outp->virt; + if (CTrsp->CommandResponse.bits.CmdRsp == + be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) + vport->ct_flags |= FC_CT_RSNN_NN; + } + lpfc_cmpl_ct(phba, cmdiocb, rspiocb); + return; +} + +static void +lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + + /* even if it fails we will act as though it succeeded. */ + vport->ct_flags = 0; + lpfc_cmpl_ct(phba, cmdiocb, rspiocb); return; } @@ -878,10 +990,17 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, IOCB_t *irsp = &rspiocb->iocb; struct lpfc_vport *vport = cmdiocb->vport; - if (irsp->ulpStatus != IOSTAT_SUCCESS) - vport->fc_flag |= FC_RFF_NOT_SUPPORTED; + if (irsp->ulpStatus == IOSTAT_SUCCESS) { + struct lpfc_dmabuf *outp; + struct lpfc_sli_ct_request *CTrsp; - lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); + outp = (struct lpfc_dmabuf *) cmdiocb->context2; + CTrsp = (struct lpfc_sli_ct_request *) outp->virt; + if (CTrsp->CommandResponse.bits.CmdRsp == + be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) + vport->ct_flags |= FC_CT_RFF_ID; + } + lpfc_cmpl_ct(phba, cmdiocb, rspiocb); return; } @@ -1001,6 +1120,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, bpl->tus.f.bdeSize = RSPN_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RSNN_NN) bpl->tus.f.bdeSize = RSNN_REQUEST_SZ; + else if (cmdcode == SLI_CTNS_DA_ID) + bpl->tus.f.bdeSize = DA_ID_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RFF_ID) bpl->tus.f.bdeSize = RFF_REQUEST_SZ; else @@ -1029,31 +1150,34 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, case SLI_CTNS_GFF_ID: CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_CTNS_GFF_ID); - CtReq->un.gff.PortId = be32_to_cpu(context); + CtReq->un.gff.PortId = cpu_to_be32(context); cmpl = lpfc_cmpl_ct_cmd_gff_id; break; case SLI_CTNS_RFT_ID: + vport->ct_flags &= ~FC_CT_RFT_ID; CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_CTNS_RFT_ID); - CtReq->un.rft.PortId = be32_to_cpu(vport->fc_myDID); + CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID); CtReq->un.rft.fcpReg = 1; cmpl = lpfc_cmpl_ct_cmd_rft_id; break; case SLI_CTNS_RNN_ID: + vport->ct_flags &= ~FC_CT_RNN_ID; CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_CTNS_RNN_ID); - CtReq->un.rnn.PortId = be32_to_cpu(vport->fc_myDID); + CtReq->un.rnn.PortId = cpu_to_be32(vport->fc_myDID); memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename, sizeof (struct lpfc_name)); cmpl = lpfc_cmpl_ct_cmd_rnn_id; break; case SLI_CTNS_RSPN_ID: + vport->ct_flags &= ~FC_CT_RSPN_ID; CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_CTNS_RSPN_ID); - CtReq->un.rspn.PortId = be32_to_cpu(vport->fc_myDID); + CtReq->un.rspn.PortId = cpu_to_be32(vport->fc_myDID); size = sizeof(CtReq->un.rspn.symbname); CtReq->un.rspn.len = lpfc_vport_symbolic_port_name(vport, @@ -1061,6 +1185,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, cmpl = lpfc_cmpl_ct_cmd_rspn_id; break; case SLI_CTNS_RSNN_NN: + vport->ct_flags &= ~FC_CT_RSNN_NN; CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_CTNS_RSNN_NN); memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename, @@ -1071,11 +1196,18 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, CtReq->un.rsnn.symbname, size); cmpl = lpfc_cmpl_ct_cmd_rsnn_nn; break; + case SLI_CTNS_DA_ID: + /* Implement DA_ID Nameserver request */ + CtReq->CommandResponse.bits.CmdRsp = + be16_to_cpu(SLI_CTNS_DA_ID); + CtReq->un.da_id.port_id = cpu_to_be32(vport->fc_myDID); + cmpl = lpfc_cmpl_ct_cmd_da_id; + break; case SLI_CTNS_RFF_ID: - 
vport->fc_flag &= ~FC_RFF_NOT_SUPPORTED; + vport->ct_flags &= ~FC_CT_RFF_ID; CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_CTNS_RFF_ID); - CtReq->un.rff.PortId = be32_to_cpu(vport->fc_myDID);; + CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);; CtReq->un.rff.fbits = FC4_FEATURE_INIT; CtReq->un.rff.type_code = FC_FCP_DATA; cmpl = lpfc_cmpl_ct_cmd_rff_id; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index d6a98bc970f..783d1eea13e 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -43,6 +43,7 @@ #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_version.h" +#include "lpfc_compat.h" #include "lpfc_debugfs.h" #ifdef CONFIG_LPFC_DEBUG_FS @@ -75,18 +76,18 @@ module_param(lpfc_debugfs_enable, int, 0); MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services"); /* This MUST be a power of 2 */ -static int lpfc_debugfs_max_disc_trc = 0; +static int lpfc_debugfs_max_disc_trc; module_param(lpfc_debugfs_max_disc_trc, int, 0); MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc, "Set debugfs discovery trace depth"); /* This MUST be a power of 2 */ -static int lpfc_debugfs_max_slow_ring_trc = 0; +static int lpfc_debugfs_max_slow_ring_trc; module_param(lpfc_debugfs_max_slow_ring_trc, int, 0); MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc, "Set debugfs slow ring trace depth"); -static int lpfc_debugfs_mask_disc_trc = 0; +int lpfc_debugfs_mask_disc_trc; module_param(lpfc_debugfs_mask_disc_trc, int, 0); MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc, "Set debugfs discovery trace mask"); @@ -100,8 +101,11 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc, #define LPFC_NODELIST_SIZE 8192 #define LPFC_NODELIST_ENTRY_SIZE 120 -/* dumpslim output buffer size */ -#define LPFC_DUMPSLIM_SIZE 4096 +/* dumpHBASlim output buffer size */ +#define LPFC_DUMPHBASLIM_SIZE 4096 + +/* dumpHostSlim output buffer size */ +#define LPFC_DUMPHOSTSLIM_SIZE 4096 /* hbqinfo output buffer size */ #define LPFC_HBQINFO_SIZE 8192 @@ -243,16 +247,17 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) raw_index = phba->hbq_get[i]; getidx = le32_to_cpu(raw_index); len += snprintf(buf+len, size-len, - "entrys:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n", - hbqs->entry_count, hbqs->hbqPutIdx, hbqs->next_hbqPutIdx, - hbqs->local_hbqGetIdx, getidx); + "entrys:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n", + hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx, + hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx); hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt; for (j=0; j<hbqs->entry_count; j++) { len += snprintf(buf+len, size-len, "%03d: %08x %04x %05x ", j, - hbqe->bde.addrLow, hbqe->bde.tus.w, hbqe->buffer_tag); - + le32_to_cpu(hbqe->bde.addrLow), + le32_to_cpu(hbqe->bde.tus.w), + le32_to_cpu(hbqe->buffer_tag)); i = 0; found = 0; @@ -276,7 +281,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list) { hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff); - if (phys == hbqe->bde.addrLow) { + if (phys == le32_to_cpu(hbqe->bde.addrLow)) { len += snprintf(buf+len, size-len, "Buf%d: %p %06x\n", i, hbq_buf->dbuf.virt, hbq_buf->tag); @@ -297,18 +302,58 @@ skipit: return len; } +static int lpfc_debugfs_last_hba_slim_off; + +static int +lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size) +{ + int len = 0; + int i, off; + uint32_t *ptr; + char buffer[1024]; + + off = 0; + 
spin_lock_irq(&phba->hbalock); + + len += snprintf(buf+len, size-len, "HBA SLIM\n"); + lpfc_memcpy_from_slim(buffer, + ((uint8_t *)phba->MBslimaddr) + lpfc_debugfs_last_hba_slim_off, + 1024); + + ptr = (uint32_t *)&buffer[0]; + off = lpfc_debugfs_last_hba_slim_off; + + /* Set it up for the next time */ + lpfc_debugfs_last_hba_slim_off += 1024; + if (lpfc_debugfs_last_hba_slim_off >= 4096) + lpfc_debugfs_last_hba_slim_off = 0; + + i = 1024; + while (i > 0) { + len += snprintf(buf+len, size-len, + "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n", + off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4), + *(ptr+5), *(ptr+6), *(ptr+7)); + ptr += 8; + i -= (8 * sizeof(uint32_t)); + off += (8 * sizeof(uint32_t)); + } + + spin_unlock_irq(&phba->hbalock); + return len; +} + static int -lpfc_debugfs_dumpslim_data(struct lpfc_hba *phba, char *buf, int size) +lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) { int len = 0; - int cnt, i, off; + int i, off; uint32_t word0, word1, word2, word3; uint32_t *ptr; struct lpfc_pgp *pgpp; struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; - cnt = LPFC_DUMPSLIM_SIZE; off = 0; spin_lock_irq(&phba->hbalock); @@ -620,7 +665,34 @@ out: } static int -lpfc_debugfs_dumpslim_open(struct inode *inode, struct file *file) +lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file) +{ + struct lpfc_hba *phba = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundry */ + debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_dumpHBASlim_data(phba, debug->buffer, + LPFC_DUMPHBASLIM_SIZE); + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static int +lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file) { struct lpfc_hba *phba = inode->i_private; struct lpfc_debug *debug; @@ -631,14 +703,14 @@ lpfc_debugfs_dumpslim_open(struct inode *inode, struct file *file) goto out; /* Round to page boundry */ - debug->buffer = kmalloc(LPFC_DUMPSLIM_SIZE, GFP_KERNEL); + debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL); if (!debug->buffer) { kfree(debug); goto out; } - debug->len = lpfc_debugfs_dumpslim_data(phba, debug->buffer, - LPFC_DUMPSLIM_SIZE); + debug->len = lpfc_debugfs_dumpHostSlim_data(phba, debug->buffer, + LPFC_DUMPHOSTSLIM_SIZE); file->private_data = debug; rc = 0; @@ -741,10 +813,19 @@ static struct file_operations lpfc_debugfs_op_hbqinfo = { .release = lpfc_debugfs_release, }; -#undef lpfc_debugfs_op_dumpslim -static struct file_operations lpfc_debugfs_op_dumpslim = { +#undef lpfc_debugfs_op_dumpHBASlim +static struct file_operations lpfc_debugfs_op_dumpHBASlim = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_dumpHBASlim_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .release = lpfc_debugfs_release, +}; + +#undef lpfc_debugfs_op_dumpHostSlim +static struct file_operations lpfc_debugfs_op_dumpHostSlim = { .owner = THIS_MODULE, - .open = lpfc_debugfs_dumpslim_open, + .open = lpfc_debugfs_dumpHostSlim_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_read, .release = lpfc_debugfs_release, @@ -812,15 +893,27 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) goto debug_failed; } - /* Setup dumpslim */ - snprintf(name, sizeof(name), "dumpslim"); - phba->debug_dumpslim = + /* Setup dumpHBASlim */ + snprintf(name, sizeof(name), "dumpHBASlim"); + 
phba->debug_dumpHBASlim = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dumpHBASlim); + if (!phba->debug_dumpHBASlim) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0409 Cannot create debugfs dumpHBASlim\n"); + goto debug_failed; + } + + /* Setup dumpHostSlim */ + snprintf(name, sizeof(name), "dumpHostSlim"); + phba->debug_dumpHostSlim = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, - phba, &lpfc_debugfs_op_dumpslim); - if (!phba->debug_dumpslim) { + phba, &lpfc_debugfs_op_dumpHostSlim); + if (!phba->debug_dumpHostSlim) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0409 Cannot create debugfs dumpslim\n"); + "0409 Cannot create debugfs dumpHostSlim\n"); goto debug_failed; } @@ -970,9 +1063,13 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */ phba->debug_hbqinfo = NULL; } - if (phba->debug_dumpslim) { - debugfs_remove(phba->debug_dumpslim); /* dumpslim */ - phba->debug_dumpslim = NULL; + if (phba->debug_dumpHBASlim) { + debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */ + phba->debug_dumpHBASlim = NULL; + } + if (phba->debug_dumpHostSlim) { + debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */ + phba->debug_dumpHostSlim = NULL; } if (phba->slow_ring_trc) { kfree(phba->slow_ring_trc); diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index aacac9ac538..cfe81c50529 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -36,7 +36,6 @@ enum lpfc_work_type { LPFC_EVT_WARM_START, LPFC_EVT_KILL, LPFC_EVT_ELS_RETRY, - LPFC_EVT_DEV_LOSS_DELAY, LPFC_EVT_DEV_LOSS, }; @@ -92,6 +91,7 @@ struct lpfc_nodelist { #define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */ #define NLP_RNID_SND 0x400 /* sent RNID request for this entry */ #define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */ +#define NLP_DEFER_RM 0x10000 /* Remove this ndlp if no longer used */ #define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */ #define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */ #define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 8085900635d..c6b739dc6bc 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -18,7 +18,7 @@ * more details, a copy of which can be found in the file COPYING * * included with this package. 
* *******************************************************************/ - +/* See Fibre Channel protocol T11 FC-LS for details */ #include <linux/blkdev.h> #include <linux/pci.h> #include <linux/interrupt.h> @@ -42,6 +42,14 @@ static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); +static void lpfc_fabric_abort_vport(struct lpfc_vport *vport); +static int lpfc_issue_els_fdisc(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, uint8_t retry); +static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, + struct lpfc_iocbq *iocb); +static void lpfc_register_new_vport(struct lpfc_hba *phba, + struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp); static int lpfc_max_els_tries = 3; @@ -109,14 +117,11 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, /* fill in BDEs for command */ /* Allocate buffer for command payload */ - if (((pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL)) == 0) || - ((pcmd->virt = lpfc_mbuf_alloc(phba, - MEM_PRI, &(pcmd->phys))) == 0)) { - kfree(pcmd); - - lpfc_sli_release_iocbq(phba, elsiocb); - return NULL; - } + pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (pcmd) + pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); + if (!pcmd || !pcmd->virt) + goto els_iocb_free_pcmb_exit; INIT_LIST_HEAD(&pcmd->list); @@ -126,13 +131,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, if (prsp) prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &prsp->phys); - if (prsp == 0 || prsp->virt == 0) { - kfree(prsp); - lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); - kfree(pcmd); - lpfc_sli_release_iocbq(phba, elsiocb); - return NULL; - } + if (!prsp || !prsp->virt) + goto els_iocb_free_prsp_exit; INIT_LIST_HEAD(&prsp->list); } else { prsp = NULL; @@ -143,15 +143,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, if (pbuflist) pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pbuflist->phys); - if (pbuflist == 0 || pbuflist->virt == 0) { - lpfc_sli_release_iocbq(phba, elsiocb); - lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); - lpfc_mbuf_free(phba, prsp->virt, prsp->phys); - kfree(pcmd); - kfree(prsp); - kfree(pbuflist); - return NULL; - } + if (!pbuflist || !pbuflist->virt) + goto els_iocb_free_pbuf_exit; INIT_LIST_HEAD(&pbuflist->list); @@ -196,7 +189,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, bpl->tus.w = le32_to_cpu(bpl->tus.w); } + /* prevent preparing iocb with NULL ndlp reference */ elsiocb->context1 = lpfc_nlp_get(ndlp); + if (!elsiocb->context1) + goto els_iocb_free_pbuf_exit; elsiocb->context2 = pcmd; elsiocb->context3 = pbuflist; elsiocb->retry = retry; @@ -222,8 +218,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, cmdSize); } return elsiocb; -} +els_iocb_free_pbuf_exit: + lpfc_mbuf_free(phba, prsp->virt, prsp->phys); + kfree(pbuflist); + +els_iocb_free_prsp_exit: + lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); + kfree(prsp); + +els_iocb_free_pcmb_exit: + kfree(pcmd); + lpfc_sli_release_iocbq(phba, elsiocb); + return NULL; +} static int lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) @@ -234,40 +242,53 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) struct lpfc_nodelist *ndlp; struct serv_parm *sp; int rc; + int err = 0; sp = &phba->fc_fabparam; ndlp = lpfc_findnode_did(vport, Fabric_DID); - if (!ndlp) + if (!ndlp) { + err = 1; goto fail; + } mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) + if 
(!mbox) { + err = 2; goto fail; + } vport->port_state = LPFC_FABRIC_CFG_LINK; lpfc_config_link(phba, mbox); mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mbox->vport = vport; - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); - if (rc == MBX_NOT_FINISHED) + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + err = 3; goto fail_free_mbox; + } mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) + if (!mbox) { + err = 4; goto fail; + } rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0); - if (rc) + if (rc) { + err = 5; goto fail_free_mbox; + } mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; mbox->vport = vport; mbox->context2 = lpfc_nlp_get(ndlp); - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); - if (rc == MBX_NOT_FINISHED) + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + err = 6; goto fail_issue_reg_login; + } return 0; @@ -282,7 +303,7 @@ fail_free_mbox: fail: lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, - "0249 Cannot issue Register Fabric login\n"); + "0249 Cannot issue Register Fabric login: Err %d\n", err); return -ENXIO; } @@ -370,11 +391,12 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { lpfc_mbx_unreg_vpi(vport); + spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(shost->host_lock); } } - ndlp->nlp_sid = irsp->un.ulpWord[4] & Mask_DID; lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && @@ -429,8 +451,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mbox->vport = vport; - rc = lpfc_sli_issue_mbox(phba, mbox, - MBX_NOWAIT | MBX_STOP_IOCB); + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { mempool_free(mbox, phba->mbox_mem_pool); goto fail; @@ -463,6 +484,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_nlp_put(ndlp); } + /* If we are pt2pt with another NPort, force NPIV off! */ + phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; + spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_PT2PT; spin_unlock_irq(shost->host_lock); @@ -488,6 +512,9 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { + /* One additional decrement on node reference count to + * trigger the release of the node + */ lpfc_nlp_put(ndlp); goto out; } @@ -562,8 +589,13 @@ flogifail: /* Start discovery */ lpfc_disc_start(vport); + } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || + ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) && + (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) && + (phba->link_state != LPFC_CLEAR_LA)) { + /* If FLOGI failed enable link interrupt. 
*/ + lpfc_issue_clear_la(phba, vport); } - out: lpfc_els_free_iocb(phba, cmdiocb); } @@ -685,6 +717,9 @@ lpfc_initial_flogi(struct lpfc_vport *vport) struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp; + vport->port_state = LPFC_FLOGI; + lpfc_set_disctmo(vport); + /* First look for the Fabric ndlp */ ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp) { @@ -696,7 +731,11 @@ lpfc_initial_flogi(struct lpfc_vport *vport) } else { lpfc_dequeue_node(vport, ndlp); } + if (lpfc_issue_els_flogi(vport, ndlp, 0)) { + /* This decrement of reference count to node shall kick off + * the release of the node. + */ lpfc_nlp_put(ndlp); } return 1; @@ -720,11 +759,16 @@ lpfc_initial_fdisc(struct lpfc_vport *vport) lpfc_dequeue_node(vport, ndlp); } if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { + /* decrement node reference count to trigger the release of + * the node. + */ lpfc_nlp_put(ndlp); + return 0; } return 1; } -static void + +void lpfc_more_plogi(struct lpfc_vport *vport) { int sentplogi; @@ -752,6 +796,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, { struct lpfc_vport *vport = ndlp->vport; struct lpfc_nodelist *new_ndlp; + struct lpfc_rport_data *rdata; + struct fc_rport *rport; struct serv_parm *sp; uint8_t name[sizeof(struct lpfc_name)]; uint32_t rc; @@ -788,11 +834,34 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, lpfc_unreg_rpi(vport, new_ndlp); new_ndlp->nlp_DID = ndlp->nlp_DID; new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; + + if (ndlp->nlp_flag & NLP_NPR_2B_DISC) + new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); /* Move this back to NPR state */ - if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) + if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { + /* The new_ndlp is replacing ndlp totally, so we need + * to put ndlp on UNUSED list and try to free it. + */ + + /* Fix up the rport accordingly */ + rport = ndlp->rport; + if (rport) { + rdata = rport->dd_data; + if (rdata->pnode == ndlp) { + lpfc_nlp_put(ndlp); + ndlp->rport = NULL; + rdata->pnode = lpfc_nlp_get(new_ndlp); + new_ndlp->rport = rport; + } + new_ndlp->nlp_type = ndlp->nlp_type; + } + lpfc_drop_node(vport, ndlp); + } else { lpfc_unreg_rpi(vport, ndlp); ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ @@ -801,6 +870,27 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, return new_ndlp; } +void +lpfc_end_rscn(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if (vport->fc_flag & FC_RSCN_MODE) { + /* + * Check to see if more RSCNs came in while we were + * processing this one. 
+ */ + if (vport->fc_rscn_id_cnt || + (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) + lpfc_els_handle_rscn(vport); + else { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_RSCN_MODE; + spin_unlock_irq(shost->host_lock); + } + } +} + static void lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) @@ -871,13 +961,6 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; } /* PLOGI failed */ - if (ndlp->nlp_DID == NameServer_DID) { - lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, - "0250 Nameserver login error: " - "0x%x / 0x%x\n", - irsp->ulpStatus, irsp->un.ulpWord[4]); - } /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (lpfc_error_lost_link(irsp)) { rc = NLP_STE_FREED_NODE; @@ -905,20 +988,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, spin_unlock_irq(shost->host_lock); lpfc_can_disctmo(vport); - if (vport->fc_flag & FC_RSCN_MODE) { - /* - * Check to see if more RSCNs came in while - * we were processing this one. - */ - if ((vport->fc_rscn_id_cnt == 0) && - (!(vport->fc_flag & FC_RSCN_DISCOVERY))) { - spin_lock_irq(shost->host_lock); - vport->fc_flag &= ~FC_RSCN_MODE; - spin_unlock_irq(shost->host_lock); - } else { - lpfc_els_handle_rscn(vport); - } - } + lpfc_end_rscn(vport); } } @@ -933,6 +1003,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) struct lpfc_hba *phba = vport->phba; struct serv_parm *sp; IOCB_t *icmd; + struct lpfc_nodelist *ndlp; struct lpfc_iocbq *elsiocb; struct lpfc_sli_ring *pring; struct lpfc_sli *psli; @@ -943,8 +1014,11 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) psli = &phba->sli; pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ + ndlp = lpfc_findnode_did(vport, did); + /* If ndlp if not NULL, we will bump the reference count on it */ + cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); - elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, NULL, did, + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, ELS_CMD_PLOGI); if (!elsiocb) return 1; @@ -1109,7 +1183,7 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return 0; } -static void +void lpfc_more_adisc(struct lpfc_vport *vport) { int sentadisc; @@ -1134,8 +1208,6 @@ lpfc_more_adisc(struct lpfc_vport *vport) static void lpfc_rscn_disc(struct lpfc_vport *vport) { - struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - lpfc_can_disctmo(vport); /* RSCN discovery */ @@ -1144,19 +1216,7 @@ lpfc_rscn_disc(struct lpfc_vport *vport) if (lpfc_els_disc_plogi(vport)) return; - if (vport->fc_flag & FC_RSCN_MODE) { - /* Check to see if more RSCNs came in while we were - * processing this one. 
- */ - if ((vport->fc_rscn_id_cnt == 0) && - (!(vport->fc_flag & FC_RSCN_DISCOVERY))) { - spin_lock_irq(shost->host_lock); - vport->fc_flag &= ~FC_RSCN_MODE; - spin_unlock_irq(shost->host_lock); - } else { - lpfc_els_handle_rscn(vport); - } - } + lpfc_end_rscn(vport); } static void @@ -1413,6 +1473,13 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, psli = &phba->sli; pring = &psli->ring[LPFC_ELS_RING]; + spin_lock_irq(shost->host_lock); + if (ndlp->nlp_flag & NLP_LOGO_SND) { + spin_unlock_irq(shost->host_lock); + return 0; + } + spin_unlock_irq(shost->host_lock); + cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_LOGO); @@ -1499,6 +1566,9 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) ndlp->nlp_DID, ELS_CMD_SCR); if (!elsiocb) { + /* This will trigger the release of the node just + * allocated + */ lpfc_nlp_put(ndlp); return 1; } @@ -1520,10 +1590,17 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) phba->fc_stat.elsXmitSCR++; elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + /* The additional lpfc_nlp_put will cause the following + * lpfc_els_free_iocb routine to trigger the rlease of + * the node. + */ lpfc_nlp_put(ndlp); lpfc_els_free_iocb(phba, elsiocb); return 1; } + /* This will cause the callback-function lpfc_cmpl_els_cmd to + * trigger the release of node. + */ lpfc_nlp_put(ndlp); return 0; } @@ -1555,6 +1632,9 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_RNID); if (!elsiocb) { + /* This will trigger the release of the node just + * allocated + */ lpfc_nlp_put(ndlp); return 1; } @@ -1591,35 +1671,21 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) phba->fc_stat.elsXmitFARPR++; elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + /* The additional lpfc_nlp_put will cause the following + * lpfc_els_free_iocb routine to trigger the release of + * the node. + */ lpfc_nlp_put(ndlp); lpfc_els_free_iocb(phba, elsiocb); return 1; } + /* This will cause the callback-function lpfc_cmpl_els_cmd to + * trigger the release of the node. + */ lpfc_nlp_put(ndlp); return 0; } -static void -lpfc_end_rscn(struct lpfc_vport *vport) -{ - struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - - if (vport->fc_flag & FC_RSCN_MODE) { - /* - * Check to see if more RSCNs came in while we were - * processing this one. 
- */ - if (vport->fc_rscn_id_cnt || - (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) - lpfc_els_handle_rscn(vport); - else { - spin_lock_irq(shost->host_lock); - vport->fc_flag &= ~FC_RSCN_MODE; - spin_unlock_irq(shost->host_lock); - } - } -} - void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) { @@ -1675,7 +1741,10 @@ lpfc_els_retry_delay(unsigned long ptr) return; } - evtp->evt_arg1 = ndlp; + /* We need to hold the node by incrementing the reference + * count until the queued work is done + */ + evtp->evt_arg1 = lpfc_nlp_get(ndlp); evtp->evt = LPFC_EVT_ELS_RETRY; list_add_tail(&evtp->evt_listp, &phba->work_list); if (phba->work_wait) @@ -1759,6 +1828,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, uint32_t *elscmd; struct ls_rjt stat; int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; + int logerr = 0; uint32_t cmd = 0; uint32_t did; @@ -1815,6 +1885,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, break; case IOERR_NO_RESOURCES: + logerr = 1; /* HBA out of resources */ retry = 1; if (cmdiocb->retry > 100) delay = 100; @@ -1843,6 +1914,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, case IOSTAT_NPORT_BSY: case IOSTAT_FABRIC_BSY: + logerr = 1; /* Fabric / Remote NPort out of resources */ retry = 1; break; @@ -1923,6 +1995,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (did == FDMI_DID) retry = 1; + if ((cmd == ELS_CMD_FLOGI) && + (phba->fc_topology != TOPOLOGY_LOOP)) { + /* FLOGI retry policy */ + retry = 1; + maxretry = 48; + if (cmdiocb->retry >= 32) + delay = 1000; + } + if ((++cmdiocb->retry) >= maxretry) { phba->fc_stat.elsRetryExceeded++; retry = 0; @@ -2006,11 +2087,46 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } } /* No retry ELS command <elsCmd> to remote NPORT <did> */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + if (logerr) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0137 No retry ELS command x%x to remote " + "NPORT x%x: Out of Resources: Error:x%x/%x\n", + cmd, did, irsp->ulpStatus, + irsp->un.ulpWord[4]); + } + else { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0108 No retry ELS command x%x to remote " "NPORT x%x Retried:%d Error:x%x/%x\n", cmd, did, cmdiocb->retry, irsp->ulpStatus, irsp->un.ulpWord[4]); + } + return 0; +} + +static int +lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) +{ + struct lpfc_dmabuf *buf_ptr; + + /* Free the response before processing the command. */ + if (!list_empty(&buf_ptr1->list)) { + list_remove_head(&buf_ptr1->list, buf_ptr, + struct lpfc_dmabuf, + list); + lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); + kfree(buf_ptr); + } + lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); + kfree(buf_ptr1); + return 0; +} + +static int +lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) +{ + lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); + kfree(buf_ptr); return 0; } @@ -2018,30 +2134,63 @@ int lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) { struct lpfc_dmabuf *buf_ptr, *buf_ptr1; + struct lpfc_nodelist *ndlp; - if (elsiocb->context1) { - lpfc_nlp_put(elsiocb->context1); + ndlp = (struct lpfc_nodelist *)elsiocb->context1; + if (ndlp) { + if (ndlp->nlp_flag & NLP_DEFER_RM) { + lpfc_nlp_put(ndlp); + + /* If the ndlp is not being used by another discovery + * thread, free it. 
+ */ + if (!lpfc_nlp_not_used(ndlp)) { + /* If ndlp is being used by another discovery + * thread, just clear NLP_DEFER_RM + */ + ndlp->nlp_flag &= ~NLP_DEFER_RM; + } + } + else + lpfc_nlp_put(ndlp); elsiocb->context1 = NULL; } /* context2 = cmd, context2->next = rsp, context3 = bpl */ if (elsiocb->context2) { - buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; - /* Free the response before processing the command. */ - if (!list_empty(&buf_ptr1->list)) { - list_remove_head(&buf_ptr1->list, buf_ptr, - struct lpfc_dmabuf, - list); - lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); - kfree(buf_ptr); + if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) { + /* Firmware could still be in progress of DMAing + * payload, so don't free data buffer till after + * a hbeat. + */ + elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE; + buf_ptr = elsiocb->context2; + elsiocb->context2 = NULL; + if (buf_ptr) { + buf_ptr1 = NULL; + spin_lock_irq(&phba->hbalock); + if (!list_empty(&buf_ptr->list)) { + list_remove_head(&buf_ptr->list, + buf_ptr1, struct lpfc_dmabuf, + list); + INIT_LIST_HEAD(&buf_ptr1->list); + list_add_tail(&buf_ptr1->list, + &phba->elsbuf); + phba->elsbuf_cnt++; + } + INIT_LIST_HEAD(&buf_ptr->list); + list_add_tail(&buf_ptr->list, &phba->elsbuf); + phba->elsbuf_cnt++; + spin_unlock_irq(&phba->hbalock); + } + } else { + buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; + lpfc_els_free_data(phba, buf_ptr1); } - lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); - kfree(buf_ptr1); } if (elsiocb->context3) { buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; - lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); - kfree(buf_ptr); + lpfc_els_free_bpl(phba, buf_ptr); } lpfc_sli_release_iocbq(phba, elsiocb); return 0; @@ -2065,15 +2214,20 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, "Data: x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); - switch (ndlp->nlp_state) { - case NLP_STE_UNUSED_NODE: /* node is just allocated */ - lpfc_drop_node(vport, ndlp); - break; - case NLP_STE_NPR_NODE: /* NPort Recovery mode */ - lpfc_unreg_rpi(vport, ndlp); - break; - default: - break; + + if (ndlp->nlp_state == NLP_STE_NPR_NODE) { + /* NPort Recovery mode or node is just allocated */ + if (!lpfc_nlp_not_used(ndlp)) { + /* If the ndlp is being used by another discovery + * thread, just unregister the RPI. + */ + lpfc_unreg_rpi(vport, ndlp); + } else { + /* Indicate the node has already released, should + * not reference to it from within lpfc_els_free_iocb. + */ + cmdiocb->context1 = NULL; + } } lpfc_els_free_iocb(phba, cmdiocb); return; @@ -2089,7 +2243,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(pmb, phba->mbox_mem_pool); - lpfc_nlp_put(ndlp); + if (ndlp) { + lpfc_nlp_put(ndlp); + /* This is the end of the default RPI cleanup logic for this + * ndlp. If no other discovery threads are using this ndlp. + * we should free all resources associated with it. + */ + lpfc_nlp_not_used(ndlp); + } return; } @@ -2100,15 +2261,29 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; struct Scsi_Host *shost = vport ? 
lpfc_shost_from_vport(vport) : NULL; - IOCB_t *irsp; + IOCB_t *irsp; + uint8_t *pcmd; LPFC_MBOXQ_t *mbox = NULL; struct lpfc_dmabuf *mp = NULL; + uint32_t ls_rjt = 0; irsp = &rspiocb->iocb; if (cmdiocb->context_un.mbox) mbox = cmdiocb->context_un.mbox; + /* First determine if this is a LS_RJT cmpl. Note, this callback + * function can have cmdiocb->contest1 (ndlp) field set to NULL. + */ + pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); + if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) { + /* A LS_RJT associated with Default RPI cleanup has its own + * seperate code path. + */ + if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI)) + ls_rjt = 1; + } + /* Check to see if link went down during discovery */ if (!ndlp || lpfc_els_chk_latt(vport)) { if (mbox) { @@ -2119,6 +2294,15 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } mempool_free(mbox, phba->mbox_mem_pool); } + if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI)) + if (lpfc_nlp_not_used(ndlp)) { + ndlp = NULL; + /* Indicate the node has already released, + * should not reference to it from within + * the routine lpfc_els_free_iocb. + */ + cmdiocb->context1 = NULL; + } goto out; } @@ -2150,20 +2334,39 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); } - if (lpfc_sli_issue_mbox(phba, mbox, - (MBX_NOWAIT | MBX_STOP_IOCB)) + if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) != MBX_NOT_FINISHED) { goto out; } - lpfc_nlp_put(ndlp); - /* NOTE: we should have messages for unsuccessful - reglogin */ + + /* ELS rsp: Cannot issue reg_login for <NPortid> */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0138 ELS rsp: Cannot issue reg_login for x%x " + "Data: x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi); + + if (lpfc_nlp_not_used(ndlp)) { + ndlp = NULL; + /* Indicate node has already been released, + * should not reference to it from within + * the routine lpfc_els_free_iocb. + */ + cmdiocb->context1 = NULL; + } } else { /* Do not drop node for lpfc_els_abort'ed ELS cmds */ if (!lpfc_error_lost_link(irsp) && ndlp->nlp_flag & NLP_ACC_REGLOGIN) { - lpfc_drop_node(vport, ndlp); - ndlp = NULL; + if (lpfc_nlp_not_used(ndlp)) { + ndlp = NULL; + /* Indicate node has already been + * released, should not reference + * to it from within the routine + * lpfc_els_free_iocb. + */ + cmdiocb->context1 = NULL; + } } } mp = (struct lpfc_dmabuf *) mbox->context1; @@ -2178,7 +2381,21 @@ out: spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); spin_unlock_irq(shost->host_lock); + + /* If the node is not being used by another discovery thread, + * and we are sending a reject, we are done with it. + * Release driver reference count here and free associated + * resources. + */ + if (ls_rjt) + if (lpfc_nlp_not_used(ndlp)) + /* Indicate node has already been released, + * should not reference to it from within + * the routine lpfc_els_free_iocb. + */ + cmdiocb->context1 = NULL; } + lpfc_els_free_iocb(phba, cmdiocb); return; } @@ -2349,14 +2566,6 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); - /* If the node is in the UNUSED state, and we are sending - * a reject, we are done with it. Release driver reference - * count here. The outstanding els will release its reference on - * completion and the node can be freed then. 
- */ - if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) - lpfc_nlp_put(ndlp); - if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -2642,7 +2851,10 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport) } } } - if (sentplogi == 0) { + if (sentplogi) { + lpfc_set_disctmo(vport); + } + else { spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_NLP_MORE; spin_unlock_irq(shost->host_lock); @@ -2830,10 +3042,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); + spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_RSCN_DEFERRED; if ((rscn_cnt < FC_MAX_HOLD_RSCN) && !(vport->fc_flag & FC_RSCN_DISCOVERY)) { - spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_RSCN_MODE; spin_unlock_irq(shost->host_lock); if (rscn_cnt) { @@ -2862,7 +3074,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, vport->fc_rscn_id_cnt, vport->fc_flag, vport->port_state); } else { - spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_RSCN_DISCOVERY; spin_unlock_irq(shost->host_lock); /* ReDiscovery RSCN */ @@ -2877,7 +3088,9 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, /* send RECOVERY event for ALL nodes that match RSCN payload */ lpfc_rscn_recovery_check(vport); + spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_RSCN_DEFERRED; + spin_unlock_irq(shost->host_lock); return 0; } @@ -2929,6 +3142,8 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport) /* To process RSCN, first compare RSCN data with NameServer */ vport->fc_ns_retry = 0; + vport->num_disc_nodes = 0; + ndlp = lpfc_findnode_did(vport, NameServer_DID); if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { /* Good ndlp, issue CT Request to NameServer */ @@ -3022,8 +3237,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, mbox->mb.un.varInitLnk.lipsr_AL_PA = 0; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mbox->vport = vport; - rc = lpfc_sli_issue_mbox - (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); lpfc_set_loopback_flag(phba); if (rc == MBX_NOT_FINISHED) { mempool_free(mbox, phba->mbox_mem_pool); @@ -3140,7 +3354,10 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, lpfc_max_els_tries, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); + + /* Decrement the ndlp reference count from previous mbox command */ lpfc_nlp_put(ndlp); + if (!elsiocb) return; @@ -3160,13 +3377,13 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) status |= 0x4; rps_rsp->rsvd1 = 0; - rps_rsp->portStatus = be16_to_cpu(status); - rps_rsp->linkFailureCnt = be32_to_cpu(mb->un.varRdLnk.linkFailureCnt); - rps_rsp->lossSyncCnt = be32_to_cpu(mb->un.varRdLnk.lossSyncCnt); - rps_rsp->lossSignalCnt = be32_to_cpu(mb->un.varRdLnk.lossSignalCnt); - rps_rsp->primSeqErrCnt = be32_to_cpu(mb->un.varRdLnk.primSeqErrCnt); - rps_rsp->invalidXmitWord = be32_to_cpu(mb->un.varRdLnk.invalidXmitWord); - rps_rsp->crcCnt = be32_to_cpu(mb->un.varRdLnk.crcCnt); + rps_rsp->portStatus = cpu_to_be16(status); + rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); + rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); + rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); + rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); + rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); + rps_rsp->crcCnt = 
cpu_to_be32(mb->un.varRdLnk.crcCnt); /* Xmit ELS RPS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, "0118 Xmit ELS RPS ACC response tag x%x xri x%x, " @@ -3223,11 +3440,13 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, mbox->context2 = lpfc_nlp_get(ndlp); mbox->vport = vport; mbox->mbox_cmpl = lpfc_els_rsp_rps_acc; - if (lpfc_sli_issue_mbox (phba, mbox, - (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED) + if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) + != MBX_NOT_FINISHED) /* Mbox completion will send ELS Response */ return 0; - + /* Decrement reference count used for the failed mbox + * command. + */ lpfc_nlp_put(ndlp); mempool_free(mbox, phba->mbox_mem_pool); } @@ -3461,6 +3680,7 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, * other NLP_FABRIC logins */ lpfc_drop_node(vport, ndlp); + } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { /* Fail outstanding I/O now since this * device is marked for PLOGI @@ -3469,8 +3689,6 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, } } - vport->port_state = LPFC_FLOGI; - lpfc_set_disctmo(vport); lpfc_initial_flogi(vport); return 0; } @@ -3711,6 +3929,7 @@ static void lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) { + struct Scsi_Host *shost; struct lpfc_nodelist *ndlp; struct ls_rjt stat; uint32_t *payload; @@ -3750,11 +3969,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, goto dropit; lpfc_nlp_init(vport, ndlp, did); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); newnode = 1; if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) { ndlp->nlp_type |= NLP_FABRIC; } - lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); + } + else { + if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { + /* This is simular to the new node path */ + lpfc_nlp_get(ndlp); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + newnode = 1; + } } phba->fc_stat.elsRcvFrame++; @@ -3783,6 +4010,12 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, rjt_err = LSRJT_UNABLE_TPC; break; } + + shost = lpfc_shost_from_vport(vport); + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; + spin_unlock_irq(shost->host_lock); + lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PLOGI); @@ -3795,7 +4028,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->fc_stat.elsRcvFLOGI++; lpfc_els_rcv_flogi(vport, elsiocb, ndlp); if (newnode) - lpfc_drop_node(vport, ndlp); + lpfc_nlp_put(ndlp); break; case ELS_CMD_LOGO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, @@ -3825,7 +4058,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->fc_stat.elsRcvRSCN++; lpfc_els_rcv_rscn(vport, elsiocb, ndlp); if (newnode) - lpfc_drop_node(vport, ndlp); + lpfc_nlp_put(ndlp); break; case ELS_CMD_ADISC: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, @@ -3897,7 +4130,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->fc_stat.elsRcvLIRR++; lpfc_els_rcv_lirr(vport, elsiocb, ndlp); if (newnode) - lpfc_drop_node(vport, ndlp); + lpfc_nlp_put(ndlp); break; case ELS_CMD_RPS: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, @@ -3907,7 +4140,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->fc_stat.elsRcvRPS++; lpfc_els_rcv_rps(vport, elsiocb, ndlp); if (newnode) - lpfc_drop_node(vport, ndlp); + 
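The RPS accept hunk just above switches the counter fields from be32_to_cpu()/be16_to_cpu() to cpu_to_be32()/cpu_to_be16(): the values arrive from the mailbox in CPU byte order and are being written into a wire-format payload, so host-to-big-endian is the conversion actually meant, even though both directions perform the same byte swap on any one host. A userspace illustration with the <endian.h> helpers, where htobe16/htobe32 stand in for the kernel's cpu_to_be16/cpu_to_be32 and the field layout is illustrative:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint32_t link_failures = 5;          /* CPU-endian counter from the mailbox */
        uint16_t port_status   = 0x0002;
        unsigned char wire[6];
        uint16_t be16 = htobe16(port_status);    /* cpu_to_be16 equivalent */
        uint32_t be32 = htobe32(link_failures);  /* cpu_to_be32 equivalent */

        memcpy(wire, &be16, 2);              /* big-endian bytes go on the wire */
        memcpy(wire + 2, &be32, 4);

        printf("wire: %02x %02x  %02x %02x %02x %02x\n",
               wire[0], wire[1], wire[2], wire[3], wire[4], wire[5]);
        return 0;
}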
lpfc_nlp_put(ndlp); break; case ELS_CMD_RPL: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, @@ -3917,7 +4150,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->fc_stat.elsRcvRPL++; lpfc_els_rcv_rpl(vport, elsiocb, ndlp); if (newnode) - lpfc_drop_node(vport, ndlp); + lpfc_nlp_put(ndlp); break; case ELS_CMD_RNID: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, @@ -3927,7 +4160,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->fc_stat.elsRcvRNID++; lpfc_els_rcv_rnid(vport, elsiocb, ndlp); if (newnode) - lpfc_drop_node(vport, ndlp); + lpfc_nlp_put(ndlp); break; default: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, @@ -3942,7 +4175,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, "0115 Unknown ELS command x%x " "received from NPORT x%x\n", cmd, did); if (newnode) - lpfc_drop_node(vport, ndlp); + lpfc_nlp_put(ndlp); break; } @@ -3958,10 +4191,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, return; dropit: - lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + if (vport && !(vport->load_flag & FC_UNLOADING)) + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, "(%d):0111 Dropping received ELS cmd " "Data: x%x x%x x%x\n", - vport ? vport->vpi : 0xffff, icmd->ulpStatus, + vport->vpi, icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); phba->fc_stat.elsRcvDrop++; } @@ -4114,8 +4348,9 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; MAILBOX_t *mb = &pmb->mb; + spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; - lpfc_nlp_put(ndlp); + spin_unlock_irq(shost->host_lock); if (mb->mbxStatus) { lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, @@ -4135,7 +4370,9 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) default: /* Try to recover from this error */ lpfc_mbx_unreg_vpi(vport); + spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(shost->host_lock); lpfc_initial_fdisc(vport); break; } @@ -4146,14 +4383,21 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) else lpfc_do_scr_ns_plogi(phba, vport); } + + /* Now, we decrement the ndlp reference count held for this + * callback function + */ + lpfc_nlp_put(ndlp); + mempool_free(pmb, phba->mbox_mem_pool); return; } -void +static void lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); LPFC_MBOXQ_t *mbox; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -4162,25 +4406,31 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, mbox->vport = vport; mbox->context2 = lpfc_nlp_get(ndlp); mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; - if (lpfc_sli_issue_mbox(phba, mbox, - MBX_NOWAIT | MBX_STOP_IOCB) + if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) { + /* mailbox command not success, decrement ndlp + * reference count for this command + */ + lpfc_nlp_put(ndlp); mempool_free(mbox, phba->mbox_mem_pool); - vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; - lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, "0253 Register VPI: Can't send mbox\n"); + goto mbox_err_exit; } } else { - lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, "0254 Register VPI: no memory\n"); - - vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; - 
lpfc_nlp_put(ndlp); + goto mbox_err_exit; } + return; + +mbox_err_exit: + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(shost->host_lock); + return; } static void @@ -4251,7 +4501,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_unreg_rpi(vport, np); } lpfc_mbx_unreg_vpi(vport); + spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(shost->host_lock); } if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) @@ -4259,14 +4511,15 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, else lpfc_do_scr_ns_plogi(phba, vport); - lpfc_nlp_put(ndlp); /* Free Fabric ndlp for vports */ + /* Unconditionaly kick off releasing fabric node for vports */ + lpfc_nlp_put(ndlp); } out: lpfc_els_free_iocb(phba, cmdiocb); } -int +static int lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint8_t retry) { @@ -4539,7 +4792,7 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } } -int +static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) { unsigned long iflags; @@ -4583,7 +4836,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) } -void lpfc_fabric_abort_vport(struct lpfc_vport *vport) +static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) { LIST_HEAD(completions); struct lpfc_hba *phba = vport->phba; @@ -4663,6 +4916,7 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba) } +#if 0 void lpfc_fabric_abort_flogi(struct lpfc_hba *phba) { LIST_HEAD(completions); @@ -4693,5 +4947,6 @@ void lpfc_fabric_abort_flogi(struct lpfc_hba *phba) (piocb->iocb_cmpl) (phba, piocb, piocb); } } +#endif /* 0 */ diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index c81c2b3228d..dc042bd97ba 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -57,6 +57,7 @@ static uint8_t lpfcAlpaArray[] = { }; static void lpfc_disc_timeout_handler(struct lpfc_vport *); +static void lpfc_disc_flush_list(struct lpfc_vport *vport); void lpfc_terminate_rport_io(struct fc_rport *rport) @@ -107,20 +108,14 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) struct lpfc_nodelist * ndlp; struct lpfc_vport *vport; struct lpfc_hba *phba; - struct completion devloss_compl; struct lpfc_work_evt *evtp; + int put_node; + int put_rport; rdata = rport->dd_data; ndlp = rdata->pnode; - - if (!ndlp) { - if (rport->scsi_target_id != -1) { - printk(KERN_ERR "Cannot find remote node" - " for rport in dev_loss_tmo_callbk x%x\n", - rport->port_id); - } + if (!ndlp) return; - } vport = ndlp->vport; phba = vport->phba; @@ -129,15 +124,35 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) "rport devlosscb: sid:x%x did:x%x flg:x%x", ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); - init_completion(&devloss_compl); + /* Don't defer this if we are in the process of deleting the vport + * or unloading the driver. The unload will cleanup the node + * appropriately we just need to cleanup the ndlp rport info here. 
+ */ + if (vport->load_flag & FC_UNLOADING) { + put_node = rdata->pnode != NULL; + put_rport = ndlp->rport != NULL; + rdata->pnode = NULL; + ndlp->rport = NULL; + if (put_node) + lpfc_nlp_put(ndlp); + if (put_rport) + put_device(&rport->dev); + return; + } + + if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) + return; + evtp = &ndlp->dev_loss_evt; if (!list_empty(&evtp->evt_listp)) return; spin_lock_irq(&phba->hbalock); - evtp->evt_arg1 = ndlp; - evtp->evt_arg2 = &devloss_compl; + /* We need to hold the node by incrementing the reference + * count until this queued work is done + */ + evtp->evt_arg1 = lpfc_nlp_get(ndlp); evtp->evt = LPFC_EVT_DEV_LOSS; list_add_tail(&evtp->evt_listp, &phba->work_list); if (phba->work_wait) @@ -145,8 +160,6 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) spin_unlock_irq(&phba->hbalock); - wait_for_completion(&devloss_compl); - return; } @@ -154,7 +167,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) * This function is called from the worker thread when dev_loss_tmo * expire. */ -void +static void lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) { struct lpfc_rport_data *rdata; @@ -162,6 +175,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) struct lpfc_vport *vport; struct lpfc_hba *phba; uint8_t *name; + int put_node; + int put_rport; int warn_on = 0; rport = ndlp->rport; @@ -178,14 +193,32 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) "rport devlosstmo:did:x%x type:x%x id:x%x", ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id); - if (!(vport->load_flag & FC_UNLOADING) && - ndlp->nlp_state == NLP_STE_MAPPED_NODE) + /* Don't defer this if we are in the process of deleting the vport + * or unloading the driver. The unload will cleanup the node + * appropriately we just need to cleanup the ndlp rport info here. 
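The dev-loss callback above (like lpfc_els_retry_delay earlier in this patch) no longer blocks on a completion; it stashes lpfc_nlp_get(ndlp) in evt_arg1 before queuing the event, and the worker drops that reference once the handler has run. A generic sketch of pairing a reference count with a queued work item, as a single-threaded stand-in for the kernel work list, with all names illustrative:

#include <stdio.h>
#include <stdlib.h>

struct node {
        int refcount;
        int id;
};

struct work_item {
        struct node *arg;               /* reference held while queued */
        struct work_item *next;
};

static struct work_item *work_list;

static struct node *node_get(struct node *n) { n->refcount++; return n; }

static void node_put(struct node *n)
{
        if (--n->refcount == 0) {
                printf("node %d freed\n", n->id);
                free(n);
        }
}

/* Queue work against a node: take a reference so the node cannot be freed
 * out from under the handler that will run later. */
static void queue_dev_loss(struct node *n)
{
        struct work_item *w = malloc(sizeof(*w));

        w->arg = node_get(n);
        w->next = work_list;
        work_list = w;
}

static void run_work(void)
{
        while (work_list) {
                struct work_item *w = work_list;

                work_list = w->next;
                printf("handling dev-loss for node %d\n", w->arg->id);
                node_put(w->arg);       /* drop the reference taken at queue time */
                free(w);
        }
}

int main(void)
{
        struct node *n = malloc(sizeof(*n));

        n->refcount = 1;                /* caller's reference */
        n->id = 7;
        queue_dev_loss(n);
        node_put(n);                    /* caller is done; queued work keeps the node alive */
        run_work();                     /* handler runs, then the node is freed */
        return 0;
}

The queued reference is what keeps the node alive between the moment the transport reports the loss and the moment the worker thread actually processes it.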
+ */ + if (vport->load_flag & FC_UNLOADING) { + if (ndlp->nlp_sid != NLP_NO_SID) { + /* flush the target */ + lpfc_sli_abort_iocb(vport, + &phba->sli.ring[phba->sli.fcp_ring], + ndlp->nlp_sid, 0, LPFC_CTX_TGT); + } + put_node = rdata->pnode != NULL; + put_rport = ndlp->rport != NULL; + rdata->pnode = NULL; + ndlp->rport = NULL; + if (put_node) + lpfc_nlp_put(ndlp); + if (put_rport) + put_device(&rport->dev); return; + } - if (ndlp->nlp_type & NLP_FABRIC) { - int put_node; - int put_rport; + if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) + return; + if (ndlp->nlp_type & NLP_FABRIC) { /* We will clean up these Nodes in linkup */ put_node = rdata->pnode != NULL; put_rport = ndlp->rport != NULL; @@ -227,23 +260,20 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) ndlp->nlp_state, ndlp->nlp_rpi); } + put_node = rdata->pnode != NULL; + put_rport = ndlp->rport != NULL; + rdata->pnode = NULL; + ndlp->rport = NULL; + if (put_node) + lpfc_nlp_put(ndlp); + if (put_rport) + put_device(&rport->dev); + if (!(vport->load_flag & FC_UNLOADING) && !(ndlp->nlp_flag & NLP_DELAY_TMO) && !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && - (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) + (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) { lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); - else { - int put_node; - int put_rport; - - put_node = rdata->pnode != NULL; - put_rport = ndlp->rport != NULL; - rdata->pnode = NULL; - ndlp->rport = NULL; - if (put_node) - lpfc_nlp_put(ndlp); - if (put_rport) - put_device(&rport->dev); } } @@ -260,7 +290,6 @@ lpfc_work_list_done(struct lpfc_hba *phba) { struct lpfc_work_evt *evtp = NULL; struct lpfc_nodelist *ndlp; - struct lpfc_vport *vport; int free_evt; spin_lock_irq(&phba->hbalock); @@ -270,35 +299,22 @@ lpfc_work_list_done(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); free_evt = 1; switch (evtp->evt) { - case LPFC_EVT_DEV_LOSS_DELAY: - free_evt = 0; /* evt is part of ndlp */ - ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); - vport = ndlp->vport; - if (!vport) - break; - - lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport devlossdly:did:x%x flg:x%x", - ndlp->nlp_DID, ndlp->nlp_flag, 0); - - if (!(vport->load_flag & FC_UNLOADING) && - !(ndlp->nlp_flag & NLP_DELAY_TMO) && - !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { - lpfc_disc_state_machine(vport, ndlp, NULL, - NLP_EVT_DEVICE_RM); - } - break; case LPFC_EVT_ELS_RETRY: ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); lpfc_els_retry_delay_handler(ndlp); free_evt = 0; /* evt is part of ndlp */ + /* decrement the node reference count held + * for this queued work + */ + lpfc_nlp_put(ndlp); break; case LPFC_EVT_DEV_LOSS: ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); - lpfc_nlp_get(ndlp); lpfc_dev_loss_tmo_handler(ndlp); free_evt = 0; - complete((struct completion *)(evtp->evt_arg2)); + /* decrement the node reference count held for + * this queued work + */ lpfc_nlp_put(ndlp); break; case LPFC_EVT_ONLINE: @@ -373,7 +389,7 @@ lpfc_work_done(struct lpfc_hba *phba) lpfc_handle_latt(phba); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i < LPFC_MAX_VPORTS; i++) { + for(i = 0; i <= phba->max_vpi; i++) { /* * We could have no vports in array if unloading, so if * this happens then just use the pport @@ -405,14 +421,14 @@ lpfc_work_done(struct lpfc_hba *phba) vport->work_port_events &= ~work_port_events; spin_unlock_irq(&vport->work_port_lock); } - lpfc_destroy_vport_work_array(vports); + lpfc_destroy_vport_work_array(phba, vports); pring = &phba->sli.ring[LPFC_ELS_RING]; status = 
(ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); status >>= (4*LPFC_ELS_RING); if ((status & HA_RXMASK) || (pring->flag & LPFC_DEFERRED_RING_EVENT)) { - if (pring->flag & LPFC_STOP_IOCB_MASK) { + if (pring->flag & LPFC_STOP_IOCB_EVENT) { pring->flag |= LPFC_DEFERRED_RING_EVENT; } else { lpfc_sli_handle_slow_ring_event(phba, pring, @@ -544,6 +560,7 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2, void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp, *next_ndlp; int rc; @@ -552,7 +569,9 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) continue; - if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) + if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || + ((vport->port_type == LPFC_NPIV_PORT) && + (ndlp->nlp_DID == NameServer_DID))) lpfc_unreg_rpi(vport, ndlp); /* Leave Fabric nodes alone on link down */ @@ -565,14 +584,30 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) } if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) { lpfc_mbx_unreg_vpi(vport); + spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(shost->host_lock); } } +void +lpfc_port_link_failure(struct lpfc_vport *vport) +{ + /* Cleanup any outstanding RSCN activity */ + lpfc_els_flush_rscn(vport); + + /* Cleanup any outstanding ELS commands */ + lpfc_els_flush_cmd(vport); + + lpfc_cleanup_rpis(vport, 0); + + /* Turn off discovery timer if its running */ + lpfc_can_disctmo(vport); +} + static void lpfc_linkdown_port(struct lpfc_vport *vport) { - struct lpfc_nodelist *ndlp, *next_ndlp; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0); @@ -581,21 +616,8 @@ lpfc_linkdown_port(struct lpfc_vport *vport) "Link Down: state:x%x rtry:x%x flg:x%x", vport->port_state, vport->fc_ns_retry, vport->fc_flag); - /* Cleanup any outstanding RSCN activity */ - lpfc_els_flush_rscn(vport); - - /* Cleanup any outstanding ELS commands */ - lpfc_els_flush_cmd(vport); + lpfc_port_link_failure(vport); - lpfc_cleanup_rpis(vport, 0); - - /* free any ndlp's on unused list */ - list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) - if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) - lpfc_drop_node(vport, ndlp); - - /* Turn off discovery timer if its running */ - lpfc_can_disctmo(vport); } int @@ -618,18 +640,18 @@ lpfc_linkdown(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) { + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { /* Issue a LINK DOWN event to all nodes */ lpfc_linkdown_port(vports[i]); } - lpfc_destroy_vport_work_array(vports); + lpfc_destroy_vport_work_array(phba, vports); /* Clean up any firmware default rpi's */ mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mb) { lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb); mb->vport = vport; mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB)) + if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) == MBX_NOT_FINISHED) { mempool_free(mb, phba->mbox_mem_pool); } @@ -643,8 +665,7 @@ lpfc_linkdown(struct lpfc_hba *phba) lpfc_config_link(phba, mb); mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mb->vport = vport; - if (lpfc_sli_issue_mbox(phba, mb, - (MBX_NOWAIT | MBX_STOP_IOCB)) + if 
(lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) == MBX_NOT_FINISHED) { mempool_free(mb, phba->mbox_mem_pool); } @@ -686,7 +707,6 @@ static void lpfc_linkup_port(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - struct lpfc_nodelist *ndlp, *next_ndlp; struct lpfc_hba *phba = vport->phba; if ((vport->load_flag & FC_UNLOADING) != 0) @@ -713,11 +733,6 @@ lpfc_linkup_port(struct lpfc_vport *vport) if (vport->fc_flag & FC_LBIT) lpfc_linkup_cleanup_nodes(vport); - /* free any ndlp's in unused state */ - list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, - nlp_listp) - if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) - lpfc_drop_node(vport, ndlp); } static int @@ -734,9 +749,9 @@ lpfc_linkup(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) lpfc_linkup_port(vports[i]); - lpfc_destroy_vport_work_array(vports); + lpfc_destroy_vport_work_array(phba, vports); if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) lpfc_issue_clear_la(phba, phba->pport); @@ -749,7 +764,7 @@ lpfc_linkup(struct lpfc_hba *phba) * as the completion routine when the command is * handed off to the SLI layer. */ -void +static void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; @@ -852,8 +867,6 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) * LPFC_FLOGI while waiting for FLOGI cmpl */ if (vport->port_state != LPFC_FLOGI) { - vport->port_state = LPFC_FLOGI; - lpfc_set_disctmo(vport); lpfc_initial_flogi(vport); } return; @@ -1022,8 +1035,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) lpfc_read_sparam(phba, sparam_mbox, 0); sparam_mbox->vport = vport; sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; - rc = lpfc_sli_issue_mbox(phba, sparam_mbox, - (MBX_NOWAIT | MBX_STOP_IOCB)); + rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { mp = (struct lpfc_dmabuf *) sparam_mbox->context1; lpfc_mbuf_free(phba, mp->virt, mp->phys); @@ -1040,8 +1052,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) lpfc_config_link(phba, cfglink_mbox); cfglink_mbox->vport = vport; cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; - rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, - (MBX_NOWAIT | MBX_STOP_IOCB)); + rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); if (rc != MBX_NOT_FINISHED) return; mempool_free(cfglink_mbox, phba->mbox_mem_pool); @@ -1174,6 +1185,9 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(pmb, phba->mbox_mem_pool); + /* decrement the node reference count held for this callback + * function. 
+ */ lpfc_nlp_put(ndlp); return; @@ -1219,7 +1233,7 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport) lpfc_unreg_vpi(phba, vport->vpi, mbox); mbox->vport = vport; mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi; - rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, "1800 Could not issue unreg_vpi\n"); @@ -1319,7 +1333,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for(i = 0; - i < LPFC_MAX_VPORTS && vports[i] != NULL; + i <= phba->max_vpi && vports[i] != NULL; i++) { if (vports[i]->port_type == LPFC_PHYSICAL_PORT) continue; @@ -1335,7 +1349,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) "Fabric support\n"); } } - lpfc_destroy_vport_work_array(vports); + lpfc_destroy_vport_work_array(phba, vports); lpfc_do_scr_ns_plogi(phba, vport); } @@ -1361,11 +1375,16 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (mb->mbxStatus) { out: + /* decrement the node reference count held for this + * callback function. + */ lpfc_nlp_put(ndlp); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(pmb, phba->mbox_mem_pool); - lpfc_drop_node(vport, ndlp); + + /* If no other thread is using the ndlp, free it */ + lpfc_nlp_not_used(ndlp); if (phba->fc_topology == TOPOLOGY_LOOP) { /* @@ -1410,6 +1429,9 @@ out: goto out; } + /* decrement the node reference count held for this + * callback function. + */ lpfc_nlp_put(ndlp); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); @@ -1656,8 +1678,18 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) void lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { + /* + * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should + * be used if we wish to issue the "last" lpfc_nlp_put() to remove + * the ndlp from the vport. The ndlp marked as UNUSED on the list + * until ALL other outstanding threads have completed. We check + * that the ndlp not already in the UNUSED state before we proceed. 
+ */ + if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) + return; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); lpfc_nlp_put(ndlp); + return; } /* @@ -1868,8 +1900,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); mbox->vport = vport; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - rc = lpfc_sli_issue_mbox(phba, mbox, - (MBX_NOWAIT | MBX_STOP_IOCB)); + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) mempool_free(mbox, phba->mbox_mem_pool); } @@ -1892,8 +1923,8 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport) lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox); mbox->vport = vport; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - rc = lpfc_sli_issue_mbox(phba, mbox, - (MBX_NOWAIT | MBX_STOP_IOCB)); + mbox->context1 = NULL; + rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); if (rc == MBX_NOT_FINISHED) { mempool_free(mbox, phba->mbox_mem_pool); } @@ -1912,8 +1943,8 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport) lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox); mbox->vport = vport; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - rc = lpfc_sli_issue_mbox(phba, mbox, - (MBX_NOWAIT | MBX_STOP_IOCB)); + mbox->context1 = NULL; + rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); if (rc == MBX_NOT_FINISHED) { lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, "1815 Could not issue " @@ -1981,11 +2012,6 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) list_del_init(&ndlp->dev_loss_evt.evt_listp); - if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) { - list_del_init(&ndlp->dev_loss_evt.evt_listp); - complete((struct completion *)(ndlp->dev_loss_evt.evt_arg2)); - } - lpfc_unreg_rpi(vport, ndlp); return 0; @@ -1999,12 +2025,39 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) static void lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { + struct lpfc_hba *phba = vport->phba; struct lpfc_rport_data *rdata; + LPFC_MBOXQ_t *mbox; + int rc; if (ndlp->nlp_flag & NLP_DELAY_TMO) { lpfc_cancel_retry_delay_tmo(vport, ndlp); } + if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) { + /* For this case we need to cleanup the default rpi + * allocated by the firmware. + */ + if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) + != NULL) { + rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID, + (uint8_t *) &vport->fc_sparam, mbox, 0); + if (rc) { + mempool_free(mbox, phba->mbox_mem_pool); + } + else { + mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; + mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; + mbox->vport = vport; + mbox->context2 = NULL; + rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + mempool_free(mbox, phba->mbox_mem_pool); + } + } + } + } + lpfc_cleanup_node(vport, ndlp); /* @@ -2132,6 +2185,12 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) } if (vport->fc_flag & FC_RSCN_MODE) { if (lpfc_rscn_payload_check(vport, did)) { + /* If we've already recieved a PLOGI from this NPort + * we don't need to try to discover it again. 
+ */ + if (ndlp->nlp_flag & NLP_RCV_PLOGI) + return NULL; + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); @@ -2144,8 +2203,13 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) } else ndlp = NULL; } else { + /* If we've already recieved a PLOGI from this NPort, + * or we are already in the process of discovery on it, + * we don't need to try to discover it again. + */ if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || - ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) + ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || + ndlp->nlp_flag & NLP_RCV_PLOGI) return NULL; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(shost->host_lock); @@ -2220,8 +2284,7 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) lpfc_clear_la(phba, mbox); mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; mbox->vport = vport; - rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | - MBX_STOP_IOCB)); + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { mempool_free(mbox, phba->mbox_mem_pool); lpfc_disc_flush_list(vport); @@ -2244,8 +2307,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport) lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox); regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; regvpimbox->vport = vport; - if (lpfc_sli_issue_mbox(phba, regvpimbox, - (MBX_NOWAIT | MBX_STOP_IOCB)) + if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) == MBX_NOT_FINISHED) { mempool_free(regvpimbox, phba->mbox_mem_pool); } @@ -2414,7 +2476,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) } } -void +static void lpfc_disc_flush_list(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp, *next_ndlp; @@ -2426,7 +2488,6 @@ lpfc_disc_flush_list(struct lpfc_vport *vport) if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { lpfc_free_tx(phba, ndlp); - lpfc_nlp_put(ndlp); } } } @@ -2516,6 +2577,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) if (ndlp->nlp_type & NLP_FABRIC) { /* Clean up the ndlp on Fabric connections */ lpfc_drop_node(vport, ndlp); + } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { /* Fail outstanding IO now since device * is marked for PLOGI. @@ -2524,9 +2586,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) } } if (vport->port_state != LPFC_FLOGI) { - vport->port_state = LPFC_FLOGI; - lpfc_set_disctmo(vport); lpfc_initial_flogi(vport); + return; } break; @@ -2536,7 +2597,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) /* Initial FLOGI timeout */ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0222 Initial %s timeout\n", - vport->vpi ? "FLOGI" : "FDISC"); + vport->vpi ? "FDISC" : "FLOGI"); /* Assume no Fabric and go on with discovery. * Check for outstanding ELS FLOGI to abort. @@ -2558,10 +2619,10 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) /* Next look for NameServer ndlp */ ndlp = lpfc_findnode_did(vport, NameServer_DID); if (ndlp) - lpfc_nlp_put(ndlp); - /* Start discovery */ - lpfc_disc_start(vport); - break; + lpfc_els_abort(phba, ndlp); + + /* ReStart discovery */ + goto restart_disc; case LPFC_NS_QRY: /* Check for wait for NameServer Rsp timeout */ @@ -2580,6 +2641,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) } vport->fc_ns_retry = 0; +restart_disc: /* * Discovery is over. * set port_state to PORT_READY if SLI2. 
@@ -2608,8 +2670,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; initlinkmbox->vport = vport; initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - rc = lpfc_sli_issue_mbox(phba, initlinkmbox, - (MBX_NOWAIT | MBX_STOP_IOCB)); + rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); lpfc_set_loopback_flag(phba); if (rc == MBX_NOT_FINISHED) mempool_free(initlinkmbox, phba->mbox_mem_pool); @@ -2664,12 +2725,14 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) clrlaerr = 1; break; + case LPFC_LINK_UP: + lpfc_issue_clear_la(phba, vport); + /* Drop thru */ case LPFC_LINK_UNKNOWN: case LPFC_WARM_START: case LPFC_INIT_START: case LPFC_INIT_MBX_CMDS: case LPFC_LINK_DOWN: - case LPFC_LINK_UP: case LPFC_HBA_ERROR: lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0230 Unexpected timeout, hba link " @@ -2723,7 +2786,9 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) else mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); - /* Mailbox took a reference to the node */ + /* decrement the node reference count held for this callback + * function. + */ lpfc_nlp_put(ndlp); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); @@ -2747,19 +2812,19 @@ lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param) sizeof(ndlp->nlp_portname)) == 0; } -struct lpfc_nodelist * +static struct lpfc_nodelist * __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) { struct lpfc_nodelist *ndlp; list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { - if (ndlp->nlp_state != NLP_STE_UNUSED_NODE && - filter(ndlp, param)) + if (filter(ndlp, param)) return ndlp; } return NULL; } +#if 0 /* * Search node lists for a remote port matching filter criteria * Caller needs to hold host_lock before calling this routine. @@ -2775,6 +2840,7 @@ lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) spin_unlock_irq(shost->host_lock); return ndlp; } +#endif /* 0 */ /* * This routine looks up the ndlp lists for the given RPI. If rpi found it @@ -2786,6 +2852,7 @@ __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi); } +#if 0 struct lpfc_nodelist * lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) { @@ -2797,6 +2864,7 @@ lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) spin_unlock_irq(shost->host_lock); return ndlp; } +#endif /* 0 */ /* * This routine looks up the ndlp lists for the given WWPN. If WWPN found it @@ -2837,6 +2905,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return; } +/* This routine releases all resources associated with a specifc NPort's ndlp + * and mempool_free's the nodelist. + */ static void lpfc_nlp_release(struct kref *kref) { @@ -2851,16 +2922,57 @@ lpfc_nlp_release(struct kref *kref) mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); } +/* This routine bumps the reference count for a ndlp structure to ensure + * that one discovery thread won't free a ndlp while another discovery thread + * is using it. + */ struct lpfc_nodelist * lpfc_nlp_get(struct lpfc_nodelist *ndlp) { - if (ndlp) + if (ndlp) { + lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, + "node get: did:x%x flg:x%x refcnt:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, + atomic_read(&ndlp->kref.refcount)); kref_get(&ndlp->kref); + } return ndlp; } + +/* This routine decrements the reference count for a ndlp structure. If the + * count goes to 0, this indicates the the associated nodelist should be freed. 
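The comments added above spell out the ndlp lifetime rules: lpfc_nlp_get() bumps the reference count so another discovery thread cannot free the node while it is in use, lpfc_nlp_put() drops one reference and releases on the last put, and the lpfc_nlp_not_used() helper introduced just below frees only when the caller holds the sole remaining reference. A toy userspace model of that trio, with a plain int counter instead of struct kref and free() instead of the release callback:

#include <stdio.h>
#include <stdlib.h>

struct node {
        int refcount;
};

static struct node *node_get(struct node *n)
{
        if (n)
                n->refcount++;
        return n;
}

static int node_put(struct node *n)
{
        if (!n)
                return 0;
        if (--n->refcount == 0) {
                free(n);                /* release: last reference gone */
                return 1;
        }
        return 0;
}

/* Free the node only if no other thread holds a reference. */
static int node_not_used(struct node *n)
{
        if (n->refcount == 1)
                return node_put(n);     /* we held the last reference */
        return 0;                       /* someone else still needs it */
}

int main(void)
{
        struct node *n = calloc(1, sizeof(*n));

        node_get(n);                    /* reference held by the node list */
        node_get(n);                    /* a second user appears */
        printf("not_used -> %d\n", node_not_used(n));   /* 0: still in use */
        node_put(n);                    /* second user drops its reference */
        printf("not_used -> %d\n", node_not_used(n));   /* 1: freed */
        return 0;
}

A not_used()-style helper lets a completion handler say "release this node unless somebody else is still working on it" without racing the other thread's put.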
+ */ int lpfc_nlp_put(struct lpfc_nodelist *ndlp) { + if (ndlp) { + lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, + "node put: did:x%x flg:x%x refcnt:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, + atomic_read(&ndlp->kref.refcount)); + } return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0; } + +/* This routine free's the specified nodelist if it is not in use + * by any other discovery thread. This routine returns 1 if the ndlp + * is not being used by anyone and has been freed. A return value of + * 0 indicates it is being used by another discovery thread and the + * refcount is left unchanged. + */ +int +lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) +{ + lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, + "node not used: did:x%x flg:x%x refcnt:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, + atomic_read(&ndlp->kref.refcount)); + + if (atomic_read(&ndlp->kref.refcount) == 1) { + lpfc_nlp_put(ndlp); + return 1; + } + return 0; +} + diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 451accd5564..041f83e7634 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -139,6 +139,9 @@ struct lpfc_sli_ct_request { uint8_t len; uint8_t symbname[255]; } rsnn; + struct da_id { /* For DA_ID requests */ + uint32_t port_id; + } da_id; struct rspn { /* For RSPN_ID requests */ uint32_t PortId; uint8_t len; @@ -150,11 +153,7 @@ struct lpfc_sli_ct_request { struct gff_acc { uint8_t fbits[128]; } gff_acc; -#ifdef __BIG_ENDIAN_BITFIELD #define FCP_TYPE_FEATURE_OFFSET 7 -#else /* __LITTLE_ENDIAN_BITFIELD */ -#define FCP_TYPE_FEATURE_OFFSET 4 -#endif struct rff { uint32_t PortId; uint8_t reserved[2]; @@ -177,6 +176,8 @@ struct lpfc_sli_ct_request { sizeof(struct rnn)) #define RSNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ sizeof(struct rsnn)) +#define DA_ID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct da_id)) #define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ sizeof(struct rspn)) @@ -1228,7 +1229,8 @@ typedef struct { /* FireFly BIU registers */ #define HS_FFER3 0x20000000 /* Bit 29 */ #define HS_FFER2 0x40000000 /* Bit 30 */ #define HS_FFER1 0x80000000 /* Bit 31 */ -#define HS_FFERM 0xFF000000 /* Mask for error bits 31:24 */ +#define HS_CRIT_TEMP 0x00000100 /* Bit 8 */ +#define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */ /* Host Control Register */ @@ -1277,12 +1279,14 @@ typedef struct { /* FireFly BIU registers */ #define MBX_DEL_LD_ENTRY 0x1D #define MBX_RUN_PROGRAM 0x1E #define MBX_SET_MASK 0x20 -#define MBX_SET_SLIM 0x21 +#define MBX_SET_VARIABLE 0x21 #define MBX_UNREG_D_ID 0x23 #define MBX_KILL_BOARD 0x24 #define MBX_CONFIG_FARP 0x25 #define MBX_BEACON 0x2A #define MBX_HEARTBEAT 0x31 +#define MBX_WRITE_VPARMS 0x32 +#define MBX_ASYNCEVT_ENABLE 0x33 #define MBX_CONFIG_HBQ 0x7C #define MBX_LOAD_AREA 0x81 @@ -1297,7 +1301,7 @@ typedef struct { /* FireFly BIU registers */ #define MBX_REG_VNPID 0x96 #define MBX_UNREG_VNPID 0x97 -#define MBX_FLASH_WR_ULA 0x98 +#define MBX_WRITE_WWN 0x98 #define MBX_SET_DEBUG 0x99 #define MBX_LOAD_EXP_ROM 0x9C @@ -1344,6 +1348,7 @@ typedef struct { /* FireFly BIU registers */ /* SLI_2 IOCB Command Set */ +#define CMD_ASYNC_STATUS 0x7C #define CMD_RCV_SEQUENCE64_CX 0x81 #define CMD_XMIT_SEQUENCE64_CR 0x82 #define CMD_XMIT_SEQUENCE64_CX 0x83 @@ -1368,6 +1373,7 @@ typedef struct { /* FireFly BIU registers */ #define CMD_FCP_TRECEIVE64_CX 0xA1 #define CMD_FCP_TRSP64_CX 0xA3 +#define CMD_QUE_XRI64_CX 0xB3 #define CMD_IOCB_RCV_SEQ64_CX 0xB5 #define 
CMD_IOCB_RCV_ELS64_CX 0xB7 #define CMD_IOCB_RCV_CONT64_CX 0xBB @@ -1406,6 +1412,8 @@ typedef struct { /* FireFly BIU registers */ #define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ #define MBX_TIMEOUT 0xfffffe /* time-out expired waiting for */ +#define TEMPERATURE_OFFSET 0xB0 /* Slim offset for critical temperature event */ + /* * Begin Structure Definitions for Mailbox Commands */ @@ -2606,6 +2614,18 @@ typedef struct { uint32_t IPAddress; } CONFIG_FARP_VAR; +/* Structure for MB Command MBX_ASYNCEVT_ENABLE (0x33) */ + +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd:30; + uint32_t ring:2; /* Ring for ASYNC_EVENT iocb Bits 0-1*/ +#else /* __LITTLE_ENDIAN */ + uint32_t ring:2; /* Ring for ASYNC_EVENT iocb Bits 0-1*/ + uint32_t rsvd:30; +#endif +} ASYNCEVT_ENABLE_VAR; + /* Union of all Mailbox Command types */ #define MAILBOX_CMD_WSIZE 32 #define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t)) @@ -2645,6 +2665,7 @@ typedef union { CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */ REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */ UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */ + ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */ } MAILVARIANTS; /* @@ -2973,6 +2994,34 @@ typedef struct { #endif } RCV_ELS_REQ64; +/* IOCB Command template for RCV_SEQ64 */ +struct rcv_seq64 { + struct ulp_bde64 elsReq; + uint32_t hbq_1; + uint32_t parmRo; +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rctl:8; + uint32_t type:8; + uint32_t dfctl:8; + uint32_t ls:1; + uint32_t fs:1; + uint32_t rsvd2:3; + uint32_t si:1; + uint32_t bc:1; + uint32_t rsvd3:1; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t rsvd3:1; + uint32_t bc:1; + uint32_t si:1; + uint32_t rsvd2:3; + uint32_t fs:1; + uint32_t ls:1; + uint32_t dfctl:8; + uint32_t type:8; + uint32_t rctl:8; +#endif +}; + /* IOCB Command template for all 64 bit FCP Initiator commands */ typedef struct { ULP_BDL bdl; @@ -2987,6 +3036,21 @@ typedef struct { uint32_t fcpt_Length; /* transfer ready for IWRITE */ } FCPT_FIELDS64; +/* IOCB Command template for Async Status iocb commands */ +typedef struct { + uint32_t rsvd[4]; + uint32_t param; +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t evt_code; /* High order bits word 5 */ + uint16_t sub_ctxt_tag; /* Low order bits word 5 */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t sub_ctxt_tag; /* High order bits word 5 */ + uint16_t evt_code; /* Low order bits word 5 */ +#endif +} ASYNCSTAT_FIELDS; +#define ASYNC_TEMP_WARN 0x100 +#define ASYNC_TEMP_SAFE 0x101 + /* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7) or CMD_IOCB_RCV_SEQ64_CX (0xB5) */ @@ -3004,7 +3068,26 @@ struct rcv_sli3 { struct ulp_bde64 bde2; }; +/* Structure used for a single HBQ entry */ +struct lpfc_hbq_entry { + struct ulp_bde64 bde; + uint32_t buffer_tag; +}; +/* IOCB Command template for QUE_XRI64_CX (0xB3) command */ +typedef struct { + struct lpfc_hbq_entry buff; + uint32_t rsvd; + uint32_t rsvd1; +} QUE_XRI64_CX_FIELDS; + +struct que_xri64cx_ext_fields { + uint32_t iotag64_low; + uint32_t iotag64_high; + uint32_t ebde_count; + uint32_t rsvd; + struct lpfc_hbq_entry buff[5]; +}; typedef struct _IOCB { /* IOCB structure */ union { @@ -3028,6 +3111,9 @@ typedef struct _IOCB { /* IOCB structure */ XMT_SEQ_FIELDS64 xseq64; /* XMIT / BCAST cmd */ FCPI_FIELDS64 fcpi64; /* FCP 64 bit Initiator template */ FCPT_FIELDS64 fcpt64; /* FCP 64 bit target template */ + ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ + QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ + struct rcv_seq64 
rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ } un; @@ -3085,6 +3171,10 @@ typedef struct _IOCB { /* IOCB structure */ union { struct rcv_sli3 rcvsli3; /* words 8 - 15 */ + + /* words 8-31 used for que_xri_cx iocb */ + struct que_xri64cx_ext_fields que_xri64cx_ext_words; + uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */ } unsli3; @@ -3124,12 +3214,6 @@ typedef struct _IOCB { /* IOCB structure */ } IOCB_t; -/* Structure used for a single HBQ entry */ -struct lpfc_hbq_entry { - struct ulp_bde64 bde; - uint32_t buffer_tag; -}; - #define SLI1_SLIM_SIZE (4 * 1024) @@ -3172,6 +3256,8 @@ lpfc_is_LC_HBA(unsigned short device) (device == PCI_DEVICE_ID_BSMB) || (device == PCI_DEVICE_ID_ZMID) || (device == PCI_DEVICE_ID_ZSMB) || + (device == PCI_DEVICE_ID_SAT_MID) || + (device == PCI_DEVICE_ID_SAT_SMB) || (device == PCI_DEVICE_ID_RFLY)) return 1; else diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index ecebdfa0047..3205f7488d1 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -212,6 +212,18 @@ out_free_mbox: return 0; } +/* Completion handler for config async event mailbox command. */ +static void +lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) +{ + if (pmboxq->mb.mbxStatus == MBX_SUCCESS) + phba->temp_sensor_support = 1; + else + phba->temp_sensor_support = 0; + mempool_free(pmboxq, phba->mbox_mem_pool); + return; +} + /************************************************************************/ /* */ /* lpfc_config_port_post */ @@ -234,6 +246,15 @@ lpfc_config_port_post(struct lpfc_hba *phba) int i, j; int rc; + spin_lock_irq(&phba->hbalock); + /* + * If the Config port completed correctly the HBA is not + * over heated any more. + */ + if (phba->over_temp_state == HBA_OVER_TEMP) + phba->over_temp_state = HBA_NORMAL_TEMP; + spin_unlock_irq(&phba->hbalock); + pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { phba->link_state = LPFC_HBA_ERROR; @@ -343,7 +364,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) phba->link_state = LPFC_LINK_DOWN; - /* Only process IOCBs on ring 0 till hba_state is READY */ + /* Only process IOCBs on ELS ring till hba_state is READY */ if (psli->ring[psli->extra_ring].cmdringaddr) psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; if (psli->ring[psli->fcp_ring].cmdringaddr) @@ -409,7 +430,21 @@ lpfc_config_port_post(struct lpfc_hba *phba) return -EIO; } /* MBOX buffer will be freed in mbox compl */ + pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + lpfc_config_async(phba, pmb, LPFC_ELS_RING); + pmb->mbox_cmpl = lpfc_config_async_cmpl; + pmb->vport = phba->pport; + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); + if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { + lpfc_printf_log(phba, + KERN_ERR, + LOG_INIT, + "0456 Adapter failed to issue " + "ASYNCEVT_ENABLE mbox status x%x \n.", + rc); + mempool_free(pmb, phba->mbox_mem_pool); + } return (0); } @@ -449,6 +484,9 @@ lpfc_hba_down_post(struct lpfc_hba *phba) struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; struct lpfc_dmabuf *mp, *next_mp; + struct lpfc_iocbq *iocb; + IOCB_t *cmd = NULL; + LIST_HEAD(completions); int i; if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) @@ -464,16 +502,42 @@ lpfc_hba_down_post(struct lpfc_hba *phba) } } + spin_lock_irq(&phba->hbalock); for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; + + /* At this point in time the HBA is either reset or DOA. 
Either + * way, nothing should be on txcmplq as it will NEVER complete. + */ + list_splice_init(&pring->txcmplq, &completions); + pring->txcmplq_cnt = 0; + spin_unlock_irq(&phba->hbalock); + + while (!list_empty(&completions)) { + iocb = list_get_first(&completions, struct lpfc_iocbq, + list); + cmd = &iocb->iocb; + list_del_init(&iocb->list); + + if (!iocb->iocb_cmpl) + lpfc_sli_release_iocbq(phba, iocb); + else { + cmd->ulpStatus = IOSTAT_LOCAL_REJECT; + cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; + (iocb->iocb_cmpl) (phba, iocb, iocb); + } + } + lpfc_sli_abort_iocb_ring(phba, pring); + spin_lock_irq(&phba->hbalock); } + spin_unlock_irq(&phba->hbalock); return 0; } /* HBA heart beat timeout handler */ -void +static void lpfc_hb_timeout(unsigned long ptr) { struct lpfc_hba *phba; @@ -512,8 +576,10 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *phba) { LPFC_MBOXQ_t *pmboxq; + struct lpfc_dmabuf *buf_ptr; int retval; struct lpfc_sli *psli = &phba->sli; + LIST_HEAD(completions); if ((phba->link_state == LPFC_HBA_ERROR) || (phba->pport->load_flag & FC_UNLOADING) || @@ -540,49 +606,88 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) } spin_unlock_irq(&phba->pport->work_port_lock); - /* If there is no heart beat outstanding, issue a heartbeat command */ - if (!phba->hb_outstanding) { - pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); - if (!pmboxq) { - mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); - return; + if (phba->elsbuf_cnt && + (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { + spin_lock_irq(&phba->hbalock); + list_splice_init(&phba->elsbuf, &completions); + phba->elsbuf_cnt = 0; + phba->elsbuf_prev_cnt = 0; + spin_unlock_irq(&phba->hbalock); + + while (!list_empty(&completions)) { + list_remove_head(&completions, buf_ptr, + struct lpfc_dmabuf, list); + lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); + kfree(buf_ptr); } + } + phba->elsbuf_prev_cnt = phba->elsbuf_cnt; - lpfc_heart_beat(phba, pmboxq); - pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; - pmboxq->vport = phba->pport; - retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); + /* If there is no heart beat outstanding, issue a heartbeat command */ + if (phba->cfg_enable_hba_heartbeat) { + if (!phba->hb_outstanding) { + pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); + if (!pmboxq) { + mod_timer(&phba->hb_tmofunc, + jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + return; + } - if (retval != MBX_BUSY && retval != MBX_SUCCESS) { - mempool_free(pmboxq, phba->mbox_mem_pool); + lpfc_heart_beat(phba, pmboxq); + pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; + pmboxq->vport = phba->pport; + retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); + + if (retval != MBX_BUSY && retval != MBX_SUCCESS) { + mempool_free(pmboxq, phba->mbox_mem_pool); + mod_timer(&phba->hb_tmofunc, + jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + return; + } mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + phba->hb_outstanding = 1; return; + } else { + /* + * If heart beat timeout called with hb_outstanding set + * we need to take the HBA offline. 
+ */ + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0459 Adapter heartbeat failure, " + "taking this port offline.\n"); + + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + spin_unlock_irq(&phba->hbalock); + + lpfc_offline_prep(phba); + lpfc_offline(phba); + lpfc_unblock_mgmt_io(phba); + phba->link_state = LPFC_HBA_ERROR; + lpfc_hba_down_post(phba); } - mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); - phba->hb_outstanding = 1; - return; - } else { - /* - * If heart beat timeout called with hb_outstanding set we - * need to take the HBA offline. - */ - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0459 Adapter heartbeat failure, taking " - "this port offline.\n"); + } +} - spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; - spin_unlock_irq(&phba->hbalock); +static void +lpfc_offline_eratt(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; - lpfc_offline_prep(phba); - lpfc_offline(phba); - lpfc_unblock_mgmt_io(phba); - phba->link_state = LPFC_HBA_ERROR; - lpfc_hba_down_post(phba); - } + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + spin_unlock_irq(&phba->hbalock); + lpfc_offline_prep(phba); + + lpfc_offline(phba); + lpfc_reset_barrier(phba); + lpfc_sli_brdreset(phba); + lpfc_hba_down_post(phba); + lpfc_sli_brdready(phba, HS_MBRDY); + lpfc_unblock_mgmt_io(phba); + phba->link_state = LPFC_HBA_ERROR; + return; } /************************************************************************/ @@ -601,6 +706,8 @@ lpfc_handle_eratt(struct lpfc_hba *phba) struct lpfc_sli_ring *pring; struct lpfc_vport **vports; uint32_t event_data; + unsigned long temperature; + struct temp_event temp_event_data; struct Scsi_Host *shost; int i; @@ -608,6 +715,9 @@ lpfc_handle_eratt(struct lpfc_hba *phba) * since we cannot communicate with the pci card anyway. 
*/ if (pci_channel_offline(phba->pcidev)) return; + /* If resets are disabled then leave the HBA alone and return */ + if (!phba->cfg_enable_hba_reset) + return; if (phba->work_hs & HS_FFER6 || phba->work_hs & HS_FFER5) { @@ -620,14 +730,14 @@ lpfc_handle_eratt(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for(i = 0; - i < LPFC_MAX_VPORTS && vports[i] != NULL; + i <= phba->max_vpi && vports[i] != NULL; i++){ shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); vports[i]->fc_flag |= FC_ESTABLISH_LINK; spin_unlock_irq(shost->host_lock); } - lpfc_destroy_vport_work_array(vports); + lpfc_destroy_vport_work_array(phba, vports); spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI2_ACTIVE; spin_unlock_irq(&phba->hbalock); @@ -655,6 +765,31 @@ lpfc_handle_eratt(struct lpfc_hba *phba) return; } lpfc_unblock_mgmt_io(phba); + } else if (phba->work_hs & HS_CRIT_TEMP) { + temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); + temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; + temp_event_data.event_code = LPFC_CRIT_TEMP; + temp_event_data.data = (uint32_t)temperature; + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0459 Adapter maximum temperature exceeded " + "(%ld), taking this port offline " + "Data: x%x x%x x%x\n", + temperature, phba->work_hs, + phba->work_status[0], phba->work_status[1]); + + shost = lpfc_shost_from_vport(phba->pport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(temp_event_data), + (char *) &temp_event_data, + SCSI_NL_VID_TYPE_PCI + | PCI_VENDOR_ID_EMULEX); + + spin_lock_irq(&phba->hbalock); + phba->over_temp_state = HBA_OVER_TEMP; + spin_unlock_irq(&phba->hbalock); + lpfc_offline_eratt(phba); + } else { /* The if clause above forces this code path when the status * failure is a value other than FFER6. 
Do not call the offline @@ -672,14 +807,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba) sizeof(event_data), (char *) &event_data, SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); - spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; - spin_unlock_irq(&phba->hbalock); - lpfc_offline_prep(phba); - lpfc_offline(phba); - lpfc_unblock_mgmt_io(phba); - phba->link_state = LPFC_HBA_ERROR; - lpfc_hba_down_post(phba); + lpfc_offline_eratt(phba); } } @@ -699,21 +827,25 @@ lpfc_handle_latt(struct lpfc_hba *phba) LPFC_MBOXQ_t *pmb; volatile uint32_t control; struct lpfc_dmabuf *mp; - int rc = -ENOMEM; + int rc = 0; pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!pmb) + if (!pmb) { + rc = 1; goto lpfc_handle_latt_err_exit; + } mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); - if (!mp) + if (!mp) { + rc = 2; goto lpfc_handle_latt_free_pmb; + } mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); - if (!mp->virt) + if (!mp->virt) { + rc = 3; goto lpfc_handle_latt_free_mp; - - rc = -EIO; + } /* Cleanup any outstanding ELS commands */ lpfc_els_flush_all_cmd(phba); @@ -722,9 +854,11 @@ lpfc_handle_latt(struct lpfc_hba *phba) lpfc_read_la(phba, pmb, mp); pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; pmb->vport = vport; - rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB)); - if (rc == MBX_NOT_FINISHED) + rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + rc = 4; goto lpfc_handle_latt_free_mbuf; + } /* Clear Link Attention in HA REG */ spin_lock_irq(&phba->hbalock); @@ -756,10 +890,8 @@ lpfc_handle_latt_err_exit: lpfc_linkdown(phba); phba->link_state = LPFC_HBA_ERROR; - /* The other case is an error from issue_mbox */ - if (rc == -ENOMEM) - lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, - "0300 READ_LA: no buffers\n"); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); return; } @@ -1088,9 +1220,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt, /* Allocate buffer to post */ mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); if (mp1) - mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, - &mp1->phys); - if (mp1 == 0 || mp1->virt == 0) { + mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); + if (!mp1 || !mp1->virt) { kfree(mp1); lpfc_sli_release_iocbq(phba, iocb); pring->missbufcnt = cnt; @@ -1104,7 +1235,7 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt, if (mp2) mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp2->phys); - if (mp2 == 0 || mp2->virt == 0) { + if (!mp2 || !mp2->virt) { kfree(mp2); lpfc_mbuf_free(phba, mp1->virt, mp1->phys); kfree(mp1); @@ -1280,15 +1411,39 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) kfree(HashWorking); } -static void +void lpfc_cleanup(struct lpfc_vport *vport) { + struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp, *next_ndlp; + int i = 0; - /* clean up phba - lpfc specific */ - lpfc_can_disctmo(vport); - list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) - lpfc_nlp_put(ndlp); + if (phba->link_state > LPFC_LINK_DOWN) + lpfc_port_link_failure(vport); + + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + if (ndlp->nlp_type & NLP_FABRIC) + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RECOVERY); + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); + } + + /* At this point, ALL ndlp's should be gone + * because of the previous NLP_EVT_DEVICE_RM. + * Lets wait for this to happen, if needed. 
+ */ + while (!list_empty(&vport->fc_nodes)) { + + if (i++ > 3000) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + "0233 Nodelist not empty\n"); + break; + } + + /* Wait for any activity on ndlps to settle */ + msleep(10); + } return; } @@ -1307,14 +1462,14 @@ lpfc_establish_link_tmo(unsigned long ptr) phba->pport->fc_flag, phba->pport->port_state); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) { + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { struct Scsi_Host *shost; shost = lpfc_shost_from_vport(vports[i]); spin_lock_irqsave(shost->host_lock, iflag); vports[i]->fc_flag &= ~FC_ESTABLISH_LINK; spin_unlock_irqrestore(shost->host_lock, iflag); } - lpfc_destroy_vport_work_array(vports); + lpfc_destroy_vport_work_array(phba, vports); } void @@ -1339,6 +1494,16 @@ lpfc_stop_phba_timers(struct lpfc_hba *phba) return; } +static void +lpfc_block_mgmt_io(struct lpfc_hba * phba) +{ + unsigned long iflag; + + spin_lock_irqsave(&phba->hbalock, iflag); + phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; + spin_unlock_irqrestore(&phba->hbalock, iflag); +} + int lpfc_online(struct lpfc_hba *phba) { @@ -1369,7 +1534,7 @@ lpfc_online(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) { + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { struct Scsi_Host *shost; shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); @@ -1378,23 +1543,13 @@ lpfc_online(struct lpfc_hba *phba) vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); } - lpfc_destroy_vport_work_array(vports); + lpfc_destroy_vport_work_array(phba, vports); lpfc_unblock_mgmt_io(phba); return 0; } void -lpfc_block_mgmt_io(struct lpfc_hba * phba) -{ - unsigned long iflag; - - spin_lock_irqsave(&phba->hbalock, iflag); - phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; - spin_unlock_irqrestore(&phba->hbalock, iflag); -} - -void lpfc_unblock_mgmt_io(struct lpfc_hba * phba) { unsigned long iflag; @@ -1409,6 +1564,8 @@ lpfc_offline_prep(struct lpfc_hba * phba) { struct lpfc_vport *vport = phba->pport; struct lpfc_nodelist *ndlp, *next_ndlp; + struct lpfc_vport **vports; + int i; if (vport->fc_flag & FC_OFFLINE_MODE) return; @@ -1417,10 +1574,34 @@ lpfc_offline_prep(struct lpfc_hba * phba) lpfc_linkdown(phba); - /* Issue an unreg_login to all nodes */ - list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) - if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) - lpfc_unreg_rpi(vport, ndlp); + /* Issue an unreg_login to all nodes on all vports */ + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) { + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + struct Scsi_Host *shost; + + if (vports[i]->load_flag & FC_UNLOADING) + continue; + shost = lpfc_shost_from_vport(vports[i]); + list_for_each_entry_safe(ndlp, next_ndlp, + &vports[i]->fc_nodes, + nlp_listp) { + if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) + continue; + if (ndlp->nlp_type & NLP_FABRIC) { + lpfc_disc_state_machine(vports[i], ndlp, + NULL, NLP_EVT_DEVICE_RECOVERY); + lpfc_disc_state_machine(vports[i], ndlp, + NULL, NLP_EVT_DEVICE_RM); + } + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag &= ~NLP_NPR_ADISC; + spin_unlock_irq(shost->host_lock); + lpfc_unreg_rpi(vports[i], ndlp); + } + } + } + lpfc_destroy_vport_work_array(phba, vports); lpfc_sli_flush_mbox_queue(phba); } @@ -1439,9 +1620,9 @@ lpfc_offline(struct lpfc_hba *phba) 
lpfc_stop_phba_timers(phba); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) lpfc_stop_vport_timers(vports[i]); - lpfc_destroy_vport_work_array(vports); + lpfc_destroy_vport_work_array(phba, vports); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0460 Bring Adapter offline\n"); /* Bring down the SLI Layer and cleanup. The HBA is offline @@ -1452,15 +1633,14 @@ lpfc_offline(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) { + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); - lpfc_cleanup(vports[i]); spin_lock_irq(shost->host_lock); vports[i]->work_port_events = 0; vports[i]->fc_flag |= FC_OFFLINE_MODE; spin_unlock_irq(shost->host_lock); } - lpfc_destroy_vport_work_array(vports); + lpfc_destroy_vport_work_array(phba, vports); } /****************************************************************************** @@ -1674,6 +1854,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost) fc_host_supported_speeds(shost) = 0; if (phba->lmt & LMT_10Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; + if (phba->lmt & LMT_8Gb) + fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; if (phba->lmt & LMT_4Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; if (phba->lmt & LMT_2Gb) @@ -1707,13 +1889,14 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) struct Scsi_Host *shost = NULL; void *ptr; unsigned long bar0map_len, bar2map_len; - int error = -ENODEV; + int error = -ENODEV, retval; int i, hbq_count; uint16_t iotag; + int bars = pci_select_bars(pdev, IORESOURCE_MEM); - if (pci_enable_device(pdev)) + if (pci_enable_device_bars(pdev, bars)) goto out; - if (pci_request_regions(pdev, LPFC_DRIVER_NAME)) + if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) goto out_disable_device; phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); @@ -1823,9 +2006,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) lpfc_sli_setup(phba); lpfc_sli_queue_setup(phba); - error = lpfc_mem_alloc(phba); - if (error) + retval = lpfc_mem_alloc(phba); + if (retval) { + error = retval; goto out_free_hbqslimp; + } /* Initialize and populate the iocb list per host. 
*/ INIT_LIST_HEAD(&phba->lpfc_iocb_list); @@ -1880,6 +2065,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) /* Initialize list of fabric iocbs */ INIT_LIST_HEAD(&phba->fabric_iocb_list); + /* Initialize list to save ELS buffers */ + INIT_LIST_HEAD(&phba->elsbuf); + vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); if (!vport) goto out_kthread_stop; @@ -1891,8 +2079,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) pci_set_drvdata(pdev, shost); if (phba->cfg_use_msi) { - error = pci_enable_msi(phba->pcidev); - if (!error) + retval = pci_enable_msi(phba->pcidev); + if (!retval) phba->using_msi = 1; else lpfc_printf_log(phba, KERN_INFO, LOG_INIT, @@ -1900,11 +2088,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) "with IRQ\n"); } - error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED, + retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED, LPFC_DRIVER_NAME, phba); - if (error) { + if (retval) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable interrupt handler failed\n"); + error = retval; goto out_disable_msi; } @@ -1914,11 +2103,15 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; - if (lpfc_alloc_sysfs_attr(vport)) + if (lpfc_alloc_sysfs_attr(vport)) { + error = -ENOMEM; goto out_free_irq; + } - if (lpfc_sli_hba_setup(phba)) + if (lpfc_sli_hba_setup(phba)) { + error = -ENODEV; goto out_remove_device; + } /* * hba setup may have changed the hba_queue_depth so we need to adjust @@ -1975,7 +2168,7 @@ out_idr_remove: out_free_phba: kfree(phba); out_release_regions: - pci_release_regions(pdev); + pci_release_selected_regions(pdev, bars); out_disable_device: pci_disable_device(pdev); out: @@ -1991,6 +2184,8 @@ lpfc_pci_remove_one(struct pci_dev *pdev) struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; + int bars = pci_select_bars(pdev, IORESOURCE_MEM); + spin_lock_irq(&phba->hbalock); vport->load_flag |= FC_UNLOADING; spin_unlock_irq(&phba->hbalock); @@ -1998,8 +2193,12 @@ lpfc_pci_remove_one(struct pci_dev *pdev) kfree(vport->vname); lpfc_free_sysfs_attr(vport); + kthread_stop(phba->worker_thread); + fc_remove_host(shost); scsi_remove_host(shost); + lpfc_cleanup(vport); + /* * Bring down the SLI Layer. 
This step disable all interrupts, * clears the rings, discards all mailbox commands, and resets @@ -2014,9 +2213,6 @@ lpfc_pci_remove_one(struct pci_dev *pdev) spin_unlock_irq(&phba->hbalock); lpfc_debugfs_terminate(vport); - lpfc_cleanup(vport); - - kthread_stop(phba->worker_thread); /* Release the irq reservation */ free_irq(phba->pcidev->irq, phba); @@ -2048,7 +2244,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev) kfree(phba); - pci_release_regions(pdev); + pci_release_selected_regions(pdev, bars); pci_disable_device(pdev); } @@ -2239,12 +2435,22 @@ lpfc_init(void) printk(LPFC_MODULE_DESC "\n"); printk(LPFC_COPYRIGHT "\n"); + if (lpfc_enable_npiv) { + lpfc_transport_functions.vport_create = lpfc_vport_create; + lpfc_transport_functions.vport_delete = lpfc_vport_delete; + } lpfc_transport_template = fc_attach_transport(&lpfc_transport_functions); - lpfc_vport_transport_template = - fc_attach_transport(&lpfc_vport_transport_functions); - if (!lpfc_transport_template || !lpfc_vport_transport_template) + if (lpfc_transport_template == NULL) return -ENOMEM; + if (lpfc_enable_npiv) { + lpfc_vport_transport_template = + fc_attach_transport(&lpfc_vport_transport_functions); + if (lpfc_vport_transport_template == NULL) { + fc_release_transport(lpfc_transport_template); + return -ENOMEM; + } + } error = pci_register_driver(&lpfc_driver); if (error) { fc_release_transport(lpfc_transport_template); @@ -2259,7 +2465,8 @@ lpfc_exit(void) { pci_unregister_driver(&lpfc_driver); fc_release_transport(lpfc_transport_template); - fc_release_transport(lpfc_vport_transport_template); + if (lpfc_enable_npiv) + fc_release_transport(lpfc_vport_transport_template); } module_init(lpfc_init); diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index 626e4d87872..c5841d7565f 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -26,6 +26,7 @@ #define LOG_IP 0x20 /* IP traffic history */ #define LOG_FCP 0x40 /* FCP traffic history */ #define LOG_NODE 0x80 /* Node table events */ +#define LOG_TEMP 0x100 /* Temperature sensor events */ #define LOG_MISC 0x400 /* Miscellaneous events */ #define LOG_SLI 0x800 /* SLI events */ #define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index a592733664e..dfc63f6ccd7 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -82,6 +82,24 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) } /**********************************************/ +/* lpfc_config_async Issue a */ +/* MBX_ASYNC_EVT_ENABLE mailbox command */ +/**********************************************/ +void +lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, + uint32_t ring) +{ + MAILBOX_t *mb; + + mb = &pmb->mb; + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + mb->mbxCommand = MBX_ASYNCEVT_ENABLE; + mb->un.varCfgAsyncEvent.ring = ring; + mb->mbxOwner = OWN_HOST; + return; +} + +/**********************************************/ /* lpfc_heart_beat Issue a HEART_BEAT */ /* mailbox command */ /**********************************************/ @@ -270,8 +288,10 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi) /* Get a buffer to hold the HBAs Service Parameters */ - if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == 0) || - ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == 0)) { + mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); + if (mp) + mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); + if (!mp || 
!mp->virt) { kfree(mp); mb->mbxCommand = MBX_READ_SPARM64; /* READ_SPARAM: no buffers */ @@ -369,8 +389,10 @@ lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, mb->mbxOwner = OWN_HOST; /* Get a buffer to hold NPorts Service Parameters */ - if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == NULL) || - ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == 0)) { + mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); + if (mp) + mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); + if (!mp || !mp->virt) { kfree(mp); mb->mbxCommand = MBX_REG_LOGIN64; /* REG_LOGIN: no buffers */ @@ -874,7 +896,7 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) case MBX_DOWN_LOAD: /* 0x1C */ case MBX_DEL_LD_ENTRY: /* 0x1D */ case MBX_LOAD_AREA: /* 0x81 */ - case MBX_FLASH_WR_ULA: /* 0x98 */ + case MBX_WRITE_WWN: /* 0x98 */ case MBX_LOAD_EXP_ROM: /* 0x9C */ return LPFC_MBOX_TMO_FLASH_CMD; } diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 43c3b8a0d76..6dc5ab8d671 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -98,6 +98,7 @@ lpfc_mem_alloc(struct lpfc_hba * phba) fail_free_hbq_pool: lpfc_sli_hbqbuf_free_all(phba); + pci_pool_destroy(phba->lpfc_hbq_pool); fail_free_nlp_mem_pool: mempool_destroy(phba->nlp_mem_pool); phba->nlp_mem_pool = NULL; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 880af0cd463..4a0e3406e37 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -287,6 +287,24 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; lp = (uint32_t *) pcmd->virt; sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); + if (wwn_to_u64(sp->portName.u.wwn) == 0) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0140 PLOGI Reject: invalid nname\n"); + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, + NULL); + return 0; + } + if (wwn_to_u64(sp->nodeName.u.wwn) == 0) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0141 PLOGI Reject: invalid pname\n"); + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, + NULL); + return 0; + } if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) { /* Reject this request because invalid parameters */ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; @@ -343,8 +361,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_config_link(phba, mbox); mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mbox->vport = vport; - rc = lpfc_sli_issue_mbox - (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { mempool_free(mbox, phba->mbox_mem_pool); goto out; @@ -407,6 +424,61 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp, mbox); return 1; } + + /* If the remote NPort logs into us, before we can initiate + * discovery to them, cleanup the NPort from discovery accordingly. 
+ */ + if (ndlp->nlp_state == NLP_STE_NPR_NODE) { + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag &= ~NLP_DELAY_TMO; + spin_unlock_irq(shost->host_lock); + del_timer_sync(&ndlp->nlp_delayfunc); + ndlp->nlp_last_elscmd = 0; + + if (!list_empty(&ndlp->els_retry_evt.evt_listp)) + list_del_init(&ndlp->els_retry_evt.evt_listp); + + if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + spin_unlock_irq(shost->host_lock); + + if ((ndlp->nlp_flag & NLP_ADISC_SND) && + (vport->num_disc_nodes)) { + /* Check to see if there are more + * ADISCs to be sent + */ + lpfc_more_adisc(vport); + + if ((vport->num_disc_nodes == 0) && + (vport->fc_npr_cnt)) + lpfc_els_disc_plogi(vport); + + if (vport->num_disc_nodes == 0) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NDISC_ACTIVE; + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); + lpfc_end_rscn(vport); + } + } + else if (vport->num_disc_nodes) { + /* Check to see if there are more + * PLOGIs to be sent + */ + lpfc_more_plogi(vport); + + if (vport->num_disc_nodes == 0) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NDISC_ACTIVE; + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); + lpfc_end_rscn(vport); + } + } + } + } + lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); return 1; @@ -501,12 +573,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_unlock_irq(shost->host_lock); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; - ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - } else { - ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); } + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; @@ -594,6 +663,25 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return ndlp->nlp_state; } +static uint32_t +lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + /* This transition is only legal if we previously + * rcv'ed a PLOGI. Since we don't want 2 discovery threads + * working on the same NPortID, do nothing for this thread + * to stop it. 
+ */ + if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + "0253 Illegal State Transition: node x%x " + "event x%x, state x%x Data: x%x x%x\n", + ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, + ndlp->nlp_flag); + } + return ndlp->nlp_state; +} + /* Start of Discovery State Machine routines */ static uint32_t @@ -605,11 +693,8 @@ lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, cmdiocb = (struct lpfc_iocbq *) arg; if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { - ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; - lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); return ndlp->nlp_state; } - lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } @@ -618,7 +703,6 @@ lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { lpfc_issue_els_logo(vport, ndlp, 0); - lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); return ndlp->nlp_state; } @@ -633,7 +717,6 @@ lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_flag |= NLP_LOGO_ACC; spin_unlock_irq(shost->host_lock); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); - lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); return ndlp->nlp_state; } @@ -642,7 +725,6 @@ static uint32_t lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } @@ -650,7 +732,6 @@ static uint32_t lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } @@ -752,6 +833,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, uint32_t evt) { struct lpfc_hba *phba = vport->phba; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_iocbq *cmdiocb, *rspiocb; struct lpfc_dmabuf *pcmd, *prsp, *mp; uint32_t *lp; @@ -778,6 +860,12 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, lp = (uint32_t *) prsp->virt; sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); + if (wwn_to_u64(sp->portName.u.wwn) == 0 || + wwn_to_u64(sp->nodeName.u.wwn) == 0) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0142 PLOGI RSP: Invalid WWN.\n"); + goto out; + } if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3)) goto out; /* PLOGI chkparm OK */ @@ -828,13 +916,15 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, } mbox->context2 = lpfc_nlp_get(ndlp); mbox->vport = vport; - if (lpfc_sli_issue_mbox(phba, mbox, - (MBX_NOWAIT | MBX_STOP_IOCB)) + if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) != MBX_NOT_FINISHED) { lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); return ndlp->nlp_state; } + /* decrement node reference count to the failed mbox + * command + */ lpfc_nlp_put(ndlp); mp = (struct lpfc_dmabuf *) mbox->context1; lpfc_mbuf_free(phba, mp->virt, mp->phys); @@ -864,13 +954,27 @@ out: "0261 Cannot Register NameServer login\n"); } - /* Free this node since the driver cannot login or has the wrong - sparm */ - lpfc_drop_node(vport, ndlp); + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag |= NLP_DEFER_RM; + spin_unlock_irq(shost->host_lock); return NLP_STE_FREED_NODE; } static uint32_t +lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + return ndlp->nlp_state; +} + +static uint32_t +lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) +{ + 
return ndlp->nlp_state; +} + +static uint32_t lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { @@ -1137,7 +1241,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, (ndlp == (struct lpfc_nodelist *) mb->context2)) { mp = (struct lpfc_dmabuf *) (mb->context1); if (mp) { - lpfc_mbuf_free(phba, mp->virt, mp->phys); + __lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); } lpfc_nlp_put(ndlp); @@ -1197,8 +1301,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, * retry discovery. */ if (mb->mbxStatus == MBXERR_RPI_FULL) { - ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; - lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); return ndlp->nlp_state; } @@ -1378,7 +1482,7 @@ out: lpfc_issue_els_logo(vport, ndlp, 0); ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; - lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); return ndlp->nlp_state; } @@ -1753,7 +1857,7 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, irsp = &rspiocb->iocb; if (irsp->ulpStatus) { - lpfc_drop_node(vport, ndlp); + ndlp->nlp_flag |= NLP_DEFER_RM; return NLP_STE_FREED_NODE; } return ndlp->nlp_state; @@ -1942,9 +2046,9 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT]) lpfc_rcv_els_plogi_issue, /* RCV_PRLO */ lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */ lpfc_disc_illegal, /* CMPL_PRLI */ - lpfc_disc_illegal, /* CMPL_LOGO */ + lpfc_cmpl_logo_plogi_issue, /* CMPL_LOGO */ lpfc_disc_illegal, /* CMPL_ADISC */ - lpfc_disc_illegal, /* CMPL_REG_LOGIN */ + lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN */ lpfc_device_rm_plogi_issue, /* DEVICE_RM */ lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */ @@ -1968,7 +2072,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT]) lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */ lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */ lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */ - lpfc_disc_illegal, /* CMPL_PLOGI */ + lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */ lpfc_disc_illegal, /* CMPL_PRLI */ lpfc_disc_illegal, /* CMPL_LOGO */ lpfc_disc_illegal, /* CMPL_ADISC */ @@ -1982,7 +2086,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT]) lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */ lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */ lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */ - lpfc_disc_illegal, /* CMPL_PLOGI */ + lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */ lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */ lpfc_disc_illegal, /* CMPL_LOGO */ lpfc_disc_illegal, /* CMPL_ADISC */ diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 4e46045dea6..fc5c3a42b05 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -130,7 +130,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) { + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); shost_for_each_device(sdev, shost) { new_queue_depth = @@ -151,7 +151,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) new_queue_depth); } } - lpfc_destroy_vport_work_array(vports); + lpfc_destroy_vport_work_array(phba, vports); atomic_set(&phba->num_rsrc_err, 0); atomic_set(&phba->num_cmd_success, 0); } @@ -166,7 +166,7 @@ 
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) { + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); shost_for_each_device(sdev, shost) { if (sdev->ordered_tags) @@ -179,7 +179,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) sdev->queue_depth+1); } } - lpfc_destroy_vport_work_array(vports); + lpfc_destroy_vport_work_array(phba, vports); atomic_set(&phba->num_rsrc_err, 0); atomic_set(&phba->num_cmd_success, 0); } @@ -380,7 +380,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) (num_bde * sizeof (struct ulp_bde64)); iocb_cmd->ulpBdeCount = 1; iocb_cmd->ulpLe = 1; - fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd)); + fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); return 0; } @@ -542,6 +542,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, int result; struct scsi_device *sdev, *tmp_sdev; int depth = 0; + unsigned long flags; lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; lpfc_cmd->status = pIocbOut->iocb.ulpStatus; @@ -608,6 +609,15 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, cmd->scsi_done(cmd); if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { + /* + * If there is a thread waiting for command completion + * wake up the thread. + */ + spin_lock_irqsave(sdev->host->host_lock, flags); + lpfc_cmd->pCmd = NULL; + if (lpfc_cmd->waitq) + wake_up(lpfc_cmd->waitq); + spin_unlock_irqrestore(sdev->host->host_lock, flags); lpfc_release_scsi_buf(phba, lpfc_cmd); return; } @@ -669,6 +679,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, } } + /* + * If there is a thread waiting for command completion + * wake up the thread. 
+ */ + spin_lock_irqsave(sdev->host->host_lock, flags); + lpfc_cmd->pCmd = NULL; + if (lpfc_cmd->waitq) + wake_up(lpfc_cmd->waitq); + spin_unlock_irqrestore(sdev->host->host_lock, flags); + lpfc_release_scsi_buf(phba, lpfc_cmd); } @@ -743,6 +763,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, piocbq->iocb.ulpContext = pnode->nlp_rpi; if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) piocbq->iocb.ulpFCP2Rcvy = 1; + else + piocbq->iocb.ulpFCP2Rcvy = 0; piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); piocbq->context1 = lpfc_cmd; @@ -1018,8 +1040,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) struct lpfc_iocbq *abtsiocb; struct lpfc_scsi_buf *lpfc_cmd; IOCB_t *cmd, *icmd; - unsigned int loop_count = 0; int ret = SUCCESS; + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); lpfc_block_error_handler(cmnd); lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; @@ -1074,17 +1096,15 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_sli_poll_fcp_ring (phba); + lpfc_cmd->waitq = &waitq; /* Wait for abort to complete */ - while (lpfc_cmd->pCmd == cmnd) - { - if (phba->cfg_poll & DISABLE_FCP_RING_INT) - lpfc_sli_poll_fcp_ring (phba); + wait_event_timeout(waitq, + (lpfc_cmd->pCmd != cmnd), + (2*vport->cfg_devloss_tmo*HZ)); - schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ); - if (++loop_count - > (2 * vport->cfg_devloss_tmo)/LPFC_ABORT_WAIT) - break; - } + spin_lock_irq(shost->host_lock); + lpfc_cmd->waitq = NULL; + spin_unlock_irq(shost->host_lock); if (lpfc_cmd->pCmd == cmnd) { ret = FAILED; @@ -1438,8 +1458,7 @@ struct scsi_host_template lpfc_template = { .slave_destroy = lpfc_slave_destroy, .scan_finished = lpfc_scan_finished, .this_id = -1, - .sg_tablesize = LPFC_SG_SEG_CNT, - .use_sg_chaining = ENABLE_SG_CHAINING, + .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, .cmd_per_lun = LPFC_CMD_PER_LUN, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = lpfc_hba_attrs, @@ -1459,10 +1478,9 @@ struct scsi_host_template lpfc_vport_template = { .slave_destroy = lpfc_slave_destroy, .scan_finished = lpfc_scan_finished, .this_id = -1, - .sg_tablesize = LPFC_SG_SEG_CNT, + .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, .cmd_per_lun = LPFC_CMD_PER_LUN, .use_clustering = ENABLE_CLUSTERING, - .use_sg_chaining = ENABLE_SG_CHAINING, .shost_attrs = lpfc_vport_attrs, .max_sectors = 0xFFFF, }; diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index 31787bb6d53..daba9237498 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -138,6 +138,7 @@ struct lpfc_scsi_buf { * Iotag is in here */ struct lpfc_iocbq cur_iocbq; + wait_queue_head_t *waitq; }; #define LPFC_SCSI_DMA_EXT_SIZE 264 diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index ce348c5c706..fdd01e384e3 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -106,7 +106,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba) return iocbq; } -void +static void __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { size_t start_clean = offsetof(struct lpfc_iocbq, iocb); @@ -199,6 +199,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) case CMD_RCV_ELS_REQ_CX: case CMD_RCV_SEQUENCE64_CX: case CMD_RCV_ELS_REQ64_CX: + case CMD_ASYNC_STATUS: case CMD_IOCB_RCV_SEQ64_CX: case CMD_IOCB_RCV_ELS64_CX: case CMD_IOCB_RCV_CONT64_CX: @@ -473,8 +474,7 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) if (pring->txq_cnt && lpfc_is_link_up(phba) && (pring->ringno != phba->sli.fcp_ring || 
- phba->sli.sli_flag & LPFC_PROCESS_LA) && - !(pring->flag & LPFC_STOP_IOCB_MBX)) { + phba->sli.sli_flag & LPFC_PROCESS_LA)) { while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && (nextiocb = lpfc_sli_ringtx_get(phba, pring))) @@ -489,32 +489,7 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) return; } -/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */ -static void -lpfc_sli_turn_on_ring(struct lpfc_hba *phba, int ringno) -{ - struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? - &phba->slim2p->mbx.us.s3_pgp.port[ringno] : - &phba->slim2p->mbx.us.s2.port[ringno]; - unsigned long iflags; - - /* If the ring is active, flag it */ - spin_lock_irqsave(&phba->hbalock, iflags); - if (phba->sli.ring[ringno].cmdringaddr) { - if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) { - phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX; - /* - * Force update of the local copy of cmdGetInx - */ - phba->sli.ring[ringno].local_getidx - = le32_to_cpu(pgp->cmdGetInx); - lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]); - } - } - spin_unlock_irqrestore(&phba->hbalock, iflags); -} - -struct lpfc_hbq_entry * +static struct lpfc_hbq_entry * lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) { struct hbq_s *hbqp = &phba->hbqs[hbqno]; @@ -565,6 +540,7 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) list_del(&hbq_buf->dbuf.list); (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); } + phba->hbqs[i].buffer_count = 0; } } @@ -633,8 +609,8 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) return 0; } - start = lpfc_hbq_defs[hbqno]->buffer_count; - end = count + lpfc_hbq_defs[hbqno]->buffer_count; + start = phba->hbqs[hbqno].buffer_count; + end = count + start; if (end > lpfc_hbq_defs[hbqno]->entry_count) { end = lpfc_hbq_defs[hbqno]->entry_count; } @@ -646,7 +622,7 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) return 1; hbq_buffer->tag = (i | (hbqno << 16)); if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) - lpfc_hbq_defs[hbqno]->buffer_count++; + phba->hbqs[hbqno].buffer_count++; else (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); } @@ -660,14 +636,14 @@ lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) lpfc_hbq_defs[qno]->add_count)); } -int +static int lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) { return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, lpfc_hbq_defs[qno]->init_count)); } -struct hbq_dmabuf * +static struct hbq_dmabuf * lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) { struct lpfc_dmabuf *d_buf; @@ -686,7 +662,7 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) } lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, "1803 Bad hbq tag. 
Data: x%x x%x\n", - tag, lpfc_hbq_defs[tag >> 16]->buffer_count); + tag, phba->hbqs[tag >> 16].buffer_count); return NULL; } @@ -712,6 +688,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand) case MBX_LOAD_SM: case MBX_READ_NV: case MBX_WRITE_NV: + case MBX_WRITE_VPARMS: case MBX_RUN_BIU_DIAG: case MBX_INIT_LINK: case MBX_DOWN_LINK: @@ -739,7 +716,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand) case MBX_DEL_LD_ENTRY: case MBX_RUN_PROGRAM: case MBX_SET_MASK: - case MBX_SET_SLIM: + case MBX_SET_VARIABLE: case MBX_UNREG_D_ID: case MBX_KILL_BOARD: case MBX_CONFIG_FARP: @@ -751,9 +728,10 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand) case MBX_READ_RPI64: case MBX_REG_LOGIN64: case MBX_READ_LA64: - case MBX_FLASH_WR_ULA: + case MBX_WRITE_WWN: case MBX_SET_DEBUG: case MBX_LOAD_EXP_ROM: + case MBX_ASYNCEVT_ENABLE: case MBX_REG_VPI: case MBX_UNREG_VPI: case MBX_HEARTBEAT: @@ -953,6 +931,17 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag) return &new_hbq_entry->dbuf; } +static struct lpfc_dmabuf * +lpfc_sli_get_buff(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, + uint32_t tag) +{ + if (tag & QUE_BUFTAG_BIT) + return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); + else + return lpfc_sli_replace_hbqbuff(phba, tag); +} + static int lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *saveq) @@ -961,19 +950,112 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, WORD5 * w5p; uint32_t Rctl, Type; uint32_t match, i; + struct lpfc_iocbq *iocbq; match = 0; irsp = &(saveq->iocb); - if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) - || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) - || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX) - || (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX)) { + + if (irsp->ulpStatus == IOSTAT_NEED_BUFFER) + return 1; + if (irsp->ulpCommand == CMD_ASYNC_STATUS) { + if (pring->lpfc_sli_rcv_async_status) + pring->lpfc_sli_rcv_async_status(phba, pring, saveq); + else + lpfc_printf_log(phba, + KERN_WARNING, + LOG_SLI, + "0316 Ring %d handler: unexpected " + "ASYNC_STATUS iocb received evt_code " + "0x%x\n", + pring->ringno, + irsp->un.asyncstat.evt_code); + return 1; + } + + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + if (irsp->ulpBdeCount != 0) { + saveq->context2 = lpfc_sli_get_buff(phba, pring, + irsp->un.ulpWord[3]); + if (!saveq->context2) + lpfc_printf_log(phba, + KERN_ERR, + LOG_SLI, + "0341 Ring %d Cannot find buffer for " + "an unsolicited iocb. tag 0x%x\n", + pring->ringno, + irsp->un.ulpWord[3]); + } + if (irsp->ulpBdeCount == 2) { + saveq->context3 = lpfc_sli_get_buff(phba, pring, + irsp->unsli3.sli3Words[7]); + if (!saveq->context3) + lpfc_printf_log(phba, + KERN_ERR, + LOG_SLI, + "0342 Ring %d Cannot find buffer for an" + " unsolicited iocb. tag 0x%x\n", + pring->ringno, + irsp->unsli3.sli3Words[7]); + } + list_for_each_entry(iocbq, &saveq->list, list) { + irsp = &(iocbq->iocb); + if (irsp->ulpBdeCount != 0) { + iocbq->context2 = lpfc_sli_get_buff(phba, pring, + irsp->un.ulpWord[3]); + if (!iocbq->context2) + lpfc_printf_log(phba, + KERN_ERR, + LOG_SLI, + "0343 Ring %d Cannot find " + "buffer for an unsolicited iocb" + ". tag 0x%x\n", pring->ringno, + irsp->un.ulpWord[3]); + } + if (irsp->ulpBdeCount == 2) { + iocbq->context3 = lpfc_sli_get_buff(phba, pring, + irsp->unsli3.sli3Words[7]); + if (!iocbq->context3) + lpfc_printf_log(phba, + KERN_ERR, + LOG_SLI, + "0344 Ring %d Cannot find " + "buffer for an unsolicited " + "iocb. 
tag 0x%x\n", + pring->ringno, + irsp->unsli3.sli3Words[7]); + } + } + } + if (irsp->ulpBdeCount != 0 && + (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || + irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { + int found = 0; + + /* search continue save q for same XRI */ + list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { + if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) { + list_add_tail(&saveq->list, &iocbq->list); + found = 1; + break; + } + } + if (!found) + list_add_tail(&saveq->clist, + &pring->iocb_continue_saveq); + if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { + list_del_init(&iocbq->clist); + saveq = iocbq; + irsp = &(saveq->iocb); + } else + return 0; + } + if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || + (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || + (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { Rctl = FC_ELS_REQ; Type = FC_ELS_DATA; } else { - w5p = - (WORD5 *) & (saveq->iocb.un. - ulpWord[5]); + w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); Rctl = w5p->hcsw.Rctl; Type = w5p->hcsw.Type; @@ -988,15 +1070,6 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } } - if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { - if (irsp->ulpBdeCount != 0) - saveq->context2 = lpfc_sli_replace_hbqbuff(phba, - irsp->un.ulpWord[3]); - if (irsp->ulpBdeCount == 2) - saveq->context3 = lpfc_sli_replace_hbqbuff(phba, - irsp->unsli3.sli3Words[7]); - } - /* unSolicited Responses */ if (pring->prt[0].profile) { if (pring->prt[0].lpfc_sli_rcv_unsol_event) @@ -1006,12 +1079,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } else { /* We must search, based on rctl / type for the right routine */ - for (i = 0; i < pring->num_mask; - i++) { - if ((pring->prt[i].rctl == - Rctl) - && (pring->prt[i]. - type == Type)) { + for (i = 0; i < pring->num_mask; i++) { + if ((pring->prt[i].rctl == Rctl) + && (pring->prt[i].type == Type)) { if (pring->prt[i].lpfc_sli_rcv_unsol_event) (pring->prt[i].lpfc_sli_rcv_unsol_event) (phba, pring, saveq); @@ -1084,6 +1154,12 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, IOSTAT_LOCAL_REJECT; saveq->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED; + + /* Firmware could still be in progress + * of DMAing payload, so don't free data + * buffer till after a hbeat. 
+ */ + saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; } } (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); @@ -1572,12 +1648,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); - if (list_empty(&(pring->iocb_continueq))) { - list_add(&rspiocbp->list, &(pring->iocb_continueq)); - } else { - list_add_tail(&rspiocbp->list, - &(pring->iocb_continueq)); - } + list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); pring->iocb_continueq_cnt++; if (irsp->ulpLe) { @@ -1642,17 +1713,17 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); if (type == LPFC_SOL_IOCB) { - spin_unlock_irqrestore(&phba->hbalock, - iflag); + spin_unlock_irqrestore(&phba->hbalock, iflag); rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); spin_lock_irqsave(&phba->hbalock, iflag); } else if (type == LPFC_UNSOL_IOCB) { - spin_unlock_irqrestore(&phba->hbalock, - iflag); + spin_unlock_irqrestore(&phba->hbalock, iflag); rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); spin_lock_irqsave(&phba->hbalock, iflag); + if (!rc) + free_saveq = 0; } else if (type == LPFC_ABORT_IOCB) { if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) && ((cmdiocbp = @@ -1921,8 +1992,8 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) "0329 Kill HBA Data: x%x x%x\n", phba->pport->port_state, psli->sli_flag); - if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, - GFP_KERNEL)) == 0) + pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) return 1; /* Disable the error attention */ @@ -2113,7 +2184,10 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) <status> */ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0436 Adapter failed to init, " - "timeout, status reg x%x\n", status); + "timeout, status reg x%x, " + "FW Data: A8 x%x AC x%x\n", status, + readl(phba->MBslimaddr + 0xa8), + readl(phba->MBslimaddr + 0xac)); phba->link_state = LPFC_HBA_ERROR; return -ETIMEDOUT; } @@ -2125,7 +2199,10 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) <status> */ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0437 Adapter failed to init, " - "chipset, status reg x%x\n", status); + "chipset, status reg x%x, " + "FW Data: A8 x%x AC x%x\n", status, + readl(phba->MBslimaddr + 0xa8), + readl(phba->MBslimaddr + 0xac)); phba->link_state = LPFC_HBA_ERROR; return -EIO; } @@ -2153,7 +2230,10 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) /* Adapter failed to init, chipset, status reg <status> */ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0438 Adapter failed to init, chipset, " - "status reg x%x\n", status); + "status reg x%x, " + "FW Data: A8 x%x AC x%x\n", status, + readl(phba->MBslimaddr + 0xa8), + readl(phba->MBslimaddr + 0xac)); phba->link_state = LPFC_HBA_ERROR; return -EIO; } @@ -2485,11 +2565,16 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) lpfc_sli_abort_iocb_ring(phba, pring); lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0316 Resetting board due to mailbox timeout\n"); + "0345 Resetting board due to mailbox timeout\n"); /* * lpfc_offline calls lpfc_sli_hba_down which will clean up * on oustanding mailbox commands. */ + /* If resets are disabled then set error state and return. 
*/ + if (!phba->cfg_enable_hba_reset) { + phba->link_state = LPFC_HBA_ERROR; + return; + } lpfc_offline_prep(phba); lpfc_offline(phba); lpfc_sli_brdrestart(phba); @@ -2507,6 +2592,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) uint32_t status, evtctr; uint32_t ha_copy; int i; + unsigned long timeout; unsigned long drvr_flag = 0; volatile uint32_t word0, ldata; void __iomem *to_slim; @@ -2519,7 +2605,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) "1806 Mbox x%x failed. No vport\n", pmbox->mb.mbxCommand); dump_stack(); - return MBXERR_ERROR; + return MBX_NOT_FINISHED; } } @@ -2571,21 +2657,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) return MBX_NOT_FINISHED; } - /* Handle STOP IOCB processing flag. This is only meaningful - * if we are not polling for mbox completion. - */ - if (flag & MBX_STOP_IOCB) { - flag &= ~MBX_STOP_IOCB; - /* Now flag each ring */ - for (i = 0; i < psli->num_rings; i++) { - /* If the ring is active, flag it */ - if (psli->ring[i].cmdringaddr) { - psli->ring[i].flag |= - LPFC_STOP_IOCB_MBX; - } - } - } - /* Another mailbox command is still being processed, queue this * command to be processed later. */ @@ -2620,23 +2691,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) return MBX_BUSY; } - /* Handle STOP IOCB processing flag. This is only meaningful - * if we are not polling for mbox completion. - */ - if (flag & MBX_STOP_IOCB) { - flag &= ~MBX_STOP_IOCB; - if (flag == MBX_NOWAIT) { - /* Now flag each ring */ - for (i = 0; i < psli->num_rings; i++) { - /* If the ring is active, flag it */ - if (psli->ring[i].cmdringaddr) { - psli->ring[i].flag |= - LPFC_STOP_IOCB_MBX; - } - } - } - } - psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; /* If we are not polling, we MUST be in SLI2 mode */ @@ -2714,18 +2768,24 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) } wmb(); - /* interrupt board to doit right away */ - writel(CA_MBATT, phba->CAregaddr); - readl(phba->CAregaddr); /* flush */ switch (flag) { case MBX_NOWAIT: - /* Don't wait for it to finish, just return */ + /* Set up reference to mailbox command */ psli->mbox_active = pmbox; + /* Interrupt board to do it */ + writel(CA_MBATT, phba->CAregaddr); + readl(phba->CAregaddr); /* flush */ + /* Don't wait for it to finish, just return */ break; case MBX_POLL: + /* Set up null reference to mailbox command */ psli->mbox_active = NULL; + /* Interrupt board to do it */ + writel(CA_MBATT, phba->CAregaddr); + readl(phba->CAregaddr); /* flush */ + if (psli->sli_flag & LPFC_SLI2_ACTIVE) { /* First read mbox status word */ word0 = *((volatile uint32_t *)&phba->slim2p->mbx); @@ -2737,15 +2797,15 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) /* Read the HBA Host Attention Register */ ha_copy = readl(phba->HAregaddr); - - i = lpfc_mbox_tmo_val(phba, mb->mbxCommand); - i *= 1000; /* Convert to ms */ - + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, + mb->mbxCommand) * + 1000) + jiffies; + i = 0; /* Wait for command to complete */ while (((word0 & OWN_CHIP) == OWN_CHIP) || (!(ha_copy & HA_MBATT) && (phba->link_state > LPFC_WARM_START))) { - if (i-- <= 0) { + if (time_after(jiffies, timeout)) { psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; spin_unlock_irqrestore(&phba->hbalock, drvr_flag); @@ -2758,12 +2818,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) && (evtctr != psli->slistat.mbox_event)) 
break; - spin_unlock_irqrestore(&phba->hbalock, - drvr_flag); - - msleep(1); - - spin_lock_irqsave(&phba->hbalock, drvr_flag); + if (i++ > 10) { + spin_unlock_irqrestore(&phba->hbalock, + drvr_flag); + msleep(1); + spin_lock_irqsave(&phba->hbalock, drvr_flag); + } if (psli->sli_flag & LPFC_SLI2_ACTIVE) { /* First copy command data */ @@ -2848,7 +2908,7 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* * Lockless version of lpfc_sli_issue_iocb. */ -int +static int __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb, uint32_t flag) { @@ -2879,9 +2939,9 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* * Check to see if we are blocking IOCB processing because of a - * outstanding mbox command. + * outstanding event. */ - if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX)) + if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) goto iocb_busy; if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { @@ -2993,6 +3053,61 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba) return 0; } +static void +lpfc_sli_async_event_handler(struct lpfc_hba * phba, + struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) +{ + IOCB_t *icmd; + uint16_t evt_code; + uint16_t temp; + struct temp_event temp_event_data; + struct Scsi_Host *shost; + + icmd = &iocbq->iocb; + evt_code = icmd->un.asyncstat.evt_code; + temp = icmd->ulpContext; + + if ((evt_code != ASYNC_TEMP_WARN) && + (evt_code != ASYNC_TEMP_SAFE)) { + lpfc_printf_log(phba, + KERN_ERR, + LOG_SLI, + "0346 Ring %d handler: unexpected ASYNC_STATUS" + " evt_code 0x%x\n", + pring->ringno, + icmd->un.asyncstat.evt_code); + return; + } + temp_event_data.data = (uint32_t)temp; + temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; + if (evt_code == ASYNC_TEMP_WARN) { + temp_event_data.event_code = LPFC_THRESHOLD_TEMP; + lpfc_printf_log(phba, + KERN_ERR, + LOG_TEMP, + "0347 Adapter is very hot, please take " + "corrective action. temperature : %d Celsius\n", + temp); + } + if (evt_code == ASYNC_TEMP_SAFE) { + temp_event_data.event_code = LPFC_NORMAL_TEMP; + lpfc_printf_log(phba, + KERN_ERR, + LOG_TEMP, + "0340 Adapter temperature is OK now. 
" + "temperature : %d Celsius\n", + temp); + } + + /* Send temperature change event to applications */ + shost = lpfc_shost_from_vport(phba->pport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(temp_event_data), (char *) &temp_event_data, + SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); + +} + + int lpfc_sli_setup(struct lpfc_hba *phba) { @@ -3059,6 +3174,8 @@ lpfc_sli_setup(struct lpfc_hba *phba) pring->fast_iotag = 0; pring->iotag_ctr = 0; pring->iotag_max = 4096; + pring->lpfc_sli_rcv_async_status = + lpfc_sli_async_event_handler; pring->num_mask = 4; pring->prt[0].profile = 0; /* Mask 0 */ pring->prt[0].rctl = FC_ELS_REQ; @@ -3123,6 +3240,7 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba) INIT_LIST_HEAD(&pring->txq); INIT_LIST_HEAD(&pring->txcmplq); INIT_LIST_HEAD(&pring->iocb_continueq); + INIT_LIST_HEAD(&pring->iocb_continue_saveq); INIT_LIST_HEAD(&pring->postbufq); } spin_unlock_irq(&phba->hbalock); @@ -3193,6 +3311,7 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) LIST_HEAD(completions); struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; + struct lpfc_dmabuf *buf_ptr; LPFC_MBOXQ_t *pmb; struct lpfc_iocbq *iocb; IOCB_t *cmd = NULL; @@ -3232,6 +3351,19 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) } } + spin_lock_irqsave(&phba->hbalock, flags); + list_splice_init(&phba->elsbuf, &completions); + phba->elsbuf_cnt = 0; + phba->elsbuf_prev_cnt = 0; + spin_unlock_irqrestore(&phba->hbalock, flags); + + while (!list_empty(&completions)) { + list_remove_head(&completions, buf_ptr, + struct lpfc_dmabuf, list); + lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); + kfree(buf_ptr); + } + /* Return any active mbox cmds */ del_timer_sync(&psli->mbox_tmo); spin_lock_irqsave(&phba->hbalock, flags); @@ -3294,6 +3426,47 @@ lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, return 0; } +uint32_t +lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) +{ + spin_lock_irq(&phba->hbalock); + phba->buffer_tag_count++; + /* + * Always set the QUE_BUFTAG_BIT to distiguish between + * a tag assigned by HBQ. + */ + phba->buffer_tag_count |= QUE_BUFTAG_BIT; + spin_unlock_irq(&phba->hbalock); + return phba->buffer_tag_count; +} + +struct lpfc_dmabuf * +lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + uint32_t tag) +{ + struct lpfc_dmabuf *mp, *next_mp; + struct list_head *slp = &pring->postbufq; + + /* Search postbufq, from the begining, looking for a match on tag */ + spin_lock_irq(&phba->hbalock); + list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { + if (mp->buffer_tag == tag) { + list_del_init(&mp->list); + pring->postbufq_cnt--; + spin_unlock_irq(&phba->hbalock); + return mp; + } + } + + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0410 Cannot find virtual addr for buffer tag on " + "ring %d Data x%lx x%p x%p x%x\n", + pring->ringno, (unsigned long) tag, + slp->next, slp->prev, pring->postbufq_cnt); + + return NULL; +} struct lpfc_dmabuf * lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, @@ -3361,6 +3534,12 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, pring->txcmplq_cnt--; spin_unlock_irq(&phba->hbalock); + /* Firmware could still be in progress of DMAing + * payload, so don't free data buffer till after + * a hbeat. 
+ */ + abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE; + abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED; @@ -3699,7 +3878,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, unsigned long flag; /* The caller must leave context1 empty. */ - if (pmboxq->context1 != 0) + if (pmboxq->context1) return MBX_NOT_FINISHED; /* setup wake call as IOCB callback */ @@ -3771,7 +3950,6 @@ lpfc_intr_handler(int irq, void *dev_id) uint32_t ha_copy; uint32_t work_ha_copy; unsigned long status; - int i; uint32_t control; MAILBOX_t *mbox, *pmbox; @@ -3888,7 +4066,6 @@ lpfc_intr_handler(int irq, void *dev_id) } if (work_ha_copy & HA_ERATT) { - phba->link_state = LPFC_HBA_ERROR; /* * There was a link/board error. Read the * status register to retrieve the error event @@ -3920,7 +4097,7 @@ lpfc_intr_handler(int irq, void *dev_id) * Stray Mailbox Interrupt, mbxCommand <cmd> * mbxStatus <status> */ - lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "(%d):0304 Stray Mailbox " "Interrupt mbxCommand x%x " @@ -3928,51 +4105,60 @@ lpfc_intr_handler(int irq, void *dev_id) (vport ? vport->vpi : 0), pmbox->mbxCommand, pmbox->mbxStatus); - } - phba->last_completion_time = jiffies; - del_timer_sync(&phba->sli.mbox_tmo); - - phba->sli.mbox_active = NULL; - if (pmb->mbox_cmpl) { - lpfc_sli_pcimem_bcopy(mbox, pmbox, - MAILBOX_CMD_SIZE); - } - if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { - pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; + /* clear mailbox attention bit */ + work_ha_copy &= ~HA_MBATT; + } else { + phba->last_completion_time = jiffies; + del_timer(&phba->sli.mbox_tmo); - lpfc_debugfs_disc_trc(vport, - LPFC_DISC_TRC_MBOX_VPORT, - "MBOX dflt rpi: : status:x%x rpi:x%x", - (uint32_t)pmbox->mbxStatus, - pmbox->un.varWords[0], 0); - - if ( !pmbox->mbxStatus) { - mp = (struct lpfc_dmabuf *) - (pmb->context1); - ndlp = (struct lpfc_nodelist *) - pmb->context2; - - /* Reg_LOGIN of dflt RPI was successful. - * new lets get rid of the RPI using the - * same mbox buffer. - */ - lpfc_unreg_login(phba, vport->vpi, - pmbox->un.varWords[0], pmb); - pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; - pmb->context1 = mp; - pmb->context2 = ndlp; - pmb->vport = vport; - spin_lock(&phba->hbalock); - phba->sli.sli_flag &= - ~LPFC_SLI_MBOX_ACTIVE; - spin_unlock(&phba->hbalock); - goto send_current_mbox; + phba->sli.mbox_active = NULL; + if (pmb->mbox_cmpl) { + lpfc_sli_pcimem_bcopy(mbox, pmbox, + MAILBOX_CMD_SIZE); + } + if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { + pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; + + lpfc_debugfs_disc_trc(vport, + LPFC_DISC_TRC_MBOX_VPORT, + "MBOX dflt rpi: : " + "status:x%x rpi:x%x", + (uint32_t)pmbox->mbxStatus, + pmbox->un.varWords[0], 0); + + if (!pmbox->mbxStatus) { + mp = (struct lpfc_dmabuf *) + (pmb->context1); + ndlp = (struct lpfc_nodelist *) + pmb->context2; + + /* Reg_LOGIN of dflt RPI was + * successful. new lets get + * rid of the RPI using the + * same mbox buffer. 
+ */ + lpfc_unreg_login(phba, + vport->vpi, + pmbox->un.varWords[0], + pmb); + pmb->mbox_cmpl = + lpfc_mbx_cmpl_dflt_rpi; + pmb->context1 = mp; + pmb->context2 = ndlp; + pmb->vport = vport; + spin_lock(&phba->hbalock); + phba->sli.sli_flag &= + ~LPFC_SLI_MBOX_ACTIVE; + spin_unlock(&phba->hbalock); + goto send_current_mbox; + } } + spin_lock(&phba->pport->work_port_lock); + phba->pport->work_port_events &= + ~WORKER_MBOX_TMO; + spin_unlock(&phba->pport->work_port_lock); + lpfc_mbox_cmpl_put(phba, pmb); } - spin_lock(&phba->pport->work_port_lock); - phba->pport->work_port_events &= ~WORKER_MBOX_TMO; - spin_unlock(&phba->pport->work_port_lock); - lpfc_mbox_cmpl_put(phba, pmb); } if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active == NULL)) { @@ -3990,10 +4176,6 @@ send_current_mbox: lpfc_mbox_cmpl_put(phba, pmb); goto send_next_mbox; } - } else { - /* Turn on IOCB processing */ - for (i = 0; i < phba->sli.num_rings; i++) - lpfc_sli_turn_on_ring(phba, i); } } diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 51b2b6b949b..7249fd252cb 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -33,6 +33,7 @@ typedef enum _lpfc_ctx_cmd { struct lpfc_iocbq { /* lpfc_iocbqs are used in double linked lists */ struct list_head list; + struct list_head clist; uint16_t iotag; /* pre-assigned IO tag */ uint16_t rsvd1; @@ -44,6 +45,7 @@ struct lpfc_iocbq { #define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */ #define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ #define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ +#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ uint8_t abort_count; uint8_t rsvd2; @@ -92,8 +94,6 @@ typedef struct lpfcMboxq { #define MBX_POLL 1 /* poll mailbox till command done, then return */ #define MBX_NOWAIT 2 /* issue command then return immediately */ -#define MBX_STOP_IOCB 4 /* Stop iocb processing till mbox cmds - complete */ #define LPFC_MAX_RING_MASK 4 /* max num of rctl/type masks allowed per ring */ @@ -129,9 +129,7 @@ struct lpfc_sli_ring { uint16_t flag; /* ring flags */ #define LPFC_DEFERRED_RING_EVENT 0x001 /* Deferred processing a ring event */ #define LPFC_CALL_RING_AVAILABLE 0x002 /* indicates cmd was full */ -#define LPFC_STOP_IOCB_MBX 0x010 /* Stop processing IOCB cmds mbox */ #define LPFC_STOP_IOCB_EVENT 0x020 /* Stop processing IOCB cmds event */ -#define LPFC_STOP_IOCB_MASK 0x030 /* Stop processing IOCB cmds mask */ uint16_t abtsiotag; /* tracks next iotag to use for ABTS */ uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */ @@ -163,9 +161,12 @@ struct lpfc_sli_ring { struct list_head iocb_continueq; uint16_t iocb_continueq_cnt; /* current length of queue */ uint16_t iocb_continueq_max; /* max length */ + struct list_head iocb_continue_saveq; struct lpfc_sli_ring_mask prt[LPFC_MAX_RING_MASK]; uint32_t num_mask; /* number of mask entries in prt array */ + void (*lpfc_sli_rcv_async_status) (struct lpfc_hba *, + struct lpfc_sli_ring *, struct lpfc_iocbq *); struct lpfc_sli_ring_stat stats; /* SLI statistical info */ @@ -199,9 +200,6 @@ struct lpfc_hbq_init { uint32_t add_count; /* number to allocate when starved */ } ; -#define LPFC_MAX_HBQ 16 - - /* Structure used to hold SLI statistical counters and info */ struct lpfc_sli_stat { uint64_t mbox_stat_err; /* Mbox cmds completed status error */ diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 0081f49286b..4b633d39a82 100644 --- a/drivers/scsi/lpfc/lpfc_version.h 
+++ b/drivers/scsi/lpfc/lpfc_version.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2007 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -18,10 +18,10 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.2.2" +#define LPFC_DRIVER_VERSION "8.2.4" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ LPFC_DRIVER_VERSION -#define LPFC_COPYRIGHT "Copyright(c) 2004-2007 Emulex. All rights reserved." +#define LPFC_COPYRIGHT "Copyright(c) 2004-2008 Emulex. All rights reserved." diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index dcb415e717c..9fad7663c11 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -125,15 +125,26 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport) pmb->vport = vport; rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2); if (rc != MBX_SUCCESS) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT, - "1818 VPort failed init, mbxCmd x%x " - "READ_SPARM mbxStatus x%x, rc = x%x\n", - mb->mbxCommand, mb->mbxStatus, rc); - lpfc_mbuf_free(phba, mp->virt, mp->phys); - kfree(mp); - if (rc != MBX_TIMEOUT) - mempool_free(pmb, phba->mbox_mem_pool); - return -EIO; + if (signal_pending(current)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT, + "1830 Signal aborted mbxCmd x%x\n", + mb->mbxCommand); + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + if (rc != MBX_TIMEOUT) + mempool_free(pmb, phba->mbox_mem_pool); + return -EINTR; + } else { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT, + "1818 VPort failed init, mbxCmd x%x " + "READ_SPARM mbxStatus x%x, rc = x%x\n", + mb->mbxCommand, mb->mbxStatus, rc); + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + if (rc != MBX_TIMEOUT) + mempool_free(pmb, phba->mbox_mem_pool); + return -EIO; + } } memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); @@ -204,6 +215,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) int instance; int vpi; int rc = VPORT_ERROR; + int status; if ((phba->sli_rev < 3) || !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { @@ -248,13 +260,19 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) vport->vpi = vpi; lpfc_debugfs_initialize(vport); - if (lpfc_vport_sparm(phba, vport)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, - "1813 Create VPORT failed. " - "Cannot get sparam\n"); + if ((status = lpfc_vport_sparm(phba, vport))) { + if (status == -EINTR) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + "1831 Create VPORT Interrupted.\n"); + rc = VPORT_ERROR; + } else { + lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + "1813 Create VPORT failed. 
" + "Cannot get sparam\n"); + rc = VPORT_NORESOURCES; + } lpfc_free_vpi(phba, vpi); destroy_port(vport); - rc = VPORT_NORESOURCES; goto error_out; } @@ -427,7 +445,6 @@ int lpfc_vport_delete(struct fc_vport *fc_vport) { struct lpfc_nodelist *ndlp = NULL; - struct lpfc_nodelist *next_ndlp; struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost; struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; struct lpfc_hba *phba = vport->phba; @@ -482,8 +499,18 @@ lpfc_vport_delete(struct fc_vport *fc_vport) ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && - phba->link_state >= LPFC_LINK_UP) { - + phba->link_state >= LPFC_LINK_UP) { + if (vport->cfg_enable_da_id) { + timeout = msecs_to_jiffies(phba->fc_ratov * 2000); + if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0)) + while (vport->ct_flags && timeout) + timeout = schedule_timeout(timeout); + else + lpfc_printf_log(vport->phba, KERN_WARNING, + LOG_VPORT, + "1829 CT command failed to " + "delete objects on fabric. \n"); + } /* First look for the Fabric ndlp */ ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp) { @@ -503,23 +530,20 @@ lpfc_vport_delete(struct fc_vport *fc_vport) } skip_logo: + lpfc_cleanup(vport); lpfc_sli_host_down(vport); - list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { - lpfc_disc_state_machine(vport, ndlp, NULL, - NLP_EVT_DEVICE_RECOVERY); - lpfc_disc_state_machine(vport, ndlp, NULL, - NLP_EVT_DEVICE_RM); - } - lpfc_stop_vport_timers(vport); lpfc_unreg_all_rpis(vport); - lpfc_unreg_default_rpis(vport); - /* - * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the - * scsi_host_put() to release the vport. - */ - lpfc_mbx_unreg_vpi(vport); + + if (!(phba->pport->load_flag & FC_UNLOADING)) { + lpfc_unreg_default_rpis(vport); + /* + * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) + * does the scsi_host_put() to release the vport. 
+ */ + lpfc_mbx_unreg_vpi(vport); + } lpfc_free_vpi(phba, vport->vpi); vport->work_port_events = 0; @@ -532,16 +556,13 @@ skip_logo: return VPORT_OK; } -EXPORT_SYMBOL(lpfc_vport_create); -EXPORT_SYMBOL(lpfc_vport_delete); - struct lpfc_vport ** lpfc_create_vport_work_array(struct lpfc_hba *phba) { struct lpfc_vport *port_iterator; struct lpfc_vport **vports; int index = 0; - vports = kzalloc(LPFC_MAX_VPORTS * sizeof(struct lpfc_vport *), + vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *), GFP_KERNEL); if (vports == NULL) return NULL; @@ -560,12 +581,12 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba) } void -lpfc_destroy_vport_work_array(struct lpfc_vport **vports) +lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports) { int i; if (vports == NULL) return; - for (i=0; vports[i] != NULL && i < LPFC_MAX_VPORTS; i++) + for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++) scsi_host_put(lpfc_shost_from_vport(vports[i])); kfree(vports); } diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h index 91da17751a3..96c445333b6 100644 --- a/drivers/scsi/lpfc/lpfc_vport.h +++ b/drivers/scsi/lpfc/lpfc_vport.h @@ -89,7 +89,7 @@ int lpfc_vport_delete(struct fc_vport *); int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *); int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint); struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *); -void lpfc_destroy_vport_work_array(struct lpfc_vport **); +void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **); /* * queuecommand VPORT-specific return codes. Specified in the host byte code. |
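
The two lpfc_vport.c / lpfc_vport.h hunks above rework the vport work-array helpers: lpfc_create_vport_work_array() now sizes the array from phba->max_vpi + 1 instead of the removed LPFC_MAX_VPORTS constant, and lpfc_destroy_vport_work_array() gains a struct lpfc_hba * argument so its cleanup loop can use the same bound. Below is a minimal sketch of how a caller inside the driver would use the updated pair; it assumes the lpfc driver's internal headers, and handle_one_vport() is a hypothetical stand-in for whatever per-vport work the caller performs.

static void example_walk_vports(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	/* Builds a NULL-padded snapshot of the active vports; the create
	 * helper holds a scsi_host reference for each entry it fills in,
	 * matching the scsi_host_put() done by the destroy helper.
	 */
	vports = lpfc_create_vport_work_array(phba);
	if (vports == NULL)
		return;

	/* Walk the snapshot with the same bound the destroy helper uses:
	 * stop at the first NULL slot or after max_vpi + 1 entries.
	 */
	for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
		handle_one_vport(vports[i]);	/* hypothetical per-vport work */

	/* Releases the host references and frees the array. */
	lpfc_destroy_vport_work_array(phba, vports);
}

After this change, callers that previously passed only the array to lpfc_destroy_vport_work_array() would need to pass the phba as well.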