author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-15 16:51:54 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-15 16:51:54 -0700
commit    bc06cffdec85d487c77109dffcd2f285bdc502d3 (patch)
tree      adc6e6398243da87e66c56102840597a329183a0
parent    d3502d7f25b22cfc9762bf1781faa9db1bb3be2e (diff)
parent    9413d7b8aa777dd1fc7db9563ce5e80d769fe7b5 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (166 commits)
  [SCSI] ibmvscsi: convert to use the data buffer accessors
  [SCSI] dc395x: convert to use the data buffer accessors
  [SCSI] ncr53c8xx: convert to use the data buffer accessors
  [SCSI] sym53c8xx: convert to use the data buffer accessors
  [SCSI] ppa: coding police and printk levels
  [SCSI] aic7xxx_old: remove redundant GFP_ATOMIC from kmalloc
  [SCSI] i2o: remove redundant GFP_ATOMIC from kmalloc from device.c
  [SCSI] remove the dead CYBERSTORMIII_SCSI option
  [SCSI] don't build scsi_dma_{map,unmap} for !HAS_DMA
  [SCSI] Clean up scsi_add_lun a bit
  [SCSI] 53c700: Remove printk, which triggers because of low scsi clock on SNI RMs
  [SCSI] sni_53c710: Cleanup
  [SCSI] qla4xxx: Fix underrun/overrun conditions
  [SCSI] megaraid_mbox: use mutex instead of semaphore
  [SCSI] aacraid: add 51245, 51645 and 52245 adapters to documentation.
  [SCSI] qla2xxx: update version to 8.02.00-k1.
  [SCSI] qla2xxx: add support for NPIV
  [SCSI] stex: use resid for xfer len information
  [SCSI] Add Brownie 1200U3P to blacklist
  [SCSI] scsi.c: convert to use the data buffer accessors
  ...
-rw-r--r--  Documentation/scsi/aacraid.txt | 3
-rw-r--r--  Documentation/scsi/scsi_fc_transport.txt | 450
-rw-r--r--  drivers/block/cciss_scsi.c | 75
-rw-r--r--  drivers/ieee1394/sbp2.c | 75
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 40
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h | 4
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 14
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 4
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 63
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h | 5
-rw-r--r--  drivers/message/fusion/linux_compat.h | 9
-rw-r--r--  drivers/message/fusion/lsi/mpi.h | 7
-rw-r--r--  drivers/message/fusion/lsi/mpi_cnfg.h | 61
-rw-r--r--  drivers/message/fusion/lsi/mpi_history.txt | 66
-rw-r--r--  drivers/message/fusion/lsi/mpi_inb.h | 221
-rw-r--r--  drivers/message/fusion/lsi/mpi_init.h | 10
-rw-r--r--  drivers/message/fusion/lsi/mpi_ioc.h | 40
-rw-r--r--  drivers/message/fusion/lsi/mpi_raid.h | 11
-rw-r--r--  drivers/message/fusion/mptbase.c | 40
-rw-r--r--  drivers/message/fusion/mptbase.h | 6
-rw-r--r--  drivers/message/fusion/mptctl.c | 2
-rw-r--r--  drivers/message/fusion/mptctl.h | 2
-rw-r--r--  drivers/message/fusion/mptfc.c | 3
-rw-r--r--  drivers/message/fusion/mptlan.c | 2
-rw-r--r--  drivers/message/fusion/mptlan.h | 2
-rw-r--r--  drivers/message/fusion/mptsas.c | 2
-rw-r--r--  drivers/message/fusion/mptscsih.c | 264
-rw-r--r--  drivers/message/fusion/mptscsih.h | 2
-rw-r--r--  drivers/message/fusion/mptspi.c | 3
-rw-r--r--  drivers/message/i2o/device.c | 2
-rw-r--r--  drivers/message/i2o/i2o_scsi.c | 24
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 20
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 7
-rw-r--r--  drivers/scsi/3w-9xxx.c | 135
-rw-r--r--  drivers/scsi/3w-xxxx.c | 104
-rw-r--r--  drivers/scsi/53c700.c | 77
-rw-r--r--  drivers/scsi/53c700.h | 5
-rw-r--r--  drivers/scsi/53c7xx.c | 6102
-rw-r--r--  drivers/scsi/53c7xx.h | 1608
-rw-r--r--  drivers/scsi/53c7xx.scr | 1591
-rw-r--r--  drivers/scsi/53c7xx_d.h_shipped | 2874
-rw-r--r--  drivers/scsi/53c7xx_u.h_shipped | 102
-rw-r--r--  drivers/scsi/BusLogic.c | 51
-rw-r--r--  drivers/scsi/Kconfig | 56
-rw-r--r--  drivers/scsi/Makefile | 23
-rw-r--r--  drivers/scsi/NCR5380.c | 14
-rw-r--r--  drivers/scsi/NCR5380.h | 6
-rw-r--r--  drivers/scsi/NCR53c406a.c | 45
-rw-r--r--  drivers/scsi/a100u2w.c | 1239
-rw-r--r--  drivers/scsi/a100u2w.h | 297
-rw-r--r--  drivers/scsi/a4000t.c | 143
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 322
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 40
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 210
-rw-r--r--  drivers/scsi/aacraid/linit.c | 104
-rw-r--r--  drivers/scsi/aacraid/rx.c | 33
-rw-r--r--  drivers/scsi/advansys.c | 101
-rw-r--r--  drivers/scsi/advansys.h | 36
-rw-r--r--  drivers/scsi/aha152x.c | 50
-rw-r--r--  drivers/scsi/aha1740.c | 48
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 51
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.h | 4
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 59
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.h | 4
-rw-r--r--  drivers/scsi/aic7xxx_old.c | 57
-rw-r--r--  drivers/scsi/amiga7xx.c | 138
-rw-r--r--  drivers/scsi/amiga7xx.h | 23
-rw-r--r--  drivers/scsi/arcmsr/arcmsr.h | 4
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 559
-rw-r--r--  drivers/scsi/bvme6000.c | 76
-rw-r--r--  drivers/scsi/bvme6000.h | 24
-rw-r--r--  drivers/scsi/bvme6000_scsi.c | 135
-rw-r--r--  drivers/scsi/dc395x.c | 163
-rw-r--r--  drivers/scsi/dpt_i2o.c | 33
-rw-r--r--  drivers/scsi/eata.c | 48
-rw-r--r--  drivers/scsi/esp_scsi.c | 30
-rw-r--r--  drivers/scsi/esp_scsi.h | 2
-rw-r--r--  drivers/scsi/fdomain.c | 70
-rw-r--r--  drivers/scsi/gdth.c | 4
-rw-r--r--  drivers/scsi/hptiop.c | 76
-rw-r--r--  drivers/scsi/ibmmca.c | 1267
-rw-r--r--  drivers/scsi/ibmmca.h | 21
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 463
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h | 2
-rw-r--r--  drivers/scsi/ibmvscsi/rpa_vscsi.c | 20
-rw-r--r--  drivers/scsi/initio.c | 3819
-rw-r--r--  drivers/scsi/initio.h | 313
-rw-r--r--  drivers/scsi/ipr.c | 144
-rw-r--r--  drivers/scsi/ips.c | 401
-rw-r--r--  drivers/scsi/ips.h | 44
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 606
-rw-r--r--  drivers/scsi/iscsi_tcp.h | 9
-rw-r--r--  drivers/scsi/jazz_esp.c | 4
-rw-r--r--  drivers/scsi/libiscsi.c | 650
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 10
-rw-r--r--  drivers/scsi/lpfc/Makefile | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 358
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 760
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 182
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 971
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 508
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h | 50
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 15
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 3377
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 2262
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 558
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 948
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 306
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 101
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 1325
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 557
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 2047
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 47
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 523
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.h | 113
-rw-r--r--  drivers/scsi/mac53c94.c | 62
-rw-r--r--  drivers/scsi/megaraid.c | 141
-rw-r--r--  drivers/scsi/megaraid/mega_common.h | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 171
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.h | 4
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c | 112
-rw-r--r--  drivers/scsi/mesh.c | 46
-rw-r--r--  drivers/scsi/mvme16x.c | 78
-rw-r--r--  drivers/scsi/mvme16x.h | 24
-rw-r--r--  drivers/scsi/mvme16x_scsi.c | 158
-rw-r--r--  drivers/scsi/ncr53c8xx.c | 70
-rw-r--r--  drivers/scsi/nsp32.c | 194
-rw-r--r--  drivers/scsi/pcmcia/sym53c500_cs.c | 42
-rw-r--r--  drivers/scsi/ppa.c | 57
-rw-r--r--  drivers/scsi/qla2xxx/Makefile | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 164
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 83
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 91
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 39
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 166
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 242
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 85
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 396
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 497
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 237
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 6
-rw-r--r--  drivers/scsi/qla4xxx/ql4_dbg.c | 174
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 78
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h | 426
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h | 7
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 105
-rw-r--r--  drivers/scsi/qla4xxx/ql4_iocb.c | 101
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c | 114
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 274
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nvram.c | 3
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 96
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h | 3
-rw-r--r--  drivers/scsi/qlogicfas408.c | 30
-rw-r--r--  drivers/scsi/scsi.c | 48
-rw-r--r--  drivers/scsi/scsi_debug.c | 2
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 1
-rw-r--r--  drivers/scsi/scsi_error.c | 26
-rw-r--r--  drivers/scsi/scsi_lib_dma.c | 50
-rw-r--r--  drivers/scsi/scsi_scan.c | 67
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 25
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 831
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 138
-rw-r--r--  drivers/scsi/sd.c | 2
-rw-r--r--  drivers/scsi/sg.c | 2
-rw-r--r--  drivers/scsi/sni_53c710.c | 10
-rw-r--r--  drivers/scsi/sr.c | 6
-rw-r--r--  drivers/scsi/stex.c | 111
-rw-r--r--  drivers/scsi/sun_esp.c | 2
-rw-r--r--  drivers/scsi/sym53c416.c | 44
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 83
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.h | 2
-rw-r--r--  drivers/scsi/tmscsim.c | 85
-rw-r--r--  drivers/scsi/tmscsim.h | 10
-rw-r--r--  drivers/scsi/u14-34f.c | 60
-rw-r--r--  drivers/scsi/ultrastor.c | 19
-rw-r--r--  drivers/scsi/wd7000.c | 20
-rw-r--r--  drivers/scsi/zorro7xx.c | 180
-rw-r--r--  include/scsi/iscsi_if.h | 34
-rw-r--r--  include/scsi/libiscsi.h | 67
-rw-r--r--  include/scsi/scsi_cmnd.h | 20
-rw-r--r--  include/scsi/scsi_device.h | 2
-rw-r--r--  include/scsi/scsi_host.h | 10
-rw-r--r--  include/scsi/scsi_transport_fc.h | 186
-rw-r--r--  include/scsi/scsi_transport_iscsi.h | 16
190 files changed, 21725 insertions, 26337 deletions
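
The recurring change across the hunks below is the conversion of drivers from
the old struct scsi_cmnd fields (use_sg, request_buffer, request_bufflen,
resid) to the data buffer accessors declared in <scsi/scsi_cmnd.h>. A minimal
sketch of the new mapping/completion pattern - "struct my_cmd" and the two
my_*() functions are hypothetical and not taken from any driver in this merge:

	struct my_cmd {				/* hypothetical per-command descriptor */
		dma_addr_t sge_addr[128];
		unsigned int sge_len[128];
		int nseg;
	};

	static int my_queuecommand_map(struct scsi_cmnd *cmd, struct my_cmd *mc)
	{
		struct scatterlist *sg;
		int nseg, i;

		nseg = scsi_dma_map(cmd);	/* 0 = no data, < 0 = mapping error */
		if (nseg < 0)
			return nseg;
		scsi_for_each_sg(cmd, sg, nseg, i) {
			mc->sge_addr[i] = sg_dma_address(sg);
			mc->sge_len[i]  = sg_dma_len(sg);
		}
		mc->nseg = nseg;
		return 0;
	}

	static void my_complete(struct scsi_cmnd *cmd, u32 residual)
	{
		scsi_dma_unmap(cmd);		/* undoes scsi_dma_map() */
		scsi_set_resid(cmd, residual);	/* replaces cmd->resid */
	}
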
diff --git a/Documentation/scsi/aacraid.txt b/Documentation/scsi/aacraid.txt
index ce3cb42507b..cc12b55d4b3 100644
--- a/Documentation/scsi/aacraid.txt
+++ b/Documentation/scsi/aacraid.txt
@@ -50,6 +50,9 @@ Supported Cards/Chipsets
9005:0285:9005:02be Adaptec 31605 (Marauder160)
9005:0285:9005:02c3 Adaptec 51205 (Voodoo120)
9005:0285:9005:02c4 Adaptec 51605 (Voodoo160)
+ 9005:0285:9005:02ce Adaptec 51245 (Voodoo124)
+ 9005:0285:9005:02cf Adaptec 51645 (Voodoo164)
+ 9005:0285:9005:02d0 Adaptec 52445 (Voodoo244)
1011:0046:9005:0364 Adaptec 5400S (Mustang)
9005:0287:9005:0800 Adaptec Themisto (Jupiter)
9005:0200:9005:0200 Adaptec Themisto (Jupiter)
diff --git a/Documentation/scsi/scsi_fc_transport.txt b/Documentation/scsi/scsi_fc_transport.txt
new file mode 100644
index 00000000000..d403e46d846
--- /dev/null
+++ b/Documentation/scsi/scsi_fc_transport.txt
@@ -0,0 +1,450 @@
+                        SCSI FC Transport
+ =============================================
+
+Date: 4/12/2007
+Kernel Revisions for features:
+ rports : <<TBS>>
+ vports : 2.6.22 (? TBD)
+
+
+Introduction
+============
+This file documents the features and components of the SCSI FC Transport.
+It also documents the API between the transport and FC LLDDs.
+The FC transport can be found at:
+ drivers/scsi/scsi_transport_fc.c
+ include/scsi/scsi_transport_fc.h
+ include/scsi/scsi_netlink_fc.h
+
+This file is found at Documentation/scsi/scsi_fc_transport.txt
+
+
+FC Remote Ports (rports)
+========================================================================
+<< To Be Supplied >>
+
+
+FC Virtual Ports (vports)
+========================================================================
+
+Overview:
+-------------------------------
+
+ New FC standards have defined mechanisms which allow a single physical
+ port to appear as multiple communication ports. Using the N_Port Id
+ Virtualization (NPIV) mechanism, a point-to-point connection to a Fabric
+ can be assigned more than 1 N_Port_ID. Each N_Port_ID appears as a
+ separate port to other endpoints on the fabric, even though it shares one
+ physical link to the switch for communication. Each N_Port_ID can have a
+ unique view of the fabric based on fabric zoning and array lun-masking
+ (just like a normal non-NPIV adapter). Using the Virtual Fabric (VF)
+ mechanism, adding a fabric header to each frame allows the port to
+ interact with the Fabric Port to join multiple fabrics. The port will
+ obtain an N_Port_ID on each fabric it joins. Each fabric will have its
+ own unique view of endpoints and configuration parameters. NPIV may be
+ used together with VF so that the port can obtain multiple N_Port_IDs
+ on each virtual fabric.
+
+ The FC transport now recognizes a new object - a vport.  A vport is
+ an entity that has a world-wide unique World Wide Port Name (wwpn) and
+ World Wide Node Name (wwnn). The transport also allows for the FC4's to
+ be specified for the vport, with FCP_Initiator being the primary role
+ expected. Once instantiated by one of the above methods, it will have a
+ distinct N_Port_ID and view of fabric endpoints and storage entities.
+ The fc_host associated with the physical adapter will export the ability
+ to create vports. The transport will create the vport object within the
+ Linux device tree, and instruct the fc_host's driver to instantiate the
+ virtual port. Typically, the driver will create a new scsi_host instance
+ on the vport, resulting in a unique <H,C,T,L> namespace for the vport.
+ Thus, whether a FC port is based on a physical port or on a virtual port,
+ each will appear as a unique scsi_host with its own target and lun space.
+
+ Note: At this time, the transport is written to create only NPIV-based
+ vports. However, consideration was given to VF-based vports and it
+ should be a minor change to add support if needed. The remaining
+ discussion will concentrate on NPIV.
+
+ Note: World Wide Name assignment (and uniqueness guarantees) are left
+ up to an administrative entity controlling the vport. For example,
+ if vports are to be associated with virtual machines, a XEN mgmt
+ utility would be responsible for creating wwpn/wwnn's for the vport,
+ using its own naming authority and OUI. (Note: it already does this
+ for virtual MAC addresses).
+
+
+Device Trees and Vport Objects:
+-------------------------------
+
+ Today, the device tree typically contains the scsi_host object,
+ with rports and scsi target objects underneath it. Currently the FC
+ transport creates the vport object and places it under the scsi_host
+ object corresponding to the physical adapter. The LLDD will allocate
+ a new scsi_host for the vport and link its object under the vport.
+ The remainder of the tree under the vport's scsi_host is the same
+ as the non-NPIV case. The transport is written currently to easily
+ allow the parent of the vport to be something other than the scsi_host.
+ This could be used in the future to link the object onto a vm-specific
+ device tree. If the vport's parent is not the physical port's scsi_host,
+ a symbolic link to the vport object will be placed in the physical
+ port's scsi_host.
+
+ Here's what to expect in the device tree :
+ The typical Physical Port's Scsi_Host:
+ /sys/devices/.../host17/
+    and it has the typical descendant tree:
+ /sys/devices/.../host17/rport-17:0-0/target17:0:0/17:0:0:0:
+ and then the vport is created on the Physical Port:
+ /sys/devices/.../host17/vport-17:0-0
+ and the vport's Scsi_Host is then created:
+ /sys/devices/.../host17/vport-17:0-0/host18
+ and then the rest of the tree progresses, such as:
+ /sys/devices/.../host17/vport-17:0-0/host18/rport-18:0-0/target18:0:0/18:0:0:0:
+
+ Here's what to expect in the sysfs tree:
+  scsi_hosts:
+    /sys/class/scsi_host/host17                   physical port's scsi_host
+    /sys/class/scsi_host/host18                   vport's scsi_host
+  fc_hosts:
+    /sys/class/fc_host/host17                     physical port's fc_host
+    /sys/class/fc_host/host18                     vport's fc_host
+  fc_vports:
+    /sys/class/fc_vports/vport-17:0-0             the vport's fc_vport
+  fc_rports:
+    /sys/class/fc_remote_ports/rport-17:0-0       rport on the physical port
+    /sys/class/fc_remote_ports/rport-18:0-0       rport on the vport
+
+
+Vport Attributes:
+-------------------------------
+
+ The new fc_vport class object has the following attributes
+
+ node_name: Read_Only
+ The WWNN of the vport
+
+ port_name: Read_Only
+ The WWPN of the vport
+
+ roles: Read_Only
+ Indicates the FC4 roles enabled on the vport.
+
+ symbolic_name: Read_Write
+ A string, appended to the driver's symbolic port name string, which
+ is registered with the switch to identify the vport. For example,
+ a hypervisor could set this string to "Xen Domain 2 VM 5 Vport 2",
+ and this set of identifiers can be seen on switch management screens
+ to identify the port.
+
+ vport_delete: Write_Only
+ When written with a "1", will tear down the vport.
+
+ vport_disable: Write_Only
+    When written with a "1", will transition the vport to a disabled
+    state. The vport will still be instantiated with the Linux kernel,
+ but it will not be active on the FC link.
+ When written with a "0", will enable the vport.
+
+ vport_last_state: Read_Only
+ Indicates the previous state of the vport. See the section below on
+ "Vport States".
+
+ vport_state: Read_Only
+ Indicates the state of the vport. See the section below on
+ "Vport States".
+
+ vport_type: Read_Only
+ Reflects the FC mechanism used to create the virtual port.
+ Only NPIV is supported currently.
+
+
+ For the fc_host class object, the following attributes are added for vports:
+
+ max_npiv_vports: Read_Only
+ Indicates the maximum number of NPIV-based vports that the
+ driver/adapter can support on the fc_host.
+
+ npiv_vports_inuse: Read_Only
+ Indicates how many NPIV-based vports have been instantiated on the
+ fc_host.
+
+ vport_create: Write_Only
+ A "simple" create interface to instantiate a vport on an fc_host.
+ A "<WWPN>:<WWNN>" string is written to the attribute. The transport
+ then instantiates the vport object and calls the LLDD to create the
+ vport with the role of FCP_Initiator. Each WWN is specified as 16
+ hex characters and may *not* contain any prefixes (e.g. 0x, x, etc).
+
+ vport_delete: Write_Only
+    A "simple" delete interface to tear down a vport. A "<WWPN>:<WWNN>"
+ string is written to the attribute. The transport will locate the
+ vport on the fc_host with the same WWNs and tear it down. Each WWN
+ is specified as 16 hex characters and may *not* contain any prefixes
+ (e.g. 0x, x, etc).
+
+
+Vport States:
+-------------------------------
+
+ Vport instantiation consists of two parts:
+ - Creation with the kernel and LLDD. This means all transport and
+ driver data structures are built up, and device objects created.
+ This is equivalent to a driver "attach" on an adapter, which is
+ independent of the adapter's link state.
+ - Instantiation of the vport on the FC link via ELS traffic, etc.
+     This is equivalent to a "link up" and successful link initialization.
+     Further information can be found in the interfaces section below for
+ Vport Creation.
+
+ Once a vport has been instantiated with the kernel/LLDD, a vport state
+ can be reported via the sysfs attribute. The following states exist:
+
+ FC_VPORT_UNKNOWN - Unknown
+     A temporary state, typically set only while the vport is being
+ instantiated with the kernel and LLDD.
+
+ FC_VPORT_ACTIVE - Active
+     The vport has been successfully created on the FC link.
+ It is fully functional.
+
+ FC_VPORT_DISABLED - Disabled
+     The vport is instantiated, but "disabled". The vport is not instantiated
+ on the FC link. This is equivalent to a physical port with the
+ link "down".
+
+ FC_VPORT_LINKDOWN - Linkdown
+ The vport is not operational as the physical link is not operational.
+
+ FC_VPORT_INITIALIZING - Initializing
+ The vport is in the process of instantiating on the FC link.
+ The LLDD will set this state just prior to starting the ELS traffic
+ to create the vport. This state will persist until the vport is
+ successfully created (state becomes FC_VPORT_ACTIVE) or it fails
+ (state is one of the values below). As this state is transitory,
+ it will not be preserved in the "vport_last_state".
+
+ FC_VPORT_NO_FABRIC_SUPP - No Fabric Support
+     The vport is not operational. One of the following conditions was
+ encountered:
+ - The FC topology is not Point-to-Point
+ - The FC port is not connected to an F_Port
+ - The F_Port has indicated that NPIV is not supported.
+
+ FC_VPORT_NO_FABRIC_RSCS - No Fabric Resources
+ The vport is not operational. The Fabric failed FDISC with a status
+ indicating that it does not have sufficient resources to complete
+ the operation.
+
+ FC_VPORT_FABRIC_LOGOUT - Fabric Logout
+ The vport is not operational. The Fabric has LOGO'd the N_Port_ID
+ associated with the vport.
+
+ FC_VPORT_FABRIC_REJ_WWN - Fabric Rejected WWN
+ The vport is not operational. The Fabric failed FDISC with a status
+     indicating that the WWNs are not valid.
+
+ FC_VPORT_FAILED - VPort Failed
+ The vport is not operational. This is a catchall for all other
+ error conditions.
+
+
+ The following state table indicates the different state transitions:
+
+     State              Event                          New State
+     --------------------------------------------------------------------
+      n/a               Initialization                 Unknown
+     Unknown:           Link Down                      Linkdown
+                        Link Up & Loop                 No Fabric Support
+                        Link Up & no Fabric            No Fabric Support
+                        Link Up & FLOGI response       No Fabric Support
+                          indicates no NPIV support
+                        Link Up & FDISC being sent     Initializing
+                        Disable request                Disable
+     Linkdown:          Link Up                        Unknown
+     Initializing:      FDISC ACC                      Active
+                        FDISC LS_RJT w/ no resources   No Fabric Resources
+                        FDISC LS_RJT w/ invalid        Fabric Rejected WWN
+                          pname or invalid nport_id
+                        FDISC LS_RJT failed for        Vport Failed
+                          other reasons
+                        Link Down                      Linkdown
+                        Disable request                Disable
+     Disable:           Enable request                 Unknown
+     Active:            LOGO received from fabric      Fabric Logout
+                        Link Down                      Linkdown
+                        Disable request                Disable
+     Fabric Logout:     Link still up                  Unknown
+
+     The following 4 error states all have the same transitions:
+     No Fabric Support:
+     No Fabric Resources:
+     Fabric Rejected WWN:
+     Vport Failed:
+                        Disable request                Disable
+                        Link goes down                 Linkdown
+
+
+Transport <-> LLDD Interfaces :
+-------------------------------
+
+Vport support by LLDD:
+
+ The LLDD indicates support for vports by supplying a vport_create()
+  function in the transport template. The presence of this function will
+ cause the creation of the new attributes on the fc_host. As part of
+ the physical port completing its initialization relative to the
+ transport, it should set the max_npiv_vports attribute to indicate the
+ maximum number of vports the driver and/or adapter supports.
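
  A minimal sketch of such a template (the my_*() handlers are hypothetical
  placeholders; fc_host_max_npiv_vports() is assumed to be the fc_host
  accessor used to advertise the maximum):

     static struct fc_function_template my_physical_port_fn_template = {
             /* ... the usual rport/fc_host handlers ... */
             .vport_create  = my_vport_create,  /* presence enables vport attrs */
             .vport_disable = my_vport_disable,
             .vport_delete  = my_vport_delete,
     };

     /* after attach, advertise vport capacity on the physical port's fc_host */
     fc_host_max_npiv_vports(shost) = my_adapter_max_vports;
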
+
+
+Vport Creation:
+
+ The LLDD vport_create() syntax is:
+
+ int vport_create(struct fc_vport *vport, bool disable)
+
+ where:
+ vport: Is the newly allocated vport object
+     disable: If "true", the vport is to be created in a disabled state.
+ If "false", the vport is to be enabled upon creation.
+
+ When a request is made to create a new vport (via sgio/netlink, or the
+ vport_create fc_host attribute), the transport will validate that the LLDD
+ can support another vport (e.g. max_npiv_vports > npiv_vports_inuse).
+ If not, the create request will be failed. If space remains, the transport
+ will increment the vport count, create the vport object, and then call the
+ LLDD's vport_create() function with the newly allocated vport object.
+
+ As mentioned above, vport creation is divided into two parts:
+ - Creation with the kernel and LLDD. This means all transport and
+ driver data structures are built up, and device objects created.
+ This is equivalent to a driver "attach" on an adapter, which is
+ independent of the adapter's link state.
+ - Instantiation of the vport on the FC link via ELS traffic, etc.
+     This is equivalent to a "link up" and successful link initialization.
+
+ The LLDD's vport_create() function will not synchronously wait for both
+ parts to be fully completed before returning. It must validate that the
+ infrastructure exists to support NPIV, and complete the first part of
+ vport creation (data structure build up) before returning. We do not
+ hinge vport_create() on the link-side operation mainly because:
+ - The link may be down. It is not a failure if it is. It simply
+ means the vport is in an inoperable state until the link comes up.
+ This is consistent with the link bouncing post vport creation.
+ - The vport may be created in a disabled state.
+   - This is consistent with a model where the vport equates to a
+ FC adapter. The vport_create is synonymous with driver attachment
+ to the adapter, which is independent of link state.
+
+ Note: special error codes have been defined to delineate infrastructure
+ failure cases for quicker resolution.
+
+  The expected behavior for the LLDD's vport_create() function is (a
+  skeleton sketch follows this list):
+ - Validate Infrastructure:
+ - If the driver or adapter cannot support another vport, whether
+ due to improper firmware, (a lie about) max_npiv, or a lack of
+ some other resource - return VPCERR_UNSUPPORTED.
+ - If the driver validates the WWN's against those already active on
+ the adapter and detects an overlap - return VPCERR_BAD_WWN.
+ - If the driver detects the topology is loop, non-fabric, or the
+ FLOGI did not support NPIV - return VPCERR_NO_FABRIC_SUPP.
+ - Allocate data structures. If errors are encountered, such as out
+ of memory conditions, return the respective negative Exxx error code.
+    - If the role is FCP Initiator, the LLDD is to:
+ - Call scsi_host_alloc() to allocate a scsi_host for the vport.
+ - Call scsi_add_host(new_shost, &vport->dev) to start the scsi_host
+ and bind it as a child of the vport device.
+      - Initialize the fc_host attribute values.
+    - Kick off further vport state transitions based on the disable flag and
+ link state - and return success (zero).
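
  A skeleton sketch of that behavior (my_vport_sht, struct my_vport and the
  validation details are hypothetical; fc_vport_set_state() is assumed to be
  the transport helper for updating the vport state):

     static int my_vport_create(struct fc_vport *vport, bool disable)
     {
             struct Scsi_Host *new_shost;

             /*
              * Validate infrastructure first: return VPCERR_UNSUPPORTED if
              * no vport resources remain, VPCERR_BAD_WWN on a WWN overlap,
              * VPCERR_NO_FABRIC_SUPP if the topology cannot support NPIV.
              */

             new_shost = scsi_host_alloc(&my_vport_sht, sizeof(struct my_vport));
             if (!new_shost)
                     return -ENOMEM;
             if (scsi_add_host(new_shost, &vport->dev)) {  /* child of the vport */
                     scsi_host_put(new_shost);
                     return -EIO;
             }

             /* initialize fc_host attributes, then, unless disabled and if
              * the link is up, start the link-side (FDISC) bring-up */
             if (!disable)
                     fc_vport_set_state(vport, FC_VPORT_INITIALIZING);
             return 0;
     }
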
+
+ LLDD Implementers Notes:
+   - It is suggested that there be different fc_function_templates for
+ the physical port and the virtual port. The physical port's template
+ would have the vport_create, vport_delete, and vport_disable functions,
+ while the vports would not.
+ - It is suggested that there be different scsi_host_templates
+ for the physical port and virtual port. Likely, there are driver
+ attributes, embedded into the scsi_host_template, that are applicable
+ for the physical port only (link speed, topology setting, etc). This
+ ensures that the attributes are applicable to the respective scsi_host.
+
+
+Vport Disable/Enable:
+
+ The LLDD vport_disable() syntax is:
+
+ int vport_disable(struct fc_vport *vport, bool disable)
+
+ where:
+     vport:   The vport to be enabled or disabled
+ disable: If "true", the vport is to be disabled.
+ If "false", the vport is to be enabled.
+
+ When a request is made to change the disabled state on a vport, the
+ transport will validate the request against the existing vport state.
+ If the request is to disable and the vport is already disabled, the
+ request will fail. Similarly, if the request is to enable, and the
+ vport is not in a disabled state, the request will fail. If the request
+ is valid for the vport state, the transport will call the LLDD to
+ change the vport's state.
+
+ Within the LLDD, if a vport is disabled, it remains instantiated with
+ the kernel and LLDD, but it is not active or visible on the FC link in
+  any way. (see Vport Creation and the two-part instantiation discussion).
+ The vport will remain in this state until it is deleted or re-enabled.
+ When enabling a vport, the LLDD reinstantiates the vport on the FC
+  link - essentially restarting the LLDD state machine (see Vport States
+ above).
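
  A minimal sketch of the corresponding LLDD handler (the my_*() calls are
  hypothetical; fc_vport_set_state() is assumed to be the transport's state
  helper):

     static int my_vport_disable(struct fc_vport *vport, bool disable)
     {
             if (disable) {
                     my_logout_vport(vport);         /* quiesce on the FC link */
                     fc_vport_set_state(vport, FC_VPORT_DISABLED);
             } else {
                     /* restart the link-side bring-up, as for creation */
                     fc_vport_set_state(vport, FC_VPORT_INITIALIZING);
                     my_start_fdisc(vport);
             }
             return 0;
     }
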
+
+
+Vport Deletion:
+
+ The LLDD vport_delete() syntax is:
+
+ int vport_delete(struct fc_vport *vport)
+
+ where:
+     vport:   The vport to delete
+
+ When a request is made to delete a vport (via sgio/netlink, or via the
+ fc_host or fc_vport vport_delete attributes), the transport will call
+ the LLDD to terminate the vport on the FC link, and teardown all other
+  data structures and references. If the LLDD completes successfully,
+ the transport will teardown the vport objects and complete the vport
+ removal. If the LLDD delete request fails, the vport object will remain,
+ but will be in an indeterminate state.
+
+ Within the LLDD, the normal code paths for a scsi_host teardown should
+ be followed. E.g. If the vport has a FCP Initiator role, the LLDD
+  will call fc_remove_host() for the vport's scsi_host, followed by
+  scsi_remove_host() and scsi_host_put() for the vport's scsi_host.
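
  Sketched, with a hypothetical my_vport_shost() lookup for the scsi_host the
  LLDD created for this vport (the three kernel calls and their order are the
  ones named above):

     static int my_vport_delete(struct fc_vport *vport)
     {
             struct Scsi_Host *shost = my_vport_shost(vport);

             my_logout_vport(vport);     /* give up the N_Port_ID on the link */
             fc_remove_host(shost);      /* tear down the vport's rports first */
             scsi_remove_host(shost);
             scsi_host_put(shost);
             return 0;
     }
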
+
+
+Other:
+ fc_host port_type attribute:
+ There is a new fc_host port_type value - FC_PORTTYPE_NPIV. This value
+ must be set on all vport-based fc_hosts. Normally, on a physical port,
+ the port_type attribute would be set to NPORT, NLPORT, etc based on the
+ topology type and existence of the fabric. As this is not applicable to
+ a vport, it makes more sense to report the FC mechanism used to create
+ the vport.
+
+ Driver unload:
+ FC drivers are required to call fc_remove_host() prior to calling
+ scsi_remove_host(). This allows the fc_host to tear down all remote
+    ports prior to the scsi_host being torn down. The fc_remove_host() call
+ was updated to remove all vports for the fc_host as well.
+
+
+Credits
+=======
+The following people have contributed to this document:
+
+
+
+
+
+
+James Smart
+james.smart@emulex.com
+
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 90961a8ea89..4aca7ddfddd 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -555,7 +555,6 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
{
struct scsi_cmnd *cmd;
ctlr_info_t *ctlr;
- u64bit addr64;
ErrorInfo_struct *ei;
ei = cp->err_info;
@@ -569,20 +568,7 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
cmd = (struct scsi_cmnd *) cp->scsi_cmd;
ctlr = hba[cp->ctlr];
- /* undo the DMA mappings */
-
- if (cmd->use_sg) {
- pci_unmap_sg(ctlr->pdev,
- cmd->request_buffer, cmd->use_sg,
- cmd->sc_data_direction);
- }
- else if (cmd->request_bufflen) {
- addr64.val32.lower = cp->SG[0].Addr.lower;
- addr64.val32.upper = cp->SG[0].Addr.upper;
- pci_unmap_single(ctlr->pdev, (dma_addr_t) addr64.val,
- cmd->request_bufflen,
- cmd->sc_data_direction);
- }
+ scsi_dma_unmap(cmd);
cmd->result = (DID_OK << 16); /* host byte */
cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
@@ -597,7 +583,7 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
SCSI_SENSE_BUFFERSIZE :
ei->SenseLen);
- cmd->resid = ei->ResidualCnt;
+ scsi_set_resid(cmd, ei->ResidualCnt);
if(ei->CommandStatus != 0)
{ /* an error has occurred */
@@ -1204,46 +1190,29 @@ cciss_scatter_gather(struct pci_dev *pdev,
CommandList_struct *cp,
struct scsi_cmnd *cmd)
{
- unsigned int use_sg, nsegs=0, len;
- struct scatterlist *scatter = (struct scatterlist *) cmd->request_buffer;
+ unsigned int len;
+ struct scatterlist *sg;
__u64 addr64;
-
- /* is it just one virtual address? */
- if (!cmd->use_sg) {
- if (cmd->request_bufflen) { /* anything to xfer? */
-
- addr64 = (__u64) pci_map_single(pdev,
- cmd->request_buffer,
- cmd->request_bufflen,
- cmd->sc_data_direction);
-
- cp->SG[0].Addr.lower =
- (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
- cp->SG[0].Addr.upper =
- (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
- cp->SG[0].Len = cmd->request_bufflen;
- nsegs=1;
- }
- } /* else, must be a list of virtual addresses.... */
- else if (cmd->use_sg <= MAXSGENTRIES) { /* not too many addrs? */
-
- use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg,
- cmd->sc_data_direction);
-
- for (nsegs=0; nsegs < use_sg; nsegs++) {
- addr64 = (__u64) sg_dma_address(&scatter[nsegs]);
- len = sg_dma_len(&scatter[nsegs]);
- cp->SG[nsegs].Addr.lower =
- (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
- cp->SG[nsegs].Addr.upper =
- (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
- cp->SG[nsegs].Len = len;
- cp->SG[nsegs].Ext = 0; // we are not chaining
+ int use_sg, i;
+
+ BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
+
+ use_sg = scsi_dma_map(cmd);
+ if (use_sg) { /* not too many addrs? */
+ scsi_for_each_sg(cmd, sg, use_sg, i) {
+ addr64 = (__u64) sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ cp->SG[i].Addr.lower =
+ (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+ cp->SG[i].Addr.upper =
+ (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+ cp->SG[i].Len = len;
+ cp->SG[i].Ext = 0; // we are not chaining
}
- } else BUG();
+ }
- cp->Header.SGList = (__u8) nsegs; /* no. SGs contig in this cmd */
- cp->Header.SGTotal = (__u16) nsegs; /* total sgs in this cmd list */
+ cp->Header.SGList = (__u8) use_sg; /* no. SGs contig in this cmd */
+ cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */
return;
}
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index e0c385a3b45..e882cb951b4 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -1509,69 +1509,6 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
}
}
-static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb,
- struct sbp2_fwhost_info *hi,
- struct sbp2_command_info *cmd,
- struct scatterlist *sgpnt,
- u32 orb_direction,
- unsigned int scsi_request_bufflen,
- void *scsi_request_buffer,
- enum dma_data_direction dma_dir)
-{
- cmd->dma_dir = dma_dir;
- cmd->dma_size = scsi_request_bufflen;
- cmd->dma_type = CMD_DMA_SINGLE;
- cmd->cmd_dma = dma_map_single(hi->host->device.parent,
- scsi_request_buffer,
- cmd->dma_size, cmd->dma_dir);
- orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
- orb->misc |= ORB_SET_DIRECTION(orb_direction);
-
- /* handle case where we get a command w/o s/g enabled
- * (but check for transfers larger than 64K) */
- if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) {
-
- orb->data_descriptor_lo = cmd->cmd_dma;
- orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen);
-
- } else {
- /* The buffer is too large. Turn this into page tables. */
-
- struct sbp2_unrestricted_page_table *sg_element =
- &cmd->scatter_gather_element[0];
- u32 sg_count, sg_len;
- dma_addr_t sg_addr;
-
- orb->data_descriptor_lo = cmd->sge_dma;
- orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
-
- /* fill out our SBP-2 page tables; split up the large buffer */
- sg_count = 0;
- sg_len = scsi_request_bufflen;
- sg_addr = cmd->cmd_dma;
- while (sg_len) {
- sg_element[sg_count].segment_base_lo = sg_addr;
- if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
- sg_element[sg_count].length_segment_base_hi =
- PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
- sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
- sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
- } else {
- sg_element[sg_count].length_segment_base_hi =
- PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
- sg_len = 0;
- }
- sg_count++;
- }
-
- orb->misc |= ORB_SET_DATA_SIZE(sg_count);
-
- sbp2util_cpu_to_be32_buffer(sg_element,
- (sizeof(struct sbp2_unrestricted_page_table)) *
- sg_count);
- }
-}
-
static void sbp2_create_command_orb(struct sbp2_lu *lu,
struct sbp2_command_info *cmd,
unchar *scsi_cmd,
@@ -1615,13 +1552,9 @@ static void sbp2_create_command_orb(struct sbp2_lu *lu,
orb->data_descriptor_hi = 0x0;
orb->data_descriptor_lo = 0x0;
orb->misc |= ORB_SET_DIRECTION(1);
- } else if (scsi_use_sg)
+ } else
sbp2_prep_command_orb_sg(orb, hi, cmd, scsi_use_sg, sgpnt,
orb_direction, dma_dir);
- else
- sbp2_prep_command_orb_no_sg(orb, hi, cmd, sgpnt, orb_direction,
- scsi_request_bufflen,
- scsi_request_buffer, dma_dir);
sbp2util_cpu_to_be32_buffer(orb, sizeof(*orb));
@@ -1710,15 +1643,15 @@ static int sbp2_send_command(struct sbp2_lu *lu, struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *))
{
unchar *scsi_cmd = (unchar *)SCpnt->cmnd;
- unsigned int request_bufflen = SCpnt->request_bufflen;
+ unsigned int request_bufflen = scsi_bufflen(SCpnt);
struct sbp2_command_info *cmd;
cmd = sbp2util_allocate_command_orb(lu, SCpnt, done);
if (!cmd)
return -EIO;
- sbp2_create_command_orb(lu, cmd, scsi_cmd, SCpnt->use_sg,
- request_bufflen, SCpnt->request_buffer,
+ sbp2_create_command_orb(lu, cmd, scsi_cmd, scsi_sg_count(SCpnt),
+ request_bufflen, scsi_sglist(SCpnt),
SCpnt->sc_data_direction);
sbp2_link_orb_command(lu, cmd);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index dd221eda3ea..effdee299b0 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -134,19 +134,9 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
{
struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
- struct scsi_cmnd *sc = ctask->sc;
iser_ctask->command_sent = 0;
iser_ctask->iser_conn = iser_conn;
-
- if (sc->sc_data_direction == DMA_TO_DEVICE) {
- BUG_ON(ctask->total_length == 0);
-
- debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
- ctask->itt, ctask->total_length, ctask->imm_count,
- ctask->unsol_count);
- }
-
iser_ctask_rdma_init(iser_ctask);
}
@@ -219,6 +209,14 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
int error = 0;
+ if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
+ BUG_ON(scsi_bufflen(ctask->sc) == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+ ctask->itt, scsi_bufflen(ctask->sc),
+ ctask->imm_count, ctask->unsol_count);
+ }
+
debug_scsi("ctask deq [cid %d itt 0x%x]\n",
conn->id, ctask->itt);
@@ -375,7 +373,8 @@ static struct iscsi_transport iscsi_iser_transport;
static struct iscsi_cls_session *
iscsi_iser_session_create(struct iscsi_transport *iscsit,
struct scsi_transport_template *scsit,
- uint32_t initial_cmdsn, uint32_t *hostno)
+ uint16_t cmds_max, uint16_t qdepth,
+ uint32_t initial_cmdsn, uint32_t *hostno)
{
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
@@ -386,7 +385,13 @@ iscsi_iser_session_create(struct iscsi_transport *iscsit,
struct iscsi_iser_cmd_task *iser_ctask;
struct iser_desc *desc;
+ /*
+ * we do not support setting can_queue cmd_per_lun from userspace yet
+ * because we preallocate so many resources
+ */
cls_session = iscsi_session_setup(iscsit, scsit,
+ ISCSI_DEF_XMIT_CMDS_MAX,
+ ISCSI_MAX_CMD_PER_LUN,
sizeof(struct iscsi_iser_cmd_task),
sizeof(struct iser_desc),
initial_cmdsn, &hn);
@@ -545,7 +550,7 @@ iscsi_iser_ep_disconnect(__u64 ep_handle)
static struct scsi_host_template iscsi_iser_sht = {
.name = "iSCSI Initiator over iSER, v." DRV_VER,
.queuecommand = iscsi_queuecommand,
- .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
+ .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
.sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
.max_sectors = 1024,
.cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
@@ -574,8 +579,12 @@ static struct iscsi_transport iscsi_iser_transport = {
ISCSI_EXP_STATSN |
ISCSI_PERSISTENT_PORT |
ISCSI_PERSISTENT_ADDRESS |
- ISCSI_TARGET_NAME |
- ISCSI_TPGT,
+ ISCSI_TARGET_NAME | ISCSI_TPGT |
+ ISCSI_USERNAME | ISCSI_PASSWORD |
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+ ISCSI_HOST_NETDEV_NAME |
+ ISCSI_HOST_INITIATOR_NAME,
.host_template = &iscsi_iser_sht,
.conndata_size = sizeof(struct iscsi_conn),
.max_lun = ISCSI_ISER_MAX_LUN,
@@ -592,6 +601,9 @@ static struct iscsi_transport iscsi_iser_transport = {
.get_session_param = iscsi_session_get_param,
.start_conn = iscsi_iser_conn_start,
.stop_conn = iscsi_conn_stop,
+ /* iscsi host params */
+ .get_host_param = iscsi_host_get_param,
+ .set_host_param = iscsi_host_set_param,
/* IO */
.send_pdu = iscsi_conn_send_pdu,
.get_stats = iscsi_iser_conn_get_stats,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 8960196ffb0..e2353701e8b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -98,7 +98,7 @@
#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
* SCSI_TMFUNC(2), LOGOUT(1) */
-#define ISER_QP_MAX_RECV_DTOS (ISCSI_XMIT_CMDS_MAX + \
+#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX + \
ISER_MAX_RX_MISC_PDUS + \
ISER_MAX_TX_MISC_PDUS)
@@ -110,7 +110,7 @@
#define ISER_INFLIGHT_DATAOUTS 8
-#define ISER_QP_MAX_REQ_DTOS (ISCSI_XMIT_CMDS_MAX * \
+#define ISER_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX * \
(1 + ISER_INFLIGHT_DATAOUTS) + \
ISER_MAX_TX_MISC_PDUS + \
ISER_MAX_RX_MISC_PDUS)
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 3651072f6c1..9ea5b9aaba7 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -351,18 +351,12 @@ int iser_send_command(struct iscsi_conn *conn,
else
data_buf = &iser_ctask->data[ISER_DIR_OUT];
- if (sc->use_sg) { /* using a scatter list */
- data_buf->buf = sc->request_buffer;
- data_buf->size = sc->use_sg;
- } else if (sc->request_bufflen) {
- /* using a single buffer - convert it into one entry SG */
- sg_init_one(&data_buf->sg_single,
- sc->request_buffer, sc->request_bufflen);
- data_buf->buf = &data_buf->sg_single;
- data_buf->size = 1;
+ if (scsi_sg_count(sc)) { /* using a scatter list */
+ data_buf->buf = scsi_sglist(sc);
+ data_buf->size = scsi_sg_count(sc);
}
- data_buf->data_len = sc->request_bufflen;
+ data_buf->data_len = scsi_bufflen(sc);
if (hdr->flags & ISCSI_FLAG_CMD_READ) {
err = iser_prepare_read_cmd(ctask, edtl);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 3702e237555..2044de1164a 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -155,8 +155,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
/* make the pool size twice the max number of SCSI commands *
* the ML is expected to queue, watermark for unmap at 50% */
- params.pool_size = ISCSI_XMIT_CMDS_MAX * 2;
- params.dirty_watermark = ISCSI_XMIT_CMDS_MAX;
+ params.pool_size = ISCSI_DEF_XMIT_CMDS_MAX * 2;
+ params.dirty_watermark = ISCSI_DEF_XMIT_CMDS_MAX;
params.cache = 0;
params.flush_function = NULL;
params.access = (IB_ACCESS_LOCAL_WRITE |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 39bf057fbc4..f01ca182f22 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -455,10 +455,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
struct srp_target_port *target,
struct srp_request *req)
{
- struct scatterlist *scat;
- int nents;
-
- if (!scmnd->request_buffer ||
+ if (!scsi_sglist(scmnd) ||
(scmnd->sc_data_direction != DMA_TO_DEVICE &&
scmnd->sc_data_direction != DMA_FROM_DEVICE))
return;
@@ -468,20 +465,8 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
req->fmr = NULL;
}
- /*
- * This handling of non-SG commands can be killed when the
- * SCSI midlayer no longer generates non-SG commands.
- */
- if (likely(scmnd->use_sg)) {
- nents = scmnd->use_sg;
- scat = scmnd->request_buffer;
- } else {
- nents = 1;
- scat = &req->fake_sg;
- }
-
- ib_dma_unmap_sg(target->srp_host->dev->dev, scat, nents,
- scmnd->sc_data_direction);
+ ib_dma_unmap_sg(target->srp_host->dev->dev, scsi_sglist(scmnd),
+ scsi_sg_count(scmnd), scmnd->sc_data_direction);
}
static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
@@ -595,6 +580,7 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
int ret;
struct srp_device *dev = target->srp_host->dev;
struct ib_device *ibdev = dev->dev;
+ struct scatterlist *sg;
if (!dev->fmr_pool)
return -ENODEV;
@@ -604,16 +590,16 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
return -EINVAL;
len = page_cnt = 0;
- for (i = 0; i < sg_cnt; ++i) {
- unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+ scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
+ unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
- if (ib_sg_dma_address(ibdev, &scat[i]) & ~dev->fmr_page_mask) {
+ if (ib_sg_dma_address(ibdev, sg) & ~dev->fmr_page_mask) {
if (i > 0)
return -EINVAL;
else
++page_cnt;
}
- if ((ib_sg_dma_address(ibdev, &scat[i]) + dma_len) &
+ if ((ib_sg_dma_address(ibdev, sg) + dma_len) &
~dev->fmr_page_mask) {
if (i < sg_cnt - 1)
return -EINVAL;
@@ -633,12 +619,12 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
return -ENOMEM;
page_cnt = 0;
- for (i = 0; i < sg_cnt; ++i) {
- unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+ scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
+ unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
for (j = 0; j < dma_len; j += dev->fmr_page_size)
dma_pages[page_cnt++] =
- (ib_sg_dma_address(ibdev, &scat[i]) &
+ (ib_sg_dma_address(ibdev, sg) &
dev->fmr_page_mask) + j;
}
@@ -673,7 +659,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
struct srp_device *dev;
struct ib_device *ibdev;
- if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
+ if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
return sizeof (struct srp_cmd);
if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
@@ -683,18 +669,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
return -EINVAL;
}
- /*
- * This handling of non-SG commands can be killed when the
- * SCSI midlayer no longer generates non-SG commands.
- */
- if (likely(scmnd->use_sg)) {
- nents = scmnd->use_sg;
- scat = scmnd->request_buffer;
- } else {
- nents = 1;
- scat = &req->fake_sg;
- sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
- }
+ nents = scsi_sg_count(scmnd);
+ scat = scsi_sglist(scmnd);
dev = target->srp_host->dev;
ibdev = dev->dev;
@@ -724,6 +700,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
* descriptor.
*/
struct srp_indirect_buf *buf = (void *) cmd->add_data;
+ struct scatterlist *sg;
u32 datalen = 0;
int i;
@@ -732,11 +709,11 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
sizeof (struct srp_indirect_buf) +
count * sizeof (struct srp_direct_buf);
- for (i = 0; i < count; ++i) {
- unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+ scsi_for_each_sg(scmnd, sg, count, i) {
+ unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
buf->desc_list[i].va =
- cpu_to_be64(ib_sg_dma_address(ibdev, &scat[i]));
+ cpu_to_be64(ib_sg_dma_address(ibdev, sg));
buf->desc_list[i].key =
cpu_to_be32(dev->mr->rkey);
buf->desc_list[i].len = cpu_to_be32(dma_len);
@@ -802,9 +779,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
}
if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
- scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt);
+ scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
- scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);
+ scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
if (!req->tsk_mgmt) {
scmnd->host_scribble = (void *) -1L;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 1d53c7bc368..e3573e7038c 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -106,11 +106,6 @@ struct srp_request {
struct srp_iu *cmd;
struct srp_iu *tsk_mgmt;
struct ib_pool_fmr *fmr;
- /*
- * Fake scatterlist used when scmnd->use_sg==0. Can be killed
- * when the SCSI midlayer no longer generates non-SG commands.
- */
- struct scatterlist fake_sg;
struct completion done;
short index;
u8 cmd_done;
diff --git a/drivers/message/fusion/linux_compat.h b/drivers/message/fusion/linux_compat.h
deleted file mode 100644
index bb2bf5aa0b6..00000000000
--- a/drivers/message/fusion/linux_compat.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* drivers/message/fusion/linux_compat.h */
-
-#ifndef FUSION_LINUX_COMPAT_H
-#define FUSION_LINUX_COMPAT_H
-
-#include <linux/version.h>
-#include <scsi/scsi_device.h>
-
-#endif /* _LINUX_COMPAT_H */
diff --git a/drivers/message/fusion/lsi/mpi.h b/drivers/message/fusion/lsi/mpi.h
index 75223bf24ae..6a92e3d118f 100644
--- a/drivers/message/fusion/lsi/mpi.h
+++ b/drivers/message/fusion/lsi/mpi.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2006 LSI Logic Corporation.
+ * Copyright (c) 2000-2007 LSI Logic Corporation.
*
*
* Name: mpi.h
* Title: MPI Message independent structures and definitions
* Creation Date: July 27, 2000
*
- * mpi.h Version: 01.05.12
+ * mpi.h Version: 01.05.13
*
* Version History
* ---------------
@@ -78,6 +78,7 @@
* 08-30-05 01.05.10 Added 2 new IOCStatus codes for Target.
* 03-27-06 01.05.11 Bumped MPI_HEADER_VERSION_UNIT.
* 10-11-06 01.05.12 Bumped MPI_HEADER_VERSION_UNIT.
+ * 05-24-07 01.05.13 Bumped MPI_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -108,7 +109,7 @@
/* Note: The major versions of 0xe0 through 0xff are reserved */
/* versioning for this MPI header set */
-#define MPI_HEADER_VERSION_UNIT (0x0E)
+#define MPI_HEADER_VERSION_UNIT (0x10)
#define MPI_HEADER_VERSION_DEV (0x00)
#define MPI_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 0e4c8e77a81..eda769730e3 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2006 LSI Logic Corporation.
+ * Copyright (c) 2000-2007 LSI Logic Corporation.
*
*
* Name: mpi_cnfg.h
* Title: MPI Config message, structures, and Pages
* Creation Date: July 27, 2000
*
- * mpi_cnfg.h Version: 01.05.13
+ * mpi_cnfg.h Version: 01.05.15
*
* Version History
* ---------------
@@ -293,6 +293,21 @@
* Added more AccessStatus values for SAS Device Page 0.
* Added bit for SATA Asynchronous Notification Support in
* Flags field of SAS Device Page 0.
+ * 02-28-07 01.05.14 Added ExtFlags field to Manufacturing Page 4.
+ * Added Disable SMART Polling for CapabilitiesFlags of
+ * IOC Page 6.
+ * Added Disable SMART Polling to DeviceSettings of BIOS
+ * Page 1.
+ * Added Multi-Port Domain bit for DiscoveryStatus field
+ * of SAS IO Unit Page.
+ * Added Multi-Port Domain Illegal flag for SAS IO Unit
+ * Page 1 AdditionalControlFlags field.
+ * 05-24-07 01.05.15 Added Hide Physical Disks with Non-Integrated RAID
+ * Metadata bit to Manufacturing Page 4 ExtFlags field.
+ * Added Internal Connector to End Device Present bit to
+ * Expander Page 0 Flags field.
+ * Fixed define for
+ * MPI_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED.
* --------------------------------------------------------------------------
*/
@@ -639,7 +654,7 @@ typedef struct _CONFIG_PAGE_MANUFACTURING_4
U8 InfoSize1; /* 0Bh */
U8 InquirySize; /* 0Ch */
U8 Flags; /* 0Dh */
- U16 Reserved2; /* 0Eh */
+ U16 ExtFlags; /* 0Eh */
U8 InquiryData[56]; /* 10h */
U32 ISVolumeSettings; /* 48h */
U32 IMEVolumeSettings; /* 4Ch */
@@ -658,7 +673,7 @@ typedef struct _CONFIG_PAGE_MANUFACTURING_4
} CONFIG_PAGE_MANUFACTURING_4, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_4,
ManufacturingPage4_t, MPI_POINTER pManufacturingPage4_t;
-#define MPI_MANUFACTURING4_PAGEVERSION (0x04)
+#define MPI_MANUFACTURING4_PAGEVERSION (0x05)
/* defines for the Flags field */
#define MPI_MANPAGE4_FORCE_BAD_BLOCK_TABLE (0x80)
@@ -670,6 +685,12 @@ typedef struct _CONFIG_PAGE_MANUFACTURING_4
#define MPI_MANPAGE4_IM_RESYNC_CACHE_ENABLE (0x02)
#define MPI_MANPAGE4_IR_NO_MIX_SAS_SATA (0x01)
+/* defines for the ExtFlags field */
+#define MPI_MANPAGE4_EXTFLAGS_HIDE_NON_IR_METADATA (0x0008)
+#define MPI_MANPAGE4_EXTFLAGS_SAS_CACHE_DISABLE (0x0004)
+#define MPI_MANPAGE4_EXTFLAGS_SATA_CACHE_DISABLE (0x0002)
+#define MPI_MANPAGE4_EXTFLAGS_LEGACY_MODE (0x0001)
+
#ifndef MPI_MANPAGE5_NUM_FORCEWWID
#define MPI_MANPAGE5_NUM_FORCEWWID (1)
@@ -781,7 +802,7 @@ typedef struct _CONFIG_PAGE_MANUFACTURING_9
} CONFIG_PAGE_MANUFACTURING_9, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_9,
ManufacturingPage9_t, MPI_POINTER pManufacturingPage9_t;
-#define MPI_MANUFACTURING6_PAGEVERSION (0x00)
+#define MPI_MANUFACTURING9_PAGEVERSION (0x00)
typedef struct _CONFIG_PAGE_MANUFACTURING_10
@@ -1138,6 +1159,8 @@ typedef struct _CONFIG_PAGE_IOC_6
/* IOC Page 6 Capabilities Flags */
+#define MPI_IOCPAGE6_CAP_FLAGS_DISABLE_SMART_POLLING (0x00000008)
+
#define MPI_IOCPAGE6_CAP_FLAGS_MASK_METADATA_SIZE (0x00000006)
#define MPI_IOCPAGE6_CAP_FLAGS_64MB_METADATA_SIZE (0x00000000)
#define MPI_IOCPAGE6_CAP_FLAGS_512MB_METADATA_SIZE (0x00000002)
@@ -1208,6 +1231,7 @@ typedef struct _CONFIG_PAGE_BIOS_1
#define MPI_BIOSPAGE1_IOCSET_ALTERNATE_CHS (0x00000008)
/* values for the DeviceSettings field */
+#define MPI_BIOSPAGE1_DEVSET_DISABLE_SMART_POLLING (0x00000010)
#define MPI_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN (0x00000008)
#define MPI_BIOSPAGE1_DEVSET_DISABLE_RM_LUN (0x00000004)
#define MPI_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002)
@@ -2281,11 +2305,11 @@ typedef struct _CONFIG_PAGE_RAID_VOL_0
typedef struct _CONFIG_PAGE_RAID_VOL_1
{
CONFIG_PAGE_HEADER Header; /* 00h */
- U8 VolumeID; /* 01h */
- U8 VolumeBus; /* 02h */
- U8 VolumeIOC; /* 03h */
- U8 Reserved0; /* 04h */
- U8 GUID[24]; /* 05h */
+ U8 VolumeID; /* 04h */
+ U8 VolumeBus; /* 05h */
+ U8 VolumeIOC; /* 06h */
+ U8 Reserved0; /* 07h */
+ U8 GUID[24]; /* 08h */
U8 Name[32]; /* 20h */
U64 WWID; /* 40h */
U32 Reserved1; /* 48h */
@@ -2340,7 +2364,7 @@ typedef struct _RAID_PHYS_DISK0_STATUS
} RAID_PHYS_DISK0_STATUS, MPI_POINTER PTR_RAID_PHYS_DISK0_STATUS,
RaidPhysDiskStatus_t, MPI_POINTER pRaidPhysDiskStatus_t;
-/* RAID Volume 2 IM Physical Disk DiskStatus flags */
+/* RAID Physical Disk PhysDiskStatus flags */
#define MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x01)
#define MPI_PHYSDISK0_STATUS_FLAG_QUIESCED (0x02)
@@ -2544,6 +2568,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
#define MPI_SAS_IOUNIT0_DS_TABLE_LINK (0x00000400)
#define MPI_SAS_IOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800)
#define MPI_SAS_IOUNIT0_DS_MAX_SATA_TARGETS (0x00001000)
+#define MPI_SAS_IOUNIT0_DS_MULTI_PORT_DOMAIN (0x00002000)
typedef struct _MPI_SAS_IO_UNIT1_PHY_DATA
@@ -2607,6 +2632,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_1
#define MPI_SAS_IOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001)
/* values for SAS IO Unit Page 1 AdditionalControlFlags */
+#define MPI_SAS_IOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
#define MPI_SAS_IOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
#define MPI_SAS_IOUNIT1_ACONTROL_HIDE_NONZERO_ATTACHED_PHY_IDENT (0x0020)
#define MPI_SAS_IOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010)
@@ -2734,6 +2760,7 @@ typedef struct _CONFIG_PAGE_SAS_EXPANDER_0
#define MPI_SAS_EXPANDER0_DS_UNSUPPORTED_DEVICE (0x00000800)
/* values for SAS Expander Page 0 Flags field */
+#define MPI_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x04)
#define MPI_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x02)
#define MPI_SAS_EXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x01)
@@ -2774,7 +2801,7 @@ typedef struct _CONFIG_PAGE_SAS_EXPANDER_1
/* see mpi_sas.h for values for SAS Expander Page 1 AttachedDeviceInfo values */
/* values for SAS Expander Page 1 DiscoveryInfo field */
-#define MPI_SAS_EXPANDER1_DISCINFO_BAD_PHY DISABLED (0x04)
+#define MPI_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
#define MPI_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
#define MPI_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
@@ -2895,11 +2922,11 @@ typedef struct _CONFIG_PAGE_SAS_PHY_0
U8 AttachedPhyIdentifier; /* 16h */
U8 Reserved2; /* 17h */
U32 AttachedDeviceInfo; /* 18h */
- U8 ProgrammedLinkRate; /* 20h */
- U8 HwLinkRate; /* 21h */
- U8 ChangeCount; /* 22h */
- U8 Flags; /* 23h */
- U32 PhyInfo; /* 24h */
+ U8 ProgrammedLinkRate; /* 1Ch */
+ U8 HwLinkRate; /* 1Dh */
+ U8 ChangeCount; /* 1Eh */
+ U8 Flags; /* 1Fh */
+ U32 PhyInfo; /* 20h */
} CONFIG_PAGE_SAS_PHY_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_PHY_0,
SasPhyPage0_t, MPI_POINTER pSasPhyPage0_t;
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
index ddc7ae029dd..a1f479057ea 100644
--- a/drivers/message/fusion/lsi/mpi_history.txt
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -3,28 +3,28 @@
MPI Header File Change History
==============================
- Copyright (c) 2000-2006 LSI Logic Corporation.
+ Copyright (c) 2000-2007 LSI Logic Corporation.
---------------------------------------
- Header Set Release Version: 01.05.14
- Header Set Release Date: 10-11-06
+ Header Set Release Version: 01.05.16
+ Header Set Release Date: 05-24-07
---------------------------------------
Filename Current version Prior version
---------- --------------- -------------
- mpi.h 01.05.12 01.05.11
- mpi_ioc.h 01.05.12 01.05.11
- mpi_cnfg.h 01.05.13 01.05.12
- mpi_init.h 01.05.08 01.05.07
+ mpi.h 01.05.13 01.05.12
+ mpi_ioc.h 01.05.14 01.05.13
+ mpi_cnfg.h 01.05.15 01.05.14
+ mpi_init.h 01.05.09 01.05.09
mpi_targ.h 01.05.06 01.05.06
mpi_fc.h 01.05.01 01.05.01
mpi_lan.h 01.05.01 01.05.01
- mpi_raid.h 01.05.02 01.05.02
+ mpi_raid.h 01.05.03 01.05.03
mpi_tool.h 01.05.03 01.05.03
mpi_inb.h 01.05.01 01.05.01
- mpi_sas.h 01.05.04 01.05.03
+ mpi_sas.h 01.05.04 01.05.04
mpi_type.h 01.05.02 01.05.02
- mpi_history.txt 01.05.14 01.05.13
+ mpi_history.txt 01.05.14 01.05.14
* Date Version Description
@@ -95,6 +95,7 @@ mpi.h
* 08-30-05 01.05.10 Added 2 new IOCStatus codes for Target.
* 03-27-06 01.05.11 Bumped MPI_HEADER_VERSION_UNIT.
* 10-11-06 01.05.12 Bumped MPI_HEADER_VERSION_UNIT.
+ * 05-24-07 01.05.13 Bumped MPI_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
mpi_ioc.h
@@ -191,6 +192,13 @@ mpi_ioc.h
* data structure.
* Added new ImageType values for FWDownload and FWUpload
* requests.
+ * 02-28-07 01.05.13 Added MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT for SAS
+ * Broadcast Event Data (replacing _RESERVED2).
+ * For Discovery Error Event Data DiscoveryStatus field,
+ * replaced _MULTPL_PATHS with _UNSUPPORTED_DEVICE and
+ * added _MULTI_PORT_DOMAIN.
+ * 05-24-07 01.05.14 Added Common Boot Block type to FWDownload Request.
+ * Added Common Boot Block type to FWUpload Request.
* --------------------------------------------------------------------------
mpi_cnfg.h
@@ -473,6 +481,21 @@ mpi_cnfg.h
* Added more AccessStatus values for SAS Device Page 0.
* Added bit for SATA Asynchronous Notification Support in
* Flags field of SAS Device Page 0.
+ * 02-28-07 01.05.14 Added ExtFlags field to Manufacturing Page 4.
+ * Added Disable SMART Polling for CapabilitiesFlags of
+ * IOC Page 6.
+ * Added Disable SMART Polling to DeviceSettings of BIOS
+ * Page 1.
+ * Added Multi-Port Domain bit for DiscoveryStatus field
+ * of SAS IO Unit Page.
+ * Added Multi-Port Domain Illegal flag for SAS IO Unit
+ * Page 1 AdditionalControlFlags field.
+ * 05-24-07 01.05.15 Added Hide Physical Disks with Non-Integrated RAID
+ * Metadata bit to Manufacturing Page 4 ExtFlags field.
+ * Added Internal Connector to End Device Present bit to
+ * Expander Page 0 Flags field.
+ * Fixed define for
+ * MPI_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED.
* --------------------------------------------------------------------------
mpi_init.h
@@ -517,6 +540,8 @@ mpi_init.h
* unique in the first 32 characters.
* 03-27-06 01.05.07 Added Task Management type of Clear ACA.
* 10-11-06 01.05.08 Shortened define for Task Management type of Clear ACA.
+ * 02-28-07 01.05.09 Defined two new MsgFlags bits for SCSI Task Management
+ * Request: Do Not Send Task IU and Soft Reset Option.
* --------------------------------------------------------------------------
mpi_targ.h
@@ -571,7 +596,7 @@ mpi_fc.h
* 11-02-00 01.01.01 Original release for post 1.0 work
* 12-04-00 01.01.02 Added messages for Common Transport Send and
* Primitive Send.
- * 01-09-01 01.01.03 Modified some of the new flags to have an MPI prefix
+ * 01-09-01 01.01.03 Modifed some of the new flags to have an MPI prefix
* and modified the FcPrimitiveSend flags.
* 01-25-01 01.01.04 Move InitiatorIndex in LinkServiceRsp reply to a larger
* field.
@@ -634,6 +659,8 @@ mpi_raid.h
* 08-19-04 01.05.01 Original release for MPI v1.5.
* 01-15-05 01.05.02 Added defines for the two new RAID Actions for
* _SET_RESYNC_RATE and _SET_DATA_SCRUB_RATE.
+ * 02-28-07 01.05.03 Added new RAID Action, Device FW Update Mode, and
+ * associated defines.
* --------------------------------------------------------------------------
mpi_tool.h
@@ -682,7 +709,22 @@ mpi_type.h
mpi_history.txt Parts list history
-Filename 01.05.13 01.05.13 01.05.12 01.05.11 01.05.10 01.05.09
+Filename 01.05.15 01.05.15
+---------- -------- --------
+mpi.h 01.05.12 01.05.13
+mpi_ioc.h 01.05.13 01.05.14
+mpi_cnfg.h 01.05.14 01.05.15
+mpi_init.h 01.05.09 01.05.09
+mpi_targ.h 01.05.06 01.05.06
+mpi_fc.h 01.05.01 01.05.01
+mpi_lan.h 01.05.01 01.05.01
+mpi_raid.h 01.05.03 01.05.03
+mpi_tool.h 01.05.03 01.05.03
+mpi_inb.h 01.05.01 01.05.01
+mpi_sas.h 01.05.04 01.05.04
+mpi_type.h 01.05.02 01.05.02
+
+Filename 01.05.14 01.05.13 01.05.12 01.05.11 01.05.10 01.05.09
---------- -------- -------- -------- -------- -------- --------
mpi.h 01.05.12 01.05.11 01.05.10 01.05.09 01.05.08 01.05.07
mpi_ioc.h 01.05.12 01.05.11 01.05.10 01.05.09 01.05.09 01.05.08
diff --git a/drivers/message/fusion/lsi/mpi_inb.h b/drivers/message/fusion/lsi/mpi_inb.h
deleted file mode 100644
index ff167309ba2..00000000000
--- a/drivers/message/fusion/lsi/mpi_inb.h
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Copyright (c) 2003-2004 LSI Logic Corporation.
- *
- *
- * Name: mpi_inb.h
- * Title: MPI Inband structures and definitions
- * Creation Date: September 30, 2003
- *
- * mpi_inb.h Version: 01.05.01
- *
- * Version History
- * ---------------
- *
- * Date Version Description
- * -------- -------- ------------------------------------------------------
- * 05-11-04 01.03.01 Original release.
- * 08-19-04 01.05.01 Original release for MPI v1.5.
- * --------------------------------------------------------------------------
- */
-
-#ifndef MPI_INB_H
-#define MPI_INB_H
-
-/******************************************************************************
-*
-* I n b a n d M e s s a g e s
-*
-*******************************************************************************/
-
-
-/****************************************************************************/
-/* Inband Buffer Post Request */
-/****************************************************************************/
-
-typedef struct _MSG_INBAND_BUFFER_POST_REQUEST
-{
- U8 Reserved1; /* 00h */
- U8 BufferCount; /* 01h */
- U8 ChainOffset; /* 02h */
- U8 Function; /* 03h */
- U16 Reserved2; /* 04h */
- U8 Reserved3; /* 06h */
- U8 MsgFlags; /* 07h */
- U32 MsgContext; /* 08h */
- U32 Reserved4; /* 0Ch */
- SGE_TRANS_SIMPLE_UNION SGL; /* 10h */
-} MSG_INBAND_BUFFER_POST_REQUEST, MPI_POINTER PTR_MSG_INBAND_BUFFER_POST_REQUEST,
- MpiInbandBufferPostRequest_t , MPI_POINTER pMpiInbandBufferPostRequest_t;
-
-
-typedef struct _WWN_FC_FORMAT
-{
- U64 NodeName; /* 00h */
- U64 PortName; /* 08h */
-} WWN_FC_FORMAT, MPI_POINTER PTR_WWN_FC_FORMAT,
- WwnFcFormat_t, MPI_POINTER pWwnFcFormat_t;
-
-typedef struct _WWN_SAS_FORMAT
-{
- U64 WorldWideID; /* 00h */
- U32 Reserved1; /* 08h */
- U32 Reserved2; /* 0Ch */
-} WWN_SAS_FORMAT, MPI_POINTER PTR_WWN_SAS_FORMAT,
- WwnSasFormat_t, MPI_POINTER pWwnSasFormat_t;
-
-typedef union _WWN_INBAND_FORMAT
-{
- WWN_FC_FORMAT Fc;
- WWN_SAS_FORMAT Sas;
-} WWN_INBAND_FORMAT, MPI_POINTER PTR_WWN_INBAND_FORMAT,
- WwnInbandFormat, MPI_POINTER pWwnInbandFormat;
-
-
-/* Inband Buffer Post reply message */
-
-typedef struct _MSG_INBAND_BUFFER_POST_REPLY
-{
- U16 Reserved1; /* 00h */
- U8 MsgLength; /* 02h */
- U8 Function; /* 03h */
- U16 Reserved2; /* 04h */
- U8 Reserved3; /* 06h */
- U8 MsgFlags; /* 07h */
- U32 MsgContext; /* 08h */
- U16 Reserved4; /* 0Ch */
- U16 IOCStatus; /* 0Eh */
- U32 IOCLogInfo; /* 10h */
- U32 TransferLength; /* 14h */
- U32 TransactionContext; /* 18h */
- WWN_INBAND_FORMAT Wwn; /* 1Ch */
- U32 IOCIdentifier[4]; /* 2Ch */
-} MSG_INBAND_BUFFER_POST_REPLY, MPI_POINTER PTR_MSG_INBAND_BUFFER_POST_REPLY,
- MpiInbandBufferPostReply_t, MPI_POINTER pMpiInbandBufferPostReply_t;
-
-
-/****************************************************************************/
-/* Inband Send Request */
-/****************************************************************************/
-
-typedef struct _MSG_INBAND_SEND_REQUEST
-{
- U16 Reserved1; /* 00h */
- U8 ChainOffset; /* 02h */
- U8 Function; /* 03h */
- U16 Reserved2; /* 04h */
- U8 Reserved3; /* 06h */
- U8 MsgFlags; /* 07h */
- U32 MsgContext; /* 08h */
- U32 Reserved4; /* 0Ch */
- WWN_INBAND_FORMAT Wwn; /* 10h */
- U32 Reserved5; /* 20h */
- SGE_IO_UNION SGL; /* 24h */
-} MSG_INBAND_SEND_REQUEST, MPI_POINTER PTR_MSG_INBAND_SEND_REQUEST,
- MpiInbandSendRequest_t , MPI_POINTER pMpiInbandSendRequest_t;
-
-
-/* Inband Send reply message */
-
-typedef struct _MSG_INBAND_SEND_REPLY
-{
- U16 Reserved1; /* 00h */
- U8 MsgLength; /* 02h */
- U8 Function; /* 03h */
- U16 Reserved2; /* 04h */
- U8 Reserved3; /* 06h */
- U8 MsgFlags; /* 07h */
- U32 MsgContext; /* 08h */
- U16 Reserved4; /* 0Ch */
- U16 IOCStatus; /* 0Eh */
- U32 IOCLogInfo; /* 10h */
- U32 ResponseLength; /* 14h */
-} MSG_INBAND_SEND_REPLY, MPI_POINTER PTR_MSG_INBAND_SEND_REPLY,
- MpiInbandSendReply_t, MPI_POINTER pMpiInbandSendReply_t;
-
-
-/****************************************************************************/
-/* Inband Response Request */
-/****************************************************************************/
-
-typedef struct _MSG_INBAND_RSP_REQUEST
-{
- U16 Reserved1; /* 00h */
- U8 ChainOffset; /* 02h */
- U8 Function; /* 03h */
- U16 Reserved2; /* 04h */
- U8 Reserved3; /* 06h */
- U8 MsgFlags; /* 07h */
- U32 MsgContext; /* 08h */
- U32 Reserved4; /* 0Ch */
- WWN_INBAND_FORMAT Wwn; /* 10h */
- U32 IOCIdentifier[4]; /* 20h */
- U32 ResponseLength; /* 30h */
- SGE_IO_UNION SGL; /* 34h */
-} MSG_INBAND_RSP_REQUEST, MPI_POINTER PTR_MSG_INBAND_RSP_REQUEST,
- MpiInbandRspRequest_t , MPI_POINTER pMpiInbandRspRequest_t;
-
-
-/* Inband Response reply message */
-
-typedef struct _MSG_INBAND_RSP_REPLY
-{
- U16 Reserved1; /* 00h */
- U8 MsgLength; /* 02h */
- U8 Function; /* 03h */
- U16 Reserved2; /* 04h */
- U8 Reserved3; /* 06h */
- U8 MsgFlags; /* 07h */
- U32 MsgContext; /* 08h */
- U16 Reserved4; /* 0Ch */
- U16 IOCStatus; /* 0Eh */
- U32 IOCLogInfo; /* 10h */
-} MSG_INBAND_RSP_REPLY, MPI_POINTER PTR_MSG_INBAND_RSP_REPLY,
- MpiInbandRspReply_t, MPI_POINTER pMpiInbandRspReply_t;
-
-
-/****************************************************************************/
-/* Inband Abort Request */
-/****************************************************************************/
-
-typedef struct _MSG_INBAND_ABORT_REQUEST
-{
- U8 Reserved1; /* 00h */
- U8 AbortType; /* 01h */
- U8 ChainOffset; /* 02h */
- U8 Function; /* 03h */
- U16 Reserved2; /* 04h */
- U8 Reserved3; /* 06h */
- U8 MsgFlags; /* 07h */
- U32 MsgContext; /* 08h */
- U32 Reserved4; /* 0Ch */
- U32 ContextToAbort; /* 10h */
-} MSG_INBAND_ABORT_REQUEST, MPI_POINTER PTR_MSG_INBAND_ABORT_REQUEST,
- MpiInbandAbortRequest_t , MPI_POINTER pMpiInbandAbortRequest_t;
-
-#define MPI_INBAND_ABORT_TYPE_ALL_BUFFERS (0x00)
-#define MPI_INBAND_ABORT_TYPE_EXACT_BUFFER (0x01)
-#define MPI_INBAND_ABORT_TYPE_SEND_REQUEST (0x02)
-#define MPI_INBAND_ABORT_TYPE_RESPONSE_REQUEST (0x03)
-
-
-/* Inband Abort reply message */
-
-typedef struct _MSG_INBAND_ABORT_REPLY
-{
- U8 Reserved1; /* 00h */
- U8 AbortType; /* 01h */
- U8 MsgLength; /* 02h */
- U8 Function; /* 03h */
- U16 Reserved2; /* 04h */
- U8 Reserved3; /* 06h */
- U8 MsgFlags; /* 07h */
- U32 MsgContext; /* 08h */
- U16 Reserved4; /* 0Ch */
- U16 IOCStatus; /* 0Eh */
- U32 IOCLogInfo; /* 10h */
-} MSG_INBAND_ABORT_REPLY, MPI_POINTER PTR_MSG_INBAND_ABORT_REPLY,
- MpiInbandAbortReply_t, MPI_POINTER pMpiInbandAbortReply_t;
-
-
-#endif
-
diff --git a/drivers/message/fusion/lsi/mpi_init.h b/drivers/message/fusion/lsi/mpi_init.h
index ec9dff2249a..3a02615f12d 100644
--- a/drivers/message/fusion/lsi/mpi_init.h
+++ b/drivers/message/fusion/lsi/mpi_init.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2006 LSI Logic Corporation.
+ * Copyright (c) 2000-2007 LSI Logic Corporation.
*
*
* Name: mpi_init.h
* Title: MPI initiator mode messages and structures
* Creation Date: June 8, 2000
*
- * mpi_init.h Version: 01.05.08
+ * mpi_init.h Version: 01.05.09
*
* Version History
* ---------------
@@ -54,6 +54,8 @@
* unique in the first 32 characters.
* 03-27-06 01.05.07 Added Task Management type of Clear ACA.
* 10-11-06 01.05.08 Shortened define for Task Management type of Clear ACA.
+ * 02-28-07 01.05.09 Defined two new MsgFlags bits for SCSI Task Management
+ * Request: Do Not Send Task IU and Soft Reset Option.
* --------------------------------------------------------------------------
*/
@@ -432,10 +434,14 @@ typedef struct _MSG_SCSI_TASK_MGMT
#define MPI_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08)
/* MsgFlags bits */
+#define MPI_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01)
+
#define MPI_SCSITASKMGMT_MSGFLAGS_TARGET_RESET_OPTION (0x00)
#define MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION (0x02)
#define MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION (0x04)
+#define MPI_SCSITASKMGMT_MSGFLAGS_SOFT_RESET_OPTION (0x08)
+
/* SCSI Task Management Reply */
typedef struct _MSG_SCSI_TASK_MGMT_REPLY
{
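
The two new MsgFlags bits give the host finer control over how the IOC carries out a task-management request. A minimal sketch of how a driver might use them when building a target reset follows; the SCSITaskMgmt_t field names come from mpi_init.h, and everything else (the caller, the message frame) is assumed:

	/* Sketch only, not part of this patch: request a target reset using
	 * the new 01.05.09 MsgFlags options. */
	static void example_fill_soft_target_reset(SCSITaskMgmt_t *tm, u8 bus, u8 target)
	{
		memset(tm, 0, sizeof(*tm));
		tm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
		tm->Bus      = bus;
		tm->TargetID = target;
		tm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
		tm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_SOFT_RESET_OPTION |
			       MPI_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU;
	}
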
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
index 6c33e335337..b1893d185bc 100644
--- a/drivers/message/fusion/lsi/mpi_ioc.h
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2006 LSI Logic Corporation.
+ * Copyright (c) 2000-2007 LSI Logic Corporation.
*
*
* Name: mpi_ioc.h
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: August 11, 2000
*
- * mpi_ioc.h Version: 01.05.12
+ * mpi_ioc.h Version: 01.05.14
*
* Version History
* ---------------
@@ -106,6 +106,13 @@
* data structure.
* Added new ImageType values for FWDownload and FWUpload
* requests.
+ * 02-28-07 01.05.13 Added MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT for SAS
+ * Broadcast Event Data (replacing _RESERVED2).
+ * For Discovery Error Event Data DiscoveryStatus field,
+ * replaced _MULTPL_PATHS with _UNSUPPORTED_DEVICE and
+ * added _MULTI_PORT_DOMAIN.
+ * 05-24-07 01.05.14 Added Common Boot Block type to FWDownload Request.
+ * Added Common Boot Block type to FWUpload Request.
* --------------------------------------------------------------------------
*/
@@ -792,7 +799,7 @@ typedef struct _EVENT_DATA_SAS_BROADCAST_PRIMITIVE
#define MPI_EVENT_PRIMITIVE_CHANGE (0x01)
#define MPI_EVENT_PRIMITIVE_EXPANDER (0x03)
-#define MPI_EVENT_PRIMITIVE_RESERVED2 (0x04)
+#define MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04)
#define MPI_EVENT_PRIMITIVE_RESERVED3 (0x05)
#define MPI_EVENT_PRIMITIVE_RESERVED4 (0x06)
#define MPI_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07)
@@ -857,8 +864,9 @@ typedef struct _EVENT_DATA_DISCOVERY_ERROR
#define MPI_EVENT_DSCVRY_ERR_DS_SMP_CRC_ERROR (0x00000100)
#define MPI_EVENT_DSCVRY_ERR_DS_MULTPL_SUBTRACTIVE (0x00000200)
#define MPI_EVENT_DSCVRY_ERR_DS_TABLE_TO_TABLE (0x00000400)
-#define MPI_EVENT_DSCVRY_ERR_DS_MULTPL_PATHS (0x00000800)
+#define MPI_EVENT_DSCVRY_ERR_DS_UNSUPPORTED_DEVICE (0x00000800)
#define MPI_EVENT_DSCVRY_ERR_DS_MAX_SATA_TARGETS (0x00001000)
+#define MPI_EVENT_DSCVRY_ERR_DS_MULTI_PORT_DOMAIN (0x00002000)
/* SAS SMP Error Event data */
@@ -990,6 +998,7 @@ typedef struct _MSG_FW_DOWNLOAD
#define MPI_FW_DOWNLOAD_ITYPE_CONFIG_1 (0x07)
#define MPI_FW_DOWNLOAD_ITYPE_CONFIG_2 (0x08)
#define MPI_FW_DOWNLOAD_ITYPE_MEGARAID (0x09)
+#define MPI_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
typedef struct _FWDownloadTCSGE
@@ -1038,17 +1047,18 @@ typedef struct _MSG_FW_UPLOAD
} MSG_FW_UPLOAD, MPI_POINTER PTR_MSG_FW_UPLOAD,
FWUpload_t, MPI_POINTER pFWUpload_t;
-#define MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM (0x00)
-#define MPI_FW_UPLOAD_ITYPE_FW_FLASH (0x01)
-#define MPI_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02)
-#define MPI_FW_UPLOAD_ITYPE_NVDATA (0x03)
-#define MPI_FW_UPLOAD_ITYPE_BOOTLOADER (0x04)
-#define MPI_FW_UPLOAD_ITYPE_FW_BACKUP (0x05)
-#define MPI_FW_UPLOAD_ITYPE_MANUFACTURING (0x06)
-#define MPI_FW_UPLOAD_ITYPE_CONFIG_1 (0x07)
-#define MPI_FW_UPLOAD_ITYPE_CONFIG_2 (0x08)
-#define MPI_FW_UPLOAD_ITYPE_MEGARAID (0x09)
-#define MPI_FW_UPLOAD_ITYPE_COMPLETE (0x0A)
+#define MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM (0x00)
+#define MPI_FW_UPLOAD_ITYPE_FW_FLASH (0x01)
+#define MPI_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02)
+#define MPI_FW_UPLOAD_ITYPE_NVDATA (0x03)
+#define MPI_FW_UPLOAD_ITYPE_BOOTLOADER (0x04)
+#define MPI_FW_UPLOAD_ITYPE_FW_BACKUP (0x05)
+#define MPI_FW_UPLOAD_ITYPE_MANUFACTURING (0x06)
+#define MPI_FW_UPLOAD_ITYPE_CONFIG_1 (0x07)
+#define MPI_FW_UPLOAD_ITYPE_CONFIG_2 (0x08)
+#define MPI_FW_UPLOAD_ITYPE_MEGARAID (0x09)
+#define MPI_FW_UPLOAD_ITYPE_COMPLETE (0x0A)
+#define MPI_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
typedef struct _FWUploadTCSGE
{
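
Besides the new Common Boot Block image type for FWDownload/FWUpload, the Discovery Error event data loses the old _MULTPL_PATHS bit in favour of _UNSUPPORTED_DEVICE and gains _MULTI_PORT_DOMAIN. A hedged sketch of decoding those DiscoveryStatus bits (the event data layout is EVENT_DATA_DISCOVERY_ERROR from this header):

	/* Sketch only: report the discovery-status bits touched by 01.05.13. */
	static void example_report_discovery_status(u32 discovery_status)
	{
		if (discovery_status & MPI_EVENT_DSCVRY_ERR_DS_UNSUPPORTED_DEVICE)
			printk(KERN_INFO "discovery: unsupported device\n");
		if (discovery_status & MPI_EVENT_DSCVRY_ERR_DS_MULTI_PORT_DOMAIN)
			printk(KERN_INFO "discovery: multi-port domain\n");
		if (discovery_status & MPI_EVENT_DSCVRY_ERR_DS_MAX_SATA_TARGETS)
			printk(KERN_INFO "discovery: maximum SATA targets reached\n");
	}
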
diff --git a/drivers/message/fusion/lsi/mpi_raid.h b/drivers/message/fusion/lsi/mpi_raid.h
index 802255d2747..32819b1ec8e 100644
--- a/drivers/message/fusion/lsi/mpi_raid.h
+++ b/drivers/message/fusion/lsi/mpi_raid.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2001-2005 LSI Logic Corporation.
+ * Copyright (c) 2001-2007 LSI Logic Corporation.
*
*
* Name: mpi_raid.h
* Title: MPI RAID message and structures
* Creation Date: February 27, 2001
*
- * mpi_raid.h Version: 01.05.02
+ * mpi_raid.h Version: 01.05.03
*
* Version History
* ---------------
@@ -32,6 +32,8 @@
* 08-19-04 01.05.01 Original release for MPI v1.5.
* 01-15-05 01.05.02 Added defines for the two new RAID Actions for
* _SET_RESYNC_RATE and _SET_DATA_SCRUB_RATE.
+ * 02-28-07 01.05.03 Added new RAID Action, Device FW Update Mode, and
+ * associated defines.
* --------------------------------------------------------------------------
*/
@@ -90,6 +92,7 @@ typedef struct _MSG_RAID_ACTION
#define MPI_RAID_ACTION_INACTIVATE_VOLUME (0x12)
#define MPI_RAID_ACTION_SET_RESYNC_RATE (0x13)
#define MPI_RAID_ACTION_SET_DATA_SCRUB_RATE (0x14)
+#define MPI_RAID_ACTION_DEVICE_FW_UPDATE_MODE (0x15)
/* ActionDataWord defines for use with MPI_RAID_ACTION_CREATE_VOLUME action */
#define MPI_RAID_ACTION_ADATA_DO_NOT_SYNC (0x00000001)
@@ -111,6 +114,10 @@ typedef struct _MSG_RAID_ACTION
/* ActionDataWord defines for use with MPI_RAID_ACTION_SET_DATA_SCRUB_RATE action */
#define MPI_RAID_ACTION_ADATA_DATA_SCRUB_RATE_MASK (0x000000FF)
+/* ActionDataWord defines for use with MPI_RAID_ACTION_DEVICE_FW_UPDATE_MODE action */
+#define MPI_RAID_ACTION_ADATA_ENABLE_FW_UPDATE (0x00000001)
+#define MPI_RAID_ACTION_ADATA_MASK_FW_UPDATE_TIMEOUT (0x0000FF00)
+#define MPI_RAID_ACTION_ADATA_SHIFT_FW_UPDATE_TIMEOUT (8)
/* RAID Action reply message */
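
The new Device FW Update Mode action carries its parameters in ActionDataWord: bit 0 enables firmware-update mode and bits 15:8 hold a timeout. A hedged sketch of composing that word from the defines above (the timeout value is illustrative; its units are not stated here):

	/* Sketch only: build ActionDataWord for MPI_RAID_ACTION_DEVICE_FW_UPDATE_MODE. */
	static u32 example_fw_update_action_data(u8 timeout)
	{
		return MPI_RAID_ACTION_ADATA_ENABLE_FW_UPDATE |
		       ((timeout << MPI_RAID_ACTION_ADATA_SHIFT_FW_UPDATE_TIMEOUT) &
			MPI_RAID_ACTION_ADATA_MASK_FW_UPDATE_TIMEOUT);
	}
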
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5021d1a2a1d..5a10c87239c 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -6,7 +6,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -64,6 +64,7 @@
#endif
#include "mptbase.h"
+#include "lsi/mpi_log_fc.h"
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#define my_NAME "Fusion MPT base driver"
@@ -6349,14 +6350,37 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
static void
mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info)
{
- static char *subcl_str[8] = {
- "FCP Initiator", "FCP Target", "LAN", "MPI Message Layer",
- "FC Link", "Context Manager", "Invalid Field Offset", "State Change Info"
- };
- u8 subcl = (log_info >> 24) & 0x7;
+ char *desc = "unknown";
+
+ switch (log_info & 0xFF000000) {
+ case MPI_IOCLOGINFO_FC_INIT_BASE:
+ desc = "FCP Initiator";
+ break;
+ case MPI_IOCLOGINFO_FC_TARGET_BASE:
+ desc = "FCP Target";
+ break;
+ case MPI_IOCLOGINFO_FC_LAN_BASE:
+ desc = "LAN";
+ break;
+ case MPI_IOCLOGINFO_FC_MSG_BASE:
+ desc = "MPI Message Layer";
+ break;
+ case MPI_IOCLOGINFO_FC_LINK_BASE:
+ desc = "FC Link";
+ break;
+ case MPI_IOCLOGINFO_FC_CTX_BASE:
+ desc = "Context Manager";
+ break;
+ case MPI_IOCLOGINFO_FC_INVALID_FIELD_BYTE_OFFSET:
+ desc = "Invalid Field Offset";
+ break;
+ case MPI_IOCLOGINFO_FC_STATE_CHANGE:
+ desc = "State Change Info";
+ break;
+ }
- printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): SubCl={%s}\n",
- ioc->name, log_info, subcl_str[subcl]);
+ printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): SubClass={%s}, Value=(0x%06x)\n",
+ ioc->name, log_info, desc, (log_info & 0xFFFFFF));
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
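
The rewritten mpt_fc_log_info() keys on the full top byte of the log value instead of indexing a fixed eight-entry table with bits 26:24, so unrecognized subclasses now print as "unknown" rather than selecting an arbitrary table entry, and the 24-bit sub-value is always shown. As a sketch, assuming the MPI_IOCLOGINFO_FC_* bases from mpi_log_fc.h occupy the whole top byte:

	u32 subclass = log_info & 0xFF000000;	/* matched against the MPI_IOCLOGINFO_FC_* bases */
	u32 value    = log_info & 0x00FFFFFF;	/* the sub-value printed in the new message */
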
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 165f81d16d0..05eb6e52875 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -6,7 +6,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -75,8 +75,8 @@
#define COPYRIGHT "Copyright (c) 1999-2007 " MODULEAUTHOR
#endif
-#define MPT_LINUX_VERSION_COMMON "3.04.04"
-#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.04"
+#define MPT_LINUX_VERSION_COMMON "3.04.05"
+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.05"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 9d0f30478e4..58e6c319cc7 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -5,7 +5,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptctl.h b/drivers/message/fusion/mptctl.h
index f7e72c5e47d..180b3c15624 100644
--- a/drivers/message/fusion/mptctl.h
+++ b/drivers/message/fusion/mptctl.h
@@ -6,7 +6,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 0caaf640399..b766445f19a 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -4,7 +4,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -43,7 +43,6 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-#include "linux_compat.h" /* linux-2.6 tweaks */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 7dd34bd28ef..7e8a90cb484 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -5,7 +5,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 2000-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h
index fe438bf119f..8d08c2bed24 100644
--- a/drivers/message/fusion/mptlan.h
+++ b/drivers/message/fusion/mptlan.h
@@ -5,7 +5,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 2000-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 1d2d03f7789..9e5424e1871 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -4,7 +4,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
* Copyright (c) 2005-2007 Dell
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 3bd94f11e7d..d35617376f8 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -4,7 +4,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -44,7 +44,6 @@
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-#include "linux_compat.h" /* linux-2.6 tweaks */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -260,30 +259,13 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
/* Map the data portion, if any.
* sges_left = 0 if no data transfer.
*/
- if ( (sges_left = SCpnt->use_sg) ) {
- sges_left = pci_map_sg(ioc->pcidev,
- (struct scatterlist *) SCpnt->request_buffer,
- SCpnt->use_sg,
- SCpnt->sc_data_direction);
- if (sges_left == 0)
- return FAILED;
- } else if (SCpnt->request_bufflen) {
- SCpnt->SCp.dma_handle = pci_map_single(ioc->pcidev,
- SCpnt->request_buffer,
- SCpnt->request_bufflen,
- SCpnt->sc_data_direction);
- dsgprintk((MYIOC_s_INFO_FMT "SG: non-SG for %p, len=%d\n",
- ioc->name, SCpnt, SCpnt->request_bufflen));
- mptscsih_add_sge((char *) &pReq->SGL,
- 0xD1000000|MPT_SGE_FLAGS_ADDRESSING|sgdir|SCpnt->request_bufflen,
- SCpnt->SCp.dma_handle);
-
- return SUCCESS;
- }
+ sges_left = scsi_dma_map(SCpnt);
+ if (sges_left < 0)
+ return FAILED;
/* Handle the SG case.
*/
- sg = (struct scatterlist *) SCpnt->request_buffer;
+ sg = scsi_sglist(SCpnt);
sg_done = 0;
sgeOffset = sizeof(SCSIIORequest_t) - sizeof(SGE_IO_UNION);
chainSge = NULL;
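
This hunk is the heart of the data-buffer-accessor conversion that runs through the whole merge: the separate use_sg and single-buffer paths collapse into one scsi_dma_map() call, because the mid-layer now always hands the driver a scatterlist. A self-contained sketch of the pattern (not the driver's actual code) covering map, iterate and unmap:

	#include <linux/errno.h>
	#include <scsi/scsi_cmnd.h>

	static int example_build_sgl(struct scsi_cmnd *sc)
	{
		struct scatterlist *sg;
		int sg_count, i;

		sg_count = scsi_dma_map(sc);	/* 0: no data phase, < 0: mapping failed */
		if (sg_count < 0)
			return -EIO;

		scsi_for_each_sg(sc, sg, sg_count, i) {
			/* program sg_dma_address(sg) / sg_dma_len(sg) into the HBA SGL */
		}
		return 0;
	}

	static void example_complete(struct scsi_cmnd *sc, unsigned int xfer_cnt)
	{
		scsi_set_resid(sc, scsi_bufflen(sc) - xfer_cnt);	/* report any underrun */
		scsi_dma_unmap(sc);					/* one teardown call for both cases */
		sc->scsi_done(sc);
	}
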
@@ -465,7 +447,12 @@ mptscsih_issue_sep_command(MPT_ADAPTER *ioc, VirtTarget *vtarget,
MPT_FRAME_HDR *mf;
SEPRequest_t *SEPMsg;
- if (ioc->bus_type == FC)
+ if (ioc->bus_type != SAS)
+ return;
+
+ /* Not supported for hidden raid components
+ */
+ if (vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
return;
if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
@@ -662,7 +649,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
scsi_state = pScsiReply->SCSIState;
scsi_status = pScsiReply->SCSIStatus;
xfer_cnt = le32_to_cpu(pScsiReply->TransferCount);
- sc->resid = sc->request_bufflen - xfer_cnt;
+ scsi_set_resid(sc, scsi_bufflen(sc) - xfer_cnt);
log_info = le32_to_cpu(pScsiReply->IOCLogInfo);
/*
@@ -767,7 +754,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
break;
case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
- sc->resid = sc->request_bufflen - xfer_cnt;
+ scsi_set_resid(sc, scsi_bufflen(sc) - xfer_cnt);
if((xfer_cnt==0)||(sc->underflow > xfer_cnt))
sc->result=DID_SOFT_ERROR << 16;
else /* Sufficient data transfer occurred */
@@ -816,7 +803,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
break;
case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */
- sc->resid=0;
+ scsi_set_resid(sc, 0);
case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
sc->result = (DID_OK << 16) | scsi_status;
@@ -899,23 +886,18 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
scsi_state, scsi_status, log_info));
dreplyprintk(("%s: [%d:%d:%d:%d] resid=%d "
- "bufflen=%d xfer_cnt=%d\n", __FUNCTION__,
- sc->device->host->host_no, sc->device->channel, sc->device->id,
- sc->device->lun, sc->resid, sc->request_bufflen,
- xfer_cnt));
+ "bufflen=%d xfer_cnt=%d\n", __FUNCTION__,
+ sc->device->host->host_no,
+ sc->device->channel, sc->device->id,
+ sc->device->lun, scsi_get_resid(sc),
+ scsi_bufflen(sc), xfer_cnt));
}
#endif
} /* end of address reply case */
/* Unmap the DMA buffers, if any. */
- if (sc->use_sg) {
- pci_unmap_sg(ioc->pcidev, (struct scatterlist *) sc->request_buffer,
- sc->use_sg, sc->sc_data_direction);
- } else if (sc->request_bufflen) {
- pci_unmap_single(ioc->pcidev, sc->SCp.dma_handle,
- sc->request_bufflen, sc->sc_data_direction);
- }
+ scsi_dma_unmap(sc);
sc->scsi_done(sc); /* Issue the command callback */
@@ -970,17 +952,8 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
/* Set status, free OS resources (SG DMA buffers)
* Do OS callback
*/
- if (SCpnt->use_sg) {
- pci_unmap_sg(ioc->pcidev,
- (struct scatterlist *) SCpnt->request_buffer,
- SCpnt->use_sg,
- SCpnt->sc_data_direction);
- } else if (SCpnt->request_bufflen) {
- pci_unmap_single(ioc->pcidev,
- SCpnt->SCp.dma_handle,
- SCpnt->request_bufflen,
- SCpnt->sc_data_direction);
- }
+ scsi_dma_unmap(SCpnt);
+
SCpnt->result = DID_RESET << 16;
SCpnt->host_scribble = NULL;
@@ -1023,14 +996,19 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii);
if (mf == NULL)
continue;
+ /* If the device is a hidden raid component, then it's
+ * expected that the mf->function will be RAID_SCSI_IO
+ */
+ if (vdevice->vtarget->tflags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT && mf->Function !=
+ MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)
+ continue;
+
int_to_scsilun(vdevice->lun, &lun);
if ((mf->Bus != vdevice->vtarget->channel) ||
(mf->TargetID != vdevice->vtarget->id) ||
memcmp(lun.scsi_lun, mf->LUN, 8))
continue;
- dsprintk(( "search_running: found (sc=%p, mf = %p) "
- "channel %d id %d, lun %d \n", hd->ScsiLookup[ii],
- mf, mf->Bus, mf->TargetID, vdevice->lun));
/* Cleanup
*/
@@ -1039,19 +1017,12 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf);
if ((unsigned char *)mf != sc->host_scribble)
continue;
- if (sc->use_sg) {
- pci_unmap_sg(hd->ioc->pcidev,
- (struct scatterlist *) sc->request_buffer,
- sc->use_sg,
- sc->sc_data_direction);
- } else if (sc->request_bufflen) {
- pci_unmap_single(hd->ioc->pcidev,
- sc->SCp.dma_handle,
- sc->request_bufflen,
- sc->sc_data_direction);
- }
+ scsi_dma_unmap(sc);
sc->host_scribble = NULL;
sc->result = DID_NO_CONNECT << 16;
+ dsprintk(( "search_running: found (sc=%p, mf = %p) "
+ "channel %d id %d, lun %d \n", sc, mf,
+ vdevice->vtarget->channel, vdevice->vtarget->id, vdevice->lun));
sc->scsi_done(sc);
}
}
@@ -1380,10 +1351,10 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
* will be no data transfer! GRRRRR...
*/
if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) {
- datalen = SCpnt->request_bufflen;
+ datalen = scsi_bufflen(SCpnt);
scsidir = MPI_SCSIIO_CONTROL_READ; /* DATA IN (host<--ioc<--dev) */
} else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
- datalen = SCpnt->request_bufflen;
+ datalen = scsi_bufflen(SCpnt);
scsidir = MPI_SCSIIO_CONTROL_WRITE; /* DATA OUT (host-->ioc-->dev) */
} else {
datalen = 0;
@@ -1768,20 +1739,45 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
u32 ctx2abort;
int scpnt_idx;
int retval;
- VirtDevice *vdev;
+ VirtDevice *vdevice;
ulong sn = SCpnt->serial_number;
+ MPT_ADAPTER *ioc;
/* If we can't locate our host adapter structure, return FAILED status.
*/
if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL) {
SCpnt->result = DID_RESET << 16;
SCpnt->scsi_done(SCpnt);
- dfailprintk((KERN_INFO MYNAM ": mptscsih_abort: "
- "Can't locate host! (sc=%p)\n",
- SCpnt));
+ dfailprintk((KERN_INFO MYNAM ": mptscsih_abort: Can't locate "
+ "host! (sc=%p)\n", SCpnt));
return FAILED;
}
+ ioc = hd->ioc;
+ printk(MYIOC_s_INFO_FMT "attempting task abort! (sc=%p)\n",
+ ioc->name, SCpnt);
+ scsi_print_command(SCpnt);
+
+ vdevice = SCpnt->device->hostdata;
+ if (!vdevice || !vdevice->vtarget) {
+ dtmprintk((MYIOC_s_DEBUG_FMT "task abort: device has been "
+ "deleted (sc=%p)\n", ioc->name, SCpnt));
+ SCpnt->result = DID_NO_CONNECT << 16;
+ SCpnt->scsi_done(SCpnt);
+ retval = 0;
+ goto out;
+ }
+
+ /* Task aborts are not supported for hidden raid components.
+ */
+ if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ dtmprintk((MYIOC_s_DEBUG_FMT "task abort: hidden raid "
+ "component (sc=%p)\n", ioc->name, SCpnt));
+ SCpnt->result = DID_RESET << 16;
+ retval = FAILED;
+ goto out;
+ }
+
/* Find this command
*/
if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) {
@@ -1790,21 +1786,20 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
*/
SCpnt->result = DID_RESET << 16;
dtmprintk((KERN_INFO MYNAM ": %s: mptscsih_abort: "
- "Command not in the active list! (sc=%p)\n",
- hd->ioc->name, SCpnt));
- return SUCCESS;
+ "Command not in the active list! (sc=%p)\n", ioc->name,
+ SCpnt));
+ retval = 0;
+ goto out;
}
- if (hd->resetPending)
- return FAILED;
+ if (hd->resetPending) {
+ retval = FAILED;
+ goto out;
+ }
if (hd->timeouts < -1)
hd->timeouts++;
- printk(KERN_WARNING MYNAM ": %s: attempting task abort! (sc=%p)\n",
- hd->ioc->name, SCpnt);
- scsi_print_command(SCpnt);
-
/* Most important! Set TaskMsgContext to SCpnt's MsgContext!
* (the IO to be ABORT'd)
*
@@ -1817,18 +1812,17 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
hd->abortSCpnt = SCpnt;
- vdev = SCpnt->device->hostdata;
retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
- vdev->vtarget->channel, vdev->vtarget->id, vdev->lun,
- ctx2abort, mptscsih_get_tm_timeout(hd->ioc));
+ vdevice->vtarget->channel, vdevice->vtarget->id, vdevice->lun,
+ ctx2abort, mptscsih_get_tm_timeout(ioc));
if (SCPNT_TO_LOOKUP_IDX(SCpnt) == scpnt_idx &&
SCpnt->serial_number == sn)
retval = FAILED;
- printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n",
- hd->ioc->name,
- ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
+ out:
+ printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
+ ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
if (retval == 0)
return SUCCESS;
@@ -1850,32 +1844,47 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
{
MPT_SCSI_HOST *hd;
int retval;
- VirtDevice *vdev;
+ VirtDevice *vdevice;
+ MPT_ADAPTER *ioc;
/* If we can't locate our host adapter structure, return FAILED status.
*/
if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
- dtmprintk((KERN_INFO MYNAM ": mptscsih_dev_reset: "
- "Can't locate host! (sc=%p)\n",
- SCpnt));
+ dtmprintk((KERN_INFO MYNAM ": mptscsih_dev_reset: Can't "
+ "locate host! (sc=%p)\n", SCpnt));
return FAILED;
}
- if (hd->resetPending)
- return FAILED;
-
- printk(KERN_WARNING MYNAM ": %s: attempting target reset! (sc=%p)\n",
- hd->ioc->name, SCpnt);
+ ioc = hd->ioc;
+ printk(MYIOC_s_INFO_FMT "attempting target reset! (sc=%p)\n",
+ ioc->name, SCpnt);
scsi_print_command(SCpnt);
- vdev = SCpnt->device->hostdata;
+ if (hd->resetPending) {
+ retval = FAILED;
+ goto out;
+ }
+
+ vdevice = SCpnt->device->hostdata;
+ if (!vdevice || !vdevice->vtarget) {
+ retval = 0;
+ goto out;
+ }
+
+ /* Target reset to hidden raid component is not supported
+ */
+ if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ retval = FAILED;
+ goto out;
+ }
+
retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
- vdev->vtarget->channel, vdev->vtarget->id,
- 0, 0, mptscsih_get_tm_timeout(hd->ioc));
+ vdevice->vtarget->channel, vdevice->vtarget->id, 0, 0,
+ mptscsih_get_tm_timeout(ioc));
- printk (KERN_WARNING MYNAM ": %s: target reset: %s (sc=%p)\n",
- hd->ioc->name,
- ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
+ out:
+ printk (MYIOC_s_INFO_FMT "target reset: %s (sc=%p)\n",
+ ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
if (retval == 0)
return SUCCESS;
@@ -1899,18 +1908,19 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
MPT_SCSI_HOST *hd;
int retval;
VirtDevice *vdev;
+ MPT_ADAPTER *ioc;
/* If we can't locate our host adapter structure, return FAILED status.
*/
if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
- dtmprintk((KERN_INFO MYNAM ": mptscsih_bus_reset: "
- "Can't locate host! (sc=%p)\n",
- SCpnt ) );
+ dtmprintk((KERN_INFO MYNAM ": mptscsih_bus_reset: Can't "
+ "locate host! (sc=%p)\n", SCpnt ));
return FAILED;
}
- printk(KERN_WARNING MYNAM ": %s: attempting bus reset! (sc=%p)\n",
- hd->ioc->name, SCpnt);
+ ioc = hd->ioc;
+ printk(MYIOC_s_INFO_FMT "attempting bus reset! (sc=%p)\n",
+ ioc->name, SCpnt);
scsi_print_command(SCpnt);
if (hd->timeouts < -1)
@@ -1918,11 +1928,10 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
vdev = SCpnt->device->hostdata;
retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
- vdev->vtarget->channel, 0, 0, 0, mptscsih_get_tm_timeout(hd->ioc));
+ vdev->vtarget->channel, 0, 0, 0, mptscsih_get_tm_timeout(ioc));
- printk (KERN_WARNING MYNAM ": %s: bus reset: %s (sc=%p)\n",
- hd->ioc->name,
- ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
+ printk(MYIOC_s_INFO_FMT "bus reset: %s (sc=%p)\n",
+ ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
if (retval == 0)
return SUCCESS;
@@ -1943,37 +1952,38 @@ int
mptscsih_host_reset(struct scsi_cmnd *SCpnt)
{
MPT_SCSI_HOST * hd;
- int status = SUCCESS;
+ int retval;
+ MPT_ADAPTER *ioc;
/* If we can't locate the host to reset, then we failed. */
if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
- dtmprintk( ( KERN_INFO MYNAM ": mptscsih_host_reset: "
- "Can't locate host! (sc=%p)\n",
- SCpnt ) );
+ dtmprintk( ( KERN_INFO MYNAM ": mptscsih_host_reset: Can't "
+ "locate host! (sc=%p)\n", SCpnt));
return FAILED;
}
- printk(KERN_WARNING MYNAM ": %s: Attempting host reset! (sc=%p)\n",
- hd->ioc->name, SCpnt);
+ ioc = hd->ioc;
+ printk(MYIOC_s_INFO_FMT "attempting host reset! (sc=%p)\n",
+ ioc->name, SCpnt);
/* If our attempts to reset the host failed, then return a failed
* status. The host will be taken off line by the SCSI mid-layer.
*/
- if (mpt_HardResetHandler(hd->ioc, CAN_SLEEP) < 0){
- status = FAILED;
+ if (mpt_HardResetHandler(hd->ioc, CAN_SLEEP) < 0) {
+ retval = FAILED;
} else {
/* Make sure TM pending is cleared and TM state is set to
* NONE.
*/
+ retval = 0;
hd->tmPending = 0;
hd->tmState = TM_STATE_NONE;
}
- dtmprintk( ( KERN_INFO MYNAM ": mptscsih_host_reset: "
- "Status = %s\n",
- (status == SUCCESS) ? "SUCCESS" : "FAILED" ) );
+ printk(MYIOC_s_INFO_FMT "host reset: %s (sc=%p)\n",
+ ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
- return status;
+ return retval;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -3150,6 +3160,16 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
{
INTERNAL_CMD iocmd;
+ /* Ignore hidden raid components, this is handled when the command
+ * is sent to the volume
+ */
+ if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
+ return;
+
+ if (vdevice->vtarget->type != TYPE_DISK || vdevice->vtarget->deleted ||
+ !vdevice->configured_lun)
+ return;
+
/* Following parameters will not change
* in this routine.
*/
@@ -3164,9 +3184,7 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
iocmd.id = vdevice->vtarget->id;
iocmd.lun = vdevice->lun;
- if ((vdevice->vtarget->type == TYPE_DISK) &&
- (vdevice->configured_lun))
- mptscsih_do_cmd(hd, &iocmd);
+ mptscsih_do_cmd(hd, &iocmd);
}
EXPORT_SYMBOL(mptscsih_remove);
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 843c01a6aa0..8eccdfe5701 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -6,7 +6,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 37bf6534837..6b3e0c00952 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -4,7 +4,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -44,7 +44,6 @@
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-#include "linux_compat.h" /* linux-2.6 tweaks */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index b9df143e4ff..611adc3c0f7 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -485,7 +485,7 @@ int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
u8 *resblk; /* 8 bytes for header */
int rc;
- resblk = kmalloc(buflen + 8, GFP_KERNEL | GFP_ATOMIC);
+ resblk = kmalloc(buflen + 8, GFP_KERNEL);
if (!resblk)
return -ENOMEM;
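
GFP_KERNEL and GFP_ATOMIC are complete allocation modes rather than independent bits, so ORing them together only obscured the intent; this allocation runs in process context and may sleep, hence plain GFP_KERNEL. For contrast, a minimal illustration of the two modes:

	/* process context, allocation may sleep: */
	resblk = kmalloc(buflen + 8, GFP_KERNEL);

	/* interrupt or otherwise atomic context, must not sleep: */
	resblk = kmalloc(buflen + 8, GFP_ATOMIC);
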
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 1045c8a518b..aa6fb9429d5 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -377,12 +377,8 @@ static int i2o_scsi_reply(struct i2o_controller *c, u32 m,
osm_err("SCSI error %08x\n", error);
dev = &c->pdev->dev;
- if (cmd->use_sg)
- dma_unmap_sg(dev, cmd->request_buffer, cmd->use_sg,
- cmd->sc_data_direction);
- else if (cmd->SCp.dma_handle)
- dma_unmap_single(dev, cmd->SCp.dma_handle, cmd->request_bufflen,
- cmd->sc_data_direction);
+
+ scsi_dma_unmap(cmd);
cmd->scsi_done(cmd);
@@ -664,21 +660,15 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
if (sgl_offset != SGL_OFFSET_0) {
/* write size of data addressed by SGL */
- *mptr++ = cpu_to_le32(SCpnt->request_bufflen);
+ *mptr++ = cpu_to_le32(scsi_bufflen(SCpnt));
/* Now fill in the SGList and command */
- if (SCpnt->use_sg) {
- if (!i2o_dma_map_sg(c, SCpnt->request_buffer,
- SCpnt->use_sg,
+
+ if (scsi_sg_count(SCpnt)) {
+ if (!i2o_dma_map_sg(c, scsi_sglist(SCpnt),
+ scsi_sg_count(SCpnt),
SCpnt->sc_data_direction, &mptr))
goto nomem;
- } else {
- SCpnt->SCp.dma_handle =
- i2o_dma_map_single(c, SCpnt->request_buffer,
- SCpnt->request_bufflen,
- SCpnt->sc_data_direction, &mptr);
- if (dma_mapping_error(SCpnt->SCp.dma_handle))
- goto nomem;
}
}
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 821cde65e36..a1db9592513 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -815,9 +815,7 @@ zfcp_get_adapter_by_busid(char *bus_id)
struct zfcp_unit *
zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
{
- struct zfcp_unit *unit, *tmp_unit;
- unsigned int scsi_lun;
- int found;
+ struct zfcp_unit *unit;
/*
* check that there is no unit with this FCP_LUN already in list
@@ -863,22 +861,10 @@ zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
}
zfcp_unit_get(unit);
+ unit->scsi_lun = scsilun_to_int((struct scsi_lun *)&unit->fcp_lun);
- scsi_lun = 0;
- found = 0;
write_lock_irq(&zfcp_data.config_lock);
- list_for_each_entry(tmp_unit, &port->unit_list_head, list) {
- if (tmp_unit->scsi_lun != scsi_lun) {
- found = 1;
- break;
- }
- scsi_lun++;
- }
- unit->scsi_lun = scsi_lun;
- if (found)
- list_add_tail(&unit->list, &tmp_unit->list);
- else
- list_add_tail(&unit->list, &port->unit_list_head);
+ list_add_tail(&unit->list, &port->unit_list_head);
atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
write_unlock_irq(&zfcp_data.config_lock);
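
With the sequential-numbering loop gone, the unit's SCSI LUN is derived directly from its 8-byte FCP_LUN via the mid-layer helper, and the unit is simply appended to the port's list. A hedged sketch of that conversion (fcp_lun is assumed to be zfcp's 64-bit FCP_LUN value):

	#include <scsi/scsi_device.h>	/* scsilun_to_int(), struct scsi_lun */

	static unsigned int example_fcp_lun_to_scsi_lun(u64 fcp_lun)
	{
		return scsilun_to_int((struct scsi_lun *) &fcp_lun);
	}
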
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index aef66bc2b6c..4e7cb6dc4d3 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -1986,6 +1986,10 @@ zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *erp_action, int close)
failed_openfcp:
zfcp_close_fsf(erp_action->adapter);
failed_qdio:
+ atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+ ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
+ ZFCP_STATUS_ADAPTER_XPORT_OK,
+ &erp_action->adapter->status);
out:
return retval;
}
@@ -2167,6 +2171,9 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
sleep *= 2;
}
+ atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+ &adapter->status);
+
if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
&adapter->status)) {
ZFCP_LOG_INFO("error: exchange of configuration data for "
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index eb766c3af1c..76c09097175 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1306,22 +1306,26 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
wake_up(&tw_dev->ioctl_wqueue);
}
} else {
+ struct scsi_cmnd *cmd;
+
+ cmd = tw_dev->srb[request_id];
+
twa_scsiop_execute_scsi_complete(tw_dev, request_id);
/* If no error command was a success */
if (error == 0) {
- tw_dev->srb[request_id]->result = (DID_OK << 16);
+ cmd->result = (DID_OK << 16);
}
/* If error, command failed */
if (error == 1) {
/* Ask for a host reset */
- tw_dev->srb[request_id]->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
}
/* Report residual bytes for single sgl */
- if ((tw_dev->srb[request_id]->use_sg <= 1) && (full_command_packet->command.newcommand.status == 0)) {
- if (full_command_packet->command.newcommand.sg_list[0].length < tw_dev->srb[request_id]->request_bufflen)
- tw_dev->srb[request_id]->resid = tw_dev->srb[request_id]->request_bufflen - full_command_packet->command.newcommand.sg_list[0].length;
+ if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
+ if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
}
/* Now complete the io */
@@ -1384,52 +1388,20 @@ static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
{
int use_sg;
struct scsi_cmnd *cmd = tw_dev->srb[request_id];
- struct pci_dev *pdev = tw_dev->tw_pci_dev;
- int retval = 0;
-
- if (cmd->use_sg == 0)
- goto out;
-
- use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
- if (use_sg == 0) {
+ use_sg = scsi_dma_map(cmd);
+ if (!use_sg)
+ return 0;
+ else if (use_sg < 0) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
- goto out;
+ return 0;
}
cmd->SCp.phase = TW_PHASE_SGLIST;
cmd->SCp.have_data_in = use_sg;
- retval = use_sg;
-out:
- return retval;
-} /* End twa_map_scsi_sg_data() */
-
-/* This function will perform a pci-dma map for a single buffer */
-static dma_addr_t twa_map_scsi_single_data(TW_Device_Extension *tw_dev, int request_id)
-{
- dma_addr_t mapping;
- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
- struct pci_dev *pdev = tw_dev->tw_pci_dev;
- dma_addr_t retval = 0;
-
- if (cmd->request_bufflen == 0) {
- retval = 0;
- goto out;
- }
-
- mapping = pci_map_single(pdev, cmd->request_buffer, cmd->request_bufflen, DMA_BIDIRECTIONAL);
-
- if (mapping == 0) {
- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Failed to map page");
- goto out;
- }
- cmd->SCp.phase = TW_PHASE_SINGLE;
- cmd->SCp.have_data_in = mapping;
- retval = mapping;
-out:
- return retval;
-} /* End twa_map_scsi_single_data() */
+ return use_sg;
+} /* End twa_map_scsi_sg_data() */
/* This function will poll for a response interrupt of a request */
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
@@ -1815,15 +1787,13 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
u32 num_sectors = 0x0;
int i, sg_count;
struct scsi_cmnd *srb = NULL;
- struct scatterlist *sglist = NULL;
- dma_addr_t buffaddr = 0x0;
+ struct scatterlist *sglist = NULL, *sg;
int retval = 1;
if (tw_dev->srb[request_id]) {
- if (tw_dev->srb[request_id]->request_buffer) {
- sglist = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
- }
srb = tw_dev->srb[request_id];
+ if (scsi_sglist(srb))
+ sglist = scsi_sglist(srb);
}
/* Initialize command packet */
@@ -1856,32 +1826,12 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
if (!sglistarg) {
/* Map sglist from scsi layer to cmd packet */
- if (tw_dev->srb[request_id]->use_sg == 0) {
- if (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH) {
- command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
- command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
- if (tw_dev->srb[request_id]->sc_data_direction == DMA_TO_DEVICE || tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL)
- memcpy(tw_dev->generic_buffer_virt[request_id], tw_dev->srb[request_id]->request_buffer, tw_dev->srb[request_id]->request_bufflen);
- } else {
- buffaddr = twa_map_scsi_single_data(tw_dev, request_id);
- if (buffaddr == 0)
- goto out;
-
- command_packet->sg_list[0].address = TW_CPU_TO_SGL(buffaddr);
- command_packet->sg_list[0].length = cpu_to_le32(tw_dev->srb[request_id]->request_bufflen);
- }
- command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), 1));
- if (command_packet->sg_list[0].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2d, "Found unaligned address during execute scsi");
- goto out;
- }
- }
-
- if (tw_dev->srb[request_id]->use_sg > 0) {
- if ((tw_dev->srb[request_id]->use_sg == 1) && (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH)) {
- if (tw_dev->srb[request_id]->sc_data_direction == DMA_TO_DEVICE || tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL) {
- struct scatterlist *sg = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
+ if (scsi_sg_count(srb)) {
+ if ((scsi_sg_count(srb) == 1) &&
+ (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
+ if (srb->sc_data_direction == DMA_TO_DEVICE || srb->sc_data_direction == DMA_BIDIRECTIONAL) {
+ struct scatterlist *sg = scsi_sglist(srb);
char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
memcpy(tw_dev->generic_buffer_virt[request_id], buf, sg->length);
kunmap_atomic(buf - sg->offset, KM_IRQ0);
@@ -1893,16 +1843,16 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
if (sg_count == 0)
goto out;
- for (i = 0; i < sg_count; i++) {
- command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(&sglist[i]));
- command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(&sglist[i]));
+ scsi_for_each_sg(srb, sg, sg_count, i) {
+ command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
+ command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
goto out;
}
}
}
- command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), tw_dev->srb[request_id]->use_sg));
+ command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
}
} else {
/* Internal cdb post */
@@ -1932,7 +1882,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
/* Update SG statistics */
if (srb) {
- tw_dev->sgl_entries = tw_dev->srb[request_id]->use_sg;
+ tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
tw_dev->max_sgl_entries = tw_dev->sgl_entries;
}
@@ -1951,16 +1901,13 @@ out:
/* This function completes an execute scsi operation */
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
{
- if (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH &&
- (tw_dev->srb[request_id]->sc_data_direction == DMA_FROM_DEVICE ||
- tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL)) {
- if (tw_dev->srb[request_id]->use_sg == 0) {
- memcpy(tw_dev->srb[request_id]->request_buffer,
- tw_dev->generic_buffer_virt[request_id],
- tw_dev->srb[request_id]->request_bufflen);
- }
- if (tw_dev->srb[request_id]->use_sg == 1) {
- struct scatterlist *sg = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
+ struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+
+ if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
+ (cmd->sc_data_direction == DMA_FROM_DEVICE ||
+ cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
+ if (scsi_sg_count(cmd) == 1) {
+ struct scatterlist *sg = scsi_sglist(tw_dev->srb[request_id]);
char *buf;
unsigned long flags = 0;
local_irq_save(flags);
@@ -2017,16 +1964,8 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code)
static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
{
struct scsi_cmnd *cmd = tw_dev->srb[request_id];
- struct pci_dev *pdev = tw_dev->tw_pci_dev;
- switch(cmd->SCp.phase) {
- case TW_PHASE_SINGLE:
- pci_unmap_single(pdev, cmd->SCp.have_data_in, cmd->request_bufflen, DMA_BIDIRECTIONAL);
- break;
- case TW_PHASE_SGLIST:
- pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
- break;
- }
+ scsi_dma_unmap(cmd);
} /* End twa_unmap_scsi_data() */
/* scsi_host_template initializer */
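
Both 3ware drivers keep a special case for transfers shorter than TW_MIN_SGL_LENGTH: the single scatterlist entry (now always present after the accessor conversion) is bounced through a preallocated per-request buffer with kmap_atomic(). A sketch of the copy-back step, with the buffer and length names as assumptions:

	#include <linux/kernel.h>
	#include <linux/string.h>
	#include <linux/highmem.h>
	#include <scsi/scsi_cmnd.h>

	/* Sketch only: copy 'len' bytes from a per-request bounce buffer back
	 * into the command's single scatterlist entry. */
	static void example_copy_from_bounce(struct scsi_cmnd *cmd,
					     const void *bounce_data, unsigned int len)
	{
		struct scatterlist *sg = scsi_sglist(cmd);
		unsigned long flags;
		char *buf;

		local_irq_save(flags);
		buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
		memcpy(buf, bounce_data, min(sg->length, len));
		kunmap_atomic(buf - sg->offset, KM_IRQ0);
		local_irq_restore(flags);
	}
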
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 656bdb1352d..c7995fc216e 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1273,57 +1273,24 @@ static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
int use_sg;
dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n");
-
- if (cmd->use_sg == 0)
- return 0;
- use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
-
- if (use_sg == 0) {
+ use_sg = scsi_dma_map(cmd);
+ if (use_sg < 0) {
printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n");
return 0;
}
cmd->SCp.phase = TW_PHASE_SGLIST;
cmd->SCp.have_data_in = use_sg;
-
+
return use_sg;
} /* End tw_map_scsi_sg_data() */
-static u32 tw_map_scsi_single_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
-{
- dma_addr_t mapping;
-
- dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_single_data()\n");
-
- if (cmd->request_bufflen == 0)
- return 0;
-
- mapping = pci_map_page(pdev, virt_to_page(cmd->request_buffer), offset_in_page(cmd->request_buffer), cmd->request_bufflen, DMA_BIDIRECTIONAL);
-
- if (mapping == 0) {
- printk(KERN_WARNING "3w-xxxx: tw_map_scsi_single_data(): pci_map_page() failed.\n");
- return 0;
- }
-
- cmd->SCp.phase = TW_PHASE_SINGLE;
- cmd->SCp.have_data_in = mapping;
-
- return mapping;
-} /* End tw_map_scsi_single_data() */
-
static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
{
dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
- switch(cmd->SCp.phase) {
- case TW_PHASE_SINGLE:
- pci_unmap_page(pdev, cmd->SCp.have_data_in, cmd->request_bufflen, DMA_BIDIRECTIONAL);
- break;
- case TW_PHASE_SGLIST:
- pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
- break;
- }
+ scsi_dma_unmap(cmd);
} /* End tw_unmap_scsi_data() */
/* This function will reset a device extension */
@@ -1499,27 +1466,16 @@ static void tw_transfer_internal(TW_Device_Extension *tw_dev, int request_id,
void *buf;
unsigned int transfer_len;
unsigned long flags = 0;
+ struct scatterlist *sg = scsi_sglist(cmd);
- if (cmd->use_sg) {
- struct scatterlist *sg =
- (struct scatterlist *)cmd->request_buffer;
- local_irq_save(flags);
- buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
- transfer_len = min(sg->length, len);
- } else {
- buf = cmd->request_buffer;
- transfer_len = min(cmd->request_bufflen, len);
- }
+ local_irq_save(flags);
+ buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ transfer_len = min(sg->length, len);
memcpy(buf, data, transfer_len);
-
- if (cmd->use_sg) {
- struct scatterlist *sg;
- sg = (struct scatterlist *)cmd->request_buffer;
- kunmap_atomic(buf - sg->offset, KM_IRQ0);
- local_irq_restore(flags);
- }
+ kunmap_atomic(buf - sg->offset, KM_IRQ0);
+ local_irq_restore(flags);
}
/* This function is called by the isr to complete an inquiry command */
@@ -1764,19 +1720,20 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
{
TW_Command *command_packet;
unsigned long command_que_value;
- u32 lba = 0x0, num_sectors = 0x0, buffaddr = 0x0;
+ u32 lba = 0x0, num_sectors = 0x0;
int i, use_sg;
struct scsi_cmnd *srb;
- struct scatterlist *sglist;
+ struct scatterlist *sglist, *sg;
dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_write()\n");
- if (tw_dev->srb[request_id]->request_buffer == NULL) {
+ srb = tw_dev->srb[request_id];
+
+ sglist = scsi_sglist(srb);
+ if (!sglist) {
printk(KERN_WARNING "3w-xxxx: tw_scsiop_read_write(): Request buffer NULL.\n");
return 1;
}
- sglist = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
- srb = tw_dev->srb[request_id];
/* Initialize command packet */
command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id];
@@ -1819,33 +1776,18 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
command_packet->byte8.io.lba = lba;
command_packet->byte6.block_count = num_sectors;
- /* Do this if there are no sg list entries */
- if (tw_dev->srb[request_id]->use_sg == 0) {
- dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_write(): SG = 0\n");
- buffaddr = tw_map_scsi_single_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
- if (buffaddr == 0)
- return 1;
+ use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
+ if (!use_sg)
+ return 1;
- command_packet->byte8.io.sgl[0].address = buffaddr;
- command_packet->byte8.io.sgl[0].length = tw_dev->srb[request_id]->request_bufflen;
+ scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) {
+ command_packet->byte8.io.sgl[i].address = sg_dma_address(sg);
+ command_packet->byte8.io.sgl[i].length = sg_dma_len(sg);
command_packet->size+=2;
}
- /* Do this if we have multiple sg list entries */
- if (tw_dev->srb[request_id]->use_sg > 0) {
- use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
- if (use_sg == 0)
- return 1;
-
- for (i=0;i<use_sg; i++) {
- command_packet->byte8.io.sgl[i].address = sg_dma_address(&sglist[i]);
- command_packet->byte8.io.sgl[i].length = sg_dma_len(&sglist[i]);
- command_packet->size+=2;
- }
- }
-
/* Update SG statistics */
- tw_dev->sgl_entries = tw_dev->srb[request_id]->use_sg;
+ tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
tw_dev->max_sgl_entries = tw_dev->sgl_entries;
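[Editor's note, not part of the patch: the 3w-xxxx hunks above follow the pattern used throughout this series -- drop the separate use_sg == 0 path and drive everything through the mid-layer data buffer accessors. A minimal sketch of that pattern; the example_sge type and the function names are invented for illustration.]

#include <linux/types.h>
#include <scsi/scsi_cmnd.h>

struct example_sge {			/* hypothetical adapter SG descriptor */
	u32 address;
	u32 length;
};

/* Build the adapter's SG list for an already-queued command. */
static int example_build_sgl(struct scsi_cmnd *cmd, struct example_sge *sgl)
{
	struct scatterlist *sg;
	int nsegs, i;

	nsegs = scsi_dma_map(cmd);		/* replaces pci_map_sg()/pci_map_page() */
	if (nsegs < 0)
		return nsegs;			/* DMA mapping failed */

	scsi_for_each_sg(cmd, sg, nsegs, i) {	/* replaces manual &sglist[i] walks */
		sgl[i].address = sg_dma_address(sg);
		sgl[i].length  = sg_dma_len(sg);
	}
	return nsegs;				/* 0 means the command moves no data */
}

/* Completion/teardown is a single call, replacing the old per-phase
 * pci_unmap_page()/pci_unmap_sg() switch on cmd->SCp.phase. */
static void example_unmap(struct scsi_cmnd *cmd)
{
	scsi_dma_unmap(cmd);
}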
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index cb02656eb54..71ff3fbfce1 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -267,8 +267,6 @@ NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
offset = max_offset;
}
if(XFERP < min_xferp) {
-		printk(KERN_WARNING "53c700: XFERP %d is less than minimum, setting to %d\n",
-		       XFERP, min_xferp);
XFERP = min_xferp;
}
return (offset & 0x0f) | (XFERP & 0x07)<<4;
@@ -585,16 +583,8 @@ NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
struct NCR_700_command_slot *slot)
{
if(SCp->sc_data_direction != DMA_NONE &&
- SCp->sc_data_direction != DMA_BIDIRECTIONAL) {
- if(SCp->use_sg) {
- dma_unmap_sg(hostdata->dev, SCp->request_buffer,
- SCp->use_sg, SCp->sc_data_direction);
- } else {
- dma_unmap_single(hostdata->dev, slot->dma_handle,
- SCp->request_bufflen,
- SCp->sc_data_direction);
- }
- }
+ SCp->sc_data_direction != DMA_BIDIRECTIONAL)
+ scsi_dma_unmap(SCp);
}
STATIC inline void
@@ -661,7 +651,6 @@ NCR_700_chip_setup(struct Scsi_Host *host)
{
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)host->hostdata[0];
- __u32 dcntl_extra = 0;
__u8 min_period;
__u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
@@ -686,13 +675,14 @@ NCR_700_chip_setup(struct Scsi_Host *host)
burst_disable = BURST_DISABLE;
break;
}
- dcntl_extra = COMPAT_700_MODE;
+ hostdata->dcntl_extra |= COMPAT_700_MODE;
- NCR_700_writeb(dcntl_extra, host, DCNTL_REG);
+ NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG);
NCR_700_writeb(burst_length | hostdata->dmode_extra,
host, DMODE_710_REG);
- NCR_700_writeb(burst_disable | (hostdata->differential ?
- DIFF : 0), host, CTEST7_REG);
+ NCR_700_writeb(burst_disable | hostdata->ctest7_extra |
+ (hostdata->differential ? DIFF : 0),
+ host, CTEST7_REG);
NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
| AUTO_ATN, host, SCNTL0_REG);
@@ -727,13 +717,13 @@ NCR_700_chip_setup(struct Scsi_Host *host)
* of spec: sync divider 2, async divider 3 */
DEBUG(("53c700: sync 2 async 3\n"));
NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
- NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
+ NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
hostdata->sync_clock = hostdata->clock/2;
} else if(hostdata->clock > 50 && hostdata->clock <= 75) {
/* sync divider 1.5, async divider 3 */
DEBUG(("53c700: sync 1.5 async 3\n"));
NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
- NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
+ NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
hostdata->sync_clock = hostdata->clock*2;
hostdata->sync_clock /= 3;
@@ -741,18 +731,18 @@ NCR_700_chip_setup(struct Scsi_Host *host)
/* sync divider 1, async divider 2 */
DEBUG(("53c700: sync 1 async 2\n"));
NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
- NCR_700_writeb(ASYNC_DIV_2_0 | dcntl_extra, host, DCNTL_REG);
+ NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG);
hostdata->sync_clock = hostdata->clock;
} else if(hostdata->clock > 25 && hostdata->clock <=37) {
/* sync divider 1, async divider 1.5 */
DEBUG(("53c700: sync 1 async 1.5\n"));
NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
- NCR_700_writeb(ASYNC_DIV_1_5 | dcntl_extra, host, DCNTL_REG);
+ NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG);
hostdata->sync_clock = hostdata->clock;
} else {
DEBUG(("53c700: sync 1 async 1\n"));
NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
- NCR_700_writeb(ASYNC_DIV_1_0 | dcntl_extra, host, DCNTL_REG);
+ NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG);
/* sync divider 1, async divider 1 */
hostdata->sync_clock = hostdata->clock;
}
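[Editor's note, a worked example of the divider selection above; the clock figures are illustrative, not from the patch: a 66 MHz part takes the 50 < clock <= 75 branch, so the chip is programmed for sync divider 1.5 / async divider 3 and hostdata->sync_clock becomes 66 * 2 / 3 = 44, while a 30 MHz part takes the 25 < clock <= 37 branch (sync divider 1, async divider 1.5) and sync_clock stays 30.]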
@@ -1263,14 +1253,13 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
host->host_no, pun, lun, NCR_700_condition[i],
NCR_700_phase[j], dsp - hostdata->pScript);
if(SCp != NULL) {
- scsi_print_command(SCp);
+ struct scatterlist *sg;
- if(SCp->use_sg) {
- for(i = 0; i < SCp->use_sg + 1; i++) {
- printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, ((struct scatterlist *)SCp->request_buffer)[i].length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
- }
+ scsi_print_command(SCp);
+ scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
+ printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
}
- }
+ }
NCR_700_internal_bus_reset(host);
} else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
@@ -1844,8 +1833,8 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
}
/* sanity check: some of the commands generated by the mid-layer
* have an eccentric idea of their sc_data_direction */
- if(!SCp->use_sg && !SCp->request_bufflen
- && SCp->sc_data_direction != DMA_NONE) {
+ if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
+ SCp->sc_data_direction != DMA_NONE) {
#ifdef NCR_700_DEBUG
printk("53c700: Command");
scsi_print_command(SCp);
@@ -1887,31 +1876,15 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
int i;
int sg_count;
dma_addr_t vPtr = 0;
+ struct scatterlist *sg;
__u32 count = 0;
- if(SCp->use_sg) {
- sg_count = dma_map_sg(hostdata->dev,
- SCp->request_buffer, SCp->use_sg,
- direction);
- } else {
- vPtr = dma_map_single(hostdata->dev,
- SCp->request_buffer,
- SCp->request_bufflen,
- direction);
- count = SCp->request_bufflen;
- slot->dma_handle = vPtr;
- sg_count = 1;
- }
-
+ sg_count = scsi_dma_map(SCp);
+ BUG_ON(sg_count < 0);
- for(i = 0; i < sg_count; i++) {
-
- if(SCp->use_sg) {
- struct scatterlist *sg = SCp->request_buffer;
-
- vPtr = sg_dma_address(&sg[i]);
- count = sg_dma_len(&sg[i]);
- }
+ scsi_for_each_sg(SCp, sg, sg_count, i) {
+ vPtr = sg_dma_address(sg);
+ count = sg_dma_len(sg);
slot->SG[i].ins = bS_to_host(move_ins | count);
DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index 841e1bb27d5..e06bdfeab42 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -177,6 +177,7 @@ struct NCR_700_command_slot {
__u8 state;
#define NCR_700_FLAG_AUTOSENSE 0x01
__u8 flags;
+ __u8 pad1[2]; /* Needed for m68k where min alignment is 2 bytes */
int tag;
__u32 resume_offset;
struct scsi_cmnd *cmnd;
@@ -196,6 +197,8 @@ struct NCR_700_Host_Parameters {
void __iomem *base; /* the base for the port (copied to host) */
struct device *dev;
__u32 dmode_extra; /* adjustable bus settings */
+ __u32 dcntl_extra; /* adjustable bus settings */
+ __u32 ctest7_extra; /* adjustable bus settings */
__u32 differential:1; /* if we are differential */
#ifdef CONFIG_53C700_LE_ON_BE
/* This option is for HP only. Set it if your chip is wired for
@@ -352,6 +355,7 @@ struct NCR_700_Host_Parameters {
#define SEL_TIMEOUT_DISABLE 0x10 /* 710 only */
#define DFP 0x08
#define EVP 0x04
+#define CTEST7_TT1 0x02
#define DIFF 0x01
#define CTEST6_REG 0x1A
#define TEMP_REG 0x1C
@@ -385,6 +389,7 @@ struct NCR_700_Host_Parameters {
#define SOFTWARE_RESET 0x01
#define COMPAT_700_MODE 0x01
#define SCRPTS_16BITS 0x20
+#define EA_710 0x20
#define ASYNC_DIV_2_0 0x00
#define ASYNC_DIV_1_5 0x40
#define ASYNC_DIV_1_0 0x80
diff --git a/drivers/scsi/53c7xx.c b/drivers/scsi/53c7xx.c
deleted file mode 100644
index 93b41f45638..00000000000
--- a/drivers/scsi/53c7xx.c
+++ /dev/null
@@ -1,6102 +0,0 @@
-/*
- * 53c710 driver. Modified from Drew Eckhardts driver
- * for 53c810 by Richard Hirst [richard@sleepie.demon.co.uk]
- * Check out PERM_OPTIONS and EXPECTED_CLOCK, which may be defined in the
- * relevant machine specific file (eg. mvme16x.[ch], amiga7xx.[ch]).
- * There are also currently some defines at the top of 53c7xx.scr.
- * The chip type is #defined in script_asm.pl, as well as the Makefile.
- * Host scsi ID expected to be 7 - see NCR53c7x0_init().
- *
- * I have removed the PCI code and some of the 53c8xx specific code -
- * simply to make this file smaller and easier to manage.
- *
- * MVME16x issues:
- * Problems trying to read any chip registers in NCR53c7x0_init(), as they
- * may never have been set by 16xBug (eg. If kernel has come in over tftp).
- */
-
-/*
- * Adapted for Linux/m68k Amiga platforms for the A4000T/A4091 and
- * WarpEngine SCSI controllers.
- * By Alan Hourihane <alanh@fairlite.demon.co.uk>
- * Thanks to Richard Hirst for making it possible with the MVME additions
- */
-
-/*
- * 53c710 rev 0 doesn't support add with carry. Rev 1 and 2 do. To
- * overcome this problem you can define FORCE_DSA_ALIGNMENT, which ensures
- * that the DSA address is always xxxxxx00. If disconnection is not allowed,
- * then the script only ever tries to add small (< 256) positive offsets to
- * DSA, so lack of carry isn't a problem. FORCE_DSA_ALIGNMENT can, of course,
- * be defined for all chip revisions at a small cost in memory usage.
- */
-
-#define FORCE_DSA_ALIGNMENT
-
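[Editor's note, the arithmetic behind the comment above, with made-up addresses: forcing the DSA onto a 256-byte boundary means its low byte is 0x00, so adding any offset below 256 can never carry into the upper bytes, e.g. 0x001c4600 + 0xf8 = 0x001c46f8 with the top three bytes untouched; that is why the missing add-with-carry on rev 0 parts is harmless as long as offsets stay under 256.]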
-/*
- * Selection timer does not always work on the 53c710, depending on the
- * timing at the last disconnect, if this is a problem for you, try
- * using validids as detailed below.
- *
- * Options for the NCR7xx driver
- *
- * noasync:0 - disables sync and asynchronous negotiation
- * nosync:0 - disables synchronous negotiation (does async)
- * nodisconnect:0 - disables disconnection
- * validids:0x?? - Bitmask field that disallows certain ID's.
- * - e.g. 0x03 allows ID 0,1
- * - 0x1F allows ID 0,1,2,3,4
- * opthi:n - replace top word of options with 'n'
- * optlo:n - replace bottom word of options with 'n'
- * - ALWAYS SPECIFY opthi THEN optlo <<<<<<<<<<
- */
-
-/*
- * PERM_OPTIONS are driver options which will be enabled for all NCR boards
- * in the system at driver initialization time.
- *
- * Don't THINK about touching these in PERM_OPTIONS :
- * OPTION_MEMORY_MAPPED
- * 680x0 doesn't have an IO map!
- *
- * OPTION_DEBUG_TEST1
- * Test 1 does bus mastering and interrupt tests, which will help weed
- * out brain damaged main boards.
- *
- * Other PERM_OPTIONS settings are listed below. Note the actual options
- * required are set in the relevant file (mvme16x.c, amiga7xx.c, etc):
- *
- * OPTION_NO_ASYNC
- * Don't negotiate for asynchronous transfers on the first command
- * when OPTION_ALWAYS_SYNCHRONOUS is set. Useful for dain bramaged
- * devices which do something bad rather than sending a MESSAGE
- * REJECT back to us like they should if they can't cope.
- *
- * OPTION_SYNCHRONOUS
- * Enable support for synchronous transfers. Target negotiated
- * synchronous transfers will be responded to. To initiate
- * a synchronous transfer request, call
- *
- * request_synchronous (hostno, target)
- *
- * from within KGDB.
- *
- * OPTION_ALWAYS_SYNCHRONOUS
- * Negotiate for synchronous transfers with every target after
- * driver initialization or a SCSI bus reset. This is a bit dangerous,
- * since there are some dain bramaged SCSI devices which will accept
- * SDTR messages but keep talking asynchronously.
- *
- * OPTION_DISCONNECT
- * Enable support for disconnect/reconnect. To change the
- * default setting on a given host adapter, call
- *
- * request_disconnect (hostno, allow)
- *
- * where allow is non-zero to allow, 0 to disallow.
- *
- * If you really want to run 10MHz FAST SCSI-II transfers, you should
- * know that the NCR driver currently ignores parity information. Most
- * systems do 5MHz SCSI fine. I've seen a lot that have problems faster
- * than 8MHz. To play it safe, we only request 5MHz transfers.
- *
- * If you'd rather get 10MHz transfers, edit sdtr_message and change
- * the fourth byte from 50 to 25.
- */
-
-/*
- * Sponsored by
- * iX Multiuser Multitasking Magazine
- * Hannover, Germany
- * hm@ix.de
- *
- * Copyright 1993, 1994, 1995 Drew Eckhardt
- * Visionary Computing
- * (Unix and Linux consulting and custom programming)
- * drew@PoohSticks.ORG
- * +1 (303) 786-7975
- *
- * TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
- *
- * For more information, please consult
- *
- * NCR53C810
- * SCSI I/O Processor
- * Programmer's Guide
- *
- * NCR 53C810
- * PCI-SCSI I/O Processor
- * Data Manual
- *
- * NCR 53C810/53C820
- * PCI-SCSI I/O Processor Design In Guide
- *
- * For literature on Symbios Logic Inc. formerly NCR, SCSI,
- * and Communication products please call (800) 334-5454 or
- * (719) 536-3300.
- *
- * PCI BIOS Specification Revision
- * PCI Local Bus Specification
- * PCI System Design Guide
- *
- * PCI Special Interest Group
- * M/S HF3-15A
- * 5200 N.E. Elam Young Parkway
- * Hillsboro, Oregon 97124-6497
- * +1 (503) 696-2000
- * +1 (800) 433-5177
- */
-
-/*
- * Design issues :
- * The cumulative latency needed to propagate a read/write request
- * through the file system, buffer cache, driver stacks, SCSI host, and
- * SCSI device is ultimately the limiting factor in throughput once we
- * have a sufficiently fast host adapter.
- *
- * So, to maximize performance we want to keep the ratio of latency to data
- * transfer time to a minimum by
- * 1. Minimizing the total number of commands sent (typical command latency
- * including drive and bus mastering host overhead is as high as 4.5ms)
- * to transfer a given amount of data.
- *
- * This is accomplished by placing no arbitrary limit on the number
- * of scatter/gather buffers supported, since we can transfer 1K
- * per scatter/gather buffer without Eric's cluster patches,
- * 4K with.
- *
- * 2. Minimizing the number of fatal interrupts serviced, since
- * fatal interrupts halt the SCSI I/O processor. Basically,
- * this means offloading the practical maximum amount of processing
- * to the SCSI chip.
- *
- * On the NCR53c810/820/720, this is accomplished by using
- * interrupt-on-the-fly signals when commands complete,
- * and only handling fatal errors and SDTR / WDTR messages
- * in the host code.
- *
- * On the NCR53c710, interrupts are generated as on the NCR53c8x0,
- * only the lack of a interrupt-on-the-fly facility complicates
- * things. Also, SCSI ID registers and commands are
- * bit fielded rather than binary encoded.
- *
- * On the NCR53c700 and NCR53c700-66, operations that are done via
- * indirect, table mode on the more advanced chips must be
- * replaced by calls through a jump table which
- * acts as a surrogate for the DSA. Unfortunately, this
- * will mean that we must service an interrupt for each
- * disconnect/reconnect.
- *
- * 3. Eliminating latency by pipelining operations at the different levels.
- *
- * This driver allows a configurable number of commands to be enqueued
- * for each target/lun combination (experimentally, I have discovered
- * that two seems to work best) and will ultimately allow for
- * SCSI-II tagged queuing.
- *
- *
- * Architecture :
- * This driver is built around a Linux queue of commands waiting to
- * be executed, and a shared Linux/NCR array of commands to start. Commands
- * are transferred to the array by the run_process_issue_queue() function
- * which is called whenever a command completes.
- *
- * As commands are completed, the interrupt routine is triggered,
- * looks for commands in the linked list of completed commands with
- * valid status, removes these commands from a list of running commands,
- * calls the done routine, and flags their target/luns as not busy.
- *
- * Due to limitations in the intelligence of the NCR chips, certain
- * concessions are made. In many cases, it is easier to dynamically
- * generate/fix-up code rather than calculate on the NCR at run time.
- * So, code is generated or fixed up for
- *
- * - Handling data transfers, using a variable number of MOVE instructions
- * interspersed with CALL MSG_IN, WHEN MSGIN instructions.
- *
- * The DATAIN and DATAOUT routines are separate, so that an incorrect
- * direction can be trapped, and space isn't wasted.
- *
- * It may turn out that we're better off using some sort
- * of table indirect instruction in a loop with a variable
- * sized table on the NCR53c710 and newer chips.
- *
- * - Checking for reselection (NCR53c710 and better)
- *
- * - Handling the details of SCSI context switches (NCR53c710 and better),
- * such as reprogramming appropriate synchronous parameters,
- * removing the dsa structure from the NCR's queue of outstanding
- * commands, etc.
- *
- */
-
-#include <linux/module.h>
-
-
-#include <linux/types.h>
-#include <asm/setup.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <linux/delay.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/ioport.h>
-#include <linux/time.h>
-#include <linux/blkdev.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <asm/pgtable.h>
-
-#ifdef CONFIG_AMIGA
-#include <asm/amigahw.h>
-#include <asm/amigaints.h>
-#include <asm/irq.h>
-
-#define BIG_ENDIAN
-#define NO_IO_SPACE
-#endif
-
-#ifdef CONFIG_MVME16x
-#include <asm/mvme16xhw.h>
-
-#define BIG_ENDIAN
-#define NO_IO_SPACE
-#define VALID_IDS
-#endif
-
-#ifdef CONFIG_BVME6000
-#include <asm/bvme6000hw.h>
-
-#define BIG_ENDIAN
-#define NO_IO_SPACE
-#define VALID_IDS
-#endif
-
-#include "scsi.h"
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_transport_spi.h>
-#include "53c7xx.h"
-#include <linux/stat.h>
-#include <linux/stddef.h>
-
-#ifdef NO_IO_SPACE
-/*
- * The following make the definitions in 53c7xx.h (write8, etc) smaller,
- * we don't have separate i/o space anyway.
- */
-#undef inb
-#undef outb
-#undef inw
-#undef outw
-#undef inl
-#undef outl
-#define inb(x) 1
-#define inw(x) 1
-#define inl(x) 1
-#define outb(x,y) 1
-#define outw(x,y) 1
-#define outl(x,y) 1
-#endif
-
-static int check_address (unsigned long addr, int size);
-static void dump_events (struct Scsi_Host *host, int count);
-static Scsi_Cmnd * return_outstanding_commands (struct Scsi_Host *host,
- int free, int issue);
-static void hard_reset (struct Scsi_Host *host);
-static void ncr_scsi_reset (struct Scsi_Host *host);
-static void print_lots (struct Scsi_Host *host);
-static void set_synchronous (struct Scsi_Host *host, int target, int sxfer,
- int scntl3, int now_connected);
-static int datapath_residual (struct Scsi_Host *host);
-static const char * sbcl_to_phase (int sbcl);
-static void print_progress (Scsi_Cmnd *cmd);
-static void print_queues (struct Scsi_Host *host);
-static void process_issue_queue (unsigned long flags);
-static int shutdown (struct Scsi_Host *host);
-static void abnormal_finished (struct NCR53c7x0_cmd *cmd, int result);
-static int disable (struct Scsi_Host *host);
-static int NCR53c7xx_run_tests (struct Scsi_Host *host);
-static irqreturn_t NCR53c7x0_intr(int irq, void *dev_id);
-static void NCR53c7x0_intfly (struct Scsi_Host *host);
-static int ncr_halt (struct Scsi_Host *host);
-static void intr_phase_mismatch (struct Scsi_Host *host, struct NCR53c7x0_cmd
- *cmd);
-static void intr_dma (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd);
-static void print_dsa (struct Scsi_Host *host, u32 *dsa,
- const char *prefix);
-static int print_insn (struct Scsi_Host *host, const u32 *insn,
- const char *prefix, int kernel);
-
-static void NCR53c7xx_dsa_fixup (struct NCR53c7x0_cmd *cmd);
-static void NCR53c7x0_init_fixup (struct Scsi_Host *host);
-static int NCR53c7x0_dstat_sir_intr (struct Scsi_Host *host, struct
- NCR53c7x0_cmd *cmd);
-static void NCR53c7x0_soft_reset (struct Scsi_Host *host);
-
-/* Size of event list (per host adapter) */
-static int track_events = 0;
-static struct Scsi_Host *first_host = NULL; /* Head of list of NCR boards */
-static struct scsi_host_template *the_template = NULL;
-
-/* NCR53c710 script handling code */
-
-#include "53c7xx_d.h"
-#ifdef A_int_debug_sync
-#define DEBUG_SYNC_INTR A_int_debug_sync
-#endif
-int NCR53c7xx_script_len = sizeof (SCRIPT);
-int NCR53c7xx_dsa_len = A_dsa_end + Ent_dsa_zero - Ent_dsa_code_template;
-#ifdef FORCE_DSA_ALIGNMENT
-int CmdPageStart = (0 - Ent_dsa_zero - sizeof(struct NCR53c7x0_cmd)) & 0xff;
-#endif
-
-static char *setup_strings[] =
- {"","","","","","","",""};
-
-#define MAX_SETUP_STRINGS ARRAY_SIZE(setup_strings)
-#define SETUP_BUFFER_SIZE 200
-static char setup_buffer[SETUP_BUFFER_SIZE];
-static char setup_used[MAX_SETUP_STRINGS];
-
-void ncr53c7xx_setup (char *str, int *ints)
-{
- int i;
- char *p1, *p2;
-
- p1 = setup_buffer;
- *p1 = '\0';
- if (str)
- strncpy(p1, str, SETUP_BUFFER_SIZE - strlen(setup_buffer));
- setup_buffer[SETUP_BUFFER_SIZE - 1] = '\0';
- p1 = setup_buffer;
- i = 0;
- while (*p1 && (i < MAX_SETUP_STRINGS)) {
- p2 = strchr(p1, ',');
- if (p2) {
- *p2 = '\0';
- if (p1 != p2)
- setup_strings[i] = p1;
- p1 = p2 + 1;
- i++;
- }
- else {
- setup_strings[i] = p1;
- break;
- }
- }
- for (i=0; i<MAX_SETUP_STRINGS; i++)
- setup_used[i] = 0;
-}
-
-
-/* check_setup_strings() returns index if key found, 0 if not
- */
-
-static int check_setup_strings(char *key, int *flags, int *val, char *buf)
-{
-int x;
-char *cp;
-
- for (x=0; x<MAX_SETUP_STRINGS; x++) {
- if (setup_used[x])
- continue;
- if (!strncmp(setup_strings[x], key, strlen(key)))
- break;
- if (!strncmp(setup_strings[x], "next", strlen("next")))
- return 0;
- }
- if (x == MAX_SETUP_STRINGS)
- return 0;
- setup_used[x] = 1;
- cp = setup_strings[x] + strlen(key);
- *val = -1;
- if (*cp != ':')
- return ++x;
- cp++;
- if ((*cp >= '0') && (*cp <= '9')) {
- *val = simple_strtoul(cp,NULL,0);
- }
- return ++x;
-}
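[Editor's note, a usage sketch of the two helpers above, not part of the original file; the option string, the flags/val/buf locals and the printk lines are illustrative only -- in the driver the string arrives from the platform probe code and the checks are made in NCR53c7x0_init().]

	int flags, val;
	char buf[32];

	/* split the comma-separated "key[:value]" pairs into setup_strings[] */
	ncr53c7xx_setup("validids:0x1f,nodisconnect:0", NULL);

	/* check_setup_strings() returns a non-zero index when the key is
	 * present; a numeric argument after ':' is returned in val,
	 * otherwise val is left at -1 */
	if (check_setup_strings("validids", &flags, &val, buf))
		printk(KERN_INFO "valid ID mask 0x%02x\n", val);	/* 0x1f */
	if (check_setup_strings("nodisconnect", &flags, &val, buf))
		printk(KERN_INFO "disconnection disabled\n");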
-
-
-
-/*
- * KNOWN BUGS :
- * - There is some sort of conflict when the PPP driver is compiled with
- * support for 16 channels?
- *
- * - On systems which predate the 1.3.x initialization order change,
- * the NCR driver will cause Cannot get free page messages to appear.
- * These are harmless, but I don't know of an easy way to avoid them.
- *
- * - With OPTION_DISCONNECT, on two systems under unknown circumstances,
- * we get a PHASE MISMATCH with DSA set to zero (suggests that we
- *     are executing somewhere in the reselection code) where
- * DSP=some value DCMD|DBC=same value.
- *
- * Closer inspection suggests that we may be trying to execute
- * some portion of the DSA?
- * scsi0 : handling residual transfer (+ 0 bytes from DMA FIFO)
- * scsi0 : handling residual transfer (+ 0 bytes from DMA FIFO)
- * scsi0 : no current command : unexpected phase MSGIN.
- * DSP=0x1c46cc, DCMD|DBC=0x1c46ac, DSA=0x0
- * DSPS=0x0, TEMP=0x1c3e70, DMODE=0x80
- * scsi0 : DSP->
- * 001c46cc : 0x001c46cc 0x00000000
- * 001c46d4 : 0x001c5ea0 0x000011f8
- *
- * Changed the print code in the phase_mismatch handler so
- * that we call print_lots to try to diagnose this.
- *
- */
-
-/*
- * Possible future direction of architecture for max performance :
- *
- * We're using a single start array for the NCR chip. This is
- * sub-optimal, because we cannot add a command which would conflict with
- * an executing command to this start queue, and therefore must insert the
- * next command for a given I/T/L combination after the first has completed;
- * incurring our interrupt latency between SCSI commands.
- *
- * To allow further pipelining of the NCR and host CPU operation, we want
- * to set things up so that immediately on termination of a command destined
- * for a given LUN, we get that LUN busy again.
- *
- * To do this, we need to add a 32 bit pointer to which is jumped to
- * on completion of a command. If no new command is available, this
- * would point to the usual DSA issue queue select routine.
- *
- * If one were, it would point to a per-NCR53c7x0_cmd select routine
- * which starts execution immediately, inserting the command at the head
- * of the start queue if the NCR chip is selected or reselected.
- *
- * We would change so that we keep a list of outstanding commands
- * for each unit, rather than a single running_list. We'd insert
- * a new command into the right running list; if the NCR didn't
- * have something running for that yet, we'd put it in the
- * start queue as well. Some magic needs to happen to handle the
- * race condition between the first command terminating before the
- * new one is written.
- *
- * Potential for profiling :
- * Call do_gettimeofday(struct timeval *tv) to get 800ns resolution.
- */
-
-
-/*
- * TODO :
- * 1. To support WIDE transfers, not much needs to happen. We
- * should do CHMOVE instructions instead of MOVEs when
- * we have scatter/gather segments of uneven length. When
- * we do this, we need to handle the case where we disconnect
- * between segments.
- *
- * 2. Currently, when Icky things happen we do a FATAL(). Instead,
- * we want to do an integrity check on the parts of the NCR hostdata
- * structure which were initialized at boot time; FATAL() if that
- * fails, and otherwise try to recover. Keep track of how many
- * times this has happened within a single SCSI command; if it
- * gets excessive, then FATAL().
- *
- * 3. Parity checking is currently disabled, and a few things should
- * happen here now that we support synchronous SCSI transfers :
- * 	1.  On soft-reset, we should set the EPC (Enable Parity Checking)
- * and AAP (Assert SATN/ on parity error) bits in SCNTL0.
- *
- * 2. We should enable the parity interrupt in the SIEN0 register.
- *
- * 3. intr_phase_mismatch() needs to believe that message out is
- * always an "acceptable" phase to have a mismatch in. If
- * the old phase was MSG_IN, we should send a MESSAGE PARITY
- * error. If the old phase was something else, we should send
- * a INITIATOR_DETECTED_ERROR message. Note that this could
- * cause a RESTORE POINTERS message; so we should handle that
- * correctly first. Instead, we should probably do an
- * initiator_abort.
- *
- * 4. MPEE bit of CTEST4 should be set so we get interrupted if
- * we detect an error.
- *
- *
- * 5. The initial code has been tested on the NCR53c810. I don't
- * have access to NCR53c700, 700-66 (Forex boards), NCR53c710
- * (NCR Pentium systems), NCR53c720, NCR53c820, or NCR53c825 boards to
- * finish development on those platforms.
- *
- * NCR53c820/825/720 - need to add wide transfer support, including WDTR
- * negotiation, programming of wide transfer capabilities
- * on reselection and table indirect selection.
- *
- * NCR53c710 - need to add fatal interrupt or GEN code for
- * command completion signaling. Need to modify all
- * SDID, SCID, etc. registers, and table indirect select code
- * since these use bit fielded (ie 1<<target) instead of
- * binary encoded target ids. Need to accommodate
- * different register mappings, probably scan through
- * the SCRIPT code and change the non SFBR register operand
- * of all MOVE instructions.
- *
- * It is rather worse than this actually, the 710 corrupts
- * both TEMP and DSA when you do a MOVE MEMORY. This
- * screws you up all over the place. MOVE MEMORY 4 with a
- * destination of DSA seems to work OK, which helps some.
- * Richard Hirst richard@sleepie.demon.co.uk
- *
- * NCR53c700/700-66 - need to add code to refix addresses on
- * every nexus change, eliminate all table indirect code,
- * very messy.
- *
- * 6. The NCR53c7x0 series is very popular on other platforms that
- * could be running Linux - ie, some high performance AMIGA SCSI
- * boards use it.
- *
- * So, I should include #ifdef'd code so that it is
- * compatible with these systems.
- *
- * Specifically, the little Endian assumptions I made in my
- * bit fields need to change, and if the NCR doesn't see memory
- * the right way, we need to provide options to reverse words
- * when the scripts are relocated.
- *
- * 7. Use vremap() to access memory mapped boards.
- */
-
-/*
- * Allow for simultaneous existence of multiple SCSI scripts so we
- * can have a single driver binary for all of the family.
- *
- * - one for NCR53c700 and NCR53c700-66 chips (not yet supported)
- * - one for rest (only the NCR53c810, 815, 820, and 825 are currently
- * supported)
- *
- * So that we only need two SCSI scripts, we need to modify things so
- * that we fixup register accesses in READ/WRITE instructions, and
- * we'll also have to accommodate the bit vs. binary encoding of IDs
- * with the 7xx chips.
- */
-
-#define ROUNDUP(adr,type) \
- ((void *) (((long) (adr) + sizeof(type) - 1) & ~(sizeof(type) - 1)))
-
-
-/*
- * Function: issue_to_cmd
- *
- * Purpose: convert jump instruction in issue array to NCR53c7x0_cmd
- * structure pointer.
- *
- * Inputs; issue - pointer to start of NOP or JUMP instruction
- * in issue array.
- *
- * Returns: pointer to command on success; 0 if opcode is NOP.
- */
-
-static inline struct NCR53c7x0_cmd *
-issue_to_cmd (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
- u32 *issue)
-{
- return (issue[0] != hostdata->NOP_insn) ?
- /*
- * If the IF TRUE bit is set, it's a JUMP instruction. The
- * operand is a bus pointer to the dsa_begin routine for this DSA. The
- * dsa field of the NCR53c7x0_cmd structure starts with the
- * DSA code template. By converting to a virtual address,
- * subtracting the code template size, and offset of the
- * dsa field, we end up with a pointer to the start of the
- * structure (alternatively, we could use the
- * dsa_cmnd field, an anachronism from when we weren't
- * sure what the relationship between the NCR structures
- * 	 * and host structures was going to be).
- */
- (struct NCR53c7x0_cmd *) ((char *) bus_to_virt (issue[1]) -
- (hostdata->E_dsa_code_begin - hostdata->E_dsa_code_template) -
- offsetof(struct NCR53c7x0_cmd, dsa))
- /* If the IF TRUE bit is not set, it's a NOP */
- : NULL;
-}
-
-
-/*
- * FIXME: we should junk these, in favor of synchronous_want and
- * wide_want in the NCR53c7x0_hostdata structure.
- */
-
-/* Template for "preferred" synchronous transfer parameters. */
-
-static const unsigned char sdtr_message[] = {
-#ifdef CONFIG_SCSI_NCR53C7xx_FAST
- EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 25 /* *4ns */, 8 /* off */
-#else
- EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 50 /* *4ns */, 8 /* off */
-#endif
-};
-
-/* Template to request asynchronous transfers */
-
-static const unsigned char async_message[] = {
- EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 0, 0 /* asynchronous */
-};
-
-/* Template for "preferred" WIDE transfer parameters */
-
-static const unsigned char wdtr_message[] = {
- EXTENDED_MESSAGE, 2 /* length */, EXTENDED_WDTR, 1 /* 2^1 bytes */
-};
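[Editor's note, derived from the comments above rather than new information: the third payload byte of an SDTR message is the transfer period in units of 4 ns, so the default 50 requests a 200 ns period (5 MHz) and the CONFIG_SCSI_NCR53C7xx_FAST value 25 requests 100 ns (10 MHz); the final byte is the REQ/ACK offset, 8 in both synchronous templates, while the 0, 0 pair in async_message falls back to asynchronous transfers and wdtr_message asks for 2^1 = 2 byte (16-bit) wide transfers.]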
-
-#if 0
-/*
- * Function : struct Scsi_Host *find_host (int host)
- *
- * Purpose : KGDB support function which translates a host number
- * to a host structure.
- *
- * Inputs : host - number of SCSI host
- *
- * Returns : NULL on failure, pointer to host structure on success.
- */
-
-static struct Scsi_Host *
-find_host (int host) {
- struct Scsi_Host *h;
- for (h = first_host; h && h->host_no != host; h = h->next);
- if (!h) {
- printk (KERN_ALERT "scsi%d not found\n", host);
- return NULL;
- } else if (h->hostt != the_template) {
- printk (KERN_ALERT "scsi%d is not a NCR board\n", host);
- return NULL;
- }
- return h;
-}
-
-#if 0
-/*
- * Function : request_synchronous (int host, int target)
- *
- * Purpose : KGDB interface which will allow us to negotiate for
- * 	synchronous transfers.  This will be replaced with a more
- * integrated function; perhaps a new entry in the scsi_host
- * structure, accessible via an ioctl() or perhaps /proc/scsi.
- *
- * Inputs : host - number of SCSI host; target - number of target.
- *
- * Returns : 0 when negotiation has been setup for next SCSI command,
- * -1 on failure.
- */
-
-static int
-request_synchronous (int host, int target) {
- struct Scsi_Host *h;
- struct NCR53c7x0_hostdata *hostdata;
- unsigned long flags;
- if (target < 0) {
- printk (KERN_ALERT "target %d is bogus\n", target);
- return -1;
- }
- if (!(h = find_host (host)))
- return -1;
- else if (h->this_id == target) {
- printk (KERN_ALERT "target %d is host ID\n", target);
- return -1;
- }
- else if (target >= h->max_id) {
- printk (KERN_ALERT "target %d exceeds maximum of %d\n", target,
- h->max_id);
- return -1;
- }
- hostdata = (struct NCR53c7x0_hostdata *)h->hostdata[0];
-
- local_irq_save(flags);
- if (hostdata->initiate_sdtr & (1 << target)) {
- local_irq_restore(flags);
- printk (KERN_ALERT "target %d already doing SDTR\n", target);
- return -1;
- }
- hostdata->initiate_sdtr |= (1 << target);
- local_irq_restore(flags);
- return 0;
-}
-#endif
-
-/*
- * Function : request_disconnect (int host, int on_or_off)
- *
- * Purpose : KGDB support function, tells us to allow or disallow
- * disconnections.
- *
- * Inputs : host - number of SCSI host; on_or_off - non-zero to allow,
- * zero to disallow.
- *
- * Returns : 0 on success, -1 on failure.
- */
-
-static int
-request_disconnect (int host, int on_or_off) {
- struct Scsi_Host *h;
- struct NCR53c7x0_hostdata *hostdata;
- if (!(h = find_host (host)))
- return -1;
- hostdata = (struct NCR53c7x0_hostdata *) h->hostdata[0];
- if (on_or_off)
- hostdata->options |= OPTION_DISCONNECT;
- else
- hostdata->options &= ~OPTION_DISCONNECT;
- return 0;
-}
-#endif
-
-/*
- * Function : static void NCR53c7x0_driver_init (struct Scsi_Host *host)
- *
- * Purpose : Initialize internal structures, as required on startup, or
- * after a SCSI bus reset.
- *
- * Inputs : host - pointer to this host adapter's structure
- */
-
-static void
-NCR53c7x0_driver_init (struct Scsi_Host *host) {
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- int i, j;
- u32 *ncrcurrent;
-
- for (i = 0; i < 16; ++i) {
- hostdata->request_sense[i] = 0;
- for (j = 0; j < 8; ++j)
- hostdata->busy[i][j] = 0;
- set_synchronous (host, i, /* sxfer */ 0, hostdata->saved_scntl3, 0);
- }
- hostdata->issue_queue = NULL;
- hostdata->running_list = hostdata->finished_queue =
- hostdata->ncrcurrent = NULL;
- for (i = 0, ncrcurrent = (u32 *) hostdata->schedule;
- i < host->can_queue; ++i, ncrcurrent += 2) {
- ncrcurrent[0] = hostdata->NOP_insn;
- ncrcurrent[1] = 0xdeadbeef;
- }
- ncrcurrent[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24) | DBC_TCI_TRUE;
- ncrcurrent[1] = (u32) virt_to_bus (hostdata->script) +
- hostdata->E_wait_reselect;
- hostdata->reconnect_dsa_head = 0;
- hostdata->addr_reconnect_dsa_head = (u32)
- virt_to_bus((void *) &(hostdata->reconnect_dsa_head));
- hostdata->expecting_iid = 0;
- hostdata->expecting_sto = 0;
- if (hostdata->options & OPTION_ALWAYS_SYNCHRONOUS)
- hostdata->initiate_sdtr = 0xffff;
- else
- hostdata->initiate_sdtr = 0;
- hostdata->talked_to = 0;
- hostdata->idle = 1;
-}
-
-/*
- * Function : static int clock_to_ccf_710 (int clock)
- *
- * Purpose : Return the clock conversion factor for a given SCSI clock.
- *
- * Inputs : clock - SCSI clock expressed in Hz.
- *
- * Returns : ccf on success, -1 on failure.
- */
-
-static int
-clock_to_ccf_710 (int clock) {
- if (clock <= 16666666)
- return -1;
- if (clock <= 25000000)
- return 2; /* Divide by 1.0 */
- else if (clock <= 37500000)
- return 1; /* Divide by 1.5 */
- else if (clock <= 50000000)
- return 0; /* Divide by 2.0 */
- else if (clock <= 66000000)
- return 3; /* Divide by 3.0 */
- else
- return -1;
-}
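[Editor's note, a usage illustration; the 50 MHz input is a made-up example, the mapping itself is read straight off the table above.]

	/* 50 MHz falls in the "<= 50000000" bucket, i.e. clock conversion
	 * factor 0, "divide by 2.0"; anything at or below 16.67 MHz or
	 * above 66 MHz is rejected with -1. */
	int ccf = clock_to_ccf_710(50000000);	/* ccf == 0 */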
-
-/*
- * Function : static int NCR53c7x0_init (struct Scsi_Host *host)
- *
- * Purpose : initialize the internal structures for a given SCSI host
- *
- * Inputs : host - pointer to this host adapter's structure
- *
- * Preconditions : when this function is called, the chip_type
- * field of the hostdata structure MUST have been set.
- *
- * Returns : 0 on success, -1 on failure.
- */
-
-int
-NCR53c7x0_init (struct Scsi_Host *host) {
- NCR53c7x0_local_declare();
- int i, ccf;
- unsigned char revision;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- /*
- * There are some things which we need to know about in order to provide
- * a semblance of support. Print 'em if they aren't what we expect,
- * otherwise don't add to the noise.
- *
- * -1 means we don't know what to expect.
- */
- int val, flags;
- char buf[32];
- int expected_id = -1;
- int expected_clock = -1;
- int uninitialized = 0;
-#ifdef NO_IO_SPACE
- int expected_mapping = OPTION_MEMORY_MAPPED;
-#else
- int expected_mapping = OPTION_IO_MAPPED;
-#endif
- for (i=0;i<7;i++)
- hostdata->valid_ids[i] = 1; /* Default all ID's to scan */
-
- /* Parse commandline flags */
- if (check_setup_strings("noasync",&flags,&val,buf))
- {
- hostdata->options |= OPTION_NO_ASYNC;
- hostdata->options &= ~(OPTION_SYNCHRONOUS | OPTION_ALWAYS_SYNCHRONOUS);
- }
-
- if (check_setup_strings("nosync",&flags,&val,buf))
- {
- hostdata->options &= ~(OPTION_SYNCHRONOUS | OPTION_ALWAYS_SYNCHRONOUS);
- }
-
- if (check_setup_strings("nodisconnect",&flags,&val,buf))
- hostdata->options &= ~OPTION_DISCONNECT;
-
- if (check_setup_strings("validids",&flags,&val,buf))
- {
- for (i=0;i<7;i++)
- hostdata->valid_ids[i] = val & (1<<i);
- }
-
- if ((i = check_setup_strings("next",&flags,&val,buf)))
- {
- while (i)
- setup_used[--i] = 1;
- }
-
- if (check_setup_strings("opthi",&flags,&val,buf))
- hostdata->options = (long long)val << 32;
- if (check_setup_strings("optlo",&flags,&val,buf))
- hostdata->options |= val;
-
- NCR53c7x0_local_setup(host);
- switch (hostdata->chip) {
- case 710:
- case 770:
- hostdata->dstat_sir_intr = NCR53c7x0_dstat_sir_intr;
- hostdata->init_save_regs = NULL;
- hostdata->dsa_fixup = NCR53c7xx_dsa_fixup;
- hostdata->init_fixup = NCR53c7x0_init_fixup;
- hostdata->soft_reset = NCR53c7x0_soft_reset;
- hostdata->run_tests = NCR53c7xx_run_tests;
- expected_clock = hostdata->scsi_clock;
- expected_id = 7;
- break;
- default:
- printk ("scsi%d : chip type of %d is not supported yet, detaching.\n",
- host->host_no, hostdata->chip);
- scsi_unregister (host);
- return -1;
- }
-
- /* Assign constants accessed by NCR */
- hostdata->NCR53c7xx_zero = 0;
- hostdata->NCR53c7xx_msg_reject = MESSAGE_REJECT;
- hostdata->NCR53c7xx_msg_abort = ABORT;
- hostdata->NCR53c7xx_msg_nop = NOP;
- hostdata->NOP_insn = (DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24;
- if (expected_mapping == -1 ||
- (hostdata->options & (OPTION_MEMORY_MAPPED)) !=
- (expected_mapping & OPTION_MEMORY_MAPPED))
- printk ("scsi%d : using %s mapped access\n", host->host_no,
- (hostdata->options & OPTION_MEMORY_MAPPED) ? "memory" :
- "io");
-
- hostdata->dmode = (hostdata->chip == 700 || hostdata->chip == 70066) ?
- DMODE_REG_00 : DMODE_REG_10;
- hostdata->istat = ((hostdata->chip / 100) == 8) ?
- ISTAT_REG_800 : ISTAT_REG_700;
-
-/* We have to assume that this may be the first access to the chip, so
- * we must set EA in DCNTL. */
-
- NCR53c7x0_write8 (DCNTL_REG, DCNTL_10_EA|DCNTL_10_COM);
-
-
-/* Only the ISTAT register is readable when the NCR is running, so make
- sure it's halted. */
- ncr_halt(host);
-
-/*
- * XXX - the NCR53c700 uses bitfielded registers for SCID, SDID, etc,
- * as does the 710 with one bit per SCSI ID. Conversely, the NCR
- * uses a normal, 3 bit binary representation of these values.
- *
- * Get the rest of the NCR documentation, and FIND OUT where the change
- * was.
- */
-
-#if 0
-    /* May not be able to do this - chip may not have been set up yet */
- tmp = hostdata->this_id_mask = NCR53c7x0_read8(SCID_REG);
- for (host->this_id = 0; tmp != 1; tmp >>=1, ++host->this_id);
-#else
- host->this_id = 7;
-#endif
-
-/*
- * Note : we should never encounter a board setup for ID0. So,
- * if we see ID0, assume that it was uninitialized and set it
- * to the industry standard 7.
- */
- if (!host->this_id) {
- printk("scsi%d : initiator ID was %d, changing to 7\n",
- host->host_no, host->this_id);
- host->this_id = 7;
- hostdata->this_id_mask = 1 << 7;
- uninitialized = 1;
- };
-
- if (expected_id == -1 || host->this_id != expected_id)
- printk("scsi%d : using initiator ID %d\n", host->host_no,
- host->this_id);
-
- /*
- * Save important registers to allow a soft reset.
- */
-
- /*
- * CTEST7 controls cache snooping, burst mode, and support for
- * external differential drivers. This isn't currently used - the
- * default value may not be optimal anyway.
- * Even worse, it may never have been set up since reset.
- */
- hostdata->saved_ctest7 = NCR53c7x0_read8(CTEST7_REG) & CTEST7_SAVE;
- revision = (NCR53c7x0_read8(CTEST8_REG) & 0xF0) >> 4;
- switch (revision) {
- case 1: revision = 0; break;
- case 2: revision = 1; break;
- case 4: revision = 2; break;
- case 8: revision = 3; break;
- default: revision = 255; break;
- }
- printk("scsi%d: Revision 0x%x\n",host->host_no,revision);
-
- if ((revision == 0 || revision == 255) && (hostdata->options & (OPTION_SYNCHRONOUS|OPTION_DISCONNECT|OPTION_ALWAYS_SYNCHRONOUS)))
- {
- printk ("scsi%d: Disabling sync working and disconnect/reselect\n",
- host->host_no);
- hostdata->options &= ~(OPTION_SYNCHRONOUS|OPTION_DISCONNECT|OPTION_ALWAYS_SYNCHRONOUS);
- }
-
- /*
- * On NCR53c700 series chips, DCNTL controls the SCSI clock divisor,
- * on 800 series chips, it allows for a totem-pole IRQ driver.
- * NOTE saved_dcntl currently overwritten in init function.
- * The value read here may be garbage anyway, MVME16x board at least
- * does not initialise chip if kernel arrived via tftp.
- */
-
- hostdata->saved_dcntl = NCR53c7x0_read8(DCNTL_REG);
-
- /*
- * DMODE controls DMA burst length, and on 700 series chips,
- * 286 mode and bus width
- * NOTE: On MVME16x, chip may have been reset, so this could be a
- * power-on/reset default value.
- */
- hostdata->saved_dmode = NCR53c7x0_read8(hostdata->dmode);
-
- /*
- * Now that burst length and enabled/disabled status is known,
- * clue the user in on it.
- */
-
- ccf = clock_to_ccf_710 (expected_clock);
-
- for (i = 0; i < 16; ++i)
- hostdata->cmd_allocated[i] = 0;
-
- if (hostdata->init_save_regs)
- hostdata->init_save_regs (host);
- if (hostdata->init_fixup)
- hostdata->init_fixup (host);
-
- if (!the_template) {
- the_template = host->hostt;
- first_host = host;
- }
-
- /*
- * Linux SCSI drivers have always been plagued with initialization
- * problems - some didn't work with the BIOS disabled since they expected
- * initialization from it, some didn't work when the networking code
- * was enabled and registers got scrambled, etc.
- *
- * To avoid problems like this, in the future, we will do a soft
- * reset on the SCSI chip, taking it back to a sane state.
- */
-
- hostdata->soft_reset (host);
-
-#if 1
- hostdata->debug_count_limit = -1;
-#else
- hostdata->debug_count_limit = 1;
-#endif
- hostdata->intrs = -1;
- hostdata->resets = -1;
- memcpy ((void *) hostdata->synchronous_want, (void *) sdtr_message,
- sizeof (hostdata->synchronous_want));
-
- NCR53c7x0_driver_init (host);
-
- if (request_irq(host->irq, NCR53c7x0_intr, IRQF_SHARED, "53c7xx", host))
- {
- printk("scsi%d : IRQ%d not free, detaching\n",
- host->host_no, host->irq);
- goto err_unregister;
- }
-
- if ((hostdata->run_tests && hostdata->run_tests(host) == -1) ||
- (hostdata->options & OPTION_DEBUG_TESTS_ONLY)) {
- /* XXX Should disable interrupts, etc. here */
- goto err_free_irq;
- } else {
- if (host->io_port) {
- host->n_io_port = 128;
- if (!request_region (host->io_port, host->n_io_port, "ncr53c7xx"))
- goto err_free_irq;
- }
- }
-
- if (NCR53c7x0_read8 (SBCL_REG) & SBCL_BSY) {
- printk ("scsi%d : bus wedge, doing SCSI reset\n", host->host_no);
- hard_reset (host);
- }
- return 0;
-
- err_free_irq:
- free_irq(host->irq, NCR53c7x0_intr);
- err_unregister:
- scsi_unregister(host);
- return -1;
-}
-
-/*
- * Function : int ncr53c7xx_init(struct scsi_host_template *tpnt, int board, int chip,
- * unsigned long base, int io_port, int irq, int dma, long long options,
- * int clock);
- *
- * Purpose : initializes a NCR53c7,8x0 based on base addresses,
- * IRQ, and DMA channel.
- *
- * Inputs : tpnt - Template for this SCSI adapter, board - board level
- * product, chip - 710
- *
- * Returns : 0 on success, -1 on failure.
- *
- */
-
-int
-ncr53c7xx_init (struct scsi_host_template *tpnt, int board, int chip,
- unsigned long base, int io_port, int irq, int dma,
- long long options, int clock)
-{
- struct Scsi_Host *instance;
- struct NCR53c7x0_hostdata *hostdata;
- char chip_str[80];
- int script_len = 0, dsa_len = 0, size = 0, max_cmd_size = 0,
- schedule_size = 0, ok = 0;
- void *tmp;
- unsigned long page;
-
- switch (chip) {
- case 710:
- case 770:
- schedule_size = (tpnt->can_queue + 1) * 8 /* JUMP instruction size */;
- script_len = NCR53c7xx_script_len;
- dsa_len = NCR53c7xx_dsa_len;
- options |= OPTION_INTFLY;
- sprintf (chip_str, "NCR53c%d", chip);
- break;
- default:
- printk("scsi-ncr53c7xx : unsupported SCSI chip %d\n", chip);
- return -1;
- }
-
- printk("scsi-ncr53c7xx : %s at memory 0x%lx, io 0x%x, irq %d",
- chip_str, base, io_port, irq);
- if (dma == DMA_NONE)
- printk("\n");
- else
- printk(", dma %d\n", dma);
-
- if (options & OPTION_DEBUG_PROBE_ONLY) {
- printk ("scsi-ncr53c7xx : probe only enabled, aborting initialization\n");
- return -1;
- }
-
- max_cmd_size = sizeof(struct NCR53c7x0_cmd) + dsa_len +
- /* Size of dynamic part of command structure : */
- 2 * /* Worst case : we don't know if we need DATA IN or DATA out */
- ( 2 * /* Current instructions per scatter/gather segment */
- tpnt->sg_tablesize +
- 3 /* Current startup / termination required per phase */
- ) *
- 8 /* Each instruction is eight bytes */;
-
- /* Allocate fixed part of hostdata, dynamic part to hold appropriate
- SCSI SCRIPT(tm) plus a single, maximum-sized NCR53c7x0_cmd structure.
-
- We need a NCR53c7x0_cmd structure for scan_scsis() when we are
- not loaded as a module, and when we're loaded as a module, we
- can't use a non-dynamically allocated structure because modules
- are vmalloc()'d, which can allow structures to cross page
- boundaries and breaks our physical/virtual address assumptions
- for DMA.
-
- So, we stick it past the end of our hostdata structure.
-
- ASSUMPTION :
- Regardless of how many simultaneous SCSI commands we allow,
- the probe code only executes a _single_ instruction at a time,
- so we only need one here, and don't need to allocate NCR53c7x0_cmd
- structures for each target until we are no longer in scan_scsis
- and kmalloc() has become functional (memory_init() happens
- after all device driver initialization).
- */
-
- size = sizeof(struct NCR53c7x0_hostdata) + script_len +
- /* Note that alignment will be guaranteed, since we put the command
- allocated at probe time after the fixed-up SCSI script, which
- consists of 32 bit words, aligned on a 32 bit boundary. But
- on a 64bit machine we need 8 byte alignment for hostdata->free, so
- we add in another 4 bytes to take care of potential misalignment
- */
- (sizeof(void *) - sizeof(u32)) + max_cmd_size + schedule_size;
-
- page = __get_free_pages(GFP_ATOMIC,1);
- if(page==0)
- {
- printk(KERN_ERR "53c7xx: out of memory.\n");
- return -ENOMEM;
- }
-#ifdef FORCE_DSA_ALIGNMENT
- /*
- * 53c710 rev.0 doesn't have an add-with-carry instruction.
- * Ensure we allocate enough memory to force DSA alignment.
- */
- size += 256;
-#endif
- /* Size should be < 8K, so we can fit it in two pages. */
- if (size > 8192) {
- printk(KERN_ERR "53c7xx: hostdata > 8K\n");
- return -1;
- }
-
- instance = scsi_register (tpnt, 4);
- if (!instance)
- {
- free_page(page);
- return -1;
- }
- instance->hostdata[0] = page;
- memset((void *)instance->hostdata[0], 0, 8192);
- cache_push(virt_to_phys((void *)(instance->hostdata[0])), 8192);
- cache_clear(virt_to_phys((void *)(instance->hostdata[0])), 8192);
- kernel_set_cachemode((void *)instance->hostdata[0], 8192, IOMAP_NOCACHE_SER);
-
- /* FIXME : if we ever support an ISA NCR53c7xx based board, we
- need to check if the chip is running in a 16 bit mode, and if so
- unregister it if it is past the 16M (0x1000000) mark */
-
- hostdata = (struct NCR53c7x0_hostdata *)instance->hostdata[0];
- hostdata->size = size;
- hostdata->script_count = script_len / sizeof(u32);
- hostdata->board = board;
- hostdata->chip = chip;
-
- /*
- * Being memory mapped is more desirable, since
- *
- * - Memory accesses may be faster.
- *
- * - The destination and source address spaces are the same for
- * all instructions, meaning we don't have to twiddle dmode or
- * any other registers.
- *
- * So, we try for memory mapped, and if we don't get it,
- * we go for port mapped, and that failing we tell the user
- * it can't work.
- */
-
- if (base) {
- instance->base = base;
- /* Check for forced I/O mapping */
- if (!(options & OPTION_IO_MAPPED)) {
- options |= OPTION_MEMORY_MAPPED;
- ok = 1;
- }
- } else {
- options &= ~OPTION_MEMORY_MAPPED;
- }
-
- if (io_port) {
- instance->io_port = io_port;
- options |= OPTION_IO_MAPPED;
- ok = 1;
- } else {
- options &= ~OPTION_IO_MAPPED;
- }
-
- if (!ok) {
- printk ("scsi%d : not initializing, no I/O or memory mapping known \n",
- instance->host_no);
- scsi_unregister (instance);
- return -1;
- }
- instance->irq = irq;
- instance->dma_channel = dma;
-
- hostdata->options = options;
- hostdata->dsa_len = dsa_len;
- hostdata->max_cmd_size = max_cmd_size;
- hostdata->num_cmds = 1;
- hostdata->scsi_clock = clock;
- /* Initialize single command */
- tmp = (hostdata->script + hostdata->script_count);
-#ifdef FORCE_DSA_ALIGNMENT
- {
- void *t = ROUNDUP(tmp, void *);
- if (((u32)t & 0xff) > CmdPageStart)
- t = (void *)((u32)t + 255);
- t = (void *)(((u32)t & ~0xff) + CmdPageStart);
- hostdata->free = t;
-#if 0
- printk ("scsi: Registered size increased by 256 to %d\n", size);
- printk ("scsi: CmdPageStart = 0x%02x\n", CmdPageStart);
- printk ("scsi: tmp = 0x%08x, hostdata->free set to 0x%08x\n",
- (u32)tmp, (u32)t);
-#endif
- }
-#else
- hostdata->free = ROUNDUP(tmp, void *);
-#endif
- hostdata->free->real = tmp;
- hostdata->free->size = max_cmd_size;
- hostdata->free->free = NULL;
- hostdata->free->next = NULL;
- hostdata->extra_allocate = 0;
-
- /* Allocate command start code space */
- hostdata->schedule = (chip == 700 || chip == 70066) ?
- NULL : (u32 *) ((char *)hostdata->free + max_cmd_size);
-
-/*
- * For diagnostic purposes, we don't really care how fast things blaze.
- * For profiling, we want to access the 800ns resolution system clock,
- * using a 'C' call on the host processor.
- *
- * Therefore, there's no need for the NCR chip to directly manipulate
- * this data, and we should put it wherever is most convenient for
- * Linux.
- */
- if (track_events)
- hostdata->events = (struct NCR53c7x0_event *) (track_events ?
- vmalloc (sizeof (struct NCR53c7x0_event) * track_events) : NULL);
- else
- hostdata->events = NULL;
-
- if (hostdata->events) {
- memset ((void *) hostdata->events, 0, sizeof(struct NCR53c7x0_event) *
- track_events);
- hostdata->event_size = track_events;
- hostdata->event_index = 0;
- } else
- hostdata->event_size = 0;
-
- return NCR53c7x0_init(instance);
-}
-
-
-/*
- * Function : static void NCR53c7x0_init_fixup (struct Scsi_Host *host)
- *
- * Purpose : copy and fixup the SCSI SCRIPTS(tm) code for this device.
- *
- * Inputs : host - pointer to this host adapter's structure
- *
- */
-
-static void
-NCR53c7x0_init_fixup (struct Scsi_Host *host) {
- NCR53c7x0_local_declare();
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- unsigned char tmp;
- int i, ncr_to_memory, memory_to_ncr;
- u32 base;
- NCR53c7x0_local_setup(host);
-
-
- /* XXX - NOTE : this code MUST be made endian aware */
- /* Copy code into buffer that was allocated at detection time. */
- memcpy ((void *) hostdata->script, (void *) SCRIPT,
- sizeof(SCRIPT));
- /* Fixup labels */
- for (i = 0; i < PATCHES; ++i)
- hostdata->script[LABELPATCHES[i]] +=
- virt_to_bus(hostdata->script);
- /* Fixup addresses of constants that used to be EXTERNAL */
-
- patch_abs_32 (hostdata->script, 0, NCR53c7xx_msg_abort,
- virt_to_bus(&(hostdata->NCR53c7xx_msg_abort)));
- patch_abs_32 (hostdata->script, 0, NCR53c7xx_msg_reject,
- virt_to_bus(&(hostdata->NCR53c7xx_msg_reject)));
- patch_abs_32 (hostdata->script, 0, NCR53c7xx_zero,
- virt_to_bus(&(hostdata->NCR53c7xx_zero)));
- patch_abs_32 (hostdata->script, 0, NCR53c7xx_sink,
- virt_to_bus(&(hostdata->NCR53c7xx_sink)));
- patch_abs_32 (hostdata->script, 0, NOP_insn,
- virt_to_bus(&(hostdata->NOP_insn)));
- patch_abs_32 (hostdata->script, 0, schedule,
- virt_to_bus((void *) hostdata->schedule));
-
- /* Fixup references to external variables: */
- for (i = 0; i < EXTERNAL_PATCHES_LEN; ++i)
- hostdata->script[EXTERNAL_PATCHES[i].offset] +=
- virt_to_bus(EXTERNAL_PATCHES[i].address);
-
- /*
- * Fixup absolutes set at boot-time.
- *
- * All non-code absolute variables suffixed with "dsa_" and "int_"
- * are constants, and need no fixup provided the assembler has done
- * it for us (I don't know what the "real" NCR assembler does in
- * this case, my assembler does the right magic).
- */
-
- patch_abs_rwri_data (hostdata->script, 0, dsa_save_data_pointer,
- Ent_dsa_code_save_data_pointer - Ent_dsa_zero);
- patch_abs_rwri_data (hostdata->script, 0, dsa_restore_pointers,
- Ent_dsa_code_restore_pointers - Ent_dsa_zero);
- patch_abs_rwri_data (hostdata->script, 0, dsa_check_reselect,
- Ent_dsa_code_check_reselect - Ent_dsa_zero);
-
- /*
- * Just for the hell of it, preserve the settings of
- * Burst Length and Enable Read Line bits from the DMODE
- * register. Make sure SCRIPTS start automagically.
- */
-
-#if defined(CONFIG_MVME16x) || defined(CONFIG_BVME6000)
- /* We know better what we want than 16xBug does! */
- tmp = DMODE_10_BL_8 | DMODE_10_FC2;
-#else
- tmp = NCR53c7x0_read8(DMODE_REG_10);
- tmp &= (DMODE_BL_MASK | DMODE_10_FC2 | DMODE_10_FC1 | DMODE_710_PD |
- DMODE_710_UO);
-#endif
-
- if (!(hostdata->options & OPTION_MEMORY_MAPPED)) {
- base = (u32) host->io_port;
- memory_to_ncr = tmp|DMODE_800_DIOM;
- ncr_to_memory = tmp|DMODE_800_SIOM;
- } else {
- base = virt_to_bus((void *)host->base);
- memory_to_ncr = ncr_to_memory = tmp;
- }
-
- /* SCRATCHB_REG_10 == SCRATCHA_REG_800, as it happens */
- patch_abs_32 (hostdata->script, 0, addr_scratch, base + SCRATCHA_REG_800);
- patch_abs_32 (hostdata->script, 0, addr_temp, base + TEMP_REG);
- patch_abs_32 (hostdata->script, 0, addr_dsa, base + DSA_REG);
-
- /*
- * I needed some variables in the script to be accessible to
- * both the NCR chip and the host processor. For these variables,
- * I made the arbitrary decision to store them directly in the
- * hostdata structure rather than in the RELATIVE area of the
- * SCRIPTS.
- */
-
-
- patch_abs_rwri_data (hostdata->script, 0, dmode_memory_to_memory, tmp);
- patch_abs_rwri_data (hostdata->script, 0, dmode_memory_to_ncr, memory_to_ncr);
- patch_abs_rwri_data (hostdata->script, 0, dmode_ncr_to_memory, ncr_to_memory);
-
- patch_abs_32 (hostdata->script, 0, msg_buf,
- virt_to_bus((void *)&(hostdata->msg_buf)));
- patch_abs_32 (hostdata->script, 0, reconnect_dsa_head,
- virt_to_bus((void *)&(hostdata->reconnect_dsa_head)));
- patch_abs_32 (hostdata->script, 0, addr_reconnect_dsa_head,
- virt_to_bus((void *)&(hostdata->addr_reconnect_dsa_head)));
- patch_abs_32 (hostdata->script, 0, reselected_identify,
- virt_to_bus((void *)&(hostdata->reselected_identify)));
-/* reselected_tag is currently unused */
-#if 0
- patch_abs_32 (hostdata->script, 0, reselected_tag,
- virt_to_bus((void *)&(hostdata->reselected_tag)));
-#endif
-
- patch_abs_32 (hostdata->script, 0, test_dest,
- virt_to_bus((void*)&hostdata->test_dest));
- patch_abs_32 (hostdata->script, 0, test_src,
- virt_to_bus(&hostdata->test_source));
- patch_abs_32 (hostdata->script, 0, saved_dsa,
- virt_to_bus((void *)&hostdata->saved2_dsa));
- patch_abs_32 (hostdata->script, 0, emulfly,
- virt_to_bus((void *)&hostdata->emulated_intfly));
-
- patch_abs_rwri_data (hostdata->script, 0, dsa_check_reselect,
- (unsigned char)(Ent_dsa_code_check_reselect - Ent_dsa_zero));
-
-/* These are for event logging; the ncr_event enum contains the
- actual interrupt numbers. */
-#ifdef A_int_EVENT_SELECT
- patch_abs_32 (hostdata->script, 0, int_EVENT_SELECT, (u32) EVENT_SELECT);
-#endif
-#ifdef A_int_EVENT_DISCONNECT
- patch_abs_32 (hostdata->script, 0, int_EVENT_DISCONNECT, (u32) EVENT_DISCONNECT);
-#endif
-#ifdef A_int_EVENT_RESELECT
- patch_abs_32 (hostdata->script, 0, int_EVENT_RESELECT, (u32) EVENT_RESELECT);
-#endif
-#ifdef A_int_EVENT_COMPLETE
- patch_abs_32 (hostdata->script, 0, int_EVENT_COMPLETE, (u32) EVENT_COMPLETE);
-#endif
-#ifdef A_int_EVENT_IDLE
- patch_abs_32 (hostdata->script, 0, int_EVENT_IDLE, (u32) EVENT_IDLE);
-#endif
-#ifdef A_int_EVENT_SELECT_FAILED
- patch_abs_32 (hostdata->script, 0, int_EVENT_SELECT_FAILED,
- (u32) EVENT_SELECT_FAILED);
-#endif
-#ifdef A_int_EVENT_BEFORE_SELECT
- patch_abs_32 (hostdata->script, 0, int_EVENT_BEFORE_SELECT,
- (u32) EVENT_BEFORE_SELECT);
-#endif
-#ifdef A_int_EVENT_RESELECT_FAILED
- patch_abs_32 (hostdata->script, 0, int_EVENT_RESELECT_FAILED,
- (u32) EVENT_RESELECT_FAILED);
-#endif
-
- /*
- * Make sure the NCR and Linux code agree on the location of
- * certain fields.
- */
-
- hostdata->E_accept_message = Ent_accept_message;
- hostdata->E_command_complete = Ent_command_complete;
- hostdata->E_cmdout_cmdout = Ent_cmdout_cmdout;
- hostdata->E_data_transfer = Ent_data_transfer;
- hostdata->E_debug_break = Ent_debug_break;
- hostdata->E_dsa_code_template = Ent_dsa_code_template;
- hostdata->E_dsa_code_template_end = Ent_dsa_code_template_end;
- hostdata->E_end_data_transfer = Ent_end_data_transfer;
- hostdata->E_initiator_abort = Ent_initiator_abort;
- hostdata->E_msg_in = Ent_msg_in;
- hostdata->E_other_transfer = Ent_other_transfer;
- hostdata->E_other_in = Ent_other_in;
- hostdata->E_other_out = Ent_other_out;
- hostdata->E_reject_message = Ent_reject_message;
- hostdata->E_respond_message = Ent_respond_message;
- hostdata->E_select = Ent_select;
- hostdata->E_select_msgout = Ent_select_msgout;
- hostdata->E_target_abort = Ent_target_abort;
-#ifdef Ent_test_0
- hostdata->E_test_0 = Ent_test_0;
-#endif
- hostdata->E_test_1 = Ent_test_1;
- hostdata->E_test_2 = Ent_test_2;
-#ifdef Ent_test_3
- hostdata->E_test_3 = Ent_test_3;
-#endif
- hostdata->E_wait_reselect = Ent_wait_reselect;
- hostdata->E_dsa_code_begin = Ent_dsa_code_begin;
-
- hostdata->dsa_cmdout = A_dsa_cmdout;
- hostdata->dsa_cmnd = A_dsa_cmnd;
- hostdata->dsa_datain = A_dsa_datain;
- hostdata->dsa_dataout = A_dsa_dataout;
- hostdata->dsa_end = A_dsa_end;
- hostdata->dsa_msgin = A_dsa_msgin;
- hostdata->dsa_msgout = A_dsa_msgout;
- hostdata->dsa_msgout_other = A_dsa_msgout_other;
- hostdata->dsa_next = A_dsa_next;
- hostdata->dsa_select = A_dsa_select;
- hostdata->dsa_start = Ent_dsa_code_template - Ent_dsa_zero;
- hostdata->dsa_status = A_dsa_status;
- hostdata->dsa_jump_dest = Ent_dsa_code_fix_jump - Ent_dsa_zero +
- 8 /* destination operand */;
-
- /* sanity check */
- if (A_dsa_fields_start != Ent_dsa_code_template_end -
- Ent_dsa_zero)
- printk("scsi%d : NCR dsa_fields start is %d not %d\n",
- host->host_no, A_dsa_fields_start, Ent_dsa_code_template_end -
- Ent_dsa_zero);
-
- printk("scsi%d : NCR code relocated to 0x%lx (virt 0x%p)\n", host->host_no,
- virt_to_bus(hostdata->script), hostdata->script);
-}
-
-/*
- * Function : static int NCR53c7xx_run_tests (struct Scsi_Host *host)
- *
- * Purpose : run various verification tests on the NCR chip,
- * including interrupt generation, and proper bus mastering
- * operation.
- *
- * Inputs : host - a properly initialized Scsi_Host structure
- *
- * Preconditions : the NCR chip must be in a halted state.
- *
- * Returns : 0 if all tests were successful, -1 on error.
- *
- */
-
-static int
-NCR53c7xx_run_tests (struct Scsi_Host *host) {
- NCR53c7x0_local_declare();
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- unsigned long timeout;
- u32 start;
- int failed, i;
- unsigned long flags;
- NCR53c7x0_local_setup(host);
-
- /* The NCR chip _must_ be idle to run the test scripts */
-
- local_irq_save(flags);
- if (!hostdata->idle) {
- printk ("scsi%d : chip not idle, aborting tests\n", host->host_no);
- local_irq_restore(flags);
- return -1;
- }
-
- /*
-     * Check for functional interrupts; this could work as an
- * autoprobe routine.
- */
-
- if ((hostdata->options & OPTION_DEBUG_TEST1) &&
- hostdata->state != STATE_DISABLED) {
- hostdata->idle = 0;
- hostdata->test_running = 1;
- hostdata->test_completed = -1;
- hostdata->test_dest = 0;
- hostdata->test_source = 0xdeadbeef;
- start = virt_to_bus (hostdata->script) + hostdata->E_test_1;
- hostdata->state = STATE_RUNNING;
- printk ("scsi%d : test 1", host->host_no);
- NCR53c7x0_write32 (DSP_REG, start);
- if (hostdata->options & OPTION_DEBUG_TRACE)
- NCR53c7x0_write8 (DCNTL_REG, hostdata->saved_dcntl | DCNTL_SSM |
- DCNTL_STD);
- printk (" started\n");
- local_irq_restore(flags);
-
- /*
- * This is currently a 0.5 second timeout, since (in theory) no slow
- * board will take that long.  In practice, we've seen one
- * Pentium which occasionally fails with this but works with a
- * timeout ten times as long.
- */
-
- timeout = jiffies + 5 * HZ / 10;
- while ((hostdata->test_completed == -1) && time_before(jiffies, timeout))
- barrier();
-
- failed = 1;
- if (hostdata->test_completed == -1)
- printk ("scsi%d : driver test 1 timed out%s\n",host->host_no ,
- (hostdata->test_dest == 0xdeadbeef) ?
- " due to lost interrupt.\n"
- " Please verify that the correct IRQ is being used for your board,\n"
- : "");
- else if (hostdata->test_completed != 1)
- printk ("scsi%d : test 1 bad interrupt value (%d)\n",
- host->host_no, hostdata->test_completed);
- else
- failed = (hostdata->test_dest != 0xdeadbeef);
-
- if (hostdata->test_dest != 0xdeadbeef) {
- printk ("scsi%d : driver test 1 read 0x%x instead of 0xdeadbeef indicating a\n"
- " probable cache invalidation problem. Please configure caching\n"
- " as write-through or disabled\n",
- host->host_no, hostdata->test_dest);
- }
-
- if (failed) {
- printk ("scsi%d : DSP = 0x%p (script at 0x%p, start at 0x%x)\n",
- host->host_no, bus_to_virt(NCR53c7x0_read32(DSP_REG)),
- hostdata->script, start);
- printk ("scsi%d : DSPS = 0x%x\n", host->host_no,
- NCR53c7x0_read32(DSPS_REG));
- local_irq_restore(flags);
- return -1;
- }
- hostdata->test_running = 0;
- }
-
- if ((hostdata->options & OPTION_DEBUG_TEST2) &&
- hostdata->state != STATE_DISABLED) {
- u32 dsa[48];
- unsigned char identify = IDENTIFY(0, 0);
- unsigned char cmd[6];
- unsigned char data[36];
- unsigned char status = 0xff;
- unsigned char msg = 0xff;
-
- cmd[0] = INQUIRY;
- cmd[1] = cmd[2] = cmd[3] = cmd[5] = 0;
- cmd[4] = sizeof(data);
-
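-	/*
-	 * Hand-built DSA for the test script: the pairs below are
-	 * count/address entries for the one-byte IDENTIFY message, the
-	 * six-byte INQUIRY CDB, the data buffer, and the single status
-	 * and message-in bytes.  dsa[0] (the bit-mapped target ID) is
-	 * filled in inside the loop below.
-	 */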
- dsa[2] = 1;
- dsa[3] = virt_to_bus(&identify);
- dsa[4] = 6;
- dsa[5] = virt_to_bus(&cmd);
- dsa[6] = sizeof(data);
- dsa[7] = virt_to_bus(&data);
- dsa[8] = 1;
- dsa[9] = virt_to_bus(&status);
- dsa[10] = 1;
- dsa[11] = virt_to_bus(&msg);
-
- for (i = 0; i < 6; ++i) {
-#ifdef VALID_IDS
- if (!hostdata->valid_ids[i])
- continue;
-#endif
- local_irq_disable();
- if (!hostdata->idle) {
- printk ("scsi%d : chip not idle, aborting tests\n", host->host_no);
- local_irq_restore(flags);
- return -1;
- }
-
- /* 710: bit mapped scsi ID, async */
- dsa[0] = (1 << i) << 16;
- hostdata->idle = 0;
- hostdata->test_running = 2;
- hostdata->test_completed = -1;
- start = virt_to_bus(hostdata->script) + hostdata->E_test_2;
- hostdata->state = STATE_RUNNING;
- NCR53c7x0_write32 (DSA_REG, virt_to_bus(dsa));
- NCR53c7x0_write32 (DSP_REG, start);
- if (hostdata->options & OPTION_DEBUG_TRACE)
- NCR53c7x0_write8 (DCNTL_REG, hostdata->saved_dcntl |
- DCNTL_SSM | DCNTL_STD);
- local_irq_restore(flags);
-
- timeout = jiffies + 5 * HZ; /* arbitrary */
- while ((hostdata->test_completed == -1) && time_before(jiffies, timeout))
- barrier();
-
- NCR53c7x0_write32 (DSA_REG, 0);
-
- if (hostdata->test_completed == 2) {
- data[35] = 0;
- printk ("scsi%d : test 2 INQUIRY to target %d, lun 0 : %s\n",
- host->host_no, i, data + 8);
- printk ("scsi%d : status ", host->host_no);
- scsi_print_status (status);
- printk ("\nscsi%d : message ", host->host_no);
- spi_print_msg(&msg);
- printk ("\n");
- } else if (hostdata->test_completed == 3) {
- printk("scsi%d : test 2 no connection with target %d\n",
- host->host_no, i);
- if (!hostdata->idle) {
- printk("scsi%d : not idle\n", host->host_no);
- local_irq_restore(flags);
- return -1;
- }
- } else if (hostdata->test_completed == -1) {
- printk ("scsi%d : test 2 timed out\n", host->host_no);
- local_irq_restore(flags);
- return -1;
- }
- hostdata->test_running = 0;
- }
- }
-
- local_irq_restore(flags);
- return 0;
-}
-
-/*
- * Function : static void NCR53c7xx_dsa_fixup (struct NCR53c7x0_cmd *cmd)
- *
- * Purpose : copy the NCR53c8xx dsa structure into cmd's dsa buffer,
- * performing all necessary relocation.
- *
- * Inputs : cmd, a NCR53c7x0_cmd structure with a dsa area large
- * enough to hold the NCR53c8xx dsa.
- */
-
-static void
-NCR53c7xx_dsa_fixup (struct NCR53c7x0_cmd *cmd) {
- Scsi_Cmnd *c = cmd->cmd;
- struct Scsi_Host *host = c->device->host;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- int i;
-
- memcpy (cmd->dsa, hostdata->script + (hostdata->E_dsa_code_template / 4),
- hostdata->E_dsa_code_template_end - hostdata->E_dsa_code_template);
-
- /*
- * Note : within the NCR 'C' code, dsa points to the _start_
- * of the DSA structure, and _not_ to dsa_zero, the offset within
- * that structure which is used to facilitate shorter signed offsets
- * for the 8 bit ALU.
- *
- * The implications of this are that
- *
- * - 32 bit A_dsa_* absolute values require an additional
- * dsa_zero added to their value to be correct, since they are
- * relative to dsa_zero which is in essentially a separate
- * space from the code symbols.
- *
- * - All other symbols require no special treatment.
- */
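-
-    /*
-     * Concretely, that is why dsa_temp_next below is patched to
-     * virt_to_bus(cmd->dsa) + Ent_dsa_zero - Ent_dsa_code_template +
-     * A_dsa_next : cmd->dsa holds a copy of the code template, so we
-     * rebase to dsa_zero before adding the dsa_zero-relative
-     * A_dsa_next offset.
-     */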
-
- patch_abs_tci_data (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_temp_lun, c->device->lun);
- patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_temp_addr_next, virt_to_bus(&cmd->dsa_next_addr));
- patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_temp_next, virt_to_bus(cmd->dsa) + Ent_dsa_zero -
- Ent_dsa_code_template + A_dsa_next);
- patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_temp_sync, virt_to_bus((void *)hostdata->sync[c->device->id].script));
- patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_sscf_710, virt_to_bus((void *)&hostdata->sync[c->device->id].sscf_710));
- patch_abs_tci_data (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_temp_target, 1 << c->device->id);
- /* XXX - new pointer stuff */
- patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_temp_addr_saved_pointer, virt_to_bus(&cmd->saved_data_pointer));
- patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_temp_addr_saved_residual, virt_to_bus(&cmd->saved_residual));
- patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_temp_addr_residual, virt_to_bus(&cmd->residual));
-
- /* XXX - new start stuff */
-
- patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_temp_addr_dsa_value, virt_to_bus(&cmd->dsa_addr));
-}
-
-/*
- * Function : run_process_issue_queue (void)
- *
- * Purpose : ensure that the coroutine is running and will process our
- * request. process_issue_queue_running is checked/set here (in an
- * inline function) rather than in process_issue_queue itself to reduce
- * the chances of stack overflow.
- *
- */
-
-static volatile int process_issue_queue_running = 0;
-
-static __inline__ void
-run_process_issue_queue(void) {
- unsigned long flags;
- local_irq_save(flags);
- if (!process_issue_queue_running) {
- process_issue_queue_running = 1;
- process_issue_queue(flags);
- /*
- * process_issue_queue_running is cleared in process_issue_queue
- * once it can't do more work, and process_issue_queue exits with
- * interrupts disabled.
- */
- }
- local_irq_restore(flags);
-}
-
-/*
- * Function : static void abnormal_finished (struct NCR53c7x0_cmd *cmd, int
- * result)
- *
- * Purpose : mark SCSI command as finished, OR'ing the host portion
- * of the result word into the result field of the corresponding
- * Scsi_Cmnd structure, and removing it from the internal queues.
- *
- * Inputs : cmd - command, result - entire result field
- *
- * Preconditions : the NCR chip should be in a halted state when
- * abnormal_finished is run, since it modifies structures which
- * the NCR expects to have exclusive access to.
- */
-
-static void
-abnormal_finished (struct NCR53c7x0_cmd *cmd, int result) {
- Scsi_Cmnd *c = cmd->cmd;
- struct Scsi_Host *host = c->device->host;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- unsigned long flags;
- int left, found;
- volatile struct NCR53c7x0_cmd * linux_search;
- volatile struct NCR53c7x0_cmd * volatile *linux_prev;
- volatile u32 *ncr_prev, *ncrcurrent, ncr_search;
-
-#if 0
- printk ("scsi%d: abnormal finished\n", host->host_no);
-#endif
-
- local_irq_save(flags);
- found = 0;
- /*
- * Traverse the NCR issue array until we find a match or run out
- * of instructions. Instructions in the NCR issue array are
- * either JUMP or NOP instructions, which are 2 words in length.
- */
-
-
- for (found = 0, left = host->can_queue, ncrcurrent = hostdata->schedule;
- left > 0; --left, ncrcurrent += 2)
- {
- if (issue_to_cmd (host, hostdata, (u32 *) ncrcurrent) == cmd)
- {
- ncrcurrent[0] = hostdata->NOP_insn;
- ncrcurrent[1] = 0xdeadbeef;
- ++found;
- break;
- }
- }
-
- /*
- * Traverse the NCR reconnect list of DSA structures until we find
- * a pointer to this dsa or have found too many command structures.
- * We let prev point at the next field of the previous element or
- * head of the list, so we don't do anything different for removing
- * the head element.
- */
-
- for (left = host->can_queue,
- ncr_search = hostdata->reconnect_dsa_head,
- ncr_prev = &hostdata->reconnect_dsa_head;
- left >= 0 && ncr_search &&
- ((char*)bus_to_virt(ncr_search) + hostdata->dsa_start)
- != (char *) cmd->dsa;
- ncr_prev = (u32*) ((char*)bus_to_virt(ncr_search) +
- hostdata->dsa_next), ncr_search = *ncr_prev, --left);
-
- if (left < 0)
-	printk("scsi%d: loop detected in ncr reconnect list\n",
- host->host_no);
- else if (ncr_search) {
- if (found)
- printk("scsi%d: scsi %ld in ncr issue array and reconnect lists\n",
- host->host_no, c->pid);
- else {
- volatile u32 * next = (u32 *)
- ((char *)bus_to_virt(ncr_search) + hostdata->dsa_next);
- *ncr_prev = *next;
-/* If we're at the tail end of the issue queue, update that pointer too. */
- found = 1;
- }
- }
-
- /*
- * Traverse the host running list until we find this command or discover
- * we have too many elements, pointing linux_prev at the next field of the
- * previous element (or the head of the list) and linux_search at this element.
- */
-
- for (left = host->can_queue, linux_search = hostdata->running_list,
- linux_prev = &hostdata->running_list;
- left >= 0 && linux_search && linux_search != cmd;
- linux_prev = &(linux_search->next),
- linux_search = linux_search->next, --left);
-
- if (left < 0)
- printk ("scsi%d: loop detected in host running list for scsi pid %ld\n",
- host->host_no, c->pid);
- else if (linux_search) {
- *linux_prev = linux_search->next;
- --hostdata->busy[c->device->id][c->device->lun];
- }
-
- /* Return the NCR command structure to the free list */
- cmd->next = hostdata->free;
- hostdata->free = cmd;
- c->host_scribble = NULL;
-
- /* And return */
- c->result = result;
- c->scsi_done(c);
-
- local_irq_restore(flags);
- run_process_issue_queue();
-}
-
-/*
- * Function : static void intr_break (struct Scsi_Host *host,
- * struct NCR53c7x0_cmd *cmd)
- *
- * Purpose : Handler for breakpoint interrupts from a SCSI script
- *
- * Inputs : host - pointer to this host adapter's structure,
- * cmd - pointer to the command (if any) dsa was pointing
- * to.
- *
- */
-
-static void
-intr_break (struct Scsi_Host *host, struct
- NCR53c7x0_cmd *cmd) {
- NCR53c7x0_local_declare();
- struct NCR53c7x0_break *bp;
-#if 0
- Scsi_Cmnd *c = cmd ? cmd->cmd : NULL;
-#endif
- u32 *dsp;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- unsigned long flags;
- NCR53c7x0_local_setup(host);
-
- /*
- * Find the break point corresponding to this address, and
- * dump the appropriate debugging information to standard
- * output.
- */
- local_irq_save(flags);
- dsp = (u32 *) bus_to_virt(NCR53c7x0_read32(DSP_REG));
- for (bp = hostdata->breakpoints; bp && bp->address != dsp;
- bp = bp->next);
- if (!bp)
- panic("scsi%d : break point interrupt from %p with no breakpoint!",
- host->host_no, dsp);
-
- /*
- * Configure the NCR chip for manual start mode, so that we can
- * point the DSP register at the instruction that follows the
- * INT int_debug_break instruction.
- */
-
- NCR53c7x0_write8 (hostdata->dmode,
- NCR53c7x0_read8(hostdata->dmode)|DMODE_MAN);
-
- /*
- * And update the DSP register, using the size of the old
- * instruction in bytes.
- */
-
- local_irq_restore(flags);
-}
-/*
- * Function : static void print_synchronous (const char *prefix,
- * const unsigned char *msg)
- *
- * Purpose : print a pretty, user- and machine-parsable representation
- * of an SDTR message, including the "real" parameters and data
- * clock, so we can tell the transfer rate at a glance.
- *
- * Inputs : prefix - text to prepend, msg - SDTR message (5 bytes)
- */
-
-static void
-print_synchronous (const char *prefix, const unsigned char *msg) {
- if (msg[4]) {
- int Hz = 1000000000 / (msg[3] * 4);
- int integer = Hz / 1000000;
- int fraction = (Hz - (integer * 1000000)) / 10000;
- printk ("%speriod %dns offset %d %d.%02dMHz %s SCSI%s\n",
- prefix, (int) msg[3] * 4, (int) msg[4], integer, fraction,
- (((msg[3] * 4) < 200) ? "FAST" : "synchronous"),
- (((msg[3] * 4) < 200) ? "-II" : ""));
- } else
- printk ("%sasynchronous SCSI\n", prefix);
-}
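-
-/*
- * For example, an SDTR with period factor 50 and offset 8 (msg[3] = 50,
- * msg[4] = 8) works out above to 1000000000 / (50 * 4) = 5000000 Hz and
- * prints as "period 200ns offset 8 5.00MHz synchronous SCSI"; with
- * msg[4] = 0 it simply reports asynchronous SCSI.
- */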
-
-/*
- * Function : static void set_synchronous (struct Scsi_Host *host,
- * int target, int sxfer, int scntl3, int now_connected)
- *
- * Purpose : reprogram transfers between the selected SCSI initiator and
- * target with the given register values; in the indirect
- * select operand, reselection script, and chip registers.
- *
- * Inputs : host - NCR53c7,8xx SCSI host, target - number SCSI target id,
- * sxfer and scntl3 - NCR registers. now_connected - if non-zero,
- * we should reprogram the registers now too.
- *
- * NOTE: For 53c710, scntl3 is actually used for SCF bits from
- * SBCL, as we don't have a SCNTL3.
- */
-
-static void
-set_synchronous (struct Scsi_Host *host, int target, int sxfer, int scntl3,
- int now_connected) {
- NCR53c7x0_local_declare();
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- u32 *script;
- NCR53c7x0_local_setup(host);
-
- /* These are eight bit registers */
- sxfer &= 0xff;
- scntl3 &= 0xff;
-
- hostdata->sync[target].sxfer_sanity = sxfer;
- hostdata->sync[target].scntl3_sanity = scntl3;
-
-/*
- * HARD CODED : synchronous script is EIGHT words long. This
- * must agree with 53c7xx.h
- */
-
- if ((hostdata->chip != 700) && (hostdata->chip != 70066)) {
- hostdata->sync[target].select_indirect = (1 << target) << 16 |
- (sxfer << 8);
- hostdata->sync[target].sscf_710 = scntl3;
-
- script = (u32 *) hostdata->sync[target].script;
-
- /* XXX - add NCR53c7x0 code to reprogram SCF bits if we want to */
- script[0] = ((DCMD_TYPE_RWRI | DCMD_RWRI_OPC_MODIFY |
- DCMD_RWRI_OP_MOVE) << 24) |
- (SBCL_REG << 16) | (scntl3 << 8);
- script[1] = 0;
- script += 2;
-
- script[0] = ((DCMD_TYPE_RWRI | DCMD_RWRI_OPC_MODIFY |
- DCMD_RWRI_OP_MOVE) << 24) |
- (SXFER_REG << 16) | (sxfer << 8);
- script[1] = 0;
- script += 2;
-
-#ifdef DEBUG_SYNC_INTR
- if (hostdata->options & OPTION_DEBUG_DISCONNECT) {
- script[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_INT) << 24) | DBC_TCI_TRUE;
- script[1] = DEBUG_SYNC_INTR;
- script += 2;
- }
-#endif
-
- script[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_RETURN) << 24) | DBC_TCI_TRUE;
- script[1] = 0;
- script += 2;
- }
-
- if (hostdata->options & OPTION_DEBUG_SYNCHRONOUS)
- printk ("scsi%d : target %d sync parameters are sxfer=0x%x, scntl3=0x%x\n",
- host->host_no, target, sxfer, scntl3);
-
- if (now_connected) {
- NCR53c7x0_write8(SBCL_REG, scntl3);
- NCR53c7x0_write8(SXFER_REG, sxfer);
- }
-}
-
-
-/*
- * Function : static void asynchronous (struct Scsi_Host *host, int target)
- *
- * Purpose : reprogram transfers between the selected SCSI host adapter and target
- * (assumed to be currently connected) for asynchronous transfers.
- *
- * Inputs : host - SCSI host structure, target - numeric target ID.
- *
- * Preconditions : the NCR chip should be in one of the halted states
- */
-
-static void
-asynchronous (struct Scsi_Host *host, int target) {
- NCR53c7x0_local_declare();
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- NCR53c7x0_local_setup(host);
- set_synchronous (host, target, /* no offset */ 0, hostdata->saved_scntl3,
- 1);
- printk ("scsi%d : setting target %d to asynchronous SCSI\n",
- host->host_no, target);
-}
-
-/*
- * XXX - do we want to go out of our way (ie, add extra code to selection
- * in the NCR53c710/NCR53c720 script) to reprogram the synchronous
- * conversion bits, or can we be content in just setting the
- * sxfer bits? I chose to do so [richard@sleepie.demon.co.uk]
- */
-
-/* Table for NCR53c8xx synchronous values */
-
-/* This table is also correct for the 710, allowing that scf=4 is equivalent
- * to SSCF=0 (ie use DCNTL, divide by 3) for a 50.01-66.00MHz clock.
- * For any other clock values, we cannot use entries with SCF values of
- * 4.  I guess that for a 66MHz clock, the slowest it will set is 2MHz,
- * and for a 50MHz clock, the slowest will be 2.27MHz.  Should check
- * that a device doesn't try to negotiate sync below these limits!
- */
-
-static const struct {
- int div; /* Total clock divisor * 10 */
- unsigned char scf; /* */
- unsigned char tp; /* 4 + tp = xferp divisor */
-} syncs[] = {
-/* div scf tp div scf tp div scf tp */
- { 40, 1, 0}, { 50, 1, 1}, { 60, 1, 2},
- { 70, 1, 3}, { 75, 2, 1}, { 80, 1, 4},
- { 90, 1, 5}, { 100, 1, 6}, { 105, 2, 3},
- { 110, 1, 7}, { 120, 2, 4}, { 135, 2, 5},
- { 140, 3, 3}, { 150, 2, 6}, { 160, 3, 4},
- { 165, 2, 7}, { 180, 3, 5}, { 200, 3, 6},
- { 210, 4, 3}, { 220, 3, 7}, { 240, 4, 4},
- { 270, 4, 5}, { 300, 4, 6}, { 330, 4, 7}
-};
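-
-/*
- * Worked example: with a 40MHz SCSI clock and a requested period factor
- * of 50 (200ns), the desired rate is 5MHz and the scaled divisor is 80
- * (i.e. 8.0); the loop in synchronous() below then lands on the
- * { 80, 1, 4 } entry, giving scf=1 and tp=4, and the period factor is
- * re-derived as (1000000000 / 40000000) * 80 / 10 / 4 = 50 again.
- */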
-
-/*
- * Function : static void synchronous (struct Scsi_Host *host, int target,
- * char *msg)
- *
- * Purpose : reprogram transfers between the selected SCSI initiator and
- * target for synchronous SCSI transfers such that the synchronous
- * offset is less than that requested and period at least as long
- * as that requested. Also modify *msg such that it contains
- * an appropriate response.
- *
- * Inputs : host - NCR53c7,8xx SCSI host, target - number SCSI target id,
- * msg - synchronous transfer request.
- */
-
-
-static void
-synchronous (struct Scsi_Host *host, int target, char *msg) {
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- int desire, divisor, i, limit;
- unsigned char scntl3, sxfer;
-/* The diagnostic message fits on one line, even with max. width integers */
- char buf[80];
-
-/* Desired transfer clock in Hz */
- desire = 1000000000L / (msg[3] * 4);
-/* Scale the available SCSI clock by 10 so we get tenths */
- divisor = (hostdata->scsi_clock * 10) / desire;
-
-/* NCR chips can handle at most an offset of 8 */
- if (msg[4] > 8)
- msg[4] = 8;
-
- if (hostdata->options & OPTION_DEBUG_SDTR)
- printk("scsi%d : optimal synchronous divisor of %d.%01d\n",
- host->host_no, divisor / 10, divisor % 10);
-
- limit = ARRAY_SIZE(syncs) - 1;
- for (i = 0; (i < limit) && (divisor > syncs[i].div); ++i);
-
- if (hostdata->options & OPTION_DEBUG_SDTR)
- printk("scsi%d : selected synchronous divisor of %d.%01d\n",
- host->host_no, syncs[i].div / 10, syncs[i].div % 10);
-
- msg[3] = ((1000000000L / hostdata->scsi_clock) * syncs[i].div / 10 / 4);
-
- if (hostdata->options & OPTION_DEBUG_SDTR)
- printk("scsi%d : selected synchronous period of %dns\n", host->host_no,
- msg[3] * 4);
-
- scntl3 = syncs[i].scf;
- sxfer = (msg[4] << SXFER_MO_SHIFT) | (syncs[i].tp << 4);
- if (hostdata->options & OPTION_DEBUG_SDTR)
- printk ("scsi%d : sxfer=0x%x scntl3=0x%x\n",
- host->host_no, (int) sxfer, (int) scntl3);
- set_synchronous (host, target, sxfer, scntl3, 1);
- sprintf (buf, "scsi%d : setting target %d to ", host->host_no, target);
- print_synchronous (buf, msg);
-}
-
-/*
- * Function : static int NCR53c7x0_dstat_sir_intr (struct Scsi_Host *host,
- * struct NCR53c7x0_cmd *cmd)
- *
- * Purpose : Handler for INT generated instructions for the
- * NCR53c810/820 SCSI SCRIPT
- *
- * Inputs : host - pointer to this host adapter's structure,
- * cmd - pointer to the command (if any) dsa was pointing
- * to.
- *
- */
-
-static int
-NCR53c7x0_dstat_sir_intr (struct Scsi_Host *host, struct
- NCR53c7x0_cmd *cmd) {
- NCR53c7x0_local_declare();
- int print;
- Scsi_Cmnd *c = cmd ? cmd->cmd : NULL;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- u32 dsps,*dsp; /* Argument of the INT instruction */
-
- NCR53c7x0_local_setup(host);
- dsps = NCR53c7x0_read32(DSPS_REG);
- dsp = (u32 *) bus_to_virt(NCR53c7x0_read32(DSP_REG));
-
- /* RGH 150597: Frig. Commands which fail with Check Condition are
- * Flagged as successful - hack dsps to indicate check condition */
-#if 0
- /* RGH 200597: Need to disable for BVME6000, as it gets Check Conditions
- * and then dies. Seems to handle Check Condition at startup, but
- * not mid kernel build. */
- if (dsps == A_int_norm_emulateintfly && cmd && cmd->result == 2)
- dsps = A_int_err_check_condition;
-#endif
-
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk ("scsi%d : DSPS = 0x%x\n", host->host_no, dsps);
-
- switch (dsps) {
- case A_int_msg_1:
- print = 1;
- switch (hostdata->msg_buf[0]) {
- /*
- * Unless we've initiated synchronous negotiation, I don't
- * think that this should happen.
- */
- case MESSAGE_REJECT:
- hostdata->dsp = hostdata->script + hostdata->E_accept_message /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- if (cmd && (cmd->flags & CMD_FLAG_SDTR)) {
- printk ("scsi%d : target %d rejected SDTR\n", host->host_no,
- c->device->id);
- cmd->flags &= ~CMD_FLAG_SDTR;
- asynchronous (host, c->device->id);
- print = 0;
- }
- break;
- case INITIATE_RECOVERY:
- printk ("scsi%d : extended contingent allegiance not supported yet, rejecting\n",
- host->host_no);
-	    /* Same handling as the default case below: reject the message */
- hostdata->dsp = hostdata->script + hostdata->E_reject_message /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- break;
- default:
- printk ("scsi%d : unsupported message, rejecting\n",
- host->host_no);
- hostdata->dsp = hostdata->script + hostdata->E_reject_message /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- }
- if (print) {
- printk ("scsi%d : received message", host->host_no);
- if (c)
- printk (" from target %d lun %d ", c->device->id, c->device->lun);
- spi_print_msg((unsigned char *) hostdata->msg_buf);
- printk("\n");
- }
-
- return SPECIFIC_INT_NOTHING;
-
-
- case A_int_msg_sdtr:
-/*
- * At this point, hostdata->msg_buf contains
- * 0 EXTENDED MESSAGE
- * 1 length
- * 2 SDTR
- * 3 period * 4ns
- * 4 offset
- */
-
- if (cmd) {
- char buf[80];
- sprintf (buf, "scsi%d : target %d %s ", host->host_no, c->device->id,
- (cmd->flags & CMD_FLAG_SDTR) ? "accepting" : "requesting");
- print_synchronous (buf, (unsigned char *) hostdata->msg_buf);
-
- /*
- * Initiator initiated, won't happen unless synchronous
- * transfers are enabled. If we get a SDTR message in
- * response to our SDTR, we should program our parameters
- * such that
- * offset <= requested offset
- * period >= requested period
- */
- if (cmd->flags & CMD_FLAG_SDTR) {
- cmd->flags &= ~CMD_FLAG_SDTR;
- if (hostdata->msg_buf[4])
- synchronous (host, c->device->id, (unsigned char *)
- hostdata->msg_buf);
- else
- asynchronous (host, c->device->id);
- hostdata->dsp = hostdata->script + hostdata->E_accept_message /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- return SPECIFIC_INT_NOTHING;
- } else {
- if (hostdata->options & OPTION_SYNCHRONOUS) {
- cmd->flags |= CMD_FLAG_DID_SDTR;
- synchronous (host, c->device->id, (unsigned char *)
- hostdata->msg_buf);
- } else {
- hostdata->msg_buf[4] = 0; /* 0 offset = async */
- asynchronous (host, c->device->id);
- }
- patch_dsa_32 (cmd->dsa, dsa_msgout_other, 0, 5);
- patch_dsa_32 (cmd->dsa, dsa_msgout_other, 1, (u32)
- virt_to_bus ((void *)&hostdata->msg_buf));
- hostdata->dsp = hostdata->script +
- hostdata->E_respond_message / sizeof(u32);
- hostdata->dsp_changed = 1;
- }
- return SPECIFIC_INT_NOTHING;
- }
- /* Fall through to abort if we couldn't find a cmd, and
- therefore a dsa structure to twiddle */
- case A_int_msg_wdtr:
- hostdata->dsp = hostdata->script + hostdata->E_reject_message /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- return SPECIFIC_INT_NOTHING;
- case A_int_err_unexpected_phase:
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk ("scsi%d : unexpected phase\n", host->host_no);
- return SPECIFIC_INT_ABORT;
- case A_int_err_selected:
- if ((hostdata->chip / 100) == 8)
- printk ("scsi%d : selected by target %d\n", host->host_no,
- (int) NCR53c7x0_read8(SDID_REG_800) &7);
- else
- printk ("scsi%d : selected by target LCRC=0x%02x\n", host->host_no,
- (int) NCR53c7x0_read8(LCRC_REG_10));
- hostdata->dsp = hostdata->script + hostdata->E_target_abort /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- return SPECIFIC_INT_NOTHING;
- case A_int_err_unexpected_reselect:
- if ((hostdata->chip / 100) == 8)
- printk ("scsi%d : unexpected reselect by target %d lun %d\n",
- host->host_no, (int) NCR53c7x0_read8(SDID_REG_800) & 7,
- hostdata->reselected_identify & 7);
- else
- printk ("scsi%d : unexpected reselect LCRC=0x%02x\n", host->host_no,
- (int) NCR53c7x0_read8(LCRC_REG_10));
- hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- return SPECIFIC_INT_NOTHING;
-/*
- * Since contingent allegiance conditions are cleared by the next
- * command issued to a target, we must issue a REQUEST SENSE
- * command after receiving a CHECK CONDITION status, before
- * another command is issued.
- *
- * Since this NCR53c7x0_cmd will be freed after use, we don't
- * care if we step on the various fields, so modify a few things.
- */
- case A_int_err_check_condition:
-#if 0
- if (hostdata->options & OPTION_DEBUG_INTR)
-#endif
- printk ("scsi%d : CHECK CONDITION\n", host->host_no);
- if (!c) {
- printk("scsi%d : CHECK CONDITION with no SCSI command\n",
- host->host_no);
- return SPECIFIC_INT_PANIC;
- }
-
- /*
- * FIXME : this uses the normal one-byte selection message.
- * We may want to renegotiate for synchronous & WIDE transfers
- * since these could be the crux of our problem.
- *
- * FIXME : once SCSI-II tagged queuing is implemented, we'll
- * have to set this up so that the rest of the DSA
- * agrees with this being an untagged queue'd command.
- */
-
- patch_dsa_32 (cmd->dsa, dsa_msgout, 0, 1);
-
- /*
- * Modify the table indirect for COMMAND OUT phase, since
- * Request Sense is a six byte command.
- */
-
- patch_dsa_32 (cmd->dsa, dsa_cmdout, 0, 6);
-
- /*
- * The CDB is now mirrored in our local non-cached
- * structure, but keep the old structure up to date as well,
- * just in case anyone looks at it.
- */
-
- /*
- * XXX Need to worry about data buffer alignment/cache state
- * XXX here, but currently never get A_int_err_check_condition,
- * XXX so ignore problem for now.
- */
- cmd->cmnd[0] = c->cmnd[0] = REQUEST_SENSE;
-	cmd->cmnd[1] = c->cmnd[1] &= 0xe0;	/* Zero all but LUN */
-	cmd->cmnd[2] = c->cmnd[2] = 0;
-	cmd->cmnd[3] = c->cmnd[3] = 0;
-	cmd->cmnd[4] = c->cmnd[4] = sizeof(c->sense_buffer);
-	cmd->cmnd[5] = c->cmnd[5] = 0;
-
- /*
- * Disable dataout phase, and program datain to transfer to the
- * sense buffer, and add a jump to other_transfer after the
- * command so overflow/underrun conditions are detected.
- */
-
- patch_dsa_32 (cmd->dsa, dsa_dataout, 0,
- virt_to_bus(hostdata->script) + hostdata->E_other_transfer);
- patch_dsa_32 (cmd->dsa, dsa_datain, 0,
- virt_to_bus(cmd->data_transfer_start));
- cmd->data_transfer_start[0] = (((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I |
- DCMD_BMI_IO)) << 24) | sizeof(c->sense_buffer);
- cmd->data_transfer_start[1] = (u32) virt_to_bus(c->sense_buffer);
-
- cmd->data_transfer_start[2] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP)
- << 24) | DBC_TCI_TRUE;
- cmd->data_transfer_start[3] = (u32) virt_to_bus(hostdata->script) +
- hostdata->E_other_transfer;
-
- /*
- * Currently, this command is flagged as completed, ie
- * it has valid status and message data. Reflag it as
- * incomplete. Q - need to do something so that original
- * status, etc are used.
- */
-
- cmd->result = cmd->cmd->result = 0xffff;
-
- /*
- * Restart command as a REQUEST SENSE.
- */
- hostdata->dsp = (u32 *) hostdata->script + hostdata->E_select /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- return SPECIFIC_INT_NOTHING;
- case A_int_debug_break:
- return SPECIFIC_INT_BREAK;
- case A_int_norm_aborted:
- hostdata->dsp = (u32 *) hostdata->schedule;
- hostdata->dsp_changed = 1;
- if (cmd)
- abnormal_finished (cmd, DID_ERROR << 16);
- return SPECIFIC_INT_NOTHING;
- case A_int_norm_emulateintfly:
- NCR53c7x0_intfly(host);
- return SPECIFIC_INT_NOTHING;
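-    /*
-     * The int_test_* completion codes appear to be spaced 0x00010000
-     * apart, so the arithmetic below maps int_test_1 to 1 and
-     * int_test_2 to 2 for the polling loops in NCR53c7xx_run_tests().
-     */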
- case A_int_test_1:
- case A_int_test_2:
- hostdata->idle = 1;
- hostdata->test_completed = (dsps - A_int_test_1) / 0x00010000 + 1;
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk("scsi%d : test%d complete\n", host->host_no,
- hostdata->test_completed);
- return SPECIFIC_INT_NOTHING;
-#ifdef A_int_debug_reselected_ok
- case A_int_debug_reselected_ok:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
- OPTION_DEBUG_DISCONNECT)) {
- /*
- * Note - this dsa is not based on location relative to
- * the command structure, but to location relative to the
- * DSA register
- */
- u32 *dsa;
- dsa = (u32 *) bus_to_virt (NCR53c7x0_read32(DSA_REG));
-
- printk("scsi%d : reselected_ok (DSA = 0x%x (virt 0x%p)\n",
- host->host_no, NCR53c7x0_read32(DSA_REG), dsa);
- printk("scsi%d : resume address is 0x%x (virt 0x%p)\n",
- host->host_no, cmd->saved_data_pointer,
- bus_to_virt(cmd->saved_data_pointer));
- print_insn (host, hostdata->script + Ent_reselected_ok /
- sizeof(u32), "", 1);
- if ((hostdata->chip / 100) == 8)
- printk ("scsi%d : sxfer=0x%x, scntl3=0x%x\n",
- host->host_no, NCR53c7x0_read8(SXFER_REG),
- NCR53c7x0_read8(SCNTL3_REG_800));
- else
- printk ("scsi%d : sxfer=0x%x, cannot read SBCL\n",
- host->host_no, NCR53c7x0_read8(SXFER_REG));
- if (c) {
- print_insn (host, (u32 *)
- hostdata->sync[c->device->id].script, "", 1);
- print_insn (host, (u32 *)
- hostdata->sync[c->device->id].script + 2, "", 1);
- }
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_reselect_check
- case A_int_debug_reselect_check:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
- u32 *dsa;
-#if 0
- u32 *code;
-#endif
- /*
- * Note - this dsa is not based on location relative to
- * the command structure, but to location relative to the
- * DSA register
- */
- dsa = bus_to_virt (NCR53c7x0_read32(DSA_REG));
- printk("scsi%d : reselected_check_next (DSA = 0x%lx (virt 0x%p))\n",
- host->host_no, virt_to_bus(dsa), dsa);
- if (dsa) {
- printk("scsi%d : resume address is 0x%x (virt 0x%p)\n",
- host->host_no, cmd->saved_data_pointer,
- bus_to_virt (cmd->saved_data_pointer));
-#if 0
- printk("scsi%d : template code :\n", host->host_no);
- for (code = dsa + (Ent_dsa_code_check_reselect - Ent_dsa_zero)
- / sizeof(u32); code < (dsa + Ent_dsa_zero / sizeof(u32));
- code += print_insn (host, code, "", 1));
-#endif
- }
- print_insn (host, hostdata->script + Ent_reselected_ok /
- sizeof(u32), "", 1);
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_dsa_schedule
- case A_int_debug_dsa_schedule:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
- u32 *dsa;
- /*
- * Note - this dsa is not based on location relative to
- * the command structure, but to location relative to the
- * DSA register
- */
- dsa = (u32 *) bus_to_virt (NCR53c7x0_read32(DSA_REG));
- printk("scsi%d : dsa_schedule (old DSA = 0x%lx (virt 0x%p))\n",
- host->host_no, virt_to_bus(dsa), dsa);
- if (dsa)
- printk("scsi%d : resume address is 0x%x (virt 0x%p)\n"
- " (temp was 0x%x (virt 0x%p))\n",
- host->host_no, cmd->saved_data_pointer,
- bus_to_virt (cmd->saved_data_pointer),
- NCR53c7x0_read32 (TEMP_REG),
- bus_to_virt (NCR53c7x0_read32(TEMP_REG)));
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_scheduled
- case A_int_debug_scheduled:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
- printk("scsi%d : new I/O 0x%x (virt 0x%p) scheduled\n",
- host->host_no, NCR53c7x0_read32(DSA_REG),
- bus_to_virt(NCR53c7x0_read32(DSA_REG)));
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_idle
- case A_int_debug_idle:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
- printk("scsi%d : idle\n", host->host_no);
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_cmd
- case A_int_debug_cmd:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
-	    printk("scsi%d : command sent\n", host->host_no);
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_dsa_loaded
- case A_int_debug_dsa_loaded:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
- printk("scsi%d : DSA loaded with 0x%x (virt 0x%p)\n", host->host_no,
- NCR53c7x0_read32(DSA_REG),
- bus_to_virt(NCR53c7x0_read32(DSA_REG)));
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_reselected
- case A_int_debug_reselected:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
- OPTION_DEBUG_DISCONNECT)) {
- if ((hostdata->chip / 100) == 8)
- printk("scsi%d : reselected by target %d lun %d\n",
- host->host_no, (int) NCR53c7x0_read8(SDID_REG_800) & ~0x80,
- (int) hostdata->reselected_identify & 7);
- else
- printk("scsi%d : reselected by LCRC=0x%02x lun %d\n",
- host->host_no, (int) NCR53c7x0_read8(LCRC_REG_10),
- (int) hostdata->reselected_identify & 7);
- print_queues(host);
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_disconnect_msg
- case A_int_debug_disconnect_msg:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
- if (c)
- printk("scsi%d : target %d lun %d disconnecting\n",
- host->host_no, c->device->id, c->device->lun);
- else
- printk("scsi%d : unknown target disconnecting\n",
- host->host_no);
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_disconnected
- case A_int_debug_disconnected:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
- OPTION_DEBUG_DISCONNECT)) {
- printk ("scsi%d : disconnected, new queues are\n",
- host->host_no);
- print_queues(host);
-#if 0
- /* Not valid on ncr53c710! */
- printk ("scsi%d : sxfer=0x%x, scntl3=0x%x\n",
- host->host_no, NCR53c7x0_read8(SXFER_REG),
- NCR53c7x0_read8(SCNTL3_REG_800));
-#endif
- if (c) {
- print_insn (host, (u32 *)
- hostdata->sync[c->device->id].script, "", 1);
- print_insn (host, (u32 *)
- hostdata->sync[c->device->id].script + 2, "", 1);
- }
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_panic
- case A_int_debug_panic:
- printk("scsi%d : int_debug_panic received\n", host->host_no);
- print_lots (host);
- return SPECIFIC_INT_PANIC;
-#endif
-#ifdef A_int_debug_saved
- case A_int_debug_saved:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
- OPTION_DEBUG_DISCONNECT)) {
- printk ("scsi%d : saved data pointer 0x%x (virt 0x%p)\n",
- host->host_no, cmd->saved_data_pointer,
- bus_to_virt (cmd->saved_data_pointer));
- print_progress (c);
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_restored
- case A_int_debug_restored:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
- OPTION_DEBUG_DISCONNECT)) {
- if (cmd) {
- int size;
- printk ("scsi%d : restored data pointer 0x%x (virt 0x%p)\n",
- host->host_no, cmd->saved_data_pointer, bus_to_virt (
- cmd->saved_data_pointer));
- size = print_insn (host, (u32 *)
- bus_to_virt(cmd->saved_data_pointer), "", 1);
- size = print_insn (host, (u32 *)
- bus_to_virt(cmd->saved_data_pointer) + size, "", 1);
- print_progress (c);
- }
-#if 0
- printk ("scsi%d : datapath residual %d\n",
- host->host_no, datapath_residual (host)) ;
-#endif
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_sync
- case A_int_debug_sync:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
- OPTION_DEBUG_DISCONNECT|OPTION_DEBUG_SDTR)) {
- unsigned char sxfer = NCR53c7x0_read8 (SXFER_REG), scntl3;
- if ((hostdata->chip / 100) == 8) {
- scntl3 = NCR53c7x0_read8 (SCNTL3_REG_800);
- if (c) {
- if (sxfer != hostdata->sync[c->device->id].sxfer_sanity ||
- scntl3 != hostdata->sync[c->device->id].scntl3_sanity) {
- printk ("scsi%d : sync sanity check failed sxfer=0x%x, scntl3=0x%x",
- host->host_no, sxfer, scntl3);
- NCR53c7x0_write8 (SXFER_REG, sxfer);
- NCR53c7x0_write8 (SCNTL3_REG_800, scntl3);
- }
- } else
- printk ("scsi%d : unknown command sxfer=0x%x, scntl3=0x%x\n",
- host->host_no, (int) sxfer, (int) scntl3);
- } else {
- if (c) {
- if (sxfer != hostdata->sync[c->device->id].sxfer_sanity) {
- printk ("scsi%d : sync sanity check failed sxfer=0x%x",
- host->host_no, sxfer);
- NCR53c7x0_write8 (SXFER_REG, sxfer);
- NCR53c7x0_write8 (SBCL_REG,
- hostdata->sync[c->device->id].sscf_710);
- }
- } else
- printk ("scsi%d : unknown command sxfer=0x%x\n",
- host->host_no, (int) sxfer);
- }
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_datain
- case A_int_debug_datain:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
- OPTION_DEBUG_DISCONNECT|OPTION_DEBUG_SDTR)) {
- int size;
- if ((hostdata->chip / 100) == 8)
- printk ("scsi%d : In do_datain (%s) sxfer=0x%x, scntl3=0x%x\n"
- " datapath residual=%d\n",
- host->host_no, sbcl_to_phase (NCR53c7x0_read8 (SBCL_REG)),
- (int) NCR53c7x0_read8(SXFER_REG),
- (int) NCR53c7x0_read8(SCNTL3_REG_800),
- datapath_residual (host)) ;
- else
- printk ("scsi%d : In do_datain (%s) sxfer=0x%x\n"
- " datapath residual=%d\n",
- host->host_no, sbcl_to_phase (NCR53c7x0_read8 (SBCL_REG)),
- (int) NCR53c7x0_read8(SXFER_REG),
- datapath_residual (host)) ;
- print_insn (host, dsp, "", 1);
- size = print_insn (host, (u32 *) bus_to_virt(dsp[1]), "", 1);
- print_insn (host, (u32 *) bus_to_virt(dsp[1]) + size, "", 1);
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_check_dsa
- case A_int_debug_check_dsa:
- if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON) {
- int sdid;
- int tmp;
- char *where;
- if (hostdata->chip / 100 == 8)
- sdid = NCR53c7x0_read8 (SDID_REG_800) & 15;
- else {
- tmp = NCR53c7x0_read8 (SDID_REG_700);
- if (!tmp)
- panic ("SDID_REG_700 = 0");
- tmp >>= 1;
- sdid = 0;
- while (tmp) {
- tmp >>= 1;
- sdid++;
- }
- }
- where = dsp - NCR53c7x0_insn_size(NCR53c7x0_read8
- (DCMD_REG)) == hostdata->script +
- Ent_select_check_dsa / sizeof(u32) ?
- "selection" : "reselection";
- if (c && sdid != c->device->id) {
- printk ("scsi%d : SDID target %d != DSA target %d at %s\n",
- host->host_no, sdid, c->device->id, where);
- print_lots(host);
- dump_events (host, 20);
- return SPECIFIC_INT_PANIC;
- }
- }
- return SPECIFIC_INT_RESTART;
-#endif
- default:
- if ((dsps & 0xff000000) == 0x03000000) {
- printk ("scsi%d : misc debug interrupt 0x%x\n",
- host->host_no, dsps);
- return SPECIFIC_INT_RESTART;
- } else if ((dsps & 0xff000000) == 0x05000000) {
- if (hostdata->events) {
- struct NCR53c7x0_event *event;
- ++hostdata->event_index;
- if (hostdata->event_index >= hostdata->event_size)
- hostdata->event_index = 0;
- event = (struct NCR53c7x0_event *) hostdata->events +
- hostdata->event_index;
- event->event = (enum ncr_event) dsps;
- event->dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
- if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON) {
- if (hostdata->chip / 100 == 8)
- event->target = NCR53c7x0_read8(SSID_REG_800);
- else {
- unsigned char tmp, sdid;
- tmp = NCR53c7x0_read8 (SDID_REG_700);
- if (!tmp)
- panic ("SDID_REG_700 = 0");
- tmp >>= 1;
- sdid = 0;
- while (tmp) {
- tmp >>= 1;
- sdid++;
- }
- event->target = sdid;
- }
- }
- else
- event->target = 255;
-
- if (event->event == EVENT_RESELECT)
- event->lun = hostdata->reselected_identify & 0xf;
- else if (c)
- event->lun = c->device->lun;
- else
- event->lun = 255;
- do_gettimeofday(&(event->time));
- if (c) {
- event->pid = c->pid;
- memcpy ((void *) event->cmnd, (void *) c->cmnd,
- sizeof (event->cmnd));
- } else {
- event->pid = -1;
- }
- }
- return SPECIFIC_INT_RESTART;
- }
-
- printk ("scsi%d : unknown user interrupt 0x%x\n",
- host->host_no, (unsigned) dsps);
- return SPECIFIC_INT_PANIC;
- }
-}
-
-/*
- * XXX - the stock NCR assembler won't output the scriptu.h file,
- * which undefines all #define'd CPP symbols from the script.h
- * file; without it, you will have problems if you use multiple scripts
- * with the same symbol names.
- *
- * If you insist on using NCR's assembler, you could generate
- * scriptu.h from script.h using something like
- *
- *    grep '#define' script.h | \
- *    sed 's/#define[ 	][ 	]*\([_a-zA-Z][_a-zA-Z0-9]*\).*$/#undef \1/' \
- * > scriptu.h
- */
-
-#include "53c7xx_u.h"
-
-/* XXX - add alternate script handling code here */
-
-
-/*
- * Function : static void NCR53c7x0_soft_reset (struct Scsi_Host *host)
- *
- * Purpose : perform a soft reset of the NCR53c7xx chip
- *
- * Inputs : host - pointer to this host adapter's structure
- *
- * Preconditions : NCR53c7x0_init must have been called for this
- * host.
- *
- */
-
-static void
-NCR53c7x0_soft_reset (struct Scsi_Host *host) {
- NCR53c7x0_local_declare();
- unsigned long flags;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- NCR53c7x0_local_setup(host);
-
- local_irq_save(flags);
-
- /* Disable scsi chip and s/w level 7 ints */
-
-#ifdef CONFIG_MVME16x
- if (MACH_IS_MVME16x)
- {
- volatile unsigned long v;
-
- v = *(volatile unsigned long *)0xfff4006c;
- v &= ~0x8000;
- *(volatile unsigned long *)0xfff4006c = v;
- v = *(volatile unsigned long *)0xfff4202c;
- v &= ~0x10;
- *(volatile unsigned long *)0xfff4202c = v;
- }
-#endif
- /* Anything specific for your hardware? */
-
- /*
- * Do a soft reset of the chip so that everything is
- * reinitialized to the power-on state.
- *
- * Basically follow the procedure outlined in the NCR53c700
- * data manual under Chapter Six, How to Use, Steps Necessary to
- * Start SCRIPTS, with the exception of actually starting the
- * script and setting up the synchronous transfer gunk.
- */
-
-	/* Should we reset the SCSI bus here? */
-
- NCR53c7x0_write8(ISTAT_REG_700, ISTAT_10_SRST);
- NCR53c7x0_write8(ISTAT_REG_700, 0);
-
- /*
- * saved_dcntl is set up in NCR53c7x0_init() before it is overwritten
- * here. We should have some better way of working out the CF bit
- * setting.
- */
-
- hostdata->saved_dcntl = DCNTL_10_EA|DCNTL_10_COM;
- if (hostdata->scsi_clock > 50000000)
- hostdata->saved_dcntl |= DCNTL_700_CF_3;
- else
- if (hostdata->scsi_clock > 37500000)
- hostdata->saved_dcntl |= DCNTL_700_CF_2;
-#if 0
- else
- /* Any clocks less than 37.5MHz? */
-#endif
-
- if (hostdata->options & OPTION_DEBUG_TRACE)
- NCR53c7x0_write8(DCNTL_REG, hostdata->saved_dcntl | DCNTL_SSM);
- else
- NCR53c7x0_write8(DCNTL_REG, hostdata->saved_dcntl);
- /* Following disables snooping - snooping is not required, as non-
- * cached pages are used for shared data, and appropriate use is
- * made of cache_push/cache_clear. Indeed, for 68060
- * enabling snooping causes disk corruption of ext2fs free block
- * bitmaps and the like. If you have a 68060 with snooping hardwired
- * on, then you need to enable CONFIG_060_WRITETHROUGH.
- */
- NCR53c7x0_write8(CTEST7_REG, CTEST7_10_TT1|CTEST7_STD);
- /* Actually burst of eight, according to my 53c710 databook */
- NCR53c7x0_write8(hostdata->dmode, DMODE_10_BL_8 | DMODE_10_FC2);
- NCR53c7x0_write8(SCID_REG, 1 << host->this_id);
- NCR53c7x0_write8(SBCL_REG, 0);
- NCR53c7x0_write8(SCNTL1_REG, SCNTL1_ESR_700);
- NCR53c7x0_write8(SCNTL0_REG, ((hostdata->options & OPTION_PARITY) ?
- SCNTL0_EPC : 0) | SCNTL0_EPG_700 | SCNTL0_ARB1 | SCNTL0_ARB2);
-
- /*
- * Enable all interrupts, except parity which we only want when
- * the user requests it.
- */
-
- NCR53c7x0_write8(DIEN_REG, DIEN_700_BF |
- DIEN_ABRT | DIEN_SSI | DIEN_SIR | DIEN_700_OPC);
-
- NCR53c7x0_write8(SIEN_REG_700, ((hostdata->options & OPTION_PARITY) ?
- SIEN_PAR : 0) | SIEN_700_STO | SIEN_RST | SIEN_UDC |
- SIEN_SGE | SIEN_MA);
-
-#ifdef CONFIG_MVME16x
- if (MACH_IS_MVME16x)
- {
- volatile unsigned long v;
-
- /* Enable scsi chip and s/w level 7 ints */
- v = *(volatile unsigned long *)0xfff40080;
- v = (v & ~(0xf << 28)) | (4 << 28);
- *(volatile unsigned long *)0xfff40080 = v;
- v = *(volatile unsigned long *)0xfff4006c;
- v |= 0x8000;
- *(volatile unsigned long *)0xfff4006c = v;
- v = *(volatile unsigned long *)0xfff4202c;
- v = (v & ~0xff) | 0x10 | 4;
- *(volatile unsigned long *)0xfff4202c = v;
- }
-#endif
- /* Anything needed for your hardware? */
- local_irq_restore(flags);
-}
-
-
- * Function : static struct NCR53c7x0_cmd *allocate_cmd (Scsi_Cmnd *cmd)
- * Function static struct NCR53c7x0_cmd *allocate_cmd (Scsi_Cmnd *cmd)
- *
- * Purpose : Return the first free NCR53c7x0_cmd structure (which are
- * reused in a LIFO manner to minimize cache thrashing).
- *
- * Side effects : If we haven't yet scheduled allocation of NCR53c7x0_cmd
- * structures for this device, do so. Attempt to complete all scheduled
- * allocations using get_zeroed_page(), putting NCR53c7x0_cmd structures on
- * the free list. Teach programmers not to drink and hack.
- *
- * Inputs : cmd - SCSI command
- *
- * Returns : NCR53c7x0_cmd structure allocated on behalf of cmd;
- * NULL on failure.
- */
-
-static void
-my_free_page (void *addr, int dummy)
-{
- /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, which
- * XXX may be invalid (CONFIG_060_WRITETHROUGH)
- */
- kernel_set_cachemode((void *)addr, 4096, IOMAP_FULL_CACHING);
- free_page ((u32)addr);
-}
-
-static struct NCR53c7x0_cmd *
-allocate_cmd (Scsi_Cmnd *cmd) {
- struct Scsi_Host *host = cmd->device->host;
- struct NCR53c7x0_hostdata *hostdata =
- (struct NCR53c7x0_hostdata *) host->hostdata[0];
- u32 real; /* Real address */
- int size; /* Size of *tmp */
- struct NCR53c7x0_cmd *tmp;
- unsigned long flags;
-
- if (hostdata->options & OPTION_DEBUG_ALLOCATION)
- printk ("scsi%d : num_cmds = %d, can_queue = %d\n"
- " target = %d, lun = %d, %s\n",
- host->host_no, hostdata->num_cmds, host->can_queue,
- cmd->device->id, cmd->device->lun, (hostdata->cmd_allocated[cmd->device->id] &
- (1 << cmd->device->lun)) ? "already allocated" : "not allocated");
-
-/*
- * If we have not yet reserved commands for this I_T_L nexus, and
- * the device exists (as indicated by permanent Scsi_Cmnd structures
- * being allocated under 1.3.x, or being outside of scan_scsis in
- * 1.2.x), do so now.
- */
- if (!(hostdata->cmd_allocated[cmd->device->id] & (1 << cmd->device->lun)) &&
- cmd->device && cmd->device->has_cmdblocks) {
- if ((hostdata->extra_allocate + hostdata->num_cmds) < host->can_queue)
- hostdata->extra_allocate += host->cmd_per_lun;
- hostdata->cmd_allocated[cmd->device->id] |= (1 << cmd->device->lun);
- }
-
- for (; hostdata->extra_allocate > 0 ; --hostdata->extra_allocate,
- ++hostdata->num_cmds) {
- /* historically, kmalloc has returned unaligned addresses; pad so we
- have enough room to ROUNDUP */
- size = hostdata->max_cmd_size + sizeof (void *);
-#ifdef FORCE_DSA_ALIGNMENT
- /*
- * 53c710 rev.0 doesn't have an add-with-carry instruction.
- * Ensure we allocate enough memory to force alignment.
- */
- size += 256;
-#endif
-/* FIXME: for ISA bus '7xx chips, we need to or GFP_DMA in here */
-
- if (size > 4096) {
- printk (KERN_ERR "53c7xx: allocate_cmd size > 4K\n");
- return NULL;
- }
- real = get_zeroed_page(GFP_ATOMIC);
- if (real == 0)
- return NULL;
- cache_push(virt_to_phys((void *)real), 4096);
- cache_clear(virt_to_phys((void *)real), 4096);
- kernel_set_cachemode((void *)real, 4096, IOMAP_NOCACHE_SER);
- tmp = ROUNDUP(real, void *);
-#ifdef FORCE_DSA_ALIGNMENT
- {
- if (((u32)tmp & 0xff) > CmdPageStart)
- tmp = (struct NCR53c7x0_cmd *)((u32)tmp + 255);
- tmp = (struct NCR53c7x0_cmd *)(((u32)tmp & ~0xff) + CmdPageStart);
-#if 0
- printk ("scsi: size = %d, real = 0x%08x, tmp set to 0x%08x\n",
- size, real, (u32)tmp);
-#endif
- }
-#endif
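-	/*
-	 * The adjustment above pins the command structure at offset
-	 * CmdPageStart within a 256-byte block, giving the DSA a fixed
-	 * alignment, which is needed, per the note above, because the
-	 * 53c710 rev.0 lacks an add-with-carry instruction.
-	 */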
- tmp->real = (void *)real;
- tmp->size = size;
- tmp->free = ((void (*)(void *, int)) my_free_page);
- local_irq_save(flags);
- tmp->next = hostdata->free;
- hostdata->free = tmp;
- local_irq_restore(flags);
- }
- local_irq_save(flags);
- tmp = (struct NCR53c7x0_cmd *) hostdata->free;
- if (tmp) {
- hostdata->free = tmp->next;
- }
- local_irq_restore(flags);
- if (!tmp)
- printk ("scsi%d : can't allocate command for target %d lun %d\n",
- host->host_no, cmd->device->id, cmd->device->lun);
- return tmp;
-}
-
-/*
- * Function : static struct NCR53c7x0_cmd *create_cmd (Scsi_Cmnd *cmd)
- *
- *
- * Purpose : allocate a NCR53c7x0_cmd structure, initialize it based on the
- * Scsi_Cmnd structure passed in cmd, including dsa and Linux field
- * initialization, and dsa code relocation.
- *
- * Inputs : cmd - SCSI command
- *
- * Returns : NCR53c7x0_cmd structure corresponding to cmd,
- * NULL on failure.
- */
-static struct NCR53c7x0_cmd *
-create_cmd (Scsi_Cmnd *cmd) {
- NCR53c7x0_local_declare();
- struct Scsi_Host *host = cmd->device->host;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- struct NCR53c7x0_cmd *tmp; /* NCR53c7x0_cmd structure for this command */
- int datain, /* Number of instructions per phase */
- dataout;
- int data_transfer_instructions, /* Count of dynamic instructions */
- i; /* Counter */
- u32 *cmd_datain, /* Address of datain/dataout code */
- *cmd_dataout; /* Incremented as we assemble */
-#ifdef notyet
- unsigned char *msgptr; /* Current byte in select message */
- int msglen; /* Length of whole select message */
-#endif
- unsigned long flags;
- u32 exp_select_indirect; /* Used in sanity check */
- NCR53c7x0_local_setup(cmd->device->host);
-
- if (!(tmp = allocate_cmd (cmd)))
- return NULL;
-
- /*
- * Copy CDB and initialised result fields from Scsi_Cmnd to NCR53c7x0_cmd.
- * We do this because NCR53c7x0_cmd may have a special cache mode
- * selected to cope with lack of bus snooping, etc.
- */
-
- memcpy(tmp->cmnd, cmd->cmnd, 12);
- tmp->result = cmd->result;
-
- /*
- * Decide whether we need to generate commands for DATA IN,
- * DATA OUT, neither, or both based on the SCSI command
- */
-
- switch (cmd->cmnd[0]) {
- /* These commands do DATA IN */
- case INQUIRY:
- case MODE_SENSE:
- case READ_6:
- case READ_10:
- case READ_CAPACITY:
- case REQUEST_SENSE:
- case READ_BLOCK_LIMITS:
- case READ_TOC:
- datain = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
- dataout = 0;
- break;
- /* These commands do DATA OUT */
- case MODE_SELECT:
- case WRITE_6:
- case WRITE_10:
-#if 0
- printk("scsi%d : command is ", host->host_no);
- __scsi_print_command(cmd->cmnd);
-#endif
-#if 0
- printk ("scsi%d : %d scatter/gather segments\n", host->host_no,
- cmd->use_sg);
-#endif
- datain = 0;
- dataout = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
-#if 0
- hostdata->options |= OPTION_DEBUG_INTR;
-#endif
- break;
- /*
- * These commands do no data transfer, we should force an
- * interrupt if a data phase is attempted on them.
- */
- case TEST_UNIT_READY:
- case ALLOW_MEDIUM_REMOVAL:
- case START_STOP:
- datain = dataout = 0;
- break;
- /*
- * We don't know about these commands, so generate code to handle
- * both DATA IN and DATA OUT phases. More efficient to identify them
- * and add them to the above cases.
- */
- default:
- printk("scsi%d : datain+dataout for command ", host->host_no);
- __scsi_print_command(cmd->cmnd);
- datain = dataout = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
- }
-
- /*
- * New code : so that active pointers work correctly regardless
- * of where the saved data pointer is at, we want to immediately
- * enter the dynamic code after selection, and on a non-data
- * phase perform a CALL to the non-data phase handler, with
- * returns back to this address.
- *
- * If a phase mismatch is encountered in the middle of a
- * Block MOVE instruction, we want to _leave_ that instruction
- * unchanged as the current case is, modify a temporary buffer,
- * and point the active pointer (TEMP) at that.
- *
- * Furthermore, we want to implement a saved data pointer,
- * set by the SAVE_DATA_POINTERs message.
- *
- * So, the data transfer segments will change to
- * CALL data_transfer, WHEN NOT data phase
- * MOVE x, x, WHEN data phase
- * ( repeat )
- * JUMP other_transfer
- */
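-
-    /*
-     * This layout is presumably why datain and dataout above are sized
-     * as 2 * (use_sg ? use_sg : 1) + 3 : one CALL/MOVE pair per
-     * scatter/gather segment, plus room for the trailing JUMP to
-     * other_transfer and a little slack.
-     */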
-
- data_transfer_instructions = datain + dataout;
-
- /*
- * When we perform a request sense, we overwrite various things,
- * including the data transfer code. Make sure we have enough
- * space to do that.
- */
-
- if (data_transfer_instructions < 2)
- data_transfer_instructions = 2;
-
-
- /*
- * The saved data pointer is set up so that a RESTORE POINTERS message
- * will start the data transfer over at the beginning.
- */
-
- tmp->saved_data_pointer = virt_to_bus (hostdata->script) +
- hostdata->E_data_transfer;
-
- /*
- * Initialize Linux specific fields.
- */
-
- tmp->cmd = cmd;
- tmp->next = NULL;
- tmp->flags = 0;
- tmp->dsa_next_addr = virt_to_bus(tmp->dsa) + hostdata->dsa_next -
- hostdata->dsa_start;
- tmp->dsa_addr = virt_to_bus(tmp->dsa) - hostdata->dsa_start;
-
- /*
- * Calculate addresses of dynamic code to fill in DSA
- */
-
- tmp->data_transfer_start = tmp->dsa + (hostdata->dsa_end -
- hostdata->dsa_start) / sizeof(u32);
- tmp->data_transfer_end = tmp->data_transfer_start +
- 2 * data_transfer_instructions;
-
- cmd_datain = datain ? tmp->data_transfer_start : NULL;
- cmd_dataout = dataout ? (datain ? cmd_datain + 2 * datain : tmp->
- data_transfer_start) : NULL;
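-
-/*
- * A minimal sketch of the per-command layout this sets up (symbolic, both
- * directions assumed present):
- *
- *	tmp->dsa			fixed DSA code, dsa_start .. dsa_end
- *	tmp->data_transfer_start	DATA IN block,  2 * datain  u32 words
- *	cmd_dataout			DATA OUT block, 2 * dataout u32 words
- *	tmp->data_transfer_end		first word past the dynamic code
- */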
-
- /*
- * Fill in the NCR53c7x0_cmd structure as follows
- * dsa, with fixed up DSA code
- * datain code
- * dataout code
- */
-
- /* Copy template code into dsa and perform all necessary fixups */
- if (hostdata->dsa_fixup)
- hostdata->dsa_fixup(tmp);
-
- patch_dsa_32(tmp->dsa, dsa_next, 0, 0);
- /*
- * XXX is this giving 53c710 access to the Scsi_Cmnd in some way?
- * Do we need to change it for caching reasons?
- */
- patch_dsa_32(tmp->dsa, dsa_cmnd, 0, virt_to_bus(cmd));
-
- if (hostdata->options & OPTION_DEBUG_SYNCHRONOUS) {
-
- exp_select_indirect = ((1 << cmd->device->id) << 16) |
- (hostdata->sync[cmd->device->id].sxfer_sanity << 8);
-
- if (hostdata->sync[cmd->device->id].select_indirect !=
- exp_select_indirect) {
- printk ("scsi%d : sanity check failed select_indirect=0x%x\n",
- host->host_no, hostdata->sync[cmd->device->id].select_indirect);
- FATAL(host);
-
- }
- }
-
- patch_dsa_32(tmp->dsa, dsa_select, 0,
- hostdata->sync[cmd->device->id].select_indirect);
-
- /*
- * Right now, we'll do the WIDE and SYNCHRONOUS negotiations on
- * different commands; although it should be trivial to do them
- * both at the same time.
- */
- if (hostdata->initiate_wdtr & (1 << cmd->device->id)) {
- memcpy ((void *) (tmp->select + 1), (void *) wdtr_message,
- sizeof(wdtr_message));
- patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(wdtr_message));
- local_irq_save(flags);
- hostdata->initiate_wdtr &= ~(1 << cmd->device->id);
- local_irq_restore(flags);
- } else if (hostdata->initiate_sdtr & (1 << cmd->device->id)) {
- memcpy ((void *) (tmp->select + 1), (void *) sdtr_message,
- sizeof(sdtr_message));
- patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(sdtr_message));
- tmp->flags |= CMD_FLAG_SDTR;
- local_irq_save(flags);
- hostdata->initiate_sdtr &= ~(1 << cmd->device->id);
- local_irq_restore(flags);
-
- }
-#if 1
- else if (!(hostdata->talked_to & (1 << cmd->device->id)) &&
- !(hostdata->options & OPTION_NO_ASYNC)) {
-
- memcpy ((void *) (tmp->select + 1), (void *) async_message,
- sizeof(async_message));
- patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(async_message));
- tmp->flags |= CMD_FLAG_SDTR;
- }
-#endif
- else
- patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1);
-
- hostdata->talked_to |= (1 << cmd->device->id);
- tmp->select[0] = (hostdata->options & OPTION_DISCONNECT) ?
- IDENTIFY (1, cmd->device->lun) : IDENTIFY (0, cmd->device->lun);
- patch_dsa_32(tmp->dsa, dsa_msgout, 1, virt_to_bus(tmp->select));
- patch_dsa_32(tmp->dsa, dsa_cmdout, 0, cmd->cmd_len);
- patch_dsa_32(tmp->dsa, dsa_cmdout, 1, virt_to_bus(tmp->cmnd));
- patch_dsa_32(tmp->dsa, dsa_dataout, 0, cmd_dataout ?
- virt_to_bus (cmd_dataout)
- : virt_to_bus (hostdata->script) + hostdata->E_other_transfer);
- patch_dsa_32(tmp->dsa, dsa_datain, 0, cmd_datain ?
- virt_to_bus (cmd_datain)
- : virt_to_bus (hostdata->script) + hostdata->E_other_transfer);
- /*
- * XXX - need to make endian aware, should use separate variables
- * for both status and message bytes.
- */
- patch_dsa_32(tmp->dsa, dsa_msgin, 0, 1);
-/*
- * FIXME : this only works for little endian.  We probably want to
- * provide message and status fields in the NCR53c7x0_cmd
- * structure, and assign them to cmd->result when we're done.
- */
-#ifdef BIG_ENDIAN
- patch_dsa_32(tmp->dsa, dsa_msgin, 1, virt_to_bus(&tmp->result) + 2);
- patch_dsa_32(tmp->dsa, dsa_status, 0, 1);
- patch_dsa_32(tmp->dsa, dsa_status, 1, virt_to_bus(&tmp->result) + 3);
-#else
- patch_dsa_32(tmp->dsa, dsa_msgin, 1, virt_to_bus(&tmp->result) + 1);
- patch_dsa_32(tmp->dsa, dsa_status, 0, 1);
- patch_dsa_32(tmp->dsa, dsa_status, 1, virt_to_bus(&tmp->result));
-#endif
- patch_dsa_32(tmp->dsa, dsa_msgout_other, 0, 1);
- patch_dsa_32(tmp->dsa, dsa_msgout_other, 1,
- virt_to_bus(&(hostdata->NCR53c7xx_msg_nop)));
-
- /*
- * Generate code for zero or more of the DATA IN, DATA OUT phases
- * in the format
- *
- * CALL data_transfer, WHEN NOT phase
- * MOVE first buffer length, first buffer address, WHEN phase
- * ...
- * MOVE last buffer length, last buffer address, WHEN phase
- * JUMP other_transfer
- */
-
-/*
- * See if we're getting to data transfer by generating an unconditional
- * interrupt.
- */
-#if 0
- if (datain) {
- cmd_datain[0] = 0x98080000;
- cmd_datain[1] = 0x03ffd00d;
- cmd_datain += 2;
- }
-#endif
-
-/*
- * XXX - I'm undecided whether all of this nonsense is faster
- * in the long run, or whether I should just go and implement a loop
- * on the NCR chip using table indirect mode?
- *
- * In any case, this is how it _must_ be done for 53c700/700-66 chips,
- * so this stays even when we come up with something better.
- *
- * When we're limited to 1 simultaneous command, no overlapping processing,
- * we're seeing 630K/sec, with 7% CPU usage on a slow Syquest 45M
- * drive.
- *
- * Not bad, not good. We'll see.
- */
-
- tmp->bounce.len = 0; /* Assume aligned buffer */
-
- for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4,
- cmd_dataout += 4, ++i) {
- u32 vbuf = cmd->use_sg
- ? (u32)page_address(((struct scatterlist *)cmd->request_buffer)[i].page)+
- ((struct scatterlist *)cmd->request_buffer)[i].offset
- : (u32)(cmd->request_buffer);
- u32 bbuf = virt_to_bus((void *)vbuf);
- u32 count = cmd->use_sg ?
- ((struct scatterlist *)cmd->request_buffer)[i].length :
- cmd->request_bufflen;
-
- /*
- * If we have buffers which are not aligned with 16 byte cache
- * lines, then we just hope nothing accesses the other parts of
- * those cache lines while the transfer is in progress. That would
- * fill the cache, and subsequent reads of the dma data would pick
- * up the wrong thing.
- * XXX We need a bounce buffer to handle that correctly.
- */
-
- if (((bbuf & 15) || (count & 15)) && (datain || dataout))
- {
- /* Bounce buffer needed */
- if (cmd->use_sg)
- printk ("53c7xx: Non-aligned buffer with use_sg\n");
- else if (datain && dataout)
- printk ("53c7xx: Non-aligned buffer with datain && dataout\n");
- else if (count > 256)
- printk ("53c7xx: Non-aligned transfer > 256 bytes\n");
- else
- {
- if (datain)
- {
- tmp->bounce.len = count;
- tmp->bounce.addr = vbuf;
- bbuf = virt_to_bus(tmp->bounce.buf);
- tmp->bounce.buf[0] = 0xff;
- tmp->bounce.buf[1] = 0xfe;
- tmp->bounce.buf[2] = 0xfd;
- tmp->bounce.buf[3] = 0xfc;
- }
- if (dataout)
- {
- memcpy ((void *)tmp->bounce.buf, (void *)vbuf, count);
- bbuf = virt_to_bus(tmp->bounce.buf);
- }
- }
- }
-
- if (datain) {
- cache_clear(virt_to_phys((void *)vbuf), count);
- /* CALL other_in, WHEN NOT DATA_IN */
- cmd_datain[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL |
- DCMD_TCI_IO) << 24) |
- DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
- cmd_datain[1] = virt_to_bus (hostdata->script) +
- hostdata->E_other_in;
- /* MOVE count, buf, WHEN DATA_IN */
- cmd_datain[2] = ((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I | DCMD_BMI_IO)
- << 24) | count;
- cmd_datain[3] = bbuf;
-#if 0
- print_insn (host, cmd_datain, "dynamic ", 1);
- print_insn (host, cmd_datain + 2, "dynamic ", 1);
-#endif
- }
- if (dataout) {
- cache_push(virt_to_phys((void *)vbuf), count);
- /* CALL other_out, WHEN NOT DATA_OUT */
- cmd_dataout[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL) << 24) |
- DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
- cmd_dataout[1] = virt_to_bus(hostdata->script) +
- hostdata->E_other_out;
-	    /* MOVE count, buf, WHEN DATA_OUT */
- cmd_dataout[2] = ((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I) << 24)
- | count;
- cmd_dataout[3] = bbuf;
-#if 0
- print_insn (host, cmd_dataout, "dynamic ", 1);
- print_insn (host, cmd_dataout + 2, "dynamic ", 1);
-#endif
- }
- }
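-
-/*
- * For a single hypothetical 512-byte DATA IN segment the loop above emits
- * the two-instruction pair (symbolic listing only)
- *
- *	CALL other_in, WHEN NOT DATA_IN		; cmd_datain[0..1]
- *	MOVE 512, buf, WHEN DATA_IN		; cmd_datain[2..3]
- *
- * followed, once per direction, by the JUMP installed below.
- */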
-
- /*
- * Install JUMP instructions after the data transfer routines to return
- * control to the do_other_transfer routines.
- */
-
-
- if (datain) {
- cmd_datain[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP) << 24) |
- DBC_TCI_TRUE;
- cmd_datain[1] = virt_to_bus(hostdata->script) +
- hostdata->E_other_transfer;
-#if 0
- print_insn (host, cmd_datain, "dynamic jump ", 1);
-#endif
- cmd_datain += 2;
- }
-#if 0
- if (datain) {
- cmd_datain[0] = 0x98080000;
- cmd_datain[1] = 0x03ffdeed;
- cmd_datain += 2;
- }
-#endif
- if (dataout) {
- cmd_dataout[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP) << 24) |
- DBC_TCI_TRUE;
- cmd_dataout[1] = virt_to_bus(hostdata->script) +
- hostdata->E_other_transfer;
-#if 0
- print_insn (host, cmd_dataout, "dynamic jump ", 1);
-#endif
- cmd_dataout += 2;
- }
-
- return tmp;
-}
-
-/*
- * Function : int NCR53c7xx_queue_command (Scsi_Cmnd *cmd,
- * void (*done)(Scsi_Cmnd *))
- *
- * Purpose : enqueues a SCSI command
- *
- * Inputs : cmd - SCSI command, done - function called on completion, with
- * a pointer to the command descriptor.
- *
- * Returns : 0
- *
- * Side effects :
- * cmd is added to the per instance driver issue_queue, with major
- * twiddling done to the host specific fields of cmd. If the
- * process_issue_queue coroutine isn't running, it is restarted.
- *
- * NOTE : we use the host_scribble field of the Scsi_Cmnd structure to
- * hold our own data, and pervert the ptr field of the SCp field
- * to create a linked list.
- */
-
-int
-NCR53c7xx_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *)) {
- struct Scsi_Host *host = cmd->device->host;
- struct NCR53c7x0_hostdata *hostdata =
- (struct NCR53c7x0_hostdata *) host->hostdata[0];
- unsigned long flags;
- Scsi_Cmnd *tmp;
-
- cmd->scsi_done = done;
- cmd->host_scribble = NULL;
- cmd->SCp.ptr = NULL;
- cmd->SCp.buffer = NULL;
-
-#ifdef VALID_IDS
- /* Ignore commands on invalid IDs */
- if (!hostdata->valid_ids[cmd->device->id]) {
- printk("scsi%d : ignoring target %d lun %d\n", host->host_no,
- cmd->device->id, cmd->device->lun);
- cmd->result = (DID_BAD_TARGET << 16);
- done(cmd);
- return 0;
- }
-#endif
-
- local_irq_save(flags);
- if ((hostdata->options & (OPTION_DEBUG_INIT_ONLY|OPTION_DEBUG_PROBE_ONLY))
- || ((hostdata->options & OPTION_DEBUG_TARGET_LIMIT) &&
- !(hostdata->debug_lun_limit[cmd->device->id] & (1 << cmd->device->lun)))
-#ifdef LINUX_1_2
- || cmd->device->id > 7
-#else
- || cmd->device->id >= host->max_id
-#endif
- || cmd->device->id == host->this_id
- || hostdata->state == STATE_DISABLED) {
- printk("scsi%d : disabled or bad target %d lun %d\n", host->host_no,
- cmd->device->id, cmd->device->lun);
- cmd->result = (DID_BAD_TARGET << 16);
- done(cmd);
- local_irq_restore(flags);
- return 0;
- }
-
- if ((hostdata->options & OPTION_DEBUG_NCOMMANDS_LIMIT) &&
- (hostdata->debug_count_limit == 0)) {
- printk("scsi%d : maximum commands exceeded\n", host->host_no);
- cmd->result = (DID_BAD_TARGET << 16);
- done(cmd);
- local_irq_restore(flags);
- return 0;
- }
-
- if (hostdata->options & OPTION_DEBUG_READ_ONLY) {
- switch (cmd->cmnd[0]) {
- case WRITE_6:
- case WRITE_10:
- printk("scsi%d : WRITE attempted with NO_WRITE debugging flag set\n",
- host->host_no);
- cmd->result = (DID_BAD_TARGET << 16);
- done(cmd);
- local_irq_restore(flags);
- return 0;
- }
- }
-
- if ((hostdata->options & OPTION_DEBUG_TARGET_LIMIT) &&
- hostdata->debug_count_limit != -1)
- --hostdata->debug_count_limit;
-
- cmd->result = 0xffff; /* The NCR will overwrite message
- and status with valid data */
-    cmd->host_scribble = (unsigned char *) create_cmd (cmd);
-
- /*
- * REQUEST SENSE commands are inserted at the head of the queue
- * so that we do not clear the contingent allegiance condition
- * they may be looking at.
- */
-
- if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
- cmd->SCp.ptr = (unsigned char *) hostdata->issue_queue;
- hostdata->issue_queue = cmd;
- } else {
- for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp->SCp.ptr;
- tmp = (Scsi_Cmnd *) tmp->SCp.ptr);
- tmp->SCp.ptr = (unsigned char *) cmd;
- }
- local_irq_restore(flags);
- run_process_issue_queue();
- return 0;
-}
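-
-/*
- * A minimal sketch of how the issue queue built above is threaded, assuming
- * only the conventions used in NCR53c7xx_queue_command(): SCp.ptr is
- * overloaded as the "next" link and host_scribble carries the NCR53c7x0_cmd.
- * The helper below is hypothetical and for illustration only.
- */
-#if 0
-static int
-count_issue_queue (struct NCR53c7x0_hostdata *hostdata) {
-    Scsi_Cmnd *c;
-    int n = 0;
-    for (c = (Scsi_Cmnd *) hostdata->issue_queue; c;
-	 c = (Scsi_Cmnd *) c->SCp.ptr)
-	++n;			/* one SCp.ptr link per queued command */
-    return n;
-}
-#endif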
-
-/*
- * Function : void to_schedule_list (struct Scsi_Host *host,
- *	struct NCR53c7x0_hostdata * hostdata, struct NCR53c7x0_cmd *cmd)
- *
- * Purpose : takes a SCSI command which was just removed from the
- * issue queue, and deals with it by inserting it in the first
- * free slot in the schedule list or by terminating it immediately.
- *
- * Inputs :
- * host - SCSI host adapter; hostdata - hostdata structure for
- *	host - SCSI host adapter; hostdata - hostdata structure for
- *	this adapter; cmd - a pointer to the NCR53c7x0_cmd for the command;
- *	the associated Scsi_Cmnd's host_scribble field should already point to it.
- * Side effects :
- * cmd is added to the per instance schedule list, with minor
- * twiddling done to the host specific fields of cmd.
- *
- */
-
-static __inline__ void
-to_schedule_list (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
- struct NCR53c7x0_cmd *cmd) {
- NCR53c7x0_local_declare();
- Scsi_Cmnd *tmp = cmd->cmd;
- unsigned long flags;
- /* dsa start is negative, so subtraction is used */
- volatile u32 *ncrcurrent;
-
- int i;
- NCR53c7x0_local_setup(host);
-#if 0
- printk("scsi%d : new dsa is 0x%lx (virt 0x%p)\n", host->host_no,
- virt_to_bus(hostdata->dsa), hostdata->dsa);
-#endif
-
- local_irq_save(flags);
-
- /*
- * Work around race condition : if an interrupt fired and we
- * got disabled, forget about this command.
- */
-
- if (hostdata->state == STATE_DISABLED) {
- printk("scsi%d : driver disabled\n", host->host_no);
- tmp->result = (DID_BAD_TARGET << 16);
- cmd->next = (struct NCR53c7x0_cmd *) hostdata->free;
- hostdata->free = cmd;
- tmp->scsi_done(tmp);
- local_irq_restore(flags);
- return;
- }
-
- for (i = host->can_queue, ncrcurrent = hostdata->schedule;
- i > 0 && ncrcurrent[0] != hostdata->NOP_insn;
- --i, ncrcurrent += 2 /* JUMP instructions are two words */);
-
- if (i > 0) {
- ++hostdata->busy[tmp->device->id][tmp->device->lun];
- cmd->next = hostdata->running_list;
- hostdata->running_list = cmd;
-
- /* Restore this instruction to a NOP once the command starts */
- cmd->dsa [(hostdata->dsa_jump_dest - hostdata->dsa_start) /
- sizeof(u32)] = (u32) virt_to_bus ((void *)ncrcurrent);
- /* Replace the current jump operand. */
- ncrcurrent[1] =
- virt_to_bus ((void *) cmd->dsa) + hostdata->E_dsa_code_begin -
- hostdata->E_dsa_code_template;
- /* Replace the NOP instruction with a JUMP */
- ncrcurrent[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24) |
- DBC_TCI_TRUE;
- } else {
- printk ("scsi%d: no free slot\n", host->host_no);
- disable(host);
- tmp->result = (DID_ERROR << 16);
- cmd->next = (struct NCR53c7x0_cmd *) hostdata->free;
- hostdata->free = cmd;
- tmp->scsi_done(tmp);
- local_irq_restore(flags);
- return;
- }
-
- /*
- * If the NCR chip is in an idle state, start it running the scheduler
- * immediately. Otherwise, signal the chip to jump to schedule as
- * soon as it is idle.
- */
-
- if (hostdata->idle) {
- hostdata->idle = 0;
- hostdata->state = STATE_RUNNING;
- NCR53c7x0_write32 (DSP_REG, virt_to_bus ((void *)hostdata->schedule));
- if (hostdata->options & OPTION_DEBUG_TRACE)
- NCR53c7x0_write8 (DCNTL_REG, hostdata->saved_dcntl |
- DCNTL_SSM | DCNTL_STD);
- } else {
- NCR53c7x0_write8(hostdata->istat, ISTAT_10_SIGP);
- }
-
- local_irq_restore(flags);
-}
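-
-/*
- * A minimal sketch of one schedule slot as manipulated above (values
- * symbolic, assuming a free slot at ncrcurrent):
- *
- *	before:	ncrcurrent[0] = NOP			ncrcurrent[1] = don't care
- *	after:	ncrcurrent[0] = JUMP | DBC_TCI_TRUE	ncrcurrent[1] = DSA code entry
- *
- * The command's dsa_jump_dest word is pointed back at ncrcurrent so that the
- * SCRIPTS code can restore the slot to a NOP once the command is under way.
- */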
-
-/*
- * Function : busyp (struct Scsi_Host *host, struct NCR53c7x0_hostdata
- * *hostdata, Scsi_Cmnd *cmd)
- *
- * Purpose : decide if we can pass the given SCSI command on to the
- * device in question or not.
- *
- * Returns : non-zero when we're busy, 0 when we aren't.
- */
-
-static __inline__ int
-busyp (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
- Scsi_Cmnd *cmd) {
- /* FIXME : in the future, this needs to accommodate SCSI-II tagged
- queuing, and we may be able to play with fairness here a bit.
- */
- return hostdata->busy[cmd->device->id][cmd->device->lun];
-}
-
-/*
- * Function : process_issue_queue (void)
- *
- * Purpose : transfer commands from the issue queue to the NCR start queue
- * of each NCR53c7/8xx in the system, avoiding kernel stack
- * overflows when the scsi_done() function is invoked recursively.
- *
- * NOTE : process_issue_queue exits with interrupts *disabled*, so the
- * caller must reenable them if it desires.
- *
- * NOTE : process_issue_queue should be called from both
- * NCR53c7x0_queue_command() and from the interrupt handler
- * after command completion in case NCR53c7x0_queue_command()
- * isn't invoked again but we've freed up resources that are
- * needed.
- */
-
-static void
-process_issue_queue (unsigned long flags) {
- Scsi_Cmnd *tmp, *prev;
- struct Scsi_Host *host;
- struct NCR53c7x0_hostdata *hostdata;
- int done;
-
- /*
- * We run (with interrupts disabled) until we're sure that none of
- * the host adapters have anything that can be done, at which point
- * we set process_issue_queue_running to 0 and exit.
- *
- * Interrupts are enabled before doing various other internal
- * instructions, after we've decided that we need to run through
- * the loop again.
- *
- */
-
- do {
- local_irq_disable(); /* Freeze request queues */
- done = 1;
- for (host = first_host; host && host->hostt == the_template;
- host = host->next) {
- hostdata = (struct NCR53c7x0_hostdata *) host->hostdata[0];
- local_irq_disable();
- if (hostdata->issue_queue) {
- if (hostdata->state == STATE_DISABLED) {
- tmp = (Scsi_Cmnd *) hostdata->issue_queue;
- hostdata->issue_queue = (Scsi_Cmnd *) tmp->SCp.ptr;
- tmp->result = (DID_BAD_TARGET << 16);
- if (tmp->host_scribble) {
- ((struct NCR53c7x0_cmd *)tmp->host_scribble)->next =
- hostdata->free;
- hostdata->free =
- (struct NCR53c7x0_cmd *)tmp->host_scribble;
- tmp->host_scribble = NULL;
- }
- tmp->scsi_done (tmp);
- done = 0;
- } else
- for (tmp = (Scsi_Cmnd *) hostdata->issue_queue,
- prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *)
- tmp->SCp.ptr)
- if (!tmp->host_scribble ||
- !busyp (host, hostdata, tmp)) {
- if (prev)
- prev->SCp.ptr = tmp->SCp.ptr;
- else
- hostdata->issue_queue = (Scsi_Cmnd *)
- tmp->SCp.ptr;
- tmp->SCp.ptr = NULL;
- if (tmp->host_scribble) {
- if (hostdata->options & OPTION_DEBUG_QUEUES)
- printk ("scsi%d : moving command for target %d lun %d to start list\n",
- host->host_no, tmp->device->id, tmp->device->lun);
-
-
- to_schedule_list (host, hostdata,
- (struct NCR53c7x0_cmd *)
- tmp->host_scribble);
- } else {
- if (((tmp->result & 0xff) == 0xff) ||
- ((tmp->result & 0xff00) == 0xff00)) {
- printk ("scsi%d : danger Will Robinson!\n",
- host->host_no);
- tmp->result = DID_ERROR << 16;
- disable (host);
- }
- tmp->scsi_done(tmp);
- }
- done = 0;
- } /* if target/lun is not busy */
- } /* if hostdata->issue_queue */
- if (!done)
- local_irq_restore(flags);
- } /* for host */
- } while (!done);
- process_issue_queue_running = 0;
-}
-
-/*
- * Function : static void intr_scsi (struct Scsi_Host *host,
- * struct NCR53c7x0_cmd *cmd)
- *
- * Purpose : handle all SCSI interrupts, indicated by the setting
- * of the SIP bit in the ISTAT register.
- *
- * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
- * may be NULL.
- */
-
-static void
-intr_scsi (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
- NCR53c7x0_local_declare();
- struct NCR53c7x0_hostdata *hostdata =
- (struct NCR53c7x0_hostdata *) host->hostdata[0];
- unsigned char sstat0_sist0, sist1, /* Registers */
- fatal; /* Did a fatal interrupt
- occur ? */
-
- NCR53c7x0_local_setup(host);
-
- fatal = 0;
-
- sstat0_sist0 = NCR53c7x0_read8(SSTAT0_REG);
- sist1 = 0;
-
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk ("scsi%d : SIST0 0x%0x, SIST1 0x%0x\n", host->host_no,
- sstat0_sist0, sist1);
-
- /* 250ms selection timeout */
- if (sstat0_sist0 & SSTAT0_700_STO) {
- fatal = 1;
- if (hostdata->options & OPTION_DEBUG_INTR) {
- printk ("scsi%d : Selection Timeout\n", host->host_no);
- if (cmd) {
- printk("scsi%d : target %d, lun %d, command ",
- host->host_no, cmd->cmd->device->id, cmd->cmd->device->lun);
- __scsi_print_command (cmd->cmd->cmnd);
- printk("scsi%d : dsp = 0x%x (virt 0x%p)\n", host->host_no,
- NCR53c7x0_read32(DSP_REG),
- bus_to_virt(NCR53c7x0_read32(DSP_REG)));
- } else {
- printk("scsi%d : no command\n", host->host_no);
- }
- }
-/*
- * XXX - question : how do we want to handle the Illegal Instruction
- * interrupt, which may occur before or after the Selection Timeout
- * interrupt?
- */
-
- if (1) {
- hostdata->idle = 1;
- hostdata->expecting_sto = 0;
-
- if (hostdata->test_running) {
- hostdata->test_running = 0;
- hostdata->test_completed = 3;
- } else if (cmd) {
- abnormal_finished(cmd, DID_BAD_TARGET << 16);
- }
-#if 0
- hostdata->intrs = 0;
-#endif
- }
- }
-
-/*
- * FIXME : in theory, we can also get a UDC when a STO occurs.
- */
- if (sstat0_sist0 & SSTAT0_UDC) {
- fatal = 1;
- if (cmd) {
- printk("scsi%d : target %d lun %d unexpected disconnect\n",
- host->host_no, cmd->cmd->device->id, cmd->cmd->device->lun);
- print_lots (host);
- abnormal_finished(cmd, DID_ERROR << 16);
- } else
- printk("scsi%d : unexpected disconnect (no command)\n",
- host->host_no);
-
- hostdata->dsp = (u32 *) hostdata->schedule;
- hostdata->dsp_changed = 1;
- }
-
- /* SCSI PARITY error */
- if (sstat0_sist0 & SSTAT0_PAR) {
- fatal = 1;
- if (cmd && cmd->cmd) {
- printk("scsi%d : target %d lun %d parity error.\n",
- host->host_no, cmd->cmd->device->id, cmd->cmd->device->lun);
- abnormal_finished (cmd, DID_PARITY << 16);
- } else
- printk("scsi%d : parity error\n", host->host_no);
- /* Should send message out, parity error */
-
- /* XXX - Reduce synchronous transfer rate! */
- hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- /* SCSI GROSS error */
- }
-
- if (sstat0_sist0 & SSTAT0_SGE) {
- fatal = 1;
- printk("scsi%d : gross error, saved2_dsa = 0x%x\n", host->host_no,
- (unsigned int)hostdata->saved2_dsa);
- print_lots (host);
-
- /*
- * A SCSI gross error may occur when we have
- *
- * - A synchronous offset which causes the SCSI FIFO to be overwritten.
- *
- * - A REQ which causes the maximum synchronous offset programmed in
- * the SXFER register to be exceeded.
- *
- * - A phase change with an outstanding synchronous offset.
- *
- *	- Residual data in the synchronous data FIFO when a transfer
- *	  other than a synchronous receive is started.
- */
-
-
- /* XXX Should deduce synchronous transfer rate! */
- hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- /* Phase mismatch */
- }
-
- if (sstat0_sist0 & SSTAT0_MA) {
- fatal = 1;
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk ("scsi%d : SSTAT0_MA\n", host->host_no);
- intr_phase_mismatch (host, cmd);
- }
-
-#if 0
- if (sstat0_sist0 & SIST0_800_RSL)
- printk ("scsi%d : Oh no Mr. Bill!\n", host->host_no);
-#endif
-
-/*
- * If a fatal SCSI interrupt occurs, we must ensure that the DMA and
- * SCSI FIFOs are flushed.
- */
-
- if (fatal) {
- if (!hostdata->dstat_valid) {
- hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
- hostdata->dstat_valid = 1;
- }
-
- if (!(hostdata->dstat & DSTAT_DFE)) {
- printk ("scsi%d : DMA FIFO not empty\n", host->host_no);
- /*
- * Really need to check this code for 710 RGH.
-	     * Haven't seen any problems, but maybe we should FLUSH before
- * clearing sometimes.
- */
- NCR53c7x0_write8 (CTEST8_REG, CTEST8_10_CLF);
- while (NCR53c7x0_read8 (CTEST8_REG) & CTEST8_10_CLF)
- ;
- hostdata->dstat |= DSTAT_DFE;
- }
- }
-}
-
-#ifdef CYCLIC_TRACE
-
-/*
- * The following implements a cyclic log of instructions executed, if you turn
- * TRACE on. It will also print the log for you. Very useful when debugging
- * 53c710 support, possibly not really needed any more.
- */
-
-u32 insn_log[4096];
-u32 insn_log_index = 0;
-
-void log1 (u32 i)
-{
- insn_log[insn_log_index++] = i;
- if (insn_log_index == 4096)
- insn_log_index = 0;
-}
-
-void log_insn (u32 *ip)
-{
- log1 ((u32)ip);
- log1 (*ip);
- log1 (*(ip+1));
- if (((*ip >> 24) & DCMD_TYPE_MASK) == DCMD_TYPE_MMI)
- log1 (*(ip+2));
-}
-
-void dump_log(void)
-{
- int cnt = 0;
- int i = insn_log_index;
- int size;
- struct Scsi_Host *host = first_host;
-
- while (cnt < 4096) {
- printk ("%08x (+%6x): ", insn_log[i], (insn_log[i] - (u32)&(((struct NCR53c7x0_hostdata *)host->hostdata[0])->script))/4);
- if (++i == 4096)
- i = 0;
- cnt++;
- if (((insn_log[i] >> 24) & DCMD_TYPE_MASK) == DCMD_TYPE_MMI)
- size = 3;
- else
- size = 2;
- while (size--) {
- printk ("%08x ", insn_log[i]);
- if (++i == 4096)
- i = 0;
- cnt++;
- }
- printk ("\n");
- }
-}
-#endif
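-
-/*
- * A usage sketch, assuming CYCLIC_TRACE and OPTION_DEBUG_TRACE are both
- * enabled: each instruction the chip is stepped through is recorded with
- * log_insn() from the trace path in NCR53c7x0_intr(), and dump_log() can
- * then be called from a debugger or a failure path to print the most
- * recent 4096 words of the ring.
- */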
-
-
-/*
- * Function : static void NCR53c7x0_intfly (struct Scsi_Host *host)
- *
- * Purpose : Scan command queue for specified host, looking for completed
- * commands.
- *
- * Inputs : Scsi_Host pointer.
- *
- * This is called from the interrupt handler, when a simulated INTFLY
- * interrupt occurs.
- */
-
-static void
-NCR53c7x0_intfly (struct Scsi_Host *host)
-{
- NCR53c7x0_local_declare();
- struct NCR53c7x0_hostdata *hostdata; /* host->hostdata[0] */
- struct NCR53c7x0_cmd *cmd, /* command which halted */
- **cmd_prev_ptr;
- unsigned long flags;
- char search_found = 0; /* Got at least one ? */
-
- hostdata = (struct NCR53c7x0_hostdata *) host->hostdata[0];
- NCR53c7x0_local_setup(host);
-
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk ("scsi%d : INTFLY\n", host->host_no);
-
- /*
- * Traverse our list of running commands, and look
- * for those with valid (non-0xff ff) status and message
- * bytes encoded in the result which signify command
- * completion.
- */
-
- local_irq_save(flags);
-restart:
- for (cmd_prev_ptr = (struct NCR53c7x0_cmd **)&(hostdata->running_list),
- cmd = (struct NCR53c7x0_cmd *) hostdata->running_list; cmd ;
- cmd_prev_ptr = (struct NCR53c7x0_cmd **) &(cmd->next),
- cmd = (struct NCR53c7x0_cmd *) cmd->next)
- {
- Scsi_Cmnd *tmp;
-
- if (!cmd) {
- printk("scsi%d : very weird.\n", host->host_no);
- break;
- }
-
- if (!(tmp = cmd->cmd)) {
- printk("scsi%d : weird. NCR53c7x0_cmd has no Scsi_Cmnd\n",
- host->host_no);
- continue;
- }
- /* Copy the result over now; may not be complete,
- * but subsequent tests may as well be done on
- * cached memory.
- */
- tmp->result = cmd->result;
-
- if (((tmp->result & 0xff) == 0xff) ||
- ((tmp->result & 0xff00) == 0xff00))
- continue;
-
- search_found = 1;
-
- if (cmd->bounce.len)
- memcpy ((void *)cmd->bounce.addr,
- (void *)cmd->bounce.buf, cmd->bounce.len);
-
- /* Important - remove from list _before_ done is called */
- if (cmd_prev_ptr)
- *cmd_prev_ptr = (struct NCR53c7x0_cmd *) cmd->next;
-
- --hostdata->busy[tmp->device->id][tmp->device->lun];
- cmd->next = hostdata->free;
- hostdata->free = cmd;
-
- tmp->host_scribble = NULL;
-
- if (hostdata->options & OPTION_DEBUG_INTR) {
- printk ("scsi%d : command complete : pid %lu, id %d,lun %d result 0x%x ",
- host->host_no, tmp->pid, tmp->device->id, tmp->device->lun, tmp->result);
- __scsi_print_command (tmp->cmnd);
- }
-
- tmp->scsi_done(tmp);
- goto restart;
- }
- local_irq_restore(flags);
-
- if (!search_found) {
- printk ("scsi%d : WARNING : INTFLY with no completed commands.\n",
- host->host_no);
- } else {
- run_process_issue_queue();
- }
- return;
-}
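-
-/*
- * A minimal sketch of why the 0xff tests above work: create_cmd() points
- * dsa_status at byte 0 and dsa_msgin at byte 1 of cmd->result (little-endian
- * case), and NCR53c7xx_queue_command() presets result to 0xffff, so a
- * command is treated as complete only once the chip has overwritten both
- * bytes, e.g. a hypothetical GOOD status plus COMMAND COMPLETE message
- * leaves (result & 0xffff) == 0x0000.
- */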
-
-/*
- * Function : static irqreturn_t NCR53c7x0_intr (int irq, void *dev_id)
- *
- * Purpose : handle NCR53c7x0 interrupts for all NCR devices sharing
- * the same IRQ line.
- *
- * Inputs : Since we're using the IRQF_DISABLED interrupt handler
- * semantics, irq indicates the interrupt which invoked
- * this handler.
- *
- * On the 710 we simulate an INTFLY with a script interrupt, and the
- * script interrupt handler will call back to this function.
- */
-
-static irqreturn_t
-NCR53c7x0_intr (int irq, void *dev_id)
-{
- NCR53c7x0_local_declare();
- struct Scsi_Host *host; /* Host we are looking at */
- unsigned char istat; /* Values of interrupt regs */
- struct NCR53c7x0_hostdata *hostdata; /* host->hostdata[0] */
- struct NCR53c7x0_cmd *cmd; /* command which halted */
- u32 *dsa; /* DSA */
- int handled = 0;
-
-#ifdef NCR_DEBUG
- char buf[80]; /* Debugging sprintf buffer */
- size_t buflen; /* Length of same */
-#endif
-
- host = (struct Scsi_Host *)dev_id;
- hostdata = (struct NCR53c7x0_hostdata *) host->hostdata[0];
- NCR53c7x0_local_setup(host);
-
- /*
- * Only read istat once per loop, since reading it again will unstack
- * interrupts
- */
-
- while ((istat = NCR53c7x0_read8(hostdata->istat)) & (ISTAT_SIP|ISTAT_DIP)) {
- handled = 1;
- hostdata->dsp_changed = 0;
- hostdata->dstat_valid = 0;
- hostdata->state = STATE_HALTED;
-
- if (NCR53c7x0_read8 (SSTAT2_REG) & SSTAT2_FF_MASK)
- printk ("scsi%d : SCSI FIFO not empty\n", host->host_no);
-
- /*
- * NCR53c700 and NCR53c700-66 change the current SCSI
- * process, hostdata->ncrcurrent, in the Linux driver so
- * cmd = hostdata->ncrcurrent.
- *
- * With other chips, we must look through the commands
- * executing and find the command structure which
- * corresponds to the DSA register.
- */
-
- if (hostdata->options & OPTION_700) {
- cmd = (struct NCR53c7x0_cmd *) hostdata->ncrcurrent;
- } else {
- dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
- for (cmd = (struct NCR53c7x0_cmd *) hostdata->running_list;
- cmd && (dsa + (hostdata->dsa_start / sizeof(u32))) != cmd->dsa;
- cmd = (struct NCR53c7x0_cmd *)(cmd->next))
- ;
- }
- if (hostdata->options & OPTION_DEBUG_INTR) {
- if (cmd) {
- printk("scsi%d : interrupt for pid %lu, id %d, lun %d ",
- host->host_no, cmd->cmd->pid, (int) cmd->cmd->device->id,
- (int) cmd->cmd->device->lun);
- __scsi_print_command (cmd->cmd->cmnd);
- } else {
- printk("scsi%d : no active command\n", host->host_no);
- }
- }
-
- if (istat & ISTAT_SIP) {
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk ("scsi%d : ISTAT_SIP\n", host->host_no);
- intr_scsi (host, cmd);
- }
-
- if (istat & ISTAT_DIP) {
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk ("scsi%d : ISTAT_DIP\n", host->host_no);
- intr_dma (host, cmd);
- }
-
- if (!hostdata->dstat_valid) {
- hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
- hostdata->dstat_valid = 1;
- }
-
- if (!(hostdata->dstat & DSTAT_DFE)) {
- printk ("scsi%d : DMA FIFO not empty\n", host->host_no);
- /* Really need to check this out for 710 RGH */
- NCR53c7x0_write8 (CTEST8_REG, CTEST8_10_CLF);
- while (NCR53c7x0_read8 (CTEST8_REG) & CTEST8_10_CLF)
- ;
- hostdata->dstat |= DSTAT_DFE;
- }
-
- if (!hostdata->idle && hostdata->state == STATE_HALTED) {
- if (!hostdata->dsp_changed)
- hostdata->dsp = (u32 *)bus_to_virt(NCR53c7x0_read32(DSP_REG));
-#if 0
- printk("scsi%d : new dsp is 0x%lx (virt 0x%p)\n",
- host->host_no, virt_to_bus(hostdata->dsp), hostdata->dsp);
-#endif
-
- hostdata->state = STATE_RUNNING;
- NCR53c7x0_write32 (DSP_REG, virt_to_bus(hostdata->dsp));
- if (hostdata->options & OPTION_DEBUG_TRACE) {
-#ifdef CYCLIC_TRACE
- log_insn (hostdata->dsp);
-#else
- print_insn (host, hostdata->dsp, "t ", 1);
-#endif
- NCR53c7x0_write8 (DCNTL_REG,
- hostdata->saved_dcntl | DCNTL_SSM | DCNTL_STD);
- }
- }
- }
- return IRQ_HANDLED;
-}
-
-
-/*
- * Function : static int abort_connected (struct Scsi_Host *host)
- *
- * Purpose : Assuming that the NCR SCSI processor is currently
- * halted, break the currently established nexus. Clean
- * up of the NCR53c7x0_cmd and Scsi_Cmnd structures should
- * be done on receipt of the abort interrupt.
- *
- * Inputs : host - SCSI host
- *
- */
-
-static int
-abort_connected (struct Scsi_Host *host) {
-#ifdef NEW_ABORT
- NCR53c7x0_local_declare();
-#endif
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
-/* FIXME : this probably should change for production kernels; at the
- least, counter should move to a per-host structure. */
- static int counter = 5;
-#ifdef NEW_ABORT
- int sstat, phase, offset;
- u32 *script;
- NCR53c7x0_local_setup(host);
-#endif
-
- if (--counter <= 0) {
- disable(host);
- return 0;
- }
-
- printk ("scsi%d : DANGER : abort_connected() called \n",
- host->host_no);
-
-#ifdef NEW_ABORT
-
-/*
- * New strategy : Rather than using a generic abort routine,
- * we'll specifically try to source or sink the appropriate
- * amount of data for the phase we're currently in (taking into
- * account the current synchronous offset)
- */
-
-    sstat = NCR53c7x0_read8 (SSTAT2_REG);
- offset = OFFSET (sstat & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT;
- phase = sstat & SSTAT2_PHASE_MASK;
-
-/*
- * SET ATN
- * MOVE source_or_sink, WHEN CURRENT PHASE
- * < repeat for each outstanding byte >
- * JUMP send_abort_message
- */
-
- script = hostdata->abort_script = kmalloc (
- 8 /* instruction size */ * (
- 1 /* set ATN */ +
- (!offset ? 1 : offset) /* One transfer per outstanding byte */ +
- 1 /* send abort message */),
- GFP_ATOMIC);
-
-
-#else /* def NEW_ABORT */
- hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
- sizeof(u32);
-#endif /* def NEW_ABORT */
- hostdata->dsp_changed = 1;
-
-/* XXX - need to flag the command as aborted after the abort_connected
- code runs
- */
- return 0;
-}
-
-/*
- * Function : static int datapath_residual (struct Scsi_Host *host)
- *
- * Purpose : return residual data count of what's in the chip.
- *
- * Inputs : host - SCSI host
- */
-
-static int
-datapath_residual (struct Scsi_Host *host) {
- NCR53c7x0_local_declare();
- int count, synchronous, sstat;
- unsigned int ddir;
-
- NCR53c7x0_local_setup(host);
- /* COMPAT : the 700 and 700-66 need to use DFIFO_00_BO_MASK */
- count = ((NCR53c7x0_read8 (DFIFO_REG) & DFIFO_10_BO_MASK) -
- (NCR53c7x0_read32 (DBC_REG) & DFIFO_10_BO_MASK)) & DFIFO_10_BO_MASK;
- synchronous = NCR53c7x0_read8 (SXFER_REG) & SXFER_MO_MASK;
- /* COMPAT : DDIR is elsewhere on non-'8xx chips. */
- ddir = NCR53c7x0_read8 (CTEST0_REG_700) & CTEST0_700_DDIR;
-
- if (ddir) {
- /* Receive */
- if (synchronous)
- count += (NCR53c7x0_read8 (SSTAT2_REG) & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT;
- else
- if (NCR53c7x0_read8 (SSTAT1_REG) & SSTAT1_ILF)
- ++count;
- } else {
- /* Send */
- sstat = NCR53c7x0_read8 (SSTAT1_REG);
- if (sstat & SSTAT1_OLF)
- ++count;
- if (synchronous && (sstat & SSTAT1_ORF))
- ++count;
- }
- return count;
-}
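-
-/*
- * A short worked example with hypothetical register values: on an
- * asynchronous receive, if the DFIFO byte offset leads the DBC count by two
- * and SSTAT1_ILF shows one more byte latched in the SCSI input register,
- * the routine above reports a residual of 2 + 1 = 3 bytes still in the chip.
- */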
-
-/*
- * Function : static const char * sbcl_to_phase (int sbcl)
- *
- * Purpose : Convert SBCL register to user-parsable phase representation
- *
- * Inputs : sbcl - value of sbcl register
- */
-
-
-static const char *
-sbcl_to_phase (int sbcl) {
- switch (sbcl & SBCL_PHASE_MASK) {
- case SBCL_PHASE_DATAIN:
- return "DATAIN";
- case SBCL_PHASE_DATAOUT:
- return "DATAOUT";
- case SBCL_PHASE_MSGIN:
- return "MSGIN";
- case SBCL_PHASE_MSGOUT:
- return "MSGOUT";
- case SBCL_PHASE_CMDOUT:
- return "CMDOUT";
- case SBCL_PHASE_STATIN:
- return "STATUSIN";
- default:
- return "unknown";
- }
-}
-
-/*
- * Function : static const char * sstat2_to_phase (int sstat)
- *
- * Purpose : Convert SSTAT2 register to user-parsable phase representation
- *
- * Inputs : sstat - value of sstat register
- */
-
-
-static const char *
-sstat2_to_phase (int sstat) {
- switch (sstat & SSTAT2_PHASE_MASK) {
- case SSTAT2_PHASE_DATAIN:
- return "DATAIN";
- case SSTAT2_PHASE_DATAOUT:
- return "DATAOUT";
- case SSTAT2_PHASE_MSGIN:
- return "MSGIN";
- case SSTAT2_PHASE_MSGOUT:
- return "MSGOUT";
- case SSTAT2_PHASE_CMDOUT:
- return "CMDOUT";
- case SSTAT2_PHASE_STATIN:
- return "STATUSIN";
- default:
- return "unknown";
- }
-}
-
-/*
- * Function : static void intr_phase_mismatch (struct Scsi_Host *host,
- * struct NCR53c7x0_cmd *cmd)
- *
- * Purpose : Handle phase mismatch interrupts
- *
- * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
- * may be NULL.
- *
- * Side effects : The abort_connected() routine is called or the NCR chip
- * is restarted, jumping to the command_complete entry point, or
- * patching the address and transfer count of the current instruction
- * and calling the msg_in entry point as appropriate.
- */
-
-static void
-intr_phase_mismatch (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
- NCR53c7x0_local_declare();
- u32 dbc_dcmd, *dsp, *dsp_next;
- unsigned char dcmd, sbcl;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- int residual;
- enum {ACTION_ABORT, ACTION_ABORT_PRINT, ACTION_CONTINUE} action =
- ACTION_ABORT_PRINT;
- const char *where = NULL;
-
- NCR53c7x0_local_setup(host);
-
- /*
- * Corrective action is based on where in the SCSI SCRIPT(tm) the error
- * occurred, as well as which SCSI phase we are currently in.
- */
- dsp_next = bus_to_virt(NCR53c7x0_read32(DSP_REG));
-
- /*
- * Fetch the current instruction, and remove the operands for easier
- * interpretation.
- */
- dbc_dcmd = NCR53c7x0_read32(DBC_REG);
- dcmd = (dbc_dcmd & 0xff000000) >> 24;
- /*
- * Like other processors, the NCR adjusts the instruction pointer before
- * instruction decode. Set the DSP address back to what it should
- * be for this instruction based on its size (2 or 3 32 bit words).
- */
- dsp = dsp_next - NCR53c7x0_insn_size(dcmd);
-
-
- /*
- * Read new SCSI phase from the SBCL lines. Since all of our code uses
- * a WHEN conditional instead of an IF conditional, we don't need to
- * wait for a new REQ.
- */
- sbcl = NCR53c7x0_read8(SBCL_REG) & SBCL_PHASE_MASK;
-
- if (!cmd) {
- action = ACTION_ABORT_PRINT;
- where = "no current command";
- /*
- * The way my SCSI SCRIPTS(tm) are architected, recoverable phase
- * mismatches should only occur where we're doing a multi-byte
- * BMI instruction. Specifically, this means
- *
- * - select messages (a SCSI-I target may ignore additional messages
- * after the IDENTIFY; any target may reject a SDTR or WDTR)
- *
- * - command out (targets may send a message to signal an error
- * condition, or go into STATUSIN after they've decided
- *	they don't like the command).
- *
- * - reply_message (targets may reject a multi-byte message in the
- * middle)
- *
- * - data transfer routines (command completion with buffer space
- * left, disconnect message, or error message)
- */
- } else if (((dsp >= cmd->data_transfer_start &&
- dsp < cmd->data_transfer_end)) || dsp == (cmd->residual + 2)) {
- if ((dcmd & (DCMD_TYPE_MASK|DCMD_BMI_OP_MASK|DCMD_BMI_INDIRECT|
- DCMD_BMI_MSG|DCMD_BMI_CD)) == (DCMD_TYPE_BMI|
- DCMD_BMI_OP_MOVE_I)) {
- residual = datapath_residual (host);
- if (hostdata->options & OPTION_DEBUG_DISCONNECT)
- printk ("scsi%d : handling residual transfer (+ %d bytes from DMA FIFO)\n",
- host->host_no, residual);
-
- /*
- * The first instruction is a CALL to the alternate handler for
- * this data transfer phase, so we can do calls to
- * munge_msg_restart as we would if control were passed
- * from normal dynamic code.
- */
- if (dsp != cmd->residual + 2) {
- cmd->residual[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL |
- ((dcmd & DCMD_BMI_IO) ? DCMD_TCI_IO : 0)) << 24) |
- DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
- cmd->residual[1] = virt_to_bus(hostdata->script)
- + ((dcmd & DCMD_BMI_IO)
- ? hostdata->E_other_in : hostdata->E_other_out);
- }
-
- /*
-	     * The second instruction is the data transfer block
- * move instruction, reflecting the pointer and count at the
- * time of the phase mismatch.
- */
- cmd->residual[2] = dbc_dcmd + residual;
- cmd->residual[3] = NCR53c7x0_read32(DNAD_REG) - residual;
-
- /*
- * The third and final instruction is a jump to the instruction
- * which follows the instruction which had to be 'split'
- */
- if (dsp != cmd->residual + 2) {
- cmd->residual[4] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP)
- << 24) | DBC_TCI_TRUE;
- cmd->residual[5] = virt_to_bus(dsp_next);
- }
-
- /*
- * For the sake of simplicity, transfer control to the
- * conditional CALL at the start of the residual buffer.
- */
- hostdata->dsp = cmd->residual;
- hostdata->dsp_changed = 1;
- action = ACTION_CONTINUE;
- } else {
- where = "non-BMI dynamic DSA code";
- action = ACTION_ABORT_PRINT;
- }
- } else if (dsp == (hostdata->script + hostdata->E_select_msgout / 4 + 2)) {
- /* RGH 290697: Added +2 above, to compensate for the script
- * instruction which disables the selection timer. */
- /* Release ATN */
- NCR53c7x0_write8 (SOCL_REG, 0);
- switch (sbcl) {
- /*
- * Some devices (SQ555 come to mind) grab the IDENTIFY message
- * sent on selection, and decide to go into COMMAND OUT phase
- * rather than accepting the rest of the messages or rejecting
- * them. Handle these devices gracefully.
- */
- case SBCL_PHASE_CMDOUT:
- hostdata->dsp = dsp + 2 /* two _words_ */;
- hostdata->dsp_changed = 1;
- printk ("scsi%d : target %d ignored SDTR and went into COMMAND OUT\n",
- host->host_no, cmd->cmd->device->id);
- cmd->flags &= ~CMD_FLAG_SDTR;
- action = ACTION_CONTINUE;
- break;
- case SBCL_PHASE_MSGIN:
- hostdata->dsp = hostdata->script + hostdata->E_msg_in /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- action = ACTION_CONTINUE;
- break;
- default:
- where="select message out";
- action = ACTION_ABORT_PRINT;
- }
- /*
- * Some SCSI devices will interpret a command as they read the bytes
- * off the SCSI bus, and may decide that the command is Bogus before
- * they've read the entire command off the bus.
- */
- } else if (dsp == hostdata->script + hostdata->E_cmdout_cmdout / sizeof
- (u32)) {
- hostdata->dsp = hostdata->script + hostdata->E_data_transfer /
- sizeof (u32);
- hostdata->dsp_changed = 1;
- action = ACTION_CONTINUE;
- /* FIXME : we need to handle message reject, etc. within msg_respond. */
-#ifdef notyet
- } else if (dsp == hostdata->script + hostdata->E_reply_message) {
- switch (sbcl) {
- /* Any other phase mismatches abort the currently executing command. */
-#endif
- } else {
- where = "unknown location";
- action = ACTION_ABORT_PRINT;
- }
-
- /* Flush DMA FIFO */
- if (!hostdata->dstat_valid) {
- hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
- hostdata->dstat_valid = 1;
- }
- if (!(hostdata->dstat & DSTAT_DFE)) {
- /* Really need to check this out for 710 RGH */
- NCR53c7x0_write8 (CTEST8_REG, CTEST8_10_CLF);
- while (NCR53c7x0_read8 (CTEST8_REG) & CTEST8_10_CLF);
- hostdata->dstat |= DSTAT_DFE;
- }
-
- switch (action) {
- case ACTION_ABORT_PRINT:
- printk("scsi%d : %s : unexpected phase %s.\n",
- host->host_no, where ? where : "unknown location",
- sbcl_to_phase(sbcl));
- print_lots (host);
- /* Fall through to ACTION_ABORT */
- case ACTION_ABORT:
- abort_connected (host);
- break;
- case ACTION_CONTINUE:
- break;
- }
-
-#if 0
- if (hostdata->dsp_changed) {
- printk("scsi%d: new dsp 0x%p\n", host->host_no, hostdata->dsp);
- print_insn (host, hostdata->dsp, "", 1);
- }
-#endif
-}
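-
-/*
- * A minimal sketch of the six-word residual buffer built above for a DATA IN
- * mismatch (symbolic, not from a real trace):
- *
- *	residual[0..1]	CALL other_in, WHEN NOT DATA_IN
- *	residual[2..3]	MOVE remaining count, remaining address, WHEN DATA_IN
- *	residual[4..5]	JUMP dsp_next (the instruction after the split MOVE)
- *
- * hostdata->dsp is then pointed at residual[0], so the transfer resumes
- * exactly where the phase mismatch interrupted it.
- */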
-
-/*
- * Function : static void intr_bf (struct Scsi_Host *host,
- * struct NCR53c7x0_cmd *cmd)
- *
- * Purpose : handle BUS FAULT interrupts
- *
- * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
- * may be NULL.
- */
-
-static void
-intr_bf (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
- NCR53c7x0_local_declare();
- u32 *dsp,
- *next_dsp, /* Current dsp */
- *dsa,
- dbc_dcmd; /* DCMD (high eight bits) + DBC */
- char *reason = NULL;
- /* Default behavior is for a silent error, with a retry until we've
- exhausted retries. */
- enum {MAYBE, ALWAYS, NEVER} retry = MAYBE;
- int report = 0;
- NCR53c7x0_local_setup(host);
-
- dbc_dcmd = NCR53c7x0_read32 (DBC_REG);
- next_dsp = bus_to_virt (NCR53c7x0_read32(DSP_REG));
- dsp = next_dsp - NCR53c7x0_insn_size ((dbc_dcmd >> 24) & 0xff);
-/* FIXME - check chip type */
- dsa = bus_to_virt (NCR53c7x0_read32(DSA_REG));
-
- /*
- * Bus faults can be caused by either a Bad Address or
- * Target Abort. We should check the Received Target Abort
- * bit of the PCI status register and Master Abort Bit.
- *
- * - Master Abort bit indicates that no device claimed
- * the address with DEVSEL within five clocks
- *
- * - Target Abort bit indicates that a target claimed it,
- * but changed its mind once it saw the byte enables.
- *
- */
-
- /* 53c710, not PCI system */
- report = 1;
- reason = "Unknown";
-
-#ifndef notyet
- report = 1;
-#endif
- if (report && reason)
- {
- printk(KERN_ALERT "scsi%d : BUS FAULT reason = %s\n",
- host->host_no, reason ? reason : "unknown");
- print_lots (host);
- }
-
-#ifndef notyet
- retry = NEVER;
-#endif
-
- /*
- * TODO : we should attempt to recover from any spurious bus
- * faults. After X retries, we should figure that things are
- * sufficiently wedged, and call NCR53c7xx_reset.
- *
- * This code should only get executed once we've decided that we
- * cannot retry.
- */
-
- if (retry == NEVER) {
- printk(KERN_ALERT " mail richard@sleepie.demon.co.uk\n");
- FATAL (host);
- }
-}
-
-/*
- * Function : static void intr_dma (struct Scsi_Host *host,
- * struct NCR53c7x0_cmd *cmd)
- *
- * Purpose : handle all DMA interrupts, indicated by the setting
- * of the DIP bit in the ISTAT register.
- *
- * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
- * may be NULL.
- */
-
-static void
-intr_dma (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
- NCR53c7x0_local_declare();
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- unsigned char dstat; /* DSTAT */
- u32 *dsp,
- *next_dsp, /* Current dsp */
- *dsa,
- dbc_dcmd; /* DCMD (high eight bits) + DBC */
- int tmp;
- unsigned long flags;
- NCR53c7x0_local_setup(host);
-
- if (!hostdata->dstat_valid) {
- hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
- hostdata->dstat_valid = 1;
- }
-
- dstat = hostdata->dstat;
-
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk("scsi%d : DSTAT=0x%x\n", host->host_no, (int) dstat);
-
- dbc_dcmd = NCR53c7x0_read32 (DBC_REG);
- next_dsp = bus_to_virt(NCR53c7x0_read32(DSP_REG));
- dsp = next_dsp - NCR53c7x0_insn_size ((dbc_dcmd >> 24) & 0xff);
-/* XXX - check chip type */
- dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
-
- /*
- * DSTAT_ABRT is the aborted interrupt. This is set whenever the
- * SCSI chip is aborted.
- *
- * With NCR53c700 and NCR53c700-66 style chips, we should only
- * get this when the chip is currently running the accept
- * reselect/select code and we have set the abort bit in the
- * ISTAT register.
- *
- */
-
- if (dstat & DSTAT_ABRT) {
-#if 0
- /* XXX - add code here to deal with normal abort */
- if ((hostdata->options & OPTION_700) && (hostdata->state ==
- STATE_ABORTING)) {
- } else
-#endif
- {
- printk(KERN_ALERT "scsi%d : unexpected abort interrupt at\n"
- " ", host->host_no);
- print_insn (host, dsp, KERN_ALERT "s ", 1);
- FATAL (host);
- }
- }
-
- /*
- * DSTAT_SSI is the single step interrupt. Should be generated
- * whenever we have single stepped or are tracing.
- */
-
- if (dstat & DSTAT_SSI) {
- if (hostdata->options & OPTION_DEBUG_TRACE) {
- /* Don't print instr. until we write DSP at end of intr function */
- } else if (hostdata->options & OPTION_DEBUG_SINGLE) {
- print_insn (host, dsp, "s ", 0);
- local_irq_save(flags);
-/* XXX - should we do this, or can we get away with writing dsp? */
-
- NCR53c7x0_write8 (DCNTL_REG, (NCR53c7x0_read8(DCNTL_REG) &
- ~DCNTL_SSM) | DCNTL_STD);
- local_irq_restore(flags);
- } else {
- printk(KERN_ALERT "scsi%d : unexpected single step interrupt at\n"
- " ", host->host_no);
- print_insn (host, dsp, KERN_ALERT "", 1);
- printk(KERN_ALERT " mail drew@PoohSticks.ORG\n");
- FATAL (host);
- }
- }
-
- /*
- * DSTAT_IID / DSTAT_OPC (same bit, same meaning, only the name
- * is different) is generated whenever an illegal instruction is
- * encountered.
- *
- * XXX - we may want to emulate INTFLY here, so we can use
- * the same SCSI SCRIPT (tm) for NCR53c710 through NCR53c810
- * chips.
- */
-
- if (dstat & DSTAT_OPC) {
- /*
-	 * Ascertain if this IID interrupt occurred before or after a STO
- * interrupt. Since the interrupt handling code now leaves
- * DSP unmodified until _after_ all stacked interrupts have been
- * processed, reading the DSP returns the original DSP register.
- * This means that if dsp lies between the select code, and
- * message out following the selection code (where the IID interrupt
-	 * would have occurred due to the implicit wait for REQ),
- * we have an IID interrupt resulting from a STO condition and
- * can ignore it.
- */
-
- if (((dsp >= (hostdata->script + hostdata->E_select / sizeof(u32))) &&
- (dsp <= (hostdata->script + hostdata->E_select_msgout /
- sizeof(u32) + 8))) || (hostdata->test_running == 2)) {
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk ("scsi%d : ignoring DSTAT_IID for SSTAT_STO\n",
- host->host_no);
- if (hostdata->expecting_iid) {
- hostdata->expecting_iid = 0;
- hostdata->idle = 1;
- if (hostdata->test_running == 2) {
- hostdata->test_running = 0;
- hostdata->test_completed = 3;
- } else if (cmd)
- abnormal_finished (cmd, DID_BAD_TARGET << 16);
- } else {
- hostdata->expecting_sto = 1;
- }
- /*
- * We can't guarantee we'll be able to execute the WAIT DISCONNECT
- * instruction within the 3.4us of bus free and arbitration delay
- * that a target can RESELECT in and assert REQ after we've dropped
- * ACK. If this happens, we'll get an illegal instruction interrupt.
- * Doing away with the WAIT DISCONNECT instructions broke everything,
- * so instead I'll settle for moving one WAIT DISCONNECT a few
- * instructions closer to the CLEAR ACK before it to minimize the
- * chances of this happening, and handle it if it occurs anyway.
- *
- * Simply continue with what we were doing, and control should
- * be transferred to the schedule routine which will ultimately
- * pass control onto the reselection or selection (not yet)
- * code.
- */
- } else if (dbc_dcmd == 0x48000000 && (NCR53c7x0_read8 (SBCL_REG) &
- SBCL_REQ)) {
- if (!(hostdata->options & OPTION_NO_PRINT_RACE))
- {
- printk("scsi%d: REQ before WAIT DISCONNECT IID\n",
- host->host_no);
- hostdata->options |= OPTION_NO_PRINT_RACE;
- }
- } else {
- printk(KERN_ALERT "scsi%d : invalid instruction\n", host->host_no);
- print_lots (host);
- printk(KERN_ALERT " mail Richard@sleepie.demon.co.uk with ALL\n"
- " boot messages and diagnostic output\n");
- FATAL (host);
- }
- }
-
- /*
- * DSTAT_BF indicates a bus fault.  DSTAT_800_BF is valid for the 710 also.
- */
-
- if (dstat & DSTAT_800_BF) {
- intr_bf (host, cmd);
- }
-
-
- /*
- * DSTAT_SIR interrupts are generated by the execution of
- * the INT instruction. Since the exact values available
- * are determined entirely by the SCSI script running,
- * and are local to a particular script, a unique handler
- * is called for each script.
- */
-
- if (dstat & DSTAT_SIR) {
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk ("scsi%d : DSTAT_SIR\n", host->host_no);
- switch ((tmp = hostdata->dstat_sir_intr (host, cmd))) {
- case SPECIFIC_INT_NOTHING:
- case SPECIFIC_INT_RESTART:
- break;
- case SPECIFIC_INT_ABORT:
- abort_connected(host);
- break;
- case SPECIFIC_INT_PANIC:
- printk(KERN_ALERT "scsi%d : failure at ", host->host_no);
- print_insn (host, dsp, KERN_ALERT "", 1);
- printk(KERN_ALERT " dstat_sir_intr() returned SPECIFIC_INT_PANIC\n");
- FATAL (host);
- break;
- case SPECIFIC_INT_BREAK:
- intr_break (host, cmd);
- break;
- default:
- printk(KERN_ALERT "scsi%d : failure at ", host->host_no);
- print_insn (host, dsp, KERN_ALERT "", 1);
- printk(KERN_ALERT" dstat_sir_intr() returned unknown value %d\n",
- tmp);
- FATAL (host);
- }
- }
-}
-
-/*
- * Function : static int print_insn (struct Scsi_Host *host,
- * u32 *insn, int kernel)
- *
- * Purpose : print numeric representation of the instruction pointed
- * to by insn to the debugging or kernel message buffer
- * as appropriate.
- *
- * If desired, a user level program can interpret this
- * information.
- *
- * Inputs : host, insn - host, pointer to instruction, prefix -
- * string to prepend, kernel - use printk instead of debugging buffer.
- *
- * Returns : size, in u32s, of instruction printed.
- */
-
-/*
- * FIXME: should change kernel parameter so that it takes an ENUM
- * specifying severity - either KERN_ALERT or KERN_PANIC so
- * all panic messages are output with the same severity.
- */
-
-static int
-print_insn (struct Scsi_Host *host, const u32 *insn,
- const char *prefix, int kernel) {
-    char buf[160],			/* Temporary buffer and pointer.  ICKY
-					   arbitrary length.  */
-	*tmp;
- unsigned char dcmd; /* dcmd register for *insn */
- int size;
-
- /*
- * Check to see if the instruction pointer is not bogus before
- * indirecting through it; avoiding red-zone at start of
- * memory.
- *
- * FIXME: icky magic needs to happen here on non-intel boxes which
- * don't have kernel memory mapped in like this. Might be reasonable
- * to use vverify()?
- */
-
- if (virt_to_phys((void *)insn) < PAGE_SIZE ||
- virt_to_phys((void *)(insn + 8)) > virt_to_phys(high_memory) ||
- ((((dcmd = (insn[0] >> 24) & 0xff) & DCMD_TYPE_MMI) == DCMD_TYPE_MMI) &&
- virt_to_phys((void *)(insn + 12)) > virt_to_phys(high_memory))) {
- size = 0;
- sprintf (buf, "%s%p: address out of range\n",
- prefix, insn);
- } else {
-/*
- * FIXME : (void *) cast in virt_to_bus should be unnecessary, because
- * it should take const void * as argument.
- */
-#if !defined(CONFIG_MVME16x) && !defined(CONFIG_BVME6000)
- sprintf(buf, "%s0x%lx (virt 0x%p) : 0x%08x 0x%08x (virt 0x%p)",
- (prefix ? prefix : ""), virt_to_bus((void *) insn), insn,
- insn[0], insn[1], bus_to_virt (insn[1]));
-#else
- /* Remove virtual addresses to reduce output, as they are the same */
- sprintf(buf, "%s0x%x (+%x) : 0x%08x 0x%08x",
- (prefix ? prefix : ""), (u32)insn, ((u32)insn -
- (u32)&(((struct NCR53c7x0_hostdata *)host->hostdata[0])->script))/4,
- insn[0], insn[1]);
-#endif
- tmp = buf + strlen(buf);
- if ((dcmd & DCMD_TYPE_MASK) == DCMD_TYPE_MMI) {
-#if !defined(CONFIG_MVME16x) && !defined(CONFIG_BVME6000)
- sprintf (tmp, " 0x%08x (virt 0x%p)\n", insn[2],
- bus_to_virt(insn[2]));
-#else
- /* Remove virtual addr to reduce output, as it is the same */
- sprintf (tmp, " 0x%08x\n", insn[2]);
-#endif
- size = 3;
- } else {
- sprintf (tmp, "\n");
- size = 2;
- }
- }
-
- if (kernel)
- printk ("%s", buf);
-#ifdef NCR_DEBUG
- else {
- size_t len = strlen(buf);
- debugger_kernel_write(host, buf, len);
- }
-#endif
- return size;
-}
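-
-/*
- * For reference, a hypothetical line as print_insn() formats it on a
- * non-MVME16x/BVME6000 build (addresses and opcodes invented for
- * illustration):
- *
- *	t 0x00fe1000 (virt 0xc0fe1000) : 0x80080000 0x00fe2000 (virt 0xc0fe2000)
- *
- * i.e. prefix, bus and virtual address of the instruction, then the two
- * (or, for memory-to-memory moves, three) instruction words.
- */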
-
-/*
- * Function : int NCR53c7xx_abort (Scsi_Cmnd *cmd)
- *
- * Purpose : Abort an errant SCSI command, doing all necessary
- * cleanup of the issue_queue, running_list, shared Linux/NCR
- * dsa issue and reconnect queues.
- *
- * Inputs : cmd - command to abort.
- *
- * Returns : a SCSI_ABORT_* status (SUCCESS, NOT_RUNNING, BUSY or ERROR).
- */
-
-int
-NCR53c7xx_abort (Scsi_Cmnd *cmd) {
- NCR53c7x0_local_declare();
- struct Scsi_Host *host = cmd->device->host;
- struct NCR53c7x0_hostdata *hostdata = host ? (struct NCR53c7x0_hostdata *)
- host->hostdata[0] : NULL;
- unsigned long flags;
- struct NCR53c7x0_cmd *curr, **prev;
- Scsi_Cmnd *me, **last;
-#if 0
- static long cache_pid = -1;
-#endif
-
-
- if (!host) {
- printk ("Bogus SCSI command pid %ld; no host structure\n",
- cmd->pid);
- return SCSI_ABORT_ERROR;
- } else if (!hostdata) {
- printk ("Bogus SCSI host %d; no hostdata\n", host->host_no);
- return SCSI_ABORT_ERROR;
- }
- NCR53c7x0_local_setup(host);
-
-/*
- * CHECK : I don't think that reading ISTAT will unstack any interrupts,
- * since we need to write the INTF bit to clear it, and SCSI/DMA
- * interrupts don't clear until we read SSTAT/SIST and DSTAT registers.
- *
- * See that this is the case. Appears to be correct on the 710, at least.
- *
- * I suspect that several of our failures may be coming from a new fatal
- * interrupt (possibly due to a phase mismatch) happening after we've left
- * the interrupt handler, but before the PIC has had the interrupt condition
- * cleared.
- */
-
- if (NCR53c7x0_read8(hostdata->istat) & (ISTAT_DIP|ISTAT_SIP)) {
- printk ("scsi%d : dropped interrupt for command %ld\n", host->host_no,
- cmd->pid);
-	NCR53c7x0_intr (host->irq, host);
- return SCSI_ABORT_BUSY;
- }
-
- local_irq_save(flags);
-#if 0
- if (cache_pid == cmd->pid)
- panic ("scsi%d : bloody fetus %d\n", host->host_no, cmd->pid);
- else
- cache_pid = cmd->pid;
-#endif
-
-
-/*
- * The command could be hiding in the issue_queue. This would be very
- * nice, as commands can't be moved from the high level driver's issue queue
- * into the shared queue until an interrupt routine is serviced, and this
- * moving is atomic.
- *
- * If this is the case, we don't have to worry about anything - we simply
- * pull the command out of the old queue, and call it aborted.
- */
-
- for (me = (Scsi_Cmnd *) hostdata->issue_queue,
- last = (Scsi_Cmnd **) &(hostdata->issue_queue);
- me && me != cmd; last = (Scsi_Cmnd **)&(me->SCp.ptr),
- me = (Scsi_Cmnd *)me->SCp.ptr);
-
- if (me) {
- *last = (Scsi_Cmnd *) me->SCp.ptr;
- if (me->host_scribble) {
- ((struct NCR53c7x0_cmd *)me->host_scribble)->next = hostdata->free;
- hostdata->free = (struct NCR53c7x0_cmd *) me->host_scribble;
- me->host_scribble = NULL;
- }
- cmd->result = DID_ABORT << 16;
- cmd->scsi_done(cmd);
- printk ("scsi%d : found command %ld in Linux issue queue\n",
- host->host_no, me->pid);
- local_irq_restore(flags);
- run_process_issue_queue();
- return SCSI_ABORT_SUCCESS;
- }
-
-/*
- * That failing, the command could be in our list of already executing
- * commands. If this is the case, drastic measures are called for.
- */
-
- for (curr = (struct NCR53c7x0_cmd *) hostdata->running_list,
- prev = (struct NCR53c7x0_cmd **) &(hostdata->running_list);
- curr && curr->cmd != cmd; prev = (struct NCR53c7x0_cmd **)
- &(curr->next), curr = (struct NCR53c7x0_cmd *) curr->next);
-
- if (curr) {
- if ((curr->result & 0xff) != 0xff && (curr->result & 0xff00) != 0xff00) {
- cmd->result = curr->result;
- if (prev)
- *prev = (struct NCR53c7x0_cmd *) curr->next;
- curr->next = (struct NCR53c7x0_cmd *) hostdata->free;
- cmd->host_scribble = NULL;
- hostdata->free = curr;
- cmd->scsi_done(cmd);
- printk ("scsi%d : found finished command %ld in running list\n",
- host->host_no, cmd->pid);
- local_irq_restore(flags);
- return SCSI_ABORT_NOT_RUNNING;
- } else {
- printk ("scsi%d : DANGER : command running, can not abort.\n",
- cmd->device->host->host_no);
- local_irq_restore(flags);
- return SCSI_ABORT_BUSY;
- }
- }
-
-/*
- * And if we couldn't find it in any of our queues, it must have been
- * a dropped interrupt.
- */
-
- curr = (struct NCR53c7x0_cmd *) cmd->host_scribble;
- if (curr) {
- curr->next = hostdata->free;
- hostdata->free = curr;
- cmd->host_scribble = NULL;
- }
-
- if (curr == NULL || ((curr->result & 0xff00) == 0xff00) ||
- ((curr->result & 0xff) == 0xff)) {
- printk ("scsi%d : did this command ever run?\n", host->host_no);
- cmd->result = DID_ABORT << 16;
- } else {
- printk ("scsi%d : probably lost INTFLY, normal completion\n",
- host->host_no);
- cmd->result = curr->result;
-/*
- * FIXME : We need to add an additional flag which indicates if a
- * command was ever counted as BUSY, so if we end up here we can
- * decrement the busy count if and only if it is necessary.
- */
- --hostdata->busy[cmd->device->id][cmd->device->lun];
- }
- local_irq_restore(flags);
- cmd->scsi_done(cmd);
-
-/*
- * We need to run process_issue_queue since termination of this command
- * may allow another queued command to execute.
- */
- return SCSI_ABORT_NOT_RUNNING;
-}
-
-/*
- * Function : int NCR53c7xx_reset (Scsi_Cmnd *cmd, unsigned int reset_flags)
- *
- * Purpose : perform a hard reset of the SCSI bus and NCR
- * chip.
- *
- * Inputs : cmd - command which caused the SCSI RESET
- *
- * Returns : SCSI_RESET_SUCCESS.
- */
-
-int
-NCR53c7xx_reset (Scsi_Cmnd *cmd, unsigned int reset_flags) {
- NCR53c7x0_local_declare();
- unsigned long flags;
- int found = 0;
- struct NCR53c7x0_cmd * c;
- Scsi_Cmnd *tmp;
- /*
- * When we call scsi_done(), it's going to wake up anything sleeping on the
- * resources which were in use by the aborted commands, and we'll start to
- * get new commands.
- *
- * We can't let this happen until after we've re-initialized the driver
- * structures, and can't reinitialize those structures until after we've
- * dealt with their contents.
- *
- * So, we need to find all of the commands which were running, stick
- * them on a linked list of completed commands (we'll use the host_scribble
- * pointer), do our reinitialization, and then call the done function for
- * each command.
- */
- Scsi_Cmnd *nuke_list = NULL;
- struct Scsi_Host *host = cmd->device->host;
- struct NCR53c7x0_hostdata *hostdata =
- (struct NCR53c7x0_hostdata *) host->hostdata[0];
-
- NCR53c7x0_local_setup(host);
- local_irq_save(flags);
- ncr_halt (host);
- print_lots (host);
- dump_events (host, 30);
- ncr_scsi_reset (host);
- for (tmp = nuke_list = return_outstanding_commands (host, 1 /* free */,
- 0 /* issue */ ); tmp; tmp = (Scsi_Cmnd *) tmp->SCp.buffer)
- if (tmp == cmd) {
- found = 1;
- break;
- }
-
- /*
- * If we didn't find the command which caused this reset in our running
- * list, then we've lost it. See that it terminates normally anyway.
- */
- if (!found) {
- c = (struct NCR53c7x0_cmd *) cmd->host_scribble;
- if (c) {
- cmd->host_scribble = NULL;
- c->next = hostdata->free;
- hostdata->free = c;
- } else
- printk ("scsi%d: lost command %ld\n", host->host_no, cmd->pid);
- cmd->SCp.buffer = (struct scatterlist *) nuke_list;
- nuke_list = cmd;
- }
-
- NCR53c7x0_driver_init (host);
- hostdata->soft_reset (host);
- if (hostdata->resets == 0)
- disable(host);
- else if (hostdata->resets != -1)
- --hostdata->resets;
- local_irq_restore(flags);
- for (; nuke_list; nuke_list = tmp) {
- tmp = (Scsi_Cmnd *) nuke_list->SCp.buffer;
- nuke_list->result = DID_RESET << 16;
- nuke_list->scsi_done (nuke_list);
- }
- return SCSI_RESET_SUCCESS;
-}
-
-/*
- * The NCR SDMS bios follows Annex A of the SCSI-CAM draft, and
- * therefore shares the scsicam_bios_param function.
- */
-
-/*
- * Function : int insn_to_offset (Scsi_Cmnd *cmd, u32 *insn)
- *
- * Purpose : convert instructions stored at NCR pointer into data
- * pointer offset.
- *
- * Inputs : cmd - SCSI command; insn - pointer to instruction. Either current
- * DSP, or saved data pointer.
- *
- * Returns : offset on success, -1 on failure.
- */
-
-
-static int
-insn_to_offset (Scsi_Cmnd *cmd, u32 *insn) {
- struct NCR53c7x0_hostdata *hostdata =
- (struct NCR53c7x0_hostdata *) cmd->device->host->hostdata[0];
- struct NCR53c7x0_cmd *ncmd =
- (struct NCR53c7x0_cmd *) cmd->host_scribble;
- int offset = 0, buffers;
- struct scatterlist *segment;
- char *ptr;
- int found = 0;
-
-/*
- * With the current code implementation, if the insn is inside dynamically
- * generated code, the data pointer will be the instruction preceding
- * the next transfer segment.
- */
-
- if (!check_address ((unsigned long) ncmd, sizeof (struct NCR53c7x0_cmd)) &&
- ((insn >= ncmd->data_transfer_start &&
- insn < ncmd->data_transfer_end) ||
- (insn >= ncmd->residual &&
- insn < (ncmd->residual +
- sizeof(ncmd->residual))))) {
- ptr = bus_to_virt(insn[3]);
-
- if ((buffers = cmd->use_sg)) {
- for (offset = 0,
- segment = (struct scatterlist *) cmd->request_buffer;
- buffers && !((found = ((ptr >= (char *)page_address(segment->page)+segment->offset) &&
- (ptr < ((char *)page_address(segment->page)+segment->offset+segment->length)))));
- --buffers, offset += segment->length, ++segment)
-#if 0
- printk("scsi%d: comparing 0x%p to 0x%p\n",
- cmd->device->host->host_no, saved, page_address(segment->page+segment->offset));
-#else
- ;
-#endif
- offset += ptr - ((char *)page_address(segment->page)+segment->offset);
- } else {
- found = 1;
- offset = ptr - (char *) (cmd->request_buffer);
- }
- } else if ((insn >= hostdata->script +
- hostdata->E_data_transfer / sizeof(u32)) &&
- (insn <= hostdata->script +
- hostdata->E_end_data_transfer / sizeof(u32))) {
- found = 1;
- offset = 0;
- }
- return found ? offset : -1;
-}
-
-
-
-/*
- * Function : void print_progress (Scsi_Cmnd *cmd)
- *
- * Purpose : print the current location of the saved data pointer
- *
- * Inputs : cmd - command we are interested in
- *
- */
-
-static void
-print_progress (Scsi_Cmnd *cmd) {
- NCR53c7x0_local_declare();
- struct NCR53c7x0_cmd *ncmd =
- (struct NCR53c7x0_cmd *) cmd->host_scribble;
- int offset, i;
- char *where;
- u32 *ptr;
- NCR53c7x0_local_setup (cmd->device->host);
-
- if (check_address ((unsigned long) ncmd,sizeof (struct NCR53c7x0_cmd)) == 0)
- {
- printk("\nNCR53c7x0_cmd fields:\n");
- printk(" bounce.len=0x%x, addr=0x%0x, buf[]=0x%02x %02x %02x %02x\n",
- ncmd->bounce.len, ncmd->bounce.addr, ncmd->bounce.buf[0],
- ncmd->bounce.buf[1], ncmd->bounce.buf[2], ncmd->bounce.buf[3]);
- printk(" result=%04x, cdb[0]=0x%02x\n", ncmd->result, ncmd->cmnd[0]);
- }
-
- for (i = 0; i < 2; ++i) {
- if (check_address ((unsigned long) ncmd,
- sizeof (struct NCR53c7x0_cmd)) == -1)
- continue;
- if (!i) {
- where = "saved";
- ptr = bus_to_virt(ncmd->saved_data_pointer);
- } else {
- where = "active";
- ptr = bus_to_virt (NCR53c7x0_read32 (DSP_REG) -
- NCR53c7x0_insn_size (NCR53c7x0_read8 (DCMD_REG)) *
- sizeof(u32));
- }
- offset = insn_to_offset (cmd, ptr);
-
- if (offset != -1)
- printk ("scsi%d : %s data pointer at offset %d\n",
- cmd->device->host->host_no, where, offset);
- else {
- int size;
- printk ("scsi%d : can't determine %s data pointer offset\n",
- cmd->device->host->host_no, where);
- if (ncmd) {
- size = print_insn (cmd->device->host,
- bus_to_virt(ncmd->saved_data_pointer), "", 1);
- print_insn (cmd->device->host,
- bus_to_virt(ncmd->saved_data_pointer) + size * sizeof(u32),
- "", 1);
- }
- }
- }
-}
-
-
-static void
-print_dsa (struct Scsi_Host *host, u32 *dsa, const char *prefix) {
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- int i, len;
- char *ptr;
- Scsi_Cmnd *cmd;
-
- if (check_address ((unsigned long) dsa, hostdata->dsa_end -
- hostdata->dsa_start) == -1) {
- printk("scsi%d : bad dsa virt 0x%p\n", host->host_no, dsa);
- return;
- }
- printk("%sscsi%d : dsa at phys 0x%lx (virt 0x%p)\n"
- " + %d : dsa_msgout length = %u, data = 0x%x (virt 0x%p)\n" ,
- prefix ? prefix : "",
- host->host_no, virt_to_bus (dsa), dsa, hostdata->dsa_msgout,
- dsa[hostdata->dsa_msgout / sizeof(u32)],
- dsa[hostdata->dsa_msgout / sizeof(u32) + 1],
- bus_to_virt (dsa[hostdata->dsa_msgout / sizeof(u32) + 1]));
-
- /*
- * Only print messages if they're sane in length so we don't
- * blow the kernel printk buffer on something which won't buy us
- * anything.
- */
-
- if (dsa[hostdata->dsa_msgout / sizeof(u32)] <
- sizeof (hostdata->free->select))
- for (i = dsa[hostdata->dsa_msgout / sizeof(u32)],
- ptr = bus_to_virt (dsa[hostdata->dsa_msgout / sizeof(u32) + 1]);
- i > 0 && !check_address ((unsigned long) ptr, 1);
- ptr += len, i -= len) {
- printk(" ");
- len = spi_print_msg(ptr);
- printk("\n");
- if (!len)
- break;
- }
-
- printk(" + %d : select_indirect = 0x%x\n",
- hostdata->dsa_select, dsa[hostdata->dsa_select / sizeof(u32)]);
- cmd = (Scsi_Cmnd *) bus_to_virt(dsa[hostdata->dsa_cmnd / sizeof(u32)]);
- printk(" + %d : dsa_cmnd = 0x%x ", hostdata->dsa_cmnd,
- (u32) virt_to_bus(cmd));
- /* XXX Maybe we should access cmd->host_scribble->result here. RGH */
- if (cmd) {
- printk(" result = 0x%x, target = %d, lun = %d, cmd = ",
- cmd->result, cmd->device->id, cmd->device->lun);
- __scsi_print_command(cmd->cmnd);
- } else
- printk("\n");
- printk(" + %d : dsa_next = 0x%x\n", hostdata->dsa_next,
- dsa[hostdata->dsa_next / sizeof(u32)]);
- if (cmd) {
- printk("scsi%d target %d : sxfer_sanity = 0x%x, scntl3_sanity = 0x%x\n"
- " script : ",
- host->host_no, cmd->device->id,
- hostdata->sync[cmd->device->id].sxfer_sanity,
- hostdata->sync[cmd->device->id].scntl3_sanity);
- for (i = 0; i < (sizeof(hostdata->sync[cmd->device->id].script) / 4); ++i)
- printk ("0x%x ", hostdata->sync[cmd->device->id].script[i]);
- printk ("\n");
- print_progress (cmd);
- }
-}
-/*
- * Function : void print_queues (Scsi_Host *host)
- *
- * Purpose : print the contents of the NCR issue and reconnect queues
- *
- * Inputs : host - SCSI host we are interested in
- *
- */
-
-static void
-print_queues (struct Scsi_Host *host) {
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- u32 *dsa, *next_dsa;
- volatile u32 *ncrcurrent;
- int left;
- Scsi_Cmnd *cmd, *next_cmd;
- unsigned long flags;
-
- printk ("scsi%d : issue queue\n", host->host_no);
-
- for (left = host->can_queue, cmd = (Scsi_Cmnd *) hostdata->issue_queue;
- left >= 0 && cmd;
- cmd = next_cmd) {
- next_cmd = (Scsi_Cmnd *) cmd->SCp.ptr;
- local_irq_save(flags);
- if (cmd->host_scribble) {
- if (check_address ((unsigned long) (cmd->host_scribble),
- sizeof (cmd->host_scribble)) == -1)
- printk ("scsi%d: scsi pid %ld bad pointer to NCR53c7x0_cmd\n",
- host->host_no, cmd->pid);
- /* print_dsa does sanity check on address, no need to check */
- else
- print_dsa (host, ((struct NCR53c7x0_cmd *) cmd->host_scribble)
- -> dsa, "");
- } else
- printk ("scsi%d : scsi pid %ld for target %d lun %d has no NCR53c7x0_cmd\n",
- host->host_no, cmd->pid, cmd->device->id, cmd->device->lun);
- local_irq_restore(flags);
- }
-
- if (left <= 0) {
- printk ("scsi%d : loop detected in issue queue\n",
- host->host_no);
- }
-
- /*
- * Traverse the NCR reconnect and start DSA structures, printing out
- * each element until we hit the end or detect a loop. Currently,
- * the reconnect structure is a linked list; and the start structure
- * is an array. Eventually, the reconnect structure will become a
- * list as well, since this simplifies the code.
- */
-
- printk ("scsi%d : schedule dsa array :\n", host->host_no);
- for (left = host->can_queue, ncrcurrent = hostdata->schedule;
- left > 0; ncrcurrent += 2, --left)
- if (ncrcurrent[0] != hostdata->NOP_insn)
-/* FIXME : convert pointer to dsa_begin to pointer to dsa. */
- print_dsa (host, bus_to_virt (ncrcurrent[1] -
- (hostdata->E_dsa_code_begin -
- hostdata->E_dsa_code_template)), "");
- printk ("scsi%d : end schedule dsa array\n", host->host_no);
-
- printk ("scsi%d : reconnect_dsa_head :\n", host->host_no);
-
- for (left = host->can_queue,
- dsa = bus_to_virt (hostdata->reconnect_dsa_head);
- left >= 0 && dsa;
- dsa = next_dsa) {
- local_irq_save(flags);
- if (check_address ((unsigned long) dsa, sizeof(dsa)) == -1) {
- printk ("scsi%d: bad DSA pointer 0x%p", host->host_no,
- dsa);
- next_dsa = NULL;
- }
- else
- {
- next_dsa = bus_to_virt(dsa[hostdata->dsa_next / sizeof(u32)]);
- print_dsa (host, dsa, "");
- }
- local_irq_restore(flags);
- }
- printk ("scsi%d : end reconnect_dsa_head\n", host->host_no);
- if (left < 0)
- printk("scsi%d: possible loop in ncr reconnect list\n",
- host->host_no);
-}
-
-static void
-print_lots (struct Scsi_Host *host) {
- NCR53c7x0_local_declare();
- struct NCR53c7x0_hostdata *hostdata =
- (struct NCR53c7x0_hostdata *) host->hostdata[0];
- u32 *dsp_next, *dsp, *dsa, dbc_dcmd;
- unsigned char dcmd, sbcl;
- int i, size;
- NCR53c7x0_local_setup(host);
-
- if ((dsp_next = bus_to_virt(NCR53c7x0_read32 (DSP_REG)))) {
- dbc_dcmd = NCR53c7x0_read32(DBC_REG);
- dcmd = (dbc_dcmd & 0xff000000) >> 24;
- dsp = dsp_next - NCR53c7x0_insn_size(dcmd);
- dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
- sbcl = NCR53c7x0_read8 (SBCL_REG);
-
- /*
- * For the 53c710, the following will report value 0 for SCNTL3
- * and STEST0 - we don't have these registers.
- */
- printk ("scsi%d : DCMD|DBC=0x%x, DNAD=0x%x (virt 0x%p)\n"
- " DSA=0x%lx (virt 0x%p)\n"
- " DSPS=0x%x, TEMP=0x%x (virt 0x%p), DMODE=0x%x\n"
- " SXFER=0x%x, SCNTL3=0x%x\n"
- " %s%s%sphase=%s, %d bytes in SCSI FIFO\n"
- " SCRATCH=0x%x, saved2_dsa=0x%0lx\n",
- host->host_no, dbc_dcmd, NCR53c7x0_read32(DNAD_REG),
- bus_to_virt(NCR53c7x0_read32(DNAD_REG)),
- virt_to_bus(dsa), dsa,
- NCR53c7x0_read32(DSPS_REG), NCR53c7x0_read32(TEMP_REG),
- bus_to_virt (NCR53c7x0_read32(TEMP_REG)),
- (int) NCR53c7x0_read8(hostdata->dmode),
- (int) NCR53c7x0_read8(SXFER_REG),
- ((hostdata->chip / 100) == 8) ?
- (int) NCR53c7x0_read8(SCNTL3_REG_800) : 0,
- (sbcl & SBCL_BSY) ? "BSY " : "",
- (sbcl & SBCL_SEL) ? "SEL " : "",
- (sbcl & SBCL_REQ) ? "REQ " : "",
- sstat2_to_phase(NCR53c7x0_read8 (((hostdata->chip / 100) == 8) ?
- SSTAT1_REG : SSTAT2_REG)),
- (NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ?
- SSTAT1_REG : SSTAT2_REG) & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT,
- ((hostdata->chip / 100) == 8) ? NCR53c7x0_read8 (STEST0_REG_800) :
- NCR53c7x0_read32(SCRATCHA_REG_800),
- hostdata->saved2_dsa);
- printk ("scsi%d : DSP 0x%lx (virt 0x%p) ->\n", host->host_no,
- virt_to_bus(dsp), dsp);
- for (i = 6; i > 0; --i, dsp += size)
- size = print_insn (host, dsp, "", 1);
- if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON) {
- if ((hostdata->chip / 100) == 8)
- printk ("scsi%d : connected (SDID=0x%x, SSID=0x%x)\n",
- host->host_no, NCR53c7x0_read8 (SDID_REG_800),
- NCR53c7x0_read8 (SSID_REG_800));
- else
- printk ("scsi%d : connected (SDID=0x%x)\n",
- host->host_no, NCR53c7x0_read8 (SDID_REG_700));
- print_dsa (host, dsa, "");
- }
-
-#if 1
- print_queues (host);
-#endif
- }
-}
-
-/*
- * Function : static int shutdown (struct Scsi_Host *host)
- *
- * Purpose : does a clean (we hope) shutdown of the NCR SCSI
- * chip. Use prior to dumping core or unloading the NCR driver.
- *
- * Returns : 0 on success
- */
-static int
-shutdown (struct Scsi_Host *host) {
- NCR53c7x0_local_declare();
- unsigned long flags;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- NCR53c7x0_local_setup(host);
- local_irq_save(flags);
-/* Get in a state where we can reset the SCSI bus */
- ncr_halt (host);
- ncr_scsi_reset (host);
- hostdata->soft_reset(host);
-
- disable (host);
- local_irq_restore(flags);
- return 0;
-}
-
-/*
- * Function : void ncr_scsi_reset (struct Scsi_Host *host)
- *
- * Purpose : reset the SCSI bus.
- */
-
-static void
-ncr_scsi_reset (struct Scsi_Host *host) {
- NCR53c7x0_local_declare();
- unsigned long flags;
- NCR53c7x0_local_setup(host);
- local_irq_save(flags);
- NCR53c7x0_write8(SCNTL1_REG, SCNTL1_RST);
- udelay(25); /* Minimum amount of time to assert RST */
- NCR53c7x0_write8(SCNTL1_REG, 0);
- local_irq_restore(flags);
-}
-
-/*
- * Function : void hard_reset (struct Scsi_Host *host)
- *
- * Purpose : reset the SCSI bus, then reinitialize the chip and the
- *	driver's data structures.
- */
-
-static void
-hard_reset (struct Scsi_Host *host) {
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- unsigned long flags;
- local_irq_save(flags);
- ncr_scsi_reset(host);
- NCR53c7x0_driver_init (host);
- if (hostdata->soft_reset)
- hostdata->soft_reset (host);
- local_irq_restore(flags);
-}
-
-
-/*
- * Function : Scsi_Cmnd *return_outstanding_commands (struct Scsi_Host *host,
- * int free, int issue)
- *
- * Purpose : return a linked list of the Scsi_Cmnd structures which had
- *	propagated below the Linux issue queue level.  The SCp.buffer
- *	field is used as the next pointer so that hostdata is not
- *	perturbed; a field of the NCR53c7x0_cmd structure can't be used
- *	because one may not have been allocated for the command causing
- *	the reset.  If free is set, free the NCR53c7x0_cmd structures
- *	which are associated with the Scsi_Cmnd structures, and clean up
- *	any internal NCR lists that the commands were on.  If issue is
- *	set, also return commands in the issue queue.
- *
- * Returns : linked list of commands
- *
- * NOTE : the caller should ensure that the NCR chip is halted
- * if the free flag is set.
- */
-
-static Scsi_Cmnd *
-return_outstanding_commands (struct Scsi_Host *host, int free, int issue) {
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- struct NCR53c7x0_cmd *c;
- int i;
- u32 *ncrcurrent;
- Scsi_Cmnd *list = NULL, *tmp;
- for (c = (struct NCR53c7x0_cmd *) hostdata->running_list; c;
- c = (struct NCR53c7x0_cmd *) c->next) {
- if (c->cmd->SCp.buffer) {
- printk ("scsi%d : loop detected in running list!\n", host->host_no);
- break;
-	}
-
- c->cmd->SCp.buffer = (struct scatterlist *) list;
- list = c->cmd;
- if (free) {
- c->next = hostdata->free;
- hostdata->free = c;
- }
- }
-
- if (free) {
- for (i = 0, ncrcurrent = (u32 *) hostdata->schedule;
- i < host->can_queue; ++i, ncrcurrent += 2) {
- ncrcurrent[0] = hostdata->NOP_insn;
- ncrcurrent[1] = 0xdeadbeef;
- }
- hostdata->ncrcurrent = NULL;
- }
-
- if (issue) {
- for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; tmp = tmp->next) {
- if (tmp->SCp.buffer) {
- printk ("scsi%d : loop detected in issue queue!\n",
- host->host_no);
- break;
- }
- tmp->SCp.buffer = (struct scatterlist *) list;
- list = tmp;
- }
- if (free)
- hostdata->issue_queue = NULL;
-
- }
- return list;
-}
-
-/*
- * Function : static int disable (struct Scsi_Host *host)
- *
- * Purpose : disables the given NCR host, causing all commands
- * to return a driver error. Call this so we can unload the
- * module during development and try again. Eventually,
- * we should be able to find clean workarounds for these
- * problems.
- *
- * Inputs : host - hostadapter to twiddle
- *
- * Returns : 0 on success.
- */
-
-static int
-disable (struct Scsi_Host *host) {
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- unsigned long flags;
- Scsi_Cmnd *nuke_list, *tmp;
- local_irq_save(flags);
- if (hostdata->state != STATE_HALTED)
- ncr_halt (host);
- nuke_list = return_outstanding_commands (host, 1 /* free */, 1 /* issue */);
- hard_reset (host);
- hostdata->state = STATE_DISABLED;
- local_irq_restore(flags);
- printk ("scsi%d : nuking commands\n", host->host_no);
- for (; nuke_list; nuke_list = tmp) {
- tmp = (Scsi_Cmnd *) nuke_list->SCp.buffer;
- nuke_list->result = DID_ERROR << 16;
- nuke_list->scsi_done(nuke_list);
- }
- printk ("scsi%d : done. \n", host->host_no);
- printk (KERN_ALERT "scsi%d : disabled. Unload and reload\n",
- host->host_no);
- return 0;
-}
-
-/*
- * Function : static int ncr_halt (struct Scsi_Host *host)
- *
- * Purpose : halts the SCSI SCRIPTS(tm) processor on the NCR chip
- *
- * Inputs : host - SCSI chip to halt
- *
- * Returns : 0 on success
- */
-
-static int
-ncr_halt (struct Scsi_Host *host) {
- NCR53c7x0_local_declare();
- unsigned long flags;
- unsigned char istat, tmp;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- int stage;
- NCR53c7x0_local_setup(host);
-
- local_irq_save(flags);
- /* Stage 0 : eat all interrupts
- Stage 1 : set ABORT
- Stage 2 : eat all but abort interrupts
- Stage 3 : eat all interrupts
- */
- for (stage = 0;;) {
- if (stage == 1) {
- NCR53c7x0_write8(hostdata->istat, ISTAT_ABRT);
- ++stage;
- }
- istat = NCR53c7x0_read8 (hostdata->istat);
- if (istat & ISTAT_SIP) {
- tmp = NCR53c7x0_read8(SSTAT0_REG);
- } else if (istat & ISTAT_DIP) {
- tmp = NCR53c7x0_read8(DSTAT_REG);
- if (stage == 2) {
- if (tmp & DSTAT_ABRT) {
- NCR53c7x0_write8(hostdata->istat, 0);
- ++stage;
- } else {
- printk(KERN_ALERT "scsi%d : could not halt NCR chip\n",
- host->host_no);
- disable (host);
- }
- }
- }
- if (!(istat & (ISTAT_SIP|ISTAT_DIP))) {
- if (stage == 0)
- ++stage;
- else if (stage == 3)
- break;
- }
- }
- hostdata->state = STATE_HALTED;
- local_irq_restore(flags);
-#if 0
- print_lots (host);
-#endif
- return 0;
-}
-
-/*
- * Function: event_name (int event)
- *
- * Purpose: map event enum into user-readable strings.
- */
-
-static const char *
-event_name (int event) {
- switch (event) {
- case EVENT_NONE: return "none";
- case EVENT_ISSUE_QUEUE: return "to issue queue";
- case EVENT_START_QUEUE: return "to start queue";
- case EVENT_SELECT: return "selected";
- case EVENT_DISCONNECT: return "disconnected";
- case EVENT_RESELECT: return "reselected";
- case EVENT_COMPLETE: return "completed";
- case EVENT_IDLE: return "idle";
- case EVENT_SELECT_FAILED: return "select failed";
- case EVENT_BEFORE_SELECT: return "before select";
- case EVENT_RESELECT_FAILED: return "reselect failed";
- default: return "unknown";
- }
-}
-
-/*
- * Function : void dump_events (struct Scsi_Host *host, int count)
- *
- * Purpose : print last count events which have occurred.
- */
-static void
-dump_events (struct Scsi_Host *host, int count) {
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- struct NCR53c7x0_event event;
- int i;
- unsigned long flags;
- if (hostdata->events) {
- if (count > hostdata->event_size)
- count = hostdata->event_size;
- for (i = hostdata->event_index; count > 0;
- i = (i ? i - 1 : hostdata->event_size -1), --count) {
-/*
- * By copying the event we're currently examining with interrupts
- * disabled, we can do multiple printk(), etc. operations and
- * still be guaranteed that they're happening on the same
- * event structure.
- */
- local_irq_save(flags);
-#if 0
- event = hostdata->events[i];
-#else
- memcpy ((void *) &event, (void *) &(hostdata->events[i]),
- sizeof(event));
-#endif
-
- local_irq_restore(flags);
- printk ("scsi%d : %s event %d at %ld secs %ld usecs target %d lun %d\n",
- host->host_no, event_name (event.event), count,
- (long) event.time.tv_sec, (long) event.time.tv_usec,
- event.target, event.lun);
- if (event.dsa)
- printk (" event for dsa 0x%lx (virt 0x%p)\n",
- virt_to_bus(event.dsa), event.dsa);
- if (event.pid != -1) {
- printk (" event for pid %ld ", event.pid);
- __scsi_print_command (event.cmnd);
- }
- }
- }
-}
-
-/*
- * Function: check_address
- *
- * Purpose: Check to see if a possibly corrupt pointer will fault the
- * kernel.
- *
- * Inputs: addr - address; size - size of area
- *
- * Returns: 0 if area is OK, -1 on error.
- *
- * NOTES: should be implemented in terms of vverify on kernels
- * that have it.
- */
-
-static int
-check_address (unsigned long addr, int size) {
-	return (virt_to_phys((void *)addr) < PAGE_SIZE ||
-	    virt_to_phys((void *)(addr + size)) > virt_to_phys(high_memory)) ? -1 : 0;
-}
-
-#ifdef MODULE
-int
-NCR53c7x0_release(struct Scsi_Host *host) {
- struct NCR53c7x0_hostdata *hostdata =
- (struct NCR53c7x0_hostdata *) host->hostdata[0];
- struct NCR53c7x0_cmd *cmd, *tmp;
- shutdown (host);
- if (host->irq != SCSI_IRQ_NONE)
- {
- int irq_count;
- struct Scsi_Host *tmp;
- for (irq_count = 0, tmp = first_host; tmp; tmp = tmp->next)
- if (tmp->hostt == the_template && tmp->irq == host->irq)
- ++irq_count;
- if (irq_count == 1)
- free_irq(host->irq, NULL);
- }
- if (host->dma_channel != DMA_NONE)
- free_dma(host->dma_channel);
- if (host->io_port)
- release_region(host->io_port, host->n_io_port);
-
- for (cmd = (struct NCR53c7x0_cmd *) hostdata->free; cmd; cmd = tmp,
- --hostdata->num_cmds) {
- tmp = (struct NCR53c7x0_cmd *) cmd->next;
- /*
- * If we're going to loop, try to stop it to get a more accurate
- * count of the leaked commands.
- */
- cmd->next = NULL;
- if (cmd->free)
- cmd->free ((void *) cmd->real, cmd->size);
- }
- if (hostdata->num_cmds)
- printk ("scsi%d : leaked %d NCR53c7x0_cmd structures\n",
- host->host_no, hostdata->num_cmds);
-
- vfree(hostdata->events);
-
- /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, which
- * XXX may be invalid (CONFIG_060_WRITETHROUGH)
- */
- kernel_set_cachemode((void *)hostdata, 8192, IOMAP_FULL_CACHING);
- free_pages ((u32)hostdata, 1);
- return 1;
-}
-#endif /* def MODULE */
diff --git a/drivers/scsi/53c7xx.h b/drivers/scsi/53c7xx.h
deleted file mode 100644
index 218f3b90153..00000000000
--- a/drivers/scsi/53c7xx.h
+++ /dev/null
@@ -1,1608 +0,0 @@
-/*
- * 53c710 driver. Modified from Drew Eckhardt's driver
- * for 53c810 by Richard Hirst [richard@sleepie.demon.co.uk]
- *
- * I have left the code for the 53c8xx family in here, because it didn't
- * seem worth removing it. The possibility of IO_MAPPED chips rather
- * than MEMORY_MAPPED remains, in case someone wants to add support for
- * 53c710 chips on Intel PCs (some older machines have them on the
- * motherboard).
- *
- * NOTE THERE MAY BE PROBLEMS WITH CASTS IN read8 AND Co.
- */
-
-/*
- * NCR 53c{7,8}0x0 driver, header file
- *
- * Sponsored by
- * iX Multiuser Multitasking Magazine
- * Hannover, Germany
- * hm@ix.de
- *
- * Copyright 1993, 1994, 1995 Drew Eckhardt
- * Visionary Computing
- * (Unix and Linux consulting and custom programming)
- * drew@PoohSticks.ORG
- * +1 (303) 786-7975
- *
- * TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
- *
- * PRE-ALPHA
- *
- * For more information, please consult
- *
- * NCR 53C700/53C700-66
- * SCSI I/O Processor
- * Data Manual
- *
- * NCR 53C810
- * PCI-SCSI I/O Processor
- * Data Manual
- *
- * NCR Microelectronics
- * 1635 Aeroplaza Drive
- * Colorado Springs, CO 80916
- * +1 (719) 578-3400
- *
- * Toll free literature number
- * +1 (800) 334-5454
- *
- */
-
-#ifndef NCR53c710_H
-#define NCR53c710_H
-
-#ifndef HOSTS_C
-
-/* SCSI control 0 rw, default = 0xc0 */
-#define SCNTL0_REG 0x00
-#define SCNTL0_ARB1 0x80 /* 0 0 = simple arbitration */
-#define SCNTL0_ARB2 0x40 /* 1 1 = full arbitration */
-#define SCNTL0_STRT 0x20 /* Start Sequence */
-#define SCNTL0_WATN 0x10 /* Select with ATN */
-#define SCNTL0_EPC 0x08 /* Enable parity checking */
-/* Bit 2 is reserved on 800 series chips */
-#define SCNTL0_EPG_700 0x04 /* Enable parity generation */
-#define SCNTL0_AAP 0x02 /* ATN/ on parity error */
-#define SCNTL0_TRG 0x01 /* Target mode */
-
-/* SCSI control 1 rw, default = 0x00 */
-
-#define SCNTL1_REG 0x01
-#define SCNTL1_EXC 0x80 /* Extra Clock Cycle of Data setup */
-#define SCNTL1_ADB 0x40 /* contents of SODL on bus */
-#define SCNTL1_ESR_700 0x20 /* Enable SIOP response to selection
- and reselection */
-#define SCNTL1_DHP_800 0x20 /* Disable halt on parity error or ATN
- target mode only */
-#define SCNTL1_CON 0x10 /* Connected */
-#define SCNTL1_RST 0x08 /* SCSI RST/ */
-#define SCNTL1_AESP 0x04 /* Force bad parity */
-#define SCNTL1_SND_700 0x02 /* Start SCSI send */
-#define SCNTL1_IARB_800 0x02 /* Immediate Arbitration, start
- arbitration immediately after
- busfree is detected */
-#define SCNTL1_RCV_700 0x01 /* Start SCSI receive */
-#define SCNTL1_SST_800 0x01 /* Start SCSI transfer */
-
-/* SCSI control 2 rw, */
-
-#define SCNTL2_REG_800 0x02
-#define SCNTL2_800_SDU 0x80 /* SCSI disconnect unexpected */
-
-/* SCSI control 3 rw */
-
-#define SCNTL3_REG_800 0x03
-#define SCNTL3_800_SCF_SHIFT 4
-#define SCNTL3_800_SCF_MASK 0x70
-#define SCNTL3_800_SCF2 0x40 /* Synchronous divisor */
-#define SCNTL3_800_SCF1 0x20 /* 0x00 = SCLK/3 */
-#define SCNTL3_800_SCF0 0x10 /* 0x10 = SCLK/1 */
- /* 0x20 = SCLK/1.5
- 0x30 = SCLK/2
- 0x40 = SCLK/3 */
-
-#define SCNTL3_800_CCF_SHIFT 0
-#define SCNTL3_800_CCF_MASK 0x07
-#define SCNTL3_800_CCF2 0x04 /* 0x00 50.01 to 66 */
-#define SCNTL3_800_CCF1 0x02 /* 0x01 16.67 to 25 */
-#define SCNTL3_800_CCF0 0x01 /* 0x02 25.01 - 37.5
- 0x03 37.51 - 50
- 0x04 50.01 - 66 */
-
-/*
- * SCSI destination ID rw - the appropriate bit is set for the selected
- * target ID. This is written by the SCSI SCRIPTS processor.
- * default = 0x00
- */
-#define SDID_REG_700 0x02
-#define SDID_REG_800 0x06
-
-#define GP_REG_800 0x07 /* General purpose IO */
-#define GP_800_IO1 0x02
-#define GP_800_IO2 0x01
-
-/* SCSI interrupt enable rw, default = 0x00 */
-#define SIEN_REG_700 0x03
-#define SIEN0_REG_800 0x40
-#define SIEN_MA 0x80 /* Phase mismatch (ini) or ATN (tgt) */
-#define SIEN_FC 0x40 /* Function complete */
-#define SIEN_700_STO 0x20 /* Selection or reselection timeout */
-#define SIEN_800_SEL 0x20 /* Selected */
-#define SIEN_700_SEL 0x10 /* Selected or reselected */
-#define SIEN_800_RESEL 0x10 /* Reselected */
-#define SIEN_SGE 0x08 /* SCSI gross error */
-#define SIEN_UDC 0x04 /* Unexpected disconnect */
-#define SIEN_RST 0x02 /* SCSI RST/ received */
-#define SIEN_PAR 0x01 /* Parity error */
-
-/*
- * SCSI chip ID rw
- * NCR53c700 :
- * When arbitrating, the highest bit is used, when reselection or selection
- * occurs, the chip responds to all IDs for which a bit is set.
- * default = 0x00
- * NCR53c810 :
- * Uses bit mapping
- */
-#define SCID_REG 0x04
-/* Bit 7 is reserved on 800 series chips */
-#define SCID_800_RRE 0x40 /* Enable response to reselection */
-#define SCID_800_SRE 0x20 /* Enable response to selection */
-/* Bits four and three are reserved on 800 series chips */
-#define SCID_800_ENC_MASK 0x07 /* Encoded SCSI ID */
-
-/* SCSI transfer rw, default = 0x00 */
-#define SXFER_REG 0x05
-#define SXFER_DHP 0x80 /* Disable halt on parity */
-
-#define SXFER_TP2 0x40 /* Transfer period msb */
-#define SXFER_TP1 0x20
-#define SXFER_TP0 0x10 /* lsb */
-#define SXFER_TP_MASK 0x70
-/* FIXME : SXFER_TP_SHIFT == 5 is right for '8xx chips */
-#define SXFER_TP_SHIFT 5
-#define SXFER_TP_4 0x00 /* Divisors */
-#define SXFER_TP_5 0x10<<1
-#define SXFER_TP_6 0x20<<1
-#define SXFER_TP_7 0x30<<1
-#define SXFER_TP_8 0x40<<1
-#define SXFER_TP_9 0x50<<1
-#define SXFER_TP_10 0x60<<1
-#define SXFER_TP_11 0x70<<1
-
-#define SXFER_MO3 0x08 /* Max offset msb */
-#define SXFER_MO2 0x04
-#define SXFER_MO1 0x02
-#define SXFER_MO0 0x01 /* lsb */
-#define SXFER_MO_MASK 0x0f
-#define SXFER_MO_SHIFT 0
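A minimal sketch, not taken from the removed driver, of composing an SXFER
value from a transfer-period divisor index and a synchronous offset using the
masks above; per the FIXME, the shift is only assumed correct for '8xx chips:

	/* sxfer_compose() is a hypothetical helper, not part of this header. */
	static inline unsigned char sxfer_compose(unsigned char tp, unsigned char off)
	{
		return ((tp << SXFER_TP_SHIFT) & SXFER_TP_MASK) |
		       ((off << SXFER_MO_SHIFT) & SXFER_MO_MASK);
	}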
-
-/*
- * SCSI output data latch rw
- * The contents of this register are driven onto the SCSI bus when
- * the Assert Data Bus bit of the SCNTL1 register is set and
- * the CD, IO, and MSG bits of the SOCL register match the SCSI phase
- */
-#define SODL_REG_700 0x06
-#define SODL_REG_800 0x54
-
-
-/*
- * SCSI output control latch rw, default = 0
- * Note that when the chip is being manually programmed as an initiator,
- * the MSG, CD, and IO bits must be set correctly for the phase the target
- * is driving the bus in. Otherwise no data transfer will occur due to
- * phase mismatch.
- */
-
-#define SOCL_REG 0x07
-#define SOCL_REQ 0x80 /* REQ */
-#define SOCL_ACK 0x40 /* ACK */
-#define SOCL_BSY 0x20 /* BSY */
-#define SOCL_SEL 0x10 /* SEL */
-#define SOCL_ATN 0x08 /* ATN */
-#define SOCL_MSG 0x04 /* MSG */
-#define SOCL_CD 0x02 /* C/D */
-#define SOCL_IO 0x01 /* I/O */
-
-/*
- * SCSI first byte received latch ro
- * This register contains the first byte received during a block MOVE
- * SCSI SCRIPTS instruction, including
- *
- * Initiator mode Target mode
- * Message in Command
- * Status Message out
- * Data in Data out
- *
- * It also contains the selecting or reselecting device's ID and our
- * ID.
- *
- * Note that this is the register the various IF conditionals can
- * operate on.
- */
-#define SFBR_REG 0x08
-
-/*
- * SCSI input data latch ro
- * In initiator mode, data is latched into this register on the rising
- * edge of REQ/. In target mode, data is latched on the rising edge of
- * ACK/
- */
-#define SIDL_REG_700 0x09
-#define SIDL_REG_800 0x50
-
-/*
- * SCSI bus data lines ro
- * This register reflects the instantaneous status of the SCSI data
- * lines. Note that SCNTL0 must be set to disable parity checking,
- * otherwise reading this register will latch new parity.
- */
-#define SBDL_REG_700 0x0a
-#define SBDL_REG_800 0x58
-
-#define SSID_REG_800 0x0a
-#define SSID_800_VAL 0x80 /* Exactly two bits asserted at sel */
-#define SSID_800_ENCID_MASK 0x07 /* Device which performed operation */
-
-
-/*
- * SCSI bus control lines rw,
- * instantaneous readout of control lines
- */
-#define SBCL_REG 0x0b
-#define SBCL_REQ 0x80 /* REQ ro */
-#define SBCL_ACK 0x40 /* ACK ro */
-#define SBCL_BSY 0x20 /* BSY ro */
-#define SBCL_SEL 0x10 /* SEL ro */
-#define SBCL_ATN 0x08 /* ATN ro */
-#define SBCL_MSG 0x04 /* MSG ro */
-#define SBCL_CD 0x02 /* C/D ro */
-#define SBCL_IO 0x01 /* I/O ro */
-#define SBCL_PHASE_CMDOUT SBCL_CD
-#define SBCL_PHASE_DATAIN SBCL_IO
-#define SBCL_PHASE_DATAOUT 0
-#define SBCL_PHASE_MSGIN (SBCL_CD|SBCL_IO|SBCL_MSG)
-#define SBCL_PHASE_MSGOUT (SBCL_CD|SBCL_MSG)
-#define SBCL_PHASE_STATIN (SBCL_CD|SBCL_IO)
-#define SBCL_PHASE_MASK (SBCL_CD|SBCL_IO|SBCL_MSG)
-/*
- * Synchronous SCSI Clock Control bits
- * 0 - set by DCNTL
- * 1 - SCLK / 1.0
- * 2 - SCLK / 1.5
- * 3 - SCLK / 2.0
- */
-#define SBCL_SSCF1 0x02 /* wo, -66 only */
-#define SBCL_SSCF0 0x01 /* wo, -66 only */
-#define SBCL_SSCF_MASK 0x03
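As an illustration of the phase encodings above (not code from the removed
driver; phase_name() is a hypothetical helper), an SBCL readout could be
classified like this:

	static const char *phase_name(unsigned char sbcl)
	{
		switch (sbcl & SBCL_PHASE_MASK) {
		case SBCL_PHASE_DATAOUT:	return "DATA OUT";
		case SBCL_PHASE_DATAIN:		return "DATA IN";
		case SBCL_PHASE_CMDOUT:		return "COMMAND";
		case SBCL_PHASE_STATIN:		return "STATUS";
		case SBCL_PHASE_MSGOUT:		return "MESSAGE OUT";
		case SBCL_PHASE_MSGIN:		return "MESSAGE IN";
		default:			return "unknown";
		}
	}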
-
-/*
- * XXX note : when reading the DSTAT and STAT registers to clear interrupts,
- * ensure that 10 clocks elapse between the two
- */
-/* DMA status ro */
-#define DSTAT_REG 0x0c
-#define DSTAT_DFE 0x80 /* DMA FIFO empty */
-#define DSTAT_800_MDPE 0x40 /* Master Data Parity Error */
-#define DSTAT_800_BF 0x20 /* Bus Fault */
-#define DSTAT_ABRT 0x10 /* Aborted - set on error */
-#define DSTAT_SSI 0x08 /* SCRIPTS single step interrupt */
-#define DSTAT_SIR 0x04 /* SCRIPTS interrupt received -
- set when INT instruction is
- executed */
-#define DSTAT_WTD 0x02 /* Watchdog timeout detected */
-#define DSTAT_OPC 0x01 /* Illegal instruction */
-#define DSTAT_800_IID 0x01 /* Same thing, different name */
-
-
-/* NCR53c800 moves this stuff into SIST0 */
-#define SSTAT0_REG 0x0d /* SCSI status 0 ro */
-#define SIST0_REG_800 0x42
-#define SSTAT0_MA 0x80 /* ini : phase mismatch,
- * tgt : ATN/ asserted
- */
-#define SSTAT0_CMP 0x40 /* function complete */
-#define SSTAT0_700_STO 0x20 /* Selection or reselection timeout */
-#define SIST0_800_SEL 0x20 /* Selected */
-#define SSTAT0_700_SEL 0x10 /* Selected or reselected */
-#define SIST0_800_RSL 0x10 /* Reselected */
-#define SSTAT0_SGE 0x08 /* SCSI gross error */
-#define SSTAT0_UDC 0x04 /* Unexpected disconnect */
-#define SSTAT0_RST 0x02 /* SCSI RST/ received */
-#define SSTAT0_PAR 0x01 /* Parity error */
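A hedged sketch of clearing pending interrupt status in the order the XXX
note above asks for; the udelay(1) spacing between the two reads is an
assumption, not a value taken from the removed driver:

	unsigned char dstat, sstat0;

	dstat  = NCR53c7x0_read8(DSTAT_REG);	/* clears DMA interrupt status */
	udelay(1);				/* allow at least 10 clocks between reads */
	sstat0 = NCR53c7x0_read8(SSTAT0_REG);	/* clears SCSI interrupt status */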
-
-/* And uses SSTAT0 for what was SSTAT1 */
-
-#define SSTAT1_REG 0x0e /* SCSI status 1 ro */
-#define SSTAT1_ILF 0x80 /* SIDL full */
-#define SSTAT1_ORF 0x40 /* SODR full */
-#define SSTAT1_OLF 0x20 /* SODL full */
-#define SSTAT1_AIP 0x10 /* Arbitration in progress */
-#define SSTAT1_LOA 0x08 /* Lost arbitration */
-#define SSTAT1_WOA 0x04 /* Won arbitration */
-#define SSTAT1_RST 0x02 /* Instant readout of RST/ */
-#define SSTAT1_SDP 0x01 /* Instant readout of SDP/ */
-
-#define SSTAT2_REG 0x0f /* SCSI status 2 ro */
-#define SSTAT2_FF3 0x80 /* number of bytes in synchronous */
-#define SSTAT2_FF2 0x40 /* data FIFO */
-#define SSTAT2_FF1 0x20
-#define SSTAT2_FF0 0x10
-#define SSTAT2_FF_MASK 0xf0
-#define SSTAT2_FF_SHIFT 4
-
-/*
- * Latched signals, latched on the leading edge of REQ/ for initiators,
- * ACK/ for targets.
- */
-#define SSTAT2_SDP 0x08 /* SDP */
-#define SSTAT2_MSG 0x04 /* MSG */
-#define SSTAT2_CD 0x02 /* C/D */
-#define SSTAT2_IO 0x01 /* I/O */
-#define SSTAT2_PHASE_CMDOUT SSTAT2_CD
-#define SSTAT2_PHASE_DATAIN SSTAT2_IO
-#define SSTAT2_PHASE_DATAOUT 0
-#define SSTAT2_PHASE_MSGIN (SSTAT2_CD|SSTAT2_IO|SSTAT2_MSG)
-#define SSTAT2_PHASE_MSGOUT (SSTAT2_CD|SSTAT2_MSG)
-#define SSTAT2_PHASE_STATIN (SSTAT2_CD|SSTAT2_IO)
-#define SSTAT2_PHASE_MASK (SSTAT2_CD|SSTAT2_IO|SSTAT2_MSG)
-
-
-/* NCR53c700-66 only */
-#define SCRATCHA_REG_00 0x10 /* through 0x13 Scratch A rw */
-/* NCR53c710 and higher */
-#define DSA_REG 0x10 /* DATA structure address */
-
-#define CTEST0_REG_700 0x14 /* Chip test 0 ro */
-#define CTEST0_REG_800 0x18 /* Chip test 0 rw, general purpose */
-/* 0x80 - 0x04 are reserved */
-#define CTEST0_700_RTRG 0x02 /* Real target mode */
-#define CTEST0_700_DDIR 0x01 /* Data direction, 1 =
- * SCSI bus to host, 0 =
- * host to SCSI.
- */
-
-#define CTEST1_REG_700 0x15 /* Chip test 1 ro */
-#define CTEST1_REG_800 0x19 /* Chip test 1 ro */
-#define CTEST1_FMT3 0x80 /* Identify which byte lanes are empty */
-#define CTEST1_FMT2 0x40 /* in the DMA FIFO */
-#define CTEST1_FMT1 0x20
-#define CTEST1_FMT0 0x10
-
-#define CTEST1_FFL3 0x08 /* Identify which bytes lanes are full */
-#define CTEST1_FFL2 0x04 /* in the DMA FIFO */
-#define CTEST1_FFL1 0x02
-#define CTEST1_FFL0 0x01
-
-#define CTEST2_REG_700 0x16 /* Chip test 2 ro */
-#define CTEST2_REG_800 0x1a /* Chip test 2 ro */
-
-#define CTEST2_800_DDIR 0x80 /* 1 = SCSI->host */
-#define CTEST2_800_SIGP 0x40 /* A copy of SIGP in ISTAT.
- Reading this register clears */
-#define CTEST2_800_CIO 0x20 /* Configured as IO */
-#define CTEST2_800_CM 0x10 /* Configured as memory */
-
-/* 0x80 - 0x40 are reserved on 700 series chips */
-#define CTEST2_700_SOFF 0x20 /* SCSI Offset Compare,
- * As an initiator, this bit is
- * one when the synchronous offset
- * is zero, as a target this bit
- * is one when the synchronous
- * offset is at the maximum
- * defined in SXFER
- */
-#define CTEST2_700_SFP 0x10 /* SCSI FIFO parity bit,
- * reading CTEST3 unloads a byte
- * from the FIFO and sets this
- */
-#define CTEST2_700_DFP 0x08 /* DMA FIFO parity bit,
- * reading CTEST6 unloads a byte
- * from the FIFO and sets this
- */
-#define CTEST2_TEOP 0x04 /* SCSI true end of process,
- * indicates a totally finished
- * transfer
- */
-#define CTEST2_DREQ 0x02 /* Data request signal */
-/* 0x01 is reserved on 700 series chips */
-#define CTEST2_800_DACK 0x01
-
-/*
- * Chip test 3 ro
- * Unloads the bottom byte of the eight deep SCSI synchronous FIFO,
- * check SSTAT2 FIFO full bits to determine size. Note that a GROSS
- * error results if a read is attempted on this register. Also note
- * that 16 and 32 bit reads of this register will cause corruption.
- */
-#define CTEST3_REG_700 0x17
-/* Chip test 3 rw */
-#define CTEST3_REG_800 0x1b
-#define CTEST3_800_V3 0x80 /* Chip revision */
-#define CTEST3_800_V2 0x40
-#define CTEST3_800_V1 0x20
-#define CTEST3_800_V0 0x10
-#define CTEST3_800_FLF 0x08 /* Flush DMA FIFO */
-#define CTEST3_800_CLF 0x04 /* Clear DMA FIFO */
-#define CTEST3_800_FM 0x02 /* Fetch mode pin */
-/* bit 0 is reserved on 800 series chips */
-
-#define CTEST4_REG_700 0x18 /* Chip test 4 rw */
-#define CTEST4_REG_800 0x21 /* Chip test 4 rw */
-/* 0x80 is reserved on 700 series chips */
-#define CTEST4_800_BDIS 0x80 /* Burst mode disable */
-#define CTEST4_ZMOD 0x40 /* High impedance mode */
-#define CTEST4_SZM 0x20 /* SCSI bus high impedance */
-#define CTEST4_700_SLBE 0x10 /* SCSI loopback enabled */
-#define CTEST4_800_SRTM 0x10 /* Shadow Register Test Mode */
-#define CTEST4_700_SFWR 0x08 /* SCSI FIFO write enable,
- * redirects writes from SODL
- * to the SCSI FIFO.
- */
-#define CTEST4_800_MPEE 0x08 /* Enable parity checking
- during master cycles on PCI
- bus */
-
-/*
- * These bits send the contents of the CTEST6 register to the appropriate
- * byte lane of the 32 bit DMA FIFO. Normal operation is zero, otherwise
- * the high bit means the low two bits select the byte lane.
- */
-#define CTEST4_FBL2 0x04
-#define CTEST4_FBL1 0x02
-#define CTEST4_FBL0 0x01
-#define CTEST4_FBL_MASK 0x07
-#define CTEST4_FBL_0 0x04 /* Select DMA FIFO byte lane 0 */
-#define CTEST4_FBL_1 0x05 /* Select DMA FIFO byte lane 1 */
-#define CTEST4_FBL_2 0x06 /* Select DMA FIFO byte lane 2 */
-#define CTEST4_FBL_3 0x07 /* Select DMA FIFO byte lane 3 */
-#define CTEST4_800_SAVE (CTEST4_800_BDIS)
-
-
-#define CTEST5_REG_700 0x19 /* Chip test 5 rw */
-#define CTEST5_REG_800 0x22 /* Chip test 5 rw */
-/*
- * Clock Address Incrementor. When set, it increments the
- * DNAD register to the next bus size boundary. It automatically
- * resets itself when the operation is complete.
- */
-#define CTEST5_ADCK 0x80
-/*
- * Clock Byte Counter. When set, it decrements the DBC register to
- * the next bus size boundary.
- */
-#define CTEST5_BBCK 0x40
-/*
- * Reset SCSI Offset. Setting this bit to 1 clears the current offset
- * pointer in the SCSI synchronous offset counter (SSTAT). This bit
- * is set to 1 if a SCSI Gross Error Condition occurs. The offset should
- * be cleared when a synchronous transfer fails. When written, it is
- * automatically cleared after the SCSI synchronous offset counter is
- * reset.
- */
-/* Bit 5 is reserved on 800 series chips */
-#define CTEST5_700_ROFF 0x20
-/*
- * Master Control for Set or Reset pulses. When 1, writing a 1 to any of
- * the low four bits sets that bit; when 0, writing a 1 to any of the low
- * four bits clears that bit.
- */
-#define CTEST5_MASR 0x10
-#define CTEST5_DDIR 0x08 /* DMA direction */
-/*
- * Bits 2-0 are reserved on 800 series chips
- */
-#define CTEST5_700_EOP 0x04 /* End of process */
-#define CTEST5_700_DREQ 0x02 /* Data request */
-#define CTEST5_700_DACK 0x01 /* Data acknowledge */
-
-/*
- * Chip test 6 rw - writing to this register writes to the byte
- * lane in the DMA FIFO as determined by the FBL bits in the CTEST4
- * register.
- */
-#define CTEST6_REG_700 0x1a
-#define CTEST6_REG_800 0x23
-
-#define CTEST7_REG 0x1b /* Chip test 7 rw */
-/* 0x80 - 0x40 are reserved on NCR53c700 and NCR53c700-66 chips */
-#define CTEST7_10_CDIS 0x80 /* Cache burst disable */
-#define CTEST7_10_SC1 0x40 /* Snoop control bits */
-#define CTEST7_10_SC0 0x20
-#define CTEST7_10_SC_MASK 0x60
-/* 0x20 is reserved on the NCR53c700 */
-#define CTEST7_0060_FM 0x20 /* Fetch mode */
-#define CTEST7_STD 0x10 /* Selection timeout disable */
-#define CTEST7_DFP 0x08 /* DMA FIFO parity bit for CTEST6 */
-#define CTEST7_EVP 0x04 /* 1 = host bus even parity, 0 = odd */
-#define CTEST7_10_TT1 0x02 /* Transfer type */
-#define CTEST7_00_DC 0x02 /* Set to drive DC low during instruction
- fetch */
-#define CTEST7_DIFF 0x01 /* Differential mode */
-
-#define CTEST7_SAVE ( CTEST7_EVP | CTEST7_DIFF )
-
-
-#define TEMP_REG 0x1c /* through 0x1f Temporary stack rw */
-
-#define DFIFO_REG 0x20 /* DMA FIFO rw */
-/*
- * 0x80 is reserved on the NCR53c710, the CLF and FLF bits have been
- * moved into the CTEST8 register.
- */
-#define DFIFO_00_FLF 0x80 /* Flush DMA FIFO to memory */
-#define DFIFO_00_CLF 0x40 /* Clear DMA and SCSI FIFOs */
-#define DFIFO_BO6 0x40
-#define DFIFO_BO5 0x20
-#define DFIFO_BO4 0x10
-#define DFIFO_BO3 0x08
-#define DFIFO_BO2 0x04
-#define DFIFO_BO1 0x02
-#define DFIFO_BO0 0x01
-#define DFIFO_10_BO_MASK 0x7f /* 7 bit counter */
-#define DFIFO_00_BO_MASK 0x3f /* 6 bit counter */
-
-/*
- * Interrupt status rw
- * Note that this is the only register which can be read while SCSI
- * SCRIPTS are being executed.
- */
-#define ISTAT_REG_700 0x21
-#define ISTAT_REG_800 0x14
-#define ISTAT_ABRT 0x80 /* Software abort; write 1 to abort, wait for interrupt */
-/* 0x40 and 0x20 are reserved on NCR53c700 and NCR53c700-66 chips */
-#define ISTAT_10_SRST 0x40 /* software reset */
-#define ISTAT_10_SIGP 0x20 /* signal script */
-/* 0x10 is reserved on NCR53c700 series chips */
-#define ISTAT_800_SEM 0x10 /* semaphore */
-#define ISTAT_CON 0x08 /* 1 when connected */
-#define ISTAT_800_INTF 0x04 /* Interrupt on the fly */
-#define ISTAT_700_PRE 0x04 /* Pointer register empty.
- * Set to 1 when DSPS and DSP
- * registers are empty in pipeline
- * mode, always set otherwise.
- */
-#define ISTAT_SIP 0x02 /* SCSI interrupt pending from
- * SCSI portion of SIOP see
- * SSTAT0
- */
-#define ISTAT_DIP 0x01 /* DMA interrupt pending
- * see DSTAT
- */
-
-/* NCR53c700-66 and NCR53c710 only */
-#define CTEST8_REG 0x22 /* Chip test 8 rw */
-#define CTEST8_0066_EAS 0x80 /* Enable alternate SCSI clock,
- * ie read from SCLK/ rather than CLK/
- */
-#define CTEST8_0066_EFM 0x40 /* Enable fetch and master outputs */
-#define CTEST8_0066_GRP 0x20 /* Generate Receive Parity for
- * pass through. This ensures that
- * bad parity won't reach the host
- * bus.
- */
-#define CTEST8_0066_TE 0x10 /* TolerANT enable. Enable
- * active negation, should only
- * be used for slow SCSI
- * non-differential.
- */
-#define CTEST8_0066_HSC 0x08 /* Halt SCSI clock */
-#define CTEST8_0066_SRA 0x04 /* Shorten REQ/ACK filtering,
- * must be set for fast SCSI-II
- * speeds.
- */
-#define CTEST8_0066_DAS 0x02 /* Disable automatic target/initiator
- * switching.
- */
-#define CTEST8_0066_LDE 0x01 /* Last disconnect enable.
- * The status of pending
- * disconnect is maintained by
- * the core, eliminating
- * the possibility of missing a
- * selection or reselection
- * while waiting to fetch a
- * WAIT DISCONNECT opcode.
- */
-
-#define CTEST8_10_V3 0x80 /* Chip revision */
-#define CTEST8_10_V2 0x40
-#define CTEST8_10_V1 0x20
-#define CTEST8_10_V0 0x10
-#define CTEST8_10_V_MASK 0xf0
-#define CTEST8_10_FLF 0x08 /* Flush FIFOs */
-#define CTEST8_10_CLF 0x04 /* Clear FIFOs */
-#define CTEST8_10_FM 0x02 /* Fetch pin mode */
-#define CTEST8_10_SM 0x01 /* Snoop pin mode */
-
-
-/*
- * The CTEST9 register may be used to differentiate between a
- * NCR53c700 and a NCR53c710.
- *
- * Write 0xff to this register.
- * Read it.
- * If the contents are 0xff, it is a NCR53c700
- * If the contents are 0x00, it is a NCR53c700-66 first revision
- * If the contents are some other value, it is some other NCR53c700-66
- */
-#define CTEST9_REG_00 0x23 /* Chip test 9 ro */
-#define LCRC_REG_10 0x23
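A sketch of the differentiation procedure described above; this is not code
from the removed driver, and the rev variable and printk classification are
assumptions:

	unsigned char rev;

	NCR53c7x0_write8(CTEST9_REG_00, 0xff);
	rev = NCR53c7x0_read8(CTEST9_REG_00);
	if (rev == 0xff)
		printk("NCR53c700\n");
	else if (rev == 0x00)
		printk("NCR53c700-66, first revision\n");
	else
		printk("NCR53c700-66, revision byte 0x%x\n", rev);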
-
-/*
- * 0x24 through 0x27 are the DMA byte counter register. Instructions
- * write their high 8 bits into the DCMD register, the low 24 bits into
- * the DBC register.
- *
- * Function is dependent on the command type being executed.
- */
-
-
-#define DBC_REG 0x24
-/*
- * For Block Move Instructions, DBC is a 24 bit quantity representing
- * the number of bytes to transfer.
- * For Transfer Control Instructions, DBC is bit fielded as follows :
- */
-/* Bits 20 - 23 should be clear */
-#define DBC_TCI_TRUE (1 << 19) /* Jump when true */
-#define DBC_TCI_COMPARE_DATA (1 << 18) /* Compare data */
-#define DBC_TCI_COMPARE_PHASE (1 << 17) /* Compare phase with DCMD field */
-#define DBC_TCI_WAIT_FOR_VALID (1 << 16) /* Wait for REQ */
-/* Bits 8 - 15 are reserved on some implementations ? */
-#define DBC_TCI_MASK_MASK 0xff00 /* Mask for data compare */
-#define DBC_TCI_MASK_SHIFT 8
-#define DBC_TCI_DATA_MASK 0xff /* Data to be compared */
-#define DBC_TCI_DATA_SHIFT 0
-
-#define DBC_RWRI_IMMEDIATE_MASK 0xff00 /* Immediate data */
-#define DBC_RWRI_IMMEDIATE_SHIFT 8 /* Amount to shift */
-#define DBC_RWRI_ADDRESS_MASK 0x3f0000 /* Register address */
-#define DBC_RWRI_ADDRESS_SHIFT 16
-
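A hedged example of pulling the Transfer Control Instruction fields above out
of the first word of a SCRIPTS instruction; insn is assumed to be a u32
pointer to the instruction, and the local variable names are hypothetical:

	u32 dbc = insn[0] & 0x00ffffff;			/* low 24 bits hold DBC */
	unsigned char cmp_mask = (dbc & DBC_TCI_MASK_MASK) >> DBC_TCI_MASK_SHIFT;
	unsigned char cmp_data = (dbc & DBC_TCI_DATA_MASK) >> DBC_TCI_DATA_SHIFT;
	int wait_valid = !!(dbc & DBC_TCI_WAIT_FOR_VALID);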
-
-/*
- * DMA command r/w
- */
-#define DCMD_REG 0x27
-#define DCMD_TYPE_MASK 0xc0 /* Masks off type */
-#define DCMD_TYPE_BMI 0x00 /* Indicates a Block Move instruction */
-#define DCMD_BMI_IO 0x01 /* I/O, CD, and MSG bits selecting */
-#define DCMD_BMI_CD 0x02 /* the phase for the block MOVE */
-#define DCMD_BMI_MSG 0x04 /* instruction */
-
-#define DCMD_BMI_OP_MASK 0x18 /* mask for opcode */
-#define DCMD_BMI_OP_MOVE_T 0x00 /* MOVE */
-#define DCMD_BMI_OP_MOVE_I 0x08 /* MOVE Initiator */
-
-#define DCMD_BMI_INDIRECT 0x20 /* Indirect addressing */
-
-#define DCMD_TYPE_TCI 0x80 /* Indicates a Transfer Control
- instruction */
-#define DCMD_TCI_IO 0x01 /* I/O, CD, and MSG bits selecting */
-#define DCMD_TCI_CD 0x02 /* the phase for the block MOVE */
-#define DCMD_TCI_MSG 0x04 /* instruction */
-#define DCMD_TCI_OP_MASK 0x38 /* mask for opcode */
-#define DCMD_TCI_OP_JUMP 0x00 /* JUMP */
-#define DCMD_TCI_OP_CALL 0x08 /* CALL */
-#define DCMD_TCI_OP_RETURN 0x10 /* RETURN */
-#define DCMD_TCI_OP_INT 0x18 /* INT */
-
-#define DCMD_TYPE_RWRI 0x40 /* Indicates I/O or register Read/Write
- instruction */
-#define DCMD_RWRI_OPC_MASK 0x38 /* Opcode mask */
-#define DCMD_RWRI_OPC_WRITE 0x28 /* Write SFBR to register */
-#define DCMD_RWRI_OPC_READ 0x30 /* Read register to SFBR */
-#define DCMD_RWRI_OPC_MODIFY 0x38 /* Modify in place */
-
-#define DCMD_RWRI_OP_MASK 0x07
-#define DCMD_RWRI_OP_MOVE 0x00
-#define DCMD_RWRI_OP_SHL 0x01
-#define DCMD_RWRI_OP_OR 0x02
-#define DCMD_RWRI_OP_XOR 0x03
-#define DCMD_RWRI_OP_AND 0x04
-#define DCMD_RWRI_OP_SHR 0x05
-#define DCMD_RWRI_OP_ADD 0x06
-#define DCMD_RWRI_OP_ADDC 0x07
-
-#define DCMD_TYPE_MMI 0xc0 /* Indicates a Memory Move instruction
- (three words) */
-
-
-#define DNAD_REG 0x28 /* through 0x2b DMA next address for
- data */
-#define DSP_REG 0x2c /* through 0x2f DMA SCRIPTS pointer rw */
-#define DSPS_REG 0x30 /* through 0x33 DMA SCRIPTS pointer
- save rw */
-#define DMODE_REG_00 0x34 /* DMA mode rw */
-#define DMODE_00_BL1 0x80 /* Burst length bits */
-#define DMODE_00_BL0 0x40
-#define DMODE_BL_MASK 0xc0
-/* Burst lengths (800) */
-#define DMODE_BL_2 0x00 /* 2 transfer */
-#define DMODE_BL_4 0x40 /* 4 transfers */
-#define DMODE_BL_8 0x80 /* 8 transfers */
-#define DMODE_BL_16 0xc0 /* 16 transfers */
-
-#define DMODE_10_BL_1 0x00 /* 1 transfer */
-#define DMODE_10_BL_2 0x40 /* 2 transfers */
-#define DMODE_10_BL_4 0x80 /* 4 transfers */
-#define DMODE_10_BL_8 0xc0 /* 8 transfers */
-#define DMODE_10_FC2 0x20 /* Driven to FC2 pin */
-#define DMODE_10_FC1 0x10 /* Driven to FC1 pin */
-#define DMODE_710_PD 0x08 /* Program/data on FC0 pin */
-#define DMODE_710_UO 0x02 /* User prog. output */
-
-#define DMODE_700_BW16 0x20 /* Host buswidth = 16 */
-#define DMODE_700_286 0x10 /* 286 mode */
-#define DMODE_700_IOM 0x08 /* Transfer to IO port */
-#define DMODE_700_FAM 0x04 /* Fixed address mode */
-#define DMODE_700_PIPE 0x02 /* Pipeline mode disables
- * automatic fetch / exec
- */
-#define DMODE_MAN 0x01 /* Manual start mode,
- * requires a 1 to be written
- * to the start DMA bit in the DCNTL
- * register to run scripts
- */
-
-#define DMODE_700_SAVE ( DMODE_BL_MASK | DMODE_700_BW16 | DMODE_700_286 )
-
-/* NCR53c800 series only */
-#define SCRATCHA_REG_800 0x34 /* through 0x37 Scratch A rw */
-/* NCR53c710 only */
-#define SCRATCHB_REG_10 0x34 /* through 0x37 scratch B rw */
-
-#define DMODE_REG_10 0x38 /* DMA mode rw, NCR53c710 and newer */
-#define DMODE_800_SIOM 0x20 /* Source IO = 1 */
-#define DMODE_800_DIOM 0x10 /* Destination IO = 1 */
-#define DMODE_800_ERL 0x08 /* Enable Read Line */
-
-/* 35-38 are reserved on 700 and 700-66 series chips */
-#define DIEN_REG 0x39 /* DMA interrupt enable rw */
-/* 0x80, 0x40, and 0x20 are reserved on 700-series chips */
-#define DIEN_800_MDPE 0x40 /* Master data parity error */
-#define DIEN_800_BF 0x20 /* BUS fault */
-#define DIEN_700_BF 0x20 /* BUS fault */
-#define DIEN_ABRT 0x10 /* Enable aborted interrupt */
-#define DIEN_SSI 0x08 /* Enable single step interrupt */
-#define DIEN_SIR 0x04 /* Enable SCRIPTS INT command
- * interrupt
- */
-/* 0x02 is reserved on 800 series chips */
-#define DIEN_700_WTD 0x02 /* Enable watchdog timeout interrupt */
-#define DIEN_700_OPC 0x01 /* Enable illegal instruction
- * interrupt
- */
-#define DIEN_800_IID 0x01 /* Same meaning, different name */
-
-/*
- * DMA watchdog timer rw
- * set in 16 CLK input periods.
- */
-#define DWT_REG 0x3a
-
-/* DMA control rw */
-#define DCNTL_REG 0x3b
-#define DCNTL_700_CF1 0x80 /* Clock divisor bits */
-#define DCNTL_700_CF0 0x40
-#define DCNTL_700_CF_MASK 0xc0
-/* Clock divisors Divisor SCLK range (MHZ) */
-#define DCNTL_700_CF_2 0x00 /* 2.0 37.51-50.00 */
-#define DCNTL_700_CF_1_5 0x40 /* 1.5 25.01-37.50 */
-#define DCNTL_700_CF_1 0x80 /* 1.0 16.67-25.00 */
-#define DCNTL_700_CF_3 0xc0 /* 3.0 50.01-66.67 (53c700-66) */
-
-#define DCNTL_700_S16 0x20 /* Load scripts 16 bits at a time */
-#define DCNTL_SSM 0x10 /* Single step mode */
-#define DCNTL_700_LLM 0x08 /* Low level mode, can only be set
- * after selection */
-#define DCNTL_800_IRQM 0x08 /* Totem pole IRQ pin */
-#define DCNTL_STD 0x04 /* Start DMA / SCRIPTS */
-/* 0x02 is reserved */
-#define DCNTL_00_RST 0x01 /* Software reset, resets everything
- * but 286 mode bit in DMODE. On the
- * NCR53c710, this bit moved to CTEST8
- */
-#define DCNTL_10_COM 0x01 /* 700 software compatibility mode */
-#define DCNTL_10_EA 0x20 /* Enable Ack - needed for MVME16x */
-
-#define DCNTL_700_SAVE ( DCNTL_700_CF_MASK | DCNTL_700_S16 )
-
-
-/* NCR53c700-66 only */
-#define SCRATCHB_REG_00 0x3c /* through 0x3f scratch b rw */
-#define SCRATCHB_REG_800 0x5c /* through 0x5f scratch b rw */
-/* NCR53c710 only */
-#define ADDER_REG_10 0x3c /* Adder, NCR53c710 only */
-
-#define SIEN1_REG_800 0x41
-#define SIEN1_800_STO 0x04 /* selection/reselection timeout */
-#define SIEN1_800_GEN 0x02 /* general purpose timer */
-#define SIEN1_800_HTH 0x01 /* handshake to handshake */
-
-#define SIST1_REG_800 0x43
-#define SIST1_800_STO 0x04 /* selection/reselection timeout */
-#define SIST1_800_GEN 0x02 /* general purpose timer */
-#define SIST1_800_HTH 0x01 /* handshake to handshake */
-
-#define SLPAR_REG_800 0x44 /* Parity */
-
-#define MACNTL_REG_800 0x46 /* Memory access control */
-#define MACNTL_800_TYP3 0x80
-#define MACNTL_800_TYP2 0x40
-#define MACNTL_800_TYP1 0x20
-#define MACNTL_800_TYP0 0x10
-#define MACNTL_800_DWR 0x08
-#define MACNTL_800_DRD 0x04
-#define MACNTL_800_PSCPT 0x02
-#define MACNTL_800_SCPTS 0x01
-
-#define GPCNTL_REG_800 0x47 /* General Purpose Pin Control */
-
-/* Timeouts are expressed such that 0=off, 1=100us, doubling after that */
-#define STIME0_REG_800 0x48 /* SCSI Timer Register 0 */
-#define STIME0_800_HTH_MASK 0xf0 /* Handshake to Handshake timeout */
-#define STIME0_800_HTH_SHIFT 4
-#define STIME0_800_SEL_MASK 0x0f /* Selection timeout */
-#define STIME0_800_SEL_SHIFT 0
-
-#define STIME1_REG_800 0x49
-#define STIME1_800_GEN_MASK 0x0f /* General purpose timer */
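A minimal sketch, assuming only the encoding stated above (0 = off, 1 = 100us,
doubling per step), of converting a microsecond timeout into the 4-bit field
value; stime_encode() is a hypothetical helper:

	static unsigned char stime_encode(unsigned long us)
	{
		unsigned char enc = 1;
		unsigned long t = 100;		/* encoding 1 corresponds to 100us */

		if (!us)
			return 0;		/* 0 disables the timer */
		while (t < us && enc < 0x0f) {
			t <<= 1;		/* each step doubles the timeout */
			enc++;
		}
		return enc;
	}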
-
-#define RESPID_REG_800 0x4a /* Response ID, bit fielded. 8
- bits on narrow chips, 16 on WIDE */
-
-#define STEST0_REG_800 0x4c
-#define STEST0_800_SLT 0x08 /* Selection response logic test */
-#define STEST0_800_ART 0x04 /* Arbitration priority encoder test */
-#define STEST0_800_SOZ 0x02 /* Synchronous offset zero */
-#define STEST0_800_SOM 0x01 /* Synchronous offset maximum */
-
-#define STEST1_REG_800 0x4d
-#define STEST1_800_SCLK 0x80 /* Disable SCSI clock */
-
-#define STEST2_REG_800 0x4e
-#define STEST2_800_SCE 0x80 /* Enable SOCL/SODL */
-#define STEST2_800_ROF 0x40 /* Reset SCSI sync offset */
-#define STEST2_800_SLB 0x10 /* Enable SCSI loopback mode */
-#define STEST2_800_SZM 0x08 /* SCSI high impedance mode */
-#define STEST2_800_EXT 0x02 /* Extend REQ/ACK filter 30 to 60ns */
-#define STEST2_800_LOW 0x01 /* SCSI low level mode */
-
-#define STEST3_REG_800 0x4f
-#define STEST3_800_TE 0x80 /* Enable active negation */
-#define STEST3_800_STR 0x40 /* SCSI FIFO test read */
-#define STEST3_800_HSC 0x20 /* Halt SCSI clock */
-#define STEST3_800_DSI 0x10 /* Disable single initiator response */
-#define STEST3_800_TTM 0x04 /* Time test mode */
-#define STEST3_800_CSF 0x02 /* Clear SCSI FIFO */
-#define STEST3_800_STW 0x01 /* SCSI FIFO test write */
-
-#define OPTION_PARITY 0x1 /* Enable parity checking */
-#define OPTION_TAGGED_QUEUE 0x2 /* Enable SCSI-II tagged queuing */
-#define OPTION_700 0x8 /* Always run NCR53c700 scripts */
-#define OPTION_INTFLY 0x10 /* Use INTFLY interrupts */
-#define OPTION_DEBUG_INTR 0x20 /* Debug interrupts */
-#define OPTION_DEBUG_INIT_ONLY 0x40 /* Run initialization code and
- simple test code, return
- DID_NO_CONNECT if any SCSI
- commands are attempted. */
-#define OPTION_DEBUG_READ_ONLY 0x80 /* Return DID_ERROR if any
- SCSI write is attempted */
-#define OPTION_DEBUG_TRACE 0x100 /* Animated trace mode, print
- each address and instruction
- executed to debug buffer. */
-#define OPTION_DEBUG_SINGLE 0x200 /* stop after executing one
- instruction */
-#define OPTION_SYNCHRONOUS 0x400 /* Enable sync SCSI. */
-#define OPTION_MEMORY_MAPPED 0x800 /* NCR registers have valid
- memory mapping */
-#define OPTION_IO_MAPPED 0x1000 /* NCR registers have valid
- I/O mapping */
-#define OPTION_DEBUG_PROBE_ONLY 0x2000 /* Probe only, don't even init */
-#define OPTION_DEBUG_TESTS_ONLY 0x4000 /* Probe, init, run selected tests */
-#define OPTION_DEBUG_TEST0 0x08000 /* Run test 0 */
-#define OPTION_DEBUG_TEST1 0x10000 /* Run test 1 */
-#define OPTION_DEBUG_TEST2 0x20000 /* Run test 2 */
-#define OPTION_DEBUG_DUMP 0x40000 /* Dump commands */
-#define OPTION_DEBUG_TARGET_LIMIT 0x80000 /* Only talk to target+luns specified */
-#define OPTION_DEBUG_NCOMMANDS_LIMIT 0x100000 /* Limit the number of commands */
-#define OPTION_DEBUG_SCRIPT 0x200000 /* Print when checkpoints are passed */
-#define OPTION_DEBUG_FIXUP 0x400000 /* print fixup values */
-#define OPTION_DEBUG_DSA 0x800000
-#define OPTION_DEBUG_CORRUPTION 0x1000000 /* Detect script corruption */
-#define OPTION_DEBUG_SDTR 0x2000000 /* Debug SDTR problem */
-#define OPTION_DEBUG_MISMATCH 0x4000000 /* Debug phase mismatches */
-#define OPTION_DISCONNECT 0x8000000 /* Allow disconnect */
-#define OPTION_DEBUG_DISCONNECT 0x10000000
-#define OPTION_ALWAYS_SYNCHRONOUS 0x20000000 /* Negotiate sync. transfers
- on power up */
-#define OPTION_DEBUG_QUEUES 0x80000000
-#define OPTION_DEBUG_ALLOCATION 0x100000000LL
-#define OPTION_DEBUG_SYNCHRONOUS 0x200000000LL /* Sanity check SXFER and
- SCNTL3 registers */
-#define OPTION_NO_ASYNC 0x400000000LL /* Don't automagically send
- SDTR for async transfers when
- we haven't been told to do
- a synchronous transfer. */
-#define OPTION_NO_PRINT_RACE 0x800000000LL /* Don't print message when
- the reselect/WAIT DISCONNECT
- race condition hits */
-#if !defined(PERM_OPTIONS)
-#define PERM_OPTIONS 0
-#endif
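PERM_OPTIONS can be predefined by board-specific code before this fallback; a
hypothetical configuration built from the flags above (the particular flag
choice is purely illustrative, not taken from any real board file) might be:

	#define PERM_OPTIONS (OPTION_PARITY | OPTION_DISCONNECT | \
			      OPTION_SYNCHRONOUS | OPTION_MEMORY_MAPPED)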
-
-/*
- * Some data which is accessed by the NCR chip must be 4-byte aligned.
- * For some hosts the default is less than that (eg. 68K uses 2-byte).
- * Alignment has only been forced where it is important; also if one
- * 32 bit structure field is aligned then it is assumed that following
- * 32 bit fields are also aligned. Take care when adding fields
- * which are other than 32 bit.
- */
-
-struct NCR53c7x0_synchronous {
- u32 select_indirect /* Value used for indirect selection */
- __attribute__ ((aligned (4)));
- u32 sscf_710; /* Used to set SSCF bits for 710 */
- u32 script[8]; /* Size ?? Script used when target is
- reselected */
- unsigned char synchronous_want[5]; /* Per target desired SDTR */
-/*
- * Set_synchronous programs these, select_indirect and current settings after
- * int_debug_should show a match.
- */
- unsigned char sxfer_sanity, scntl3_sanity;
-};
-
-#define CMD_FLAG_SDTR 1 /* Initiating synchronous
- transfer negotiation */
-#define CMD_FLAG_WDTR 2 /* Initiating wide transfer
- negotiation */
-#define CMD_FLAG_DID_SDTR 4 /* did SDTR */
-#define CMD_FLAG_DID_WDTR 8 /* did WDTR */
-
-struct NCR53c7x0_table_indirect {
- u32 count;
- void *address;
-};
-
-enum ncr_event {
- EVENT_NONE = 0,
-/*
- * Order is IMPORTANT, since these must correspond to the event interrupts
- * in 53c7,8xx.scr
- */
-
- EVENT_ISSUE_QUEUE = 0x5000000, /* 0 Command was added to issue queue */
- EVENT_START_QUEUE, /* 1 Command moved to start queue */
- EVENT_SELECT, /* 2 Command completed selection */
- EVENT_DISCONNECT, /* 3 Command disconnected */
- EVENT_RESELECT, /* 4 Command reselected */
- EVENT_COMPLETE, /* 5 Command completed */
- EVENT_IDLE, /* 6 */
- EVENT_SELECT_FAILED, /* 7 */
- EVENT_BEFORE_SELECT, /* 8 */
- EVENT_RESELECT_FAILED /* 9 */
-};
-
-struct NCR53c7x0_event {
- enum ncr_event event; /* What type of event */
- unsigned char target;
- unsigned char lun;
- struct timeval time;
- u32 *dsa; /* What's in the DSA register now (virt) */
-/*
- * A few things from that SCSI pid so we know what happened even after
- * the Scsi_Cmnd structure in question may have disappeared.
- */
- unsigned long pid; /* The SCSI PID which caused this
- event */
- unsigned char cmnd[12];
-};
-
-/*
- * Things in the NCR53c7x0_cmd structure are split into two parts :
- *
- * 1. A fixed portion, for things which are not accessed directly by static NCR
- * code (ie, are referenced only by the Linux side of the driver,
- * or only by dynamically generated code).
- *
- * 2. The DSA portion, for things which are accessed directly by static NCR
- * code.
- *
- * This is a little ugly, but it
- * 1. Avoids conflicts between the NCR code's picture of the structure, and
- * Linux code's idea of what it looks like.
- *
- * 2. Minimizes the pain in the Linux side of the code needed
- * to calculate real dsa locations for things, etc.
- *
- */
-
-struct NCR53c7x0_cmd {
- void *real; /* Real, unaligned address for
- free function */
- void (* free)(void *, int); /* Command to deallocate; NULL
- for structures allocated with
- scsi_register, etc. */
- Scsi_Cmnd *cmd; /* Associated Scsi_Cmnd
- structure, Scsi_Cmnd points
- at NCR53c7x0_cmd using
- host_scribble structure */
-
- int size; /* scsi_malloc'd size of this
- structure */
-
- int flags; /* CMD_* flags */
-
- unsigned char cmnd[12]; /* CDB, copied from Scsi_Cmnd */
- int result; /* Copy to Scsi_Cmnd when done */
-
- struct { /* Private non-cached bounce buffer */
- unsigned char buf[256];
- u32 addr;
- u32 len;
- } bounce;
-
-/*
- * SDTR and WIDE messages are an either/or affair
- * in this message, since we will go into message out and send
- * _the whole mess_ without dropping out of message out to
- * let the target go into message in after sending the first
- * message.
- */
-
- unsigned char select[11]; /* Select message, includes
- IDENTIFY
- (optional) QUEUE TAG
- (optional) SDTR or WDTR
- */
-
-
-	volatile struct NCR53c7x0_cmd *next; /* Linux maintained lists (free,
-					    running, eventually finished) */
-
-
- u32 *data_transfer_start; /* Start of data transfer routines */
-	u32 *data_transfer_end;	/* Address after end of data transfer
-					   routines */
-/*
- * The following three fields were moved from the DSA proper to here
- * since only dynamically generated NCR code refers to them, meaning
- * we don't need dsa_* absolutes, and it is simpler to let the
- * host code refer to them directly.
- */
-
-/*
- * HARD CODED : residual and saved_residual need to agree with the sizes
- * used in NCR53c7,8xx.scr.
- *
- * FIXME: we want to consider the case where we have odd-length
- * scatter/gather buffers and a WIDE transfer, in which case
- * we'll need to use the CHAIN MOVE instruction. Ick.
- */
- u32 residual[6] __attribute__ ((aligned (4)));
- /* Residual data transfer which
- allows pointer code to work
- right.
-
- [0-1] : Conditional call to
- appropriate other transfer
- routine.
- [2-3] : Residual block transfer
- instruction.
- [4-5] : Jump to instruction
- after splice.
- */
- u32 saved_residual[6]; /* Copy of old residual, so we
- can get another partial
- transfer and still recover
- */
-
- u32 saved_data_pointer; /* Saved data pointer */
-
- u32 dsa_next_addr; /* _Address_ of dsa_next field
- in this dsa for RISCy
- style constant. */
-
- u32 dsa_addr; /* Address of dsa; RISCy style
- constant */
-
- u32 dsa[0]; /* Variable length (depending
- on host type, number of scatter /
- gather buffers, etc). */
-};
-
-struct NCR53c7x0_break {
- u32 *address, old_instruction[2];
- struct NCR53c7x0_break *next;
- unsigned char old_size; /* Size of old instruction */
-};
-
-/* Indicates that the NCR is not executing code */
-#define STATE_HALTED 0
-/*
- * Indicates that the NCR is executing the wait for select / reselect
- * script.  Only used when running NCR53c700 compatible scripts; this is the
- * only state during which an ABORT is _not_ considered an error condition.
- */
-#define STATE_WAITING 1
-/* Indicates that the NCR is executing other code. */
-#define STATE_RUNNING 2
-/*
- * Indicates that the NCR was being aborted.
- */
-#define STATE_ABORTING 3
-/* Indicates that the NCR was successfully aborted. */
-#define STATE_ABORTED 4
-/* Indicates that the NCR has been disabled due to a fatal error */
-#define STATE_DISABLED 5
-
-/*
- * Where knowledge of SCSI SCRIPT(tm) specified values is needed
- * in an interrupt handler, an interrupt handler exists for each
- * different SCSI script so we don't have name space problems.
- *
- * Return values of these handlers are as follows :
- */
-#define SPECIFIC_INT_NOTHING 0 /* don't even restart */
-#define SPECIFIC_INT_RESTART 1 /* restart at the next instruction */
-#define SPECIFIC_INT_ABORT 2 /* recoverable error, abort cmd */
-#define SPECIFIC_INT_PANIC 3 /* unrecoverable error, panic */
-#define SPECIFIC_INT_DONE 4 /* normal command completion */
-#define SPECIFIC_INT_BREAK 5 /* break point encountered */
-
-struct NCR53c7x0_hostdata {
- int size; /* Size of entire Scsi_Host
- structure */
- int board; /* set to board type, useful if
- we have host specific things,
- ie, a general purpose I/O
- bit is being used to enable
- termination, etc. */
-
- int chip; /* set to chip type; 700-66 is
- 700-66, rest are last three
- digits of part number */
-
- char valid_ids[8]; /* Valid SCSI ID's for adapter */
-
- u32 *dsp; /* dsp to restart with after
- all stacked interrupts are
- handled. */
-
- unsigned dsp_changed:1; /* Has dsp changed within this
- set of stacked interrupts ? */
-
- unsigned char dstat; /* Most recent value of dstat */
- unsigned dstat_valid:1;
-
- unsigned expecting_iid:1; /* Expect IID interrupt */
- unsigned expecting_sto:1; /* Expect STO interrupt */
-
- /*
- * The code stays cleaner if we use variables with function
- * pointers and offsets that are unique for the different
- * scripts rather than having a slew of switch(hostdata->chip)
- * statements.
- *
- * It also means that the #defines from the SCSI SCRIPTS(tm)
- * don't have to be visible outside of the script-specific
- * instructions, preventing name space pollution.
- */
-
- void (* init_fixup)(struct Scsi_Host *host);
- void (* init_save_regs)(struct Scsi_Host *host);
- void (* dsa_fixup)(struct NCR53c7x0_cmd *cmd);
- void (* soft_reset)(struct Scsi_Host *host);
- int (* run_tests)(struct Scsi_Host *host);
-
- /*
- * Called when DSTAT_SIR is set, indicating an interrupt generated
- * by the INT instruction, where values are unique for each SCSI
- * script. Should return one of the SPEC_* values.
- */
-
- int (* dstat_sir_intr)(struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd);
-
- int dsa_len; /* Size of DSA structure */
-
- /*
- * Location of DSA fields for the SCSI SCRIPT corresponding to this
- * chip.
- */
-
- s32 dsa_start;
- s32 dsa_end;
- s32 dsa_next;
- s32 dsa_prev;
- s32 dsa_cmnd;
- s32 dsa_select;
- s32 dsa_msgout;
- s32 dsa_cmdout;
- s32 dsa_dataout;
- s32 dsa_datain;
- s32 dsa_msgin;
- s32 dsa_msgout_other;
- s32 dsa_write_sync;
- s32 dsa_write_resume;
- s32 dsa_check_reselect;
- s32 dsa_status;
- s32 dsa_saved_pointer;
- s32 dsa_jump_dest;
-
- /*
- * Important entry points that generic fixup code needs
- * to know about, fixed up.
- */
-
- s32 E_accept_message;
- s32 E_command_complete;
- s32 E_data_transfer;
- s32 E_dsa_code_template;
- s32 E_dsa_code_template_end;
- s32 E_end_data_transfer;
- s32 E_msg_in;
- s32 E_initiator_abort;
- s32 E_other_transfer;
- s32 E_other_in;
- s32 E_other_out;
- s32 E_target_abort;
- s32 E_debug_break;
- s32 E_reject_message;
- s32 E_respond_message;
- s32 E_select;
- s32 E_select_msgout;
- s32 E_test_0;
- s32 E_test_1;
- s32 E_test_2;
- s32 E_test_3;
- s32 E_dsa_zero;
- s32 E_cmdout_cmdout;
- s32 E_wait_reselect;
- s32 E_dsa_code_begin;
-
- long long options; /* Bitfielded set of options enabled */
- volatile u32 test_completed; /* Test completed */
- int test_running; /* Test currently running */
- s32 test_source
- __attribute__ ((aligned (4)));
- volatile s32 test_dest;
-
- volatile int state; /* state of driver, only used for
- OPTION_700 */
-
- unsigned char dmode; /*
- * set to the address of the DMODE
- * register for this chip.
- */
- unsigned char istat; /*
- * set to the address of the ISTAT
- * register for this chip.
- */
-
- int scsi_clock; /*
- * SCSI clock in HZ. 0 may be used
- * for unknown, although this will
- * disable synchronous negotiation.
- */
-
- volatile int intrs; /* Number of interrupts */
- volatile int resets; /* Number of SCSI resets */
- unsigned char saved_dmode;
- unsigned char saved_ctest4;
- unsigned char saved_ctest7;
- unsigned char saved_dcntl;
- unsigned char saved_scntl3;
-
- unsigned char this_id_mask;
-
- /* Debugger information */
- struct NCR53c7x0_break *breakpoints, /* Linked list of all break points */
- *breakpoint_current; /* Current breakpoint being stepped
- through, NULL if we are running
- normally. */
-#ifdef NCR_DEBUG
- int debug_size; /* Size of debug buffer */
- volatile int debug_count; /* Current data count */
- volatile char *debug_buf; /* Output ring buffer */
- volatile char *debug_write; /* Current write pointer */
- volatile char *debug_read; /* Current read pointer */
-#endif /* def NCR_DEBUG */
-
- /* XXX - primitive debugging junk, remove when working ? */
- int debug_print_limit; /* Number of commands to print
- out exhaustive debugging
- information for if
- OPTION_DEBUG_DUMP is set */
-
- unsigned char debug_lun_limit[16]; /* If OPTION_DEBUG_TARGET_LIMIT
- set, puke if commands are sent
- to other target/lun combinations */
-
- int debug_count_limit; /* Number of commands to execute
- before puking to limit debugging
- output */
-
-
- volatile unsigned idle:1; /* set to 1 if idle */
-
- /*
- * Table of synchronous+wide transfer parameters set on a per-target
- * basis.
- */
-
- volatile struct NCR53c7x0_synchronous sync[16]
- __attribute__ ((aligned (4)));
-
- volatile Scsi_Cmnd *issue_queue
- __attribute__ ((aligned (4)));
- /* waiting to be issued by
- Linux driver */
- volatile struct NCR53c7x0_cmd *running_list;
- /* commands running, maintained
- by Linux driver */
-
- volatile struct NCR53c7x0_cmd *ncrcurrent; /* currently connected
- nexus, ONLY valid for
- NCR53c700/NCR53c700-66
- */
-
- volatile struct NCR53c7x0_cmd *spare; /* pointer to spare,
- allocated at probe time,
- which we can use for
- initialization */
- volatile struct NCR53c7x0_cmd *free;
- int max_cmd_size; /* Maximum size of NCR53c7x0_cmd
- based on number of
- scatter/gather segments, etc.
- */
- volatile int num_cmds; /* Number of commands
- allocated */
- volatile int extra_allocate;
- volatile unsigned char cmd_allocated[16]; /* Have we allocated commands
- for this target yet? If not,
- do so ASAP */
- volatile unsigned char busy[16][8]; /* number of commands
- executing on each target
- */
- /*
- * Eventually, I'll switch to a coroutine for calling
- * cmd->done(cmd), etc. so that we can overlap interrupt
- * processing with this code for maximum performance.
- */
-
- volatile struct NCR53c7x0_cmd *finished_queue;
-
- /* Shared variables between SCRIPT and host driver */
- volatile u32 *schedule
- __attribute__ ((aligned (4))); /* Array of JUMPs to dsa_begin
- routines of various DSAs.
- When not in use, replace
- with jump to next slot */
-
-
- volatile unsigned char msg_buf[16]; /* buffer for messages
- other than the command
- complete message */
-
- /* Per-target default synchronous and WIDE messages */
- volatile unsigned char synchronous_want[16][5];
- volatile unsigned char wide_want[16][4];
-
- /* Bit fielded set of targets we want to speak synchronously with */
- volatile u16 initiate_sdtr;
- /* Bit fielded set of targets we want to speak wide with */
- volatile u16 initiate_wdtr;
- /* Bit fielded list of targets we've talked to. */
- volatile u16 talked_to;
-
- /* Array of bit-fielded lun lists that we need to request_sense */
- volatile unsigned char request_sense[16];
-
- u32 addr_reconnect_dsa_head
- __attribute__ ((aligned (4))); /* RISCy style constant,
- address of following */
- volatile u32 reconnect_dsa_head;
- /* Data identifying nexus we are trying to match during reselection */
- volatile unsigned char reselected_identify; /* IDENTIFY message */
- volatile unsigned char reselected_tag; /* second byte of queue tag
- message or 0 */
-
- /* These were static variables before we moved them */
-
- s32 NCR53c7xx_zero
- __attribute__ ((aligned (4)));
- s32 NCR53c7xx_sink;
- u32 NOP_insn;
- char NCR53c7xx_msg_reject;
- char NCR53c7xx_msg_abort;
- char NCR53c7xx_msg_nop;
-
- /*
-	 * Following item introduced by RGH to support the NCR53c710, which is
-	 * VERY brain-dead when it comes to memory moves
- */
-
- /* DSA save area used only by the NCR chip */
- volatile unsigned long saved2_dsa
- __attribute__ ((aligned (4)));
-
- volatile unsigned long emulated_intfly
- __attribute__ ((aligned (4)));
-
- volatile int event_size, event_index;
- volatile struct NCR53c7x0_event *events;
-
- /* If we need to generate code to kill off the currently connected
- command, this is where we do it. Should have a BMI instruction
- to source or sink the current data, followed by a JUMP
- to abort_connected */
-
- u32 *abort_script;
-
- int script_count; /* Size of script in words */
- u32 script[0]; /* Relocated SCSI script */
-
-};
-
-#define SCSI_IRQ_NONE 255
-#define DMA_NONE 255
-#define IRQ_AUTO 254
-#define DMA_AUTO 254
-
-#define BOARD_GENERIC 0
-
-#define NCR53c7x0_insn_size(insn) \
- (((insn) & DCMD_TYPE_MASK) == DCMD_TYPE_MMI ? 3 : 2)
-
-
-#define NCR53c7x0_local_declare() \
- volatile unsigned char *NCR53c7x0_address_memory; \
- unsigned int NCR53c7x0_address_io; \
- int NCR53c7x0_memory_mapped
-
-#define NCR53c7x0_local_setup(host) \
- NCR53c7x0_address_memory = (void *) (host)->base; \
- NCR53c7x0_address_io = (unsigned int) (host)->io_port; \
- NCR53c7x0_memory_mapped = ((struct NCR53c7x0_hostdata *) \
- host->hostdata[0])-> options & OPTION_MEMORY_MAPPED
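The three macros above are meant to be used together inside one function; a
minimal hypothetical use (the helper name and its arguments are illustrative,
not from the driver) could look like:

	static unsigned char ncr_read_istat(struct Scsi_Host *host,
					    struct NCR53c7x0_hostdata *hostdata)
	{
		NCR53c7x0_local_declare();

		NCR53c7x0_local_setup(host);
		/* hostdata->istat holds the ISTAT register offset for this chip */
		return NCR53c7x0_read8(hostdata->istat);
	}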
-
-#ifdef BIG_ENDIAN
-/* These could be more efficient, given that we are always memory mapped,
- * but they don't give the same problems as the write macros, so leave
- * them. */
-#ifdef __mc68000__
-#define NCR53c7x0_read8(address) \
- ((unsigned int)raw_inb((u32)NCR53c7x0_address_memory + ((u32)(address)^3)) )
-
-#define NCR53c7x0_read16(address) \
- ((unsigned int)raw_inw((u32)NCR53c7x0_address_memory + ((u32)(address)^2)))
-#else
-#define NCR53c7x0_read8(address) \
- (NCR53c7x0_memory_mapped ? \
- (unsigned int)readb((u32)NCR53c7x0_address_memory + ((u32)(address)^3)) : \
- inb(NCR53c7x0_address_io + (address)))
-
-#define NCR53c7x0_read16(address) \
- (NCR53c7x0_memory_mapped ? \
- (unsigned int)readw((u32)NCR53c7x0_address_memory + ((u32)(address)^2)) : \
- inw(NCR53c7x0_address_io + (address)))
-#endif /* mc68000 */
-#else
-#define NCR53c7x0_read8(address) \
- (NCR53c7x0_memory_mapped ? \
- (unsigned int)readb((u32)NCR53c7x0_address_memory + (u32)(address)) : \
- inb(NCR53c7x0_address_io + (address)))
-
-#define NCR53c7x0_read16(address) \
- (NCR53c7x0_memory_mapped ? \
- (unsigned int)readw((u32)NCR53c7x0_address_memory + (u32)(address)) : \
- inw(NCR53c7x0_address_io + (address)))
-#endif
-
-#ifdef __mc68000__
-#define NCR53c7x0_read32(address) \
- ((unsigned int) raw_inl((u32)NCR53c7x0_address_memory + (u32)(address)))
-#else
-#define NCR53c7x0_read32(address) \
- (NCR53c7x0_memory_mapped ? \
- (unsigned int) readl((u32)NCR53c7x0_address_memory + (u32)(address)) : \
- inl(NCR53c7x0_address_io + (address)))
-#endif /* mc68000*/
-
-#ifdef BIG_ENDIAN
-/* If we are big-endian, then we are not Intel, so probably don't have
- * an i/o map as well as a memory map. So, let's assume memory mapped.
- * Also, I am having terrible problems trying to persuade the compiler
- * not to lay down code which does a read after write for these macros.
- * If you remove 'volatile' from writeb() and friends it is ok....
- */
-
-#define NCR53c7x0_write8(address,value) \
- *(volatile unsigned char *) \
- ((u32)NCR53c7x0_address_memory + ((u32)(address)^3)) = (value)
-
-#define NCR53c7x0_write16(address,value) \
- *(volatile unsigned short *) \
- ((u32)NCR53c7x0_address_memory + ((u32)(address)^2)) = (value)
-
-#define NCR53c7x0_write32(address,value) \
- *(volatile unsigned long *) \
- ((u32)NCR53c7x0_address_memory + ((u32)(address))) = (value)
-
-#else
-
-#define NCR53c7x0_write8(address,value) \
- (NCR53c7x0_memory_mapped ? \
- ({writeb((value), (u32)NCR53c7x0_address_memory + (u32)(address)); mb();}) : \
- outb((value), NCR53c7x0_address_io + (address)))
-
-#define NCR53c7x0_write16(address,value) \
- (NCR53c7x0_memory_mapped ? \
- ({writew((value), (u32)NCR53c7x0_address_memory + (u32)(address)); mb();}) : \
- outw((value), NCR53c7x0_address_io + (address)))
-
-#define NCR53c7x0_write32(address,value) \
- (NCR53c7x0_memory_mapped ? \
- ({writel((value), (u32)NCR53c7x0_address_memory + (u32)(address)); mb();}) : \
- outl((value), NCR53c7x0_address_io + (address)))
-
-#endif
-
-/* Patch arbitrary 32 bit words in the script */
-#define patch_abs_32(script, offset, symbol, value) \
- for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
- (u32)); ++i) { \
- (script)[A_##symbol##_used[i] - (offset)] += (value); \
- if (hostdata->options & OPTION_DEBUG_FIXUP) \
- printk("scsi%d : %s reference %d at 0x%x in %s is now 0x%x\n",\
- host->host_no, #symbol, i, A_##symbol##_used[i] - \
- (int)(offset), #script, (script)[A_##symbol##_used[i] - \
- (offset)]); \
- }
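Note that the loop above expects an int i, plus host and hostdata, to exist in
the calling scope.  A hypothetical relocation call during script fixup (a
sketch only; the symbol name comes from the script file below, everything else
is illustrative) might look like:

	int i;	/* consumed by the patch_abs_* macros */

	patch_abs_32 (hostdata->script, 0, reconnect_dsa_head,
		virt_to_bus ((void *) &hostdata->reconnect_dsa_head));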
-
-/* Patch read/write instruction immediate field */
-#define patch_abs_rwri_data(script, offset, symbol, value) \
- for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
- (u32)); ++i) \
- (script)[A_##symbol##_used[i] - (offset)] = \
- ((script)[A_##symbol##_used[i] - (offset)] & \
- ~DBC_RWRI_IMMEDIATE_MASK) | \
- (((value) << DBC_RWRI_IMMEDIATE_SHIFT) & \
- DBC_RWRI_IMMEDIATE_MASK)
-
-/* Patch transfer control instruction data field */
-#define patch_abs_tci_data(script, offset, symbol, value) \
- for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
- (u32)); ++i) \
- (script)[A_##symbol##_used[i] - (offset)] = \
- ((script)[A_##symbol##_used[i] - (offset)] & \
- ~DBC_TCI_DATA_MASK) | \
- (((value) << DBC_TCI_DATA_SHIFT) & \
- DBC_TCI_DATA_MASK)
-
-/* Patch field in dsa structure (assignment should be +=?) */
-#define patch_dsa_32(dsa, symbol, word, value) \
- { \
-	(dsa)[(hostdata->symbol - hostdata->dsa_start) / sizeof(u32)	\
- + (word)] = (value); \
- if (hostdata->options & OPTION_DEBUG_DSA) \
- printk("scsi : dsa %s symbol %s(%d) word %d now 0x%x\n", \
-	    #dsa, #symbol, hostdata->symbol,				\
- (word), (u32) (value)); \
- }
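A hypothetical call site for this macro (hostdata must be in scope, as for the
patch_abs_* macros; target_id and the surrounding variables are illustrative)
might be:

	u32 select_value = hostdata->sync[target_id].select_indirect;

	/* Sketch: store the table-indirect select word in a new command's DSA */
	patch_dsa_32 (cmd->dsa, dsa_select, 0, select_value);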
-
-/* Paranoid people could use panic() here. */
-#define FATAL(host) shutdown((host));
-
-extern int ncr53c7xx_init(struct scsi_host_template *tpnt, int board, int chip,
- unsigned long base, int io_port, int irq, int dma,
- long long options, int clock);
-
-#endif /* NCR53c710_C */
-#endif /* NCR53c710_H */
diff --git a/drivers/scsi/53c7xx.scr b/drivers/scsi/53c7xx.scr
deleted file mode 100644
index 9c5694a2da8..00000000000
--- a/drivers/scsi/53c7xx.scr
+++ /dev/null
@@ -1,1591 +0,0 @@
-#undef DEBUG
-#undef EVENTS
-#undef NO_SELECTION_TIMEOUT
-#define BIG_ENDIAN
-
-; 53c710 driver.  Modified from Drew Eckhardt's driver
-; for 53c810 by Richard Hirst [richard@sleepie.demon.co.uk]
-;
-; I have left the script for the 53c8xx family in here, as it is likely
-; to be useful to see what I changed when bug hunting.
-
-; NCR 53c810 driver, main script
-; Sponsored by
-; iX Multiuser Multitasking Magazine
-; hm@ix.de
-;
-; Copyright 1993, 1994, 1995 Drew Eckhardt
-; Visionary Computing
-; (Unix and Linux consulting and custom programming)
-; drew@PoohSticks.ORG
-; +1 (303) 786-7975
-;
-; TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
-;
-; PRE-ALPHA
-;
-; For more information, please consult
-;
-; NCR 53C810
-; PCI-SCSI I/O Processor
-; Data Manual
-;
-; NCR 53C710
-; SCSI I/O Processor
-; Programmers Guide
-;
-; NCR Microelectronics
-; 1635 Aeroplaza Drive
-; Colorado Springs, CO 80916
-; 1+ (719) 578-3400
-;
-; Toll free literature number
-; +1 (800) 334-5454
-;
-; IMPORTANT : This code is self modifying due to the limitations of
-; the NCR53c7,8xx series chips. Persons debugging this code with
-; the remote debugger should take this into account, and NOT set
-; breakpoints in modified instructions.
-;
-; Design:
-; The NCR53c7,8xx family of SCSI chips are busmasters with an onboard
-; microcontroller using a simple instruction set.
-;
-; So, to minimize the effects of interrupt latency, and to maximize
-; throughput, this driver offloads the practical maximum amount
-; of processing to the SCSI chip while still maintaining a common
-; structure.
-;
-; Where tradeoffs were needed between efficiency on the older
-; chips and the newer NCR53c800 series, the NCR53c800 series
-; was chosen.
-;
-; While the NCR53c700 and NCR53c700-66 lacked the facilities to fully
-; automate SCSI transfers without host processor intervention, this
-; isn't the case with the NCR53c710 and newer chips which allow
-;
-; - reads and writes to the internal registers from within the SCSI
-; scripts, allowing the SCSI SCRIPTS(tm) code to save processor
-; state so that multiple threads of execution are possible, and also
-; provide an ALU for loop control, etc.
-;
-; - table indirect addressing for some instructions. This allows
-; pointers to be located relative to the DSA (Data Structure
-; Address) register.
-;
-; These features make it possible to implement a mailbox style interface,
-; where the same piece of code is run to handle I/O for multiple threads
-; at once minimizing our need to relocate code. Since the NCR53c700/
-; NCR53c800 series have a unique combination of features, making a
-; standard ingoing/outgoing mailbox system costly, I've modified it.
-;
-; - Mailboxes are a mixture of code and data. This lets us greatly
-; simplify the NCR53c810 code and do things that would otherwise
-; not be possible.
-;
-; The saved data pointer is now implemented as follows :
-;
-; Control flow has been architected such that if control reaches
-; munge_save_data_pointer, on a restore pointers message or
-; reconnection, a jump to the address formerly in the TEMP register
-; will allow the SCSI command to resume execution.
-;
-
-;
-; Note : the DSA structures must be aligned on 32 bit boundaries,
-; since the source and destination of MOVE MEMORY instructions
-; must share the same alignment and this is the alignment of the
-; NCR registers.
-;
-
-; For some systems (MVME166, for example) dmode is always the same, so don't
-; waste time writing it
-
-#if 1
-#define DMODE_MEMORY_TO_NCR
-#define DMODE_MEMORY_TO_MEMORY
-#define DMODE_NCR_TO_MEMORY
-#else
-#define DMODE_MEMORY_TO_NCR MOVE dmode_memory_to_ncr TO DMODE
-#define DMODE_MEMORY_TO_MEMORY MOVE dmode_memory_to_memory TO DMODE
-#define DMODE_NCR_TO_MEMORY MOVE dmode_ncr_to_memory TO DMODE
-#endif
-
-ABSOLUTE dsa_temp_lun = 0 ; Patch to lun for current dsa
-ABSOLUTE dsa_temp_next = 0 ; Patch to dsa next for current dsa
-ABSOLUTE dsa_temp_addr_next = 0 ; Patch to address of dsa next address
- ; for current dsa
-ABSOLUTE dsa_temp_sync = 0 ; Patch to address of per-target
- ; sync routine
-ABSOLUTE dsa_sscf_710 = 0 ; Patch to address of per-target
- ; sscf value (53c710)
-ABSOLUTE dsa_temp_target = 0 ; Patch to id for current dsa
-ABSOLUTE dsa_temp_addr_saved_pointer = 0; Patch to address of per-command
- ; saved data pointer
-ABSOLUTE dsa_temp_addr_residual = 0 ; Patch to address of per-command
- ; current residual code
-ABSOLUTE dsa_temp_addr_saved_residual = 0; Patch to address of per-command
- ; saved residual code
-ABSOLUTE dsa_temp_addr_new_value = 0 ; Address of value for JUMP operand
-ABSOLUTE dsa_temp_addr_array_value = 0 ; Address to copy to
-ABSOLUTE dsa_temp_addr_dsa_value = 0 ; Address of this DSA value
-
-;
-; Once a device has initiated reselection, we need to compare it
-; against the singly linked list of commands which have disconnected
-; and are pending reselection. These commands are maintained in
-; an unordered singly linked list of DSA structures, through the
-; DSA pointers at their 'centers' headed by the reconnect_dsa_head
-; pointer.
-;
-; To avoid complications in removing commands from the list,
-; I minimize the amount of expensive (at eight operations per
-; addition @ 500-600ns each) pointer operations which must
-; be done in the NCR driver by precomputing them on the
-; host processor during dsa structure generation.
-;
-; The fixed-up per DSA code knows how to recognize the nexus
-; associated with the corresponding SCSI command, and modifies
-; the source and destination pointers for the MOVE MEMORY
-; instruction which is executed when reselected_ok is called
-; to remove the command from the list. Similarly, DSA is
-; loaded with the address of the next DSA structure and
-; reselected_check_next is called if a failure occurs.
-;
-; Perhaps more concisely, the net effect of the mess is
-;
-; for (dsa = reconnect_dsa_head, dest = &reconnect_dsa_head,
-; src = NULL; dsa; dest = &dsa->next, dsa = dsa->next) {
-; src = &dsa->next;
-; if (target_id == dsa->id && target_lun == dsa->lun) {
-; *dest = *src;
-; break;
-; }
-; }
-;
-; if (!dsa)
-; error (int_err_unexpected_reselect);
-; else
-; longjmp (dsa->jump_resume, 0);
-;
-;
-
-#if (CHIP != 700) && (CHIP != 70066)
-; Define DSA structure used for mailboxes
-ENTRY dsa_code_template
-dsa_code_template:
-ENTRY dsa_code_begin
-dsa_code_begin:
-; RGH: Don't care about TEMP and DSA here
- DMODE_MEMORY_TO_NCR
- MOVE MEMORY 4, dsa_temp_addr_dsa_value, addr_scratch
- DMODE_MEMORY_TO_MEMORY
-#if (CHIP == 710)
- MOVE MEMORY 4, addr_scratch, saved_dsa
- ; We are about to go and select the device, so must set SSCF bits
- MOVE MEMORY 4, dsa_sscf_710, addr_scratch
-#ifdef BIG_ENDIAN
- MOVE SCRATCH3 TO SFBR
-#else
- MOVE SCRATCH0 TO SFBR
-#endif
- MOVE SFBR TO SBCL
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#else
- CALL scratch_to_dsa
-#endif
- CALL select
-; Handle the phase mismatch which may have resulted from the
-; MOVE FROM dsa_msgout if we returned here. The CLEAR ATN
-; may or may not be necessary, and we should update script_asm.pl
-; to handle multiple pieces.
- CLEAR ATN
- CLEAR ACK
-
-; Replace second operand with address of JUMP instruction dest operand
-; in schedule table for this DSA. Becomes dsa_jump_dest in 53c7,8xx.c.
-ENTRY dsa_code_fix_jump
-dsa_code_fix_jump:
- MOVE MEMORY 4, NOP_insn, 0
- JUMP select_done
-
-; wrong_dsa loads the DSA register with the value of the dsa_next
-; field.
-;
-wrong_dsa:
-#if (CHIP == 710)
-; NOTE DSA is corrupt when we arrive here!
-#endif
-; Patch the MOVE MEMORY INSTRUCTION such that
-; the destination address is the address of the OLD
-; next pointer.
-;
- MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok_patch + 8
- DMODE_MEMORY_TO_NCR
-;
-; Move the _contents_ of the next pointer into the DSA register as
-; the next I_T_L or I_T_L_Q tuple to check against the established
-; nexus.
-;
- MOVE MEMORY 4, dsa_temp_next, addr_scratch
- DMODE_MEMORY_TO_MEMORY
-#if (CHIP == 710)
- MOVE MEMORY 4, addr_scratch, saved_dsa
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#else
- CALL scratch_to_dsa
-#endif
- JUMP reselected_check_next
-
-ABSOLUTE dsa_save_data_pointer = 0
-ENTRY dsa_code_save_data_pointer
-dsa_code_save_data_pointer:
-#if (CHIP == 710)
- ; When we get here, TEMP has been saved in jump_temp+4, DSA is corrupt
- ; We MUST return with DSA correct
- MOVE MEMORY 4, jump_temp+4, dsa_temp_addr_saved_pointer
-; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
- MOVE MEMORY 24, dsa_temp_addr_residual, dsa_temp_addr_saved_residual
- CLEAR ACK
-#ifdef DEBUG
- INT int_debug_saved
-#endif
- MOVE MEMORY 4, saved_dsa, addr_dsa
- JUMP jump_temp
-#else
- DMODE_NCR_TO_MEMORY
- MOVE MEMORY 4, addr_temp, dsa_temp_addr_saved_pointer
- DMODE_MEMORY_TO_MEMORY
-; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
- MOVE MEMORY 24, dsa_temp_addr_residual, dsa_temp_addr_saved_residual
- CLEAR ACK
-#ifdef DEBUG
- INT int_debug_saved
-#endif
- RETURN
-#endif
-ABSOLUTE dsa_restore_pointers = 0
-ENTRY dsa_code_restore_pointers
-dsa_code_restore_pointers:
-#if (CHIP == 710)
- ; TEMP and DSA are corrupt when we get here, but who cares!
- MOVE MEMORY 4, dsa_temp_addr_saved_pointer, jump_temp + 4
-; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
- MOVE MEMORY 24, dsa_temp_addr_saved_residual, dsa_temp_addr_residual
- CLEAR ACK
- ; Restore DSA, note we don't care about TEMP
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#ifdef DEBUG
- INT int_debug_restored
-#endif
- JUMP jump_temp
-#else
- DMODE_MEMORY_TO_NCR
- MOVE MEMORY 4, dsa_temp_addr_saved_pointer, addr_temp
- DMODE_MEMORY_TO_MEMORY
-; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
- MOVE MEMORY 24, dsa_temp_addr_saved_residual, dsa_temp_addr_residual
- CLEAR ACK
-#ifdef DEBUG
- INT int_debug_restored
-#endif
- RETURN
-#endif
-
-ABSOLUTE dsa_check_reselect = 0
-; dsa_check_reselect determines whether or not the current target and
-; lun match the current DSA
-ENTRY dsa_code_check_reselect
-dsa_code_check_reselect:
-#if (CHIP == 710)
- /* Arrives here with DSA correct */
- /* Assumes we are always ID 7 */
- MOVE LCRC TO SFBR ; LCRC has our ID and his ID bits set
- JUMP REL (wrong_dsa), IF NOT dsa_temp_target, AND MASK 0x80
-#else
- MOVE SSID TO SFBR ; SSID contains 3 bit target ID
-; FIXME : we need to accommodate bit fielded and binary here for '7xx/'8xx chips
- JUMP REL (wrong_dsa), IF NOT dsa_temp_target, AND MASK 0xf8
-#endif
-;
-; Hack - move to scratch first, since SFBR is not writeable
-; via the CPU and hence a MOVE MEMORY instruction.
-;
- DMODE_MEMORY_TO_NCR
- MOVE MEMORY 1, reselected_identify, addr_scratch
- DMODE_MEMORY_TO_MEMORY
-#ifdef BIG_ENDIAN
- ; BIG ENDIAN ON MVME16x
- MOVE SCRATCH3 TO SFBR
-#else
- MOVE SCRATCH0 TO SFBR
-#endif
-; FIXME : we need to accommodate bit fielded and binary here for '7xx/'8xx chips
-; Are you sure about that? richard@sleepie.demon.co.uk
- JUMP REL (wrong_dsa), IF NOT dsa_temp_lun, AND MASK 0xf8
-; Patch the MOVE MEMORY INSTRUCTION such that
-; the source address is the address of this dsa's
-; next pointer.
- MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok_patch + 4
- CALL reselected_ok
-#if (CHIP == 710)
-; Restore DSA following memory moves in reselected_ok
-; dsa_temp_sync doesn't really care about DSA, but it has an
-; optional debug INT so a valid DSA is a good idea.
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#endif
- CALL dsa_temp_sync
-; Release ACK on the IDENTIFY message _after_ we've set the synchronous
-; transfer parameters!
- CLEAR ACK
-; Implicitly restore pointers on reselection, so a RETURN
-; will transfer control back to the right spot.
- CALL REL (dsa_code_restore_pointers)
- RETURN
-ENTRY dsa_zero
-dsa_zero:
-ENTRY dsa_code_template_end
-dsa_code_template_end:
-
-; Perform a sanity check that dsa_fields_start == dsa_code_template_end -
-; dsa_zero; otherwise, puke.
-
-ABSOLUTE dsa_fields_start = 0 ; Sanity marker
- ; pad 48 bytes (fix this RSN)
-ABSOLUTE dsa_next = 48 ; len 4 Next DSA
- ; del 4 Previous DSA address
-ABSOLUTE dsa_cmnd = 56 ; len 4 Scsi_Cmnd * for this thread.
-ABSOLUTE dsa_select = 60 ; len 4 Device ID, Period, Offset for
- ; table indirect select
-ABSOLUTE dsa_msgout = 64 ; len 8 table indirect move parameter for
- ; select message
-ABSOLUTE dsa_cmdout = 72 ; len 8 table indirect move parameter for
- ; command
-ABSOLUTE dsa_dataout = 80 ; len 4 code pointer for dataout
-ABSOLUTE dsa_datain = 84 ; len 4 code pointer for datain
-ABSOLUTE dsa_msgin = 88 ; len 8 table indirect move for msgin
-ABSOLUTE dsa_status = 96 ; len 8 table indirect move for status byte
-ABSOLUTE dsa_msgout_other = 104 ; len 8 table indirect for normal message out
- ; (Synchronous transfer negotiation, etc).
-ABSOLUTE dsa_end = 112
-
-ABSOLUTE schedule = 0 ; Array of JUMP dsa_begin or JUMP (next),
- ; terminated by a call to JUMP wait_reselect
-
-; Linked lists of DSA structures
-ABSOLUTE reconnect_dsa_head = 0 ; Link list of DSAs which can reconnect
-ABSOLUTE addr_reconnect_dsa_head = 0 ; Address of variable containing
- ; address of reconnect_dsa_head
-
-; These select the source and destination of a MOVE MEMORY instruction
-ABSOLUTE dmode_memory_to_memory = 0x0
-ABSOLUTE dmode_memory_to_ncr = 0x0
-ABSOLUTE dmode_ncr_to_memory = 0x0
-
-ABSOLUTE addr_scratch = 0x0
-ABSOLUTE addr_temp = 0x0
-#if (CHIP == 710)
-ABSOLUTE saved_dsa = 0x0
-ABSOLUTE emulfly = 0x0
-ABSOLUTE addr_dsa = 0x0
-#endif
-#endif /* CHIP != 700 && CHIP != 70066 */
-
-; Interrupts -
-; MSB indicates type
-; 0 handle error condition
-; 1 handle message
-; 2 handle normal condition
-; 3 debugging interrupt
-; 4 testing interrupt
-; Next byte indicates specific error
-
-; XXX not yet implemented, I'm not sure if I want to -
-; Next byte indicates the routine the error occurred in
-; The LSB indicates the specific place the error occurred
-
-ABSOLUTE int_err_unexpected_phase = 0x00000000 ; Unexpected phase encountered
-ABSOLUTE int_err_selected = 0x00010000 ; SELECTED (nee RESELECTED)
-ABSOLUTE int_err_unexpected_reselect = 0x00020000
-ABSOLUTE int_err_check_condition = 0x00030000
-ABSOLUTE int_err_no_phase = 0x00040000
-ABSOLUTE int_msg_wdtr = 0x01000000 ; WDTR message received
-ABSOLUTE int_msg_sdtr = 0x01010000 ; SDTR received
-ABSOLUTE int_msg_1 = 0x01020000 ; single byte special message
- ; received
-
-ABSOLUTE int_norm_select_complete = 0x02000000 ; Select complete, reprogram
- ; registers.
-ABSOLUTE int_norm_reselect_complete = 0x02010000 ; Nexus established
-ABSOLUTE int_norm_command_complete = 0x02020000 ; Command complete
-ABSOLUTE int_norm_disconnected = 0x02030000 ; Disconnected
-ABSOLUTE int_norm_aborted = 0x02040000		; Aborted *dsa
-ABSOLUTE int_norm_reset = 0x02050000 ; Generated BUS reset.
-ABSOLUTE int_norm_emulateintfly = 0x02060000 ; 53C710 Emulated intfly
-ABSOLUTE int_debug_break = 0x03000000 ; Break point
-#ifdef DEBUG
-ABSOLUTE int_debug_scheduled = 0x03010000 ; new I/O scheduled
-ABSOLUTE int_debug_idle = 0x03020000 ; scheduler is idle
-ABSOLUTE int_debug_dsa_loaded = 0x03030000 ; dsa reloaded
-ABSOLUTE int_debug_reselected = 0x03040000 ; NCR reselected
-ABSOLUTE int_debug_head = 0x03050000 ; issue head overwritten
-ABSOLUTE int_debug_disconnected = 0x03060000 ; disconnected
-ABSOLUTE int_debug_disconnect_msg = 0x03070000 ; got message to disconnect
-ABSOLUTE int_debug_dsa_schedule = 0x03080000 ; in dsa_schedule
-ABSOLUTE int_debug_reselect_check = 0x03090000 ; Check for reselection of DSA
-ABSOLUTE int_debug_reselected_ok = 0x030a0000 ; Reselection accepted
-#endif
-ABSOLUTE int_debug_panic = 0x030b0000 ; Panic driver
-#ifdef DEBUG
-ABSOLUTE int_debug_saved = 0x030c0000 ; save/restore pointers
-ABSOLUTE int_debug_restored = 0x030d0000
-ABSOLUTE int_debug_sync = 0x030e0000 ; Sanity check synchronous
- ; parameters.
-ABSOLUTE int_debug_datain = 0x030f0000 ; going into data in phase
- ; now.
-ABSOLUTE int_debug_check_dsa = 0x03100000 ; Sanity check DSA against
- ; SDID.
-#endif
-
-ABSOLUTE int_test_1 = 0x04000000 ; Test 1 complete
-ABSOLUTE int_test_2 = 0x04010000 ; Test 2 complete
-ABSOLUTE int_test_3 = 0x04020000 ; Test 3 complete
-
-
-; These should start with 0x05000000, with low bits incrementing for
-; each one.
-
-#ifdef EVENTS
-ABSOLUTE int_EVENT_SELECT = 0
-ABSOLUTE int_EVENT_DISCONNECT = 0
-ABSOLUTE int_EVENT_RESELECT = 0
-ABSOLUTE int_EVENT_COMPLETE = 0
-ABSOLUTE int_EVENT_IDLE = 0
-ABSOLUTE int_EVENT_SELECT_FAILED = 0
-ABSOLUTE int_EVENT_BEFORE_SELECT = 0
-ABSOLUTE int_EVENT_RESELECT_FAILED = 0
-#endif
-
-ABSOLUTE NCR53c7xx_msg_abort = 0 ; Pointer to abort message
-ABSOLUTE NCR53c7xx_msg_reject = 0 ; Pointer to reject message
-ABSOLUTE NCR53c7xx_zero = 0 ; long with zero in it, use for source
-ABSOLUTE NCR53c7xx_sink = 0 ; long to dump worthless data in
-ABSOLUTE NOP_insn = 0 ; NOP instruction
-
-; Pointer to message, potentially multi-byte
-ABSOLUTE msg_buf = 0
-
-; Pointer to holding area for reselection information
-ABSOLUTE reselected_identify = 0
-ABSOLUTE reselected_tag = 0
-
-; Request sense command pointer, it's a 6 byte command, should
-; be constant for all commands since we always want 16 bytes of
-; sense and we don't need to change any fields as we did under
-; SCSI-I when we actually cared about the LUN field.
-;EXTERNAL NCR53c7xx_sense ; Request sense command
-
-#if (CHIP != 700) && (CHIP != 70066)
-; dsa_schedule
-; PURPOSE : after a DISCONNECT message has been received, and pointers
-; saved, insert the current DSA structure at the head of the
-; disconnected queue and fall through to the scheduler.
-;
-; CALLS : OK
-;
-; INPUTS : dsa - current DSA structure, reconnect_dsa_head - list
-; of disconnected commands
-;
-; MODIFIES : SCRATCH, reconnect_dsa_head
-;
-; EXITS : always passes control to schedule
-
-ENTRY dsa_schedule
-dsa_schedule:
-#ifdef DEBUG
- INT int_debug_dsa_schedule
-#endif
-
-;
-; Calculate the address of the next pointer within the DSA
-; structure of the command that is currently disconnecting
-;
-#if (CHIP == 710)
- ; Read what should be the current DSA from memory - actual DSA
- ; register is probably corrupt
- MOVE MEMORY 4, saved_dsa, addr_scratch
-#else
- CALL dsa_to_scratch
-#endif
- MOVE SCRATCH0 + dsa_next TO SCRATCH0
- MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
- MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
- MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
-
-; Point the next field of this DSA structure at the current disconnected
-; list
- DMODE_NCR_TO_MEMORY
- MOVE MEMORY 4, addr_scratch, dsa_schedule_insert + 8
- DMODE_MEMORY_TO_MEMORY
-dsa_schedule_insert:
- MOVE MEMORY 4, reconnect_dsa_head, 0
-
-; And update the head pointer.
-#if (CHIP == 710)
- ; Read what should be the current DSA from memory - actual DSA
- ; register is probably corrupt
- MOVE MEMORY 4, saved_dsa, addr_scratch
-#else
- CALL dsa_to_scratch
-#endif
- DMODE_NCR_TO_MEMORY
- MOVE MEMORY 4, addr_scratch, reconnect_dsa_head
- DMODE_MEMORY_TO_MEMORY
-/* Temporarily, see what happens. */
-#ifndef ORIGINAL
-#if (CHIP != 710)
- MOVE SCNTL2 & 0x7f TO SCNTL2
-#endif
- CLEAR ACK
-#endif
-#if (CHIP == 710)
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#endif
- WAIT DISCONNECT
-#ifdef EVENTS
- INT int_EVENT_DISCONNECT;
-#endif
-#ifdef DEBUG
- INT int_debug_disconnected
-#endif
- JUMP schedule
-#endif
-
-;
-; select
-;
-; PURPOSE : establish a nexus for the SCSI command referenced by DSA.
-; On success, the current DSA structure is removed from the issue
-; queue. Usually, this is entered as a fall-through from schedule,
-; although the contingent allegiance handling code will write
-; the select entry address to the DSP to restart a command as a
-; REQUEST SENSE. A message is sent (usually IDENTIFY, although
-; additional SDTR or WDTR messages may be sent). COMMAND OUT
-; is handled.
-;
-; INPUTS : DSA - SCSI command, issue_dsa_head
-;
-; CALLS : NOT OK
-;
-; MODIFIES : SCRATCH, issue_dsa_head
-;
-; EXITS : on reselection or selection, go to select_failed
-; otherwise, RETURN so control is passed back to
-; dsa_begin.
-;
-
-ENTRY select
-select:
-
-#ifdef EVENTS
- INT int_EVENT_BEFORE_SELECT
-#endif
-
-#ifdef DEBUG
- INT int_debug_scheduled
-#endif
- CLEAR TARGET
-
-; XXX
-;
-; In effect, SELECTION operations are backgrounded, with execution
-; continuing until code which waits for REQ or a fatal interrupt is
-; encountered.
-;
-; So, for more performance, we could overlap the code which removes
-; the command from the NCR's issue queue with the selection, but
-; at this point I don't want to deal with the error recovery.
-;
-
-#if (CHIP != 700) && (CHIP != 70066)
-#if (CHIP == 710)
- ; Enable selection timer
-#ifdef NO_SELECTION_TIMEOUT
- MOVE CTEST7 & 0xff TO CTEST7
-#else
- MOVE CTEST7 & 0xef TO CTEST7
-#endif
-#endif
- SELECT ATN FROM dsa_select, select_failed
- JUMP select_msgout, WHEN MSG_OUT
-ENTRY select_msgout
-select_msgout:
-#if (CHIP == 710)
- ; Disable selection timer
- MOVE CTEST7 | 0x10 TO CTEST7
-#endif
- MOVE FROM dsa_msgout, WHEN MSG_OUT
-#else
-ENTRY select_msgout
- SELECT ATN 0, select_failed
-select_msgout:
- MOVE 0, 0, WHEN MSGOUT
-#endif
-
-#ifdef EVENTS
- INT int_EVENT_SELECT
-#endif
- RETURN
-
-;
-; select_done
-;
-; PURPOSE: continue on to normal data transfer; called as the exit
-; point from dsa_begin.
-;
-; INPUTS: dsa
-;
-; CALLS: OK
-;
-;
-
-select_done:
-#if (CHIP == 710)
-; NOTE DSA is corrupt when we arrive here!
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#endif
-
-#ifdef DEBUG
-ENTRY select_check_dsa
-select_check_dsa:
- INT int_debug_check_dsa
-#endif
-
-; After a successful selection, we should get either a CMD phase or
-; some transfer request negotiation message.
-
- JUMP cmdout, WHEN CMD
- INT int_err_unexpected_phase, WHEN NOT MSG_IN
-
-select_msg_in:
- CALL msg_in, WHEN MSG_IN
- JUMP select_msg_in, WHEN MSG_IN
-
-cmdout:
- INT int_err_unexpected_phase, WHEN NOT CMD
-#if (CHIP == 700)
- INT int_norm_selected
-#endif
-ENTRY cmdout_cmdout
-cmdout_cmdout:
-#if (CHIP != 700) && (CHIP != 70066)
- MOVE FROM dsa_cmdout, WHEN CMD
-#else
- MOVE 0, 0, WHEN CMD
-#endif /* (CHIP != 700) && (CHIP != 70066) */
-
-;
-; data_transfer
-; other_out
-; other_in
-; other_transfer
-;
-; PURPOSE : handle the main data transfer for a SCSI command in
-; several parts. In the first part, data_transfer, DATA_IN
-; and DATA_OUT phases are allowed, with the user provided
-; code (usually dynamically generated based on the scatter/gather
-; list associated with a SCSI command) called to handle these
-; phases.
-;
-; After control has passed to one of the user provided
-; DATA_IN or DATA_OUT routines, back calls are made to
-; other_transfer_in or other_transfer_out to handle non-DATA IN
-; and DATA OUT phases respectively, with the state of the active
-; data pointer being preserved in TEMP.
-;
-; On completion, the user code passes control to other_transfer
-; which causes DATA_IN and DATA_OUT to result in unexpected_phase
-; interrupts so that data overruns may be trapped.
-;
-; INPUTS : DSA - SCSI command
-;
-; CALLS : OK in data_transfer_start, not ok in other_out and other_in, ok in
-; other_transfer
-;
-; MODIFIES : SCRATCH
-;
-; EXITS : if STATUS IN is detected, signifying command completion,
-; the NCR jumps to command_complete. If MSG IN occurs, a
-; CALL is made to msg_in. Otherwise, other_transfer runs in
-; an infinite loop.
-;
-
-ENTRY data_transfer
-data_transfer:
- JUMP cmdout_cmdout, WHEN CMD
- CALL msg_in, WHEN MSG_IN
- INT int_err_unexpected_phase, WHEN MSG_OUT
- JUMP do_dataout, WHEN DATA_OUT
- JUMP do_datain, WHEN DATA_IN
- JUMP command_complete, WHEN STATUS
- JUMP data_transfer
-ENTRY end_data_transfer
-end_data_transfer:
-
-;
-; FIXME: On NCR53c700 and NCR53c700-66 chips, do_dataout/do_datain
-; should be fixed up whenever the nexus changes so it can point to the
-; correct routine for that command.
-;
-
-#if (CHIP != 700) && (CHIP != 70066)
-; Nasty jump to dsa->dataout
-do_dataout:
-#if (CHIP == 710)
- MOVE MEMORY 4, saved_dsa, addr_scratch
-#else
- CALL dsa_to_scratch
-#endif
- MOVE SCRATCH0 + dsa_dataout TO SCRATCH0
- MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
- MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
- MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
- DMODE_NCR_TO_MEMORY
- MOVE MEMORY 4, addr_scratch, dataout_to_jump + 4
- DMODE_MEMORY_TO_MEMORY
-dataout_to_jump:
- MOVE MEMORY 4, 0, dataout_jump + 4
-#if (CHIP == 710)
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#endif
-dataout_jump:
- JUMP 0
-
-; Nasty jump to dsa->datain
-do_datain:
-#if (CHIP == 710)
- MOVE MEMORY 4, saved_dsa, addr_scratch
-#else
- CALL dsa_to_scratch
-#endif
- MOVE SCRATCH0 + dsa_datain TO SCRATCH0
- MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
- MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
- MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
- DMODE_NCR_TO_MEMORY
- MOVE MEMORY 4, addr_scratch, datain_to_jump + 4
- DMODE_MEMORY_TO_MEMORY
-ENTRY datain_to_jump
-datain_to_jump:
- MOVE MEMORY 4, 0, datain_jump + 4
-#if (CHIP == 710)
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#endif
-#ifdef DEBUG
- INT int_debug_datain
-#endif
-datain_jump:
- JUMP 0
-#endif /* (CHIP != 700) && (CHIP != 70066) */
-
-
-; Note that other_out and other_in loop until a non-data phase
-; is discovered, so we only execute return statements when we
-; can go on to the next data phase block move statement.
-
-ENTRY other_out
-other_out:
-#if 0
- INT 0x03ffdead
-#endif
- INT int_err_unexpected_phase, WHEN CMD
- JUMP msg_in_restart, WHEN MSG_IN
- INT int_err_unexpected_phase, WHEN MSG_OUT
- INT int_err_unexpected_phase, WHEN DATA_IN
- JUMP command_complete, WHEN STATUS
- JUMP other_out, WHEN NOT DATA_OUT
-#if (CHIP == 710)
-; TEMP should be OK, as we got here from a call in the user dataout code.
-#endif
- RETURN
-
-ENTRY other_in
-other_in:
-#if 0
- INT 0x03ffdead
-#endif
- INT int_err_unexpected_phase, WHEN CMD
- JUMP msg_in_restart, WHEN MSG_IN
- INT int_err_unexpected_phase, WHEN MSG_OUT
- INT int_err_unexpected_phase, WHEN DATA_OUT
- JUMP command_complete, WHEN STATUS
- JUMP other_in, WHEN NOT DATA_IN
-#if (CHIP == 710)
-; TEMP should be OK, as we got here from a call in the user datain code.
-#endif
- RETURN
-
-
-ENTRY other_transfer
-other_transfer:
- INT int_err_unexpected_phase, WHEN CMD
- CALL msg_in, WHEN MSG_IN
- INT int_err_unexpected_phase, WHEN MSG_OUT
- INT int_err_unexpected_phase, WHEN DATA_OUT
- INT int_err_unexpected_phase, WHEN DATA_IN
- JUMP command_complete, WHEN STATUS
- JUMP other_transfer
-
-;
-; msg_in_restart
-; msg_in
-; munge_msg
-;
-; PURPOSE : process messages from a target. msg_in is called when the
-; caller hasn't read the first byte of the message. munge_message
-; is called when the caller has read the first byte of the message,
-; and left it in SFBR. msg_in_restart is called when the caller
-; hasn't read the first byte of the message, and wishes RETURN
-; to transfer control back to the address of the conditional
-; CALL instruction rather than to the instruction after it.
-;
-; Various int_* interrupts are generated when the host system
-; needs to intervene, as is the case with SDTR, WDTR, and
-; INITIATE RECOVERY messages.
-;
-; When the host system handles one of these interrupts,
-; it can respond by reentering at reject_message,
-; which rejects the message and returns control to
-; the caller of msg_in or munge_msg, accept_message
-; which clears ACK and returns control, or reply_message
-; which sends the message pointed to by the DSA
-; msgout_other table indirect field.
-;
-; DISCONNECT messages are handled by moving the command
-; to the reconnect_dsa_queue.
-#if (CHIP == 710)
-; NOTE: DSA should be valid when we get here - we cannot save both it
-; and TEMP in this routine.
-#endif
-;
-; INPUTS : DSA - SCSI COMMAND, SFBR - first byte of message (munge_msg
-; only)
-;
-; CALLS : NO. The TEMP register isn't backed up to allow nested calls.
-;
-; MODIFIES : SCRATCH, DSA on DISCONNECT
-;
-; EXITS : On receipt of SAVE DATA POINTER, RESTORE POINTERS,
-; and normal return from message handlers running under
-; Linux, control is returned to the caller. Receipt
-; of DISCONNECT messages pass control to dsa_schedule.
-;
-ENTRY msg_in_restart
-msg_in_restart:
-; XXX - hackish
-;
-; Since it's easier to debug changes to the statically
-; compiled code, rather than the dynamically generated
-; stuff, such as
-;
-; MOVE x, y, WHEN data_phase
-; CALL other_z, WHEN NOT data_phase
-; MOVE x, y, WHEN data_phase
-;
-; I'd like to have certain routines (notably the message handler)
-; restart on the conditional call rather than the next instruction.
-;
-; So, subtract 8 from the return address
-
- MOVE TEMP0 + 0xf8 TO TEMP0
- MOVE TEMP1 + 0xff TO TEMP1 WITH CARRY
- MOVE TEMP2 + 0xff TO TEMP2 WITH CARRY
- MOVE TEMP3 + 0xff TO TEMP3 WITH CARRY
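In C terms, the four byte-wise additions above add 0xfffffff8 to the 32-bit
value held in TEMP0..TEMP3, i.e. they subtract 8; a standalone sketch of the
same arithmetic (for illustration only, not part of the driver):

	static u32 temp_minus_8(u32 temp)
	{
		u8 b[4] = { (u8) temp, (u8) (temp >> 8),
			    (u8) (temp >> 16), (u8) (temp >> 24) };
		unsigned int sum, carry;

		sum = b[0] + 0xf8;		carry = sum >> 8; b[0] = (u8) sum;
		sum = b[1] + 0xff + carry;	carry = sum >> 8; b[1] = (u8) sum;
		sum = b[2] + 0xff + carry;	carry = sum >> 8; b[2] = (u8) sum;
		sum = b[3] + 0xff + carry;	b[3] = (u8) sum;

		return b[0] | (b[1] << 8) | (b[2] << 16) | ((u32) b[3] << 24);
	}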
-
-ENTRY msg_in
-msg_in:
- MOVE 1, msg_buf, WHEN MSG_IN
-
-munge_msg:
- JUMP munge_extended, IF 0x01 ; EXTENDED MESSAGE
- JUMP munge_2, IF 0x20, AND MASK 0xdf ; two byte message
-;
-; XXX - I've seen a handful of broken SCSI devices which fail to issue
-; a SAVE POINTERS message before disconnecting in the middle of
-; a transfer, assuming that the DATA POINTER will be implicitly
-; restored.
-;
-; Historically, I've often done an implicit save when the DISCONNECT
-; message is processed. We may want to consider having the option of
-; doing that here.
-;
- JUMP munge_save_data_pointer, IF 0x02 ; SAVE DATA POINTER
- JUMP munge_restore_pointers, IF 0x03 ; RESTORE POINTERS
- JUMP munge_disconnect, IF 0x04 ; DISCONNECT
- INT int_msg_1, IF 0x07 ; MESSAGE REJECT
- INT int_msg_1, IF 0x0f ; INITIATE RECOVERY
-#ifdef EVENTS
- INT int_EVENT_SELECT_FAILED
-#endif
- JUMP reject_message
-
-munge_2:
- JUMP reject_message
-;
-; The SCSI standard allows targets to recover from transient
-; error conditions by backing up the data pointer with a
-; RESTORE POINTERS message.
-;
-; So, we must save and restore the _residual_ code as well as
-; the current instruction pointer. Because of this messiness,
-; it is simpler to put dynamic code in the dsa for this and to
-; just do a simple jump down there.
-;
-
-munge_save_data_pointer:
-#if (CHIP == 710)
- ; We have something in TEMP here, so first we must save that
- MOVE TEMP0 TO SFBR
- MOVE SFBR TO SCRATCH0
- MOVE TEMP1 TO SFBR
- MOVE SFBR TO SCRATCH1
- MOVE TEMP2 TO SFBR
- MOVE SFBR TO SCRATCH2
- MOVE TEMP3 TO SFBR
- MOVE SFBR TO SCRATCH3
- MOVE MEMORY 4, addr_scratch, jump_temp + 4
- ; Now restore DSA
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#endif
- MOVE DSA0 + dsa_save_data_pointer TO SFBR
- MOVE SFBR TO SCRATCH0
- MOVE DSA1 + 0xff TO SFBR WITH CARRY
- MOVE SFBR TO SCRATCH1
- MOVE DSA2 + 0xff TO SFBR WITH CARRY
- MOVE SFBR TO SCRATCH2
- MOVE DSA3 + 0xff TO SFBR WITH CARRY
- MOVE SFBR TO SCRATCH3
-
- DMODE_NCR_TO_MEMORY
- MOVE MEMORY 4, addr_scratch, jump_dsa_save + 4
- DMODE_MEMORY_TO_MEMORY
-jump_dsa_save:
- JUMP 0
-
-munge_restore_pointers:
-#if (CHIP == 710)
- ; The code at dsa_restore_pointers will RETURN, but we don't care
- ; about TEMP here, as it will overwrite it anyway.
-#endif
- MOVE DSA0 + dsa_restore_pointers TO SFBR
- MOVE SFBR TO SCRATCH0
- MOVE DSA1 + 0xff TO SFBR WITH CARRY
- MOVE SFBR TO SCRATCH1
- MOVE DSA2 + 0xff TO SFBR WITH CARRY
- MOVE SFBR TO SCRATCH2
- MOVE DSA3 + 0xff TO SFBR WITH CARRY
- MOVE SFBR TO SCRATCH3
-
- DMODE_NCR_TO_MEMORY
- MOVE MEMORY 4, addr_scratch, jump_dsa_restore + 4
- DMODE_MEMORY_TO_MEMORY
-jump_dsa_restore:
- JUMP 0
-
-
-munge_disconnect:
-#ifdef DEBUG
- INT int_debug_disconnect_msg
-#endif
-
-/*
- * Before, we overlapped processing with waiting for disconnect, but
- * debugging was beginning to appear messy. Temporarily move things
- * to just before the WAIT DISCONNECT.
- */
-
-#ifdef ORIGINAL
-#if (CHIP == 710)
-; Following clears Unexpected Disconnect bit. What do we do?
-#else
- MOVE SCNTL2 & 0x7f TO SCNTL2
-#endif
- CLEAR ACK
-#endif
-
-#if (CHIP != 700) && (CHIP != 70066)
- JUMP dsa_schedule
-#else
- WAIT DISCONNECT
- INT int_norm_disconnected
-#endif
-
-munge_extended:
- CLEAR ACK
- INT int_err_unexpected_phase, WHEN NOT MSG_IN
- MOVE 1, msg_buf + 1, WHEN MSG_IN
- JUMP munge_extended_2, IF 0x02
- JUMP munge_extended_3, IF 0x03
- JUMP reject_message
-
-munge_extended_2:
- CLEAR ACK
- MOVE 1, msg_buf + 2, WHEN MSG_IN
- JUMP reject_message, IF NOT 0x02 ; Must be WDTR
- CLEAR ACK
- MOVE 1, msg_buf + 3, WHEN MSG_IN
- INT int_msg_wdtr
-
-munge_extended_3:
- CLEAR ACK
- MOVE 1, msg_buf + 2, WHEN MSG_IN
- JUMP reject_message, IF NOT 0x01 ; Must be SDTR
- CLEAR ACK
- MOVE 2, msg_buf + 3, WHEN MSG_IN
- INT int_msg_sdtr
-
-ENTRY reject_message
-reject_message:
- SET ATN
- CLEAR ACK
- MOVE 1, NCR53c7xx_msg_reject, WHEN MSG_OUT
- RETURN
-
-ENTRY accept_message
-accept_message:
- CLEAR ATN
- CLEAR ACK
- RETURN
-
-ENTRY respond_message
-respond_message:
- SET ATN
- CLEAR ACK
- MOVE FROM dsa_msgout_other, WHEN MSG_OUT
- RETURN
-
-;
-; command_complete
-;
-; PURPOSE : handle command termination when STATUS IN is detected by reading
-; a status byte followed by a command termination message.
-;
-; Normal termination results in an INTFLY instruction, and
-; the host system can pick out which command terminated by
-; examining the MESSAGE and STATUS buffers of all currently
-; executing commands;
-;
-; Abnormal (CHECK_CONDITION) termination results in an
-; int_err_check_condition interrupt so that a REQUEST SENSE
-; command can be issued out-of-order so that no other command
-; clears the contingent allegiance condition.
-;
-;
-; INPUTS : DSA - command
-;
-; CALLS : OK
-;
-; EXITS : On successful termination, control is passed to schedule.
-; On abnormal termination, the user will usually modify the
-; DSA fields and corresponding buffers and return control
-; to select.
-;
-
-ENTRY command_complete
-command_complete:
- MOVE FROM dsa_status, WHEN STATUS
-#if (CHIP != 700) && (CHIP != 70066)
- MOVE SFBR TO SCRATCH0 ; Save status
-#endif /* (CHIP != 700) && (CHIP != 70066) */
-ENTRY command_complete_msgin
-command_complete_msgin:
- MOVE FROM dsa_msgin, WHEN MSG_IN
-; Indicate that we should be expecting a disconnect
-#if (CHIP != 710)
- MOVE SCNTL2 & 0x7f TO SCNTL2
-#else
- ; Above code cleared the Unexpected Disconnect bit, what do we do?
-#endif
- CLEAR ACK
-#if (CHIP != 700) && (CHIP != 70066)
- WAIT DISCONNECT
-
-;
-; The SCSI specification states that when a UNIT ATTENTION condition
-; is pending, as indicated by a CHECK CONDITION status message,
-; the target shall revert to asynchronous transfers. Since
-; synchronous transfer parameters are maintained on a per INITIATOR/TARGET
-; basis, and returning control to our scheduler could start work on a command
-; running on another lun on that target using the old parameters, we must
-; interrupt the host processor to get them changed, or change them ourselves.
-;
-; Once SCSI-II tagged queueing is implemented, things will be even more
-; hairy, since contingent allegiance conditions exist on a per-target/lun
-; basis, and issuing a new command with a different tag would clear it.
-; In these cases, we must interrupt the host processor to get a request
-; added to the HEAD of the queue with the request sense command, or we
-; must automatically issue the request sense command.
-
-#if 0
- MOVE SCRATCH0 TO SFBR
- JUMP command_failed, IF 0x02
-#endif
-#if (CHIP == 710)
-#if defined(MVME16x_INTFLY)
-; For MVME16x (ie CHIP=710) we will force an INTFLY by triggering a software
-; interrupt (SW7). We can use SCRATCH, as we are about to jump to
-; schedule, which corrupts it anyway. Will probably remove this later,
-; but want to check performance effects first.
-
-#define INTFLY_ADDR 0xfff40070
-
- MOVE 0 TO SCRATCH0
- MOVE 0x80 TO SCRATCH1
- MOVE 0 TO SCRATCH2
- MOVE 0 TO SCRATCH3
- MOVE MEMORY 4, addr_scratch, INTFLY_ADDR
-#else
- INT int_norm_emulateintfly
-#endif
-#else
- INTFLY
-#endif
-#endif /* (CHIP != 700) && (CHIP != 70066) */
-#if (CHIP == 710)
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#endif
-#ifdef EVENTS
- INT int_EVENT_COMPLETE
-#endif
-#if (CHIP != 700) && (CHIP != 70066)
- JUMP schedule
-command_failed:
- INT int_err_check_condition
-#else
- INT int_norm_command_complete
-#endif
-
-;
-; wait_reselect
-;
-; PURPOSE : This is essentially the idle routine, where control lands
-; when there are no new processes to schedule. wait_reselect
-; waits for reselection, selection, and new commands.
-;
-; When a successful reselection occurs, with the aid
-; of fixed up code in each DSA, wait_reselect walks the
-; reconnect_dsa_queue, asking each dsa if the target ID
-; and LUN match its own.
-;
-; If a match is found, a call is made back to reselected_ok,
-; which through the miracles of self modifying code, extracts
-; the found DSA from the reconnect_dsa_queue and then
-; returns control to the DSA's thread of execution.
-;
-; INPUTS : NONE
-;
-; CALLS : OK
-;
-; MODIFIES : DSA,
-;
-; EXITS : On successful reselection, control is returned to the
-; DSA which called reselected_ok. If the WAIT RESELECT
-; was interrupted by a new command's arrival signaled by
-; SIG_P, control is passed to schedule. If the NCR is
-; selected, the host system is interrupted with an
-; int_err_selected which is usually responded to by
-; setting DSP to the target_abort address.
-
-ENTRY wait_reselect
-wait_reselect:
-#ifdef EVENTS
- int int_EVENT_IDLE
-#endif
-#ifdef DEBUG
- int int_debug_idle
-#endif
- WAIT RESELECT wait_reselect_failed
-
-reselected:
-#ifdef EVENTS
- int int_EVENT_RESELECT
-#endif
- CLEAR TARGET
- DMODE_MEMORY_TO_MEMORY
- ; Read all data needed to reestablish the nexus -
- MOVE 1, reselected_identify, WHEN MSG_IN
- ; We used to CLEAR ACK here.
-#if (CHIP != 700) && (CHIP != 70066)
-#ifdef DEBUG
- int int_debug_reselected
-#endif
-
- ; Point DSA at the current head of the disconnected queue.
- DMODE_MEMORY_TO_NCR
- MOVE MEMORY 4, reconnect_dsa_head, addr_scratch
- DMODE_MEMORY_TO_MEMORY
-#if (CHIP == 710)
- MOVE MEMORY 4, addr_scratch, saved_dsa
-#else
- CALL scratch_to_dsa
-#endif
-
- ; Fix the update-next pointer so that the reconnect_dsa_head
- ; pointer is the one that will be updated if this DSA is a hit
- ; and we remove it from the queue.
-
- MOVE MEMORY 4, addr_reconnect_dsa_head, reselected_ok_patch + 8
-#if (CHIP == 710)
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#endif
-
-ENTRY reselected_check_next
-reselected_check_next:
-#ifdef DEBUG
- INT int_debug_reselect_check
-#endif
- ; Check for a NULL pointer.
- MOVE DSA0 TO SFBR
- JUMP reselected_not_end, IF NOT 0
- MOVE DSA1 TO SFBR
- JUMP reselected_not_end, IF NOT 0
- MOVE DSA2 TO SFBR
- JUMP reselected_not_end, IF NOT 0
- MOVE DSA3 TO SFBR
- JUMP reselected_not_end, IF NOT 0
- INT int_err_unexpected_reselect
-
-reselected_not_end:
- ;
- ; XXX the ALU is only eight bits wide, and the assembler
-	; won't do the dirty work for us.  As long as dsa_check_reselect
- ; is negative, we need to sign extend with 1 bits to the full
- ; 32 bit width of the address.
- ;
- ; A potential work around would be to have a known alignment
- ; of the DSA structure such that the base address plus
- ; dsa_check_reselect doesn't require carrying from bytes
- ; higher than the LSB.
- ;
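As a rough illustration only (plain C, not part of the driver), the sketch below shows the byte-wise add-with-carry that the following instructions perform: adding the one-byte negative offset dsa_check_reselect to DSA0 and then adding 0xff with carry to the three higher bytes supplies the 1 bits of the sign extension across the full 32 bit address.

    #include <stdint.h>

    /* Illustrative only: add a small negative 8-bit offset to a 32-bit
     * address one byte at a time, the way the 8-bit SCRIPTS ALU must.
     * Adding 0xff with carry to each higher byte sign-extends the offset. */
    static uint32_t add_signed_byte_offset(uint32_t dsa, uint8_t offset)
    {
            uint32_t sum = (dsa & 0xff) + offset;           /* DSA0 + offset */
            uint32_t result = sum & 0xff;
            uint32_t carry = sum >> 8;
            for (int shift = 8; shift < 32; shift += 8) {   /* DSA1..DSA3 */
                    sum = ((dsa >> shift) & 0xff) + 0xff + carry;
                    result |= (sum & 0xff) << shift;
                    carry = sum >> 8;
            }
            return result;
    }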
-
- MOVE DSA0 TO SFBR
- MOVE SFBR + dsa_check_reselect TO SCRATCH0
- MOVE DSA1 TO SFBR
- MOVE SFBR + 0xff TO SCRATCH1 WITH CARRY
- MOVE DSA2 TO SFBR
- MOVE SFBR + 0xff TO SCRATCH2 WITH CARRY
- MOVE DSA3 TO SFBR
- MOVE SFBR + 0xff TO SCRATCH3 WITH CARRY
-
- DMODE_NCR_TO_MEMORY
- MOVE MEMORY 4, addr_scratch, reselected_check + 4
- DMODE_MEMORY_TO_MEMORY
-#if (CHIP == 710)
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#endif
-reselected_check:
- JUMP 0
-
-
-;
-;
-#if (CHIP == 710)
-; We have problems here - the memory move corrupts TEMP and DSA. This
-; routine is called from DSA code, and patched from many places. Scratch
-; is probably free when it is called.
-; We have to:
-; copy temp to scratch, one byte at a time
-; write scratch to patch a jump in place of the return
-; do the move memory
-; jump to the patched in return address
-; DSA is corrupt when we get here, and can be left corrupt
-
-ENTRY reselected_ok
-reselected_ok:
- MOVE TEMP0 TO SFBR
- MOVE SFBR TO SCRATCH0
- MOVE TEMP1 TO SFBR
- MOVE SFBR TO SCRATCH1
- MOVE TEMP2 TO SFBR
- MOVE SFBR TO SCRATCH2
- MOVE TEMP3 TO SFBR
- MOVE SFBR TO SCRATCH3
- MOVE MEMORY 4, addr_scratch, reselected_ok_jump + 4
-reselected_ok_patch:
- MOVE MEMORY 4, 0, 0
-reselected_ok_jump:
- JUMP 0
-#else
-ENTRY reselected_ok
-reselected_ok:
-reselected_ok_patch:
- MOVE MEMORY 4, 0, 0 ; Patched : first word
- ; is address of
- ; successful dsa_next
- ; Second word is last
- ; unsuccessful dsa_next,
- ; starting with
- ; dsa_reconnect_head
- ; We used to CLEAR ACK here.
-#ifdef DEBUG
- INT int_debug_reselected_ok
-#endif
-#ifdef DEBUG
- INT int_debug_check_dsa
-#endif
- RETURN ; Return control to where
-#endif
-#else
- INT int_norm_reselected
-#endif /* (CHIP != 700) && (CHIP != 70066) */
-
-selected:
- INT int_err_selected;
-
-;
-; A select or reselect failure can be caused by one of three conditions :
-; 1. SIG_P was set. This will be the case if the user has written
-; a new value to a previously NULL head of the issue queue.
-;
-; 2. The NCR53c810 was selected or reselected by another device.
-;
-; 3. The bus was already busy since we were selected or reselected
-; before starting the command.
-
-wait_reselect_failed:
-#ifdef EVENTS
- INT int_EVENT_RESELECT_FAILED
-#endif
-; Check selected bit.
-#if (CHIP == 710)
- ; Must work out how to tell if we are selected....
-#else
- MOVE SIST0 & 0x20 TO SFBR
- JUMP selected, IF 0x20
-#endif
-; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
- MOVE CTEST2 & 0x40 TO SFBR
- JUMP schedule, IF 0x40
-; Check connected bit.
-; FIXME: this needs to change if we support target mode
- MOVE ISTAT & 0x08 TO SFBR
- JUMP reselected, IF 0x08
-; FIXME : Something bogus happened, and we shouldn't fail silently.
-#if 0
- JUMP schedule
-#else
- INT int_debug_panic
-#endif
-
-
-select_failed:
-#if (CHIP == 710)
- ; Disable selection timer
- MOVE CTEST7 | 0x10 TO CTEST7
-#endif
-#ifdef EVENTS
- int int_EVENT_SELECT_FAILED
-#endif
-; Otherwise, mask the selected and reselected bits off SIST0
-#if (CHIP == 710)
- ; Let's assume we don't get selected for now
- MOVE SSTAT0 & 0x10 TO SFBR
-#else
- MOVE SIST0 & 0x30 TO SFBR
- JUMP selected, IF 0x20
-#endif
- JUMP reselected, IF 0x10
-; If SIGP is set, the user just gave us another command, and
-; we should restart or return to the scheduler.
-; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
- MOVE CTEST2 & 0x40 TO SFBR
- JUMP select, IF 0x40
-; Check connected bit.
-; FIXME: this needs to change if we support target mode
-; FIXME: is this really necessary?
- MOVE ISTAT & 0x08 TO SFBR
- JUMP reselected, IF 0x08
-; FIXME : Something bogus happened, and we shouldn't fail silently.
-#if 0
- JUMP schedule
-#else
- INT int_debug_panic
-#endif
-
-;
-; test_1
-; test_2
-;
-; PURPOSE : run some verification tests on the NCR. test_1
-; copies test_src to test_dest and interrupts the host
-; processor, testing for cache coherency and interrupt
-; problems in the process.
-;
-; test_2 runs a command with offsets relative to the
-; DSA on entry, and is useful for miscellaneous experimentation.
-;
-
-; Verify that interrupts are working correctly and that we don't
-; have a cache invalidation problem.
-
-ABSOLUTE test_src = 0, test_dest = 0
-ENTRY test_1
-test_1:
- MOVE MEMORY 4, test_src, test_dest
- INT int_test_1
-
-;
-; Run arbitrary commands, with test code establishing a DSA
-;
-
-ENTRY test_2
-test_2:
- CLEAR TARGET
-#if (CHIP == 710)
- ; Enable selection timer
-#ifdef NO_SELECTION_TIMEOUT
- MOVE CTEST7 & 0xff TO CTEST7
-#else
- MOVE CTEST7 & 0xef TO CTEST7
-#endif
-#endif
- SELECT ATN FROM 0, test_2_fail
- JUMP test_2_msgout, WHEN MSG_OUT
-ENTRY test_2_msgout
-test_2_msgout:
-#if (CHIP == 710)
- ; Disable selection timer
- MOVE CTEST7 | 0x10 TO CTEST7
-#endif
- MOVE FROM 8, WHEN MSG_OUT
- MOVE FROM 16, WHEN CMD
- MOVE FROM 24, WHEN DATA_IN
- MOVE FROM 32, WHEN STATUS
- MOVE FROM 40, WHEN MSG_IN
-#if (CHIP != 710)
- MOVE SCNTL2 & 0x7f TO SCNTL2
-#endif
- CLEAR ACK
- WAIT DISCONNECT
-test_2_fail:
-#if (CHIP == 710)
- ; Disable selection timer
- MOVE CTEST7 | 0x10 TO CTEST7
-#endif
- INT int_test_2
-
-ENTRY debug_break
-debug_break:
- INT int_debug_break
-
-;
-; initiator_abort
-; target_abort
-;
-; PURPOSE : Abort the currently established nexus from within initiator
-; or target mode.
-;
-;
-
-ENTRY target_abort
-target_abort:
- SET TARGET
- DISCONNECT
- CLEAR TARGET
- JUMP schedule
-
-ENTRY initiator_abort
-initiator_abort:
- SET ATN
-;
-; The SCSI-I specification says that targets may go into MSG out at
-; their leisure upon receipt of the ATN signal. On all versions of the
-; specification, we can't change phases until REQ transitions true->false,
-; so we need to sink/source one byte of data to allow the transition.
-;
-; For the sake of safety, we'll only source one byte of data in all
-; cases, but to accommodate the SCSI-I dain bramage, we'll sink an
-; arbitrary number of bytes.
- JUMP spew_cmd, WHEN CMD
- JUMP eat_msgin, WHEN MSG_IN
- JUMP eat_datain, WHEN DATA_IN
- JUMP eat_status, WHEN STATUS
- JUMP spew_dataout, WHEN DATA_OUT
- JUMP sated
-spew_cmd:
- MOVE 1, NCR53c7xx_zero, WHEN CMD
- JUMP sated
-eat_msgin:
- MOVE 1, NCR53c7xx_sink, WHEN MSG_IN
- JUMP eat_msgin, WHEN MSG_IN
- JUMP sated
-eat_status:
- MOVE 1, NCR53c7xx_sink, WHEN STATUS
- JUMP eat_status, WHEN STATUS
- JUMP sated
-eat_datain:
- MOVE 1, NCR53c7xx_sink, WHEN DATA_IN
- JUMP eat_datain, WHEN DATA_IN
- JUMP sated
-spew_dataout:
- MOVE 1, NCR53c7xx_zero, WHEN DATA_OUT
-sated:
-#if (CHIP != 710)
- MOVE SCNTL2 & 0x7f TO SCNTL2
-#endif
- MOVE 1, NCR53c7xx_msg_abort, WHEN MSG_OUT
- WAIT DISCONNECT
- INT int_norm_aborted
-
-#if (CHIP != 710)
-;
-; dsa_to_scratch
-; scratch_to_dsa
-;
-; PURPOSE :
-; The NCR chips cannot do a move memory instruction with the DSA register
-; as the source or destination. So, we provide a couple of subroutines
-; that let us switch between the DSA register and scratch register.
-;
-; Memory moves to/from the DSPS register also don't work, but we
-; don't use them.
-;
-;
-
-
-dsa_to_scratch:
- MOVE DSA0 TO SFBR
- MOVE SFBR TO SCRATCH0
- MOVE DSA1 TO SFBR
- MOVE SFBR TO SCRATCH1
- MOVE DSA2 TO SFBR
- MOVE SFBR TO SCRATCH2
- MOVE DSA3 TO SFBR
- MOVE SFBR TO SCRATCH3
- RETURN
-
-scratch_to_dsa:
- MOVE SCRATCH0 TO SFBR
- MOVE SFBR TO DSA0
- MOVE SCRATCH1 TO SFBR
- MOVE SFBR TO DSA1
- MOVE SCRATCH2 TO SFBR
- MOVE SFBR TO DSA2
- MOVE SCRATCH3 TO SFBR
- MOVE SFBR TO DSA3
- RETURN
-#endif
-
-#if (CHIP == 710)
-; Little patched jump, used to overcome problems with TEMP getting
-; corrupted on memory moves.
-
-jump_temp:
- JUMP 0
-#endif
diff --git a/drivers/scsi/53c7xx_d.h_shipped b/drivers/scsi/53c7xx_d.h_shipped
deleted file mode 100644
index 21d31b08ec3..00000000000
--- a/drivers/scsi/53c7xx_d.h_shipped
+++ /dev/null
@@ -1,2874 +0,0 @@
-/* DO NOT EDIT - Generated automatically by script_asm.pl */
-static u32 SCRIPT[] = {
-/*
-
-
-
-
-
-; 53c710 driver.  Modified from Drew Eckhardt's driver
-; for 53c810 by Richard Hirst [richard@sleepie.demon.co.uk]
-;
-; I have left the script for the 53c8xx family in here, as it is likely
-; to be useful to see what I changed when bug hunting.
-
-; NCR 53c810 driver, main script
-; Sponsored by
-; iX Multiuser Multitasking Magazine
-; hm@ix.de
-;
-; Copyright 1993, 1994, 1995 Drew Eckhardt
-; Visionary Computing
-; (Unix and Linux consulting and custom programming)
-; drew@PoohSticks.ORG
-; +1 (303) 786-7975
-;
-; TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
-;
-; PRE-ALPHA
-;
-; For more information, please consult
-;
-; NCR 53C810
-; PCI-SCSI I/O Processor
-; Data Manual
-;
-; NCR 53C710
-; SCSI I/O Processor
-; Programmers Guide
-;
-; NCR Microelectronics
-; 1635 Aeroplaza Drive
-; Colorado Springs, CO 80916
-; 1+ (719) 578-3400
-;
-; Toll free literature number
-; +1 (800) 334-5454
-;
-; IMPORTANT : This code is self modifying due to the limitations of
-; the NCR53c7,8xx series chips. Persons debugging this code with
-; the remote debugger should take this into account, and NOT set
-; breakpoints in modified instructions.
-;
-; Design:
-; The NCR53c7,8xx family of SCSI chips are busmasters with an onboard
-; microcontroller using a simple instruction set.
-;
-; So, to minimize the effects of interrupt latency, and to maximize
-; throughput, this driver offloads the practical maximum amount
-; of processing to the SCSI chip while still maintaining a common
-; structure.
-;
-; Where tradeoffs were needed between efficiency on the older
-; chips and the newer NCR53c800 series, the NCR53c800 series
-; was chosen.
-;
-; While the NCR53c700 and NCR53c700-66 lacked the facilities to fully
-; automate SCSI transfers without host processor intervention, this
-; isn't the case with the NCR53c710 and newer chips which allow
-;
-; - reads and writes to the internal registers from within the SCSI
-; scripts, allowing the SCSI SCRIPTS(tm) code to save processor
-; state so that multiple threads of execution are possible, and also
-; provide an ALU for loop control, etc.
-;
-; - table indirect addressing for some instructions. This allows
-;	pointers to be located relative to the DSA (Data Structure
-; Address) register.
-;
-; These features make it possible to implement a mailbox style interface,
-; where the same piece of code is run to handle I/O for multiple threads
-; at once, minimizing our need to relocate code.  Since the NCR53c700/
-; NCR53c800 series have a unique combination of features, making a
-; standard ingoing/outgoing mailbox system costly, I've modified it.
-;
-; - Mailboxes are a mixture of code and data. This lets us greatly
-; simplify the NCR53c810 code and do things that would otherwise
-; not be possible.
-;
-; The saved data pointer is now implemented as follows :
-;
-; Control flow has been architected such that if control reaches
-; munge_save_data_pointer, on a restore pointers message or
-; reconnection, a jump to the address formerly in the TEMP register
-; will allow the SCSI command to resume execution.
-;
-
-;
-; Note : the DSA structures must be aligned on 32 bit boundaries,
-; since the source and destination of MOVE MEMORY instructions
-; must share the same alignment and this is the alignment of the
-; NCR registers.
-;
-
-; For some systems (MVME166, for example) dmode is always the same, so don't
-; waste time writing it
-
-
-
-
-
-
-
-
-
-
-
-ABSOLUTE dsa_temp_lun = 0 ; Patch to lun for current dsa
-ABSOLUTE dsa_temp_next = 0 ; Patch to dsa next for current dsa
-ABSOLUTE dsa_temp_addr_next = 0 ; Patch to address of dsa next address
- ; for current dsa
-ABSOLUTE dsa_temp_sync = 0 ; Patch to address of per-target
- ; sync routine
-ABSOLUTE dsa_sscf_710 = 0 ; Patch to address of per-target
- ; sscf value (53c710)
-ABSOLUTE dsa_temp_target = 0 ; Patch to id for current dsa
-ABSOLUTE dsa_temp_addr_saved_pointer = 0; Patch to address of per-command
- ; saved data pointer
-ABSOLUTE dsa_temp_addr_residual = 0 ; Patch to address of per-command
- ; current residual code
-ABSOLUTE dsa_temp_addr_saved_residual = 0; Patch to address of per-command
- ; saved residual code
-ABSOLUTE dsa_temp_addr_new_value = 0 ; Address of value for JUMP operand
-ABSOLUTE dsa_temp_addr_array_value = 0 ; Address to copy to
-ABSOLUTE dsa_temp_addr_dsa_value = 0 ; Address of this DSA value
-
-;
-; Once a device has initiated reselection, we need to compare it
-; against the singly linked list of commands which have disconnected
-; and are pending reselection. These commands are maintained in
-; an unordered singly linked list of DSA structures, through the
-; DSA pointers at their 'centers' headed by the reconnect_dsa_head
-; pointer.
-;
-; To avoid complications in removing commands from the list,
-; I minimize the amount of expensive (at eight operations per
-; addition @ 500-600ns each) pointer operations which must
-; be done in the NCR driver by precomputing them on the
-; host processor during dsa structure generation.
-;
-; The fixed-up per DSA code knows how to recognize the nexus
-; associated with the corresponding SCSI command, and modifies
-; the source and destination pointers for the MOVE MEMORY
-; instruction which is executed when reselected_ok is called
-; to remove the command from the list. Similarly, DSA is
-; loaded with the address of the next DSA structure and
-; reselected_check_next is called if a failure occurs.
-;
-; Perhaps more concisely, the net effect of the mess is
-;
-; for (dsa = reconnect_dsa_head, dest = &reconnect_dsa_head,
-; src = NULL; dsa; dest = &dsa->next, dsa = dsa->next) {
-; src = &dsa->next;
-; if (target_id == dsa->id && target_lun == dsa->lun) {
-; *dest = *src;
-; break;
-; }
-; }
-;
-; if (!dsa)
-; error (int_err_unexpected_reselect);
-; else
-; longjmp (dsa->jump_resume, 0);
-;
-;
-
-
-; Define DSA structure used for mailboxes
-ENTRY dsa_code_template
-dsa_code_template:
-ENTRY dsa_code_begin
-dsa_code_begin:
-; RGH: Don't care about TEMP and DSA here
-
- MOVE MEMORY 4, dsa_temp_addr_dsa_value, addr_scratch
-
-at 0x00000000 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
- MOVE MEMORY 4, addr_scratch, saved_dsa
-
-at 0x00000003 : */ 0xc0000004,0x00000000,0x00000000,
-/*
- ; We are about to go and select the device, so must set SSCF bits
- MOVE MEMORY 4, dsa_sscf_710, addr_scratch
-
-at 0x00000006 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
- MOVE SCRATCH3 TO SFBR
-
-at 0x00000009 : */ 0x72370000,0x00000000,
-/*
-
-
-
- MOVE SFBR TO SBCL
-
-at 0x0000000b : */ 0x6a0b0000,0x00000000,
-/*
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x0000000d : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
- CALL select
-
-at 0x00000010 : */ 0x88080000,0x000001f8,
-/*
-; Handle the phase mismatch which may have resulted from the
-; MOVE FROM dsa_msgout if we returned here. The CLEAR ATN
-; may or may not be necessary, and we should update script_asm.pl
-; to handle multiple pieces.
- CLEAR ATN
-
-at 0x00000012 : */ 0x60000008,0x00000000,
-/*
- CLEAR ACK
-
-at 0x00000014 : */ 0x60000040,0x00000000,
-/*
-
-; Replace second operand with address of JUMP instruction dest operand
-; in schedule table for this DSA. Becomes dsa_jump_dest in 53c7,8xx.c.
-ENTRY dsa_code_fix_jump
-dsa_code_fix_jump:
- MOVE MEMORY 4, NOP_insn, 0
-
-at 0x00000016 : */ 0xc0000004,0x00000000,0x00000000,
-/*
- JUMP select_done
-
-at 0x00000019 : */ 0x80080000,0x00000230,
-/*
-
-; wrong_dsa loads the DSA register with the value of the dsa_next
-; field.
-;
-wrong_dsa:
-
-; NOTE DSA is corrupt when we arrive here!
-
-; Patch the MOVE MEMORY INSTRUCTION such that
-; the destination address is the address of the OLD
-; next pointer.
-;
- MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok_patch + 8
-
-at 0x0000001b : */ 0xc0000004,0x00000000,0x000007ec,
-/*
-
-;
-; Move the _contents_ of the next pointer into the DSA register as
-; the next I_T_L or I_T_L_Q tuple to check against the established
-; nexus.
-;
- MOVE MEMORY 4, dsa_temp_next, addr_scratch
-
-at 0x0000001e : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
- MOVE MEMORY 4, addr_scratch, saved_dsa
-
-at 0x00000021 : */ 0xc0000004,0x00000000,0x00000000,
-/*
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x00000024 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
- JUMP reselected_check_next
-
-at 0x00000027 : */ 0x80080000,0x000006f0,
-/*
-
-ABSOLUTE dsa_save_data_pointer = 0
-ENTRY dsa_code_save_data_pointer
-dsa_code_save_data_pointer:
-
- ; When we get here, TEMP has been saved in jump_temp+4, DSA is corrupt
- ; We MUST return with DSA correct
- MOVE MEMORY 4, jump_temp+4, dsa_temp_addr_saved_pointer
-
-at 0x00000029 : */ 0xc0000004,0x000009c8,0x00000000,
-/*
-; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
- MOVE MEMORY 24, dsa_temp_addr_residual, dsa_temp_addr_saved_residual
-
-at 0x0000002c : */ 0xc0000018,0x00000000,0x00000000,
-/*
- CLEAR ACK
-
-at 0x0000002f : */ 0x60000040,0x00000000,
-/*
-
-
-
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x00000031 : */ 0xc0000004,0x00000000,0x00000000,
-/*
- JUMP jump_temp
-
-at 0x00000034 : */ 0x80080000,0x000009c4,
-/*
-
-ABSOLUTE dsa_restore_pointers = 0
-ENTRY dsa_code_restore_pointers
-dsa_code_restore_pointers:
-
- ; TEMP and DSA are corrupt when we get here, but who cares!
- MOVE MEMORY 4, dsa_temp_addr_saved_pointer, jump_temp + 4
-
-at 0x00000036 : */ 0xc0000004,0x00000000,0x000009c8,
-/*
-; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
- MOVE MEMORY 24, dsa_temp_addr_saved_residual, dsa_temp_addr_residual
-
-at 0x00000039 : */ 0xc0000018,0x00000000,0x00000000,
-/*
- CLEAR ACK
-
-at 0x0000003c : */ 0x60000040,0x00000000,
-/*
- ; Restore DSA, note we don't care about TEMP
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x0000003e : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
- JUMP jump_temp
-
-at 0x00000041 : */ 0x80080000,0x000009c4,
-/*
-
-
-ABSOLUTE dsa_check_reselect = 0
-; dsa_check_reselect determines whether or not the current target and
-; lun match the current DSA
-ENTRY dsa_code_check_reselect
-dsa_code_check_reselect:
-
-
-
- MOVE LCRC TO SFBR ; LCRC has our ID and his ID bits set
-
-at 0x00000043 : */ 0x72230000,0x00000000,
-/*
- JUMP REL (wrong_dsa), IF NOT dsa_temp_target, AND MASK 0x80
-
-at 0x00000045 : */ 0x80848000,0x00ffff50,
-/*
-
-
-
-
-
-;
-; Hack - move to scratch first, since SFBR is not writeable
-; via the CPU and hence a MOVE MEMORY instruction.
-;
-
- MOVE MEMORY 1, reselected_identify, addr_scratch
-
-at 0x00000047 : */ 0xc0000001,0x00000000,0x00000000,
-/*
-
-
- ; BIG ENDIAN ON MVME16x
- MOVE SCRATCH3 TO SFBR
-
-at 0x0000004a : */ 0x72370000,0x00000000,
-/*
-
-
-
-; FIXME : we need to accommodate bit fielded and binary here for '7xx/'8xx chips
-; Are you sure about that? richard@sleepie.demon.co.uk
- JUMP REL (wrong_dsa), IF NOT dsa_temp_lun, AND MASK 0xf8
-
-at 0x0000004c : */ 0x8084f800,0x00ffff34,
-/*
-; Patch the MOVE MEMORY INSTRUCTION such that
-; the source address is the address of this dsa's
-; next pointer.
- MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok_patch + 4
-
-at 0x0000004e : */ 0xc0000004,0x00000000,0x000007e8,
-/*
- CALL reselected_ok
-
-at 0x00000051 : */ 0x88080000,0x00000798,
-/*
-
-; Restore DSA following memory moves in reselected_ok
-; dsa_temp_sync doesn't really care about DSA, but it has an
-; optional debug INT so a valid DSA is a good idea.
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x00000053 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
- CALL dsa_temp_sync
-
-at 0x00000056 : */ 0x88080000,0x00000000,
-/*
-; Release ACK on the IDENTIFY message _after_ we've set the synchronous
-; transfer parameters!
- CLEAR ACK
-
-at 0x00000058 : */ 0x60000040,0x00000000,
-/*
-; Implicitly restore pointers on reselection, so a RETURN
-; will transfer control back to the right spot.
- CALL REL (dsa_code_restore_pointers)
-
-at 0x0000005a : */ 0x88880000,0x00ffff68,
-/*
- RETURN
-
-at 0x0000005c : */ 0x90080000,0x00000000,
-/*
-ENTRY dsa_zero
-dsa_zero:
-ENTRY dsa_code_template_end
-dsa_code_template_end:
-
-; Sanity check : dsa_fields_start should equal dsa_code_template_end -
-; dsa_zero ; if it doesn't, puke.
-
-ABSOLUTE dsa_fields_start = 0 ; Sanity marker
- ; pad 48 bytes (fix this RSN)
-ABSOLUTE dsa_next = 48 ; len 4 Next DSA
- ; del 4 Previous DSA address
-ABSOLUTE dsa_cmnd = 56 ; len 4 Scsi_Cmnd * for this thread.
-ABSOLUTE dsa_select = 60 ; len 4 Device ID, Period, Offset for
- ; table indirect select
-ABSOLUTE dsa_msgout = 64 ; len 8 table indirect move parameter for
- ; select message
-ABSOLUTE dsa_cmdout = 72 ; len 8 table indirect move parameter for
- ; command
-ABSOLUTE dsa_dataout = 80 ; len 4 code pointer for dataout
-ABSOLUTE dsa_datain = 84 ; len 4 code pointer for datain
-ABSOLUTE dsa_msgin = 88 ; len 8 table indirect move for msgin
-ABSOLUTE dsa_status = 96 ; len 8 table indirect move for status byte
-ABSOLUTE dsa_msgout_other = 104 ; len 8 table indirect for normal message out
- ; (Synchronous transfer negotiation, etc).
-ABSOLUTE dsa_end = 112
-
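Read as a C structure, the offsets above describe a layout along the following lines. This is purely illustrative (the struct name and field names are hypothetical); the driver itself addresses these fields by offset rather than through a struct.

    #include <stdint.h>

    struct dsa_fields {                     /* illustrative only */
            uint8_t  code_pad[48];          /* fixed-up SCRIPTS code / padding */
            uint32_t next;                  /* dsa_next: next DSA in the queue */
            uint32_t previous;              /* previous DSA address */
            uint32_t cmnd;                  /* Scsi_Cmnd * for this thread */
            uint32_t select;                /* ID, period, offset for select */
            uint32_t msgout[2];             /* table indirect: select message */
            uint32_t cmdout[2];             /* table indirect: command */
            uint32_t dataout;               /* code pointer for data out */
            uint32_t datain;                /* code pointer for data in */
            uint32_t msgin[2];              /* table indirect: message in */
            uint32_t status[2];             /* table indirect: status byte */
            uint32_t msgout_other[2];       /* table indirect: other message out */
    };                                      /* sizeof == 112 == dsa_end */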
-ABSOLUTE schedule = 0 ; Array of JUMP dsa_begin or JUMP (next),
- ; terminated by a call to JUMP wait_reselect
-
-; Linked lists of DSA structures
-ABSOLUTE reconnect_dsa_head = 0 ; Link list of DSAs which can reconnect
-ABSOLUTE addr_reconnect_dsa_head = 0 ; Address of variable containing
- ; address of reconnect_dsa_head
-
-; These select the source and destination of a MOVE MEMORY instruction
-ABSOLUTE dmode_memory_to_memory = 0x0
-ABSOLUTE dmode_memory_to_ncr = 0x0
-ABSOLUTE dmode_ncr_to_memory = 0x0
-
-ABSOLUTE addr_scratch = 0x0
-ABSOLUTE addr_temp = 0x0
-
-ABSOLUTE saved_dsa = 0x0
-ABSOLUTE emulfly = 0x0
-ABSOLUTE addr_dsa = 0x0
-
-
-
-; Interrupts -
-; MSB indicates type
-; 0 handle error condition
-; 1 handle message
-; 2 handle normal condition
-; 3 debugging interrupt
-; 4 testing interrupt
-; Next byte indicates specific error
-
-; XXX not yet implemented, I'm not sure if I want to -
-; Next byte indicates the routine the error occurred in
-; The LSB indicates the specific place the error occurred
-
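Purely as an illustration of this encoding (not code from the driver), a host-side handler could split one of the interrupt values defined below like this:

    #include <stdint.h>

    /* Illustrative decode: the top byte selects the class (0 error,
     * 1 message, 2 normal, 3 debug, 4 test) and the next byte the
     * specific condition. */
    static inline unsigned int int_class(uint32_t dsps) { return (dsps >> 24) & 0xff; }
    static inline unsigned int int_code(uint32_t dsps)  { return (dsps >> 16) & 0xff; }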
-ABSOLUTE int_err_unexpected_phase = 0x00000000 ; Unexpected phase encountered
-ABSOLUTE int_err_selected = 0x00010000 ; SELECTED (nee RESELECTED)
-ABSOLUTE int_err_unexpected_reselect = 0x00020000
-ABSOLUTE int_err_check_condition = 0x00030000
-ABSOLUTE int_err_no_phase = 0x00040000
-ABSOLUTE int_msg_wdtr = 0x01000000 ; WDTR message received
-ABSOLUTE int_msg_sdtr = 0x01010000 ; SDTR received
-ABSOLUTE int_msg_1 = 0x01020000 ; single byte special message
- ; received
-
-ABSOLUTE int_norm_select_complete = 0x02000000 ; Select complete, reprogram
- ; registers.
-ABSOLUTE int_norm_reselect_complete = 0x02010000 ; Nexus established
-ABSOLUTE int_norm_command_complete = 0x02020000 ; Command complete
-ABSOLUTE int_norm_disconnected = 0x02030000 ; Disconnected
-ABSOLUTE int_norm_aborted = 0x02040000	; Aborted *dsa
-ABSOLUTE int_norm_reset = 0x02050000 ; Generated BUS reset.
-ABSOLUTE int_norm_emulateintfly = 0x02060000 ; 53C710 Emulated intfly
-ABSOLUTE int_debug_break = 0x03000000 ; Break point
-
-ABSOLUTE int_debug_panic = 0x030b0000 ; Panic driver
-
-
-ABSOLUTE int_test_1 = 0x04000000 ; Test 1 complete
-ABSOLUTE int_test_2 = 0x04010000 ; Test 2 complete
-ABSOLUTE int_test_3 = 0x04020000 ; Test 3 complete
-
-
-; These should start with 0x05000000, with low bits incrementing for
-; each one.
-
-
-
-ABSOLUTE NCR53c7xx_msg_abort = 0 ; Pointer to abort message
-ABSOLUTE NCR53c7xx_msg_reject = 0 ; Pointer to reject message
-ABSOLUTE NCR53c7xx_zero = 0 ; long with zero in it, use for source
-ABSOLUTE NCR53c7xx_sink = 0 ; long to dump worthless data in
-ABSOLUTE NOP_insn = 0 ; NOP instruction
-
-; Pointer to message, potentially multi-byte
-ABSOLUTE msg_buf = 0
-
-; Pointer to holding area for reselection information
-ABSOLUTE reselected_identify = 0
-ABSOLUTE reselected_tag = 0
-
-; Request sense command pointer; it's a 6 byte command and should
-; be constant for all commands, since we always want 16 bytes of
-; sense and we don't need to change any fields as we did under
-; SCSI-I, when we actually cared about the LUN field.
-;EXTERNAL NCR53c7xx_sense ; Request sense command
-
-
-; dsa_schedule
-; PURPOSE : after a DISCONNECT message has been received, and pointers
-; saved, insert the current DSA structure at the head of the
-; disconnected queue and fall through to the scheduler.
-;
-; CALLS : OK
-;
-; INPUTS : dsa - current DSA structure, reconnect_dsa_head - list
-; of disconnected commands
-;
-; MODIFIES : SCRATCH, reconnect_dsa_head
-;
-; EXITS : always passes control to schedule
-
-ENTRY dsa_schedule
-dsa_schedule:
-
-
-
-
-;
-; Calculate the address of the next pointer within the DSA
-; structure of the command that is currently disconnecting
-;
-
- ; Read what should be the current DSA from memory - actual DSA
- ; register is probably corrupt
- MOVE MEMORY 4, saved_dsa, addr_scratch
-
-at 0x0000005e : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
- MOVE SCRATCH0 + dsa_next TO SCRATCH0
-
-at 0x00000061 : */ 0x7e343000,0x00000000,
-/*
- MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
-
-at 0x00000063 : */ 0x7f350000,0x00000000,
-/*
- MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
-
-at 0x00000065 : */ 0x7f360000,0x00000000,
-/*
- MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
-
-at 0x00000067 : */ 0x7f370000,0x00000000,
-/*
-
-; Point the next field of this DSA structure at the current disconnected
-; list
-
- MOVE MEMORY 4, addr_scratch, dsa_schedule_insert + 8
-
-at 0x00000069 : */ 0xc0000004,0x00000000,0x000001b8,
-/*
-
-dsa_schedule_insert:
- MOVE MEMORY 4, reconnect_dsa_head, 0
-
-at 0x0000006c : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-; And update the head pointer.
-
- ; Read what should be the current DSA from memory - actual DSA
- ; register is probably corrupt
- MOVE MEMORY 4, saved_dsa, addr_scratch
-
-at 0x0000006f : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
-
- MOVE MEMORY 4, addr_scratch, reconnect_dsa_head
-
-at 0x00000072 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
-
-
-
- CLEAR ACK
-
-at 0x00000075 : */ 0x60000040,0x00000000,
-/*
-
-
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x00000077 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
- WAIT DISCONNECT
-
-at 0x0000007a : */ 0x48000000,0x00000000,
-/*
-
-
-
-
-
-
- JUMP schedule
-
-at 0x0000007c : */ 0x80080000,0x00000000,
-/*
-
-
-;
-; select
-;
-; PURPOSE : establish a nexus for the SCSI command referenced by DSA.
-; On success, the current DSA structure is removed from the issue
-; queue. Usually, this is entered as a fall-through from schedule,
-; although the contingent allegiance handling code will write
-; the select entry address to the DSP to restart a command as a
-; REQUEST SENSE. A message is sent (usually IDENTIFY, although
-; additional SDTR or WDTR messages may be sent). COMMAND OUT
-; is handled.
-;
-; INPUTS : DSA - SCSI command, issue_dsa_head
-;
-; CALLS : NOT OK
-;
-; MODIFIES : SCRATCH, issue_dsa_head
-;
-; EXITS : on reselection or selection, go to select_failed
-; otherwise, RETURN so control is passed back to
-; dsa_begin.
-;
-
-ENTRY select
-select:
-
-
-
-
-
-
-
-
- CLEAR TARGET
-
-at 0x0000007e : */ 0x60000200,0x00000000,
-/*
-
-; XXX
-;
-; In effect, SELECTION operations are backgrounded, with execution
-; continuing until code which waits for REQ or a fatal interrupt is
-; encountered.
-;
-; So, for more performance, we could overlap the code which removes
-; the command from the NCR's issue queue with the selection, but
-; at this point I don't want to deal with the error recovery.
-;
-
-
-
- ; Enable selection timer
-
-
-
- MOVE CTEST7 & 0xef TO CTEST7
-
-at 0x00000080 : */ 0x7c1bef00,0x00000000,
-/*
-
-
- SELECT ATN FROM dsa_select, select_failed
-
-at 0x00000082 : */ 0x4300003c,0x00000828,
-/*
- JUMP select_msgout, WHEN MSG_OUT
-
-at 0x00000084 : */ 0x860b0000,0x00000218,
-/*
-ENTRY select_msgout
-select_msgout:
-
- ; Disable selection timer
- MOVE CTEST7 | 0x10 TO CTEST7
-
-at 0x00000086 : */ 0x7a1b1000,0x00000000,
-/*
-
- MOVE FROM dsa_msgout, WHEN MSG_OUT
-
-at 0x00000088 : */ 0x1e000000,0x00000040,
-/*
-
-
-
-
-
-
-
-
-
-
- RETURN
-
-at 0x0000008a : */ 0x90080000,0x00000000,
-/*
-
-;
-; select_done
-;
-; PURPOSE: continue on to normal data transfer; called as the exit
-; point from dsa_begin.
-;
-; INPUTS: dsa
-;
-; CALLS: OK
-;
-;
-
-select_done:
-
-; NOTE DSA is corrupt when we arrive here!
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x0000008c : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
-
-
-
-
-
-; After a successful selection, we should get either a CMD phase or
-; some transfer request negotiation message.
-
- JUMP cmdout, WHEN CMD
-
-at 0x0000008f : */ 0x820b0000,0x0000025c,
-/*
- INT int_err_unexpected_phase, WHEN NOT MSG_IN
-
-at 0x00000091 : */ 0x9f030000,0x00000000,
-/*
-
-select_msg_in:
- CALL msg_in, WHEN MSG_IN
-
-at 0x00000093 : */ 0x8f0b0000,0x0000041c,
-/*
- JUMP select_msg_in, WHEN MSG_IN
-
-at 0x00000095 : */ 0x870b0000,0x0000024c,
-/*
-
-cmdout:
- INT int_err_unexpected_phase, WHEN NOT CMD
-
-at 0x00000097 : */ 0x9a030000,0x00000000,
-/*
-
-
-
-ENTRY cmdout_cmdout
-cmdout_cmdout:
-
- MOVE FROM dsa_cmdout, WHEN CMD
-
-at 0x00000099 : */ 0x1a000000,0x00000048,
-/*
-
-
-
-
-;
-; data_transfer
-; other_out
-; other_in
-; other_transfer
-;
-; PURPOSE : handle the main data transfer for a SCSI command in
-; several parts. In the first part, data_transfer, DATA_IN
-; and DATA_OUT phases are allowed, with the user provided
-; code (usually dynamically generated based on the scatter/gather
-; list associated with a SCSI command) called to handle these
-; phases.
-;
-; After control has passed to one of the user provided
-; DATA_IN or DATA_OUT routines, back calls are made to
-; other_transfer_in or other_transfer_out to handle non-DATA IN
-; and DATA OUT phases respectively, with the state of the active
-; data pointer being preserved in TEMP.
-;
-; On completion, the user code passes control to other_transfer
-; which causes DATA_IN and DATA_OUT to result in unexpected_phase
-; interrupts so that data overruns may be trapped.
-;
-; INPUTS : DSA - SCSI command
-;
-; CALLS : OK in data_transfer_start, not ok in other_out and other_in, ok in
-; other_transfer
-;
-; MODIFIES : SCRATCH
-;
-; EXITS : if STATUS IN is detected, signifying command completion,
-; the NCR jumps to command_complete. If MSG IN occurs, a
-; CALL is made to msg_in. Otherwise, other_transfer runs in
-; an infinite loop.
-;
-
-ENTRY data_transfer
-data_transfer:
- JUMP cmdout_cmdout, WHEN CMD
-
-at 0x0000009b : */ 0x820b0000,0x00000264,
-/*
- CALL msg_in, WHEN MSG_IN
-
-at 0x0000009d : */ 0x8f0b0000,0x0000041c,
-/*
- INT int_err_unexpected_phase, WHEN MSG_OUT
-
-at 0x0000009f : */ 0x9e0b0000,0x00000000,
-/*
- JUMP do_dataout, WHEN DATA_OUT
-
-at 0x000000a1 : */ 0x800b0000,0x000002a4,
-/*
- JUMP do_datain, WHEN DATA_IN
-
-at 0x000000a3 : */ 0x810b0000,0x000002fc,
-/*
- JUMP command_complete, WHEN STATUS
-
-at 0x000000a5 : */ 0x830b0000,0x0000065c,
-/*
- JUMP data_transfer
-
-at 0x000000a7 : */ 0x80080000,0x0000026c,
-/*
-ENTRY end_data_transfer
-end_data_transfer:
-
-;
-; FIXME: On NCR53c700 and NCR53c700-66 chips, do_dataout/do_datain
-; should be fixed up whenever the nexus changes so it can point to the
-; correct routine for that command.
-;
-
-
-; Nasty jump to dsa->dataout
-do_dataout:
-
- MOVE MEMORY 4, saved_dsa, addr_scratch
-
-at 0x000000a9 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
- MOVE SCRATCH0 + dsa_dataout TO SCRATCH0
-
-at 0x000000ac : */ 0x7e345000,0x00000000,
-/*
- MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
-
-at 0x000000ae : */ 0x7f350000,0x00000000,
-/*
- MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
-
-at 0x000000b0 : */ 0x7f360000,0x00000000,
-/*
- MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
-
-at 0x000000b2 : */ 0x7f370000,0x00000000,
-/*
-
- MOVE MEMORY 4, addr_scratch, dataout_to_jump + 4
-
-at 0x000000b4 : */ 0xc0000004,0x00000000,0x000002e0,
-/*
-
-dataout_to_jump:
- MOVE MEMORY 4, 0, dataout_jump + 4
-
-at 0x000000b7 : */ 0xc0000004,0x00000000,0x000002f8,
-/*
-
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x000000ba : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-dataout_jump:
- JUMP 0
-
-at 0x000000bd : */ 0x80080000,0x00000000,
-/*
-
-; Nasty jump to dsa->datain
-do_datain:
-
- MOVE MEMORY 4, saved_dsa, addr_scratch
-
-at 0x000000bf : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
- MOVE SCRATCH0 + dsa_datain TO SCRATCH0
-
-at 0x000000c2 : */ 0x7e345400,0x00000000,
-/*
- MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
-
-at 0x000000c4 : */ 0x7f350000,0x00000000,
-/*
- MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
-
-at 0x000000c6 : */ 0x7f360000,0x00000000,
-/*
- MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
-
-at 0x000000c8 : */ 0x7f370000,0x00000000,
-/*
-
- MOVE MEMORY 4, addr_scratch, datain_to_jump + 4
-
-at 0x000000ca : */ 0xc0000004,0x00000000,0x00000338,
-/*
-
-ENTRY datain_to_jump
-datain_to_jump:
- MOVE MEMORY 4, 0, datain_jump + 4
-
-at 0x000000cd : */ 0xc0000004,0x00000000,0x00000350,
-/*
-
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x000000d0 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
-
-datain_jump:
- JUMP 0
-
-at 0x000000d3 : */ 0x80080000,0x00000000,
-/*
-
-
-
-; Note that other_out and other_in loop while non-data phases are
-; encountered, so we only execute return statements when we
-; can go on to the next data phase block move statement.
-
-ENTRY other_out
-other_out:
-
-
-
- INT int_err_unexpected_phase, WHEN CMD
-
-at 0x000000d5 : */ 0x9a0b0000,0x00000000,
-/*
- JUMP msg_in_restart, WHEN MSG_IN
-
-at 0x000000d7 : */ 0x870b0000,0x000003fc,
-/*
- INT int_err_unexpected_phase, WHEN MSG_OUT
-
-at 0x000000d9 : */ 0x9e0b0000,0x00000000,
-/*
- INT int_err_unexpected_phase, WHEN DATA_IN
-
-at 0x000000db : */ 0x990b0000,0x00000000,
-/*
- JUMP command_complete, WHEN STATUS
-
-at 0x000000dd : */ 0x830b0000,0x0000065c,
-/*
- JUMP other_out, WHEN NOT DATA_OUT
-
-at 0x000000df : */ 0x80030000,0x00000354,
-/*
-
-; TEMP should be OK, as we got here from a call in the user dataout code.
-
- RETURN
-
-at 0x000000e1 : */ 0x90080000,0x00000000,
-/*
-
-ENTRY other_in
-other_in:
-
-
-
- INT int_err_unexpected_phase, WHEN CMD
-
-at 0x000000e3 : */ 0x9a0b0000,0x00000000,
-/*
- JUMP msg_in_restart, WHEN MSG_IN
-
-at 0x000000e5 : */ 0x870b0000,0x000003fc,
-/*
- INT int_err_unexpected_phase, WHEN MSG_OUT
-
-at 0x000000e7 : */ 0x9e0b0000,0x00000000,
-/*
- INT int_err_unexpected_phase, WHEN DATA_OUT
-
-at 0x000000e9 : */ 0x980b0000,0x00000000,
-/*
- JUMP command_complete, WHEN STATUS
-
-at 0x000000eb : */ 0x830b0000,0x0000065c,
-/*
- JUMP other_in, WHEN NOT DATA_IN
-
-at 0x000000ed : */ 0x81030000,0x0000038c,
-/*
-
-; TEMP should be OK, as we got here from a call in the user datain code.
-
- RETURN
-
-at 0x000000ef : */ 0x90080000,0x00000000,
-/*
-
-
-ENTRY other_transfer
-other_transfer:
- INT int_err_unexpected_phase, WHEN CMD
-
-at 0x000000f1 : */ 0x9a0b0000,0x00000000,
-/*
- CALL msg_in, WHEN MSG_IN
-
-at 0x000000f3 : */ 0x8f0b0000,0x0000041c,
-/*
- INT int_err_unexpected_phase, WHEN MSG_OUT
-
-at 0x000000f5 : */ 0x9e0b0000,0x00000000,
-/*
- INT int_err_unexpected_phase, WHEN DATA_OUT
-
-at 0x000000f7 : */ 0x980b0000,0x00000000,
-/*
- INT int_err_unexpected_phase, WHEN DATA_IN
-
-at 0x000000f9 : */ 0x990b0000,0x00000000,
-/*
- JUMP command_complete, WHEN STATUS
-
-at 0x000000fb : */ 0x830b0000,0x0000065c,
-/*
- JUMP other_transfer
-
-at 0x000000fd : */ 0x80080000,0x000003c4,
-/*
-
-;
-; msg_in_restart
-; msg_in
-; munge_msg
-;
-; PURPOSE : process messages from a target. msg_in is called when the
-; caller hasn't read the first byte of the message.  munge_msg
-; is called when the caller has read the first byte of the message,
-; and left it in SFBR. msg_in_restart is called when the caller
-; hasn't read the first byte of the message, and wishes RETURN
-; to transfer control back to the address of the conditional
-; CALL instruction rather than to the instruction after it.
-;
-; Various int_* interrupts are generated when the host system
-; needs to intervene, as is the case with SDTR, WDTR, and
-; INITIATE RECOVERY messages.
-;
-; When the host system handles one of these interrupts,
-; it can respond by reentering at reject_message,
-; which rejects the message and returns control to
-; the caller of msg_in or munge_msg, accept_message
-; which clears ACK and returns control, or respond_message
-; which sends the message pointed to by the DSA
-; msgout_other table indirect field.
-;
-; DISCONNECT messages are handled by moving the command
-; to the reconnect_dsa_queue.
-
-; NOTE: DSA should be valid when we get here - we cannot save both it
-; and TEMP in this routine.
-
-;
-; INPUTS : DSA - SCSI COMMAND, SFBR - first byte of message (munge_msg
-; only)
-;
-; CALLS : NO. The TEMP register isn't backed up to allow nested calls.
-;
-; MODIFIES : SCRATCH, DSA on DISCONNECT
-;
-; EXITS : On receipt of SAVE DATA POINTER, RESTORE POINTERS,
-; and normal return from message handlers running under
-; Linux, control is returned to the caller. Receipt
-; of DISCONNECT messages passes control to dsa_schedule.
-;
-ENTRY msg_in_restart
-msg_in_restart:
-; XXX - hackish
-;
-; Since it's easier to debug changes to the statically
-; compiled code, rather than the dynamically generated
-; stuff, such as
-;
-; MOVE x, y, WHEN data_phase
-; CALL other_z, WHEN NOT data_phase
-; MOVE x, y, WHEN data_phase
-;
-; I'd like to have certain routines (notably the message handler)
-; restart on the conditional call rather than the next instruction.
-;
-; So, subtract 8 from the return address
-
- MOVE TEMP0 + 0xf8 TO TEMP0
-
-at 0x000000ff : */ 0x7e1cf800,0x00000000,
-/*
- MOVE TEMP1 + 0xff TO TEMP1 WITH CARRY
-
-at 0x00000101 : */ 0x7f1dff00,0x00000000,
-/*
- MOVE TEMP2 + 0xff TO TEMP2 WITH CARRY
-
-at 0x00000103 : */ 0x7f1eff00,0x00000000,
-/*
- MOVE TEMP3 + 0xff TO TEMP3 WITH CARRY
-
-at 0x00000105 : */ 0x7f1fff00,0x00000000,
-/*
-
-ENTRY msg_in
-msg_in:
- MOVE 1, msg_buf, WHEN MSG_IN
-
-at 0x00000107 : */ 0x0f000001,0x00000000,
-/*
-
-munge_msg:
- JUMP munge_extended, IF 0x01 ; EXTENDED MESSAGE
-
-at 0x00000109 : */ 0x800c0001,0x00000574,
-/*
- JUMP munge_2, IF 0x20, AND MASK 0xdf ; two byte message
-
-at 0x0000010b : */ 0x800cdf20,0x00000464,
-/*
-;
-; XXX - I've seen a handful of broken SCSI devices which fail to issue
-; a SAVE POINTERS message before disconnecting in the middle of
-; a transfer, assuming that the DATA POINTER will be implicitly
-; restored.
-;
-; Historically, I've often done an implicit save when the DISCONNECT
-; message is processed. We may want to consider having the option of
-; doing that here.
-;
- JUMP munge_save_data_pointer, IF 0x02 ; SAVE DATA POINTER
-
-at 0x0000010d : */ 0x800c0002,0x0000046c,
-/*
- JUMP munge_restore_pointers, IF 0x03 ; RESTORE POINTERS
-
-at 0x0000010f : */ 0x800c0003,0x00000518,
-/*
- JUMP munge_disconnect, IF 0x04 ; DISCONNECT
-
-at 0x00000111 : */ 0x800c0004,0x0000056c,
-/*
- INT int_msg_1, IF 0x07 ; MESSAGE REJECT
-
-at 0x00000113 : */ 0x980c0007,0x01020000,
-/*
- INT int_msg_1, IF 0x0f ; INITIATE RECOVERY
-
-at 0x00000115 : */ 0x980c000f,0x01020000,
-/*
-
-
-
- JUMP reject_message
-
-at 0x00000117 : */ 0x80080000,0x00000604,
-/*
-
-munge_2:
- JUMP reject_message
-
-at 0x00000119 : */ 0x80080000,0x00000604,
-/*
-;
-; The SCSI standard allows targets to recover from transient
-; error conditions by backing up the data pointer with a
-; RESTORE POINTERS message.
-;
-; So, we must save and restore the _residual_ code as well as
-; the current instruction pointer. Because of this messiness,
-; it is simpler to put dynamic code in the dsa for this and to
-; just do a simple jump down there.
-;
-
-munge_save_data_pointer:
-
- ; We have something in TEMP here, so first we must save that
- MOVE TEMP0 TO SFBR
-
-at 0x0000011b : */ 0x721c0000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH0
-
-at 0x0000011d : */ 0x6a340000,0x00000000,
-/*
- MOVE TEMP1 TO SFBR
-
-at 0x0000011f : */ 0x721d0000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH1
-
-at 0x00000121 : */ 0x6a350000,0x00000000,
-/*
- MOVE TEMP2 TO SFBR
-
-at 0x00000123 : */ 0x721e0000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH2
-
-at 0x00000125 : */ 0x6a360000,0x00000000,
-/*
- MOVE TEMP3 TO SFBR
-
-at 0x00000127 : */ 0x721f0000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH3
-
-at 0x00000129 : */ 0x6a370000,0x00000000,
-/*
- MOVE MEMORY 4, addr_scratch, jump_temp + 4
-
-at 0x0000012b : */ 0xc0000004,0x00000000,0x000009c8,
-/*
- ; Now restore DSA
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x0000012e : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
- MOVE DSA0 + dsa_save_data_pointer TO SFBR
-
-at 0x00000131 : */ 0x76100000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH0
-
-at 0x00000133 : */ 0x6a340000,0x00000000,
-/*
- MOVE DSA1 + 0xff TO SFBR WITH CARRY
-
-at 0x00000135 : */ 0x7711ff00,0x00000000,
-/*
- MOVE SFBR TO SCRATCH1
-
-at 0x00000137 : */ 0x6a350000,0x00000000,
-/*
- MOVE DSA2 + 0xff TO SFBR WITH CARRY
-
-at 0x00000139 : */ 0x7712ff00,0x00000000,
-/*
- MOVE SFBR TO SCRATCH2
-
-at 0x0000013b : */ 0x6a360000,0x00000000,
-/*
- MOVE DSA3 + 0xff TO SFBR WITH CARRY
-
-at 0x0000013d : */ 0x7713ff00,0x00000000,
-/*
- MOVE SFBR TO SCRATCH3
-
-at 0x0000013f : */ 0x6a370000,0x00000000,
-/*
-
-
- MOVE MEMORY 4, addr_scratch, jump_dsa_save + 4
-
-at 0x00000141 : */ 0xc0000004,0x00000000,0x00000514,
-/*
-
-jump_dsa_save:
- JUMP 0
-
-at 0x00000144 : */ 0x80080000,0x00000000,
-/*
-
-munge_restore_pointers:
-
- ; The code at dsa_restore_pointers will RETURN, but we don't care
- ; about TEMP here, as it will overwrite it anyway.
-
- MOVE DSA0 + dsa_restore_pointers TO SFBR
-
-at 0x00000146 : */ 0x76100000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH0
-
-at 0x00000148 : */ 0x6a340000,0x00000000,
-/*
- MOVE DSA1 + 0xff TO SFBR WITH CARRY
-
-at 0x0000014a : */ 0x7711ff00,0x00000000,
-/*
- MOVE SFBR TO SCRATCH1
-
-at 0x0000014c : */ 0x6a350000,0x00000000,
-/*
- MOVE DSA2 + 0xff TO SFBR WITH CARRY
-
-at 0x0000014e : */ 0x7712ff00,0x00000000,
-/*
- MOVE SFBR TO SCRATCH2
-
-at 0x00000150 : */ 0x6a360000,0x00000000,
-/*
- MOVE DSA3 + 0xff TO SFBR WITH CARRY
-
-at 0x00000152 : */ 0x7713ff00,0x00000000,
-/*
- MOVE SFBR TO SCRATCH3
-
-at 0x00000154 : */ 0x6a370000,0x00000000,
-/*
-
-
- MOVE MEMORY 4, addr_scratch, jump_dsa_restore + 4
-
-at 0x00000156 : */ 0xc0000004,0x00000000,0x00000568,
-/*
-
-jump_dsa_restore:
- JUMP 0
-
-at 0x00000159 : */ 0x80080000,0x00000000,
-/*
-
-
-munge_disconnect:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- JUMP dsa_schedule
-
-at 0x0000015b : */ 0x80080000,0x00000178,
-/*
-
-
-
-
-
-munge_extended:
- CLEAR ACK
-
-at 0x0000015d : */ 0x60000040,0x00000000,
-/*
- INT int_err_unexpected_phase, WHEN NOT MSG_IN
-
-at 0x0000015f : */ 0x9f030000,0x00000000,
-/*
- MOVE 1, msg_buf + 1, WHEN MSG_IN
-
-at 0x00000161 : */ 0x0f000001,0x00000001,
-/*
- JUMP munge_extended_2, IF 0x02
-
-at 0x00000163 : */ 0x800c0002,0x000005a4,
-/*
- JUMP munge_extended_3, IF 0x03
-
-at 0x00000165 : */ 0x800c0003,0x000005d4,
-/*
- JUMP reject_message
-
-at 0x00000167 : */ 0x80080000,0x00000604,
-/*
-
-munge_extended_2:
- CLEAR ACK
-
-at 0x00000169 : */ 0x60000040,0x00000000,
-/*
- MOVE 1, msg_buf + 2, WHEN MSG_IN
-
-at 0x0000016b : */ 0x0f000001,0x00000002,
-/*
- JUMP reject_message, IF NOT 0x02 ; Must be WDTR
-
-at 0x0000016d : */ 0x80040002,0x00000604,
-/*
- CLEAR ACK
-
-at 0x0000016f : */ 0x60000040,0x00000000,
-/*
- MOVE 1, msg_buf + 3, WHEN MSG_IN
-
-at 0x00000171 : */ 0x0f000001,0x00000003,
-/*
- INT int_msg_wdtr
-
-at 0x00000173 : */ 0x98080000,0x01000000,
-/*
-
-munge_extended_3:
- CLEAR ACK
-
-at 0x00000175 : */ 0x60000040,0x00000000,
-/*
- MOVE 1, msg_buf + 2, WHEN MSG_IN
-
-at 0x00000177 : */ 0x0f000001,0x00000002,
-/*
- JUMP reject_message, IF NOT 0x01 ; Must be SDTR
-
-at 0x00000179 : */ 0x80040001,0x00000604,
-/*
- CLEAR ACK
-
-at 0x0000017b : */ 0x60000040,0x00000000,
-/*
- MOVE 2, msg_buf + 3, WHEN MSG_IN
-
-at 0x0000017d : */ 0x0f000002,0x00000003,
-/*
- INT int_msg_sdtr
-
-at 0x0000017f : */ 0x98080000,0x01010000,
-/*
-
-ENTRY reject_message
-reject_message:
- SET ATN
-
-at 0x00000181 : */ 0x58000008,0x00000000,
-/*
- CLEAR ACK
-
-at 0x00000183 : */ 0x60000040,0x00000000,
-/*
- MOVE 1, NCR53c7xx_msg_reject, WHEN MSG_OUT
-
-at 0x00000185 : */ 0x0e000001,0x00000000,
-/*
- RETURN
-
-at 0x00000187 : */ 0x90080000,0x00000000,
-/*
-
-ENTRY accept_message
-accept_message:
- CLEAR ATN
-
-at 0x00000189 : */ 0x60000008,0x00000000,
-/*
- CLEAR ACK
-
-at 0x0000018b : */ 0x60000040,0x00000000,
-/*
- RETURN
-
-at 0x0000018d : */ 0x90080000,0x00000000,
-/*
-
-ENTRY respond_message
-respond_message:
- SET ATN
-
-at 0x0000018f : */ 0x58000008,0x00000000,
-/*
- CLEAR ACK
-
-at 0x00000191 : */ 0x60000040,0x00000000,
-/*
- MOVE FROM dsa_msgout_other, WHEN MSG_OUT
-
-at 0x00000193 : */ 0x1e000000,0x00000068,
-/*
- RETURN
-
-at 0x00000195 : */ 0x90080000,0x00000000,
-/*
-
-;
-; command_complete
-;
-; PURPOSE : handle command termination when STATUS IN is detected by reading
-; a status byte followed by a command termination message.
-;
-; Normal termination results in an INTFLY instruction, and
-; the host system can pick out which command terminated by
-; examining the MESSAGE and STATUS buffers of all currently
-; executing commands;
-;
-; Abnormal (CHECK_CONDITION) termination results in an
-; int_err_check_condition interrupt so that a REQUEST SENSE
-; command can be issued out-of-order so that no other command
-; clears the contingent allegiance condition.
-;
-;
-; INPUTS : DSA - command
-;
-; CALLS : OK
-;
-; EXITS : On successful termination, control is passed to schedule.
-; On abnormal termination, the user will usually modify the
-; DSA fields and corresponding buffers and return control
-; to select.
-;
-
-ENTRY command_complete
-command_complete:
- MOVE FROM dsa_status, WHEN STATUS
-
-at 0x00000197 : */ 0x1b000000,0x00000060,
-/*
-
- MOVE SFBR TO SCRATCH0 ; Save status
-
-at 0x00000199 : */ 0x6a340000,0x00000000,
-/*
-
-ENTRY command_complete_msgin
-command_complete_msgin:
- MOVE FROM dsa_msgin, WHEN MSG_IN
-
-at 0x0000019b : */ 0x1f000000,0x00000058,
-/*
-; Indicate that we should be expecting a disconnect
-
-
-
- ; Above code cleared the Unexpected Disconnect bit, what do we do?
-
- CLEAR ACK
-
-at 0x0000019d : */ 0x60000040,0x00000000,
-/*
-
- WAIT DISCONNECT
-
-at 0x0000019f : */ 0x48000000,0x00000000,
-/*
-
-;
-; The SCSI specification states that when a UNIT ATTENTION condition
-; is pending, as indicated by a CHECK CONDITION status message,
-; the target shall revert to asynchronous transfers. Since
-; synchronous transfer parameters are maintained on a per INITIATOR/TARGET
-; basis, and returning control to our scheduler could start work on a command
-; running on another lun on that target using the old parameters, we must
-; interrupt the host processor to get them changed, or change them ourselves.
-;
-; Once SCSI-II tagged queueing is implemented, things will be even more
-; hairy, since contingent allegiance conditions exist on a per-target/lun
-; basis, and issuing a new command with a different tag would clear it.
-; In these cases, we must interrupt the host processor to get a request
-; added to the HEAD of the queue with the request sense command, or we
-; must automatically issue the request sense command.
-
-
-
-
-
-
-
- INT int_norm_emulateintfly
-
-at 0x000001a1 : */ 0x98080000,0x02060000,
-/*
-
-
-
-
-
-
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x000001a3 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
-
-
- JUMP schedule
-
-at 0x000001a6 : */ 0x80080000,0x00000000,
-/*
-command_failed:
- INT int_err_check_condition
-
-at 0x000001a8 : */ 0x98080000,0x00030000,
-/*
-
-
-
-
-;
-; wait_reselect
-;
-; PURPOSE : This is essentially the idle routine, where control lands
-; when there are no new processes to schedule. wait_reselect
-; waits for reselection, selection, and new commands.
-;
-; When a successful reselection occurs, with the aid
-; of fixed up code in each DSA, wait_reselect walks the
-; reconnect_dsa_queue, asking each dsa if the target ID
-; and LUN match its own.
-;
-; If a match is found, a call is made back to reselected_ok,
-; which through the miracles of self modifying code, extracts
-; the found DSA from the reconnect_dsa_queue and then
-; returns control to the DSA's thread of execution.
-;
-; INPUTS : NONE
-;
-; CALLS : OK
-;
-; MODIFIES : DSA,
-;
-; EXITS : On successful reselection, control is returned to the
-; DSA which called reselected_ok. If the WAIT RESELECT
-; was interrupted by a new command's arrival signaled by
-; SIG_P, control is passed to schedule. If the NCR is
-; selected, the host system is interrupted with an
-; int_err_selected which is usually responded to by
-; setting DSP to the target_abort address.
-
-ENTRY wait_reselect
-wait_reselect:
-
-
-
-
-
-
- WAIT RESELECT wait_reselect_failed
-
-at 0x000001aa : */ 0x50000000,0x00000800,
-/*
-
-reselected:
-
-
-
- CLEAR TARGET
-
-at 0x000001ac : */ 0x60000200,0x00000000,
-/*
-
- ; Read all data needed to reestablish the nexus -
- MOVE 1, reselected_identify, WHEN MSG_IN
-
-at 0x000001ae : */ 0x0f000001,0x00000000,
-/*
- ; We used to CLEAR ACK here.
-
-
-
-
-
- ; Point DSA at the current head of the disconnected queue.
-
- MOVE MEMORY 4, reconnect_dsa_head, addr_scratch
-
-at 0x000001b0 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
- MOVE MEMORY 4, addr_scratch, saved_dsa
-
-at 0x000001b3 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
-
- ; Fix the update-next pointer so that the reconnect_dsa_head
- ; pointer is the one that will be updated if this DSA is a hit
- ; and we remove it from the queue.
-
- MOVE MEMORY 4, addr_reconnect_dsa_head, reselected_ok_patch + 8
-
-at 0x000001b6 : */ 0xc0000004,0x00000000,0x000007ec,
-/*
-
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x000001b9 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-ENTRY reselected_check_next
-reselected_check_next:
-
-
-
- ; Check for a NULL pointer.
- MOVE DSA0 TO SFBR
-
-at 0x000001bc : */ 0x72100000,0x00000000,
-/*
- JUMP reselected_not_end, IF NOT 0
-
-at 0x000001be : */ 0x80040000,0x00000738,
-/*
- MOVE DSA1 TO SFBR
-
-at 0x000001c0 : */ 0x72110000,0x00000000,
-/*
- JUMP reselected_not_end, IF NOT 0
-
-at 0x000001c2 : */ 0x80040000,0x00000738,
-/*
- MOVE DSA2 TO SFBR
-
-at 0x000001c4 : */ 0x72120000,0x00000000,
-/*
- JUMP reselected_not_end, IF NOT 0
-
-at 0x000001c6 : */ 0x80040000,0x00000738,
-/*
- MOVE DSA3 TO SFBR
-
-at 0x000001c8 : */ 0x72130000,0x00000000,
-/*
- JUMP reselected_not_end, IF NOT 0
-
-at 0x000001ca : */ 0x80040000,0x00000738,
-/*
- INT int_err_unexpected_reselect
-
-at 0x000001cc : */ 0x98080000,0x00020000,
-/*
-
-reselected_not_end:
- ;
- ; XXX the ALU is only eight bits wide, and the assembler
- ; wont do the dirt work for us. As long as dsa_check_reselect
- ; is negative, we need to sign extend with 1 bits to the full
- ; 32 bit width of the address.
- ;
- ; A potential work around would be to have a known alignment
- ; of the DSA structure such that the base address plus
- ; dsa_check_reselect doesn't require carrying from bytes
- ; higher than the LSB.
- ;
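A minimal C sketch of the byte-wise, carry-propagating arithmetic the next four instructions perform, assuming (as the comment says) that dsa_check_reselect is a small negative 8-bit offset; illustrative only, not part of the shipped script:

	/* Add a negative 8-bit offset to a 32-bit DSA one byte at a time,
	 * propagating carry, as the MOVE ... WITH CARRY sequence does.
	 * Adding 0xff to each higher byte is the sign extension.
	 */
	static unsigned int add_negative_offset(unsigned int dsa, signed char off)
	{
		unsigned int result = 0, carry = 0, i;

		for (i = 0; i < 4; i++) {
			unsigned int byte = (dsa >> (8 * i)) & 0xff;
			unsigned int add = i ? 0xff : (unsigned char)off;
			unsigned int sum = byte + add + carry;

			carry = sum >> 8;
			result |= (sum & 0xff) << (8 * i);
		}
		return result;			/* == dsa + off for off < 0 */
	}
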
-
- MOVE DSA0 TO SFBR
-
-at 0x000001ce : */ 0x72100000,0x00000000,
-/*
- MOVE SFBR + dsa_check_reselect TO SCRATCH0
-
-at 0x000001d0 : */ 0x6e340000,0x00000000,
-/*
- MOVE DSA1 TO SFBR
-
-at 0x000001d2 : */ 0x72110000,0x00000000,
-/*
- MOVE SFBR + 0xff TO SCRATCH1 WITH CARRY
-
-at 0x000001d4 : */ 0x6f35ff00,0x00000000,
-/*
- MOVE DSA2 TO SFBR
-
-at 0x000001d6 : */ 0x72120000,0x00000000,
-/*
- MOVE SFBR + 0xff TO SCRATCH2 WITH CARRY
-
-at 0x000001d8 : */ 0x6f36ff00,0x00000000,
-/*
- MOVE DSA3 TO SFBR
-
-at 0x000001da : */ 0x72130000,0x00000000,
-/*
- MOVE SFBR + 0xff TO SCRATCH3 WITH CARRY
-
-at 0x000001dc : */ 0x6f37ff00,0x00000000,
-/*
-
-
- MOVE MEMORY 4, addr_scratch, reselected_check + 4
-
-at 0x000001de : */ 0xc0000004,0x00000000,0x00000794,
-/*
-
-
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x000001e1 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-reselected_check:
- JUMP 0
-
-at 0x000001e4 : */ 0x80080000,0x00000000,
-/*
-
-
-;
-;
-
-; We have problems here - the memory move corrupts TEMP and DSA. This
-; routine is called from DSA code, and patched from many places. Scratch
-; is probably free when it is called.
-; We have to:
-; copy temp to scratch, one byte at a time
-; write scratch to patch a jump in place of the return
-; do the move memory
-; jump to the patched in return address
-; DSA is corrupt when we get here, and can be left corrupt
-
-ENTRY reselected_ok
-reselected_ok:
- MOVE TEMP0 TO SFBR
-
-at 0x000001e6 : */ 0x721c0000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH0
-
-at 0x000001e8 : */ 0x6a340000,0x00000000,
-/*
- MOVE TEMP1 TO SFBR
-
-at 0x000001ea : */ 0x721d0000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH1
-
-at 0x000001ec : */ 0x6a350000,0x00000000,
-/*
- MOVE TEMP2 TO SFBR
-
-at 0x000001ee : */ 0x721e0000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH2
-
-at 0x000001f0 : */ 0x6a360000,0x00000000,
-/*
- MOVE TEMP3 TO SFBR
-
-at 0x000001f2 : */ 0x721f0000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH3
-
-at 0x000001f4 : */ 0x6a370000,0x00000000,
-/*
- MOVE MEMORY 4, addr_scratch, reselected_ok_jump + 4
-
-at 0x000001f6 : */ 0xc0000004,0x00000000,0x000007f4,
-/*
-reselected_ok_patch:
- MOVE MEMORY 4, 0, 0
-
-at 0x000001f9 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-reselected_ok_jump:
- JUMP 0
-
-at 0x000001fc : */ 0x80080000,0x00000000,
-/*
-
-
-
-
-
-selected:
- INT int_err_selected;
-
-at 0x000001fe : */ 0x98080000,0x00010000,
-/*
-
-;
-; A select or reselect failure can be caused by one of two conditions :
-; 1. SIG_P was set. This will be the case if the user has written
-; a new value to a previously NULL head of the issue queue.
-;
-; 2. The NCR53c810 was selected or reselected by another device.
-;
-; 3. The bus was already busy since we were selected or reselected
-; before starting the command.
-
-wait_reselect_failed:
-
-
-
-; Check selected bit.
-
- ; Must work out how to tell if we are selected....
-
-
-
-
-; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
- MOVE CTEST2 & 0x40 TO SFBR
-
-at 0x00000200 : */ 0x74164000,0x00000000,
-/*
- JUMP schedule, IF 0x40
-
-at 0x00000202 : */ 0x800c0040,0x00000000,
-/*
-; Check connected bit.
-; FIXME: this needs to change if we support target mode
- MOVE ISTAT & 0x08 TO SFBR
-
-at 0x00000204 : */ 0x74210800,0x00000000,
-/*
- JUMP reselected, IF 0x08
-
-at 0x00000206 : */ 0x800c0008,0x000006b0,
-/*
-; FIXME : Something bogus happened, and we shouldn't fail silently.
-
-
-
- INT int_debug_panic
-
-at 0x00000208 : */ 0x98080000,0x030b0000,
-/*
-
-
-
-select_failed:
-
- ; Disable selection timer
- MOVE CTEST7 | 0x10 TO CTEST7
-
-at 0x0000020a : */ 0x7a1b1000,0x00000000,
-/*
-
-
-
-
-; Otherwise, mask the selected and reselected bits off SIST0
-
- ; Let's assume we don't get selected for now
- MOVE SSTAT0 & 0x10 TO SFBR
-
-at 0x0000020c : */ 0x740d1000,0x00000000,
-/*
-
-
-
-
- JUMP reselected, IF 0x10
-
-at 0x0000020e : */ 0x800c0010,0x000006b0,
-/*
-; If SIGP is set, the user just gave us another command, and
-; we should restart or return to the scheduler.
-; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
- MOVE CTEST2 & 0x40 TO SFBR
-
-at 0x00000210 : */ 0x74164000,0x00000000,
-/*
- JUMP select, IF 0x40
-
-at 0x00000212 : */ 0x800c0040,0x000001f8,
-/*
-; Check connected bit.
-; FIXME: this needs to change if we support target mode
-; FIXME: is this really necessary?
- MOVE ISTAT & 0x08 TO SFBR
-
-at 0x00000214 : */ 0x74210800,0x00000000,
-/*
- JUMP reselected, IF 0x08
-
-at 0x00000216 : */ 0x800c0008,0x000006b0,
-/*
-; FIXME : Something bogus happened, and we shouldn't fail silently.
-
-
-
- INT int_debug_panic
-
-at 0x00000218 : */ 0x98080000,0x030b0000,
-/*
-
-
-;
-; test_1
-; test_2
-;
-; PURPOSE : run some verification tests on the NCR. test_1
-; copies test_src to test_dest and interrupts the host
-; processor, testing for cache coherency and interrupt
-; problems in the processes.
-;
-; test_2 runs a command with offsets relative to the
-; DSA on entry, and is useful for miscellaneous experimentation.
-;
-
-; Verify that interrupts are working correctly and that we don't
-; have a cache invalidation problem.
-
-ABSOLUTE test_src = 0, test_dest = 0
-ENTRY test_1
-test_1:
- MOVE MEMORY 4, test_src, test_dest
-
-at 0x0000021a : */ 0xc0000004,0x00000000,0x00000000,
-/*
- INT int_test_1
-
-at 0x0000021d : */ 0x98080000,0x04000000,
-/*
-
-;
-; Run arbitrary commands, with test code establishing a DSA
-;
-
-ENTRY test_2
-test_2:
- CLEAR TARGET
-
-at 0x0000021f : */ 0x60000200,0x00000000,
-/*
-
- ; Enable selection timer
-
-
-
- MOVE CTEST7 & 0xef TO CTEST7
-
-at 0x00000221 : */ 0x7c1bef00,0x00000000,
-/*
-
-
- SELECT ATN FROM 0, test_2_fail
-
-at 0x00000223 : */ 0x43000000,0x000008dc,
-/*
- JUMP test_2_msgout, WHEN MSG_OUT
-
-at 0x00000225 : */ 0x860b0000,0x0000089c,
-/*
-ENTRY test_2_msgout
-test_2_msgout:
-
- ; Disable selection timer
- MOVE CTEST7 | 0x10 TO CTEST7
-
-at 0x00000227 : */ 0x7a1b1000,0x00000000,
-/*
-
- MOVE FROM 8, WHEN MSG_OUT
-
-at 0x00000229 : */ 0x1e000000,0x00000008,
-/*
- MOVE FROM 16, WHEN CMD
-
-at 0x0000022b : */ 0x1a000000,0x00000010,
-/*
- MOVE FROM 24, WHEN DATA_IN
-
-at 0x0000022d : */ 0x19000000,0x00000018,
-/*
- MOVE FROM 32, WHEN STATUS
-
-at 0x0000022f : */ 0x1b000000,0x00000020,
-/*
- MOVE FROM 40, WHEN MSG_IN
-
-at 0x00000231 : */ 0x1f000000,0x00000028,
-/*
-
-
-
- CLEAR ACK
-
-at 0x00000233 : */ 0x60000040,0x00000000,
-/*
- WAIT DISCONNECT
-
-at 0x00000235 : */ 0x48000000,0x00000000,
-/*
-test_2_fail:
-
- ; Disable selection timer
- MOVE CTEST7 | 0x10 TO CTEST7
-
-at 0x00000237 : */ 0x7a1b1000,0x00000000,
-/*
-
- INT int_test_2
-
-at 0x00000239 : */ 0x98080000,0x04010000,
-/*
-
-ENTRY debug_break
-debug_break:
- INT int_debug_break
-
-at 0x0000023b : */ 0x98080000,0x03000000,
-/*
-
-;
-; initiator_abort
-; target_abort
-;
-; PURPOSE : Abort the currently established nexus from with initiator
-; or target mode.
-;
-;
-
-ENTRY target_abort
-target_abort:
- SET TARGET
-
-at 0x0000023d : */ 0x58000200,0x00000000,
-/*
- DISCONNECT
-
-at 0x0000023f : */ 0x48000000,0x00000000,
-/*
- CLEAR TARGET
-
-at 0x00000241 : */ 0x60000200,0x00000000,
-/*
- JUMP schedule
-
-at 0x00000243 : */ 0x80080000,0x00000000,
-/*
-
-ENTRY initiator_abort
-initiator_abort:
- SET ATN
-
-at 0x00000245 : */ 0x58000008,0x00000000,
-/*
-;
-; The SCSI-I specification says that targets may go into MSG out at
-; their leisure upon receipt of the ATN single. On all versions of the
-; specification, we can't change phases until REQ transitions true->false,
-; so we need to sink/source one byte of data to allow the transition.
-;
-; For the sake of safety, we'll only source one byte of data in all
-; cases, but to accommodate the SCSI-I dain bramage, we'll sink an
-; arbitrary number of bytes.
- JUMP spew_cmd, WHEN CMD
-
-at 0x00000247 : */ 0x820b0000,0x0000094c,
-/*
- JUMP eat_msgin, WHEN MSG_IN
-
-at 0x00000249 : */ 0x870b0000,0x0000095c,
-/*
- JUMP eat_datain, WHEN DATA_IN
-
-at 0x0000024b : */ 0x810b0000,0x0000098c,
-/*
- JUMP eat_status, WHEN STATUS
-
-at 0x0000024d : */ 0x830b0000,0x00000974,
-/*
- JUMP spew_dataout, WHEN DATA_OUT
-
-at 0x0000024f : */ 0x800b0000,0x000009a4,
-/*
- JUMP sated
-
-at 0x00000251 : */ 0x80080000,0x000009ac,
-/*
-spew_cmd:
- MOVE 1, NCR53c7xx_zero, WHEN CMD
-
-at 0x00000253 : */ 0x0a000001,0x00000000,
-/*
- JUMP sated
-
-at 0x00000255 : */ 0x80080000,0x000009ac,
-/*
-eat_msgin:
- MOVE 1, NCR53c7xx_sink, WHEN MSG_IN
-
-at 0x00000257 : */ 0x0f000001,0x00000000,
-/*
- JUMP eat_msgin, WHEN MSG_IN
-
-at 0x00000259 : */ 0x870b0000,0x0000095c,
-/*
- JUMP sated
-
-at 0x0000025b : */ 0x80080000,0x000009ac,
-/*
-eat_status:
- MOVE 1, NCR53c7xx_sink, WHEN STATUS
-
-at 0x0000025d : */ 0x0b000001,0x00000000,
-/*
- JUMP eat_status, WHEN STATUS
-
-at 0x0000025f : */ 0x830b0000,0x00000974,
-/*
- JUMP sated
-
-at 0x00000261 : */ 0x80080000,0x000009ac,
-/*
-eat_datain:
- MOVE 1, NCR53c7xx_sink, WHEN DATA_IN
-
-at 0x00000263 : */ 0x09000001,0x00000000,
-/*
- JUMP eat_datain, WHEN DATA_IN
-
-at 0x00000265 : */ 0x810b0000,0x0000098c,
-/*
- JUMP sated
-
-at 0x00000267 : */ 0x80080000,0x000009ac,
-/*
-spew_dataout:
- MOVE 1, NCR53c7xx_zero, WHEN DATA_OUT
-
-at 0x00000269 : */ 0x08000001,0x00000000,
-/*
-sated:
-
-
-
- MOVE 1, NCR53c7xx_msg_abort, WHEN MSG_OUT
-
-at 0x0000026b : */ 0x0e000001,0x00000000,
-/*
- WAIT DISCONNECT
-
-at 0x0000026d : */ 0x48000000,0x00000000,
-/*
- INT int_norm_aborted
-
-at 0x0000026f : */ 0x98080000,0x02040000,
-/*
-
-
-
-
-; Little patched jump, used to overcome problems with TEMP getting
-; corrupted on memory moves.
-
-jump_temp:
- JUMP 0
-
-at 0x00000271 : */ 0x80080000,0x00000000,
-};
-
-#define A_NCR53c7xx_msg_abort 0x00000000
-static u32 A_NCR53c7xx_msg_abort_used[] __attribute((unused)) = {
- 0x0000026c,
-};
-
-#define A_NCR53c7xx_msg_reject 0x00000000
-static u32 A_NCR53c7xx_msg_reject_used[] __attribute((unused)) = {
- 0x00000186,
-};
-
-#define A_NCR53c7xx_sink 0x00000000
-static u32 A_NCR53c7xx_sink_used[] __attribute((unused)) = {
- 0x00000258,
- 0x0000025e,
- 0x00000264,
-};
-
-#define A_NCR53c7xx_zero 0x00000000
-static u32 A_NCR53c7xx_zero_used[] __attribute((unused)) = {
- 0x00000254,
- 0x0000026a,
-};
-
-#define A_NOP_insn 0x00000000
-static u32 A_NOP_insn_used[] __attribute((unused)) = {
- 0x00000017,
-};
-
-#define A_addr_dsa 0x00000000
-static u32 A_addr_dsa_used[] __attribute((unused)) = {
- 0x0000000f,
- 0x00000026,
- 0x00000033,
- 0x00000040,
- 0x00000055,
- 0x00000079,
- 0x0000008e,
- 0x000000bc,
- 0x000000d2,
- 0x00000130,
- 0x000001a5,
- 0x000001bb,
- 0x000001e3,
-};
-
-#define A_addr_reconnect_dsa_head 0x00000000
-static u32 A_addr_reconnect_dsa_head_used[] __attribute((unused)) = {
- 0x000001b7,
-};
-
-#define A_addr_scratch 0x00000000
-static u32 A_addr_scratch_used[] __attribute((unused)) = {
- 0x00000002,
- 0x00000004,
- 0x00000008,
- 0x00000020,
- 0x00000022,
- 0x00000049,
- 0x00000060,
- 0x0000006a,
- 0x00000071,
- 0x00000073,
- 0x000000ab,
- 0x000000b5,
- 0x000000c1,
- 0x000000cb,
- 0x0000012c,
- 0x00000142,
- 0x00000157,
- 0x000001b2,
- 0x000001b4,
- 0x000001df,
- 0x000001f7,
-};
-
-#define A_addr_temp 0x00000000
-static u32 A_addr_temp_used[] __attribute((unused)) = {
-};
-
-#define A_dmode_memory_to_memory 0x00000000
-static u32 A_dmode_memory_to_memory_used[] __attribute((unused)) = {
-};
-
-#define A_dmode_memory_to_ncr 0x00000000
-static u32 A_dmode_memory_to_ncr_used[] __attribute((unused)) = {
-};
-
-#define A_dmode_ncr_to_memory 0x00000000
-static u32 A_dmode_ncr_to_memory_used[] __attribute((unused)) = {
-};
-
-#define A_dsa_check_reselect 0x00000000
-static u32 A_dsa_check_reselect_used[] __attribute((unused)) = {
- 0x000001d0,
-};
-
-#define A_dsa_cmdout 0x00000048
-static u32 A_dsa_cmdout_used[] __attribute((unused)) = {
- 0x0000009a,
-};
-
-#define A_dsa_cmnd 0x00000038
-static u32 A_dsa_cmnd_used[] __attribute((unused)) = {
-};
-
-#define A_dsa_datain 0x00000054
-static u32 A_dsa_datain_used[] __attribute((unused)) = {
- 0x000000c2,
-};
-
-#define A_dsa_dataout 0x00000050
-static u32 A_dsa_dataout_used[] __attribute((unused)) = {
- 0x000000ac,
-};
-
-#define A_dsa_end 0x00000070
-static u32 A_dsa_end_used[] __attribute((unused)) = {
-};
-
-#define A_dsa_fields_start 0x00000000
-static u32 A_dsa_fields_start_used[] __attribute((unused)) = {
-};
-
-#define A_dsa_msgin 0x00000058
-static u32 A_dsa_msgin_used[] __attribute((unused)) = {
- 0x0000019c,
-};
-
-#define A_dsa_msgout 0x00000040
-static u32 A_dsa_msgout_used[] __attribute((unused)) = {
- 0x00000089,
-};
-
-#define A_dsa_msgout_other 0x00000068
-static u32 A_dsa_msgout_other_used[] __attribute((unused)) = {
- 0x00000194,
-};
-
-#define A_dsa_next 0x00000030
-static u32 A_dsa_next_used[] __attribute((unused)) = {
- 0x00000061,
-};
-
-#define A_dsa_restore_pointers 0x00000000
-static u32 A_dsa_restore_pointers_used[] __attribute((unused)) = {
- 0x00000146,
-};
-
-#define A_dsa_save_data_pointer 0x00000000
-static u32 A_dsa_save_data_pointer_used[] __attribute((unused)) = {
- 0x00000131,
-};
-
-#define A_dsa_select 0x0000003c
-static u32 A_dsa_select_used[] __attribute((unused)) = {
- 0x00000082,
-};
-
-#define A_dsa_sscf_710 0x00000000
-static u32 A_dsa_sscf_710_used[] __attribute((unused)) = {
- 0x00000007,
-};
-
-#define A_dsa_status 0x00000060
-static u32 A_dsa_status_used[] __attribute((unused)) = {
- 0x00000198,
-};
-
-#define A_dsa_temp_addr_array_value 0x00000000
-static u32 A_dsa_temp_addr_array_value_used[] __attribute((unused)) = {
-};
-
-#define A_dsa_temp_addr_dsa_value 0x00000000
-static u32 A_dsa_temp_addr_dsa_value_used[] __attribute((unused)) = {
- 0x00000001,
-};
-
-#define A_dsa_temp_addr_new_value 0x00000000
-static u32 A_dsa_temp_addr_new_value_used[] __attribute((unused)) = {
-};
-
-#define A_dsa_temp_addr_next 0x00000000
-static u32 A_dsa_temp_addr_next_used[] __attribute((unused)) = {
- 0x0000001c,
- 0x0000004f,
-};
-
-#define A_dsa_temp_addr_residual 0x00000000
-static u32 A_dsa_temp_addr_residual_used[] __attribute((unused)) = {
- 0x0000002d,
- 0x0000003b,
-};
-
-#define A_dsa_temp_addr_saved_pointer 0x00000000
-static u32 A_dsa_temp_addr_saved_pointer_used[] __attribute((unused)) = {
- 0x0000002b,
- 0x00000037,
-};
-
-#define A_dsa_temp_addr_saved_residual 0x00000000
-static u32 A_dsa_temp_addr_saved_residual_used[] __attribute((unused)) = {
- 0x0000002e,
- 0x0000003a,
-};
-
-#define A_dsa_temp_lun 0x00000000
-static u32 A_dsa_temp_lun_used[] __attribute((unused)) = {
- 0x0000004c,
-};
-
-#define A_dsa_temp_next 0x00000000
-static u32 A_dsa_temp_next_used[] __attribute((unused)) = {
- 0x0000001f,
-};
-
-#define A_dsa_temp_sync 0x00000000
-static u32 A_dsa_temp_sync_used[] __attribute((unused)) = {
- 0x00000057,
-};
-
-#define A_dsa_temp_target 0x00000000
-static u32 A_dsa_temp_target_used[] __attribute((unused)) = {
- 0x00000045,
-};
-
-#define A_emulfly 0x00000000
-static u32 A_emulfly_used[] __attribute((unused)) = {
-};
-
-#define A_int_debug_break 0x03000000
-static u32 A_int_debug_break_used[] __attribute((unused)) = {
- 0x0000023c,
-};
-
-#define A_int_debug_panic 0x030b0000
-static u32 A_int_debug_panic_used[] __attribute((unused)) = {
- 0x00000209,
- 0x00000219,
-};
-
-#define A_int_err_check_condition 0x00030000
-static u32 A_int_err_check_condition_used[] __attribute((unused)) = {
- 0x000001a9,
-};
-
-#define A_int_err_no_phase 0x00040000
-static u32 A_int_err_no_phase_used[] __attribute((unused)) = {
-};
-
-#define A_int_err_selected 0x00010000
-static u32 A_int_err_selected_used[] __attribute((unused)) = {
- 0x000001ff,
-};
-
-#define A_int_err_unexpected_phase 0x00000000
-static u32 A_int_err_unexpected_phase_used[] __attribute((unused)) = {
- 0x00000092,
- 0x00000098,
- 0x000000a0,
- 0x000000d6,
- 0x000000da,
- 0x000000dc,
- 0x000000e4,
- 0x000000e8,
- 0x000000ea,
- 0x000000f2,
- 0x000000f6,
- 0x000000f8,
- 0x000000fa,
- 0x00000160,
-};
-
-#define A_int_err_unexpected_reselect 0x00020000
-static u32 A_int_err_unexpected_reselect_used[] __attribute((unused)) = {
- 0x000001cd,
-};
-
-#define A_int_msg_1 0x01020000
-static u32 A_int_msg_1_used[] __attribute((unused)) = {
- 0x00000114,
- 0x00000116,
-};
-
-#define A_int_msg_sdtr 0x01010000
-static u32 A_int_msg_sdtr_used[] __attribute((unused)) = {
- 0x00000180,
-};
-
-#define A_int_msg_wdtr 0x01000000
-static u32 A_int_msg_wdtr_used[] __attribute((unused)) = {
- 0x00000174,
-};
-
-#define A_int_norm_aborted 0x02040000
-static u32 A_int_norm_aborted_used[] __attribute((unused)) = {
- 0x00000270,
-};
-
-#define A_int_norm_command_complete 0x02020000
-static u32 A_int_norm_command_complete_used[] __attribute((unused)) = {
-};
-
-#define A_int_norm_disconnected 0x02030000
-static u32 A_int_norm_disconnected_used[] __attribute((unused)) = {
-};
-
-#define A_int_norm_emulateintfly 0x02060000
-static u32 A_int_norm_emulateintfly_used[] __attribute((unused)) = {
- 0x000001a2,
-};
-
-#define A_int_norm_reselect_complete 0x02010000
-static u32 A_int_norm_reselect_complete_used[] __attribute((unused)) = {
-};
-
-#define A_int_norm_reset 0x02050000
-static u32 A_int_norm_reset_used[] __attribute((unused)) = {
-};
-
-#define A_int_norm_select_complete 0x02000000
-static u32 A_int_norm_select_complete_used[] __attribute((unused)) = {
-};
-
-#define A_int_test_1 0x04000000
-static u32 A_int_test_1_used[] __attribute((unused)) = {
- 0x0000021e,
-};
-
-#define A_int_test_2 0x04010000
-static u32 A_int_test_2_used[] __attribute((unused)) = {
- 0x0000023a,
-};
-
-#define A_int_test_3 0x04020000
-static u32 A_int_test_3_used[] __attribute((unused)) = {
-};
-
-#define A_msg_buf 0x00000000
-static u32 A_msg_buf_used[] __attribute((unused)) = {
- 0x00000108,
- 0x00000162,
- 0x0000016c,
- 0x00000172,
- 0x00000178,
- 0x0000017e,
-};
-
-#define A_reconnect_dsa_head 0x00000000
-static u32 A_reconnect_dsa_head_used[] __attribute((unused)) = {
- 0x0000006d,
- 0x00000074,
- 0x000001b1,
-};
-
-#define A_reselected_identify 0x00000000
-static u32 A_reselected_identify_used[] __attribute((unused)) = {
- 0x00000048,
- 0x000001af,
-};
-
-#define A_reselected_tag 0x00000000
-static u32 A_reselected_tag_used[] __attribute((unused)) = {
-};
-
-#define A_saved_dsa 0x00000000
-static u32 A_saved_dsa_used[] __attribute((unused)) = {
- 0x00000005,
- 0x0000000e,
- 0x00000023,
- 0x00000025,
- 0x00000032,
- 0x0000003f,
- 0x00000054,
- 0x0000005f,
- 0x00000070,
- 0x00000078,
- 0x0000008d,
- 0x000000aa,
- 0x000000bb,
- 0x000000c0,
- 0x000000d1,
- 0x0000012f,
- 0x000001a4,
- 0x000001b5,
- 0x000001ba,
- 0x000001e2,
-};
-
-#define A_schedule 0x00000000
-static u32 A_schedule_used[] __attribute((unused)) = {
- 0x0000007d,
- 0x000001a7,
- 0x00000203,
- 0x00000244,
-};
-
-#define A_test_dest 0x00000000
-static u32 A_test_dest_used[] __attribute((unused)) = {
- 0x0000021c,
-};
-
-#define A_test_src 0x00000000
-static u32 A_test_src_used[] __attribute((unused)) = {
- 0x0000021b,
-};
-
-#define Ent_accept_message 0x00000624
-#define Ent_cmdout_cmdout 0x00000264
-#define Ent_command_complete 0x0000065c
-#define Ent_command_complete_msgin 0x0000066c
-#define Ent_data_transfer 0x0000026c
-#define Ent_datain_to_jump 0x00000334
-#define Ent_debug_break 0x000008ec
-#define Ent_dsa_code_begin 0x00000000
-#define Ent_dsa_code_check_reselect 0x0000010c
-#define Ent_dsa_code_fix_jump 0x00000058
-#define Ent_dsa_code_restore_pointers 0x000000d8
-#define Ent_dsa_code_save_data_pointer 0x000000a4
-#define Ent_dsa_code_template 0x00000000
-#define Ent_dsa_code_template_end 0x00000178
-#define Ent_dsa_schedule 0x00000178
-#define Ent_dsa_zero 0x00000178
-#define Ent_end_data_transfer 0x000002a4
-#define Ent_initiator_abort 0x00000914
-#define Ent_msg_in 0x0000041c
-#define Ent_msg_in_restart 0x000003fc
-#define Ent_other_in 0x0000038c
-#define Ent_other_out 0x00000354
-#define Ent_other_transfer 0x000003c4
-#define Ent_reject_message 0x00000604
-#define Ent_reselected_check_next 0x000006f0
-#define Ent_reselected_ok 0x00000798
-#define Ent_respond_message 0x0000063c
-#define Ent_select 0x000001f8
-#define Ent_select_msgout 0x00000218
-#define Ent_target_abort 0x000008f4
-#define Ent_test_1 0x00000868
-#define Ent_test_2 0x0000087c
-#define Ent_test_2_msgout 0x0000089c
-#define Ent_wait_reselect 0x000006a8
-static u32 LABELPATCHES[] __attribute((unused)) = {
- 0x00000011,
- 0x0000001a,
- 0x0000001d,
- 0x00000028,
- 0x0000002a,
- 0x00000035,
- 0x00000038,
- 0x00000042,
- 0x00000050,
- 0x00000052,
- 0x0000006b,
- 0x00000083,
- 0x00000085,
- 0x00000090,
- 0x00000094,
- 0x00000096,
- 0x0000009c,
- 0x0000009e,
- 0x000000a2,
- 0x000000a4,
- 0x000000a6,
- 0x000000a8,
- 0x000000b6,
- 0x000000b9,
- 0x000000cc,
- 0x000000cf,
- 0x000000d8,
- 0x000000de,
- 0x000000e0,
- 0x000000e6,
- 0x000000ec,
- 0x000000ee,
- 0x000000f4,
- 0x000000fc,
- 0x000000fe,
- 0x0000010a,
- 0x0000010c,
- 0x0000010e,
- 0x00000110,
- 0x00000112,
- 0x00000118,
- 0x0000011a,
- 0x0000012d,
- 0x00000143,
- 0x00000158,
- 0x0000015c,
- 0x00000164,
- 0x00000166,
- 0x00000168,
- 0x0000016e,
- 0x0000017a,
- 0x000001ab,
- 0x000001b8,
- 0x000001bf,
- 0x000001c3,
- 0x000001c7,
- 0x000001cb,
- 0x000001e0,
- 0x000001f8,
- 0x00000207,
- 0x0000020f,
- 0x00000213,
- 0x00000217,
- 0x00000224,
- 0x00000226,
- 0x00000248,
- 0x0000024a,
- 0x0000024c,
- 0x0000024e,
- 0x00000250,
- 0x00000252,
- 0x00000256,
- 0x0000025a,
- 0x0000025c,
- 0x00000260,
- 0x00000262,
- 0x00000266,
- 0x00000268,
-};
-
-static struct {
- u32 offset;
- void *address;
-} EXTERNAL_PATCHES[] __attribute((unused)) = {
-};
-
-static u32 INSTRUCTIONS __attribute((unused)) = 290;
-static u32 PATCHES __attribute((unused)) = 78;
-static u32 EXTERNAL_PATCHES_LEN __attribute((unused)) = 0;
diff --git a/drivers/scsi/53c7xx_u.h_shipped b/drivers/scsi/53c7xx_u.h_shipped
deleted file mode 100644
index 7b337174e22..00000000000
--- a/drivers/scsi/53c7xx_u.h_shipped
+++ /dev/null
@@ -1,102 +0,0 @@
-#undef A_NCR53c7xx_msg_abort
-#undef A_NCR53c7xx_msg_reject
-#undef A_NCR53c7xx_sink
-#undef A_NCR53c7xx_zero
-#undef A_NOP_insn
-#undef A_addr_dsa
-#undef A_addr_reconnect_dsa_head
-#undef A_addr_scratch
-#undef A_addr_temp
-#undef A_dmode_memory_to_memory
-#undef A_dmode_memory_to_ncr
-#undef A_dmode_ncr_to_memory
-#undef A_dsa_check_reselect
-#undef A_dsa_cmdout
-#undef A_dsa_cmnd
-#undef A_dsa_datain
-#undef A_dsa_dataout
-#undef A_dsa_end
-#undef A_dsa_fields_start
-#undef A_dsa_msgin
-#undef A_dsa_msgout
-#undef A_dsa_msgout_other
-#undef A_dsa_next
-#undef A_dsa_restore_pointers
-#undef A_dsa_save_data_pointer
-#undef A_dsa_select
-#undef A_dsa_sscf_710
-#undef A_dsa_status
-#undef A_dsa_temp_addr_array_value
-#undef A_dsa_temp_addr_dsa_value
-#undef A_dsa_temp_addr_new_value
-#undef A_dsa_temp_addr_next
-#undef A_dsa_temp_addr_residual
-#undef A_dsa_temp_addr_saved_pointer
-#undef A_dsa_temp_addr_saved_residual
-#undef A_dsa_temp_lun
-#undef A_dsa_temp_next
-#undef A_dsa_temp_sync
-#undef A_dsa_temp_target
-#undef A_emulfly
-#undef A_int_debug_break
-#undef A_int_debug_panic
-#undef A_int_err_check_condition
-#undef A_int_err_no_phase
-#undef A_int_err_selected
-#undef A_int_err_unexpected_phase
-#undef A_int_err_unexpected_reselect
-#undef A_int_msg_1
-#undef A_int_msg_sdtr
-#undef A_int_msg_wdtr
-#undef A_int_norm_aborted
-#undef A_int_norm_command_complete
-#undef A_int_norm_disconnected
-#undef A_int_norm_emulateintfly
-#undef A_int_norm_reselect_complete
-#undef A_int_norm_reset
-#undef A_int_norm_select_complete
-#undef A_int_test_1
-#undef A_int_test_2
-#undef A_int_test_3
-#undef A_msg_buf
-#undef A_reconnect_dsa_head
-#undef A_reselected_identify
-#undef A_reselected_tag
-#undef A_saved_dsa
-#undef A_schedule
-#undef A_test_dest
-#undef A_test_src
-#undef Ent_accept_message
-#undef Ent_cmdout_cmdout
-#undef Ent_command_complete
-#undef Ent_command_complete_msgin
-#undef Ent_data_transfer
-#undef Ent_datain_to_jump
-#undef Ent_debug_break
-#undef Ent_dsa_code_begin
-#undef Ent_dsa_code_check_reselect
-#undef Ent_dsa_code_fix_jump
-#undef Ent_dsa_code_restore_pointers
-#undef Ent_dsa_code_save_data_pointer
-#undef Ent_dsa_code_template
-#undef Ent_dsa_code_template_end
-#undef Ent_dsa_schedule
-#undef Ent_dsa_zero
-#undef Ent_end_data_transfer
-#undef Ent_initiator_abort
-#undef Ent_msg_in
-#undef Ent_msg_in_restart
-#undef Ent_other_in
-#undef Ent_other_out
-#undef Ent_other_transfer
-#undef Ent_reject_message
-#undef Ent_reselected_check_next
-#undef Ent_reselected_ok
-#undef Ent_respond_message
-#undef Ent_select
-#undef Ent_select_msgout
-#undef Ent_target_abort
-#undef Ent_test_1
-#undef Ent_test_2
-#undef Ent_test_2_msgout
-#undef Ent_wait_reselect
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 96f4cab0761..9b206176f71 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -304,18 +304,10 @@ static struct BusLogic_CCB *BusLogic_AllocateCCB(struct BusLogic_HostAdapter
static void BusLogic_DeallocateCCB(struct BusLogic_CCB *CCB)
{
struct BusLogic_HostAdapter *HostAdapter = CCB->HostAdapter;
- struct scsi_cmnd *cmd = CCB->Command;
- if (cmd->use_sg != 0) {
- pci_unmap_sg(HostAdapter->PCI_Device,
- (struct scatterlist *)cmd->request_buffer,
- cmd->use_sg, cmd->sc_data_direction);
- } else if (cmd->request_bufflen != 0) {
- pci_unmap_single(HostAdapter->PCI_Device, CCB->DataPointer,
- CCB->DataLength, cmd->sc_data_direction);
- }
+ scsi_dma_unmap(CCB->Command);
pci_unmap_single(HostAdapter->PCI_Device, CCB->SenseDataPointer,
- CCB->SenseDataLength, PCI_DMA_FROMDEVICE);
+ CCB->SenseDataLength, PCI_DMA_FROMDEVICE);
CCB->Command = NULL;
CCB->Status = BusLogic_CCB_Free;
@@ -2648,7 +2640,8 @@ static void BusLogic_ProcessCompletedCCBs(struct BusLogic_HostAdapter *HostAdapt
*/
if (CCB->CDB[0] == INQUIRY && CCB->CDB[1] == 0 && CCB->HostAdapterStatus == BusLogic_CommandCompletedNormally) {
struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[CCB->TargetID];
- struct SCSI_Inquiry *InquiryResult = (struct SCSI_Inquiry *) Command->request_buffer;
+ struct SCSI_Inquiry *InquiryResult =
+ (struct SCSI_Inquiry *) scsi_sglist(Command);
TargetFlags->TargetExists = true;
TargetFlags->TaggedQueuingSupported = InquiryResult->CmdQue;
TargetFlags->WideTransfersSupported = InquiryResult->WBus16;
@@ -2819,9 +2812,8 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou
int CDB_Length = Command->cmd_len;
int TargetID = Command->device->id;
int LogicalUnit = Command->device->lun;
- void *BufferPointer = Command->request_buffer;
- int BufferLength = Command->request_bufflen;
- int SegmentCount = Command->use_sg;
+ int BufferLength = scsi_bufflen(Command);
+ int Count;
struct BusLogic_CCB *CCB;
/*
SCSI REQUEST_SENSE commands will be executed automatically by the Host
@@ -2851,36 +2843,35 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou
return 0;
}
}
+
/*
Initialize the fields in the BusLogic Command Control Block (CCB).
*/
- if (SegmentCount == 0 && BufferLength != 0) {
- CCB->Opcode = BusLogic_InitiatorCCB;
- CCB->DataLength = BufferLength;
- CCB->DataPointer = pci_map_single(HostAdapter->PCI_Device,
- BufferPointer, BufferLength,
- Command->sc_data_direction);
- } else if (SegmentCount != 0) {
- struct scatterlist *ScatterList = (struct scatterlist *) BufferPointer;
- int Segment, Count;
-
- Count = pci_map_sg(HostAdapter->PCI_Device, ScatterList, SegmentCount,
- Command->sc_data_direction);
+ Count = scsi_dma_map(Command);
+ BUG_ON(Count < 0);
+ if (Count) {
+ struct scatterlist *sg;
+ int i;
+
CCB->Opcode = BusLogic_InitiatorCCB_ScatterGather;
CCB->DataLength = Count * sizeof(struct BusLogic_ScatterGatherSegment);
if (BusLogic_MultiMasterHostAdapterP(HostAdapter))
CCB->DataPointer = (unsigned int) CCB->DMA_Handle + ((unsigned long) &CCB->ScatterGatherList - (unsigned long) CCB);
else
CCB->DataPointer = Virtual_to_32Bit_Virtual(CCB->ScatterGatherList);
- for (Segment = 0; Segment < Count; Segment++) {
- CCB->ScatterGatherList[Segment].SegmentByteCount = sg_dma_len(ScatterList + Segment);
- CCB->ScatterGatherList[Segment].SegmentDataPointer = sg_dma_address(ScatterList + Segment);
+
+ scsi_for_each_sg(Command, sg, Count, i) {
+ CCB->ScatterGatherList[i].SegmentByteCount =
+ sg_dma_len(sg);
+ CCB->ScatterGatherList[i].SegmentDataPointer =
+ sg_dma_address(sg);
}
- } else {
+ } else if (!Count) {
CCB->Opcode = BusLogic_InitiatorCCB;
CCB->DataLength = BufferLength;
CCB->DataPointer = 0;
}
+
switch (CDB[0]) {
case READ_6:
case READ_10:
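
The BusLogic conversion above is one instance of the data buffer accessor pattern applied throughout this merge; a minimal, driver-neutral sketch of that pattern follows (scsi_dma_map(), scsi_for_each_sg(), sg_dma_address() and sg_dma_len() are the real helpers used here, while struct my_sg_entry and fill_sg_table() are hypothetical stand-ins for driver-specific code):

	#include <scsi/scsi_cmnd.h>
	#include <linux/scatterlist.h>

	/* Hypothetical hardware segment descriptor, for illustration only. */
	struct my_sg_entry {
		dma_addr_t addr;
		unsigned int len;
	};

	static int fill_sg_table(struct scsi_cmnd *cmd, struct my_sg_entry *tbl)
	{
		struct scatterlist *sg;
		int i, count;

		count = scsi_dma_map(cmd);	/* maps scsi_sglist(cmd) for DMA */
		if (count < 0)
			return count;		/* mapping failed */

		scsi_for_each_sg(cmd, sg, count, i) {
			tbl[i].addr = sg_dma_address(sg);
			tbl[i].len  = sg_dma_len(sg);
		}
		return count;			/* 0 means no data transfer */
	}

On completion the driver undoes the mapping with scsi_dma_unmap(cmd), as the BusLogic_DeallocateCCB() hunk above now does.
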
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index eb46cb0e3cb..9d2119b53ac 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -10,6 +10,7 @@ config RAID_ATTRS
config SCSI
tristate "SCSI device support"
depends on BLOCK
+ select SCSI_DMA if HAS_DMA
---help---
If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or
any other SCSI device under Linux, say Y and make sure that you know
@@ -29,6 +30,10 @@ config SCSI
However, do not compile this as a module if your root file system
(the one containing the directory /) is located on a SCSI device.
+config SCSI_DMA
+ bool
+ default n
+
config SCSI_TGT
tristate "SCSI target support"
depends on SCSI && EXPERIMENTAL
@@ -739,7 +744,7 @@ config SCSI_GENERIC_NCR53C400
config SCSI_IBMMCA
tristate "IBMMCA SCSI support"
- depends on MCA_LEGACY && SCSI
+ depends on MCA && SCSI
---help---
This is support for the IBM SCSI adapter found in many of the PS/2
series computers. These machines have an MCA bus, so you need to
@@ -1007,6 +1012,11 @@ config SCSI_STEX
To compile this driver as a module, choose M here: the
module will be called stex.
+config 53C700_BE_BUS
+ bool
+ depends on SCSI_A4000T || SCSI_ZORRO7XX || MVME16x_SCSI || BVME6000_SCSI
+ default y
+
config SCSI_SYM53C8XX_2
tristate "SYM53C8XX Version 2 SCSI support"
depends on PCI && SCSI
@@ -1611,13 +1621,25 @@ config FASTLANE_SCSI
If you have the Phase5 Fastlane Z3 SCSI controller, or plan to use
one in the near future, say Y to this question. Otherwise, say N.
-config SCSI_AMIGA7XX
- bool "Amiga NCR53c710 SCSI support (EXPERIMENTAL)"
- depends on AMIGA && SCSI && EXPERIMENTAL && BROKEN
+config SCSI_A4000T
+ tristate "A4000T NCR53c710 SCSI support (EXPERIMENTAL)"
+ depends on AMIGA && SCSI && EXPERIMENTAL
+ select SCSI_SPI_ATTRS
help
- Support for various NCR53c710-based SCSI controllers on the Amiga.
+ If you have an Amiga 4000T and have SCSI devices connected to the
+ built-in SCSI controller, say Y. Otherwise, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called a4000t.
+
+config SCSI_ZORRO7XX
+ tristate "Zorro NCR53c710 SCSI support (EXPERIMENTAL)"
+ depends on ZORRO && SCSI && EXPERIMENTAL
+ select SCSI_SPI_ATTRS
+ help
+ Support for various NCR53c710-based SCSI controllers on Zorro
+ expansion boards for the Amiga.
This includes:
- - the builtin SCSI controller on the Amiga 4000T,
- the Amiga 4091 Zorro III SCSI-2 controller,
- the MacroSystem Development's WarpEngine Amiga SCSI-2 controller
(info at
@@ -1625,10 +1647,6 @@ config SCSI_AMIGA7XX
- the SCSI controller on the Phase5 Blizzard PowerUP 603e+
accelerator card for the Amiga 1200,
- the SCSI controller on the GVP Turbo 040/060 accelerator.
- Note that all of the above SCSI controllers, except for the builtin
- SCSI controller on the Amiga 4000T, reside on the Zorro expansion
- bus, so you also have to enable Zorro bus support if you want to use
- them.
config OKTAGON_SCSI
tristate "BSC Oktagon SCSI support (EXPERIMENTAL)"
@@ -1712,8 +1730,8 @@ config MVME147_SCSI
single-board computer.
config MVME16x_SCSI
- bool "NCR53C710 SCSI driver for MVME16x"
- depends on MVME16x && SCSI && BROKEN
+ tristate "NCR53C710 SCSI driver for MVME16x"
+ depends on MVME16x && SCSI
select SCSI_SPI_ATTRS
help
The Motorola MVME162, 166, 167, 172 and 177 boards use the NCR53C710
@@ -1721,22 +1739,14 @@ config MVME16x_SCSI
will want to say Y to this question.
config BVME6000_SCSI
- bool "NCR53C710 SCSI driver for BVME6000"
- depends on BVME6000 && SCSI && BROKEN
+ tristate "NCR53C710 SCSI driver for BVME6000"
+ depends on BVME6000 && SCSI
select SCSI_SPI_ATTRS
help
The BVME4000 and BVME6000 boards from BVM Ltd use the NCR53C710
SCSI controller chip. Almost everyone using one of these boards
will want to say Y to this question.
-config SCSI_NCR53C7xx_FAST
- bool "allow FAST-SCSI [10MHz]"
- depends on SCSI_AMIGA7XX || MVME16x_SCSI || BVME6000_SCSI
- help
- This will enable 10MHz FAST-SCSI transfers with your host
- adapter. Some systems have problems with that speed, so it's safest
- to say N here.
-
config SUN3_SCSI
tristate "Sun3 NCR5380 SCSI"
depends on SUN3 && SCSI
@@ -1766,8 +1776,6 @@ config SCSI_SUNESP
To compile this driver as a module, choose M here: the
module will be called esp.
-# bool 'Cyberstorm Mk III SCSI support (EXPERIMENTAL)' CONFIG_CYBERSTORMIII_SCSI
-
config ZFCP
tristate "FCP host bus adapter driver for IBM eServer zSeries"
depends on S390 && QDIO && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index b1b63279158..0f868955715 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -37,7 +37,8 @@ obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
-obj-$(CONFIG_SCSI_AMIGA7XX) += amiga7xx.o 53c7xx.o
+obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o
+obj-$(CONFIG_SCSI_ZORRO7XX) += 53c700.o zorro7xx.o
obj-$(CONFIG_A3000_SCSI) += a3000.o wd33c93.o
obj-$(CONFIG_A2091_SCSI) += a2091.o wd33c93.o
obj-$(CONFIG_GVP11_SCSI) += gvp11.o wd33c93.o
@@ -53,8 +54,8 @@ obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o
obj-$(CONFIG_MAC_SCSI) += mac_scsi.o
obj-$(CONFIG_SCSI_MAC_ESP) += mac_esp.o NCR53C9x.o
obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o
-obj-$(CONFIG_MVME16x_SCSI) += mvme16x.o 53c7xx.o
-obj-$(CONFIG_BVME6000_SCSI) += bvme6000.o 53c7xx.o
+obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o
+obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
obj-$(CONFIG_SCSI_SIM710) += 53c700.o sim710.o
obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o
obj-$(CONFIG_SCSI_PSI240I) += psi240i.o
@@ -89,7 +90,6 @@ obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
obj-$(CONFIG_SCSI_PAS16) += pas16.o
obj-$(CONFIG_SCSI_SEAGATE) += seagate.o
-obj-$(CONFIG_SCSI_FD_8xx) += seagate.o
obj-$(CONFIG_SCSI_T128) += t128.o
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
obj-$(CONFIG_SCSI_DTC3280) += dtc.o
@@ -148,9 +148,9 @@ obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
obj-$(CONFIG_SCSI_WAIT_SCAN) += scsi_wait_scan.o
scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \
- scsicam.o scsi_error.o scsi_lib.o \
- scsi_scan.o scsi_sysfs.o \
- scsi_devinfo.o
+ scsicam.o scsi_error.o scsi_lib.o
+scsi_mod-$(CONFIG_SCSI_DMA) += scsi_lib_dma.o
+scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o
scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
@@ -168,10 +168,8 @@ NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o
# Files generated that shall be removed upon make clean
-clean-files := 53c7xx_d.h 53c700_d.h \
- 53c7xx_u.h 53c700_u.h
+clean-files := 53c700_d.h 53c700_u.h
-$(obj)/53c7xx.o: $(obj)/53c7xx_d.h $(obj)/53c7xx_u.h
$(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
# If you want to play with the firmware, uncomment
@@ -179,11 +177,6 @@ $(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
ifdef GENERATE_FIRMWARE
-$(obj)/53c7xx_d.h: $(src)/53c7xx.scr $(src)/script_asm.pl
- $(CPP) -traditional -DCHIP=710 - < $< | grep -v '^#' | $(PERL) -s $(src)/script_asm.pl -ncr7x0_family $@ $(@:_d.h=_u.h)
-
-$(obj)/53c7xx_u.h: $(obj)/53c7xx_d.h
-
$(obj)/53c700_d.h: $(src)/53c700.scr $(src)/script_asm.pl
$(PERL) -s $(src)/script_asm.pl -ncr7x0_family $@ $(@:_d.h=_u.h) < $<
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 88ea5a1fb60..f8e449a98d2 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -347,7 +347,7 @@ static int NCR5380_poll_politely(struct Scsi_Host *instance, int reg, int bit, i
if((r & bit) == val)
return 0;
if(!in_interrupt())
- yield();
+ cond_resched();
else
cpu_relax();
}
@@ -357,7 +357,7 @@ static int NCR5380_poll_politely(struct Scsi_Host *instance, int reg, int bit, i
static struct {
unsigned char value;
const char *name;
-} phases[] = {
+} phases[] __maybe_unused = {
{PHASE_DATAOUT, "DATAOUT"},
{PHASE_DATAIN, "DATAIN"},
{PHASE_CMDOUT, "CMDOUT"},
@@ -575,7 +575,8 @@ static irqreturn_t __init probe_intr(int irq, void *dev_id)
* Locks: none, irqs must be enabled on entry
*/
-static int __init NCR5380_probe_irq(struct Scsi_Host *instance, int possible)
+static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
+ int possible)
{
NCR5380_local_declare();
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
@@ -629,7 +630,8 @@ static int __init NCR5380_probe_irq(struct Scsi_Host *instance, int possible)
* Locks: none
*/
-static void __init NCR5380_print_options(struct Scsi_Host *instance)
+static void __init __maybe_unused
+NCR5380_print_options(struct Scsi_Host *instance)
{
printk(" generic options"
#ifdef AUTOPROBE_IRQ
@@ -703,8 +705,8 @@ char *lprint_command(unsigned char *cmd, char *pos, char *buffer, int len);
static
char *lprint_opcode(int opcode, char *pos, char *buffer, int length);
-static
-int NCR5380_proc_info(struct Scsi_Host *instance, char *buffer, char **start, off_t offset, int length, int inout)
+static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance,
+ char *buffer, char **start, off_t offset, int length, int inout)
{
char *pos = buffer;
struct NCR5380_hostdata *hostdata;
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 713a108c02e..bccf13f7153 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -299,7 +299,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance);
static irqreturn_t NCR5380_intr(int irq, void *dev_id);
#endif
static void NCR5380_main(struct work_struct *work);
-static void NCR5380_print_options(struct Scsi_Host *instance);
+static void __maybe_unused NCR5380_print_options(struct Scsi_Host *instance);
#ifdef NDEBUG
static void NCR5380_print_phase(struct Scsi_Host *instance);
static void NCR5380_print(struct Scsi_Host *instance);
@@ -307,8 +307,8 @@ static void NCR5380_print(struct Scsi_Host *instance);
static int NCR5380_abort(Scsi_Cmnd * cmd);
static int NCR5380_bus_reset(Scsi_Cmnd * cmd);
static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *));
-static int NCR5380_proc_info(struct Scsi_Host *instance, char *buffer, char **start,
-off_t offset, int length, int inout);
+static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance,
+ char *buffer, char **start, off_t offset, int length, int inout);
static void NCR5380_reselect(struct Scsi_Host *instance);
static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag);
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index 7c0b17f8690..eda8c48f6be 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -698,7 +698,7 @@ static int NCR53c406a_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
int i;
VDEB(printk("NCR53c406a_queue called\n"));
- DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->target, SCpnt->lun, SCpnt->request_bufflen));
+ DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->target, SCpnt->lun, scsi_bufflen(SCpnt)));
#if 0
VDEB(for (i = 0; i < SCpnt->cmd_len; i++)
@@ -785,8 +785,8 @@ static void NCR53c406a_intr(void *dev_id)
unsigned char status, int_reg;
#if USE_PIO
unsigned char pio_status;
- struct scatterlist *sglist;
- unsigned int sgcount;
+ struct scatterlist *sg;
+ int i;
#endif
VDEB(printk("NCR53c406a_intr called\n"));
@@ -866,22 +866,18 @@ static void NCR53c406a_intr(void *dev_id)
current_SC->SCp.phase = data_out;
VDEB(printk("NCR53c406a: Data-Out phase\n"));
outb(FLUSH_FIFO, CMD_REG);
- LOAD_DMA_COUNT(current_SC->request_bufflen); /* Max transfer size */
+ LOAD_DMA_COUNT(scsi_bufflen(current_SC)); /* Max transfer size */
#if USE_DMA /* No s/g support for DMA */
- NCR53c406a_dma_write(current_SC->request_buffer, current_SC->request_bufflen);
+ NCR53c406a_dma_write(scsi_sglist(current_SC),
+					       scsi_bufflen(current_SC));
+
#endif /* USE_DMA */
outb(TRANSFER_INFO | DMA_OP, CMD_REG);
#if USE_PIO
- if (!current_SC->use_sg) /* Don't use scatter-gather */
- NCR53c406a_pio_write(current_SC->request_buffer, current_SC->request_bufflen);
- else { /* use scatter-gather */
- sgcount = current_SC->use_sg;
- sglist = current_SC->request_buffer;
- while (sgcount--) {
- NCR53c406a_pio_write(page_address(sglist->page) + sglist->offset, sglist->length);
- sglist++;
- }
- }
+ scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) {
+ NCR53c406a_pio_write(page_address(sg->page) + sg->offset,
+ sg->length);
+ }
REG0;
#endif /* USE_PIO */
}
@@ -893,22 +889,17 @@ static void NCR53c406a_intr(void *dev_id)
current_SC->SCp.phase = data_in;
VDEB(printk("NCR53c406a: Data-In phase\n"));
outb(FLUSH_FIFO, CMD_REG);
- LOAD_DMA_COUNT(current_SC->request_bufflen); /* Max transfer size */
+ LOAD_DMA_COUNT(scsi_bufflen(current_SC)); /* Max transfer size */
#if USE_DMA /* No s/g support for DMA */
- NCR53c406a_dma_read(current_SC->request_buffer, current_SC->request_bufflen);
+ NCR53c406a_dma_read(scsi_sglist(current_SC),
+					      scsi_bufflen(current_SC));
#endif /* USE_DMA */
outb(TRANSFER_INFO | DMA_OP, CMD_REG);
#if USE_PIO
- if (!current_SC->use_sg) /* Don't use scatter-gather */
- NCR53c406a_pio_read(current_SC->request_buffer, current_SC->request_bufflen);
- else { /* Use scatter-gather */
- sgcount = current_SC->use_sg;
- sglist = current_SC->request_buffer;
- while (sgcount--) {
- NCR53c406a_pio_read(page_address(sglist->page) + sglist->offset, sglist->length);
- sglist++;
- }
- }
+ scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) {
+ NCR53c406a_pio_read(page_address(sg->page) + sg->offset,
+ sg->length);
+ }
REG0;
#endif /* USE_PIO */
}
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 7f4241bfb9c..f608d4a1d6d 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -19,27 +19,6 @@
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * --------------------------------------------------------------------------
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Where this Software is combined with software released under the terms of
- * the GNU General Public License ("GPL") and the terms of the GPL would require the
- * combined work to also be released under the terms of the GPL, the terms
- * and conditions of this License will apply in addition to those of the
- * GPL with the exception of any terms or conditions of this License that
- * conflict with, or are expressly prohibited by, the GPL.
- *
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -75,6 +54,8 @@
* 9/28/04 Christoph Hellwig <hch@lst.de>
* - merge the two source files
* - remove internal queueing code
+ * 14/06/07 Alan Cox <alan@redhat.com>
+ * - Grand cleanup and Linuxisation
*/
#include <linux/module.h>
@@ -102,14 +83,12 @@
#include "a100u2w.h"
-#define JIFFIES_TO_MS(t) ((t) * 1000 / HZ)
-#define MS_TO_JIFFIES(j) ((j * HZ) / 1000)
+static struct orc_scb *__orc_alloc_scb(struct orc_host * host);
+static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb);
-static ORC_SCB *orc_alloc_scb(ORC_HCS * hcsp);
-static void inia100SCBPost(BYTE * pHcb, BYTE * pScb);
+static struct orc_nvram nvram, *nvramp = &nvram;
-static NVRAM nvram, *nvramp = &nvram;
-static UCHAR dftNvRam[64] =
+static u8 default_nvram[64] =
{
/*----------header -------------*/
0x01, /* 0x00: Sub System Vendor ID 0 */
@@ -158,823 +137,882 @@ static UCHAR dftNvRam[64] =
};
-/***************************************************************************/
-static void waitForPause(unsigned amount)
-{
- ULONG the_time = jiffies + MS_TO_JIFFIES(amount);
- while (time_before_eq(jiffies, the_time))
- cpu_relax();
-}
-
-/***************************************************************************/
-static UCHAR waitChipReady(ORC_HCS * hcsp)
+static u8 wait_chip_ready(struct orc_host * host)
{
int i;
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
- if (ORC_RD(hcsp->HCS_Base, ORC_HCTRL) & HOSTSTOP) /* Wait HOSTSTOP set */
+ if (inb(host->base + ORC_HCTRL) & HOSTSTOP) /* Wait HOSTSTOP set */
return 1;
- waitForPause(100); /* wait 100ms before try again */
+ mdelay(100);
}
return 0;
}
-/***************************************************************************/
-static UCHAR waitFWReady(ORC_HCS * hcsp)
+static u8 wait_firmware_ready(struct orc_host * host)
{
int i;
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
- if (ORC_RD(hcsp->HCS_Base, ORC_HSTUS) & RREADY) /* Wait READY set */
+ if (inb(host->base + ORC_HSTUS) & RREADY) /* Wait READY set */
return 1;
- waitForPause(100); /* wait 100ms before try again */
+ mdelay(100); /* wait 100ms before try again */
}
return 0;
}
/***************************************************************************/
-static UCHAR waitSCSIRSTdone(ORC_HCS * hcsp)
+static u8 wait_scsi_reset_done(struct orc_host * host)
{
int i;
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
- if (!(ORC_RD(hcsp->HCS_Base, ORC_HCTRL) & SCSIRST)) /* Wait SCSIRST done */
+ if (!(inb(host->base + ORC_HCTRL) & SCSIRST)) /* Wait SCSIRST done */
return 1;
- waitForPause(100); /* wait 100ms before try again */
+ mdelay(100); /* wait 100ms before try again */
}
return 0;
}
/***************************************************************************/
-static UCHAR waitHDOoff(ORC_HCS * hcsp)
+static u8 wait_HDO_off(struct orc_host * host)
{
int i;
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
- if (!(ORC_RD(hcsp->HCS_Base, ORC_HCTRL) & HDO)) /* Wait HDO off */
+ if (!(inb(host->base + ORC_HCTRL) & HDO)) /* Wait HDO off */
return 1;
- waitForPause(100); /* wait 100ms before try again */
+ mdelay(100); /* wait 100ms before try again */
}
return 0;
}
/***************************************************************************/
-static UCHAR waitHDIset(ORC_HCS * hcsp, UCHAR * pData)
+static u8 wait_hdi_set(struct orc_host * host, u8 * data)
{
int i;
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
- if ((*pData = ORC_RD(hcsp->HCS_Base, ORC_HSTUS)) & HDI)
+ if ((*data = inb(host->base + ORC_HSTUS)) & HDI)
return 1; /* Wait HDI set */
- waitForPause(100); /* wait 100ms before try again */
+ mdelay(100); /* wait 100ms before try again */
}
return 0;
}
/***************************************************************************/
-static unsigned short get_FW_version(ORC_HCS * hcsp)
+static unsigned short orc_read_fwrev(struct orc_host * host)
{
- UCHAR bData;
- union {
- unsigned short sVersion;
- unsigned char cVersion[2];
- } Version;
-
- ORC_WR(hcsp->HCS_Base + ORC_HDATA, ORC_CMD_VERSION);
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO);
- if (waitHDOoff(hcsp) == 0) /* Wait HDO off */
+ u16 version;
+ u8 data;
+
+ outb(ORC_CMD_VERSION, host->base + ORC_HDATA);
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
- if (waitHDIset(hcsp, &bData) == 0) /* Wait HDI set */
+ if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
return 0;
- Version.cVersion[0] = ORC_RD(hcsp->HCS_Base, ORC_HDATA);
- ORC_WR(hcsp->HCS_Base + ORC_HSTUS, bData); /* Clear HDI */
+ version = inb(host->base + ORC_HDATA);
+ outb(data, host->base + ORC_HSTUS); /* Clear HDI */
- if (waitHDIset(hcsp, &bData) == 0) /* Wait HDI set */
+ if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
return 0;
- Version.cVersion[1] = ORC_RD(hcsp->HCS_Base, ORC_HDATA);
- ORC_WR(hcsp->HCS_Base + ORC_HSTUS, bData); /* Clear HDI */
+ version |= inb(host->base + ORC_HDATA) << 8;
+ outb(data, host->base + ORC_HSTUS); /* Clear HDI */
- return (Version.sVersion);
+ return version;
}
/***************************************************************************/
-static UCHAR set_NVRAM(ORC_HCS * hcsp, unsigned char address, unsigned char value)
+static u8 orc_nv_write(struct orc_host * host, unsigned char address, unsigned char value)
{
- ORC_WR(hcsp->HCS_Base + ORC_HDATA, ORC_CMD_SET_NVM); /* Write command */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO);
- if (waitHDOoff(hcsp) == 0) /* Wait HDO off */
+ outb(ORC_CMD_SET_NVM, host->base + ORC_HDATA); /* Write command */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
- ORC_WR(hcsp->HCS_Base + ORC_HDATA, address); /* Write address */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO);
- if (waitHDOoff(hcsp) == 0) /* Wait HDO off */
+ outb(address, host->base + ORC_HDATA); /* Write address */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
- ORC_WR(hcsp->HCS_Base + ORC_HDATA, value); /* Write value */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO);
- if (waitHDOoff(hcsp) == 0) /* Wait HDO off */
+ outb(value, host->base + ORC_HDATA); /* Write value */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
return 1;
}
/***************************************************************************/
-static UCHAR get_NVRAM(ORC_HCS * hcsp, unsigned char address, unsigned char *pDataIn)
+static u8 orc_nv_read(struct orc_host * host, u8 address, u8 *ptr)
{
- unsigned char bData;
+ unsigned char data;
- ORC_WR(hcsp->HCS_Base + ORC_HDATA, ORC_CMD_GET_NVM); /* Write command */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO);
- if (waitHDOoff(hcsp) == 0) /* Wait HDO off */
+ outb(ORC_CMD_GET_NVM, host->base + ORC_HDATA); /* Write command */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
- ORC_WR(hcsp->HCS_Base + ORC_HDATA, address); /* Write address */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO);
- if (waitHDOoff(hcsp) == 0) /* Wait HDO off */
+ outb(address, host->base + ORC_HDATA); /* Write address */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
- if (waitHDIset(hcsp, &bData) == 0) /* Wait HDI set */
+ if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
return 0;
- *pDataIn = ORC_RD(hcsp->HCS_Base, ORC_HDATA);
- ORC_WR(hcsp->HCS_Base + ORC_HSTUS, bData); /* Clear HDI */
+ *ptr = inb(host->base + ORC_HDATA);
+ outb(data, host->base + ORC_HSTUS); /* Clear HDI */
return 1;
+
}
-/***************************************************************************/
-static void orc_exec_scb(ORC_HCS * hcsp, ORC_SCB * scbp)
+/**
+ *	orc_exec_scb - Queue an SCB with the host adapter
+ * @host: host adapter the SCB belongs to
+ * @scb: SCB to queue for execution
+ */
+
+static void orc_exec_scb(struct orc_host * host, struct orc_scb * scb)
{
- scbp->SCB_Status = ORCSCB_POST;
- ORC_WR(hcsp->HCS_Base + ORC_PQUEUE, scbp->SCB_ScbIdx);
- return;
+ scb->status = ORCSCB_POST;
+ outb(scb->scbidx, host->base + ORC_PQUEUE);
}
-/***********************************************************************
- Read SCSI H/A configuration parameters from serial EEPROM
-************************************************************************/
-static int se2_rd_all(ORC_HCS * hcsp)
+/**
+ * se2_rd_all - read SCSI parameters from EEPROM
+ * @host: Host whose EEPROM is being loaded
+ *
+ * Read SCSI H/A configuration parameters from serial EEPROM
+ */
+
+static int se2_rd_all(struct orc_host * host)
{
int i;
- UCHAR *np, chksum = 0;
+ u8 *np, chksum = 0;
- np = (UCHAR *) nvramp;
+ np = (u8 *) nvramp;
for (i = 0; i < 64; i++, np++) { /* <01> */
- if (get_NVRAM(hcsp, (unsigned char) i, np) == 0)
+ if (orc_nv_read(host, (u8) i, np) == 0)
return -1;
-// *np++ = get_NVRAM(hcsp, (unsigned char ) i);
}
-/*------ Is ckecksum ok ? ------*/
- np = (UCHAR *) nvramp;
+	/*------ Is checksum ok? ------*/
+ np = (u8 *) nvramp;
for (i = 0; i < 63; i++)
chksum += *np++;
- if (nvramp->CheckSum != (UCHAR) chksum)
+ if (nvramp->CheckSum != (u8) chksum)
return -1;
return 1;
}
-/************************************************************************
- Update SCSI H/A configuration parameters from serial EEPROM
-*************************************************************************/
-static void se2_update_all(ORC_HCS * hcsp)
+/**
+ * se2_update_all - update the EEPROM
+ * @host: Host whose EEPROM is being updated
+ *
+ * Update changed bytes in the EEPROM image.
+ */
+
+static void se2_update_all(struct orc_host * host)
{ /* setup default pattern */
int i;
- UCHAR *np, *np1, chksum = 0;
+ u8 *np, *np1, chksum = 0;
/* Calculate checksum first */
- np = (UCHAR *) dftNvRam;
+ np = (u8 *) default_nvram;
for (i = 0; i < 63; i++)
chksum += *np++;
*np = chksum;
- np = (UCHAR *) dftNvRam;
- np1 = (UCHAR *) nvramp;
+ np = (u8 *) default_nvram;
+ np1 = (u8 *) nvramp;
for (i = 0; i < 64; i++, np++, np1++) {
- if (*np != *np1) {
- set_NVRAM(hcsp, (unsigned char) i, *np);
- }
+ if (*np != *np1)
+ orc_nv_write(host, (u8) i, *np);
}
- return;
}
-/*************************************************************************
- Function name : read_eeprom
-**************************************************************************/
-static void read_eeprom(ORC_HCS * hcsp)
+/**
+ * read_eeprom - load EEPROM
+ * @host: Host EEPROM to read
+ *
+ *	Read the EEPROM for a given host. If it is invalid or the read
+ *	fails, restore the defaults and use them.
+ */
+
+static void read_eeprom(struct orc_host * host)
{
- if (se2_rd_all(hcsp) != 1) {
- se2_update_all(hcsp); /* setup default pattern */
- se2_rd_all(hcsp); /* load again */
+ if (se2_rd_all(host) != 1) {
+ se2_update_all(host); /* setup default pattern */
+ se2_rd_all(host); /* load again */
}
}
-/***************************************************************************/
-static UCHAR load_FW(ORC_HCS * hcsp)
+/**
+ * orc_load_firmware - initialise firmware
+ * @host: Host to set up
+ *
+ * Load the firmware from the EEPROM into controller SRAM. This
+ * is basically a 4K block copy and then a 4K block read to check
+ * correctness. The rest is convulted by the indirect interfaces
+ *	correctness. The rest is convoluted by the indirect interfaces
+ *	in the hardware.
+
+static u8 orc_load_firmware(struct orc_host * host)
{
- U32 dData;
- USHORT wBIOSAddress;
- USHORT i;
- UCHAR *pData, bData;
-
-
- bData = ORC_RD(hcsp->HCS_Base, ORC_GCFG);
- ORC_WR(hcsp->HCS_Base + ORC_GCFG, bData | EEPRG); /* Enable EEPROM programming */
- ORC_WR(hcsp->HCS_Base + ORC_EBIOSADR2, 0x00);
- ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, 0x00);
- if (ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA) != 0x55) {
- ORC_WR(hcsp->HCS_Base + ORC_GCFG, bData); /* Disable EEPROM programming */
+ u32 data32;
+ u16 bios_addr;
+ u16 i;
+ u8 *data32_ptr, data;
+
+
+ /* Set up the EEPROM for access */
+
+ data = inb(host->base + ORC_GCFG);
+ outb(data | EEPRG, host->base + ORC_GCFG); /* Enable EEPROM programming */
+ outb(0x00, host->base + ORC_EBIOSADR2);
+ outw(0x0000, host->base + ORC_EBIOSADR0);
+ if (inb(host->base + ORC_EBIOSDATA) != 0x55) {
+ outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */
return 0;
}
- ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, 0x01);
- if (ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA) != 0xAA) {
- ORC_WR(hcsp->HCS_Base + ORC_GCFG, bData); /* Disable EEPROM programming */
+ outw(0x0001, host->base + ORC_EBIOSADR0);
+ if (inb(host->base + ORC_EBIOSDATA) != 0xAA) {
+ outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */
return 0;
}
- ORC_WR(hcsp->HCS_Base + ORC_RISCCTL, PRGMRST | DOWNLOAD); /* Enable SRAM programming */
- pData = (UCHAR *) & dData;
- dData = 0; /* Initial FW address to 0 */
- ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, 0x10);
- *pData = ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA); /* Read from BIOS */
- ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, 0x11);
- *(pData + 1) = ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA); /* Read from BIOS */
- ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, 0x12);
- *(pData + 2) = ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA); /* Read from BIOS */
- ORC_WR(hcsp->HCS_Base + ORC_EBIOSADR2, *(pData + 2));
- ORC_WRLONG(hcsp->HCS_Base + ORC_FWBASEADR, dData); /* Write FW address */
-
- wBIOSAddress = (USHORT) dData; /* FW code locate at BIOS address + ? */
- for (i = 0, pData = (UCHAR *) & dData; /* Download the code */
+
+ outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Enable SRAM programming */
+ data32_ptr = (u8 *) & data32;
+ data32 = 0; /* Initial FW address to 0 */
+ outw(0x0010, host->base + ORC_EBIOSADR0);
+ *data32_ptr = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
+ outw(0x0011, host->base + ORC_EBIOSADR0);
+ *(data32_ptr + 1) = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
+ outw(0x0012, host->base + ORC_EBIOSADR0);
+ *(data32_ptr + 2) = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
+ outw(*(data32_ptr + 2), host->base + ORC_EBIOSADR2);
+ outl(data32, host->base + ORC_FWBASEADR); /* Write FW address */
+
+ /* Copy the code from the BIOS to the SRAM */
+
+ bios_addr = (u16) data32; /* FW code locate at BIOS address + ? */
+ for (i = 0, data32_ptr = (u8 *) & data32; /* Download the code */
i < 0x1000; /* Firmware code size = 4K */
- i++, wBIOSAddress++) {
- ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, wBIOSAddress);
- *pData++ = ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA); /* Read from BIOS */
+ i++, bios_addr++) {
+ outw(bios_addr, host->base + ORC_EBIOSADR0);
+ *data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
if ((i % 4) == 3) {
- ORC_WRLONG(hcsp->HCS_Base + ORC_RISCRAM, dData); /* Write every 4 bytes */
- pData = (UCHAR *) & dData;
+ outl(data32, host->base + ORC_RISCRAM); /* Write every 4 bytes */
+ data32_ptr = (u8 *) & data32;
}
}
- ORC_WR(hcsp->HCS_Base + ORC_RISCCTL, PRGMRST | DOWNLOAD); /* Reset program count 0 */
- wBIOSAddress -= 0x1000; /* Reset the BIOS adddress */
- for (i = 0, pData = (UCHAR *) & dData; /* Check the code */
+ /* Go back and check they match */
+
+ outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Reset program count 0 */
+ bios_addr -= 0x1000; /* Reset the BIOS address */
+ for (i = 0, data32_ptr = (u8 *) & data32; /* Check the code */
i < 0x1000; /* Firmware code size = 4K */
- i++, wBIOSAddress++) {
- ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, wBIOSAddress);
- *pData++ = ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA); /* Read from BIOS */
+ i++, bios_addr++) {
+ outw(bios_addr, host->base + ORC_EBIOSADR0);
+ *data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
if ((i % 4) == 3) {
- if (ORC_RDLONG(hcsp->HCS_Base, ORC_RISCRAM) != dData) {
- ORC_WR(hcsp->HCS_Base + ORC_RISCCTL, PRGMRST); /* Reset program to 0 */
- ORC_WR(hcsp->HCS_Base + ORC_GCFG, bData); /*Disable EEPROM programming */
+ if (inl(host->base + ORC_RISCRAM) != data32) {
+ outb(PRGMRST, host->base + ORC_RISCCTL); /* Reset program to 0 */
+ outb(data, host->base + ORC_GCFG); /*Disable EEPROM programming */
return 0;
}
- pData = (UCHAR *) & dData;
+ data32_ptr = (u8 *) & data32;
}
}
- ORC_WR(hcsp->HCS_Base + ORC_RISCCTL, PRGMRST); /* Reset program to 0 */
- ORC_WR(hcsp->HCS_Base + ORC_GCFG, bData); /* Disable EEPROM programming */
+
+ /* Success */
+ outb(PRGMRST, host->base + ORC_RISCCTL); /* Reset program to 0 */
+ outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */
return 1;
}
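The download and verify passes above share one structure: bytes are read from the BIOS window one at a time, accumulated into a 32-bit word, and flushed (or compared) on every fourth byte. Below is a minimal user-space sketch of that accumulate-and-flush loop, with in-memory buffers standing in for the BIOS window and controller SRAM rather than the EBIOSADR/RISCRAM port accesses used above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FW_SIZE 0x1000			/* 4K firmware image */

static uint8_t bios_rom[FW_SIZE];	/* stand-in for the BIOS window */
static uint32_t sram[FW_SIZE / 4];	/* stand-in for controller SRAM */

/* Download: accumulate bytes into a 32-bit word, flush every 4 bytes. */
static void download(void)
{
	uint32_t data32 = 0;
	uint8_t *p = (uint8_t *)&data32;
	int i;

	for (i = 0; i < FW_SIZE; i++) {
		*p++ = bios_rom[i];
		if ((i % 4) == 3) {		/* word complete */
			sram[i / 4] = data32;
			p = (uint8_t *)&data32;
		}
	}
}

/* Verify: rebuild each word from the ROM and compare with what was written. */
static int verify(void)
{
	uint32_t data32 = 0;
	uint8_t *p = (uint8_t *)&data32;
	int i;

	for (i = 0; i < FW_SIZE; i++) {
		*p++ = bios_rom[i];
		if ((i % 4) == 3) {
			if (sram[i / 4] != data32)
				return 0;	/* mismatch: abort */
			p = (uint8_t *)&data32;
		}
	}
	return 1;
}

int main(void)
{
	memset(bios_rom, 0x5A, sizeof(bios_rom));
	download();
	printf("verify: %s\n", verify() ? "ok" : "failed");
	return 0;
}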
/***************************************************************************/
-static void setup_SCBs(ORC_HCS * hcsp)
+static void setup_SCBs(struct orc_host * host)
{
- ORC_SCB *pVirScb;
+ struct orc_scb *scb;
int i;
- ESCB *pVirEscb;
- dma_addr_t pPhysEscb;
+ struct orc_extended_scb *escb;
+ dma_addr_t escb_phys;
- /* Setup SCB HCS_Base and SCB Size registers */
- ORC_WR(hcsp->HCS_Base + ORC_SCBSIZE, ORC_MAXQUEUE); /* Total number of SCBs */
- /* SCB HCS_Base address 0 */
- ORC_WRLONG(hcsp->HCS_Base + ORC_SCBBASE0, hcsp->HCS_physScbArray);
- /* SCB HCS_Base address 1 */
- ORC_WRLONG(hcsp->HCS_Base + ORC_SCBBASE1, hcsp->HCS_physScbArray);
+ /* Setup SCB base and SCB Size registers */
+ outb(ORC_MAXQUEUE, host->base + ORC_SCBSIZE); /* Total number of SCBs */
+ /* SCB base address 0 */
+ outl(host->scb_phys, host->base + ORC_SCBBASE0);
+ /* SCB base address 1 */
+ outl(host->scb_phys, host->base + ORC_SCBBASE1);
/* setup scatter list address with one buffer */
- pVirScb = hcsp->HCS_virScbArray;
- pVirEscb = hcsp->HCS_virEscbArray;
+ scb = host->scb_virt;
+ escb = host->escb_virt;
for (i = 0; i < ORC_MAXQUEUE; i++) {
- pPhysEscb = (hcsp->HCS_physEscbArray + (sizeof(ESCB) * i));
- pVirScb->SCB_SGPAddr = (U32) pPhysEscb;
- pVirScb->SCB_SensePAddr = (U32) pPhysEscb;
- pVirScb->SCB_EScb = pVirEscb;
- pVirScb->SCB_ScbIdx = i;
- pVirScb++;
- pVirEscb++;
+ escb_phys = (host->escb_phys + (sizeof(struct orc_extended_scb) * i));
+ scb->sg_addr = (u32) escb_phys;
+ scb->sense_addr = (u32) escb_phys;
+ scb->escb = escb;
+ scb->scbidx = i;
+ scb++;
+ escb++;
}
-
- return;
}
-/***************************************************************************/
-static void initAFlag(ORC_HCS * hcsp)
+/**
+ * init_alloc_map - initialise allocation map
+ * @host: host map to configure
+ *
+ * Initialise the allocation maps for this device. If the device
+ * is not quiescent, the caller must hold the allocation lock.
+ */
+
+static void init_alloc_map(struct orc_host * host)
{
- UCHAR i, j;
+ u8 i, j;
for (i = 0; i < MAX_CHANNELS; i++) {
for (j = 0; j < 8; j++) {
- hcsp->BitAllocFlag[i][j] = 0xffffffff;
+ host->allocation_map[i][j] = 0xffffffff;
}
}
}
-/***************************************************************************/
-static int init_orchid(ORC_HCS * hcsp)
+/**
+ * init_orchid - initialise the host adapter
+ * @host:host adapter to initialise
+ *
+ * Initialise the controller and if necessary load the firmware.
+ *
+ * Returns -1 if the initialisation fails.
+ */
+
+static int init_orchid(struct orc_host * host)
{
- UBYTE *readBytep;
- USHORT revision;
- UCHAR i;
-
- initAFlag(hcsp);
- ORC_WR(hcsp->HCS_Base + ORC_GIMSK, 0xFF); /* Disable all interrupt */
- if (ORC_RD(hcsp->HCS_Base, ORC_HSTUS) & RREADY) { /* Orchid is ready */
- revision = get_FW_version(hcsp);
+ u8 *ptr;
+ u16 revision;
+ u8 i;
+
+ init_alloc_map(host);
+ outb(0xFF, host->base + ORC_GIMSK); /* Disable all interrupts */
+
+ if (inb(host->base + ORC_HSTUS) & RREADY) { /* Orchid is ready */
+ revision = orc_read_fwrev(host);
if (revision == 0xFFFF) {
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, DEVRST); /* Reset Host Adapter */
- if (waitChipReady(hcsp) == 0)
- return (-1);
- load_FW(hcsp); /* Download FW */
- setup_SCBs(hcsp); /* Setup SCB HCS_Base and SCB Size registers */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, 0); /* clear HOSTSTOP */
- if (waitFWReady(hcsp) == 0)
- return (-1);
+ outb(DEVRST, host->base + ORC_HCTRL); /* Reset Host Adapter */
+ if (wait_chip_ready(host) == 0)
+ return -1;
+ orc_load_firmware(host); /* Download FW */
+ setup_SCBs(host); /* Setup SCB base and SCB Size registers */
+ outb(0x00, host->base + ORC_HCTRL); /* clear HOSTSTOP */
+ if (wait_firmware_ready(host) == 0)
+ return -1;
/* Wait for firmware ready */
} else {
- setup_SCBs(hcsp); /* Setup SCB HCS_Base and SCB Size registers */
+ setup_SCBs(host); /* Setup SCB base and SCB Size registers */
}
} else { /* Orchid is not Ready */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, DEVRST); /* Reset Host Adapter */
- if (waitChipReady(hcsp) == 0)
- return (-1);
- load_FW(hcsp); /* Download FW */
- setup_SCBs(hcsp); /* Setup SCB HCS_Base and SCB Size registers */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO); /* Do Hardware Reset & */
+ outb(DEVRST, host->base + ORC_HCTRL); /* Reset Host Adapter */
+ if (wait_chip_ready(host) == 0)
+ return -1;
+ orc_load_firmware(host); /* Download FW */
+ setup_SCBs(host); /* Setup SCB base and SCB Size registers */
+ outb(HDO, host->base + ORC_HCTRL); /* Do Hardware Reset & */
/* clear HOSTSTOP */
- if (waitFWReady(hcsp) == 0) /* Wait for firmware ready */
- return (-1);
+ if (wait_firmware_ready(host) == 0) /* Wait for firmware ready */
+ return -1;
}
-/*------------- get serial EEProm settting -------*/
-
- read_eeprom(hcsp);
-
- if (nvramp->Revision != 1)
- return (-1);
+ /* Load an EEProm copy into RAM */
+ /* Assumes single threaded at this point */
+ read_eeprom(host);
- hcsp->HCS_SCSI_ID = nvramp->SCSI0Id;
- hcsp->HCS_BIOS = nvramp->BIOSConfig1;
- hcsp->HCS_MaxTar = MAX_TARGETS;
- readBytep = (UCHAR *) & (nvramp->Target00Config);
- for (i = 0; i < 16; readBytep++, i++) {
- hcsp->TargetFlag[i] = *readBytep;
- hcsp->MaximumTags[i] = ORC_MAXTAGS;
- } /* for */
+ if (nvramp->revision != 1)
+ return -1;
- if (nvramp->SCSI0Config & NCC_BUSRESET) { /* Reset SCSI bus */
- hcsp->HCS_Flags |= HCF_SCSI_RESET;
+ host->scsi_id = nvramp->scsi_id;
+ host->BIOScfg = nvramp->BIOSConfig1;
+ host->max_targets = MAX_TARGETS;
+ ptr = (u8 *) & (nvramp->Target00Config);
+ for (i = 0; i < 16; ptr++, i++) {
+ host->target_flag[i] = *ptr;
+ host->max_tags[i] = ORC_MAXTAGS;
}
- ORC_WR(hcsp->HCS_Base + ORC_GIMSK, 0xFB); /* enable RP FIFO interrupt */
- return (0);
+
+ if (nvramp->SCSI0Config & NCC_BUSRESET)
+ host->flags |= HCF_SCSI_RESET;
+ outb(0xFB, host->base + ORC_GIMSK); /* enable RP FIFO interrupt */
+ return 0;
}
-/*****************************************************************************
- Function name : orc_reset_scsi_bus
- Description : Reset registers, reset a hanging bus and
- kill active and disconnected commands for target w/o soft reset
- Input : pHCB - Pointer to host adapter structure
- Output : None.
- Return : pSRB - Pointer to SCSI request block.
-*****************************************************************************/
-static int orc_reset_scsi_bus(ORC_HCS * pHCB)
+/**
+ * orc_reset_scsi_bus - perform bus reset
+ * @host: host being reset
+ *
+ * Perform a full bus reset on the adapter.
+ */
+
+static int orc_reset_scsi_bus(struct orc_host * host)
{ /* I need Host Control Block Information */
- ULONG flags;
+ unsigned long flags;
- spin_lock_irqsave(&(pHCB->BitAllocFlagLock), flags);
+ spin_lock_irqsave(&host->allocation_lock, flags);
- initAFlag(pHCB);
+ init_alloc_map(host);
/* reset scsi bus */
- ORC_WR(pHCB->HCS_Base + ORC_HCTRL, SCSIRST);
- if (waitSCSIRSTdone(pHCB) == 0) {
- spin_unlock_irqrestore(&(pHCB->BitAllocFlagLock), flags);
+ outb(SCSIRST, host->base + ORC_HCTRL);
+ /* FIXME: We can spend up to a second with the lock held and
+ interrupts off here */
+ if (wait_scsi_reset_done(host) == 0) {
+ spin_unlock_irqrestore(&host->allocation_lock, flags);
return FAILED;
} else {
- spin_unlock_irqrestore(&(pHCB->BitAllocFlagLock), flags);
+ spin_unlock_irqrestore(&host->allocation_lock, flags);
return SUCCESS;
}
}
-/*****************************************************************************
- Function name : orc_device_reset
- Description : Reset registers, reset a hanging bus and
- kill active and disconnected commands for target w/o soft reset
- Input : pHCB - Pointer to host adapter structure
- Output : None.
- Return : pSRB - Pointer to SCSI request block.
-*****************************************************************************/
-static int orc_device_reset(ORC_HCS * pHCB, struct scsi_cmnd *SCpnt, unsigned int target)
+/**
+ * orc_device_reset - device reset handler
+ * @host: host to reset
+ * @cmd: command causing the reset
+ * @target: target device
+ *
+ * Reset registers, reset a hanging bus and kill active and disconnected
+ * commands for target w/o soft reset
+ */
+
+static int orc_device_reset(struct orc_host * host, struct scsi_cmnd *cmd, unsigned int target)
{ /* I need Host Control Block Information */
- ORC_SCB *pScb;
- ESCB *pVirEscb;
- ORC_SCB *pVirScb;
- UCHAR i;
- ULONG flags;
+ struct orc_scb *scb;
+ struct orc_extended_scb *escb;
+ struct orc_scb *host_scb;
+ u8 i;
+ unsigned long flags;
- spin_lock_irqsave(&(pHCB->BitAllocFlagLock), flags);
- pScb = (ORC_SCB *) NULL;
- pVirEscb = (ESCB *) NULL;
+ spin_lock_irqsave(&(host->allocation_lock), flags);
+ scb = (struct orc_scb *) NULL;
+ escb = (struct orc_extended_scb *) NULL;
/* setup scatter list address with one buffer */
- pVirScb = pHCB->HCS_virScbArray;
+ host_scb = host->scb_virt;
+
+ /* FIXME: is this safe if we then fail to issue the reset or race
+ a completion ? */
+ init_alloc_map(host);
- initAFlag(pHCB);
- /* device reset */
+ /* Find the scb corresponding to the command */
for (i = 0; i < ORC_MAXQUEUE; i++) {
- pVirEscb = pVirScb->SCB_EScb;
- if ((pVirScb->SCB_Status) && (pVirEscb->SCB_Srb == SCpnt))
+ escb = host_scb->escb;
+ if (host_scb->status && escb->srb == cmd)
break;
- pVirScb++;
+ host_scb++;
}
if (i == ORC_MAXQUEUE) {
- printk("Unable to Reset - No SCB Found\n");
- spin_unlock_irqrestore(&(pHCB->BitAllocFlagLock), flags);
+ printk(KERN_ERR "Unable to Reset - No SCB Found\n");
+ spin_unlock_irqrestore(&(host->allocation_lock), flags);
return FAILED;
}
- if ((pScb = orc_alloc_scb(pHCB)) == NULL) {
- spin_unlock_irqrestore(&(pHCB->BitAllocFlagLock), flags);
+
+ /* Allocate a new SCB for the reset command to the firmware */
+ if ((scb = __orc_alloc_scb(host)) == NULL) {
+ /* Can't happen.. */
+ spin_unlock_irqrestore(&(host->allocation_lock), flags);
return FAILED;
}
- pScb->SCB_Opcode = ORC_BUSDEVRST;
- pScb->SCB_Target = target;
- pScb->SCB_HaStat = 0;
- pScb->SCB_TaStat = 0;
- pScb->SCB_Status = 0x0;
- pScb->SCB_Link = 0xFF;
- pScb->SCB_Reserved0 = 0;
- pScb->SCB_Reserved1 = 0;
- pScb->SCB_XferLen = 0;
- pScb->SCB_SGLen = 0;
-
- pVirEscb->SCB_Srb = NULL;
- pVirEscb->SCB_Srb = SCpnt;
- orc_exec_scb(pHCB, pScb); /* Start execute SCB */
- spin_unlock_irqrestore(&(pHCB->BitAllocFlagLock), flags);
+
+ /* The device reset is handled by the firmware: we fill in an SCB and
+ fire it at the controller, which does the rest */
+ scb->opcode = ORC_BUSDEVRST;
+ scb->target = target;
+ scb->hastat = 0;
+ scb->tastat = 0;
+ scb->status = 0x0;
+ scb->link = 0xFF;
+ scb->reserved0 = 0;
+ scb->reserved1 = 0;
+ scb->xferlen = 0;
+ scb->sg_len = 0;
+
+ escb->srb = NULL;
+ escb->srb = cmd;
+ orc_exec_scb(host, scb); /* Start execute SCB */
+ spin_unlock_irqrestore(&host->allocation_lock, flags);
return SUCCESS;
}
+/**
+ * __orc_alloc_scb - allocate an SCB
+ * @host: host to allocate from
+ *
+ * Allocate an SCB and return a pointer to the SCB object. NULL
+ * is returned if no SCB is free. The caller must already hold
+ * the allocator lock at this point.
+ */
-/***************************************************************************/
-static ORC_SCB *__orc_alloc_scb(ORC_HCS * hcsp)
+
+static struct orc_scb *__orc_alloc_scb(struct orc_host * host)
{
- ORC_SCB *pTmpScb;
- UCHAR Ch;
- ULONG idx;
- UCHAR index;
- UCHAR i;
+ u8 channel;
+ unsigned long idx;
+ u8 index;
+ u8 i;
- Ch = hcsp->HCS_Index;
+ channel = host->index;
for (i = 0; i < 8; i++) {
for (index = 0; index < 32; index++) {
- if ((hcsp->BitAllocFlag[Ch][i] >> index) & 0x01) {
- hcsp->BitAllocFlag[Ch][i] &= ~(1 << index);
+ if ((host->allocation_map[channel][i] >> index) & 0x01) {
+ host->allocation_map[channel][i] &= ~(1 << index);
break;
}
}
idx = index + 32 * i;
- pTmpScb = (ORC_SCB *) ((ULONG) hcsp->HCS_virScbArray + (idx * sizeof(ORC_SCB)));
- return (pTmpScb);
+ /* Translate the index to a structure instance */
+ return (struct orc_scb *) ((unsigned long) host->scb_virt + (idx * sizeof(struct orc_scb)));
}
- return (NULL);
+ return NULL;
}
-static ORC_SCB *orc_alloc_scb(ORC_HCS * hcsp)
+/**
+ * orc_alloc_scb - allocate an SCB
+ * @host: host to allocate from
+ *
+ * Allocate an SCB and return a pointer to the SCB object. NULL
+ * is returned if no SCB is free.
+ */
+
+static struct orc_scb *orc_alloc_scb(struct orc_host * host)
{
- ORC_SCB *pTmpScb;
- ULONG flags;
+ struct orc_scb *scb;
+ unsigned long flags;
- spin_lock_irqsave(&(hcsp->BitAllocFlagLock), flags);
- pTmpScb = __orc_alloc_scb(hcsp);
- spin_unlock_irqrestore(&(hcsp->BitAllocFlagLock), flags);
- return (pTmpScb);
+ spin_lock_irqsave(&host->allocation_lock, flags);
+ scb = __orc_alloc_scb(host);
+ spin_unlock_irqrestore(&host->allocation_lock, flags);
+ return scb;
}
+/**
+ * orc_release_scb - release an SCB
+ * @host: host owning the SCB
+ * @scb: SCB that is now free
+ *
+ * Called to return a completed SCB to the allocation pool. Before
+ * calling, the SCB must be out of use on both the host and the HA.
+ */
-/***************************************************************************/
-static void orc_release_scb(ORC_HCS * hcsp, ORC_SCB * scbp)
+static void orc_release_scb(struct orc_host *host, struct orc_scb *scb)
{
- ULONG flags;
- UCHAR Index;
- UCHAR i;
- UCHAR Ch;
-
- spin_lock_irqsave(&(hcsp->BitAllocFlagLock), flags);
- Ch = hcsp->HCS_Index;
- Index = scbp->SCB_ScbIdx;
- i = Index / 32;
- Index %= 32;
- hcsp->BitAllocFlag[Ch][i] |= (1 << Index);
- spin_unlock_irqrestore(&(hcsp->BitAllocFlagLock), flags);
+ unsigned long flags;
+ u8 index, i, channel;
+
+ spin_lock_irqsave(&(host->allocation_lock), flags);
+ channel = host->index; /* Channel */
+ index = scb->scbidx;
+ i = index / 32;
+ index %= 32;
+ host->allocation_map[channel][i] |= (1 << index);
+ spin_unlock_irqrestore(&(host->allocation_lock), flags);
}
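Taken together, __orc_alloc_scb() and orc_release_scb() manage a simple bitmap: eight 32-bit words per channel give one bit per SCB (256 slots), a set bit marks a free slot, allocation clears a bit and release sets it again. A stand-alone sketch of that bookkeeping with illustrative names; the driver additionally indexes by channel and wraps these operations in allocation_lock:

#include <stdint.h>
#include <stdio.h>

static uint32_t alloc_map[8];		/* 8 words x 32 bits = 256 slots */

static void map_init(void)
{
	int i;

	for (i = 0; i < 8; i++)
		alloc_map[i] = 0xffffffff;	/* all slots free */
}

/* Return a free slot index, or -1 if none; clears the bit it hands out. */
static int map_alloc(void)
{
	int word, bit;

	for (word = 0; word < 8; word++) {
		for (bit = 0; bit < 32; bit++) {
			if ((alloc_map[word] >> bit) & 1) {
				alloc_map[word] &= ~(1u << bit);
				return word * 32 + bit;
			}
		}
	}
	return -1;
}

/* Mark a slot free again. */
static void map_release(int idx)
{
	alloc_map[idx / 32] |= 1u << (idx % 32);
}

int main(void)
{
	int a, b;

	map_init();
	a = map_alloc();
	b = map_alloc();
	map_release(a);
	printf("allocated %d and %d, released %d\n", a, b, a);
	return 0;
}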
-/*****************************************************************************
- Function name : abort_SCB
- Description : Abort a queued command.
- (commands that are on the bus can't be aborted easily)
- Input : pHCB - Pointer to host adapter structure
- Output : None.
- Return : pSRB - Pointer to SCSI request block.
-*****************************************************************************/
-static int abort_SCB(ORC_HCS * hcsp, ORC_SCB * pScb)
+/**
+ * orchid_abort_scb - abort a command
+ *
+ * Abort a queued command that has been passed to the firmware layer
+ * if possible. This is all handled by the firmware. We ask the firmware
+ * and it either aborts the command or fails.
+ */
+
+static int orchid_abort_scb(struct orc_host * host, struct orc_scb * scb)
{
- unsigned char bData, bStatus;
+ unsigned char data, status;
- ORC_WR(hcsp->HCS_Base + ORC_HDATA, ORC_CMD_ABORT_SCB); /* Write command */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO);
- if (waitHDOoff(hcsp) == 0) /* Wait HDO off */
+ outb(ORC_CMD_ABORT_SCB, host->base + ORC_HDATA); /* Write command */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
- ORC_WR(hcsp->HCS_Base + ORC_HDATA, pScb->SCB_ScbIdx); /* Write address */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO);
- if (waitHDOoff(hcsp) == 0) /* Wait HDO off */
+ outb(scb->scbidx, host->base + ORC_HDATA); /* Write address */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
- if (waitHDIset(hcsp, &bData) == 0) /* Wait HDI set */
+ if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
return 0;
- bStatus = ORC_RD(hcsp->HCS_Base, ORC_HDATA);
- ORC_WR(hcsp->HCS_Base + ORC_HSTUS, bData); /* Clear HDI */
+ status = inb(host->base + ORC_HDATA);
+ outb(data, host->base + ORC_HSTUS); /* Clear HDI */
- if (bStatus == 1) /* 0 - Successfully */
+ if (status == 1) /* 0 - Successfully */
return 0; /* 1 - Fail */
return 1;
}
-/*****************************************************************************
- Function name : inia100_abort
- Description : Abort a queued command.
- (commands that are on the bus can't be aborted easily)
- Input : pHCB - Pointer to host adapter structure
- Output : None.
- Return : pSRB - Pointer to SCSI request block.
-*****************************************************************************/
-static int orc_abort_srb(ORC_HCS * hcsp, struct scsi_cmnd *SCpnt)
+static int inia100_abort_cmd(struct orc_host * host, struct scsi_cmnd *cmd)
{
- ESCB *pVirEscb;
- ORC_SCB *pVirScb;
- UCHAR i;
- ULONG flags;
+ struct orc_extended_scb *escb;
+ struct orc_scb *scb;
+ u8 i;
+ unsigned long flags;
- spin_lock_irqsave(&(hcsp->BitAllocFlagLock), flags);
+ spin_lock_irqsave(&(host->allocation_lock), flags);
- pVirScb = hcsp->HCS_virScbArray;
+ scb = host->scb_virt;
- for (i = 0; i < ORC_MAXQUEUE; i++, pVirScb++) {
- pVirEscb = pVirScb->SCB_EScb;
- if ((pVirScb->SCB_Status) && (pVirEscb->SCB_Srb == SCpnt)) {
- if (pVirScb->SCB_TagMsg == 0) {
- spin_unlock_irqrestore(&(hcsp->BitAllocFlagLock), flags);
- return FAILED;
+ /* Walk the queue until we find the SCB that belongs to the command
+ block. This isn't a performance critical path so a walk in the park
+ here does no harm */
+
+ for (i = 0; i < ORC_MAXQUEUE; i++, scb++) {
+ escb = scb->escb;
+ if (scb->status && escb->srb == cmd) {
+ if (scb->tag_msg == 0) {
+ goto out;
} else {
- if (abort_SCB(hcsp, pVirScb)) {
- pVirEscb->SCB_Srb = NULL;
- spin_unlock_irqrestore(&(hcsp->BitAllocFlagLock), flags);
+ /* Issue an ABORT to the firmware */
+ if (orchid_abort_scb(host, scb)) {
+ escb->srb = NULL;
+ spin_unlock_irqrestore(&host->allocation_lock, flags);
return SUCCESS;
- } else {
- spin_unlock_irqrestore(&(hcsp->BitAllocFlagLock), flags);
- return FAILED;
- }
+ } else
+ goto out;
}
}
}
- spin_unlock_irqrestore(&(hcsp->BitAllocFlagLock), flags);
+out:
+ spin_unlock_irqrestore(&host->allocation_lock, flags);
return FAILED;
}
-/***********************************************************************
- Routine Description:
- This is the interrupt service routine for the Orchid SCSI adapter.
- It reads the interrupt register to determine if the adapter is indeed
- the source of the interrupt and clears the interrupt at the device.
- Arguments:
- HwDeviceExtension - HBA miniport driver's adapter data storage
- Return Value:
-***********************************************************************/
-static void orc_interrupt(
- ORC_HCS * hcsp
-)
+/**
+ * orc_interrupt - IRQ processing
+ * @host: Host causing the interrupt
+ *
+ * This function is called from the IRQ handler and protected
+ * by the host lock. While the controller reports that there are
+ * SCBs for processing, we pull them off the controller, turn the
+ * index into a host address pointer to the SCB and call the SCB
+ * handler.
+ *
+ * Returns IRQ_HANDLED if any SCBs were processed, IRQ_NONE otherwise
+ */
+
+static irqreturn_t orc_interrupt(struct orc_host * host)
{
- BYTE bScbIdx;
- ORC_SCB *pScb;
+ u8 scb_index;
+ struct orc_scb *scb;
- if (ORC_RD(hcsp->HCS_Base, ORC_RQUEUECNT) == 0) {
- return; // 0;
+ /* Check if we have an SCB queued for servicing */
+ if (inb(host->base + ORC_RQUEUECNT) == 0)
+ return IRQ_NONE;
- }
do {
- bScbIdx = ORC_RD(hcsp->HCS_Base, ORC_RQUEUE);
-
- pScb = (ORC_SCB *) ((ULONG) hcsp->HCS_virScbArray + (ULONG) (sizeof(ORC_SCB) * bScbIdx));
- pScb->SCB_Status = 0x0;
-
- inia100SCBPost((BYTE *) hcsp, (BYTE *) pScb);
- } while (ORC_RD(hcsp->HCS_Base, ORC_RQUEUECNT));
- return; //1;
-
+ /* Get the SCB index of the SCB to service */
+ scb_index = inb(host->base + ORC_RQUEUE);
+
+ /* Translate it back to a host pointer */
+ scb = (struct orc_scb *) ((unsigned long) host->scb_virt + (unsigned long) (sizeof(struct orc_scb) * scb_index));
+ scb->status = 0x0;
+ /* Process the SCB */
+ inia100_scb_handler(host, scb);
+ } while (inb(host->base + ORC_RQUEUECNT));
+ return IRQ_HANDLED;
} /* End of I1060Interrupt() */
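The loop above drains the reply queue while the count register is non-zero and turns each returned SCB index back into a host pointer; the explicit base-plus-index-times-size arithmetic is array indexing spelled out. A small simulated sketch of the same drain-and-translate pattern (the queue contents and sizes here are made up for illustration):

#include <stdint.h>
#include <stdio.h>

struct scb {
	uint8_t status;
	uint8_t pad[63];		/* stand-in for the rest of the 0x40-byte SCB */
};

static struct scb scb_array[245];	/* ORC_MAXQUEUE entries in the driver */

/* Simulated reply queue: indices of completed SCBs. */
static uint8_t reply_queue[4] = { 3, 7, 42, 0 };
static int reply_count = 4;

static uint8_t pop_reply(void)
{
	return reply_queue[--reply_count];
}

static void handle_scb(struct scb *scb, int index)
{
	printf("completed SCB %d at %p\n", index, (void *)scb);
}

int main(void)
{
	while (reply_count) {
		uint8_t index = pop_reply();
		/* Equivalent to: base + index * sizeof(struct scb) */
		struct scb *scb = &scb_array[index];

		scb->status = 0;
		handle_scb(scb, index);
	}
	return 0;
}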
-/*****************************************************************************
- Function name : inia100BuildSCB
- Description :
- Input : pHCB - Pointer to host adapter structure
- Output : None.
- Return : pSRB - Pointer to SCSI request block.
-*****************************************************************************/
-static void inia100BuildSCB(ORC_HCS * pHCB, ORC_SCB * pSCB, struct scsi_cmnd * SCpnt)
+/**
+ * inia100_build_scb - build SCB
+ * @host: host owning the control block
+ * @scb: control block to use
+ * @cmd: Mid layer command
+ *
+ * Build a host adapter control block from the SCSI mid layer command
+ */
+
+static void inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struct scsi_cmnd * cmd)
{ /* Create corresponding SCB */
- struct scatterlist *pSrbSG;
- ORC_SG *pSG; /* Pointer to SG list */
+ struct scatterlist *sg;
+ struct orc_sgent *sgent; /* Pointer to SG list */
int i, count_sg;
- ESCB *pEScb;
-
- pEScb = pSCB->SCB_EScb;
- pEScb->SCB_Srb = SCpnt;
- pSG = NULL;
-
- pSCB->SCB_Opcode = ORC_EXECSCSI;
- pSCB->SCB_Flags = SCF_NO_DCHK; /* Clear done bit */
- pSCB->SCB_Target = SCpnt->device->id;
- pSCB->SCB_Lun = SCpnt->device->lun;
- pSCB->SCB_Reserved0 = 0;
- pSCB->SCB_Reserved1 = 0;
- pSCB->SCB_SGLen = 0;
-
- if ((pSCB->SCB_XferLen = (U32) SCpnt->request_bufflen)) {
- pSG = (ORC_SG *) & pEScb->ESCB_SGList[0];
- if (SCpnt->use_sg) {
- pSrbSG = (struct scatterlist *) SCpnt->request_buffer;
- count_sg = pci_map_sg(pHCB->pdev, pSrbSG, SCpnt->use_sg,
- SCpnt->sc_data_direction);
- pSCB->SCB_SGLen = (U32) (count_sg * 8);
- for (i = 0; i < count_sg; i++, pSG++, pSrbSG++) {
- pSG->SG_Ptr = (U32) sg_dma_address(pSrbSG);
- pSG->SG_Len = (U32) sg_dma_len(pSrbSG);
- }
- } else if (SCpnt->request_bufflen != 0) {/* Non SG */
- pSCB->SCB_SGLen = 0x8;
- SCpnt->SCp.dma_handle = pci_map_single(pHCB->pdev,
- SCpnt->request_buffer,
- SCpnt->request_bufflen,
- SCpnt->sc_data_direction);
- pSG->SG_Ptr = (U32) SCpnt->SCp.dma_handle;
- pSG->SG_Len = (U32) SCpnt->request_bufflen;
- } else {
- pSCB->SCB_SGLen = 0;
- pSG->SG_Ptr = 0;
- pSG->SG_Len = 0;
+ struct orc_extended_scb *escb;
+
+ /* Links between the escb, scb and Linux scsi midlayer cmd */
+ escb = scb->escb;
+ escb->srb = cmd;
+ sgent = NULL;
+
+ /* Set up the SCB to do a SCSI command block */
+ scb->opcode = ORC_EXECSCSI;
+ scb->flags = SCF_NO_DCHK; /* Clear done bit */
+ scb->target = cmd->device->id;
+ scb->lun = cmd->device->lun;
+ scb->reserved0 = 0;
+ scb->reserved1 = 0;
+ scb->sg_len = 0;
+
+ scb->xferlen = (u32) scsi_bufflen(cmd);
+ sgent = (struct orc_sgent *) & escb->sglist[0];
+
+ count_sg = scsi_dma_map(cmd);
+ BUG_ON(count_sg < 0);
+
+ /* Build the scatter gather lists */
+ if (count_sg) {
+ scb->sg_len = (u32) (count_sg * 8);
+ scsi_for_each_sg(cmd, sg, count_sg, i) {
+ sgent->base = (u32) sg_dma_address(sg);
+ sgent->length = (u32) sg_dma_len(sg);
+ sgent++;
}
+ } else {
+ scb->sg_len = 0;
+ sgent->base = 0;
+ sgent->length = 0;
}
- pSCB->SCB_SGPAddr = (U32) pSCB->SCB_SensePAddr;
- pSCB->SCB_HaStat = 0;
- pSCB->SCB_TaStat = 0;
- pSCB->SCB_Link = 0xFF;
- pSCB->SCB_SenseLen = SENSE_SIZE;
- pSCB->SCB_CDBLen = SCpnt->cmd_len;
- if (pSCB->SCB_CDBLen >= IMAX_CDB) {
- printk("max cdb length= %x\b", SCpnt->cmd_len);
- pSCB->SCB_CDBLen = IMAX_CDB;
+ scb->sg_addr = (u32) scb->sense_addr;
+ scb->hastat = 0;
+ scb->tastat = 0;
+ scb->link = 0xFF;
+ scb->sense_len = SENSE_SIZE;
+ scb->cdb_len = cmd->cmd_len;
+ if (scb->cdb_len >= IMAX_CDB) {
+ printk("max cdb length= %x\b", cmd->cmd_len);
+ scb->cdb_len = IMAX_CDB;
}
- pSCB->SCB_Ident = SCpnt->device->lun | DISC_ALLOW;
- if (SCpnt->device->tagged_supported) { /* Tag Support */
- pSCB->SCB_TagMsg = SIMPLE_QUEUE_TAG; /* Do simple tag only */
+ scb->ident = cmd->device->lun | DISC_ALLOW;
+ if (cmd->device->tagged_supported) { /* Tag Support */
+ scb->tag_msg = SIMPLE_QUEUE_TAG; /* Do simple tag only */
} else {
- pSCB->SCB_TagMsg = 0; /* No tag support */
+ scb->tag_msg = 0; /* No tag support */
}
- memcpy(&pSCB->SCB_CDB[0], &SCpnt->cmnd, pSCB->SCB_CDBLen);
- return;
+ memcpy(&scb->cdb[0], &cmd->cmnd, scb->cdb_len);
}
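inia100_build_scb() now goes through the generic SCSI data buffer accessors visible above: scsi_bufflen() for the transfer length, scsi_dma_map() to map whatever data the midlayer attached, scsi_for_each_sg() to walk the mapped segments, and scsi_dma_unmap() on completion. A condensed sketch of that mapping pattern follows; struct hardware_sg and map_command() are hypothetical stand-ins, not part of this driver:

/* Sketch only: assumes a kernel of this era where <scsi/scsi_cmnd.h>
 * provides the data buffer accessors. struct hardware_sg and
 * map_command() are made-up stand-ins for a driver's own descriptors. */
#include <scsi/scsi_cmnd.h>

struct hardware_sg {			/* hypothetical HBA scatter-gather entry */
	u32 base;
	u32 length;
};

static int map_command(struct scsi_cmnd *cmd, struct hardware_sg *desc)
{
	struct scatterlist *sg;
	int count, i;

	/* scsi_bufflen(cmd) gives the total transfer length if the
	 * hardware also needs it programmed separately. */
	count = scsi_dma_map(cmd);	/* 0 = no data, < 0 = mapping error */
	if (count < 0)
		return -1;

	scsi_for_each_sg(cmd, sg, count, i) {
		desc[i].base = (u32) sg_dma_address(sg);
		desc[i].length = (u32) sg_dma_len(sg);
	}
	return count;			/* DMA segments to program */
}

/* On completion the counterpart is simply scsi_dma_unmap(cmd). */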
-/*****************************************************************************
- Function name : inia100_queue
- Description : Queue a command and setup interrupts for a free bus.
- Input : pHCB - Pointer to host adapter structure
- Output : None.
- Return : pSRB - Pointer to SCSI request block.
-*****************************************************************************/
-static int inia100_queue(struct scsi_cmnd * SCpnt, void (*done) (struct scsi_cmnd *))
+/**
+ * inia100_queue - queue command with host
+ * @cmd: Command block
+ * @done: Completion function
+ *
+ * Called by the mid layer to queue a command. Process the command
+ * block, build the host specific scb structures and if there is room
+ * queue the command down to the controller
+ */
+
+static int inia100_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
- register ORC_SCB *pSCB;
- ORC_HCS *pHCB; /* Point to Host adapter control block */
+ struct orc_scb *scb;
+ struct orc_host *host; /* Point to Host adapter control block */
- pHCB = (ORC_HCS *) SCpnt->device->host->hostdata;
- SCpnt->scsi_done = done;
+ host = (struct orc_host *) cmd->device->host->hostdata;
+ cmd->scsi_done = done;
/* Get free SCSI control block */
- if ((pSCB = orc_alloc_scb(pHCB)) == NULL)
+ if ((scb = orc_alloc_scb(host)) == NULL)
return SCSI_MLQUEUE_HOST_BUSY;
- inia100BuildSCB(pHCB, pSCB, SCpnt);
- orc_exec_scb(pHCB, pSCB); /* Start execute SCB */
-
- return (0);
+ inia100_build_scb(host, scb, cmd);
+ orc_exec_scb(host, scb); /* Start execute SCB */
+ return 0;
}
/*****************************************************************************
Function name : inia100_abort
Description : Abort a queued command.
(commands that are on the bus can't be aborted easily)
- Input : pHCB - Pointer to host adapter structure
+ Input : host - Pointer to host adapter structure
Output : None.
Return : pSRB - Pointer to SCSI request block.
*****************************************************************************/
-static int inia100_abort(struct scsi_cmnd * SCpnt)
+static int inia100_abort(struct scsi_cmnd * cmd)
{
- ORC_HCS *hcsp;
+ struct orc_host *host;
- hcsp = (ORC_HCS *) SCpnt->device->host->hostdata;
- return orc_abort_srb(hcsp, SCpnt);
+ host = (struct orc_host *) cmd->device->host->hostdata;
+ return inia100_abort_cmd(host, cmd);
}
/*****************************************************************************
Function name : inia100_reset
Description : Reset registers, reset a hanging bus and
kill active and disconnected commands for target w/o soft reset
- Input : pHCB - Pointer to host adapter structure
+ Input : host - Pointer to host adapter structure
Output : None.
Return : pSRB - Pointer to SCSI request block.
*****************************************************************************/
-static int inia100_bus_reset(struct scsi_cmnd * SCpnt)
+static int inia100_bus_reset(struct scsi_cmnd * cmd)
{ /* I need Host Control Block Information */
- ORC_HCS *pHCB;
- pHCB = (ORC_HCS *) SCpnt->device->host->hostdata;
- return orc_reset_scsi_bus(pHCB);
+ struct orc_host *host;
+ host = (struct orc_host *) cmd->device->host->hostdata;
+ return orc_reset_scsi_bus(host);
}
/*****************************************************************************
Function name : inia100_device_reset
Description : Reset the device
- Input : pHCB - Pointer to host adapter structure
+ Input : host - Pointer to host adapter structure
Output : None.
Return : pSRB - Pointer to SCSI request block.
*****************************************************************************/
-static int inia100_device_reset(struct scsi_cmnd * SCpnt)
+static int inia100_device_reset(struct scsi_cmnd * cmd)
{ /* I need Host Control Block Information */
- ORC_HCS *pHCB;
- pHCB = (ORC_HCS *) SCpnt->device->host->hostdata;
- return orc_device_reset(pHCB, SCpnt, scmd_id(SCpnt));
+ struct orc_host *host;
+ host = (struct orc_host *) cmd->device->host->hostdata;
+ return orc_device_reset(host, cmd, scmd_id(cmd));
}
-/*****************************************************************************
- Function name : inia100SCBPost
- Description : This is callback routine be called when orc finish one
- SCSI command.
- Input : pHCB - Pointer to host adapter control block.
- pSCB - Pointer to SCSI control block.
- Output : None.
- Return : None.
-*****************************************************************************/
-static void inia100SCBPost(BYTE * pHcb, BYTE * pScb)
+/**
+ * inia100_scb_handler - interrupt callback
+ * @host: Host causing the interrupt
+ * @scb: SCB the controller returned as needing processing
+ *
+ * Perform completion processing on a control block. Do the conversions
+ * from host to SCSI midlayer error coding, save any sense data, and
+ * then complete with the midlayer and recycle the SCB.
+ */
+
+static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb)
{
- struct scsi_cmnd *pSRB; /* Pointer to SCSI request block */
- ORC_HCS *pHCB;
- ORC_SCB *pSCB;
- ESCB *pEScb;
-
- pHCB = (ORC_HCS *) pHcb;
- pSCB = (ORC_SCB *) pScb;
- pEScb = pSCB->SCB_EScb;
- if ((pSRB = (struct scsi_cmnd *) pEScb->SCB_Srb) == 0) {
- printk("inia100SCBPost: SRB pointer is empty\n");
- orc_release_scb(pHCB, pSCB); /* Release SCB for current channel */
+ struct scsi_cmnd *cmd; /* Pointer to SCSI request block */
+ struct orc_extended_scb *escb;
+
+ escb = scb->escb;
+ if ((cmd = (struct scsi_cmnd *) escb->srb) == NULL) {
+ printk(KERN_ERR "inia100_scb_handler: SRB pointer is empty\n");
+ orc_release_scb(host, scb); /* Release SCB for current channel */
return;
}
- pEScb->SCB_Srb = NULL;
+ escb->srb = NULL;
- switch (pSCB->SCB_HaStat) {
+ switch (scb->hastat) {
case 0x0:
case 0xa: /* Linked command complete without error and linked normally */
case 0xb: /* Linked command complete without error interrupt generated */
- pSCB->SCB_HaStat = 0;
+ scb->hastat = 0;
break;
case 0x11: /* Selection time out-The initiator selection or target
reselection was not complete within the SCSI Time out period */
- pSCB->SCB_HaStat = DID_TIME_OUT;
+ scb->hastat = DID_TIME_OUT;
break;
case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus
phase sequence was requested by the target. The host adapter
will generate a SCSI Reset Condition, notifying the host with
a SCRD interrupt */
- pSCB->SCB_HaStat = DID_RESET;
+ scb->hastat = DID_RESET;
break;
case 0x1a: /* SCB Aborted. 07/21/98 */
- pSCB->SCB_HaStat = DID_ABORT;
+ scb->hastat = DID_ABORT;
break;
case 0x12: /* Data overrun/underrun-The target attempted to transfer more data
@@ -984,46 +1022,41 @@ static void inia100SCBPost(BYTE * pHcb, BYTE * pScb)
case 0x16: /* Invalid CCB Operation Code-The first byte of the CCB was invalid. */
default:
- printk("inia100: %x %x\n", pSCB->SCB_HaStat, pSCB->SCB_TaStat);
- pSCB->SCB_HaStat = DID_ERROR; /* Couldn't find any better */
+ printk(KERN_DEBUG "inia100: %x %x\n", scb->hastat, scb->tastat);
+ scb->hastat = DID_ERROR; /* Couldn't find any better */
break;
}
- if (pSCB->SCB_TaStat == 2) { /* Check condition */
- memcpy((unsigned char *) &pSRB->sense_buffer[0],
- (unsigned char *) &pEScb->ESCB_SGList[0], SENSE_SIZE);
+ if (scb->tastat == 2) { /* Check condition */
+ memcpy((unsigned char *) &cmd->sense_buffer[0],
+ (unsigned char *) &escb->sglist[0], SENSE_SIZE);
}
- pSRB->result = pSCB->SCB_TaStat | (pSCB->SCB_HaStat << 16);
-
- if (pSRB->use_sg) {
- pci_unmap_sg(pHCB->pdev,
- (struct scatterlist *)pSRB->request_buffer,
- pSRB->use_sg, pSRB->sc_data_direction);
- } else if (pSRB->request_bufflen != 0) {
- pci_unmap_single(pHCB->pdev, pSRB->SCp.dma_handle,
- pSRB->request_bufflen,
- pSRB->sc_data_direction);
- }
-
- pSRB->scsi_done(pSRB); /* Notify system DONE */
-
- orc_release_scb(pHCB, pSCB); /* Release SCB for current channel */
+ cmd->result = scb->tastat | (scb->hastat << 16);
+ scsi_dma_unmap(cmd);
+ cmd->scsi_done(cmd); /* Notify system DONE */
+ orc_release_scb(host, scb); /* Release SCB for current channel */
}
-/*
- * Interrupt handler (main routine of the driver)
+/**
+ * inia100_intr - interrupt handler
+ * @irqno: Interrupt value
+ * @devid: Host adapter
+ *
+ * Entry point for IRQ handling. All the real work is performed
+ * by orc_interrupt.
*/
static irqreturn_t inia100_intr(int irqno, void *devid)
{
- struct Scsi_Host *host = (struct Scsi_Host *)devid;
- ORC_HCS *pHcb = (ORC_HCS *)host->hostdata;
+ struct Scsi_Host *shost = (struct Scsi_Host *)devid;
+ struct orc_host *host = (struct orc_host *)shost->hostdata;
unsigned long flags;
+ irqreturn_t res;
- spin_lock_irqsave(host->host_lock, flags);
- orc_interrupt(pHcb);
- spin_unlock_irqrestore(host->host_lock, flags);
+ spin_lock_irqsave(shost->host_lock, flags);
+ res = orc_interrupt(host);
+ spin_unlock_irqrestore(shost->host_lock, flags);
- return IRQ_HANDLED;
+ return res;
}
static struct scsi_host_template inia100_template = {
@@ -1044,12 +1077,12 @@ static int __devinit inia100_probe_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct Scsi_Host *shost;
- ORC_HCS *pHCB;
+ struct orc_host *host;
unsigned long port, bios;
int error = -ENODEV;
u32 sz;
- unsigned long dBiosAdr;
- char *pbBiosAdr;
+ unsigned long biosaddr;
+ char *bios_phys;
if (pci_enable_device(pdev))
goto out;
@@ -1068,55 +1101,55 @@ static int __devinit inia100_probe_one(struct pci_dev *pdev,
}
/* <02> read from base address + 0x50 offset to get the bios value. */
- bios = ORC_RDWORD(port, 0x50);
+ bios = inw(port + 0x50);
- shost = scsi_host_alloc(&inia100_template, sizeof(ORC_HCS));
+ shost = scsi_host_alloc(&inia100_template, sizeof(struct orc_host));
if (!shost)
goto out_release_region;
- pHCB = (ORC_HCS *)shost->hostdata;
- pHCB->pdev = pdev;
- pHCB->HCS_Base = port;
- pHCB->HCS_BIOS = bios;
- spin_lock_init(&pHCB->BitAllocFlagLock);
+ host = (struct orc_host *)shost->hostdata;
+ host->pdev = pdev;
+ host->base = port;
+ host->BIOScfg = bios;
+ spin_lock_init(&host->allocation_lock);
/* Get total memory needed for SCB */
- sz = ORC_MAXQUEUE * sizeof(ORC_SCB);
- pHCB->HCS_virScbArray = pci_alloc_consistent(pdev, sz,
- &pHCB->HCS_physScbArray);
- if (!pHCB->HCS_virScbArray) {
+ sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
+ host->scb_virt = pci_alloc_consistent(pdev, sz,
+ &host->scb_phys);
+ if (!host->scb_virt) {
printk("inia100: SCB memory allocation error\n");
goto out_host_put;
}
- memset(pHCB->HCS_virScbArray, 0, sz);
+ memset(host->scb_virt, 0, sz);
/* Get total memory needed for ESCB */
- sz = ORC_MAXQUEUE * sizeof(ESCB);
- pHCB->HCS_virEscbArray = pci_alloc_consistent(pdev, sz,
- &pHCB->HCS_physEscbArray);
- if (!pHCB->HCS_virEscbArray) {
+ sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
+ host->escb_virt = pci_alloc_consistent(pdev, sz,
+ &host->escb_phys);
+ if (!host->escb_virt) {
printk("inia100: ESCB memory allocation error\n");
goto out_free_scb_array;
}
- memset(pHCB->HCS_virEscbArray, 0, sz);
+ memset(host->escb_virt, 0, sz);
- dBiosAdr = pHCB->HCS_BIOS;
- dBiosAdr = (dBiosAdr << 4);
- pbBiosAdr = phys_to_virt(dBiosAdr);
- if (init_orchid(pHCB)) { /* Initialize orchid chip */
+ biosaddr = host->BIOScfg;
+ biosaddr = (biosaddr << 4);
+ bios_phys = phys_to_virt(biosaddr);
+ if (init_orchid(host)) { /* Initialize orchid chip */
printk("inia100: initial orchid fail!!\n");
goto out_free_escb_array;
}
- shost->io_port = pHCB->HCS_Base;
+ shost->io_port = host->base;
shost->n_io_port = 0xff;
shost->can_queue = ORC_MAXQUEUE;
shost->unique_id = shost->io_port;
- shost->max_id = pHCB->HCS_MaxTar;
+ shost->max_id = host->max_targets;
shost->max_lun = 16;
- shost->irq = pHCB->HCS_Intr = pdev->irq;
- shost->this_id = pHCB->HCS_SCSI_ID; /* Assign HCS index */
+ shost->irq = pdev->irq;
+ shost->this_id = host->scsi_id; /* Assign HCS index */
shost->sg_tablesize = TOTAL_SG_ENTRY;
/* Initial orc chip */
@@ -1137,36 +1170,36 @@ static int __devinit inia100_probe_one(struct pci_dev *pdev,
scsi_scan_host(shost);
return 0;
- out_free_irq:
+out_free_irq:
free_irq(shost->irq, shost);
- out_free_escb_array:
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(ESCB),
- pHCB->HCS_virEscbArray, pHCB->HCS_physEscbArray);
- out_free_scb_array:
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(ORC_SCB),
- pHCB->HCS_virScbArray, pHCB->HCS_physScbArray);
- out_host_put:
+out_free_escb_array:
+ pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
+ host->escb_virt, host->escb_phys);
+out_free_scb_array:
+ pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
+ host->scb_virt, host->scb_phys);
+out_host_put:
scsi_host_put(shost);
- out_release_region:
+out_release_region:
release_region(port, 256);
- out_disable_device:
+out_disable_device:
pci_disable_device(pdev);
- out:
+out:
return error;
}
static void __devexit inia100_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
- ORC_HCS *pHCB = (ORC_HCS *)shost->hostdata;
+ struct orc_host *host = (struct orc_host *)shost->hostdata;
scsi_remove_host(shost);
free_irq(shost->irq, shost);
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(ESCB),
- pHCB->HCS_virEscbArray, pHCB->HCS_physEscbArray);
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(ORC_SCB),
- pHCB->HCS_virScbArray, pHCB->HCS_physScbArray);
+ pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
+ host->escb_virt, host->escb_phys);
+ pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
+ host->scb_virt, host->scb_phys);
release_region(shost->io_port, 256);
scsi_host_put(shost);
diff --git a/drivers/scsi/a100u2w.h b/drivers/scsi/a100u2w.h
index 6f542d2600e..d40e0c52819 100644
--- a/drivers/scsi/a100u2w.h
+++ b/drivers/scsi/a100u2w.h
@@ -18,27 +18,6 @@
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * --------------------------------------------------------------------------
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Where this Software is combined with software released under the terms of
- * the GNU General Public License ("GPL") and the terms of the GPL would require the
- * combined work to also be released under the terms of the GPL, the terms
- * and conditions of this License will apply in addition to those of the
- * GPL with the exception of any terms or conditions of this License that
- * conflict with, or are expressly prohibited by, the GPL.
- *
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -50,30 +29,19 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- */
-
-/*
+ *
* Revision History:
* 06/18/98 HL, Initial production Version 1.02
* 12/19/98 bv, Use spinlocks for 2.1.95 and up
* 06/25/02 Doug Ledford <dledford@redhat.com>
* - This and the i60uscsi.h file are almost identical,
* merged them into a single header used by both .c files.
+ * 14/06/07 Alan Cox <alan@redhat.com>
+ * - Grand cleanup and Linuxisation
*/
#define inia100_REVID "Initio INI-A100U2W SCSI device driver; Revision: 1.02d"
-#define ULONG unsigned long
-#define USHORT unsigned short
-#define UCHAR unsigned char
-#define BYTE unsigned char
-#define WORD unsigned short
-#define DWORD unsigned long
-#define UBYTE unsigned char
-#define UWORD unsigned short
-#define UDWORD unsigned long
-#define U32 u32
-
#if 1
#define ORC_MAXQUEUE 245
#define ORC_MAXTAGS 64
@@ -90,10 +58,10 @@
/************************************************************************/
/* Scatter-Gather Element Structure */
/************************************************************************/
-typedef struct ORC_SG_Struc {
- U32 SG_Ptr; /* Data Pointer */
- U32 SG_Len; /* Data Length */
-} ORC_SG;
+struct orc_sgent {
+ u32 base; /* Data Pointer */
+ u32 length; /* Data Length */
+};
/* SCSI related definition */
#define DISC_NOT_ALLOW 0x80 /* Disconnect is not allowed */
@@ -165,42 +133,45 @@ typedef struct ORC_SG_Struc {
#define ORC_PRGMCTR1 0xE3 /* RISC program counter */
#define ORC_RISCRAM 0xEC /* RISC RAM data port 4 bytes */
-typedef struct orc_extended_scb { /* Extended SCB */
- ORC_SG ESCB_SGList[TOTAL_SG_ENTRY]; /*0 Start of SG list */
- struct scsi_cmnd *SCB_Srb; /*50 SRB Pointer */
-} ESCB;
+struct orc_extended_scb { /* Extended SCB */
+ struct orc_sgent sglist[TOTAL_SG_ENTRY]; /*0 Start of SG list */
+ struct scsi_cmnd *srb; /*50 SRB Pointer */
+};
/***********************************************************************
SCSI Control Block
+
+ 0x40 bytes long, the last 8 are user bytes
************************************************************************/
-typedef struct orc_scb { /* Scsi_Ctrl_Blk */
- UBYTE SCB_Opcode; /*00 SCB command code&residual */
- UBYTE SCB_Flags; /*01 SCB Flags */
- UBYTE SCB_Target; /*02 Target Id */
- UBYTE SCB_Lun; /*03 Lun */
- U32 SCB_Reserved0; /*04 Reserved for ORCHID must 0 */
- U32 SCB_XferLen; /*08 Data Transfer Length */
- U32 SCB_Reserved1; /*0C Reserved for ORCHID must 0 */
- U32 SCB_SGLen; /*10 SG list # * 8 */
- U32 SCB_SGPAddr; /*14 SG List Buf physical Addr */
- U32 SCB_SGPAddrHigh; /*18 SG Buffer high physical Addr */
- UBYTE SCB_HaStat; /*1C Host Status */
- UBYTE SCB_TaStat; /*1D Target Status */
- UBYTE SCB_Status; /*1E SCB status */
- UBYTE SCB_Link; /*1F Link pointer, default 0xFF */
- UBYTE SCB_SenseLen; /*20 Sense Allocation Length */
- UBYTE SCB_CDBLen; /*21 CDB Length */
- UBYTE SCB_Ident; /*22 Identify */
- UBYTE SCB_TagMsg; /*23 Tag Message */
- UBYTE SCB_CDB[IMAX_CDB]; /*24 SCSI CDBs */
- UBYTE SCB_ScbIdx; /*3C Index for this ORCSCB */
- U32 SCB_SensePAddr; /*34 Sense Buffer physical Addr */
-
- ESCB *SCB_EScb; /*38 Extended SCB Pointer */
-#ifndef ALPHA
- UBYTE SCB_Reserved2[4]; /*3E Reserved for Driver use */
+struct orc_scb { /* Scsi_Ctrl_Blk */
+ u8 opcode; /*00 SCB command code&residual */
+ u8 flags; /*01 SCB Flags */
+ u8 target; /*02 Target Id */
+ u8 lun; /*03 Lun */
+ u32 reserved0; /*04 Reserved for ORCHID must 0 */
+ u32 xferlen; /*08 Data Transfer Length */
+ u32 reserved1; /*0C Reserved for ORCHID must 0 */
+ u32 sg_len; /*10 SG list # * 8 */
+ u32 sg_addr; /*14 SG List Buf physical Addr */
+ u32 sg_addrhigh; /*18 SG Buffer high physical Addr */
+ u8 hastat; /*1C Host Status */
+ u8 tastat; /*1D Target Status */
+ u8 status; /*1E SCB status */
+ u8 link; /*1F Link pointer, default 0xFF */
+ u8 sense_len; /*20 Sense Allocation Length */
+ u8 cdb_len; /*21 CDB Length */
+ u8 ident; /*22 Identify */
+ u8 tag_msg; /*23 Tag Message */
+ u8 cdb[IMAX_CDB]; /*24 SCSI CDBs */
+ u8 scbidx; /*3C Index for this ORCSCB */
+ u32 sense_addr; /*34 Sense Buffer physical Addr */
+
+ struct orc_extended_scb *escb; /*38 Extended SCB Pointer */
+ /* 64bit pointer or 32bit pointer + reserved ? */
+#ifndef CONFIG_64BIT
+ u8 reserved2[4]; /*3E Reserved for Driver use */
#endif
-} ORC_SCB;
+};
/* Opcodes of ORCSCB_Opcode */
#define ORC_EXECSCSI 0x00 /* SCSI initiator command with residual */
@@ -239,13 +210,13 @@ typedef struct orc_scb { /* Scsi_Ctrl_Blk */
Target Device Control Structure
**********************************************************************/
-typedef struct ORC_Tar_Ctrl_Struc {
- UBYTE TCS_DrvDASD; /* 6 */
- UBYTE TCS_DrvSCSI; /* 7 */
- UBYTE TCS_DrvHead; /* 8 */
- UWORD TCS_DrvFlags; /* 4 */
- UBYTE TCS_DrvSector; /* 7 */
-} ORC_TCS;
+struct orc_target {
+ u8 TCS_DrvDASD; /* 6 */
+ u8 TCS_DrvSCSI; /* 7 */
+ u8 TCS_DrvHead; /* 8 */
+ u16 TCS_DrvFlags; /* 4 */
+ u8 TCS_DrvSector; /* 7 */
+};
/* Bit Definition for TCF_DrvFlags */
#define TCS_DF_NODASD_SUPT 0x20 /* Suppress OS/2 DASD Mgr support */
@@ -255,32 +226,23 @@ typedef struct ORC_Tar_Ctrl_Struc {
/***********************************************************************
Host Adapter Control Structure
************************************************************************/
-typedef struct ORC_Ha_Ctrl_Struc {
- USHORT HCS_Base; /* 00 */
- UBYTE HCS_Index; /* 02 */
- UBYTE HCS_Intr; /* 04 */
- UBYTE HCS_SCSI_ID; /* 06 H/A SCSI ID */
- UBYTE HCS_BIOS; /* 07 BIOS configuration */
-
- UBYTE HCS_Flags; /* 0B */
- UBYTE HCS_HAConfig1; /* 1B SCSI0MAXTags */
- UBYTE HCS_MaxTar; /* 1B SCSI0MAXTags */
-
- USHORT HCS_Units; /* Number of units this adapter */
- USHORT HCS_AFlags; /* Adapter info. defined flags */
- ULONG HCS_Timeout; /* Adapter timeout value */
- ORC_SCB *HCS_virScbArray; /* 28 Virtual Pointer to SCB array */
- dma_addr_t HCS_physScbArray; /* Scb Physical address */
- ESCB *HCS_virEscbArray; /* Virtual pointer to ESCB Scatter list */
- dma_addr_t HCS_physEscbArray; /* scatter list Physical address */
- UBYTE TargetFlag[16]; /* 30 target configuration, TCF_EN_TAG */
- UBYTE MaximumTags[16]; /* 40 ORC_MAX_SCBS */
- UBYTE ActiveTags[16][16]; /* 50 */
- ORC_TCS HCS_Tcs[16]; /* 28 */
- U32 BitAllocFlag[MAX_CHANNELS][8]; /* Max STB is 256, So 256/32 */
- spinlock_t BitAllocFlagLock;
+struct orc_host {
+ unsigned long base; /* Base address */
+ u8 index; /* Index (Channel)*/
+ u8 scsi_id; /* H/A SCSI ID */
+ u8 BIOScfg; /*BIOS configuration */
+ u8 flags;
+ u8 max_targets; /* SCSI0MAXTags */
+ struct orc_scb *scb_virt; /* Virtual Pointer to SCB array */
+ dma_addr_t scb_phys; /* Scb Physical address */
+ struct orc_extended_scb *escb_virt; /* Virtual pointer to ESCB Scatter list */
+ dma_addr_t escb_phys; /* scatter list Physical address */
+ u8 target_flag[16]; /* target configuration, TCF_EN_TAG */
+ u8 max_tags[16]; /* ORC_MAX_SCBS */
+ u32 allocation_map[MAX_CHANNELS][8]; /* Max STB is 256, So 256/32 */
+ spinlock_t allocation_lock;
struct pci_dev *pdev;
-} ORC_HCS;
+};
/* Bit Definition for HCS_Flags */
@@ -301,79 +263,79 @@ typedef struct ORC_Ha_Ctrl_Struc {
#define HCS_AF_DISABLE_RESET 0x10 /* Adapter disable reset */
#define HCS_AF_DISABLE_ADPT 0x80 /* Adapter disable */
-typedef struct _NVRAM {
+struct orc_nvram {
/*----------header ---------------*/
- UCHAR SubVendorID0; /* 00 - Sub Vendor ID */
- UCHAR SubVendorID1; /* 00 - Sub Vendor ID */
- UCHAR SubSysID0; /* 02 - Sub System ID */
- UCHAR SubSysID1; /* 02 - Sub System ID */
- UCHAR SubClass; /* 04 - Sub Class */
- UCHAR VendorID0; /* 05 - Vendor ID */
- UCHAR VendorID1; /* 05 - Vendor ID */
- UCHAR DeviceID0; /* 07 - Device ID */
- UCHAR DeviceID1; /* 07 - Device ID */
- UCHAR Reserved0[2]; /* 09 - Reserved */
- UCHAR Revision; /* 0B - Revision of data structure */
+ u8 SubVendorID0; /* 00 - Sub Vendor ID */
+ u8 SubVendorID1; /* 00 - Sub Vendor ID */
+ u8 SubSysID0; /* 02 - Sub System ID */
+ u8 SubSysID1; /* 02 - Sub System ID */
+ u8 SubClass; /* 04 - Sub Class */
+ u8 VendorID0; /* 05 - Vendor ID */
+ u8 VendorID1; /* 05 - Vendor ID */
+ u8 DeviceID0; /* 07 - Device ID */
+ u8 DeviceID1; /* 07 - Device ID */
+ u8 Reserved0[2]; /* 09 - Reserved */
+ u8 revision; /* 0B - revision of data structure */
/* ----Host Adapter Structure ---- */
- UCHAR NumOfCh; /* 0C - Number of SCSI channel */
- UCHAR BIOSConfig1; /* 0D - BIOS configuration 1 */
- UCHAR BIOSConfig2; /* 0E - BIOS boot channel&target ID */
- UCHAR BIOSConfig3; /* 0F - BIOS configuration 3 */
+ u8 NumOfCh; /* 0C - Number of SCSI channel */
+ u8 BIOSConfig1; /* 0D - BIOS configuration 1 */
+ u8 BIOSConfig2; /* 0E - BIOS boot channel&target ID */
+ u8 BIOSConfig3; /* 0F - BIOS configuration 3 */
/* ----SCSI channel Structure ---- */
/* from "CTRL-I SCSI Host Adapter SetUp menu " */
- UCHAR SCSI0Id; /* 10 - Channel 0 SCSI ID */
- UCHAR SCSI0Config; /* 11 - Channel 0 SCSI configuration */
- UCHAR SCSI0MaxTags; /* 12 - Channel 0 Maximum tags */
- UCHAR SCSI0ResetTime; /* 13 - Channel 0 Reset recovering time */
- UCHAR ReservedforChannel0[2]; /* 14 - Reserved */
+ u8 scsi_id; /* 10 - Channel 0 SCSI ID */
+ u8 SCSI0Config; /* 11 - Channel 0 SCSI configuration */
+ u8 SCSI0MaxTags; /* 12 - Channel 0 Maximum tags */
+ u8 SCSI0ResetTime; /* 13 - Channel 0 Reset recovering time */
+ u8 ReservedforChannel0[2]; /* 14 - Reserved */
/* ----SCSI target Structure ---- */
/* from "CTRL-I SCSI device SetUp menu " */
- UCHAR Target00Config; /* 16 - Channel 0 Target 0 config */
- UCHAR Target01Config; /* 17 - Channel 0 Target 1 config */
- UCHAR Target02Config; /* 18 - Channel 0 Target 2 config */
- UCHAR Target03Config; /* 19 - Channel 0 Target 3 config */
- UCHAR Target04Config; /* 1A - Channel 0 Target 4 config */
- UCHAR Target05Config; /* 1B - Channel 0 Target 5 config */
- UCHAR Target06Config; /* 1C - Channel 0 Target 6 config */
- UCHAR Target07Config; /* 1D - Channel 0 Target 7 config */
- UCHAR Target08Config; /* 1E - Channel 0 Target 8 config */
- UCHAR Target09Config; /* 1F - Channel 0 Target 9 config */
- UCHAR Target0AConfig; /* 20 - Channel 0 Target A config */
- UCHAR Target0BConfig; /* 21 - Channel 0 Target B config */
- UCHAR Target0CConfig; /* 22 - Channel 0 Target C config */
- UCHAR Target0DConfig; /* 23 - Channel 0 Target D config */
- UCHAR Target0EConfig; /* 24 - Channel 0 Target E config */
- UCHAR Target0FConfig; /* 25 - Channel 0 Target F config */
-
- UCHAR SCSI1Id; /* 26 - Channel 1 SCSI ID */
- UCHAR SCSI1Config; /* 27 - Channel 1 SCSI configuration */
- UCHAR SCSI1MaxTags; /* 28 - Channel 1 Maximum tags */
- UCHAR SCSI1ResetTime; /* 29 - Channel 1 Reset recovering time */
- UCHAR ReservedforChannel1[2]; /* 2A - Reserved */
+ u8 Target00Config; /* 16 - Channel 0 Target 0 config */
+ u8 Target01Config; /* 17 - Channel 0 Target 1 config */
+ u8 Target02Config; /* 18 - Channel 0 Target 2 config */
+ u8 Target03Config; /* 19 - Channel 0 Target 3 config */
+ u8 Target04Config; /* 1A - Channel 0 Target 4 config */
+ u8 Target05Config; /* 1B - Channel 0 Target 5 config */
+ u8 Target06Config; /* 1C - Channel 0 Target 6 config */
+ u8 Target07Config; /* 1D - Channel 0 Target 7 config */
+ u8 Target08Config; /* 1E - Channel 0 Target 8 config */
+ u8 Target09Config; /* 1F - Channel 0 Target 9 config */
+ u8 Target0AConfig; /* 20 - Channel 0 Target A config */
+ u8 Target0BConfig; /* 21 - Channel 0 Target B config */
+ u8 Target0CConfig; /* 22 - Channel 0 Target C config */
+ u8 Target0DConfig; /* 23 - Channel 0 Target D config */
+ u8 Target0EConfig; /* 24 - Channel 0 Target E config */
+ u8 Target0FConfig; /* 25 - Channel 0 Target F config */
+
+ u8 SCSI1Id; /* 26 - Channel 1 SCSI ID */
+ u8 SCSI1Config; /* 27 - Channel 1 SCSI configuration */
+ u8 SCSI1MaxTags; /* 28 - Channel 1 Maximum tags */
+ u8 SCSI1ResetTime; /* 29 - Channel 1 Reset recovering time */
+ u8 ReservedforChannel1[2]; /* 2A - Reserved */
/* ----SCSI target Structure ---- */
/* from "CTRL-I SCSI device SetUp menu " */
- UCHAR Target10Config; /* 2C - Channel 1 Target 0 config */
- UCHAR Target11Config; /* 2D - Channel 1 Target 1 config */
- UCHAR Target12Config; /* 2E - Channel 1 Target 2 config */
- UCHAR Target13Config; /* 2F - Channel 1 Target 3 config */
- UCHAR Target14Config; /* 30 - Channel 1 Target 4 config */
- UCHAR Target15Config; /* 31 - Channel 1 Target 5 config */
- UCHAR Target16Config; /* 32 - Channel 1 Target 6 config */
- UCHAR Target17Config; /* 33 - Channel 1 Target 7 config */
- UCHAR Target18Config; /* 34 - Channel 1 Target 8 config */
- UCHAR Target19Config; /* 35 - Channel 1 Target 9 config */
- UCHAR Target1AConfig; /* 36 - Channel 1 Target A config */
- UCHAR Target1BConfig; /* 37 - Channel 1 Target B config */
- UCHAR Target1CConfig; /* 38 - Channel 1 Target C config */
- UCHAR Target1DConfig; /* 39 - Channel 1 Target D config */
- UCHAR Target1EConfig; /* 3A - Channel 1 Target E config */
- UCHAR Target1FConfig; /* 3B - Channel 1 Target F config */
- UCHAR reserved[3]; /* 3C - Reserved */
+ u8 Target10Config; /* 2C - Channel 1 Target 0 config */
+ u8 Target11Config; /* 2D - Channel 1 Target 1 config */
+ u8 Target12Config; /* 2E - Channel 1 Target 2 config */
+ u8 Target13Config; /* 2F - Channel 1 Target 3 config */
+ u8 Target14Config; /* 30 - Channel 1 Target 4 config */
+ u8 Target15Config; /* 31 - Channel 1 Target 5 config */
+ u8 Target16Config; /* 32 - Channel 1 Target 6 config */
+ u8 Target17Config; /* 33 - Channel 1 Target 7 config */
+ u8 Target18Config; /* 34 - Channel 1 Target 8 config */
+ u8 Target19Config; /* 35 - Channel 1 Target 9 config */
+ u8 Target1AConfig; /* 36 - Channel 1 Target A config */
+ u8 Target1BConfig; /* 37 - Channel 1 Target B config */
+ u8 Target1CConfig; /* 38 - Channel 1 Target C config */
+ u8 Target1DConfig; /* 39 - Channel 1 Target D config */
+ u8 Target1EConfig; /* 3A - Channel 1 Target E config */
+ u8 Target1FConfig; /* 3B - Channel 1 Target F config */
+ u8 reserved[3]; /* 3C - Reserved */
/* ---------- CheckSum ---------- */
- UCHAR CheckSum; /* 3F - Checksum of NVRam */
-} NVRAM, *PNVRAM;
+ u8 CheckSum; /* 3F - Checksum of NVRam */
+};
/* Bios Configuration for nvram->BIOSConfig1 */
#define NBC_BIOSENABLE 0x01 /* BIOS enable */
@@ -407,10 +369,3 @@ typedef struct _NVRAM {
#define NCC_RESET_TIME 0x0A /* SCSI RESET recovering time */
#define NTC_DEFAULT (NTC_1GIGA | NTC_NO_WIDESYNC | NTC_DISC_ENABLE)
-#define ORC_RD(x,y) (UCHAR)(inb( (int)((ULONG)((ULONG)x+(UCHAR)y)) ))
-#define ORC_RDWORD(x,y) (short)(inl((int)((ULONG)((ULONG)x+(UCHAR)y)) ))
-#define ORC_RDLONG(x,y) (long)(inl((int)((ULONG)((ULONG)x+(UCHAR)y)) ))
-
-#define ORC_WR( adr,data) outb( (UCHAR)(data), (int)(adr))
-#define ORC_WRSHORT(adr,data) outw( (UWORD)(data), (int)(adr))
-#define ORC_WRLONG( adr,data) outl( (ULONG)(data), (int)(adr))
diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c
new file mode 100644
index 00000000000..6a5784683ed
--- /dev/null
+++ b/drivers/scsi/a4000t.c
@@ -0,0 +1,143 @@
+/*
+ * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
+ * Amiga Technologies A4000T SCSI controller.
+ *
+ * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
+ * plus modifications of the 53c7xx.c driver to support the Amiga.
+ *
+ * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/amigahw.h>
+#include <asm/amigaints.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "53c700.h"
+
+MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / Kars de Jong <jongk@linux-m68k.org>");
+MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver");
+MODULE_LICENSE("GPL");
+
+
+static struct scsi_host_template a4000t_scsi_driver_template = {
+ .name = "A4000T builtin SCSI",
+ .proc_name = "A4000t",
+ .this_id = 7,
+ .module = THIS_MODULE,
+};
+
+static struct platform_device *a4000t_scsi_device;
+
+#define A4000T_SCSI_ADDR 0xdd0040
+
+static int __devinit a4000t_probe(struct device *dev)
+{
+ struct Scsi_Host * host = NULL;
+ struct NCR_700_Host_Parameters *hostdata;
+
+ if (!(MACH_IS_AMIGA && AMIGAHW_PRESENT(A4000_SCSI)))
+ goto out;
+
+ if (!request_mem_region(A4000T_SCSI_ADDR, 0x1000,
+ "A4000T builtin SCSI"))
+ goto out;
+
+ hostdata = kmalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
+ if (hostdata == NULL) {
+ printk(KERN_ERR "a4000t-scsi: Failed to allocate host data\n");
+ goto out_release;
+ }
+ memset(hostdata, 0, sizeof(struct NCR_700_Host_Parameters));
+
+ /* Fill in the required pieces of hostdata */
+ hostdata->base = (void __iomem *)ZTWO_VADDR(A4000T_SCSI_ADDR);
+ hostdata->clock = 50;
+ hostdata->chip710 = 1;
+ hostdata->dmode_extra = DMODE_FC2;
+ hostdata->dcntl_extra = EA_710;
+
+ /* and register the chip */
+ host = NCR_700_detect(&a4000t_scsi_driver_template, hostdata, dev);
+ if (!host) {
+ printk(KERN_ERR "a4000t-scsi: No host detected; "
+ "board configuration problem?\n");
+ goto out_free;
+ }
+
+ host->this_id = 7;
+ host->base = A4000T_SCSI_ADDR;
+ host->irq = IRQ_AMIGA_PORTS;
+
+ if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "a4000t-scsi",
+ host)) {
+ printk(KERN_ERR "a4000t-scsi: request_irq failed\n");
+ goto out_put_host;
+ }
+
+ scsi_scan_host(host);
+
+ return 0;
+
+ out_put_host:
+ scsi_host_put(host);
+ out_free:
+ kfree(hostdata);
+ out_release:
+ release_mem_region(A4000T_SCSI_ADDR, 0x1000);
+ out:
+ return -ENODEV;
+}
+
+static __devexit int a4000t_device_remove(struct device *dev)
+{
+ struct Scsi_Host *host = dev_to_shost(dev);
+ struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
+
+ scsi_remove_host(host);
+
+ NCR_700_release(host);
+ kfree(hostdata);
+ free_irq(host->irq, host);
+ release_mem_region(A4000T_SCSI_ADDR, 0x1000);
+
+ return 0;
+}
+
+static struct device_driver a4000t_scsi_driver = {
+ .name = "a4000t-scsi",
+ .bus = &platform_bus_type,
+ .probe = a4000t_probe,
+ .remove = __devexit_p(a4000t_device_remove),
+};
+
+static int __init a4000t_scsi_init(void)
+{
+ int err;
+
+ err = driver_register(&a4000t_scsi_driver);
+ if (err)
+ return err;
+
+ a4000t_scsi_device = platform_device_register_simple("a4000t-scsi",
+ -1, NULL, 0);
+ if (IS_ERR(a4000t_scsi_device)) {
+ driver_unregister(&a4000t_scsi_driver);
+ return PTR_ERR(a4000t_scsi_device);
+ }
+
+ return err;
+}
+
+static void __exit a4000t_scsi_exit(void)
+{
+ platform_device_unregister(a4000t_scsi_device);
+ driver_unregister(&a4000t_scsi_driver);
+}
+
+module_init(a4000t_scsi_init);
+module_exit(a4000t_scsi_exit);
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 8dcfe4ec35c..0b6fd0b654d 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -169,6 +169,18 @@ int acbsize = -1;
module_param(acbsize, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware.");
+int update_interval = 30 * 60;
+module_param(update_interval, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync updates issued to adapter.");
+
+int check_interval = 24 * 60 * 60;
+module_param(check_interval, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health checks.");
+
+int check_reset = 1;
+module_param(check_reset, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the adapter.");
+
int expose_physicals = -1;
module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. -1=protect 0=off, 1=on");
@@ -312,11 +324,10 @@ int aac_get_containers(struct aac_dev *dev)
if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
- fsa_dev_ptr = kmalloc(sizeof(*fsa_dev_ptr) * maximum_num_containers,
+ fsa_dev_ptr = kzalloc(sizeof(*fsa_dev_ptr) * maximum_num_containers,
GFP_KERNEL);
if (!fsa_dev_ptr)
return -ENOMEM;
- memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers);
dev->fsa_dev = fsa_dev_ptr;
dev->maximum_num_containers = maximum_num_containers;
@@ -344,21 +355,16 @@ static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigne
{
void *buf;
int transfer_len;
- struct scatterlist *sg = scsicmd->request_buffer;
+ struct scatterlist *sg = scsi_sglist(scsicmd);
+
+ buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ transfer_len = min(sg->length, len + offset);
- if (scsicmd->use_sg) {
- buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
- transfer_len = min(sg->length, len + offset);
- } else {
- buf = scsicmd->request_buffer;
- transfer_len = min(scsicmd->request_bufflen, len + offset);
- }
transfer_len -= offset;
if (buf && transfer_len > 0)
memcpy(buf + offset, data, transfer_len);
- if (scsicmd->use_sg)
- kunmap_atomic(buf - sg->offset, KM_IRQ0);
+ kunmap_atomic(buf - sg->offset, KM_IRQ0);
}
@@ -451,7 +457,7 @@ static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
{
struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
- if (fsa_dev_ptr[scmd_id(scsicmd)].valid)
+ if ((fsa_dev_ptr[scmd_id(scsicmd)].valid & 1))
return aac_scsi_cmd(scsicmd);
scsicmd->result = DID_NO_CONNECT << 16;
@@ -459,18 +465,18 @@ static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
return 0;
}
-static int _aac_probe_container2(void * context, struct fib * fibptr)
+static void _aac_probe_container2(void * context, struct fib * fibptr)
{
struct fsa_dev_info *fsa_dev_ptr;
int (*callback)(struct scsi_cmnd *);
struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
- if (!aac_valid_context(scsicmd, fibptr))
- return 0;
- fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
scsicmd->SCp.Status = 0;
+ fsa_dev_ptr = fibptr->dev->fsa_dev;
if (fsa_dev_ptr) {
struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
fsa_dev_ptr += scmd_id(scsicmd);
@@ -493,10 +499,11 @@ static int _aac_probe_container2(void * context, struct fib * fibptr)
aac_fib_free(fibptr);
callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
scsicmd->SCp.ptr = NULL;
- return (*callback)(scsicmd);
+ (*callback)(scsicmd);
+ return;
}
-static int _aac_probe_container1(void * context, struct fib * fibptr)
+static void _aac_probe_container1(void * context, struct fib * fibptr)
{
struct scsi_cmnd * scsicmd;
struct aac_mount * dresp;
@@ -506,13 +513,14 @@ static int _aac_probe_container1(void * context, struct fib * fibptr)
dresp = (struct aac_mount *) fib_data(fibptr);
dresp->mnt[0].capacityhigh = 0;
if ((le32_to_cpu(dresp->status) != ST_OK) ||
- (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE))
- return _aac_probe_container2(context, fibptr);
+ (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
+ _aac_probe_container2(context, fibptr);
+ return;
+ }
scsicmd = (struct scsi_cmnd *) context;
- scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
if (!aac_valid_context(scsicmd, fibptr))
- return 0;
+ return;
aac_fib_init(fibptr);
@@ -527,21 +535,18 @@ static int _aac_probe_container1(void * context, struct fib * fibptr)
sizeof(struct aac_query_mount),
FsaNormal,
0, 1,
- (fib_callback) _aac_probe_container2,
+ _aac_probe_container2,
(void *) scsicmd);
/*
* Check that the command queued to the controller
*/
- if (status == -EINPROGRESS) {
+ if (status == -EINPROGRESS)
scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
- return 0;
- }
- if (status < 0) {
+ else if (status < 0) {
/* Inherit results from VM_NameServe, if any */
dresp->status = cpu_to_le32(ST_OK);
- return _aac_probe_container2(context, fibptr);
+ _aac_probe_container2(context, fibptr);
}
- return 0;
}
static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
@@ -566,7 +571,7 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
sizeof(struct aac_query_mount),
FsaNormal,
0, 1,
- (fib_callback) _aac_probe_container1,
+ _aac_probe_container1,
(void *) scsicmd);
/*
* Check that the command queued to the controller
@@ -620,7 +625,7 @@ int aac_probe_container(struct aac_dev *dev, int cid)
return -ENOMEM;
}
scsicmd->list.next = NULL;
- scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))_aac_probe_container1;
+ scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))aac_probe_container_callback1;
scsicmd->device = scsidev;
scsidev->sdev_state = 0;
@@ -825,7 +830,7 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
readcmd->count = cpu_to_le32(count<<9);
readcmd->cid = cpu_to_le16(scmd_id(cmd));
- readcmd->flags = cpu_to_le16(1);
+ readcmd->flags = cpu_to_le16(IO_TYPE_READ);
readcmd->bpTotal = 0;
readcmd->bpComplete = 0;
@@ -904,7 +909,7 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
(void *) cmd);
}
-static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
+static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
{
u16 fibsize;
struct aac_raw_io *writecmd;
@@ -914,7 +919,9 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
writecmd->count = cpu_to_le32(count<<9);
writecmd->cid = cpu_to_le16(scmd_id(cmd));
- writecmd->flags = 0;
+ writecmd->flags = fua ?
+ cpu_to_le16(IO_TYPE_WRITE|IO_SUREWRITE) :
+ cpu_to_le16(IO_TYPE_WRITE);
writecmd->bpTotal = 0;
writecmd->bpComplete = 0;
@@ -933,7 +940,7 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
(void *) cmd);
}
-static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
+static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
{
u16 fibsize;
struct aac_write64 *writecmd;
@@ -964,7 +971,7 @@ static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba,
(void *) cmd);
}
-static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
+static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
{
u16 fibsize;
struct aac_write *writecmd;
@@ -1041,7 +1048,7 @@ static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
aac_build_sg64(cmd, (struct sgmap64*) &srbcmd->sg);
- srbcmd->count = cpu_to_le32(cmd->request_bufflen);
+ srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
@@ -1069,7 +1076,7 @@ static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
aac_build_sg(cmd, (struct sgmap*)&srbcmd->sg);
- srbcmd->count = cpu_to_le32(cmd->request_bufflen);
+ srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
@@ -1172,6 +1179,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
}
if (!dev->in_reset) {
+ char buffer[16];
tmp = le32_to_cpu(dev->adapter_info.kernelrev);
printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
dev->name,
@@ -1192,16 +1200,23 @@ int aac_get_adapter_info(struct aac_dev* dev)
dev->name, dev->id,
tmp>>24,(tmp>>16)&0xff,tmp&0xff,
le32_to_cpu(dev->adapter_info.biosbuild));
- if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
- printk(KERN_INFO "%s%d: serial %x\n",
- dev->name, dev->id,
- le32_to_cpu(dev->adapter_info.serial[0]));
+ buffer[0] = '\0';
+ if (aac_show_serial_number(
+ shost_to_class(dev->scsi_host_ptr), buffer))
+ printk(KERN_INFO "%s%d: serial %s",
+ dev->name, dev->id, buffer);
if (dev->supplement_adapter_info.VpdInfo.Tsid[0]) {
printk(KERN_INFO "%s%d: TSID %.*s\n",
dev->name, dev->id,
(int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
dev->supplement_adapter_info.VpdInfo.Tsid);
}
+ if (!check_reset ||
+ (dev->supplement_adapter_info.SupportedOptions2 &
+ le32_to_cpu(AAC_OPTION_IGNORE_RESET))) {
+ printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
+ dev->name, dev->id);
+ }
}
dev->nondasd_support = 0;
@@ -1332,7 +1347,7 @@ static void io_callback(void *context, struct fib * fibptr)
if (!aac_valid_context(scsicmd, fibptr))
return;
- dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+ dev = fibptr->dev;
cid = scmd_id(scsicmd);
if (nblank(dprintk(x))) {
@@ -1371,16 +1386,9 @@ static void io_callback(void *context, struct fib * fibptr)
}
BUG_ON(fibptr == NULL);
-
- if(scsicmd->use_sg)
- pci_unmap_sg(dev->pdev,
- (struct scatterlist *)scsicmd->request_buffer,
- scsicmd->use_sg,
- scsicmd->sc_data_direction);
- else if(scsicmd->request_bufflen)
- pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
- scsicmd->request_bufflen,
- scsicmd->sc_data_direction);
+
+ scsi_dma_unmap(scsicmd);
+
readreply = (struct aac_read_reply *)fib_data(fibptr);
if (le32_to_cpu(readreply->status) == ST_OK)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
@@ -1498,6 +1506,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
{
u64 lba;
u32 count;
+ int fua;
int status;
struct aac_dev *dev;
struct fib * cmd_fibcontext;
@@ -1512,6 +1521,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
count = scsicmd->cmnd[4];
if (count == 0)
count = 256;
+ fua = 0;
} else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));
@@ -1524,6 +1534,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
(scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
+ fua = scsicmd->cmnd[1] & 0x8;
} else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd)));
@@ -1531,10 +1542,12 @@ static int aac_write(struct scsi_cmnd * scsicmd)
| (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
| (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ fua = scsicmd->cmnd[1] & 0x8;
} else {
dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd)));
lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
+ fua = scsicmd->cmnd[1] & 0x8;
}
dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
smp_processor_id(), (unsigned long long)lba, jiffies));
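A hedged aside on the new 'fua' plumbing above (the macro below is illustrative, not part of the patch): in WRITE(10), WRITE(12) and WRITE(16) CDBs the FUA flag is bit 3 of byte 1, which is exactly what scsicmd->cmnd[1] & 0x8 extracts; aac_write_raw_io() then translates a non-zero value into IO_SUREWRITE.

/* Illustrative helper only -- name and existence are my assumption. */
#define EXAMPLE_WRITE_CDB_FUA(cdb_byte1)	(((cdb_byte1) & 0x08) != 0)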
@@ -1549,7 +1562,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
return 0;
}
- status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count);
+ status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
/*
* Check that the command queued to the controller
@@ -1592,7 +1605,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
else {
struct scsi_device *sdev = cmd->device;
- struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+ struct aac_dev *dev = fibptr->dev;
u32 cid = sdev_id(sdev);
printk(KERN_WARNING
"synchronize_callback: synchronize failed, status = %d\n",
@@ -1699,7 +1712,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
{
- u32 cid = 0;
+ u32 cid;
struct Scsi_Host *host = scsicmd->device->host;
struct aac_dev *dev = (struct aac_dev *)host->hostdata;
struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
@@ -1711,15 +1724,15 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
* Test does not apply to ID 16, the pseudo id for the controller
* itself.
*/
- if (scmd_id(scsicmd) != host->this_id) {
- if ((scmd_channel(scsicmd) == CONTAINER_CHANNEL)) {
- if((scmd_id(scsicmd) >= dev->maximum_num_containers) ||
+ cid = scmd_id(scsicmd);
+ if (cid != host->this_id) {
+ if (scmd_channel(scsicmd) == CONTAINER_CHANNEL) {
+ if((cid >= dev->maximum_num_containers) ||
(scsicmd->device->lun != 0)) {
scsicmd->result = DID_NO_CONNECT << 16;
scsicmd->scsi_done(scsicmd);
return 0;
}
- cid = scmd_id(scsicmd);
/*
* If the target container doesn't exist, it may have
@@ -1782,7 +1795,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
{
struct inquiry_data inq_data;
- dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scmd_id(scsicmd)));
+ dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
memset(&inq_data, 0, sizeof (struct inquiry_data));
inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
@@ -1794,7 +1807,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
* Set the Vendor, Product, and Revision Level
* see: <vendor>.c i.e. aac.c
*/
- if (scmd_id(scsicmd) == host->this_id) {
+ if (cid == host->this_id) {
setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types));
inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
@@ -1886,15 +1899,29 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case MODE_SENSE:
{
- char mode_buf[4];
+ char mode_buf[7];
+ int mode_buf_length = 4;
dprintk((KERN_DEBUG "MODE SENSE command.\n"));
mode_buf[0] = 3; /* Mode data length */
mode_buf[1] = 0; /* Medium type - default */
- mode_buf[2] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */
+ mode_buf[2] = 0; /* Device-specific param,
+ bit 8: 0/1 = write enabled/protected
+ bit 4: 0/1 = FUA enabled */
+ if (dev->raw_io_interface)
+ mode_buf[2] = 0x10;
mode_buf[3] = 0; /* Block descriptor length */
-
- aac_internal_transfer(scsicmd, mode_buf, 0, sizeof(mode_buf));
+ if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
+ ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
+ mode_buf[0] = 6;
+ mode_buf[4] = 8;
+ mode_buf[5] = 1;
+ mode_buf[6] = 0x04; /* WCE */
+ mode_buf_length = 7;
+ if (mode_buf_length > scsicmd->cmnd[4])
+ mode_buf_length = scsicmd->cmnd[4];
+ }
+ aac_internal_transfer(scsicmd, mode_buf, 0, mode_buf_length);
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
@@ -1902,18 +1929,33 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
}
case MODE_SENSE_10:
{
- char mode_buf[8];
+ char mode_buf[11];
+ int mode_buf_length = 8;
dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
mode_buf[0] = 0; /* Mode data length (MSB) */
mode_buf[1] = 6; /* Mode data length (LSB) */
mode_buf[2] = 0; /* Medium type - default */
- mode_buf[3] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */
+ mode_buf[3] = 0; /* Device-specific param,
+ bit 8: 0/1 = write enabled/protected
+ bit 4: 0/1 = FUA enabled */
+ if (dev->raw_io_interface)
+ mode_buf[3] = 0x10;
mode_buf[4] = 0; /* reserved */
mode_buf[5] = 0; /* reserved */
mode_buf[6] = 0; /* Block descriptor length (MSB) */
mode_buf[7] = 0; /* Block descriptor length (LSB) */
- aac_internal_transfer(scsicmd, mode_buf, 0, sizeof(mode_buf));
+ if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
+ ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
+ mode_buf[1] = 9;
+ mode_buf[8] = 8;
+ mode_buf[9] = 1;
+ mode_buf[10] = 0x04; /* WCE */
+ mode_buf_length = 11;
+ if (mode_buf_length > scsicmd->cmnd[8])
+ mode_buf_length = scsicmd->cmnd[8];
+ }
+ aac_internal_transfer(scsicmd, mode_buf, 0, mode_buf_length);
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
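For readers decoding the magic numbers above, here is a hedged annotation (array name is mine; values are copied from the MODE SENSE(6) branch of the patch) of the abbreviated caching-page reply built when page 0x08 or 0x3f is requested; the MODE SENSE(10) branch appends the same three page bytes after its 8-byte header.

/* Illustrative only -- not part of the driver. */
static const unsigned char example_mode_sense6_caching_reply[7] = {
	6,	/* [0] mode data length */
	0,	/* [1] medium type: default */
	0x10,	/* [2] device-specific parameter: DPOFUA, set only when
		 *     dev->raw_io_interface (i.e. FUA-capable) */
	0,	/* [3] block descriptor length */
	8,	/* [4] page code: caching mode page */
	1,	/* [5] page length, truncated to the byte carrying WCE */
	0x04,	/* [6] WCE: write cache enabled */
};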
@@ -2136,28 +2178,21 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
if (!aac_valid_context(scsicmd, fibptr))
return;
- dev = (struct aac_dev *)scsicmd->device->host->hostdata;
-
BUG_ON(fibptr == NULL);
+ dev = fibptr->dev;
+
srbreply = (struct aac_srb_reply *) fib_data(fibptr);
scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
/*
* Calculate resid for sg
*/
-
- scsicmd->resid = scsicmd->request_bufflen -
- le32_to_cpu(srbreply->data_xfer_length);
-
- if(scsicmd->use_sg)
- pci_unmap_sg(dev->pdev,
- (struct scatterlist *)scsicmd->request_buffer,
- scsicmd->use_sg,
- scsicmd->sc_data_direction);
- else if(scsicmd->request_bufflen)
- pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle, scsicmd->request_bufflen,
- scsicmd->sc_data_direction);
+
+ scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
+ - le32_to_cpu(srbreply->data_xfer_length));
+
+ scsi_dma_unmap(scsicmd);
/*
* First check the fib status
@@ -2233,7 +2268,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
break;
case SRB_STATUS_BUSY:
- scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
break;
case SRB_STATUS_BUS_RESET:
@@ -2343,34 +2378,33 @@ static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
{
struct aac_dev *dev;
unsigned long byte_count = 0;
+ int nseg;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
// Get rid of old data
psg->count = 0;
psg->sg[0].addr = 0;
- psg->sg[0].count = 0;
- if (scsicmd->use_sg) {
+ psg->sg[0].count = 0;
+
+ nseg = scsi_dma_map(scsicmd);
+ BUG_ON(nseg < 0);
+ if (nseg) {
struct scatterlist *sg;
int i;
- int sg_count;
- sg = (struct scatterlist *) scsicmd->request_buffer;
- sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
- scsicmd->sc_data_direction);
- psg->count = cpu_to_le32(sg_count);
+ psg->count = cpu_to_le32(nseg);
- for (i = 0; i < sg_count; i++) {
+ scsi_for_each_sg(scsicmd, sg, nseg, i) {
psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
byte_count += sg_dma_len(sg);
- sg++;
}
/* hba wants the size to be exact */
- if(byte_count > scsicmd->request_bufflen){
- u32 temp = le32_to_cpu(psg->sg[i-1].count) -
- (byte_count - scsicmd->request_bufflen);
+ if (byte_count > scsi_bufflen(scsicmd)) {
+ u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+ (byte_count - scsi_bufflen(scsicmd));
psg->sg[i-1].count = cpu_to_le32(temp);
- byte_count = scsicmd->request_bufflen;
+ byte_count = scsi_bufflen(scsicmd);
}
/* Check for command underflow */
if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
@@ -2378,18 +2412,6 @@ static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
byte_count, scsicmd->underflow);
}
}
- else if(scsicmd->request_bufflen) {
- u32 addr;
- scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
- scsicmd->request_buffer,
- scsicmd->request_bufflen,
- scsicmd->sc_data_direction);
- addr = scsicmd->SCp.dma_handle;
- psg->count = cpu_to_le32(1);
- psg->sg[0].addr = cpu_to_le32(addr);
- psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
- byte_count = scsicmd->request_bufflen;
- }
return byte_count;
}
@@ -2399,6 +2421,7 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
struct aac_dev *dev;
unsigned long byte_count = 0;
u64 addr;
+ int nseg;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
// Get rid of old data
@@ -2406,31 +2429,28 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
psg->sg[0].addr[0] = 0;
psg->sg[0].addr[1] = 0;
psg->sg[0].count = 0;
- if (scsicmd->use_sg) {
+
+ nseg = scsi_dma_map(scsicmd);
+ BUG_ON(nseg < 0);
+ if (nseg) {
struct scatterlist *sg;
int i;
- int sg_count;
- sg = (struct scatterlist *) scsicmd->request_buffer;
-
- sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
- scsicmd->sc_data_direction);
- for (i = 0; i < sg_count; i++) {
+ scsi_for_each_sg(scsicmd, sg, nseg, i) {
int count = sg_dma_len(sg);
addr = sg_dma_address(sg);
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
psg->sg[i].count = cpu_to_le32(count);
byte_count += count;
- sg++;
}
- psg->count = cpu_to_le32(sg_count);
+ psg->count = cpu_to_le32(nseg);
/* hba wants the size to be exact */
- if(byte_count > scsicmd->request_bufflen){
- u32 temp = le32_to_cpu(psg->sg[i-1].count) -
- (byte_count - scsicmd->request_bufflen);
+ if (byte_count > scsi_bufflen(scsicmd)) {
+ u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+ (byte_count - scsi_bufflen(scsicmd));
psg->sg[i-1].count = cpu_to_le32(temp);
- byte_count = scsicmd->request_bufflen;
+ byte_count = scsi_bufflen(scsicmd);
}
/* Check for command underflow */
if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
@@ -2438,26 +2458,13 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
byte_count, scsicmd->underflow);
}
}
- else if(scsicmd->request_bufflen) {
- scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
- scsicmd->request_buffer,
- scsicmd->request_bufflen,
- scsicmd->sc_data_direction);
- addr = scsicmd->SCp.dma_handle;
- psg->count = cpu_to_le32(1);
- psg->sg[0].addr[0] = cpu_to_le32(addr & 0xffffffff);
- psg->sg[0].addr[1] = cpu_to_le32(addr >> 32);
- psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
- byte_count = scsicmd->request_bufflen;
- }
return byte_count;
}
static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg)
{
- struct Scsi_Host *host = scsicmd->device->host;
- struct aac_dev *dev = (struct aac_dev *)host->hostdata;
unsigned long byte_count = 0;
+ int nseg;
// Get rid of old data
psg->count = 0;
@@ -2467,16 +2474,14 @@ static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw*
psg->sg[0].addr[1] = 0;
psg->sg[0].count = 0;
psg->sg[0].flags = 0;
- if (scsicmd->use_sg) {
+
+ nseg = scsi_dma_map(scsicmd);
+ BUG_ON(nseg < 0);
+ if (nseg) {
struct scatterlist *sg;
int i;
- int sg_count;
- sg = (struct scatterlist *) scsicmd->request_buffer;
- sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
- scsicmd->sc_data_direction);
-
- for (i = 0; i < sg_count; i++) {
+ scsi_for_each_sg(scsicmd, sg, nseg, i) {
int count = sg_dma_len(sg);
u64 addr = sg_dma_address(sg);
psg->sg[i].next = 0;
@@ -2486,15 +2491,14 @@ static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw*
psg->sg[i].count = cpu_to_le32(count);
psg->sg[i].flags = 0;
byte_count += count;
- sg++;
}
- psg->count = cpu_to_le32(sg_count);
+ psg->count = cpu_to_le32(nseg);
/* hba wants the size to be exact */
- if(byte_count > scsicmd->request_bufflen){
- u32 temp = le32_to_cpu(psg->sg[i-1].count) -
- (byte_count - scsicmd->request_bufflen);
+ if (byte_count > scsi_bufflen(scsicmd)) {
+ u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+ (byte_count - scsi_bufflen(scsicmd));
psg->sg[i-1].count = cpu_to_le32(temp);
- byte_count = scsicmd->request_bufflen;
+ byte_count = scsi_bufflen(scsicmd);
}
/* Check for command underflow */
if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
@@ -2502,24 +2506,6 @@ static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw*
byte_count, scsicmd->underflow);
}
}
- else if(scsicmd->request_bufflen) {
- int count;
- u64 addr;
- scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
- scsicmd->request_buffer,
- scsicmd->request_bufflen,
- scsicmd->sc_data_direction);
- addr = scsicmd->SCp.dma_handle;
- count = scsicmd->request_bufflen;
- psg->count = cpu_to_le32(1);
- psg->sg[0].next = 0;
- psg->sg[0].prev = 0;
- psg->sg[0].addr[1] = cpu_to_le32((u32)(addr>>32));
- psg->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
- psg->sg[0].count = cpu_to_le32(count);
- psg->sg[0].flags = 0;
- byte_count = scsicmd->request_bufflen;
- }
return byte_count;
}
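The aachba.c hunks above are part of this merge's recurring theme: replacing hand-rolled use_sg/request_buffer handling with the SCSI data buffer accessors. A minimal sketch of the post-conversion shape, assuming only the generic scsi_* helpers (everything else here is illustrative):

#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

static int example_build_sg(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	unsigned long byte_count = 0;
	int i, nseg;

	nseg = scsi_dma_map(cmd);	/* maps the command's S/G list for DMA */
	if (nseg < 0)
		return nseg;		/* mapping failure */

	scsi_for_each_sg(cmd, sg, nseg, i) {
		/* hand sg_dma_address(sg) / sg_dma_len(sg) to the adapter */
		byte_count += sg_dma_len(sg);
	}

	/* the HBA wants the exact transfer length the midlayer requested */
	if (byte_count > scsi_bufflen(cmd))
		byte_count = scsi_bufflen(cmd);

	return 0;
}

/*
 * Completion side, mirroring io_callback()/aac_srb_callback() above:
 *	scsi_set_resid(cmd, scsi_bufflen(cmd) - bytes_transferred);
 *	scsi_dma_unmap(cmd);
 */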
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index c81edf36913..f1d3b66af87 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,8 +12,8 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 2437
-# define AAC_DRIVER_BRANCH "-mh4"
+# define AAC_DRIVER_BUILD 2447
+# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -464,12 +464,12 @@ struct adapter_ops
int (*adapter_restart)(struct aac_dev *dev, int bled);
/* Transport operations */
int (*adapter_ioremap)(struct aac_dev * dev, u32 size);
- irqreturn_t (*adapter_intr)(int irq, void *dev_id);
+ irq_handler_t adapter_intr;
/* Packet operations */
int (*adapter_deliver)(struct fib * fib);
int (*adapter_bounds)(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba);
int (*adapter_read)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count);
- int (*adapter_write)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count);
+ int (*adapter_write)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua);
int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
/* Administrative operations */
int (*adapter_comm)(struct aac_dev * dev, int comm);
@@ -860,10 +860,12 @@ struct aac_supplement_adapter_info
__le32 FlashFirmwareBootBuild;
u8 MfgPcbaSerialNo[12];
u8 MfgWWNName[8];
- __le32 MoreFeatureBits;
+ __le32 SupportedOptions2;
__le32 ReservedGrowth[1];
};
#define AAC_FEATURE_FALCON 0x00000010
+#define AAC_OPTION_MU_RESET 0x00000001
+#define AAC_OPTION_IGNORE_RESET 0x00000002
#define AAC_SIS_VERSION_V3 3
#define AAC_SIS_SLOT_UNKNOWN 0xFF
@@ -1054,8 +1056,8 @@ struct aac_dev
#define aac_adapter_read(fib,cmd,lba,count) \
((fib)->dev)->a_ops.adapter_read(fib,cmd,lba,count)
-#define aac_adapter_write(fib,cmd,lba,count) \
- ((fib)->dev)->a_ops.adapter_write(fib,cmd,lba,count)
+#define aac_adapter_write(fib,cmd,lba,count,fua) \
+ ((fib)->dev)->a_ops.adapter_write(fib,cmd,lba,count,fua)
#define aac_adapter_scsi(fib,cmd) \
((fib)->dev)->a_ops.adapter_scsi(fib,cmd)
@@ -1213,6 +1215,9 @@ struct aac_write64
__le32 block;
__le16 pad;
__le16 flags;
+#define IO_TYPE_WRITE 0x00000000
+#define IO_TYPE_READ 0x00000001
+#define IO_SUREWRITE 0x00000008
struct sgmap64 sg; // Must be last in struct because it is variable
};
struct aac_write_reply
@@ -1257,6 +1262,19 @@ struct aac_synchronize_reply {
u8 data[16];
};
+#define CT_PAUSE_IO 65
+#define CT_RELEASE_IO 66
+struct aac_pause {
+ __le32 command; /* VM_ContainerConfig */
+ __le32 type; /* CT_PAUSE_IO */
+ __le32 timeout; /* 10ms ticks */
+ __le32 min;
+ __le32 noRescan;
+ __le32 parm3;
+ __le32 parm4;
+ __le32 count; /* sizeof(((struct aac_pause_reply *)NULL)->data) */
+};
+
struct aac_srb
{
__le32 function;
@@ -1804,6 +1822,10 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag);
int aac_get_containers(struct aac_dev *dev);
int aac_scsi_cmd(struct scsi_cmnd *cmd);
int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg);
+#ifndef shost_to_class
+#define shost_to_class(shost) &shost->shost_classdev
+#endif
+ssize_t aac_show_serial_number(struct class_device *class_dev, char *buf);
int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg);
int aac_rx_init(struct aac_dev *dev);
int aac_rkt_init(struct aac_dev *dev);
@@ -1813,6 +1835,7 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
unsigned int aac_response_normal(struct aac_queue * q);
unsigned int aac_command_normal(struct aac_queue * q);
unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index);
+int aac_reset_adapter(struct aac_dev * dev, int forced);
int aac_check_health(struct aac_dev * dev);
int aac_command_thread(void *data);
int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
@@ -1832,3 +1855,6 @@ extern int aif_timeout;
extern int expose_physicals;
extern int aac_reset_devices;
extern int aac_commit;
+extern int update_interval;
+extern int check_interval;
+extern int check_reset;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 9aca57eda94..d510839c0bb 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1021,7 +1021,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
}
-static int _aac_reset_adapter(struct aac_dev *aac)
+static int _aac_reset_adapter(struct aac_dev *aac, int forced)
{
int index, quirks;
int retval;
@@ -1029,25 +1029,32 @@ static int _aac_reset_adapter(struct aac_dev *aac)
struct scsi_device *dev;
struct scsi_cmnd *command;
struct scsi_cmnd *command_list;
+ int jafo = 0;
/*
* Assumptions:
- * - host is locked.
+ * - host is locked, unless called by the aacraid thread.
+ * (a matter of convenience, due to legacy issues surrounding
+ * eh_host_adapter_reset).
* - in_reset is asserted, so no new i/o is getting to the
* card.
- * - The card is dead.
+ * - The card is dead, or will be very shortly ;-/ so no new
+ * commands are completing in the interrupt service.
*/
host = aac->scsi_host_ptr;
scsi_block_requests(host);
aac_adapter_disable_int(aac);
- spin_unlock_irq(host->host_lock);
- kthread_stop(aac->thread);
+ if (aac->thread->pid != current->pid) {
+ spin_unlock_irq(host->host_lock);
+ kthread_stop(aac->thread);
+ jafo = 1;
+ }
/*
* If a positive health, means in a known DEAD PANIC
* state and the adapter could be reset to `try again'.
*/
- retval = aac_adapter_restart(aac, aac_adapter_check_health(aac));
+ retval = aac_adapter_restart(aac, forced ? 0 : aac_adapter_check_health(aac));
if (retval)
goto out;
@@ -1104,10 +1111,12 @@ static int _aac_reset_adapter(struct aac_dev *aac)
if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT)
if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
goto out;
- aac->thread = kthread_run(aac_command_thread, aac, aac->name);
- if (IS_ERR(aac->thread)) {
- retval = PTR_ERR(aac->thread);
- goto out;
+ if (jafo) {
+ aac->thread = kthread_run(aac_command_thread, aac, aac->name);
+ if (IS_ERR(aac->thread)) {
+ retval = PTR_ERR(aac->thread);
+ goto out;
+ }
}
(void)aac_get_adapter_info(aac);
quirks = aac_get_driver_ident(index)->quirks;
@@ -1150,7 +1159,98 @@ static int _aac_reset_adapter(struct aac_dev *aac)
out:
aac->in_reset = 0;
scsi_unblock_requests(host);
- spin_lock_irq(host->host_lock);
+ if (jafo) {
+ spin_lock_irq(host->host_lock);
+ }
+ return retval;
+}
+
+int aac_reset_adapter(struct aac_dev * aac, int forced)
+{
+ unsigned long flagv = 0;
+ int retval;
+ struct Scsi_Host * host;
+
+ if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
+ return -EBUSY;
+
+ if (aac->in_reset) {
+ spin_unlock_irqrestore(&aac->fib_lock, flagv);
+ return -EBUSY;
+ }
+ aac->in_reset = 1;
+ spin_unlock_irqrestore(&aac->fib_lock, flagv);
+
+ /*
+ * Wait for all commands to complete to this specific
+ * target (block maximum 60 seconds). Although not necessary,
+ * it does make us a good storage citizen.
+ */
+ host = aac->scsi_host_ptr;
+ scsi_block_requests(host);
+ if (forced < 2) for (retval = 60; retval; --retval) {
+ struct scsi_device * dev;
+ struct scsi_cmnd * command;
+ int active = 0;
+
+ __shost_for_each_device(dev, host) {
+ spin_lock_irqsave(&dev->list_lock, flagv);
+ list_for_each_entry(command, &dev->cmd_list, list) {
+ if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
+ active++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->list_lock, flagv);
+ if (active)
+ break;
+
+ }
+ /*
+ * We can exit if all the commands are complete
+ */
+ if (active == 0)
+ break;
+ ssleep(1);
+ }
+
+ /* Quiesce build, flush cache, write through mode */
+ aac_send_shutdown(aac);
+ spin_lock_irqsave(host->host_lock, flagv);
+ retval = _aac_reset_adapter(aac, forced);
+ spin_unlock_irqrestore(host->host_lock, flagv);
+
+ if (retval == -ENODEV) {
+ /* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
+ struct fib * fibctx = aac_fib_alloc(aac);
+ if (fibctx) {
+ struct aac_pause *cmd;
+ int status;
+
+ aac_fib_init(fibctx);
+
+ cmd = (struct aac_pause *) fib_data(fibctx);
+
+ cmd->command = cpu_to_le32(VM_ContainerConfig);
+ cmd->type = cpu_to_le32(CT_PAUSE_IO);
+ cmd->timeout = cpu_to_le32(1);
+ cmd->min = cpu_to_le32(1);
+ cmd->noRescan = cpu_to_le32(1);
+ cmd->count = cpu_to_le32(0);
+
+ status = aac_fib_send(ContainerCommand,
+ fibctx,
+ sizeof(struct aac_pause),
+ FsaNormal,
+ -2 /* Timeout silently */, 1,
+ NULL, NULL);
+
+ if (status >= 0)
+ aac_fib_complete(fibctx);
+ aac_fib_free(fibctx);
+ }
+ }
+
return retval;
}
@@ -1270,10 +1370,15 @@ int aac_check_health(struct aac_dev * aac)
printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
+ if (!check_reset || (aac->supplement_adapter_info.SupportedOptions2 &
+ le32_to_cpu(AAC_OPTION_IGNORE_RESET)))
+ goto out;
host = aac->scsi_host_ptr;
- spin_lock_irqsave(host->host_lock, flagv);
- BlinkLED = _aac_reset_adapter(aac);
- spin_unlock_irqrestore(host->host_lock, flagv);
+ if (aac->thread->pid != current->pid)
+ spin_lock_irqsave(host->host_lock, flagv);
+ BlinkLED = _aac_reset_adapter(aac, 0);
+ if (aac->thread->pid != current->pid)
+ spin_unlock_irqrestore(host->host_lock, flagv);
return BlinkLED;
out:
@@ -1300,6 +1405,9 @@ int aac_command_thread(void *data)
struct aac_fib_context *fibctx;
unsigned long flags;
DECLARE_WAITQUEUE(wait, current);
+ unsigned long next_jiffies = jiffies + HZ;
+ unsigned long next_check_jiffies = next_jiffies;
+ long difference = HZ;
/*
* We can only have one thread per adapter for AIF's.
@@ -1368,7 +1476,7 @@ int aac_command_thread(void *data)
cpu_to_le32(AifCmdJobProgress))) {
aac_handle_aif(dev, fib);
}
-
+
time_now = jiffies/HZ;
/*
@@ -1507,11 +1615,79 @@ int aac_command_thread(void *data)
* There are no more AIF's
*/
spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
- schedule();
+
+ /*
+ * Background activity
+ */
+ if ((time_before(next_check_jiffies,next_jiffies))
+ && ((difference = next_check_jiffies - jiffies) <= 0)) {
+ next_check_jiffies = next_jiffies;
+ if (aac_check_health(dev) == 0) {
+ difference = ((long)(unsigned)check_interval)
+ * HZ;
+ next_check_jiffies = jiffies + difference;
+ } else if (!dev->queues)
+ break;
+ }
+ if (!time_before(next_check_jiffies,next_jiffies)
+ && ((difference = next_jiffies - jiffies) <= 0)) {
+ struct timeval now;
+ int ret;
+
+ /* Don't even try to talk to adapter if it's sick */
+ ret = aac_check_health(dev);
+ if (!ret && !dev->queues)
+ break;
+ next_check_jiffies = jiffies
+ + ((long)(unsigned)check_interval)
+ * HZ;
+ do_gettimeofday(&now);
+
+ /* Synchronize our watches */
+ if (((1000000 - (1000000 / HZ)) > now.tv_usec)
+ && (now.tv_usec > (1000000 / HZ)))
+ difference = (((1000000 - now.tv_usec) * HZ)
+ + 500000) / 1000000;
+ else if (ret == 0) {
+ struct fib *fibptr;
+
+ if ((fibptr = aac_fib_alloc(dev))) {
+ u32 * info;
+
+ aac_fib_init(fibptr);
+
+ info = (u32 *) fib_data(fibptr);
+ if (now.tv_usec > 500000)
+ ++now.tv_sec;
+
+ *info = cpu_to_le32(now.tv_sec);
+
+ (void)aac_fib_send(SendHostTime,
+ fibptr,
+ sizeof(*info),
+ FsaNormal,
+ 1, 1,
+ NULL,
+ NULL);
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ }
+ difference = (long)(unsigned)update_interval*HZ;
+ } else {
+ /* retry shortly */
+ difference = 10 * HZ;
+ }
+ next_jiffies = jiffies + difference;
+ if (time_before(next_check_jiffies,next_jiffies))
+ difference = next_check_jiffies - jiffies;
+ }
+ if (difference <= 0)
+ difference = 1;
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(difference);
if (kthread_should_stop())
break;
- set_current_state(TASK_INTERRUPTIBLE);
}
if (dev->queues)
remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
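The reworked aac_command_thread() above replaces an unconditional schedule() with a computed schedule_timeout() so the periodic time-sync and health checks run without a separate timer. A hedged sketch of that idiom, with the real intervals replaced by a single illustrative one:

#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_maintenance_thread(void *data)
{
	unsigned long next = jiffies + 30 * 60 * HZ;	/* stand-in for update_interval */

	while (!kthread_should_stop()) {
		long difference = next - jiffies;

		if (difference <= 0) {
			/* interval elapsed: do the periodic work here */
			next = jiffies + 30 * 60 * HZ;
			difference = next - jiffies;
		}
		if (difference <= 0)
			difference = 1;		/* never ask to sleep for zero ticks */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(difference);	/* wakes early on events or stop requests */
	}
	return 0;
}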
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 5c487ff096c..d76e1a8cb93 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -39,10 +39,8 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/dma-mapping.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
-#include <linux/smp_lock.h>
#include <linux/kthread.h>
#include <asm/semaphore.h>
@@ -223,12 +221,12 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
{ aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
{ aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
- { aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell PERC2/QC */
+ { aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */
{ aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
{ aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell Catchall */
{ aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend Catchall */
- { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec Catch All */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec NEMER/ARK Catch All */
};
@@ -403,10 +401,6 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
static int aac_slave_configure(struct scsi_device *sdev)
{
- if (sdev_channel(sdev) == CONTAINER_CHANNEL) {
- sdev->skip_ms_page_8 = 1;
- sdev->skip_ms_page_3f = 1;
- }
if ((sdev->type == TYPE_DISK) &&
(sdev_channel(sdev) != CONTAINER_CHANNEL)) {
if (expose_physicals == 0)
@@ -450,6 +444,43 @@ static int aac_slave_configure(struct scsi_device *sdev)
return 0;
}
+/**
+ * aac_change_queue_depth - alter queue depths
+ * @sdev: SCSI device we are considering
+ * @depth: desired queue depth
+ *
+ * Alters queue depths for target device based on the host adapter's
+ * total capacity and the queue depth supported by the target device.
+ */
+
+static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
+{
+ if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
+ (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
+ struct scsi_device * dev;
+ struct Scsi_Host *host = sdev->host;
+ unsigned num = 0;
+
+ __shost_for_each_device(dev, host) {
+ if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
+ (sdev_channel(dev) == CONTAINER_CHANNEL))
+ ++num;
+ ++num;
+ }
+ if (num >= host->can_queue)
+ num = host->can_queue - 1;
+ if (depth > (host->can_queue - num))
+ depth = host->can_queue - num;
+ if (depth > 256)
+ depth = 256;
+ else if (depth < 2)
+ depth = 2;
+ scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
+ } else
+ scsi_adjust_queue_depth(sdev, 0, 1);
+ return sdev->queue_depth;
+}
+
static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
{
struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
@@ -548,6 +579,14 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
ssleep(1);
}
printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
+ /*
+ * This adapter needs a blind reset, only do so for Adapters that
+ * support a register, instead of a commanded, reset.
+ */
+ if ((aac->supplement_adapter_info.SupportedOptions2 &
+ le32_to_cpu(AAC_OPTION_MU_RESET|AAC_OPTION_IGNORE_RESET)) ==
+ le32_to_cpu(AAC_OPTION_MU_RESET))
+ aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */
return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
}
@@ -731,15 +770,21 @@ static ssize_t aac_show_bios_version(struct class_device *class_dev,
return len;
}
-static ssize_t aac_show_serial_number(struct class_device *class_dev,
- char *buf)
+ssize_t aac_show_serial_number(struct class_device *class_dev, char *buf)
{
struct aac_dev *dev = (struct aac_dev*)class_to_shost(class_dev)->hostdata;
int len = 0;
if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
- len = snprintf(buf, PAGE_SIZE, "%x\n",
+ len = snprintf(buf, PAGE_SIZE, "%06X\n",
le32_to_cpu(dev->adapter_info.serial[0]));
+ if (len &&
+ !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[
+ sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)+2-len],
+ buf, len))
+ len = snprintf(buf, PAGE_SIZE, "%.*s\n",
+ (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo),
+ dev->supplement_adapter_info.MfgPcbaSerialNo);
return len;
}
@@ -755,6 +800,31 @@ static ssize_t aac_show_max_id(struct class_device *class_dev, char *buf)
class_to_shost(class_dev)->max_id);
}
+static ssize_t aac_store_reset_adapter(struct class_device *class_dev,
+ const char *buf, size_t count)
+{
+ int retval = -EACCES;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return retval;
+ retval = aac_reset_adapter((struct aac_dev*)class_to_shost(class_dev)->hostdata, buf[0] == '!');
+ if (retval >= 0)
+ retval = count;
+ return retval;
+}
+
+static ssize_t aac_show_reset_adapter(struct class_device *class_dev,
+ char *buf)
+{
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(class_dev)->hostdata;
+ int len, tmp;
+
+ tmp = aac_adapter_check_health(dev);
+ if ((tmp == 0) && dev->in_reset)
+ tmp = -EBUSY;
+ len = snprintf(buf, PAGE_SIZE, "0x%x", tmp);
+ return len;
+}
static struct class_device_attribute aac_model = {
.attr = {
@@ -812,6 +882,14 @@ static struct class_device_attribute aac_max_id = {
},
.show = aac_show_max_id,
};
+static struct class_device_attribute aac_reset = {
+ .attr = {
+ .name = "reset_host",
+ .mode = S_IWUSR|S_IRUGO,
+ },
+ .store = aac_store_reset_adapter,
+ .show = aac_show_reset_adapter,
+};
static struct class_device_attribute *aac_attrs[] = {
&aac_model,
@@ -822,6 +900,7 @@ static struct class_device_attribute *aac_attrs[] = {
&aac_serial_number,
&aac_max_channel,
&aac_max_id,
+ &aac_reset,
NULL
};
@@ -848,6 +927,7 @@ static struct scsi_host_template aac_driver_template = {
.bios_param = aac_biosparm,
.shost_attrs = aac_attrs,
.slave_configure = aac_slave_configure,
+ .change_queue_depth = aac_change_queue_depth,
.eh_abort_handler = aac_eh_abort,
.eh_host_reset_handler = aac_eh_reset,
.can_queue = AAC_NUM_IO_FIB,
@@ -1086,7 +1166,7 @@ static int __init aac_init(void)
{
int error;
- printk(KERN_INFO "Adaptec %s driver (%s)\n",
+ printk(KERN_INFO "Adaptec %s driver %s\n",
AAC_DRIVERNAME, aac_driver_version);
error = pci_register_driver(&aac_pci_driver);
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index ae978a373c5..ebc65b9fea9 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -464,21 +464,24 @@ static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
{
u32 var;
- if (bled)
- printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
- dev->name, dev->id, bled);
- else {
- bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
- 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
- if (!bled && (var != 0x00000001))
- bled = -EINVAL;
- }
- if (bled && (bled != -ETIMEDOUT))
- bled = aac_adapter_sync_cmd(dev, IOP_RESET,
- 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
+ if (!(dev->supplement_adapter_info.SupportedOptions2 &
+ le32_to_cpu(AAC_OPTION_MU_RESET)) || (bled >= 0) || (bled == -2)) {
+ if (bled)
+ printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
+ dev->name, dev->id, bled);
+ else {
+ bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
+ 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
+ if (!bled && (var != 0x00000001))
+ bled = -EINVAL;
+ }
+ if (bled && (bled != -ETIMEDOUT))
+ bled = aac_adapter_sync_cmd(dev, IOP_RESET,
+ 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
- if (bled && (bled != -ETIMEDOUT))
- return -EINVAL;
+ if (bled && (bled != -ETIMEDOUT))
+ return -EINVAL;
+ }
if (bled || (var == 0x3803000F)) { /* USE_OTHER_METHOD */
rx_writel(dev, MUnit.reserved2, 3);
msleep(5000); /* Delay 5 seconds */
@@ -596,7 +599,7 @@ int _aac_rx_init(struct aac_dev *dev)
}
msleep(1);
}
- if (restart)
+ if (restart && aac_commit)
aac_commit = 1;
/*
* Fill in the common function dispatch table.
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 9b3303b6411..2b6689709e5 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -798,7 +798,6 @@
#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
-#include "advansys.h"
#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif /* CONFIG_PCI */
@@ -2014,7 +2013,7 @@ STATIC int AscSgListToQueue(int);
STATIC void AscEnableIsaDma(uchar);
#endif /* CONFIG_ISA */
STATIC ASC_DCNT AscGetMaxDmaCount(ushort);
-
+static const char *advansys_info(struct Scsi_Host *shp);
/*
* --- Adv Library Constants and Macros
@@ -3970,10 +3969,6 @@ STATIC ushort asc_bus[ASC_NUM_BUS] __initdata = {
ASC_IS_PCI,
};
-/*
- * Used with the LILO 'advansys' option to eliminate or
- * limit I/O port probing at boot time, cf. advansys_setup().
- */
STATIC int asc_iopflag = ASC_FALSE;
STATIC int asc_ioport[ASC_NUM_IOPORT_PROBE] = { 0, 0, 0, 0 };
@@ -4055,10 +4050,6 @@ STATIC void asc_prt_hex(char *f, uchar *, int);
#endif /* ADVANSYS_DEBUG */
-/*
- * --- Linux 'struct scsi_host_template' and advansys_setup() Functions
- */
-
#ifdef CONFIG_PROC_FS
/*
* advansys_proc_info() - /proc/scsi/advansys/[0-(ASC_NUM_BOARD_SUPPORTED-1)]
@@ -4080,7 +4071,7 @@ STATIC void asc_prt_hex(char *f, uchar *, int);
* if 'prtbuf' is too small it will not be overwritten. Instead the
* user just won't get all the available statistics.
*/
-int
+static int
advansys_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
off_t offset, int length, int inout)
{
@@ -4296,7 +4287,7 @@ advansys_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
* it must not call SCSI mid-level functions including scsi_malloc()
* and scsi_free().
*/
-int __init
+static int __init
advansys_detect(struct scsi_host_template *tpnt)
{
static int detect_called = ASC_FALSE;
@@ -5428,7 +5419,7 @@ advansys_detect(struct scsi_host_template *tpnt)
*
* Release resources allocated for a single AdvanSys adapter.
*/
-int
+static int
advansys_release(struct Scsi_Host *shp)
{
asc_board_t *boardp;
@@ -5475,7 +5466,7 @@ advansys_release(struct Scsi_Host *shp)
* Note: The information line should not exceed ASC_INFO_SIZE bytes,
* otherwise the static 'info' array will be overrun.
*/
-const char *
+static const char *
advansys_info(struct Scsi_Host *shp)
{
static char info[ASC_INFO_SIZE];
@@ -5568,7 +5559,7 @@ advansys_info(struct Scsi_Host *shp)
* This function always returns 0. Command return status is saved
* in the 'scp' result field.
*/
-int
+static int
advansys_queuecommand(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
{
struct Scsi_Host *shp;
@@ -5656,7 +5647,7 @@ advansys_queuecommand(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
* sleeping is allowed and no locking other than for host structures is
* required. Returns SUCCESS or FAILED.
*/
-int
+static int
advansys_reset(struct scsi_cmnd *scp)
{
struct Scsi_Host *shp;
@@ -5841,7 +5832,7 @@ advansys_reset(struct scsi_cmnd *scp)
* ip[1]: sectors
* ip[2]: cylinders
*/
-int
+static int
advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int ip[])
{
@@ -5875,82 +5866,6 @@ advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev,
}
/*
- * advansys_setup()
- *
- * This function is called from init/main.c at boot time.
- * It it passed LILO parameters that can be set from the
- * LILO command line or in /etc/lilo.conf.
- *
- * It is used by the AdvanSys driver to either disable I/O
- * port scanning or to limit scanning to 1 - 4 I/O ports.
- * Regardless of the option setting EISA and PCI boards
- * will still be searched for and detected. This option
- * only affects searching for ISA and VL boards.
- *
- * If ADVANSYS_DEBUG is defined the driver debug level may
- * be set using the 5th (ASC_NUM_IOPORT_PROBE + 1) I/O Port.
- *
- * Examples:
- * 1. Eliminate I/O port scanning:
- * boot: linux advansys=
- * or
- * boot: linux advansys=0x0
- * 2. Limit I/O port scanning to one I/O port:
- * boot: linux advansys=0x110
- * 3. Limit I/O port scanning to four I/O ports:
- * boot: linux advansys=0x110,0x210,0x230,0x330
- * 4. If ADVANSYS_DEBUG, limit I/O port scanning to four I/O ports and
- * set the driver debug level to 2.
- * boot: linux advansys=0x110,0x210,0x230,0x330,0xdeb2
- *
- * ints[0] - number of arguments
- * ints[1] - first argument
- * ints[2] - second argument
- * ...
- */
-void __init
-advansys_setup(char *str, int *ints)
-{
- int i;
-
- if (asc_iopflag == ASC_TRUE) {
- printk("AdvanSys SCSI: 'advansys' LILO option may appear only once\n");
- return;
- }
-
- asc_iopflag = ASC_TRUE;
-
- if (ints[0] > ASC_NUM_IOPORT_PROBE) {
-#ifdef ADVANSYS_DEBUG
- if ((ints[0] == ASC_NUM_IOPORT_PROBE + 1) &&
- (ints[ASC_NUM_IOPORT_PROBE + 1] >> 4 == 0xdeb)) {
- asc_dbglvl = ints[ASC_NUM_IOPORT_PROBE + 1] & 0xf;
- } else {
-#endif /* ADVANSYS_DEBUG */
- printk("AdvanSys SCSI: only %d I/O ports accepted\n",
- ASC_NUM_IOPORT_PROBE);
-#ifdef ADVANSYS_DEBUG
- }
-#endif /* ADVANSYS_DEBUG */
- }
-
-#ifdef ADVANSYS_DEBUG
- ASC_DBG1(1, "advansys_setup: ints[0] %d\n", ints[0]);
- for (i = 1; i < ints[0]; i++) {
- ASC_DBG2(1, " ints[%d] 0x%x", i, ints[i]);
- }
- ASC_DBG(1, "\n");
-#endif /* ADVANSYS_DEBUG */
-
- for (i = 1; i <= ints[0] && i <= ASC_NUM_IOPORT_PROBE; i++) {
- asc_ioport[i-1] = ints[i];
- ASC_DBG2(1, "advansys_setup: asc_ioport[%d] 0x%x\n",
- i - 1, asc_ioport[i-1]);
- }
-}
-
-
-/*
* --- Loadable Driver Support
*/
diff --git a/drivers/scsi/advansys.h b/drivers/scsi/advansys.h
deleted file mode 100644
index 8ee7fb16a72..00000000000
--- a/drivers/scsi/advansys.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * advansys.h - Linux Host Driver for AdvanSys SCSI Adapters
- *
- * Copyright (c) 1995-2000 Advanced System Products, Inc.
- * Copyright (c) 2000-2001 ConnectCom Solutions, Inc.
- * All Rights Reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that redistributions of source
- * code retain the above copyright notice and this comment without
- * modification.
- *
- * As of March 8, 2000 Advanced System Products, Inc. (AdvanSys)
- * changed its name to ConnectCom Solutions, Inc.
- *
- */
-
-#ifndef _ADVANSYS_H
-#define _ADVANSYS_H
-
-/*
- * struct scsi_host_template function prototypes.
- */
-int advansys_detect(struct scsi_host_template *);
-int advansys_release(struct Scsi_Host *);
-const char *advansys_info(struct Scsi_Host *);
-int advansys_queuecommand(struct scsi_cmnd *, void (* done)(struct scsi_cmnd *));
-int advansys_reset(struct scsi_cmnd *);
-int advansys_biosparam(struct scsi_device *, struct block_device *,
- sector_t, int[]);
-static int advansys_slave_configure(struct scsi_device *);
-
-/* init/main.c setup function */
-void advansys_setup(char *, int *);
-
-#endif /* _ADVANSYS_H */
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 4b4d1233ce8..85f2394ffc3 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -240,6 +240,7 @@
#include <linux/io.h>
#include <linux/blkdev.h>
#include <asm/system.h>
+#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/wait.h>
@@ -253,7 +254,6 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/list.h>
-#include <asm/semaphore.h>
#include <scsi/scsicam.h>
#include "scsi.h"
@@ -551,7 +551,7 @@ struct aha152x_hostdata {
*/
struct aha152x_scdata {
Scsi_Cmnd *next; /* next sc in queue */
- struct semaphore *sem; /* semaphore to block on */
+ struct completion *done;/* semaphore to block on */
unsigned char cmd_len;
unsigned char cmnd[MAX_COMMAND_SIZE];
unsigned short use_sg;
@@ -608,7 +608,7 @@ struct aha152x_scdata {
#define SCDATA(SCpnt) ((struct aha152x_scdata *) (SCpnt)->host_scribble)
#define SCNEXT(SCpnt) SCDATA(SCpnt)->next
-#define SCSEM(SCpnt) SCDATA(SCpnt)->sem
+#define SCSEM(SCpnt) SCDATA(SCpnt)->done
#define SG_ADDRESS(buffer) ((char *) (page_address((buffer)->page)+(buffer)->offset))
@@ -969,7 +969,8 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
/*
* Queue a command and setup interrupts for a free bus.
*/
-static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct semaphore *sem, int phase, void (*done)(Scsi_Cmnd *))
+static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
+ int phase, void (*done)(Scsi_Cmnd *))
{
struct Scsi_Host *shpnt = SCpnt->device->host;
unsigned long flags;
@@ -1013,7 +1014,7 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct semaphore *sem, int p
}
SCNEXT(SCpnt) = NULL;
- SCSEM(SCpnt) = sem;
+ SCSEM(SCpnt) = complete;
/* setup scratch area
SCp.ptr : buffer pointer
@@ -1084,9 +1085,9 @@ static void reset_done(Scsi_Cmnd *SCpnt)
DPRINTK(debug_eh, INFO_LEAD "reset_done called\n", CMDINFO(SCpnt));
#endif
if(SCSEM(SCpnt)) {
- up(SCSEM(SCpnt));
+ complete(SCSEM(SCpnt));
} else {
- printk(KERN_ERR "aha152x: reset_done w/o semaphore\n");
+ printk(KERN_ERR "aha152x: reset_done w/o completion\n");
}
}
@@ -1139,21 +1140,6 @@ static int aha152x_abort(Scsi_Cmnd *SCpnt)
return FAILED;
}
-static void timer_expired(unsigned long p)
-{
- Scsi_Cmnd *SCp = (Scsi_Cmnd *)p;
- struct semaphore *sem = SCSEM(SCp);
- struct Scsi_Host *shpnt = SCp->device->host;
- unsigned long flags;
-
- /* remove command from issue queue */
- DO_LOCK(flags);
- remove_SC(&ISSUE_SC, SCp);
- DO_UNLOCK(flags);
-
- up(sem);
-}
-
/*
* Reset a device
*
@@ -1161,14 +1147,14 @@ static void timer_expired(unsigned long p)
static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
{
struct Scsi_Host *shpnt = SCpnt->device->host;
- DECLARE_MUTEX_LOCKED(sem);
- struct timer_list timer;
+ DECLARE_COMPLETION(done);
int ret, issued, disconnected;
unsigned char old_cmd_len = SCpnt->cmd_len;
unsigned short old_use_sg = SCpnt->use_sg;
void *old_buffer = SCpnt->request_buffer;
unsigned old_bufflen = SCpnt->request_bufflen;
unsigned long flags;
+ unsigned long timeleft;
#if defined(AHA152X_DEBUG)
if(HOSTDATA(shpnt)->debug & debug_eh) {
@@ -1192,15 +1178,15 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
SCpnt->request_buffer = NULL;
SCpnt->request_bufflen = 0;
- init_timer(&timer);
- timer.data = (unsigned long) SCpnt;
- timer.expires = jiffies + 100*HZ; /* 10s */
- timer.function = (void (*)(unsigned long)) timer_expired;
+ aha152x_internal_queue(SCpnt, &done, resetting, reset_done);
- aha152x_internal_queue(SCpnt, &sem, resetting, reset_done);
- add_timer(&timer);
- down(&sem);
- del_timer(&timer);
+ timeleft = wait_for_completion_timeout(&done, 100*HZ);
+ if (!timeleft) {
+ /* remove command from issue queue */
+ DO_LOCK(flags);
+ remove_SC(&ISSUE_SC, SCpnt);
+ DO_UNLOCK(flags);
+ }
SCpnt->cmd_len = old_cmd_len;
SCpnt->use_sg = old_use_sg;
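
The aha152x hunks above replace the semaphore-plus-private-timer wait in the device-reset path with a completion and wait_for_completion_timeout(), which folds the timeout into the wait itself. A minimal sketch of the pattern, detached from the driver (the completer below is a stand-in for reset_done()):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* stand-in for the interrupt-side completer (reset_done() in aha152x) */
static void example_reset_done(struct completion *done)
{
        complete(done);
}

static int example_wait_for_reset(void)
{
        DECLARE_COMPLETION(done);       /* on-stack, as in the patch */
        unsigned long timeleft;

        example_reset_done(&done);      /* normally fired asynchronously */

        /* replaces init_timer()/add_timer()/down()/del_timer() */
        timeleft = wait_for_completion_timeout(&done, 100 * HZ);
        if (!timeleft) {
                /* timed out: the caller must undo whatever was queued,
                 * as aha152x removes the command from ISSUE_SC here */
                return -ETIMEDOUT;
        }
        return 0;
}
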
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index d7af9c63a04..e4a4f3a965d 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -271,20 +271,8 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
continue;
}
sgptr = (struct aha1740_sg *) SCtmp->host_scribble;
- if (SCtmp->use_sg) {
- /* We used scatter-gather.
- Do the unmapping dance. */
- dma_unmap_sg (&edev->dev,
- (struct scatterlist *) SCtmp->request_buffer,
- SCtmp->use_sg,
- SCtmp->sc_data_direction);
- } else {
- dma_unmap_single (&edev->dev,
- sgptr->buf_dma_addr,
- SCtmp->request_bufflen,
- DMA_BIDIRECTIONAL);
- }
-
+ scsi_dma_unmap(SCtmp);
+
/* Free the sg block */
dma_free_coherent (&edev->dev,
sizeof (struct aha1740_sg),
@@ -349,11 +337,9 @@ static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
unchar target = scmd_id(SCpnt);
struct aha1740_hostdata *host = HOSTDATA(SCpnt->device->host);
unsigned long flags;
- void *buff = SCpnt->request_buffer;
- int bufflen = SCpnt->request_bufflen;
dma_addr_t sg_dma;
struct aha1740_sg *sgptr;
- int ecbno;
+ int ecbno, nseg;
DEB(int i);
if(*cmd == REQUEST_SENSE) {
@@ -423,24 +409,23 @@ static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
}
sgptr = (struct aha1740_sg *) SCpnt->host_scribble;
sgptr->sg_dma_addr = sg_dma;
-
- if (SCpnt->use_sg) {
- struct scatterlist * sgpnt;
+
+ nseg = scsi_dma_map(SCpnt);
+ BUG_ON(nseg < 0);
+ if (nseg) {
+ struct scatterlist *sg;
struct aha1740_chain * cptr;
- int i, count;
+ int i;
DEB(unsigned char * ptr);
host->ecb[ecbno].sg = 1; /* SCSI Initiator Command
* w/scatter-gather*/
- sgpnt = (struct scatterlist *) SCpnt->request_buffer;
cptr = sgptr->sg_chain;
- count = dma_map_sg (&host->edev->dev, sgpnt, SCpnt->use_sg,
- SCpnt->sc_data_direction);
- for(i=0; i < count; i++) {
- cptr[i].datalen = sg_dma_len (sgpnt + i);
- cptr[i].dataptr = sg_dma_address (sgpnt + i);
+ scsi_for_each_sg(SCpnt, sg, nseg, i) {
+ cptr[i].datalen = sg_dma_len (sg);
+ cptr[i].dataptr = sg_dma_address (sg);
}
- host->ecb[ecbno].datalen = count*sizeof(struct aha1740_chain);
+ host->ecb[ecbno].datalen = nseg * sizeof(struct aha1740_chain);
host->ecb[ecbno].dataptr = sg_dma;
#ifdef DEBUG
printk("cptr %x: ",cptr);
@@ -448,11 +433,8 @@ static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
for(i=0;i<24;i++) printk("%02x ", ptr[i]);
#endif
} else {
- host->ecb[ecbno].datalen = bufflen;
- sgptr->buf_dma_addr = dma_map_single (&host->edev->dev,
- buff, bufflen,
- DMA_BIDIRECTIONAL);
- host->ecb[ecbno].dataptr = sgptr->buf_dma_addr;
+ host->ecb[ecbno].datalen = 0;
+ host->ecb[ecbno].dataptr = 0;
}
host->ecb[ecbno].lun = SCpnt->device->lun;
host->ecb[ecbno].ses = 1; /* Suppress underrun errors */
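
The aha1740 conversion above is representative of most hunks in this merge: scsi_dma_map() hides the old use_sg/request_buffer distinction, scsi_for_each_sg() walks the mapped list, and scsi_dma_unmap() is called unconditionally on completion. A condensed sketch of that shape (the descriptor arrays are illustrative, not a real driver's):

#include <linux/scatterlist.h>
#include <linux/types.h>
#include <scsi/scsi_cmnd.h>

/* map a command and copy the DMA segments into hardware descriptors */
static int example_map_command(struct scsi_cmnd *cmd,
                               dma_addr_t *addrs, unsigned int *lens)
{
        struct scatterlist *sg;
        int i, nseg;

        nseg = scsi_dma_map(cmd);       /* 0: no data phase, <0: mapping failed */
        if (nseg <= 0)
                return nseg;

        scsi_for_each_sg(cmd, sg, nseg, i) {
                addrs[i] = sg_dma_address(sg);
                lens[i]  = sg_dma_len(sg);
        }
        return nseg;
}

/* completion side: one call covers both the SG and no-data cases */
static void example_unmap_command(struct scsi_cmnd *cmd)
{
        scsi_dma_unmap(cmd);
}
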
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 6054881f21f..286ab83116f 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -376,21 +376,10 @@ static __inline void
ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
{
struct scsi_cmnd *cmd;
- int direction;
cmd = scb->io_ctx;
- direction = cmd->sc_data_direction;
ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE);
- if (cmd->use_sg != 0) {
- struct scatterlist *sg;
-
- sg = (struct scatterlist *)cmd->request_buffer;
- pci_unmap_sg(ahd->dev_softc, sg, cmd->use_sg, direction);
- } else if (cmd->request_bufflen != 0) {
- pci_unmap_single(ahd->dev_softc,
- scb->platform_data->buf_busaddr,
- cmd->request_bufflen, direction);
- }
+ scsi_dma_unmap(cmd);
}
/******************************** Macros **************************************/
@@ -1422,6 +1411,7 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
u_int col_idx;
uint16_t mask;
unsigned long flags;
+ int nseg;
ahd_lock(ahd, &flags);
@@ -1494,18 +1484,17 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
ahd_set_residual(scb, 0);
ahd_set_sense_residual(scb, 0);
scb->sg_count = 0;
- if (cmd->use_sg != 0) {
- void *sg;
- struct scatterlist *cur_seg;
- u_int nseg;
- int dir;
-
- cur_seg = (struct scatterlist *)cmd->request_buffer;
- dir = cmd->sc_data_direction;
- nseg = pci_map_sg(ahd->dev_softc, cur_seg,
- cmd->use_sg, dir);
+
+ nseg = scsi_dma_map(cmd);
+ BUG_ON(nseg < 0);
+ if (nseg > 0) {
+ void *sg = scb->sg_list;
+ struct scatterlist *cur_seg;
+ int i;
+
scb->platform_data->xfer_len = 0;
- for (sg = scb->sg_list; nseg > 0; nseg--, cur_seg++) {
+
+ scsi_for_each_sg(cmd, cur_seg, nseg, i) {
dma_addr_t addr;
bus_size_t len;
@@ -1513,22 +1502,8 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
len = sg_dma_len(cur_seg);
scb->platform_data->xfer_len += len;
sg = ahd_sg_setup(ahd, scb, sg, addr, len,
- /*last*/nseg == 1);
+ i == (nseg - 1));
}
- } else if (cmd->request_bufflen != 0) {
- void *sg;
- dma_addr_t addr;
- int dir;
-
- sg = scb->sg_list;
- dir = cmd->sc_data_direction;
- addr = pci_map_single(ahd->dev_softc,
- cmd->request_buffer,
- cmd->request_bufflen, dir);
- scb->platform_data->xfer_len = cmd->request_bufflen;
- scb->platform_data->buf_busaddr = addr;
- sg = ahd_sg_setup(ahd, scb, sg, addr,
- cmd->request_bufflen, /*last*/TRUE);
}
LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
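
Several of these conversions simply assert BUG_ON(nseg < 0) after scsi_dma_map(); a driver that would rather back off on a transient mapping failure can return the command to the midlayer instead. A purely illustrative queuecommand skeleton of that alternative:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

static int example_queuecommand(struct scsi_cmnd *cmd,
                                void (*done)(struct scsi_cmnd *))
{
        int nseg = scsi_dma_map(cmd);

        if (nseg < 0)
                return SCSI_MLQUEUE_HOST_BUSY;  /* retry later rather than BUG_ON() */

        cmd->scsi_done = done;
        /* ... build the hardware command from the mapped list and fire it;
         *     scsi_dma_unmap(cmd) runs in the completion path ... */
        return 0;
}
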
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index ad9761b237d..853998be147 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -781,7 +781,7 @@ int ahd_get_transfer_dir(struct scb *scb)
static __inline
void ahd_set_residual(struct scb *scb, u_long resid)
{
- scb->io_ctx->resid = resid;
+ scsi_set_resid(scb->io_ctx, resid);
}
static __inline
@@ -793,7 +793,7 @@ void ahd_set_sense_residual(struct scb *scb, u_long resid)
static __inline
u_long ahd_get_residual(struct scb *scb)
{
- return (scb->io_ctx->resid);
+ return scsi_get_resid(scb->io_ctx);
}
static __inline
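
The header hunks here and in aic7xxx_osm.h below swap direct reads and writes of cmd->resid for the scsi_get_resid()/scsi_set_resid() accessors. A small sketch of reporting an underrun with them (the transferred-byte count is a placeholder argument):

#include <scsi/scsi_cmnd.h>

/* completion-path helper: record how many requested bytes were not moved */
static void example_report_residual(struct scsi_cmnd *cmd,
                                    unsigned int transferred)
{
        if (transferred < scsi_bufflen(cmd))
                scsi_set_resid(cmd, scsi_bufflen(cmd) - transferred);
        else
                scsi_set_resid(cmd, 0);
}
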
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 660f26e23a3..1803ab6fc21 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -402,18 +402,8 @@ ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
cmd = scb->io_ctx;
ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE);
- if (cmd->use_sg != 0) {
- struct scatterlist *sg;
-
- sg = (struct scatterlist *)cmd->request_buffer;
- pci_unmap_sg(ahc->dev_softc, sg, cmd->use_sg,
- cmd->sc_data_direction);
- } else if (cmd->request_bufflen != 0) {
- pci_unmap_single(ahc->dev_softc,
- scb->platform_data->buf_busaddr,
- cmd->request_bufflen,
- cmd->sc_data_direction);
- }
+
+ scsi_dma_unmap(cmd);
}
static __inline int
@@ -1381,6 +1371,7 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
struct ahc_tmode_tstate *tstate;
uint16_t mask;
struct scb_tailq *untagged_q = NULL;
+ int nseg;
/*
* Schedule us to run later. The only reason we are not
@@ -1472,23 +1463,21 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
ahc_set_residual(scb, 0);
ahc_set_sense_residual(scb, 0);
scb->sg_count = 0;
- if (cmd->use_sg != 0) {
+
+ nseg = scsi_dma_map(cmd);
+ BUG_ON(nseg < 0);
+ if (nseg > 0) {
struct ahc_dma_seg *sg;
struct scatterlist *cur_seg;
- struct scatterlist *end_seg;
- int nseg;
+ int i;
- cur_seg = (struct scatterlist *)cmd->request_buffer;
- nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg,
- cmd->sc_data_direction);
- end_seg = cur_seg + nseg;
/* Copy the segments into the SG list. */
sg = scb->sg_list;
/*
* The sg_count may be larger than nseg if
* a transfer crosses a 32bit page.
- */
- while (cur_seg < end_seg) {
+ */
+ scsi_for_each_sg(cmd, cur_seg, nseg, i) {
dma_addr_t addr;
bus_size_t len;
int consumed;
@@ -1499,7 +1488,6 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
sg, addr, len);
sg += consumed;
scb->sg_count += consumed;
- cur_seg++;
}
sg--;
sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
@@ -1516,33 +1504,6 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
*/
scb->hscb->dataptr = scb->sg_list->addr;
scb->hscb->datacnt = scb->sg_list->len;
- } else if (cmd->request_bufflen != 0) {
- struct ahc_dma_seg *sg;
- dma_addr_t addr;
-
- sg = scb->sg_list;
- addr = pci_map_single(ahc->dev_softc,
- cmd->request_buffer,
- cmd->request_bufflen,
- cmd->sc_data_direction);
- scb->platform_data->buf_busaddr = addr;
- scb->sg_count = ahc_linux_map_seg(ahc, scb,
- sg, addr,
- cmd->request_bufflen);
- sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
-
- /*
- * Reset the sg list pointer.
- */
- scb->hscb->sgptr =
- ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
-
- /*
- * Copy the first SG into the "current"
- * data pointer area.
- */
- scb->hscb->dataptr = sg->addr;
- scb->hscb->datacnt = sg->len;
} else {
scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
scb->hscb->dataptr = 0;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index 8fee7edc6eb..b48dab447bd 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -751,7 +751,7 @@ int ahc_get_transfer_dir(struct scb *scb)
static __inline
void ahc_set_residual(struct scb *scb, u_long resid)
{
- scb->io_ctx->resid = resid;
+ scsi_set_resid(scb->io_ctx, resid);
}
static __inline
@@ -763,7 +763,7 @@ void ahc_set_sense_residual(struct scb *scb, u_long resid)
static __inline
u_long ahc_get_residual(struct scb *scb)
{
- return (scb->io_ctx->resid);
+ return scsi_get_resid(scb->io_ctx);
}
static __inline
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index a988d5abf70..4998bb850c4 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -2690,17 +2690,8 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
struct aic7xxx_scb *scbp;
unsigned char queue_depth;
- if (cmd->use_sg > 1)
- {
- struct scatterlist *sg;
+ scsi_dma_unmap(cmd);
- sg = (struct scatterlist *)cmd->request_buffer;
- pci_unmap_sg(p->pdev, sg, cmd->use_sg, cmd->sc_data_direction);
- }
- else if (cmd->request_bufflen)
- pci_unmap_single(p->pdev, aic7xxx_mapping(cmd),
- cmd->request_bufflen,
- cmd->sc_data_direction);
if (scb->flags & SCB_SENSE)
{
pci_unmap_single(p->pdev,
@@ -3869,7 +3860,7 @@ aic7xxx_calculate_residual (struct aic7xxx_host *p, struct aic7xxx_scb *scb)
* the mid layer didn't check residual data counts to see if the
* command needs retried.
*/
- cmd->resid = scb->sg_length - actual;
+ scsi_set_resid(cmd, scb->sg_length - actual);
aic7xxx_status(cmd) = hscb->target_status;
}
}
@@ -6581,7 +6572,7 @@ aic7xxx_slave_alloc(struct scsi_device *SDptr)
struct aic7xxx_host *p = (struct aic7xxx_host *)SDptr->host->hostdata;
struct aic_dev_data *aic_dev;
- aic_dev = kmalloc(sizeof(struct aic_dev_data), GFP_ATOMIC | GFP_KERNEL);
+ aic_dev = kmalloc(sizeof(struct aic_dev_data), GFP_KERNEL);
if(!aic_dev)
return 1;
/*
@@ -10137,6 +10128,7 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
struct scsi_device *sdptr = cmd->device;
unsigned char tindex = TARGET_INDEX(cmd);
struct request *req = cmd->request;
+ int use_sg;
mask = (0x01 << tindex);
hscb = scb->hscb;
@@ -10209,8 +10201,10 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
memcpy(scb->cmnd, cmd->cmnd, cmd->cmd_len);
hscb->SCSI_cmd_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, scb->cmnd));
- if (cmd->use_sg)
- {
+ use_sg = scsi_dma_map(cmd);
+ BUG_ON(use_sg < 0);
+
+ if (use_sg) {
struct scatterlist *sg; /* Must be mid-level SCSI code scatterlist */
/*
@@ -10219,11 +10213,11 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
* differences and the kernel SG list uses virtual addresses where
* we need physical addresses.
*/
- int i, use_sg;
+ int i;
- sg = (struct scatterlist *)cmd->request_buffer;
scb->sg_length = 0;
- use_sg = pci_map_sg(p->pdev, sg, cmd->use_sg, cmd->sc_data_direction);
+
+
/*
* Copy the segments into the SG array. NOTE!!! - We used to
* have the first entry both in the data_pointer area and the first
@@ -10231,10 +10225,9 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
* entry in both places, but now we download the address of
* scb->sg_list[1] instead of 0 to the sg pointer in the hscb.
*/
- for (i = 0; i < use_sg; i++)
- {
- unsigned int len = sg_dma_len(sg+i);
- scb->sg_list[i].address = cpu_to_le32(sg_dma_address(sg+i));
+ scsi_for_each_sg(cmd, sg, use_sg, i) {
+ unsigned int len = sg_dma_len(sg);
+ scb->sg_list[i].address = cpu_to_le32(sg_dma_address(sg));
scb->sg_list[i].length = cpu_to_le32(len);
scb->sg_length += len;
}
@@ -10244,33 +10237,13 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
scb->sg_count = i;
hscb->SG_segment_count = i;
hscb->SG_list_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, &scb->sg_list[1]));
- }
- else
- {
- if (cmd->request_bufflen)
- {
- unsigned int address = pci_map_single(p->pdev, cmd->request_buffer,
- cmd->request_bufflen,
- cmd->sc_data_direction);
- aic7xxx_mapping(cmd) = address;
- scb->sg_list[0].address = cpu_to_le32(address);
- scb->sg_list[0].length = cpu_to_le32(cmd->request_bufflen);
- scb->sg_count = 1;
- scb->sg_length = cmd->request_bufflen;
- hscb->SG_segment_count = 1;
- hscb->SG_list_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, &scb->sg_list[0]));
- hscb->data_count = scb->sg_list[0].length;
- hscb->data_pointer = scb->sg_list[0].address;
- }
- else
- {
+ } else {
scb->sg_count = 0;
scb->sg_length = 0;
hscb->SG_segment_count = 0;
hscb->SG_list_pointer = 0;
hscb->data_count = 0;
hscb->data_pointer = 0;
- }
}
}
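
The aic7xxx_old hunk above also drops the contradictory GFP_ATOMIC | GFP_KERNEL combination from the slave_alloc allocation; the two flags request opposite behaviour, and slave_alloc may sleep, so GFP_KERNEL alone is correct. The rule of thumb, as a sketch:

#include <linux/slab.h>
#include <linux/types.h>

static void *example_alloc(size_t size, bool may_sleep)
{
        /* GFP_KERNEL and GFP_ATOMIC are alternatives, never OR'ed together:
         * GFP_KERNEL may block and reclaim memory, GFP_ATOMIC must not. */
        return kmalloc(size, may_sleep ? GFP_KERNEL : GFP_ATOMIC);
}
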
diff --git a/drivers/scsi/amiga7xx.c b/drivers/scsi/amiga7xx.c
deleted file mode 100644
index d5d3c4d5a25..00000000000
--- a/drivers/scsi/amiga7xx.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
- * Amiga MacroSystemUS WarpEngine SCSI controller.
- * Amiga Technologies A4000T SCSI controller.
- * Amiga Technologies/DKB A4091 SCSI controller.
- *
- * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
- * plus modifications of the 53c7xx.c driver to support the Amiga.
- */
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/blkdev.h>
-#include <linux/zorro.h>
-#include <linux/stat.h>
-
-#include <asm/setup.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/amigaints.h>
-#include <asm/amigahw.h>
-#include <asm/dma.h>
-#include <asm/irq.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "53c7xx.h"
-#include "amiga7xx.h"
-
-
-static int amiga7xx_register_one(struct scsi_host_template *tpnt,
- unsigned long address)
-{
- long long options;
- int clock;
-
- if (!request_mem_region(address, 0x1000, "ncr53c710"))
- return 0;
-
- address = (unsigned long)z_ioremap(address, 0x1000);
- options = OPTION_MEMORY_MAPPED | OPTION_DEBUG_TEST1 | OPTION_INTFLY |
- OPTION_SYNCHRONOUS | OPTION_ALWAYS_SYNCHRONOUS |
- OPTION_DISCONNECT;
- clock = 50000000; /* 50 MHz SCSI Clock */
- ncr53c7xx_init(tpnt, 0, 710, address, 0, IRQ_AMIGA_PORTS, DMA_NONE,
- options, clock);
- return 1;
-}
-
-
-#ifdef CONFIG_ZORRO
-
-static struct {
- zorro_id id;
- unsigned long offset;
- int absolute; /* offset is absolute address */
-} amiga7xx_table[] = {
- { .id = ZORRO_PROD_PHASE5_BLIZZARD_603E_PLUS, .offset = 0xf40000,
- .absolute = 1 },
- { .id = ZORRO_PROD_MACROSYSTEMS_WARP_ENGINE_40xx, .offset = 0x40000 },
- { .id = ZORRO_PROD_CBM_A4091_1, .offset = 0x800000 },
- { .id = ZORRO_PROD_CBM_A4091_2, .offset = 0x800000 },
- { .id = ZORRO_PROD_GVP_GFORCE_040_060, .offset = 0x40000 },
- { 0 }
-};
-
-static int __init amiga7xx_zorro_detect(struct scsi_host_template *tpnt)
-{
- int num = 0, i;
- struct zorro_dev *z = NULL;
- unsigned long address;
-
- while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
- for (i = 0; amiga7xx_table[i].id; i++)
- if (z->id == amiga7xx_table[i].id)
- break;
- if (!amiga7xx_table[i].id)
- continue;
- if (amiga7xx_table[i].absolute)
- address = amiga7xx_table[i].offset;
- else
- address = z->resource.start + amiga7xx_table[i].offset;
- num += amiga7xx_register_one(tpnt, address);
- }
- return num;
-}
-
-#endif /* CONFIG_ZORRO */
-
-
-int __init amiga7xx_detect(struct scsi_host_template *tpnt)
-{
- static unsigned char called = 0;
- int num = 0;
-
- if (called || !MACH_IS_AMIGA)
- return 0;
-
- tpnt->proc_name = "Amiga7xx";
-
- if (AMIGAHW_PRESENT(A4000_SCSI))
- num += amiga7xx_register_one(tpnt, 0xdd0040);
-
-#ifdef CONFIG_ZORRO
- num += amiga7xx_zorro_detect(tpnt);
-#endif
-
- called = 1;
- return num;
-}
-
-static int amiga7xx_release(struct Scsi_Host *shost)
-{
- if (shost->irq)
- free_irq(shost->irq, NULL);
- if (shost->dma_channel != 0xff)
- free_dma(shost->dma_channel);
- if (shost->io_port && shost->n_io_port)
- release_region(shost->io_port, shost->n_io_port);
- scsi_unregister(shost);
- return 0;
-}
-
-static struct scsi_host_template driver_template = {
- .name = "Amiga NCR53c710 SCSI",
- .detect = amiga7xx_detect,
- .release = amiga7xx_release,
- .queuecommand = NCR53c7xx_queue_command,
- .abort = NCR53c7xx_abort,
- .reset = NCR53c7xx_reset,
- .can_queue = 24,
- .this_id = 7,
- .sg_tablesize = 63,
- .cmd_per_lun = 3,
- .use_clustering = DISABLE_CLUSTERING
-};
-
-
-#include "scsi_module.c"
diff --git a/drivers/scsi/amiga7xx.h b/drivers/scsi/amiga7xx.h
deleted file mode 100644
index 7cd63a99688..00000000000
--- a/drivers/scsi/amiga7xx.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef AMIGA7XX_H
-
-#include <linux/types.h>
-
-int amiga7xx_detect(struct scsi_host_template *);
-const char *NCR53c7x0_info(void);
-int NCR53c7xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
-int NCR53c7xx_abort(Scsi_Cmnd *);
-int NCR53c7x0_release (struct Scsi_Host *);
-int NCR53c7xx_reset(Scsi_Cmnd *, unsigned int);
-void NCR53c7x0_intr(int irq, void *dev_id);
-
-#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 3
-#endif
-
-#ifndef CAN_QUEUE
-#define CAN_QUEUE 24
-#endif
-
-#include <scsi/scsicam.h>
-
-#endif /* AMIGA7XX_H */
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index aff96db9ccf..f0b8bf4534f 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -48,9 +48,10 @@ struct class_device_attribute;
#define ARCMSR_MAX_OUTSTANDING_CMD 256
#define ARCMSR_MAX_FREECCB_NUM 288
-#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.13"
+#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.14"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
+#define ARCMSR_MAX_XFER_SECTORS_B 4096
#define ARCMSR_MAX_TARGETID 17
#define ARCMSR_MAX_TARGETLUN 8
#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD
@@ -469,4 +470,3 @@ extern void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb);
extern struct class_device_attribute *arcmsr_host_attrs[];
extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb);
void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb);
-
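
In the arcmsr_hba.c hunks below, the ioctl and virtual-INQUIRY paths stop special-casing !use_sg: the command always carries a scatterlist now, so the driver maps the first segment with kmap_atomic() and rejects multi-segment buffers it cannot handle. A minimal sketch of that access pattern (error handling trimmed; the helper name is invented):

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <scsi/scsi_cmnd.h>

/* copy a small reply into the first (and only expected) data segment */
static int example_copy_to_first_segment(struct scsi_cmnd *cmd,
                                         const void *src, size_t len)
{
        struct scatterlist *sg = scsi_sglist(cmd);
        char *buf;

        if (scsi_sg_count(cmd) > 1 || len > sg->length)
                return -EINVAL;

        buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
        memcpy(buf, src, len);
        kunmap_atomic(buf - sg->offset, KM_IRQ0);
        return 0;
}
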
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 8b46158cc04..0ddfc21e9f7 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -57,6 +57,7 @@
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/pci.h>
+#include <linux/aer.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/system.h>
@@ -71,7 +72,7 @@
#include "arcmsr.h"
MODULE_AUTHOR("Erich Chen <erich@areca.com.tw>");
-MODULE_DESCRIPTION("ARECA (ARC11xx/12xx) SATA RAID HOST Adapter");
+MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);
@@ -93,7 +94,9 @@ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
-
+static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
{
if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
@@ -104,7 +107,8 @@ static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_de
static struct scsi_host_template arcmsr_scsi_host_template = {
.module = THIS_MODULE,
- .name = "ARCMSR ARECA SATA RAID HOST Adapter" ARCMSR_DRIVER_VERSION,
+ .name = "ARCMSR ARECA SATA/SAS RAID HOST Adapter"
+ ARCMSR_DRIVER_VERSION,
.info = arcmsr_info,
.queuecommand = arcmsr_queue_command,
.eh_abort_handler = arcmsr_abort,
@@ -119,6 +123,10 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = arcmsr_host_attrs,
};
+static struct pci_error_handlers arcmsr_pci_error_handlers = {
+ .error_detected = arcmsr_pci_error_detected,
+ .slot_reset = arcmsr_pci_slot_reset,
+};
static struct pci_device_id arcmsr_device_id_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
@@ -144,7 +152,8 @@ static struct pci_driver arcmsr_pci_driver = {
.id_table = arcmsr_device_id_table,
.probe = arcmsr_probe,
.remove = arcmsr_remove,
- .shutdown = arcmsr_shutdown
+ .shutdown = arcmsr_shutdown,
+ .err_handler = &arcmsr_pci_error_handlers,
};
static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
@@ -328,6 +337,8 @@ static int arcmsr_probe(struct pci_dev *pdev,
arcmsr_iop_init(acb);
pci_set_drvdata(pdev, host);
+ if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
+ host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B;
error = scsi_add_host(host, &pdev->dev);
if (error)
@@ -338,6 +349,7 @@ static int arcmsr_probe(struct pci_dev *pdev,
goto out_free_sysfs;
scsi_scan_host(host);
+ pci_enable_pcie_error_reporting(pdev);
return 0;
out_free_sysfs:
out_free_irq:
@@ -369,19 +381,9 @@ static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
{
- struct AdapterControlBlock *acb = ccb->acb;
struct scsi_cmnd *pcmd = ccb->pcmd;
- if (pcmd->use_sg != 0) {
- struct scatterlist *sl;
-
- sl = (struct scatterlist *)pcmd->request_buffer;
- pci_unmap_sg(acb->pdev, sl, pcmd->use_sg, pcmd->sc_data_direction);
- }
- else if (pcmd->request_bufflen != 0)
- pci_unmap_single(acb->pdev,
- pcmd->SCp.dma_handle,
- pcmd->request_bufflen, pcmd->sc_data_direction);
+ scsi_dma_unmap(pcmd);
}
static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
@@ -498,7 +500,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
- struct MessageUnit __iomem *reg=acb->pmu;
+ struct MessageUnit __iomem *reg = acb->pmu;
writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
if (arcmsr_wait_msgint_ready(acb))
@@ -551,6 +553,7 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
int8_t *psge = (int8_t *)&arcmsr_cdb->u;
uint32_t address_lo, address_hi;
int arccdbsize = 0x30;
+ int nseg;
ccb->pcmd = pcmd;
memset(arcmsr_cdb, 0, sizeof (struct ARCMSR_CDB));
@@ -561,20 +564,20 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
- if (pcmd->use_sg) {
- int length, sgcount, i, cdb_sgcount = 0;
- struct scatterlist *sl;
-
- /* Get Scatter Gather List from scsiport. */
- sl = (struct scatterlist *) pcmd->request_buffer;
- sgcount = pci_map_sg(acb->pdev, sl, pcmd->use_sg,
- pcmd->sc_data_direction);
+
+ nseg = scsi_dma_map(pcmd);
+ BUG_ON(nseg < 0);
+
+ if (nseg) {
+ int length, i, cdb_sgcount = 0;
+ struct scatterlist *sg;
+
/* map stor port SG list to our iop SG List. */
- for (i = 0; i < sgcount; i++) {
+ scsi_for_each_sg(pcmd, sg, nseg, i) {
/* Get the physical address of the current data pointer */
- length = cpu_to_le32(sg_dma_len(sl));
- address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sl)));
- address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sl)));
+ length = cpu_to_le32(sg_dma_len(sg));
+ address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
+ address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
if (address_hi == 0) {
struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
@@ -591,32 +594,12 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
psge += sizeof (struct SG64ENTRY);
arccdbsize += sizeof (struct SG64ENTRY);
}
- sl++;
cdb_sgcount++;
}
arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
- arcmsr_cdb->DataLength = pcmd->request_bufflen;
+ arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
if ( arccdbsize > 256)
arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
- } else if (pcmd->request_bufflen) {
- dma_addr_t dma_addr;
- dma_addr = pci_map_single(acb->pdev, pcmd->request_buffer,
- pcmd->request_bufflen, pcmd->sc_data_direction);
- pcmd->SCp.dma_handle = dma_addr;
- address_lo = cpu_to_le32(dma_addr_lo32(dma_addr));
- address_hi = cpu_to_le32(dma_addr_hi32(dma_addr));
- if (address_hi == 0) {
- struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
- pdma_sg->address = address_lo;
- pdma_sg->length = pcmd->request_bufflen;
- } else {
- struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
- pdma_sg->addresshigh = address_hi;
- pdma_sg->address = address_lo;
- pdma_sg->length = pcmd->request_bufflen|IS_SG64_ADDR;
- }
- arcmsr_cdb->sgcount = 1;
- arcmsr_cdb->DataLength = pcmd->request_bufflen;
}
if (pcmd->sc_data_direction == DMA_TO_DEVICE ) {
arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
@@ -747,7 +730,7 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
int id, lun;
/*
****************************************************************
- ** areca cdb command done
+ ** areca cdb command done
****************************************************************
*/
while (1) {
@@ -758,20 +741,20 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
(flag_ccb << 5));
if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
if (ccb->startdone == ARCMSR_CCB_ABORTED) {
- struct scsi_cmnd *abortcmd=ccb->pcmd;
+ struct scsi_cmnd *abortcmd = ccb->pcmd;
if (abortcmd) {
abortcmd->result |= DID_ABORT >> 16;
arcmsr_ccb_complete(ccb, 1);
printk(KERN_NOTICE
- "arcmsr%d: ccb='0x%p' isr got aborted command \n"
+ "arcmsr%d: ccb ='0x%p' isr got aborted command \n"
, acb->host->host_no, ccb);
}
continue;
}
printk(KERN_NOTICE
- "arcmsr%d: isr get an illegal ccb command done acb='0x%p'"
- "ccb='0x%p' ccbacb='0x%p' startdone = 0x%x"
- " ccboutstandingcount=%d \n"
+ "arcmsr%d: isr get an illegal ccb command done acb = '0x%p'"
+ "ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
+ " ccboutstandingcount = %d \n"
, acb->host->host_no
, acb
, ccb
@@ -791,7 +774,7 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
switch(ccb->arcmsr_cdb.DeviceStatus) {
case ARCMSR_DEV_SELECT_TIMEOUT: {
acb->devstate[id][lun] = ARECA_RAID_GONE;
- ccb->pcmd->result = DID_TIME_OUT << 16;
+ ccb->pcmd->result = DID_NO_CONNECT << 16;
arcmsr_ccb_complete(ccb, 1);
}
break;
@@ -810,8 +793,8 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
break;
default:
printk(KERN_NOTICE
- "arcmsr%d: scsi id=%d lun=%d"
- " isr get command error done,"
+ "arcmsr%d: scsi id = %d lun = %d"
+ " isr get command error done, "
"but got unknown DeviceStatus = 0x%x \n"
, acb->host->host_no
, id
@@ -848,24 +831,21 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_
struct CMD_MESSAGE_FIELD *pcmdmessagefld;
int retvalue = 0, transfer_len = 0;
char *buffer;
+ struct scatterlist *sg;
uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
(uint32_t ) cmd->cmnd[6] << 16 |
(uint32_t ) cmd->cmnd[7] << 8 |
(uint32_t ) cmd->cmnd[8];
/* 4 bytes: Areca io control code */
- if (cmd->use_sg) {
- struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
- buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
- if (cmd->use_sg > 1) {
- retvalue = ARCMSR_MESSAGE_FAIL;
- goto message_out;
- }
- transfer_len += sg->length;
- } else {
- buffer = cmd->request_buffer;
- transfer_len = cmd->request_bufflen;
+ sg = scsi_sglist(cmd);
+ buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ if (scsi_sg_count(cmd) > 1) {
+ retvalue = ARCMSR_MESSAGE_FAIL;
+ goto message_out;
}
+ transfer_len += sg->length;
+
if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
@@ -1057,12 +1037,9 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_
retvalue = ARCMSR_MESSAGE_FAIL;
}
message_out:
- if (cmd->use_sg) {
- struct scatterlist *sg;
+ sg = scsi_sglist(cmd);
+ kunmap_atomic(buffer - sg->offset, KM_IRQ0);
- sg = (struct scatterlist *) cmd->request_buffer;
- kunmap_atomic(buffer - sg->offset, KM_IRQ0);
- }
return retvalue;
}
@@ -1085,6 +1062,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
case INQUIRY: {
unsigned char inqdata[36];
char *buffer;
+ struct scatterlist *sg;
if (cmd->device->lun) {
cmd->result = (DID_TIME_OUT << 16);
@@ -1096,7 +1074,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
inqdata[1] = 0;
/* rem media bit & Dev Type Modifier */
inqdata[2] = 0;
- /* ISO,ECMA,& ANSI versions */
+ /* ISO, ECMA, & ANSI versions */
inqdata[4] = 31;
/* length of additional data */
strncpy(&inqdata[8], "Areca ", 8);
@@ -1104,21 +1082,14 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
strncpy(&inqdata[16], "RAID controller ", 16);
/* Product Identification */
strncpy(&inqdata[32], "R001", 4); /* Product Revision */
- if (cmd->use_sg) {
- struct scatterlist *sg;
- sg = (struct scatterlist *) cmd->request_buffer;
- buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
- } else {
- buffer = cmd->request_buffer;
- }
+ sg = scsi_sglist(cmd);
+ buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+
memcpy(buffer, inqdata, sizeof(inqdata));
- if (cmd->use_sg) {
- struct scatterlist *sg;
+ sg = scsi_sglist(cmd);
+ kunmap_atomic(buffer - sg->offset, KM_IRQ0);
- sg = (struct scatterlist *) cmd->request_buffer;
- kunmap_atomic(buffer - sg->offset, KM_IRQ0);
- }
cmd->scsi_done(cmd);
}
break;
@@ -1153,7 +1124,7 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
, acb->host->host_no);
return SCSI_MLQUEUE_HOST_BUSY;
}
- if(target == 16) {
+ if (target == 16) {
/* virtual device for iop message transfer */
arcmsr_handle_virtual_command(acb, cmd);
return 0;
@@ -1166,7 +1137,7 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
printk(KERN_NOTICE
"arcmsr%d: block 'read/write'"
"command with gone raid volume"
- " Cmd=%2x, TargetId=%d, Lun=%d \n"
+ " Cmd = %2x, TargetId = %d, Lun = %d \n"
, acb->host->host_no
, cmd->cmnd[0]
, target, lun);
@@ -1257,7 +1228,7 @@ static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
if ((ccb->startdone == ARCMSR_CCB_ABORTED) ||
(ccb == poll_ccb)) {
printk(KERN_NOTICE
- "arcmsr%d: scsi id=%d lun=%d ccb='0x%p'"
+ "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
" poll command abort successfully \n"
, acb->host->host_no
, ccb->pcmd->device->id
@@ -1270,8 +1241,8 @@ static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
}
printk(KERN_NOTICE
"arcmsr%d: polling get an illegal ccb"
- " command done ccb='0x%p'"
- "ccboutstandingcount=%d \n"
+ " command done ccb ='0x%p'"
+ "ccboutstandingcount = %d \n"
, acb->host->host_no
, ccb
, atomic_read(&acb->ccboutstandingcount));
@@ -1288,7 +1259,7 @@ static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
switch(ccb->arcmsr_cdb.DeviceStatus) {
case ARCMSR_DEV_SELECT_TIMEOUT: {
acb->devstate[id][lun] = ARECA_RAID_GONE;
- ccb->pcmd->result = DID_TIME_OUT << 16;
+ ccb->pcmd->result = DID_NO_CONNECT << 16;
arcmsr_ccb_complete(ccb, 1);
}
break;
@@ -1307,7 +1278,7 @@ static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
break;
default:
printk(KERN_NOTICE
- "arcmsr%d: scsi id=%d lun=%d"
+ "arcmsr%d: scsi id = %d lun = %d"
" polling and getting command error done"
"but got unknown DeviceStatus = 0x%x \n"
, acb->host->host_no
@@ -1322,6 +1293,94 @@ static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
}
}
}
+static void arcmsr_done4_abort_postqueue(struct AdapterControlBlock *acb)
+{
+ int i = 0, found = 0;
+ int id, lun;
+ uint32_t flag_ccb, outbound_intstatus;
+ struct MessageUnit __iomem *reg = acb->pmu;
+ struct CommandControlBlock *ccb;
+ /*clear and abort all outbound posted Q*/
+
+ while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) &&
+(i++ < 256)){
+ ccb = (struct CommandControlBlock *)(acb->vir2phy_offset +
+(flag_ccb << 5));
+ if (ccb){
+ if ((ccb->acb != acb)||(ccb->startdone != \
+ARCMSR_CCB_START)){
+ printk(KERN_NOTICE "arcmsr%d: polling get \
+an illegal ccb" "command done ccb = '0x%p'""ccboutstandingcount = %d \n",
+ acb->host->host_no, ccb,
+ atomic_read(&acb->ccboutstandingcount));
+ continue;
+ }
+
+ id = ccb->pcmd->device->id;
+ lun = ccb->pcmd->device->lun;
+ if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)){
+ if (acb->devstate[id][lun] == ARECA_RAID_GONE)
+ acb->devstate[id][lun] = ARECA_RAID_GOOD;
+ ccb->pcmd->result = DID_OK << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ else {
+ switch(ccb->arcmsr_cdb.DeviceStatus) {
+ case ARCMSR_DEV_SELECT_TIMEOUT: {
+ acb->devstate[id][lun] = ARECA_RAID_GONE;
+ ccb->pcmd->result = DID_NO_CONNECT << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ case ARCMSR_DEV_ABORTED:
+
+ case ARCMSR_DEV_INIT_FAIL: {
+ acb->devstate[id][lun] =
+ ARECA_RAID_GONE;
+ ccb->pcmd->result =
+ DID_BAD_TARGET << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ case ARCMSR_DEV_CHECK_CONDITION: {
+ acb->devstate[id][lun] =
+ ARECA_RAID_GOOD;
+ arcmsr_report_sense_info(ccb);
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ default:
+ printk(KERN_NOTICE
+ "arcmsr%d: scsi id = %d \
+ lun = %d""polling and \
+ getting command error \
+ done""but got unknown \
+ DeviceStatus = 0x%x \n",
+ acb->host->host_no, id,
+ lun, ccb->arcmsr_cdb.DeviceStatus);
+ acb->devstate[id][lun] =
+ ARECA_RAID_GONE;
+ ccb->pcmd->result =
+ DID_BAD_TARGET << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ break;
+ }
+ }
+ found = 1;
+ }
+ }
+ if (found){
+ outbound_intstatus = readl(&reg->outbound_intstatus) & \
+ acb->outbound_int_enable;
+ writel(outbound_intstatus, &reg->outbound_intstatus);
+ /*clear interrupt*/
+ }
+ return;
+}
+
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
@@ -1355,7 +1414,6 @@ static void arcmsr_iop_init(struct AdapterControlBlock *acb)
static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
- struct MessageUnit __iomem *reg = acb->pmu;
struct CommandControlBlock *ccb;
uint32_t intmask_org;
int i = 0;
@@ -1368,21 +1426,17 @@ static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
/* disable all outbound interrupt */
intmask_org = arcmsr_disable_outbound_ints(acb);
/* clear all outbound posted Q */
- for (i = 0; i < ARCMSR_MAX_OUTSTANDING_CMD; i++)
- readl(&reg->outbound_queueport);
+ arcmsr_done4_abort_postqueue(acb);
for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
ccb = acb->pccb_pool[i];
- if ((ccb->startdone == ARCMSR_CCB_START) ||
- (ccb->startdone == ARCMSR_CCB_ABORTED)) {
+ if (ccb->startdone == ARCMSR_CCB_START) {
ccb->startdone = ARCMSR_CCB_ABORTED;
- ccb->pcmd->result = DID_ABORT << 16;
- arcmsr_ccb_complete(ccb, 1);
}
}
/* enable all outbound interrupt */
arcmsr_enable_outbound_ints(acb, intmask_org);
}
- atomic_set(&acb->ccboutstandingcount, 0);
+
}
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
@@ -1428,10 +1482,9 @@ static int arcmsr_abort(struct scsi_cmnd *cmd)
int i = 0;
printk(KERN_NOTICE
- "arcmsr%d: abort device command of scsi id=%d lun=%d \n",
+ "arcmsr%d: abort device command of scsi id = %d lun = %d \n",
acb->host->host_no, cmd->device->id, cmd->device->lun);
acb->num_aborts++;
-
/*
************************************************
** the all interrupt service routine is locked
@@ -1486,10 +1539,306 @@ static const char *arcmsr_info(struct Scsi_Host *host)
type = "X-TYPE";
break;
}
- sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
+ sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
type, raid6 ? "( RAID6 capable)" : "",
ARCMSR_DRIVER_VERSION);
return buf;
}
+static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host;
+ struct AdapterControlBlock *acb;
+ uint8_t bus, dev_fun;
+ int error;
+
+ error = pci_enable_device(pdev);
+ if (error)
+ return PCI_ERS_RESULT_DISCONNECT;
+ pci_set_master(pdev);
+
+ host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof \
+(struct AdapterControlBlock));
+ if (!host)
+ return PCI_ERS_RESULT_DISCONNECT;
+ acb = (struct AdapterControlBlock *)host->hostdata;
+ memset(acb, 0, sizeof (struct AdapterControlBlock));
+
+ error = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+ if (error) {
+ error = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (error) {
+ printk(KERN_WARNING
+ "scsi%d: No suitable DMA mask available\n",
+ host->host_no);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ }
+ bus = pdev->bus->number;
+ dev_fun = pdev->devfn;
+ acb = (struct AdapterControlBlock *) host->hostdata;
+ memset(acb, 0, sizeof(struct AdapterControlBlock));
+ acb->pdev = pdev;
+ acb->host = host;
+ host->max_sectors = ARCMSR_MAX_XFER_SECTORS;
+ host->max_lun = ARCMSR_MAX_TARGETLUN;
+ host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/
+ host->max_cmd_len = 16; /*this is issue of 64bit LBA, over 2T byte*/
+ host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES;
+ host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
+ host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
+ host->this_id = ARCMSR_SCSI_INITIATOR_ID;
+ host->unique_id = (bus << 8) | dev_fun;
+ host->irq = pdev->irq;
+ error = pci_request_regions(pdev, "arcmsr");
+ if (error)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ acb->pmu = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!acb->pmu) {
+ printk(KERN_NOTICE "arcmsr%d: memory"
+ " mapping region fail \n", acb->host->host_no);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
+ ACB_F_MESSAGE_RQBUFFER_CLEARED |
+ ACB_F_MESSAGE_WQBUFFER_READED);
+ acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
+ INIT_LIST_HEAD(&acb->ccb_free_list);
+
+ error = arcmsr_alloc_ccb_pool(acb);
+ if (error)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ error = request_irq(pdev->irq, arcmsr_do_interrupt,
+ IRQF_DISABLED | IRQF_SHARED, "arcmsr", acb);
+ if (error)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ arcmsr_iop_init(acb);
+ if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
+ host->max_sectors = ARCMSR_MAX_XFER_SECTORS_B;
+
+ pci_set_drvdata(pdev, host);
+
+ error = scsi_add_host(host, &pdev->dev);
+ if (error)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ error = arcmsr_alloc_sysfs_attr(acb);
+ if (error)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ scsi_scan_host(host);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void arcmsr_pci_ers_need_reset_forepart(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
+ struct MessageUnit __iomem *reg = acb->pmu;
+ struct CommandControlBlock *ccb;
+ /*clear and abort all outbound posted Q*/
+ int i = 0, found = 0;
+ int id, lun;
+ uint32_t flag_ccb, outbound_intstatus;
+
+ while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) &&
+ (i++ < 256)){
+ ccb = (struct CommandControlBlock *)(acb->vir2phy_offset
+ + (flag_ccb << 5));
+ if (ccb){
+ if ((ccb->acb != acb)||(ccb->startdone !=
+ ARCMSR_CCB_START)){
+ printk(KERN_NOTICE "arcmsr%d: polling \
+ get an illegal ccb"" command done ccb = '0x%p'"
+ "ccboutstandingcount = %d \n",
+ acb->host->host_no, ccb,
+ atomic_read(&acb->ccboutstandingcount));
+ continue;
+ }
+ id = ccb->pcmd->device->id;
+ lun = ccb->pcmd->device->lun;
+ if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
+ if (acb->devstate[id][lun] ==
+ ARECA_RAID_GONE)
+ acb->devstate[id][lun] =
+ ARECA_RAID_GOOD;
+ ccb->pcmd->result = DID_OK << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ else {
+ switch(ccb->arcmsr_cdb.DeviceStatus) {
+ case ARCMSR_DEV_SELECT_TIMEOUT: {
+ acb->devstate[id][lun] =
+ ARECA_RAID_GONE;
+ ccb->pcmd->result =
+ DID_NO_CONNECT << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ case ARCMSR_DEV_ABORTED:
+
+ case ARCMSR_DEV_INIT_FAIL: {
+ acb->devstate[id][lun] =
+ ARECA_RAID_GONE;
+ ccb->pcmd->result =
+ DID_BAD_TARGET << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ case ARCMSR_DEV_CHECK_CONDITION: {
+ acb->devstate[id][lun] =
+ ARECA_RAID_GOOD;
+ arcmsr_report_sense_info(ccb);
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ default:
+ printk(KERN_NOTICE
+ "arcmsr%d: scsi \
+ id = %d lun = %d"
+ " polling and \
+ getting command \
+ error done"
+ "but got unknown \
+ DeviceStatus = 0x%x \n"
+ , acb->host->host_no,
+ id, lun,
+ ccb->arcmsr_cdb.DeviceStatus);
+ acb->devstate[id][lun] =
+ ARECA_RAID_GONE;
+ ccb->pcmd->result =
+ DID_BAD_TARGET << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ break;
+ }
+ }
+ found = 1;
+ }
+ }
+ if (found){
+ outbound_intstatus = readl(&reg->outbound_intstatus) &
+ acb->outbound_int_enable;
+ writel(outbound_intstatus, &reg->outbound_intstatus);
+ /*clear interrupt*/
+ }
+ return;
+}
+
+
+static void arcmsr_pci_ers_disconnect_forepart(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
+ struct MessageUnit __iomem *reg = acb->pmu;
+ struct CommandControlBlock *ccb;
+ /*clear and abort all outbound posted Q*/
+ int i = 0, found = 0;
+ int id, lun;
+ uint32_t flag_ccb, outbound_intstatus;
+
+ while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) &&
+ (i++ < 256)){
+ ccb = (struct CommandControlBlock *)(acb->vir2phy_offset +
+ (flag_ccb << 5));
+ if (ccb){
+ if ((ccb->acb != acb)||(ccb->startdone !=
+ ARCMSR_CCB_START)){
+ printk(KERN_NOTICE
+ "arcmsr%d: polling get an illegal ccb"
+ " command done ccb = '0x%p'"
+ "ccboutstandingcount = %d \n",
+ acb->host->host_no, ccb,
+ atomic_read(&acb->ccboutstandingcount));
+ continue;
+ }
+
+ id = ccb->pcmd->device->id;
+ lun = ccb->pcmd->device->lun;
+ if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
+ if (acb->devstate[id][lun] == ARECA_RAID_GONE)
+ acb->devstate[id][lun] = ARECA_RAID_GOOD;
+ ccb->pcmd->result = DID_OK << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ else {
+ switch(ccb->arcmsr_cdb.DeviceStatus) {
+ case ARCMSR_DEV_SELECT_TIMEOUT: {
+ acb->devstate[id][lun] =
+ ARECA_RAID_GONE;
+ ccb->pcmd->result =
+ DID_NO_CONNECT << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ case ARCMSR_DEV_ABORTED:
+
+ case ARCMSR_DEV_INIT_FAIL: {
+ acb->devstate[id][lun] =
+ ARECA_RAID_GONE;
+ ccb->pcmd->result =
+ DID_BAD_TARGET << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ case ARCMSR_DEV_CHECK_CONDITION: {
+ acb->devstate[id][lun] =
+ ARECA_RAID_GOOD;
+ arcmsr_report_sense_info(ccb);
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ default:
+ printk(KERN_NOTICE "arcmsr%d: \
+ scsi id = %d lun = %d"
+ " polling and \
+ getting command error done"
+ "but got unknown \
+ DeviceStatus = 0x%x \n"
+ , acb->host->host_no,
+ id, lun, ccb->arcmsr_cdb.DeviceStatus);
+ acb->devstate[id][lun] =
+ ARECA_RAID_GONE;
+ ccb->pcmd->result =
+ DID_BAD_TARGET << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ break;
+ }
+ }
+ found = 1;
+ }
+ }
+ if (found){
+ outbound_intstatus = readl(&reg->outbound_intstatus) &
+ acb->outbound_int_enable;
+ writel(outbound_intstatus, &reg->outbound_intstatus);
+ /*clear interrupt*/
+ }
+ return;
+}
+
+static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ switch (state) {
+ case pci_channel_io_frozen:
+ arcmsr_pci_ers_need_reset_forepart(pdev);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ arcmsr_pci_ers_disconnect_forepart(pdev);
+ return PCI_ERS_RESULT_DISCONNECT;
+ break;
+ default:
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+}
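
The block above adds arcmsr to the PCI error-recovery framework: the driver registers a struct pci_error_handlers, .error_detected chooses between a slot reset and a disconnect, and .slot_reset re-enables and re-initialises the adapter before reporting recovery. The skeleton of that hookup, reduced to its shape (the re-init step is only a comment here):

#include <linux/pci.h>

static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
{
        switch (state) {
        case pci_channel_io_frozen:
                /* abort in-flight commands, then request a slot reset */
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
                /* the device is not coming back */
                return PCI_ERS_RESULT_DISCONNECT;
        default:
                return PCI_ERS_RESULT_NEED_RESET;
        }
}

static pci_ers_result_t example_slot_reset(struct pci_dev *pdev)
{
        if (pci_enable_device(pdev))
                return PCI_ERS_RESULT_DISCONNECT;
        pci_set_master(pdev);
        /* re-initialise adapter state here, as arcmsr_pci_slot_reset does */
        return PCI_ERS_RESULT_RECOVERED;
}

static struct pci_error_handlers example_err_handler = {
        .error_detected = example_error_detected,
        .slot_reset     = example_slot_reset,
};
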
diff --git a/drivers/scsi/bvme6000.c b/drivers/scsi/bvme6000.c
deleted file mode 100644
index 599b400a3c4..00000000000
--- a/drivers/scsi/bvme6000.c
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Detection routine for the NCR53c710 based BVME6000 SCSI Controllers for Linux.
- *
- * Based on work by Alan Hourihane
- */
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/blkdev.h>
-#include <linux/zorro.h>
-
-#include <asm/setup.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/bvme6000hw.h>
-#include <asm/irq.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "53c7xx.h"
-#include "bvme6000.h"
-
-#include<linux/stat.h>
-
-
-int bvme6000_scsi_detect(struct scsi_host_template *tpnt)
-{
- static unsigned char called = 0;
- int clock;
- long long options;
-
- if (called)
- return 0;
- if (!MACH_IS_BVME6000)
- return 0;
-
- tpnt->proc_name = "BVME6000";
-
- options = OPTION_MEMORY_MAPPED|OPTION_DEBUG_TEST1|OPTION_INTFLY|OPTION_SYNCHRONOUS|OPTION_ALWAYS_SYNCHRONOUS|OPTION_DISCONNECT;
-
- clock = 40000000; /* 66MHz SCSI Clock */
-
- ncr53c7xx_init(tpnt, 0, 710, (unsigned long)BVME_NCR53C710_BASE,
- 0, BVME_IRQ_SCSI, DMA_NONE,
- options, clock);
- called = 1;
- return 1;
-}
-
-static int bvme6000_scsi_release(struct Scsi_Host *shost)
-{
- if (shost->irq)
- free_irq(shost->irq, NULL);
- if (shost->dma_channel != 0xff)
- free_dma(shost->dma_channel);
- if (shost->io_port && shost->n_io_port)
- release_region(shost->io_port, shost->n_io_port);
- scsi_unregister(shost);
- return 0;
-}
-
-static struct scsi_host_template driver_template = {
- .name = "BVME6000 NCR53c710 SCSI",
- .detect = bvme6000_scsi_detect,
- .release = bvme6000_scsi_release,
- .queuecommand = NCR53c7xx_queue_command,
- .abort = NCR53c7xx_abort,
- .reset = NCR53c7xx_reset,
- .can_queue = 24,
- .this_id = 7,
- .sg_tablesize = 63,
- .cmd_per_lun = 3,
- .use_clustering = DISABLE_CLUSTERING
-};
-
-
-#include "scsi_module.c"
diff --git a/drivers/scsi/bvme6000.h b/drivers/scsi/bvme6000.h
deleted file mode 100644
index ea3e4b2b922..00000000000
--- a/drivers/scsi/bvme6000.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef BVME6000_SCSI_H
-#define BVME6000_SCSI_H
-
-#include <linux/types.h>
-
-int bvme6000_scsi_detect(struct scsi_host_template *);
-const char *NCR53c7x0_info(void);
-int NCR53c7xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
-int NCR53c7xx_abort(Scsi_Cmnd *);
-int NCR53c7x0_release (struct Scsi_Host *);
-int NCR53c7xx_reset(Scsi_Cmnd *, unsigned int);
-void NCR53c7x0_intr(int irq, void *dev_id);
-
-#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 3
-#endif
-
-#ifndef CAN_QUEUE
-#define CAN_QUEUE 24
-#endif
-
-#include <scsi/scsicam.h>
-
-#endif /* BVME6000_SCSI_H */
diff --git a/drivers/scsi/bvme6000_scsi.c b/drivers/scsi/bvme6000_scsi.c
new file mode 100644
index 00000000000..012cdea7946
--- /dev/null
+++ b/drivers/scsi/bvme6000_scsi.c
@@ -0,0 +1,135 @@
+/*
+ * Detection routine for the NCR53c710 based BVME6000 SCSI Controllers for Linux.
+ *
+ * Based on work by Alan Hourihane and Kars de Jong
+ *
+ * Rewritten to use 53c700.c by Richard Hirst <richard@sleepie.demon.co.uk>
+ */
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/bvme6000hw.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "53c700.h"
+
+MODULE_AUTHOR("Richard Hirst <richard@sleepie.demon.co.uk>");
+MODULE_DESCRIPTION("BVME6000 NCR53C710 driver");
+MODULE_LICENSE("GPL");
+
+static struct scsi_host_template bvme6000_scsi_driver_template = {
+ .name = "BVME6000 NCR53c710 SCSI",
+ .proc_name = "BVME6000",
+ .this_id = 7,
+ .module = THIS_MODULE,
+};
+
+static struct platform_device *bvme6000_scsi_device;
+
+static __devinit int
+bvme6000_probe(struct device *dev)
+{
+ struct Scsi_Host * host = NULL;
+ struct NCR_700_Host_Parameters *hostdata;
+
+ if (!MACH_IS_BVME6000)
+ goto out;
+
+ hostdata = kmalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
+ if (hostdata == NULL) {
+ printk(KERN_ERR "bvme6000-scsi: "
+ "Failed to allocate host data\n");
+ goto out;
+ }
+ memset(hostdata, 0, sizeof(struct NCR_700_Host_Parameters));
+
+ /* Fill in the required pieces of hostdata */
+ hostdata->base = (void __iomem *)BVME_NCR53C710_BASE;
+ hostdata->clock = 40; /* XXX - depends on the CPU clock! */
+ hostdata->chip710 = 1;
+ hostdata->dmode_extra = DMODE_FC2;
+ hostdata->dcntl_extra = EA_710;
+ hostdata->ctest7_extra = CTEST7_TT1;
+
+ /* and register the chip */
+ host = NCR_700_detect(&bvme6000_scsi_driver_template, hostdata, dev);
+ if (!host) {
+ printk(KERN_ERR "bvme6000-scsi: No host detected; "
+ "board configuration problem?\n");
+ goto out_free;
+ }
+ host->base = BVME_NCR53C710_BASE;
+ host->this_id = 7;
+ host->irq = BVME_IRQ_SCSI;
+ if (request_irq(BVME_IRQ_SCSI, NCR_700_intr, 0, "bvme6000-scsi",
+ host)) {
+ printk(KERN_ERR "bvme6000-scsi: request_irq failed\n");
+ goto out_put_host;
+ }
+
+ scsi_scan_host(host);
+
+ return 0;
+
+ out_put_host:
+ scsi_host_put(host);
+ out_free:
+ kfree(hostdata);
+ out:
+ return -ENODEV;
+}
+
+static __devexit int
+bvme6000_device_remove(struct device *dev)
+{
+ struct Scsi_Host *host = dev_to_shost(dev);
+ struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
+
+ scsi_remove_host(host);
+ NCR_700_release(host);
+ kfree(hostdata);
+ free_irq(host->irq, host);
+
+ return 0;
+}
+
+static struct device_driver bvme6000_scsi_driver = {
+ .name = "bvme6000-scsi",
+ .bus = &platform_bus_type,
+ .probe = bvme6000_probe,
+ .remove = __devexit_p(bvme6000_device_remove),
+};
+
+static int __init bvme6000_scsi_init(void)
+{
+ int err;
+
+ err = driver_register(&bvme6000_scsi_driver);
+ if (err)
+ return err;
+
+ bvme6000_scsi_device = platform_device_register_simple("bvme6000-scsi",
+ -1, NULL, 0);
+ if (IS_ERR(bvme6000_scsi_device)) {
+ driver_unregister(&bvme6000_scsi_driver);
+ return PTR_ERR(bvme6000_scsi_device);
+ }
+
+ return 0;
+}
+
+static void __exit bvme6000_scsi_exit(void)
+{
+ platform_device_unregister(bvme6000_scsi_device);
+ driver_unregister(&bvme6000_scsi_driver);
+}
+
+module_init(bvme6000_scsi_init);
+module_exit(bvme6000_scsi_exit);
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 564ea90ed3a..7b8a3457b69 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -979,6 +979,7 @@ static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
struct ScsiReqBlk *srb)
{
+ int nseg;
enum dma_data_direction dir = cmd->sc_data_direction;
dprintkdbg(DBG_0, "build_srb: (pid#%li) <%02i-%i>\n",
cmd->pid, dcb->target_id, dcb->target_lun);
@@ -1000,27 +1001,30 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
srb->scsi_phase = PH_BUS_FREE; /* initial phase */
srb->end_message = 0;
- if (dir == PCI_DMA_NONE || !cmd->request_buffer) {
+ nseg = scsi_dma_map(cmd);
+ BUG_ON(nseg < 0);
+
+ if (dir == PCI_DMA_NONE || !nseg) {
dprintkdbg(DBG_0,
"build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
- cmd->bufflen, cmd->request_buffer,
- cmd->use_sg, srb->segment_x[0].address);
- } else if (cmd->use_sg) {
+ cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd),
+ srb->segment_x[0].address);
+ } else {
int i;
- u32 reqlen = cmd->request_bufflen;
- struct scatterlist *sl = (struct scatterlist *)
- cmd->request_buffer;
+ u32 reqlen = scsi_bufflen(cmd);
+ struct scatterlist *sg;
struct SGentry *sgp = srb->segment_x;
- srb->sg_count = pci_map_sg(dcb->acb->dev, sl, cmd->use_sg,
- dir);
+
+ srb->sg_count = nseg;
+
dprintkdbg(DBG_0,
- "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
- reqlen, cmd->request_buffer, cmd->use_sg,
- srb->sg_count);
+ "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
+ reqlen, scsi_sglist(cmd), scsi_sg_count(cmd),
+ srb->sg_count);
- for (i = 0; i < srb->sg_count; i++) {
- u32 busaddr = (u32)sg_dma_address(&sl[i]);
- u32 seglen = (u32)sl[i].length;
+ scsi_for_each_sg(cmd, sg, srb->sg_count, i) {
+ u32 busaddr = (u32)sg_dma_address(sg);
+ u32 seglen = (u32)sg->length;
sgp[i].address = busaddr;
sgp[i].length = seglen;
srb->total_xfer_length += seglen;
@@ -1050,23 +1054,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
- } else {
- srb->total_xfer_length = cmd->request_bufflen;
- srb->sg_count = 1;
- srb->segment_x[0].address =
- pci_map_single(dcb->acb->dev, cmd->request_buffer,
- srb->total_xfer_length, dir);
-
- /* Fixup for WIDE padding - make sure length is even */
- if (dcb->sync_period & WIDE_SYNC && srb->total_xfer_length % 2)
- srb->total_xfer_length++;
-
- srb->segment_x[0].length = srb->total_xfer_length;
-
- dprintkdbg(DBG_0,
- "build_srb: [1] len=%d buf=%p use_sg=%d map=%08x\n",
- srb->total_xfer_length, cmd->request_buffer,
- cmd->use_sg, srb->segment_x[0].address);
}
srb->request_length = srb->total_xfer_length;
@@ -2128,7 +2115,7 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
/*clear_fifo(acb, "DOP1"); */
/* KG: What is this supposed to be useful for? WIDE padding stuff? */
if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
- && srb->cmd->request_bufflen % 2) {
+ && scsi_bufflen(srb->cmd) % 2) {
d_left_counter = 0;
dprintkl(KERN_INFO,
"data_out_phase0: Discard 1 byte (0x%02x)\n",
@@ -2159,7 +2146,7 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
sg_update_list(srb, d_left_counter);
/* KG: Most ugly hack! Apparently, this works around a chip bug */
if ((srb->segment_x[srb->sg_index].length ==
- diff && srb->cmd->use_sg)
+ diff && scsi_sg_count(srb->cmd))
|| ((oldxferred & ~PAGE_MASK) ==
(PAGE_SIZE - diff))
) {
@@ -2289,19 +2276,15 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
unsigned char *virt, *base = NULL;
unsigned long flags = 0;
size_t len = left_io;
+ size_t offset = srb->request_length - left_io;
+
+ local_irq_save(flags);
+ /* Assumption: it's inside one page as it's at most 4 bytes and
+ I just assume it's on a 4-byte boundary */
+ base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
+ srb->sg_count, &offset, &len);
+ virt = base + offset;
- if (srb->cmd->use_sg) {
- size_t offset = srb->request_length - left_io;
- local_irq_save(flags);
- /* Assumption: it's inside one page as it's at most 4 bytes and
- I just assume it's on a 4-byte boundary */
- base = scsi_kmap_atomic_sg((struct scatterlist *)srb->cmd->request_buffer,
- srb->sg_count, &offset, &len);
- virt = base + offset;
- } else {
- virt = srb->cmd->request_buffer + srb->cmd->request_bufflen - left_io;
- len = left_io;
- }
left_io -= len;
while (len) {
@@ -2341,10 +2324,8 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
}
- if (srb->cmd->use_sg) {
- scsi_kunmap_atomic_sg(base);
- local_irq_restore(flags);
- }
+ scsi_kunmap_atomic_sg(base);
+ local_irq_restore(flags);
}
/*printk(" %08x", *(u32*)(bus_to_virt (addr))); */
/*srb->total_xfer_length = 0; */
@@ -2455,7 +2436,7 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
*/
srb->state |= SRB_DATA_XFER;
DC395x_write32(acb, TRM_S1040_DMA_XHIGHADDR, 0);
- if (srb->cmd->use_sg) { /* with S/G */
+ if (scsi_sg_count(srb->cmd)) { /* with S/G */
io_dir |= DMACMD_SG;
DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
srb->sg_bus_addr +
@@ -2513,18 +2494,14 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
unsigned char *virt, *base = NULL;
unsigned long flags = 0;
size_t len = left_io;
+ size_t offset = srb->request_length - left_io;
+
+ local_irq_save(flags);
+ /* Again, max 4 bytes */
+ base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
+ srb->sg_count, &offset, &len);
+ virt = base + offset;
- if (srb->cmd->use_sg) {
- size_t offset = srb->request_length - left_io;
- local_irq_save(flags);
- /* Again, max 4 bytes */
- base = scsi_kmap_atomic_sg((struct scatterlist *)srb->cmd->request_buffer,
- srb->sg_count, &offset, &len);
- virt = base + offset;
- } else {
- virt = srb->cmd->request_buffer + srb->cmd->request_bufflen - left_io;
- len = left_io;
- }
left_io -= len;
while (len--) {
@@ -2536,10 +2513,8 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
sg_subtract_one(srb);
}
- if (srb->cmd->use_sg) {
- scsi_kunmap_atomic_sg(base);
- local_irq_restore(flags);
- }
+ scsi_kunmap_atomic_sg(base);
+ local_irq_restore(flags);
}
if (srb->dcb->sync_period & WIDE_SYNC) {
if (ln % 2) {
@@ -3295,7 +3270,8 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
struct scsi_cmnd *cmd = srb->cmd;
enum dma_data_direction dir = cmd->sc_data_direction;
- if (cmd->use_sg && dir != PCI_DMA_NONE) {
+
+ if (scsi_sg_count(cmd) && dir != PCI_DMA_NONE) {
/* unmap DC395x SG list */
dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
srb->sg_bus_addr, SEGMENTX_LEN);
@@ -3303,16 +3279,9 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
SEGMENTX_LEN,
PCI_DMA_TODEVICE);
dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
- cmd->use_sg, cmd->request_buffer);
+			scsi_sg_count(cmd), scsi_sglist(cmd));
/* unmap the sg segments */
- pci_unmap_sg(acb->dev,
- (struct scatterlist *)cmd->request_buffer,
- cmd->use_sg, dir);
- } else if (cmd->request_buffer && dir != PCI_DMA_NONE) {
- dprintkdbg(DBG_SG, "pci_unmap_srb: buffer=%08x(%05x)\n",
- srb->segment_x[0].address, cmd->request_bufflen);
- pci_unmap_single(acb->dev, srb->segment_x[0].address,
- cmd->request_bufflen, dir);
+ scsi_dma_unmap(cmd);
}
}
@@ -3352,8 +3321,8 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
dprintkdbg(DBG_1, "srb_done: (pid#%li) <%02i-%i>\n", srb->cmd->pid,
srb->cmd->device->id, srb->cmd->device->lun);
dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
- srb, cmd->use_sg, srb->sg_index, srb->sg_count,
- cmd->request_buffer);
+ srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
+		scsi_sglist(cmd));
status = srb->target_status;
if (srb->flag & AUTO_REQSENSE) {
dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
@@ -3482,16 +3451,10 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
}
}
- if (dir != PCI_DMA_NONE) {
- if (cmd->use_sg)
- pci_dma_sync_sg_for_cpu(acb->dev,
- (struct scatterlist *)cmd->
- request_buffer, cmd->use_sg, dir);
- else if (cmd->request_buffer)
- pci_dma_sync_single_for_cpu(acb->dev,
- srb->segment_x[0].address,
- cmd->request_bufflen, dir);
- }
+ if (dir != PCI_DMA_NONE && scsi_sg_count(cmd))
+ pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), dir);
+
ckc_only = 0;
/* Check Error Conditions */
ckc_e:
@@ -3500,19 +3463,15 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
unsigned char *base = NULL;
struct ScsiInqData *ptr;
unsigned long flags = 0;
+ struct scatterlist* sg = scsi_sglist(cmd);
+ size_t offset = 0, len = sizeof(struct ScsiInqData);
- if (cmd->use_sg) {
- struct scatterlist* sg = (struct scatterlist *)cmd->request_buffer;
- size_t offset = 0, len = sizeof(struct ScsiInqData);
-
- local_irq_save(flags);
- base = scsi_kmap_atomic_sg(sg, cmd->use_sg, &offset, &len);
- ptr = (struct ScsiInqData *)(base + offset);
- } else
- ptr = (struct ScsiInqData *)(cmd->request_buffer);
+ local_irq_save(flags);
+ base = scsi_kmap_atomic_sg(sg, scsi_sg_count(cmd), &offset, &len);
+ ptr = (struct ScsiInqData *)(base + offset);
if (!ckc_only && (cmd->result & RES_DID) == 0
- && cmd->cmnd[2] == 0 && cmd->request_bufflen >= 8
+ && cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
&& dir != PCI_DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
dcb->inquiry7 = ptr->Flags;
@@ -3527,14 +3486,12 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
}
}
- if (cmd->use_sg) {
- scsi_kunmap_atomic_sg(base);
- local_irq_restore(flags);
- }
+ scsi_kunmap_atomic_sg(base);
+ local_irq_restore(flags);
}
/* Here is the info for Doug Gilbert's sg3 ... */
- cmd->resid = srb->total_xfer_length;
+ scsi_set_resid(cmd, srb->total_xfer_length);
/* This may be interpreted by sb. or not ... */
cmd->SCp.this_residual = srb->total_xfer_length;
cmd->SCp.buffers_residual = 0;
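
Editor's note: the dc395x hunks above follow the conversion pattern repeated by every driver in this series: drop the use_sg/request_buffer distinction and let scsi_dma_map()/scsi_dma_unmap() handle whatever the midlayer provides. A minimal sketch of that pattern is below; fill_one_segment() is a hypothetical stand-in for writing one hardware S/G descriptor, not a function from any of these drivers.

    #include <scsi/scsi_cmnd.h>
    #include <linux/scatterlist.h>

    /* hypothetical: each real driver writes its own descriptor format */
    static void fill_one_segment(int idx, dma_addr_t addr, unsigned int len)
    {
    }

    static int my_build_sglist(struct scsi_cmnd *cmd)
    {
    	struct scatterlist *sg;
    	int i, nseg;

    	nseg = scsi_dma_map(cmd);	/* maps 0..N segments, <0 on failure */
    	if (nseg < 0)
    		return nseg;		/* the drivers above simply BUG_ON() this */

    	scsi_for_each_sg(cmd, sg, nseg, i)
    		fill_one_segment(i, sg_dma_address(sg), sg_dma_len(sg));

    	return nseg;			/* 0 means the command moves no data */
    }

    static void my_command_done(struct scsi_cmnd *cmd, unsigned int xferred)
    {
    	scsi_dma_unmap(cmd);		/* replaces pci_unmap_sg()/pci_unmap_single() */
    	scsi_set_resid(cmd, scsi_bufflen(cmd) - xferred);	/* replaces cmd->resid */
    }
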
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 8c7d2bbf9b1..2e2362d787c 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -2078,12 +2078,13 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
u32 *lenptr;
int direction;
int scsidir;
+ int nseg;
u32 len;
u32 reqlen;
s32 rcode;
memset(msg, 0 , sizeof(msg));
- len = cmd->request_bufflen;
+ len = scsi_bufflen(cmd);
direction = 0x00000000;
scsidir = 0x00000000; // DATA NO XFER
@@ -2140,21 +2141,21 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
lenptr=mptr++; /* Remember me - fill in when we know */
reqlen = 14; // SINGLE SGE
/* Now fill in the SGList and command */
- if(cmd->use_sg) {
- struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
- int sg_count = pci_map_sg(pHba->pDev, sg, cmd->use_sg,
- cmd->sc_data_direction);
+ nseg = scsi_dma_map(cmd);
+ BUG_ON(nseg < 0);
+ if (nseg) {
+ struct scatterlist *sg;
len = 0;
- for(i = 0 ; i < sg_count; i++) {
+ scsi_for_each_sg(cmd, sg, nseg, i) {
*mptr++ = direction|0x10000000|sg_dma_len(sg);
len+=sg_dma_len(sg);
*mptr++ = sg_dma_address(sg);
- sg++;
+ /* Make this an end of list */
+ if (i == nseg - 1)
+ mptr[-2] = direction|0xD0000000|sg_dma_len(sg);
}
- /* Make this an end of list */
- mptr[-2] = direction|0xD0000000|sg_dma_len(sg-1);
reqlen = mptr - msg;
*lenptr = len;
@@ -2163,16 +2164,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
len, cmd->underflow);
}
} else {
- *lenptr = len = cmd->request_bufflen;
- if(len == 0) {
- reqlen = 12;
- } else {
- *mptr++ = 0xD0000000|direction|cmd->request_bufflen;
- *mptr++ = pci_map_single(pHba->pDev,
- cmd->request_buffer,
- cmd->request_bufflen,
- cmd->sc_data_direction);
- }
+ *lenptr = len = 0;
+ reqlen = 12;
}
/* Stick the headers on */
@@ -2232,7 +2225,7 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
hba_status = detailed_status >> 8;
// calculate resid for sg
- cmd->resid = cmd->request_bufflen - readl(reply+5);
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+5));
pHba = (adpt_hba*) cmd->device->host->hostdata[0];
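
Editor's note: adpt_scsi_to_i2o() now flags the final scatter-gather element from inside the scsi_for_each_sg() loop instead of dereferencing sg-1 afterwards, since the accessor macro gives no array to index backwards into. A sketch of that idiom follows; write_sge() and SGE_END_OF_LIST are illustrative names only, mirroring the 0x10000000/0xD0000000 flags used above.

    #include <scsi/scsi_cmnd.h>
    #include <linux/scatterlist.h>

    #define SGE_END_OF_LIST 0xD0000000		/* illustrative end-of-list flag */

    static void write_sge(u32 flags, u32 len, dma_addr_t addr)	/* hypothetical */
    {
    }

    static u32 build_i2o_style_sglist(struct scsi_cmnd *cmd, int nseg, u32 direction)
    {
    	struct scatterlist *sg;
    	u32 total = 0;
    	int i;

    	scsi_for_each_sg(cmd, sg, nseg, i) {
    		u32 flags = direction | 0x10000000;

    		if (i == nseg - 1)		/* last element carries the end marker */
    			flags = direction | SGE_END_OF_LIST;
    		write_sge(flags, sg_dma_len(sg), sg_dma_address(sg));
    		total += sg_dma_len(sg);	/* total transfer length, as *lenptr above */
    	}
    	return total;
    }
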
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 2d38025861a..a83e9f150b9 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1609,8 +1609,9 @@ static int eata2x_detect(struct scsi_host_template *tpnt)
static void map_dma(unsigned int i, struct hostdata *ha)
{
- unsigned int k, count, pci_dir;
- struct scatterlist *sgpnt;
+ unsigned int k, pci_dir;
+ int count;
+ struct scatterlist *sg;
struct mscp *cpp;
struct scsi_cmnd *SCpnt;
@@ -1625,38 +1626,19 @@ static void map_dma(unsigned int i, struct hostdata *ha)
cpp->sense_len = sizeof SCpnt->sense_buffer;
- if (!SCpnt->use_sg) {
-
- /* If we get here with PCI_DMA_NONE, pci_map_single triggers a BUG() */
- if (!SCpnt->request_bufflen)
- pci_dir = PCI_DMA_BIDIRECTIONAL;
-
- if (SCpnt->request_buffer)
- cpp->data_address = H2DEV(pci_map_single(ha->pdev,
- SCpnt->
- request_buffer,
- SCpnt->
- request_bufflen,
- pci_dir));
-
- cpp->data_len = H2DEV(SCpnt->request_bufflen);
- return;
- }
-
- sgpnt = (struct scatterlist *)SCpnt->request_buffer;
- count = pci_map_sg(ha->pdev, sgpnt, SCpnt->use_sg, pci_dir);
-
- for (k = 0; k < count; k++) {
- cpp->sglist[k].address = H2DEV(sg_dma_address(&sgpnt[k]));
- cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(&sgpnt[k]));
+ count = scsi_dma_map(SCpnt);
+ BUG_ON(count < 0);
+ scsi_for_each_sg(SCpnt, sg, count, k) {
+ cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
+ cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
}
cpp->sg = 1;
cpp->data_address = H2DEV(pci_map_single(ha->pdev, cpp->sglist,
- SCpnt->use_sg *
+ scsi_sg_count(SCpnt) *
sizeof(struct sg_list),
pci_dir));
- cpp->data_len = H2DEV((SCpnt->use_sg * sizeof(struct sg_list)));
+ cpp->data_len = H2DEV((scsi_sg_count(SCpnt) * sizeof(struct sg_list)));
}
static void unmap_dma(unsigned int i, struct hostdata *ha)
@@ -1673,9 +1655,7 @@ static void unmap_dma(unsigned int i, struct hostdata *ha)
pci_unmap_single(ha->pdev, DEV2H(cpp->sense_addr),
DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
- if (SCpnt->use_sg)
- pci_unmap_sg(ha->pdev, SCpnt->request_buffer, SCpnt->use_sg,
- pci_dir);
+ scsi_dma_unmap(SCpnt);
if (!DEV2H(cpp->data_len))
pci_dir = PCI_DMA_BIDIRECTIONAL;
@@ -1700,9 +1680,9 @@ static void sync_dma(unsigned int i, struct hostdata *ha)
DEV2H(cpp->sense_len),
PCI_DMA_FROMDEVICE);
- if (SCpnt->use_sg)
- pci_dma_sync_sg_for_cpu(ha->pdev, SCpnt->request_buffer,
- SCpnt->use_sg, pci_dir);
+ if (scsi_sg_count(SCpnt))
+ pci_dma_sync_sg_for_cpu(ha->pdev, scsi_sglist(SCpnt),
+ scsi_sg_count(SCpnt), pci_dir);
if (!DEV2H(cpp->data_len))
pci_dir = PCI_DMA_BIDIRECTIONAL;
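
Editor's note: in eata's map_dma() only the command data goes through scsi_dma_map(); the adapter's own sg_list descriptor table remains a driver-private buffer and is still mapped by hand with pci_map_single(). A sketch of that split, under the assumption of a hypothetical descriptor layout (struct my_sg_desc is illustrative); the real driver maps scsi_sg_count() entries and picks the direction from the command.

    #include <scsi/scsi_cmnd.h>
    #include <linux/scatterlist.h>
    #include <linux/pci.h>

    struct my_sg_desc {				/* illustrative adapter-side descriptor */
    	u64 addr;
    	u32 num_bytes;
    };

    static dma_addr_t map_cmd_and_table(struct pci_dev *pdev, struct scsi_cmnd *cmd,
    					struct my_sg_desc *desc, int pci_dir)
    {
    	struct scatterlist *sg;
    	int i, nseg = scsi_dma_map(cmd);

    	BUG_ON(nseg < 0);
    	if (!nseg)
    		return 0;			/* nothing to transfer */

    	scsi_for_each_sg(cmd, sg, nseg, i) {
    		desc[i].addr = sg_dma_address(sg);
    		desc[i].num_bytes = sg_dma_len(sg);
    	}
    	/* the descriptor table itself still goes through pci_map_single() */
    	return pci_map_single(pdev, desc, nseg * sizeof(*desc), pci_dir);
    }
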
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 71caf2ded6b..77b06a983fa 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -324,17 +324,14 @@ static void esp_reset_esp(struct esp *esp)
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
- struct scatterlist *sg = cmd->request_buffer;
+ struct scatterlist *sg = scsi_sglist(cmd);
int dir = cmd->sc_data_direction;
int total, i;
if (dir == DMA_NONE)
return;
- BUG_ON(cmd->use_sg == 0);
-
- spriv->u.num_sg = esp->ops->map_sg(esp, sg,
- cmd->use_sg, dir);
+ spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
spriv->cur_residue = sg_dma_len(sg);
spriv->cur_sg = sg;
@@ -407,8 +404,7 @@ static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
if (dir == DMA_NONE)
return;
- esp->ops->unmap_sg(esp, cmd->request_buffer,
- spriv->u.num_sg, dir);
+ esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
}
static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
@@ -921,7 +917,7 @@ static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
struct scsi_device *dev = cmd->device;
- struct esp *esp = host_to_esp(dev->host);
+ struct esp *esp = shost_priv(dev->host);
struct esp_cmd_priv *spriv;
struct esp_cmd_entry *ent;
@@ -2357,7 +2353,7 @@ EXPORT_SYMBOL(scsi_esp_unregister);
static int esp_slave_alloc(struct scsi_device *dev)
{
- struct esp *esp = host_to_esp(dev->host);
+ struct esp *esp = shost_priv(dev->host);
struct esp_target_data *tp = &esp->target[dev->id];
struct esp_lun_data *lp;
@@ -2381,7 +2377,7 @@ static int esp_slave_alloc(struct scsi_device *dev)
static int esp_slave_configure(struct scsi_device *dev)
{
- struct esp *esp = host_to_esp(dev->host);
+ struct esp *esp = shost_priv(dev->host);
struct esp_target_data *tp = &esp->target[dev->id];
int goal_tags, queue_depth;
@@ -2423,7 +2419,7 @@ static void esp_slave_destroy(struct scsi_device *dev)
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
- struct esp *esp = host_to_esp(cmd->device->host);
+ struct esp *esp = shost_priv(cmd->device->host);
struct esp_cmd_entry *ent, *tmp;
struct completion eh_done;
unsigned long flags;
@@ -2539,7 +2535,7 @@ out_failure:
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
- struct esp *esp = host_to_esp(cmd->device->host);
+ struct esp *esp = shost_priv(cmd->device->host);
struct completion eh_reset;
unsigned long flags;
@@ -2575,7 +2571,7 @@ static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
- struct esp *esp = host_to_esp(cmd->device->host);
+ struct esp *esp = shost_priv(cmd->device->host);
unsigned long flags;
spin_lock_irqsave(esp->host->host_lock, flags);
@@ -2615,7 +2611,7 @@ EXPORT_SYMBOL(scsi_esp_template);
static void esp_get_signalling(struct Scsi_Host *host)
{
- struct esp *esp = host_to_esp(host);
+ struct esp *esp = shost_priv(host);
enum spi_signal_type type;
if (esp->flags & ESP_FLAG_DIFFERENTIAL)
@@ -2629,7 +2625,7 @@ static void esp_get_signalling(struct Scsi_Host *host)
static void esp_set_offset(struct scsi_target *target, int offset)
{
struct Scsi_Host *host = dev_to_shost(target->dev.parent);
- struct esp *esp = host_to_esp(host);
+ struct esp *esp = shost_priv(host);
struct esp_target_data *tp = &esp->target[target->id];
tp->nego_goal_offset = offset;
@@ -2639,7 +2635,7 @@ static void esp_set_offset(struct scsi_target *target, int offset)
static void esp_set_period(struct scsi_target *target, int period)
{
struct Scsi_Host *host = dev_to_shost(target->dev.parent);
- struct esp *esp = host_to_esp(host);
+ struct esp *esp = shost_priv(host);
struct esp_target_data *tp = &esp->target[target->id];
tp->nego_goal_period = period;
@@ -2649,7 +2645,7 @@ static void esp_set_period(struct scsi_target *target, int period)
static void esp_set_width(struct scsi_target *target, int width)
{
struct Scsi_Host *host = dev_to_shost(target->dev.parent);
- struct esp *esp = host_to_esp(host);
+ struct esp *esp = shost_priv(host);
struct esp_target_data *tp = &esp->target[target->id];
tp->nego_goal_width = (width ? 1 : 0);
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 8d4a6690401..d5576d54ce7 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -517,8 +517,6 @@ struct esp {
struct sbus_dma *dma;
};
-#define host_to_esp(host) ((struct esp *)(host)->hostdata)
-
/* A front-end driver for the ESP chip should do the following in
* it's device probe routine:
* 1) Allocate the host and private area using scsi_host_alloc()
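
Editor's note: the esp changes replace the driver-private host_to_esp() cast macro with the generic shost_priv() helper, which returns the hostdata area reserved by scsi_host_alloc(). A minimal sketch of that pairing; struct my_hba and my_template are illustrative, not part of the esp driver.

    #include <scsi/scsi_host.h>

    struct my_hba {				/* illustrative per-host private data */
    	void __iomem *regs;
    };

    static struct scsi_host_template my_template;	/* illustrative, normally filled in */

    static struct my_hba *attach_sketch(void)
    {
    	struct Scsi_Host *shost;
    	struct my_hba *hba;

    	/* scsi_host_alloc() reserves sizeof(struct my_hba) bytes of hostdata */
    	shost = scsi_host_alloc(&my_template, sizeof(struct my_hba));
    	if (!shost)
    		return NULL;

    	hba = shost_priv(shost);	/* generic accessor; no host_to_esp()-style cast */
    	hba->regs = NULL;		/* hardware setup would go here */
    	return hba;
    }
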
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 5d4ea6f7795..36169d597e9 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -410,6 +410,8 @@ static irqreturn_t do_fdomain_16x0_intr( int irq, void *dev_id );
static char * fdomain = NULL;
module_param(fdomain, charp, 0);
+#ifndef PCMCIA
+
static unsigned long addresses[] = {
0xc8000,
0xca000,
@@ -426,6 +428,8 @@ static unsigned short ports[] = { 0x140, 0x150, 0x160, 0x170 };
static unsigned short ints[] = { 3, 5, 10, 11, 12, 14, 15, 0 };
+#endif /* !PCMCIA */
+
/*
READ THIS BEFORE YOU ADD A SIGNATURE!
@@ -458,6 +462,8 @@ static unsigned short ints[] = { 3, 5, 10, 11, 12, 14, 15, 0 };
*/
+#ifndef PCMCIA
+
static struct signature {
const char *signature;
int sig_offset;
@@ -503,6 +509,8 @@ static struct signature {
#define SIGNATURE_COUNT ARRAY_SIZE(signatures)
+#endif /* !PCMCIA */
+
static void print_banner( struct Scsi_Host *shpnt )
{
if (!shpnt) return; /* This won't ever happen */
@@ -633,6 +641,8 @@ static int fdomain_test_loopback( void )
return 0;
}
+#ifndef PCMCIA
+
/* fdomain_get_irq assumes that we have a valid MCA ID for a
TMC-1660/TMC-1680 Future Domain board. Now, check to be sure the
bios_base matches these ports. If someone was unlucky enough to have
@@ -667,7 +677,6 @@ static int fdomain_get_irq( int base )
static int fdomain_isa_detect( int *irq, int *iobase )
{
-#ifndef PCMCIA
int i, j;
int base = 0xdeadbeef;
int flag = 0;
@@ -786,11 +795,22 @@ found:
*iobase = base;
return 1; /* success */
-#else
- return 0;
-#endif
}
+#else /* PCMCIA */
+
+static int fdomain_isa_detect( int *irq, int *iobase )
+{
+ if (irq)
+ *irq = 0;
+ if (iobase)
+ *iobase = 0;
+ return 0;
+}
+
+#endif /* !PCMCIA */
+
+
/* PCI detection function: int fdomain_pci_bios_detect(int* irq, int*
iobase) This function gets the Interrupt Level and I/O base address from
the PCI configuration registers. */
@@ -1345,16 +1365,15 @@ static irqreturn_t do_fdomain_16x0_intr(int irq, void *dev_id)
#if ERRORS_ONLY
if (current_SC->cmnd[0] == REQUEST_SENSE && !current_SC->SCp.Status) {
- if ((unsigned char)(*((char *)current_SC->request_buffer+2)) & 0x0f) {
+		char *buf = page_address(scsi_sglist(current_SC)->page) + scsi_sglist(current_SC)->offset;

+ if ((unsigned char)(*(buf + 2)) & 0x0f) {
unsigned char key;
unsigned char code;
unsigned char qualifier;
- key = (unsigned char)(*((char *)current_SC->request_buffer + 2))
- & 0x0f;
- code = (unsigned char)(*((char *)current_SC->request_buffer + 12));
- qualifier = (unsigned char)(*((char *)current_SC->request_buffer
- + 13));
+ key = (unsigned char)(*(buf + 2)) & 0x0f;
+ code = (unsigned char)(*(buf + 12));
+ qualifier = (unsigned char)(*(buf + 13));
if (key != UNIT_ATTENTION
&& !(key == NOT_READY
@@ -1405,8 +1424,8 @@ static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt,
printk( "queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n",
SCpnt->target,
*(unsigned char *)SCpnt->cmnd,
- SCpnt->use_sg,
- SCpnt->request_bufflen );
+ scsi_sg_count(SCpnt),
+ scsi_bufflen(SCpnt));
#endif
fdomain_make_bus_idle();
@@ -1416,20 +1435,19 @@ static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt,
/* Initialize static data */
- if (current_SC->use_sg) {
- current_SC->SCp.buffer =
- (struct scatterlist *)current_SC->request_buffer;
- current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
- current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
- current_SC->SCp.buffers_residual = current_SC->use_sg - 1;
+ if (scsi_sg_count(current_SC)) {
+ current_SC->SCp.buffer = scsi_sglist(current_SC);
+ current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page)
+ + current_SC->SCp.buffer->offset;
+ current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
+ current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1;
} else {
- current_SC->SCp.ptr = (char *)current_SC->request_buffer;
- current_SC->SCp.this_residual = current_SC->request_bufflen;
- current_SC->SCp.buffer = NULL;
- current_SC->SCp.buffers_residual = 0;
+ current_SC->SCp.ptr = 0;
+ current_SC->SCp.this_residual = 0;
+ current_SC->SCp.buffer = NULL;
+ current_SC->SCp.buffers_residual = 0;
}
-
-
+
current_SC->SCp.Status = 0;
current_SC->SCp.Message = 0;
current_SC->SCp.have_data_in = 0;
@@ -1472,8 +1490,8 @@ static void print_info(struct scsi_cmnd *SCpnt)
SCpnt->SCp.phase,
SCpnt->device->id,
*(unsigned char *)SCpnt->cmnd,
- SCpnt->use_sg,
- SCpnt->request_bufflen );
+ scsi_sg_count(SCpnt),
+ scsi_bufflen(SCpnt));
printk( "sent_command = %d, have_data_in = %d, timeout = %d\n",
SCpnt->SCp.sent_command,
SCpnt->SCp.have_data_in,
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 60446b88f72..d0b95ce0ba0 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -876,7 +876,7 @@ static int __init gdth_search_pci(gdth_pci_str *pcistr)
/* Vortex only makes RAID controllers.
* We do not really want to specify all 550 ids here, so wildcard match.
*/
-static struct pci_device_id gdthtable[] __attribute_used__ = {
+static struct pci_device_id gdthtable[] __maybe_unused = {
{PCI_VENDOR_ID_VORTEX,PCI_ANY_ID,PCI_ANY_ID, PCI_ANY_ID},
{PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC,PCI_ANY_ID,PCI_ANY_ID},
{PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC_XSCALE,PCI_ANY_ID,PCI_ANY_ID},
@@ -1955,7 +1955,7 @@ static int __init gdth_search_drives(int hanum)
for (j = 0; j < 12; ++j)
rtc[j] = CMOS_READ(j);
} while (rtc[0] != CMOS_READ(0));
- spin_lock_irqrestore(&rtc_lock, flags);
+ spin_unlock_irqrestore(&rtc_lock, flags);
TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(ulong32 *)&rtc[0],
*(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8]));
/* 3. send to controller firmware */
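
Editor's note: the gdth one-liner restores balanced locking around the RTC read: whatever is taken with spin_lock_irqsave() must be released with spin_unlock_irqrestore() using the same flags value, otherwise the lock is never dropped and local interrupts stay disabled. A minimal sketch of the pattern, assuming a hypothetical shared register array in place of the CMOS RTC.

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);		/* illustrative stand-in for rtc_lock */
    static unsigned char my_regs[12];		/* illustrative shared state */

    static unsigned char read_reg_sketch(int idx)
    {
    	unsigned long flags;
    	unsigned char val;

    	spin_lock_irqsave(&my_lock, flags);	 /* disable local IRQs, remember state */
    	val = my_regs[idx];
    	spin_unlock_irqrestore(&my_lock, flags); /* release and restore IRQ state */
    	return val;
    }
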
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index bec83cbee59..0e579ca4581 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -339,20 +339,8 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag)
scp = hba->reqs[tag].scp;
- if (HPT_SCP(scp)->mapped) {
- if (scp->use_sg)
- pci_unmap_sg(hba->pcidev,
- (struct scatterlist *)scp->request_buffer,
- scp->use_sg,
- scp->sc_data_direction
- );
- else
- pci_unmap_single(hba->pcidev,
- HPT_SCP(scp)->dma_handle,
- scp->request_bufflen,
- scp->sc_data_direction
- );
- }
+ if (HPT_SCP(scp)->mapped)
+ scsi_dma_unmap(scp);
switch (le32_to_cpu(req->header.result)) {
case IOP_RESULT_SUCCESS:
@@ -448,43 +436,26 @@ static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
struct Scsi_Host *host = scp->device->host;
struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
- struct scatterlist *sglist = (struct scatterlist *)scp->request_buffer;
-
- /*
- * though we'll not get non-use_sg fields anymore,
- * keep use_sg checking anyway
- */
- if (scp->use_sg) {
- int idx;
-
- HPT_SCP(scp)->sgcnt = pci_map_sg(hba->pcidev,
- sglist, scp->use_sg,
- scp->sc_data_direction);
- HPT_SCP(scp)->mapped = 1;
- BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);
-
- for (idx = 0; idx < HPT_SCP(scp)->sgcnt; idx++) {
- psg[idx].pci_address =
- cpu_to_le64(sg_dma_address(&sglist[idx]));
- psg[idx].size = cpu_to_le32(sg_dma_len(&sglist[idx]));
- psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
- cpu_to_le32(1) : 0;
- }
+ struct scatterlist *sg;
+ int idx, nseg;
+
+ nseg = scsi_dma_map(scp);
+ BUG_ON(nseg < 0);
+ if (!nseg)
+ return 0;
- return HPT_SCP(scp)->sgcnt;
- } else {
- HPT_SCP(scp)->dma_handle = pci_map_single(
- hba->pcidev,
- scp->request_buffer,
- scp->request_bufflen,
- scp->sc_data_direction
- );
- HPT_SCP(scp)->mapped = 1;
- psg->pci_address = cpu_to_le64(HPT_SCP(scp)->dma_handle);
- psg->size = cpu_to_le32(scp->request_bufflen);
- psg->eot = cpu_to_le32(1);
- return 1;
+ HPT_SCP(scp)->sgcnt = nseg;
+ HPT_SCP(scp)->mapped = 1;
+
+ BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);
+
+ scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
+ psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
+ psg[idx].size = cpu_to_le32(sg_dma_len(sg));
+ psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
+ cpu_to_le32(1) : 0;
}
+ return HPT_SCP(scp)->sgcnt;
}
static int hptiop_queuecommand(struct scsi_cmnd *scp,
@@ -529,9 +500,8 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
req = (struct hpt_iop_request_scsi_command *)_req->req_virt;
/* build S/G table */
- if (scp->request_bufflen)
- sg_count = hptiop_buildsgl(scp, req->sg_list);
- else
+ sg_count = hptiop_buildsgl(scp, req->sg_list);
+ if (!sg_count)
HPT_SCP(scp)->mapped = 0;
req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
@@ -540,7 +510,7 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
(u32)_req->index);
req->header.context_hi32 = 0;
- req->dataxfer_length = cpu_to_le32(scp->request_bufflen);
+ req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
req->channel = scp->device->channel;
req->target = scp->device->id;
req->lun = scp->device->lun;
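
Editor's note: hptiop_queuecommand() no longer checks request_bufflen before building the S/G list; hptiop_buildsgl() itself returns 0 when scsi_dma_map() maps nothing, and the caller then leaves the 'mapped' flag clear so no unmap happens at completion. A sketch of that shape with a hypothetical descriptor type (struct my_iop_sge and max_sg are illustrative).

    #include <scsi/scsi_cmnd.h>
    #include <linux/scatterlist.h>

    struct my_iop_sge {				/* illustrative hardware S/G element */
    	u64 pci_address;
    	u32 size;
    	u32 eot;
    };

    static int build_iop_sgl_sketch(struct scsi_cmnd *cmd, struct my_iop_sge *psg,
    				    int max_sg)
    {
    	struct scatterlist *sg;
    	int i, nseg;

    	nseg = scsi_dma_map(cmd);
    	BUG_ON(nseg < 0 || nseg > max_sg);
    	if (!nseg)
    		return 0;		/* no data phase: caller leaves 'mapped' clear */

    	scsi_for_each_sg(cmd, sg, nseg, i) {
    		psg[i].pci_address = sg_dma_address(sg);
    		psg[i].size = sg_dma_len(sg);
    		psg[i].eot = (i == nseg - 1);	/* end-of-table flag on the last entry */
    	}
    	return nseg;
    }
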
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 0e57fb6964d..4275d1b04ce 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -31,14 +31,21 @@
#include <linux/mca.h>
#include <linux/spinlock.h>
#include <linux/init.h>
-#include <linux/mca-legacy.h>
#include <asm/system.h>
#include <asm/io.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
-#include "ibmmca.h"
+
+/* Common forward declarations for all Linux-versions: */
+static int ibmmca_queuecommand (Scsi_Cmnd *, void (*done) (Scsi_Cmnd *));
+static int ibmmca_abort (Scsi_Cmnd *);
+static int ibmmca_host_reset (Scsi_Cmnd *);
+static int ibmmca_biosparam (struct scsi_device *, struct block_device *, sector_t, int *);
+static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout);
+
+
/* current version of this driver-source: */
#define IBMMCA_SCSI_DRIVER_VERSION "4.0b-ac"
@@ -65,11 +72,11 @@
#define IM_DEBUG_CMD_DEVICE TYPE_TAPE
/* relative addresses of hardware registers on a subsystem */
-#define IM_CMD_REG(hi) (hosts[(hi)]->io_port) /*Command Interface, (4 bytes long) */
-#define IM_ATTN_REG(hi) (hosts[(hi)]->io_port+4) /*Attention (1 byte) */
-#define IM_CTR_REG(hi) (hosts[(hi)]->io_port+5) /*Basic Control (1 byte) */
-#define IM_INTR_REG(hi) (hosts[(hi)]->io_port+6) /*Interrupt Status (1 byte, r/o) */
-#define IM_STAT_REG(hi) (hosts[(hi)]->io_port+7) /*Basic Status (1 byte, read only) */
+#define IM_CMD_REG(h) ((h)->io_port) /*Command Interface, (4 bytes long) */
+#define IM_ATTN_REG(h) ((h)->io_port+4) /*Attention (1 byte) */
+#define IM_CTR_REG(h) ((h)->io_port+5) /*Basic Control (1 byte) */
+#define IM_INTR_REG(h) ((h)->io_port+6) /*Interrupt Status (1 byte, r/o) */
+#define IM_STAT_REG(h) ((h)->io_port+7) /*Basic Status (1 byte, read only) */
/* basic I/O-port of first adapter */
#define IM_IO_PORT 0x3540
@@ -266,30 +273,36 @@ static int global_adapter_speed = 0; /* full speed by default */
if ((display_mode & LED_ACTIVITY)||(!display_mode)) \
outb(inb(PS2_SYS_CTR) & 0x3f, PS2_SYS_CTR); }
-/*list of supported subsystems */
-struct subsys_list_struct {
- unsigned short mca_id;
- char *description;
-};
-
/* types of different supported hardware that goes to hostdata special */
#define IBM_SCSI2_FW 0
#define IBM_7568_WCACHE 1
#define IBM_EXP_UNIT 2
#define IBM_SCSI_WCACHE 3
#define IBM_SCSI 4
+#define IBM_INTEGSCSI 5
/* other special flags for hostdata structure */
#define FORCED_DETECTION 100
#define INTEGRATED_SCSI 101
/* List of possible IBM-SCSI-adapters */
-static struct subsys_list_struct subsys_list[] = {
- {0x8efc, "IBM SCSI-2 F/W Adapter"}, /* special = 0 */
- {0x8efd, "IBM 7568 Industrial Computer SCSI Adapter w/Cache"}, /* special = 1 */
- {0x8ef8, "IBM Expansion Unit SCSI Controller"}, /* special = 2 */
- {0x8eff, "IBM SCSI Adapter w/Cache"}, /* special = 3 */
- {0x8efe, "IBM SCSI Adapter"}, /* special = 4 */
+static short ibmmca_id_table[] = {
+ 0x8efc,
+ 0x8efd,
+ 0x8ef8,
+ 0x8eff,
+ 0x8efe,
+ /* No entry for integrated SCSI, that's part of the register */
+ 0
+};
+
+static const char *ibmmca_description[] = {
+ "IBM SCSI-2 F/W Adapter", /* special = 0 */
+ "IBM 7568 Industrial Computer SCSI Adapter w/Cache", /* special = 1 */
+ "IBM Expansion Unit SCSI Controller", /* special = 2 */
+ "IBM SCSI Adapter w/Cache", /* special = 3 */
+ "IBM SCSI Adapter", /* special = 4 */
+ "IBM Integrated SCSI Controller", /* special = 5 */
};
/* Max number of logical devices (can be up from 0 to 14). 15 is the address
@@ -375,30 +388,30 @@ struct ibmmca_hostdata {
};
/* macros to access host data structure */
-#define subsystem_pun(hi) (hosts[(hi)]->this_id)
-#define subsystem_maxid(hi) (hosts[(hi)]->max_id)
-#define ld(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_ld)
-#define get_ldn(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_get_ldn)
-#define get_scsi(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_get_scsi)
-#define local_checking_phase_flag(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_local_checking_phase_flag)
-#define got_interrupt(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_got_interrupt)
-#define stat_result(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_stat_result)
-#define reset_status(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_reset_status)
-#define last_scsi_command(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_command)
-#define last_scsi_type(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_type)
-#define last_scsi_blockcount(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_blockcount)
-#define last_scsi_logical_block(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_logical_block)
-#define last_scsi_type(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_type)
-#define next_ldn(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_next_ldn)
-#define IBM_DS(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_IBM_DS)
-#define special(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_special)
-#define subsystem_connector_size(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_connector_size)
-#define adapter_speed(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_adapter_speed)
-#define pos2(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[2])
-#define pos3(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[3])
-#define pos4(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[4])
-#define pos5(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[5])
-#define pos6(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[6])
+#define subsystem_pun(h) ((h)->this_id)
+#define subsystem_maxid(h) ((h)->max_id)
+#define ld(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_ld)
+#define get_ldn(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_get_ldn)
+#define get_scsi(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_get_scsi)
+#define local_checking_phase_flag(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_local_checking_phase_flag)
+#define got_interrupt(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_got_interrupt)
+#define stat_result(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_stat_result)
+#define reset_status(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_reset_status)
+#define last_scsi_command(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_command)
+#define last_scsi_type(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_type)
+#define last_scsi_blockcount(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_blockcount)
+#define last_scsi_logical_block(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_logical_block)
+#define last_scsi_type(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_type)
+#define next_ldn(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_next_ldn)
+#define IBM_DS(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_IBM_DS)
+#define special(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_special)
+#define subsystem_connector_size(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_connector_size)
+#define adapter_speed(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_adapter_speed)
+#define pos2(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[2])
+#define pos3(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[3])
+#define pos4(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[4])
+#define pos5(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[5])
+#define pos6(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[6])
/* Define a arbitrary number as subsystem-marker-type. This number is, as
described in the ANSI-SCSI-standard, not occupied by other device-types. */
@@ -459,11 +472,6 @@ MODULE_LICENSE("GPL");
/*counter of concurrent disk read/writes, to turn on/off disk led */
static int disk_rw_in_progress = 0;
-/* host information */
-static int found = 0;
-static struct Scsi_Host *hosts[IM_MAX_HOSTS + 1] = {
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
-};
static unsigned int pos[8]; /* whole pos register-line for diagnosis */
/* Taking into account the additions, made by ZP Gu.
* This selects now the preset value from the configfile and
@@ -474,70 +482,68 @@ static char ibm_ansi_order = 1;
static char ibm_ansi_order = 0;
#endif
-static void issue_cmd(int, unsigned long, unsigned char);
+static void issue_cmd(struct Scsi_Host *, unsigned long, unsigned char);
static void internal_done(Scsi_Cmnd * cmd);
-static void check_devices(int, int);
-static int immediate_assign(int, unsigned int, unsigned int, unsigned int, unsigned int);
-static int immediate_feature(int, unsigned int, unsigned int);
+static void check_devices(struct Scsi_Host *, int);
+static int immediate_assign(struct Scsi_Host *, unsigned int, unsigned int, unsigned int, unsigned int);
+static int immediate_feature(struct Scsi_Host *, unsigned int, unsigned int);
#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
-static int immediate_reset(int, unsigned int);
+static int immediate_reset(struct Scsi_Host *, unsigned int);
#endif
-static int device_inquiry(int, int);
-static int read_capacity(int, int);
-static int get_pos_info(int);
+static int device_inquiry(struct Scsi_Host *, int);
+static int read_capacity(struct Scsi_Host *, int);
+static int get_pos_info(struct Scsi_Host *);
static char *ti_p(int);
static char *ti_l(int);
static char *ibmrate(unsigned int, int);
static int probe_display(int);
-static int probe_bus_mode(int);
-static int device_exists(int, int, int *, int *);
-static struct Scsi_Host *ibmmca_register(struct scsi_host_template *, int, int, int, char *);
+static int probe_bus_mode(struct Scsi_Host *);
+static int device_exists(struct Scsi_Host *, int, int *, int *);
static int option_setup(char *);
/* local functions needed for proc_info */
-static int ldn_access_load(int, int);
-static int ldn_access_total_read_write(int);
+static int ldn_access_load(struct Scsi_Host *, int);
+static int ldn_access_total_read_write(struct Scsi_Host *);
static irqreturn_t interrupt_handler(int irq, void *dev_id)
{
- int host_index, ihost_index;
unsigned int intr_reg;
unsigned int cmd_result;
unsigned int ldn;
+ unsigned long flags;
Scsi_Cmnd *cmd;
int lastSCSI;
- struct Scsi_Host *dev = dev_id;
+ struct device *dev = dev_id;
+ struct Scsi_Host *shpnt = dev_get_drvdata(dev);
- spin_lock(dev->host_lock);
- /* search for one adapter-response on shared interrupt */
- for (host_index = 0; hosts[host_index] && !(inb(IM_STAT_REG(host_index)) & IM_INTR_REQUEST); host_index++);
- /* return if some other device on this IRQ caused the interrupt */
- if (!hosts[host_index]) {
- spin_unlock(dev->host_lock);
+ spin_lock_irqsave(shpnt->host_lock, flags);
+
+ if(!(inb(IM_STAT_REG(shpnt)) & IM_INTR_REQUEST)) {
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_NONE;
}
/* the reset-function already did all the job, even ints got
renabled on the subsystem, so just return */
- if ((reset_status(host_index) == IM_RESET_NOT_IN_PROGRESS_NO_INT) || (reset_status(host_index) == IM_RESET_FINISHED_OK_NO_INT)) {
- reset_status(host_index) = IM_RESET_NOT_IN_PROGRESS;
- spin_unlock(dev->host_lock);
+ if ((reset_status(shpnt) == IM_RESET_NOT_IN_PROGRESS_NO_INT) || (reset_status(shpnt) == IM_RESET_FINISHED_OK_NO_INT)) {
+ reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS;
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
/*must wait for attention reg not busy, then send EOI to subsystem */
while (1) {
- if (!(inb(IM_STAT_REG(host_index)) & IM_BUSY))
+ if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
break;
cpu_relax();
}
- ihost_index = host_index;
+
/*get command result and logical device */
- intr_reg = (unsigned char) (inb(IM_INTR_REG(ihost_index)));
+ intr_reg = (unsigned char) (inb(IM_INTR_REG(shpnt)));
cmd_result = intr_reg & 0xf0;
ldn = intr_reg & 0x0f;
/* get the last_scsi_command here */
- lastSCSI = last_scsi_command(ihost_index)[ldn];
- outb(IM_EOI | ldn, IM_ATTN_REG(ihost_index));
+ lastSCSI = last_scsi_command(shpnt)[ldn];
+ outb(IM_EOI | ldn, IM_ATTN_REG(shpnt));
/*these should never happen (hw fails, or a local programming bug) */
if (!global_command_error_excuse) {
@@ -547,38 +553,38 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
case IM_SOFTWARE_SEQUENCING_ERROR:
case IM_CMD_ERROR:
printk(KERN_ERR "IBM MCA SCSI: Fatal Subsystem ERROR!\n");
- printk(KERN_ERR " Last cmd=0x%x, ena=%x, len=", lastSCSI, ld(ihost_index)[ldn].scb.enable);
- if (ld(ihost_index)[ldn].cmd)
- printk("%ld/%ld,", (long) (ld(ihost_index)[ldn].cmd->request_bufflen), (long) (ld(ihost_index)[ldn].scb.sys_buf_length));
+ printk(KERN_ERR " Last cmd=0x%x, ena=%x, len=", lastSCSI, ld(shpnt)[ldn].scb.enable);
+ if (ld(shpnt)[ldn].cmd)
+ printk("%ld/%ld,", (long) (scsi_bufflen(ld(shpnt)[ldn].cmd)), (long) (ld(shpnt)[ldn].scb.sys_buf_length));
else
printk("none,");
- if (ld(ihost_index)[ldn].cmd)
- printk("Blocksize=%d", ld(ihost_index)[ldn].scb.u2.blk.length);
+ if (ld(shpnt)[ldn].cmd)
+ printk("Blocksize=%d", ld(shpnt)[ldn].scb.u2.blk.length);
else
printk("Blocksize=none");
- printk(", host=0x%x, ldn=0x%x\n", ihost_index, ldn);
- if (ld(ihost_index)[ldn].cmd) {
- printk(KERN_ERR "Blockcount=%d/%d\n", last_scsi_blockcount(ihost_index)[ldn], ld(ihost_index)[ldn].scb.u2.blk.count);
- printk(KERN_ERR "Logical block=%lx/%lx\n", last_scsi_logical_block(ihost_index)[ldn], ld(ihost_index)[ldn].scb.u1.log_blk_adr);
+ printk(", host=%p, ldn=0x%x\n", shpnt, ldn);
+ if (ld(shpnt)[ldn].cmd) {
+ printk(KERN_ERR "Blockcount=%d/%d\n", last_scsi_blockcount(shpnt)[ldn], ld(shpnt)[ldn].scb.u2.blk.count);
+ printk(KERN_ERR "Logical block=%lx/%lx\n", last_scsi_logical_block(shpnt)[ldn], ld(shpnt)[ldn].scb.u1.log_blk_adr);
}
printk(KERN_ERR "Reason given: %s\n", (cmd_result == IM_ADAPTER_HW_FAILURE) ? "HARDWARE FAILURE" : (cmd_result == IM_SOFTWARE_SEQUENCING_ERROR) ? "SOFTWARE SEQUENCING ERROR" : (cmd_result == IM_CMD_ERROR) ? "COMMAND ERROR" : "UNKNOWN");
/* if errors appear, enter this section to give detailed info */
printk(KERN_ERR "IBM MCA SCSI: Subsystem Error-Status follows:\n");
- printk(KERN_ERR " Command Type................: %x\n", last_scsi_type(ihost_index)[ldn]);
- printk(KERN_ERR " Attention Register..........: %x\n", inb(IM_ATTN_REG(ihost_index)));
- printk(KERN_ERR " Basic Control Register......: %x\n", inb(IM_CTR_REG(ihost_index)));
+ printk(KERN_ERR " Command Type................: %x\n", last_scsi_type(shpnt)[ldn]);
+ printk(KERN_ERR " Attention Register..........: %x\n", inb(IM_ATTN_REG(shpnt)));
+ printk(KERN_ERR " Basic Control Register......: %x\n", inb(IM_CTR_REG(shpnt)));
printk(KERN_ERR " Interrupt Status Register...: %x\n", intr_reg);
- printk(KERN_ERR " Basic Status Register.......: %x\n", inb(IM_STAT_REG(ihost_index)));
- if ((last_scsi_type(ihost_index)[ldn] == IM_SCB) || (last_scsi_type(ihost_index)[ldn] == IM_LONG_SCB)) {
- printk(KERN_ERR " SCB-Command.................: %x\n", ld(ihost_index)[ldn].scb.command);
- printk(KERN_ERR " SCB-Enable..................: %x\n", ld(ihost_index)[ldn].scb.enable);
- printk(KERN_ERR " SCB-logical block address...: %lx\n", ld(ihost_index)[ldn].scb.u1.log_blk_adr);
- printk(KERN_ERR " SCB-system buffer address...: %lx\n", ld(ihost_index)[ldn].scb.sys_buf_adr);
- printk(KERN_ERR " SCB-system buffer length....: %lx\n", ld(ihost_index)[ldn].scb.sys_buf_length);
- printk(KERN_ERR " SCB-tsb address.............: %lx\n", ld(ihost_index)[ldn].scb.tsb_adr);
- printk(KERN_ERR " SCB-Chain address...........: %lx\n", ld(ihost_index)[ldn].scb.scb_chain_adr);
- printk(KERN_ERR " SCB-block count.............: %x\n", ld(ihost_index)[ldn].scb.u2.blk.count);
- printk(KERN_ERR " SCB-block length............: %x\n", ld(ihost_index)[ldn].scb.u2.blk.length);
+ printk(KERN_ERR " Basic Status Register.......: %x\n", inb(IM_STAT_REG(shpnt)));
+ if ((last_scsi_type(shpnt)[ldn] == IM_SCB) || (last_scsi_type(shpnt)[ldn] == IM_LONG_SCB)) {
+ printk(KERN_ERR " SCB-Command.................: %x\n", ld(shpnt)[ldn].scb.command);
+ printk(KERN_ERR " SCB-Enable..................: %x\n", ld(shpnt)[ldn].scb.enable);
+ printk(KERN_ERR " SCB-logical block address...: %lx\n", ld(shpnt)[ldn].scb.u1.log_blk_adr);
+ printk(KERN_ERR " SCB-system buffer address...: %lx\n", ld(shpnt)[ldn].scb.sys_buf_adr);
+ printk(KERN_ERR " SCB-system buffer length....: %lx\n", ld(shpnt)[ldn].scb.sys_buf_length);
+ printk(KERN_ERR " SCB-tsb address.............: %lx\n", ld(shpnt)[ldn].scb.tsb_adr);
+ printk(KERN_ERR " SCB-Chain address...........: %lx\n", ld(shpnt)[ldn].scb.scb_chain_adr);
+ printk(KERN_ERR " SCB-block count.............: %x\n", ld(shpnt)[ldn].scb.u2.blk.count);
+ printk(KERN_ERR " SCB-block length............: %x\n", ld(shpnt)[ldn].scb.u2.blk.length);
}
printk(KERN_ERR " Send this report to the maintainer.\n");
panic("IBM MCA SCSI: Fatal error message from the subsystem (0x%X,0x%X)!\n", lastSCSI, cmd_result);
@@ -600,72 +606,73 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
}
}
/* if no panic appeared, increase the interrupt-counter */
- IBM_DS(ihost_index).total_interrupts++;
+ IBM_DS(shpnt).total_interrupts++;
/*only for local checking phase */
- if (local_checking_phase_flag(ihost_index)) {
- stat_result(ihost_index) = cmd_result;
- got_interrupt(ihost_index) = 1;
- reset_status(ihost_index) = IM_RESET_FINISHED_OK;
- last_scsi_command(ihost_index)[ldn] = NO_SCSI;
- spin_unlock(dev->host_lock);
+ if (local_checking_phase_flag(shpnt)) {
+ stat_result(shpnt) = cmd_result;
+ got_interrupt(shpnt) = 1;
+ reset_status(shpnt) = IM_RESET_FINISHED_OK;
+ last_scsi_command(shpnt)[ldn] = NO_SCSI;
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
/* handling of commands coming from upper level of scsi driver */
- if (last_scsi_type(ihost_index)[ldn] == IM_IMM_CMD) {
+ if (last_scsi_type(shpnt)[ldn] == IM_IMM_CMD) {
/* verify ldn, and may handle rare reset immediate command */
- if ((reset_status(ihost_index) == IM_RESET_IN_PROGRESS) && (last_scsi_command(ihost_index)[ldn] == IM_RESET_IMM_CMD)) {
+ if ((reset_status(shpnt) == IM_RESET_IN_PROGRESS) && (last_scsi_command(shpnt)[ldn] == IM_RESET_IMM_CMD)) {
if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE) {
disk_rw_in_progress = 0;
PS2_DISK_LED_OFF();
- reset_status(ihost_index) = IM_RESET_FINISHED_FAIL;
+ reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
} else {
/*reset disk led counter, turn off disk led */
disk_rw_in_progress = 0;
PS2_DISK_LED_OFF();
- reset_status(ihost_index) = IM_RESET_FINISHED_OK;
+ reset_status(shpnt) = IM_RESET_FINISHED_OK;
}
- stat_result(ihost_index) = cmd_result;
- last_scsi_command(ihost_index)[ldn] = NO_SCSI;
- last_scsi_type(ihost_index)[ldn] = 0;
- spin_unlock(dev->host_lock);
+ stat_result(shpnt) = cmd_result;
+ last_scsi_command(shpnt)[ldn] = NO_SCSI;
+ last_scsi_type(shpnt)[ldn] = 0;
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
- } else if (last_scsi_command(ihost_index)[ldn] == IM_ABORT_IMM_CMD) {
+ } else if (last_scsi_command(shpnt)[ldn] == IM_ABORT_IMM_CMD) {
/* react on SCSI abort command */
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Interrupt from SCSI-abort.\n");
#endif
disk_rw_in_progress = 0;
PS2_DISK_LED_OFF();
- cmd = ld(ihost_index)[ldn].cmd;
- ld(ihost_index)[ldn].cmd = NULL;
+ cmd = ld(shpnt)[ldn].cmd;
+ ld(shpnt)[ldn].cmd = NULL;
if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE)
cmd->result = DID_NO_CONNECT << 16;
else
cmd->result = DID_ABORT << 16;
- stat_result(ihost_index) = cmd_result;
- last_scsi_command(ihost_index)[ldn] = NO_SCSI;
- last_scsi_type(ihost_index)[ldn] = 0;
+ stat_result(shpnt) = cmd_result;
+ last_scsi_command(shpnt)[ldn] = NO_SCSI;
+ last_scsi_type(shpnt)[ldn] = 0;
if (cmd->scsi_done)
(cmd->scsi_done) (cmd); /* should be the internal_done */
- spin_unlock(dev->host_lock);
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
} else {
disk_rw_in_progress = 0;
PS2_DISK_LED_OFF();
- reset_status(ihost_index) = IM_RESET_FINISHED_OK;
- stat_result(ihost_index) = cmd_result;
- last_scsi_command(ihost_index)[ldn] = NO_SCSI;
- spin_unlock(dev->host_lock);
+ reset_status(shpnt) = IM_RESET_FINISHED_OK;
+ stat_result(shpnt) = cmd_result;
+ last_scsi_command(shpnt)[ldn] = NO_SCSI;
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
}
- last_scsi_command(ihost_index)[ldn] = NO_SCSI;
- last_scsi_type(ihost_index)[ldn] = 0;
- cmd = ld(ihost_index)[ldn].cmd;
- ld(ihost_index)[ldn].cmd = NULL;
+ last_scsi_command(shpnt)[ldn] = NO_SCSI;
+ last_scsi_type(shpnt)[ldn] = 0;
+ cmd = ld(shpnt)[ldn].cmd;
+ ld(shpnt)[ldn].cmd = NULL;
#ifdef IM_DEBUG_TIMEOUT
if (cmd) {
if ((cmd->target == TIMEOUT_PUN) && (cmd->device->lun == TIMEOUT_LUN)) {
+			spin_unlock_irqrestore(shpnt->host_lock, flags);
printk("IBM MCA SCSI: Ignoring interrupt from pun=%x, lun=%x.\n", cmd->target, cmd->device->lun);
return IRQ_HANDLED;
}
@@ -674,15 +681,15 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
/*if no command structure, just return, else clear cmd */
if (!cmd)
{
- spin_unlock(dev->host_lock);
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
#ifdef IM_DEBUG_INT
- printk("cmd=%02x ireg=%02x ds=%02x cs=%02x de=%02x ce=%02x\n", cmd->cmnd[0], intr_reg, ld(ihost_index)[ldn].tsb.dev_status, ld(ihost_index)[ldn].tsb.cmd_status, ld(ihost_index)[ldn].tsb.dev_error, ld(ihost_index)[ldn].tsb.cmd_error);
+ printk("cmd=%02x ireg=%02x ds=%02x cs=%02x de=%02x ce=%02x\n", cmd->cmnd[0], intr_reg, ld(shpnt)[ldn].tsb.dev_status, ld(shpnt)[ldn].tsb.cmd_status, ld(shpnt)[ldn].tsb.dev_error, ld(shpnt)[ldn].tsb.cmd_error);
#endif
/*if this is end of media read/write, may turn off PS/2 disk led */
- if ((ld(ihost_index)[ldn].device_type != TYPE_NO_LUN) && (ld(ihost_index)[ldn].device_type != TYPE_NO_DEVICE)) {
+ if ((ld(shpnt)[ldn].device_type != TYPE_NO_LUN) && (ld(shpnt)[ldn].device_type != TYPE_NO_DEVICE)) {
/* only access this, if there was a valid device addressed */
if (--disk_rw_in_progress == 0)
PS2_DISK_LED_OFF();
@@ -693,8 +700,8 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
* adapters do not support CMD_TERMINATED, TASK_SET_FULL and
* ACA_ACTIVE as returning statusbyte information. (ML) */
if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE) {
- cmd->result = (unsigned char) (ld(ihost_index)[ldn].tsb.dev_status & 0x1e);
- IBM_DS(ihost_index).total_errors++;
+ cmd->result = (unsigned char) (ld(shpnt)[ldn].tsb.dev_status & 0x1e);
+ IBM_DS(shpnt).total_errors++;
} else
cmd->result = 0;
/* write device status into cmd->result, and call done function */
@@ -705,24 +712,25 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
cmd->result |= DID_OK << 16;
if (cmd->scsi_done)
(cmd->scsi_done) (cmd);
- spin_unlock(dev->host_lock);
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
-static void issue_cmd(int host_index, unsigned long cmd_reg, unsigned char attn_reg)
+static void issue_cmd(struct Scsi_Host *shpnt, unsigned long cmd_reg,
+ unsigned char attn_reg)
{
unsigned long flags;
/* must wait for attention reg not busy */
while (1) {
- spin_lock_irqsave(hosts[host_index]->host_lock, flags);
- if (!(inb(IM_STAT_REG(host_index)) & IM_BUSY))
+ spin_lock_irqsave(shpnt->host_lock, flags);
+ if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
break;
- spin_unlock_irqrestore(hosts[host_index]->host_lock, flags);
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
}
/* write registers and enable system interrupts */
- outl(cmd_reg, IM_CMD_REG(host_index));
- outb(attn_reg, IM_ATTN_REG(host_index));
- spin_unlock_irqrestore(hosts[host_index]->host_lock, flags);
+ outl(cmd_reg, IM_CMD_REG(shpnt));
+ outb(attn_reg, IM_ATTN_REG(shpnt));
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
}
static void internal_done(Scsi_Cmnd * cmd)
@@ -732,34 +740,34 @@ static void internal_done(Scsi_Cmnd * cmd)
}
/* SCSI-SCB-command for device_inquiry */
-static int device_inquiry(int host_index, int ldn)
+static int device_inquiry(struct Scsi_Host *shpnt, int ldn)
{
int retr;
struct im_scb *scb;
struct im_tsb *tsb;
unsigned char *buf;
- scb = &(ld(host_index)[ldn].scb);
- tsb = &(ld(host_index)[ldn].tsb);
- buf = (unsigned char *) (&(ld(host_index)[ldn].buf));
- ld(host_index)[ldn].tsb.dev_status = 0; /* prepare statusblock */
+ scb = &(ld(shpnt)[ldn].scb);
+ tsb = &(ld(shpnt)[ldn].tsb);
+ buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
+ ld(shpnt)[ldn].tsb.dev_status = 0; /* prepare statusblock */
for (retr = 0; retr < 3; retr++) {
/* fill scb with inquiry command */
scb->command = IM_DEVICE_INQUIRY_CMD | IM_NO_DISCONNECT;
scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
- last_scsi_command(host_index)[ldn] = IM_DEVICE_INQUIRY_CMD;
- last_scsi_type(host_index)[ldn] = IM_SCB;
+ last_scsi_command(shpnt)[ldn] = IM_DEVICE_INQUIRY_CMD;
+ last_scsi_type(shpnt)[ldn] = IM_SCB;
scb->sys_buf_adr = isa_virt_to_bus(buf);
scb->sys_buf_length = 255; /* maximum bufferlength gives max info */
scb->tsb_adr = isa_virt_to_bus(tsb);
/* issue scb to passed ldn, and busy wait for interrupt */
- got_interrupt(host_index) = 0;
- issue_cmd(host_index, isa_virt_to_bus(scb), IM_SCB | ldn);
- while (!got_interrupt(host_index))
+ got_interrupt(shpnt) = 0;
+ issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
+ while (!got_interrupt(shpnt))
barrier();
/*if command successful, break */
- if ((stat_result(host_index) == IM_SCB_CMD_COMPLETED) || (stat_result(host_index) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
+ if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
return 1;
}
/*if all three retries failed, return "no device at this ldn" */
@@ -769,34 +777,34 @@ static int device_inquiry(int host_index, int ldn)
return 1;
}
-static int read_capacity(int host_index, int ldn)
+static int read_capacity(struct Scsi_Host *shpnt, int ldn)
{
int retr;
struct im_scb *scb;
struct im_tsb *tsb;
unsigned char *buf;
- scb = &(ld(host_index)[ldn].scb);
- tsb = &(ld(host_index)[ldn].tsb);
- buf = (unsigned char *) (&(ld(host_index)[ldn].buf));
- ld(host_index)[ldn].tsb.dev_status = 0;
+ scb = &(ld(shpnt)[ldn].scb);
+ tsb = &(ld(shpnt)[ldn].tsb);
+ buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
+ ld(shpnt)[ldn].tsb.dev_status = 0;
for (retr = 0; retr < 3; retr++) {
/*fill scb with read capacity command */
scb->command = IM_READ_CAPACITY_CMD;
scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_READ_CONTROL | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
- last_scsi_command(host_index)[ldn] = IM_READ_CAPACITY_CMD;
- last_scsi_type(host_index)[ldn] = IM_SCB;
+ last_scsi_command(shpnt)[ldn] = IM_READ_CAPACITY_CMD;
+ last_scsi_type(shpnt)[ldn] = IM_SCB;
scb->sys_buf_adr = isa_virt_to_bus(buf);
scb->sys_buf_length = 8;
scb->tsb_adr = isa_virt_to_bus(tsb);
/*issue scb to passed ldn, and busy wait for interrupt */
- got_interrupt(host_index) = 0;
- issue_cmd(host_index, isa_virt_to_bus(scb), IM_SCB | ldn);
- while (!got_interrupt(host_index))
+ got_interrupt(shpnt) = 0;
+ issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
+ while (!got_interrupt(shpnt))
barrier();
/*if got capacity, get block length and return one device found */
- if ((stat_result(host_index) == IM_SCB_CMD_COMPLETED) || (stat_result(host_index) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
+ if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
return 1;
}
/*if all three retries failed, return "no device at this ldn" */
@@ -806,39 +814,39 @@ static int read_capacity(int host_index, int ldn)
return 1;
}
-static int get_pos_info(int host_index)
+static int get_pos_info(struct Scsi_Host *shpnt)
{
int retr;
struct im_scb *scb;
struct im_tsb *tsb;
unsigned char *buf;
- scb = &(ld(host_index)[MAX_LOG_DEV].scb);
- tsb = &(ld(host_index)[MAX_LOG_DEV].tsb);
- buf = (unsigned char *) (&(ld(host_index)[MAX_LOG_DEV].buf));
- ld(host_index)[MAX_LOG_DEV].tsb.dev_status = 0;
+ scb = &(ld(shpnt)[MAX_LOG_DEV].scb);
+ tsb = &(ld(shpnt)[MAX_LOG_DEV].tsb);
+ buf = (unsigned char *) (&(ld(shpnt)[MAX_LOG_DEV].buf));
+ ld(shpnt)[MAX_LOG_DEV].tsb.dev_status = 0;
for (retr = 0; retr < 3; retr++) {
/*fill scb with get_pos_info command */
scb->command = IM_GET_POS_INFO_CMD;
scb->enable = IM_READ_CONTROL | IM_REPORT_TSB_ONLY_ON_ERROR | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
- last_scsi_command(host_index)[MAX_LOG_DEV] = IM_GET_POS_INFO_CMD;
- last_scsi_type(host_index)[MAX_LOG_DEV] = IM_SCB;
+ last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_GET_POS_INFO_CMD;
+ last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_SCB;
scb->sys_buf_adr = isa_virt_to_bus(buf);
- if (special(host_index) == IBM_SCSI2_FW)
+ if (special(shpnt) == IBM_SCSI2_FW)
scb->sys_buf_length = 256; /* get all info from F/W adapter */
else
scb->sys_buf_length = 18; /* get exactly 18 bytes for other SCSI */
scb->tsb_adr = isa_virt_to_bus(tsb);
/*issue scb to ldn=15, and busy wait for interrupt */
- got_interrupt(host_index) = 0;
- issue_cmd(host_index, isa_virt_to_bus(scb), IM_SCB | MAX_LOG_DEV);
+ got_interrupt(shpnt) = 0;
+ issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | MAX_LOG_DEV);
/* FIXME: timeout */
- while (!got_interrupt(host_index))
+ while (!got_interrupt(shpnt))
barrier();
/*if got POS-stuff, get block length and return one device found */
- if ((stat_result(host_index) == IM_SCB_CMD_COMPLETED) || (stat_result(host_index) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
+ if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
return 1;
}
/* if all three retries failed, return "no device at this ldn" */
@@ -851,14 +859,16 @@ static int get_pos_info(int host_index)
/* SCSI-immediate-command for assign. This functions maps/unmaps specific
ldn-numbers on SCSI (PUN,LUN). It is needed for presetting of the
subsystem and for dynamical remapping od ldns. */
-static int immediate_assign(int host_index, unsigned int pun, unsigned int lun, unsigned int ldn, unsigned int operation)
+static int immediate_assign(struct Scsi_Host *shpnt, unsigned int pun,
+ unsigned int lun, unsigned int ldn,
+ unsigned int operation)
{
int retr;
unsigned long imm_cmd;
for (retr = 0; retr < 3; retr++) {
/* select mutation level of the SCSI-adapter */
- switch (special(host_index)) {
+ switch (special(shpnt)) {
case IBM_SCSI2_FW:
imm_cmd = (unsigned long) (IM_ASSIGN_IMM_CMD);
imm_cmd |= (unsigned long) ((lun & 7) << 24);
@@ -867,7 +877,7 @@ static int immediate_assign(int host_index, unsigned int pun, unsigned int lun,
imm_cmd |= (unsigned long) ((ldn & 15) << 16);
break;
default:
- imm_cmd = inl(IM_CMD_REG(host_index));
+ imm_cmd = inl(IM_CMD_REG(shpnt));
imm_cmd &= (unsigned long) (0xF8000000); /* keep reserved bits */
imm_cmd |= (unsigned long) (IM_ASSIGN_IMM_CMD);
imm_cmd |= (unsigned long) ((lun & 7) << 24);
@@ -876,15 +886,15 @@ static int immediate_assign(int host_index, unsigned int pun, unsigned int lun,
imm_cmd |= (unsigned long) ((ldn & 15) << 16);
break;
}
- last_scsi_command(host_index)[MAX_LOG_DEV] = IM_ASSIGN_IMM_CMD;
- last_scsi_type(host_index)[MAX_LOG_DEV] = IM_IMM_CMD;
- got_interrupt(host_index) = 0;
- issue_cmd(host_index, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
- while (!got_interrupt(host_index))
+ last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_ASSIGN_IMM_CMD;
+ last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_IMM_CMD;
+ got_interrupt(shpnt) = 0;
+ issue_cmd(shpnt, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
+ while (!got_interrupt(shpnt))
barrier();
/*if command successful, break */
- if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
+ if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
return 1;
}
if (retr >= 3)
@@ -893,7 +903,7 @@ static int immediate_assign(int host_index, unsigned int pun, unsigned int lun,
return 1;
}
-static int immediate_feature(int host_index, unsigned int speed, unsigned int timeout)
+static int immediate_feature(struct Scsi_Host *shpnt, unsigned int speed, unsigned int timeout)
{
int retr;
unsigned long imm_cmd;
@@ -903,16 +913,16 @@ static int immediate_feature(int host_index, unsigned int speed, unsigned int ti
imm_cmd = IM_FEATURE_CTR_IMM_CMD;
imm_cmd |= (unsigned long) ((speed & 0x7) << 29);
imm_cmd |= (unsigned long) ((timeout & 0x1fff) << 16);
- last_scsi_command(host_index)[MAX_LOG_DEV] = IM_FEATURE_CTR_IMM_CMD;
- last_scsi_type(host_index)[MAX_LOG_DEV] = IM_IMM_CMD;
- got_interrupt(host_index) = 0;
+ last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_FEATURE_CTR_IMM_CMD;
+ last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_IMM_CMD;
+ got_interrupt(shpnt) = 0;
/* we need to run into command errors in order to probe for the
* right speed! */
global_command_error_excuse = 1;
- issue_cmd(host_index, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
+ issue_cmd(shpnt, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
/* FIXME: timeout */
- while (!got_interrupt(host_index))
+ while (!got_interrupt(shpnt))
barrier();
if (global_command_error_excuse == CMD_FAIL) {
global_command_error_excuse = 0;
@@ -920,7 +930,7 @@ static int immediate_feature(int host_index, unsigned int speed, unsigned int ti
} else
global_command_error_excuse = 0;
/*if command successful, break */
- if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
+ if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
return 1;
}
if (retr >= 3)
@@ -930,35 +940,35 @@ static int immediate_feature(int host_index, unsigned int speed, unsigned int ti
}
#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
-static int immediate_reset(int host_index, unsigned int ldn)
+static int immediate_reset(struct Scsi_Host *shpnt, unsigned int ldn)
{
int retries;
int ticks;
unsigned long imm_command;
for (retries = 0; retries < 3; retries++) {
- imm_command = inl(IM_CMD_REG(host_index));
+ imm_command = inl(IM_CMD_REG(shpnt));
imm_command &= (unsigned long) (0xFFFF0000); /* keep reserved bits */
imm_command |= (unsigned long) (IM_RESET_IMM_CMD);
- last_scsi_command(host_index)[ldn] = IM_RESET_IMM_CMD;
- last_scsi_type(host_index)[ldn] = IM_IMM_CMD;
- got_interrupt(host_index) = 0;
- reset_status(host_index) = IM_RESET_IN_PROGRESS;
- issue_cmd(host_index, (unsigned long) (imm_command), IM_IMM_CMD | ldn);
+ last_scsi_command(shpnt)[ldn] = IM_RESET_IMM_CMD;
+ last_scsi_type(shpnt)[ldn] = IM_IMM_CMD;
+ got_interrupt(shpnt) = 0;
+ reset_status(shpnt) = IM_RESET_IN_PROGRESS;
+ issue_cmd(shpnt, (unsigned long) (imm_command), IM_IMM_CMD | ldn);
ticks = IM_RESET_DELAY * HZ;
- while (reset_status(host_index) == IM_RESET_IN_PROGRESS && --ticks) {
+ while (reset_status(shpnt) == IM_RESET_IN_PROGRESS && --ticks) {
udelay((1 + 999 / HZ) * 1000);
barrier();
}
/* if reset did not complete, just complain */
if (!ticks) {
printk(KERN_ERR "IBM MCA SCSI: reset did not complete within %d seconds.\n", IM_RESET_DELAY);
- reset_status(host_index) = IM_RESET_FINISHED_OK;
+ reset_status(shpnt) = IM_RESET_FINISHED_OK;
/* did not work, finish */
return 1;
}
/*if command successful, break */
- if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
+ if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
return 1;
}
if (retries >= 3)
@@ -1060,35 +1070,35 @@ static int probe_display(int what)
return 0;
}
-static int probe_bus_mode(int host_index)
+static int probe_bus_mode(struct Scsi_Host *shpnt)
{
struct im_pos_info *info;
int num_bus = 0;
int ldn;
- info = (struct im_pos_info *) (&(ld(host_index)[MAX_LOG_DEV].buf));
- if (get_pos_info(host_index)) {
+ info = (struct im_pos_info *) (&(ld(shpnt)[MAX_LOG_DEV].buf));
+ if (get_pos_info(shpnt)) {
if (info->connector_size & 0xf000)
- subsystem_connector_size(host_index) = 16;
+ subsystem_connector_size(shpnt) = 16;
else
- subsystem_connector_size(host_index) = 32;
+ subsystem_connector_size(shpnt) = 32;
num_bus |= (info->pos_4b & 8) >> 3;
for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
- if ((special(host_index) == IBM_SCSI_WCACHE) || (special(host_index) == IBM_7568_WCACHE)) {
+ if ((special(shpnt) == IBM_SCSI_WCACHE) || (special(shpnt) == IBM_7568_WCACHE)) {
if (!((info->cache_stat >> ldn) & 1))
- ld(host_index)[ldn].cache_flag = 0;
+ ld(shpnt)[ldn].cache_flag = 0;
}
if (!((info->retry_stat >> ldn) & 1))
- ld(host_index)[ldn].retry_flag = 0;
+ ld(shpnt)[ldn].retry_flag = 0;
}
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: SCSI-Cache bits: ");
for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
- printk("%d", ld(host_index)[ldn].cache_flag);
+ printk("%d", ld(shpnt)[ldn].cache_flag);
}
printk("\nIBM MCA SCSI: SCSI-Retry bits: ");
for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
- printk("%d", ld(host_index)[ldn].retry_flag);
+ printk("%d", ld(shpnt)[ldn].retry_flag);
}
printk("\n");
#endif
@@ -1097,7 +1107,7 @@ static int probe_bus_mode(int host_index)
}
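
probe_bus_mode() derives everything from bit tests on the adapter's POS information block: the top nibble of connector_size selects 16 vs. 32, bit 3 of pos_4b flags separate-bus mode, and cache_stat/retry_stat carry one bit per ldn. A stand-alone illustration of those tests; the field names mirror struct im_pos_info, the values are invented:

/* User-space illustration of the POS-info bit tests used above. */
#include <stdio.h>

int main(void)
{
	unsigned short connector_size = 0xf000;	/* hypothetical POS data */
	unsigned short pos_4b = 0x08;		/* bit 3: separate bus mode */
	unsigned short cache_stat = 0x00ff;	/* cache usable on ldn 0-7 */
	unsigned short retry_stat = 0xffff;	/* retries usable everywhere */
	int ldn;

	printf("connector size: %d\n", (connector_size & 0xf000) ? 16 : 32);
	printf("separate bus mode: %d\n", (pos_4b & 8) >> 3);
	for (ldn = 0; ldn <= 15; ldn++)
		printf("ldn %2d: cache=%d retry=%d\n", ldn,
		       (cache_stat >> ldn) & 1, (retry_stat >> ldn) & 1);
	return 0;
}
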
/* probing scsi devices */
-static void check_devices(int host_index, int adaptertype)
+static void check_devices(struct Scsi_Host *shpnt, int adaptertype)
{
int id, lun, ldn, ticks;
int count_devices; /* local counter for connected device */
@@ -1108,24 +1118,24 @@ static void check_devices(int host_index, int adaptertype)
/* assign default values to certain variables */
ticks = 0;
count_devices = 0;
- IBM_DS(host_index).dyn_flag = 0; /* normally no need for dynamical ldn management */
- IBM_DS(host_index).total_errors = 0; /* set errorcounter to 0 */
- next_ldn(host_index) = 7; /* next ldn to be assigned is 7, because 0-6 is 'hardwired' */
+ IBM_DS(shpnt).dyn_flag = 0; /* normally no need for dynamical ldn management */
+ IBM_DS(shpnt).total_errors = 0; /* set errorcounter to 0 */
+ next_ldn(shpnt) = 7; /* next ldn to be assigned is 7, because 0-6 is 'hardwired' */
/* initialize the very important driver-informational arrays/structs */
- memset(ld(host_index), 0, sizeof(ld(host_index)));
+ memset(ld(shpnt), 0, sizeof(ld(shpnt)));
for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
- last_scsi_command(host_index)[ldn] = NO_SCSI; /* emptify last SCSI-command storage */
- last_scsi_type(host_index)[ldn] = 0;
- ld(host_index)[ldn].cache_flag = 1;
- ld(host_index)[ldn].retry_flag = 1;
+ last_scsi_command(shpnt)[ldn] = NO_SCSI; /* emptify last SCSI-command storage */
+ last_scsi_type(shpnt)[ldn] = 0;
+ ld(shpnt)[ldn].cache_flag = 1;
+ ld(shpnt)[ldn].retry_flag = 1;
}
- memset(get_ldn(host_index), TYPE_NO_DEVICE, sizeof(get_ldn(host_index))); /* this is essential ! */
- memset(get_scsi(host_index), TYPE_NO_DEVICE, sizeof(get_scsi(host_index))); /* this is essential ! */
+ memset(get_ldn(shpnt), TYPE_NO_DEVICE, sizeof(get_ldn(shpnt))); /* this is essential ! */
+ memset(get_scsi(shpnt), TYPE_NO_DEVICE, sizeof(get_scsi(shpnt))); /* this is essential ! */
for (lun = 0; lun < 8; lun++) {
/* mark the adapter at its pun on all luns */
- get_scsi(host_index)[subsystem_pun(host_index)][lun] = TYPE_IBM_SCSI_ADAPTER;
- get_ldn(host_index)[subsystem_pun(host_index)][lun] = MAX_LOG_DEV; /* make sure, the subsystem
+ get_scsi(shpnt)[subsystem_pun(shpnt)][lun] = TYPE_IBM_SCSI_ADAPTER;
+ get_ldn(shpnt)[subsystem_pun(shpnt)][lun] = MAX_LOG_DEV; /* make sure, the subsystem
ldn is active for all
luns. */
}
@@ -1134,9 +1144,9 @@ static void check_devices(int host_index, int adaptertype)
/* monitor connected on model XX95. */
/* STEP 1: */
- adapter_speed(host_index) = global_adapter_speed;
- speedrun = adapter_speed(host_index);
- while (immediate_feature(host_index, speedrun, adapter_timeout) == 2) {
+ adapter_speed(shpnt) = global_adapter_speed;
+ speedrun = adapter_speed(shpnt);
+ while (immediate_feature(shpnt, speedrun, adapter_timeout) == 2) {
probe_display(1);
if (speedrun == 7)
panic("IBM MCA SCSI: Cannot set Synchronous-Transfer-Rate!\n");
@@ -1144,30 +1154,30 @@ static void check_devices(int host_index, int adaptertype)
if (speedrun > 7)
speedrun = 7;
}
- adapter_speed(host_index) = speedrun;
+ adapter_speed(shpnt) = speedrun;
/* Get detailed information about the current adapter, necessary for
* device operations: */
- num_bus = probe_bus_mode(host_index);
+ num_bus = probe_bus_mode(shpnt);
/* num_bus contains only valid data for the F/W adapter! */
if (adaptertype == IBM_SCSI2_FW) { /* F/W SCSI adapter: */
/* F/W adapter PUN-space extension evaluation: */
if (num_bus) {
printk(KERN_INFO "IBM MCA SCSI: Separate bus mode (wide-addressing enabled)\n");
- subsystem_maxid(host_index) = 16;
+ subsystem_maxid(shpnt) = 16;
} else {
printk(KERN_INFO "IBM MCA SCSI: Combined bus mode (wide-addressing disabled)\n");
- subsystem_maxid(host_index) = 8;
+ subsystem_maxid(shpnt) = 8;
}
printk(KERN_INFO "IBM MCA SCSI: Sync.-Rate (F/W: 20, Int.: 10, Ext.: %s) MBytes/s\n", ibmrate(speedrun, adaptertype));
} else /* all other IBM SCSI adapters: */
printk(KERN_INFO "IBM MCA SCSI: Synchronous-SCSI-Transfer-Rate: %s MBytes/s\n", ibmrate(speedrun, adaptertype));
/* assign correct PUN device space */
- max_pun = subsystem_maxid(host_index);
+ max_pun = subsystem_maxid(shpnt);
#ifdef IM_DEBUG_PROBE
- printk("IBM MCA SCSI: Current SCSI-host index: %d\n", host_index);
+ printk("IBM MCA SCSI: Current SCSI-host index: %d\n", shpnt);
printk("IBM MCA SCSI: Removing default logical SCSI-device mapping.");
#else
printk(KERN_INFO "IBM MCA SCSI: Dev. Order: %s, Mapping (takes <2min): ", (ibm_ansi_order) ? "ANSI" : "New");
@@ -1177,7 +1187,7 @@ static void check_devices(int host_index, int adaptertype)
#ifdef IM_DEBUG_PROBE
printk(".");
#endif
- immediate_assign(host_index, 0, 0, ldn, REMOVE_LDN); /* remove ldn (wherever) */
+ immediate_assign(shpnt, 0, 0, ldn, REMOVE_LDN); /* remove ldn (wherever) */
}
lun = 0; /* default lun is 0 */
#ifndef IM_DEBUG_PROBE
@@ -1196,18 +1206,18 @@ static void check_devices(int host_index, int adaptertype)
#ifdef IM_DEBUG_PROBE
printk(".");
#endif
- if (id != subsystem_pun(host_index)) {
+ if (id != subsystem_pun(shpnt)) {
/* if pun is not the adapter: */
/* set ldn=0 to pun,lun */
- immediate_assign(host_index, id, lun, PROBE_LDN, SET_LDN);
- if (device_inquiry(host_index, PROBE_LDN)) { /* probe device */
- get_scsi(host_index)[id][lun] = (unsigned char) (ld(host_index)[PROBE_LDN].buf[0]);
+ immediate_assign(shpnt, id, lun, PROBE_LDN, SET_LDN);
+ if (device_inquiry(shpnt, PROBE_LDN)) { /* probe device */
+ get_scsi(shpnt)[id][lun] = (unsigned char) (ld(shpnt)[PROBE_LDN].buf[0]);
/* entry, even for NO_LUN */
- if (ld(host_index)[PROBE_LDN].buf[0] != TYPE_NO_LUN)
+ if (ld(shpnt)[PROBE_LDN].buf[0] != TYPE_NO_LUN)
count_devices++; /* an existing device was found */
}
/* remove ldn */
- immediate_assign(host_index, id, lun, PROBE_LDN, REMOVE_LDN);
+ immediate_assign(shpnt, id, lun, PROBE_LDN, REMOVE_LDN);
}
}
#ifndef IM_DEBUG_PROBE
@@ -1227,16 +1237,16 @@ static void check_devices(int host_index, int adaptertype)
#ifdef IM_DEBUG_PROBE
printk(".");
#endif
- if (id != subsystem_pun(host_index)) {
- if (get_scsi(host_index)[id][lun] != TYPE_NO_LUN && get_scsi(host_index)[id][lun] != TYPE_NO_DEVICE) {
+ if (id != subsystem_pun(shpnt)) {
+ if (get_scsi(shpnt)[id][lun] != TYPE_NO_LUN && get_scsi(shpnt)[id][lun] != TYPE_NO_DEVICE) {
/* Only map if accepted type. Always enter for
lun == 0 to get no gaps into ldn-mapping for ldn<7. */
- immediate_assign(host_index, id, lun, ldn, SET_LDN);
- get_ldn(host_index)[id][lun] = ldn; /* map ldn */
- if (device_exists(host_index, ldn, &ld(host_index)[ldn].block_length, &ld(host_index)[ldn].device_type)) {
+ immediate_assign(shpnt, id, lun, ldn, SET_LDN);
+ get_ldn(shpnt)[id][lun] = ldn; /* map ldn */
+ if (device_exists(shpnt, ldn, &ld(shpnt)[ldn].block_length, &ld(shpnt)[ldn].device_type)) {
#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
printk("resetting device at ldn=%x ... ", ldn);
- immediate_reset(host_index, ldn);
+ immediate_reset(shpnt, ldn);
#endif
ldn++;
} else {
@@ -1244,15 +1254,15 @@ static void check_devices(int host_index, int adaptertype)
* handle it or because it has problems */
if (lun > 0) {
/* remove mapping */
- get_ldn(host_index)[id][lun] = TYPE_NO_DEVICE;
- immediate_assign(host_index, 0, 0, ldn, REMOVE_LDN);
+ get_ldn(shpnt)[id][lun] = TYPE_NO_DEVICE;
+ immediate_assign(shpnt, 0, 0, ldn, REMOVE_LDN);
} else
ldn++;
}
} else if (lun == 0) {
/* map lun == 0, even if no device exists */
- immediate_assign(host_index, id, lun, ldn, SET_LDN);
- get_ldn(host_index)[id][lun] = ldn; /* map ldn */
+ immediate_assign(shpnt, id, lun, ldn, SET_LDN);
+ get_ldn(shpnt)[id][lun] = ldn; /* map ldn */
ldn++;
}
}
@@ -1262,14 +1272,14 @@ static void check_devices(int host_index, int adaptertype)
/* map remaining ldns to non-existing devices */
for (lun = 1; lun < 8 && ldn < MAX_LOG_DEV; lun++)
for (id = 0; id < max_pun && ldn < MAX_LOG_DEV; id++) {
- if (get_scsi(host_index)[id][lun] == TYPE_NO_LUN || get_scsi(host_index)[id][lun] == TYPE_NO_DEVICE) {
+ if (get_scsi(shpnt)[id][lun] == TYPE_NO_LUN || get_scsi(shpnt)[id][lun] == TYPE_NO_DEVICE) {
probe_display(1);
/* Map remaining ldns only to NON-existing pun,lun
combinations to make sure an inquiry will fail.
For MULTI_LUN, this is needed to avoid autonomous adapter
SCSI-remapping. */
- immediate_assign(host_index, id, lun, ldn, SET_LDN);
- get_ldn(host_index)[id][lun] = ldn;
+ immediate_assign(shpnt, id, lun, ldn, SET_LDN);
+ get_ldn(shpnt)[id][lun] = ldn;
ldn++;
}
}
@@ -1292,51 +1302,51 @@ static void check_devices(int host_index, int adaptertype)
for (id = 0; id < max_pun; id++) {
printk("%2d ", id);
for (lun = 0; lun < 8; lun++)
- printk("%2s ", ti_p(get_scsi(host_index)[id][lun]));
+ printk("%2s ", ti_p(get_scsi(shpnt)[id][lun]));
printk(" %2d ", id);
for (lun = 0; lun < 8; lun++)
- printk("%2s ", ti_l(get_ldn(host_index)[id][lun]));
+ printk("%2s ", ti_l(get_ldn(shpnt)[id][lun]));
printk("\n");
}
#endif
/* assign total number of found SCSI-devices to the statistics struct */
- IBM_DS(host_index).total_scsi_devices = count_devices;
+ IBM_DS(shpnt).total_scsi_devices = count_devices;
/* decide, for the /proc-filesystem output, whether the configuration of
 SCSI-devices makes dynamic reassignment of devices necessary */
if (count_devices >= MAX_LOG_DEV)
- IBM_DS(host_index).dyn_flag = 1; /* dynamical assignment is necessary */
+ IBM_DS(shpnt).dyn_flag = 1; /* dynamical assignment is necessary */
else
- IBM_DS(host_index).dyn_flag = 0; /* dynamical assignment is not necessary */
+ IBM_DS(shpnt).dyn_flag = 0; /* dynamical assignment is not necessary */
/* If no SCSI-devices are assigned, return 1 in order to cause message. */
if (ldn == 0)
printk("IBM MCA SCSI: Warning: No SCSI-devices found/assigned!\n");
/* reset the counters for statistics on the current adapter */
- IBM_DS(host_index).scbs = 0;
- IBM_DS(host_index).long_scbs = 0;
- IBM_DS(host_index).total_accesses = 0;
- IBM_DS(host_index).total_interrupts = 0;
- IBM_DS(host_index).dynamical_assignments = 0;
- memset(IBM_DS(host_index).ldn_access, 0x0, sizeof(IBM_DS(host_index).ldn_access));
- memset(IBM_DS(host_index).ldn_read_access, 0x0, sizeof(IBM_DS(host_index).ldn_read_access));
- memset(IBM_DS(host_index).ldn_write_access, 0x0, sizeof(IBM_DS(host_index).ldn_write_access));
- memset(IBM_DS(host_index).ldn_inquiry_access, 0x0, sizeof(IBM_DS(host_index).ldn_inquiry_access));
- memset(IBM_DS(host_index).ldn_modeselect_access, 0x0, sizeof(IBM_DS(host_index).ldn_modeselect_access));
- memset(IBM_DS(host_index).ldn_assignments, 0x0, sizeof(IBM_DS(host_index).ldn_assignments));
+ IBM_DS(shpnt).scbs = 0;
+ IBM_DS(shpnt).long_scbs = 0;
+ IBM_DS(shpnt).total_accesses = 0;
+ IBM_DS(shpnt).total_interrupts = 0;
+ IBM_DS(shpnt).dynamical_assignments = 0;
+ memset(IBM_DS(shpnt).ldn_access, 0x0, sizeof(IBM_DS(shpnt).ldn_access));
+ memset(IBM_DS(shpnt).ldn_read_access, 0x0, sizeof(IBM_DS(shpnt).ldn_read_access));
+ memset(IBM_DS(shpnt).ldn_write_access, 0x0, sizeof(IBM_DS(shpnt).ldn_write_access));
+ memset(IBM_DS(shpnt).ldn_inquiry_access, 0x0, sizeof(IBM_DS(shpnt).ldn_inquiry_access));
+ memset(IBM_DS(shpnt).ldn_modeselect_access, 0x0, sizeof(IBM_DS(shpnt).ldn_modeselect_access));
+ memset(IBM_DS(shpnt).ldn_assignments, 0x0, sizeof(IBM_DS(shpnt).ldn_assignments));
probe_display(0);
return;
}
-static int device_exists(int host_index, int ldn, int *block_length, int *device_type)
+static int device_exists(struct Scsi_Host *shpnt, int ldn, int *block_length, int *device_type)
{
unsigned char *buf;
/* if no valid device found, return immediately with 0 */
- if (!(device_inquiry(host_index, ldn)))
+ if (!(device_inquiry(shpnt, ldn)))
return 0;
- buf = (unsigned char *) (&(ld(host_index)[ldn].buf));
+ buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
if (*buf == TYPE_ROM) {
*device_type = TYPE_ROM;
*block_length = 2048; /* (standard blocksize for yellow-/red-book) */
@@ -1349,7 +1359,7 @@ static int device_exists(int host_index, int ldn, int *block_length, int *device
}
if (*buf == TYPE_DISK) {
*device_type = TYPE_DISK;
- if (read_capacity(host_index, ldn)) {
+ if (read_capacity(shpnt, ldn)) {
*block_length = *(buf + 7) + (*(buf + 6) << 8) + (*(buf + 5) << 16) + (*(buf + 4) << 24);
return 1;
} else
@@ -1357,7 +1367,7 @@ static int device_exists(int host_index, int ldn, int *block_length, int *device
}
if (*buf == TYPE_MOD) {
*device_type = TYPE_MOD;
- if (read_capacity(host_index, ldn)) {
+ if (read_capacity(shpnt, ldn)) {
*block_length = *(buf + 7) + (*(buf + 6) << 8) + (*(buf + 5) << 16) + (*(buf + 4) << 24);
return 1;
} else
@@ -1430,6 +1440,9 @@ static void internal_ibmmca_scsi_setup(char *str, int *ints)
return;
}
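
The block-length assembly in device_exists() above is just a big-endian 32-bit decode of the 8-byte READ CAPACITY payload: last logical block number in bytes 0-3, block length in bytes 4-7. A self-contained worked example; be32() is an invented helper and the buffer contents are made up:

/* Worked example of the READ CAPACITY byte assembly used in device_exists(). */
#include <stdio.h>

static unsigned int be32(const unsigned char *p)
{
	return p[3] + (p[2] << 8) + (p[1] << 16) + ((unsigned int)p[0] << 24);
}

int main(void)
{
	/* hypothetical READ CAPACITY response: last LBA, then block length */
	unsigned char buf[8] = { 0x00, 0x1f, 0xff, 0xff, 0x00, 0x00, 0x02, 0x00 };

	printf("last block=%u block length=%u\n", be32(buf), be32(buf + 4));
	return 0;
}
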
+#if 0
+ FIXME NEED TO MOVE TO SYSFS
+
static int ibmmca_getinfo(char *buf, int slot, void *dev_id)
{
struct Scsi_Host *shpnt;
@@ -1480,58 +1493,34 @@ static int ibmmca_getinfo(char *buf, int slot, void *dev_id)
return len;
}
+#endif
-int ibmmca_detect(struct scsi_host_template * scsi_template)
+static struct scsi_host_template ibmmca_driver_template = {
+ .proc_name = "ibmmca",
+ .proc_info = ibmmca_proc_info,
+ .name = "IBM SCSI-Subsystem",
+ .queuecommand = ibmmca_queuecommand,
+ .eh_abort_handler = ibmmca_abort,
+ .eh_host_reset_handler = ibmmca_host_reset,
+ .bios_param = ibmmca_biosparam,
+ .can_queue = 16,
+ .this_id = 7,
+ .sg_tablesize = 16,
+ .cmd_per_lun = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+static int ibmmca_probe(struct device *dev)
{
struct Scsi_Host *shpnt;
- int port, id, i, j, k, slot;
- int devices_on_irq_11 = 0;
- int devices_on_irq_14 = 0;
- int IRQ14_registered = 0;
- int IRQ11_registered = 0;
-
- found = 0; /* make absolutely sure, that found is set to 0 */
+ int port, id, i, j, k, irq, enabled, ret = -EINVAL;
+ struct mca_device *mca_dev = to_mca_device(dev);
+ const char *description = ibmmca_description[mca_dev->index];
/* First of all, print the version number of the driver. This is
* important to allow better user bugreports in case of already
* having problems with the MCA_bus probing. */
printk(KERN_INFO "IBM MCA SCSI: Version %s\n", IBMMCA_SCSI_DRIVER_VERSION);
- /* if this is not MCA machine, return "nothing found" */
- if (!MCA_bus) {
- printk(KERN_INFO "IBM MCA SCSI: No Microchannel-bus present --> Aborting.\n" " This machine does not have any IBM MCA-bus\n" " or the MCA-Kernel-support is not enabled!\n");
- return 0;
- }
-
-#ifdef MODULE
- /* If the driver is run as module, read from conf.modules or cmd-line */
- if (boot_options)
- option_setup(boot_options);
-#endif
-
- /* get interrupt request level */
- if (request_irq(IM_IRQ, interrupt_handler, IRQF_SHARED, "ibmmcascsi", hosts)) {
- printk(KERN_ERR "IBM MCA SCSI: Unable to get shared IRQ %d.\n", IM_IRQ);
- return 0;
- } else
- IRQ14_registered++;
-
- /* if ibmmcascsi setup option was passed to kernel, return "found" */
- for (i = 0; i < IM_MAX_HOSTS; i++)
- if (io_port[i] > 0 && scsi_id[i] >= 0 && scsi_id[i] < 8) {
- printk("IBM MCA SCSI: forced detected SCSI Adapter, io=0x%x, scsi id=%d.\n", io_port[i], scsi_id[i]);
- if ((shpnt = ibmmca_register(scsi_template, io_port[i], scsi_id[i], FORCED_DETECTION, "forced detected SCSI Adapter"))) {
- for (k = 2; k < 7; k++)
- ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = 0;
- ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = FORCED_DETECTION;
- mca_set_adapter_name(MCA_INTEGSCSI, "forced detected SCSI Adapter");
- mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo, shpnt);
- mca_mark_as_used(MCA_INTEGSCSI);
- devices_on_irq_14++;
- }
- }
- if (found)
- return found;
-
/* The POS2-register of all PS/2 model SCSI-subsystems has the following
* interpretation of bits:
* Bit 7 - 4 : Chip Revision ID (Release)
@@ -1558,7 +1547,14 @@ int ibmmca_detect(struct scsi_host_template * scsi_template)
/* first look for the IBM SCSI integrated subsystem on the motherboard */
for (j = 0; j < 8; j++) /* read the pos-information */
- pos[j] = mca_read_stored_pos(MCA_INTEGSCSI, j);
+ pos[j] = mca_device_read_pos(mca_dev, j);
+ id = (pos[3] & 0xe0) >> 5; /* this is correct and represents the PUN */
+ enabled = (pos[2] & 0x01);
+ if (!enabled) {
+ printk(KERN_WARNING "IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n");
+ printk(KERN_WARNING " SCSI-operations may not work.\n");
+ }
+
/* pos2 = pos3 = 0xff if there is no integrated SCSI-subsystem present, but
* if we ignore the settings of all surrounding pos registers, it is not
* completely sufficient to only check pos2 and pos3. */
@@ -1566,232 +1562,137 @@ int ibmmca_detect(struct scsi_host_template * scsi_template)
* make sure, we see a real integrated onboard SCSI-interface and no
* internal system information, which gets mapped to some pos registers
* on models 95xx. */
- if ((!pos[0] && !pos[1] && pos[2] > 0 && pos[3] > 0 && !pos[4] && !pos[5] && !pos[6] && !pos[7]) || (pos[0] == 0xff && pos[1] == 0xff && pos[2] < 0xff && pos[3] < 0xff && pos[4] == 0xff && pos[5] == 0xff && pos[6] == 0xff && pos[7] == 0xff)) {
- if ((pos[2] & 1) == 1) /* is the subsystem chip enabled ? */
- port = IM_IO_PORT;
- else { /* if disabled, no IRQs will be generated, as the chip won't
- * listen to the incoming commands and will do really nothing,
- * except for listening to the pos-register settings. If this
- * happens, I need to hugely think about it, as one has to
- * write something to the MCA-Bus pos register in order to
- * enable the chip. Normally, IBM-SCSI won't pass the POST,
- * when the chip is disabled (see IBM tech. ref.). */
- port = IM_IO_PORT; /* anyway, set the portnumber and warn */
- printk("IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n" " SCSI-operations may not work.\n");
+ if (mca_dev->slot == MCA_INTEGSCSI &&
+ ((!pos[0] && !pos[1] && pos[2] > 0 &&
+ pos[3] > 0 && !pos[4] && !pos[5] &&
+ !pos[6] && !pos[7]) ||
+ (pos[0] == 0xff && pos[1] == 0xff &&
+ pos[2] < 0xff && pos[3] < 0xff &&
+ pos[4] == 0xff && pos[5] == 0xff &&
+ pos[6] == 0xff && pos[7] == 0xff))) {
+ irq = IM_IRQ;
+ port = IM_IO_PORT;
+ } else {
+ irq = IM_IRQ;
+ port = IM_IO_PORT + ((pos[2] & 0x0e) << 2);
+ if ((mca_dev->index == IBM_SCSI2_FW) && (pos[6] != 0)) {
+ printk(KERN_ERR "IBM MCA SCSI: ERROR - Wrong POS(6)-register setting!\n");
+ printk(KERN_ERR " Impossible to determine adapter PUN!\n");
+ printk(KERN_ERR " Guessing adapter PUN = 7.\n");
+ id = 7;
+ } else {
+ id = (pos[3] & 0xe0) >> 5; /* get subsystem PUN */
+ if (mca_dev->index == IBM_SCSI2_FW) {
+ id |= (pos[3] & 0x10) >> 1; /* get subsystem PUN high-bit
+ * for F/W adapters */
+ }
}
- id = (pos[3] & 0xe0) >> 5; /* this is correct and represents the PUN */
- /* give detailed information on the subsystem. This helps me
- * additionally during debugging and analyzing bug-reports. */
- printk(KERN_INFO "IBM MCA SCSI: IBM Integrated SCSI Controller ffound, io=0x%x, scsi id=%d,\n", port, id);
- printk(KERN_INFO " chip rev.=%d, 8K NVRAM=%s, subsystem=%s\n", ((pos[2] & 0xf0) >> 4), (pos[2] & 2) ? "locked" : "accessible", (pos[2] & 1) ? "enabled." : "disabled.");
-
- /* register the found integrated SCSI-subsystem */
- if ((shpnt = ibmmca_register(scsi_template, port, id, INTEGRATED_SCSI, "IBM Integrated SCSI Controller")))
- {
- for (k = 2; k < 7; k++)
- ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
- ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = INTEGRATED_SCSI;
- mca_set_adapter_name(MCA_INTEGSCSI, "IBM Integrated SCSI Controller");
- mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo, shpnt);
- mca_mark_as_used(MCA_INTEGSCSI);
- devices_on_irq_14++;
+ if ((mca_dev->index == IBM_SCSI2_FW) &&
+ (pos[4] & 0x01) && (pos[6] == 0)) {
+ /* IRQ11 is used by SCSI-2 F/W Adapter/A */
+ printk(KERN_DEBUG "IBM MCA SCSI: SCSI-2 F/W adapter needs IRQ 11.\n");
+ irq = IM_IRQ_FW;
}
}
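
The probe pulls everything it needs out of two POS bytes: the enable bit and I/O-port offset from pos[2], the subsystem PUN from pos[3] (plus one extra high bit on SCSI-2 F/W adapters), and the ROM address from the top nibble of pos[2]. A self-contained decode with made-up POS values:

/* User-space decode of hypothetical POS bytes, mirroring the tests above. */
#include <stdio.h>

int main(void)
{
	unsigned char pos2 = 0x25, pos3 = 0xb0;	/* hypothetical POS bytes */
	int enabled = pos2 & 0x01;		/* subsystem chip enabled? */
	int port_offset = (pos2 & 0x0e) << 2;	/* I/O port offset */
	int id = (pos3 & 0xe0) >> 5;		/* subsystem PUN */
	int id_fw = id | ((pos3 & 0x10) >> 1);	/* plus high bit on F/W adapters */

	printf("enabled=%d port-offset=0x%x id=%d id(F/W)=%d\n",
	       enabled, port_offset, id, id_fw);
	if ((pos2 & 0xf0) == 0xf0)
		printf("ROM disabled\n");
	else
		printf("ROM at 0x%x\n", ((pos2 & 0xf0) << 13) + 0xc0000);
	return 0;
}
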
- /* now look for other adapters in MCA slots, */
- /* determine the number of known IBM-SCSI-subsystem types */
- /* see the pos[2] dependence to get the adapter port-offset. */
- for (i = 0; i < ARRAY_SIZE(subsys_list); i++) {
- /* scan each slot for a fitting adapter id */
- slot = 0; /* start at slot 0 */
- while ((slot = mca_find_adapter(subsys_list[i].mca_id, slot))
- != MCA_NOTFOUND) { /* scan through all slots */
- for (j = 0; j < 8; j++) /* read the pos-information */
- pos[j] = mca_read_stored_pos(slot, j);
- if ((pos[2] & 1) == 1)
- /* is the subsystem chip enabled ? */
- /* (explanations see above) */
- port = IM_IO_PORT + ((pos[2] & 0x0e) << 2);
- else {
- /* anyway, set the portnumber and warn */
- port = IM_IO_PORT + ((pos[2] & 0x0e) << 2);
- printk(KERN_WARNING "IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n");
- printk(KERN_WARNING " SCSI-operations may not work.\n");
- }
- if ((i == IBM_SCSI2_FW) && (pos[6] != 0)) {
- printk(KERN_ERR "IBM MCA SCSI: ERROR - Wrong POS(6)-register setting!\n");
- printk(KERN_ERR " Impossible to determine adapter PUN!\n");
- printk(KERN_ERR " Guessing adapter PUN = 7.\n");
- id = 7;
- } else {
- id = (pos[3] & 0xe0) >> 5; /* get subsystem PUN */
- if (i == IBM_SCSI2_FW) {
- id |= (pos[3] & 0x10) >> 1; /* get subsystem PUN high-bit
- * for F/W adapters */
- }
- }
- if ((i == IBM_SCSI2_FW) && (pos[4] & 0x01) && (pos[6] == 0)) {
- /* IRQ11 is used by SCSI-2 F/W Adapter/A */
- printk(KERN_DEBUG "IBM MCA SCSI: SCSI-2 F/W adapter needs IRQ 11.\n");
- /* get interrupt request level */
- if (request_irq(IM_IRQ_FW, interrupt_handler, IRQF_SHARED, "ibmmcascsi", hosts)) {
- printk(KERN_ERR "IBM MCA SCSI: Unable to get shared IRQ %d.\n", IM_IRQ_FW);
- } else
- IRQ11_registered++;
- }
- printk(KERN_INFO "IBM MCA SCSI: %s found in slot %d, io=0x%x, scsi id=%d,\n", subsys_list[i].description, slot + 1, port, id);
- if ((pos[2] & 0xf0) == 0xf0)
- printk(KERN_DEBUG" ROM Addr.=off,");
- else
- printk(KERN_DEBUG " ROM Addr.=0x%x,", ((pos[2] & 0xf0) << 13) + 0xc0000);
- printk(KERN_DEBUG " port-offset=0x%x, subsystem=%s\n", ((pos[2] & 0x0e) << 2), (pos[2] & 1) ? "enabled." : "disabled.");
-
- /* register the hostadapter */
- if ((shpnt = ibmmca_register(scsi_template, port, id, i, subsys_list[i].description))) {
- for (k = 2; k < 8; k++)
- ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
- ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = i;
- mca_set_adapter_name(slot, subsys_list[i].description);
- mca_set_adapter_procfn(slot, (MCA_ProcFn) ibmmca_getinfo, shpnt);
- mca_mark_as_used(slot);
- if ((i == IBM_SCSI2_FW) && (pos[4] & 0x01) && (pos[6] == 0))
- devices_on_irq_11++;
- else
- devices_on_irq_14++;
- }
- slot++; /* advance to next slot */
- } /* advance to next adapter id in the list of IBM-SCSI-subsystems */
- }
- /* now check for SCSI-adapters, mapped to the integrated SCSI
- * area. E.g. a W/Cache in MCA-slot 9(!). Do the check correct here,
- * as this is a known effect on some models 95xx. */
- for (i = 0; i < ARRAY_SIZE(subsys_list); i++) {
- /* scan each slot for a fitting adapter id */
- slot = mca_find_adapter(subsys_list[i].mca_id, MCA_INTEGSCSI);
- if (slot != MCA_NOTFOUND) { /* scan through all slots */
- for (j = 0; j < 8; j++) /* read the pos-information */
- pos[j] = mca_read_stored_pos(slot, j);
- if ((pos[2] & 1) == 1) { /* is the subsystem chip enabled ? */
- /* (explanations see above) */
- port = IM_IO_PORT + ((pos[2] & 0x0e) << 2);
- } else { /* anyway, set the portnumber and warn */
- port = IM_IO_PORT + ((pos[2] & 0x0e) << 2);
- printk(KERN_WARNING "IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n");
- printk(KERN_WARNING " SCSI-operations may not work.\n");
- }
- if ((i == IBM_SCSI2_FW) && (pos[6] != 0)) {
- printk(KERN_ERR "IBM MCA SCSI: ERROR - Wrong POS(6)-register setting!\n");
- printk(KERN_ERR " Impossible to determine adapter PUN!\n");
- printk(KERN_ERR " Guessing adapter PUN = 7.\n");
- id = 7;
- } else {
- id = (pos[3] & 0xe0) >> 5; /* get subsystem PUN */
- if (i == IBM_SCSI2_FW)
- id |= (pos[3] & 0x10) >> 1; /* get subsystem PUN high-bit
- * for F/W adapters */
- }
- if ((i == IBM_SCSI2_FW) && (pos[4] & 0x01) && (pos[6] == 0)) {
- /* IRQ11 is used by SCSI-2 F/W Adapter/A */
- printk(KERN_DEBUG "IBM MCA SCSI: SCSI-2 F/W adapter needs IRQ 11.\n");
- /* get interrupt request level */
- if (request_irq(IM_IRQ_FW, interrupt_handler, IRQF_SHARED, "ibmmcascsi", hosts))
- printk(KERN_ERR "IBM MCA SCSI: Unable to get shared IRQ %d.\n", IM_IRQ_FW);
- else
- IRQ11_registered++;
- }
- printk(KERN_INFO "IBM MCA SCSI: %s found in slot %d, io=0x%x, scsi id=%d,\n", subsys_list[i].description, slot + 1, port, id);
- if ((pos[2] & 0xf0) == 0xf0)
- printk(KERN_DEBUG " ROM Addr.=off,");
- else
- printk(KERN_DEBUG " ROM Addr.=0x%x,", ((pos[2] & 0xf0) << 13) + 0xc0000);
- printk(KERN_DEBUG " port-offset=0x%x, subsystem=%s\n", ((pos[2] & 0x0e) << 2), (pos[2] & 1) ? "enabled." : "disabled.");
-
- /* register the hostadapter */
- if ((shpnt = ibmmca_register(scsi_template, port, id, i, subsys_list[i].description))) {
- for (k = 2; k < 7; k++)
- ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
- ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = i;
- mca_set_adapter_name(slot, subsys_list[i].description);
- mca_set_adapter_procfn(slot, (MCA_ProcFn) ibmmca_getinfo, shpnt);
- mca_mark_as_used(slot);
- if ((i == IBM_SCSI2_FW) && (pos[4] & 0x01) && (pos[6] == 0))
- devices_on_irq_11++;
- else
- devices_on_irq_14++;
- }
- slot++; /* advance to next slot */
- } /* advance to next adapter id in the list of IBM-SCSI-subsystems */
- }
- if (IRQ11_registered && !devices_on_irq_11)
- free_irq(IM_IRQ_FW, hosts); /* no devices on IRQ 11 */
- if (IRQ14_registered && !devices_on_irq_14)
- free_irq(IM_IRQ, hosts); /* no devices on IRQ 14 */
- if (!devices_on_irq_11 && !devices_on_irq_14)
- printk(KERN_WARNING "IBM MCA SCSI: No IBM SCSI-subsystem adapter attached.\n");
- return found; /* return the number of found SCSI hosts. Should be 1 or 0. */
-}
-static struct Scsi_Host *ibmmca_register(struct scsi_host_template * scsi_template, int port, int id, int adaptertype, char *hostname)
-{
- struct Scsi_Host *shpnt;
- int i, j;
- unsigned int ctrl;
+ /* give detailed information on the subsystem. This helps me
+ * additionally during debugging and analyzing bug-reports. */
+ printk(KERN_INFO "IBM MCA SCSI: %s found, io=0x%x, scsi id=%d,\n",
+ description, port, id);
+ if (mca_dev->slot == MCA_INTEGSCSI)
+ printk(KERN_INFO " chip rev.=%d, 8K NVRAM=%s, subsystem=%s\n", ((pos[2] & 0xf0) >> 4), (pos[2] & 2) ? "locked" : "accessible", (pos[2] & 1) ? "enabled." : "disabled.");
+ else {
+ if ((pos[2] & 0xf0) == 0xf0)
+ printk(KERN_DEBUG " ROM Addr.=off,");
+ else
+ printk(KERN_DEBUG " ROM Addr.=0x%x,", ((pos[2] & 0xf0) << 13) + 0xc0000);
+
+ printk(KERN_DEBUG " port-offset=0x%x, subsystem=%s\n", ((pos[2] & 0x0e) << 2), (pos[2] & 1) ? "enabled." : "disabled.");
+ }
/* check I/O region */
- if (!request_region(port, IM_N_IO_PORT, hostname)) {
+ if (!request_region(port, IM_N_IO_PORT, description)) {
printk(KERN_ERR "IBM MCA SCSI: Unable to get I/O region 0x%x-0x%x (%d ports).\n", port, port + IM_N_IO_PORT - 1, IM_N_IO_PORT);
- return NULL;
+ goto out_fail;
}
/* register host */
- shpnt = scsi_register(scsi_template, sizeof(struct ibmmca_hostdata));
+ shpnt = scsi_host_alloc(&ibmmca_driver_template,
+ sizeof(struct ibmmca_hostdata));
if (!shpnt) {
printk(KERN_ERR "IBM MCA SCSI: Unable to register host.\n");
- release_region(port, IM_N_IO_PORT);
- return NULL;
+ goto out_release;
+ }
+
+ dev_set_drvdata(dev, shpnt);
+ if (request_irq(irq, interrupt_handler, IRQF_SHARED, description, dev)) {
+ printk(KERN_ERR "IBM MCA SCSI: failed to request interrupt %d\n", irq);
+ goto out_free_host;
}
/* request I/O region */
- hosts[found] = shpnt; /* add new found hostadapter to the list */
- special(found) = adaptertype; /* important assignment or else crash! */
- subsystem_connector_size(found) = 0; /* preset slot-size */
- shpnt->irq = IM_IRQ; /* assign necessary stuff for the adapter */
+ special(shpnt) = mca_dev->index; /* important assignment or else crash! */
+ subsystem_connector_size(shpnt) = 0; /* preset slot-size */
+ shpnt->irq = irq; /* assign necessary stuff for the adapter */
shpnt->io_port = port;
shpnt->n_io_port = IM_N_IO_PORT;
shpnt->this_id = id;
shpnt->max_id = 8; /* 8 PUNs are default */
/* now, the SCSI-subsystem is connected to Linux */
- ctrl = (unsigned int) (inb(IM_CTR_REG(found))); /* get control-register status */
#ifdef IM_DEBUG_PROBE
+ ctrl = (unsigned int) (inb(IM_CTR_REG(found))); /* get control-register status */
printk("IBM MCA SCSI: Control Register contents: %x, status: %x\n", ctrl, inb(IM_STAT_REG(found)));
printk("IBM MCA SCSI: This adapters' POS-registers: ");
for (i = 0; i < 8; i++)
printk("%x ", pos[i]);
printk("\n");
#endif
- reset_status(found) = IM_RESET_NOT_IN_PROGRESS;
+ reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS;
for (i = 0; i < 16; i++) /* reset the tables */
for (j = 0; j < 8; j++)
- get_ldn(found)[i][j] = MAX_LOG_DEV;
+ get_ldn(shpnt)[i][j] = MAX_LOG_DEV;
/* check which logical devices exist */
/* after this line, local interrupting is possible: */
- local_checking_phase_flag(found) = 1;
- check_devices(found, adaptertype); /* call by value, using the global variable hosts */
- local_checking_phase_flag(found) = 0;
- found++; /* now increase index to be prepared for next found subsystem */
+ local_checking_phase_flag(shpnt) = 1;
+ check_devices(shpnt, mca_dev->index); /* probe devices on this adapter */
+ local_checking_phase_flag(shpnt) = 0;
+
/* an ibm mca subsystem has been detected */
- return shpnt;
+
+ for (k = 2; k < 7; k++)
+ ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
+ ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = INTEGRATED_SCSI;
+ mca_device_set_name(mca_dev, description);
+ /* FIXME: NEED TO REPLUMB TO SYSFS
+ mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo, shpnt);
+ */
+ mca_device_set_claim(mca_dev, 1);
+ if (scsi_add_host(shpnt, dev)) {
+ dev_printk(KERN_ERR, dev, "IBM MCA SCSI: scsi_add_host failed\n");
+ goto out_free_host;
+ }
+ scsi_scan_host(shpnt);
+
+ return 0;
+ out_free_host:
+ scsi_host_put(shpnt);
+ out_release:
+ release_region(port, IM_N_IO_PORT);
+ out_fail:
+ return ret;
}
-static int ibmmca_release(struct Scsi_Host *shpnt)
+static int __devexit ibmmca_remove(struct device *dev)
{
+ struct Scsi_Host *shpnt = dev_get_drvdata(dev);
+ scsi_remove_host(shpnt);
release_region(shpnt->io_port, shpnt->n_io_port);
- if (!(--found))
- free_irq(shpnt->irq, hosts);
+ free_irq(shpnt->irq, dev);
return 0;
}
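
The converted probe/remove pair follows the standard hotplug scaffold: request_region(), scsi_host_alloc(), request_irq(), scsi_add_host(), scsi_scan_host(), and the mirror-image teardown in remove. A hedged sketch of that ordering with full error unwinding; example_probe() and the error labels are placeholders, interrupt_handler, IM_N_IO_PORT and struct ibmmca_hostdata are assumed from the driver, and unlike the hunk above the sketch also frees the IRQ when scsi_add_host() fails:

/* Sketch only, not the driver's literal code: generic alloc/add/scan order
 * and teardown for a 2.6.23-era SCSI LLDD probe. */
static int example_probe(struct device *dev, struct scsi_host_template *tmpl,
			 unsigned int port, unsigned int irq)
{
	struct Scsi_Host *sh;
	int err = -ENODEV;

	if (!request_region(port, IM_N_IO_PORT, "example"))
		return -EBUSY;
	sh = scsi_host_alloc(tmpl, sizeof(struct ibmmca_hostdata));
	if (!sh)
		goto out_region;
	dev_set_drvdata(dev, sh);
	err = request_irq(irq, interrupt_handler, IRQF_SHARED, "example", dev);
	if (err)
		goto out_put;
	err = scsi_add_host(sh, dev);	/* make the host visible to the midlayer */
	if (err)
		goto out_irq;
	scsi_scan_host(sh);		/* triggers device scanning */
	return 0;
 out_irq:
	free_irq(irq, dev);
 out_put:
	scsi_host_put(sh);
 out_region:
	release_region(port, IM_N_IO_PORT);
	return err;
}
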
@@ -1805,33 +1706,24 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
int current_ldn;
int id, lun;
int target;
- int host_index;
int max_pun;
int i;
- struct scatterlist *sl;
+ struct scatterlist *sg;
shpnt = cmd->device->host;
- /* search for the right hostadapter */
- for (host_index = 0; hosts[host_index] && hosts[host_index]->host_no != shpnt->host_no; host_index++);
- if (!hosts[host_index]) { /* invalid hostadapter descriptor address */
- cmd->result = DID_NO_CONNECT << 16;
- if (done)
- done(cmd);
- return 0;
- }
- max_pun = subsystem_maxid(host_index);
+ max_pun = subsystem_maxid(shpnt);
if (ibm_ansi_order) {
target = max_pun - 1 - cmd->device->id;
- if ((target <= subsystem_pun(host_index)) && (cmd->device->id <= subsystem_pun(host_index)))
+ if ((target <= subsystem_pun(shpnt)) && (cmd->device->id <= subsystem_pun(shpnt)))
target--;
- else if ((target >= subsystem_pun(host_index)) && (cmd->device->id >= subsystem_pun(host_index)))
+ else if ((target >= subsystem_pun(shpnt)) && (cmd->device->id >= subsystem_pun(shpnt)))
target++;
} else
target = cmd->device->id;
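
The ibm_ansi_order branch above reverses the Linux id before use and steps over the adapter's own PUN. A self-contained worked example, with map_target() as an invented wrapper around the same arithmetic: for max_pun = 8 and the subsystem at pun 7, Linux id 0 maps to pun 6, id 1 to pun 5, and so on down to id 6 mapping to pun 0.

/* User-space model of the ANSI-order target mapping above. */
#include <stdio.h>

static int map_target(int id, int max_pun, int subsystem_pun, int ansi_order)
{
	int target;

	if (!ansi_order)
		return id;
	target = max_pun - 1 - id;
	if (target <= subsystem_pun && id <= subsystem_pun)
		target--;		/* skip over the adapter's own pun */
	else if (target >= subsystem_pun && id >= subsystem_pun)
		target++;
	return target;
}

int main(void)
{
	int id;

	for (id = 0; id < 7; id++)
		printf("linux id %d -> pun %d\n", id, map_target(id, 8, 7, 1));
	return 0;
}
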
/* if (target,lun) is NO LUN or not existing at all, return error */
- if ((get_scsi(host_index)[target][cmd->device->lun] == TYPE_NO_LUN) || (get_scsi(host_index)[target][cmd->device->lun] == TYPE_NO_DEVICE)) {
+ if ((get_scsi(shpnt)[target][cmd->device->lun] == TYPE_NO_LUN) || (get_scsi(shpnt)[target][cmd->device->lun] == TYPE_NO_DEVICE)) {
cmd->result = DID_NO_CONNECT << 16;
if (done)
done(cmd);
@@ -1839,16 +1731,16 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
}
/*if (target,lun) unassigned, do further checks... */
- ldn = get_ldn(host_index)[target][cmd->device->lun];
+ ldn = get_ldn(shpnt)[target][cmd->device->lun];
if (ldn >= MAX_LOG_DEV) { /* on invalid ldn do special stuff */
if (ldn > MAX_LOG_DEV) { /* dynamical remapping if ldn unassigned */
- current_ldn = next_ldn(host_index); /* stop-value for one circle */
- while (ld(host_index)[next_ldn(host_index)].cmd) { /* search for a occupied, but not in */
+ current_ldn = next_ldn(shpnt); /* stop-value for one circle */
+ while (ld(shpnt)[next_ldn(shpnt)].cmd) { /* search for an occupied, but not in */
/* command-processing ldn. */
- next_ldn(host_index)++;
- if (next_ldn(host_index) >= MAX_LOG_DEV)
- next_ldn(host_index) = 7;
- if (current_ldn == next_ldn(host_index)) { /* One circle done ? */
+ next_ldn(shpnt)++;
+ if (next_ldn(shpnt) >= MAX_LOG_DEV)
+ next_ldn(shpnt) = 7;
+ if (current_ldn == next_ldn(shpnt)) { /* One circle done ? */
/* no non-processing ldn found */
scmd_printk(KERN_WARNING, cmd,
"IBM MCA SCSI: Cannot assign SCSI-device dynamically!\n"
@@ -1864,56 +1756,56 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
/* unmap non-processing ldn */
for (id = 0; id < max_pun; id++)
for (lun = 0; lun < 8; lun++) {
- if (get_ldn(host_index)[id][lun] == next_ldn(host_index)) {
- get_ldn(host_index)[id][lun] = TYPE_NO_DEVICE;
- get_scsi(host_index)[id][lun] = TYPE_NO_DEVICE;
+ if (get_ldn(shpnt)[id][lun] == next_ldn(shpnt)) {
+ get_ldn(shpnt)[id][lun] = TYPE_NO_DEVICE;
+ get_scsi(shpnt)[id][lun] = TYPE_NO_DEVICE;
/* unmap entry */
}
}
/* set reduced interrupt_handler-mode for checking */
- local_checking_phase_flag(host_index) = 1;
+ local_checking_phase_flag(shpnt) = 1;
/* map found ldn to pun,lun */
- get_ldn(host_index)[target][cmd->device->lun] = next_ldn(host_index);
+ get_ldn(shpnt)[target][cmd->device->lun] = next_ldn(shpnt);
/* change ldn to the right value, that is now next_ldn */
- ldn = next_ldn(host_index);
+ ldn = next_ldn(shpnt);
/* unassign all ldns (pun,lun,ldn does not matter for remove) */
- immediate_assign(host_index, 0, 0, 0, REMOVE_LDN);
+ immediate_assign(shpnt, 0, 0, 0, REMOVE_LDN);
/* set only LDN for remapped device */
- immediate_assign(host_index, target, cmd->device->lun, ldn, SET_LDN);
+ immediate_assign(shpnt, target, cmd->device->lun, ldn, SET_LDN);
/* get device information for ld[ldn] */
- if (device_exists(host_index, ldn, &ld(host_index)[ldn].block_length, &ld(host_index)[ldn].device_type)) {
- ld(host_index)[ldn].cmd = NULL; /* To prevent panic set 0, because
+ if (device_exists(shpnt, ldn, &ld(shpnt)[ldn].block_length, &ld(shpnt)[ldn].device_type)) {
+ ld(shpnt)[ldn].cmd = NULL; /* To prevent panic set 0, because
devices that were not assigned,
should have nothing in progress. */
- get_scsi(host_index)[target][cmd->device->lun] = ld(host_index)[ldn].device_type;
+ get_scsi(shpnt)[target][cmd->device->lun] = ld(shpnt)[ldn].device_type;
/* increase assignment counters for statistics in /proc */
- IBM_DS(host_index).dynamical_assignments++;
- IBM_DS(host_index).ldn_assignments[ldn]++;
+ IBM_DS(shpnt).dynamical_assignments++;
+ IBM_DS(shpnt).ldn_assignments[ldn]++;
} else
/* panic here, because a device, found at boottime has
vanished */
panic("IBM MCA SCSI: ldn=0x%x, SCSI-device on (%d,%d) vanished!\n", ldn, target, cmd->device->lun);
/* unassign again all ldns (pun,lun,ldn does not matter for remove) */
- immediate_assign(host_index, 0, 0, 0, REMOVE_LDN);
+ immediate_assign(shpnt, 0, 0, 0, REMOVE_LDN);
/* remap all ldns, as written in the pun/lun table */
lun = 0;
#ifdef CONFIG_SCSI_MULTI_LUN
for (lun = 0; lun < 8; lun++)
#endif
for (id = 0; id < max_pun; id++) {
- if (get_ldn(host_index)[id][lun] <= MAX_LOG_DEV)
- immediate_assign(host_index, id, lun, get_ldn(host_index)[id][lun], SET_LDN);
+ if (get_ldn(shpnt)[id][lun] <= MAX_LOG_DEV)
+ immediate_assign(shpnt, id, lun, get_ldn(shpnt)[id][lun], SET_LDN);
}
/* set back to normal interrupt_handling */
- local_checking_phase_flag(host_index) = 0;
+ local_checking_phase_flag(shpnt) = 0;
#ifdef IM_DEBUG_PROBE
/* Information on syslog terminal */
printk("IBM MCA SCSI: ldn=0x%x dynamically reassigned to (%d,%d).\n", ldn, target, cmd->device->lun);
#endif
/* increase next_ldn for next dynamical assignment */
- next_ldn(host_index)++;
- if (next_ldn(host_index) >= MAX_LOG_DEV)
- next_ldn(host_index) = 7;
+ next_ldn(shpnt)++;
+ if (next_ldn(shpnt) >= MAX_LOG_DEV)
+ next_ldn(shpnt) = 7;
} else { /* wall against Linux accesses to the subsystem adapter */
cmd->result = DID_BAD_TARGET << 16;
if (done)
@@ -1923,34 +1815,32 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
}
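
The dynamic remapping above hunts for a spare logical device number by cycling next_ldn through 7..MAX_LOG_DEV-1, skipping any ldn that still has a command in flight and giving up after one full circle. A pure user-space model of that round-robin hunt; pick_free_ldn() and cmd_in_flight[] are invented names:

/* User-space model of the round-robin ldn selection used above. */
#include <stdio.h>

#define MAX_LOG_DEV 15

static void *cmd_in_flight[MAX_LOG_DEV];	/* NULL = ldn is free */

static int pick_free_ldn(int *next_ldn)
{
	int start = *next_ldn;

	while (cmd_in_flight[*next_ldn]) {
		(*next_ldn)++;
		if (*next_ldn >= MAX_LOG_DEV)
			*next_ldn = 7;		/* ldns 0-6 stay hardwired */
		if (*next_ldn == start)
			return -1;		/* full circle: nothing free */
	}
	return *next_ldn;
}

int main(void)
{
	int next_ldn = 7, dummy;

	cmd_in_flight[7] = &dummy;		/* pretend ldn 7 is busy */
	printf("picked ldn %d\n", pick_free_ldn(&next_ldn));
	return 0;
}
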
/*verify there is no command already in progress for this log dev */
- if (ld(host_index)[ldn].cmd)
+ if (ld(shpnt)[ldn].cmd)
panic("IBM MCA SCSI: cmd already in progress for this ldn.\n");
/*save done in cmd, and save cmd for the interrupt handler */
cmd->scsi_done = done;
- ld(host_index)[ldn].cmd = cmd;
+ ld(shpnt)[ldn].cmd = cmd;
/*fill scb information independent of the scsi command */
- scb = &(ld(host_index)[ldn].scb);
- ld(host_index)[ldn].tsb.dev_status = 0;
+ scb = &(ld(shpnt)[ldn].scb);
+ ld(shpnt)[ldn].tsb.dev_status = 0;
scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_RETRY_ENABLE;
- scb->tsb_adr = isa_virt_to_bus(&(ld(host_index)[ldn].tsb));
+ scb->tsb_adr = isa_virt_to_bus(&(ld(shpnt)[ldn].tsb));
scsi_cmd = cmd->cmnd[0];
- if (cmd->use_sg) {
- i = cmd->use_sg;
- sl = (struct scatterlist *) (cmd->request_buffer);
- if (i > 16)
- panic("IBM MCA SCSI: scatter-gather list too long.\n");
- while (--i >= 0) {
- ld(host_index)[ldn].sge[i].address = (void *) (isa_page_to_bus(sl[i].page) + sl[i].offset);
- ld(host_index)[ldn].sge[i].byte_length = sl[i].length;
+ if (scsi_sg_count(cmd)) {
+ BUG_ON(scsi_sg_count(cmd) > 16);
+
+ scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
+ ld(shpnt)[ldn].sge[i].address = (void *) (isa_page_to_bus(sg->page) + sg->offset);
+ ld(shpnt)[ldn].sge[i].byte_length = sg->length;
}
scb->enable |= IM_POINTER_TO_LIST;
- scb->sys_buf_adr = isa_virt_to_bus(&(ld(host_index)[ldn].sge[0]));
- scb->sys_buf_length = cmd->use_sg * sizeof(struct im_sge);
+ scb->sys_buf_adr = isa_virt_to_bus(&(ld(shpnt)[ldn].sge[0]));
+ scb->sys_buf_length = scsi_sg_count(cmd) * sizeof(struct im_sge);
} else {
- scb->sys_buf_adr = isa_virt_to_bus(cmd->request_buffer);
+ scb->sys_buf_adr = isa_virt_to_bus(scsi_sglist(cmd));
/* recent Linux SCSI midlayers hand down a 1024-byte buffer for the INQUIRY
* command. Far too much for old PS/2 hardware. */
switch (scsi_cmd) {
@@ -1961,16 +1851,16 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
case REQUEST_SENSE:
case MODE_SENSE:
case MODE_SELECT:
- if (cmd->request_bufflen > 255)
+ if (scsi_bufflen(cmd) > 255)
scb->sys_buf_length = 255;
else
- scb->sys_buf_length = cmd->request_bufflen;
+ scb->sys_buf_length = scsi_bufflen(cmd);
break;
case TEST_UNIT_READY:
scb->sys_buf_length = 0;
break;
default:
- scb->sys_buf_length = cmd->request_bufflen;
+ scb->sys_buf_length = scsi_bufflen(cmd);
break;
}
}
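
The change above is part of the switch to the SCSI data buffer accessors: scsi_sg_count(), scsi_sglist(), scsi_bufflen() and the scsi_for_each_sg() iterator replace direct use of use_sg, request_buffer and request_bufflen. A hedged sketch of the pattern in isolation; struct example_sge and setup_sge() are invented stand-ins for the driver's im_sge bookkeeping, and the kernel headers of this era (struct scatterlist still has a .page member) are assumed:

/* Sketch only: filling a private S/G table via the data buffer accessors. */
struct example_sge {			/* stand-in for the driver's im_sge */
	void *address;
	unsigned long byte_length;
};

static void setup_sge(struct scsi_cmnd *cmd, struct example_sge *sge)
{
	struct scatterlist *sg;
	int i;

	if (scsi_sg_count(cmd)) {
		/* scatter-gather: walk the list through the accessor macro */
		scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
			sge[i].address = (void *)(isa_page_to_bus(sg->page) + sg->offset);
			sge[i].byte_length = sg->length;
		}
	} else {
		/* single buffer: scsi_sglist()/scsi_bufflen() replace the old
		 * request_buffer/request_bufflen fields */
		sge[0].address = (void *)isa_virt_to_bus(scsi_sglist(cmd));
		sge[0].byte_length = scsi_bufflen(cmd);
	}
}
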
@@ -1982,16 +1872,16 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
/* for specific device-type debugging: */
#ifdef IM_DEBUG_CMD_SPEC_DEV
- if (ld(host_index)[ldn].device_type == IM_DEBUG_CMD_DEVICE)
- printk("(SCSI-device-type=0x%x) issue scsi cmd=%02x to ldn=%d\n", ld(host_index)[ldn].device_type, scsi_cmd, ldn);
+ if (ld(shpnt)[ldn].device_type == IM_DEBUG_CMD_DEVICE)
+ printk("(SCSI-device-type=0x%x) issue scsi cmd=%02x to ldn=%d\n", ld(shpnt)[ldn].device_type, scsi_cmd, ldn);
#endif
/* for possible panics store current command */
- last_scsi_command(host_index)[ldn] = scsi_cmd;
- last_scsi_type(host_index)[ldn] = IM_SCB;
+ last_scsi_command(shpnt)[ldn] = scsi_cmd;
+ last_scsi_type(shpnt)[ldn] = IM_SCB;
/* update statistical info */
- IBM_DS(host_index).total_accesses++;
- IBM_DS(host_index).ldn_access[ldn]++;
+ IBM_DS(shpnt).total_accesses++;
+ IBM_DS(shpnt).ldn_access[ldn]++;
switch (scsi_cmd) {
case READ_6:
@@ -2003,17 +1893,17 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
/* Distinguish between disk and other devices. Only disks (that are the
most frequently accessed devices) should be supported by the
IBM-SCSI-Subsystem commands. */
- switch (ld(host_index)[ldn].device_type) {
+ switch (ld(shpnt)[ldn].device_type) {
case TYPE_DISK: /* for harddisks enter here ... */
case TYPE_MOD: /* ... try it also for MO-drives (send flames as */
/* you like, if this won't work.) */
if (scsi_cmd == READ_6 || scsi_cmd == READ_10 || scsi_cmd == READ_12) {
/* read command preparations */
scb->enable |= IM_READ_CONTROL;
- IBM_DS(host_index).ldn_read_access[ldn]++; /* increase READ-access on ldn stat. */
+ IBM_DS(shpnt).ldn_read_access[ldn]++; /* increase READ-access on ldn stat. */
scb->command = IM_READ_DATA_CMD | IM_NO_DISCONNECT;
} else { /* write command preparations */
- IBM_DS(host_index).ldn_write_access[ldn]++; /* increase write-count on ldn stat. */
+ IBM_DS(shpnt).ldn_write_access[ldn]++; /* increase write-count on ldn stat. */
scb->command = IM_WRITE_DATA_CMD | IM_NO_DISCONNECT;
}
if (scsi_cmd == READ_6 || scsi_cmd == WRITE_6) {
@@ -2023,9 +1913,9 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
scb->u1.log_blk_adr = (((unsigned) cmd->cmnd[5]) << 0) | (((unsigned) cmd->cmnd[4]) << 8) | (((unsigned) cmd->cmnd[3]) << 16) | (((unsigned) cmd->cmnd[2]) << 24);
scb->u2.blk.count = (((unsigned) cmd->cmnd[8]) << 0) | (((unsigned) cmd->cmnd[7]) << 8);
}
- last_scsi_logical_block(host_index)[ldn] = scb->u1.log_blk_adr;
- last_scsi_blockcount(host_index)[ldn] = scb->u2.blk.count;
- scb->u2.blk.length = ld(host_index)[ldn].block_length;
+ last_scsi_logical_block(shpnt)[ldn] = scb->u1.log_blk_adr;
+ last_scsi_blockcount(shpnt)[ldn] = scb->u2.blk.count;
+ scb->u2.blk.length = ld(shpnt)[ldn].block_length;
break;
/* for other devices, enter here. Other types are not known by
Linux! TYPE_NO_LUN is forbidden as valid device. */
@@ -2046,14 +1936,14 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
scb->enable |= IM_BYPASS_BUFFER;
scb->u1.scsi_cmd_length = cmd->cmd_len;
memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
- last_scsi_type(host_index)[ldn] = IM_LONG_SCB;
+ last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
/* Read/write on these non-disk devices is also displayworthy,
so flash-up the LED/display. */
break;
}
break;
case INQUIRY:
- IBM_DS(host_index).ldn_inquiry_access[ldn]++;
+ IBM_DS(shpnt).ldn_inquiry_access[ldn]++;
scb->command = IM_DEVICE_INQUIRY_CMD;
scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
scb->u1.log_blk_adr = 0;
@@ -2064,7 +1954,7 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
scb->u1.log_blk_adr = 0;
scb->u1.scsi_cmd_length = 6;
memcpy(scb->u2.scsi_command, cmd->cmnd, 6);
- last_scsi_type(host_index)[ldn] = IM_LONG_SCB;
+ last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
break;
case READ_CAPACITY:
/* the length of system memory buffer must be exactly 8 bytes */
@@ -2081,12 +1971,12 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
/* Commands that need write-only-mode (system -> device): */
case MODE_SELECT:
case MODE_SELECT_10:
- IBM_DS(host_index).ldn_modeselect_access[ldn]++;
+ IBM_DS(shpnt).ldn_modeselect_access[ldn]++;
scb->command = IM_OTHER_SCSI_CMD_CMD;
scb->enable |= IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER; /*Select needs WRITE-enabled */
scb->u1.scsi_cmd_length = cmd->cmd_len;
memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
- last_scsi_type(host_index)[ldn] = IM_LONG_SCB;
+ last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
break;
/* For other commands, read-only is useful. Most other commands are
running without an input-data-block. */
@@ -2095,19 +1985,19 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
scb->u1.scsi_cmd_length = cmd->cmd_len;
memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
- last_scsi_type(host_index)[ldn] = IM_LONG_SCB;
+ last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
break;
}
/*issue scb command, and return */
if (++disk_rw_in_progress == 1)
PS2_DISK_LED_ON(shpnt->host_no, target);
- if (last_scsi_type(host_index)[ldn] == IM_LONG_SCB) {
- issue_cmd(host_index, isa_virt_to_bus(scb), IM_LONG_SCB | ldn);
- IBM_DS(host_index).long_scbs++;
+ if (last_scsi_type(shpnt)[ldn] == IM_LONG_SCB) {
+ issue_cmd(shpnt, isa_virt_to_bus(scb), IM_LONG_SCB | ldn);
+ IBM_DS(shpnt).long_scbs++;
} else {
- issue_cmd(host_index, isa_virt_to_bus(scb), IM_SCB | ldn);
- IBM_DS(host_index).scbs++;
+ issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
+ IBM_DS(shpnt).scbs++;
}
return 0;
}
@@ -2122,7 +2012,6 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
unsigned int ldn;
void (*saved_done) (Scsi_Cmnd *);
int target;
- int host_index;
int max_pun;
unsigned long imm_command;
@@ -2131,35 +2020,23 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
#endif
shpnt = cmd->device->host;
- /* search for the right hostadapter */
- for (host_index = 0; hosts[host_index] && hosts[host_index]->host_no != shpnt->host_no; host_index++);
- if (!hosts[host_index]) { /* invalid hostadapter descriptor address */
- cmd->result = DID_NO_CONNECT << 16;
- if (cmd->scsi_done)
- (cmd->scsi_done) (cmd);
- shpnt = cmd->device->host;
-#ifdef IM_DEBUG_PROBE
- printk(KERN_DEBUG "IBM MCA SCSI: Abort adapter selection failed!\n");
-#endif
- return SUCCESS;
- }
- max_pun = subsystem_maxid(host_index);
+ max_pun = subsystem_maxid(shpnt);
if (ibm_ansi_order) {
target = max_pun - 1 - cmd->device->id;
- if ((target <= subsystem_pun(host_index)) && (cmd->device->id <= subsystem_pun(host_index)))
+ if ((target <= subsystem_pun(shpnt)) && (cmd->device->id <= subsystem_pun(shpnt)))
target--;
- else if ((target >= subsystem_pun(host_index)) && (cmd->device->id >= subsystem_pun(host_index)))
+ else if ((target >= subsystem_pun(shpnt)) && (cmd->device->id >= subsystem_pun(shpnt)))
target++;
} else
target = cmd->device->id;
/* get logical device number, and disable system interrupts */
printk(KERN_WARNING "IBM MCA SCSI: Sending abort to device pun=%d, lun=%d.\n", target, cmd->device->lun);
- ldn = get_ldn(host_index)[target][cmd->device->lun];
+ ldn = get_ldn(shpnt)[target][cmd->device->lun];
/*if cmd for this ldn has already finished, no need to abort */
- if (!ld(host_index)[ldn].cmd) {
+ if (!ld(shpnt)[ldn].cmd) {
return SUCCESS;
}
@@ -2170,20 +2047,20 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
saved_done = cmd->scsi_done;
cmd->scsi_done = internal_done;
cmd->SCp.Status = 0;
- last_scsi_command(host_index)[ldn] = IM_ABORT_IMM_CMD;
- last_scsi_type(host_index)[ldn] = IM_IMM_CMD;
- imm_command = inl(IM_CMD_REG(host_index));
+ last_scsi_command(shpnt)[ldn] = IM_ABORT_IMM_CMD;
+ last_scsi_type(shpnt)[ldn] = IM_IMM_CMD;
+ imm_command = inl(IM_CMD_REG(shpnt));
imm_command &= (unsigned long) (0xffff0000); /* mask reserved stuff */
imm_command |= (unsigned long) (IM_ABORT_IMM_CMD);
/* must wait for attention reg not busy */
/* FIXME - timeout, politeness */
while (1) {
- if (!(inb(IM_STAT_REG(host_index)) & IM_BUSY))
+ if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
break;
}
/* write registers and enable system interrupts */
- outl(imm_command, IM_CMD_REG(host_index));
- outb(IM_IMM_CMD | ldn, IM_ATTN_REG(host_index));
+ outl(imm_command, IM_CMD_REG(shpnt));
+ outb(IM_IMM_CMD | ldn, IM_ATTN_REG(shpnt));
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Abort queued to adapter...\n");
#endif
@@ -2202,7 +2079,7 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
cmd->result |= DID_ABORT << 16;
if (cmd->scsi_done)
(cmd->scsi_done) (cmd);
- ld(host_index)[ldn].cmd = NULL;
+ ld(shpnt)[ldn].cmd = NULL;
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Abort finished with success.\n");
#endif
@@ -2211,7 +2088,7 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
cmd->result |= DID_NO_CONNECT << 16;
if (cmd->scsi_done)
(cmd->scsi_done) (cmd);
- ld(host_index)[ldn].cmd = NULL;
+ ld(shpnt)[ldn].cmd = NULL;
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Abort failed.\n");
#endif
@@ -2236,71 +2113,65 @@ static int __ibmmca_host_reset(Scsi_Cmnd * cmd)
struct Scsi_Host *shpnt;
Scsi_Cmnd *cmd_aid;
int ticks, i;
- int host_index;
unsigned long imm_command;
BUG_ON(cmd == NULL);
ticks = IM_RESET_DELAY * HZ;
shpnt = cmd->device->host;
- /* search for the right hostadapter */
- for (host_index = 0; hosts[host_index] && hosts[host_index]->host_no != shpnt->host_no; host_index++);
-
- if (!hosts[host_index]) /* invalid hostadapter descriptor address */
- return FAILED;
- if (local_checking_phase_flag(host_index)) {
+ if (local_checking_phase_flag(shpnt)) {
printk(KERN_WARNING "IBM MCA SCSI: unable to reset while checking devices.\n");
return FAILED;
}
/* issue reset immediate command to subsystem, and wait for interrupt */
printk("IBM MCA SCSI: resetting all devices.\n");
- reset_status(host_index) = IM_RESET_IN_PROGRESS;
- last_scsi_command(host_index)[0xf] = IM_RESET_IMM_CMD;
- last_scsi_type(host_index)[0xf] = IM_IMM_CMD;
- imm_command = inl(IM_CMD_REG(host_index));
+ reset_status(shpnt) = IM_RESET_IN_PROGRESS;
+ last_scsi_command(shpnt)[0xf] = IM_RESET_IMM_CMD;
+ last_scsi_type(shpnt)[0xf] = IM_IMM_CMD;
+ imm_command = inl(IM_CMD_REG(shpnt));
imm_command &= (unsigned long) (0xffff0000); /* mask reserved stuff */
imm_command |= (unsigned long) (IM_RESET_IMM_CMD);
/* must wait for attention reg not busy */
while (1) {
- if (!(inb(IM_STAT_REG(host_index)) & IM_BUSY))
+ if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
break;
spin_unlock_irq(shpnt->host_lock);
yield();
spin_lock_irq(shpnt->host_lock);
}
/*write registers and enable system interrupts */
- outl(imm_command, IM_CMD_REG(host_index));
- outb(IM_IMM_CMD | 0xf, IM_ATTN_REG(host_index));
+ outl(imm_command, IM_CMD_REG(shpnt));
+ outb(IM_IMM_CMD | 0xf, IM_ATTN_REG(shpnt));
/* wait for interrupt finished or intr_stat register to be set, as the
* interrupt will not be executed, while we are in here! */
/* FIXME: This is really really icky we so want a sleeping version of this ! */
- while (reset_status(host_index) == IM_RESET_IN_PROGRESS && --ticks && ((inb(IM_INTR_REG(host_index)) & 0x8f) != 0x8f)) {
+ while (reset_status(shpnt) == IM_RESET_IN_PROGRESS && --ticks && ((inb(IM_INTR_REG(shpnt)) & 0x8f) != 0x8f)) {
udelay((1 + 999 / HZ) * 1000);
barrier();
}
/* if reset did not complete, just return an error */
if (!ticks) {
printk(KERN_ERR "IBM MCA SCSI: reset did not complete within %d seconds.\n", IM_RESET_DELAY);
- reset_status(host_index) = IM_RESET_FINISHED_FAIL;
+ reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
return FAILED;
}
- if ((inb(IM_INTR_REG(host_index)) & 0x8f) == 0x8f) {
+ if ((inb(IM_INTR_REG(shpnt)) & 0x8f) == 0x8f) {
/* analysis done by this routine and not by the intr-routine */
- if (inb(IM_INTR_REG(host_index)) == 0xaf)
- reset_status(host_index) = IM_RESET_FINISHED_OK_NO_INT;
- else if (inb(IM_INTR_REG(host_index)) == 0xcf)
- reset_status(host_index) = IM_RESET_FINISHED_FAIL;
+ if (inb(IM_INTR_REG(shpnt)) == 0xaf)
+ reset_status(shpnt) = IM_RESET_FINISHED_OK_NO_INT;
+ else if (inb(IM_INTR_REG(shpnt)) == 0xcf)
+ reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
else /* failed, 4get it */
- reset_status(host_index) = IM_RESET_NOT_IN_PROGRESS_NO_INT;
- outb(IM_EOI | 0xf, IM_ATTN_REG(host_index));
+ reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS_NO_INT;
+ outb(IM_EOI | 0xf, IM_ATTN_REG(shpnt));
}
/* if reset failed, just return an error */
- if (reset_status(host_index) == IM_RESET_FINISHED_FAIL) {
+ if (reset_status(shpnt) == IM_RESET_FINISHED_FAIL) {
printk(KERN_ERR "IBM MCA SCSI: reset failed.\n");
return FAILED;
}
@@ -2308,9 +2179,9 @@ static int __ibmmca_host_reset(Scsi_Cmnd * cmd)
/* so reset finished ok - call outstanding done's, and return success */
printk(KERN_INFO "IBM MCA SCSI: Reset successfully completed.\n");
for (i = 0; i < MAX_LOG_DEV; i++) {
- cmd_aid = ld(host_index)[i].cmd;
+ cmd_aid = ld(shpnt)[i].cmd;
if (cmd_aid && cmd_aid->scsi_done) {
- ld(host_index)[i].cmd = NULL;
+ ld(shpnt)[i].cmd = NULL;
cmd_aid->result = DID_RESET << 16;
}
}
@@ -2351,46 +2222,46 @@ static int ibmmca_biosparam(struct scsi_device *sdev, struct block_device *bdev,
}
/* calculate percentage of total accesses on a ldn */
-static int ldn_access_load(int host_index, int ldn)
+static int ldn_access_load(struct Scsi_Host *shpnt, int ldn)
{
- if (IBM_DS(host_index).total_accesses == 0)
+ if (IBM_DS(shpnt).total_accesses == 0)
return (0);
- if (IBM_DS(host_index).ldn_access[ldn] == 0)
+ if (IBM_DS(shpnt).ldn_access[ldn] == 0)
return (0);
- return (IBM_DS(host_index).ldn_access[ldn] * 100) / IBM_DS(host_index).total_accesses;
+ return (IBM_DS(shpnt).ldn_access[ldn] * 100) / IBM_DS(shpnt).total_accesses;
}
/* calculate total amount of r/w-accesses */
-static int ldn_access_total_read_write(int host_index)
+static int ldn_access_total_read_write(struct Scsi_Host *shpnt)
{
int a;
int i;
a = 0;
for (i = 0; i <= MAX_LOG_DEV; i++)
- a += IBM_DS(host_index).ldn_read_access[i] + IBM_DS(host_index).ldn_write_access[i];
+ a += IBM_DS(shpnt).ldn_read_access[i] + IBM_DS(shpnt).ldn_write_access[i];
return (a);
}
-static int ldn_access_total_inquiry(int host_index)
+static int ldn_access_total_inquiry(struct Scsi_Host *shpnt)
{
int a;
int i;
a = 0;
for (i = 0; i <= MAX_LOG_DEV; i++)
- a += IBM_DS(host_index).ldn_inquiry_access[i];
+ a += IBM_DS(shpnt).ldn_inquiry_access[i];
return (a);
}
-static int ldn_access_total_modeselect(int host_index)
+static int ldn_access_total_modeselect(struct Scsi_Host *shpnt)
{
int a;
int i;
a = 0;
for (i = 0; i <= MAX_LOG_DEV; i++)
- a += IBM_DS(host_index).ldn_modeselect_access[i];
+ a += IBM_DS(shpnt).ldn_modeselect_access[i];
return (a);
}
@@ -2398,19 +2269,14 @@ static int ldn_access_total_modeselect(int host_index)
static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout)
{
int len = 0;
- int i, id, lun, host_index;
+ int i, id, lun;
unsigned long flags;
int max_pun;
- for (i = 0; hosts[i] && hosts[i] != shpnt; i++);
- spin_lock_irqsave(hosts[i]->host_lock, flags); /* Check it */
- host_index = i;
- if (!shpnt) {
- len += sprintf(buffer + len, "\nIBM MCA SCSI: Can't find adapter");
- return len;
- }
- max_pun = subsystem_maxid(host_index);
+ spin_lock_irqsave(shpnt->host_lock, flags); /* Check it */
+
+ max_pun = subsystem_maxid(shpnt);
len += sprintf(buffer + len, "\n IBM-SCSI-Subsystem-Linux-Driver, Version %s\n\n\n", IBMMCA_SCSI_DRIVER_VERSION);
len += sprintf(buffer + len, " SCSI Access-Statistics:\n");
@@ -2421,40 +2287,40 @@ static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start,
len += sprintf(buffer + len, " Multiple LUN probing.....: No\n");
#endif
len += sprintf(buffer + len, " This Hostnumber..........: %d\n", shpnt->host_no);
- len += sprintf(buffer + len, " Base I/O-Port............: 0x%x\n", (unsigned int) (IM_CMD_REG(host_index)));
+ len += sprintf(buffer + len, " Base I/O-Port............: 0x%x\n", (unsigned int) (IM_CMD_REG(shpnt)));
len += sprintf(buffer + len, " (Shared) IRQ.............: %d\n", IM_IRQ);
- len += sprintf(buffer + len, " Total Interrupts.........: %d\n", IBM_DS(host_index).total_interrupts);
- len += sprintf(buffer + len, " Total SCSI Accesses......: %d\n", IBM_DS(host_index).total_accesses);
- len += sprintf(buffer + len, " Total short SCBs.........: %d\n", IBM_DS(host_index).scbs);
- len += sprintf(buffer + len, " Total long SCBs..........: %d\n", IBM_DS(host_index).long_scbs);
- len += sprintf(buffer + len, " Total SCSI READ/WRITE..: %d\n", ldn_access_total_read_write(host_index));
- len += sprintf(buffer + len, " Total SCSI Inquiries...: %d\n", ldn_access_total_inquiry(host_index));
- len += sprintf(buffer + len, " Total SCSI Modeselects.: %d\n", ldn_access_total_modeselect(host_index));
- len += sprintf(buffer + len, " Total SCSI other cmds..: %d\n", IBM_DS(host_index).total_accesses - ldn_access_total_read_write(host_index)
- - ldn_access_total_modeselect(host_index)
- - ldn_access_total_inquiry(host_index));
- len += sprintf(buffer + len, " Total SCSI command fails.: %d\n\n", IBM_DS(host_index).total_errors);
+ len += sprintf(buffer + len, " Total Interrupts.........: %d\n", IBM_DS(shpnt).total_interrupts);
+ len += sprintf(buffer + len, " Total SCSI Accesses......: %d\n", IBM_DS(shpnt).total_accesses);
+ len += sprintf(buffer + len, " Total short SCBs.........: %d\n", IBM_DS(shpnt).scbs);
+ len += sprintf(buffer + len, " Total long SCBs..........: %d\n", IBM_DS(shpnt).long_scbs);
+ len += sprintf(buffer + len, " Total SCSI READ/WRITE..: %d\n", ldn_access_total_read_write(shpnt));
+ len += sprintf(buffer + len, " Total SCSI Inquiries...: %d\n", ldn_access_total_inquiry(shpnt));
+ len += sprintf(buffer + len, " Total SCSI Modeselects.: %d\n", ldn_access_total_modeselect(shpnt));
+ len += sprintf(buffer + len, " Total SCSI other cmds..: %d\n", IBM_DS(shpnt).total_accesses - ldn_access_total_read_write(shpnt)
+ - ldn_access_total_modeselect(shpnt)
+ - ldn_access_total_inquiry(shpnt));
+ len += sprintf(buffer + len, " Total SCSI command fails.: %d\n\n", IBM_DS(shpnt).total_errors);
len += sprintf(buffer + len, " Logical-Device-Number (LDN) Access-Statistics:\n");
len += sprintf(buffer + len, " LDN | Accesses [%%] | READ | WRITE | ASSIGNMENTS\n");
len += sprintf(buffer + len, " -----|--------------|-----------|-----------|--------------\n");
for (i = 0; i <= MAX_LOG_DEV; i++)
- len += sprintf(buffer + len, " %2X | %3d | %8d | %8d | %8d\n", i, ldn_access_load(host_index, i), IBM_DS(host_index).ldn_read_access[i], IBM_DS(host_index).ldn_write_access[i], IBM_DS(host_index).ldn_assignments[i]);
+ len += sprintf(buffer + len, " %2X | %3d | %8d | %8d | %8d\n", i, ldn_access_load(shpnt, i), IBM_DS(shpnt).ldn_read_access[i], IBM_DS(shpnt).ldn_write_access[i], IBM_DS(shpnt).ldn_assignments[i]);
len += sprintf(buffer + len, " -----------------------------------------------------------\n\n");
len += sprintf(buffer + len, " Dynamical-LDN-Assignment-Statistics:\n");
- len += sprintf(buffer + len, " Number of physical SCSI-devices..: %d (+ Adapter)\n", IBM_DS(host_index).total_scsi_devices);
- len += sprintf(buffer + len, " Dynamical Assignment necessary...: %s\n", IBM_DS(host_index).dyn_flag ? "Yes" : "No ");
- len += sprintf(buffer + len, " Next LDN to be assigned..........: 0x%x\n", next_ldn(host_index));
- len += sprintf(buffer + len, " Dynamical assignments done yet...: %d\n", IBM_DS(host_index).dynamical_assignments);
+ len += sprintf(buffer + len, " Number of physical SCSI-devices..: %d (+ Adapter)\n", IBM_DS(shpnt).total_scsi_devices);
+ len += sprintf(buffer + len, " Dynamical Assignment necessary...: %s\n", IBM_DS(shpnt).dyn_flag ? "Yes" : "No ");
+ len += sprintf(buffer + len, " Next LDN to be assigned..........: 0x%x\n", next_ldn(shpnt));
+ len += sprintf(buffer + len, " Dynamical assignments done yet...: %d\n", IBM_DS(shpnt).dynamical_assignments);
len += sprintf(buffer + len, "\n Current SCSI-Device-Mapping:\n");
len += sprintf(buffer + len, " Physical SCSI-Device Map Logical SCSI-Device Map\n");
len += sprintf(buffer + len, " ID\\LUN 0 1 2 3 4 5 6 7 ID\\LUN 0 1 2 3 4 5 6 7\n");
for (id = 0; id < max_pun; id++) {
len += sprintf(buffer + len, " %2d ", id);
for (lun = 0; lun < 8; lun++)
- len += sprintf(buffer + len, "%2s ", ti_p(get_scsi(host_index)[id][lun]));
+ len += sprintf(buffer + len, "%2s ", ti_p(get_scsi(shpnt)[id][lun]));
len += sprintf(buffer + len, " %2d ", id);
for (lun = 0; lun < 8; lun++)
- len += sprintf(buffer + len, "%2s ", ti_l(get_ldn(host_index)[id][lun]));
+ len += sprintf(buffer + len, "%2s ", ti_l(get_ldn(shpnt)[id][lun]));
len += sprintf(buffer + len, "\n");
}
@@ -2488,20 +2354,31 @@ static int option_setup(char *str)
__setup("ibmmcascsi=", option_setup);
-static struct scsi_host_template driver_template = {
- .proc_name = "ibmmca",
- .proc_info = ibmmca_proc_info,
- .name = "IBM SCSI-Subsystem",
- .detect = ibmmca_detect,
- .release = ibmmca_release,
- .queuecommand = ibmmca_queuecommand,
- .eh_abort_handler = ibmmca_abort,
- .eh_host_reset_handler = ibmmca_host_reset,
- .bios_param = ibmmca_biosparam,
- .can_queue = 16,
- .this_id = 7,
- .sg_tablesize = 16,
- .cmd_per_lun = 1,
- .use_clustering = ENABLE_CLUSTERING,
+static struct mca_driver ibmmca_driver = {
+ .id_table = ibmmca_id_table,
+ .driver = {
+ .name = "ibmmca",
+ .bus = &mca_bus_type,
+ .probe = ibmmca_probe,
+ .remove = __devexit_p(ibmmca_remove),
+ },
};
-#include "scsi_module.c"
+
+static int __init ibmmca_init(void)
+{
+#ifdef MODULE
+ /* If the driver is run as module, read from conf.modules or cmd-line */
+ if (boot_options)
+ option_setup(boot_options);
+#endif
+
+ return mca_register_driver_integrated(&ibmmca_driver, MCA_INTEGSCSI);
+}
+
+static void __exit ibmmca_exit(void)
+{
+ mca_unregister_driver(&ibmmca_driver);
+}
+
+module_init(ibmmca_init);
+module_exit(ibmmca_exit);
diff --git a/drivers/scsi/ibmmca.h b/drivers/scsi/ibmmca.h
deleted file mode 100644
index 017ee2fa6d6..00000000000
--- a/drivers/scsi/ibmmca.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Low Level Driver for the IBM Microchannel SCSI Subsystem
- * (Headerfile, see Documentation/scsi/ibmmca.txt for description of the
- * IBM MCA SCSI-driver.
- * For use under the GNU General Public License within the Linux-kernel project.
- * This include file works only correctly with kernel 2.4.0 or higher!!! */
-
-#ifndef _IBMMCA_H
-#define _IBMMCA_H
-
-/* Common forward declarations for all Linux-versions: */
-
-/* Interfaces to the midlevel Linux SCSI driver */
-static int ibmmca_detect (struct scsi_host_template *);
-static int ibmmca_release (struct Scsi_Host *);
-static int ibmmca_queuecommand (Scsi_Cmnd *, void (*done) (Scsi_Cmnd *));
-static int ibmmca_abort (Scsi_Cmnd *);
-static int ibmmca_host_reset (Scsi_Cmnd *);
-static int ibmmca_biosparam (struct scsi_device *, struct block_device *, sector_t, int *);
-
-#endif /* _IBMMCA_H */
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index b10eefe735c..5870866abc9 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -173,9 +173,8 @@ static void release_event_pool(struct event_pool *pool,
}
}
if (in_use)
- printk(KERN_WARNING
- "ibmvscsi: releasing event pool with %d "
- "events still in use?\n", in_use);
+ dev_warn(hostdata->dev, "releasing event pool with %d "
+ "events still in use?\n", in_use);
kfree(pool->events);
dma_free_coherent(hostdata->dev,
pool->size * sizeof(*pool->iu_storage),
@@ -210,15 +209,13 @@ static void free_event_struct(struct event_pool *pool,
struct srp_event_struct *evt)
{
if (!valid_event_struct(pool, evt)) {
- printk(KERN_ERR
- "ibmvscsi: Freeing invalid event_struct %p "
- "(not in pool %p)\n", evt, pool->events);
+ dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p "
+ "(not in pool %p)\n", evt, pool->events);
return;
}
if (atomic_inc_return(&evt->free) != 1) {
- printk(KERN_ERR
- "ibmvscsi: Freeing event_struct %p "
- "which is not in use!\n", evt);
+ dev_err(evt->hostdata->dev, "Freeing event_struct %p "
+ "which is not in use!\n", evt);
return;
}
}
@@ -353,20 +350,19 @@ static void unmap_cmd_data(struct srp_cmd *cmd,
}
}
-static int map_sg_list(int num_entries,
- struct scatterlist *sg,
+static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
struct srp_direct_buf *md)
{
int i;
+ struct scatterlist *sg;
u64 total_length = 0;
- for (i = 0; i < num_entries; ++i) {
+ scsi_for_each_sg(cmd, sg, nseg, i) {
struct srp_direct_buf *descr = md + i;
- struct scatterlist *sg_entry = &sg[i];
- descr->va = sg_dma_address(sg_entry);
- descr->len = sg_dma_len(sg_entry);
+ descr->va = sg_dma_address(sg);
+ descr->len = sg_dma_len(sg);
descr->key = 0;
- total_length += sg_dma_len(sg_entry);
+ total_length += sg_dma_len(sg);
}
return total_length;
}
@@ -387,40 +383,37 @@ static int map_sg_data(struct scsi_cmnd *cmd,
int sg_mapped;
u64 total_length = 0;
- struct scatterlist *sg = cmd->request_buffer;
struct srp_direct_buf *data =
(struct srp_direct_buf *) srp_cmd->add_data;
struct srp_indirect_buf *indirect =
(struct srp_indirect_buf *) data;
- sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);
-
- if (sg_mapped == 0)
+ sg_mapped = scsi_dma_map(cmd);
+ if (!sg_mapped)
+ return 1;
+ else if (sg_mapped < 0)
return 0;
+ else if (sg_mapped > SG_ALL) {
+ printk(KERN_ERR
+ "ibmvscsi: More than %d mapped sg entries, got %d\n",
+ SG_ALL, sg_mapped);
+ return 0;
+ }
set_srp_direction(cmd, srp_cmd, sg_mapped);
/* special case; we can use a single direct descriptor */
if (sg_mapped == 1) {
- data->va = sg_dma_address(&sg[0]);
- data->len = sg_dma_len(&sg[0]);
- data->key = 0;
+ map_sg_list(cmd, sg_mapped, data);
return 1;
}
- if (sg_mapped > SG_ALL) {
- printk(KERN_ERR
- "ibmvscsi: More than %d mapped sg entries, got %d\n",
- SG_ALL, sg_mapped);
- return 0;
- }
-
indirect->table_desc.va = 0;
indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
indirect->table_desc.key = 0;
if (sg_mapped <= MAX_INDIRECT_BUFS) {
- total_length = map_sg_list(sg_mapped, sg,
+ total_length = map_sg_list(cmd, sg_mapped,
&indirect->desc_list[0]);
indirect->len = total_length;
return 1;
@@ -429,61 +422,27 @@ static int map_sg_data(struct scsi_cmnd *cmd,
/* get indirect table */
if (!evt_struct->ext_list) {
evt_struct->ext_list = (struct srp_direct_buf *)
- dma_alloc_coherent(dev,
+ dma_alloc_coherent(dev,
SG_ALL * sizeof(struct srp_direct_buf),
&evt_struct->ext_list_token, 0);
if (!evt_struct->ext_list) {
- printk(KERN_ERR
- "ibmvscsi: Can't allocate memory for indirect table\n");
+ sdev_printk(KERN_ERR, cmd->device,
+ "Can't allocate memory for indirect table\n");
return 0;
-
}
}
- total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);
+ total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);
indirect->len = total_length;
indirect->table_desc.va = evt_struct->ext_list_token;
indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
memcpy(indirect->desc_list, evt_struct->ext_list,
MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
-
return 1;
}
/**
- * map_single_data: - Maps memory and initializes memory decriptor fields
- * @cmd: struct scsi_cmnd with the memory to be mapped
- * @srp_cmd: srp_cmd that contains the memory descriptor
- * @dev: device for which to map dma memory
- *
- * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
- * Returns 1 on success.
-*/
-static int map_single_data(struct scsi_cmnd *cmd,
- struct srp_cmd *srp_cmd, struct device *dev)
-{
- struct srp_direct_buf *data =
- (struct srp_direct_buf *) srp_cmd->add_data;
-
- data->va =
- dma_map_single(dev, cmd->request_buffer,
- cmd->request_bufflen,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(data->va)) {
- printk(KERN_ERR
- "ibmvscsi: Unable to map request_buffer for command!\n");
- return 0;
- }
- data->len = cmd->request_bufflen;
- data->key = 0;
-
- set_srp_direction(cmd, srp_cmd, 1);
-
- return 1;
-}
-
-/**
* map_data_for_srp_cmd: - Calls functions to map data for srp cmds
* @cmd: struct scsi_cmnd with the memory to be mapped
* @srp_cmd: srp_cmd that contains the memory descriptor
@@ -503,23 +462,83 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
case DMA_NONE:
return 1;
case DMA_BIDIRECTIONAL:
- printk(KERN_ERR
- "ibmvscsi: Can't map DMA_BIDIRECTIONAL to read/write\n");
+ sdev_printk(KERN_ERR, cmd->device,
+ "Can't map DMA_BIDIRECTIONAL to read/write\n");
return 0;
default:
- printk(KERN_ERR
- "ibmvscsi: Unknown data direction 0x%02x; can't map!\n",
- cmd->sc_data_direction);
+ sdev_printk(KERN_ERR, cmd->device,
+ "Unknown data direction 0x%02x; can't map!\n",
+ cmd->sc_data_direction);
return 0;
}
- if (!cmd->request_buffer)
- return 1;
- if (cmd->use_sg)
- return map_sg_data(cmd, evt_struct, srp_cmd, dev);
- return map_single_data(cmd, srp_cmd, dev);
+ return map_sg_data(cmd, evt_struct, srp_cmd, dev);
}
+/**
+ * purge_requests: Our virtual adapter just shut down. Purge any sent requests
+ * @hostdata: the adapter
+ */
+static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
+{
+ struct srp_event_struct *tmp_evt, *pos;
+ unsigned long flags;
+
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
+ list_del(&tmp_evt->list);
+ del_timer(&tmp_evt->timer);
+ if (tmp_evt->cmnd) {
+ tmp_evt->cmnd->result = (error_code << 16);
+ unmap_cmd_data(&tmp_evt->iu.srp.cmd,
+ tmp_evt,
+ tmp_evt->hostdata->dev);
+ if (tmp_evt->cmnd_done)
+ tmp_evt->cmnd_done(tmp_evt->cmnd);
+ } else if (tmp_evt->done)
+ tmp_evt->done(tmp_evt);
+ free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
+ }
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+}
+
+/**
+ * ibmvscsi_reset_host - Reset the connection to the server
+ * @hostdata: struct ibmvscsi_host_data to reset
+*/
+static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
+{
+ scsi_block_requests(hostdata->host);
+ atomic_set(&hostdata->request_limit, 0);
+
+ purge_requests(hostdata, DID_ERROR);
+ if ((ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata)) ||
+ (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0)) ||
+ (vio_enable_interrupts(to_vio_dev(hostdata->dev)))) {
+ atomic_set(&hostdata->request_limit, -1);
+ dev_err(hostdata->dev, "error after reset\n");
+ }
+
+ scsi_unblock_requests(hostdata->host);
+}
+
+/**
+ * ibmvscsi_timeout - Internal command timeout handler
+ * @evt_struct: struct srp_event_struct that timed out
+ *
+ * Called when an internally generated command times out
+*/
+static void ibmvscsi_timeout(struct srp_event_struct *evt_struct)
+{
+ struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+
+ dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n",
+ evt_struct->iu.srp.cmd.opcode);
+
+ ibmvscsi_reset_host(hostdata);
+}
+
+
/* ------------------------------------------------------------
* Routines for sending and receiving SRPs
*/
@@ -527,12 +546,14 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
* ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
* @evt_struct: evt_struct to be sent
* @hostdata: ibmvscsi_host_data of host
+ * @timeout: timeout in seconds - 0 means do not time command
*
* Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
* Note that this routine assumes that host_lock is held for synchronization
*/
static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
- struct ibmvscsi_host_data *hostdata)
+ struct ibmvscsi_host_data *hostdata,
+ unsigned long timeout)
{
u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
int request_status;
@@ -588,12 +609,20 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
*/
list_add_tail(&evt_struct->list, &hostdata->sent);
+ init_timer(&evt_struct->timer);
+ if (timeout) {
+ evt_struct->timer.data = (unsigned long) evt_struct;
+ evt_struct->timer.expires = jiffies + (timeout * HZ);
+ evt_struct->timer.function = (void (*)(unsigned long))ibmvscsi_timeout;
+ add_timer(&evt_struct->timer);
+ }
+
if ((rc =
ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
list_del(&evt_struct->list);
+ del_timer(&evt_struct->timer);
- printk(KERN_ERR "ibmvscsi: send error %d\n",
- rc);
+ dev_err(hostdata->dev, "send error %d\n", rc);
atomic_inc(&hostdata->request_limit);
goto send_error;
}
@@ -634,9 +663,8 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
if (unlikely(rsp->opcode != SRP_RSP)) {
if (printk_ratelimit())
- printk(KERN_WARNING
- "ibmvscsi: bad SRP RSP type %d\n",
- rsp->opcode);
+ dev_warn(evt_struct->hostdata->dev,
+ "bad SRP RSP type %d\n", rsp->opcode);
}
if (cmnd) {
@@ -650,9 +678,9 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
evt_struct->hostdata->dev);
if (rsp->flags & SRP_RSP_FLAG_DOOVER)
- cmnd->resid = rsp->data_out_res_cnt;
+ scsi_set_resid(cmnd, rsp->data_out_res_cnt);
else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
- cmnd->resid = rsp->data_in_res_cnt;
+ scsi_set_resid(cmnd, rsp->data_in_res_cnt);
}
if (evt_struct->cmnd_done)
@@ -697,7 +725,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
srp_cmd->lun = ((u64) lun) << 48;
if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
- printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
+ sdev_printk(KERN_ERR, cmnd->device, "couldn't convert cmd to srp_cmd\n");
free_event_struct(&hostdata->pool, evt_struct);
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -722,7 +750,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
offsetof(struct srp_indirect_buf, desc_list);
}
- return ibmvscsi_send_srp_event(evt_struct, hostdata);
+ return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
}
/* ------------------------------------------------------------
@@ -744,16 +772,16 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
DMA_BIDIRECTIONAL);
if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
- printk("ibmvscsi: error %d getting adapter info\n",
- evt_struct->xfer_iu->mad.adapter_info.common.status);
+ dev_err(hostdata->dev, "error %d getting adapter info\n",
+ evt_struct->xfer_iu->mad.adapter_info.common.status);
} else {
- printk("ibmvscsi: host srp version: %s, "
- "host partition %s (%d), OS %d, max io %u\n",
- hostdata->madapter_info.srp_version,
- hostdata->madapter_info.partition_name,
- hostdata->madapter_info.partition_number,
- hostdata->madapter_info.os_type,
- hostdata->madapter_info.port_max_txu[0]);
+ dev_info(hostdata->dev, "host srp version: %s, "
+ "host partition %s (%d), OS %d, max io %u\n",
+ hostdata->madapter_info.srp_version,
+ hostdata->madapter_info.partition_name,
+ hostdata->madapter_info.partition_number,
+ hostdata->madapter_info.os_type,
+ hostdata->madapter_info.port_max_txu[0]);
if (hostdata->madapter_info.port_max_txu[0])
hostdata->host->max_sectors =
@@ -761,11 +789,10 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
if (hostdata->madapter_info.os_type == 3 &&
strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
- printk("ibmvscsi: host (Ver. %s) doesn't support large"
- "transfers\n",
- hostdata->madapter_info.srp_version);
- printk("ibmvscsi: limiting scatterlists to %d\n",
- MAX_INDIRECT_BUFS);
+ dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
+ hostdata->madapter_info.srp_version);
+ dev_err(hostdata->dev, "limiting scatterlists to %d\n",
+ MAX_INDIRECT_BUFS);
hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
}
}
@@ -784,19 +811,20 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
{
struct viosrp_adapter_info *req;
struct srp_event_struct *evt_struct;
+ unsigned long flags;
dma_addr_t addr;
evt_struct = get_event_struct(&hostdata->pool);
if (!evt_struct) {
- printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
- "for ADAPTER_INFO_REQ!\n");
+ dev_err(hostdata->dev,
+ "couldn't allocate an event for ADAPTER_INFO_REQ!\n");
return;
}
init_event_struct(evt_struct,
adapter_info_rsp,
VIOSRP_MAD_FORMAT,
- init_timeout * HZ);
+ init_timeout);
req = &evt_struct->iu.mad.adapter_info;
memset(req, 0x00, sizeof(*req));
@@ -809,20 +837,20 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
DMA_BIDIRECTIONAL);
if (dma_mapping_error(req->buffer)) {
- printk(KERN_ERR
- "ibmvscsi: Unable to map request_buffer "
- "for adapter_info!\n");
+ dev_err(hostdata->dev, "Unable to map request_buffer for adapter_info!\n");
free_event_struct(&hostdata->pool, evt_struct);
return;
}
- if (ibmvscsi_send_srp_event(evt_struct, hostdata)) {
- printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n");
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) {
+ dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
dma_unmap_single(hostdata->dev,
addr,
sizeof(hostdata->madapter_info),
DMA_BIDIRECTIONAL);
}
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
};
/**
@@ -839,24 +867,23 @@ static void login_rsp(struct srp_event_struct *evt_struct)
case SRP_LOGIN_RSP: /* it worked! */
break;
case SRP_LOGIN_REJ: /* refused! */
- printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n",
- evt_struct->xfer_iu->srp.login_rej.reason);
+ dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
+ evt_struct->xfer_iu->srp.login_rej.reason);
/* Login failed. */
atomic_set(&hostdata->request_limit, -1);
return;
default:
- printk(KERN_ERR
- "ibmvscsi: Invalid login response typecode 0x%02x!\n",
- evt_struct->xfer_iu->srp.login_rsp.opcode);
+ dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
+ evt_struct->xfer_iu->srp.login_rsp.opcode);
/* Login failed. */
atomic_set(&hostdata->request_limit, -1);
return;
}
- printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");
+ dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
- printk(KERN_ERR "ibmvscsi: Invalid request_limit.\n");
+ dev_err(hostdata->dev, "Invalid request_limit.\n");
/* Now we know what the real request-limit is.
* This value is set rather than added to request_limit because
@@ -885,15 +912,14 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
struct srp_login_req *login;
struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
if (!evt_struct) {
- printk(KERN_ERR
- "ibmvscsi: couldn't allocate an event for login req!\n");
+ dev_err(hostdata->dev, "couldn't allocate an event for login req!\n");
return FAILED;
}
init_event_struct(evt_struct,
login_rsp,
VIOSRP_SRP_FORMAT,
- init_timeout * HZ);
+ init_timeout);
login = &evt_struct->iu.srp.login_req;
memset(login, 0x00, sizeof(struct srp_login_req));
@@ -907,9 +933,9 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
*/
atomic_set(&hostdata->request_limit, 1);
- rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
+ rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
- printk("ibmvscsic: sent SRP login\n");
+ dev_info(hostdata->dev, "sent SRP login\n");
return rc;
};
@@ -958,20 +984,20 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
if (!found_evt) {
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
- return FAILED;
+ return SUCCESS;
}
evt = get_event_struct(&hostdata->pool);
if (evt == NULL) {
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
- printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n");
+ sdev_printk(KERN_ERR, cmd->device, "failed to allocate abort event\n");
return FAILED;
}
init_event_struct(evt,
sync_completion,
VIOSRP_SRP_FORMAT,
- init_timeout * HZ);
+ init_timeout);
tsk_mgmt = &evt->iu.srp.tsk_mgmt;
@@ -982,15 +1008,16 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
tsk_mgmt->task_tag = (u64) found_evt;
- printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
- tsk_mgmt->lun, tsk_mgmt->task_tag);
+ sdev_printk(KERN_INFO, cmd->device, "aborting command. lun 0x%lx, tag 0x%lx\n",
+ tsk_mgmt->lun, tsk_mgmt->task_tag);
evt->sync_srp = &srp_rsp;
init_completion(&evt->comp);
- rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
+ rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
if (rsp_rc != 0) {
- printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
+ sdev_printk(KERN_ERR, cmd->device,
+ "failed to send abort() event. rc=%d\n", rsp_rc);
return FAILED;
}
@@ -999,9 +1026,8 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
/* make sure we got a good response */
if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
if (printk_ratelimit())
- printk(KERN_WARNING
- "ibmvscsi: abort bad SRP RSP type %d\n",
- srp_rsp.srp.rsp.opcode);
+ sdev_printk(KERN_WARNING, cmd->device, "abort bad SRP RSP type %d\n",
+ srp_rsp.srp.rsp.opcode);
return FAILED;
}
@@ -1012,10 +1038,9 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
if (rsp_rc) {
if (printk_ratelimit())
- printk(KERN_WARNING
- "ibmvscsi: abort code %d for task tag 0x%lx\n",
- rsp_rc,
- tsk_mgmt->task_tag);
+ sdev_printk(KERN_WARNING, cmd->device,
+ "abort code %d for task tag 0x%lx\n",
+ rsp_rc, tsk_mgmt->task_tag);
return FAILED;
}
@@ -1034,15 +1059,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
if (found_evt == NULL) {
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
- printk(KERN_INFO
- "ibmvscsi: aborted task tag 0x%lx completed\n",
- tsk_mgmt->task_tag);
+ sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%lx completed\n",
+ tsk_mgmt->task_tag);
return SUCCESS;
}
- printk(KERN_INFO
- "ibmvscsi: successfully aborted task tag 0x%lx\n",
- tsk_mgmt->task_tag);
+ sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%lx\n",
+ tsk_mgmt->task_tag);
cmd->result = (DID_ABORT << 16);
list_del(&found_evt->list);
@@ -1076,14 +1099,14 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
evt = get_event_struct(&hostdata->pool);
if (evt == NULL) {
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
- printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n");
+ sdev_printk(KERN_ERR, cmd->device, "failed to allocate reset event\n");
return FAILED;
}
init_event_struct(evt,
sync_completion,
VIOSRP_SRP_FORMAT,
- init_timeout * HZ);
+ init_timeout);
tsk_mgmt = &evt->iu.srp.tsk_mgmt;
@@ -1093,15 +1116,16 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
tsk_mgmt->lun = ((u64) lun) << 48;
tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
- printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
- tsk_mgmt->lun);
+ sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n",
+ tsk_mgmt->lun);
evt->sync_srp = &srp_rsp;
init_completion(&evt->comp);
- rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
+ rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
if (rsp_rc != 0) {
- printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
+ sdev_printk(KERN_ERR, cmd->device,
+ "failed to send reset event. rc=%d\n", rsp_rc);
return FAILED;
}
@@ -1110,9 +1134,8 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
/* make sure we got a good response */
if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
if (printk_ratelimit())
- printk(KERN_WARNING
- "ibmvscsi: reset bad SRP RSP type %d\n",
- srp_rsp.srp.rsp.opcode);
+ sdev_printk(KERN_WARNING, cmd->device, "reset bad SRP RSP type %d\n",
+ srp_rsp.srp.rsp.opcode);
return FAILED;
}
@@ -1123,9 +1146,9 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
if (rsp_rc) {
if (printk_ratelimit())
- printk(KERN_WARNING
- "ibmvscsi: reset code %d for task tag 0x%lx\n",
- rsp_rc, tsk_mgmt->task_tag);
+ sdev_printk(KERN_WARNING, cmd->device,
+ "reset code %d for task tag 0x%lx\n",
+ rsp_rc, tsk_mgmt->task_tag);
return FAILED;
}
@@ -1154,32 +1177,30 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
}
/**
- * purge_requests: Our virtual adapter just shut down. purge any sent requests
- * @hostdata: the adapter
- */
-static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
+ * ibmvscsi_eh_host_reset_handler - Reset the connection to the server
+ * @cmd: struct scsi_cmnd having problems
+*/
+static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
- struct srp_event_struct *tmp_evt, *pos;
- unsigned long flags;
+ unsigned long wait_switch = 0;
+ struct ibmvscsi_host_data *hostdata =
+ (struct ibmvscsi_host_data *)cmd->device->host->hostdata;
- spin_lock_irqsave(hostdata->host->host_lock, flags);
- list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
- list_del(&tmp_evt->list);
- if (tmp_evt->cmnd) {
- tmp_evt->cmnd->result = (error_code << 16);
- unmap_cmd_data(&tmp_evt->iu.srp.cmd,
- tmp_evt,
- tmp_evt->hostdata->dev);
- if (tmp_evt->cmnd_done)
- tmp_evt->cmnd_done(tmp_evt->cmnd);
- } else {
- if (tmp_evt->done) {
- tmp_evt->done(tmp_evt);
- }
- }
- free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
+ dev_err(hostdata->dev, "Resetting connection due to error recovery\n");
+
+ ibmvscsi_reset_host(hostdata);
+
+ for (wait_switch = jiffies + (init_timeout * HZ);
+ time_before(jiffies, wait_switch) &&
+ atomic_read(&hostdata->request_limit) < 2;) {
+
+ msleep(10);
}
- spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
+ if (atomic_read(&hostdata->request_limit) <= 0)
+ return FAILED;
+
+ return SUCCESS;
}
/**
@@ -1191,6 +1212,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
void ibmvscsi_handle_crq(struct viosrp_crq *crq,
struct ibmvscsi_host_data *hostdata)
{
+ long rc;
unsigned long flags;
struct srp_event_struct *evt_struct =
(struct srp_event_struct *)crq->IU_data_ptr;
@@ -1198,27 +1220,25 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
case 0xC0: /* initialization */
switch (crq->format) {
case 0x01: /* Initialization message */
- printk(KERN_INFO "ibmvscsi: partner initialized\n");
+ dev_info(hostdata->dev, "partner initialized\n");
/* Send back a response */
- if (ibmvscsi_send_crq(hostdata,
- 0xC002000000000000LL, 0) == 0) {
+ if ((rc = ibmvscsi_send_crq(hostdata,
+ 0xC002000000000000LL, 0)) == 0) {
/* Now login */
send_srp_login(hostdata);
} else {
- printk(KERN_ERR
- "ibmvscsi: Unable to send init rsp\n");
+ dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
}
break;
case 0x02: /* Initialization response */
- printk(KERN_INFO
- "ibmvscsi: partner initialization complete\n");
+ dev_info(hostdata->dev, "partner initialization complete\n");
/* Now login */
send_srp_login(hostdata);
break;
default:
- printk(KERN_ERR "ibmvscsi: unknown crq message type\n");
+ dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
}
return;
case 0xFF: /* Hypervisor telling us the connection is closed */
@@ -1226,8 +1246,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
atomic_set(&hostdata->request_limit, 0);
if (crq->format == 0x06) {
/* We need to re-setup the interpartition connection */
- printk(KERN_INFO
- "ibmvscsi: Re-enabling adapter!\n");
+ dev_info(hostdata->dev, "Re-enabling adapter!\n");
purge_requests(hostdata, DID_REQUEUE);
if ((ibmvscsi_reenable_crq_queue(&hostdata->queue,
hostdata)) ||
@@ -1235,14 +1254,11 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
0xC001000000000000LL, 0))) {
atomic_set(&hostdata->request_limit,
-1);
- printk(KERN_ERR
- "ibmvscsi: error after"
- " enable\n");
+ dev_err(hostdata->dev, "error after enable\n");
}
} else {
- printk(KERN_INFO
- "ibmvscsi: Virtual adapter failed rc %d!\n",
- crq->format);
+ dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n",
+ crq->format);
purge_requests(hostdata, DID_ERROR);
if ((ibmvscsi_reset_crq_queue(&hostdata->queue,
@@ -1251,8 +1267,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
0xC001000000000000LL, 0))) {
atomic_set(&hostdata->request_limit,
-1);
- printk(KERN_ERR
- "ibmvscsi: error after reset\n");
+ dev_err(hostdata->dev, "error after reset\n");
}
}
scsi_unblock_requests(hostdata->host);
@@ -1260,9 +1275,8 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
case 0x80: /* real payload */
break;
default:
- printk(KERN_ERR
- "ibmvscsi: got an invalid message type 0x%02x\n",
- crq->valid);
+ dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
+ crq->valid);
return;
}
@@ -1271,16 +1285,14 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
* actually sent
*/
if (!valid_event_struct(&hostdata->pool, evt_struct)) {
- printk(KERN_ERR
- "ibmvscsi: returned correlation_token 0x%p is invalid!\n",
+ dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
(void *)crq->IU_data_ptr);
return;
}
if (atomic_read(&evt_struct->free)) {
- printk(KERN_ERR
- "ibmvscsi: received duplicate correlation_token 0x%p!\n",
- (void *)crq->IU_data_ptr);
+ dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
+ (void *)crq->IU_data_ptr);
return;
}
@@ -1288,11 +1300,12 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
&hostdata->request_limit);
+ del_timer(&evt_struct->timer);
+
if (evt_struct->done)
evt_struct->done(evt_struct);
else
- printk(KERN_ERR
- "ibmvscsi: returned done() is NULL; not running it!\n");
+ dev_err(hostdata->dev, "returned done() is NULL; not running it!\n");
/*
* Lock the host_lock before messing with these structures, since we
@@ -1313,20 +1326,20 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
{
struct viosrp_host_config *host_config;
struct srp_event_struct *evt_struct;
+ unsigned long flags;
dma_addr_t addr;
int rc;
evt_struct = get_event_struct(&hostdata->pool);
if (!evt_struct) {
- printk(KERN_ERR
- "ibmvscsi: could't allocate event for HOST_CONFIG!\n");
+ dev_err(hostdata->dev, "couldn't allocate event for HOST_CONFIG!\n");
return -1;
}
init_event_struct(evt_struct,
sync_completion,
VIOSRP_MAD_FORMAT,
- init_timeout * HZ);
+ init_timeout);
host_config = &evt_struct->iu.mad.host_config;
@@ -1339,14 +1352,15 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(host_config->buffer)) {
- printk(KERN_ERR
- "ibmvscsi: dma_mapping error " "getting host config\n");
+ dev_err(hostdata->dev, "dma_mapping error getting host config\n");
free_event_struct(&hostdata->pool, evt_struct);
return -1;
}
init_completion(&evt_struct->comp);
- rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
if (rc == 0)
wait_for_completion(&evt_struct->comp);
dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);
@@ -1375,6 +1389,23 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
return 0;
}
+/**
+ * ibmvscsi_change_queue_depth - Change the device's queue depth
+ * @sdev: scsi device struct
+ * @qdepth: depth to set
+ *
+ * Return value:
+ * actual depth set
+ **/
+static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
+ qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
+
+ scsi_adjust_queue_depth(sdev, 0, qdepth);
+ return sdev->queue_depth;
+}
+
/* ------------------------------------------------------------
* sysfs attributes
*/
@@ -1520,7 +1551,9 @@ static struct scsi_host_template driver_template = {
.queuecommand = ibmvscsi_queuecommand,
.eh_abort_handler = ibmvscsi_eh_abort_handler,
.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
+ .eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
.slave_configure = ibmvscsi_slave_configure,
+ .change_queue_depth = ibmvscsi_change_queue_depth,
.cmd_per_lun = 16,
.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
.this_id = -1,
@@ -1545,7 +1578,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
driver_template.can_queue = max_requests;
host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
if (!host) {
- printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n");
+ dev_err(&vdev->dev, "couldn't allocate host data\n");
goto scsi_host_alloc_failed;
}
@@ -1559,11 +1592,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
if (rc != 0 && rc != H_RESOURCE) {
- printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n");
+ dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
goto init_crq_failed;
}
if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
- printk(KERN_ERR "ibmvscsi: couldn't initialize event pool\n");
+ dev_err(&vdev->dev, "couldn't initialize event pool\n");
goto init_pool_failed;
}
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 77cc1d40f5b..b19c2e26c2a 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -45,6 +45,7 @@ struct Scsi_Host;
#define MAX_INDIRECT_BUFS 10
#define IBMVSCSI_MAX_REQUESTS_DEFAULT 100
+#define IBMVSCSI_MAX_CMDS_PER_LUN 64
/* ------------------------------------------------------------
* Data Structures
@@ -69,6 +70,7 @@ struct srp_event_struct {
union viosrp_iu iu;
void (*cmnd_done) (struct scsi_cmnd *);
struct completion comp;
+ struct timer_list timer;
union viosrp_iu *sync_srp;
struct srp_direct_buf *ext_list;
dma_addr_t ext_list_token;
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index d8700aaa611..9c14e789df5 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -177,7 +177,7 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
memset(&hostdata->madapter_info, 0x00,
sizeof(hostdata->madapter_info));
- printk(KERN_INFO "rpa_vscsi: SPR_VERSION: %s\n", SRP_VERSION);
+ dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
strncpy(hostdata->madapter_info.partition_name, partition_name,
@@ -232,25 +232,24 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
if (rc == 2) {
/* Adapter is good, but other end is not ready */
- printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n");
+ dev_warn(hostdata->dev, "Partner adapter not ready\n");
retrc = 0;
} else if (rc != 0) {
- printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc);
+ dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
goto reg_crq_failed;
}
if (request_irq(vdev->irq,
ibmvscsi_handle_event,
0, "ibmvscsi", (void *)hostdata) != 0) {
- printk(KERN_ERR "ibmvscsi: couldn't register irq 0x%x\n",
- vdev->irq);
+ dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
+ vdev->irq);
goto req_irq_failed;
}
rc = vio_enable_interrupts(vdev);
if (rc != 0) {
- printk(KERN_ERR "ibmvscsi: Error %d enabling interrupts!!!\n",
- rc);
+ dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
goto req_irq_failed;
}
@@ -294,7 +293,7 @@ int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
} while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
if (rc)
- printk(KERN_ERR "ibmvscsi: Error %d enabling adapter\n", rc);
+ dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
return rc;
}
@@ -327,10 +326,9 @@ int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
queue->msg_token, PAGE_SIZE);
if (rc == 2) {
/* Adapter is good, but other end is not ready */
- printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n");
+ dev_warn(hostdata->dev, "Partner adapter not ready\n");
} else if (rc != 0) {
- printk(KERN_WARNING
- "ibmvscsi: couldn't register crq--rc 0x%x\n", rc);
+ dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
}
return rc;
}
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 7e7635ca78f..d9dfb69ae03 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -3,7 +3,8 @@
*
* Copyright (c) 1994-1998 Initio Corporation
* Copyright (c) 1998 Bas Vermeulen <bvermeul@blackstar.xs4all.nl>
- * All rights reserved.
+ * Copyright (c) 2004 Christoph Hellwig <hch@lst.de>
+ * Copyright (c) 2007 Red Hat <alan@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,38 +20,6 @@
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * --------------------------------------------------------------------------
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Where this Software is combined with software released under the terms of
- * the GNU General Public License ("GPL") and the terms of the GPL would require the
- * combined work to also be released under the terms of the GPL, the terms
- * and conditions of this License will apply in addition to those of the
- * GPL with the exception of any terms or conditions of this License that
- * conflict with, or are expressly prohibited by, the GPL.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*
*************************************************************************
*
@@ -70,14 +39,14 @@
* - Fix memory allocation problem
* 03/04/98 hc - v1.01l
* - Fix tape rewind which will hang the system problem
- * - Set can_queue to tul_num_scb
+ * - Set can_queue to initio_num_scb
* 06/25/98 hc - v1.01m
* - Get it work for kernel version >= 2.1.75
- * - Dynamic assign SCSI bus reset holding time in init_tulip()
+ * - Dynamic assign SCSI bus reset holding time in initio_init()
* 07/02/98 hc - v1.01n
* - Support 0002134A
* 08/07/98 hc - v1.01o
- * - Change the tul_abort_srb routine to use scsi_done. <01>
+ * - Change the initio_abort_srb routine to use scsi_done. <01>
* 09/07/98 hl - v1.02
* - Change the INI9100U define and proc_dir_entry to
* reflect the newer Kernel 2.1.118, but the v1.o1o
@@ -150,23 +119,13 @@
static unsigned int i91u_debug = DEBUG_DEFAULT;
#endif
-#define TUL_RDWORD(x,y) (short)(inl((int)((ULONG)((ULONG)x+(UCHAR)y)) ))
-
-typedef struct PCI_ID_Struc {
- unsigned short vendor_id;
- unsigned short device_id;
-} PCI_ID;
-
-static int tul_num_ch = 4; /* Maximum 4 adapters */
-static int tul_num_scb;
-static int tul_tag_enable = 1;
-static SCB *tul_scb;
+static int initio_tag_enable = 1;
#ifdef DEBUG_i91u
static int setup_debug = 0;
#endif
-static void i91uSCBPost(BYTE * pHcb, BYTE * pScb);
+static void i91uSCBPost(u8 * pHcb, u8 * pScb);
/* PCI Devices supported by this driver */
static struct pci_device_id i91u_pci_devices[] = {
@@ -184,74 +143,66 @@ MODULE_DEVICE_TABLE(pci, i91u_pci_devices);
#define DEBUG_STATE 0
#define INT_DISC 0
-/*--- external functions --*/
-static void tul_se2_wait(void);
-
-/*--- forward refrence ---*/
-static SCB *tul_find_busy_scb(HCS * pCurHcb, WORD tarlun);
-static SCB *tul_find_done_scb(HCS * pCurHcb);
-
-static int tulip_main(HCS * pCurHcb);
-
-static int tul_next_state(HCS * pCurHcb);
-static int tul_state_1(HCS * pCurHcb);
-static int tul_state_2(HCS * pCurHcb);
-static int tul_state_3(HCS * pCurHcb);
-static int tul_state_4(HCS * pCurHcb);
-static int tul_state_5(HCS * pCurHcb);
-static int tul_state_6(HCS * pCurHcb);
-static int tul_state_7(HCS * pCurHcb);
-static int tul_xfer_data_in(HCS * pCurHcb);
-static int tul_xfer_data_out(HCS * pCurHcb);
-static int tul_xpad_in(HCS * pCurHcb);
-static int tul_xpad_out(HCS * pCurHcb);
-static int tul_status_msg(HCS * pCurHcb);
-
-static int tul_msgin(HCS * pCurHcb);
-static int tul_msgin_sync(HCS * pCurHcb);
-static int tul_msgin_accept(HCS * pCurHcb);
-static int tul_msgout_reject(HCS * pCurHcb);
-static int tul_msgin_extend(HCS * pCurHcb);
-
-static int tul_msgout_ide(HCS * pCurHcb);
-static int tul_msgout_abort_targ(HCS * pCurHcb);
-static int tul_msgout_abort_tag(HCS * pCurHcb);
-
-static int tul_bus_device_reset(HCS * pCurHcb);
-static void tul_select_atn(HCS * pCurHcb, SCB * pCurScb);
-static void tul_select_atn3(HCS * pCurHcb, SCB * pCurScb);
-static void tul_select_atn_stop(HCS * pCurHcb, SCB * pCurScb);
-static int int_tul_busfree(HCS * pCurHcb);
-static int int_tul_scsi_rst(HCS * pCurHcb);
-static int int_tul_bad_seq(HCS * pCurHcb);
-static int int_tul_resel(HCS * pCurHcb);
-static int tul_sync_done(HCS * pCurHcb);
-static int wdtr_done(HCS * pCurHcb);
-static int wait_tulip(HCS * pCurHcb);
-static int tul_wait_done_disc(HCS * pCurHcb);
-static int tul_wait_disc(HCS * pCurHcb);
-static void tulip_scsi(HCS * pCurHcb);
-static int tul_post_scsi_rst(HCS * pCurHcb);
-
-static void tul_se2_ew_en(WORD CurBase);
-static void tul_se2_ew_ds(WORD CurBase);
-static int tul_se2_rd_all(WORD CurBase);
-static void tul_se2_update_all(WORD CurBase); /* setup default pattern */
-static void tul_read_eeprom(WORD CurBase);
-
- /* ---- INTERNAL VARIABLES ---- */
-static HCS tul_hcs[MAX_SUPPORTED_ADAPTERS];
-static INI_ADPT_STRUCT i91u_adpt[MAX_SUPPORTED_ADAPTERS];
-
-/*NVRAM nvram, *nvramp = &nvram; */
+/*--- forward references ---*/
+static struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun);
+static struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host);
+
+static int tulip_main(struct initio_host * host);
+
+static int initio_next_state(struct initio_host * host);
+static int initio_state_1(struct initio_host * host);
+static int initio_state_2(struct initio_host * host);
+static int initio_state_3(struct initio_host * host);
+static int initio_state_4(struct initio_host * host);
+static int initio_state_5(struct initio_host * host);
+static int initio_state_6(struct initio_host * host);
+static int initio_state_7(struct initio_host * host);
+static int initio_xfer_data_in(struct initio_host * host);
+static int initio_xfer_data_out(struct initio_host * host);
+static int initio_xpad_in(struct initio_host * host);
+static int initio_xpad_out(struct initio_host * host);
+static int initio_status_msg(struct initio_host * host);
+
+static int initio_msgin(struct initio_host * host);
+static int initio_msgin_sync(struct initio_host * host);
+static int initio_msgin_accept(struct initio_host * host);
+static int initio_msgout_reject(struct initio_host * host);
+static int initio_msgin_extend(struct initio_host * host);
+
+static int initio_msgout_ide(struct initio_host * host);
+static int initio_msgout_abort_targ(struct initio_host * host);
+static int initio_msgout_abort_tag(struct initio_host * host);
+
+static int initio_bus_device_reset(struct initio_host * host);
+static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb);
+static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb);
+static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb);
+static int int_initio_busfree(struct initio_host * host);
+static int int_initio_scsi_rst(struct initio_host * host);
+static int int_initio_bad_seq(struct initio_host * host);
+static int int_initio_resel(struct initio_host * host);
+static int initio_sync_done(struct initio_host * host);
+static int wdtr_done(struct initio_host * host);
+static int wait_tulip(struct initio_host * host);
+static int initio_wait_done_disc(struct initio_host * host);
+static int initio_wait_disc(struct initio_host * host);
+static void tulip_scsi(struct initio_host * host);
+static int initio_post_scsi_rst(struct initio_host * host);
+
+static void initio_se2_ew_en(unsigned long base);
+static void initio_se2_ew_ds(unsigned long base);
+static int initio_se2_rd_all(unsigned long base);
+static void initio_se2_update_all(unsigned long base); /* setup default pattern */
+static void initio_read_eeprom(unsigned long base);
+
+/* ---- INTERNAL VARIABLES ---- */
+
static NVRAM i91unvram;
static NVRAM *i91unvramp;
-
-
-static UCHAR i91udftNvRam[64] =
+static u8 i91udftNvRam[64] =
{
-/*----------- header -----------*/
+ /*----------- header -----------*/
0x25, 0xc9, /* Signature */
0x40, /* Size */
0x01, /* Revision */
@@ -289,7 +240,7 @@ static UCHAR i91udftNvRam[64] =
0, 0}; /* - CheckSum - */
-static UCHAR tul_rate_tbl[8] = /* fast 20 */
+static u8 initio_rate_tbl[8] = /* fast 20 */
{
/* nanosecond devide by 4 */
12, /* 50ns, 20M */
@@ -302,53 +253,17 @@ static UCHAR tul_rate_tbl[8] = /* fast 20 */
62 /* 250ns, 4M */
};
-static void tul_do_pause(unsigned amount)
-{ /* Pause for amount jiffies */
+static void initio_do_pause(unsigned amount)
+{
+ /* Pause for amount jiffies */
unsigned long the_time = jiffies + amount;
- while (time_before_eq(jiffies, the_time));
+ while (time_before_eq(jiffies, the_time))
+ cpu_relax();
}
/*-- forward reference --*/
-/*******************************************************************
- Use memeory refresh time ~ 15us * 2
-********************************************************************/
-void tul_se2_wait(void)
-{
-#if 1
- udelay(30);
-#else
- UCHAR readByte;
-
- readByte = TUL_RD(0, 0x61);
- if ((readByte & 0x10) == 0x10) {
- for (;;) {
- readByte = TUL_RD(0, 0x61);
- if ((readByte & 0x10) == 0x10)
- break;
- }
- for (;;) {
- readByte = TUL_RD(0, 0x61);
- if ((readByte & 0x10) != 0x10)
- break;
- }
- } else {
- for (;;) {
- readByte = TUL_RD(0, 0x61);
- if ((readByte & 0x10) == 0x10)
- break;
- }
- for (;;) {
- readByte = TUL_RD(0, 0x61);
- if ((readByte & 0x10) != 0x10)
- break;
- }
- }
-#endif
-}
-
-
/******************************************************************
Input: instruction for Serial E2PROM
@@ -379,1174 +294,1019 @@ void tul_se2_wait(void)
******************************************************************/
-static void tul_se2_instr(WORD CurBase, UCHAR instr)
+
+/**
+ * initio_se2_instr - bitbang an instruction
+ * @base: Base of InitIO controller
+ * @instr: Instruction for serial E2PROM
+ *
+ * Bitbang an instruction out to the serial E2Prom
+ */
+
+static void initio_se2_instr(unsigned long base, u8 instr)
{
int i;
- UCHAR b;
+ u8 b;
- TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2DO); /* cs+start bit */
- tul_se2_wait();
- TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2CLK | SE2DO); /* +CLK */
- tul_se2_wait();
+ outb(SE2CS | SE2DO, base + TUL_NVRAM); /* cs+start bit */
+ udelay(30);
+ outb(SE2CS | SE2CLK | SE2DO, base + TUL_NVRAM); /* +CLK */
+ udelay(30);
for (i = 0; i < 8; i++) {
if (instr & 0x80)
- b = SE2CS | SE2DO; /* -CLK+dataBit */
+ b = SE2CS | SE2DO; /* -CLK+dataBit */
else
- b = SE2CS; /* -CLK */
- TUL_WR(CurBase + TUL_NVRAM, b);
- tul_se2_wait();
- TUL_WR(CurBase + TUL_NVRAM, b | SE2CLK); /* +CLK */
- tul_se2_wait();
+ b = SE2CS; /* -CLK */
+ outb(b, base + TUL_NVRAM);
+ udelay(30);
+ outb(b | SE2CLK, base + TUL_NVRAM); /* +CLK */
+ udelay(30);
instr <<= 1;
}
- TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK */
- tul_se2_wait();
- return;
+ outb(SE2CS, base + TUL_NVRAM); /* -CLK */
+ udelay(30);
}
-/******************************************************************
- Function name : tul_se2_ew_en
- Description : Enable erase/write state of serial EEPROM
-******************************************************************/
-void tul_se2_ew_en(WORD CurBase)
+/**
+ * initio_se2_ew_en - Enable erase/write
+ * @base: Base address of InitIO controller
+ *
+ * Enable erase/write state of serial EEPROM
+ */
+void initio_se2_ew_en(unsigned long base)
{
- tul_se2_instr(CurBase, 0x30); /* EWEN */
- TUL_WR(CurBase + TUL_NVRAM, 0); /* -CS */
- tul_se2_wait();
- return;
+ initio_se2_instr(base, 0x30); /* EWEN */
+ outb(0, base + TUL_NVRAM); /* -CS */
+ udelay(30);
}
-/************************************************************************
- Disable erase/write state of serial EEPROM
-*************************************************************************/
-void tul_se2_ew_ds(WORD CurBase)
+/**
+ * initio_se2_ew_ds - Disable erase/write
+ * @base: Base address of InitIO controller
+ *
+ * Disable erase/write state of serial EEPROM
+ */
+void initio_se2_ew_ds(unsigned long base)
{
- tul_se2_instr(CurBase, 0); /* EWDS */
- TUL_WR(CurBase + TUL_NVRAM, 0); /* -CS */
- tul_se2_wait();
- return;
+ initio_se2_instr(base, 0); /* EWDS */
+ outb(0, base + TUL_NVRAM); /* -CS */
+ udelay(30);
}
-/******************************************************************
- Input :address of Serial E2PROM
- Output :value stored in Serial E2PROM
-*******************************************************************/
-static USHORT tul_se2_rd(WORD CurBase, ULONG adr)
+/**
+ * initio_se2_rd - read E2PROM word
+ * @base: Base of InitIO controller
+ * @addr: Address of word in E2PROM
+ *
+ * Read a word from the NV E2PROM device
+ */
+static u16 initio_se2_rd(unsigned long base, u8 addr)
{
- UCHAR instr, readByte;
- USHORT readWord;
+ u8 instr, rb;
+ u16 val = 0;
int i;
- instr = (UCHAR) (adr | 0x80);
- tul_se2_instr(CurBase, instr); /* READ INSTR */
- readWord = 0;
+ instr = (u8) (addr | 0x80);
+ initio_se2_instr(base, instr); /* READ INSTR */
for (i = 15; i >= 0; i--) {
- TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2CLK); /* +CLK */
- tul_se2_wait();
- TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK */
+ outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
+ udelay(30);
+ outb(SE2CS, base + TUL_NVRAM); /* -CLK */
/* sample data after the following edge of clock */
- readByte = TUL_RD(CurBase, TUL_NVRAM);
- readByte &= SE2DI;
- readWord += (readByte << i);
- tul_se2_wait(); /* 6/20/95 */
+ rb = inb(base + TUL_NVRAM);
+ rb &= SE2DI;
+ val += (rb << i);
+ udelay(30); /* 6/20/95 */
}
- TUL_WR(CurBase + TUL_NVRAM, 0); /* no chip select */
- tul_se2_wait();
- return readWord;
+ outb(0, base + TUL_NVRAM); /* no chip select */
+ udelay(30);
+ return val;
}
-
-/******************************************************************
- Input: new value in Serial E2PROM, address of Serial E2PROM
-*******************************************************************/
-static void tul_se2_wr(WORD CurBase, UCHAR adr, USHORT writeWord)
+/**
+ * initio_se2_wr - write E2PROM word
+ * @base: Base of InitIO controller
+ * @addr: Address of word in E2PROM
+ * @val: Value to write
+ *
+ * Write a word to the NV E2PROM device. Used when recovering from
+ * a problem with the NV.
+ */
+static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
{
- UCHAR readByte;
- UCHAR instr;
+ u8 rb;
+ u8 instr;
int i;
- instr = (UCHAR) (adr | 0x40);
- tul_se2_instr(CurBase, instr); /* WRITE INSTR */
+ instr = (u8) (addr | 0x40);
+ initio_se2_instr(base, instr); /* WRITE INSTR */
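+	/* Shift out the 16 data bits, MSB first */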
for (i = 15; i >= 0; i--) {
- if (writeWord & 0x8000)
- TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2DO); /* -CLK+dataBit 1 */
+ if (val & 0x8000)
+ outb(SE2CS | SE2DO, base + TUL_NVRAM); /* -CLK+dataBit 1 */
else
- TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK+dataBit 0 */
- tul_se2_wait();
- TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2CLK); /* +CLK */
- tul_se2_wait();
- writeWord <<= 1;
+ outb(SE2CS, base + TUL_NVRAM); /* -CLK+dataBit 0 */
+ udelay(30);
+ outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
+ udelay(30);
+ val <<= 1;
}
- TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK */
- tul_se2_wait();
- TUL_WR(CurBase + TUL_NVRAM, 0); /* -CS */
- tul_se2_wait();
+ outb(SE2CS, base + TUL_NVRAM); /* -CLK */
+ udelay(30);
+ outb(0, base + TUL_NVRAM); /* -CS */
+ udelay(30);
- TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* +CS */
- tul_se2_wait();
+ outb(SE2CS, base + TUL_NVRAM); /* +CS */
+ udelay(30);
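+	/* Poll the data-in line until the EEPROM signals that the write has completed */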
for (;;) {
- TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2CLK); /* +CLK */
- tul_se2_wait();
- TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK */
- tul_se2_wait();
- if ((readByte = TUL_RD(CurBase, TUL_NVRAM)) & SE2DI)
+ outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
+ udelay(30);
+ outb(SE2CS, base + TUL_NVRAM); /* -CLK */
+ udelay(30);
+ if ((rb = inb(base + TUL_NVRAM)) & SE2DI)
break; /* write complete */
}
- TUL_WR(CurBase + TUL_NVRAM, 0); /* -CS */
- return;
+ outb(0, base + TUL_NVRAM); /* -CS */
}
+/**
+ * initio_se2_rd_all - read hostadapter NV configuration
+ * @base: Base address of InitIO controller
+ *
+ * Reads the E2PROM data into main memory. Ensures that the checksum
+ * and header marker are valid. Returns 1 on success, -1 on error.
+ */
-/***********************************************************************
- Read SCSI H/A configuration parameters from serial EEPROM
-************************************************************************/
-int tul_se2_rd_all(WORD CurBase)
+static int initio_se2_rd_all(unsigned long base)
{
int i;
- ULONG chksum = 0;
- USHORT *np;
+ u16 chksum = 0;
+ u16 *np;
i91unvramp = &i91unvram;
- np = (USHORT *) i91unvramp;
- for (i = 0; i < 32; i++) {
- *np++ = tul_se2_rd(CurBase, i);
- }
+ np = (u16 *) i91unvramp;
+ for (i = 0; i < 32; i++)
+ *np++ = initio_se2_rd(base, i);
-/*--------------------Is signature "ini" ok ? ----------------*/
+ /* Is signature "ini" ok ? */
if (i91unvramp->NVM_Signature != INI_SIGNATURE)
return -1;
-/*---------------------- Is ckecksum ok ? ----------------------*/
- np = (USHORT *) i91unvramp;
+	/* Is checksum ok ? */
+ np = (u16 *) i91unvramp;
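+	/* Sum the first 31 words and compare against the stored checksum */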
for (i = 0; i < 31; i++)
chksum += *np++;
- if (i91unvramp->NVM_CheckSum != (USHORT) chksum)
+ if (i91unvramp->NVM_CheckSum != chksum)
return -1;
return 1;
}
-
-/***********************************************************************
- Update SCSI H/A configuration parameters from serial EEPROM
-************************************************************************/
-void tul_se2_update_all(WORD CurBase)
+/**
+ * initio_se2_update_all - Update E2PROM
+ * @base: Base of InitIO controller
+ *
+ * Update the E2PROM by writing any changes into the E2PROM
+ * chip, rewriting the checksum.
+ */
+static void initio_se2_update_all(unsigned long base)
{ /* setup default pattern */
int i;
- ULONG chksum = 0;
- USHORT *np, *np1;
+ u16 chksum = 0;
+ u16 *np, *np1;
i91unvramp = &i91unvram;
/* Calculate checksum first */
- np = (USHORT *) i91udftNvRam;
+ np = (u16 *) i91udftNvRam;
for (i = 0; i < 31; i++)
chksum += *np++;
- *np = (USHORT) chksum;
- tul_se2_ew_en(CurBase); /* Enable write */
+ *np = chksum;
+ initio_se2_ew_en(base); /* Enable write */
- np = (USHORT *) i91udftNvRam;
- np1 = (USHORT *) i91unvramp;
+ np = (u16 *) i91udftNvRam;
+ np1 = (u16 *) i91unvramp;
for (i = 0; i < 32; i++, np++, np1++) {
- if (*np != *np1) {
- tul_se2_wr(CurBase, i, *np);
- }
+ if (*np != *np1)
+ initio_se2_wr(base, i, *np);
}
-
- tul_se2_ew_ds(CurBase); /* Disable write */
- return;
+ initio_se2_ew_ds(base); /* Disable write */
}
-/*************************************************************************
- Function name : read_eeprom
-**************************************************************************/
-void tul_read_eeprom(WORD CurBase)
-{
- UCHAR gctrl;
-
- i91unvramp = &i91unvram;
-/*------Enable EEProm programming ---*/
- gctrl = TUL_RD(CurBase, TUL_GCTRL);
- TUL_WR(CurBase + TUL_GCTRL, gctrl | TUL_GCTRL_EEPROM_BIT);
- if (tul_se2_rd_all(CurBase) != 1) {
- tul_se2_update_all(CurBase); /* setup default pattern */
- tul_se2_rd_all(CurBase); /* load again */
- }
-/*------ Disable EEProm programming ---*/
- gctrl = TUL_RD(CurBase, TUL_GCTRL);
- TUL_WR(CurBase + TUL_GCTRL, gctrl & ~TUL_GCTRL_EEPROM_BIT);
-} /* read_eeprom */
+/**
+ * initio_read_eeprom - Retrieve configuration
+ * @base: Base of InitIO Host Adapter
+ *
+ * Retrieve the host adapter configuration data from E2Prom. If the
+ * data is invalid then the defaults are used and are also restored
+ * into the E2PROM. This forms the access point for the SCSI driver
+ * into the E2PROM layer, the other functions for the E2PROM are all
+ * internal use.
+ *
+ * Must be called single threaded, uses a shared global area.
+ */
-static int Addi91u_into_Adapter_table(WORD wBIOS, WORD wBASE, BYTE bInterrupt,
- BYTE bBus, BYTE bDevice)
+static void initio_read_eeprom(unsigned long base)
{
- int i, j;
+ u8 gctrl;
- for (i = 0; i < MAX_SUPPORTED_ADAPTERS; i++) {
- if (i91u_adpt[i].ADPT_BIOS < wBIOS)
- continue;
- if (i91u_adpt[i].ADPT_BIOS == wBIOS) {
- if (i91u_adpt[i].ADPT_BASE == wBASE) {
- if (i91u_adpt[i].ADPT_Bus != 0xFF)
- return 1;
- } else if (i91u_adpt[i].ADPT_BASE < wBASE)
- continue;
- }
- for (j = MAX_SUPPORTED_ADAPTERS - 1; j > i; j--) {
- i91u_adpt[j].ADPT_BASE = i91u_adpt[j - 1].ADPT_BASE;
- i91u_adpt[j].ADPT_INTR = i91u_adpt[j - 1].ADPT_INTR;
- i91u_adpt[j].ADPT_BIOS = i91u_adpt[j - 1].ADPT_BIOS;
- i91u_adpt[j].ADPT_Bus = i91u_adpt[j - 1].ADPT_Bus;
- i91u_adpt[j].ADPT_Device = i91u_adpt[j - 1].ADPT_Device;
- }
- i91u_adpt[i].ADPT_BASE = wBASE;
- i91u_adpt[i].ADPT_INTR = bInterrupt;
- i91u_adpt[i].ADPT_BIOS = wBIOS;
- i91u_adpt[i].ADPT_Bus = bBus;
- i91u_adpt[i].ADPT_Device = bDevice;
- return 0;
+ i91unvramp = &i91unvram;
+ /* Enable EEProm programming */
+ gctrl = inb(base + TUL_GCTRL);
+ outb(gctrl | TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL);
+ if (initio_se2_rd_all(base) != 1) {
+ initio_se2_update_all(base); /* setup default pattern */
+ initio_se2_rd_all(base); /* load again */
}
- return 1;
+ /* Disable EEProm programming */
+ gctrl = inb(base + TUL_GCTRL);
+ outb(gctrl & ~TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL);
}
-static void init_i91uAdapter_table(void)
-{
- int i;
-
- for (i = 0; i < MAX_SUPPORTED_ADAPTERS; i++) { /* Initialize adapter structure */
- i91u_adpt[i].ADPT_BIOS = 0xffff;
- i91u_adpt[i].ADPT_BASE = 0xffff;
- i91u_adpt[i].ADPT_INTR = 0xff;
- i91u_adpt[i].ADPT_Bus = 0xff;
- i91u_adpt[i].ADPT_Device = 0xff;
- }
- return;
-}
+/**
+ * initio_stop_bm - stop bus master
+ * @host: InitIO we are stopping
+ *
+ * Stop any pending DMA operation, aborting the DMA if necessary
+ */
-static void tul_stop_bm(HCS * pCurHcb)
+static void initio_stop_bm(struct initio_host * host)
{
- if (TUL_RD(pCurHcb->HCS_Base, TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */
- TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_ABT | TAX_X_CLR_FIFO);
+ if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */
+ outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd);
/* wait Abort DMA xfer done */
- while ((TUL_RD(pCurHcb->HCS_Base, TUL_Int) & XABT) == 0);
+ while ((inb(host->addr + TUL_Int) & XABT) == 0)
+ cpu_relax();
}
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
}
-/***************************************************************************/
-static void get_tulipPCIConfig(HCS * pCurHcb, int ch_idx)
-{
- pCurHcb->HCS_Base = i91u_adpt[ch_idx].ADPT_BASE; /* Supply base address */
- pCurHcb->HCS_BIOS = i91u_adpt[ch_idx].ADPT_BIOS; /* Supply BIOS address */
- pCurHcb->HCS_Intr = i91u_adpt[ch_idx].ADPT_INTR; /* Supply interrupt line */
- return;
-}
+/**
+ * initio_reset_scsi - Reset SCSI host controller
+ * @host: InitIO host to reset
+ * @seconds: Recovery time
+ *
+ * Perform a full reset of the SCSI subsystem.
+ */
-/***************************************************************************/
-static int tul_reset_scsi(HCS * pCurHcb, int seconds)
+static int initio_reset_scsi(struct initio_host * host, int seconds)
{
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_RST_BUS);
+ outb(TSC_RST_BUS, host->addr + TUL_SCtrl0);
- while (!((pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt)) & TSS_SCSIRST_INT));
- /* reset tulip chip */
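+	/* Wait until the chip latches the SCSI reset interrupt */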
+ while (!((host->jsint = inb(host->addr + TUL_SInt)) & TSS_SCSIRST_INT))
+ cpu_relax();
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, 0);
+ /* reset tulip chip */
+ outb(0, host->addr + TUL_SSignal);
/* Stall for a while, wait for target's firmware ready,make it 2 sec ! */
/* SONY 5200 tape drive won't work if only stall for 1 sec */
- tul_do_pause(seconds * HZ);
-
- TUL_RD(pCurHcb->HCS_Base, TUL_SInt);
+ /* FIXME: this is a very long busy wait right now */
+ initio_do_pause(seconds * HZ);
- return (SCSI_RESET_SUCCESS);
+ inb(host->addr + TUL_SInt);
+ return SCSI_RESET_SUCCESS;
}
-/***************************************************************************/
-static int init_tulip(HCS * pCurHcb, SCB * scbp, int tul_num_scb,
- BYTE * pbBiosAdr, int seconds)
+/**
+ * initio_init - set up an InitIO host adapter
+ * @host: InitIO host adapter
+ * @bios_addr: BIOS address
+ *
+ * Set up the host adapter and devices according to the configuration
+ * retrieved from the E2PROM.
+ *
+ * Locking: Calls E2PROM layer code which is not re-enterable so must
+ * run single threaded for now.
+ */
+
+static void initio_init(struct initio_host * host, u8 *bios_addr)
{
int i;
- BYTE *pwFlags;
- BYTE *pbHeads;
- SCB *pTmpScb, *pPrevScb = NULL;
-
- pCurHcb->HCS_NumScbs = tul_num_scb;
- pCurHcb->HCS_Semaph = 1;
- spin_lock_init(&pCurHcb->HCS_SemaphLock);
- pCurHcb->HCS_JSStatus0 = 0;
- pCurHcb->HCS_Scb = scbp;
- pCurHcb->HCS_NxtPend = scbp;
- pCurHcb->HCS_NxtAvail = scbp;
- for (i = 0, pTmpScb = scbp; i < tul_num_scb; i++, pTmpScb++) {
- pTmpScb->SCB_TagId = i;
- if (i != 0)
- pPrevScb->SCB_NxtScb = pTmpScb;
- pPrevScb = pTmpScb;
- }
- pPrevScb->SCB_NxtScb = NULL;
- pCurHcb->HCS_ScbEnd = pTmpScb;
- pCurHcb->HCS_FirstAvail = scbp;
- pCurHcb->HCS_LastAvail = pPrevScb;
- spin_lock_init(&pCurHcb->HCS_AvailLock);
- pCurHcb->HCS_FirstPend = NULL;
- pCurHcb->HCS_LastPend = NULL;
- pCurHcb->HCS_FirstBusy = NULL;
- pCurHcb->HCS_LastBusy = NULL;
- pCurHcb->HCS_FirstDone = NULL;
- pCurHcb->HCS_LastDone = NULL;
- pCurHcb->HCS_ActScb = NULL;
- pCurHcb->HCS_ActTcs = NULL;
-
- tul_read_eeprom(pCurHcb->HCS_Base);
-/*---------- get H/A configuration -------------*/
+ u8 *flags;
+ u8 *heads;
+
+ /* Get E2Prom configuration */
+ initio_read_eeprom(host->addr);
if (i91unvramp->NVM_SCSIInfo[0].NVM_NumOfTarg == 8)
- pCurHcb->HCS_MaxTar = 8;
+ host->max_tar = 8;
else
- pCurHcb->HCS_MaxTar = 16;
+ host->max_tar = 16;
- pCurHcb->HCS_Config = i91unvramp->NVM_SCSIInfo[0].NVM_ChConfig1;
+ host->config = i91unvramp->NVM_SCSIInfo[0].NVM_ChConfig1;
- pCurHcb->HCS_SCSI_ID = i91unvramp->NVM_SCSIInfo[0].NVM_ChSCSIID;
- pCurHcb->HCS_IdMask = ~(1 << pCurHcb->HCS_SCSI_ID);
+ host->scsi_id = i91unvramp->NVM_SCSIInfo[0].NVM_ChSCSIID;
+ host->idmask = ~(1 << host->scsi_id);
#ifdef CHK_PARITY
/* Enable parity error response */
- TUL_WR(pCurHcb->HCS_Base + TUL_PCMD, TUL_RD(pCurHcb->HCS_Base, TUL_PCMD) | 0x40);
+ outb(inb(host->addr + TUL_PCMD) | 0x40, host->addr + TUL_PCMD);
#endif
/* Mask all the interrupt */
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
+ outb(0x1F, host->addr + TUL_Mask);
- tul_stop_bm(pCurHcb);
+ initio_stop_bm(host);
/* --- Initialize the tulip --- */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_RST_CHIP);
+ outb(TSC_RST_CHIP, host->addr + TUL_SCtrl0);
/* program HBA's SCSI ID */
- TUL_WR(pCurHcb->HCS_Base + TUL_SScsiId, pCurHcb->HCS_SCSI_ID << 4);
+ outb(host->scsi_id << 4, host->addr + TUL_SScsiId);
/* Enable Initiator Mode ,phase latch,alternate sync period mode,
disable SCSI reset */
- if (pCurHcb->HCS_Config & HCC_EN_PAR)
- pCurHcb->HCS_SConf1 = (TSC_INITDEFAULT | TSC_EN_SCSI_PAR);
+ if (host->config & HCC_EN_PAR)
+ host->sconf1 = (TSC_INITDEFAULT | TSC_EN_SCSI_PAR);
else
- pCurHcb->HCS_SConf1 = (TSC_INITDEFAULT);
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurHcb->HCS_SConf1);
+ host->sconf1 = (TSC_INITDEFAULT);
+ outb(host->sconf1, host->addr + TUL_SConfig);
- /* Enable HW reselect */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT);
+ /* Enable HW reselect */
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
- TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, 0);
+ outb(0, host->addr + TUL_SPeriod);
/* selection time out = 250 ms */
- TUL_WR(pCurHcb->HCS_Base + TUL_STimeOut, 153);
+ outb(153, host->addr + TUL_STimeOut);
-/*--------- Enable SCSI terminator -----*/
- TUL_WR(pCurHcb->HCS_Base + TUL_XCtrl, (pCurHcb->HCS_Config & (HCC_ACT_TERM1 | HCC_ACT_TERM2)));
- TUL_WR(pCurHcb->HCS_Base + TUL_GCTRL1,
- ((pCurHcb->HCS_Config & HCC_AUTO_TERM) >> 4) | (TUL_RD(pCurHcb->HCS_Base, TUL_GCTRL1) & 0xFE));
+ /* Enable SCSI terminator */
+ outb((host->config & (HCC_ACT_TERM1 | HCC_ACT_TERM2)),
+ host->addr + TUL_XCtrl);
+ outb(((host->config & HCC_AUTO_TERM) >> 4) |
+ (inb(host->addr + TUL_GCTRL1) & 0xFE),
+ host->addr + TUL_GCTRL1);
for (i = 0,
- pwFlags = & (i91unvramp->NVM_SCSIInfo[0].NVM_Targ0Config),
- pbHeads = pbBiosAdr + 0x180;
- i < pCurHcb->HCS_MaxTar;
- i++, pwFlags++) {
- pCurHcb->HCS_Tcs[i].TCS_Flags = *pwFlags & ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
- if (pCurHcb->HCS_Tcs[i].TCS_Flags & TCF_EN_255)
- pCurHcb->HCS_Tcs[i].TCS_DrvFlags = TCF_DRV_255_63;
+ flags = & (i91unvramp->NVM_SCSIInfo[0].NVM_Targ0Config),
+ heads = bios_addr + 0x180;
+ i < host->max_tar;
+ i++, flags++) {
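+		/* Per-target setup: flags from NVRAM, drive geometry (heads/sectors) from the BIOS area */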
+ host->targets[i].flags = *flags & ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
+ if (host->targets[i].flags & TCF_EN_255)
+ host->targets[i].drv_flags = TCF_DRV_255_63;
else
- pCurHcb->HCS_Tcs[i].TCS_DrvFlags = 0;
- pCurHcb->HCS_Tcs[i].TCS_JS_Period = 0;
- pCurHcb->HCS_Tcs[i].TCS_SConfig0 = pCurHcb->HCS_SConf1;
- pCurHcb->HCS_Tcs[i].TCS_DrvHead = *pbHeads++;
- if (pCurHcb->HCS_Tcs[i].TCS_DrvHead == 255)
- pCurHcb->HCS_Tcs[i].TCS_DrvFlags = TCF_DRV_255_63;
+ host->targets[i].drv_flags = 0;
+ host->targets[i].js_period = 0;
+ host->targets[i].sconfig0 = host->sconf1;
+ host->targets[i].heads = *heads++;
+ if (host->targets[i].heads == 255)
+ host->targets[i].drv_flags = TCF_DRV_255_63;
else
- pCurHcb->HCS_Tcs[i].TCS_DrvFlags = 0;
- pCurHcb->HCS_Tcs[i].TCS_DrvSector = *pbHeads++;
- pCurHcb->HCS_Tcs[i].TCS_Flags &= ~TCF_BUSY;
- pCurHcb->HCS_ActTags[i] = 0;
- pCurHcb->HCS_MaxTags[i] = 0xFF;
+ host->targets[i].drv_flags = 0;
+ host->targets[i].sectors = *heads++;
+ host->targets[i].flags &= ~TCF_BUSY;
+ host->act_tags[i] = 0;
+ host->max_tags[i] = 0xFF;
} /* for */
printk("i91u: PCI Base=0x%04X, IRQ=%d, BIOS=0x%04X0, SCSI ID=%d\n",
- pCurHcb->HCS_Base, pCurHcb->HCS_Intr,
- pCurHcb->HCS_BIOS, pCurHcb->HCS_SCSI_ID);
-/*------------------- reset SCSI Bus ---------------------------*/
- if (pCurHcb->HCS_Config & HCC_SCSI_RESET) {
- printk("i91u: Reset SCSI Bus ... \n");
- tul_reset_scsi(pCurHcb, seconds);
+ host->addr, host->irq,
+ host->bios_addr, host->scsi_id);
+ /* Reset SCSI Bus */
+ if (host->config & HCC_SCSI_RESET) {
+ printk(KERN_INFO "i91u: Reset SCSI Bus ... \n");
+ initio_reset_scsi(host, 10);
}
- TUL_WR(pCurHcb->HCS_Base + TUL_SCFG1, 0x17);
- TUL_WR(pCurHcb->HCS_Base + TUL_SIntEnable, 0xE9);
- return (0);
+ outb(0x17, host->addr + TUL_SCFG1);
+ outb(0xE9, host->addr + TUL_SIntEnable);
}
-/***************************************************************************/
-static SCB *tul_alloc_scb(HCS * hcsp)
+/**
+ * initio_alloc_scb - Allocate an SCB
+ * @host: InitIO host we are allocating for
+ *
+ * Walk the SCB list for the controller and allocate a free SCB if
+ * one exists.
+ */
+static struct scsi_ctrl_blk *initio_alloc_scb(struct initio_host *host)
{
- SCB *pTmpScb;
- ULONG flags;
- spin_lock_irqsave(&(hcsp->HCS_AvailLock), flags);
- if ((pTmpScb = hcsp->HCS_FirstAvail) != NULL) {
+ struct scsi_ctrl_blk *scb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->avail_lock, flags);
+ if ((scb = host->first_avail) != NULL) {
#if DEBUG_QUEUE
- printk("find scb at %08lx\n", (ULONG) pTmpScb);
+ printk("find scb at %p\n", scb);
#endif
- if ((hcsp->HCS_FirstAvail = pTmpScb->SCB_NxtScb) == NULL)
- hcsp->HCS_LastAvail = NULL;
- pTmpScb->SCB_NxtScb = NULL;
- pTmpScb->SCB_Status = SCB_RENT;
+ if ((host->first_avail = scb->next) == NULL)
+ host->last_avail = NULL;
+ scb->next = NULL;
+ scb->status = SCB_RENT;
}
- spin_unlock_irqrestore(&(hcsp->HCS_AvailLock), flags);
- return (pTmpScb);
+ spin_unlock_irqrestore(&host->avail_lock, flags);
+ return scb;
}
-/***************************************************************************/
-static void tul_release_scb(HCS * hcsp, SCB * scbp)
+/**
+ * initio_release_scb - Release an SCB
+ * @host: InitIO host that owns the SCB
+ * @cmnd: SCB command block being returned
+ *
+ * Return an allocated SCB to the host free list
+ */
+
+static void initio_release_scb(struct initio_host * host, struct scsi_ctrl_blk * cmnd)
{
- ULONG flags;
+ unsigned long flags;
#if DEBUG_QUEUE
- printk("Release SCB %lx; ", (ULONG) scbp);
+ printk("Release SCB %p; ", cmnd);
#endif
- spin_lock_irqsave(&(hcsp->HCS_AvailLock), flags);
- scbp->SCB_Srb = NULL;
- scbp->SCB_Status = 0;
- scbp->SCB_NxtScb = NULL;
- if (hcsp->HCS_LastAvail != NULL) {
- hcsp->HCS_LastAvail->SCB_NxtScb = scbp;
- hcsp->HCS_LastAvail = scbp;
+ spin_lock_irqsave(&(host->avail_lock), flags);
+ cmnd->srb = NULL;
+ cmnd->status = 0;
+ cmnd->next = NULL;
+ if (host->last_avail != NULL) {
+ host->last_avail->next = cmnd;
+ host->last_avail = cmnd;
} else {
- hcsp->HCS_FirstAvail = scbp;
- hcsp->HCS_LastAvail = scbp;
+ host->first_avail = cmnd;
+ host->last_avail = cmnd;
}
- spin_unlock_irqrestore(&(hcsp->HCS_AvailLock), flags);
+ spin_unlock_irqrestore(&(host->avail_lock), flags);
}
/***************************************************************************/
-static void tul_append_pend_scb(HCS * pCurHcb, SCB * scbp)
+static void initio_append_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
#if DEBUG_QUEUE
- printk("Append pend SCB %lx; ", (ULONG) scbp);
+ printk("Append pend SCB %p; ", scbp);
#endif
- scbp->SCB_Status = SCB_PEND;
- scbp->SCB_NxtScb = NULL;
- if (pCurHcb->HCS_LastPend != NULL) {
- pCurHcb->HCS_LastPend->SCB_NxtScb = scbp;
- pCurHcb->HCS_LastPend = scbp;
+ scbp->status = SCB_PEND;
+ scbp->next = NULL;
+ if (host->last_pending != NULL) {
+ host->last_pending->next = scbp;
+ host->last_pending = scbp;
} else {
- pCurHcb->HCS_FirstPend = scbp;
- pCurHcb->HCS_LastPend = scbp;
+ host->first_pending = scbp;
+ host->last_pending = scbp;
}
}
/***************************************************************************/
-static void tul_push_pend_scb(HCS * pCurHcb, SCB * scbp)
+static void initio_push_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
#if DEBUG_QUEUE
- printk("Push pend SCB %lx; ", (ULONG) scbp);
+ printk("Push pend SCB %p; ", scbp);
#endif
- scbp->SCB_Status = SCB_PEND;
- if ((scbp->SCB_NxtScb = pCurHcb->HCS_FirstPend) != NULL) {
- pCurHcb->HCS_FirstPend = scbp;
+ scbp->status = SCB_PEND;
+ if ((scbp->next = host->first_pending) != NULL) {
+ host->first_pending = scbp;
} else {
- pCurHcb->HCS_FirstPend = scbp;
- pCurHcb->HCS_LastPend = scbp;
+ host->first_pending = scbp;
+ host->last_pending = scbp;
}
}
-/***************************************************************************/
-static SCB *tul_find_first_pend_scb(HCS * pCurHcb)
+static struct scsi_ctrl_blk *initio_find_first_pend_scb(struct initio_host * host)
{
- SCB *pFirstPend;
+ struct scsi_ctrl_blk *first;
- pFirstPend = pCurHcb->HCS_FirstPend;
- while (pFirstPend != NULL) {
- if (pFirstPend->SCB_Opcode != ExecSCSI) {
- return (pFirstPend);
- }
- if (pFirstPend->SCB_TagMsg == 0) {
- if ((pCurHcb->HCS_ActTags[pFirstPend->SCB_Target] == 0) &&
- !(pCurHcb->HCS_Tcs[pFirstPend->SCB_Target].TCS_Flags & TCF_BUSY)) {
- return (pFirstPend);
- }
+ first = host->first_pending;
+ while (first != NULL) {
+ if (first->opcode != ExecSCSI)
+ return first;
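+		/* Untagged commands may start only when the target is idle; tagged ones only below the tag limit */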
+ if (first->tagmsg == 0) {
+ if ((host->act_tags[first->target] == 0) &&
+ !(host->targets[first->target].flags & TCF_BUSY))
+ return first;
} else {
- if ((pCurHcb->HCS_ActTags[pFirstPend->SCB_Target] >=
- pCurHcb->HCS_MaxTags[pFirstPend->SCB_Target]) |
- (pCurHcb->HCS_Tcs[pFirstPend->SCB_Target].TCS_Flags & TCF_BUSY)) {
- pFirstPend = pFirstPend->SCB_NxtScb;
+ if ((host->act_tags[first->target] >=
+ host->max_tags[first->target]) |
+ (host->targets[first->target].flags & TCF_BUSY)) {
+ first = first->next;
continue;
}
- return (pFirstPend);
+ return first;
}
- pFirstPend = pFirstPend->SCB_NxtScb;
+ first = first->next;
}
-
-
- return (pFirstPend);
+ return first;
}
-/***************************************************************************/
-static void tul_unlink_pend_scb(HCS * pCurHcb, SCB * pCurScb)
+
+static void initio_unlink_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
- SCB *pTmpScb, *pPrevScb;
+ struct scsi_ctrl_blk *tmp, *prev;
#if DEBUG_QUEUE
- printk("unlink pend SCB %lx; ", (ULONG) pCurScb);
+ printk("unlink pend SCB %p; ", scb);
#endif
- pPrevScb = pTmpScb = pCurHcb->HCS_FirstPend;
- while (pTmpScb != NULL) {
- if (pCurScb == pTmpScb) { /* Unlink this SCB */
- if (pTmpScb == pCurHcb->HCS_FirstPend) {
- if ((pCurHcb->HCS_FirstPend = pTmpScb->SCB_NxtScb) == NULL)
- pCurHcb->HCS_LastPend = NULL;
+ prev = tmp = host->first_pending;
+ while (tmp != NULL) {
+ if (scb == tmp) { /* Unlink this SCB */
+ if (tmp == host->first_pending) {
+ if ((host->first_pending = tmp->next) == NULL)
+ host->last_pending = NULL;
} else {
- pPrevScb->SCB_NxtScb = pTmpScb->SCB_NxtScb;
- if (pTmpScb == pCurHcb->HCS_LastPend)
- pCurHcb->HCS_LastPend = pPrevScb;
+ prev->next = tmp->next;
+ if (tmp == host->last_pending)
+ host->last_pending = prev;
}
- pTmpScb->SCB_NxtScb = NULL;
+ tmp->next = NULL;
break;
}
- pPrevScb = pTmpScb;
- pTmpScb = pTmpScb->SCB_NxtScb;
+ prev = tmp;
+ tmp = tmp->next;
}
- return;
}
-/***************************************************************************/
-static void tul_append_busy_scb(HCS * pCurHcb, SCB * scbp)
+
+static void initio_append_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
#if DEBUG_QUEUE
- printk("append busy SCB %lx; ", (ULONG) scbp);
+	printk("append busy SCB %p; ", scbp);
#endif
- if (scbp->SCB_TagMsg)
- pCurHcb->HCS_ActTags[scbp->SCB_Target]++;
+ if (scbp->tagmsg)
+ host->act_tags[scbp->target]++;
else
- pCurHcb->HCS_Tcs[scbp->SCB_Target].TCS_Flags |= TCF_BUSY;
- scbp->SCB_Status = SCB_BUSY;
- scbp->SCB_NxtScb = NULL;
- if (pCurHcb->HCS_LastBusy != NULL) {
- pCurHcb->HCS_LastBusy->SCB_NxtScb = scbp;
- pCurHcb->HCS_LastBusy = scbp;
+ host->targets[scbp->target].flags |= TCF_BUSY;
+ scbp->status = SCB_BUSY;
+ scbp->next = NULL;
+ if (host->last_busy != NULL) {
+ host->last_busy->next = scbp;
+ host->last_busy = scbp;
} else {
- pCurHcb->HCS_FirstBusy = scbp;
- pCurHcb->HCS_LastBusy = scbp;
+ host->first_busy = scbp;
+ host->last_busy = scbp;
}
}
/***************************************************************************/
-static SCB *tul_pop_busy_scb(HCS * pCurHcb)
+static struct scsi_ctrl_blk *initio_pop_busy_scb(struct initio_host * host)
{
- SCB *pTmpScb;
+ struct scsi_ctrl_blk *tmp;
- if ((pTmpScb = pCurHcb->HCS_FirstBusy) != NULL) {
- if ((pCurHcb->HCS_FirstBusy = pTmpScb->SCB_NxtScb) == NULL)
- pCurHcb->HCS_LastBusy = NULL;
- pTmpScb->SCB_NxtScb = NULL;
- if (pTmpScb->SCB_TagMsg)
- pCurHcb->HCS_ActTags[pTmpScb->SCB_Target]--;
+ if ((tmp = host->first_busy) != NULL) {
+ if ((host->first_busy = tmp->next) == NULL)
+ host->last_busy = NULL;
+ tmp->next = NULL;
+ if (tmp->tagmsg)
+ host->act_tags[tmp->target]--;
else
- pCurHcb->HCS_Tcs[pTmpScb->SCB_Target].TCS_Flags &= ~TCF_BUSY;
+ host->targets[tmp->target].flags &= ~TCF_BUSY;
}
#if DEBUG_QUEUE
- printk("Pop busy SCB %lx; ", (ULONG) pTmpScb);
+ printk("Pop busy SCB %p; ", tmp);
#endif
- return (pTmpScb);
+ return tmp;
}
/***************************************************************************/
-static void tul_unlink_busy_scb(HCS * pCurHcb, SCB * pCurScb)
+static void initio_unlink_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
- SCB *pTmpScb, *pPrevScb;
+ struct scsi_ctrl_blk *tmp, *prev;
#if DEBUG_QUEUE
- printk("unlink busy SCB %lx; ", (ULONG) pCurScb);
+ printk("unlink busy SCB %p; ", scb);
#endif
- pPrevScb = pTmpScb = pCurHcb->HCS_FirstBusy;
- while (pTmpScb != NULL) {
- if (pCurScb == pTmpScb) { /* Unlink this SCB */
- if (pTmpScb == pCurHcb->HCS_FirstBusy) {
- if ((pCurHcb->HCS_FirstBusy = pTmpScb->SCB_NxtScb) == NULL)
- pCurHcb->HCS_LastBusy = NULL;
+ prev = tmp = host->first_busy;
+ while (tmp != NULL) {
+ if (scb == tmp) { /* Unlink this SCB */
+ if (tmp == host->first_busy) {
+ if ((host->first_busy = tmp->next) == NULL)
+ host->last_busy = NULL;
} else {
- pPrevScb->SCB_NxtScb = pTmpScb->SCB_NxtScb;
- if (pTmpScb == pCurHcb->HCS_LastBusy)
- pCurHcb->HCS_LastBusy = pPrevScb;
+ prev->next = tmp->next;
+ if (tmp == host->last_busy)
+ host->last_busy = prev;
}
- pTmpScb->SCB_NxtScb = NULL;
- if (pTmpScb->SCB_TagMsg)
- pCurHcb->HCS_ActTags[pTmpScb->SCB_Target]--;
+ tmp->next = NULL;
+ if (tmp->tagmsg)
+ host->act_tags[tmp->target]--;
else
- pCurHcb->HCS_Tcs[pTmpScb->SCB_Target].TCS_Flags &= ~TCF_BUSY;
+ host->targets[tmp->target].flags &= ~TCF_BUSY;
break;
}
- pPrevScb = pTmpScb;
- pTmpScb = pTmpScb->SCB_NxtScb;
+ prev = tmp;
+ tmp = tmp->next;
}
return;
}
-/***************************************************************************/
-SCB *tul_find_busy_scb(HCS * pCurHcb, WORD tarlun)
+struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun)
{
- SCB *pTmpScb, *pPrevScb;
- WORD scbp_tarlun;
+ struct scsi_ctrl_blk *tmp, *prev;
+ u16 scbp_tarlun;
- pPrevScb = pTmpScb = pCurHcb->HCS_FirstBusy;
- while (pTmpScb != NULL) {
- scbp_tarlun = (pTmpScb->SCB_Lun << 8) | (pTmpScb->SCB_Target);
+ prev = tmp = host->first_busy;
+ while (tmp != NULL) {
+ scbp_tarlun = (tmp->lun << 8) | (tmp->target);
if (scbp_tarlun == tarlun) { /* Unlink this SCB */
break;
}
- pPrevScb = pTmpScb;
- pTmpScb = pTmpScb->SCB_NxtScb;
+ prev = tmp;
+ tmp = tmp->next;
}
#if DEBUG_QUEUE
- printk("find busy SCB %lx; ", (ULONG) pTmpScb);
+ printk("find busy SCB %p; ", tmp);
#endif
- return (pTmpScb);
+ return tmp;
}
-/***************************************************************************/
-static void tul_append_done_scb(HCS * pCurHcb, SCB * scbp)
+static void initio_append_done_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
-
#if DEBUG_QUEUE
- printk("append done SCB %lx; ", (ULONG) scbp);
+ printk("append done SCB %p; ", scbp);
#endif
- scbp->SCB_Status = SCB_DONE;
- scbp->SCB_NxtScb = NULL;
- if (pCurHcb->HCS_LastDone != NULL) {
- pCurHcb->HCS_LastDone->SCB_NxtScb = scbp;
- pCurHcb->HCS_LastDone = scbp;
+ scbp->status = SCB_DONE;
+ scbp->next = NULL;
+ if (host->last_done != NULL) {
+ host->last_done->next = scbp;
+ host->last_done = scbp;
} else {
- pCurHcb->HCS_FirstDone = scbp;
- pCurHcb->HCS_LastDone = scbp;
+ host->first_done = scbp;
+ host->last_done = scbp;
}
}
-/***************************************************************************/
-SCB *tul_find_done_scb(HCS * pCurHcb)
+struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host)
{
- SCB *pTmpScb;
+ struct scsi_ctrl_blk *tmp;
-
- if ((pTmpScb = pCurHcb->HCS_FirstDone) != NULL) {
- if ((pCurHcb->HCS_FirstDone = pTmpScb->SCB_NxtScb) == NULL)
- pCurHcb->HCS_LastDone = NULL;
- pTmpScb->SCB_NxtScb = NULL;
+ if ((tmp = host->first_done) != NULL) {
+ if ((host->first_done = tmp->next) == NULL)
+ host->last_done = NULL;
+ tmp->next = NULL;
}
#if DEBUG_QUEUE
- printk("find done SCB %lx; ", (ULONG) pTmpScb);
+ printk("find done SCB %p; ",tmp);
#endif
- return (pTmpScb);
+ return tmp;
}
-/***************************************************************************/
-static int tul_abort_srb(HCS * pCurHcb, struct scsi_cmnd *srbp)
+static int initio_abort_srb(struct initio_host * host, struct scsi_cmnd *srbp)
{
- ULONG flags;
- SCB *pTmpScb, *pPrevScb;
+ unsigned long flags;
+ struct scsi_ctrl_blk *tmp, *prev;
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
+ spin_lock_irqsave(&host->semaph_lock, flags);
- if ((pCurHcb->HCS_Semaph == 0) && (pCurHcb->HCS_ActScb == NULL)) {
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
+ if ((host->semaph == 0) && (host->active == NULL)) {
/* disable Jasmin SCSI Int */
-
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
-
- tulip_main(pCurHcb);
-
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
-
- pCurHcb->HCS_Semaph = 1;
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x0F);
-
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
-
+ outb(0x1F, host->addr + TUL_Mask);
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
+ /* FIXME: synchronize_irq needed ? */
+ tulip_main(host);
+ spin_lock_irqsave(&host->semaph_lock, flags);
+ host->semaph = 1;
+ outb(0x0F, host->addr + TUL_Mask);
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_SNOOZE;
}
- pPrevScb = pTmpScb = pCurHcb->HCS_FirstPend; /* Check Pend queue */
- while (pTmpScb != NULL) {
+ prev = tmp = host->first_pending; /* Check Pend queue */
+ while (tmp != NULL) {
/* 07/27/98 */
- if (pTmpScb->SCB_Srb == srbp) {
- if (pTmpScb == pCurHcb->HCS_ActScb) {
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
+ if (tmp->srb == srbp) {
+ if (tmp == host->active) {
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_BUSY;
- } else if (pTmpScb == pCurHcb->HCS_FirstPend) {
- if ((pCurHcb->HCS_FirstPend = pTmpScb->SCB_NxtScb) == NULL)
- pCurHcb->HCS_LastPend = NULL;
+ } else if (tmp == host->first_pending) {
+ if ((host->first_pending = tmp->next) == NULL)
+ host->last_pending = NULL;
} else {
- pPrevScb->SCB_NxtScb = pTmpScb->SCB_NxtScb;
- if (pTmpScb == pCurHcb->HCS_LastPend)
- pCurHcb->HCS_LastPend = pPrevScb;
+ prev->next = tmp->next;
+ if (tmp == host->last_pending)
+ host->last_pending = prev;
}
- pTmpScb->SCB_HaStat = HOST_ABORTED;
- pTmpScb->SCB_Flags |= SCF_DONE;
- if (pTmpScb->SCB_Flags & SCF_POST)
- (*pTmpScb->SCB_Post) ((BYTE *) pCurHcb, (BYTE *) pTmpScb);
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
+ tmp->hastat = HOST_ABORTED;
+ tmp->flags |= SCF_DONE;
+ if (tmp->flags & SCF_POST)
+ (*tmp->post) ((u8 *) host, (u8 *) tmp);
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_SUCCESS;
}
- pPrevScb = pTmpScb;
- pTmpScb = pTmpScb->SCB_NxtScb;
+ prev = tmp;
+ tmp = tmp->next;
}
- pPrevScb = pTmpScb = pCurHcb->HCS_FirstBusy; /* Check Busy queue */
- while (pTmpScb != NULL) {
-
- if (pTmpScb->SCB_Srb == srbp) {
-
- if (pTmpScb == pCurHcb->HCS_ActScb) {
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
+ prev = tmp = host->first_busy; /* Check Busy queue */
+ while (tmp != NULL) {
+ if (tmp->srb == srbp) {
+ if (tmp == host->active) {
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_BUSY;
- } else if (pTmpScb->SCB_TagMsg == 0) {
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
+ } else if (tmp->tagmsg == 0) {
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_BUSY;
} else {
- pCurHcb->HCS_ActTags[pTmpScb->SCB_Target]--;
- if (pTmpScb == pCurHcb->HCS_FirstBusy) {
- if ((pCurHcb->HCS_FirstBusy = pTmpScb->SCB_NxtScb) == NULL)
- pCurHcb->HCS_LastBusy = NULL;
+ host->act_tags[tmp->target]--;
+ if (tmp == host->first_busy) {
+ if ((host->first_busy = tmp->next) == NULL)
+ host->last_busy = NULL;
} else {
- pPrevScb->SCB_NxtScb = pTmpScb->SCB_NxtScb;
- if (pTmpScb == pCurHcb->HCS_LastBusy)
- pCurHcb->HCS_LastBusy = pPrevScb;
+ prev->next = tmp->next;
+ if (tmp == host->last_busy)
+ host->last_busy = prev;
}
- pTmpScb->SCB_NxtScb = NULL;
+ tmp->next = NULL;
- pTmpScb->SCB_HaStat = HOST_ABORTED;
- pTmpScb->SCB_Flags |= SCF_DONE;
- if (pTmpScb->SCB_Flags & SCF_POST)
- (*pTmpScb->SCB_Post) ((BYTE *) pCurHcb, (BYTE *) pTmpScb);
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
+ tmp->hastat = HOST_ABORTED;
+ tmp->flags |= SCF_DONE;
+ if (tmp->flags & SCF_POST)
+ (*tmp->post) ((u8 *) host, (u8 *) tmp);
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_SUCCESS;
}
}
- pPrevScb = pTmpScb;
- pTmpScb = pTmpScb->SCB_NxtScb;
+ prev = tmp;
+ tmp = tmp->next;
}
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
- return (SCSI_ABORT_NOT_RUNNING);
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
+ return SCSI_ABORT_NOT_RUNNING;
}
/***************************************************************************/
-static int tul_bad_seq(HCS * pCurHcb)
+static int initio_bad_seq(struct initio_host * host)
{
- SCB *pCurScb;
-
- printk("tul_bad_seg c=%d\n", pCurHcb->HCS_Index);
-
- if ((pCurScb = pCurHcb->HCS_ActScb) != NULL) {
- tul_unlink_busy_scb(pCurHcb, pCurScb);
- pCurScb->SCB_HaStat = HOST_BAD_PHAS;
- pCurScb->SCB_TaStat = 0;
- tul_append_done_scb(pCurHcb, pCurScb);
- }
- tul_stop_bm(pCurHcb);
-
- tul_reset_scsi(pCurHcb, 8); /* 7/29/98 */
-
- return (tul_post_scsi_rst(pCurHcb));
-}
-
-#if 0
-
-/************************************************************************/
-static int tul_device_reset(HCS * pCurHcb, struct scsi_cmnd *pSrb,
- unsigned int target, unsigned int ResetFlags)
-{
- ULONG flags;
- SCB *pScb;
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
-
- if (ResetFlags & SCSI_RESET_ASYNCHRONOUS) {
-
- if ((pCurHcb->HCS_Semaph == 0) && (pCurHcb->HCS_ActScb == NULL)) {
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
- /* disable Jasmin SCSI Int */
-
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
-
- tulip_main(pCurHcb);
-
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
-
- pCurHcb->HCS_Semaph = 1;
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x0F);
-
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
-
- return SCSI_RESET_SNOOZE;
- }
- pScb = pCurHcb->HCS_FirstBusy; /* Check Busy queue */
- while (pScb != NULL) {
- if (pScb->SCB_Srb == pSrb)
- break;
- pScb = pScb->SCB_NxtScb;
- }
- if (pScb == NULL) {
- printk("Unable to Reset - No SCB Found\n");
-
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
- return SCSI_RESET_NOT_RUNNING;
- }
- }
- if ((pScb = tul_alloc_scb(pCurHcb)) == NULL) {
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
- return SCSI_RESET_NOT_RUNNING;
- }
- pScb->SCB_Opcode = BusDevRst;
- pScb->SCB_Flags = SCF_POST;
- pScb->SCB_Target = target;
- pScb->SCB_Mode = 0;
-
- pScb->SCB_Srb = NULL;
- if (ResetFlags & SCSI_RESET_SYNCHRONOUS) {
- pScb->SCB_Srb = pSrb;
- }
- tul_push_pend_scb(pCurHcb, pScb); /* push this SCB to Pending queue */
-
- if (pCurHcb->HCS_Semaph == 1) {
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
- /* disable Jasmin SCSI Int */
- pCurHcb->HCS_Semaph = 0;
-
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
+ struct scsi_ctrl_blk *scb;
- tulip_main(pCurHcb);
+	printk("initio_bad_seq c=%d\n", host->index);
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
-
- pCurHcb->HCS_Semaph = 1;
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x0F);
+ if ((scb = host->active) != NULL) {
+ initio_unlink_busy_scb(host, scb);
+ scb->hastat = HOST_BAD_PHAS;
+ scb->tastat = 0;
+ initio_append_done_scb(host, scb);
}
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
- return SCSI_RESET_PENDING;
+ initio_stop_bm(host);
+ initio_reset_scsi(host, 8); /* 7/29/98 */
+ return initio_post_scsi_rst(host);
}
-static int tul_reset_scsi_bus(HCS * pCurHcb)
-{
- ULONG flags;
-
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
- pCurHcb->HCS_Semaph = 0;
-
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
-
- tul_stop_bm(pCurHcb);
-
- tul_reset_scsi(pCurHcb, 2); /* 7/29/98 */
-
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
- tul_post_scsi_rst(pCurHcb);
-
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
-
- tulip_main(pCurHcb);
-
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
-
- pCurHcb->HCS_Semaph = 1;
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x0F);
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
- return (SCSI_RESET_SUCCESS | SCSI_RESET_HOST_RESET);
-}
-
-#endif /* 0 */
/************************************************************************/
-static void tul_exec_scb(HCS * pCurHcb, SCB * pCurScb)
+static void initio_exec_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
- ULONG flags;
+ unsigned long flags;
- pCurScb->SCB_Mode = 0;
+ scb->mode = 0;
- pCurScb->SCB_SGIdx = 0;
- pCurScb->SCB_SGMax = pCurScb->SCB_SGLen;
+ scb->sgidx = 0;
+ scb->sgmax = scb->sglen;
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
+ spin_lock_irqsave(&host->semaph_lock, flags);
- tul_append_pend_scb(pCurHcb, pCurScb); /* Append this SCB to Pending queue */
+ initio_append_pend_scb(host, scb); /* Append this SCB to Pending queue */
/* VVVVV 07/21/98 */
- if (pCurHcb->HCS_Semaph == 1) {
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
- /* disable Jasmin SCSI Int */
- pCurHcb->HCS_Semaph = 0;
-
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
+ if (host->semaph == 1) {
+ /* Disable Jasmin SCSI Int */
+ outb(0x1F, host->addr + TUL_Mask);
+ host->semaph = 0;
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
- tulip_main(pCurHcb);
+ tulip_main(host);
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
-
- pCurHcb->HCS_Semaph = 1;
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x0F);
+ spin_lock_irqsave(&host->semaph_lock, flags);
+ host->semaph = 1;
+ outb(0x0F, host->addr + TUL_Mask);
}
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
return;
}
/***************************************************************************/
-static int tul_isr(HCS * pCurHcb)
+static int initio_isr(struct initio_host * host)
{
- /* Enter critical section */
-
- if (TUL_RD(pCurHcb->HCS_Base, TUL_Int) & TSS_INT_PENDING) {
- if (pCurHcb->HCS_Semaph == 1) {
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
+ if (inb(host->addr + TUL_Int) & TSS_INT_PENDING) {
+ if (host->semaph == 1) {
+ outb(0x1F, host->addr + TUL_Mask);
/* Disable Tulip SCSI Int */
- pCurHcb->HCS_Semaph = 0;
+ host->semaph = 0;
- tulip_main(pCurHcb);
+ tulip_main(host);
- pCurHcb->HCS_Semaph = 1;
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x0F);
- return (1);
+ host->semaph = 1;
+ outb(0x0F, host->addr + TUL_Mask);
+ return 1;
}
}
- return (0);
+ return 0;
}
-/***************************************************************************/
-int tulip_main(HCS * pCurHcb)
+static int tulip_main(struct initio_host * host)
{
- SCB *pCurScb;
+ struct scsi_ctrl_blk *scb;
for (;;) {
-
- tulip_scsi(pCurHcb); /* Call tulip_scsi */
-
- while ((pCurScb = tul_find_done_scb(pCurHcb)) != NULL) { /* find done entry */
- if (pCurScb->SCB_TaStat == INI_QUEUE_FULL) {
- pCurHcb->HCS_MaxTags[pCurScb->SCB_Target] =
- pCurHcb->HCS_ActTags[pCurScb->SCB_Target] - 1;
- pCurScb->SCB_TaStat = 0;
- tul_append_pend_scb(pCurHcb, pCurScb);
+ tulip_scsi(host); /* Call tulip_scsi */
+
+ /* Walk the list of completed SCBs */
+ while ((scb = initio_find_done_scb(host)) != NULL) { /* find done entry */
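+			/* Target reported queue full: lower its tag limit and requeue the command */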
+ if (scb->tastat == INI_QUEUE_FULL) {
+ host->max_tags[scb->target] =
+ host->act_tags[scb->target] - 1;
+ scb->tastat = 0;
+ initio_append_pend_scb(host, scb);
continue;
}
- if (!(pCurScb->SCB_Mode & SCM_RSENS)) { /* not in auto req. sense mode */
- if (pCurScb->SCB_TaStat == 2) {
+ if (!(scb->mode & SCM_RSENS)) { /* not in auto req. sense mode */
+ if (scb->tastat == 2) {
/* clr sync. nego flag */
- if (pCurScb->SCB_Flags & SCF_SENSE) {
- BYTE len;
- len = pCurScb->SCB_SenseLen;
+ if (scb->flags & SCF_SENSE) {
+ u8 len;
+ len = scb->senselen;
if (len == 0)
len = 1;
- pCurScb->SCB_BufLen = pCurScb->SCB_SenseLen;
- pCurScb->SCB_BufPtr = pCurScb->SCB_SensePtr;
- pCurScb->SCB_Flags &= ~(SCF_SG | SCF_DIR); /* for xfer_data_in */
-/* pCurScb->SCB_Flags |= SCF_NO_DCHK; */
- /* so, we won't report worng direction in xfer_data_in,
+ scb->buflen = scb->senselen;
+ scb->bufptr = scb->senseptr;
+ scb->flags &= ~(SCF_SG | SCF_DIR); /* for xfer_data_in */
+ /* so, we won't report wrong direction in xfer_data_in,
and won't report HOST_DO_DU in state_6 */
- pCurScb->SCB_Mode = SCM_RSENS;
- pCurScb->SCB_Ident &= 0xBF; /* Disable Disconnect */
- pCurScb->SCB_TagMsg = 0;
- pCurScb->SCB_TaStat = 0;
- pCurScb->SCB_CDBLen = 6;
- pCurScb->SCB_CDB[0] = SCSICMD_RequestSense;
- pCurScb->SCB_CDB[1] = 0;
- pCurScb->SCB_CDB[2] = 0;
- pCurScb->SCB_CDB[3] = 0;
- pCurScb->SCB_CDB[4] = len;
- pCurScb->SCB_CDB[5] = 0;
- tul_push_pend_scb(pCurHcb, pCurScb);
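+				/* Switch into request sense mode and build a 6 byte REQUEST SENSE CDB */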
+ scb->mode = SCM_RSENS;
+ scb->ident &= 0xBF; /* Disable Disconnect */
+ scb->tagmsg = 0;
+ scb->tastat = 0;
+ scb->cdblen = 6;
+ scb->cdb[0] = SCSICMD_RequestSense;
+ scb->cdb[1] = 0;
+ scb->cdb[2] = 0;
+ scb->cdb[3] = 0;
+ scb->cdb[4] = len;
+ scb->cdb[5] = 0;
+ initio_push_pend_scb(host, scb);
break;
}
}
} else { /* in request sense mode */
- if (pCurScb->SCB_TaStat == 2) { /* check contition status again after sending
+			if (scb->tastat == 2) {	/* check condition status again after sending
							   request sense cmd 0x3 */
- pCurScb->SCB_HaStat = HOST_BAD_PHAS;
+ scb->hastat = HOST_BAD_PHAS;
}
- pCurScb->SCB_TaStat = 2;
+ scb->tastat = 2;
}
- pCurScb->SCB_Flags |= SCF_DONE;
- if (pCurScb->SCB_Flags & SCF_POST) {
- (*pCurScb->SCB_Post) ((BYTE *) pCurHcb, (BYTE *) pCurScb);
+ scb->flags |= SCF_DONE;
+ if (scb->flags & SCF_POST) {
+ /* FIXME: only one post method and lose casts */
+ (*scb->post) ((u8 *) host, (u8 *) scb);
}
} /* while */
-
/* find_active: */
- if (TUL_RD(pCurHcb->HCS_Base, TUL_SStatus0) & TSS_INT_PENDING)
+ if (inb(host->addr + TUL_SStatus0) & TSS_INT_PENDING)
continue;
-
- if (pCurHcb->HCS_ActScb) { /* return to OS and wait for xfer_done_ISR/Selected_ISR */
+ if (host->active) /* return to OS and wait for xfer_done_ISR/Selected_ISR */
return 1; /* return to OS, enable interrupt */
- }
/* Check pending SCB */
- if (tul_find_first_pend_scb(pCurHcb) == NULL) {
+ if (initio_find_first_pend_scb(host) == NULL)
return 1; /* return to OS, enable interrupt */
- }
} /* End of for loop */
/* statement won't reach here */
}
-
-
-
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
-/***************************************************************************/
-/***************************************************************************/
-/***************************************************************************/
-/***************************************************************************/
-
-/***************************************************************************/
-void tulip_scsi(HCS * pCurHcb)
+static void tulip_scsi(struct initio_host * host)
{
- SCB *pCurScb;
- TCS *pCurTcb;
+ struct scsi_ctrl_blk *scb;
+ struct target_control *active_tc;
/* make sure to service interrupt asap */
-
- if ((pCurHcb->HCS_JSStatus0 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus0)) & TSS_INT_PENDING) {
-
- pCurHcb->HCS_Phase = pCurHcb->HCS_JSStatus0 & TSS_PH_MASK;
- pCurHcb->HCS_JSStatus1 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus1);
- pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt);
- if (pCurHcb->HCS_JSInt & TSS_SCSIRST_INT) { /* SCSI bus reset detected */
- int_tul_scsi_rst(pCurHcb);
+ if ((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING) {
+ host->phase = host->jsstatus0 & TSS_PH_MASK;
+ host->jsstatus1 = inb(host->addr + TUL_SStatus1);
+ host->jsint = inb(host->addr + TUL_SInt);
+ if (host->jsint & TSS_SCSIRST_INT) { /* SCSI bus reset detected */
+ int_initio_scsi_rst(host);
return;
}
- if (pCurHcb->HCS_JSInt & TSS_RESEL_INT) { /* if selected/reselected interrupt */
- if (int_tul_resel(pCurHcb) == 0)
- tul_next_state(pCurHcb);
+ if (host->jsint & TSS_RESEL_INT) { /* if selected/reselected interrupt */
+ if (int_initio_resel(host) == 0)
+ initio_next_state(host);
return;
}
- if (pCurHcb->HCS_JSInt & TSS_SEL_TIMEOUT) {
- int_tul_busfree(pCurHcb);
+ if (host->jsint & TSS_SEL_TIMEOUT) {
+ int_initio_busfree(host);
return;
}
- if (pCurHcb->HCS_JSInt & TSS_DISC_INT) { /* BUS disconnection */
- int_tul_busfree(pCurHcb); /* unexpected bus free or sel timeout */
+ if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
+ int_initio_busfree(host); /* unexpected bus free or sel timeout */
return;
}
- if (pCurHcb->HCS_JSInt & (TSS_FUNC_COMP | TSS_BUS_SERV)) { /* func complete or Bus service */
- if ((pCurScb = pCurHcb->HCS_ActScb) != NULL)
- tul_next_state(pCurHcb);
+ if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV)) { /* func complete or Bus service */
+ if ((scb = host->active) != NULL)
+ initio_next_state(host);
return;
}
}
- if (pCurHcb->HCS_ActScb != NULL)
+ if (host->active != NULL)
return;
- if ((pCurScb = tul_find_first_pend_scb(pCurHcb)) == NULL)
+ if ((scb = initio_find_first_pend_scb(host)) == NULL)
return;
/* program HBA's SCSI ID & target SCSI ID */
- TUL_WR(pCurHcb->HCS_Base + TUL_SScsiId,
- (pCurHcb->HCS_SCSI_ID << 4) | (pCurScb->SCB_Target & 0x0F));
- if (pCurScb->SCB_Opcode == ExecSCSI) {
- pCurTcb = &pCurHcb->HCS_Tcs[pCurScb->SCB_Target];
+ outb((host->scsi_id << 4) | (scb->target & 0x0F),
+ host->addr + TUL_SScsiId);
+ if (scb->opcode == ExecSCSI) {
+ active_tc = &host->targets[scb->target];
- if (pCurScb->SCB_TagMsg)
- pCurTcb->TCS_DrvFlags |= TCF_DRV_EN_TAG;
+ if (scb->tagmsg)
+ active_tc->drv_flags |= TCF_DRV_EN_TAG;
else
- pCurTcb->TCS_DrvFlags &= ~TCF_DRV_EN_TAG;
+ active_tc->drv_flags &= ~TCF_DRV_EN_TAG;
- TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, pCurTcb->TCS_JS_Period);
- if ((pCurTcb->TCS_Flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) { /* do wdtr negotiation */
- tul_select_atn_stop(pCurHcb, pCurScb);
+ outb(active_tc->js_period, host->addr + TUL_SPeriod);
+ if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) { /* do wdtr negotiation */
+ initio_select_atn_stop(host, scb);
} else {
- if ((pCurTcb->TCS_Flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) { /* do sync negotiation */
- tul_select_atn_stop(pCurHcb, pCurScb);
+ if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) { /* do sync negotiation */
+ initio_select_atn_stop(host, scb);
} else {
- if (pCurScb->SCB_TagMsg)
- tul_select_atn3(pCurHcb, pCurScb);
+ if (scb->tagmsg)
+ initio_select_atn3(host, scb);
else
- tul_select_atn(pCurHcb, pCurScb);
+ initio_select_atn(host, scb);
}
}
- if (pCurScb->SCB_Flags & SCF_POLL) {
- while (wait_tulip(pCurHcb) != -1) {
- if (tul_next_state(pCurHcb) == -1)
+ if (scb->flags & SCF_POLL) {
+ while (wait_tulip(host) != -1) {
+ if (initio_next_state(host) == -1)
break;
}
}
- } else if (pCurScb->SCB_Opcode == BusDevRst) {
- tul_select_atn_stop(pCurHcb, pCurScb);
- pCurScb->SCB_NxtStat = 8;
- if (pCurScb->SCB_Flags & SCF_POLL) {
- while (wait_tulip(pCurHcb) != -1) {
- if (tul_next_state(pCurHcb) == -1)
+ } else if (scb->opcode == BusDevRst) {
+ initio_select_atn_stop(host, scb);
+ scb->next_state = 8;
+ if (scb->flags & SCF_POLL) {
+ while (wait_tulip(host) != -1) {
+ if (initio_next_state(host) == -1)
break;
}
}
- } else if (pCurScb->SCB_Opcode == AbortCmd) {
- if (tul_abort_srb(pCurHcb, pCurScb->SCB_Srb) != 0) {
-
-
- tul_unlink_pend_scb(pCurHcb, pCurScb);
-
- tul_release_scb(pCurHcb, pCurScb);
+ } else if (scb->opcode == AbortCmd) {
+ if (initio_abort_srb(host, scb->srb) != 0) {
+ initio_unlink_pend_scb(host, scb);
+ initio_release_scb(host, scb);
} else {
- pCurScb->SCB_Opcode = BusDevRst;
- tul_select_atn_stop(pCurHcb, pCurScb);
- pCurScb->SCB_NxtStat = 8;
+ scb->opcode = BusDevRst;
+ initio_select_atn_stop(host, scb);
+ scb->next_state = 8;
}
-
-/* 08/03/98 */
} else {
- tul_unlink_pend_scb(pCurHcb, pCurScb);
- pCurScb->SCB_HaStat = 0x16; /* bad command */
- tul_append_done_scb(pCurHcb, pCurScb);
+ initio_unlink_pend_scb(host, scb);
+ scb->hastat = 0x16; /* bad command */
+ initio_append_done_scb(host, scb);
}
return;
}
+/**
+ * initio_next_state - Next SCSI state
+ * @host: InitIO host we are processing
+ *
+ * Progress the active command block along the state machine
+ * until we hit a state which we must wait for activity to occur.
+ *
+ * Returns zero or a negative code.
+ */
-/***************************************************************************/
-int tul_next_state(HCS * pCurHcb)
+static int initio_next_state(struct initio_host * host)
{
int next;
- next = pCurHcb->HCS_ActScb->SCB_NxtStat;
+ next = host->active->next_state;
for (;;) {
switch (next) {
case 1:
- next = tul_state_1(pCurHcb);
+ next = initio_state_1(host);
break;
case 2:
- next = tul_state_2(pCurHcb);
+ next = initio_state_2(host);
break;
case 3:
- next = tul_state_3(pCurHcb);
+ next = initio_state_3(host);
break;
case 4:
- next = tul_state_4(pCurHcb);
+ next = initio_state_4(host);
break;
case 5:
- next = tul_state_5(pCurHcb);
+ next = initio_state_5(host);
break;
case 6:
- next = tul_state_6(pCurHcb);
+ next = initio_state_6(host);
break;
case 7:
- next = tul_state_7(pCurHcb);
+ next = initio_state_7(host);
break;
case 8:
- return (tul_bus_device_reset(pCurHcb));
+ return initio_bus_device_reset(host);
default:
- return (tul_bad_seq(pCurHcb));
+ return initio_bad_seq(host);
}
if (next <= 0)
return next;
@@ -1554,338 +1314,363 @@ int tul_next_state(HCS * pCurHcb)
}
-/***************************************************************************/
-/* sTate after selection with attention & stop */
-int tul_state_1(HCS * pCurHcb)
+/**
+ * initio_state_1 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * Perform SCSI state processing for Select/Attention/Stop
+ */
+
+static int initio_state_1(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
- TCS *pCurTcb = pCurHcb->HCS_ActTcs;
+ struct scsi_ctrl_blk *scb = host->active;
+ struct target_control *active_tc = host->active_tc;
#if DEBUG_STATE
printk("-s1-");
#endif
- tul_unlink_pend_scb(pCurHcb, pCurScb);
- tul_append_busy_scb(pCurHcb, pCurScb);
+ /* Move the SCB from pending to busy */
+ initio_unlink_pend_scb(host, scb);
+ initio_append_busy_scb(host, scb);
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurTcb->TCS_SConfig0);
+ outb(active_tc->sconfig0, host->addr + TUL_SConfig );
/* ATN on */
- if (pCurHcb->HCS_Phase == MSG_OUT) {
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, (TSC_EN_BUS_IN | TSC_HW_RESELECT));
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_Ident);
-
- if (pCurScb->SCB_TagMsg) {
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_TagMsg);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_TagId);
- }
- if ((pCurTcb->TCS_Flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) {
-
- pCurTcb->TCS_Flags |= TCF_WDTR_DONE;
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_EXTEND);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 2); /* Extended msg length */
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 3); /* Sync request */
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 1); /* Start from 16 bits */
- } else if ((pCurTcb->TCS_Flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) {
-
- pCurTcb->TCS_Flags |= TCF_SYNC_DONE;
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_EXTEND);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 3); /* extended msg length */
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 1); /* sync request */
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, tul_rate_tbl[pCurTcb->TCS_Flags & TCF_SCSI_RATE]);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MAX_OFFSET); /* REQ/ACK offset */
- }
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- if (wait_tulip(pCurHcb) == -1)
- return (-1);
- }
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, (TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)));
- return (3);
-}
-
+ if (host->phase == MSG_OUT) {
+ outb(TSC_EN_BUS_IN | TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
+ outb(scb->ident, host->addr + TUL_SFifo);
+
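+		/* Queue the optional tag bytes and any WDTR/SDTR negotiation message into the FIFO */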
+ if (scb->tagmsg) {
+ outb(scb->tagmsg, host->addr + TUL_SFifo);
+ outb(scb->tagid, host->addr + TUL_SFifo);
+ }
+ if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) {
+ active_tc->flags |= TCF_WDTR_DONE;
+ outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(2, host->addr + TUL_SFifo); /* Extended msg length */
+				outb(3, host->addr + TUL_SFifo);	/* Wide request */
+ outb(1, host->addr + TUL_SFifo); /* Start from 16 bits */
+ } else if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) {
+ active_tc->flags |= TCF_SYNC_DONE;
+ outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(3, host->addr + TUL_SFifo); /* extended msg length */
+ outb(1, host->addr + TUL_SFifo); /* sync request */
+ outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
+ outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */
+ }
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ }
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal);
+	/* Go to state 3: before CDB xfer */
+ return 3;
+}
+
+
+/**
+ * initio_state_2 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * state after selection with attention
+ * state after selection with attention3
+ */
-/***************************************************************************/
-/* state after selection with attention */
-/* state after selection with attention3 */
-int tul_state_2(HCS * pCurHcb)
+static int initio_state_2(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
- TCS *pCurTcb = pCurHcb->HCS_ActTcs;
+ struct scsi_ctrl_blk *scb = host->active;
+ struct target_control *active_tc = host->active_tc;
#if DEBUG_STATE
printk("-s2-");
#endif
- tul_unlink_pend_scb(pCurHcb, pCurScb);
- tul_append_busy_scb(pCurHcb, pCurScb);
+ initio_unlink_pend_scb(host, scb);
+ initio_append_busy_scb(host, scb);
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurTcb->TCS_SConfig0);
+ outb(active_tc->sconfig0, host->addr + TUL_SConfig);
- if (pCurHcb->HCS_JSStatus1 & TSS_CMD_PH_CMP) {
- return (4);
- }
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, (TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)));
- return (3);
+ if (host->jsstatus1 & TSS_CMD_PH_CMP)
+ return 4;
+
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal);
+	/* Go to state 3: before CDB xfer */
+ return 3;
}
-/***************************************************************************/
-/* state before CDB xfer is done */
-int tul_state_3(HCS * pCurHcb)
+/**
+ * initio_state_3 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * state before CDB xfer is done
+ */
+
+static int initio_state_3(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
- TCS *pCurTcb = pCurHcb->HCS_ActTcs;
+ struct scsi_ctrl_blk *scb = host->active;
+ struct target_control *active_tc = host->active_tc;
int i;
#if DEBUG_STATE
printk("-s3-");
#endif
for (;;) {
- switch (pCurHcb->HCS_Phase) {
+ switch (host->phase) {
case CMD_OUT: /* Command out phase */
- for (i = 0; i < (int) pCurScb->SCB_CDBLen; i++)
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_CDB[i]);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- if (wait_tulip(pCurHcb) == -1)
- return (-1);
- if (pCurHcb->HCS_Phase == CMD_OUT) {
- return (tul_bad_seq(pCurHcb));
- }
- return (4);
+ for (i = 0; i < (int) scb->cdblen; i++)
+ outb(scb->cdb[i], host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ if (host->phase == CMD_OUT)
+ return initio_bad_seq(host);
+ return 4;
case MSG_IN: /* Message in phase */
- pCurScb->SCB_NxtStat = 3;
- if (tul_msgin(pCurHcb) == -1)
- return (-1);
+ scb->next_state = 3;
+ if (initio_msgin(host) == -1)
+ return -1;
break;
case STATUS_IN: /* Status phase */
- if (tul_status_msg(pCurHcb) == -1)
- return (-1);
+ if (initio_status_msg(host) == -1)
+ return -1;
break;
case MSG_OUT: /* Message out phase */
- if (pCurTcb->TCS_Flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) {
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_NOP); /* msg nop */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- if (wait_tulip(pCurHcb) == -1)
- return (-1);
-
+ if (active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) {
+ outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
} else {
- pCurTcb->TCS_Flags |= TCF_SYNC_DONE;
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_EXTEND);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 3); /* ext. msg len */
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 1); /* sync request */
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, tul_rate_tbl[pCurTcb->TCS_Flags & TCF_SCSI_RATE]);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MAX_OFFSET); /* REQ/ACK offset */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- if (wait_tulip(pCurHcb) == -1)
- return (-1);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7));
+ active_tc->flags |= TCF_SYNC_DONE;
+
+ outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(3, host->addr + TUL_SFifo); /* ext. msg len */
+ outb(1, host->addr + TUL_SFifo); /* sync request */
+ outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
+ outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ outb(inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7), host->addr + TUL_SSignal);
}
break;
-
default:
- return (tul_bad_seq(pCurHcb));
+ return initio_bad_seq(host);
}
}
}
+/**
+ * initio_state_4 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * State 4: set up the data movement for the command. Commands with
+ * no data transfer skip straight to state 6; otherwise hand the
+ * DATA IN/OUT phases to the DMA transfer paths as the target
+ * switches phase.
+ */
-/***************************************************************************/
-int tul_state_4(HCS * pCurHcb)
+static int initio_state_4(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
+ struct scsi_ctrl_blk *scb = host->active;
#if DEBUG_STATE
printk("-s4-");
#endif
- if ((pCurScb->SCB_Flags & SCF_DIR) == SCF_NO_XF) {
- return (6); /* Go to state 6 */
+ if ((scb->flags & SCF_DIR) == SCF_NO_XF) {
+ return 6; /* Go to state 6 (After data) */
}
for (;;) {
- if (pCurScb->SCB_BufLen == 0)
- return (6); /* Go to state 6 */
+ if (scb->buflen == 0)
+ return 6;
- switch (pCurHcb->HCS_Phase) {
+ switch (host->phase) {
case STATUS_IN: /* Status phase */
- if ((pCurScb->SCB_Flags & SCF_DIR) != 0) { /* if direction bit set then report data underrun */
- pCurScb->SCB_HaStat = HOST_DO_DU;
- }
- if ((tul_status_msg(pCurHcb)) == -1)
- return (-1);
+ if ((scb->flags & SCF_DIR) != 0) /* if direction bit set then report data underrun */
+ scb->hastat = HOST_DO_DU;
+ if ((initio_status_msg(host)) == -1)
+ return -1;
break;
case MSG_IN: /* Message in phase */
- pCurScb->SCB_NxtStat = 0x4;
- if (tul_msgin(pCurHcb) == -1)
- return (-1);
+ scb->next_state = 0x4;
+ if (initio_msgin(host) == -1)
+ return -1;
break;
case MSG_OUT: /* Message out phase */
- if (pCurHcb->HCS_JSStatus0 & TSS_PAR_ERROR) {
- pCurScb->SCB_BufLen = 0;
- pCurScb->SCB_HaStat = HOST_DO_DU;
- if (tul_msgout_ide(pCurHcb) == -1)
- return (-1);
- return (6); /* Go to state 6 */
+ if (host->jsstatus0 & TSS_PAR_ERROR) {
+ scb->buflen = 0;
+ scb->hastat = HOST_DO_DU;
+ if (initio_msgout_ide(host) == -1)
+ return -1;
+ return 6;
} else {
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_NOP); /* msg nop */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- if (wait_tulip(pCurHcb) == -1)
- return (-1);
+ outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
}
break;
case DATA_IN: /* Data in phase */
- return (tul_xfer_data_in(pCurHcb));
+ return initio_xfer_data_in(host);
case DATA_OUT: /* Data out phase */
- return (tul_xfer_data_out(pCurHcb));
+ return initio_xfer_data_out(host);
default:
- return (tul_bad_seq(pCurHcb));
+ return initio_bad_seq(host);
}
}
}
-/***************************************************************************/
-/* state after dma xfer done or phase change before xfer done */
-int tul_state_5(HCS * pCurHcb)
+/**
+ * initio_state_5 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * State after dma xfer done or phase change before xfer done
+ */
+
+static int initio_state_5(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
+ struct scsi_ctrl_blk *scb = host->active;
long cnt, xcnt; /* cannot use unsigned !! code: if (xcnt < 0) */
#if DEBUG_STATE
printk("-s5-");
#endif
-/*------ get remaining count -------*/
-
- cnt = TUL_RDLONG(pCurHcb->HCS_Base, TUL_SCnt0) & 0x0FFFFFF;
+ /*------ get remaining count -------*/
+ cnt = inl(host->addr + TUL_SCnt0) & 0x0FFFFFF;
- if (TUL_RD(pCurHcb->HCS_Base, TUL_XCmd) & 0x20) {
+ if (inb(host->addr + TUL_XCmd) & 0x20) {
/* ----------------------- DATA_IN ----------------------------- */
/* check scsi parity error */
- if (pCurHcb->HCS_JSStatus0 & TSS_PAR_ERROR) {
- pCurScb->SCB_HaStat = HOST_DO_DU;
- }
- if (TUL_RD(pCurHcb->HCS_Base, TUL_XStatus) & XPEND) { /* DMA xfer pending, Send STOP */
+ if (host->jsstatus0 & TSS_PAR_ERROR)
+ scb->hastat = HOST_DO_DU;
+ if (inb(host->addr + TUL_XStatus) & XPEND) { /* DMA xfer pending, Send STOP */
/* tell Hardware scsi xfer has been terminated */
- TUL_WR(pCurHcb->HCS_Base + TUL_XCtrl, TUL_RD(pCurHcb->HCS_Base, TUL_XCtrl) | 0x80);
+ outb(inb(host->addr + TUL_XCtrl) | 0x80, host->addr + TUL_XCtrl);
/* wait until DMA xfer not pending */
- while (TUL_RD(pCurHcb->HCS_Base, TUL_XStatus) & XPEND);
+ while (inb(host->addr + TUL_XStatus) & XPEND)
+ cpu_relax();
}
} else {
-/*-------- DATA OUT -----------*/
- if ((TUL_RD(pCurHcb->HCS_Base, TUL_SStatus1) & TSS_XFER_CMP) == 0) {
- if (pCurHcb->HCS_ActTcs->TCS_JS_Period & TSC_WIDE_SCSI)
- cnt += (TUL_RD(pCurHcb->HCS_Base, TUL_SFifoCnt) & 0x1F) << 1;
+ /*-------- DATA OUT -----------*/
+ if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0) {
+ if (host->active_tc->js_period & TSC_WIDE_SCSI)
+ cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F) << 1;
else
- cnt += (TUL_RD(pCurHcb->HCS_Base, TUL_SFifoCnt) & 0x1F);
+ cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F);
}
- if (TUL_RD(pCurHcb->HCS_Base, TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */
- TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_ABT);
+ if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */
+ outb(TAX_X_ABT, host->addr + TUL_XCmd);
/* wait Abort DMA xfer done */
- while ((TUL_RD(pCurHcb->HCS_Base, TUL_Int) & XABT) == 0);
+ while ((inb(host->addr + TUL_Int) & XABT) == 0)
+ cpu_relax();
}
- if ((cnt == 1) && (pCurHcb->HCS_Phase == DATA_OUT)) {
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- if (wait_tulip(pCurHcb) == -1) {
- return (-1);
- }
+ if ((cnt == 1) && (host->phase == DATA_OUT)) {
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
cnt = 0;
} else {
- if ((TUL_RD(pCurHcb->HCS_Base, TUL_SStatus1) & TSS_XFER_CMP) == 0)
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
+ if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0)
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
}
}
-
if (cnt == 0) {
- pCurScb->SCB_BufLen = 0;
- return (6); /* Go to state 6 */
+ scb->buflen = 0;
+ return 6; /* After Data */
}
/* Update active data pointer */
- xcnt = (long) pCurScb->SCB_BufLen - cnt; /* xcnt== bytes already xferred */
- pCurScb->SCB_BufLen = (U32) cnt; /* cnt == bytes left to be xferred */
- if (pCurScb->SCB_Flags & SCF_SG) {
- register SG *sgp;
- ULONG i;
-
- sgp = &pCurScb->SCB_SGList[pCurScb->SCB_SGIdx];
- for (i = pCurScb->SCB_SGIdx; i < pCurScb->SCB_SGMax; sgp++, i++) {
- xcnt -= (long) sgp->SG_Len;
+ xcnt = (long) scb->buflen - cnt; /* xcnt== bytes already xferred */
+ scb->buflen = (u32) cnt; /* cnt == bytes left to be xferred */
+ if (scb->flags & SCF_SG) {
+ struct sg_entry *sgp;
+ unsigned long i;
+
+ sgp = &scb->sglist[scb->sgidx];
+ for (i = scb->sgidx; i < scb->sgmax; sgp++, i++) {
+ xcnt -= (long) sgp->len;
if (xcnt < 0) { /* this sgp xfer half done */
- xcnt += (long) sgp->SG_Len; /* xcnt == bytes xferred in this sgp */
- sgp->SG_Ptr += (U32) xcnt; /* new ptr to be xfer */
- sgp->SG_Len -= (U32) xcnt; /* new len to be xfer */
- pCurScb->SCB_BufPtr += ((U32) (i - pCurScb->SCB_SGIdx) << 3);
+ xcnt += (long) sgp->len; /* xcnt == bytes xferred in this sgp */
+ sgp->data += (u32) xcnt; /* new ptr to be xfer */
+ sgp->len -= (u32) xcnt; /* new len to be xfer */
+ scb->bufptr += ((u32) (i - scb->sgidx) << 3);
/* new SG table ptr */
- pCurScb->SCB_SGLen = (BYTE) (pCurScb->SCB_SGMax - i);
+ scb->sglen = (u8) (scb->sgmax - i);
/* new SG table len */
- pCurScb->SCB_SGIdx = (WORD) i;
+ scb->sgidx = (u16) i;
/* for next disc and come in this loop */
- return (4); /* Go to state 4 */
+ return 4; /* Go to state 4 */
}
/* else (xcnt >= 0 , i.e. this sgp already xferred */
} /* for */
- return (6); /* Go to state 6 */
+ return 6; /* Go to state 6 */
} else {
- pCurScb->SCB_BufPtr += (U32) xcnt;
+ scb->bufptr += (u32) xcnt;
}
- return (4); /* Go to state 4 */
+ return 4; /* Go to state 4 */
}
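The data-pointer update at the end of initio_state_5 is the least obvious part of this hunk: after a partial transfer the driver walks the scatter/gather list, finds the entry at which the transfer stopped, and shrinks that entry so it describes only the untransferred tail. The standalone sketch below shows the same bookkeeping in isolation; the struct and function names are illustrative only and are not part of the patch.

/* Illustrative sketch only: walk a scatter list and split the entry at
 * which a partially completed transfer stopped. Names and types are
 * hypothetical; the driver does the equivalent on struct sg_entry. */
#include <stdio.h>

struct sg_sketch {
	unsigned long data;	/* bus address of this segment */
	unsigned long len;	/* bytes still described by this segment */
};

/* Return the index of the first segment with data left to move, adjusting
 * that segment so it covers only the untransferred tail. */
static unsigned int sg_rewind(struct sg_sketch *sg, unsigned int nseg,
			      long transferred)
{
	unsigned int i;

	for (i = 0; i < nseg; i++) {
		if (transferred < (long)sg[i].len) {
			sg[i].data += transferred;	/* skip what was sent */
			sg[i].len  -= transferred;
			return i;
		}
		transferred -= (long)sg[i].len;		/* segment fully done */
	}
	return nseg;					/* everything transferred */
}

int main(void)
{
	struct sg_sketch sg[3] = { {0x1000, 512}, {0x2000, 512}, {0x3000, 512} };
	unsigned int idx = sg_rewind(sg, 3, 700);

	printf("resume at segment %u, addr 0x%lx, %lu bytes left\n",
	       idx, sg[idx].data, sg[idx].len);
	return 0;
}

Compiled standalone, the example reports that a 700-byte partial transfer resumes in segment 1 with the segment's address and length adjusted accordingly.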
-/***************************************************************************/
-/* state after Data phase */
-int tul_state_6(HCS * pCurHcb)
+/**
+ * initio_state_6 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * State after Data phase
+ */
+
+static int initio_state_6(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
+ struct scsi_ctrl_blk *scb = host->active;
#if DEBUG_STATE
printk("-s6-");
#endif
for (;;) {
- switch (pCurHcb->HCS_Phase) {
+ switch (host->phase) {
case STATUS_IN: /* Status phase */
- if ((tul_status_msg(pCurHcb)) == -1)
- return (-1);
+ if ((initio_status_msg(host)) == -1)
+ return -1;
break;
case MSG_IN: /* Message in phase */
- pCurScb->SCB_NxtStat = 6;
- if ((tul_msgin(pCurHcb)) == -1)
- return (-1);
+ scb->next_state = 6;
+ if ((initio_msgin(host)) == -1)
+ return -1;
break;
case MSG_OUT: /* Message out phase */
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_NOP); /* msg nop */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- if (wait_tulip(pCurHcb) == -1)
- return (-1);
+ outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
break;
case DATA_IN: /* Data in phase */
- return (tul_xpad_in(pCurHcb));
+ return initio_xpad_in(host);
case DATA_OUT: /* Data out phase */
- return (tul_xpad_out(pCurHcb));
+ return initio_xpad_out(host);
default:
- return (tul_bad_seq(pCurHcb));
+ return initio_bad_seq(host);
}
}
}
-/***************************************************************************/
-int tul_state_7(HCS * pCurHcb)
+/**
+ * initio_state_7 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * State 7: drain any bytes left in the SCSI FIFO. Ending up here
+ * while still in a data phase is a sequence error; otherwise
+ * continue with state 6 (after the data phase).
+ */
+
+int initio_state_7(struct initio_host * host)
{
int cnt, i;
@@ -1893,1139 +1678,1029 @@ int tul_state_7(HCS * pCurHcb)
printk("-s7-");
#endif
/* flush SCSI FIFO */
- cnt = TUL_RD(pCurHcb->HCS_Base, TUL_SFifoCnt) & 0x1F;
+ cnt = inb(host->addr + TUL_SFifoCnt) & 0x1F;
if (cnt) {
for (i = 0; i < cnt; i++)
- TUL_RD(pCurHcb->HCS_Base, TUL_SFifo);
+ inb(host->addr + TUL_SFifo);
}
- switch (pCurHcb->HCS_Phase) {
+ switch (host->phase) {
case DATA_IN: /* Data in phase */
case DATA_OUT: /* Data out phase */
- return (tul_bad_seq(pCurHcb));
+ return initio_bad_seq(host);
default:
- return (6); /* Go to state 6 */
+ return 6; /* Go to state 6 */
}
}
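The numeric values returned by these handlers encode the next step: 1-7 select the next state, 0 returns to the interrupt path to wait for the chip, and -1 means the command completed or failed. The dispatcher that consumes them lives elsewhere in initio.c and is not part of this hunk; the sketch below only illustrates how such a loop could chain the handlers converted above and may differ from the real function.

/* Sketch only: drive the state handlers above until the command either
 * waits for an interrupt (0) or completes/fails (-1). Assumes the
 * initio_state_N() functions from this patch; error handling elided. */
static void initio_run_states(struct initio_host *host, int state)
{
	while (state > 0) {
		switch (state) {
		case 1: state = initio_state_1(host); break;
		case 2: state = initio_state_2(host); break;
		case 3: state = initio_state_3(host); break;
		case 4: state = initio_state_4(host); break;
		case 5: state = initio_state_5(host); break;
		case 6: state = initio_state_6(host); break;
		case 7: state = initio_state_7(host); break;
		default: return;	/* unknown state: bail out */
		}
	}
	/* state == 0: wait for the ISR; state == -1: SCB already finished */
}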
-/***************************************************************************/
-int tul_xfer_data_in(HCS * pCurHcb)
+/**
+ * initio_xfer_data_in - Commence data input
+ * @host: InitIO host in use
+ *
+ * Commence a block of data transfer. The transfer itself will
+ * be managed by the controller and we will get a completion (or
+ * failure) interrupt.
+ */
+static int initio_xfer_data_in(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
+ struct scsi_ctrl_blk *scb = host->active;
- if ((pCurScb->SCB_Flags & SCF_DIR) == SCF_DOUT) {
- return (6); /* wrong direction */
- }
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, pCurScb->SCB_BufLen);
+ if ((scb->flags & SCF_DIR) == SCF_DOUT)
+ return 6; /* wrong direction */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_DMA_IN); /* 7/25/95 */
+ outl(scb->buflen, host->addr + TUL_SCnt0);
+ outb(TSC_XF_DMA_IN, host->addr + TUL_SCmd); /* 7/25/95 */
- if (pCurScb->SCB_Flags & SCF_SG) { /* S/G xfer */
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_XCntH, ((ULONG) pCurScb->SCB_SGLen) << 3);
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_XAddH, pCurScb->SCB_BufPtr);
- TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_SG_IN);
+ if (scb->flags & SCF_SG) { /* S/G xfer */
+ outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH);
+ outl(scb->bufptr, host->addr + TUL_XAddH);
+ outb(TAX_SG_IN, host->addr + TUL_XCmd);
} else {
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_XCntH, pCurScb->SCB_BufLen);
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_XAddH, pCurScb->SCB_BufPtr);
- TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_IN);
+ outl(scb->buflen, host->addr + TUL_XCntH);
+ outl(scb->bufptr, host->addr + TUL_XAddH);
+ outb(TAX_X_IN, host->addr + TUL_XCmd);
}
- pCurScb->SCB_NxtStat = 0x5;
- return (0); /* return to OS, wait xfer done , let jas_isr come in */
+ scb->next_state = 0x5;
+ return 0; /* return to OS, wait xfer done , let jas_isr come in */
}
+/**
+ * initio_xfer_data_out - Commence data output
+ * @host: InitIO host in use
+ *
+ * Commence a block of data transfer. The transfer itself will
+ * be managed by the controller and we will get a completion (or
+ * failure) interrupt.
+ */
-/***************************************************************************/
-int tul_xfer_data_out(HCS * pCurHcb)
+static int initio_xfer_data_out(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
+ struct scsi_ctrl_blk *scb = host->active;
- if ((pCurScb->SCB_Flags & SCF_DIR) == SCF_DIN) {
- return (6); /* wrong direction */
- }
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, pCurScb->SCB_BufLen);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_DMA_OUT);
+ if ((scb->flags & SCF_DIR) == SCF_DIN)
+ return 6; /* wrong direction */
- if (pCurScb->SCB_Flags & SCF_SG) { /* S/G xfer */
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_XCntH, ((ULONG) pCurScb->SCB_SGLen) << 3);
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_XAddH, pCurScb->SCB_BufPtr);
- TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_SG_OUT);
+ outl(scb->buflen, host->addr + TUL_SCnt0);
+ outb(TSC_XF_DMA_OUT, host->addr + TUL_SCmd);
+
+ if (scb->flags & SCF_SG) { /* S/G xfer */
+ outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH);
+ outl(scb->bufptr, host->addr + TUL_XAddH);
+ outb(TAX_SG_OUT, host->addr + TUL_XCmd);
} else {
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_XCntH, pCurScb->SCB_BufLen);
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_XAddH, pCurScb->SCB_BufPtr);
- TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_OUT);
+ outl(scb->buflen, host->addr + TUL_XCntH);
+ outl(scb->bufptr, host->addr + TUL_XAddH);
+ outb(TAX_X_OUT, host->addr + TUL_XCmd);
}
- pCurScb->SCB_NxtStat = 0x5;
- return (0); /* return to OS, wait xfer done , let jas_isr come in */
+ scb->next_state = 0x5;
+ return 0; /* return to OS, wait xfer done , let jas_isr come in */
}
-
-/***************************************************************************/
-int tul_xpad_in(HCS * pCurHcb)
+int initio_xpad_in(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
- TCS *pCurTcb = pCurHcb->HCS_ActTcs;
+ struct scsi_ctrl_blk *scb = host->active;
+ struct target_control *active_tc = host->active_tc;
- if ((pCurScb->SCB_Flags & SCF_DIR) != SCF_NO_DCHK) {
- pCurScb->SCB_HaStat = HOST_DO_DU; /* over run */
- }
+ if ((scb->flags & SCF_DIR) != SCF_NO_DCHK)
+ scb->hastat = HOST_DO_DU; /* over run */
for (;;) {
- if (pCurTcb->TCS_JS_Period & TSC_WIDE_SCSI)
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 2);
+ if (active_tc->js_period & TSC_WIDE_SCSI)
+ outl(2, host->addr + TUL_SCnt0);
else
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1);
+ outl(1, host->addr + TUL_SCnt0);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN);
- if ((wait_tulip(pCurHcb)) == -1) {
- return (-1);
+ outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ if (host->phase != DATA_IN) {
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ return 6;
}
- if (pCurHcb->HCS_Phase != DATA_IN) {
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
- return (6);
- }
- TUL_RD(pCurHcb->HCS_Base, TUL_SFifo);
+ inb(host->addr + TUL_SFifo);
}
}
-int tul_xpad_out(HCS * pCurHcb)
+int initio_xpad_out(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
- TCS *pCurTcb = pCurHcb->HCS_ActTcs;
+ struct scsi_ctrl_blk *scb = host->active;
+ struct target_control *active_tc = host->active_tc;
- if ((pCurScb->SCB_Flags & SCF_DIR) != SCF_NO_DCHK) {
- pCurScb->SCB_HaStat = HOST_DO_DU; /* over run */
- }
+ if ((scb->flags & SCF_DIR) != SCF_NO_DCHK)
+ scb->hastat = HOST_DO_DU; /* over run */
for (;;) {
- if (pCurTcb->TCS_JS_Period & TSC_WIDE_SCSI)
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 2);
+ if (active_tc->js_period & TSC_WIDE_SCSI)
+ outl(2, host->addr + TUL_SCnt0);
else
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1);
+ outl(1, host->addr + TUL_SCnt0);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 0);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- if ((wait_tulip(pCurHcb)) == -1) {
- return (-1);
- }
- if (pCurHcb->HCS_Phase != DATA_OUT) { /* Disable wide CPU to allow read 16 bits */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
- return (6);
+ outb(0, host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if ((wait_tulip(host)) == -1)
+ return -1;
+ if (host->phase != DATA_OUT) { /* Disable wide CPU to allow read 16 bits */
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ return 6;
}
}
}
-
-/***************************************************************************/
-int tul_status_msg(HCS * pCurHcb)
+int initio_status_msg(struct initio_host * host)
{ /* status & MSG_IN */
- SCB *pCurScb = pCurHcb->HCS_ActScb;
- BYTE msg;
+ struct scsi_ctrl_blk *scb = host->active;
+ u8 msg;
+
+ outb(TSC_CMD_COMP, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_CMD_COMP);
- if ((wait_tulip(pCurHcb)) == -1) {
- return (-1);
- }
/* get status */
- pCurScb->SCB_TaStat = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo);
+ scb->tastat = inb(host->addr + TUL_SFifo);
- if (pCurHcb->HCS_Phase == MSG_OUT) {
- if (pCurHcb->HCS_JSStatus0 & TSS_PAR_ERROR) {
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_PARITY);
- } else {
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_NOP);
- }
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- return (wait_tulip(pCurHcb));
- }
- if (pCurHcb->HCS_Phase == MSG_IN) {
- msg = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo);
- if (pCurHcb->HCS_JSStatus0 & TSS_PAR_ERROR) { /* Parity error */
- if ((tul_msgin_accept(pCurHcb)) == -1)
- return (-1);
- if (pCurHcb->HCS_Phase != MSG_OUT)
- return (tul_bad_seq(pCurHcb));
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_PARITY);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- return (wait_tulip(pCurHcb));
+ if (host->phase == MSG_OUT) {
+ if (host->jsstatus0 & TSS_PAR_ERROR)
+ outb(MSG_PARITY, host->addr + TUL_SFifo);
+ else
+ outb(MSG_NOP, host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
+ }
+ if (host->phase == MSG_IN) {
+ msg = inb(host->addr + TUL_SFifo);
+ if (host->jsstatus0 & TSS_PAR_ERROR) { /* Parity error */
+ if ((initio_msgin_accept(host)) == -1)
+ return -1;
+ if (host->phase != MSG_OUT)
+ return initio_bad_seq(host);
+ outb(MSG_PARITY, host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
}
if (msg == 0) { /* Command complete */
- if ((pCurScb->SCB_TaStat & 0x18) == 0x10) { /* No link support */
- return (tul_bad_seq(pCurHcb));
- }
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_MSG_ACCEPT);
- return tul_wait_done_disc(pCurHcb);
+ if ((scb->tastat & 0x18) == 0x10) /* No link support */
+ return initio_bad_seq(host);
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
+ return initio_wait_done_disc(host);
}
- if ((msg == MSG_LINK_COMP) || (msg == MSG_LINK_FLAG)) {
- if ((pCurScb->SCB_TaStat & 0x18) == 0x10)
- return (tul_msgin_accept(pCurHcb));
+ if (msg == MSG_LINK_COMP || msg == MSG_LINK_FLAG) {
+ if ((scb->tastat & 0x18) == 0x10)
+ return initio_msgin_accept(host);
}
}
- return (tul_bad_seq(pCurHcb));
+ return initio_bad_seq(host);
}
-/***************************************************************************/
/* scsi bus free */
-int int_tul_busfree(HCS * pCurHcb)
+int int_initio_busfree(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
+ struct scsi_ctrl_blk *scb = host->active;
- if (pCurScb != NULL) {
- if (pCurScb->SCB_Status & SCB_SELECT) { /* selection timeout */
- tul_unlink_pend_scb(pCurHcb, pCurScb);
- pCurScb->SCB_HaStat = HOST_SEL_TOUT;
- tul_append_done_scb(pCurHcb, pCurScb);
+ if (scb != NULL) {
+ if (scb->status & SCB_SELECT) { /* selection timeout */
+ initio_unlink_pend_scb(host, scb);
+ scb->hastat = HOST_SEL_TOUT;
+ initio_append_done_scb(host, scb);
} else { /* Unexpected bus free */
- tul_unlink_busy_scb(pCurHcb, pCurScb);
- pCurScb->SCB_HaStat = HOST_BUS_FREE;
- tul_append_done_scb(pCurHcb, pCurScb);
+ initio_unlink_busy_scb(host, scb);
+ scb->hastat = HOST_BUS_FREE;
+ initio_append_done_scb(host, scb);
}
- pCurHcb->HCS_ActScb = NULL;
- pCurHcb->HCS_ActTcs = NULL;
+ host->active = NULL;
+ host->active_tc = NULL;
}
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */
- return (-1);
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
+ outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
+ return -1;
}
-/***************************************************************************/
-/* scsi bus reset */
-static int int_tul_scsi_rst(HCS * pCurHcb)
+/**
+ * int_initio_scsi_rst - SCSI reset occurred
+ * @host: Host seeing the reset
+ *
+ * A SCSI bus reset has occurred. Clean up any DMA transfer the
+ * hardware still has pending, then abort all active and
+ * disconnected commands. The mid layer should sort the rest out
+ * for us.
+ */
+
+static int int_initio_scsi_rst(struct initio_host * host)
{
- SCB *pCurScb;
+ struct scsi_ctrl_blk *scb;
int i;
/* if DMA xfer is pending, abort DMA xfer */
- if (TUL_RD(pCurHcb->HCS_Base, TUL_XStatus) & 0x01) {
- TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_ABT | TAX_X_CLR_FIFO);
+ if (inb(host->addr + TUL_XStatus) & 0x01) {
+ outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd);
/* wait Abort DMA xfer done */
- while ((TUL_RD(pCurHcb->HCS_Base, TUL_Int) & 0x04) == 0);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
+ while ((inb(host->addr + TUL_Int) & 0x04) == 0)
+ cpu_relax();
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
}
/* Abort all active & disconnected scb */
- while ((pCurScb = tul_pop_busy_scb(pCurHcb)) != NULL) {
- pCurScb->SCB_HaStat = HOST_BAD_PHAS;
- tul_append_done_scb(pCurHcb, pCurScb);
+ while ((scb = initio_pop_busy_scb(host)) != NULL) {
+ scb->hastat = HOST_BAD_PHAS;
+ initio_append_done_scb(host, scb);
}
- pCurHcb->HCS_ActScb = NULL;
- pCurHcb->HCS_ActTcs = NULL;
+ host->active = NULL;
+ host->active_tc = NULL;
/* clr sync nego. done flag */
- for (i = 0; i < pCurHcb->HCS_MaxTar; i++) {
- pCurHcb->HCS_Tcs[i].TCS_Flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
- }
- return (-1);
+ for (i = 0; i < host->max_tar; i++)
+ host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
+ return -1;
}
+/**
+ * int_initio_scsi_resel - Reselection occurred
+ * @host: InitIO host adapter
+ *
+ * A SCSI reselection event has been signalled and the interrupt
+ * is now being processed. Work out which command block needs attention
+ * and continue processing that command.
+ */
-/***************************************************************************/
-/* scsi reselection */
-int int_tul_resel(HCS * pCurHcb)
+int int_initio_resel(struct initio_host * host)
{
- SCB *pCurScb;
- TCS *pCurTcb;
- BYTE tag, msg = 0;
- BYTE tar, lun;
+ struct scsi_ctrl_blk *scb;
+ struct target_control *active_tc;
+ u8 tag, msg = 0;
+ u8 tar, lun;
- if ((pCurScb = pCurHcb->HCS_ActScb) != NULL) {
- if (pCurScb->SCB_Status & SCB_SELECT) { /* if waiting for selection complete */
- pCurScb->SCB_Status &= ~SCB_SELECT;
- }
- pCurHcb->HCS_ActScb = NULL;
+ if ((scb = host->active) != NULL) {
+ /* FIXME: Why check and not just clear ? */
+ if (scb->status & SCB_SELECT) /* if waiting for selection complete */
+ scb->status &= ~SCB_SELECT;
+ host->active = NULL;
}
/* --------- get target id---------------------- */
- tar = TUL_RD(pCurHcb->HCS_Base, TUL_SBusId);
+ tar = inb(host->addr + TUL_SBusId);
/* ------ get LUN from Identify message----------- */
- lun = TUL_RD(pCurHcb->HCS_Base, TUL_SIdent) & 0x0F;
+ lun = inb(host->addr + TUL_SIdent) & 0x0F;
/* 07/22/98 from 0x1F -> 0x0F */
- pCurTcb = &pCurHcb->HCS_Tcs[tar];
- pCurHcb->HCS_ActTcs = pCurTcb;
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurTcb->TCS_SConfig0);
- TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, pCurTcb->TCS_JS_Period);
-
+ active_tc = &host->targets[tar];
+ host->active_tc = active_tc;
+ outb(active_tc->sconfig0, host->addr + TUL_SConfig);
+ outb(active_tc->js_period, host->addr + TUL_SPeriod);
/* ------------- tag queueing ? ------------------- */
- if (pCurTcb->TCS_DrvFlags & TCF_DRV_EN_TAG) {
- if ((tul_msgin_accept(pCurHcb)) == -1)
- return (-1);
- if (pCurHcb->HCS_Phase != MSG_IN)
+ if (active_tc->drv_flags & TCF_DRV_EN_TAG) {
+ if ((initio_msgin_accept(host)) == -1)
+ return -1;
+ if (host->phase != MSG_IN)
goto no_tag;
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN);
- if ((wait_tulip(pCurHcb)) == -1)
- return (-1);
- msg = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); /* Read Tag Message */
+ outl(1, host->addr + TUL_SCnt0);
+ outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ msg = inb(host->addr + TUL_SFifo); /* Read Tag Message */
- if ((msg < MSG_STAG) || (msg > MSG_OTAG)) /* Is simple Tag */
+ if (msg < MSG_STAG || msg > MSG_OTAG) /* Not a tag message: untagged path */
goto no_tag;
- if ((tul_msgin_accept(pCurHcb)) == -1)
- return (-1);
+ if (initio_msgin_accept(host) == -1)
+ return -1;
- if (pCurHcb->HCS_Phase != MSG_IN)
+ if (host->phase != MSG_IN)
goto no_tag;
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN);
- if ((wait_tulip(pCurHcb)) == -1)
- return (-1);
- tag = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); /* Read Tag ID */
- pCurScb = pCurHcb->HCS_Scb + tag;
- if ((pCurScb->SCB_Target != tar) || (pCurScb->SCB_Lun != lun)) {
- return tul_msgout_abort_tag(pCurHcb);
- }
- if (pCurScb->SCB_Status != SCB_BUSY) { /* 03/24/95 */
- return tul_msgout_abort_tag(pCurHcb);
- }
- pCurHcb->HCS_ActScb = pCurScb;
- if ((tul_msgin_accept(pCurHcb)) == -1)
- return (-1);
+ outl(1, host->addr + TUL_SCnt0);
+ outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ tag = inb(host->addr + TUL_SFifo); /* Read Tag ID */
+ scb = host->scb + tag;
+ if (scb->target != tar || scb->lun != lun) {
+ return initio_msgout_abort_tag(host);
+ }
+ if (scb->status != SCB_BUSY) { /* 03/24/95 */
+ return initio_msgout_abort_tag(host);
+ }
+ host->active = scb;
+ if ((initio_msgin_accept(host)) == -1)
+ return -1;
} else { /* No tag */
no_tag:
- if ((pCurScb = tul_find_busy_scb(pCurHcb, tar | (lun << 8))) == NULL) {
- return tul_msgout_abort_targ(pCurHcb);
+ if ((scb = initio_find_busy_scb(host, tar | (lun << 8))) == NULL) {
+ return initio_msgout_abort_targ(host);
}
- pCurHcb->HCS_ActScb = pCurScb;
- if (!(pCurTcb->TCS_DrvFlags & TCF_DRV_EN_TAG)) {
- if ((tul_msgin_accept(pCurHcb)) == -1)
- return (-1);
+ host->active = scb;
+ if (!(active_tc->drv_flags & TCF_DRV_EN_TAG)) {
+ if ((initio_msgin_accept(host)) == -1)
+ return -1;
}
}
return 0;
}
+/**
+ * int_initio_bad_seq - out of phase
+ * @host: InitIO host flagging event
+ *
+ * We have ended up out of phase somehow. Reset the host controller
+ * and throw all our toys out of the pram. Let the midlayer clean up
+ */
-/***************************************************************************/
-static int int_tul_bad_seq(HCS * pCurHcb)
+static int int_initio_bad_seq(struct initio_host * host)
{ /* target wrong phase */
- SCB *pCurScb;
+ struct scsi_ctrl_blk *scb;
int i;
- tul_reset_scsi(pCurHcb, 10);
+ initio_reset_scsi(host, 10);
- while ((pCurScb = tul_pop_busy_scb(pCurHcb)) != NULL) {
- pCurScb->SCB_HaStat = HOST_BAD_PHAS;
- tul_append_done_scb(pCurHcb, pCurScb);
- }
- for (i = 0; i < pCurHcb->HCS_MaxTar; i++) {
- pCurHcb->HCS_Tcs[i].TCS_Flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
+ while ((scb = initio_pop_busy_scb(host)) != NULL) {
+ scb->hastat = HOST_BAD_PHAS;
+ initio_append_done_scb(host, scb);
}
- return (-1);
+ for (i = 0; i < host->max_tar; i++)
+ host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
+ return -1;
}
-/***************************************************************************/
-int tul_msgout_abort_targ(HCS * pCurHcb)
+/**
+ * initio_msgout_abort_targ - abort the current target
+ * @host: InitIO host
+ *
+ * Abort when the target/lun does not match or when our SCB is not
+ * busy. Used by untagged commands.
+ */
+
+static int initio_msgout_abort_targ(struct initio_host * host)
{
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN));
- if (tul_msgin_accept(pCurHcb) == -1)
- return (-1);
- if (pCurHcb->HCS_Phase != MSG_OUT)
- return (tul_bad_seq(pCurHcb));
+ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
+ if (initio_msgin_accept(host) == -1)
+ return -1;
+ if (host->phase != MSG_OUT)
+ return initio_bad_seq(host);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_ABORT);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
+ outb(MSG_ABORT, host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
- return tul_wait_disc(pCurHcb);
+ return initio_wait_disc(host);
}
-/***************************************************************************/
-int tul_msgout_abort_tag(HCS * pCurHcb)
+/**
+ * initio_msgout_abort_tag - abort a tag
+ * @host: InitIO host
+ *
+ * Abort when the target/lun does not match or when our SCB is not
+ * busy. Used for tagged commands.
+ */
+
+static int initio_msgout_abort_tag(struct initio_host * host)
{
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN));
- if (tul_msgin_accept(pCurHcb) == -1)
- return (-1);
- if (pCurHcb->HCS_Phase != MSG_OUT)
- return (tul_bad_seq(pCurHcb));
+ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
+ if (initio_msgin_accept(host) == -1)
+ return -1;
+ if (host->phase != MSG_OUT)
+ return initio_bad_seq(host);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_ABORT_TAG);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
+ outb(MSG_ABORT_TAG, host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
- return tul_wait_disc(pCurHcb);
+ return initio_wait_disc(host);
}
-/***************************************************************************/
-int tul_msgin(HCS * pCurHcb)
+/**
+ * initio_msgin - Message in
+ * @host: InitIO Host
+ *
+ * Process incoming message
+ */
+static int initio_msgin(struct initio_host * host)
{
- TCS *pCurTcb;
+ struct target_control *active_tc;
for (;;) {
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
-
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN);
- if ((wait_tulip(pCurHcb)) == -1)
- return (-1);
+ outl(1, host->addr + TUL_SCnt0);
+ outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
- switch (TUL_RD(pCurHcb->HCS_Base, TUL_SFifo)) {
+ switch (inb(host->addr + TUL_SFifo)) {
case MSG_DISC: /* Disconnect msg */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_MSG_ACCEPT);
-
- return tul_wait_disc(pCurHcb);
-
+ outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
+ return initio_wait_disc(host);
case MSG_SDP:
case MSG_RESTORE:
case MSG_NOP:
- tul_msgin_accept(pCurHcb);
+ initio_msgin_accept(host);
break;
-
case MSG_REJ: /* Clear ATN first */
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal,
- (TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)));
- pCurTcb = pCurHcb->HCS_ActTcs;
- if ((pCurTcb->TCS_Flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) { /* do sync nego */
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN));
- }
- tul_msgin_accept(pCurHcb);
+ outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)),
+ host->addr + TUL_SSignal);
+ active_tc = host->active_tc;
+ if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) /* do sync nego */
+ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN),
+ host->addr + TUL_SSignal);
+ initio_msgin_accept(host);
break;
-
case MSG_EXTEND: /* extended msg */
- tul_msgin_extend(pCurHcb);
+ initio_msgin_extend(host);
break;
-
case MSG_IGNOREWIDE:
- tul_msgin_accept(pCurHcb);
+ initio_msgin_accept(host);
break;
-
- /* get */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN);
- if (wait_tulip(pCurHcb) == -1)
- return -1;
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 0); /* put pad */
- TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); /* get IGNORE field */
- TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); /* get pad */
-
- tul_msgin_accept(pCurHcb);
- break;
-
case MSG_COMP:
- {
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_MSG_ACCEPT);
- return tul_wait_done_disc(pCurHcb);
- }
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
+ return initio_wait_done_disc(host);
default:
- tul_msgout_reject(pCurHcb);
+ initio_msgout_reject(host);
break;
}
- if (pCurHcb->HCS_Phase != MSG_IN)
- return (pCurHcb->HCS_Phase);
+ if (host->phase != MSG_IN)
+ return host->phase;
}
/* statement won't reach here */
}
-
-
-
-/***************************************************************************/
-int tul_msgout_reject(HCS * pCurHcb)
+static int initio_msgout_reject(struct initio_host * host)
{
+ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN));
-
- if ((tul_msgin_accept(pCurHcb)) == -1)
- return (-1);
+ if (initio_msgin_accept(host) == -1)
+ return -1;
- if (pCurHcb->HCS_Phase == MSG_OUT) {
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_REJ); /* Msg reject */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- return (wait_tulip(pCurHcb));
+ if (host->phase == MSG_OUT) {
+ outb(MSG_REJ, host->addr + TUL_SFifo); /* Msg reject */
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
}
- return (pCurHcb->HCS_Phase);
+ return host->phase;
}
-
-
-/***************************************************************************/
-int tul_msgout_ide(HCS * pCurHcb)
+static int initio_msgout_ide(struct initio_host * host)
{
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_IDE); /* Initiator Detected Error */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- return (wait_tulip(pCurHcb));
+ outb(MSG_IDE, host->addr + TUL_SFifo); /* Initiator Detected Error */
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
}
-
-/***************************************************************************/
-int tul_msgin_extend(HCS * pCurHcb)
+static int initio_msgin_extend(struct initio_host * host)
{
- BYTE len, idx;
+ u8 len, idx;
- if (tul_msgin_accept(pCurHcb) != MSG_IN)
- return (pCurHcb->HCS_Phase);
+ if (initio_msgin_accept(host) != MSG_IN)
+ return host->phase;
/* Get extended msg length */
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN);
- if (wait_tulip(pCurHcb) == -1)
- return (-1);
+ outl(1, host->addr + TUL_SCnt0);
+ outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
- len = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo);
- pCurHcb->HCS_Msg[0] = len;
+ len = inb(host->addr + TUL_SFifo);
+ host->msg[0] = len;
for (idx = 1; len != 0; len--) {
- if ((tul_msgin_accept(pCurHcb)) != MSG_IN)
- return (pCurHcb->HCS_Phase);
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN);
- if (wait_tulip(pCurHcb) == -1)
- return (-1);
- pCurHcb->HCS_Msg[idx++] = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo);
- }
- if (pCurHcb->HCS_Msg[1] == 1) { /* if it's synchronous data transfer request */
- if (pCurHcb->HCS_Msg[0] != 3) /* if length is not right */
- return (tul_msgout_reject(pCurHcb));
- if (pCurHcb->HCS_ActTcs->TCS_Flags & TCF_NO_SYNC_NEGO) { /* Set OFFSET=0 to do async, nego back */
- pCurHcb->HCS_Msg[3] = 0;
+ if ((initio_msgin_accept(host)) != MSG_IN)
+ return host->phase;
+ outl(1, host->addr + TUL_SCnt0);
+ outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ host->msg[idx++] = inb(host->addr + TUL_SFifo);
+ }
+ if (host->msg[1] == 1) { /* if it's synchronous data transfer request */
+ u8 r;
+ if (host->msg[0] != 3) /* if length is not right */
+ return initio_msgout_reject(host);
+ if (host->active_tc->flags & TCF_NO_SYNC_NEGO) { /* Set OFFSET=0 to do async, nego back */
+ host->msg[3] = 0;
} else {
- if ((tul_msgin_sync(pCurHcb) == 0) &&
- (pCurHcb->HCS_ActTcs->TCS_Flags & TCF_SYNC_DONE)) {
- tul_sync_done(pCurHcb);
- return (tul_msgin_accept(pCurHcb));
+ if (initio_msgin_sync(host) == 0 &&
+ (host->active_tc->flags & TCF_SYNC_DONE)) {
+ initio_sync_done(host);
+ return initio_msgin_accept(host);
}
}
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN));
- if ((tul_msgin_accept(pCurHcb)) != MSG_OUT)
- return (pCurHcb->HCS_Phase);
+ r = inb(host->addr + TUL_SSignal);
+ outb((r & (TSC_SET_ACK | 7)) | TSC_SET_ATN,
+ host->addr + TUL_SSignal);
+ if (initio_msgin_accept(host) != MSG_OUT)
+ return host->phase;
/* sync msg out */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
- tul_sync_done(pCurHcb);
+ initio_sync_done(host);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_EXTEND);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 3);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 1);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurHcb->HCS_Msg[2]);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurHcb->HCS_Msg[3]);
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- return (wait_tulip(pCurHcb));
+ outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(3, host->addr + TUL_SFifo);
+ outb(1, host->addr + TUL_SFifo);
+ outb(host->msg[2], host->addr + TUL_SFifo);
+ outb(host->msg[3], host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
}
- if ((pCurHcb->HCS_Msg[0] != 2) || (pCurHcb->HCS_Msg[1] != 3))
- return (tul_msgout_reject(pCurHcb));
+ if (host->msg[0] != 2 || host->msg[1] != 3)
+ return initio_msgout_reject(host);
/* if it's WIDE DATA XFER REQ */
- if (pCurHcb->HCS_ActTcs->TCS_Flags & TCF_NO_WDTR) {
- pCurHcb->HCS_Msg[2] = 0;
+ if (host->active_tc->flags & TCF_NO_WDTR) {
+ host->msg[2] = 0;
} else {
- if (pCurHcb->HCS_Msg[2] > 2) /* > 32 bits */
- return (tul_msgout_reject(pCurHcb));
- if (pCurHcb->HCS_Msg[2] == 2) { /* == 32 */
- pCurHcb->HCS_Msg[2] = 1;
+ if (host->msg[2] > 2) /* > 32 bits */
+ return initio_msgout_reject(host);
+ if (host->msg[2] == 2) { /* == 32 */
+ host->msg[2] = 1;
} else {
- if ((pCurHcb->HCS_ActTcs->TCS_Flags & TCF_NO_WDTR) == 0) {
- wdtr_done(pCurHcb);
- if ((pCurHcb->HCS_ActTcs->TCS_Flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0)
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN));
- return (tul_msgin_accept(pCurHcb));
+ if ((host->active_tc->flags & TCF_NO_WDTR) == 0) {
+ wdtr_done(host);
+ if ((host->active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0)
+ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
+ return initio_msgin_accept(host);
}
}
}
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN));
+ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
- if (tul_msgin_accept(pCurHcb) != MSG_OUT)
- return (pCurHcb->HCS_Phase);
+ if (initio_msgin_accept(host) != MSG_OUT)
+ return host->phase;
/* WDTR msg out */
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_EXTEND);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 2);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 3);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurHcb->HCS_Msg[2]);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- return (wait_tulip(pCurHcb));
+ outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(2, host->addr + TUL_SFifo);
+ outb(3, host->addr + TUL_SFifo);
+ outb(host->msg[2], host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
}
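For readers unfamiliar with the negotiation traffic above: SDTR and WDTR are standard SCSI extended messages, and the outb() sequences in initio_msgin_extend emit exactly their wire format. The arrays below are purely illustrative; the period factor is a made-up example value and the array names do not exist in the driver.

/* Illustrative only: wire format of the two extended messages built above. */
static const unsigned char sdtr_example[] = {
	MSG_EXTEND,	/* extended message (0x01) */
	3,		/* length of the payload that follows */
	1,		/* SDTR: synchronous data transfer request */
	0x0c,		/* transfer period factor (example value) */
	MAX_OFFSET	/* REQ/ACK offset */
};

static const unsigned char wdtr_example[] = {
	MSG_EXTEND,	/* extended message (0x01) */
	2,		/* payload length */
	3,		/* WDTR: wide data transfer request */
	1		/* width exponent: 0 = 8 bit, 1 = 16 bit */
};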
-/***************************************************************************/
-int tul_msgin_sync(HCS * pCurHcb)
+static int initio_msgin_sync(struct initio_host * host)
{
char default_period;
- default_period = tul_rate_tbl[pCurHcb->HCS_ActTcs->TCS_Flags & TCF_SCSI_RATE];
- if (pCurHcb->HCS_Msg[3] > MAX_OFFSET) {
- pCurHcb->HCS_Msg[3] = MAX_OFFSET;
- if (pCurHcb->HCS_Msg[2] < default_period) {
- pCurHcb->HCS_Msg[2] = default_period;
+ default_period = initio_rate_tbl[host->active_tc->flags & TCF_SCSI_RATE];
+ if (host->msg[3] > MAX_OFFSET) {
+ host->msg[3] = MAX_OFFSET;
+ if (host->msg[2] < default_period) {
+ host->msg[2] = default_period;
return 1;
}
- if (pCurHcb->HCS_Msg[2] >= 59) { /* Change to async */
- pCurHcb->HCS_Msg[3] = 0;
- }
+ if (host->msg[2] >= 59) /* Change to async */
+ host->msg[3] = 0;
return 1;
}
/* offset requests asynchronous transfers ? */
- if (pCurHcb->HCS_Msg[3] == 0) {
+ if (host->msg[3] == 0) {
return 0;
}
- if (pCurHcb->HCS_Msg[2] < default_period) {
- pCurHcb->HCS_Msg[2] = default_period;
+ if (host->msg[2] < default_period) {
+ host->msg[2] = default_period;
return 1;
}
- if (pCurHcb->HCS_Msg[2] >= 59) {
- pCurHcb->HCS_Msg[3] = 0;
+ if (host->msg[2] >= 59) {
+ host->msg[3] = 0;
return 1;
}
return 0;
}
-
-/***************************************************************************/
-int wdtr_done(HCS * pCurHcb)
+static int wdtr_done(struct initio_host * host)
{
- pCurHcb->HCS_ActTcs->TCS_Flags &= ~TCF_SYNC_DONE;
- pCurHcb->HCS_ActTcs->TCS_Flags |= TCF_WDTR_DONE;
+ host->active_tc->flags &= ~TCF_SYNC_DONE;
+ host->active_tc->flags |= TCF_WDTR_DONE;
- pCurHcb->HCS_ActTcs->TCS_JS_Period = 0;
- if (pCurHcb->HCS_Msg[2]) { /* if 16 bit */
- pCurHcb->HCS_ActTcs->TCS_JS_Period |= TSC_WIDE_SCSI;
- }
- pCurHcb->HCS_ActTcs->TCS_SConfig0 &= ~TSC_ALT_PERIOD;
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurHcb->HCS_ActTcs->TCS_SConfig0);
- TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, pCurHcb->HCS_ActTcs->TCS_JS_Period);
+ host->active_tc->js_period = 0;
+ if (host->msg[2]) /* if 16 bit */
+ host->active_tc->js_period |= TSC_WIDE_SCSI;
+ host->active_tc->sconfig0 &= ~TSC_ALT_PERIOD;
+ outb(host->active_tc->sconfig0, host->addr + TUL_SConfig);
+ outb(host->active_tc->js_period, host->addr + TUL_SPeriod);
return 1;
}
-/***************************************************************************/
-int tul_sync_done(HCS * pCurHcb)
+static int initio_sync_done(struct initio_host * host)
{
int i;
- pCurHcb->HCS_ActTcs->TCS_Flags |= TCF_SYNC_DONE;
+ host->active_tc->flags |= TCF_SYNC_DONE;
- if (pCurHcb->HCS_Msg[3]) {
- pCurHcb->HCS_ActTcs->TCS_JS_Period |= pCurHcb->HCS_Msg[3];
+ if (host->msg[3]) {
+ host->active_tc->js_period |= host->msg[3];
for (i = 0; i < 8; i++) {
- if (tul_rate_tbl[i] >= pCurHcb->HCS_Msg[2]) /* pick the big one */
+ if (initio_rate_tbl[i] >= host->msg[2]) /* pick the big one */
break;
}
- pCurHcb->HCS_ActTcs->TCS_JS_Period |= (i << 4);
- pCurHcb->HCS_ActTcs->TCS_SConfig0 |= TSC_ALT_PERIOD;
+ host->active_tc->js_period |= (i << 4);
+ host->active_tc->sconfig0 |= TSC_ALT_PERIOD;
}
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurHcb->HCS_ActTcs->TCS_SConfig0);
- TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, pCurHcb->HCS_ActTcs->TCS_JS_Period);
+ outb(host->active_tc->sconfig0, host->addr + TUL_SConfig);
+ outb(host->active_tc->js_period, host->addr + TUL_SPeriod);
- return (-1);
+ return -1;
}
-int tul_post_scsi_rst(HCS * pCurHcb)
+static int initio_post_scsi_rst(struct initio_host * host)
{
- SCB *pCurScb;
- TCS *pCurTcb;
+ struct scsi_ctrl_blk *scb;
+ struct target_control *active_tc;
int i;
- pCurHcb->HCS_ActScb = NULL;
- pCurHcb->HCS_ActTcs = NULL;
- pCurHcb->HCS_Flags = 0;
+ host->active = NULL;
+ host->active_tc = NULL;
+ host->flags = 0;
- while ((pCurScb = tul_pop_busy_scb(pCurHcb)) != NULL) {
- pCurScb->SCB_HaStat = HOST_BAD_PHAS;
- tul_append_done_scb(pCurHcb, pCurScb);
+ while ((scb = initio_pop_busy_scb(host)) != NULL) {
+ scb->hastat = HOST_BAD_PHAS;
+ initio_append_done_scb(host, scb);
}
/* clear sync done flag */
- pCurTcb = &pCurHcb->HCS_Tcs[0];
- for (i = 0; i < pCurHcb->HCS_MaxTar; pCurTcb++, i++) {
- pCurTcb->TCS_Flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
+ active_tc = &host->targets[0];
+ for (i = 0; i < host->max_tar; active_tc++, i++) {
+ active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
/* Initialize the sync. xfer register values to an asyn xfer */
- pCurTcb->TCS_JS_Period = 0;
- pCurTcb->TCS_SConfig0 = pCurHcb->HCS_SConf1;
- pCurHcb->HCS_ActTags[0] = 0; /* 07/22/98 */
- pCurHcb->HCS_Tcs[i].TCS_Flags &= ~TCF_BUSY; /* 07/22/98 */
+ active_tc->js_period = 0;
+ active_tc->sconfig0 = host->sconf1;
+ host->act_tags[0] = 0; /* 07/22/98 */
+ host->targets[i].flags &= ~TCF_BUSY; /* 07/22/98 */
} /* for */
- return (-1);
+ return -1;
}
-/***************************************************************************/
-void tul_select_atn_stop(HCS * pCurHcb, SCB * pCurScb)
+static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
- pCurScb->SCB_Status |= SCB_SELECT;
- pCurScb->SCB_NxtStat = 0x1;
- pCurHcb->HCS_ActScb = pCurScb;
- pCurHcb->HCS_ActTcs = &pCurHcb->HCS_Tcs[pCurScb->SCB_Target];
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_SELATNSTOP);
- return;
+ scb->status |= SCB_SELECT;
+ scb->next_state = 0x1;
+ host->active = scb;
+ host->active_tc = &host->targets[scb->target];
+ outb(TSC_SELATNSTOP, host->addr + TUL_SCmd);
}
-/***************************************************************************/
-void tul_select_atn(HCS * pCurHcb, SCB * pCurScb)
+static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
int i;
- pCurScb->SCB_Status |= SCB_SELECT;
- pCurScb->SCB_NxtStat = 0x2;
+ scb->status |= SCB_SELECT;
+ scb->next_state = 0x2;
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_Ident);
- for (i = 0; i < (int) pCurScb->SCB_CDBLen; i++)
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_CDB[i]);
- pCurHcb->HCS_ActTcs = &pCurHcb->HCS_Tcs[pCurScb->SCB_Target];
- pCurHcb->HCS_ActScb = pCurScb;
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_SEL_ATN);
- return;
+ outb(scb->ident, host->addr + TUL_SFifo);
+ for (i = 0; i < (int) scb->cdblen; i++)
+ outb(scb->cdb[i], host->addr + TUL_SFifo);
+ host->active_tc = &host->targets[scb->target];
+ host->active = scb;
+ outb(TSC_SEL_ATN, host->addr + TUL_SCmd);
}
-/***************************************************************************/
-void tul_select_atn3(HCS * pCurHcb, SCB * pCurScb)
+static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
int i;
- pCurScb->SCB_Status |= SCB_SELECT;
- pCurScb->SCB_NxtStat = 0x2;
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_Ident);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_TagMsg);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_TagId);
- for (i = 0; i < (int) pCurScb->SCB_CDBLen; i++)
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_CDB[i]);
- pCurHcb->HCS_ActTcs = &pCurHcb->HCS_Tcs[pCurScb->SCB_Target];
- pCurHcb->HCS_ActScb = pCurScb;
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_SEL_ATN3);
- return;
+ scb->status |= SCB_SELECT;
+ scb->next_state = 0x2;
+
+ outb(scb->ident, host->addr + TUL_SFifo);
+ outb(scb->tagmsg, host->addr + TUL_SFifo);
+ outb(scb->tagid, host->addr + TUL_SFifo);
+ for (i = 0; i < scb->cdblen; i++)
+ outb(scb->cdb[i], host->addr + TUL_SFifo);
+ host->active_tc = &host->targets[scb->target];
+ host->active = scb;
+ outb(TSC_SEL_ATN3, host->addr + TUL_SCmd);
}
-/***************************************************************************/
-/* SCSI Bus Device Reset */
-int tul_bus_device_reset(HCS * pCurHcb)
+/**
+ * initio_bus_device_reset - SCSI Bus Device Reset
+ * @host: InitIO host to reset
+ *
+ * Perform a device reset and abort all pending SCBs for the
+ * victim device
+ */
+int initio_bus_device_reset(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
- TCS *pCurTcb = pCurHcb->HCS_ActTcs;
- SCB *pTmpScb, *pPrevScb;
- BYTE tar;
+ struct scsi_ctrl_blk *scb = host->active;
+ struct target_control *active_tc = host->active_tc;
+ struct scsi_ctrl_blk *tmp, *prev;
+ u8 tar;
- if (pCurHcb->HCS_Phase != MSG_OUT) {
- return (int_tul_bad_seq(pCurHcb)); /* Unexpected phase */
- }
- tul_unlink_pend_scb(pCurHcb, pCurScb);
- tul_release_scb(pCurHcb, pCurScb);
+ if (host->phase != MSG_OUT)
+ return int_initio_bad_seq(host); /* Unexpected phase */
+
+ initio_unlink_pend_scb(host, scb);
+ initio_release_scb(host, scb);
- tar = pCurScb->SCB_Target; /* target */
- pCurTcb->TCS_Flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE | TCF_BUSY);
+ tar = scb->target; /* target */
+ active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE | TCF_BUSY);
/* clr sync. nego & WDTR flags 07/22/98 */
/* abort all SCB with same target */
- pPrevScb = pTmpScb = pCurHcb->HCS_FirstBusy; /* Check Busy queue */
- while (pTmpScb != NULL) {
-
- if (pTmpScb->SCB_Target == tar) {
+ prev = tmp = host->first_busy; /* Check Busy queue */
+ while (tmp != NULL) {
+ if (tmp->target == tar) {
/* unlink it */
- if (pTmpScb == pCurHcb->HCS_FirstBusy) {
- if ((pCurHcb->HCS_FirstBusy = pTmpScb->SCB_NxtScb) == NULL)
- pCurHcb->HCS_LastBusy = NULL;
+ if (tmp == host->first_busy) {
+ if ((host->first_busy = tmp->next) == NULL)
+ host->last_busy = NULL;
} else {
- pPrevScb->SCB_NxtScb = pTmpScb->SCB_NxtScb;
- if (pTmpScb == pCurHcb->HCS_LastBusy)
- pCurHcb->HCS_LastBusy = pPrevScb;
+ prev->next = tmp->next;
+ if (tmp == host->last_busy)
+ host->last_busy = prev;
}
- pTmpScb->SCB_HaStat = HOST_ABORTED;
- tul_append_done_scb(pCurHcb, pTmpScb);
+ tmp->hastat = HOST_ABORTED;
+ initio_append_done_scb(host, tmp);
}
/* Previous haven't change */
else {
- pPrevScb = pTmpScb;
+ prev = tmp;
}
- pTmpScb = pTmpScb->SCB_NxtScb;
+ tmp = tmp->next;
}
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_DEVRST);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
-
- return tul_wait_disc(pCurHcb);
+ outb(MSG_DEVRST, host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return initio_wait_disc(host);
}
-/***************************************************************************/
-int tul_msgin_accept(HCS * pCurHcb)
+static int initio_msgin_accept(struct initio_host * host)
{
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_MSG_ACCEPT);
- return (wait_tulip(pCurHcb));
+ outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
}
-/***************************************************************************/
-int wait_tulip(HCS * pCurHcb)
+static int wait_tulip(struct initio_host * host)
{
- while (!((pCurHcb->HCS_JSStatus0 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus0))
- & TSS_INT_PENDING));
+ while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0))
+ & TSS_INT_PENDING))
+ cpu_relax();
- pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt);
- pCurHcb->HCS_Phase = pCurHcb->HCS_JSStatus0 & TSS_PH_MASK;
- pCurHcb->HCS_JSStatus1 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus1);
+ host->jsint = inb(host->addr + TUL_SInt);
+ host->phase = host->jsstatus0 & TSS_PH_MASK;
+ host->jsstatus1 = inb(host->addr + TUL_SStatus1);
- if (pCurHcb->HCS_JSInt & TSS_RESEL_INT) { /* if SCSI bus reset detected */
- return (int_tul_resel(pCurHcb));
- }
- if (pCurHcb->HCS_JSInt & TSS_SEL_TIMEOUT) { /* if selected/reselected timeout interrupt */
- return (int_tul_busfree(pCurHcb));
- }
- if (pCurHcb->HCS_JSInt & TSS_SCSIRST_INT) { /* if SCSI bus reset detected */
- return (int_tul_scsi_rst(pCurHcb));
- }
- if (pCurHcb->HCS_JSInt & TSS_DISC_INT) { /* BUS disconnection */
- if (pCurHcb->HCS_Flags & HCF_EXPECT_DONE_DISC) {
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */
- tul_unlink_busy_scb(pCurHcb, pCurHcb->HCS_ActScb);
- pCurHcb->HCS_ActScb->SCB_HaStat = 0;
- tul_append_done_scb(pCurHcb, pCurHcb->HCS_ActScb);
- pCurHcb->HCS_ActScb = NULL;
- pCurHcb->HCS_ActTcs = NULL;
- pCurHcb->HCS_Flags &= ~HCF_EXPECT_DONE_DISC;
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */
- return (-1);
+ if (host->jsint & TSS_RESEL_INT) /* if SCSI bus reset detected */
+ return int_initio_resel(host);
+ if (host->jsint & TSS_SEL_TIMEOUT) /* if selected/reselected timeout interrupt */
+ return int_initio_busfree(host);
+ if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
+ return int_initio_scsi_rst(host);
+
+ if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
+ if (host->flags & HCF_EXPECT_DONE_DISC) {
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
+ initio_unlink_busy_scb(host, host->active);
+ host->active->hastat = 0;
+ initio_append_done_scb(host, host->active);
+ host->active = NULL;
+ host->active_tc = NULL;
+ host->flags &= ~HCF_EXPECT_DONE_DISC;
+ outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
+ return -1;
}
- if (pCurHcb->HCS_Flags & HCF_EXPECT_DISC) {
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */
- pCurHcb->HCS_ActScb = NULL;
- pCurHcb->HCS_ActTcs = NULL;
- pCurHcb->HCS_Flags &= ~HCF_EXPECT_DISC;
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */
- return (-1);
+ if (host->flags & HCF_EXPECT_DISC) {
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
+ host->active = NULL;
+ host->active_tc = NULL;
+ host->flags &= ~HCF_EXPECT_DISC;
+ outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
+ return -1;
}
- return (int_tul_busfree(pCurHcb));
- }
- if (pCurHcb->HCS_JSInt & (TSS_FUNC_COMP | TSS_BUS_SERV)) {
- return (pCurHcb->HCS_Phase);
+ return int_initio_busfree(host);
}
- return (pCurHcb->HCS_Phase);
+ /* The old code really does the below. Can probably be removed */
+ if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV))
+ return host->phase;
+ return host->phase;
}
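Note that the conversion also drops the old empty busy-wait bodies in favour of cpu_relax(), which is friendlier to a hyper-threaded sibling and makes the spin explicit; the polls themselves stay unbounded, exactly as in the original driver. Purely as an illustration of how a timeout could be layered on top (this is not what the patch does), a bounded variant of the same poll might look like the following; the iteration cap is an arbitrary example value.

/* Not part of this patch: a bounded variant of the status poll, shown
 * only to illustrate adding a timeout to the idiom above. */
static int wait_status_bounded(struct initio_host *host)
{
	unsigned long timeout = 1000000;	/* arbitrary iteration cap */

	while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0))
			& TSS_INT_PENDING)) {
		if (--timeout == 0)
			return -1;	/* give up instead of spinning forever */
		cpu_relax();
	}
	return 0;
}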
-/***************************************************************************/
-int tul_wait_disc(HCS * pCurHcb)
-{
-
- while (!((pCurHcb->HCS_JSStatus0 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus0))
- & TSS_INT_PENDING));
+static int initio_wait_disc(struct initio_host * host)
+{
+ while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING))
+ cpu_relax();
- pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt);
+ host->jsint = inb(host->addr + TUL_SInt);
- if (pCurHcb->HCS_JSInt & TSS_SCSIRST_INT) { /* if SCSI bus reset detected */
- return (int_tul_scsi_rst(pCurHcb));
- }
- if (pCurHcb->HCS_JSInt & TSS_DISC_INT) { /* BUS disconnection */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */
- pCurHcb->HCS_ActScb = NULL;
- return (-1);
+ if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
+ return int_initio_scsi_rst(host);
+ if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
+ outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
+ host->active = NULL;
+ return -1;
}
- return (tul_bad_seq(pCurHcb));
+ return initio_bad_seq(host);
}
-/***************************************************************************/
-int tul_wait_done_disc(HCS * pCurHcb)
+static int initio_wait_done_disc(struct initio_host * host)
{
+ while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0))
+ & TSS_INT_PENDING))
+ cpu_relax();
+ host->jsint = inb(host->addr + TUL_SInt);
- while (!((pCurHcb->HCS_JSStatus0 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus0))
- & TSS_INT_PENDING));
-
- pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt);
-
-
- if (pCurHcb->HCS_JSInt & TSS_SCSIRST_INT) { /* if SCSI bus reset detected */
- return (int_tul_scsi_rst(pCurHcb));
- }
- if (pCurHcb->HCS_JSInt & TSS_DISC_INT) { /* BUS disconnection */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */
- tul_unlink_busy_scb(pCurHcb, pCurHcb->HCS_ActScb);
+ if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
+ return int_initio_scsi_rst(host);
+ if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
+ outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
+ initio_unlink_busy_scb(host, host->active);
- tul_append_done_scb(pCurHcb, pCurHcb->HCS_ActScb);
- pCurHcb->HCS_ActScb = NULL;
- return (-1);
+ initio_append_done_scb(host, host->active);
+ host->active = NULL;
+ return -1;
}
- return (tul_bad_seq(pCurHcb));
+ return initio_bad_seq(host);
}
+/**
+ * i91u_intr - IRQ handler
+ * @irqno: IRQ number
+ * @dev_id: IRQ identifier
+ *
+ * Take the relevant locks and then invoke the actual isr processing
+ * code under the lock.
+ */
+
static irqreturn_t i91u_intr(int irqno, void *dev_id)
{
struct Scsi_Host *dev = dev_id;
unsigned long flags;
+ int r;
spin_lock_irqsave(dev->host_lock, flags);
- tul_isr((HCS *)dev->base);
+ r = initio_isr((struct initio_host *)dev->hostdata);
spin_unlock_irqrestore(dev->host_lock, flags);
- return IRQ_HANDLED;
-}
-
-static int tul_NewReturnNumberOfAdapters(void)
-{
- struct pci_dev *pDev = NULL; /* Start from none */
- int iAdapters = 0;
- long dRegValue;
- WORD wBIOS;
- int i = 0;
-
- init_i91uAdapter_table();
-
- for (i = 0; i < ARRAY_SIZE(i91u_pci_devices); i++)
- {
- while ((pDev = pci_find_device(i91u_pci_devices[i].vendor, i91u_pci_devices[i].device, pDev)) != NULL) {
- if (pci_enable_device(pDev))
- continue;
- pci_read_config_dword(pDev, 0x44, (u32 *) & dRegValue);
- wBIOS = (UWORD) (dRegValue & 0xFF);
- if (((dRegValue & 0xFF00) >> 8) == 0xFF)
- dRegValue = 0;
- wBIOS = (wBIOS << 8) + ((UWORD) ((dRegValue & 0xFF00) >> 8));
- if (pci_set_dma_mask(pDev, DMA_32BIT_MASK)) {
- printk(KERN_WARNING
- "i91u: Could not set 32 bit DMA mask\n");
- continue;
- }
-
- if (Addi91u_into_Adapter_table(wBIOS,
- (pDev->resource[0].start),
- pDev->irq,
- pDev->bus->number,
- (pDev->devfn >> 3)
- ) == 0)
- iAdapters++;
- }
- }
-
- return (iAdapters);
+ if (r)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
}
-static int i91u_detect(struct scsi_host_template * tpnt)
-{
- HCS *pHCB;
- struct Scsi_Host *hreg;
- unsigned long i; /* 01/14/98 */
- int ok = 0, iAdapters;
- ULONG dBiosAdr;
- BYTE *pbBiosAdr;
-
- /* Get total number of adapters in the motherboard */
- iAdapters = tul_NewReturnNumberOfAdapters();
- if (iAdapters == 0) /* If no tulip founded, return */
- return (0);
-
- tul_num_ch = (iAdapters > tul_num_ch) ? tul_num_ch : iAdapters;
- /* Update actually channel number */
- if (tul_tag_enable) { /* 1.01i */
- tul_num_scb = MAX_TARGETS * i91u_MAXQUEUE;
- } else {
- tul_num_scb = MAX_TARGETS + 3; /* 1-tape, 1-CD_ROM, 1- extra */
- } /* Update actually SCBs per adapter */
-
- /* Get total memory needed for HCS */
- i = tul_num_ch * sizeof(HCS);
- memset((unsigned char *) &tul_hcs[0], 0, i); /* Initialize tul_hcs 0 */
- /* Get total memory needed for SCB */
-
- for (; tul_num_scb >= MAX_TARGETS + 3; tul_num_scb--) {
- i = tul_num_ch * tul_num_scb * sizeof(SCB);
- if ((tul_scb = kmalloc(i, GFP_ATOMIC | GFP_DMA)) != NULL)
- break;
- }
- if (tul_scb == NULL) {
- printk("i91u: SCB memory allocation error\n");
- return (0);
- }
- memset((unsigned char *) tul_scb, 0, i);
- for (i = 0, pHCB = &tul_hcs[0]; /* Get pointer for control block */
- i < tul_num_ch;
- i++, pHCB++) {
- get_tulipPCIConfig(pHCB, i);
-
- dBiosAdr = pHCB->HCS_BIOS;
- dBiosAdr = (dBiosAdr << 4);
-
- pbBiosAdr = phys_to_virt(dBiosAdr);
-
- init_tulip(pHCB, tul_scb + (i * tul_num_scb), tul_num_scb, pbBiosAdr, 10);
- request_region(pHCB->HCS_Base, 256, "i91u"); /* Register */
-
- pHCB->HCS_Index = i; /* 7/29/98 */
- hreg = scsi_register(tpnt, sizeof(HCS));
- if(hreg == NULL) {
- release_region(pHCB->HCS_Base, 256);
- return 0;
- }
- hreg->io_port = pHCB->HCS_Base;
- hreg->n_io_port = 0xff;
- hreg->can_queue = tul_num_scb; /* 03/05/98 */
- hreg->unique_id = pHCB->HCS_Base;
- hreg->max_id = pHCB->HCS_MaxTar;
- hreg->max_lun = 32; /* 10/21/97 */
- hreg->irq = pHCB->HCS_Intr;
- hreg->this_id = pHCB->HCS_SCSI_ID; /* Assign HCS index */
- hreg->base = (unsigned long)pHCB;
- hreg->sg_tablesize = TOTAL_SG_ENTRY; /* Maximun support is 32 */
-
- /* Initial tulip chip */
- ok = request_irq(pHCB->HCS_Intr, i91u_intr, IRQF_DISABLED | IRQF_SHARED, "i91u", hreg);
- if (ok < 0) {
- printk(KERN_WARNING "i91u: unable to request IRQ %d\n\n", pHCB->HCS_Intr);
- return 0;
- }
- }
-
- tpnt->this_id = -1;
- tpnt->can_queue = 1;
-
- return 1;
-}
+/**
+ * initio_build_scb - Build the mappings and SCB
+ * @host: InitIO host taking the command
+ * @cblk: Firmware command block
+ * @cmnd: SCSI midlayer command block
+ *
+ * Translate the abstract SCSI command into a firmware command block
+ * suitable for feeding to the InitIO host controller. This also requires
+ * that we build the scatter-gather lists and ensure they are mapped properly.
+ */
-static void i91uBuildSCB(HCS * pHCB, SCB * pSCB, struct scsi_cmnd * SCpnt)
+static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * cblk, struct scsi_cmnd * cmnd)
{ /* Create corresponding SCB */
- struct scatterlist *pSrbSG;
- SG *pSG; /* Pointer to SG list */
- int i;
- long TotalLen;
+ struct scatterlist *sglist;
+ struct sg_entry *sg; /* Pointer to SG list */
+ int i, nseg;
+ long total_len;
dma_addr_t dma_addr;
- pSCB->SCB_Post = i91uSCBPost; /* i91u's callback routine */
- pSCB->SCB_Srb = SCpnt;
- pSCB->SCB_Opcode = ExecSCSI;
- pSCB->SCB_Flags = SCF_POST; /* After SCSI done, call post routine */
- pSCB->SCB_Target = SCpnt->device->id;
- pSCB->SCB_Lun = SCpnt->device->lun;
- pSCB->SCB_Ident = SCpnt->device->lun | DISC_ALLOW;
-
- pSCB->SCB_Flags |= SCF_SENSE; /* Turn on auto request sense */
- dma_addr = dma_map_single(&pHCB->pci_dev->dev, SCpnt->sense_buffer,
- SENSE_SIZE, DMA_FROM_DEVICE);
- pSCB->SCB_SensePtr = cpu_to_le32((u32)dma_addr);
- pSCB->SCB_SenseLen = cpu_to_le32(SENSE_SIZE);
- SCpnt->SCp.ptr = (char *)(unsigned long)dma_addr;
+ /* Fill in the command headers */
+ cblk->post = i91uSCBPost; /* i91u's callback routine */
+ cblk->srb = cmnd;
+ cblk->opcode = ExecSCSI;
+ cblk->flags = SCF_POST; /* After SCSI done, call post routine */
+ cblk->target = cmnd->device->id;
+ cblk->lun = cmnd->device->lun;
+ cblk->ident = cmnd->device->lun | DISC_ALLOW;
- pSCB->SCB_CDBLen = SCpnt->cmd_len;
- pSCB->SCB_HaStat = 0;
- pSCB->SCB_TaStat = 0;
- memcpy(&pSCB->SCB_CDB[0], &SCpnt->cmnd, SCpnt->cmd_len);
+ cblk->flags |= SCF_SENSE; /* Turn on auto request sense */
- if (SCpnt->device->tagged_supported) { /* Tag Support */
- pSCB->SCB_TagMsg = SIMPLE_QUEUE_TAG; /* Do simple tag only */
+ /* Map the sense buffer into bus memory */
+ dma_addr = dma_map_single(&host->pci_dev->dev, cmnd->sense_buffer,
+ SENSE_SIZE, DMA_FROM_DEVICE);
+ cblk->senseptr = cpu_to_le32((u32)dma_addr);
+ cblk->senselen = cpu_to_le32(SENSE_SIZE);
+ cmnd->SCp.ptr = (char *)(unsigned long)dma_addr;
+ cblk->cdblen = cmnd->cmd_len;
+
+ /* Clear the returned status */
+ cblk->hastat = 0;
+ cblk->tastat = 0;
+ /* Copy the CDB into the control block */
+ memcpy(&cblk->cdb[0], &cmnd->cmnd, cmnd->cmd_len);
+
+ /* Set up tags */
+ if (cmnd->device->tagged_supported) { /* Tag Support */
+ cblk->tagmsg = SIMPLE_QUEUE_TAG; /* Do simple tag only */
} else {
- pSCB->SCB_TagMsg = 0; /* No tag support */
+ cblk->tagmsg = 0; /* No tag support */
}
+
/* todo handle map_sg error */
- if (SCpnt->use_sg) {
- dma_addr = dma_map_single(&pHCB->pci_dev->dev, &pSCB->SCB_SGList[0],
- sizeof(struct SG_Struc) * TOTAL_SG_ENTRY,
+ nseg = scsi_dma_map(cmnd);
+ BUG_ON(nseg < 0);
+ if (nseg) {
+ dma_addr = dma_map_single(&host->pci_dev->dev, &cblk->sglist[0],
+ sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
DMA_BIDIRECTIONAL);
- pSCB->SCB_BufPtr = cpu_to_le32((u32)dma_addr);
- SCpnt->SCp.dma_handle = dma_addr;
-
- pSrbSG = (struct scatterlist *) SCpnt->request_buffer;
- pSCB->SCB_SGLen = dma_map_sg(&pHCB->pci_dev->dev, pSrbSG,
- SCpnt->use_sg, SCpnt->sc_data_direction);
-
- pSCB->SCB_Flags |= SCF_SG; /* Turn on SG list flag */
- for (i = 0, TotalLen = 0, pSG = &pSCB->SCB_SGList[0]; /* 1.01g */
- i < pSCB->SCB_SGLen; i++, pSG++, pSrbSG++) {
- pSG->SG_Ptr = cpu_to_le32((u32)sg_dma_address(pSrbSG));
- TotalLen += pSG->SG_Len = cpu_to_le32((u32)sg_dma_len(pSrbSG));
+ cblk->bufptr = cpu_to_le32((u32)dma_addr);
+ cmnd->SCp.dma_handle = dma_addr;
+
+
+ cblk->flags |= SCF_SG; /* Turn on SG list flag */
+ total_len = 0;
+ sg = &cblk->sglist[0];
+ scsi_for_each_sg(cmnd, sglist, cblk->sglen, i) {
+ sg->data = cpu_to_le32((u32)sg_dma_address(sglist));
+ total_len += sg->len = cpu_to_le32((u32)sg_dma_len(sglist));
}
- pSCB->SCB_BufLen = (SCpnt->request_bufflen > TotalLen) ?
- TotalLen : SCpnt->request_bufflen;
- } else if (SCpnt->request_bufflen) { /* Non SG */
- dma_addr = dma_map_single(&pHCB->pci_dev->dev, SCpnt->request_buffer,
- SCpnt->request_bufflen,
- SCpnt->sc_data_direction);
- SCpnt->SCp.dma_handle = dma_addr;
- pSCB->SCB_BufPtr = cpu_to_le32((u32)dma_addr);
- pSCB->SCB_BufLen = cpu_to_le32((u32)SCpnt->request_bufflen);
- pSCB->SCB_SGLen = 0;
- } else {
- pSCB->SCB_BufLen = 0;
- pSCB->SCB_SGLen = 0;
+ cblk->buflen = (scsi_bufflen(cmnd) > total_len) ?
+ total_len : scsi_bufflen(cmnd);
+ } else { /* No data transfer required */
+ cblk->buflen = 0;
+ cblk->sglen = 0;
}
}
+/**
+ * i91u_queuecommand - Queue a new command if possible
+ * @cmd: SCSI command block from the mid layer
+ * @done: Completion handler
+ *
+ * Attempts to queue a new command with the host adapter. Will return
+ * zero if successful or indicate a host busy condition if not (which
+ * will cause the mid layer to call us again later with the command)
+ */
+
static int i91u_queuecommand(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
- HCS *pHCB = (HCS *) cmd->device->host->base;
- register SCB *pSCB;
+ struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata;
+ struct scsi_ctrl_blk *cmnd;
cmd->scsi_done = done;
- pSCB = tul_alloc_scb(pHCB);
- if (!pSCB)
+ cmnd = initio_alloc_scb(host);
+ if (!cmnd)
return SCSI_MLQUEUE_HOST_BUSY;
- i91uBuildSCB(pHCB, pSCB, cmd);
- tul_exec_scb(pHCB, pSCB);
+ initio_build_scb(host, cmnd, cmd);
+ initio_exec_scb(host, cmnd);
return 0;
}
-#if 0 /* no new EH yet */
-/*
- * Abort a queued command
- * (commands that are on the bus can't be aborted easily)
- */
-static int i91u_abort(struct scsi_cmnd * SCpnt)
-{
- HCS *pHCB;
-
- pHCB = (HCS *) SCpnt->device->host->base;
- return tul_abort_srb(pHCB, SCpnt);
-}
-
-/*
- * Reset registers, reset a hanging bus and
- * kill active and disconnected commands for target w/o soft reset
+/**
+ * i91u_bus_reset - reset the SCSI bus
+ * @cmnd: Command block we want to trigger the reset for
+ *
+ * Initiate a SCSI bus reset sequence
*/
-static int i91u_reset(struct scsi_cmnd * SCpnt, unsigned int reset_flags)
-{ /* I need Host Control Block Information */
- HCS *pHCB;
-
- pHCB = (HCS *) SCpnt->device->host->base;
- if (reset_flags & (SCSI_RESET_SUGGEST_BUS_RESET | SCSI_RESET_SUGGEST_HOST_RESET))
- return tul_reset_scsi_bus(pHCB);
- else
- return tul_device_reset(pHCB, SCpnt, SCpnt->device->id, reset_flags);
-}
-#endif
-
-static int i91u_bus_reset(struct scsi_cmnd * SCpnt)
+static int i91u_bus_reset(struct scsi_cmnd * cmnd)
{
- HCS *pHCB;
+ struct initio_host *host;
- pHCB = (HCS *) SCpnt->device->host->base;
+ host = (struct initio_host *) cmnd->device->host->hostdata;
- spin_lock_irq(SCpnt->device->host->host_lock);
- tul_reset_scsi(pHCB, 0);
- spin_unlock_irq(SCpnt->device->host->host_lock);
+ spin_lock_irq(cmnd->device->host->host_lock);
+ initio_reset_scsi(host, 0);
+ spin_unlock_irq(cmnd->device->host->host_lock);
return SUCCESS;
}
-/*
- * Return the "logical geometry"
+/**
+ * i91u_biosparam - return the "logical geometry"
+ * @sdev: SCSI device
+ * @dev: Matching block device
+ * @capacity: Size of the drive in sectors
+ * @info_array: Return space for BIOS geometry
+ *
+ * Map the device geometry in a manner compatible with the host
+ * controller BIOS behaviour.
+ *
+ * FIXME: limited to 2^32 sector devices.
*/
+
static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev,
sector_t capacity, int *info_array)
{
- HCS *pHcb; /* Point to Host adapter control block */
- TCS *pTcb;
+ struct initio_host *host; /* Point to Host adapter control block */
+ struct target_control *tc;
- pHcb = (HCS *) sdev->host->base;
- pTcb = &pHcb->HCS_Tcs[sdev->id];
+ host = (struct initio_host *) sdev->host->hostdata;
+ tc = &host->targets[sdev->id];
- if (pTcb->TCS_DrvHead) {
- info_array[0] = pTcb->TCS_DrvHead;
- info_array[1] = pTcb->TCS_DrvSector;
- info_array[2] = (unsigned long)capacity / pTcb->TCS_DrvHead / pTcb->TCS_DrvSector;
+ if (tc->heads) {
+ info_array[0] = tc->heads;
+ info_array[1] = tc->sectors;
+ info_array[2] = (unsigned long)capacity / tc->heads / tc->sectors;
} else {
- if (pTcb->TCS_DrvFlags & TCF_DRV_255_63) {
+ if (tc->drv_flags & TCF_DRV_255_63) {
info_array[0] = 255;
info_array[1] = 63;
info_array[2] = (unsigned long)capacity / 255 / 63;
@@ -3047,7 +2722,16 @@ static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev,
return 0;
}
-static void i91u_unmap_cmnd(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
+/**
+ * i91u_unmap_scb - Unmap a command
+ * @pci_dev: PCI device the command is for
+ * @cmnd: The command itself
+ *
+ * Unmap any PCI mapping/IOMMU resources allocated when the command
+ * was mapped originally as part of initio_build_scb
+ */
+
+static void i91u_unmap_scb(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
{
/* auto sense buffer */
if (cmnd->SCp.ptr) {
@@ -3058,65 +2742,63 @@ static void i91u_unmap_cmnd(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
}
/* request buffer */
- if (cmnd->use_sg) {
+ if (scsi_sg_count(cmnd)) {
dma_unmap_single(&pci_dev->dev, cmnd->SCp.dma_handle,
- sizeof(struct SG_Struc) * TOTAL_SG_ENTRY,
+ sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
DMA_BIDIRECTIONAL);
- dma_unmap_sg(&pci_dev->dev, cmnd->request_buffer,
- cmnd->use_sg,
- cmnd->sc_data_direction);
- } else if (cmnd->request_bufflen) {
- dma_unmap_single(&pci_dev->dev, cmnd->SCp.dma_handle,
- cmnd->request_bufflen,
- cmnd->sc_data_direction);
+ scsi_dma_unmap(cmnd);
}
}
-/*****************************************************************************
- Function name : i91uSCBPost
- Description : This is callback routine be called when tulip finish one
- SCSI command.
- Input : pHCB - Pointer to host adapter control block.
- pSCB - Pointer to SCSI control block.
- Output : None.
- Return : None.
-*****************************************************************************/
-static void i91uSCBPost(BYTE * pHcb, BYTE * pScb)
-{
- struct scsi_cmnd *pSRB; /* Pointer to SCSI request block */
- HCS *pHCB;
- SCB *pSCB;
+/**
+ * i91uSCBPost - SCSI callback
+ * @host_mem: Pointer to host adapter control block.
+ * @cblk_mem: Pointer to SCSI control block.
+ *
+ * Callback routine invoked when the tulip chip finishes a
+ * SCSI command.
+ */
- pHCB = (HCS *) pHcb;
- pSCB = (SCB *) pScb;
- if ((pSRB = pSCB->SCB_Srb) == 0) {
- printk("i91uSCBPost: SRB pointer is empty\n");
+static void i91uSCBPost(u8 * host_mem, u8 * cblk_mem)
+{
+ struct scsi_cmnd *cmnd; /* Pointer to SCSI request block */
+ struct initio_host *host;
+ struct scsi_ctrl_blk *cblk;
- tul_release_scb(pHCB, pSCB); /* Release SCB for current channel */
+ host = (struct initio_host *) host_mem;
+ cblk = (struct scsi_ctrl_blk *) cblk_mem;
+ if ((cmnd = cblk->srb) == NULL) {
+ printk(KERN_ERR "i91uSCBPost: SRB pointer is empty\n");
+ WARN_ON(1);
+ initio_release_scb(host, cblk); /* Release SCB for current channel */
return;
}
- switch (pSCB->SCB_HaStat) {
+
+ /*
+ * Remap the firmware error status into a mid layer one
+ */
+ switch (cblk->hastat) {
case 0x0:
case 0xa: /* Linked command complete without error and linked normally */
case 0xb: /* Linked command complete without error interrupt generated */
- pSCB->SCB_HaStat = 0;
+ cblk->hastat = 0;
break;
case 0x11: /* Selection time out-The initiator selection or target
reselection was not complete within the SCSI Time out period */
- pSCB->SCB_HaStat = DID_TIME_OUT;
+ cblk->hastat = DID_TIME_OUT;
break;
case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus
phase sequence was requested by the target. The host adapter
will generate a SCSI Reset Condition, notifying the host with
a SCRD interrupt */
- pSCB->SCB_HaStat = DID_RESET;
+ cblk->hastat = DID_RESET;
break;
case 0x1a: /* SCB Aborted. 07/21/98 */
- pSCB->SCB_HaStat = DID_ABORT;
+ cblk->hastat = DID_ABORT;
break;
case 0x12: /* Data overrun/underrun-The target attempted to transfer more data
@@ -3126,49 +2808,196 @@ static void i91uSCBPost(BYTE * pHcb, BYTE * pScb)
case 0x16: /* Invalid SCB Operation Code. */
default:
- printk("ini9100u: %x %x\n", pSCB->SCB_HaStat, pSCB->SCB_TaStat);
- pSCB->SCB_HaStat = DID_ERROR; /* Couldn't find any better */
+ printk("ini9100u: %x %x\n", cblk->hastat, cblk->tastat);
+ cblk->hastat = DID_ERROR; /* Couldn't find any better */
break;
}
- pSRB->result = pSCB->SCB_TaStat | (pSCB->SCB_HaStat << 16);
+ cmnd->result = cblk->tastat | (cblk->hastat << 16);
+ WARN_ON(cmnd == NULL);
+ i91u_unmap_scb(host->pci_dev, cmnd);
+ cmnd->scsi_done(cmnd); /* Notify system DONE */
+ initio_release_scb(host, cblk); /* Release SCB for current channel */
+}
+
+static struct scsi_host_template initio_template = {
+ .proc_name = "INI9100U",
+ .name = "Initio INI-9X00U/UW SCSI device driver",
+ .queuecommand = i91u_queuecommand,
+ .eh_bus_reset_handler = i91u_bus_reset,
+ .bios_param = i91u_biosparam,
+ .can_queue = MAX_TARGETS * i91u_MAXQUEUE,
+ .this_id = 1,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+static int initio_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct Scsi_Host *shost;
+ struct initio_host *host;
+ u32 reg;
+ u16 bios_seg;
+ struct scsi_ctrl_blk *scb, *tmp, *prev = NULL /* silence gcc */;
+ int num_scb, i, error;
+
+ error = pci_enable_device(pdev);
+ if (error)
+ return error;
+
+ pci_read_config_dword(pdev, 0x44, (u32 *) & reg);
+ bios_seg = (u16) (reg & 0xFF);
+ if (((reg & 0xFF00) >> 8) == 0xFF)
+ reg = 0;
+ bios_seg = (bios_seg << 8) + ((u16) ((reg & 0xFF00) >> 8));
+
+ if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
+ printk(KERN_WARNING "i91u: Could not set 32 bit DMA mask\n");
+ error = -ENODEV;
+ goto out_disable_device;
+ }
+ shost = scsi_host_alloc(&initio_template, sizeof(struct initio_host));
+ if (!shost) {
+ printk(KERN_WARNING "initio: Could not allocate host structure.\n");
+ error = -ENOMEM;
+ goto out_disable_device;
+ }
+ host = (struct initio_host *)shost->hostdata;
+ memset(host, 0, sizeof(struct initio_host));
- if (pSRB == NULL) {
- printk("pSRB is NULL\n");
+ if (!request_region(host->addr, 256, "i91u")) {
+ printk(KERN_WARNING "initio: I/O port range 0x%x is busy.\n", host->addr);
+ error = -ENODEV;
+ goto out_host_put;
}
- i91u_unmap_cmnd(pHCB->pci_dev, pSRB);
- pSRB->scsi_done(pSRB); /* Notify system DONE */
+ if (initio_tag_enable) /* 1.01i */
+ num_scb = MAX_TARGETS * i91u_MAXQUEUE;
+ else
+ num_scb = MAX_TARGETS + 3; /* 1-tape, 1-CD_ROM, 1- extra */
- tul_release_scb(pHCB, pSCB); /* Release SCB for current channel */
-}
+ for (; num_scb >= MAX_TARGETS + 3; num_scb--) {
+ i = num_scb * sizeof(struct scsi_ctrl_blk);
+ if ((scb = kzalloc(i, GFP_DMA)) != NULL)
+ break;
+ }
+
+ if (!scb) {
+ printk(KERN_WARNING "initio: Cannot allocate SCB array.\n");
+ error = -ENOMEM;
+ goto out_release_region;
+ }
-/*
- * Release ressources
+ host->num_scbs = num_scb;
+ host->scb = scb;
+ host->next_pending = scb;
+ host->next_avail = scb;
+ for (i = 0, tmp = scb; i < num_scb; i++, tmp++) {
+ tmp->tagid = i;
+ if (i != 0)
+ prev->next = tmp;
+ prev = tmp;
+ }
+ prev->next = NULL;
+ host->scb_end = tmp;
+ host->first_avail = scb;
+ host->last_avail = prev;
+
+ initio_init(host, phys_to_virt(bios_seg << 4));
+
+ host->jsstatus0 = 0;
+
+ shost->io_port = host->addr;
+ shost->n_io_port = 0xff;
+ shost->can_queue = num_scb; /* 03/05/98 */
+ shost->unique_id = host->addr;
+ shost->max_id = host->max_tar;
+ shost->max_lun = 32; /* 10/21/97 */
+ shost->irq = pdev->irq;
+ shost->this_id = host->scsi_id; /* Assign HCS index */
+ shost->base = host->addr;
+ shost->sg_tablesize = TOTAL_SG_ENTRY;
+
+ error = request_irq(pdev->irq, i91u_intr, IRQF_DISABLED|IRQF_SHARED, "i91u", shost);
+ if (error < 0) {
+ printk(KERN_WARNING "initio: Unable to request IRQ %d\n", pdev->irq);
+ goto out_free_scbs;
+ }
+
+ pci_set_drvdata(pdev, shost);
+ host->pci_dev = pdev;
+
+ error = scsi_add_host(shost, &pdev->dev);
+ if (error)
+ goto out_free_irq;
+ scsi_scan_host(shost);
+ return 0;
+out_free_irq:
+ free_irq(pdev->irq, shost);
+out_free_scbs:
+ kfree(host->scb);
+out_release_region:
+ release_region(host->addr, 256);
+out_host_put:
+ scsi_host_put(shost);
+out_disable_device:
+ pci_disable_device(pdev);
+ return error;
+}
+
+/**
+ * initio_remove_one - control shutdown
+ * @pdev: PCI device being released
+ *
+ * Release the resources assigned to this adapter after it has
+ * finished being used.
*/
-static int i91u_release(struct Scsi_Host *hreg)
+
+static void initio_remove_one(struct pci_dev *pdev)
{
- free_irq(hreg->irq, hreg);
- release_region(hreg->io_port, 256);
- return 0;
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct initio_host *s = (struct initio_host *)host->hostdata;
+ scsi_remove_host(host);
+ free_irq(pdev->irq, host);
+ release_region(s->addr, 256);
+ scsi_host_put(host);
+ pci_disable_device(pdev);
}
-MODULE_LICENSE("Dual BSD/GPL");
-
-static struct scsi_host_template driver_template = {
- .proc_name = "INI9100U",
- .name = i91u_REVID,
- .detect = i91u_detect,
- .release = i91u_release,
- .queuecommand = i91u_queuecommand,
-// .abort = i91u_abort,
-// .reset = i91u_reset,
- .eh_bus_reset_handler = i91u_bus_reset,
- .bios_param = i91u_biosparam,
- .can_queue = 1,
- .this_id = 1,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
- .use_clustering = ENABLE_CLUSTERING,
+
+MODULE_LICENSE("GPL");
+
+static struct pci_device_id initio_pci_tbl[] = {
+ {PCI_VENDOR_ID_INIT, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_INIT, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_INIT, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_INIT, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_DOMEX, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, initio_pci_tbl);
+
+static struct pci_driver initio_pci_driver = {
+ .name = "initio",
+ .id_table = initio_pci_tbl,
+ .probe = initio_probe_one,
+ .remove = __devexit_p(initio_remove_one),
};
-#include "scsi_module.c"
+static int __init initio_init_driver(void)
+{
+ return pci_register_driver(&initio_pci_driver);
+}
+
+static void __exit initio_exit_driver(void)
+{
+ pci_unregister_driver(&initio_pci_driver);
+}
+
+MODULE_DESCRIPTION("Initio INI-9X00U/UW SCSI device driver");
+MODULE_AUTHOR("Initio Corporation");
+MODULE_LICENSE("GPL");
+
+module_init(initio_init_driver);
+module_exit(initio_exit_driver);
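
For reference, the data buffer accessor pattern that the initio conversion above follows looks roughly like the sketch below. It is not part of the patch: example_build_sg and struct example_scb are hypothetical names, while scsi_dma_map(), scsi_for_each_sg(), sg_dma_address(), sg_dma_len() and scsi_dma_unmap() are the real mid-layer accessors the converted drivers call.

#include <linux/types.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical firmware SG table; 32-bit DMA assumed, as in these drivers */
struct example_scb {
	u32 sg_addr[32];
	u32 sg_len[32];
	u32 total_len;
};

static int example_build_sg(struct scsi_cmnd *cmd, struct example_scb *scb)
{
	struct scatterlist *sg;
	int i, nseg;

	nseg = scsi_dma_map(cmd);	/* maps scsi_sglist(cmd); 0 means no data */
	if (nseg < 0)
		return nseg;		/* mapping failed, caller requeues */

	scb->total_len = 0;
	scsi_for_each_sg(cmd, sg, nseg, i) {
		scb->sg_addr[i] = (u32)sg_dma_address(sg);
		scb->sg_len[i] = sg_dma_len(sg);
		scb->total_len += sg_dma_len(sg);
	}
	return nseg;
}

/* On completion the matching teardown is simply scsi_dma_unmap(cmd). */
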
diff --git a/drivers/scsi/initio.h b/drivers/scsi/initio.h
index acb67a4af2c..cb48efa81fe 100644
--- a/drivers/scsi/initio.h
+++ b/drivers/scsi/initio.h
@@ -4,6 +4,8 @@
* Copyright (c) 1994-1998 Initio Corporation
* All rights reserved.
*
+ * Cleanups (c) Copyright 2007 Red Hat <alan@redhat.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
@@ -18,27 +20,6 @@
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * --------------------------------------------------------------------------
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Where this Software is combined with software released under the terms of
- * the GNU General Public License ("GPL") and the terms of the GPL would require the
- * combined work to also be released under the terms of the GPL, the terms
- * and conditions of this License will apply in addition to those of the
- * GPL with the exception of any terms or conditions of this License that
- * conflict with, or are expressly prohibited by, the GPL.
- *
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -56,17 +37,6 @@
#include <linux/types.h>
-#define ULONG unsigned long
-#define USHORT unsigned short
-#define UCHAR unsigned char
-#define BYTE unsigned char
-#define WORD unsigned short
-#define DWORD unsigned long
-#define UBYTE unsigned char
-#define UWORD unsigned short
-#define UDWORD unsigned long
-#define U32 u32
-
#define TOTAL_SG_ENTRY 32
#define MAX_SUPPORTED_ADAPTERS 8
#define MAX_OFFSET 15
@@ -368,55 +338,55 @@ typedef struct {
/************************************************************************/
/* Scatter-Gather Element Structure */
/************************************************************************/
-typedef struct SG_Struc {
- U32 SG_Ptr; /* Data Pointer */
- U32 SG_Len; /* Data Length */
-} SG;
+struct sg_entry {
+ u32 data; /* Data Pointer */
+ u32 len; /* Data Length */
+};
/***********************************************************************
SCSI Control Block
************************************************************************/
-typedef struct Scsi_Ctrl_Blk {
- struct Scsi_Ctrl_Blk *SCB_NxtScb;
- UBYTE SCB_Status; /*4 */
- UBYTE SCB_NxtStat; /*5 */
- UBYTE SCB_Mode; /*6 */
- UBYTE SCB_Msgin; /*7 SCB_Res0 */
- UWORD SCB_SGIdx; /*8 */
- UWORD SCB_SGMax; /*A */
+struct scsi_ctrl_blk {
+ struct scsi_ctrl_blk *next;
+ u8 status; /*4 */
+ u8 next_state; /*5 */
+ u8 mode; /*6 */
+ u8 msgin; /*7 SCB_Res0 */
+ u16 sgidx; /*8 */
+ u16 sgmax; /*A */
#ifdef ALPHA
- U32 SCB_Reserved[2]; /*C */
+ u32 reserved[2]; /*C */
#else
- U32 SCB_Reserved[3]; /*C */
+ u32 reserved[3]; /*C */
#endif
- U32 SCB_XferLen; /*18 Current xfer len */
- U32 SCB_TotXLen; /*1C Total xfer len */
- U32 SCB_PAddr; /*20 SCB phy. Addr. */
-
- UBYTE SCB_Opcode; /*24 SCB command code */
- UBYTE SCB_Flags; /*25 SCB Flags */
- UBYTE SCB_Target; /*26 Target Id */
- UBYTE SCB_Lun; /*27 Lun */
- U32 SCB_BufPtr; /*28 Data Buffer Pointer */
- U32 SCB_BufLen; /*2C Data Allocation Length */
- UBYTE SCB_SGLen; /*30 SG list # */
- UBYTE SCB_SenseLen; /*31 Sense Allocation Length */
- UBYTE SCB_HaStat; /*32 */
- UBYTE SCB_TaStat; /*33 */
- UBYTE SCB_CDBLen; /*34 CDB Length */
- UBYTE SCB_Ident; /*35 Identify */
- UBYTE SCB_TagMsg; /*36 Tag Message */
- UBYTE SCB_TagId; /*37 Queue Tag */
- UBYTE SCB_CDB[12]; /*38 */
- U32 SCB_SGPAddr; /*44 SG List/Sense Buf phy. Addr. */
- U32 SCB_SensePtr; /*48 Sense data pointer */
- void (*SCB_Post) (BYTE *, BYTE *); /*4C POST routine */
- struct scsi_cmnd *SCB_Srb; /*50 SRB Pointer */
- SG SCB_SGList[TOTAL_SG_ENTRY]; /*54 Start of SG list */
-} SCB;
-
-/* Bit Definition for SCB_Status */
+ u32 xferlen; /*18 Current xfer len */
+ u32 totxlen; /*1C Total xfer len */
+ u32 paddr; /*20 SCB phy. Addr. */
+
+ u8 opcode; /*24 SCB command code */
+ u8 flags; /*25 SCB Flags */
+ u8 target; /*26 Target Id */
+ u8 lun; /*27 Lun */
+ u32 bufptr; /*28 Data Buffer Pointer */
+ u32 buflen; /*2C Data Allocation Length */
+ u8 sglen; /*30 SG list # */
+ u8 senselen; /*31 Sense Allocation Length */
+ u8 hastat; /*32 */
+ u8 tastat; /*33 */
+ u8 cdblen; /*34 CDB Length */
+ u8 ident; /*35 Identify */
+ u8 tagmsg; /*36 Tag Message */
+ u8 tagid; /*37 Queue Tag */
+ u8 cdb[12]; /*38 */
+ u32 sgpaddr; /*44 SG List/Sense Buf phy. Addr. */
+ u32 senseptr; /*48 Sense data pointer */
+ void (*post) (u8 *, u8 *); /*4C POST routine */
+ struct scsi_cmnd *srb; /*50 SRB Pointer */
+ struct sg_entry sglist[TOTAL_SG_ENTRY]; /*54 Start of SG list */
+};
+
+/* Bit Definition for status */
#define SCB_RENT 0x01
#define SCB_PEND 0x02
#define SCB_CONTIG 0x04 /* Contigent Allegiance */
@@ -425,17 +395,17 @@ typedef struct Scsi_Ctrl_Blk {
#define SCB_DONE 0x20
-/* Opcodes of SCB_Opcode */
+/* Opcodes for opcode */
#define ExecSCSI 0x1
#define BusDevRst 0x2
#define AbortCmd 0x3
-/* Bit Definition for SCB_Mode */
+/* Bit Definition for mode */
#define SCM_RSENS 0x01 /* request sense mode */
-/* Bit Definition for SCB_Flags */
+/* Bit Definition for flags */
#define SCF_DONE 0x01
#define SCF_POST 0x02
#define SCF_SENSE 0x04
@@ -492,15 +462,14 @@ typedef struct Scsi_Ctrl_Blk {
Target Device Control Structure
**********************************************************************/
-typedef struct Tar_Ctrl_Struc {
- UWORD TCS_Flags; /* 0 */
- UBYTE TCS_JS_Period; /* 2 */
- UBYTE TCS_SConfig0; /* 3 */
-
- UWORD TCS_DrvFlags; /* 4 */
- UBYTE TCS_DrvHead; /* 6 */
- UBYTE TCS_DrvSector; /* 7 */
-} TCS;
+struct target_control {
+ u16 flags;
+ u8 js_period;
+ u8 sconfig0;
+ u16 drv_flags;
+ u8 heads;
+ u8 sectors;
+};
/***********************************************************************
Target Device Control Structure
@@ -523,62 +492,53 @@ typedef struct Tar_Ctrl_Struc {
#define TCF_DRV_EN_TAG 0x0800
#define TCF_DRV_255_63 0x0400
-typedef struct I91u_Adpt_Struc {
- UWORD ADPT_BIOS; /* 0 */
- UWORD ADPT_BASE; /* 1 */
- UBYTE ADPT_Bus; /* 2 */
- UBYTE ADPT_Device; /* 3 */
- UBYTE ADPT_INTR; /* 4 */
-} INI_ADPT_STRUCT;
-
-
/***********************************************************************
Host Adapter Control Structure
************************************************************************/
-typedef struct Ha_Ctrl_Struc {
- UWORD HCS_Base; /* 00 */
- UWORD HCS_BIOS; /* 02 */
- UBYTE HCS_Intr; /* 04 */
- UBYTE HCS_SCSI_ID; /* 05 */
- UBYTE HCS_MaxTar; /* 06 */
- UBYTE HCS_NumScbs; /* 07 */
-
- UBYTE HCS_Flags; /* 08 */
- UBYTE HCS_Index; /* 09 */
- UBYTE HCS_HaId; /* 0A */
- UBYTE HCS_Config; /* 0B */
- UWORD HCS_IdMask; /* 0C */
- UBYTE HCS_Semaph; /* 0E */
- UBYTE HCS_Phase; /* 0F */
- UBYTE HCS_JSStatus0; /* 10 */
- UBYTE HCS_JSInt; /* 11 */
- UBYTE HCS_JSStatus1; /* 12 */
- UBYTE HCS_SConf1; /* 13 */
-
- UBYTE HCS_Msg[8]; /* 14 */
- SCB *HCS_NxtAvail; /* 1C */
- SCB *HCS_Scb; /* 20 */
- SCB *HCS_ScbEnd; /* 24 */
- SCB *HCS_NxtPend; /* 28 */
- SCB *HCS_NxtContig; /* 2C */
- SCB *HCS_ActScb; /* 30 */
- TCS *HCS_ActTcs; /* 34 */
-
- SCB *HCS_FirstAvail; /* 38 */
- SCB *HCS_LastAvail; /* 3C */
- SCB *HCS_FirstPend; /* 40 */
- SCB *HCS_LastPend; /* 44 */
- SCB *HCS_FirstBusy; /* 48 */
- SCB *HCS_LastBusy; /* 4C */
- SCB *HCS_FirstDone; /* 50 */
- SCB *HCS_LastDone; /* 54 */
- UBYTE HCS_MaxTags[16]; /* 58 */
- UBYTE HCS_ActTags[16]; /* 68 */
- TCS HCS_Tcs[MAX_TARGETS]; /* 78 */
- spinlock_t HCS_AvailLock;
- spinlock_t HCS_SemaphLock;
+struct initio_host {
+ u16 addr; /* 00 */
+ u16 bios_addr; /* 02 */
+ u8 irq; /* 04 */
+ u8 scsi_id; /* 05 */
+ u8 max_tar; /* 06 */
+ u8 num_scbs; /* 07 */
+
+ u8 flags; /* 08 */
+ u8 index; /* 09 */
+ u8 ha_id; /* 0A */
+ u8 config; /* 0B */
+ u16 idmask; /* 0C */
+ u8 semaph; /* 0E */
+ u8 phase; /* 0F */
+ u8 jsstatus0; /* 10 */
+ u8 jsint; /* 11 */
+ u8 jsstatus1; /* 12 */
+ u8 sconf1; /* 13 */
+
+ u8 msg[8]; /* 14 */
+ struct scsi_ctrl_blk *next_avail; /* 1C */
+ struct scsi_ctrl_blk *scb; /* 20 */
+ struct scsi_ctrl_blk *scb_end; /* 24 */ /*UNUSED*/
+ struct scsi_ctrl_blk *next_pending; /* 28 */
+ struct scsi_ctrl_blk *next_contig; /* 2C */ /*UNUSED*/
+ struct scsi_ctrl_blk *active; /* 30 */
+ struct target_control *active_tc; /* 34 */
+
+ struct scsi_ctrl_blk *first_avail; /* 38 */
+ struct scsi_ctrl_blk *last_avail; /* 3C */
+ struct scsi_ctrl_blk *first_pending; /* 40 */
+ struct scsi_ctrl_blk *last_pending; /* 44 */
+ struct scsi_ctrl_blk *first_busy; /* 48 */
+ struct scsi_ctrl_blk *last_busy; /* 4C */
+ struct scsi_ctrl_blk *first_done; /* 50 */
+ struct scsi_ctrl_blk *last_done; /* 54 */
+ u8 max_tags[16]; /* 58 */
+ u8 act_tags[16]; /* 68 */
+ struct target_control targets[MAX_TARGETS]; /* 78 */
+ spinlock_t avail_lock;
+ spinlock_t semaph_lock;
struct pci_dev *pci_dev;
-} HCS;
+};
/* Bit Definition for HCB_Config */
#define HCC_SCSI_RESET 0x01
@@ -599,47 +559,47 @@ typedef struct Ha_Ctrl_Struc {
*******************************************************************/
typedef struct _NVRAM_SCSI { /* SCSI channel configuration */
- UCHAR NVM_ChSCSIID; /* 0Ch -> Channel SCSI ID */
- UCHAR NVM_ChConfig1; /* 0Dh -> Channel config 1 */
- UCHAR NVM_ChConfig2; /* 0Eh -> Channel config 2 */
- UCHAR NVM_NumOfTarg; /* 0Fh -> Number of SCSI target */
+ u8 NVM_ChSCSIID; /* 0Ch -> Channel SCSI ID */
+ u8 NVM_ChConfig1; /* 0Dh -> Channel config 1 */
+ u8 NVM_ChConfig2; /* 0Eh -> Channel config 2 */
+ u8 NVM_NumOfTarg; /* 0Fh -> Number of SCSI target */
/* SCSI target configuration */
- UCHAR NVM_Targ0Config; /* 10h -> Target 0 configuration */
- UCHAR NVM_Targ1Config; /* 11h -> Target 1 configuration */
- UCHAR NVM_Targ2Config; /* 12h -> Target 2 configuration */
- UCHAR NVM_Targ3Config; /* 13h -> Target 3 configuration */
- UCHAR NVM_Targ4Config; /* 14h -> Target 4 configuration */
- UCHAR NVM_Targ5Config; /* 15h -> Target 5 configuration */
- UCHAR NVM_Targ6Config; /* 16h -> Target 6 configuration */
- UCHAR NVM_Targ7Config; /* 17h -> Target 7 configuration */
- UCHAR NVM_Targ8Config; /* 18h -> Target 8 configuration */
- UCHAR NVM_Targ9Config; /* 19h -> Target 9 configuration */
- UCHAR NVM_TargAConfig; /* 1Ah -> Target A configuration */
- UCHAR NVM_TargBConfig; /* 1Bh -> Target B configuration */
- UCHAR NVM_TargCConfig; /* 1Ch -> Target C configuration */
- UCHAR NVM_TargDConfig; /* 1Dh -> Target D configuration */
- UCHAR NVM_TargEConfig; /* 1Eh -> Target E configuration */
- UCHAR NVM_TargFConfig; /* 1Fh -> Target F configuration */
+ u8 NVM_Targ0Config; /* 10h -> Target 0 configuration */
+ u8 NVM_Targ1Config; /* 11h -> Target 1 configuration */
+ u8 NVM_Targ2Config; /* 12h -> Target 2 configuration */
+ u8 NVM_Targ3Config; /* 13h -> Target 3 configuration */
+ u8 NVM_Targ4Config; /* 14h -> Target 4 configuration */
+ u8 NVM_Targ5Config; /* 15h -> Target 5 configuration */
+ u8 NVM_Targ6Config; /* 16h -> Target 6 configuration */
+ u8 NVM_Targ7Config; /* 17h -> Target 7 configuration */
+ u8 NVM_Targ8Config; /* 18h -> Target 8 configuration */
+ u8 NVM_Targ9Config; /* 19h -> Target 9 configuration */
+ u8 NVM_TargAConfig; /* 1Ah -> Target A configuration */
+ u8 NVM_TargBConfig; /* 1Bh -> Target B configuration */
+ u8 NVM_TargCConfig; /* 1Ch -> Target C configuration */
+ u8 NVM_TargDConfig; /* 1Dh -> Target D configuration */
+ u8 NVM_TargEConfig; /* 1Eh -> Target E configuration */
+ u8 NVM_TargFConfig; /* 1Fh -> Target F configuration */
} NVRAM_SCSI;
typedef struct _NVRAM {
/*----------header ---------------*/
- USHORT NVM_Signature; /* 0,1: Signature */
- UCHAR NVM_Size; /* 2: Size of data structure */
- UCHAR NVM_Revision; /* 3: Revision of data structure */
+ u16 NVM_Signature; /* 0,1: Signature */
+ u8 NVM_Size; /* 2: Size of data structure */
+ u8 NVM_Revision; /* 3: Revision of data structure */
/* ----Host Adapter Structure ---- */
- UCHAR NVM_ModelByte0; /* 4: Model number (byte 0) */
- UCHAR NVM_ModelByte1; /* 5: Model number (byte 1) */
- UCHAR NVM_ModelInfo; /* 6: Model information */
- UCHAR NVM_NumOfCh; /* 7: Number of SCSI channel */
- UCHAR NVM_BIOSConfig1; /* 8: BIOS configuration 1 */
- UCHAR NVM_BIOSConfig2; /* 9: BIOS configuration 2 */
- UCHAR NVM_HAConfig1; /* A: Hoat adapter configuration 1 */
- UCHAR NVM_HAConfig2; /* B: Hoat adapter configuration 2 */
+ u8 NVM_ModelByte0; /* 4: Model number (byte 0) */
+ u8 NVM_ModelByte1; /* 5: Model number (byte 1) */
+ u8 NVM_ModelInfo; /* 6: Model information */
+ u8 NVM_NumOfCh; /* 7: Number of SCSI channel */
+ u8 NVM_BIOSConfig1; /* 8: BIOS configuration 1 */
+ u8 NVM_BIOSConfig2; /* 9: BIOS configuration 2 */
+ u8 NVM_HAConfig1; /* A: Host adapter configuration 1 */
+ u8 NVM_HAConfig2; /* B: Host adapter configuration 2 */
NVRAM_SCSI NVM_SCSIInfo[2];
- UCHAR NVM_reserved[10];
+ u8 NVM_reserved[10];
/* ---------- CheckSum ---------- */
- USHORT NVM_CheckSum; /* 0x3E, 0x3F: Checksum of NVRam */
+ u16 NVM_CheckSum; /* 0x3E, 0x3F: Checksum of NVRam */
} NVRAM, *PNVRAM;
/* Bios Configuration for nvram->BIOSConfig1 */
@@ -681,19 +641,6 @@ typedef struct _NVRAM {
#define DISC_ALLOW 0xC0 /* Disconnect is allowed */
#define SCSICMD_RequestSense 0x03
-typedef struct _HCSinfo {
- ULONG base;
- UCHAR vec;
- UCHAR bios; /* High byte of BIOS address */
- USHORT BaseAndBios; /* high byte: pHcsInfo->bios,low byte:pHcsInfo->base */
-} HCSINFO;
-
-#define TUL_RD(x,y) (UCHAR)(inb( (int)((ULONG)(x+y)) ))
-#define TUL_RDLONG(x,y) (ULONG)(inl((int)((ULONG)(x+y)) ))
-#define TUL_WR( adr,data) outb( (UCHAR)(data), (int)(adr))
-#define TUL_WRSHORT(adr,data) outw( (UWORD)(data), (int)(adr))
-#define TUL_WRLONG( adr,data) outl( (ULONG)(data), (int)(adr))
-
#define SCSI_ABORT_SNOOZE 0
#define SCSI_ABORT_SUCCESS 1
#define SCSI_ABORT_PENDING 2
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b3bf77f1ec0..f142eafb6fc 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -540,32 +540,6 @@ struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
}
/**
- * ipr_unmap_sglist - Unmap scatterlist if mapped
- * @ioa_cfg: ioa config struct
- * @ipr_cmd: ipr command struct
- *
- * Return value:
- * nothing
- **/
-static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
- struct ipr_cmnd *ipr_cmd)
-{
- struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
-
- if (ipr_cmd->dma_use_sg) {
- if (scsi_cmd->use_sg > 0) {
- pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
- scsi_cmd->use_sg,
- scsi_cmd->sc_data_direction);
- } else {
- pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
- scsi_cmd->request_bufflen,
- scsi_cmd->sc_data_direction);
- }
- }
-}
-
-/**
* ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
* @ioa_cfg: ioa config struct
* @clr_ints: interrupts to clear
@@ -677,7 +651,7 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
scsi_cmd->result |= (DID_ERROR << 16);
- ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+ scsi_dma_unmap(ipr_cmd->scsi_cmd);
scsi_cmd->scsi_done(scsi_cmd);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}
@@ -4298,93 +4272,55 @@ static irqreturn_t ipr_isr(int irq, void *devp)
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_cmnd *ipr_cmd)
{
- int i;
- struct scatterlist *sglist;
+ int i, nseg;
+ struct scatterlist *sg;
u32 length;
u32 ioadl_flags = 0;
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
- length = scsi_cmd->request_bufflen;
-
- if (length == 0)
+ length = scsi_bufflen(scsi_cmd);
+ if (!length)
return 0;
- if (scsi_cmd->use_sg) {
- ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
- scsi_cmd->request_buffer,
- scsi_cmd->use_sg,
- scsi_cmd->sc_data_direction);
-
- if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
- ioadl_flags = IPR_IOADL_FLAGS_WRITE;
- ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
- ioarcb->write_data_transfer_length = cpu_to_be32(length);
- ioarcb->write_ioadl_len =
- cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
- } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
- ioadl_flags = IPR_IOADL_FLAGS_READ;
- ioarcb->read_data_transfer_length = cpu_to_be32(length);
- ioarcb->read_ioadl_len =
- cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
- }
-
- sglist = scsi_cmd->request_buffer;
+ nseg = scsi_dma_map(scsi_cmd);
+ if (nseg < 0) {
+ dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
+ return -1;
+ }
- if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
- ioadl = ioarcb->add_data.u.ioadl;
- ioarcb->write_ioadl_addr =
- cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
- offsetof(struct ipr_ioarcb, add_data));
- ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
- }
+ ipr_cmd->dma_use_sg = nseg;
- for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
- ioadl[i].flags_and_data_len =
- cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
- ioadl[i].address =
- cpu_to_be32(sg_dma_address(&sglist[i]));
- }
+ if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+ ioarcb->write_data_transfer_length = cpu_to_be32(length);
+ ioarcb->write_ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+ } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ ioadl_flags = IPR_IOADL_FLAGS_READ;
+ ioarcb->read_data_transfer_length = cpu_to_be32(length);
+ ioarcb->read_ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+ }
- if (likely(ipr_cmd->dma_use_sg)) {
- ioadl[i-1].flags_and_data_len |=
- cpu_to_be32(IPR_IOADL_FLAGS_LAST);
- return 0;
- } else
- dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
- } else {
- if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
- ioadl_flags = IPR_IOADL_FLAGS_WRITE;
- ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
- ioarcb->write_data_transfer_length = cpu_to_be32(length);
- ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
- } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
- ioadl_flags = IPR_IOADL_FLAGS_READ;
- ioarcb->read_data_transfer_length = cpu_to_be32(length);
- ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
- }
+ if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
+ ioadl = ioarcb->add_data.u.ioadl;
+ ioarcb->write_ioadl_addr =
+ cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
+ offsetof(struct ipr_ioarcb, add_data));
+ ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+ }
- ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
- scsi_cmd->request_buffer, length,
- scsi_cmd->sc_data_direction);
-
- if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
- ioadl = ioarcb->add_data.u.ioadl;
- ioarcb->write_ioadl_addr =
- cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
- offsetof(struct ipr_ioarcb, add_data));
- ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
- ipr_cmd->dma_use_sg = 1;
- ioadl[0].flags_and_data_len =
- cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
- ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
- return 0;
- } else
- dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
+ scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
+ ioadl[i].flags_and_data_len =
+ cpu_to_be32(ioadl_flags | sg_dma_len(sg));
+ ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
}
- return -1;
+ ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+ return 0;
}
/**
@@ -4447,7 +4383,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
res->needs_sync_complete = 1;
res->in_erp = 0;
}
- ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+ scsi_dma_unmap(ipr_cmd->scsi_cmd);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
scsi_cmd->scsi_done(scsi_cmd);
}
@@ -4825,7 +4761,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
break;
}
- ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+ scsi_dma_unmap(ipr_cmd->scsi_cmd);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
scsi_cmd->scsi_done(scsi_cmd);
}
@@ -4846,10 +4782,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
- scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
+ scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
- ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+ scsi_dma_unmap(ipr_cmd->scsi_cmd);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
scsi_cmd->scsi_done(scsi_cmd);
} else
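
The completion-side half of the same conversion, visible in the ipr hunks above, reduces to a call to scsi_set_resid() followed by scsi_dma_unmap(). A minimal sketch, with ipr_example_done as a hypothetical helper name:

static void ipr_example_done(struct scsi_cmnd *cmd, u32 residual, int result)
{
	scsi_set_resid(cmd, residual);	/* replaces direct cmd->resid assignment */
	scsi_dma_unmap(cmd);		/* replaces the removed driver unmap helper */
	cmd->result = result;
	cmd->scsi_done(cmd);		/* hand the command back to the mid layer */
}
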
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 40f148e0833..9f8ed6b8157 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -211,19 +211,6 @@ module_param(ips, charp, 0);
#warning "This driver has only been tested on the x86/ia64/x86_64 platforms"
#endif
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
-#include <linux/blk.h>
-#include "sd.h"
-#define IPS_LOCK_SAVE(lock,flags) spin_lock_irqsave(&io_request_lock,flags)
-#define IPS_UNLOCK_RESTORE(lock,flags) spin_unlock_irqrestore(&io_request_lock,flags)
-#ifndef __devexit_p
-#define __devexit_p(x) x
-#endif
-#else
-#define IPS_LOCK_SAVE(lock,flags) do{spin_lock(lock);(void)flags;}while(0)
-#define IPS_UNLOCK_RESTORE(lock,flags) do{spin_unlock(lock);(void)flags;}while(0)
-#endif
-
#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
PCI_DMA_BIDIRECTIONAL : \
@@ -381,24 +368,13 @@ static struct scsi_host_template ips_driver_template = {
.eh_abort_handler = ips_eh_abort,
.eh_host_reset_handler = ips_eh_reset,
.proc_name = "ips",
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
.proc_info = ips_proc_info,
.slave_configure = ips_slave_configure,
-#else
- .proc_info = ips_proc24_info,
- .select_queue_depths = ips_select_queue_depth,
-#endif
.bios_param = ips_biosparam,
.this_id = -1,
.sg_tablesize = IPS_MAX_SG,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- .use_new_eh_code = 1,
-#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20) && LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- .highmem_io = 1,
-#endif
};
@@ -731,7 +707,7 @@ ips_release(struct Scsi_Host *sh)
/* free IRQ */
free_irq(ha->irq, ha);
- IPS_REMOVE_HOST(sh);
+ scsi_remove_host(sh);
scsi_host_put(sh);
ips_released_controllers++;
@@ -813,7 +789,6 @@ int ips_eh_abort(struct scsi_cmnd *SC)
ips_ha_t *ha;
ips_copp_wait_item_t *item;
int ret;
- unsigned long cpu_flags;
struct Scsi_Host *host;
METHOD_TRACE("ips_eh_abort", 1);
@@ -830,7 +805,7 @@ int ips_eh_abort(struct scsi_cmnd *SC)
if (!ha->active)
return (FAILED);
- IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+ spin_lock(host->host_lock);
/* See if the command is on the copp queue */
item = ha->copp_waitlist.head;
@@ -851,7 +826,7 @@ int ips_eh_abort(struct scsi_cmnd *SC)
ret = (FAILED);
}
- IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+ spin_unlock(host->host_lock);
return ret;
}
@@ -1129,7 +1104,7 @@ static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
/* A Reset IOCTL is only sent by the boot CD in extreme cases. */
/* There can never be any system activity ( network or disk ), but check */
/* anyway just as a good practice. */
- pt = (ips_passthru_t *) SC->request_buffer;
+ pt = (ips_passthru_t *) scsi_sglist(SC);
if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) &&
(pt->CoppCP.cmd.reset.adapter_flag == 1)) {
if (ha->scb_activelist.count != 0) {
@@ -1176,18 +1151,10 @@ static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
/* Set bios geometry for the controller */
/* */
/****************************************************************************/
-static int
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-ips_biosparam(Disk * disk, kdev_t dev, int geom[])
-{
- ips_ha_t *ha = (ips_ha_t *) disk->device->host->hostdata;
- unsigned long capacity = disk->capacity;
-#else
-ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
- sector_t capacity, int geom[])
+static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
{
ips_ha_t *ha = (ips_ha_t *) sdev->host->hostdata;
-#endif
int heads;
int sectors;
int cylinders;
@@ -1225,70 +1192,6 @@ ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
return (0);
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-
-/* ips_proc24_info is a wrapper around ips_proc_info *
- * for compatibility with the 2.4 scsi parameters */
-static int
-ips_proc24_info(char *buffer, char **start, off_t offset, int length,
- int hostno, int func)
-{
- int i;
-
- for (i = 0; i < ips_next_controller; i++) {
- if (ips_sh[i] && ips_sh[i]->host_no == hostno) {
- return ips_proc_info(ips_sh[i], buffer, start,
- offset, length, func);
- }
- }
- return -EINVAL;
-}
-
-/****************************************************************************/
-/* */
-/* Routine Name: ips_select_queue_depth */
-/* */
-/* Routine Description: */
-/* */
-/* Select queue depths for the devices on the contoller */
-/* */
-/****************************************************************************/
-static void
-ips_select_queue_depth(struct Scsi_Host *host, struct scsi_device * scsi_devs)
-{
- struct scsi_device *device;
- ips_ha_t *ha;
- int count = 0;
- int min;
-
- ha = IPS_HA(host);
- min = ha->max_cmds / 4;
-
- for (device = scsi_devs; device; device = device->next) {
- if (device->host == host) {
- if ((device->channel == 0) && (device->type == 0))
- count++;
- }
- }
-
- for (device = scsi_devs; device; device = device->next) {
- if (device->host == host) {
- if ((device->channel == 0) && (device->type == 0)) {
- device->queue_depth =
- (ha->max_cmds - 1) / count;
- if (device->queue_depth < min)
- device->queue_depth = min;
- } else {
- device->queue_depth = 2;
- }
-
- if (device->queue_depth < 2)
- device->queue_depth = 2;
- }
- }
-}
-
-#else
/****************************************************************************/
/* */
/* Routine Name: ips_slave_configure */
@@ -1316,7 +1219,6 @@ ips_slave_configure(struct scsi_device * SDptr)
SDptr->skip_ms_page_3f = 1;
return 0;
}
-#endif
/****************************************************************************/
/* */
@@ -1331,7 +1233,6 @@ static irqreturn_t
do_ipsintr(int irq, void *dev_id)
{
ips_ha_t *ha;
- unsigned long cpu_flags;
struct Scsi_Host *host;
int irqstatus;
@@ -1347,16 +1248,16 @@ do_ipsintr(int irq, void *dev_id)
return IRQ_HANDLED;
}
- IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+ spin_lock(host->host_lock);
if (!ha->active) {
- IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+ spin_unlock(host->host_lock);
return IRQ_HANDLED;
}
irqstatus = (*ha->func.intr) (ha);
- IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+ spin_unlock(host->host_lock);
/* start the next command */
ips_next(ha, IPS_INTR_ON);
@@ -1606,30 +1507,22 @@ static int ips_is_passthru(struct scsi_cmnd *SC)
if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) &&
(SC->device->channel == 0) &&
(SC->device->id == IPS_ADAPTER_ID) &&
- (SC->device->lun == 0) && SC->request_buffer) {
- if ((!SC->use_sg) && SC->request_bufflen &&
- (((char *) SC->request_buffer)[0] == 'C') &&
- (((char *) SC->request_buffer)[1] == 'O') &&
- (((char *) SC->request_buffer)[2] == 'P') &&
- (((char *) SC->request_buffer)[3] == 'P'))
- return 1;
- else if (SC->use_sg) {
- struct scatterlist *sg = SC->request_buffer;
- char *buffer;
-
- /* kmap_atomic() ensures addressability of the user buffer.*/
- /* local_irq_save() protects the KM_IRQ0 address slot. */
- local_irq_save(flags);
- buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
- if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
- buffer[2] == 'P' && buffer[3] == 'P') {
- kunmap_atomic(buffer - sg->offset, KM_IRQ0);
- local_irq_restore(flags);
- return 1;
- }
- kunmap_atomic(buffer - sg->offset, KM_IRQ0);
- local_irq_restore(flags);
- }
+ (SC->device->lun == 0) && scsi_sglist(SC)) {
+ struct scatterlist *sg = scsi_sglist(SC);
+ char *buffer;
+
+ /* kmap_atomic() ensures addressability of the user buffer.*/
+ /* local_irq_save() protects the KM_IRQ0 address slot. */
+ local_irq_save(flags);
+ buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
+ buffer[2] == 'P' && buffer[3] == 'P') {
+ kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+ local_irq_restore(flags);
+ return 1;
+ }
+ kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+ local_irq_restore(flags);
}
return 0;
}
@@ -1680,18 +1573,14 @@ ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
{
ips_passthru_t *pt;
int length = 0;
- int ret;
+ int i, ret;
+ struct scatterlist *sg = scsi_sglist(SC);
METHOD_TRACE("ips_make_passthru", 1);
- if (!SC->use_sg) {
- length = SC->request_bufflen;
- } else {
- struct scatterlist *sg = SC->request_buffer;
- int i;
- for (i = 0; i < SC->use_sg; i++)
- length += sg[i].length;
- }
+ scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
+ length += sg->length;
+
if (length < sizeof (ips_passthru_t)) {
/* wrong size */
DEBUG_VAR(1, "(%s%d) Passthru structure wrong size",
@@ -2115,7 +2004,7 @@ ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb)
METHOD_TRACE("ips_cleanup_passthru", 1);
- if ((!scb) || (!scb->scsi_cmd) || (!scb->scsi_cmd->request_buffer)) {
+ if ((!scb) || (!scb->scsi_cmd) || (!scsi_sglist(scb->scsi_cmd))) {
DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru",
ips_name, ha->host_num);
@@ -2730,7 +2619,6 @@ ips_next(ips_ha_t * ha, int intr)
struct scsi_cmnd *q;
ips_copp_wait_item_t *item;
int ret;
- unsigned long cpu_flags = 0;
struct Scsi_Host *host;
METHOD_TRACE("ips_next", 1);
@@ -2742,7 +2630,7 @@ ips_next(ips_ha_t * ha, int intr)
* this command won't time out
*/
if (intr == IPS_INTR_ON)
- IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+ spin_lock(host->host_lock);
if ((ha->subsys->param[3] & 0x300000)
&& (ha->scb_activelist.count == 0)) {
@@ -2769,14 +2657,14 @@ ips_next(ips_ha_t * ha, int intr)
item = ips_removeq_copp_head(&ha->copp_waitlist);
ha->num_ioctl++;
if (intr == IPS_INTR_ON)
- IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+ spin_unlock(host->host_lock);
scb->scsi_cmd = item->scsi_cmd;
kfree(item);
ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr);
if (intr == IPS_INTR_ON)
- IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+ spin_lock(host->host_lock);
switch (ret) {
case IPS_FAILURE:
if (scb->scsi_cmd) {
@@ -2846,7 +2734,7 @@ ips_next(ips_ha_t * ha, int intr)
SC = ips_removeq_wait(&ha->scb_waitlist, q);
if (intr == IPS_INTR_ON)
- IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); /* Unlock HA after command is taken off queue */
+ spin_unlock(host->host_lock); /* Unlock HA after command is taken off queue */
SC->result = DID_OK;
SC->host_scribble = NULL;
@@ -2866,41 +2754,26 @@ ips_next(ips_ha_t * ha, int intr)
/* copy in the CDB */
memcpy(scb->cdb, SC->cmnd, SC->cmd_len);
- /* Now handle the data buffer */
- if (SC->use_sg) {
+ scb->sg_count = scsi_dma_map(SC);
+ BUG_ON(scb->sg_count < 0);
+ if (scb->sg_count) {
struct scatterlist *sg;
int i;
- sg = SC->request_buffer;
- scb->sg_count = pci_map_sg(ha->pcidev, sg, SC->use_sg,
- SC->sc_data_direction);
scb->flags |= IPS_SCB_MAP_SG;
- for (i = 0; i < scb->sg_count; i++) {
+
+ scsi_for_each_sg(SC, sg, scb->sg_count, i) {
if (ips_fill_scb_sg_single
- (ha, sg_dma_address(&sg[i]), scb, i,
- sg_dma_len(&sg[i])) < 0)
+ (ha, sg_dma_address(sg), scb, i,
+ sg_dma_len(sg)) < 0)
break;
}
scb->dcdb.transfer_length = scb->data_len;
} else {
- if (SC->request_bufflen) {
- scb->data_busaddr =
- pci_map_single(ha->pcidev,
- SC->request_buffer,
- SC->request_bufflen,
- SC->sc_data_direction);
- scb->flags |= IPS_SCB_MAP_SINGLE;
- ips_fill_scb_sg_single(ha, scb->data_busaddr,
- scb, 0,
- SC->request_bufflen);
- scb->dcdb.transfer_length = scb->data_len;
- } else {
- scb->data_busaddr = 0L;
- scb->sg_len = 0;
- scb->data_len = 0;
- scb->dcdb.transfer_length = 0;
- }
-
+ scb->data_busaddr = 0L;
+ scb->sg_len = 0;
+ scb->data_len = 0;
+ scb->dcdb.transfer_length = 0;
}
scb->dcdb.cmd_attribute =
@@ -2919,7 +2792,7 @@ ips_next(ips_ha_t * ha, int intr)
scb->dcdb.transfer_length = 0;
}
if (intr == IPS_INTR_ON)
- IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+ spin_lock(host->host_lock);
ret = ips_send_cmd(ha, scb);
@@ -2958,7 +2831,7 @@ ips_next(ips_ha_t * ha, int intr)
} /* end while */
if (intr == IPS_INTR_ON)
- IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+ spin_unlock(host->host_lock);
}
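
Stripped of the IPS specifics, the data-buffer-accessor pattern the hunk above switches to looks roughly like the sketch below; example_fill_segment() is a hypothetical stand-in for the driver's own per-segment setup (ips_fill_scb_sg_single() here):

	static int example_map_data(struct scsi_cmnd *SC)
	{
		struct scatterlist *sg;
		int nseg, i;

		nseg = scsi_dma_map(SC);	/* replaces pci_map_sg()/pci_map_single() */
		if (nseg < 0)
			return -EIO;		/* mapping failed */

		scsi_for_each_sg(SC, sg, nseg, i)
			if (example_fill_segment(i, sg_dma_address(sg),
						 sg_dma_len(sg)) < 0)
				break;

		/* 0 means no data phase; the completion/error path pairs
		 * this with scsi_dma_unmap(SC) */
		return nseg;
	}
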
/****************************************************************************/
@@ -3377,52 +3250,32 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
* the rest of the data and continue.
*/
if ((scb->breakup) || (scb->sg_break)) {
+ struct scatterlist *sg;
+ int sg_dma_index, ips_sg_index = 0;
+
/* we had a data breakup */
scb->data_len = 0;
- if (scb->sg_count) {
- /* S/G request */
- struct scatterlist *sg;
- int ips_sg_index = 0;
- int sg_dma_index;
-
- sg = scb->scsi_cmd->request_buffer;
-
- /* Spin forward to last dma chunk */
- sg_dma_index = scb->breakup;
-
- /* Take care of possible partial on last chunk */
- ips_fill_scb_sg_single(ha,
- sg_dma_address(&sg
- [sg_dma_index]),
- scb, ips_sg_index++,
- sg_dma_len(&sg
- [sg_dma_index]));
-
- for (; sg_dma_index < scb->sg_count;
- sg_dma_index++) {
- if (ips_fill_scb_sg_single
- (ha,
- sg_dma_address(&sg[sg_dma_index]),
- scb, ips_sg_index++,
- sg_dma_len(&sg[sg_dma_index])) < 0)
- break;
+ sg = scsi_sglist(scb->scsi_cmd);
- }
+ /* Spin forward to last dma chunk */
+ sg_dma_index = scb->breakup;
- } else {
- /* Non S/G Request */
- (void) ips_fill_scb_sg_single(ha,
- scb->
- data_busaddr +
- (scb->sg_break *
- ha->max_xfer),
- scb, 0,
- scb->scsi_cmd->
- request_bufflen -
- (scb->sg_break *
- ha->max_xfer));
- }
+ /* Take care of possible partial on last chunk */
+ ips_fill_scb_sg_single(ha,
+ sg_dma_address(&sg[sg_dma_index]),
+ scb, ips_sg_index++,
+ sg_dma_len(&sg[sg_dma_index]));
+
+ for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
+ sg_dma_index++) {
+ if (ips_fill_scb_sg_single
+ (ha,
+ sg_dma_address(&sg[sg_dma_index]),
+ scb, ips_sg_index++,
+ sg_dma_len(&sg[sg_dma_index])) < 0)
+ break;
+ }
scb->dcdb.transfer_length = scb->data_len;
scb->dcdb.cmd_attribute |=
@@ -3653,32 +3506,27 @@ ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr)
static void
ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
{
- if (scmd->use_sg) {
- int i;
- unsigned int min_cnt, xfer_cnt;
- char *cdata = (char *) data;
- unsigned char *buffer;
- unsigned long flags;
- struct scatterlist *sg = scmd->request_buffer;
- for (i = 0, xfer_cnt = 0;
- (i < scmd->use_sg) && (xfer_cnt < count); i++) {
- min_cnt = min(count - xfer_cnt, sg[i].length);
-
- /* kmap_atomic() ensures addressability of the data buffer.*/
- /* local_irq_save() protects the KM_IRQ0 address slot. */
- local_irq_save(flags);
- buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
- memcpy(buffer, &cdata[xfer_cnt], min_cnt);
- kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
- local_irq_restore(flags);
-
- xfer_cnt += min_cnt;
- }
-
- } else {
- unsigned int min_cnt = min(count, scmd->request_bufflen);
- memcpy(scmd->request_buffer, data, min_cnt);
- }
+ int i;
+ unsigned int min_cnt, xfer_cnt;
+ char *cdata = (char *) data;
+ unsigned char *buffer;
+ unsigned long flags;
+ struct scatterlist *sg = scsi_sglist(scmd);
+
+ for (i = 0, xfer_cnt = 0;
+ (i < scsi_sg_count(scmd)) && (xfer_cnt < count); i++) {
+ min_cnt = min(count - xfer_cnt, sg[i].length);
+
+ /* kmap_atomic() ensures addressability of the data buffer.*/
+ /* local_irq_save() protects the KM_IRQ0 address slot. */
+ local_irq_save(flags);
+ buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+ memcpy(buffer, &cdata[xfer_cnt], min_cnt);
+ kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
+ local_irq_restore(flags);
+
+ xfer_cnt += min_cnt;
+ }
}
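
Call sites do not change with this conversion; a usage sketch (the local buffer is illustrative — ips_rdcap() below builds a capacity structure the same way): construct the reply in a flat buffer and let the helper scatter it into whatever list the midlayer supplied:

	u8 reply[8];	/* e.g. an 8-byte READ CAPACITY payload */

	/* ... fill reply[] from adapter state ... */
	ips_scmd_buf_write(scb->scsi_cmd, reply, sizeof(reply));
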
/****************************************************************************/
@@ -3691,32 +3539,27 @@ ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
static void
ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
{
- if (scmd->use_sg) {
- int i;
- unsigned int min_cnt, xfer_cnt;
- char *cdata = (char *) data;
- unsigned char *buffer;
- unsigned long flags;
- struct scatterlist *sg = scmd->request_buffer;
- for (i = 0, xfer_cnt = 0;
- (i < scmd->use_sg) && (xfer_cnt < count); i++) {
- min_cnt = min(count - xfer_cnt, sg[i].length);
-
- /* kmap_atomic() ensures addressability of the data buffer.*/
- /* local_irq_save() protects the KM_IRQ0 address slot. */
- local_irq_save(flags);
- buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
- memcpy(&cdata[xfer_cnt], buffer, min_cnt);
- kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
- local_irq_restore(flags);
-
- xfer_cnt += min_cnt;
- }
-
- } else {
- unsigned int min_cnt = min(count, scmd->request_bufflen);
- memcpy(data, scmd->request_buffer, min_cnt);
- }
+ int i;
+ unsigned int min_cnt, xfer_cnt;
+ char *cdata = (char *) data;
+ unsigned char *buffer;
+ unsigned long flags;
+ struct scatterlist *sg = scsi_sglist(scmd);
+
+ for (i = 0, xfer_cnt = 0;
+ (i < scsi_sg_count(scmd)) && (xfer_cnt < count); i++) {
+ min_cnt = min(count - xfer_cnt, sg[i].length);
+
+ /* kmap_atomic() ensures addressability of the data buffer.*/
+ /* local_irq_save() protects the KM_IRQ0 address slot. */
+ local_irq_save(flags);
+ buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+ memcpy(&cdata[xfer_cnt], buffer, min_cnt);
+ kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
+ local_irq_restore(flags);
+
+ xfer_cnt += min_cnt;
+ }
}
/****************************************************************************/
@@ -4350,7 +4193,7 @@ ips_rdcap(ips_ha_t * ha, ips_scb_t * scb)
METHOD_TRACE("ips_rdcap", 1);
- if (scb->scsi_cmd->request_bufflen < 8)
+ if (scsi_bufflen(scb->scsi_cmd) < 8)
return (0);
cap.lba =
@@ -4735,8 +4578,7 @@ ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
METHOD_TRACE("ips_freescb", 1);
if (scb->flags & IPS_SCB_MAP_SG)
- pci_unmap_sg(ha->pcidev, scb->scsi_cmd->request_buffer,
- scb->scsi_cmd->use_sg, IPS_DMA_DIR(scb));
+ scsi_dma_unmap(scb->scsi_cmd);
else if (scb->flags & IPS_SCB_MAP_SINGLE)
pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len,
IPS_DMA_DIR(scb));
@@ -7004,7 +6846,6 @@ ips_register_scsi(int index)
kfree(oldha);
ips_sh[index] = sh;
ips_ha[index] = ha;
- IPS_SCSI_SET_DEVICE(sh, ha);
/* Store away needed values for later use */
sh->io_port = ha->io_addr;
@@ -7016,17 +6857,16 @@ ips_register_scsi(int index)
sh->cmd_per_lun = sh->hostt->cmd_per_lun;
sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma;
sh->use_clustering = sh->hostt->use_clustering;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,7)
sh->max_sectors = 128;
-#endif
sh->max_id = ha->ntargets;
sh->max_lun = ha->nlun;
sh->max_channel = ha->nbus - 1;
sh->can_queue = ha->max_cmds - 1;
- IPS_ADD_HOST(sh, NULL);
+ scsi_add_host(sh, NULL);
+ scsi_scan_host(sh);
+
return 0;
}
@@ -7069,7 +6909,7 @@ ips_module_init(void)
return -ENODEV;
ips_driver_template.module = THIS_MODULE;
ips_order_controllers();
- if (IPS_REGISTER_HOSTS(&ips_driver_template)) {
+ if (!ips_detect(&ips_driver_template)) {
pci_unregister_driver(&ips_pci_driver);
return -ENODEV;
}
@@ -7087,7 +6927,6 @@ ips_module_init(void)
static void __exit
ips_module_exit(void)
{
- IPS_UNREGISTER_HOSTS(&ips_driver_template);
pci_unregister_driver(&ips_pci_driver);
unregister_reboot_notifier(&ips_notifier);
}
@@ -7436,15 +7275,9 @@ ips_init_phase2(int index)
return SUCCESS;
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,9)
MODULE_LICENSE("GPL");
-#endif
-
MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING);
-
-#ifdef MODULE_VERSION
MODULE_VERSION(IPS_VER_STRING);
-#endif
/*
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index b726dcc424b..24123d537c5 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -58,10 +58,6 @@
/*
* Some handy macros
*/
- #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20) || defined CONFIG_HIGHIO
- #define IPS_HIGHIO
- #endif
-
#define IPS_HA(x) ((ips_ha_t *) x->hostdata)
#define IPS_COMMAND_ID(ha, scb) (int) (scb - ha->scbs)
#define IPS_IS_TROMBONE(ha) (((ha->device_id == IPS_DEVICEID_COPPERHEAD) && \
@@ -84,38 +80,8 @@
#define IPS_SGLIST_SIZE(ha) (IPS_USE_ENH_SGLIST(ha) ? \
sizeof(IPS_ENH_SG_LIST) : sizeof(IPS_STD_SG_LIST))
- #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
- #define pci_set_dma_mask(dev,mask) ( mask > 0xffffffff ? 1:0 )
- #define scsi_set_pci_device(sh,dev) (0)
- #endif
-
- #ifndef IRQ_NONE
- typedef void irqreturn_t;
- #define IRQ_NONE
- #define IRQ_HANDLED
- #define IRQ_RETVAL(x)
- #endif
-
- #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- #define IPS_REGISTER_HOSTS(SHT) scsi_register_module(MODULE_SCSI_HA,SHT)
- #define IPS_UNREGISTER_HOSTS(SHT) scsi_unregister_module(MODULE_SCSI_HA,SHT)
- #define IPS_ADD_HOST(shost,device)
- #define IPS_REMOVE_HOST(shost)
- #define IPS_SCSI_SET_DEVICE(sh,ha) scsi_set_pci_device(sh, (ha)->pcidev)
- #define IPS_PRINTK(level, pcidev, format, arg...) \
- printk(level "%s %s:" format , "ips" , \
- (pcidev)->slot_name , ## arg)
- #define scsi_host_alloc(sh,size) scsi_register(sh,size)
- #define scsi_host_put(sh) scsi_unregister(sh)
- #else
- #define IPS_REGISTER_HOSTS(SHT) (!ips_detect(SHT))
- #define IPS_UNREGISTER_HOSTS(SHT)
- #define IPS_ADD_HOST(shost,device) do { scsi_add_host(shost,device); scsi_scan_host(shost); } while (0)
- #define IPS_REMOVE_HOST(shost) scsi_remove_host(shost)
- #define IPS_SCSI_SET_DEVICE(sh,ha) do { } while (0)
- #define IPS_PRINTK(level, pcidev, format, arg...) \
+ #define IPS_PRINTK(level, pcidev, format, arg...) \
dev_printk(level , &((pcidev)->dev) , format , ## arg)
- #endif
#define MDELAY(n) \
do { \
@@ -134,7 +100,7 @@
#define pci_dma_hi32(a) ((a >> 16) >> 16)
#define pci_dma_lo32(a) (a & 0xffffffff)
- #if (BITS_PER_LONG > 32) || (defined CONFIG_HIGHMEM64G && defined IPS_HIGHIO)
+ #if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
#define IPS_ENABLE_DMA64 (1)
#else
#define IPS_ENABLE_DMA64 (0)
@@ -451,16 +417,10 @@
/*
* Scsi_Host Template
*/
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- static int ips_proc24_info(char *, char **, off_t, int, int, int);
- static void ips_select_queue_depth(struct Scsi_Host *, struct scsi_device *);
- static int ips_biosparam(Disk *disk, kdev_t dev, int geom[]);
-#else
static int ips_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int);
static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int geom[]);
static int ips_slave_configure(struct scsi_device *SDptr);
-#endif
/*
* Raid Command Formats
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index c9a3abf9e7b..aebcd5fcdc5 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -29,14 +29,15 @@
#include <linux/types.h>
#include <linux/list.h>
#include <linux/inet.h>
+#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
-#include <linux/mutex.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport_iscsi.h>
@@ -109,7 +110,7 @@ iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc);
- buf->sg.length = tcp_conn->hdr_size;
+ buf->sg.length += sizeof(u32);
}
static inline int
@@ -211,16 +212,14 @@ iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
static int
iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
- int rc;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
struct iscsi_session *session = conn->session;
+ struct scsi_cmnd *sc = ctask->sc;
int datasn = be32_to_cpu(rhdr->datasn);
- rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
- if (rc)
- return rc;
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
/*
* setup Data-In byte counter (gets decremented..)
*/
@@ -229,31 +228,36 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
if (tcp_conn->in.datalen == 0)
return 0;
- if (ctask->datasn != datasn)
+ if (tcp_ctask->exp_datasn != datasn) {
+ debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
+ __FUNCTION__, tcp_ctask->exp_datasn, datasn);
return ISCSI_ERR_DATASN;
+ }
- ctask->datasn++;
+ tcp_ctask->exp_datasn++;
tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
- if (tcp_ctask->data_offset + tcp_conn->in.datalen > ctask->total_length)
+ if (tcp_ctask->data_offset + tcp_conn->in.datalen > scsi_bufflen(sc)) {
+ debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
+ __FUNCTION__, tcp_ctask->data_offset,
+ tcp_conn->in.datalen, scsi_bufflen(sc));
return ISCSI_ERR_DATA_OFFSET;
+ }
if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
- struct scsi_cmnd *sc = ctask->sc;
-
conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
if (rhdr->flags & ISCSI_FLAG_DATA_UNDERFLOW) {
int res_count = be32_to_cpu(rhdr->residual_count);
if (res_count > 0 &&
- res_count <= sc->request_bufflen) {
- sc->resid = res_count;
+ res_count <= scsi_bufflen(sc)) {
+ scsi_set_resid(sc, res_count);
sc->result = (DID_OK << 16) | rhdr->cmd_status;
} else
sc->result = (DID_BAD_TARGET << 16) |
rhdr->cmd_status;
} else if (rhdr->flags & ISCSI_FLAG_DATA_OVERFLOW) {
- sc->resid = be32_to_cpu(rhdr->residual_count);
+ scsi_set_resid(sc, be32_to_cpu(rhdr->residual_count));
sc->result = (DID_OK << 16) | rhdr->cmd_status;
} else
sc->result = (DID_OK << 16) | rhdr->cmd_status;
@@ -281,6 +285,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
{
struct iscsi_data *hdr;
struct scsi_cmnd *sc = ctask->sc;
+ int i, sg_count = 0;
+ struct scatterlist *sg;
hdr = &r2t->dtask.hdr;
memset(hdr, 0, sizeof(struct iscsi_data));
@@ -308,39 +314,30 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
sizeof(struct iscsi_hdr));
- if (sc->use_sg) {
- int i, sg_count = 0;
- struct scatterlist *sg = sc->request_buffer;
-
- r2t->sg = NULL;
- for (i = 0; i < sc->use_sg; i++, sg += 1) {
- /* FIXME: prefetch ? */
- if (sg_count + sg->length > r2t->data_offset) {
- int page_offset;
+ sg = scsi_sglist(sc);
+ r2t->sg = NULL;
+ for (i = 0; i < scsi_sg_count(sc); i++, sg += 1) {
+ /* FIXME: prefetch ? */
+ if (sg_count + sg->length > r2t->data_offset) {
+ int page_offset;
- /* sg page found! */
+ /* sg page found! */
- /* offset within this page */
- page_offset = r2t->data_offset - sg_count;
+ /* offset within this page */
+ page_offset = r2t->data_offset - sg_count;
- /* fill in this buffer */
- iscsi_buf_init_sg(&r2t->sendbuf, sg);
- r2t->sendbuf.sg.offset += page_offset;
- r2t->sendbuf.sg.length -= page_offset;
+ /* fill in this buffer */
+ iscsi_buf_init_sg(&r2t->sendbuf, sg);
+ r2t->sendbuf.sg.offset += page_offset;
+ r2t->sendbuf.sg.length -= page_offset;
- /* xmit logic will continue with next one */
- r2t->sg = sg + 1;
- break;
- }
- sg_count += sg->length;
+ /* xmit logic will continue with next one */
+ r2t->sg = sg + 1;
+ break;
}
- BUG_ON(r2t->sg == NULL);
- } else {
- iscsi_buf_init_iov(&r2t->sendbuf,
- (char*)sc->request_buffer + r2t->data_offset,
- r2t->data_count);
- r2t->sg = NULL;
+ sg_count += sg->length;
}
+ BUG_ON(r2t->sg == NULL);
}
/**
@@ -365,17 +362,16 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
return ISCSI_ERR_DATALEN;
}
- if (tcp_ctask->exp_r2tsn && tcp_ctask->exp_r2tsn != r2tsn)
+ if (tcp_ctask->exp_datasn != r2tsn) {
+ debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+ __FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
return ISCSI_ERR_R2TSN;
-
- rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
- if (rc)
- return rc;
-
- /* FIXME: use R2TSN to detect missing R2T */
+ }
/* fill-in new R2T associated with the task */
spin_lock(&session->lock);
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+
if (!ctask->sc || ctask->mtask ||
session->state != ISCSI_STATE_LOGGED_IN) {
printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
@@ -401,11 +397,11 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
r2t->data_length, session->max_burst);
r2t->data_offset = be32_to_cpu(rhdr->data_offset);
- if (r2t->data_offset + r2t->data_length > ctask->total_length) {
+ if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
spin_unlock(&session->lock);
printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
"offset %u and total length %d\n", r2t->data_length,
- r2t->data_offset, ctask->total_length);
+ r2t->data_offset, scsi_bufflen(ctask->sc));
return ISCSI_ERR_DATALEN;
}
@@ -414,9 +410,9 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
iscsi_solicit_data_init(conn, ctask, r2t);
- tcp_ctask->exp_r2tsn = r2tsn + 1;
+ tcp_ctask->exp_datasn = r2tsn + 1;
__kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
- tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
+ tcp_ctask->xmstate |= XMSTATE_SOL_HDR_INIT;
list_move_tail(&ctask->running, &conn->xmitqueue);
scsi_queue_work(session->host, &conn->xmitwork);
@@ -600,7 +596,7 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
{
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
int buf_left = buf_size - (tcp_conn->data_copied + offset);
- int size = min(tcp_conn->in.copy, buf_left);
+ unsigned size = min(tcp_conn->in.copy, buf_left);
int rc;
size = min(size, ctask->data_count);
@@ -609,7 +605,7 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
size, tcp_conn->in.offset, tcp_conn->in.copied);
BUG_ON(size <= 0);
- BUG_ON(tcp_ctask->sent + size > ctask->total_length);
+ BUG_ON(tcp_ctask->sent + size > scsi_bufflen(ctask->sc));
rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
(char*)buf + (offset + tcp_conn->data_copied), size);
@@ -707,25 +703,8 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
BUG_ON((void*)ctask != sc->SCp.ptr);
- /*
- * copying Data-In into the Scsi_Cmnd
- */
- if (!sc->use_sg) {
- i = ctask->data_count;
- rc = iscsi_ctask_copy(tcp_conn, ctask, sc->request_buffer,
- sc->request_bufflen,
- tcp_ctask->data_offset);
- if (rc == -EAGAIN)
- return rc;
- if (conn->datadgst_en)
- iscsi_recv_digest_update(tcp_conn, sc->request_buffer,
- i);
- rc = 0;
- goto done;
- }
-
offset = tcp_ctask->data_offset;
- sg = sc->request_buffer;
+ sg = scsi_sglist(sc);
if (tcp_ctask->data_offset)
for (i = 0; i < tcp_ctask->sg_count; i++)
@@ -734,7 +713,7 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
if (offset < 0)
offset = 0;
- for (i = tcp_ctask->sg_count; i < sc->use_sg; i++) {
+ for (i = tcp_ctask->sg_count; i < scsi_sg_count(sc); i++) {
char *dest;
dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
@@ -779,7 +758,6 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
}
BUG_ON(ctask->data_count);
-done:
/* check for non-exceptional status */
if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) {
debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n",
@@ -895,11 +873,27 @@ more:
}
}
- if (tcp_conn->in_progress == IN_PROGRESS_DDIGEST_RECV) {
+ if (tcp_conn->in_progress == IN_PROGRESS_DDIGEST_RECV &&
+ tcp_conn->in.copy) {
uint32_t recv_digest;
debug_tcp("extra data_recv offset %d copy %d\n",
tcp_conn->in.offset, tcp_conn->in.copy);
+
+ if (!tcp_conn->data_copied) {
+ if (tcp_conn->in.padding) {
+ debug_tcp("padding -> %d\n",
+ tcp_conn->in.padding);
+ memset(pad, 0, tcp_conn->in.padding);
+ sg_init_one(&sg, pad, tcp_conn->in.padding);
+ crypto_hash_update(&tcp_conn->rx_hash,
+ &sg, sg.length);
+ }
+ crypto_hash_final(&tcp_conn->rx_hash,
+ (u8 *) &tcp_conn->in.datadgst);
+ debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
+ }
+
rc = iscsi_tcp_copy(conn, sizeof(uint32_t));
if (rc) {
if (rc == -EAGAIN)
@@ -924,8 +918,7 @@ more:
}
if (tcp_conn->in_progress == IN_PROGRESS_DATA_RECV &&
- tcp_conn->in.copy) {
-
+ tcp_conn->in.copy) {
debug_tcp("data_recv offset %d copy %d\n",
tcp_conn->in.offset, tcp_conn->in.copy);
@@ -936,24 +929,32 @@ more:
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return 0;
}
- tcp_conn->in.copy -= tcp_conn->in.padding;
- tcp_conn->in.offset += tcp_conn->in.padding;
- if (conn->datadgst_en) {
- if (tcp_conn->in.padding) {
- debug_tcp("padding -> %d\n",
- tcp_conn->in.padding);
- memset(pad, 0, tcp_conn->in.padding);
- sg_init_one(&sg, pad, tcp_conn->in.padding);
- crypto_hash_update(&tcp_conn->rx_hash,
- &sg, sg.length);
- }
- crypto_hash_final(&tcp_conn->rx_hash,
- (u8 *) &tcp_conn->in.datadgst);
- debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
+
+ if (tcp_conn->in.padding)
+ tcp_conn->in_progress = IN_PROGRESS_PAD_RECV;
+ else if (conn->datadgst_en)
tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
- tcp_conn->data_copied = 0;
- } else
+ else
+ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
+ tcp_conn->data_copied = 0;
+ }
+
+ if (tcp_conn->in_progress == IN_PROGRESS_PAD_RECV &&
+ tcp_conn->in.copy) {
+ int copylen = min(tcp_conn->in.padding - tcp_conn->data_copied,
+ tcp_conn->in.copy);
+
+ tcp_conn->in.copy -= copylen;
+ tcp_conn->in.offset += copylen;
+ tcp_conn->data_copied += copylen;
+
+ if (tcp_conn->data_copied != tcp_conn->in.padding)
+ tcp_conn->in_progress = IN_PROGRESS_PAD_RECV;
+ else if (conn->datadgst_en)
+ tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
+ else
tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
+ tcp_conn->data_copied = 0;
}
debug_tcp("f, processed %d from out of %d padding %d\n",
@@ -1215,7 +1216,6 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
struct iscsi_r2t_info *r2t, int left)
{
struct iscsi_data *hdr;
- struct scsi_cmnd *sc = ctask->sc;
int new_offset;
hdr = &r2t->dtask.hdr;
@@ -1245,15 +1245,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
if (iscsi_buf_left(&r2t->sendbuf))
return;
- if (sc->use_sg) {
- iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
- r2t->sg += 1;
- } else {
- iscsi_buf_init_iov(&r2t->sendbuf,
- (char*)sc->request_buffer + new_offset,
- r2t->data_count);
- r2t->sg = NULL;
- }
+ iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
+ r2t->sg += 1;
}
static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
@@ -1277,41 +1270,10 @@ static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
static void
iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
{
- struct scsi_cmnd *sc = ctask->sc;
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
-
- tcp_ctask->sent = 0;
- tcp_ctask->sg_count = 0;
-
- if (sc->sc_data_direction == DMA_TO_DEVICE) {
- tcp_ctask->xmstate = XMSTATE_W_HDR;
- tcp_ctask->exp_r2tsn = 0;
- BUG_ON(ctask->total_length == 0);
-
- if (sc->use_sg) {
- struct scatterlist *sg = sc->request_buffer;
-
- iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
- tcp_ctask->sg = sg + 1;
- tcp_ctask->bad_sg = sg + sc->use_sg;
- } else {
- iscsi_buf_init_iov(&tcp_ctask->sendbuf,
- sc->request_buffer,
- sc->request_bufflen);
- tcp_ctask->sg = NULL;
- tcp_ctask->bad_sg = NULL;
- }
- debug_scsi("cmd [itt 0x%x total %d imm_data %d "
- "unsol count %d, unsol offset %d]\n",
- ctask->itt, ctask->total_length, ctask->imm_count,
- ctask->unsol_count, ctask->unsol_offset);
- } else
- tcp_ctask->xmstate = XMSTATE_R_HDR;
-
- iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr,
- sizeof(struct iscsi_hdr));
+ tcp_ctask->xmstate = XMSTATE_CMD_HDR_INIT;
}
/**
@@ -1324,9 +1286,11 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
* call it again later, or recover. '0' return code means successful
* xmit.
*
- * Management xmit state machine consists of two states:
- * IN_PROGRESS_IMM_HEAD - PDU Header xmit in progress
- * IN_PROGRESS_IMM_DATA - PDU Data xmit in progress
+ * Management xmit state machine consists of these states:
+ * XMSTATE_IMM_HDR_INIT - calculate digest of PDU Header
+ * XMSTATE_IMM_HDR - PDU Header xmit in progress
+ * XMSTATE_IMM_DATA - PDU Data xmit in progress
+ * XMSTATE_IDLE - management PDU is done
**/
static int
iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
@@ -1337,23 +1301,34 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
conn->id, tcp_mtask->xmstate, mtask->itt);
- if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) {
- tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR;
- if (mtask->data_count)
+ if (tcp_mtask->xmstate & XMSTATE_IMM_HDR_INIT) {
+ iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
+ sizeof(struct iscsi_hdr));
+
+ if (mtask->data_count) {
tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
+ iscsi_buf_init_iov(&tcp_mtask->sendbuf,
+ (char*)mtask->data,
+ mtask->data_count);
+ }
+
if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
conn->stop_stage != STOP_CONN_RECOVER &&
conn->hdrdgst_en)
iscsi_hdr_digest(conn, &tcp_mtask->headbuf,
(u8*)tcp_mtask->hdrext);
+
+ tcp_mtask->sent = 0;
+ tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR_INIT;
+ tcp_mtask->xmstate |= XMSTATE_IMM_HDR;
+ }
+
+ if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) {
rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf,
mtask->data_count);
- if (rc) {
- tcp_mtask->xmstate |= XMSTATE_IMM_HDR;
- if (mtask->data_count)
- tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA;
+ if (rc)
return rc;
- }
+ tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR;
}
if (tcp_mtask->xmstate & XMSTATE_IMM_DATA) {
@@ -1387,55 +1362,67 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
return 0;
}
-static inline int
-iscsi_send_read_hdr(struct iscsi_conn *conn,
- struct iscsi_tcp_cmd_task *tcp_ctask)
+static int
+iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
- int rc;
+ struct scsi_cmnd *sc = ctask->sc;
+ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ int rc = 0;
- tcp_ctask->xmstate &= ~XMSTATE_R_HDR;
- if (conn->hdrdgst_en)
- iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
- (u8*)tcp_ctask->hdrext);
- rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, 0);
- if (!rc) {
- BUG_ON(tcp_ctask->xmstate != XMSTATE_IDLE);
- return 0; /* wait for Data-In */
- }
- tcp_ctask->xmstate |= XMSTATE_R_HDR;
- return rc;
-}
+ if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_INIT) {
+ tcp_ctask->sent = 0;
+ tcp_ctask->sg_count = 0;
+ tcp_ctask->exp_datasn = 0;
-static inline int
-iscsi_send_write_hdr(struct iscsi_conn *conn,
- struct iscsi_cmd_task *ctask)
-{
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
- int rc;
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+ struct scatterlist *sg = scsi_sglist(sc);
- tcp_ctask->xmstate &= ~XMSTATE_W_HDR;
- if (conn->hdrdgst_en)
- iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
- (u8*)tcp_ctask->hdrext);
- rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
- if (rc) {
- tcp_ctask->xmstate |= XMSTATE_W_HDR;
- return rc;
+ iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
+ tcp_ctask->sg = sg + 1;
+ tcp_ctask->bad_sg = sg + scsi_sg_count(sc);
+
+ debug_scsi("cmd [itt 0x%x total %d imm_data %d "
+ "unsol count %d, unsol offset %d]\n",
+ ctask->itt, scsi_bufflen(sc),
+ ctask->imm_count, ctask->unsol_count,
+ ctask->unsol_offset);
+ }
+
+ iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr,
+ sizeof(struct iscsi_hdr));
+
+ if (conn->hdrdgst_en)
+ iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
+ (u8*)tcp_ctask->hdrext);
+ tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_INIT;
+ tcp_ctask->xmstate |= XMSTATE_CMD_HDR_XMIT;
}
- if (ctask->imm_count) {
- tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
- iscsi_set_padding(tcp_ctask, ctask->imm_count);
+ if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_XMIT) {
+ rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
+ if (rc)
+ return rc;
+ tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_XMIT;
+
+ if (sc->sc_data_direction != DMA_TO_DEVICE)
+ return 0;
+
+ if (ctask->imm_count) {
+ tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
+ iscsi_set_padding(tcp_ctask, ctask->imm_count);
- if (ctask->conn->datadgst_en) {
- iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
- tcp_ctask->immdigest = 0;
+ if (ctask->conn->datadgst_en) {
+ iscsi_data_digest_init(ctask->conn->dd_data,
+ tcp_ctask);
+ tcp_ctask->immdigest = 0;
+ }
}
- }
- if (ctask->unsol_count)
- tcp_ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
- return 0;
+ if (ctask->unsol_count)
+ tcp_ctask->xmstate |=
+ XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
+ }
+ return rc;
}
static int
@@ -1624,9 +1611,7 @@ static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
struct iscsi_data_task *dtask;
int left, rc;
- if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
- tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
- tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
+ if (tcp_ctask->xmstate & XMSTATE_SOL_HDR_INIT) {
if (!tcp_ctask->r2t) {
spin_lock_bh(&session->lock);
__kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
@@ -1640,12 +1625,19 @@ send_hdr:
if (conn->hdrdgst_en)
iscsi_hdr_digest(conn, &r2t->headbuf,
(u8*)dtask->hdrext);
+ tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR_INIT;
+ tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
+ }
+
+ if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
+ r2t = tcp_ctask->r2t;
+ dtask = &r2t->dtask;
+
rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
- if (rc) {
- tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
- tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
+ if (rc)
return rc;
- }
+ tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
+ tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
if (conn->datadgst_en) {
iscsi_data_digest_init(conn->dd_data, tcp_ctask);
@@ -1677,8 +1669,6 @@ send_hdr:
left = r2t->data_length - r2t->sent;
if (left) {
iscsi_solicit_data_cont(conn, ctask, r2t, left);
- tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
- tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
goto send_hdr;
}
@@ -1693,8 +1683,6 @@ send_hdr:
if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t,
sizeof(void*))) {
tcp_ctask->r2t = r2t;
- tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
- tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
spin_unlock_bh(&session->lock);
goto send_hdr;
}
@@ -1703,6 +1691,46 @@ send_hdr:
return 0;
}
+/**
+ * iscsi_tcp_ctask_xmit - xmit normal PDU task
+ * @conn: iscsi connection
+ * @ctask: iscsi command task
+ *
+ * Notes:
+ * The function can return -EAGAIN, in which case the caller must
+ * call it again later, or recover. '0' return code means successful
+ * xmit.
+ * The function is divided into logical helpers (above) for the different
+ * xmit stages.
+ *
+ *iscsi_send_cmd_hdr()
+ * XMSTATE_CMD_HDR_INIT - prepare Header and Data buffers, calculate the
+ * Header Digest
+ * XMSTATE_CMD_HDR_XMIT - Transmit header in progress
+ *
+ *iscsi_send_padding
+ * XMSTATE_W_PAD - Prepare and send padding
+ * XMSTATE_W_RESEND_PAD - retry sending padding
+ *
+ *iscsi_send_digest
+ * XMSTATE_W_RESEND_DATA_DIGEST - Finalize and send Data Digest
+ * XMSTATE_W_RESEND_DATA_DIGEST - retry sending digest
+ *
+ *iscsi_send_unsol_hdr
+ * XMSTATE_UNS_INIT - prepare unsolicited data header and digest
+ * XMSTATE_UNS_HDR - send unsolicited header
+ *
+ *iscsi_send_unsol_pdu
+ * XMSTATE_UNS_DATA - send unsolicited data in progress
+ *
+ *iscsi_send_sol_pdu
+ * XMSTATE_SOL_HDR_INIT - solicit data header and digest initialize
+ * XMSTATE_SOL_HDR - send solicit header
+ * XMSTATE_SOL_DATA - send solicit data
+ *
+ *iscsi_tcp_ctask_xmit
+ * XMSTATE_IMM_DATA - xmit management data (??)
+ **/
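
Every stage listed in the comment above follows the same restartable shape; a minimal sketch (XMSTATE_STAGE/XMSTATE_NEXT_STAGE and send_more() are placeholders, not real flags or helpers in this driver):

	if (tcp_ctask->xmstate & XMSTATE_STAGE) {
		rc = send_more(conn, ctask);
		if (rc)
			return rc;	/* -EAGAIN: flag stays set, stage is retried later */
		tcp_ctask->xmstate &= ~XMSTATE_STAGE;
		tcp_ctask->xmstate |= XMSTATE_NEXT_STAGE;	/* advance to the next stage */
	}
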
static int
iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
@@ -1712,20 +1740,11 @@ iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
conn->id, tcp_ctask->xmstate, ctask->itt);
- /*
- * serialize with TMF AbortTask
- */
- if (ctask->mtask)
+ rc = iscsi_send_cmd_hdr(conn, ctask);
+ if (rc)
return rc;
-
- if (tcp_ctask->xmstate & XMSTATE_R_HDR)
- return iscsi_send_read_hdr(conn, tcp_ctask);
-
- if (tcp_ctask->xmstate & XMSTATE_W_HDR) {
- rc = iscsi_send_write_hdr(conn, ctask);
- if (rc)
- return rc;
- }
+ if (ctask->sc->sc_data_direction != DMA_TO_DEVICE)
+ return 0;
if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) {
rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
@@ -1810,18 +1829,22 @@ tcp_conn_alloc_fail:
static void
iscsi_tcp_release_conn(struct iscsi_conn *conn)
{
+ struct iscsi_session *session = conn->session;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct socket *sock = tcp_conn->sock;
- if (!tcp_conn->sock)
+ if (!sock)
return;
- sock_hold(tcp_conn->sock->sk);
+ sock_hold(sock->sk);
iscsi_conn_restore_callbacks(tcp_conn);
- sock_put(tcp_conn->sock->sk);
+ sock_put(sock->sk);
- sock_release(tcp_conn->sock);
+ spin_lock_bh(&session->lock);
tcp_conn->sock = NULL;
conn->recv_lock = NULL;
+ spin_unlock_bh(&session->lock);
+ sockfd_put(sock);
}
static void
@@ -1852,6 +1875,46 @@ iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
}
+static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
+ char *buf, int *port,
+ int (*getname)(struct socket *, struct sockaddr *,
+ int *addrlen))
+{
+ struct sockaddr_storage *addr;
+ struct sockaddr_in6 *sin6;
+ struct sockaddr_in *sin;
+ int rc = 0, len;
+
+ addr = kmalloc(sizeof(*addr), GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+
+ if (getname(sock, (struct sockaddr *) addr, &len)) {
+ rc = -ENODEV;
+ goto free_addr;
+ }
+
+ switch (addr->ss_family) {
+ case AF_INET:
+ sin = (struct sockaddr_in *)addr;
+ spin_lock_bh(&conn->session->lock);
+ sprintf(buf, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr));
+ *port = be16_to_cpu(sin->sin_port);
+ spin_unlock_bh(&conn->session->lock);
+ break;
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)addr;
+ spin_lock_bh(&conn->session->lock);
+ sprintf(buf, NIP6_FMT, NIP6(sin6->sin6_addr));
+ *port = be16_to_cpu(sin6->sin6_port);
+ spin_unlock_bh(&conn->session->lock);
+ break;
+ }
+free_addr:
+ kfree(addr);
+ return rc;
+}
+
static int
iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
@@ -1869,10 +1932,24 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
return -EEXIST;
}
+ /*
+ * copy these values now because if we drop the session
+ * userspace may still want to query the values since we will
+ * be using them for the reconnect
+ */
+ err = iscsi_tcp_get_addr(conn, sock, conn->portal_address,
+ &conn->portal_port, kernel_getpeername);
+ if (err)
+ goto free_socket;
+
+ err = iscsi_tcp_get_addr(conn, sock, conn->local_address,
+ &conn->local_port, kernel_getsockname);
+ if (err)
+ goto free_socket;
err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
if (err)
- return err;
+ goto free_socket;
/* bind iSCSI connection and socket */
tcp_conn->sock = sock;
@@ -1896,25 +1973,19 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
* set receive state machine into initial state
*/
tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
-
return 0;
+
+free_socket:
+ sockfd_put(sock);
+ return err;
}
/* called with host lock */
static void
-iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
- char *data, uint32_t data_size)
+iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
{
struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
-
- iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
- sizeof(struct iscsi_hdr));
- tcp_mtask->xmstate = XMSTATE_IMM_HDR;
- tcp_mtask->sent = 0;
-
- if (mtask->data_count)
- iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data,
- mtask->data_count);
+ tcp_mtask->xmstate = XMSTATE_IMM_HDR_INIT;
}
static int
@@ -2026,41 +2097,18 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct inet_sock *inet;
- struct ipv6_pinfo *np;
- struct sock *sk;
int len;
switch(param) {
case ISCSI_PARAM_CONN_PORT:
- mutex_lock(&conn->xmitmutex);
- if (!tcp_conn->sock) {
- mutex_unlock(&conn->xmitmutex);
- return -EINVAL;
- }
-
- inet = inet_sk(tcp_conn->sock->sk);
- len = sprintf(buf, "%hu\n", be16_to_cpu(inet->dport));
- mutex_unlock(&conn->xmitmutex);
+ spin_lock_bh(&conn->session->lock);
+ len = sprintf(buf, "%hu\n", conn->portal_port);
+ spin_unlock_bh(&conn->session->lock);
break;
case ISCSI_PARAM_CONN_ADDRESS:
- mutex_lock(&conn->xmitmutex);
- if (!tcp_conn->sock) {
- mutex_unlock(&conn->xmitmutex);
- return -EINVAL;
- }
-
- sk = tcp_conn->sock->sk;
- if (sk->sk_family == PF_INET) {
- inet = inet_sk(sk);
- len = sprintf(buf, NIPQUAD_FMT "\n",
- NIPQUAD(inet->daddr));
- } else {
- np = inet6_sk(sk);
- len = sprintf(buf, NIP6_FMT "\n", NIP6(np->daddr));
- }
- mutex_unlock(&conn->xmitmutex);
+ spin_lock_bh(&conn->session->lock);
+ len = sprintf(buf, "%s\n", conn->portal_address);
+ spin_unlock_bh(&conn->session->lock);
break;
default:
return iscsi_conn_get_param(cls_conn, param, buf);
@@ -2069,6 +2117,29 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
return len;
}
+static int
+iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf)
+{
+ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ spin_lock_bh(&session->lock);
+ if (!session->leadconn)
+ len = -ENODEV;
+ else
+ len = sprintf(buf, "%s\n",
+ session->leadconn->local_address);
+ spin_unlock_bh(&session->lock);
+ break;
+ default:
+ return iscsi_host_get_param(shost, param, buf);
+ }
+ return len;
+}
+
static void
iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
{
@@ -2096,6 +2167,7 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
static struct iscsi_cls_session *
iscsi_tcp_session_create(struct iscsi_transport *iscsit,
struct scsi_transport_template *scsit,
+ uint16_t cmds_max, uint16_t qdepth,
uint32_t initial_cmdsn, uint32_t *hostno)
{
struct iscsi_cls_session *cls_session;
@@ -2103,7 +2175,7 @@ iscsi_tcp_session_create(struct iscsi_transport *iscsit,
uint32_t hn;
int cmd_i;
- cls_session = iscsi_session_setup(iscsit, scsit,
+ cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
sizeof(struct iscsi_tcp_cmd_task),
sizeof(struct iscsi_tcp_mgmt_task),
initial_cmdsn, &hn);
@@ -2142,17 +2214,24 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
iscsi_session_teardown(cls_session);
}
+static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
+{
+ blk_queue_dma_alignment(sdev->request_queue, 0);
+ return 0;
+}
+
static struct scsi_host_template iscsi_sht = {
.name = "iSCSI Initiator over TCP/IP",
.queuecommand = iscsi_queuecommand,
.change_queue_depth = iscsi_change_queue_depth,
- .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
+ .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
.sg_tablesize = ISCSI_SG_TABLESIZE,
.max_sectors = 0xFFFF,
.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
.eh_host_reset_handler = iscsi_eh_host_reset,
.use_clustering = DISABLE_CLUSTERING,
+ .slave_configure = iscsi_tcp_slave_configure,
.proc_name = "iscsi_tcp",
.this_id = -1,
};
@@ -2179,8 +2258,12 @@ static struct iscsi_transport iscsi_tcp_transport = {
ISCSI_EXP_STATSN |
ISCSI_PERSISTENT_PORT |
ISCSI_PERSISTENT_ADDRESS |
- ISCSI_TARGET_NAME |
- ISCSI_TPGT,
+ ISCSI_TARGET_NAME | ISCSI_TPGT |
+ ISCSI_USERNAME | ISCSI_PASSWORD |
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN,
+ .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
+ ISCSI_HOST_INITIATOR_NAME |
+ ISCSI_HOST_NETDEV_NAME,
.host_template = &iscsi_sht,
.conndata_size = sizeof(struct iscsi_conn),
.max_conn = 1,
@@ -2197,6 +2280,9 @@ static struct iscsi_transport iscsi_tcp_transport = {
.get_session_param = iscsi_session_get_param,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_tcp_conn_stop,
+ /* iscsi host params */
+ .get_host_param = iscsi_tcp_host_get_param,
+ .set_host_param = iscsi_host_set_param,
/* IO */
.send_pdu = iscsi_conn_send_pdu,
.get_stats = iscsi_conn_get_stats,
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 32736831790..7eba44df0a7 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -29,11 +29,12 @@
#define IN_PROGRESS_HEADER_GATHER 0x1
#define IN_PROGRESS_DATA_RECV 0x2
#define IN_PROGRESS_DDIGEST_RECV 0x3
+#define IN_PROGRESS_PAD_RECV 0x4
/* xmit state machine */
#define XMSTATE_IDLE 0x0
-#define XMSTATE_R_HDR 0x1
-#define XMSTATE_W_HDR 0x2
+#define XMSTATE_CMD_HDR_INIT 0x1
+#define XMSTATE_CMD_HDR_XMIT 0x2
#define XMSTATE_IMM_HDR 0x4
#define XMSTATE_IMM_DATA 0x8
#define XMSTATE_UNS_INIT 0x10
@@ -44,6 +45,8 @@
#define XMSTATE_W_PAD 0x200
#define XMSTATE_W_RESEND_PAD 0x400
#define XMSTATE_W_RESEND_DATA_DIGEST 0x800
+#define XMSTATE_IMM_HDR_INIT 0x1000
+#define XMSTATE_SOL_HDR_INIT 0x2000
#define ISCSI_PAD_LEN 4
#define ISCSI_SG_TABLESIZE SG_ALL
@@ -152,7 +155,7 @@ struct iscsi_tcp_cmd_task {
struct scatterlist *sg; /* per-cmd SG list */
struct scatterlist *bad_sg; /* assert statement */
int sg_count; /* SG's to process */
- uint32_t exp_r2tsn;
+ uint32_t exp_datasn; /* expected target's R2TSN/DataSN */
int data_offset;
struct iscsi_r2t_info *r2t; /* in progress R2T */
struct iscsi_queue r2tpool;
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 81e497d9eae..5d231015bb2 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -1,6 +1,6 @@
/* jazz_esp.c: ESP front-end for MIPS JAZZ systems.
*
- * Copyright (C) 2007 Thomas BogendÃ¶rfer (tsbogend@alpha.franken.de)
+ * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.franken.de)
*/
#include <linux/kernel.h>
@@ -143,7 +143,7 @@ static int __devinit esp_jazz_probe(struct platform_device *dev)
goto fail;
host->max_id = 8;
- esp = host_to_esp(host);
+ esp = shost_priv(host);
esp->host = host;
esp->dev = dev;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 3f5b9b445b2..4d85ce10019 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -22,7 +22,6 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/types.h>
-#include <linux/mutex.h>
#include <linux/kfifo.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
@@ -46,27 +45,53 @@ class_to_transport_session(struct iscsi_cls_session *cls_session)
}
EXPORT_SYMBOL_GPL(class_to_transport_session);
-#define INVALID_SN_DELTA 0xffff
+/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+#define SNA32_CHECK 2147483648UL
-int
-iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
+static int iscsi_sna_lt(u32 n1, u32 n2)
+{
+ return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
+ (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
+}
+
+/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+static int iscsi_sna_lte(u32 n1, u32 n2)
+{
+ return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
+ (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
+}
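
A quick worked example of why the RFC 1982 form matters once the counters approach the 32-bit wrap (the values are illustrative):

	u32 before_wrap = 0xfffffffeU, after_wrap = 0x00000001U;

	/* a plain "before_wrap < after_wrap" is false here, yet the
	 * post-wrap value is logically three steps ahead */
	WARN_ON(!iscsi_sna_lt(before_wrap, after_wrap));	/* ordered correctly */
	WARN_ON(iscsi_sna_lt(after_wrap, before_wrap));		/* and not reversed */
	WARN_ON(!iscsi_sna_lte(after_wrap, after_wrap));	/* equal values pass lte */
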
+
+void
+iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
{
uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);
- if (max_cmdsn < exp_cmdsn -1 &&
- max_cmdsn > exp_cmdsn - INVALID_SN_DELTA)
- return ISCSI_ERR_MAX_CMDSN;
- if (max_cmdsn > session->max_cmdsn ||
- max_cmdsn < session->max_cmdsn - INVALID_SN_DELTA)
- session->max_cmdsn = max_cmdsn;
- if (exp_cmdsn > session->exp_cmdsn ||
- exp_cmdsn < session->exp_cmdsn - INVALID_SN_DELTA)
+ /*
+ * standard specifies this check for when to update expected and
+ * max sequence numbers
+ */
+ if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
+ return;
+
+ if (exp_cmdsn != session->exp_cmdsn &&
+ !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
session->exp_cmdsn = exp_cmdsn;
- return 0;
+ if (max_cmdsn != session->max_cmdsn &&
+ !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) {
+ session->max_cmdsn = max_cmdsn;
+ /*
+ * if the window closed with IO queued, then kick the
+ * xmit thread
+ */
+ if (!list_empty(&session->leadconn->xmitqueue) ||
+ __kfifo_len(session->leadconn->mgmtqueue))
+ scsi_queue_work(session->host,
+ &session->leadconn->xmitwork);
+ }
}
-EXPORT_SYMBOL_GPL(iscsi_check_assign_cmdsn);
+EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
struct iscsi_data *hdr)
@@ -115,14 +140,17 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
hdr->flags = ISCSI_ATTR_SIMPLE;
int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
hdr->itt = build_itt(ctask->itt, conn->id, session->age);
- hdr->data_length = cpu_to_be32(sc->request_bufflen);
+ hdr->data_length = cpu_to_be32(scsi_bufflen(sc));
hdr->cmdsn = cpu_to_be32(session->cmdsn);
session->cmdsn++;
hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
- memset(&hdr->cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len);
+ if (sc->cmd_len < MAX_COMMAND_SIZE)
+ memset(&hdr->cdb[sc->cmd_len], 0,
+ MAX_COMMAND_SIZE - sc->cmd_len);
ctask->data_count = 0;
+ ctask->imm_count = 0;
if (sc->sc_data_direction == DMA_TO_DEVICE) {
hdr->flags |= ISCSI_FLAG_CMD_WRITE;
/*
@@ -139,25 +167,24 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
*
* pad_count bytes to be sent as zero-padding
*/
- ctask->imm_count = 0;
ctask->unsol_count = 0;
ctask->unsol_offset = 0;
ctask->unsol_datasn = 0;
if (session->imm_data_en) {
- if (ctask->total_length >= session->first_burst)
+ if (scsi_bufflen(sc) >= session->first_burst)
ctask->imm_count = min(session->first_burst,
conn->max_xmit_dlength);
else
- ctask->imm_count = min(ctask->total_length,
+ ctask->imm_count = min(scsi_bufflen(sc),
conn->max_xmit_dlength);
hton24(ctask->hdr->dlength, ctask->imm_count);
} else
zero_data(ctask->hdr->dlength);
if (!session->initial_r2t_en) {
- ctask->unsol_count = min(session->first_burst,
- ctask->total_length) - ctask->imm_count;
+ ctask->unsol_count = min((session->first_burst),
+ (scsi_bufflen(sc))) - ctask->imm_count;
ctask->unsol_offset = ctask->imm_count;
}
@@ -165,7 +192,6 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
/* No unsolicit Data-Out's */
ctask->hdr->flags |= ISCSI_FLAG_CMD_FINAL;
} else {
- ctask->datasn = 0;
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
zero_data(hdr->dlength);
@@ -174,8 +200,13 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
}
conn->scsicmd_pdus_cnt++;
+
+ debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
+ "cmdsn %d win %d]\n",
+ sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
+ conn->id, sc, sc->cmnd[0], ctask->itt, scsi_bufflen(sc),
+ session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
}
-EXPORT_SYMBOL_GPL(iscsi_prep_scsi_cmd_pdu);
/**
* iscsi_complete_command - return command back to scsi-ml
@@ -204,26 +235,12 @@ static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
atomic_inc(&ctask->refcount);
}
-static void iscsi_get_ctask(struct iscsi_cmd_task *ctask)
-{
- spin_lock_bh(&ctask->conn->session->lock);
- __iscsi_get_ctask(ctask);
- spin_unlock_bh(&ctask->conn->session->lock);
-}
-
static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
{
if (atomic_dec_and_test(&ctask->refcount))
iscsi_complete_command(ctask);
}
-static void iscsi_put_ctask(struct iscsi_cmd_task *ctask)
-{
- spin_lock_bh(&ctask->conn->session->lock);
- __iscsi_put_ctask(ctask);
- spin_unlock_bh(&ctask->conn->session->lock);
-}
-
/**
* iscsi_cmd_rsp - SCSI Command Response processing
* @conn: iscsi connection
@@ -235,21 +252,15 @@ static void iscsi_put_ctask(struct iscsi_cmd_task *ctask)
* iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
* then completes the command and task.
**/
-static int iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
- struct iscsi_cmd_task *ctask, char *data,
- int datalen)
+static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ struct iscsi_cmd_task *ctask, char *data,
+ int datalen)
{
- int rc;
struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
struct iscsi_session *session = conn->session;
struct scsi_cmnd *sc = ctask->sc;
- rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
- if (rc) {
- sc->result = DID_ERROR << 16;
- goto out;
- }
-
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
sc->result = (DID_OK << 16) | rhdr->cmd_status;
@@ -286,14 +297,14 @@ invalid_datalen:
if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
int res_count = be32_to_cpu(rhdr->residual_count);
- if (res_count > 0 && res_count <= sc->request_bufflen)
- sc->resid = res_count;
+ if (res_count > 0 && res_count <= scsi_bufflen(sc))
+ scsi_set_resid(sc, res_count);
else
sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
} else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW)
sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW)
- sc->resid = be32_to_cpu(rhdr->residual_count);
+ scsi_set_resid(sc, be32_to_cpu(rhdr->residual_count));
out:
debug_scsi("done [sc %lx res %d itt 0x%x]\n",
@@ -301,7 +312,6 @@ out:
conn->scsirsp_pdus_cnt++;
__iscsi_put_ctask(ctask);
- return rc;
}
static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
@@ -381,8 +391,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
switch(opcode) {
case ISCSI_OP_SCSI_CMD_RSP:
BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
- rc = iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
- datalen);
+ iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
+ datalen);
break;
case ISCSI_OP_SCSI_DATA_IN:
BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
@@ -405,11 +415,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
opcode, conn->id, mtask->itt, datalen);
- rc = iscsi_check_assign_cmdsn(session,
- (struct iscsi_nopin*)hdr);
- if (rc)
- goto done;
-
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
switch(opcode) {
case ISCSI_OP_LOGOUT_RSP:
if (datalen) {
@@ -458,10 +464,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
break;
}
} else if (itt == ~0U) {
- rc = iscsi_check_assign_cmdsn(session,
- (struct iscsi_nopin*)hdr);
- if (rc)
- goto done;
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
switch(opcode) {
case ISCSI_OP_NOOP_IN:
@@ -491,7 +494,6 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
} else
rc = ISCSI_ERR_BAD_ITT;
-done:
return rc;
}
EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
@@ -578,17 +580,47 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
}
EXPORT_SYMBOL_GPL(iscsi_conn_failure);
+static void iscsi_prep_mtask(struct iscsi_conn *conn,
+ struct iscsi_mgmt_task *mtask)
+{
+ struct iscsi_session *session = conn->session;
+ struct iscsi_hdr *hdr = mtask->hdr;
+ struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+
+ if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
+ hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+ nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
+ /*
+ * pre-format CmdSN for outgoing PDU.
+ */
+ nop->cmdsn = cpu_to_be32(session->cmdsn);
+ if (hdr->itt != RESERVED_ITT) {
+ hdr->itt = build_itt(mtask->itt, conn->id, session->age);
+ if (conn->c_stage == ISCSI_CONN_STARTED &&
+ !(hdr->opcode & ISCSI_OP_IMMEDIATE))
+ session->cmdsn++;
+ }
+
+ if (session->tt->init_mgmt_task)
+ session->tt->init_mgmt_task(conn, mtask);
+
+ debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
+ hdr->opcode, hdr->itt, mtask->data_count);
+}
+
static int iscsi_xmit_mtask(struct iscsi_conn *conn)
{
struct iscsi_hdr *hdr = conn->mtask->hdr;
int rc, was_logout = 0;
+ spin_unlock_bh(&conn->session->lock);
if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) {
conn->session->state = ISCSI_STATE_IN_RECOVERY;
iscsi_block_session(session_to_cls(conn->session));
was_logout = 1;
}
rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
+ spin_lock_bh(&conn->session->lock);
if (rc)
return rc;
@@ -602,6 +634,45 @@ static int iscsi_xmit_mtask(struct iscsi_conn *conn)
return 0;
}
+static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
+{
+ struct iscsi_session *session = conn->session;
+
+ /*
+ * Check for iSCSI window and take care of CmdSN wrap-around
+ */
+ if (!iscsi_sna_lte(session->cmdsn, session->max_cmdsn)) {
+ debug_scsi("iSCSI CmdSN closed. MaxCmdSN %u CmdSN %u\n",
+ session->max_cmdsn, session->cmdsn);
+ return -ENOSPC;
+ }
+ return 0;
+}
+
+static int iscsi_xmit_ctask(struct iscsi_conn *conn)
+{
+ struct iscsi_cmd_task *ctask = conn->ctask;
+ int rc = 0;
+
+ /*
+ * serialize with TMF AbortTask
+ */
+ if (ctask->state == ISCSI_TASK_ABORTING)
+ goto done;
+
+ __iscsi_get_ctask(ctask);
+ spin_unlock_bh(&conn->session->lock);
+ rc = conn->session->tt->xmit_cmd_task(conn, ctask);
+ spin_lock_bh(&conn->session->lock);
+ __iscsi_put_ctask(ctask);
+
+done:
+ if (!rc)
+ /* done with this ctask */
+ conn->ctask = NULL;
+ return rc;
+}
+
/**
* iscsi_data_xmit - xmit any command into the scheduled connection
* @conn: iscsi connection
@@ -613,106 +684,79 @@ static int iscsi_xmit_mtask(struct iscsi_conn *conn)
**/
static int iscsi_data_xmit(struct iscsi_conn *conn)
{
- struct iscsi_transport *tt;
int rc = 0;
+ spin_lock_bh(&conn->session->lock);
if (unlikely(conn->suspend_tx)) {
debug_scsi("conn %d Tx suspended!\n", conn->id);
+ spin_unlock_bh(&conn->session->lock);
return -ENODATA;
}
- tt = conn->session->tt;
-
- /*
- * Transmit in the following order:
- *
- * 1) un-finished xmit (ctask or mtask)
- * 2) immediate control PDUs
- * 3) write data
- * 4) SCSI commands
- * 5) non-immediate control PDUs
- *
- * No need to lock around __kfifo_get as long as
- * there's one producer and one consumer.
- */
-
- BUG_ON(conn->ctask && conn->mtask);
if (conn->ctask) {
- iscsi_get_ctask(conn->ctask);
- rc = tt->xmit_cmd_task(conn, conn->ctask);
- iscsi_put_ctask(conn->ctask);
+ rc = iscsi_xmit_ctask(conn);
if (rc)
goto again;
- /* done with this in-progress ctask */
- conn->ctask = NULL;
}
+
if (conn->mtask) {
rc = iscsi_xmit_mtask(conn);
if (rc)
goto again;
}
- /* process immediate first */
- if (unlikely(__kfifo_len(conn->immqueue))) {
- while (__kfifo_get(conn->immqueue, (void*)&conn->mtask,
- sizeof(void*))) {
- spin_lock_bh(&conn->session->lock);
- list_add_tail(&conn->mtask->running,
- &conn->mgmt_run_list);
- spin_unlock_bh(&conn->session->lock);
- rc = iscsi_xmit_mtask(conn);
- if (rc)
- goto again;
- }
+ /*
+ * process mgmt pdus like nops before commands since we should
+ * only have one nop-out as a ping from us and targets should not
+ * overflow us with nop-ins
+ */
+check_mgmt:
+ while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
+ sizeof(void*))) {
+ iscsi_prep_mtask(conn, conn->mtask);
+ list_add_tail(&conn->mtask->running, &conn->mgmt_run_list);
+ rc = iscsi_xmit_mtask(conn);
+ if (rc)
+ goto again;
}
/* process command queue */
- spin_lock_bh(&conn->session->lock);
while (!list_empty(&conn->xmitqueue)) {
+ rc = iscsi_check_cmdsn_window_closed(conn);
+ if (rc) {
+ spin_unlock_bh(&conn->session->lock);
+ return rc;
+ }
/*
* iscsi tcp may readd the task to the xmitqueue to send
* write data
*/
conn->ctask = list_entry(conn->xmitqueue.next,
struct iscsi_cmd_task, running);
+ if (conn->ctask->state == ISCSI_TASK_PENDING) {
+ iscsi_prep_scsi_cmd_pdu(conn->ctask);
+ conn->session->tt->init_cmd_task(conn->ctask);
+ }
conn->ctask->state = ISCSI_TASK_RUNNING;
list_move_tail(conn->xmitqueue.next, &conn->run_list);
- __iscsi_get_ctask(conn->ctask);
- spin_unlock_bh(&conn->session->lock);
-
- rc = tt->xmit_cmd_task(conn, conn->ctask);
-
- spin_lock_bh(&conn->session->lock);
- __iscsi_put_ctask(conn->ctask);
- if (rc) {
- spin_unlock_bh(&conn->session->lock);
+ rc = iscsi_xmit_ctask(conn);
+ if (rc)
goto again;
- }
+ /*
+ * we could continuously get new ctask requests so
+ * we need to check the mgmt queue for nops that need to
+	 * be sent to avoid starvation
+ */
+ if (__kfifo_len(conn->mgmtqueue))
+ goto check_mgmt;
}
spin_unlock_bh(&conn->session->lock);
- /* done with this ctask */
- conn->ctask = NULL;
-
- /* process the rest control plane PDUs, if any */
- if (unlikely(__kfifo_len(conn->mgmtqueue))) {
- while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
- sizeof(void*))) {
- spin_lock_bh(&conn->session->lock);
- list_add_tail(&conn->mtask->running,
- &conn->mgmt_run_list);
- spin_unlock_bh(&conn->session->lock);
- rc = iscsi_xmit_mtask(conn);
- if (rc)
- goto again;
- }
- }
-
return -ENODATA;
again:
if (unlikely(conn->suspend_tx))
- return -ENODATA;
-
+ rc = -ENODATA;
+ spin_unlock_bh(&conn->session->lock);
return rc;
}
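
The iscsi_check_cmdsn_window_closed() helper introduced above compares CmdSN against MaxCmdSN with iscsi_sna_lte() rather than a plain signed subtraction, so the test stays correct when the 32-bit counters wrap. A minimal, self-contained sketch of that wrap-safe comparison follows; it is an illustration only, and the real iscsi_sna_lte() in the iSCSI headers may be written differently:

#include <assert.h>
#include <stdint.h>

/*
 * Sketch of a wrap-safe serial-number compare for 32-bit CmdSN values:
 * a <= b when the two are equal or when (b - a) modulo 2^32 is below
 * 2^31, which stays true across a wrap from 0xffffffff back to 0.
 */
static int sna_lte(uint32_t a, uint32_t b)
{
	return a == b || (int32_t)(b - a) > 0;
}

int main(void)
{
	/* window still open: cmdsn has not passed max_cmdsn despite the wrap */
	assert(sna_lte(0xfffffffeU, 0x00000005U));
	/* window closed: cmdsn is ahead of max_cmdsn */
	assert(!sna_lte(0x00000005U, 0xfffffffeU));
	return 0;
}
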
@@ -724,11 +768,9 @@ static void iscsi_xmitworker(struct work_struct *work)
/*
* serialize Xmit worker on a per-connection basis.
*/
- mutex_lock(&conn->xmitmutex);
do {
rc = iscsi_data_xmit(conn);
} while (rc >= 0 || rc == -EAGAIN);
- mutex_unlock(&conn->xmitmutex);
}
enum {
@@ -786,20 +828,23 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
goto fault;
}
- /*
- * Check for iSCSI window and take care of CmdSN wrap-around
- */
- if ((int)(session->max_cmdsn - session->cmdsn) < 0) {
- reason = FAILURE_WINDOW_CLOSED;
- goto reject;
- }
-
conn = session->leadconn;
if (!conn) {
reason = FAILURE_SESSION_FREED;
goto fault;
}
+ /*
+	 * We check this here and in data xmit, because if this check starts
+	 * hitting the window limit then we already have enough IO in flight
+	 * and enough IO waiting to be transmitted, and it is better to let
+	 * the scsi/block layer queue up.
+ */
+ if (iscsi_check_cmdsn_window_closed(conn)) {
+ reason = FAILURE_WINDOW_CLOSED;
+ goto reject;
+ }
+
if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
sizeof(void*))) {
reason = FAILURE_OOM;
@@ -814,18 +859,8 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
ctask->conn = conn;
ctask->sc = sc;
INIT_LIST_HEAD(&ctask->running);
- ctask->total_length = sc->request_bufflen;
- iscsi_prep_scsi_cmd_pdu(ctask);
-
- session->tt->init_cmd_task(ctask);
list_add_tail(&ctask->running, &conn->xmitqueue);
- debug_scsi(
- "ctask enq [%s cid %d sc %p cdb 0x%x itt 0x%x len %d cmdsn %d "
- "win %d]\n",
- sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
- conn->id, sc, sc->cmnd[0], ctask->itt, sc->request_bufflen,
- session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
spin_unlock(&session->lock);
scsi_queue_work(host, &conn->xmitwork);
@@ -841,7 +876,7 @@ fault:
printk(KERN_ERR "iscsi: cmd 0x%x is not queued (%d)\n",
sc->cmnd[0], reason);
sc->result = (DID_NO_CONNECT << 16);
- sc->resid = sc->request_bufflen;
+ scsi_set_resid(sc, scsi_bufflen(sc));
sc->scsi_done(sc);
return 0;
}
@@ -856,19 +891,16 @@ int iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
}
EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
-static int
-iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
- char *data, uint32_t data_size)
+static struct iscsi_mgmt_task *
+__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size)
{
struct iscsi_session *session = conn->session;
- struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
struct iscsi_mgmt_task *mtask;
- spin_lock_bh(&session->lock);
- if (session->state == ISCSI_STATE_TERMINATE) {
- spin_unlock_bh(&session->lock);
- return -EPERM;
- }
+ if (session->state == ISCSI_STATE_TERMINATE)
+ return NULL;
+
if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
/*
@@ -882,27 +914,11 @@ iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
- nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
if (!__kfifo_get(session->mgmtpool.queue,
- (void*)&mtask, sizeof(void*))) {
- spin_unlock_bh(&session->lock);
- return -ENOSPC;
- }
+ (void*)&mtask, sizeof(void*)))
+ return NULL;
}
- /*
- * pre-format CmdSN for outgoing PDU.
- */
- if (hdr->itt != RESERVED_ITT) {
- hdr->itt = build_itt(mtask->itt, conn->id, session->age);
- nop->cmdsn = cpu_to_be32(session->cmdsn);
- if (conn->c_stage == ISCSI_CONN_STARTED &&
- !(hdr->opcode & ISCSI_OP_IMMEDIATE))
- session->cmdsn++;
- } else
- /* do not advance CmdSN */
- nop->cmdsn = cpu_to_be32(session->cmdsn);
-
if (data_size) {
memcpy(mtask->data, data, data_size);
mtask->data_count = data_size;
@@ -911,38 +927,23 @@ iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
INIT_LIST_HEAD(&mtask->running);
memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
- if (session->tt->init_mgmt_task)
- session->tt->init_mgmt_task(conn, mtask, data, data_size);
- spin_unlock_bh(&session->lock);
-
- debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
- hdr->opcode, hdr->itt, data_size);
-
- /*
- * since send_pdu() could be called at least from two contexts,
- * we need to serialize __kfifo_put, so we don't have to take
- * additional lock on fast data-path
- */
- if (hdr->opcode & ISCSI_OP_IMMEDIATE)
- __kfifo_put(conn->immqueue, (void*)&mtask, sizeof(void*));
- else
- __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
-
- scsi_queue_work(session->host, &conn->xmitwork);
- return 0;
+ __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
+ return mtask;
}
int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
char *data, uint32_t data_size)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- int rc;
-
- mutex_lock(&conn->xmitmutex);
- rc = iscsi_conn_send_generic(conn, hdr, data, data_size);
- mutex_unlock(&conn->xmitmutex);
+ struct iscsi_session *session = conn->session;
+ int err = 0;
- return rc;
+ spin_lock_bh(&session->lock);
+ if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+ err = -EPERM;
+ spin_unlock_bh(&session->lock);
+ scsi_queue_work(session->host, &conn->xmitwork);
+ return err;
}
EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
@@ -1027,14 +1028,12 @@ static void iscsi_tmabort_timedout(unsigned long data)
spin_unlock(&session->lock);
}
-/* must be called with the mutex lock */
static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
struct iscsi_cmd_task *ctask)
{
struct iscsi_conn *conn = ctask->conn;
struct iscsi_session *session = conn->session;
struct iscsi_tm *hdr = &conn->tmhdr;
- int rc;
/*
* ctask timed out but session is OK requests must be serialized.
@@ -1047,32 +1046,27 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
hdr->rtt = ctask->hdr->itt;
hdr->refcmdsn = ctask->hdr->cmdsn;
- rc = iscsi_conn_send_generic(conn, (struct iscsi_hdr *)hdr,
- NULL, 0);
- if (rc) {
+ ctask->mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+ NULL, 0);
+ if (!ctask->mtask) {
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- debug_scsi("abort sent failure [itt 0x%x] %d\n", ctask->itt,
- rc);
- return rc;
+ debug_scsi("abort sent failure [itt 0x%x]\n", ctask->itt);
+ return -EPERM;
}
+ ctask->state = ISCSI_TASK_ABORTING;
debug_scsi("abort sent [itt 0x%x]\n", ctask->itt);
- spin_lock_bh(&session->lock);
- ctask->mtask = (struct iscsi_mgmt_task *)
- session->mgmt_cmds[get_itt(hdr->itt) -
- ISCSI_MGMT_ITT_OFFSET];
-
if (conn->tmabort_state == TMABORT_INITIAL) {
conn->tmfcmd_pdus_cnt++;
- conn->tmabort_timer.expires = 10*HZ + jiffies;
+ conn->tmabort_timer.expires = 20*HZ + jiffies;
conn->tmabort_timer.function = iscsi_tmabort_timedout;
conn->tmabort_timer.data = (unsigned long)ctask;
add_timer(&conn->tmabort_timer);
debug_scsi("abort set timeout [itt 0x%x]\n", ctask->itt);
}
spin_unlock_bh(&session->lock);
- mutex_unlock(&conn->xmitmutex);
+ scsi_queue_work(session->host, &conn->xmitwork);
/*
* block eh thread until:
@@ -1089,13 +1083,12 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
if (signal_pending(current))
flush_signals(current);
del_timer_sync(&conn->tmabort_timer);
-
- mutex_lock(&conn->xmitmutex);
+ spin_lock_bh(&session->lock);
return 0;
}
/*
- * xmit mutex and session lock must be held
+ * session lock must be held
*/
static struct iscsi_mgmt_task *
iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt)
@@ -1127,7 +1120,7 @@ static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
if (!ctask->mtask)
return -EINVAL;
- if (!iscsi_remove_mgmt_task(conn->immqueue, ctask->mtask->itt))
+ if (!iscsi_remove_mgmt_task(conn->mgmtqueue, ctask->mtask->itt))
list_del(&ctask->mtask->running);
__kfifo_put(session->mgmtpool.queue, (void*)&ctask->mtask,
sizeof(void*));
@@ -1136,7 +1129,7 @@ static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
}
/*
- * session lock and xmitmutex must be held
+ * session lock must be held
*/
static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
int err)
@@ -1147,11 +1140,14 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
if (!sc)
return;
- conn->session->tt->cleanup_cmd_task(conn, ctask);
+ if (ctask->state != ISCSI_TASK_PENDING)
+ conn->session->tt->cleanup_cmd_task(conn, ctask);
iscsi_ctask_mtask_cleanup(ctask);
sc->result = err;
- sc->resid = sc->request_bufflen;
+ scsi_set_resid(sc, scsi_bufflen(sc));
+ if (conn->ctask == ctask)
+ conn->ctask = NULL;
/* release ref from queuecommand */
__iscsi_put_ctask(ctask);
}
@@ -1179,7 +1175,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
conn->eh_abort_cnt++;
debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
- mutex_lock(&conn->xmitmutex);
spin_lock_bh(&session->lock);
/*
@@ -1192,9 +1187,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
/* ctask completed before time out */
if (!ctask->sc) {
- spin_unlock_bh(&session->lock);
debug_scsi("sc completed while abort in progress\n");
- goto success_rel_mutex;
+ goto success;
}
/* what should we do here ? */
@@ -1204,15 +1198,13 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
goto failed;
}
- if (ctask->state == ISCSI_TASK_PENDING)
- goto success_cleanup;
+ if (ctask->state == ISCSI_TASK_PENDING) {
+ fail_command(conn, ctask, DID_ABORT << 16);
+ goto success;
+ }
conn->tmabort_state = TMABORT_INITIAL;
-
- spin_unlock_bh(&session->lock);
rc = iscsi_exec_abort_task(sc, ctask);
- spin_lock_bh(&session->lock);
-
if (rc || sc->SCp.phase != session->age ||
session->state != ISCSI_STATE_LOGGED_IN)
goto failed;
@@ -1220,45 +1212,44 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
switch (conn->tmabort_state) {
case TMABORT_SUCCESS:
- goto success_cleanup;
+ spin_unlock_bh(&session->lock);
+ /*
+ * clean up task if aborted. grab the recv lock as a writer
+ */
+ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+ fail_command(conn, ctask, DID_ABORT << 16);
+ spin_unlock(&session->lock);
+ write_unlock_bh(conn->recv_lock);
+ /*
+ * make sure xmit thread is not still touching the
+ * ctask/scsi_cmnd
+ */
+ scsi_flush_work(session->host);
+ goto success_unlocked;
case TMABORT_NOT_FOUND:
if (!ctask->sc) {
/* ctask completed before tmf abort response */
- spin_unlock_bh(&session->lock);
debug_scsi("sc completed while abort in progress\n");
- goto success_rel_mutex;
+ goto success;
}
/* fall through */
default:
/* timedout or failed */
spin_unlock_bh(&session->lock);
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- spin_lock_bh(&session->lock);
- goto failed;
+ goto failed_unlocked;
}
-success_cleanup:
- debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+success:
spin_unlock_bh(&session->lock);
-
- /*
- * clean up task if aborted. we have the xmitmutex so grab
- * the recv lock as a writer
- */
- write_lock_bh(conn->recv_lock);
- spin_lock(&session->lock);
- fail_command(conn, ctask, DID_ABORT << 16);
- spin_unlock(&session->lock);
- write_unlock_bh(conn->recv_lock);
-
-success_rel_mutex:
- mutex_unlock(&conn->xmitmutex);
+success_unlocked:
+ debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
return SUCCESS;
failed:
spin_unlock_bh(&session->lock);
- mutex_unlock(&conn->xmitmutex);
-
+failed_unlocked:
debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
return FAILED;
}
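
With the per-connection xmitmutex gone, the TMABORT_SUCCESS branch above spells out the locking that now protects a task being failed from the error handler: the connection's recv_lock is taken for write, the session lock is nested inside it, both are dropped in reverse order, and the xmit work is flushed so the transmit side cannot still be touching the command. The pthreads sketch below only models that nesting; the lock names are placeholders, not the driver's objects:

#include <pthread.h>

/* Placeholder locks standing in for conn->recv_lock and session->lock. */
static pthread_rwlock_t recv_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t session_lock = PTHREAD_MUTEX_INITIALIZER;

static void fail_command_sketch(void)
{
	/* complete the aborted command with an error, under session_lock */
}

/*
 * Error-handler side: exclude the receive path first by taking the
 * rwlock for write, nest the session lock inside it, fail the command,
 * then release in reverse order.  Flushing the xmit worker afterwards
 * (scsi_flush_work() in the patch) is what guarantees the transmit
 * side is no longer referencing the task.
 */
static void eh_fail_task_sketch(void)
{
	pthread_rwlock_wrlock(&recv_lock);
	pthread_mutex_lock(&session_lock);
	fail_command_sketch();
	pthread_mutex_unlock(&session_lock);
	pthread_rwlock_unlock(&recv_lock);
}
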
@@ -1339,6 +1330,10 @@ EXPORT_SYMBOL_GPL(iscsi_pool_free);
* iscsi_session_setup - create iscsi cls session and host and session
* @scsit: scsi transport template
* @iscsit: iscsi transport template
+ * @cmds_max: number of command tasks to allocate (sizes the scsi host can_queue)
+ * @qdepth: scsi host cmds per lun
+ * @cmd_task_size: LLD ctask private data size
+ * @mgmt_task_size: LLD mtask private data size
* @initial_cmdsn: initial CmdSN
* @hostno: host no allocated
*
@@ -1348,6 +1343,7 @@ EXPORT_SYMBOL_GPL(iscsi_pool_free);
struct iscsi_cls_session *
iscsi_session_setup(struct iscsi_transport *iscsit,
struct scsi_transport_template *scsit,
+ uint16_t cmds_max, uint16_t qdepth,
int cmd_task_size, int mgmt_task_size,
uint32_t initial_cmdsn, uint32_t *hostno)
{
@@ -1356,11 +1352,32 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
struct iscsi_cls_session *cls_session;
int cmd_i;
+ if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
+ if (qdepth != 0)
+ printk(KERN_ERR "iscsi: invalid queue depth of %d. "
+ "Queue depth must be between 1 and %d.\n",
+ qdepth, ISCSI_MAX_CMD_PER_LUN);
+ qdepth = ISCSI_DEF_CMD_PER_LUN;
+ }
+
+ if (cmds_max < 2 || (cmds_max & (cmds_max - 1)) ||
+ cmds_max >= ISCSI_MGMT_ITT_OFFSET) {
+ if (cmds_max != 0)
+ printk(KERN_ERR "iscsi: invalid can_queue of %d. "
+ "can_queue must be a power of 2 and between "
+ "2 and %d - setting to %d.\n", cmds_max,
+ ISCSI_MGMT_ITT_OFFSET, ISCSI_DEF_XMIT_CMDS_MAX);
+ cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
+ }
+
shost = scsi_host_alloc(iscsit->host_template,
hostdata_privsize(sizeof(*session)));
if (!shost)
return NULL;
+	/* the iscsi layer keeps one task in reserve */
+ shost->can_queue = cmds_max - 1;
+ shost->cmd_per_lun = qdepth;
shost->max_id = 1;
shost->max_channel = 0;
shost->max_lun = iscsit->max_lun;
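
iscsi_session_setup() now sanity-checks the caller-supplied limits, using the usual bit trick that a value v > 1 is a power of two exactly when (v & (v - 1)) is zero. The standalone sketch below shows the same validation with illustrative placeholder limits instead of the ISCSI_* constants used above:

#include <stdio.h>

#define SKETCH_DEF_CMDS_MAX	128	/* placeholder for ISCSI_DEF_XMIT_CMDS_MAX */
#define SKETCH_ITT_OFFSET	4096	/* placeholder for ISCSI_MGMT_ITT_OFFSET */

/* Reject 0/1, non powers of two, and values that would run into the
 * management ITT range, falling back to the default as the patch does. */
static unsigned int validate_cmds_max(unsigned int cmds_max)
{
	if (cmds_max < 2 || (cmds_max & (cmds_max - 1)) ||
	    cmds_max >= SKETCH_ITT_OFFSET) {
		if (cmds_max)
			fprintf(stderr, "invalid can_queue %u, using %u\n",
				cmds_max, (unsigned int)SKETCH_DEF_CMDS_MAX);
		return SKETCH_DEF_CMDS_MAX;
	}
	return cmds_max;
}

int main(void)
{
	printf("%u\n", validate_cmds_max(0));	/* unset: silently use default */
	printf("%u\n", validate_cmds_max(96));	/* 96 is not a power of two */
	printf("%u\n", validate_cmds_max(256));	/* accepted as-is */
	return 0;
}
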
@@ -1374,7 +1391,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
session->host = shost;
session->state = ISCSI_STATE_FREE;
session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
- session->cmds_max = ISCSI_XMIT_CMDS_MAX;
+ session->cmds_max = cmds_max;
session->cmdsn = initial_cmdsn;
session->exp_cmdsn = initial_cmdsn + 1;
session->max_cmdsn = initial_cmdsn + 1;
@@ -1461,7 +1478,14 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
+ kfree(session->password);
+ kfree(session->password_in);
+ kfree(session->username);
+ kfree(session->username_in);
kfree(session->targetname);
+ kfree(session->netdev);
+ kfree(session->hwaddress);
+ kfree(session->initiatorname);
iscsi_destroy_session(cls_session);
scsi_host_put(shost);
@@ -1499,11 +1523,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
INIT_LIST_HEAD(&conn->xmitqueue);
/* initialize general immediate & non-immediate PDU commands queue */
- conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
- GFP_KERNEL, NULL);
- if (conn->immqueue == ERR_PTR(-ENOMEM))
- goto immqueue_alloc_fail;
-
conn->mgmtqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
GFP_KERNEL, NULL);
if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
@@ -1527,7 +1546,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
conn->login_mtask->data = conn->data = data;
init_timer(&conn->tmabort_timer);
- mutex_init(&conn->xmitmutex);
init_waitqueue_head(&conn->ehwait);
return cls_conn;
@@ -1538,8 +1556,6 @@ login_mtask_data_alloc_fail:
login_mtask_alloc_fail:
kfifo_free(conn->mgmtqueue);
mgmtqueue_alloc_fail:
- kfifo_free(conn->immqueue);
-immqueue_alloc_fail:
iscsi_destroy_conn(cls_conn);
return NULL;
}
@@ -1558,10 +1574,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
struct iscsi_session *session = conn->session;
unsigned long flags;
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- mutex_lock(&conn->xmitmutex);
-
spin_lock_bh(&session->lock);
+ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
if (session->leadconn == conn) {
/*
@@ -1572,8 +1586,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
}
spin_unlock_bh(&session->lock);
- mutex_unlock(&conn->xmitmutex);
-
/*
* Block until all in-progress commands for this connection
* time out or fail.
@@ -1610,7 +1622,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
}
spin_unlock_bh(&session->lock);
- kfifo_free(conn->immqueue);
kfifo_free(conn->mgmtqueue);
iscsi_destroy_conn(cls_conn);
@@ -1671,8 +1682,7 @@ flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
struct iscsi_mgmt_task *mtask, *tmp;
/* handle pending */
- while (__kfifo_get(conn->immqueue, (void*)&mtask, sizeof(void*)) ||
- __kfifo_get(conn->mgmtqueue, (void*)&mtask, sizeof(void*))) {
+ while (__kfifo_get(conn->mgmtqueue, (void*)&mtask, sizeof(void*))) {
if (mtask == conn->login_mtask)
continue;
debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
@@ -1742,12 +1752,12 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
conn->c_stage = ISCSI_CONN_STOPPED;
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
spin_unlock_bh(&session->lock);
+ scsi_flush_work(session->host);
write_lock_bh(conn->recv_lock);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
write_unlock_bh(conn->recv_lock);
- mutex_lock(&conn->xmitmutex);
/*
* for connection level recovery we should not calculate
* header digest. conn->hdr_size used for optimization
@@ -1771,8 +1781,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
fail_all_commands(conn);
flush_control_queues(session, conn);
spin_unlock_bh(&session->lock);
-
- mutex_unlock(&conn->xmitmutex);
}
void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
@@ -1867,6 +1875,30 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
case ISCSI_PARAM_EXP_STATSN:
sscanf(buf, "%u", &conn->exp_statsn);
break;
+ case ISCSI_PARAM_USERNAME:
+ kfree(session->username);
+ session->username = kstrdup(buf, GFP_KERNEL);
+ if (!session->username)
+ return -ENOMEM;
+ break;
+ case ISCSI_PARAM_USERNAME_IN:
+ kfree(session->username_in);
+ session->username_in = kstrdup(buf, GFP_KERNEL);
+ if (!session->username_in)
+ return -ENOMEM;
+ break;
+ case ISCSI_PARAM_PASSWORD:
+ kfree(session->password);
+ session->password = kstrdup(buf, GFP_KERNEL);
+ if (!session->password)
+ return -ENOMEM;
+ break;
+ case ISCSI_PARAM_PASSWORD_IN:
+ kfree(session->password_in);
+ session->password_in = kstrdup(buf, GFP_KERNEL);
+ if (!session->password_in)
+ return -ENOMEM;
+ break;
case ISCSI_PARAM_TARGET_NAME:
/* this should not change between logins */
if (session->targetname)
@@ -1940,6 +1972,18 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
case ISCSI_PARAM_TPGT:
len = sprintf(buf, "%d\n", session->tpgt);
break;
+ case ISCSI_PARAM_USERNAME:
+ len = sprintf(buf, "%s\n", session->username);
+ break;
+ case ISCSI_PARAM_USERNAME_IN:
+ len = sprintf(buf, "%s\n", session->username_in);
+ break;
+ case ISCSI_PARAM_PASSWORD:
+ len = sprintf(buf, "%s\n", session->password);
+ break;
+ case ISCSI_PARAM_PASSWORD_IN:
+ len = sprintf(buf, "%s\n", session->password_in);
+ break;
default:
return -ENOSYS;
}
@@ -1990,6 +2034,66 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
}
EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
+int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf)
+{
+ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ if (!session->netdev)
+ len = sprintf(buf, "%s\n", "default");
+ else
+ len = sprintf(buf, "%s\n", session->netdev);
+ break;
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ if (!session->hwaddress)
+ len = sprintf(buf, "%s\n", "default");
+ else
+ len = sprintf(buf, "%s\n", session->hwaddress);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ if (!session->initiatorname)
+ len = sprintf(buf, "%s\n", "unknown");
+ else
+ len = sprintf(buf, "%s\n", session->initiatorname);
+ break;
+
+ default:
+ return -ENOSYS;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(iscsi_host_get_param);
+
+int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf, int buflen)
+{
+ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ if (!session->netdev)
+ session->netdev = kstrdup(buf, GFP_KERNEL);
+ break;
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ if (!session->hwaddress)
+ session->hwaddress = kstrdup(buf, GFP_KERNEL);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ if (!session->initiatorname)
+ session->initiatorname = kstrdup(buf, GFP_KERNEL);
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_host_set_param);
+
MODULE_AUTHOR("Mike Christie");
MODULE_DESCRIPTION("iSCSI library functions");
MODULE_LICENSE("GPL");
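
The new string parameters added in this file (username, passwords, netdev, hwaddress, initiatorname) follow one ownership rule: the setter drops any previous copy and keeps its own duplicate of the incoming buffer, and iscsi_session_teardown() frees whatever is left, relying on kfree(NULL) being a no-op. The userspace sketch below mirrors that pattern with strdup(); it duplicates before freeing, which is slightly more conservative than the kernel path above, where a failed kstrdup() leaves the field NULL:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct session_sketch {
	char *username;
	char *password_in;
};

/* Replace one string parameter: keep our own duplicate, drop the old copy. */
static int set_string_param(char **slot, const char *buf)
{
	char *dup = strdup(buf);

	if (!dup)
		return -ENOMEM;
	free(*slot);	/* free(NULL) is harmless, like kfree(NULL) */
	*slot = dup;
	return 0;
}

/* Teardown frees whatever was set; never-set parameters are just NULL. */
static void session_teardown_sketch(struct session_sketch *s)
{
	free(s->username);
	free(s->password_in);
}
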
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index b4b52694497..d70ddfda93f 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -76,8 +76,8 @@ static void sas_scsi_task_done(struct sas_task *task)
hs = DID_NO_CONNECT;
break;
case SAS_DATA_UNDERRUN:
- sc->resid = ts->residual;
- if (sc->request_bufflen - sc->resid < sc->underflow)
+ scsi_set_resid(sc, ts->residual);
+ if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
hs = DID_ERROR;
break;
case SAS_DATA_OVERRUN:
@@ -161,9 +161,9 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
task->ssp_task.task_attr = sas_scsi_get_task_attr(cmd);
memcpy(task->ssp_task.cdb, cmd->cmnd, 16);
- task->scatter = cmd->request_buffer;
- task->num_scatter = cmd->use_sg;
- task->total_xfer_len = cmd->request_bufflen;
+ task->scatter = scsi_sglist(cmd);
+ task->num_scatter = scsi_sg_count(cmd);
+ task->total_xfer_len = scsi_bufflen(cmd);
task->data_dir = cmd->sc_data_direction;
task->task_done = sas_scsi_task_done;
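
The libsas change above is typical of the data buffer accessor conversions in this merge: drivers stop dereferencing scsi_cmnd fields such as request_buffer, use_sg, request_bufflen and resid, and call accessors instead, so the mid-layer can later change how the buffer is represented without touching every LLD. The sketch below uses made-up types and names (not the real scsi_cmnd) just to show why the indirection helps:

/* Hypothetical command structure; the real struct scsi_cmnd differs. */
struct cmd_sketch {
	void		*sglist;
	unsigned int	sg_count;
	unsigned int	bufflen;
	unsigned int	resid;
};

/* Accessors: callers never name the fields, so whoever owns the structure
 * can reorganize it, or derive the values, without touching the callers. */
static inline void *cmd_sglist(struct cmd_sketch *c)		{ return c->sglist; }
static inline unsigned int cmd_sg_count(struct cmd_sketch *c)	{ return c->sg_count; }
static inline unsigned int cmd_bufflen(struct cmd_sketch *c)	{ return c->bufflen; }
static inline void cmd_set_resid(struct cmd_sketch *c, unsigned int r) { c->resid = r; }

/* Mirrors the sas_create_task()/sas_scsi_task_done() changes above:
 * fill the transport task from the accessors, report residual through
 * the setter instead of writing the field directly. */
static void fill_task_sketch(struct cmd_sketch *c, void **scatter,
			     unsigned int *nents, unsigned int *len)
{
	*scatter = cmd_sglist(c);
	*nents = cmd_sg_count(c);
	*len = cmd_bufflen(c);
}

static void complete_underrun_sketch(struct cmd_sketch *c, unsigned int residual)
{
	cmd_set_resid(c, residual);
}
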
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index d1be465d5f5..1c286707dd5 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -1,7 +1,7 @@
#/*******************************************************************
# * This file is part of the Emulex Linux Device Driver for *
# * Fibre Channel Host Bus Adapters. *
-# * Copyright (C) 2004-2005 Emulex. All rights reserved. *
+# * Copyright (C) 2004-2006 Emulex. All rights reserved. *
# * EMULEX and SLI are trademarks of Emulex. *
# * www.emulex.com *
# * *
@@ -27,4 +27,5 @@ endif
obj-$(CONFIG_SCSI_LPFC) := lpfc.o
lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
- lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o
+ lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
+ lpfc_vport.o lpfc_debugfs.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 82e8f90c461..f8f64d6485c 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -19,8 +19,9 @@
* included with this package. *
*******************************************************************/
-struct lpfc_sli2_slim;
+#include <scsi/scsi_host.h>
+struct lpfc_sli2_slim;
#define LPFC_MAX_TARGET 256 /* max number of targets supported */
#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els
@@ -32,6 +33,20 @@ struct lpfc_sli2_slim;
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
+/*
+ * Following time intervals are used of adjusting SCSI device
+ * queue depths when there are driver resource error or Firmware
+ * resource error.
+ */
+#define QUEUE_RAMP_DOWN_INTERVAL (1 * HZ) /* 1 Second */
+#define QUEUE_RAMP_UP_INTERVAL (300 * HZ) /* 5 minutes */
+
+/* Number of exchanges reserved for discovery to complete */
+#define LPFC_DISC_IOCB_BUFF_COUNT 20
+
+#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */
+#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */
+
/* Define macros for 64 bit support */
#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr)))
#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
@@ -61,6 +76,11 @@ struct lpfc_dma_pool {
uint32_t current_count;
};
+struct hbq_dmabuf {
+ struct lpfc_dmabuf dbuf;
+ uint32_t tag;
+};
+
/* Priority bit. Set value to exceed low water mark in lpfc_mem. */
#define MEM_PRI 0x100
@@ -90,6 +110,29 @@ typedef struct lpfc_vpd {
uint32_t sli2FwRev;
uint8_t sli2FwName[16];
} rev;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd2 :24; /* Reserved */
+ uint32_t cmv : 1; /* Configure Max VPIs */
+ uint32_t ccrp : 1; /* Config Command Ring Polling */
+ uint32_t csah : 1; /* Configure Synchronous Abort Handling */
+		uint32_t chbs : 1;	/* Configure Host Backing store */
+ uint32_t cinb : 1; /* Enable Interrupt Notification Block */
+ uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
+ uint32_t cmx : 1; /* Configure Max XRIs */
+ uint32_t cmr : 1; /* Configure Max RPIs */
+#else /* __LITTLE_ENDIAN */
+ uint32_t cmr : 1; /* Configure Max RPIs */
+ uint32_t cmx : 1; /* Configure Max XRIs */
+ uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
+ uint32_t cinb : 1; /* Enable Interrupt Notification Block */
+		uint32_t chbs : 1;	/* Configure Host Backing store */
+ uint32_t csah : 1; /* Configure Synchronous Abort Handling */
+ uint32_t ccrp : 1; /* Config Command Ring Polling */
+ uint32_t cmv : 1; /* Configure Max VPIs */
+ uint32_t rsvd2 :24; /* Reserved */
+#endif
+ } sli3Feat;
} lpfc_vpd_t;
struct lpfc_scsi_buf;
@@ -122,6 +165,7 @@ struct lpfc_stats {
uint32_t elsRcvRPS;
uint32_t elsRcvRPL;
uint32_t elsXmitFLOGI;
+ uint32_t elsXmitFDISC;
uint32_t elsXmitPLOGI;
uint32_t elsXmitPRLI;
uint32_t elsXmitADISC;
@@ -165,50 +209,186 @@ struct lpfc_sysfs_mbox {
struct lpfcMboxq * mbox;
};
+struct lpfc_hba;
+
+
+enum discovery_state {
+ LPFC_VPORT_UNKNOWN = 0, /* vport state is unknown */
+ LPFC_VPORT_FAILED = 1, /* vport has failed */
+ LPFC_LOCAL_CFG_LINK = 6, /* local NPORT Id configured */
+ LPFC_FLOGI = 7, /* FLOGI sent to Fabric */
+ LPFC_FDISC = 8, /* FDISC sent for vport */
+ LPFC_FABRIC_CFG_LINK = 9, /* Fabric assigned NPORT Id
+ * configured */
+ LPFC_NS_REG = 10, /* Register with NameServer */
+ LPFC_NS_QRY = 11, /* Query NameServer for NPort ID list */
+ LPFC_BUILD_DISC_LIST = 12, /* Build ADISC and PLOGI lists for
+ * device authentication / discovery */
+ LPFC_DISC_AUTH = 13, /* Processing ADISC list */
+ LPFC_VPORT_READY = 32,
+};
+
+enum hba_state {
+ LPFC_LINK_UNKNOWN = 0, /* HBA state is unknown */
+ LPFC_WARM_START = 1, /* HBA state after selective reset */
+ LPFC_INIT_START = 2, /* Initial state after board reset */
+ LPFC_INIT_MBX_CMDS = 3, /* Initialize HBA with mbox commands */
+ LPFC_LINK_DOWN = 4, /* HBA initialized, link is down */
+ LPFC_LINK_UP = 5, /* Link is up - issue READ_LA */
+ LPFC_CLEAR_LA = 6, /* authentication cmplt - issue
+ * CLEAR_LA */
+ LPFC_HBA_READY = 32,
+ LPFC_HBA_ERROR = -1
+};
+
+struct lpfc_vport {
+ struct list_head listentry;
+ struct lpfc_hba *phba;
+ uint8_t port_type;
+#define LPFC_PHYSICAL_PORT 1
+#define LPFC_NPIV_PORT 2
+#define LPFC_FABRIC_PORT 3
+ enum discovery_state port_state;
+
+ uint16_t vpi;
+
+ uint32_t fc_flag; /* FC flags */
+/* Several of these flags are HBA centric and should be moved to
+ * phba->link_flag (e.g. FC_PT2PT, FC_PUBLIC_LOOP)
+ */
+#define FC_PT2PT 0x1 /* pt2pt with no fabric */
+#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */
+#define FC_DISC_TMO 0x4 /* Discovery timer running */
+#define FC_PUBLIC_LOOP 0x8 /* Public loop */
+#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */
+#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */
+#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
+#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
+#define FC_FABRIC 0x100 /* We are fabric attached */
+#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */
+#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
+#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
+#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
+#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
+#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
+#define FC_RFF_NOT_SUPPORTED 0x40000 /* RFF_ID was rejected by switch */
+#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
+#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
+
+ struct list_head fc_nodes;
+
+ /* Keep counters for the number of entries in each list. */
+ uint16_t fc_plogi_cnt;
+ uint16_t fc_adisc_cnt;
+ uint16_t fc_reglogin_cnt;
+ uint16_t fc_prli_cnt;
+ uint16_t fc_unmap_cnt;
+ uint16_t fc_map_cnt;
+ uint16_t fc_npr_cnt;
+ uint16_t fc_unused_cnt;
+ struct serv_parm fc_sparam; /* buffer for our service parameters */
+
+ uint32_t fc_myDID; /* fibre channel S_ID */
+ uint32_t fc_prevDID; /* previous fibre channel S_ID */
+
+ int32_t stopped; /* HBA has not been restarted since last ERATT */
+ uint8_t fc_linkspeed; /* Link speed after last READ_LA */
+
+	uint32_t num_disc_nodes;	/* in addition to hba_state */
+
+ uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
+ uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
+ struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
+ struct lpfc_name fc_nodename; /* fc nodename */
+ struct lpfc_name fc_portname; /* fc portname */
+
+ struct lpfc_work_evt disc_timeout_evt;
+
+ struct timer_list fc_disctmo; /* Discovery rescue timer */
+ uint8_t fc_ns_retry; /* retries for fabric nameserver */
+ uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */
+
+ spinlock_t work_port_lock;
+ uint32_t work_port_events; /* Timeout to be handled */
+#define WORKER_DISC_TMO 0x1 /* vport: Discovery timeout */
+#define WORKER_ELS_TMO 0x2 /* vport: ELS timeout */
+#define WORKER_FDMI_TMO 0x4 /* vport: FDMI timeout */
+
+#define WORKER_MBOX_TMO 0x100 /* hba: MBOX timeout */
+#define WORKER_HB_TMO 0x200 /* hba: Heart beat timeout */
+#define WORKER_FABRIC_BLOCK_TMO        0x400	/* hba: fabric block timeout */
+#define WORKER_RAMP_DOWN_QUEUE 0x800 /* hba: Decrease Q depth */
+#define WORKER_RAMP_UP_QUEUE 0x1000 /* hba: Increase Q depth */
+
+ struct timer_list fc_fdmitmo;
+ struct timer_list els_tmofunc;
+
+ int unreg_vpi_cmpl;
+
+ uint8_t load_flag;
+#define FC_LOADING 0x1 /* HBA in process of loading drvr */
+#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
+ char *vname; /* Application assigned name */
+ struct fc_vport *fc_vport;
+
+#ifdef CONFIG_LPFC_DEBUG_FS
+ struct dentry *debug_disc_trc;
+ struct dentry *debug_nodelist;
+ struct dentry *vport_debugfs_root;
+ struct lpfc_disc_trc *disc_trc;
+ atomic_t disc_trc_cnt;
+#endif
+};
+
+struct hbq_s {
+ uint16_t entry_count; /* Current number of HBQ slots */
+ uint32_t next_hbqPutIdx; /* Index to next HBQ slot to use */
+ uint32_t hbqPutIdx; /* HBQ slot to use */
+ uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */
+};
+
+#define LPFC_MAX_HBQS 16
+/* this matches the position in the lpfc_hbq_defs array */
+#define LPFC_ELS_HBQ 0
+
struct lpfc_hba {
struct lpfc_sli sli;
+ uint32_t sli_rev; /* SLI2 or SLI3 */
+ uint32_t sli3_options; /* Mask of enabled SLI3 options */
+#define LPFC_SLI3_ENABLED 0x01
+#define LPFC_SLI3_HBQ_ENABLED 0x02
+#define LPFC_SLI3_NPIV_ENABLED 0x04
+#define LPFC_SLI3_VPORT_TEARDOWN 0x08
+ uint32_t iocb_cmd_size;
+ uint32_t iocb_rsp_size;
+
+ enum hba_state link_state;
+ uint32_t link_flag; /* link state flags */
+#define LS_LOOPBACK_MODE 0x1 /* NPort is in Loopback mode */
+ /* This flag is set while issuing */
+ /* INIT_LINK mailbox command */
+#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
+#define LS_IGNORE_ERATT 0x3 /* intr handler should ignore ERATT */
+
struct lpfc_sli2_slim *slim2p;
+ struct lpfc_dmabuf hbqslimp;
+
dma_addr_t slim2p_mapping;
+
uint16_t pci_cfg_value;
- int32_t hba_state;
-
-#define LPFC_STATE_UNKNOWN 0 /* HBA state is unknown */
-#define LPFC_WARM_START 1 /* HBA state after selective reset */
-#define LPFC_INIT_START 2 /* Initial state after board reset */
-#define LPFC_INIT_MBX_CMDS 3 /* Initialize HBA with mbox commands */
-#define LPFC_LINK_DOWN 4 /* HBA initialized, link is down */
-#define LPFC_LINK_UP 5 /* Link is up - issue READ_LA */
-#define LPFC_LOCAL_CFG_LINK 6 /* local NPORT Id configured */
-#define LPFC_FLOGI 7 /* FLOGI sent to Fabric */
-#define LPFC_FABRIC_CFG_LINK 8 /* Fabric assigned NPORT Id
- configured */
-#define LPFC_NS_REG 9 /* Register with NameServer */
-#define LPFC_NS_QRY 10 /* Query NameServer for NPort ID list */
-#define LPFC_BUILD_DISC_LIST 11 /* Build ADISC and PLOGI lists for
- * device authentication / discovery */
-#define LPFC_DISC_AUTH 12 /* Processing ADISC list */
-#define LPFC_CLEAR_LA 13 /* authentication cmplt - issue
- CLEAR_LA */
-#define LPFC_HBA_READY 32
-#define LPFC_HBA_ERROR -1
+ uint8_t work_found;
+#define LPFC_MAX_WORKER_ITERATION 4
- int32_t stopped; /* HBA has not been restarted since last ERATT */
uint8_t fc_linkspeed; /* Link speed after last READ_LA */
uint32_t fc_eventTag; /* event tag for link attention */
- uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */
- uint32_t num_disc_nodes; /*in addition to hba_state */
struct timer_list fc_estabtmo; /* link establishment timer */
- struct timer_list fc_disctmo; /* Discovery rescue timer */
- struct timer_list fc_fdmitmo; /* fdmi timer */
/* These fields used to be binfo */
- struct lpfc_name fc_nodename; /* fc nodename */
- struct lpfc_name fc_portname; /* fc portname */
uint32_t fc_pref_DID; /* preferred D_ID */
- uint8_t fc_pref_ALPA; /* preferred AL_PA */
+ uint8_t fc_pref_ALPA; /* preferred AL_PA */
uint32_t fc_edtov; /* E_D_TOV timer value */
uint32_t fc_arbtov; /* ARB_TOV timer value */
uint32_t fc_ratov; /* R_A_TOV timer value */
@@ -216,61 +396,21 @@ struct lpfc_hba {
uint32_t fc_altov; /* AL_TOV timer value */
uint32_t fc_crtov; /* C_R_TOV timer value */
uint32_t fc_citov; /* C_I_TOV timer value */
- uint32_t fc_myDID; /* fibre channel S_ID */
- uint32_t fc_prevDID; /* previous fibre channel S_ID */
- struct serv_parm fc_sparam; /* buffer for our service parameters */
struct serv_parm fc_fabparam; /* fabric service parameters buffer */
uint8_t alpa_map[128]; /* AL_PA map from READ_LA */
- uint8_t fc_ns_retry; /* retries for fabric nameserver */
- uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
- uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
- struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
uint32_t lmt;
- uint32_t fc_flag; /* FC flags */
-#define FC_PT2PT 0x1 /* pt2pt with no fabric */
-#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */
-#define FC_DISC_TMO 0x4 /* Discovery timer running */
-#define FC_PUBLIC_LOOP 0x8 /* Public loop */
-#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */
-#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */
-#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
-#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
-#define FC_FABRIC 0x100 /* We are fabric attached */
-#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */
-#define FC_RSCN_DISCOVERY 0x400 /* Authenticate all devices after RSCN*/
-#define FC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
-#define FC_LOADING 0x1000 /* HBA in process of loading drvr */
-#define FC_UNLOADING 0x2000 /* HBA in process of unloading drvr */
-#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
-#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
-#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
-#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
-#define FC_LOOPBACK_MODE 0x40000 /* NPort is in Loopback mode */
- /* This flag is set while issuing */
- /* INIT_LINK mailbox command */
-#define FC_IGNORE_ERATT 0x80000 /* intr handler should ignore ERATT */
uint32_t fc_topology; /* link topology, from LINK INIT */
struct lpfc_stats fc_stat;
- struct list_head fc_nodes;
-
- /* Keep counters for the number of entries in each list. */
- uint16_t fc_plogi_cnt;
- uint16_t fc_adisc_cnt;
- uint16_t fc_reglogin_cnt;
- uint16_t fc_prli_cnt;
- uint16_t fc_unmap_cnt;
- uint16_t fc_map_cnt;
- uint16_t fc_npr_cnt;
- uint16_t fc_unused_cnt;
struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */
uint32_t nport_event_cnt; /* timestamp for nlplist entry */
- uint32_t wwnn[2];
+ uint8_t wwnn[8];
+ uint8_t wwpn[8];
uint32_t RandomData[7];
uint32_t cfg_log_verbose;
@@ -278,6 +418,9 @@ struct lpfc_hba {
uint32_t cfg_nodev_tmo;
uint32_t cfg_devloss_tmo;
uint32_t cfg_hba_queue_depth;
+ uint32_t cfg_peer_port_login;
+ uint32_t cfg_vport_restrict_login;
+ uint32_t cfg_npiv_enable;
uint32_t cfg_fcp_class;
uint32_t cfg_use_adisc;
uint32_t cfg_ack0;
@@ -304,22 +447,20 @@ struct lpfc_hba {
lpfc_vpd_t vpd; /* vital product data */
- struct Scsi_Host *host;
struct pci_dev *pcidev;
struct list_head work_list;
uint32_t work_ha; /* Host Attention Bits for WT */
uint32_t work_ha_mask; /* HA Bits owned by WT */
uint32_t work_hs; /* HS stored in case of ERRAT */
uint32_t work_status[2]; /* Extra status from SLIM */
- uint32_t work_hba_events; /* Timeout to be handled */
-#define WORKER_DISC_TMO 0x1 /* Discovery timeout */
-#define WORKER_ELS_TMO 0x2 /* ELS timeout */
-#define WORKER_MBOX_TMO 0x4 /* MBOX timeout */
-#define WORKER_FDMI_TMO 0x8 /* FDMI timeout */
wait_queue_head_t *work_wait;
struct task_struct *worker_thread;
+ struct list_head hbq_buffer_list;
+ uint32_t hbq_count; /* Count of configured HBQs */
+ struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
+
unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
void __iomem *slim_memmap_p; /* Kernel memory mapped address for
@@ -334,6 +475,10 @@ struct lpfc_hba {
reg */
void __iomem *HCregaddr; /* virtual address for host ctl reg */
+ struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */
+ uint32_t __iomem *hbq_put; /* Address in SLIM to HBQ put ptrs */
+ uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
+
int brd_no; /* FC board number */
char SerialNumber[32]; /* adapter Serial Number */
@@ -353,7 +498,6 @@ struct lpfc_hba {
uint8_t soft_wwn_enable;
struct timer_list fcp_poll_timer;
- struct timer_list els_tmofunc;
/*
* stat counters
@@ -370,31 +514,69 @@ struct lpfc_hba {
uint32_t total_scsi_bufs;
struct list_head lpfc_iocb_list;
uint32_t total_iocbq_bufs;
+ spinlock_t hbalock;
/* pci_mem_pools */
struct pci_pool *lpfc_scsi_dma_buf_pool;
struct pci_pool *lpfc_mbuf_pool;
+ struct pci_pool *lpfc_hbq_pool;
struct lpfc_dma_pool lpfc_mbuf_safety_pool;
mempool_t *mbox_mem_pool;
mempool_t *nlp_mem_pool;
struct fc_host_statistics link_stats;
+
+ struct list_head port_list;
+ struct lpfc_vport *pport; /* physical lpfc_vport pointer */
+ uint16_t max_vpi; /* Maximum virtual nports */
+#define LPFC_MAX_VPI 100 /* Max number of VPorts supported */
+ unsigned long *vpi_bmask; /* vpi allocation table */
+
+ /* Data structure used by fabric iocb scheduler */
+ struct list_head fabric_iocb_list;
+ atomic_t fabric_iocb_count;
+ struct timer_list fabric_block_timer;
+ unsigned long bit_flags;
+#define FABRIC_COMANDS_BLOCKED 0
+ atomic_t num_rsrc_err;
+ atomic_t num_cmd_success;
+ unsigned long last_rsrc_error_time;
+ unsigned long last_ramp_down_time;
+ unsigned long last_ramp_up_time;
+#ifdef CONFIG_LPFC_DEBUG_FS
+ struct dentry *hba_debugfs_root;
+ atomic_t debugfs_vport_count;
+#endif
+
+ /* Fields used for heart beat. */
+ unsigned long last_completion_time;
+ struct timer_list hb_tmofunc;
+ uint8_t hb_outstanding;
};
+static inline struct Scsi_Host *
+lpfc_shost_from_vport(struct lpfc_vport *vport)
+{
+ return container_of((void *) vport, struct Scsi_Host, hostdata[0]);
+}
+
static inline void
-lpfc_set_loopback_flag(struct lpfc_hba *phba) {
+lpfc_set_loopback_flag(struct lpfc_hba *phba)
+{
if (phba->cfg_topology == FLAGS_LOCAL_LB)
- phba->fc_flag |= FC_LOOPBACK_MODE;
+ phba->link_flag |= LS_LOOPBACK_MODE;
else
- phba->fc_flag &= ~FC_LOOPBACK_MODE;
+ phba->link_flag &= ~LS_LOOPBACK_MODE;
}
-struct rnidrsp {
- void *buf;
- uint32_t uniqueid;
- struct list_head list;
- uint32_t data;
-};
+static inline int
+lpfc_is_link_up(struct lpfc_hba *phba)
+{
+ return phba->link_state == LPFC_LINK_UP ||
+ phba->link_state == LPFC_CLEAR_LA ||
+ phba->link_state == LPFC_HBA_READY;
+}
#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */
+
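
lpfc_shost_from_vport() above inverts the usual shost->hostdata cast: because the lpfc_vport sits at the start of the Scsi_Host's private data, container_of() can recover the owning host from the vport pointer. The standalone sketch below demonstrates the same recovery with simplified stand-in types and a minimal container_of (the kernel's own macro in linux/kernel.h also type-checks the member):

#include <assert.h>
#include <stddef.h>

/* Simplified stand-in for the kernel's container_of() (no type checking). */
#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vport_sketch {
	int vpi;
};

struct shost_sketch {
	int host_no;
	/* transport-private data lives at the tail of the host structure */
	unsigned long hostdata[1];
};

static struct shost_sketch *shost_from_vport(struct vport_sketch *vport)
{
	return container_of_sketch((void *)vport, struct shost_sketch,
				   hostdata[0]);
}

int main(void)
{
	static struct shost_sketch host = { .host_no = 7 };
	struct vport_sketch *vport = (struct vport_sketch *)&host.hostdata[0];

	assert(shost_from_vport(vport) == &host);
	return 0;
}
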
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5dfda9778c8..860a52c090f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -39,6 +39,7 @@
#include "lpfc_version.h"
#include "lpfc_compat.h"
#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
#define LPFC_DEF_DEVLOSS_TMO 30
#define LPFC_MIN_DEVLOSS_TMO 1
@@ -76,116 +77,156 @@ static ssize_t
lpfc_info_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *host = class_to_shost(cdev);
+
return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
}
static ssize_t
lpfc_serialnum_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
}
static ssize_t
lpfc_modeldesc_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
}
static ssize_t
lpfc_modelname_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
}
static ssize_t
lpfc_programtype_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
}
static ssize_t
-lpfc_portnum_show(struct class_device *cdev, char *buf)
+lpfc_vportnum_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
}
static ssize_t
lpfc_fwrev_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
char fwrev[32];
+
lpfc_decode_firmware_rev(phba, fwrev, 1);
- return snprintf(buf, PAGE_SIZE, "%s\n",fwrev);
+ return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev);
}
static ssize_t
lpfc_hdw_show(struct class_device *cdev, char *buf)
{
char hdw[9];
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
lpfc_vpd_t *vp = &phba->vpd;
+
lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
}
static ssize_t
lpfc_option_rom_version_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
}
static ssize_t
lpfc_state_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
- int len = 0;
- switch (phba->hba_state) {
- case LPFC_STATE_UNKNOWN:
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int len = 0;
+
+ switch (phba->link_state) {
+ case LPFC_LINK_UNKNOWN:
case LPFC_WARM_START:
case LPFC_INIT_START:
case LPFC_INIT_MBX_CMDS:
case LPFC_LINK_DOWN:
+ case LPFC_HBA_ERROR:
len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n");
break;
case LPFC_LINK_UP:
- case LPFC_LOCAL_CFG_LINK:
- len += snprintf(buf + len, PAGE_SIZE-len, "Link Up\n");
- break;
- case LPFC_FLOGI:
- case LPFC_FABRIC_CFG_LINK:
- case LPFC_NS_REG:
- case LPFC_NS_QRY:
- case LPFC_BUILD_DISC_LIST:
- case LPFC_DISC_AUTH:
case LPFC_CLEAR_LA:
- len += snprintf(buf + len, PAGE_SIZE-len,
- "Link Up - Discovery\n");
- break;
case LPFC_HBA_READY:
- len += snprintf(buf + len, PAGE_SIZE-len,
- "Link Up - Ready:\n");
+ len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - \n");
+
+ switch (vport->port_state) {
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "initializing\n");
+ break;
+ case LPFC_LOCAL_CFG_LINK:
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Configuring Link\n");
+ break;
+ case LPFC_FDISC:
+ case LPFC_FLOGI:
+ case LPFC_FABRIC_CFG_LINK:
+ case LPFC_NS_REG:
+ case LPFC_NS_QRY:
+ case LPFC_BUILD_DISC_LIST:
+ case LPFC_DISC_AUTH:
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "Discovery\n");
+ break;
+ case LPFC_VPORT_READY:
+ len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n");
+ break;
+
+ case LPFC_VPORT_FAILED:
+ len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n");
+ break;
+
+ case LPFC_VPORT_UNKNOWN:
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "Unknown\n");
+ break;
+ }
+
if (phba->fc_topology == TOPOLOGY_LOOP) {
- if (phba->fc_flag & FC_PUBLIC_LOOP)
+ if (vport->fc_flag & FC_PUBLIC_LOOP)
len += snprintf(buf + len, PAGE_SIZE-len,
" Public Loop\n");
else
len += snprintf(buf + len, PAGE_SIZE-len,
" Private Loop\n");
} else {
- if (phba->fc_flag & FC_FABRIC)
+ if (vport->fc_flag & FC_FABRIC)
len += snprintf(buf + len, PAGE_SIZE-len,
" Fabric\n");
else
@@ -193,29 +234,32 @@ lpfc_state_show(struct class_device *cdev, char *buf)
" Point-2-Point\n");
}
}
+
return len;
}
static ssize_t
lpfc_num_discovered_ports_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
- return snprintf(buf, PAGE_SIZE, "%d\n", phba->fc_map_cnt +
- phba->fc_unmap_cnt);
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ vport->fc_map_cnt + vport->fc_unmap_cnt);
}
static int
-lpfc_issue_lip(struct Scsi_Host *host)
+lpfc_issue_lip(struct Scsi_Host *shost)
{
- struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *pmboxq;
int mbxstatus = MBXERR_ERROR;
- if ((phba->fc_flag & FC_OFFLINE_MODE) ||
- (phba->fc_flag & FC_BLOCK_MGMT_IO) ||
- (phba->hba_state != LPFC_HBA_READY))
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+ (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) ||
+ (vport->port_state != LPFC_VPORT_READY))
return -EPERM;
pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
@@ -238,9 +282,7 @@ lpfc_issue_lip(struct Scsi_Host *host)
}
lpfc_set_loopback_flag(phba);
- if (mbxstatus == MBX_TIMEOUT)
- pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- else
+ if (mbxstatus != MBX_TIMEOUT)
mempool_free(pmboxq, phba->mbox_mem_pool);
if (mbxstatus == MBXERR_ERROR)
@@ -320,8 +362,10 @@ lpfc_selective_reset(struct lpfc_hba *phba)
static ssize_t
lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
int status = -EINVAL;
if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
@@ -336,23 +380,26 @@ lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count)
static ssize_t
lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
}
static ssize_t
lpfc_board_mode_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
char * state;
- if (phba->hba_state == LPFC_HBA_ERROR)
+ if (phba->link_state == LPFC_HBA_ERROR)
state = "error";
- else if (phba->hba_state == LPFC_WARM_START)
+ else if (phba->link_state == LPFC_WARM_START)
state = "warm start";
- else if (phba->hba_state == LPFC_INIT_START)
+ else if (phba->link_state == LPFC_INIT_START)
state = "offline";
else
state = "online";
@@ -363,8 +410,9 @@ lpfc_board_mode_show(struct class_device *cdev, char *buf)
static ssize_t
lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
struct completion online_compl;
int status=0;
@@ -389,11 +437,166 @@ lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count)
return -EIO;
}
+int
+lpfc_get_hba_info(struct lpfc_hba *phba,
+ uint32_t *mxri, uint32_t *axri,
+ uint32_t *mrpi, uint32_t *arpi,
+ uint32_t *mvpi, uint32_t *avpi)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ LPFC_MBOXQ_t *pmboxq;
+ MAILBOX_t *pmb;
+ int rc = 0;
+
+ /*
+ * prevent udev from issuing mailbox commands until the port is
+ * configured.
+ */
+ if (phba->link_state < LPFC_LINK_DOWN ||
+ !phba->mbox_mem_pool ||
+ (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
+ return 0;
+
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
+ return 0;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return 0;
+ memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+
+ pmb = &pmboxq->mb;
+ pmb->mbxCommand = MBX_READ_CONFIG;
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->context1 = NULL;
+
+ if ((phba->pport->fc_flag & FC_OFFLINE_MODE) ||
+ (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+ rc = MBX_NOT_FINISHED;
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+ if (rc != MBX_SUCCESS) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return 0;
+ }
+
+ if (mrpi)
+ *mrpi = pmb->un.varRdConfig.max_rpi;
+ if (arpi)
+ *arpi = pmb->un.varRdConfig.avail_rpi;
+ if (mxri)
+ *mxri = pmb->un.varRdConfig.max_xri;
+ if (axri)
+ *axri = pmb->un.varRdConfig.avail_xri;
+ if (mvpi)
+ *mvpi = pmb->un.varRdConfig.max_vpi;
+ if (avpi)
+ *avpi = pmb->un.varRdConfig.avail_vpi;
+
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return 1;
+}
+
+static ssize_t
+lpfc_max_rpi_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t cnt;
+
+ if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
+ return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+static ssize_t
+lpfc_used_rpi_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t cnt, acnt;
+
+ if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
+ return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+static ssize_t
+lpfc_max_xri_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t cnt;
+
+ if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
+ return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+static ssize_t
+lpfc_used_xri_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t cnt, acnt;
+
+ if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
+ return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+static ssize_t
+lpfc_max_vpi_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t cnt;
+
+ if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
+ return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+static ssize_t
+lpfc_used_vpi_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t cnt, acnt;
+
+ if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
+ return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+static ssize_t
+lpfc_npiv_info_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ if (!(phba->max_vpi))
+ return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ return snprintf(buf, PAGE_SIZE, "NPIV Physical\n");
+ return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
+}
+
static ssize_t
lpfc_poll_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
}
@@ -402,8 +605,9 @@ static ssize_t
lpfc_poll_store(struct class_device *cdev, const char *buf,
size_t count)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
uint32_t creg_val;
uint32_t old_val;
int val=0;
@@ -417,7 +621,7 @@ lpfc_poll_store(struct class_device *cdev, const char *buf,
if ((val & 0x3) != val)
return -EINVAL;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
old_val = phba->cfg_poll;
@@ -432,16 +636,16 @@ lpfc_poll_store(struct class_device *cdev, const char *buf,
lpfc_poll_start_timer(phba);
}
} else if (val != 0x0) {
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return -EINVAL;
}
if (!(val & DISABLE_FCP_RING_INT) &&
(old_val & DISABLE_FCP_RING_INT))
{
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
del_timer(&phba->fcp_poll_timer);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
creg_val = readl(phba->HCregaddr);
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
@@ -450,7 +654,7 @@ lpfc_poll_store(struct class_device *cdev, const char *buf,
phba->cfg_poll = val;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return strlen(buf);
}
@@ -459,8 +663,9 @@ lpfc_poll_store(struct class_device *cdev, const char *buf,
static ssize_t \
lpfc_##attr##_show(struct class_device *cdev, char *buf) \
{ \
- struct Scsi_Host *host = class_to_shost(cdev);\
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;\
+ struct Scsi_Host *shost = class_to_shost(cdev);\
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+ struct lpfc_hba *phba = vport->phba;\
int val = 0;\
val = phba->cfg_##attr;\
return snprintf(buf, PAGE_SIZE, "%d\n",\
@@ -471,8 +676,9 @@ lpfc_##attr##_show(struct class_device *cdev, char *buf) \
static ssize_t \
lpfc_##attr##_show(struct class_device *cdev, char *buf) \
{ \
- struct Scsi_Host *host = class_to_shost(cdev);\
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;\
+ struct Scsi_Host *shost = class_to_shost(cdev);\
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+ struct lpfc_hba *phba = vport->phba;\
int val = 0;\
val = phba->cfg_##attr;\
return snprintf(buf, PAGE_SIZE, "%#x\n",\
@@ -514,8 +720,9 @@ lpfc_##attr##_set(struct lpfc_hba *phba, int val) \
static ssize_t \
lpfc_##attr##_store(struct class_device *cdev, const char *buf, size_t count) \
{ \
- struct Scsi_Host *host = class_to_shost(cdev);\
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;\
+ struct Scsi_Host *shost = class_to_shost(cdev);\
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+ struct lpfc_hba *phba = vport->phba;\
int val=0;\
if (!isdigit(buf[0]))\
return -EINVAL;\
@@ -576,7 +783,7 @@ static CLASS_DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
static CLASS_DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
static CLASS_DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
static CLASS_DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
-static CLASS_DEVICE_ATTR(portnum, S_IRUGO, lpfc_portnum_show, NULL);
+static CLASS_DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
static CLASS_DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
static CLASS_DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
static CLASS_DEVICE_ATTR(state, S_IRUGO, lpfc_state_show, NULL);
@@ -592,6 +799,13 @@ static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
lpfc_board_mode_show, lpfc_board_mode_store);
static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
+static CLASS_DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
+static CLASS_DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
+static CLASS_DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
+static CLASS_DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
+static CLASS_DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
+static CLASS_DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
+static CLASS_DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -600,8 +814,9 @@ static ssize_t
lpfc_soft_wwn_enable_store(struct class_device *cdev, const char *buf,
size_t count)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
unsigned int cnt = count;
/*
@@ -634,8 +849,10 @@ static CLASS_DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
static ssize_t
lpfc_soft_wwpn_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
(unsigned long long)phba->cfg_soft_wwpn);
}
@@ -644,8 +861,9 @@ lpfc_soft_wwpn_show(struct class_device *cdev, char *buf)
static ssize_t
lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
struct completion online_compl;
int stat1=0, stat2=0;
unsigned int i, j, cnt=count;
@@ -680,9 +898,9 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
}
}
phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
- fc_host_port_name(host) = phba->cfg_soft_wwpn;
+ fc_host_port_name(shost) = phba->cfg_soft_wwpn;
if (phba->cfg_soft_wwnn)
- fc_host_node_name(host) = phba->cfg_soft_wwnn;
+ fc_host_node_name(shost) = phba->cfg_soft_wwnn;
dev_printk(KERN_NOTICE, &phba->pcidev->dev,
"lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
@@ -777,6 +995,15 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
static CLASS_DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
lpfc_poll_show, lpfc_poll_store);
+int lpfc_sli_mode = 0;
+module_param(lpfc_sli_mode, int, 0);
+MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
+ " 0 - auto (SLI-3 if supported),"
+ " 2 - select SLI-2 even on SLI-3 capable HBAs,"
+ " 3 - select SLI-3");
+
+LPFC_ATTR_R(npiv_enable, 0, 0, 1, "Enable NPIV functionality");
+
/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
# until the timer expires. Value range is [0,255]. Default value is 30.
@@ -790,8 +1017,9 @@ MODULE_PARM_DESC(lpfc_nodev_tmo,
static ssize_t
lpfc_nodev_tmo_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
int val = 0;
val = phba->cfg_devloss_tmo;
return snprintf(buf, PAGE_SIZE, "%d\n",
@@ -832,13 +1060,19 @@ lpfc_nodev_tmo_init(struct lpfc_hba *phba, int val)
static void
lpfc_update_rport_devloss_tmo(struct lpfc_hba *phba)
{
+ struct lpfc_vport *vport;
+ struct Scsi_Host *shost;
struct lpfc_nodelist *ndlp;
- spin_lock_irq(phba->host->host_lock);
- list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp)
- if (ndlp->rport)
- ndlp->rport->dev_loss_tmo = phba->cfg_devloss_tmo;
- spin_unlock_irq(phba->host->host_lock);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
+ if (ndlp->rport)
+ ndlp->rport->dev_loss_tmo =
+ phba->cfg_devloss_tmo;
+ spin_unlock_irq(shost->host_lock);
+ }
}
static int
@@ -946,6 +1180,33 @@ LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
"Max number of FCP commands we can queue to a lpfc HBA");
/*
+# peer_port_login: This parameter allows/prevents logins
+# between peer ports hosted on the same physical port.
+# When this parameter is set to 0, peer ports on the same physical port
+# are not allowed to log in to each other.
+# When this parameter is set to 1, peer ports on the same physical port
+# are allowed to log in to each other.
+# Default value of this parameter is 0.
+*/
+LPFC_ATTR_R(peer_port_login, 0, 0, 1,
+ "Allow peer ports on the same physical port to login to each "
+ "other.");
+
+/*
+# vport_restrict_login: This parameter allows/prevents logins
+# between Virtual Ports and remote initiators.
+# When this parameter is not set (0), Virtual Ports will accept PLOGIs from
+# other initiators and will attempt to PLOGI all remote ports.
+# When this parameter is set (1), Virtual Ports will reject PLOGIs from
+# remote ports and will not attempt to PLOGI to other initiators.
+# This restriction does not apply to the physical port.
+# This parameter does not restrict logins to Fabric resident remote ports.
+# Default value of this parameter is 1.
+*/
+LPFC_ATTR_RW(vport_restrict_login, 1, 0, 1,
+ "Restrict virtual ports login to remote initiators.");
+
+/*
# Some disk devices have a "select ID" or "select Target" capability.
# From a protocol standpoint "select ID" usually means select the
# Fibre channel "ALPA". In the FC-AL Profile there is an "informative
@@ -1088,7 +1349,8 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible");
-struct class_device_attribute *lpfc_host_attrs[] = {
+
+struct class_device_attribute *lpfc_hba_attrs[] = {
&class_device_attr_info,
&class_device_attr_serialnum,
&class_device_attr_modeldesc,
@@ -1104,6 +1366,8 @@ struct class_device_attribute *lpfc_host_attrs[] = {
&class_device_attr_lpfc_log_verbose,
&class_device_attr_lpfc_lun_queue_depth,
&class_device_attr_lpfc_hba_queue_depth,
+ &class_device_attr_lpfc_peer_port_login,
+ &class_device_attr_lpfc_vport_restrict_login,
&class_device_attr_lpfc_nodev_tmo,
&class_device_attr_lpfc_devloss_tmo,
&class_device_attr_lpfc_fcp_class,
@@ -1119,9 +1383,17 @@ struct class_device_attribute *lpfc_host_attrs[] = {
&class_device_attr_lpfc_multi_ring_type,
&class_device_attr_lpfc_fdmi_on,
&class_device_attr_lpfc_max_luns,
+ &class_device_attr_lpfc_npiv_enable,
&class_device_attr_nport_evt_cnt,
&class_device_attr_management_version,
&class_device_attr_board_mode,
+ &class_device_attr_max_vpi,
+ &class_device_attr_used_vpi,
+ &class_device_attr_max_rpi,
+ &class_device_attr_used_rpi,
+ &class_device_attr_max_xri,
+ &class_device_attr_used_xri,
+ &class_device_attr_npiv_info,
&class_device_attr_issue_reset,
&class_device_attr_lpfc_poll,
&class_device_attr_lpfc_poll_tmo,
@@ -1137,9 +1409,11 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
size_t buf_off;
- struct Scsi_Host *host = class_to_shost(container_of(kobj,
- struct class_device, kobj));
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct class_device *cdev = container_of(kobj, struct class_device,
+ kobj);
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
if ((off + count) > FF_REG_AREA_SIZE)
return -ERANGE;
@@ -1149,18 +1423,16 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
if (off % 4 || count % 4 || (unsigned long)buf % 4)
return -EINVAL;
- spin_lock_irq(phba->host->host_lock);
-
- if (!(phba->fc_flag & FC_OFFLINE_MODE)) {
- spin_unlock_irq(phba->host->host_lock);
+ if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
return -EPERM;
}
+ spin_lock_irq(&phba->hbalock);
for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t))
writel(*((uint32_t *)(buf + buf_off)),
phba->ctrl_regs_memmap_p + off + buf_off);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return count;
}
@@ -1171,9 +1443,11 @@ sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
{
size_t buf_off;
uint32_t * tmp_ptr;
- struct Scsi_Host *host = class_to_shost(container_of(kobj,
- struct class_device, kobj));
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct class_device *cdev = container_of(kobj, struct class_device,
+ kobj);
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
if (off > FF_REG_AREA_SIZE)
return -ERANGE;
@@ -1186,14 +1460,14 @@ sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
if (off % 4 || count % 4 || (unsigned long)buf % 4)
return -EINVAL;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
tmp_ptr = (uint32_t *)(buf + buf_off);
*tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return count;
}
@@ -1210,7 +1484,7 @@ static struct bin_attribute sysfs_ctlreg_attr = {
static void
-sysfs_mbox_idle (struct lpfc_hba * phba)
+sysfs_mbox_idle(struct lpfc_hba *phba)
{
phba->sysfs_mbox.state = SMBOX_IDLE;
phba->sysfs_mbox.offset = 0;
@@ -1226,10 +1500,12 @@ static ssize_t
sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct Scsi_Host * host =
- class_to_shost(container_of(kobj, struct class_device, kobj));
- struct lpfc_hba * phba = (struct lpfc_hba*)host->hostdata;
- struct lpfcMboxq * mbox = NULL;
+ struct class_device *cdev = container_of(kobj, struct class_device,
+ kobj);
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfcMboxq *mbox = NULL;
if ((count + off) > MAILBOX_CMD_SIZE)
return -ERANGE;
@@ -1247,7 +1523,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
memset(mbox, 0, sizeof (LPFC_MBOXQ_t));
}
- spin_lock_irq(host->host_lock);
+ spin_lock_irq(&phba->hbalock);
if (off == 0) {
if (phba->sysfs_mbox.mbox)
@@ -1258,9 +1534,9 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
} else {
if (phba->sysfs_mbox.state != SMBOX_WRITING ||
phba->sysfs_mbox.offset != off ||
- phba->sysfs_mbox.mbox == NULL ) {
+ phba->sysfs_mbox.mbox == NULL) {
sysfs_mbox_idle(phba);
- spin_unlock_irq(host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return -EAGAIN;
}
}
@@ -1270,7 +1546,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
phba->sysfs_mbox.offset = off + count;
- spin_unlock_irq(host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return count;
}
@@ -1279,10 +1555,11 @@ static ssize_t
sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct Scsi_Host *host =
- class_to_shost(container_of(kobj, struct class_device,
- kobj));
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct class_device *cdev = container_of(kobj, struct class_device,
+ kobj);
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
int rc;
if (off > MAILBOX_CMD_SIZE)
@@ -1297,7 +1574,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
if (off && count == 0)
return 0;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
if (off == 0 &&
phba->sysfs_mbox.state == SMBOX_WRITING &&
@@ -1320,12 +1597,12 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
case MBX_SET_MASK:
case MBX_SET_SLIM:
case MBX_SET_DEBUG:
- if (!(phba->fc_flag & FC_OFFLINE_MODE)) {
+ if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
printk(KERN_WARNING "mbox_read:Command 0x%x "
"is illegal in on-line state\n",
phba->sysfs_mbox.mbox->mb.mbxCommand);
sysfs_mbox_idle(phba);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return -EPERM;
}
case MBX_LOAD_SM:
@@ -1355,48 +1632,48 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
phba->sysfs_mbox.mbox->mb.mbxCommand);
sysfs_mbox_idle(phba);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return -EPERM;
default:
printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
phba->sysfs_mbox.mbox->mb.mbxCommand);
sysfs_mbox_idle(phba);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return -EPERM;
}
- if (phba->fc_flag & FC_BLOCK_MGMT_IO) {
+ phba->sysfs_mbox.mbox->vport = vport;
+
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
sysfs_mbox_idle(phba);
- spin_unlock_irq(host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return -EAGAIN;
}
- if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
rc = lpfc_sli_issue_mbox (phba,
phba->sysfs_mbox.mbox,
MBX_POLL);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
} else {
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
rc = lpfc_sli_issue_mbox_wait (phba,
phba->sysfs_mbox.mbox,
lpfc_mbox_tmo_val(phba,
phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
}
if (rc != MBX_SUCCESS) {
if (rc == MBX_TIMEOUT) {
- phba->sysfs_mbox.mbox->mbox_cmpl =
- lpfc_sli_def_mbox_cmpl;
phba->sysfs_mbox.mbox = NULL;
}
sysfs_mbox_idle(phba);
- spin_unlock_irq(host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
}
phba->sysfs_mbox.state = SMBOX_READING;
@@ -1405,7 +1682,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
phba->sysfs_mbox.state != SMBOX_READING) {
printk(KERN_WARNING "mbox_read: Bad State\n");
sysfs_mbox_idle(phba);
- spin_unlock_irq(host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return -EAGAIN;
}
@@ -1416,7 +1693,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE)
sysfs_mbox_idle(phba);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return count;
}
@@ -1432,35 +1709,35 @@ static struct bin_attribute sysfs_mbox_attr = {
};
int
-lpfc_alloc_sysfs_attr(struct lpfc_hba *phba)
+lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
{
- struct Scsi_Host *host = phba->host;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
int error;
- error = sysfs_create_bin_file(&host->shost_classdev.kobj,
- &sysfs_ctlreg_attr);
+ error = sysfs_create_bin_file(&shost->shost_classdev.kobj,
+ &sysfs_ctlreg_attr);
if (error)
goto out;
- error = sysfs_create_bin_file(&host->shost_classdev.kobj,
- &sysfs_mbox_attr);
+ error = sysfs_create_bin_file(&shost->shost_classdev.kobj,
+ &sysfs_mbox_attr);
if (error)
goto out_remove_ctlreg_attr;
return 0;
out_remove_ctlreg_attr:
- sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr);
+ sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_ctlreg_attr);
out:
return error;
}
void
-lpfc_free_sysfs_attr(struct lpfc_hba *phba)
+lpfc_free_sysfs_attr(struct lpfc_vport *vport)
{
- struct Scsi_Host *host = phba->host;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_mbox_attr);
- sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr);
+ sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_mbox_attr);
+ sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_ctlreg_attr);
}
@@ -1471,26 +1748,30 @@ lpfc_free_sysfs_attr(struct lpfc_hba *phba)
static void
lpfc_get_host_port_id(struct Scsi_Host *shost)
{
- struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+
/* note: fc_myDID already in cpu endianness */
- fc_host_port_id(shost) = phba->fc_myDID;
+ fc_host_port_id(shost) = vport->fc_myDID;
}
static void
lpfc_get_host_port_type(struct Scsi_Host *shost)
{
- struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
spin_lock_irq(shost->host_lock);
- if (phba->hba_state == LPFC_HBA_READY) {
+ if (vport->port_type == LPFC_NPIV_PORT) {
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ } else if (lpfc_is_link_up(phba)) {
if (phba->fc_topology == TOPOLOGY_LOOP) {
- if (phba->fc_flag & FC_PUBLIC_LOOP)
+ if (vport->fc_flag & FC_PUBLIC_LOOP)
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
else
fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
} else {
- if (phba->fc_flag & FC_FABRIC)
+ if (vport->fc_flag & FC_FABRIC)
fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
else
fc_host_port_type(shost) = FC_PORTTYPE_PTP;
@@ -1504,29 +1785,20 @@ lpfc_get_host_port_type(struct Scsi_Host *shost)
static void
lpfc_get_host_port_state(struct Scsi_Host *shost)
{
- struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
spin_lock_irq(shost->host_lock);
- if (phba->fc_flag & FC_OFFLINE_MODE)
+ if (vport->fc_flag & FC_OFFLINE_MODE)
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
else {
- switch (phba->hba_state) {
- case LPFC_STATE_UNKNOWN:
- case LPFC_WARM_START:
- case LPFC_INIT_START:
- case LPFC_INIT_MBX_CMDS:
+ switch (phba->link_state) {
+ case LPFC_LINK_UNKNOWN:
case LPFC_LINK_DOWN:
fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
break;
case LPFC_LINK_UP:
- case LPFC_LOCAL_CFG_LINK:
- case LPFC_FLOGI:
- case LPFC_FABRIC_CFG_LINK:
- case LPFC_NS_REG:
- case LPFC_NS_QRY:
- case LPFC_BUILD_DISC_LIST:
- case LPFC_DISC_AUTH:
case LPFC_CLEAR_LA:
case LPFC_HBA_READY:
/* Links up, beyond this port_type reports state */
@@ -1547,11 +1819,12 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
static void
lpfc_get_host_speed(struct Scsi_Host *shost)
{
- struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
spin_lock_irq(shost->host_lock);
- if (phba->hba_state == LPFC_HBA_READY) {
+ if (lpfc_is_link_up(phba)) {
switch(phba->fc_linkspeed) {
case LA_1GHZ_LINK:
fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
@@ -1577,39 +1850,31 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
static void
lpfc_get_host_fabric_name (struct Scsi_Host *shost)
{
- struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
u64 node_name;
spin_lock_irq(shost->host_lock);
- if ((phba->fc_flag & FC_FABRIC) ||
+ if ((vport->fc_flag & FC_FABRIC) ||
((phba->fc_topology == TOPOLOGY_LOOP) &&
- (phba->fc_flag & FC_PUBLIC_LOOP)))
+ (vport->fc_flag & FC_PUBLIC_LOOP)))
node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
else
/* fabric is local port if there is no F/FL_Port */
- node_name = wwn_to_u64(phba->fc_nodename.u.wwn);
+ node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
spin_unlock_irq(shost->host_lock);
fc_host_fabric_name(shost) = node_name;
}
-static void
-lpfc_get_host_symbolic_name (struct Scsi_Host *shost)
-{
- struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata;
-
- spin_lock_irq(shost->host_lock);
- lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost));
- spin_unlock_irq(shost->host_lock);
-}
-
static struct fc_host_statistics *
lpfc_get_stats(struct Scsi_Host *shost)
{
- struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
- struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli *psli = &phba->sli;
struct fc_host_statistics *hs = &phba->link_stats;
struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
LPFC_MBOXQ_t *pmboxq;
@@ -1617,7 +1882,16 @@ lpfc_get_stats(struct Scsi_Host *shost)
unsigned long seconds;
int rc = 0;
- if (phba->fc_flag & FC_BLOCK_MGMT_IO)
+ /*
+ * prevent udev from issuing mailbox commands until the port is
+ * configured.
+ */
+ if (phba->link_state < LPFC_LINK_DOWN ||
+ !phba->mbox_mem_pool ||
+ (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
+ return NULL;
+
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
return NULL;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -1629,17 +1903,16 @@ lpfc_get_stats(struct Scsi_Host *shost)
pmb->mbxCommand = MBX_READ_STATUS;
pmb->mbxOwner = OWN_HOST;
pmboxq->context1 = NULL;
+ pmboxq->vport = vport;
- if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
- if (rc == MBX_TIMEOUT)
- pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- else
+ if (rc != MBX_TIMEOUT)
mempool_free(pmboxq, phba->mbox_mem_pool);
return NULL;
}
@@ -1655,18 +1928,17 @@ lpfc_get_stats(struct Scsi_Host *shost)
pmb->mbxCommand = MBX_READ_LNK_STAT;
pmb->mbxOwner = OWN_HOST;
pmboxq->context1 = NULL;
+ pmboxq->vport = vport;
- if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
- if (rc == MBX_TIMEOUT)
- pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- else
- mempool_free( pmboxq, phba->mbox_mem_pool);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
return NULL;
}
@@ -1713,14 +1985,15 @@ lpfc_get_stats(struct Scsi_Host *shost)
static void
lpfc_reset_stats(struct Scsi_Host *shost)
{
- struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
int rc = 0;
- if (phba->fc_flag & FC_BLOCK_MGMT_IO)
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
return;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -1733,17 +2006,16 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmb->mbxOwner = OWN_HOST;
pmb->un.varWords[0] = 0x1; /* reset request */
pmboxq->context1 = NULL;
+ pmboxq->vport = vport;
- if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
- if (rc == MBX_TIMEOUT)
- pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- else
+ if (rc != MBX_TIMEOUT)
mempool_free(pmboxq, phba->mbox_mem_pool);
return;
}
@@ -1752,17 +2024,16 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmb->mbxCommand = MBX_READ_LNK_STAT;
pmb->mbxOwner = OWN_HOST;
pmboxq->context1 = NULL;
+ pmboxq->vport = vport;
- if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
- if (rc == MBX_TIMEOUT)
- pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- else
+ if (rc != MBX_TIMEOUT)
mempool_free( pmboxq, phba->mbox_mem_pool);
return;
}
@@ -1791,13 +2062,13 @@ lpfc_reset_stats(struct Scsi_Host *shost)
static struct lpfc_nodelist *
lpfc_get_node_by_target(struct scsi_target *starget)
{
- struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
- struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata;
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_nodelist *ndlp;
spin_lock_irq(shost->host_lock);
/* Search for this, mapped, target ID */
- list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
starget->id == ndlp->nlp_sid) {
spin_unlock_irq(shost->host_lock);
@@ -1887,8 +2158,66 @@ struct fc_function_template lpfc_transport_functions = {
.get_host_fabric_name = lpfc_get_host_fabric_name,
.show_host_fabric_name = 1,
- .get_host_symbolic_name = lpfc_get_host_symbolic_name,
- .show_host_symbolic_name = 1,
+ /*
+ * The LPFC driver treats linkdown handling as target loss events
+ * so there are no sysfs handlers for link_down_tmo.
+ */
+
+ .get_fc_host_stats = lpfc_get_stats,
+ .reset_fc_host_stats = lpfc_reset_stats,
+
+ .dd_fcrport_size = sizeof(struct lpfc_rport_data),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .get_starget_port_id = lpfc_get_starget_port_id,
+ .show_starget_port_id = 1,
+
+ .get_starget_node_name = lpfc_get_starget_node_name,
+ .show_starget_node_name = 1,
+
+ .get_starget_port_name = lpfc_get_starget_port_name,
+ .show_starget_port_name = 1,
+
+ .issue_fc_host_lip = lpfc_issue_lip,
+ .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
+ .terminate_rport_io = lpfc_terminate_rport_io,
+
+ .vport_create = lpfc_vport_create,
+ .vport_delete = lpfc_vport_delete,
+ .dd_fcvport_size = sizeof(struct lpfc_vport *),
+};
+
+struct fc_function_template lpfc_vport_transport_functions = {
+ /* fixed attributes the driver supports */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+
+ /* dynamic attributes the driver supports */
+ .get_host_port_id = lpfc_get_host_port_id,
+ .show_host_port_id = 1,
+
+ .get_host_port_type = lpfc_get_host_port_type,
+ .show_host_port_type = 1,
+
+ .get_host_port_state = lpfc_get_host_port_state,
+ .show_host_port_state = 1,
+
+ /* active_fc4s is shown but doesn't change (thus no get function) */
+ .show_host_active_fc4s = 1,
+
+ .get_host_speed = lpfc_get_host_speed,
+ .show_host_speed = 1,
+
+ .get_host_fabric_name = lpfc_get_host_fabric_name,
+ .show_host_fabric_name = 1,
/*
* The LPFC driver treats linkdown handling as target loss events
@@ -1917,6 +2246,8 @@ struct fc_function_template lpfc_transport_functions = {
.issue_fc_host_lip = lpfc_issue_lip,
.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
.terminate_rport_io = lpfc_terminate_rport_io,
+
+ .vport_disable = lpfc_vport_disable,
};
void
@@ -1939,6 +2270,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
lpfc_max_luns_init(phba, lpfc_max_luns);
lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
+ lpfc_peer_port_login_init(phba, lpfc_peer_port_login);
+ lpfc_npiv_enable_init(phba, lpfc_npiv_enable);
+ lpfc_vport_restrict_login_init(phba, lpfc_vport_restrict_login);
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo);
lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
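Every converted sysfs handler in the lpfc_attr.c hunk above repeats the same three-step lookup from the class device to the owning HBA. The sketch below factors that chain into a single helper purely for illustration; the name lpfc_cdev_to_phba() is hypothetical and is not part of this patch, and the sketch assumes the class_device / shost->hostdata layout used throughout the hunk.

	static inline struct lpfc_hba *
	lpfc_cdev_to_phba(struct class_device *cdev)
	{
		struct Scsi_Host *shost = class_to_shost(cdev);
		struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;

		/* shost->hostdata now holds the vport; the HBA hangs off it */
		return vport->phba;
	}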
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index b8c2a8862d8..e19d1a74658 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -23,92 +23,114 @@ typedef int (*node_filter)(struct lpfc_nodelist *ndlp, void *param);
struct fc_rport;
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
struct lpfc_dmabuf *mp);
void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport);
void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *,
- uint32_t);
-void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
-void lpfc_unreg_did(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
+int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
+ LPFC_MBOXQ_t *, uint32_t);
+void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
-
+void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove);
int lpfc_linkdown(struct lpfc_hba *);
void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
-void lpfc_dequeue_node(struct lpfc_hba *, struct lpfc_nodelist *);
-void lpfc_nlp_set_state(struct lpfc_hba *, struct lpfc_nodelist *, int);
-void lpfc_drop_node(struct lpfc_hba *, struct lpfc_nodelist *);
-void lpfc_set_disctmo(struct lpfc_hba *);
-int lpfc_can_disctmo(struct lpfc_hba *);
-int lpfc_unreg_rpi(struct lpfc_hba *, struct lpfc_nodelist *);
+void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
+void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_set_disctmo(struct lpfc_vport *);
+int lpfc_can_disctmo(struct lpfc_vport *);
+int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_unreg_all_rpis(struct lpfc_vport *);
+void lpfc_unreg_default_rpis(struct lpfc_vport *);
+void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *);
+
int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
- struct lpfc_iocbq *, struct lpfc_nodelist *);
-void lpfc_nlp_init(struct lpfc_hba *, struct lpfc_nodelist *, uint32_t);
+ struct lpfc_iocbq *, struct lpfc_nodelist *);
+void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t);
struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *);
int lpfc_nlp_put(struct lpfc_nodelist *);
-struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_hba *, uint32_t);
-void lpfc_disc_list_loopmap(struct lpfc_hba *);
-void lpfc_disc_start(struct lpfc_hba *);
-void lpfc_disc_flush_list(struct lpfc_hba *);
+struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
+void lpfc_disc_list_loopmap(struct lpfc_vport *);
+void lpfc_disc_start(struct lpfc_vport *);
+void lpfc_disc_flush_list(struct lpfc_vport *);
+void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
void lpfc_disc_timeout(unsigned long);
-struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi);
-struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi);
+struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
+struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
+void lpfc_worker_wake_up(struct lpfc_hba *);
int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
int lpfc_do_work(void *);
-int lpfc_disc_state_machine(struct lpfc_hba *, struct lpfc_nodelist *, void *,
+int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
uint32_t);
-int lpfc_check_sparm(struct lpfc_hba *, struct lpfc_nodelist *,
+void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
+ struct lpfc_nodelist *);
+void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
+int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
struct serv_parm *, uint32_t);
-int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist * ndlp);
+int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
+int lpfc_els_chk_latt(struct lpfc_vport *);
int lpfc_els_abort_flogi(struct lpfc_hba *);
-int lpfc_initial_flogi(struct lpfc_hba *);
-int lpfc_issue_els_plogi(struct lpfc_hba *, uint32_t, uint8_t);
-int lpfc_issue_els_prli(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
-int lpfc_issue_els_adisc(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
-int lpfc_issue_els_logo(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
-int lpfc_issue_els_scr(struct lpfc_hba *, uint32_t, uint8_t);
+int lpfc_initial_flogi(struct lpfc_vport *);
+int lpfc_initial_fdisc(struct lpfc_vport *);
+int lpfc_issue_els_fdisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
+int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
-int lpfc_els_rsp_acc(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *,
+int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
+int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
struct lpfc_nodelist *, LPFC_MBOXQ_t *, uint8_t);
-int lpfc_els_rsp_reject(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *,
- struct lpfc_nodelist *);
-int lpfc_els_rsp_adisc_acc(struct lpfc_hba *, struct lpfc_iocbq *,
+int lpfc_els_rsp_reject(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
+ struct lpfc_nodelist *, LPFC_MBOXQ_t *);
+int lpfc_els_rsp_adisc_acc(struct lpfc_vport *, struct lpfc_iocbq *,
struct lpfc_nodelist *);
-int lpfc_els_rsp_prli_acc(struct lpfc_hba *, struct lpfc_iocbq *,
+int lpfc_els_rsp_prli_acc(struct lpfc_vport *, struct lpfc_iocbq *,
struct lpfc_nodelist *);
-void lpfc_cancel_retry_delay_tmo(struct lpfc_hba *, struct lpfc_nodelist *);
+void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_els_retry_delay(unsigned long);
void lpfc_els_retry_delay_handler(struct lpfc_nodelist *);
+void lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *);
void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
-int lpfc_els_handle_rscn(struct lpfc_hba *);
-int lpfc_els_flush_rscn(struct lpfc_hba *);
-int lpfc_rscn_payload_check(struct lpfc_hba *, uint32_t);
-void lpfc_els_flush_cmd(struct lpfc_hba *);
-int lpfc_els_disc_adisc(struct lpfc_hba *);
-int lpfc_els_disc_plogi(struct lpfc_hba *);
+int lpfc_els_handle_rscn(struct lpfc_vport *);
+void lpfc_els_flush_rscn(struct lpfc_vport *);
+int lpfc_rscn_payload_check(struct lpfc_vport *, uint32_t);
+void lpfc_els_flush_cmd(struct lpfc_vport *);
+int lpfc_els_disc_adisc(struct lpfc_vport *);
+int lpfc_els_disc_plogi(struct lpfc_vport *);
void lpfc_els_timeout(unsigned long);
-void lpfc_els_timeout_handler(struct lpfc_hba *);
+void lpfc_els_timeout_handler(struct lpfc_vport *);
+void lpfc_hb_timeout(unsigned long);
+void lpfc_hb_timeout_handler(struct lpfc_hba *);
void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
-int lpfc_ns_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int);
-int lpfc_fdmi_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int);
+int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
+int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
void lpfc_fdmi_tmo(unsigned long);
-void lpfc_fdmi_tmo_handler(struct lpfc_hba *);
+void lpfc_fdmi_timeout_handler(struct lpfc_vport *vport);
int lpfc_config_port_prep(struct lpfc_hba *);
int lpfc_config_port_post(struct lpfc_hba *);
@@ -136,16 +158,23 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
+void lpfc_config_hbq(struct lpfc_hba *, struct lpfc_hbq_init *, uint32_t ,
+ LPFC_MBOXQ_t *);
+struct lpfc_hbq_entry * lpfc_sli_next_hbq_slot(struct lpfc_hba *, uint32_t);
+
int lpfc_mem_alloc(struct lpfc_hba *);
void lpfc_mem_free(struct lpfc_hba *);
+void lpfc_stop_vport_timers(struct lpfc_vport *);
void lpfc_poll_timeout(unsigned long ptr);
void lpfc_poll_start_timer(struct lpfc_hba * phba);
void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba);
struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
+void __lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
void lpfc_reset_barrier(struct lpfc_hba * phba);
@@ -154,6 +183,7 @@ int lpfc_sli_brdkill(struct lpfc_hba *);
int lpfc_sli_brdreset(struct lpfc_hba *);
int lpfc_sli_brdrestart(struct lpfc_hba *);
int lpfc_sli_hba_setup(struct lpfc_hba *);
+int lpfc_sli_host_down(struct lpfc_vport *);
int lpfc_sli_hba_down(struct lpfc_hba *);
int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
int lpfc_sli_handle_mb_event(struct lpfc_hba *);
@@ -164,27 +194,36 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *, uint32_t);
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
-int lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_dmabuf *);
struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
struct lpfc_sli_ring *,
dma_addr_t);
+int lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *, uint32_t);
+int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t);
+void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *);
+struct hbq_dmabuf *lpfc_sli_hbqbuf_find(struct lpfc_hba *, uint32_t);
+int lpfc_sli_hbq_size(void);
int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
int lpfc_sli_sum_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
- uint64_t, lpfc_ctx_cmd);
+ uint64_t, lpfc_ctx_cmd);
int lpfc_sli_abort_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
- uint64_t, uint32_t, lpfc_ctx_cmd);
+ uint64_t, uint32_t, lpfc_ctx_cmd);
void lpfc_mbox_timeout(unsigned long);
void lpfc_mbox_timeout_handler(struct lpfc_hba *);
-struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_hba *, uint32_t);
-struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_hba *, struct lpfc_name *);
+struct lpfc_nodelist *__lpfc_find_node(struct lpfc_vport *, node_filter,
+ void *);
+struct lpfc_nodelist *lpfc_find_node(struct lpfc_vport *, node_filter, void *);
+struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
+struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
+ struct lpfc_name *);
int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
- uint32_t timeout);
+ uint32_t timeout);
int lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
struct lpfc_sli_ring * pring,
@@ -195,25 +234,56 @@ void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba,
struct lpfc_iocbq * cmdiocb,
struct lpfc_iocbq * rspiocb);
+void *lpfc_hbq_alloc(struct lpfc_hba *, int, dma_addr_t *);
+void lpfc_hbq_free(struct lpfc_hba *, void *, dma_addr_t);
+void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
+
void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *);
+void __lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
+void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
/* Function prototypes. */
const char* lpfc_info(struct Scsi_Host *);
-void lpfc_scan_start(struct Scsi_Host *);
int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
void lpfc_get_cfgparam(struct lpfc_hba *);
-int lpfc_alloc_sysfs_attr(struct lpfc_hba *);
-void lpfc_free_sysfs_attr(struct lpfc_hba *);
-extern struct class_device_attribute *lpfc_host_attrs[];
+int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
+void lpfc_free_sysfs_attr(struct lpfc_vport *);
+extern struct class_device_attribute *lpfc_hba_attrs[];
extern struct scsi_host_template lpfc_template;
extern struct fc_function_template lpfc_transport_functions;
+extern struct fc_function_template lpfc_vport_transport_functions;
+extern int lpfc_sli_mode;
-void lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp);
+int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
void lpfc_terminate_rport_io(struct fc_rport *);
void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
+struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct fc_vport *);
+int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable);
+void lpfc_mbx_unreg_vpi(struct lpfc_vport *);
+void destroy_port(struct lpfc_vport *);
+int lpfc_get_instance(void);
+void lpfc_host_attrib_init(struct Scsi_Host *);
+
+extern void lpfc_debugfs_initialize(struct lpfc_vport *);
+extern void lpfc_debugfs_terminate(struct lpfc_vport *);
+extern void lpfc_debugfs_disc_trc(struct lpfc_vport *, int, char *, uint32_t,
+ uint32_t, uint32_t);
+
+/* Interface exported by fabric iocb scheduler */
+int lpfc_issue_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
+void lpfc_fabric_abort_vport(struct lpfc_vport *);
+void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
+void lpfc_fabric_abort_hba(struct lpfc_hba *);
+void lpfc_fabric_abort_flogi(struct lpfc_hba *);
+void lpfc_fabric_block_timeout(unsigned long);
+void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
+void lpfc_adjust_queue_depth(struct lpfc_hba *);
+void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
+void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
+
#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
#define HBA_EVENT_RSCN 5
#define HBA_EVENT_LINK_UP 2
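The lpfc_crtn.h changes above move most discovery and node-list entry points (lpfc_findnode_did, lpfc_nlp_set_state, lpfc_disc_start, ...) from a struct lpfc_hba * to a struct lpfc_vport * first argument. A minimal caller-side sketch of the new convention follows; it is illustrative only and mirrors the per-vport iteration pattern already used by lpfc_update_rport_devloss_tmo() earlier in this diff.

	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	int mapped = 0;

	/* Node lists are now per vport, so walk every port on the HBA. */
	list_for_each_entry(vport, &phba->port_list, listentry) {
		struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
				mapped++;
		spin_unlock_irq(shost->host_lock);
	}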
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 34a9e3bb261..ae9d6f385a6 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -40,6 +40,8 @@
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
#define HBA_PORTSPEED_UNKNOWN 0 /* Unknown - transceiver
* incapable of reporting */
@@ -58,25 +60,69 @@ static char *lpfc_release_version = LPFC_DRIVER_VERSION;
/*
* lpfc_ct_unsol_event
*/
+static void
+lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+ struct lpfc_dmabuf *mp, uint32_t size)
+{
+ if (!mp) {
+	printk(KERN_ERR "%s (%d): Unsolicited CT, no buffer, "
+ "piocbq = %p, status = x%x, mp = %p, size = %d\n",
+ __FUNCTION__, __LINE__,
+ piocbq, piocbq->iocb.ulpStatus, mp, size);
+ }
+
+	printk(KERN_ERR "%s (%d): Ignoring unsolicited CT piocbq = %p, "
+ "buffer = %p, size = %d, status = x%x\n",
+ __FUNCTION__, __LINE__,
+ piocbq, mp, size,
+ piocbq->iocb.ulpStatus);
+
+}
+
+static void
+lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+ struct lpfc_dmabuf *mp, uint32_t size)
+{
+ if (!mp) {
+		printk(KERN_ERR "%s (%d): Unsolicited CT, no "
+ "HBQ buffer, piocbq = %p, status = x%x\n",
+ __FUNCTION__, __LINE__,
+ piocbq, piocbq->iocb.ulpStatus);
+ } else {
+ lpfc_ct_unsol_buffer(phba, piocbq, mp, size);
+		printk(KERN_ERR "%s (%d): Ignoring unsolicited CT "
+ "piocbq = %p, buffer = %p, size = %d, "
+ "status = x%x\n",
+ __FUNCTION__, __LINE__,
+ piocbq, mp, size, piocbq->iocb.ulpStatus);
+ }
+}
+
void
-lpfc_ct_unsol_event(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocbq)
+lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *piocbq)
{
- struct lpfc_iocbq *next_piocbq;
- struct lpfc_dmabuf *pmbuf = NULL;
- struct lpfc_dmabuf *matp, *next_matp;
- uint32_t ctx = 0, size = 0, cnt = 0;
+ struct lpfc_dmabuf *mp = NULL;
IOCB_t *icmd = &piocbq->iocb;
- IOCB_t *save_icmd = icmd;
- int i, go_exit = 0;
- struct list_head head;
+ int i;
+ struct lpfc_iocbq *iocbq;
+ dma_addr_t paddr;
+ uint32_t size;
+ struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
+ struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
+
+ piocbq->context2 = NULL;
+ piocbq->context3 = NULL;
- if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
+ lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
+ } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
/* Not enough posted buffers; Try posting more buffers */
phba->fc_stat.NoRcvBuf++;
- lpfc_post_buffer(phba, pring, 0, 1);
+ if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
+ lpfc_post_buffer(phba, pring, 0, 1);
return;
}
@@ -86,66 +132,56 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba,
if (icmd->ulpBdeCount == 0)
return;
- INIT_LIST_HEAD(&head);
- list_add_tail(&head, &piocbq->list);
-
- list_for_each_entry_safe(piocbq, next_piocbq, &head, list) {
- icmd = &piocbq->iocb;
- if (ctx == 0)
- ctx = (uint32_t) (icmd->ulpContext);
- if (icmd->ulpBdeCount == 0)
- continue;
-
- for (i = 0; i < icmd->ulpBdeCount; i++) {
- matp = lpfc_sli_ringpostbuf_get(phba, pring,
- getPaddr(icmd->un.
- cont64[i].
- addrHigh,
- icmd->un.
- cont64[i].
- addrLow));
- if (!matp) {
- /* Insert lpfc log message here */
- lpfc_post_buffer(phba, pring, cnt, 1);
- go_exit = 1;
- goto ct_unsol_event_exit_piocbq;
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ list_for_each_entry(iocbq, &piocbq->list, list) {
+ icmd = &iocbq->iocb;
+ if (icmd->ulpBdeCount == 0) {
+				printk(KERN_ERR "%s (%d): Unsolicited CT, no "
+ "BDE, iocbq = %p, status = x%x\n",
+ __FUNCTION__, __LINE__,
+ iocbq, iocbq->iocb.ulpStatus);
+ continue;
}
- /* Typically for Unsolicited CT requests */
- if (!pmbuf) {
- pmbuf = matp;
- INIT_LIST_HEAD(&pmbuf->list);
- } else
- list_add_tail(&matp->list, &pmbuf->list);
-
- size += icmd->un.cont64[i].tus.f.bdeSize;
- cnt++;
+ size = icmd->un.cont64[0].tus.f.bdeSize;
+ lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf1, size);
+ lpfc_in_buf_free(phba, bdeBuf1);
+ if (icmd->ulpBdeCount == 2) {
+ lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf2,
+ size);
+ lpfc_in_buf_free(phba, bdeBuf2);
+ }
}
+ } else {
+ struct lpfc_iocbq *next;
+
+ list_for_each_entry_safe(iocbq, next, &piocbq->list, list) {
+ icmd = &iocbq->iocb;
+ if (icmd->ulpBdeCount == 0) {
+				printk(KERN_ERR "%s (%d): Unsolicited CT, no "
+ "BDE, iocbq = %p, status = x%x\n",
+ __FUNCTION__, __LINE__,
+ iocbq, iocbq->iocb.ulpStatus);
+ continue;
+ }
- icmd->ulpBdeCount = 0;
- }
-
- lpfc_post_buffer(phba, pring, cnt, 1);
- if (save_icmd->ulpStatus) {
- go_exit = 1;
- }
-
-ct_unsol_event_exit_piocbq:
- list_del(&head);
- if (pmbuf) {
- list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) {
- lpfc_mbuf_free(phba, matp->virt, matp->phys);
- list_del(&matp->list);
- kfree(matp);
+ for (i = 0; i < icmd->ulpBdeCount; i++) {
+ paddr = getPaddr(icmd->un.cont64[i].addrHigh,
+ icmd->un.cont64[i].addrLow);
+ mp = lpfc_sli_ringpostbuf_get(phba, pring,
+ paddr);
+ size = icmd->un.cont64[i].tus.f.bdeSize;
+ lpfc_ct_unsol_buffer(phba, piocbq, mp, size);
+ lpfc_in_buf_free(phba, mp);
+ }
+ list_del(&iocbq->list);
+ lpfc_sli_release_iocbq(phba, iocbq);
}
- lpfc_mbuf_free(phba, pmbuf->virt, pmbuf->phys);
- kfree(pmbuf);
}
- return;
}
static void
-lpfc_free_ct_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mlist)
+lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
{
struct lpfc_dmabuf *mlast, *next_mlast;
@@ -160,7 +196,7 @@ lpfc_free_ct_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mlist)
}
static struct lpfc_dmabuf *
-lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
+lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl,
uint32_t size, int *entries)
{
struct lpfc_dmabuf *mlist = NULL;
@@ -181,7 +217,8 @@ lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
INIT_LIST_HEAD(&mp->list);
- if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT))
+ if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT) ||
+ cmdcode == be16_to_cpu(SLI_CTNS_GFF_ID))
mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
else
mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
@@ -201,8 +238,8 @@ lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
bpl->tus.f.bdeFlags = BUFF_USE_RCV;
/* build buffer ptr list for IOCB */
- bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
- bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
+	bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
+	bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
bpl->tus.f.bdeSize = (uint16_t) cnt;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
bpl++;
@@ -215,24 +252,49 @@ lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
return mlist;
}
+int
+lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
+{
+ struct lpfc_dmabuf *buf_ptr;
+
+ if (ctiocb->context1) {
+ buf_ptr = (struct lpfc_dmabuf *) ctiocb->context1;
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ ctiocb->context1 = NULL;
+ }
+ if (ctiocb->context2) {
+ lpfc_free_ct_rsp(phba, (struct lpfc_dmabuf *) ctiocb->context2);
+ ctiocb->context2 = NULL;
+ }
+
+ if (ctiocb->context3) {
+ buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3;
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+		ctiocb->context3 = NULL;
+ }
+ lpfc_sli_release_iocbq(phba, ctiocb);
+ return 0;
+}
+
static int
-lpfc_gen_req(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp,
+lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *),
struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry,
- uint32_t tmo)
+ uint32_t tmo, uint8_t retry)
{
-
- struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
IOCB_t *icmd;
struct lpfc_iocbq *geniocb;
+ int rc;
/* Allocate buffer for command iocb */
- spin_lock_irq(phba->host->host_lock);
geniocb = lpfc_sli_get_iocbq(phba);
- spin_unlock_irq(phba->host->host_lock);
if (geniocb == NULL)
return 1;
@@ -272,31 +334,40 @@ lpfc_gen_req(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp,
icmd->ulpClass = CLASS3;
icmd->ulpContext = ndlp->nlp_rpi;
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ /* For GEN_REQUEST64_CR, use the RPI */
+ icmd->ulpCt_h = 0;
+ icmd->ulpCt_l = 0;
+ }
+
/* Issue GEN REQ IOCB for NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0119 Issue GEN REQ IOCB for NPORT x%x "
- "Data: x%x x%x\n", phba->brd_no, icmd->un.ulpWord[5],
- icmd->ulpIoTag, phba->hba_state);
+ "%d (%d):0119 Issue GEN REQ IOCB to NPORT x%x "
+ "Data: x%x x%x\n", phba->brd_no, vport->vpi,
+ ndlp->nlp_DID, icmd->ulpIoTag,
+ vport->port_state);
geniocb->iocb_cmpl = cmpl;
geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
- spin_lock_irq(phba->host->host_lock);
- if (lpfc_sli_issue_iocb(phba, pring, geniocb, 0) == IOCB_ERROR) {
+ geniocb->vport = vport;
+ geniocb->retry = retry;
+ rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0);
+
+ if (rc == IOCB_ERROR) {
lpfc_sli_release_iocbq(phba, geniocb);
- spin_unlock_irq(phba->host->host_lock);
return 1;
}
- spin_unlock_irq(phba->host->host_lock);
return 0;
}
static int
-lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp,
+lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *),
- uint32_t rsp_size)
+ uint32_t rsp_size, uint8_t retry)
{
+ struct lpfc_hba *phba = vport->phba;
struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
struct lpfc_dmabuf *outmp;
int cnt = 0, status;
@@ -310,8 +381,8 @@ lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp,
if (!outmp)
return -ENOMEM;
- status = lpfc_gen_req(phba, bmp, inmp, outmp, cmpl, ndlp, 0,
- cnt+1, 0);
+ status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0,
+ cnt+1, 0, retry);
if (status) {
lpfc_free_ct_rsp(phba, outmp);
return -ENOMEM;
@@ -319,20 +390,35 @@ lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp,
return 0;
}
+static struct lpfc_vport *
+lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did)
+{
+ struct lpfc_vport *vport_curr;
+
+ list_for_each_entry(vport_curr, &phba->port_list, listentry) {
+ if ((vport_curr->fc_myDID) &&
+ (vport_curr->fc_myDID == did))
+ return vport_curr;
+ }
+
+ return NULL;
+}
+
static int
-lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
+lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ct_request *Response =
(struct lpfc_sli_ct_request *) mp->virt;
struct lpfc_nodelist *ndlp = NULL;
struct lpfc_dmabuf *mlast, *next_mp;
uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
- uint32_t Did;
- uint32_t CTentry;
+ uint32_t Did, CTentry;
int Cnt;
struct list_head head;
- lpfc_set_disctmo(phba);
+ lpfc_set_disctmo(vport);
+ vport->num_disc_nodes = 0;
list_add_tail(&head, &mp->list);
@@ -350,39 +436,96 @@ lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
/* Loop through entire NameServer list of DIDs */
while (Cnt >= sizeof (uint32_t)) {
-
/* Get next DID from NameServer List */
CTentry = *ctptr++;
Did = ((be32_to_cpu(CTentry)) & Mask_DID);
ndlp = NULL;
- if (Did != phba->fc_myDID) {
- /* Check for rscn processing or not */
- ndlp = lpfc_setup_disc_node(phba, Did);
- }
- /* Mark all node table entries that are in the
- Nameserver */
- if (ndlp) {
- /* NameServer Rsp */
- lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0238 Process x%x NameServer"
- " Rsp Data: x%x x%x x%x\n",
- phba->brd_no,
+
+ /*
+ * Check for rscn processing or not
+ * To conserve rpi's, filter out addresses for other
+ * vports on the same physical HBAs.
+ */
+ if ((Did != vport->fc_myDID) &&
+ ((lpfc_find_vport_by_did(phba, Did) == NULL) ||
+ phba->cfg_peer_port_login)) {
+ if ((vport->port_type != LPFC_NPIV_PORT) ||
+ (vport->fc_flag & FC_RFF_NOT_SUPPORTED) ||
+ (!phba->cfg_vport_restrict_login)) {
+ ndlp = lpfc_setup_disc_node(vport, Did);
+ if (ndlp) {
+ lpfc_debugfs_disc_trc(vport,
+ LPFC_DISC_TRC_CT,
+ "Parse GID_FTrsp: "
+ "did:x%x flg:x%x x%x",
Did, ndlp->nlp_flag,
- phba->fc_flag,
- phba->fc_rscn_id_cnt);
- } else {
- /* NameServer Rsp */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0239 Skip x%x NameServer "
- "Rsp Data: x%x x%x x%x\n",
- phba->brd_no,
- Did, Size, phba->fc_flag,
- phba->fc_rscn_id_cnt);
+ vport->fc_flag);
+
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_DISCOVERY,
+ "%d (%d):0238 Process "
+ "x%x NameServer Rsp"
+ "Data: x%x x%x x%x\n",
+ phba->brd_no,
+ vport->vpi, Did,
+ ndlp->nlp_flag,
+ vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+ } else {
+ lpfc_debugfs_disc_trc(vport,
+ LPFC_DISC_TRC_CT,
+ "Skip1 GID_FTrsp: "
+ "did:x%x flg:x%x cnt:%d",
+ Did, vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_DISCOVERY,
+ "%d (%d):0239 Skip x%x "
+ "NameServer Rsp Data: "
+ "x%x x%x\n",
+ phba->brd_no,
+ vport->vpi, Did,
+ vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+ }
+
+ } else {
+ if (!(vport->fc_flag & FC_RSCN_MODE) ||
+ (lpfc_rscn_payload_check(vport, Did))) {
+ lpfc_debugfs_disc_trc(vport,
+ LPFC_DISC_TRC_CT,
+ "Query GID_FTrsp: "
+ "did:x%x flg:x%x cnt:%d",
+ Did, vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+
+ if (lpfc_ns_cmd(vport,
+ SLI_CTNS_GFF_ID,
+ 0, Did) == 0)
+ vport->num_disc_nodes++;
+ }
+ else {
+ lpfc_debugfs_disc_trc(vport,
+ LPFC_DISC_TRC_CT,
+ "Skip2 GID_FTrsp: "
+ "did:x%x flg:x%x cnt:%d",
+ Did, vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_DISCOVERY,
+ "%d (%d):0245 Skip x%x "
+ "NameServer Rsp Data: "
+ "x%x x%x\n",
+ phba->brd_no,
+ vport->vpi, Did,
+ vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+ }
+ }
}
-
if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY)))
goto nsout1;
Cnt -= sizeof (uint32_t);
@@ -393,190 +536,369 @@ lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
nsout1:
list_del(&head);
-
- /*
- * The driver has cycled through all Nports in the RSCN payload.
- * Complete the handling by cleaning up and marking the
- * current driver state.
- */
- if (phba->hba_state == LPFC_HBA_READY) {
- lpfc_els_flush_rscn(phba);
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */
- spin_unlock_irq(phba->host->host_lock);
- }
return 0;
}
-
-
-
static void
-lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
- struct lpfc_sli *psli;
struct lpfc_dmabuf *bmp;
- struct lpfc_dmabuf *inp;
struct lpfc_dmabuf *outp;
- struct lpfc_nodelist *ndlp;
struct lpfc_sli_ct_request *CTrsp;
+ int rc;
- psli = &phba->sli;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
- inp = (struct lpfc_dmabuf *) cmdiocb->context1;
outp = (struct lpfc_dmabuf *) cmdiocb->context2;
bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
-
irsp = &rspiocb->iocb;
- if (irsp->ulpStatus) {
- if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
- (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
- goto out;
- }
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "GID_FT cmpl: status:x%x/x%x rtry:%d",
+ irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
+
+ /* Don't bother processing response if vport is being torn down. */
+ if (vport->load_flag & FC_UNLOADING)
+ goto out;
+
+
+ if (lpfc_els_chk_latt(vport) || lpfc_error_lost_link(irsp)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0216 Link event during NS query\n",
+ phba->brd_no, vport->vpi);
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ goto out;
+ }
+
+ if (irsp->ulpStatus) {
/* Check for retry */
- if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
- phba->fc_ns_retry++;
+ if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
+ if ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
+ (irsp->un.ulpWord[4] != IOERR_NO_RESOURCES))
+ vport->fc_ns_retry++;
/* CT command is being retried */
- ndlp = lpfc_findnode_did(phba, NameServer_DID);
- if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
- if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) ==
- 0) {
- goto out;
- }
- }
+ rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
+ vport->fc_ns_retry, 0);
+ if (rc == 0)
+ goto out;
}
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0257 GID_FT Query error: 0x%x 0x%x\n",
+ phba->brd_no, vport->vpi, irsp->ulpStatus,
+ vport->fc_ns_retry);
} else {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0208 NameServer Rsp "
+ "%d (%d):0208 NameServer Rsp "
"Data: x%x\n",
- phba->brd_no,
- phba->fc_flag);
- lpfc_ns_rsp(phba, outp,
+ phba->brd_no, vport->vpi,
+ vport->fc_flag);
+ lpfc_ns_rsp(vport, outp,
(uint32_t) (irsp->un.genreq64.bdl.bdeSize));
} else if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
/* NameServer Rsp Error */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0240 NameServer Rsp Error "
+ "%d (%d):0240 NameServer Rsp Error "
"Data: x%x x%x x%x x%x\n",
- phba->brd_no,
+ phba->brd_no, vport->vpi,
CTrsp->CommandResponse.bits.CmdRsp,
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation,
- phba->fc_flag);
+ vport->fc_flag);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "GID_FT rsp err1 cmd:x%x rsn:x%x exp:x%x",
+ (uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+ (uint32_t) CTrsp->ReasonCode,
+ (uint32_t) CTrsp->Explanation);
+
} else {
/* NameServer Rsp Error */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0241 NameServer Rsp Error "
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0241 NameServer Rsp Error "
"Data: x%x x%x x%x x%x\n",
- phba->brd_no,
+ phba->brd_no, vport->vpi,
CTrsp->CommandResponse.bits.CmdRsp,
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation,
- phba->fc_flag);
+ vport->fc_flag);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "GID_FT rsp err2 cmd:x%x rsn:x%x exp:x%x",
+ (uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+ (uint32_t) CTrsp->ReasonCode,
+ (uint32_t) CTrsp->Explanation);
}
}
/* Link up / RSCN discovery */
- lpfc_disc_start(phba);
+ if (vport->num_disc_nodes == 0) {
+ /*
+ * The driver has cycled through all Nports in the RSCN payload.
+ * Complete the handling by cleaning up and marking the
+ * current driver state.
+ */
+ if (vport->port_state >= LPFC_DISC_AUTH) {
+ if (vport->fc_flag & FC_RSCN_MODE) {
+ lpfc_els_flush_rscn(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
+ spin_unlock_irq(shost->host_lock);
+ }
+ else
+ lpfc_els_flush_rscn(vport);
+ }
+
+ lpfc_disc_start(vport);
+ }
out:
- lpfc_free_ct_rsp(phba, outp);
- lpfc_mbuf_free(phba, inp->virt, inp->phys);
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(inp);
- kfree(bmp);
- spin_lock_irq(phba->host->host_lock);
- lpfc_sli_release_iocbq(phba, cmdiocb);
- spin_unlock_irq(phba->host->host_lock);
+ lpfc_ct_free_iocb(phba, cmdiocb);
return;
}
+void
+lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1;
+ struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ struct lpfc_sli_ct_request *CTrsp;
+ int did;
+ uint8_t fbits;
+ struct lpfc_nodelist *ndlp;
+
+ did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId;
+ did = be32_to_cpu(did);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "GFF_ID cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4], did);
+
+ if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ /* Good status, continue checking */
+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];
+
+ if (CTrsp->CommandResponse.bits.CmdRsp ==
+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
+ if ((fbits & FC4_FEATURE_INIT) &&
+ !(fbits & FC4_FEATURE_TARGET)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0245 Skip x%x GFF "
+ "NameServer Rsp Data: (init) "
+ "x%x x%x\n", phba->brd_no,
+ vport->vpi, did, fbits,
+ vport->fc_rscn_id_cnt);
+ goto out;
+ }
+ }
+ }
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0267 NameServer GFF Rsp"
+ " x%x Error (%d %d) Data: x%x x%x\n",
+ phba->brd_no, vport->vpi, did,
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ vport->fc_flag, vport->fc_rscn_id_cnt);
+ }
+
+ /* This is a target port, unregistered port, or the GFF_ID failed */
+ ndlp = lpfc_setup_disc_node(vport, did);
+ if (ndlp) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0242 Process x%x GFF "
+ "NameServer Rsp Data: x%x x%x x%x\n",
+ phba->brd_no, vport->vpi,
+ did, ndlp->nlp_flag, vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0243 Skip x%x GFF "
+ "NameServer Rsp Data: x%x x%x\n",
+ phba->brd_no, vport->vpi, did,
+ vport->fc_flag, vport->fc_rscn_id_cnt);
+ }
+out:
+ /* Link up / RSCN discovery */
+ if (vport->num_disc_nodes)
+ vport->num_disc_nodes--;
+ if (vport->num_disc_nodes == 0) {
+ /*
+ * The driver has cycled through all Nports in the RSCN payload.
+ * Complete the handling by cleaning up and marking the
+ * current driver state.
+ */
+ if (vport->port_state >= LPFC_DISC_AUTH) {
+ if (vport->fc_flag & FC_RSCN_MODE) {
+ lpfc_els_flush_rscn(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
+ spin_unlock_irq(shost->host_lock);
+ }
+ else
+ lpfc_els_flush_rscn(vport);
+ }
+ lpfc_disc_start(vport);
+ }
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ return;
+}
+
+
static void
-lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
- struct lpfc_sli *psli;
- struct lpfc_dmabuf *bmp;
+ struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_dmabuf *inp;
struct lpfc_dmabuf *outp;
IOCB_t *irsp;
struct lpfc_sli_ct_request *CTrsp;
+ int cmdcode, rc;
+ uint8_t retry;
+ uint32_t latt;
- psli = &phba->sli;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
inp = (struct lpfc_dmabuf *) cmdiocb->context1;
outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
irsp = &rspiocb->iocb;
+ cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
+ CommandResponse.bits.CmdRsp);
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ latt = lpfc_els_chk_latt(vport);
+
/* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0209 RFT request completes ulpStatus x%x "
- "CmdRsp x%x\n", phba->brd_no, irsp->ulpStatus,
- CTrsp->CommandResponse.bits.CmdRsp);
+ "%d (%d):0209 RFT request completes, latt %d, "
+ "ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n",
+ phba->brd_no, vport->vpi, latt, irsp->ulpStatus,
+ CTrsp->CommandResponse.bits.CmdRsp,
+ cmdiocb->iocb.ulpContext, cmdiocb->iocb.ulpIoTag);
- lpfc_free_ct_rsp(phba, outp);
- lpfc_mbuf_free(phba, inp->virt, inp->phys);
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(inp);
- kfree(bmp);
- spin_lock_irq(phba->host->host_lock);
- lpfc_sli_release_iocbq(phba, cmdiocb);
- spin_unlock_irq(phba->host->host_lock);
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "CT cmd cmpl: status:x%x/x%x cmd:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4], cmdcode);
+
+ if (irsp->ulpStatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0268 NS cmd %x Error (%d %d)\n",
+ phba->brd_no, vport->vpi, cmdcode,
+ irsp->ulpStatus, irsp->un.ulpWord[4]);
+
+ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
+ (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)))
+ goto out;
+
+ retry = cmdiocb->retry;
+ if (retry >= LPFC_MAX_NS_RETRY)
+ goto out;
+
+ retry++;
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0216 Retrying NS cmd %x\n",
+ phba->brd_no, vport->vpi, cmdcode);
+ rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
+ if (rc == 0)
+ goto out;
+ }
+
+out:
+ lpfc_ct_free_iocb(phba, cmdiocb);
return;
}
static void
-lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
return;
}
static void
-lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
return;
}
static void
-lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
return;
}
-void
-lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
+static void
+lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
- char fwrev[16];
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_vport *vport = cmdiocb->vport;
- lpfc_decode_firmware_rev(phba, fwrev, 0);
+ if (irsp->ulpStatus != IOSTAT_SUCCESS)
+ vport->fc_flag |= FC_RFF_NOT_SUPPORTED;
- sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
- fwrev, lpfc_release_version);
+ lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
return;
}
+int
+lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
+ size_t size)
+{
+ int n;
+ uint8_t *wwn = vport->phba->wwpn;
+
+ n = snprintf(symbol, size,
+ "Emulex PPN-%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
+ wwn[0], wwn[1], wwn[2], wwn[3],
+ wwn[4], wwn[5], wwn[6], wwn[7]);
+
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ return n;
+
+ if (n < size)
+ n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi);
+
+ if (n < size && vport->vname)
+ n += snprintf(symbol + n, size - n, " VName-%s", vport->vname);
+ return n;
+}
+
+int
+lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
+ size_t size)
+{
+ char fwrev[16];
+ int n;
+
+ lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
+
+ n = snprintf(symbol, size, "Emulex %s FV%s DV%s",
+ vport->phba->ModelName, fwrev, lpfc_release_version);
+ return n;
+}
+
/*
* lpfc_ns_cmd
* Description:
@@ -585,55 +907,76 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
* LI_CTNS_RFT_ID
*/
int
-lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
+lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
+ uint8_t retry, uint32_t context)
{
+ struct lpfc_nodelist * ndlp;
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *mp, *bmp;
struct lpfc_sli_ct_request *CtReq;
struct ulp_bde64 *bpl;
void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *) = NULL;
uint32_t rsp_size = 1024;
+ size_t size;
+ int rc = 0;
+
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (ndlp == NULL || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
+ rc = 1;
+ goto ns_cmd_exit;
+ }
/* fill in BDEs for command */
/* Allocate buffer for command payload */
mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
- if (!mp)
+ if (!mp) {
+ rc = 2;
goto ns_cmd_exit;
+ }
INIT_LIST_HEAD(&mp->list);
mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
- if (!mp->virt)
+ if (!mp->virt) {
+ rc = 3;
goto ns_cmd_free_mp;
+ }
/* Allocate buffer for Buffer ptr list */
bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
- if (!bmp)
+ if (!bmp) {
+ rc = 4;
goto ns_cmd_free_mpvirt;
+ }
INIT_LIST_HEAD(&bmp->list);
bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys));
- if (!bmp->virt)
+ if (!bmp->virt) {
+ rc = 5;
goto ns_cmd_free_bmp;
+ }
/* NameServer Req */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0236 NameServer Req Data: x%x x%x x%x\n",
- phba->brd_no, cmdcode, phba->fc_flag,
- phba->fc_rscn_id_cnt);
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0236 NameServer Req Data: x%x x%x x%x\n",
+ phba->brd_no, vport->vpi, cmdcode, vport->fc_flag,
+ vport->fc_rscn_id_cnt);
bpl = (struct ulp_bde64 *) bmp->virt;
memset(bpl, 0, sizeof(struct ulp_bde64));
- bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
- bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
+ bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
bpl->tus.f.bdeFlags = 0;
if (cmdcode == SLI_CTNS_GID_FT)
bpl->tus.f.bdeSize = GID_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_GFF_ID)
+ bpl->tus.f.bdeSize = GFF_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RFT_ID)
bpl->tus.f.bdeSize = RFT_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RNN_ID)
bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_RSPN_ID)
+ bpl->tus.f.bdeSize = RSPN_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RSNN_NN)
bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RFF_ID)
@@ -654,56 +997,78 @@ lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_GID_FT);
CtReq->un.gid.Fc4Type = SLI_CTPT_FCP;
- if (phba->hba_state < LPFC_HBA_READY)
- phba->hba_state = LPFC_NS_QRY;
- lpfc_set_disctmo(phba);
+ if (vport->port_state < LPFC_NS_QRY)
+ vport->port_state = LPFC_NS_QRY;
+ lpfc_set_disctmo(vport);
cmpl = lpfc_cmpl_ct_cmd_gid_ft;
rsp_size = FC_MAX_NS_RSP;
break;
+ case SLI_CTNS_GFF_ID:
+ CtReq->CommandResponse.bits.CmdRsp =
+ be16_to_cpu(SLI_CTNS_GFF_ID);
+ CtReq->un.gff.PortId = be32_to_cpu(context);
+ cmpl = lpfc_cmpl_ct_cmd_gff_id;
+ break;
+
case SLI_CTNS_RFT_ID:
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RFT_ID);
- CtReq->un.rft.PortId = be32_to_cpu(phba->fc_myDID);
+ CtReq->un.rft.PortId = be32_to_cpu(vport->fc_myDID);
CtReq->un.rft.fcpReg = 1;
cmpl = lpfc_cmpl_ct_cmd_rft_id;
break;
- case SLI_CTNS_RFF_ID:
- CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_CTNS_RFF_ID);
- CtReq->un.rff.PortId = be32_to_cpu(phba->fc_myDID);
- CtReq->un.rff.feature_res = 0;
- CtReq->un.rff.feature_tgt = 0;
- CtReq->un.rff.type_code = FC_FCP_DATA;
- CtReq->un.rff.feature_init = 1;
- cmpl = lpfc_cmpl_ct_cmd_rff_id;
- break;
-
case SLI_CTNS_RNN_ID:
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RNN_ID);
- CtReq->un.rnn.PortId = be32_to_cpu(phba->fc_myDID);
- memcpy(CtReq->un.rnn.wwnn, &phba->fc_nodename,
+ CtReq->un.rnn.PortId = be32_to_cpu(vport->fc_myDID);
+ memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename,
sizeof (struct lpfc_name));
cmpl = lpfc_cmpl_ct_cmd_rnn_id;
break;
+ case SLI_CTNS_RSPN_ID:
+ CtReq->CommandResponse.bits.CmdRsp =
+ be16_to_cpu(SLI_CTNS_RSPN_ID);
+ CtReq->un.rspn.PortId = be32_to_cpu(vport->fc_myDID);
+ size = sizeof(CtReq->un.rspn.symbname);
+ CtReq->un.rspn.len =
+ lpfc_vport_symbolic_port_name(vport,
+ CtReq->un.rspn.symbname, size);
+ cmpl = lpfc_cmpl_ct_cmd_rspn_id;
+ break;
case SLI_CTNS_RSNN_NN:
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RSNN_NN);
- memcpy(CtReq->un.rsnn.wwnn, &phba->fc_nodename,
+ memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename,
sizeof (struct lpfc_name));
- lpfc_get_hba_sym_node_name(phba, CtReq->un.rsnn.symbname);
- CtReq->un.rsnn.len = strlen(CtReq->un.rsnn.symbname);
+ size = sizeof(CtReq->un.rsnn.symbname);
+ CtReq->un.rsnn.len =
+ lpfc_vport_symbolic_node_name(vport,
+ CtReq->un.rsnn.symbname, size);
cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
break;
+ case SLI_CTNS_RFF_ID:
+ vport->fc_flag &= ~FC_RFF_NOT_SUPPORTED;
+ CtReq->CommandResponse.bits.CmdRsp =
+ be16_to_cpu(SLI_CTNS_RFF_ID);
+ CtReq->un.rff.PortId = be32_to_cpu(vport->fc_myDID);
+ CtReq->un.rff.fbits = FC4_FEATURE_INIT;
+ CtReq->un.rff.type_code = FC_FCP_DATA;
+ cmpl = lpfc_cmpl_ct_cmd_rff_id;
+ break;
}
- if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, rsp_size))
+ if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) {
/* On success, The cmpl function will free the buffers */
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "Issue CT cmd: cmd:x%x did:x%x",
+ cmdcode, ndlp->nlp_DID, 0);
return 0;
+ }
+ rc = 6;
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
ns_cmd_free_bmp:
kfree(bmp);
@@ -712,14 +1077,17 @@ ns_cmd_free_mpvirt:
ns_cmd_free_mp:
kfree(mp);
ns_cmd_exit:
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0266 Issue NameServer Req x%x err %d Data: x%x x%x\n",
+ phba->brd_no, vport->vpi, cmdcode, rc, vport->fc_flag,
+ vport->fc_rscn_id_cnt);
return 1;
}
static void
-lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba * phba,
- struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq * rspiocb)
{
- struct lpfc_dmabuf *bmp = cmdiocb->context3;
struct lpfc_dmabuf *inp = cmdiocb->context1;
struct lpfc_dmabuf *outp = cmdiocb->context2;
struct lpfc_sli_ct_request *CTrsp = outp->virt;
@@ -727,48 +1095,60 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba * phba,
struct lpfc_nodelist *ndlp;
uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
+ struct lpfc_vport *vport = cmdiocb->vport;
+ IOCB_t *irsp = &rspiocb->iocb;
+ uint32_t latt;
+
+ latt = lpfc_els_chk_latt(vport);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "FDMI cmpl: status:x%x/x%x latt:%d",
+ irsp->ulpStatus, irsp->un.ulpWord[4], latt);
+
+ if (latt || irsp->ulpStatus) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0229 FDMI cmd %04x failed, latt = %d "
+ "ulpStatus: x%x, rid x%x\n",
+ phba->brd_no, vport->vpi,
+ be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus,
+ irsp->un.ulpWord[4]);
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ return;
+ }
- ndlp = lpfc_findnode_did(phba, FDMI_DID);
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
/* FDMI rsp failed */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0220 FDMI rsp failed Data: x%x\n",
- phba->brd_no,
- be16_to_cpu(fdmi_cmd));
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0220 FDMI rsp failed Data: x%x\n",
+ phba->brd_no, vport->vpi,
+ be16_to_cpu(fdmi_cmd));
}
switch (be16_to_cpu(fdmi_cmd)) {
case SLI_MGMT_RHBA:
- lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RPA);
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA);
break;
case SLI_MGMT_RPA:
break;
case SLI_MGMT_DHBA:
- lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DPRT);
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT);
break;
case SLI_MGMT_DPRT:
- lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RHBA);
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA);
break;
}
-
- lpfc_free_ct_rsp(phba, outp);
- lpfc_mbuf_free(phba, inp->virt, inp->phys);
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(inp);
- kfree(bmp);
- spin_lock_irq(phba->host->host_lock);
- lpfc_sli_release_iocbq(phba, cmdiocb);
- spin_unlock_irq(phba->host->host_lock);
+ lpfc_ct_free_iocb(phba, cmdiocb);
return;
}
+
int
-lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
+lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *mp, *bmp;
struct lpfc_sli_ct_request *CtReq;
struct ulp_bde64 *bpl;
@@ -805,12 +1185,10 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
INIT_LIST_HEAD(&bmp->list);
/* FDMI request */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0218 FDMI Request Data: x%x x%x x%x\n",
- phba->brd_no,
- phba->fc_flag, phba->hba_state, cmdcode);
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0218 FDMI Request Data: x%x x%x x%x\n",
+ phba->brd_no, vport->vpi, vport->fc_flag,
+ vport->port_state, cmdcode);
CtReq = (struct lpfc_sli_ct_request *) mp->virt;
@@ -833,11 +1211,11 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
be16_to_cpu(SLI_MGMT_RHBA);
CtReq->CommandResponse.bits.Size = 0;
rh = (REG_HBA *) & CtReq->un.PortID;
- memcpy(&rh->hi.PortName, &phba->fc_sparam.portName,
+ memcpy(&rh->hi.PortName, &vport->fc_sparam.portName,
sizeof (struct lpfc_name));
/* One entry (port) per adapter */
rh->rpl.EntryCnt = be32_to_cpu(1);
- memcpy(&rh->rpl.pe, &phba->fc_sparam.portName,
+ memcpy(&rh->rpl.pe, &vport->fc_sparam.portName,
sizeof (struct lpfc_name));
/* point to the HBA attribute block */
@@ -853,7 +1231,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
ae->ad.bits.AttrType = be16_to_cpu(NODE_NAME);
ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES
+ sizeof (struct lpfc_name));
- memcpy(&ae->un.NodeName, &phba->fc_sparam.nodeName,
+ memcpy(&ae->un.NodeName, &vport->fc_sparam.nodeName,
sizeof (struct lpfc_name));
ab->EntryCnt++;
size += FOURBYTES + sizeof (struct lpfc_name);
@@ -991,7 +1369,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
pab = (REG_PORT_ATTRIBUTE *) & CtReq->un.PortID;
size = sizeof (struct lpfc_name) + FOURBYTES;
memcpy((uint8_t *) & pab->PortName,
- (uint8_t *) & phba->fc_sparam.portName,
+ (uint8_t *) & vport->fc_sparam.portName,
sizeof (struct lpfc_name));
pab->ab.EntryCnt = 0;
@@ -1053,7 +1431,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
ae->ad.bits.AttrType = be16_to_cpu(MAX_FRAME_SIZE);
ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
- hsp = (struct serv_parm *) & phba->fc_sparam;
+ hsp = (struct serv_parm *) & vport->fc_sparam;
ae->un.MaxFrameSize =
(((uint32_t) hsp->cmn.
bbRcvSizeMsb) << 8) | (uint32_t) hsp->cmn.
@@ -1097,7 +1475,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
CtReq->CommandResponse.bits.Size = 0;
pe = (PORT_ENTRY *) & CtReq->un.PortID;
memcpy((uint8_t *) & pe->PortName,
- (uint8_t *) & phba->fc_sparam.portName,
+ (uint8_t *) & vport->fc_sparam.portName,
sizeof (struct lpfc_name));
size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
break;
@@ -1107,22 +1485,22 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
CtReq->CommandResponse.bits.Size = 0;
pe = (PORT_ENTRY *) & CtReq->un.PortID;
memcpy((uint8_t *) & pe->PortName,
- (uint8_t *) & phba->fc_sparam.portName,
+ (uint8_t *) & vport->fc_sparam.portName,
sizeof (struct lpfc_name));
size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
break;
}
bpl = (struct ulp_bde64 *) bmp->virt;
- bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
- bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
+ bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
bpl->tus.f.bdeFlags = 0;
bpl->tus.f.bdeSize = size;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
cmpl = lpfc_cmpl_ct_cmd_fdmi;
- if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP))
+ if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0))
return 0;
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
@@ -1134,49 +1512,50 @@ fdmi_cmd_free_mp:
kfree(mp);
fdmi_cmd_exit:
/* Issue FDMI request failed */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0244 Issue FDMI request failed Data: x%x\n",
- phba->brd_no,
- cmdcode);
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0244 Issue FDMI request failed Data: x%x\n",
+ phba->brd_no, vport->vpi, cmdcode);
return 1;
}
void
lpfc_fdmi_tmo(unsigned long ptr)
{
- struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+ struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
+ struct lpfc_hba *phba = vport->phba;
unsigned long iflag;
- spin_lock_irqsave(phba->host->host_lock, iflag);
- if (!(phba->work_hba_events & WORKER_FDMI_TMO)) {
- phba->work_hba_events |= WORKER_FDMI_TMO;
+ spin_lock_irqsave(&vport->work_port_lock, iflag);
+ if (!(vport->work_port_events & WORKER_FDMI_TMO)) {
+ vport->work_port_events |= WORKER_FDMI_TMO;
+ spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->work_wait)
- wake_up(phba->work_wait);
+ lpfc_worker_wake_up(phba);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
}
- spin_unlock_irqrestore(phba->host->host_lock,iflag);
+ else
+ spin_unlock_irqrestore(&vport->work_port_lock, iflag);
}
void
-lpfc_fdmi_tmo_handler(struct lpfc_hba *phba)
+lpfc_fdmi_timeout_handler(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp;
- ndlp = lpfc_findnode_did(phba, FDMI_DID);
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
if (ndlp) {
- if (init_utsname()->nodename[0] != '\0') {
- lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
- } else {
- mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
- }
+ if (init_utsname()->nodename[0] != '\0')
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
+ else
+ mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
}
return;
}
-
void
-lpfc_decode_firmware_rev(struct lpfc_hba * phba, char *fwrevision, int flag)
+lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
{
struct lpfc_sli *psli = &phba->sli;
lpfc_vpd_t *vp = &phba->vpd;
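The lpfc_ct.c changes above drop the old sprintf()-based lpfc_get_hba_sym_node_name() in favor of lpfc_vport_symbolic_port_name()/lpfc_vport_symbolic_node_name(), which build the RSPN_ID/RSNN_NN symbolic names with length-bounded snprintf() appends. A minimal, self-contained sketch of that bounded-append idiom follows; the function and parameter names are invented for illustration and are not driver symbols.

/* Sketch only: mirrors the snprintf accumulation used by
 * lpfc_vport_symbolic_port_name(). Each append is limited to the space
 * remaining in the caller's buffer, so a long model or firmware string
 * can no longer overrun the CT payload the way an unchecked sprintf could.
 */
#include <stdio.h>

static int build_symbolic_name(char *buf, int size, const char *model,
			       const char *fwrev, int vpi, int physical)
{
	int n;

	n = snprintf(buf, size, "Emulex %s FV%s", model, fwrev);
	if (physical)
		return n;		/* physical port: no VPort suffix */

	/* snprintf returns the length it wanted, which may exceed 'size'
	 * on truncation, so check before appending more.
	 */
	if (n < size)
		n += snprintf(buf + n, size - n, " VPort-%d", vpi);
	return n;
}

int main(void)
{
	char sym[64];
	int len = build_symbolic_name(sym, sizeof(sym), "LPe11000", "2.70A5",
				      3, 0);

	printf("%d: %s\n", len, sym);	/* prints the name and its length */
	return 0;
}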
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
new file mode 100644
index 00000000000..673cfe11cc2
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -0,0 +1,508 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2007 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/ctype.h>
+#include <linux/version.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_version.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
+
+#ifdef CONFIG_LPFC_DEBUG_FS
+/* debugfs interface
+ *
+ * To access this interface the user should:
+ * # mkdir /debug
+ * # mount -t debugfs none /debug
+ *
+ * The lpfc debugfs directory hierarchy is:
+ * lpfc/lpfcX/vportY
+ * where X is the lpfc hba unique_id
+ * where Y is the vport VPI on that hba
+ *
+ * Debugging services available per vport:
+ * discovery_trace
+ * This is an ASCII readable file that contains a trace of the last
+ * lpfc_debugfs_max_disc_trc events that happened on a specific vport.
+ * See lpfc_debugfs.h for different categories of
+ * discovery events. To enable the discovery trace, the following
+ * module parameters must be set:
+ * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support
+ * lpfc_debugfs_max_disc_trc=X Where X is the event trace depth for
+ * EACH vport. X MUST also be a power of 2.
+ * lpfc_debugfs_mask_disc_trc=Y Where Y is an event mask as defined in
+ * lpfc_debugfs.h .
+ */
+static int lpfc_debugfs_enable = 0;
+module_param(lpfc_debugfs_enable, int, 0);
+MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services");
+
+static int lpfc_debugfs_max_disc_trc = 0; /* This MUST be a power of 2 */
+module_param(lpfc_debugfs_max_disc_trc, int, 0);
+MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc,
+ "Set debugfs discovery trace depth");
+
+static int lpfc_debugfs_mask_disc_trc = 0;
+module_param(lpfc_debugfs_mask_disc_trc, int, 0);
+MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
+ "Set debugfs discovery trace mask");
+
+#include <linux/debugfs.h>
+
+/* size of discovery_trace output line */
+#define LPFC_DISC_TRC_ENTRY_SIZE 80
+
+/* nodelist output buffer size */
+#define LPFC_NODELIST_SIZE 8192
+#define LPFC_NODELIST_ENTRY_SIZE 120
+
+struct lpfc_debug {
+ char *buffer;
+ int len;
+};
+
+atomic_t lpfc_debugfs_disc_trc_cnt = ATOMIC_INIT(0);
+unsigned long lpfc_debugfs_start_time = 0L;
+
+static int
+lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
+{
+ int i, index, len, enable;
+ uint32_t ms;
+ struct lpfc_disc_trc *dtp;
+ char buffer[80];
+
+
+ enable = lpfc_debugfs_enable;
+ lpfc_debugfs_enable = 0;
+
+ len = 0;
+ index = (atomic_read(&vport->disc_trc_cnt) + 1) &
+ (lpfc_debugfs_max_disc_trc - 1);
+ for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
+ dtp = vport->disc_trc + i;
+ if (!dtp->fmt)
+ continue;
+ ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
+ snprintf(buffer, 80, "%010d:%010d ms:%s\n",
+ dtp->seq_cnt, ms, dtp->fmt);
+ len += snprintf(buf+len, size-len, buffer,
+ dtp->data1, dtp->data2, dtp->data3);
+ }
+ for (i = 0; i < index; i++) {
+ dtp = vport->disc_trc + i;
+ if (!dtp->fmt)
+ continue;
+ ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
+ snprintf(buffer, 80, "%010d:%010d ms:%s\n",
+ dtp->seq_cnt, ms, dtp->fmt);
+ len += snprintf(buf+len, size-len, buffer,
+ dtp->data1, dtp->data2, dtp->data3);
+ }
+
+ lpfc_debugfs_enable = enable;
+ return len;
+}
+
+static int
+lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
+{
+ int len = 0;
+ int cnt;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp;
+ unsigned char *statep, *name;
+
+ cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
+
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!cnt) {
+ len += snprintf(buf+len, size-len,
+ "Missing Nodelist Entries\n");
+ break;
+ }
+ cnt--;
+ switch (ndlp->nlp_state) {
+ case NLP_STE_UNUSED_NODE:
+ statep = "UNUSED";
+ break;
+ case NLP_STE_PLOGI_ISSUE:
+ statep = "PLOGI ";
+ break;
+ case NLP_STE_ADISC_ISSUE:
+ statep = "ADISC ";
+ break;
+ case NLP_STE_REG_LOGIN_ISSUE:
+ statep = "REGLOG";
+ break;
+ case NLP_STE_PRLI_ISSUE:
+ statep = "PRLI ";
+ break;
+ case NLP_STE_UNMAPPED_NODE:
+ statep = "UNMAP ";
+ break;
+ case NLP_STE_MAPPED_NODE:
+ statep = "MAPPED";
+ break;
+ case NLP_STE_NPR_NODE:
+ statep = "NPR ";
+ break;
+ default:
+ statep = "UNKNOWN";
+ }
+ len += snprintf(buf+len, size-len, "%s DID:x%06x ",
+ statep, ndlp->nlp_DID);
+ name = (unsigned char *)&ndlp->nlp_portname;
+ len += snprintf(buf+len, size-len,
+ "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
+ *name, *(name+1), *(name+2), *(name+3),
+ *(name+4), *(name+5), *(name+6), *(name+7));
+ name = (unsigned char *)&ndlp->nlp_nodename;
+ len += snprintf(buf+len, size-len,
+ "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
+ *name, *(name+1), *(name+2), *(name+3),
+ *(name+4), *(name+5), *(name+6), *(name+7));
+ len += snprintf(buf+len, size-len, "RPI:%03d flag:x%08x ",
+ ndlp->nlp_rpi, ndlp->nlp_flag);
+ if (!ndlp->nlp_type)
+ len += snprintf(buf+len, size-len, "UNKNOWN_TYPE");
+ if (ndlp->nlp_type & NLP_FC_NODE)
+ len += snprintf(buf+len, size-len, "FC_NODE ");
+ if (ndlp->nlp_type & NLP_FABRIC)
+ len += snprintf(buf+len, size-len, "FABRIC ");
+ if (ndlp->nlp_type & NLP_FCP_TARGET)
+ len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ",
+ ndlp->nlp_sid);
+ if (ndlp->nlp_type & NLP_FCP_INITIATOR)
+ len += snprintf(buf+len, size-len, "FCP_INITIATOR");
+ len += snprintf(buf+len, size-len, "\n");
+ }
+ spin_unlock_irq(shost->host_lock);
+ return len;
+}
+#endif
+
+
+inline void
+lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
+ uint32_t data1, uint32_t data2, uint32_t data3)
+{
+#ifdef CONFIG_LPFC_DEBUG_FS
+ struct lpfc_disc_trc *dtp;
+ int index;
+
+ if (!(lpfc_debugfs_mask_disc_trc & mask))
+ return;
+
+ if (!lpfc_debugfs_enable || !lpfc_debugfs_max_disc_trc ||
+ !vport || !vport->disc_trc)
+ return;
+
+ index = atomic_inc_return(&vport->disc_trc_cnt) &
+ (lpfc_debugfs_max_disc_trc - 1);
+ dtp = vport->disc_trc + index;
+ dtp->fmt = fmt;
+ dtp->data1 = data1;
+ dtp->data2 = data2;
+ dtp->data3 = data3;
+ dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_disc_trc_cnt);
+ dtp->jif = jiffies;
+#endif
+ return;
+}
+
+#ifdef CONFIG_LPFC_DEBUG_FS
+static int
+lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_vport *vport = inode->i_private;
+ struct lpfc_debug *debug;
+ int size;
+ int rc = -ENOMEM;
+
+ if (!lpfc_debugfs_max_disc_trc) {
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ /* Round to page boundary */
+ size = (lpfc_debugfs_max_disc_trc * LPFC_DISC_TRC_ENTRY_SIZE);
+ size = PAGE_ALIGN(size);
+
+ debug->buffer = kmalloc(size, GFP_KERNEL);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = lpfc_debugfs_disc_trc_data(vport, debug->buffer, size);
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static int
+lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_vport *vport = inode->i_private;
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ /* Fixed-size nodelist buffer; no page rounding needed */
+ debug->buffer = kmalloc(LPFC_NODELIST_SIZE, GFP_KERNEL);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = lpfc_debugfs_nodelist_data(vport, debug->buffer,
+ LPFC_NODELIST_SIZE);
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static loff_t
+lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
+{
+ struct lpfc_debug *debug;
+ loff_t pos = -1;
+
+ debug = file->private_data;
+
+ switch (whence) {
+ case 0:
+ pos = off;
+ break;
+ case 1:
+ pos = file->f_pos + off;
+ break;
+ case 2:
+ pos = debug->len - off;
+ }
+ return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
+}
+
+static ssize_t
+lpfc_debugfs_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ return simple_read_from_buffer(buf, nbytes, ppos, debug->buffer,
+ debug->len);
+}
+
+static int
+lpfc_debugfs_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ kfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+#undef lpfc_debugfs_op_disc_trc
+static struct file_operations lpfc_debugfs_op_disc_trc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_disc_trc_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .release = lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_nodelist
+static struct file_operations lpfc_debugfs_op_nodelist = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_nodelist_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .release = lpfc_debugfs_release,
+};
+
+static struct dentry *lpfc_debugfs_root = NULL;
+static atomic_t lpfc_debugfs_hba_count;
+#endif
+
+inline void
+lpfc_debugfs_initialize(struct lpfc_vport *vport)
+{
+#ifdef CONFIG_LPFC_DEBUG_FS
+ struct lpfc_hba *phba = vport->phba;
+ char name[64];
+ uint32_t num, i;
+
+ if (!lpfc_debugfs_enable)
+ return;
+
+ if (lpfc_debugfs_max_disc_trc) {
+ num = lpfc_debugfs_max_disc_trc - 1;
+ if (num & lpfc_debugfs_max_disc_trc) {
+ /* Change to be a power of 2 */
+ num = lpfc_debugfs_max_disc_trc;
+ i = 0;
+ while (num > 1) {
+ num = num >> 1;
+ i++;
+ }
+ lpfc_debugfs_max_disc_trc = (1 << i);
+ printk(KERN_ERR
+ "lpfc_debugfs_max_disc_trc changed to %d\n",
+ lpfc_debugfs_max_disc_trc);
+ }
+ }
+
+ if (!lpfc_debugfs_root) {
+ lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL);
+ atomic_set(&lpfc_debugfs_hba_count, 0);
+ if (!lpfc_debugfs_root)
+ goto debug_failed;
+ }
+
+ snprintf(name, sizeof(name), "lpfc%d", phba->brd_no);
+ if (!phba->hba_debugfs_root) {
+ phba->hba_debugfs_root =
+ debugfs_create_dir(name, lpfc_debugfs_root);
+ if (!phba->hba_debugfs_root)
+ goto debug_failed;
+ atomic_inc(&lpfc_debugfs_hba_count);
+ atomic_set(&phba->debugfs_vport_count, 0);
+ }
+
+ snprintf(name, sizeof(name), "vport%d", vport->vpi);
+ if (!vport->vport_debugfs_root) {
+ vport->vport_debugfs_root =
+ debugfs_create_dir(name, phba->hba_debugfs_root);
+ if (!vport->vport_debugfs_root)
+ goto debug_failed;
+ atomic_inc(&phba->debugfs_vport_count);
+ }
+
+ if (!lpfc_debugfs_start_time)
+ lpfc_debugfs_start_time = jiffies;
+
+ vport->disc_trc = kmalloc(
+ (sizeof(struct lpfc_disc_trc) * lpfc_debugfs_max_disc_trc),
+ GFP_KERNEL);
+
+ if (!vport->disc_trc)
+ goto debug_failed;
+ memset(vport->disc_trc, 0,
+ (sizeof(struct lpfc_disc_trc) * lpfc_debugfs_max_disc_trc));
+
+ snprintf(name, sizeof(name), "discovery_trace");
+ vport->debug_disc_trc =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ vport->vport_debugfs_root,
+ vport, &lpfc_debugfs_op_disc_trc);
+ if (!vport->debug_disc_trc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0409 Cannot create debugfs",
+ phba->brd_no);
+ goto debug_failed;
+ }
+ snprintf(name, sizeof(name), "nodelist");
+ vport->debug_nodelist =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ vport->vport_debugfs_root,
+ vport, &lpfc_debugfs_op_nodelist);
+ if (!vport->debug_nodelist) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0409 Cannot create debugfs",
+ phba->brd_no);
+ goto debug_failed;
+ }
+debug_failed:
+ return;
+#endif
+}
+
+
+inline void
+lpfc_debugfs_terminate(struct lpfc_vport *vport)
+{
+#ifdef CONFIG_LPFC_DEBUG_FS
+ struct lpfc_hba *phba = vport->phba;
+
+ if (vport->disc_trc) {
+ kfree(vport->disc_trc);
+ vport->disc_trc = NULL;
+ }
+ if (vport->debug_disc_trc) {
+ debugfs_remove(vport->debug_disc_trc); /* discovery_trace */
+ vport->debug_disc_trc = NULL;
+ }
+ if (vport->debug_nodelist) {
+ debugfs_remove(vport->debug_nodelist); /* nodelist */
+ vport->debug_nodelist = NULL;
+ }
+ if (vport->vport_debugfs_root) {
+ debugfs_remove(vport->vport_debugfs_root); /* vportX */
+ vport->vport_debugfs_root = NULL;
+ atomic_dec(&phba->debugfs_vport_count);
+ }
+ if (atomic_read(&phba->debugfs_vport_count) == 0) {
+ debugfs_remove(vport->phba->hba_debugfs_root); /* lpfcX */
+ vport->phba->hba_debugfs_root = NULL;
+ atomic_dec(&lpfc_debugfs_hba_count);
+ if (atomic_read(&lpfc_debugfs_hba_count) == 0) {
+ debugfs_remove(lpfc_debugfs_root); /* lpfc */
+ lpfc_debugfs_root = NULL;
+ }
+ }
+#endif
+}
+
+
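The discovery trace added in lpfc_debugfs.c above is a per-vport ring: lpfc_debugfs_initialize() forces lpfc_debugfs_max_disc_trc to a power of two, lpfc_debugfs_disc_trc() indexes the array with the incremented counter masked by (depth - 1), and the read side replays the slots starting one past the newest entry. A small standalone sketch of that power-of-two ring idiom is below; it uses plain C with invented names and omits locking (the driver relies on atomic_inc_return() for the counter).

/* Sketch only: power-of-two ring buffer indexed by a monotonically
 * increasing counter, as used for the lpfc discovery trace. Because the
 * depth is a power of two, "count & (depth - 1)" replaces a modulo.
 */
#include <stdio.h>

#define TRC_DEPTH 8			/* must be a power of two */

struct trc_entry {
	unsigned int seq;		/* global sequence number */
	const char *msg;
};

static struct trc_entry ring[TRC_DEPTH];
static unsigned int trc_cnt;

static void trc(const char *msg)
{
	unsigned int idx = ++trc_cnt & (TRC_DEPTH - 1);

	ring[idx].seq = trc_cnt;
	ring[idx].msg = msg;
}

int main(void)
{
	char buf[16][24];
	unsigned int i, start;

	for (i = 0; i < 11; i++) {
		snprintf(buf[i], sizeof(buf[i]), "event %u", i);
		trc(buf[i]);
	}

	/* Replay oldest-to-newest: start one slot past the newest entry,
	 * walk the whole ring, and skip slots that were never written.
	 */
	start = (trc_cnt + 1) & (TRC_DEPTH - 1);
	for (i = 0; i < TRC_DEPTH; i++) {
		struct trc_entry *e = &ring[(start + i) & (TRC_DEPTH - 1)];

		if (e->msg)
			printf("%u: %s\n", e->seq, e->msg);
	}
	return 0;
}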
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
new file mode 100644
index 00000000000..fffb678426a
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -0,0 +1,50 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2007 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#ifndef _H_LPFC_DEBUG_FS
+#define _H_LPFC_DEBUG_FS
+
+#ifdef CONFIG_LPFC_DEBUG_FS
+struct lpfc_disc_trc {
+ char *fmt;
+ uint32_t data1;
+ uint32_t data2;
+ uint32_t data3;
+ uint32_t seq_cnt;
+ unsigned long jif;
+};
+#endif
+
+/* Mask for discovery_trace */
+#define LPFC_DISC_TRC_ELS_CMD 0x1 /* Trace ELS commands */
+#define LPFC_DISC_TRC_ELS_RSP 0x2 /* Trace ELS response */
+#define LPFC_DISC_TRC_ELS_UNSOL 0x4 /* Trace ELS rcv'ed */
+#define LPFC_DISC_TRC_ELS_ALL 0x7 /* Trace ELS */
+#define LPFC_DISC_TRC_MBOX_VPORT 0x8 /* Trace vport MBOXs */
+#define LPFC_DISC_TRC_MBOX 0x10 /* Trace other MBOXs */
+#define LPFC_DISC_TRC_MBOX_ALL 0x18 /* Trace all MBOXs */
+#define LPFC_DISC_TRC_CT 0x20 /* Trace disc CT requests */
+#define LPFC_DISC_TRC_DSM 0x40 /* Trace DSM events */
+#define LPFC_DISC_TRC_RPORT 0x80 /* Trace rport events */
+#define LPFC_DISC_TRC_NODE 0x100 /* Trace ndlp state changes */
+
+#define LPFC_DISC_TRC_DISCOVERY 0xef /* common mask for general
+ * discovery */
+#endif /* H_LPFC_DEBUG_FS */
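The LPFC_DISC_TRC_* values above are single bits plus a few pre-OR'd composites (LPFC_DISC_TRC_ELS_ALL, LPFC_DISC_TRC_MBOX_ALL), and lpfc_debugfs_disc_trc() records an event only when the corresponding bit is set in the lpfc_debugfs_mask_disc_trc module parameter; loading the driver with, for example, lpfc_debugfs_enable=1 lpfc_debugfs_max_disc_trc=64 lpfc_debugfs_mask_disc_trc=0x20 would trace only CT traffic. A minimal illustration of the mask test follows; the names are invented and the values simply mirror the defines above.

/* Illustration only: how a composite trace mask gates individual event
 * categories. One AND decides whether an event is recorded.
 */
#include <stdio.h>

#define TRC_ELS_CMD	0x001
#define TRC_ELS_RSP	0x002
#define TRC_ELS_UNSOL	0x004
#define TRC_ELS_ALL	(TRC_ELS_CMD | TRC_ELS_RSP | TRC_ELS_UNSOL)
#define TRC_CT		0x020

static unsigned int trc_mask = TRC_ELS_ALL | TRC_CT;	/* like the module param */

static int trc_wanted(unsigned int category)
{
	return (trc_mask & category) != 0;
}

int main(void)
{
	printf("ELS command events traced: %d\n", trc_wanted(TRC_ELS_CMD));
	printf("CT events traced:          %d\n", trc_wanted(TRC_CT));
	printf("Mailbox events traced:     %d\n", trc_wanted(0x010));
	return 0;
}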
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 498059f3f7f..aacac9ac538 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -36,21 +36,23 @@ enum lpfc_work_type {
LPFC_EVT_WARM_START,
LPFC_EVT_KILL,
LPFC_EVT_ELS_RETRY,
+ LPFC_EVT_DEV_LOSS_DELAY,
+ LPFC_EVT_DEV_LOSS,
};
/* structure used to queue event to the discovery tasklet */
struct lpfc_work_evt {
struct list_head evt_listp;
- void * evt_arg1;
- void * evt_arg2;
+ void *evt_arg1;
+ void *evt_arg2;
enum lpfc_work_type evt;
};
struct lpfc_nodelist {
struct list_head nlp_listp;
- struct lpfc_name nlp_portname; /* port name */
- struct lpfc_name nlp_nodename; /* node name */
+ struct lpfc_name nlp_portname;
+ struct lpfc_name nlp_nodename;
uint32_t nlp_flag; /* entry flags */
uint32_t nlp_DID; /* FC D_ID of entry */
uint32_t nlp_last_elscmd; /* Last ELS cmd sent */
@@ -75,8 +77,9 @@ struct lpfc_nodelist {
struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
struct fc_rport *rport; /* Corresponding FC transport
port structure */
- struct lpfc_hba *nlp_phba;
+ struct lpfc_vport *vport;
struct lpfc_work_evt els_retry_evt;
+ struct lpfc_work_evt dev_loss_evt;
unsigned long last_ramp_up_time; /* jiffy of last ramp up */
unsigned long last_q_full_time; /* jiffy of last queue full */
struct kref kref;
@@ -98,7 +101,9 @@ struct lpfc_nodelist {
ACC */
#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from
NPR list */
+#define NLP_RM_DFLT_RPI 0x4000000 /* need to remove leftover dflt RPI */
#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */
+#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
/* There are 4 different double linked lists nodelist entries can reside on.
* The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 638b3cd677b..33fbc166694 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -35,38 +35,38 @@
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
+static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
+
static int lpfc_max_els_tries = 3;
-static int
-lpfc_els_chk_latt(struct lpfc_hba * phba)
+int
+lpfc_els_chk_latt(struct lpfc_vport *vport)
{
- struct lpfc_sli *psli;
- LPFC_MBOXQ_t *mbox;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
uint32_t ha_copy;
- int rc;
- psli = &phba->sli;
-
- if ((phba->hba_state >= LPFC_HBA_READY) ||
- (phba->hba_state == LPFC_LINK_DOWN))
+ if (vport->port_state >= LPFC_VPORT_READY ||
+ phba->link_state == LPFC_LINK_DOWN)
return 0;
/* Read the HBA Host Attention Register */
- spin_lock_irq(phba->host->host_lock);
ha_copy = readl(phba->HAregaddr);
- spin_unlock_irq(phba->host->host_lock);
if (!(ha_copy & HA_LATT))
return 0;
/* Pending Link Event during Discovery */
- lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
- "%d:0237 Pending Link Event during "
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0237 Pending Link Event during "
"Discovery: State x%x\n",
- phba->brd_no, phba->hba_state);
+ phba->brd_no, vport->vpi, phba->pport->port_state);
/* CLEAR_LA should re-enable link attention events and
 * we should then immediately take a LATT event. The
@@ -74,48 +74,34 @@ lpfc_els_chk_latt(struct lpfc_hba * phba)
* will cleanup any left over in-progress discovery
* events.
*/
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_ABORT_DISCOVERY;
- spin_unlock_irq(phba->host->host_lock);
-
- if (phba->hba_state != LPFC_CLEAR_LA) {
- if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
- phba->hba_state = LPFC_CLEAR_LA;
- lpfc_clear_la(phba, mbox);
- mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
- rc = lpfc_sli_issue_mbox (phba, mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- phba->hba_state = LPFC_HBA_ERROR;
- }
- }
- }
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_ABORT_DISCOVERY;
+ spin_unlock_irq(shost->host_lock);
- return 1;
+ if (phba->link_state != LPFC_CLEAR_LA)
+ lpfc_issue_clear_la(phba, vport);
+ return 1;
}
static struct lpfc_iocbq *
-lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
- uint16_t cmdSize, uint8_t retry, struct lpfc_nodelist * ndlp,
- uint32_t did, uint32_t elscmd)
+lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
+ uint16_t cmdSize, uint8_t retry,
+ struct lpfc_nodelist *ndlp, uint32_t did,
+ uint32_t elscmd)
{
- struct lpfc_sli_ring *pring;
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
struct ulp_bde64 *bpl;
IOCB_t *icmd;
- pring = &phba->sli.ring[LPFC_ELS_RING];
- if (phba->hba_state < LPFC_LINK_UP)
- return NULL;
+ if (!lpfc_is_link_up(phba))
+ return NULL;
/* Allocate buffer for command iocb */
- spin_lock_irq(phba->host->host_lock);
elsiocb = lpfc_sli_get_iocbq(phba);
- spin_unlock_irq(phba->host->host_lock);
if (elsiocb == NULL)
return NULL;
@@ -123,14 +109,12 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
/* fill in BDEs for command */
/* Allocate buffer for command payload */
- if (((pcmd = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
+ if (((pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
((pcmd->virt = lpfc_mbuf_alloc(phba,
MEM_PRI, &(pcmd->phys))) == 0)) {
kfree(pcmd);
- spin_lock_irq(phba->host->host_lock);
lpfc_sli_release_iocbq(phba, elsiocb);
- spin_unlock_irq(phba->host->host_lock);
return NULL;
}
@@ -138,7 +122,7 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
/* Allocate buffer for response payload */
if (expectRsp) {
- prsp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (prsp)
prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
&prsp->phys);
@@ -146,9 +130,7 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
kfree(prsp);
lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
kfree(pcmd);
- spin_lock_irq(phba->host->host_lock);
lpfc_sli_release_iocbq(phba, elsiocb);
- spin_unlock_irq(phba->host->host_lock);
return NULL;
}
INIT_LIST_HEAD(&prsp->list);
@@ -157,14 +139,12 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
}
/* Allocate buffer for Buffer ptr list */
- pbuflist = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (pbuflist)
- pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
- &pbuflist->phys);
+ pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+ &pbuflist->phys);
if (pbuflist == 0 || pbuflist->virt == 0) {
- spin_lock_irq(phba->host->host_lock);
lpfc_sli_release_iocbq(phba, elsiocb);
- spin_unlock_irq(phba->host->host_lock);
lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
kfree(pcmd);
@@ -178,20 +158,28 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
+ icmd->un.elsreq64.remoteID = did; /* DID */
if (expectRsp) {
- icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
- icmd->un.elsreq64.remoteID = did; /* DID */
+ icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
icmd->ulpTimeout = phba->fc_ratov * 2;
} else {
- icmd->un.elsreq64.bdl.bdeSize = sizeof (struct ulp_bde64);
+ icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
}
-
icmd->ulpBdeCount = 1;
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ icmd->un.elsreq64.myID = vport->fc_myDID;
+
+ /* For ELS_REQUEST64_CR, use the VPI by default */
+ icmd->ulpContext = vport->vpi;
+ icmd->ulpCt_h = 0;
+ icmd->ulpCt_l = 1;
+ }
+
bpl = (struct ulp_bde64 *) pbuflist->virt;
bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
@@ -209,10 +197,12 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
}
/* Save for completion so we can release these resources */
- elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (elscmd != ELS_CMD_LS_RJT)
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
elsiocb->context2 = pcmd;
elsiocb->context3 = pbuflist;
elsiocb->retry = retry;
+ elsiocb->vport = vport;
elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
if (prsp) {
@@ -222,16 +212,16 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
if (expectRsp) {
/* Xmit ELS command <elsCmd> to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0116 Xmit ELS command x%x to remote "
- "NPORT x%x I/O tag: x%x, HBA state: x%x\n",
- phba->brd_no, elscmd,
- did, elsiocb->iotag, phba->hba_state);
+ "%d (%d):0116 Xmit ELS command x%x to remote "
+ "NPORT x%x I/O tag: x%x, port state: x%x\n",
+ phba->brd_no, vport->vpi, elscmd, did,
+ elsiocb->iotag, vport->port_state);
} else {
/* Xmit ELS response <elsCmd> to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0117 Xmit ELS response x%x to remote "
+ "%d (%d):0117 Xmit ELS response x%x to remote "
"NPORT x%x I/O tag: x%x, size: x%x\n",
- phba->brd_no, elscmd,
+ phba->brd_no, vport->vpi, elscmd,
ndlp->nlp_DID, elsiocb->iotag, cmdSize);
}
@@ -240,16 +230,79 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
static int
-lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
- struct serv_parm *sp, IOCB_t *irsp)
+lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
+ struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ndlp;
+ struct serv_parm *sp;
int rc;
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_FABRIC;
- spin_unlock_irq(phba->host->host_lock);
+ sp = &phba->fc_fabparam;
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp)
+ goto fail;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto fail;
+
+ vport->port_state = LPFC_FABRIC_CFG_LINK;
+ lpfc_config_link(phba, mbox);
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
+ if (rc == MBX_NOT_FINISHED)
+ goto fail_free_mbox;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto fail;
+ rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
+ 0);
+ if (rc)
+ goto fail_free_mbox;
+
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
+ mbox->vport = vport;
+ mbox->context2 = lpfc_nlp_get(ndlp);
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
+ if (rc == MBX_NOT_FINISHED)
+ goto fail_issue_reg_login;
+
+ return 0;
+
+fail_issue_reg_login:
+ lpfc_nlp_put(ndlp);
+ mp = (struct lpfc_dmabuf *) mbox->context1;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+fail_free_mbox:
+ mempool_free(mbox, phba->mbox_mem_pool);
+
+fail:
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0249 Cannot issue Register Fabric login\n",
+ phba->brd_no, vport->vpi);
+ return -ENXIO;
+}
+
+static int
+lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct serv_parm *sp, IOCB_t *irsp)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *np;
+ struct lpfc_nodelist *next_np;
+
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_FABRIC;
+ spin_unlock_irq(shost->host_lock);
phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
@@ -258,20 +311,20 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
if (phba->fc_topology == TOPOLOGY_LOOP) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_PUBLIC_LOOP;
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_PUBLIC_LOOP;
+ spin_unlock_irq(shost->host_lock);
} else {
/*
* If we are a N-port connected to a Fabric, fixup sparam's so
* logins to devices on remote loops work.
*/
- phba->fc_sparam.cmn.altBbCredit = 1;
+ vport->fc_sparam.cmn.altBbCredit = 1;
}
- phba->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+ vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
- memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
+ memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
ndlp->nlp_class_sup = 0;
if (sp->cls1.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -285,68 +338,85 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
sp->cmn.bbRcvSizeLsb;
memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- goto fail;
-
- phba->hba_state = LPFC_FABRIC_CFG_LINK;
- lpfc_config_link(phba, mbox);
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ if (sp->cmn.response_multiple_NPort) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_VPORT,
+ "%d:1816 FLOGI NPIV supported, "
+ "response data 0x%x\n",
+ phba->brd_no,
+ sp->cmn.response_multiple_NPort);
+ phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
- if (rc == MBX_NOT_FINISHED)
- goto fail_free_mbox;
-
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- goto fail;
+ } else {
+ /* Because we asked f/w for NPIV, it still expects us
+ to call reg_vnpid at least for the physical host */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_VPORT,
+ "%d:1817 Fabric does not support NPIV "
+ "- configuring single port mode.\n",
+ phba->brd_no);
+ phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+ }
+ }
- if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0))
- goto fail_free_mbox;
+ if ((vport->fc_prevDID != vport->fc_myDID) &&
+ !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
- mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
- mbox->context2 = lpfc_nlp_get(ndlp);
+ /* If our NportID changed, we need to ensure all
+ * remaining NPORTs get unreg_login'ed.
+ */
+ list_for_each_entry_safe(np, next_np,
+ &vport->fc_nodes, nlp_listp) {
+ if ((np->nlp_state != NLP_STE_NPR_NODE) ||
+ !(np->nlp_flag & NLP_NPR_ADISC))
+ continue;
+ spin_lock_irq(shost->host_lock);
+ np->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_unreg_rpi(vport, np);
+ }
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ lpfc_mbx_unreg_vpi(vport);
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ }
+ }
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
- if (rc == MBX_NOT_FINISHED)
- goto fail_issue_reg_login;
+ ndlp->nlp_sid = irsp->un.ulpWord[4] & Mask_DID;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
+ vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) {
+ lpfc_register_new_vport(phba, vport, ndlp);
+ return 0;
+ }
+ lpfc_issue_fabric_reglogin(vport);
return 0;
-
- fail_issue_reg_login:
- lpfc_nlp_put(ndlp);
- mp = (struct lpfc_dmabuf *) mbox->context1;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- fail_free_mbox:
- mempool_free(mbox, phba->mbox_mem_pool);
- fail:
- return -ENXIO;
}
/*
* We FLOGIed into an NPort, initiate pt2pt protocol
*/
static int
-lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
- struct serv_parm *sp)
+lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct serv_parm *sp)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
int rc;
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
phba->fc_edtov = FF_DEF_EDTOV;
phba->fc_ratov = FF_DEF_RATOV;
- rc = memcmp(&phba->fc_portname, &sp->portName,
- sizeof(struct lpfc_name));
+ rc = memcmp(&vport->fc_portname, &sp->portName,
+ sizeof(vport->fc_portname));
if (rc >= 0) {
/* This side will initiate the PLOGI */
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_PT2PT_PLOGI;
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_PT2PT_PLOGI;
+ spin_unlock_irq(shost->host_lock);
/*
* N_Port ID cannot be 0, set our to LocalID the other
@@ -355,7 +425,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/* not equal */
if (rc)
- phba->fc_myDID = PT2PT_LocalID;
+ vport->fc_myDID = PT2PT_LocalID;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -364,15 +434,16 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
lpfc_config_link(phba, mbox);
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
rc = lpfc_sli_issue_mbox(phba, mbox,
- MBX_NOWAIT | MBX_STOP_IOCB);
+ MBX_NOWAIT | MBX_STOP_IOCB);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
goto fail;
}
lpfc_nlp_put(ndlp);
- ndlp = lpfc_findnode_did(phba, PT2PT_RemoteID);
+ ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
if (!ndlp) {
/*
* Cannot find existing Fabric ndlp, so allocate a
@@ -382,28 +453,30 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
if (!ndlp)
goto fail;
- lpfc_nlp_init(phba, ndlp, PT2PT_RemoteID);
+ lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
}
memcpy(&ndlp->nlp_portname, &sp->portName,
- sizeof(struct lpfc_name));
+ sizeof(struct lpfc_name));
memcpy(&ndlp->nlp_nodename, &sp->nodeName,
- sizeof(struct lpfc_name));
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ sizeof(struct lpfc_name));
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
} else {
/* This side will wait for the PLOGI */
lpfc_nlp_put(ndlp);
}
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_PT2PT;
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_PT2PT;
+ spin_unlock_irq(shost->host_lock);
/* Start discovery - this should just do CLEAR_LA */
- lpfc_disc_start(phba);
+ lpfc_disc_start(vport);
return 0;
- fail:
+fail:
return -ENXIO;
}
@@ -411,6 +484,8 @@ static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_nodelist *ndlp = cmdiocb->context1;
struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
@@ -418,21 +493,25 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
int rc;
/* Check to see if link went down during discovery */
- if (lpfc_els_chk_latt(phba)) {
+ if (lpfc_els_chk_latt(vport)) {
lpfc_nlp_put(ndlp);
goto out;
}
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "FLOGI cmpl: status:x%x/x%x state:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ vport->port_state);
+
if (irsp->ulpStatus) {
/* Check for retry */
- if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
- /* ELS command is being retried */
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
- }
+
/* FLOGI failed, so there is no fabric */
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
/* If private loop, then allow max outstanding els to be
* LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
@@ -443,11 +522,10 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
/* FLOGI failure */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_ELS,
- "%d:0100 FLOGI failure Data: x%x x%x x%x\n",
- phba->brd_no,
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d (%d):0100 FLOGI failure Data: x%x x%x "
+ "x%x\n",
+ phba->brd_no, vport->vpi,
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout);
goto flogifail;
@@ -463,21 +541,21 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* FLOGI completes successfully */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0101 FLOGI completes sucessfully "
+ "%d (%d):0101 FLOGI completes successfully "
"Data: x%x x%x x%x x%x\n",
- phba->brd_no,
+ phba->brd_no, vport->vpi,
irsp->un.ulpWord[4], sp->cmn.e_d_tov,
sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
- if (phba->hba_state == LPFC_FLOGI) {
+ if (vport->port_state == LPFC_FLOGI) {
/*
* If Common Service Parameters indicate Nport
* we are point to point, if Fport we are Fabric.
*/
if (sp->cmn.fPort)
- rc = lpfc_cmpl_els_flogi_fabric(phba, ndlp, sp, irsp);
+ rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
else
- rc = lpfc_cmpl_els_flogi_nport(phba, ndlp, sp);
+ rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
if (!rc)
goto out;
@@ -486,14 +564,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
flogifail:
lpfc_nlp_put(ndlp);
- if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
- (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED &&
- irsp->un.ulpWord[4] != IOERR_SLI_DOWN)) {
+ if (!lpfc_error_lost_link(irsp)) {
/* FLOGI failed, so just use loop map to make discovery list */
- lpfc_disc_list_loopmap(phba);
+ lpfc_disc_list_loopmap(vport);
/* Start discovery */
- lpfc_disc_start(phba);
+ lpfc_disc_start(vport);
}
out:
@@ -501,9 +577,10 @@ out:
}
static int
-lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
+ struct lpfc_hba *phba = vport->phba;
struct serv_parm *sp;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
@@ -515,9 +592,10 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
pring = &phba->sli.ring[LPFC_ELS_RING];
- cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
- elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
- ndlp->nlp_DID, ELS_CMD_FLOGI);
+ cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_FLOGI);
+
if (!elsiocb)
return 1;
@@ -526,8 +604,8 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
/* For FLOGI request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
- pcmd += sizeof (uint32_t);
- memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
+ pcmd += sizeof(uint32_t);
+ memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
sp = (struct serv_parm *) pcmd;
/* Setup CSPs accordingly for Fabric */
@@ -541,16 +619,32 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
if (sp->cmn.fcphHigh < FC_PH3)
sp->cmn.fcphHigh = FC_PH3;
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ sp->cmn.request_multiple_Nport = 1;
+
+ /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
+ icmd->ulpCt_h = 1;
+ icmd->ulpCt_l = 0;
+ }
+
+ if (phba->fc_topology != TOPOLOGY_LOOP) {
+ icmd->un.elsreq64.myID = 0;
+ icmd->un.elsreq64.fl = 1;
+ }
+
tmo = phba->fc_ratov;
phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
- lpfc_set_disctmo(phba);
+ lpfc_set_disctmo(vport);
phba->fc_ratov = tmo;
phba->fc_stat.elsXmitFLOGI++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
- spin_lock_irq(phba->host->host_lock);
- rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
- spin_unlock_irq(phba->host->host_lock);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue FLOGI: opt:x%x",
+ phba->sli3_options, 0, 0);
+
+ rc = lpfc_issue_fabric_iocb(phba, elsiocb);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@@ -559,7 +653,7 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
}
int
-lpfc_els_abort_flogi(struct lpfc_hba * phba)
+lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *iocb, *next_iocb;
@@ -577,73 +671,99 @@ lpfc_els_abort_flogi(struct lpfc_hba * phba)
* Check the txcmplq for an iocb that matches the nport the driver is
* searching for.
*/
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
icmd = &iocb->iocb;
- if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+ if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
+ icmd->un.elsreq64.bdl.ulpIoTag32) {
ndlp = (struct lpfc_nodelist *)(iocb->context1);
- if (ndlp && (ndlp->nlp_DID == Fabric_DID))
+ if (ndlp && (ndlp->nlp_DID == Fabric_DID)) {
lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ }
}
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return 0;
}
int
-lpfc_initial_flogi(struct lpfc_hba *phba)
+lpfc_initial_flogi(struct lpfc_vport *vport)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
/* First look for the Fabric ndlp */
- ndlp = lpfc_findnode_did(phba, Fabric_DID);
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp) {
/* Cannot find existing Fabric ndlp, so allocate a new one */
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
if (!ndlp)
return 0;
- lpfc_nlp_init(phba, ndlp, Fabric_DID);
+ lpfc_nlp_init(vport, ndlp, Fabric_DID);
} else {
- lpfc_dequeue_node(phba, ndlp);
+ lpfc_dequeue_node(vport, ndlp);
}
- if (lpfc_issue_els_flogi(phba, ndlp, 0)) {
+ if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
lpfc_nlp_put(ndlp);
}
return 1;
}
+int
+lpfc_initial_fdisc(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp;
+
+ /* First look for the Fabric ndlp */
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp) {
+ /* Cannot find existing Fabric ndlp, so allocate a new one */
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ return 0;
+ lpfc_nlp_init(vport, ndlp, Fabric_DID);
+ } else {
+ lpfc_dequeue_node(vport, ndlp);
+ }
+ if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
+ lpfc_nlp_put(ndlp);
+ }
+ return 1;
+}
static void
-lpfc_more_plogi(struct lpfc_hba * phba)
+lpfc_more_plogi(struct lpfc_vport *vport)
{
int sentplogi;
+ struct lpfc_hba *phba = vport->phba;
- if (phba->num_disc_nodes)
- phba->num_disc_nodes--;
+ if (vport->num_disc_nodes)
+ vport->num_disc_nodes--;
/* Continue discovery with <num_disc_nodes> PLOGIs to go */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0232 Continue discovery with %d PLOGIs to go "
+ "%d (%d):0232 Continue discovery with %d PLOGIs to go "
"Data: x%x x%x x%x\n",
- phba->brd_no, phba->num_disc_nodes, phba->fc_plogi_cnt,
- phba->fc_flag, phba->hba_state);
+ phba->brd_no, vport->vpi, vport->num_disc_nodes,
+ vport->fc_plogi_cnt, vport->fc_flag, vport->port_state);
/* Check to see if there are more PLOGIs to be sent */
- if (phba->fc_flag & FC_NLP_MORE) {
- /* go thru NPR list and issue any remaining ELS PLOGIs */
- sentplogi = lpfc_els_disc_plogi(phba);
- }
+ if (vport->fc_flag & FC_NLP_MORE)
+ /* go thru NPR nodes and issue any remaining ELS PLOGIs */
+ sentplogi = lpfc_els_disc_plogi(vport);
+
return;
}
static struct lpfc_nodelist *
-lpfc_plogi_confirm_nport(struct lpfc_hba *phba, struct lpfc_dmabuf *prsp,
+lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct lpfc_nodelist *ndlp)
{
+ struct lpfc_vport *vport = ndlp->vport;
struct lpfc_nodelist *new_ndlp;
- uint32_t *lp;
struct serv_parm *sp;
- uint8_t name[sizeof (struct lpfc_name)];
+ uint8_t name[sizeof(struct lpfc_name)];
uint32_t rc;
/* Fabric nodes can have the same WWPN so we don't bother searching
@@ -652,50 +772,51 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, struct lpfc_dmabuf *prsp,
if (ndlp->nlp_type & NLP_FABRIC)
return ndlp;
- lp = (uint32_t *) prsp->virt;
- sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+ sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
memset(name, 0, sizeof(struct lpfc_name));
/* Now we find out if the NPort we are logging into, matches the WWPN
* we have for that ndlp. If not, we have some work to do.
*/
- new_ndlp = lpfc_findnode_wwpn(phba, &sp->portName);
+ new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
if (new_ndlp == ndlp)
return ndlp;
if (!new_ndlp) {
- rc =
- memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name));
+ rc = memcmp(&ndlp->nlp_portname, name,
+ sizeof(struct lpfc_name));
if (!rc)
return ndlp;
new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
if (!new_ndlp)
return ndlp;
- lpfc_nlp_init(phba, new_ndlp, ndlp->nlp_DID);
+ lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
}
- lpfc_unreg_rpi(phba, new_ndlp);
+ lpfc_unreg_rpi(vport, new_ndlp);
new_ndlp->nlp_DID = ndlp->nlp_DID;
new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
- lpfc_nlp_set_state(phba, new_ndlp, ndlp->nlp_state);
+ lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
- /* Move this back to NPR list */
+ /* Move this back to NPR state */
if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0)
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
else {
- lpfc_unreg_rpi(phba, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
}
return new_ndlp;
}
static void
-lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *prsp;
@@ -705,32 +826,43 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
cmdiocb->context_un.rsp_iocb = rspiocb;
irsp = &rspiocb->iocb;
- ndlp = lpfc_findnode_did(phba, irsp->un.elsreq64.remoteID);
- if (!ndlp)
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "PLOGI cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->un.elsreq64.remoteID);
+
+ ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
+ if (!ndlp) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0136 PLOGI completes to NPort x%x "
+ "with no ndlp. Data: x%x x%x x%x\n",
+ phba->brd_no, vport->vpi, irsp->un.elsreq64.remoteID,
+ irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpIoTag);
goto out;
+ }
/* Since ndlp can be freed in the disc state machine, note if this node
* is being used during discovery.
*/
+ spin_lock_irq(shost->host_lock);
disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
- spin_lock_irq(phba->host->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
rc = 0;
/* PLOGI completes to NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0102 PLOGI completes to NPort x%x "
+ "%d (%d):0102 PLOGI completes to NPort x%x "
"Data: x%x x%x x%x x%x x%x\n",
- phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout, disc,
- phba->num_disc_nodes);
+ phba->brd_no, vport->vpi, ndlp->nlp_DID,
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout, disc, vport->num_disc_nodes);
/* Check to see if link went down during discovery */
- if (lpfc_els_chk_latt(phba)) {
- spin_lock_irq(phba->host->host_lock);
+ if (lpfc_els_chk_latt(vport)) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
goto out;
}
@@ -743,56 +875,62 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
if (disc) {
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
}
goto out;
}
/* PLOGI failed */
+ if (ndlp->nlp_DID == NameServer_DID) {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0250 Nameserver login error: "
+ "0x%x / 0x%x\n",
+ phba->brd_no, vport->vpi,
+ irsp->ulpStatus, irsp->un.ulpWord[4]);
+ }
+
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
- (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
- (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+ if (lpfc_error_lost_link(irsp)) {
rc = NLP_STE_FREED_NODE;
} else {
- rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
- NLP_EVT_CMPL_PLOGI);
+ rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_PLOGI);
}
} else {
/* Good status, call state machine */
prsp = list_entry(((struct lpfc_dmabuf *)
- cmdiocb->context2)->list.next,
- struct lpfc_dmabuf, list);
- ndlp = lpfc_plogi_confirm_nport(phba, prsp, ndlp);
- rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
- NLP_EVT_CMPL_PLOGI);
+ cmdiocb->context2)->list.next,
+ struct lpfc_dmabuf, list);
+ ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
+ rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_PLOGI);
}
- if (disc && phba->num_disc_nodes) {
+ if (disc && vport->num_disc_nodes) {
/* Check to see if there are more PLOGIs to be sent */
- lpfc_more_plogi(phba);
+ lpfc_more_plogi(vport);
- if (phba->num_disc_nodes == 0) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
+ if (vport->num_disc_nodes == 0) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
- lpfc_can_disctmo(phba);
- if (phba->fc_flag & FC_RSCN_MODE) {
+ lpfc_can_disctmo(vport);
+ if (vport->fc_flag & FC_RSCN_MODE) {
/*
* Check to see if more RSCNs came in while
* we were processing this one.
*/
- if ((phba->fc_rscn_id_cnt == 0) &&
- (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_RSCN_MODE;
- spin_unlock_irq(phba->host->host_lock);
+ if ((vport->fc_rscn_id_cnt == 0) &&
+ (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_RSCN_MODE;
+ spin_unlock_irq(shost->host_lock);
} else {
- lpfc_els_handle_rscn(phba);
+ lpfc_els_handle_rscn(vport);
}
}
}
@@ -804,8 +942,9 @@ out:
}
int
-lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
+lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
{
+ struct lpfc_hba *phba = vport->phba;
struct serv_parm *sp;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
@@ -813,13 +952,14 @@ lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
+ int ret;
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
- cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
- elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, NULL, did,
- ELS_CMD_PLOGI);
+ cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, NULL, did,
+ ELS_CMD_PLOGI);
if (!elsiocb)
return 1;
@@ -828,8 +968,8 @@ lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
/* For PLOGI request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
- pcmd += sizeof (uint32_t);
- memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
+ pcmd += sizeof(uint32_t);
+ memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
sp = (struct serv_parm *) pcmd;
if (sp->cmn.fcphLow < FC_PH_4_3)
@@ -838,22 +978,27 @@ lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
if (sp->cmn.fcphHigh < FC_PH3)
sp->cmn.fcphHigh = FC_PH3;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue PLOGI: did:x%x",
+ did, 0, 0);
+
phba->fc_stat.elsXmitPLOGI++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
- spin_lock_irq(phba->host->host_lock);
- if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
- spin_unlock_irq(phba->host->host_lock);
+ ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+
+ if (ret == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
- spin_unlock_irq(phba->host->host_lock);
return 0;
}
static void
-lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_sli *psli;
struct lpfc_nodelist *ndlp;
@@ -864,21 +1009,26 @@ lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
irsp = &(rspiocb->iocb);
ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "PRLI cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ ndlp->nlp_DID);
/* PRLI completes to NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0103 PRLI completes to NPort x%x "
+ "%d (%d):0103 PRLI completes to NPort x%x "
"Data: x%x x%x x%x x%x\n",
- phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout,
- phba->num_disc_nodes);
+ phba->brd_no, vport->vpi, ndlp->nlp_DID,
+ irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout,
+ vport->num_disc_nodes);
- phba->fc_prli_sent--;
+ vport->fc_prli_sent--;
/* Check to see if link went down during discovery */
- if (lpfc_els_chk_latt(phba))
+ if (lpfc_els_chk_latt(vport))
goto out;
if (irsp->ulpStatus) {
@@ -889,18 +1039,16 @@ lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
}
/* PRLI failed */
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
- (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
- (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+ if (lpfc_error_lost_link(irsp)) {
goto out;
} else {
- lpfc_disc_state_machine(phba, ndlp, cmdiocb,
- NLP_EVT_CMPL_PRLI);
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_PRLI);
}
} else {
/* Good status, call state machine */
- lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI);
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_PRLI);
}
out:
@@ -909,9 +1057,11 @@ out:
}
int
-lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
PRLI *npr;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
@@ -923,9 +1073,9 @@ lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
- cmdsize = (sizeof (uint32_t) + sizeof (PRLI));
- elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
- ndlp->nlp_DID, ELS_CMD_PRLI);
+ cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_PRLI);
if (!elsiocb)
return 1;
@@ -933,9 +1083,9 @@ lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
/* For PRLI request, remainder of payload is service parameters */
- memset(pcmd, 0, (sizeof (PRLI) + sizeof (uint32_t)));
+ memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
*((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
/* For PRLI, remainder of payload is PRLI parameter page */
npr = (PRLI *) pcmd;
@@ -955,81 +1105,88 @@ lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue PRLI: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+
phba->fc_stat.elsXmitPRLI++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_PRLI_SND;
+ spin_unlock_irq(shost->host_lock);
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
- spin_unlock_irq(phba->host->host_lock);
- phba->fc_prli_sent++;
+ vport->fc_prli_sent++;
return 0;
}
static void
-lpfc_more_adisc(struct lpfc_hba * phba)
+lpfc_more_adisc(struct lpfc_vport *vport)
{
int sentadisc;
+ struct lpfc_hba *phba = vport->phba;
- if (phba->num_disc_nodes)
- phba->num_disc_nodes--;
+ if (vport->num_disc_nodes)
+ vport->num_disc_nodes--;
/* Continue discovery with <num_disc_nodes> ADISCs to go */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0210 Continue discovery with %d ADISCs to go "
+ "%d (%d):0210 Continue discovery with %d ADISCs to go "
"Data: x%x x%x x%x\n",
- phba->brd_no, phba->num_disc_nodes, phba->fc_adisc_cnt,
- phba->fc_flag, phba->hba_state);
+ phba->brd_no, vport->vpi, vport->num_disc_nodes,
+ vport->fc_adisc_cnt, vport->fc_flag, vport->port_state);
/* Check to see if there are more ADISCs to be sent */
- if (phba->fc_flag & FC_NLP_MORE) {
- lpfc_set_disctmo(phba);
-
- /* go thru NPR list and issue any remaining ELS ADISCs */
- sentadisc = lpfc_els_disc_adisc(phba);
+ if (vport->fc_flag & FC_NLP_MORE) {
+ lpfc_set_disctmo(vport);
+ /* go thru NPR nodes and issue any remaining ELS ADISCs */
+ sentadisc = lpfc_els_disc_adisc(vport);
}
return;
}
static void
-lpfc_rscn_disc(struct lpfc_hba * phba)
+lpfc_rscn_disc(struct lpfc_vport *vport)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ lpfc_can_disctmo(vport);
+
/* RSCN discovery */
- /* go thru NPR list and issue ELS PLOGIs */
- if (phba->fc_npr_cnt) {
- if (lpfc_els_disc_plogi(phba))
+ /* go thru NPR nodes and issue ELS PLOGIs */
+ if (vport->fc_npr_cnt)
+ if (lpfc_els_disc_plogi(vport))
return;
- }
- if (phba->fc_flag & FC_RSCN_MODE) {
+
+ if (vport->fc_flag & FC_RSCN_MODE) {
/* Check to see if more RSCNs came in while we were
* processing this one.
*/
- if ((phba->fc_rscn_id_cnt == 0) &&
- (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_RSCN_MODE;
- spin_unlock_irq(phba->host->host_lock);
+ if ((vport->fc_rscn_id_cnt == 0) &&
+ (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_RSCN_MODE;
+ spin_unlock_irq(shost->host_lock);
} else {
- lpfc_els_handle_rscn(phba);
+ lpfc_els_handle_rscn(vport);
}
}
}
static void
-lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
- struct lpfc_sli *psli;
struct lpfc_nodelist *ndlp;
- LPFC_MBOXQ_t *mbox;
- int disc, rc;
-
- psli = &phba->sli;
+ int disc;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -1037,27 +1194,32 @@ lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
irsp = &(rspiocb->iocb);
ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "ADISC cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ ndlp->nlp_DID);
+
/* Since ndlp can be freed in the disc state machine, note if this node
* is being used during discovery.
*/
+ spin_lock_irq(shost->host_lock);
disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
- spin_lock_irq(phba->host->host_lock);
ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
/* ADISC completes to NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0104 ADISC completes to NPort x%x "
+ "%d (%d):0104 ADISC completes to NPort x%x "
"Data: x%x x%x x%x x%x x%x\n",
- phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout, disc,
- phba->num_disc_nodes);
+ phba->brd_no, vport->vpi, ndlp->nlp_DID,
+ irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout,
+ disc, vport->num_disc_nodes);
/* Check to see if link went down during discovery */
- if (lpfc_els_chk_latt(phba)) {
- spin_lock_irq(phba->host->host_lock);
+ if (lpfc_els_chk_latt(vport)) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
goto out;
}
@@ -1066,67 +1228,68 @@ lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
if (disc) {
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(phba->host->host_lock);
- lpfc_set_disctmo(phba);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_set_disctmo(vport);
}
goto out;
}
/* ADISC failed */
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
- ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
- (irsp->un.ulpWord[4] != IOERR_LINK_DOWN) &&
- (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) {
- lpfc_disc_state_machine(phba, ndlp, cmdiocb,
- NLP_EVT_CMPL_ADISC);
+ if (!lpfc_error_lost_link(irsp)) {
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_ADISC);
}
} else {
/* Good status, call state machine */
- lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_ADISC);
}
- if (disc && phba->num_disc_nodes) {
+ if (disc && vport->num_disc_nodes) {
/* Check to see if there are more ADISCs to be sent */
- lpfc_more_adisc(phba);
+ lpfc_more_adisc(vport);
/* Check to see if we are done with ADISC authentication */
- if (phba->num_disc_nodes == 0) {
- lpfc_can_disctmo(phba);
- /* If we get here, there is nothing left to wait for */
- if ((phba->hba_state < LPFC_HBA_READY) &&
- (phba->hba_state != LPFC_CLEAR_LA)) {
- /* Link up discovery */
- if ((mbox = mempool_alloc(phba->mbox_mem_pool,
- GFP_KERNEL))) {
- phba->hba_state = LPFC_CLEAR_LA;
- lpfc_clear_la(phba, mbox);
- mbox->mbox_cmpl =
- lpfc_mbx_cmpl_clear_la;
- rc = lpfc_sli_issue_mbox
- (phba, mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox,
- phba->mbox_mem_pool);
- lpfc_disc_flush_list(phba);
- psli->ring[(psli->extra_ring)].
- flag &=
- ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->fcp_ring)].
- flag &=
- ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->next_ring)].
- flag &=
- ~LPFC_STOP_IOCB_EVENT;
- phba->hba_state =
- LPFC_HBA_READY;
+ if (vport->num_disc_nodes == 0) {
+ /* If we get here, there is nothing left to ADISC */
+ /*
+ * For NPIV, cmpl_reg_vpi will set port_state to READY,
+ * and continue discovery.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ !(vport->fc_flag & FC_RSCN_MODE)) {
+ lpfc_issue_reg_vpi(phba, vport);
+ goto out;
+ }
+ /*
+ * For SLI2, we need to set port_state to READY
+ * and continue discovery.
+ */
+ if (vport->port_state < LPFC_VPORT_READY) {
+ /* If we get here, there is nothing to ADISC */
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ lpfc_issue_clear_la(phba, vport);
+
+ if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+ vport->num_disc_nodes = 0;
+ /* go thru NPR list, issue ELS PLOGIs */
+ if (vport->fc_npr_cnt)
+ lpfc_els_disc_plogi(vport);
+
+ if (!vport->num_disc_nodes) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &=
+ ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(
+ shost->host_lock);
+ lpfc_can_disctmo(vport);
}
}
+ vport->port_state = LPFC_VPORT_READY;
} else {
- lpfc_rscn_disc(phba);
+ lpfc_rscn_disc(vport);
}
}
}
@@ -1136,23 +1299,22 @@ out:
}
int
-lpfc_issue_els_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
ADISC *ap;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli_ring *pring;
- struct lpfc_sli *psli;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
uint8_t *pcmd;
uint16_t cmdsize;
- psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
-
- cmdsize = (sizeof (uint32_t) + sizeof (ADISC));
- elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
- ndlp->nlp_DID, ELS_CMD_ADISC);
+ cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ADISC);
if (!elsiocb)
return 1;
@@ -1161,81 +1323,97 @@ lpfc_issue_els_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
/* For ADISC request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
/* Fill in ADISC payload */
ap = (ADISC *) pcmd;
ap->hardAL_PA = phba->fc_pref_ALPA;
- memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
- memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
- ap->DID = be32_to_cpu(phba->fc_myDID);
+ memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
+ memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+ ap->DID = be32_to_cpu(vport->fc_myDID);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue ADISC: did:x%x",
+ ndlp->nlp_DID, 0, 0);
phba->fc_stat.elsXmitADISC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_ADISC_SND;
+ spin_unlock_irq(shost->host_lock);
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_ADISC_SND;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
- spin_unlock_irq(phba->host->host_lock);
return 0;
}
static void
-lpfc_cmpl_els_logo(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_vport *vport = ndlp->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_sli *psli;
- struct lpfc_nodelist *ndlp;
psli = &phba->sli;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
irsp = &(rspiocb->iocb);
- ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "LOGO cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ ndlp->nlp_DID);
/* LOGO completes to NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0105 LOGO completes to NPort x%x "
+ "%d (%d):0105 LOGO completes to NPort x%x "
"Data: x%x x%x x%x x%x\n",
- phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout,
- phba->num_disc_nodes);
+ phba->brd_no, vport->vpi, ndlp->nlp_DID,
+ irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout,
+ vport->num_disc_nodes);
/* Check to see if link went down during discovery */
- if (lpfc_els_chk_latt(phba))
+ if (lpfc_els_chk_latt(vport))
goto out;
+ if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
+ /* NLP_EVT_DEVICE_RM should unregister the RPI
+ * which should abort all outstanding IOs.
+ */
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_DEVICE_RM);
+ goto out;
+ }
+
if (irsp->ulpStatus) {
/* Check for retry */
- if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb))
/* ELS command is being retried */
goto out;
- }
/* LOGO failed */
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
- (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
- (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+ if (lpfc_error_lost_link(irsp))
goto out;
- } else {
- lpfc_disc_state_machine(phba, ndlp, cmdiocb,
- NLP_EVT_CMPL_LOGO);
- }
+ else
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_LOGO);
} else {
/* Good status, call state machine.
* This will unregister the rpi if needed.
*/
- lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_LOGO);
}
out:
@@ -1244,75 +1422,91 @@ out:
}
int
-lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
+ int rc;
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING];
- cmdsize = (2 * sizeof (uint32_t)) + sizeof (struct lpfc_name);
- elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
- ndlp->nlp_DID, ELS_CMD_LOGO);
+ cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_LOGO);
if (!elsiocb)
return 1;
icmd = &elsiocb->iocb;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
/* Fill in LOGO payload */
- *((uint32_t *) (pcmd)) = be32_to_cpu(phba->fc_myDID);
- pcmd += sizeof (uint32_t);
- memcpy(pcmd, &phba->fc_portname, sizeof (struct lpfc_name));
+ *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
+ pcmd += sizeof(uint32_t);
+ memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue LOGO: did:x%x",
+ ndlp->nlp_DID, 0, 0);
phba->fc_stat.elsXmitLOGO++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
- if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ spin_unlock_irq(shost->host_lock);
+ rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+
+ if (rc == IOCB_ERROR) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
- spin_unlock_irq(phba->host->host_lock);
return 0;
}
static void
-lpfc_cmpl_els_cmd(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_vport *vport = cmdiocb->vport;
IOCB_t *irsp;
irsp = &rspiocb->iocb;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "ELS cmd cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->un.elsreq64.remoteID);
+
/* ELS cmd tag <ulpIoTag> completes */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_ELS,
- "%d:0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
- phba->brd_no,
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d (%d):0106 ELS cmd tag x%x completes Data: x%x x%x "
+ "x%x\n",
+ phba->brd_no, vport->vpi,
irsp->ulpIoTag, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout);
/* Check to see if link went down during discovery */
- lpfc_els_chk_latt(phba);
+ lpfc_els_chk_latt(vport);
lpfc_els_free_iocb(phba, cmdiocb);
return;
}
int
-lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
+lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
{
+ struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli_ring *pring;
@@ -1323,15 +1517,16 @@ lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
- cmdsize = (sizeof (uint32_t) + sizeof (SCR));
+ cmdsize = (sizeof(uint32_t) + sizeof(SCR));
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
if (!ndlp)
return 1;
- lpfc_nlp_init(phba, ndlp, nportid);
+ lpfc_nlp_init(vport, ndlp, nportid);
+
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_SCR);
- elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
- ndlp->nlp_DID, ELS_CMD_SCR);
if (!elsiocb) {
lpfc_nlp_put(ndlp);
return 1;
@@ -1341,29 +1536,31 @@ lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
/* For SCR, remainder of payload is SCR parameter page */
- memset(pcmd, 0, sizeof (SCR));
+ memset(pcmd, 0, sizeof(SCR));
((SCR *) pcmd)->Function = SCR_FUNC_FULL;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue SCR: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+
phba->fc_stat.elsXmitSCR++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
- spin_lock_irq(phba->host->host_lock);
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
- spin_unlock_irq(phba->host->host_lock);
lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
- spin_unlock_irq(phba->host->host_lock);
lpfc_nlp_put(ndlp);
return 0;
}
static int
-lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
+lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
{
+ struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli_ring *pring;
@@ -1377,14 +1574,15 @@ lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
- cmdsize = (sizeof (uint32_t) + sizeof (FARP));
+ cmdsize = (sizeof(uint32_t) + sizeof(FARP));
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
if (!ndlp)
return 1;
- lpfc_nlp_init(phba, ndlp, nportid);
- elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
- ndlp->nlp_DID, ELS_CMD_RNID);
+ lpfc_nlp_init(vport, ndlp, nportid);
+
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_RNID);
if (!elsiocb) {
lpfc_nlp_put(ndlp);
return 1;
@@ -1394,44 +1592,71 @@ lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
/* Fill in FARPR payload */
fp = (FARP *) (pcmd);
- memset(fp, 0, sizeof (FARP));
+ memset(fp, 0, sizeof(FARP));
lp = (uint32_t *) pcmd;
*lp++ = be32_to_cpu(nportid);
- *lp++ = be32_to_cpu(phba->fc_myDID);
+ *lp++ = be32_to_cpu(vport->fc_myDID);
fp->Rflags = 0;
fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
- memcpy(&fp->RportName, &phba->fc_portname, sizeof (struct lpfc_name));
- memcpy(&fp->RnodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
- if ((ondlp = lpfc_findnode_did(phba, nportid))) {
+ memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
+ memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+ ondlp = lpfc_findnode_did(vport, nportid);
+ if (ondlp) {
memcpy(&fp->OportName, &ondlp->nlp_portname,
- sizeof (struct lpfc_name));
+ sizeof(struct lpfc_name));
memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
- sizeof (struct lpfc_name));
+ sizeof(struct lpfc_name));
}
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue FARPR: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+
phba->fc_stat.elsXmitFARPR++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
- spin_lock_irq(phba->host->host_lock);
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
- spin_unlock_irq(phba->host->host_lock);
lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
- spin_unlock_irq(phba->host->host_lock);
lpfc_nlp_put(ndlp);
return 0;
}
+static void
+lpfc_end_rscn(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (vport->fc_flag & FC_RSCN_MODE) {
+ /*
+ * Check to see if more RSCNs came in while we were
+ * processing this one.
+ */
+ if (vport->fc_rscn_id_cnt ||
+ (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
+ lpfc_els_handle_rscn(vport);
+ else {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_RSCN_MODE;
+ spin_unlock_irq(shost->host_lock);
+ }
+ }
+}
+
void
-lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp)
+lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ spin_lock_irq(shost->host_lock);
nlp->nlp_flag &= ~NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
del_timer_sync(&nlp->nlp_delayfunc);
nlp->nlp_last_elscmd = 0;
@@ -1439,30 +1664,21 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp)
list_del_init(&nlp->els_retry_evt.evt_listp);
if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- if (phba->num_disc_nodes) {
+ spin_unlock_irq(shost->host_lock);
+ if (vport->num_disc_nodes) {
/* Check to see if there are more
* PLOGIs to be sent
*/
- lpfc_more_plogi(phba);
-
- if (phba->num_disc_nodes == 0) {
- phba->fc_flag &= ~FC_NDISC_ACTIVE;
- lpfc_can_disctmo(phba);
- if (phba->fc_flag & FC_RSCN_MODE) {
- /*
- * Check to see if more RSCNs
- * came in while we were
- * processing this one.
- */
- if((phba->fc_rscn_id_cnt==0) &&
- !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
- phba->fc_flag &= ~FC_RSCN_MODE;
- }
- else {
- lpfc_els_handle_rscn(phba);
- }
- }
+ lpfc_more_plogi(vport);
+
+ if (vport->num_disc_nodes == 0) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ lpfc_end_rscn(vport);
}
}
}
@@ -1472,18 +1688,19 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp)
void
lpfc_els_retry_delay(unsigned long ptr)
{
- struct lpfc_nodelist *ndlp;
- struct lpfc_hba *phba;
- unsigned long iflag;
- struct lpfc_work_evt *evtp;
-
- ndlp = (struct lpfc_nodelist *)ptr;
- phba = ndlp->nlp_phba;
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
+ struct lpfc_vport *vport = ndlp->vport;
+ struct lpfc_hba *phba = vport->phba;
+ unsigned long flags;
+ struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
+
+ ndlp = (struct lpfc_nodelist *) ptr;
+ phba = ndlp->vport->phba;
evtp = &ndlp->els_retry_evt;
- spin_lock_irqsave(phba->host->host_lock, iflag);
+ spin_lock_irqsave(&phba->hbalock, flags);
if (!list_empty(&evtp->evt_listp)) {
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
@@ -1491,33 +1708,31 @@ lpfc_els_retry_delay(unsigned long ptr)
evtp->evt = LPFC_EVT_ELS_RETRY;
list_add_tail(&evtp->evt_listp, &phba->work_list);
if (phba->work_wait)
- wake_up(phba->work_wait);
+ lpfc_worker_wake_up(phba);
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
void
lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
{
- struct lpfc_hba *phba;
- uint32_t cmd;
- uint32_t did;
- uint8_t retry;
+ struct lpfc_vport *vport = ndlp->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ uint32_t cmd, did, retry;
- phba = ndlp->nlp_phba;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
did = ndlp->nlp_DID;
cmd = ndlp->nlp_last_elscmd;
ndlp->nlp_last_elscmd = 0;
if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
return;
}
ndlp->nlp_flag &= ~NLP_DELAY_TMO;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
/*
* If a discovery event readded nlp_delayfunc after timer
* firing and before processing the timer, cancel the
@@ -1528,57 +1743,54 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
switch (cmd) {
case ELS_CMD_FLOGI:
- lpfc_issue_els_flogi(phba, ndlp, retry);
+ lpfc_issue_els_flogi(vport, ndlp, retry);
break;
case ELS_CMD_PLOGI:
- if(!lpfc_issue_els_plogi(phba, ndlp->nlp_DID, retry)) {
+ if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
}
break;
case ELS_CMD_ADISC:
- if (!lpfc_issue_els_adisc(phba, ndlp, retry)) {
+ if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
}
break;
case ELS_CMD_PRLI:
- if (!lpfc_issue_els_prli(phba, ndlp, retry)) {
+ if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
}
break;
case ELS_CMD_LOGO:
- if (!lpfc_issue_els_logo(phba, ndlp, retry)) {
+ if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
}
break;
+ case ELS_CMD_FDISC:
+ lpfc_issue_els_fdisc(vport, ndlp, retry);
+ break;
}
return;
}
static int
-lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp;
- struct lpfc_dmabuf *pcmd;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
uint32_t *elscmd;
struct ls_rjt stat;
- int retry, maxretry;
- int delay;
- uint32_t cmd;
+ int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
+ uint32_t cmd = 0;
uint32_t did;
- retry = 0;
- delay = 0;
- maxretry = lpfc_max_els_tries;
- irsp = &rspiocb->iocb;
- ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
- cmd = 0;
/* Note: context2 may be 0 for internal driver abort
* of delayed ELS command.
@@ -1594,11 +1806,15 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
else {
/* We should only hit this case for retrying PLOGI */
did = irsp->un.elsreq64.remoteID;
- ndlp = lpfc_findnode_did(phba, did);
+ ndlp = lpfc_findnode_did(vport, did);
if (!ndlp && (cmd != ELS_CMD_PLOGI))
return 1;
}
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Retry ELS: wd7:x%x wd4:x%x did:x%x",
+ *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
+
switch (irsp->ulpStatus) {
case IOSTAT_FCP_RSP_ERROR:
case IOSTAT_REMOTE_STOP:
@@ -1607,25 +1823,37 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
case IOSTAT_LOCAL_REJECT:
switch ((irsp->un.ulpWord[4] & 0xff)) {
case IOERR_LOOP_OPEN_FAILURE:
- if (cmd == ELS_CMD_PLOGI) {
- if (cmdiocb->retry == 0) {
- delay = 1;
- }
- }
+ if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
+ delay = 1000;
retry = 1;
break;
- case IOERR_SEQUENCE_TIMEOUT:
- retry = 1;
+ case IOERR_ILLEGAL_COMMAND:
+ if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) &&
+ (cmd == ELS_CMD_FDISC)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0124 FDISC failed (3/6) retrying...\n",
+ phba->brd_no, vport->vpi);
+ lpfc_mbx_unreg_vpi(vport);
+ retry = 1;
+ /* Always retry for this case */
+ cmdiocb->retry = 0;
+ }
break;
case IOERR_NO_RESOURCES:
- if (cmd == ELS_CMD_PLOGI) {
- delay = 1;
- }
+ retry = 1;
+ if (cmdiocb->retry > 100)
+ delay = 100;
+ maxretry = 250;
+ break;
+
+ case IOERR_ILLEGAL_FRAME:
+ delay = 100;
retry = 1;
break;
+ case IOERR_SEQUENCE_TIMEOUT:
case IOERR_INVALID_RPI:
retry = 1;
break;
@@ -1655,27 +1883,57 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
if (stat.un.b.lsRjtRsnCodeExp ==
LSEXP_CMD_IN_PROGRESS) {
if (cmd == ELS_CMD_PLOGI) {
- delay = 1;
+ delay = 1000;
maxretry = 48;
}
retry = 1;
break;
}
if (cmd == ELS_CMD_PLOGI) {
- delay = 1;
+ delay = 1000;
maxretry = lpfc_max_els_tries + 1;
retry = 1;
break;
}
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (cmd == ELS_CMD_FDISC) &&
+ (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0125 FDISC Failed (x%x)."
+ " Fabric out of resources\n",
+ phba->brd_no, vport->vpi, stat.un.lsRjtError);
+ lpfc_vport_set_state(vport,
+ FC_VPORT_NO_FABRIC_RSCS);
+ }
break;
case LSRJT_LOGICAL_BSY:
- if (cmd == ELS_CMD_PLOGI) {
- delay = 1;
+ if ((cmd == ELS_CMD_PLOGI) ||
+ (cmd == ELS_CMD_PRLI)) {
+ delay = 1000;
maxretry = 48;
+ } else if (cmd == ELS_CMD_FDISC) {
+ /* Always retry for this case */
+ cmdiocb->retry = 0;
}
retry = 1;
break;
+
+ case LSRJT_LOGICAL_ERR:
+ case LSRJT_PROTOCOL_ERR:
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (cmd == ELS_CMD_FDISC) &&
+ ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
+ (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
+ ) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0123 FDISC Failed (x%x)."
+ " Fabric Detected Bad WWN\n",
+ phba->brd_no, vport->vpi, stat.un.lsRjtError);
+ lpfc_vport_set_state(vport,
+ FC_VPORT_FABRIC_REJ_WWN);
+ }
+ break;
}
break;
@@ -1695,21 +1953,27 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
retry = 0;
}
+ if ((vport->load_flag & FC_UNLOADING) != 0)
+ retry = 0;
+
if (retry) {
/* Retry ELS command <elsCmd> to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0107 Retry ELS command x%x to remote "
+ "%d (%d):0107 Retry ELS command x%x to remote "
"NPORT x%x Data: x%x x%x\n",
- phba->brd_no,
+ phba->brd_no, vport->vpi,
cmd, did, cmdiocb->retry, delay);
- if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) {
+ if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
+ ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
+ ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
+ /* Don't reset timer for no resources */
+
/* If discovery / RSCN timer is running, reset it */
- if (timer_pending(&phba->fc_disctmo) ||
- (phba->fc_flag & FC_RSCN_MODE)) {
- lpfc_set_disctmo(phba);
- }
+ if (timer_pending(&vport->fc_disctmo) ||
+ (vport->fc_flag & FC_RSCN_MODE))
+ lpfc_set_disctmo(vport);
}
phba->fc_stat.elsXmitRetry++;
@@ -1717,50 +1981,62 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
phba->fc_stat.elsDelayRetry++;
ndlp->nlp_retry = cmdiocb->retry;
- mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+ /* delay is specified in milliseconds */
+ mod_timer(&ndlp->nlp_delayfunc,
+ jiffies + msecs_to_jiffies(delay));
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ if (cmd == ELS_CMD_PRLI)
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_REG_LOGIN_ISSUE);
+ else
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_NPR_NODE);
ndlp->nlp_last_elscmd = cmd;
return 1;
}
switch (cmd) {
case ELS_CMD_FLOGI:
- lpfc_issue_els_flogi(phba, ndlp, cmdiocb->retry);
+ lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
+ return 1;
+ case ELS_CMD_FDISC:
+ lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
return 1;
case ELS_CMD_PLOGI:
if (ndlp) {
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp,
+ lpfc_nlp_set_state(vport, ndlp,
NLP_STE_PLOGI_ISSUE);
}
- lpfc_issue_els_plogi(phba, did, cmdiocb->retry);
+ lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
return 1;
case ELS_CMD_ADISC:
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
- lpfc_issue_els_adisc(phba, ndlp, cmdiocb->retry);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+ lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
return 1;
case ELS_CMD_PRLI:
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE);
- lpfc_issue_els_prli(phba, ndlp, cmdiocb->retry);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+ lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
return 1;
case ELS_CMD_LOGO:
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
- lpfc_issue_els_logo(phba, ndlp, cmdiocb->retry);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
return 1;
}
}
/* No retry ELS command <elsCmd> to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0108 No retry ELS command x%x to remote NPORT x%x "
- "Data: x%x\n",
- phba->brd_no,
+ "%d (%d):0108 No retry ELS command x%x to remote "
+ "NPORT x%x Data: x%x\n",
+ phba->brd_no, vport->vpi,
cmd, did, cmdiocb->retry);
return 0;
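
One behavioural detail worth noting in the retry path above: delay used to be a flag that always armed the timer for jiffies + HZ (one second), whereas it now carries a value in milliseconds (100 or 1000) that is converted with msecs_to_jiffies() when the timer is armed. The stand-alone sketch below only models that unit conversion and its round-up behaviour; the kernel's msecs_to_jiffies() is the authoritative implementation.

/* Model of the millisecond -> timer-tick conversion done via msecs_to_jiffies(). */
#include <stdio.h>

static unsigned long ms_to_ticks(unsigned int ms, unsigned int hz)
{
	/* round up so a non-zero delay never collapses to zero ticks */
	return ((unsigned long)ms * hz + 999) / 1000;
}

int main(void)
{
	unsigned int delays[] = { 100, 1000 };		/* values used above */
	unsigned int rates[]  = { 100, 250, 1000 };	/* common HZ settings */

	for (unsigned int i = 0; i < 2; i++)
		for (unsigned int j = 0; j < 3; j++)
			printf("delay=%4ums HZ=%4u -> %lu ticks\n",
			       delays[i], rates[j],
			       ms_to_ticks(delays[i], rates[j]));
	return 0;
}
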
@@ -1795,33 +2071,36 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
}
- spin_lock_irq(phba->host->host_lock);
lpfc_sli_release_iocbq(phba, elsiocb);
- spin_unlock_irq(phba->host->host_lock);
return 0;
}
static void
-lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_vport *vport = cmdiocb->vport;
+ IOCB_t *irsp;
- ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ irsp = &rspiocb->iocb;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "ACC LOGO cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
/* ACC to LOGO completes to NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0109 ACC to LOGO completes to NPort x%x "
+ "%d (%d):0109 ACC to LOGO completes to NPort x%x "
"Data: x%x x%x x%x\n",
- phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_state, ndlp->nlp_rpi);
+ phba->brd_no, vport->vpi, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
switch (ndlp->nlp_state) {
case NLP_STE_UNUSED_NODE: /* node is just allocated */
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
break;
case NLP_STE_NPR_NODE: /* NPort Recovery mode */
- lpfc_unreg_rpi(phba, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
break;
default:
break;
@@ -1830,24 +2109,38 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
return;
}
+void
+lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+
+ pmb->context1 = NULL;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_nlp_put(ndlp);
+ return;
+}
+
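
The new lpfc_mbx_cmpl_dflt_rpi() above releases, via lpfc_nlp_put(), the node reference that is taken with lpfc_nlp_get() when the mailbox is queued (mbox->context2 further down), so the nodelist entry stays valid while the mailbox is in flight. A toy model of that get/put pairing, using a plain counter rather than the driver's reference helpers, is sketched below.

/* Toy get/put pairing: the mailbox path holds its own node reference. */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int refs;
	unsigned int did;
};

static struct node *node_get(struct node *n)
{
	n->refs++;
	return n;
}

static void node_put(struct node *n)
{
	if (--n->refs == 0) {
		printf("freeing node x%x\n", n->did);
		free(n);
	}
}

int main(void)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return 1;
	n->refs = 1;			/* reference held by the node list */
	n->did = 0x010200;

	struct node *mbox_ctx = node_get(n);	/* mbox->context2 = lpfc_nlp_get(ndlp) */
	node_put(mbox_ctx);			/* completion handler: lpfc_nlp_put(ndlp) */
	node_put(n);				/* node later removed from the list */
	return 0;
}
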
static void
-lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
+ struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
IOCB_t *irsp;
- struct lpfc_nodelist *ndlp;
LPFC_MBOXQ_t *mbox = NULL;
- struct lpfc_dmabuf *mp;
+ struct lpfc_dmabuf *mp = NULL;
irsp = &rspiocb->iocb;
- ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
if (cmdiocb->context_un.mbox)
mbox = cmdiocb->context_un.mbox;
-
/* Check to see if link went down during discovery */
- if (lpfc_els_chk_latt(phba) || !ndlp) {
+ if (!ndlp || lpfc_els_chk_latt(vport)) {
if (mbox) {
mp = (struct lpfc_dmabuf *) mbox->context1;
if (mp) {
@@ -1859,24 +2152,37 @@ lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "ACC cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->un.rcvels.remoteID);
+
/* ELS response tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0110 ELS response tag x%x completes "
+ "%d (%d):0110 ELS response tag x%x completes "
"Data: x%x x%x x%x x%x x%x x%x x%x\n",
- phba->brd_no,
+ phba->brd_no, vport->vpi,
cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
- ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
if (mbox) {
if ((rspiocb->iocb.ulpStatus == 0)
&& (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
- lpfc_unreg_rpi(phba, ndlp);
- mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+ lpfc_unreg_rpi(vport, ndlp);
mbox->context2 = lpfc_nlp_get(ndlp);
- ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_REG_LOGIN_ISSUE);
+ mbox->vport = vport;
+ if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
+ mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+ }
+ else {
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_REG_LOGIN_ISSUE);
+ }
if (lpfc_sli_issue_mbox(phba, mbox,
(MBX_NOWAIT | MBX_STOP_IOCB))
!= MBX_NOT_FINISHED) {
@@ -1886,15 +2192,11 @@ lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* NOTE: we should have messages for unsuccessful
reglogin */
} else {
- /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */
- if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
- (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
- (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) {
- if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
- lpfc_drop_node(phba, ndlp);
- ndlp = NULL;
- }
+ /* Do not drop node for lpfc_els_abort'ed ELS cmds */
+ if (!lpfc_error_lost_link(irsp) &&
+ ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+ lpfc_drop_node(vport, ndlp);
+ ndlp = NULL;
}
}
mp = (struct lpfc_dmabuf *) mbox->context1;
@@ -1906,19 +2208,21 @@ lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
out:
if (ndlp) {
- spin_lock_irq(phba->host->host_lock);
- ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
+ spin_unlock_irq(shost->host_lock);
}
lpfc_els_free_iocb(phba, cmdiocb);
return;
}
int
-lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
- struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp,
- LPFC_MBOXQ_t * mbox, uint8_t newnode)
+lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
+ struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
+ LPFC_MBOXQ_t *mbox, uint8_t newnode)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
@@ -1935,23 +2239,30 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
switch (flag) {
case ELS_CMD_ACC:
- cmdsize = sizeof (uint32_t);
- elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
- ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+ cmdsize = sizeof(uint32_t);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
+ ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
+ spin_unlock_irq(shost->host_lock);
return 1;
}
+
icmd = &elsiocb->iocb;
icmd->ulpContext = oldcmd->ulpContext; /* Xri */
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
break;
case ELS_CMD_PLOGI:
- cmdsize = (sizeof (struct serv_parm) + sizeof (uint32_t));
- elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
- ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+ cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
+ ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
return 1;
@@ -1963,12 +2274,16 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
elsiocb->context_un.mbox = mbox;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
- pcmd += sizeof (uint32_t);
- memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
+ pcmd += sizeof(uint32_t);
+ memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC PLOGI: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
break;
case ELS_CMD_PRLO:
- cmdsize = sizeof (uint32_t) + sizeof (PRLO);
- elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+ cmdsize = sizeof(uint32_t) + sizeof(PRLO);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
if (!elsiocb)
return 1;
@@ -1978,10 +2293,14 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
- sizeof (uint32_t) + sizeof (PRLO));
+ sizeof(uint32_t) + sizeof(PRLO));
*((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
els_pkt_ptr = (ELS_PKT *) pcmd;
els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC PRLO: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
break;
default:
return 1;
@@ -1994,25 +2313,23 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
/* Xmit ELS ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
+ "%d (%d):0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
"DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
- phba->brd_no, elsiocb->iotag,
+ phba->brd_no, vport->vpi, elsiocb->iotag,
elsiocb->iocb.ulpContext, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
if (ndlp->nlp_flag & NLP_LOGO_ACC) {
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
} else {
- elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
}
phba->fc_stat.elsXmitACC++;
- spin_lock_irq(phba->host->host_lock);
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
- spin_unlock_irq(phba->host->host_lock);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@@ -2021,9 +2338,11 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
}
int
-lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
- struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
+lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
+ struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
+ LPFC_MBOXQ_t *mbox)
{
+ struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
@@ -2036,9 +2355,9 @@ lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
- cmdsize = 2 * sizeof (uint32_t);
- elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
- ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
+ cmdsize = 2 * sizeof(uint32_t);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_LS_RJT);
if (!elsiocb)
return 1;
@@ -2048,22 +2367,30 @@ lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
*((uint32_t *) (pcmd)) = rejectError;
+ if (mbox) {
+ elsiocb->context_un.mbox = mbox;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ }
+
/* Xmit ELS RJT <err> response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0129 Xmit ELS RJT x%x response tag x%x xri x%x, "
- "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- phba->brd_no, rejectError, elsiocb->iotag,
+ "%d (%d):0129 Xmit ELS RJT x%x response tag x%x "
+ "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
+ "rpi x%x\n",
+ phba->brd_no, vport->vpi, rejectError, elsiocb->iotag,
elsiocb->iocb.ulpContext, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue LS_RJT: did:x%x flg:x%x err:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
+
phba->fc_stat.elsXmitLSRJT++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
- spin_lock_irq(phba->host->host_lock);
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
- spin_unlock_irq(phba->host->host_lock);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@@ -2072,25 +2399,22 @@ lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
}
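
The reject paths in this file build their argument for lpfc_els_rsp_reject() by filling a struct ls_rjt bitfield and passing the combined word (stat.un.lsRjtError). A stand-alone sketch of that packing is shown below; the bit positions and the two example values follow the usual FC-LS LS_RJT layout and are assumptions of the demo, not copied from the driver headers.

/* Assumed FC-LS layout of the LS_RJT word: reason in bits 23-16,
 * explanation in bits 15-8, vendor-unique byte in bits 7-0. */
#include <stdio.h>
#include <stdint.h>

#define LSRJT_UNABLE_TPC	0x09	/* unable to perform command request */
#define LSEXP_CANT_GIVE_DATA	0x2A	/* unable to supply requested data */

static uint32_t make_ls_rjt(uint8_t reason, uint8_t explanation, uint8_t vendor)
{
	return ((uint32_t)reason << 16) | ((uint32_t)explanation << 8) | vendor;
}

int main(void)
{
	uint32_t err = make_ls_rjt(LSRJT_UNABLE_TPC, LSEXP_CANT_GIVE_DATA, 0);

	printf("LS_RJT word: 0x%08x\n", (unsigned int)err);	/* 0x00092a00 */
	return 0;
}
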
int
-lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
- struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
+lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
+ struct lpfc_nodelist *ndlp)
{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
ADISC *ap;
- IOCB_t *icmd;
- IOCB_t *oldcmd;
+ IOCB_t *icmd, *oldcmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli_ring *pring;
- struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
- psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
-
- cmdsize = sizeof (uint32_t) + sizeof (ADISC);
- elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
- ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+ cmdsize = sizeof(uint32_t) + sizeof(ADISC);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
return 1;
@@ -2100,28 +2424,30 @@ lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
/* Xmit ADISC ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0130 Xmit ADISC ACC response iotag x%x xri: "
+ "%d (%d):0130 Xmit ADISC ACC response iotag x%x xri: "
"x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
- phba->brd_no, elsiocb->iotag,
+ phba->brd_no, vport->vpi, elsiocb->iotag,
elsiocb->iocb.ulpContext, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
ap = (ADISC *) (pcmd);
ap->hardAL_PA = phba->fc_pref_ALPA;
- memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
- memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
- ap->DID = be32_to_cpu(phba->fc_myDID);
+ memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
+ memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+ ap->DID = be32_to_cpu(vport->fc_myDID);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC ADISC: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
- spin_lock_irq(phba->host->host_lock);
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
- spin_unlock_irq(phba->host->host_lock);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@@ -2130,9 +2456,10 @@ lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
}
int
-lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
+lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
struct lpfc_nodelist *ndlp)
{
+ struct lpfc_hba *phba = vport->phba;
PRLI *npr;
lpfc_vpd_t *vpd;
IOCB_t *icmd;
@@ -2147,8 +2474,8 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
- cmdsize = sizeof (uint32_t) + sizeof (PRLI);
- elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, ndlp,
+ cmdsize = sizeof(uint32_t) + sizeof(PRLI);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
if (!elsiocb)
return 1;
@@ -2159,19 +2486,19 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
/* Xmit PRLI ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0131 Xmit PRLI ACC response tag x%x xri x%x, "
+ "%d (%d):0131 Xmit PRLI ACC response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- phba->brd_no, elsiocb->iotag,
+ phba->brd_no, vport->vpi, elsiocb->iotag,
elsiocb->iocb.ulpContext, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
/* For PRLI, remainder of payload is PRLI parameter page */
- memset(pcmd, 0, sizeof (PRLI));
+ memset(pcmd, 0, sizeof(PRLI));
npr = (PRLI *) pcmd;
vpd = &phba->vpd;
@@ -2193,12 +2520,14 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC PRLI: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- spin_lock_irq(phba->host->host_lock);
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
- spin_unlock_irq(phba->host->host_lock);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@@ -2207,12 +2536,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
}
static int
-lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
+lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
{
+ struct lpfc_hba *phba = vport->phba;
RNID *rn;
- IOCB_t *icmd;
- IOCB_t *oldcmd;
+ IOCB_t *icmd, *oldcmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
@@ -2223,13 +2552,13 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING];
- cmdsize = sizeof (uint32_t) + sizeof (uint32_t)
- + (2 * sizeof (struct lpfc_name));
+ cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
+ + (2 * sizeof(struct lpfc_name));
if (format)
- cmdsize += sizeof (RNID_TOP_DISC);
+ cmdsize += sizeof(RNID_TOP_DISC);
- elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
- ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
return 1;
@@ -2239,30 +2568,30 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
/* Xmit RNID ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0132 Xmit RNID ACC response tag x%x "
+ "%d (%d):0132 Xmit RNID ACC response tag x%x "
"xri x%x\n",
- phba->brd_no, elsiocb->iotag,
+ phba->brd_no, vport->vpi, elsiocb->iotag,
elsiocb->iocb.ulpContext);
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
- memset(pcmd, 0, sizeof (RNID));
+ memset(pcmd, 0, sizeof(RNID));
rn = (RNID *) (pcmd);
rn->Format = format;
- rn->CommonLen = (2 * sizeof (struct lpfc_name));
- memcpy(&rn->portName, &phba->fc_portname, sizeof (struct lpfc_name));
- memcpy(&rn->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
+ rn->CommonLen = (2 * sizeof(struct lpfc_name));
+ memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
+ memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
switch (format) {
case 0:
rn->SpecificLen = 0;
break;
case RNID_TOPOLOGY_DISC:
- rn->SpecificLen = sizeof (RNID_TOP_DISC);
+ rn->SpecificLen = sizeof(RNID_TOP_DISC);
memcpy(&rn->un.topologyDisc.portName,
- &phba->fc_portname, sizeof (struct lpfc_name));
+ &vport->fc_portname, sizeof(struct lpfc_name));
rn->un.topologyDisc.unitType = RNID_HBA;
rn->un.topologyDisc.physPort = 0;
rn->un.topologyDisc.attachedNodes = 0;
@@ -2273,15 +2602,17 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
break;
}
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC RNID: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
lpfc_nlp_put(ndlp);
elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
* it could be freed */
- spin_lock_irq(phba->host->host_lock);
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
- spin_unlock_irq(phba->host->host_lock);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@@ -2290,168 +2621,153 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
}
int
-lpfc_els_disc_adisc(struct lpfc_hba *phba)
+lpfc_els_disc_adisc(struct lpfc_vport *vport)
{
- int sentadisc;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp, *next_ndlp;
+ int sentadisc = 0;
- sentadisc = 0;
/* go thru NPR nodes and issue any remaining ELS ADISCs */
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
(ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
(ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
- lpfc_issue_els_adisc(phba, ndlp, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+ lpfc_issue_els_adisc(vport, ndlp, 0);
sentadisc++;
- phba->num_disc_nodes++;
- if (phba->num_disc_nodes >=
- phba->cfg_discovery_threads) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_NLP_MORE;
- spin_unlock_irq(phba->host->host_lock);
+ vport->num_disc_nodes++;
+ if (vport->num_disc_nodes >=
+ vport->phba->cfg_discovery_threads) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_NLP_MORE;
+ spin_unlock_irq(shost->host_lock);
break;
}
}
}
if (sentadisc == 0) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_NLP_MORE;
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NLP_MORE;
+ spin_unlock_irq(shost->host_lock);
}
return sentadisc;
}
int
-lpfc_els_disc_plogi(struct lpfc_hba * phba)
+lpfc_els_disc_plogi(struct lpfc_vport *vport)
{
- int sentplogi;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp, *next_ndlp;
+ int sentplogi = 0;
- sentplogi = 0;
- /* go thru NPR list and issue any remaining ELS PLOGIs */
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
+ /* go thru NPR nodes and issue any remaining ELS PLOGIs */
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
(ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
(ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
(ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
sentplogi++;
- phba->num_disc_nodes++;
- if (phba->num_disc_nodes >=
- phba->cfg_discovery_threads) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_NLP_MORE;
- spin_unlock_irq(phba->host->host_lock);
+ vport->num_disc_nodes++;
+ if (vport->num_disc_nodes >=
+ vport->phba->cfg_discovery_threads) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_NLP_MORE;
+ spin_unlock_irq(shost->host_lock);
break;
}
}
}
if (sentplogi == 0) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_NLP_MORE;
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NLP_MORE;
+ spin_unlock_irq(shost->host_lock);
}
return sentplogi;
}
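
lpfc_els_disc_adisc() and lpfc_els_disc_plogi() above both throttle discovery: they walk vport->fc_nodes, issue at most cfg_discovery_threads ELS commands per pass, and set FC_NLP_MORE so a later pass can resume where they stopped (clearing it when nothing was sent). The hypothetical sketch below models only that throttling loop, with invented names.

/* Toy throttling pass: at most max_outstanding nodes kicked per call. */
#include <stdio.h>

#define NEEDS_DISC 0x1

struct node { int flags; };

static int disc_pass(struct node *nodes, int count, int max_outstanding, int *more)
{
	int sent = 0;

	*more = 0;
	for (int i = 0; i < count; i++) {
		if (!(nodes[i].flags & NEEDS_DISC))
			continue;
		nodes[i].flags &= ~NEEDS_DISC;	/* "issue" the ELS command */
		if (++sent >= max_outstanding) {
			*more = 1;		/* like setting FC_NLP_MORE */
			break;
		}
	}
	return sent;
}

int main(void)
{
	struct node nodes[5] = { {1}, {0}, {1}, {1}, {1} };
	int more;
	int sent = disc_pass(nodes, 5, 2, &more);

	printf("sent %d, more=%d\n", sent, more);	/* sent 2, more=1 */
	return 0;
}
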
-int
-lpfc_els_flush_rscn(struct lpfc_hba * phba)
+void
+lpfc_els_flush_rscn(struct lpfc_vport *vport)
{
- struct lpfc_dmabuf *mp;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
int i;
- for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
- mp = phba->fc_rscn_id_list[i];
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- phba->fc_rscn_id_list[i] = NULL;
- }
- phba->fc_rscn_id_cnt = 0;
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
- spin_unlock_irq(phba->host->host_lock);
- lpfc_can_disctmo(phba);
- return 0;
+ for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
+ lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
+ vport->fc_rscn_id_list[i] = NULL;
+ }
+ spin_lock_irq(shost->host_lock);
+ vport->fc_rscn_id_cnt = 0;
+ vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
}
int
-lpfc_rscn_payload_check(struct lpfc_hba * phba, uint32_t did)
+lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
{
D_ID ns_did;
D_ID rscn_did;
- struct lpfc_dmabuf *mp;
uint32_t *lp;
- uint32_t payload_len, cmd, i, match;
+ uint32_t payload_len, i;
+ struct lpfc_hba *phba = vport->phba;
ns_did.un.word = did;
- match = 0;
/* Never match fabric nodes for RSCNs */
if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
- return(0);
+ return 0;
/* If we are doing a FULL RSCN rediscovery, match everything */
- if (phba->fc_flag & FC_RSCN_DISCOVERY) {
+ if (vport->fc_flag & FC_RSCN_DISCOVERY)
return did;
- }
- for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
- mp = phba->fc_rscn_id_list[i];
- lp = (uint32_t *) mp->virt;
- cmd = *lp++;
- payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
- payload_len -= sizeof (uint32_t); /* take off word 0 */
+ for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
+ lp = vport->fc_rscn_id_list[i]->virt;
+ payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
+ payload_len -= sizeof(uint32_t); /* take off word 0 */
while (payload_len) {
- rscn_did.un.word = *lp++;
- rscn_did.un.word = be32_to_cpu(rscn_did.un.word);
- payload_len -= sizeof (uint32_t);
+ rscn_did.un.word = be32_to_cpu(*lp++);
+ payload_len -= sizeof(uint32_t);
switch (rscn_did.un.b.resv) {
case 0: /* Single N_Port ID effected */
- if (ns_did.un.word == rscn_did.un.word) {
- match = did;
- }
+ if (ns_did.un.word == rscn_did.un.word)
+ return did;
break;
case 1: /* Whole N_Port Area effected */
if ((ns_did.un.b.domain == rscn_did.un.b.domain)
&& (ns_did.un.b.area == rscn_did.un.b.area))
- {
- match = did;
- }
+ return did;
break;
case 2: /* Whole N_Port Domain effected */
if (ns_did.un.b.domain == rscn_did.un.b.domain)
- {
- match = did;
- }
- break;
- case 3: /* Whole Fabric effected */
- match = did;
+ return did;
break;
default:
- /* Unknown Identifier in RSCN list */
+ /* Unknown Identifier in RSCN node */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0217 Unknown Identifier in "
- "RSCN payload Data: x%x\n",
- phba->brd_no, rscn_did.un.word);
- break;
- }
- if (match) {
- break;
+ "%d (%d):0217 Unknown "
+ "Identifier in RSCN payload "
+ "Data: x%x\n",
+ phba->brd_no, vport->vpi,
+ rscn_did.un.word);
+ case 3: /* Whole Fabric effected */
+ return did;
}
}
}
- return match;
+ return 0;
}
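
lpfc_rscn_payload_check() above matches the local DID against each RSCN entry at the granularity named by the entry's address-format field: a single port, a whole area, a whole domain, or the whole fabric. A stand-alone model of that comparison is given below; the 8-bit domain/area/port split of a 24-bit FC address is standard, but the field layout used here is the demo's own, not the driver's D_ID union.

/* Match a 24-bit FC DID at the granularity named by the RSCN entry. */
#include <stdio.h>
#include <stdint.h>

enum { RSCN_FMT_PORT = 0, RSCN_FMT_AREA = 1, RSCN_FMT_DOMAIN = 2, RSCN_FMT_FABRIC = 3 };

static int rscn_matches(uint32_t did, uint32_t rscn_did, int fmt)
{
	switch (fmt) {
	case RSCN_FMT_PORT:			/* single N_Port ID */
		return (did & 0xFFFFFF) == (rscn_did & 0xFFFFFF);
	case RSCN_FMT_AREA:			/* whole area */
		return (did & 0xFFFF00) == (rscn_did & 0xFFFF00);
	case RSCN_FMT_DOMAIN:			/* whole domain */
		return (did & 0xFF0000) == (rscn_did & 0xFF0000);
	default:				/* whole fabric */
		return 1;
	}
}

int main(void)
{
	uint32_t mine = 0x010200;	/* domain 0x01, area 0x02, port 0x00 */

	printf("%d\n", rscn_matches(mine, 0x010200, RSCN_FMT_PORT));	/* 1 */
	printf("%d\n", rscn_matches(mine, 0x010299, RSCN_FMT_AREA));	/* 1 */
	printf("%d\n", rscn_matches(mine, 0x019999, RSCN_FMT_DOMAIN));	/* 1 */
	printf("%d\n", rscn_matches(mine, 0x020200, RSCN_FMT_DOMAIN));	/* 0 */
	return 0;
}
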
static int
-lpfc_rscn_recovery_check(struct lpfc_hba *phba)
+lpfc_rscn_recovery_check(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp = NULL;
@@ -2459,188 +2775,261 @@ lpfc_rscn_recovery_check(struct lpfc_hba *phba)
* them to NPR state.
*/
- list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE ||
- lpfc_rscn_payload_check(phba, ndlp->nlp_DID) == 0)
+ lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0)
continue;
- lpfc_disc_state_machine(phba, ndlp, NULL,
- NLP_EVT_DEVICE_RECOVERY);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
/*
* Make sure NLP_DELAY_TMO is NOT running after a device
* recovery event.
*/
if (ndlp->nlp_flag & NLP_DELAY_TMO)
- lpfc_cancel_retry_delay_tmo(phba, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
}
return 0;
}
static int
-lpfc_els_rcv_rscn(struct lpfc_hba * phba,
- struct lpfc_iocbq * cmdiocb,
- struct lpfc_nodelist * ndlp, uint8_t newnode)
+lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp, uint8_t newnode)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
- uint32_t *lp;
+ struct lpfc_vport *next_vport;
+ uint32_t *lp, *datap;
IOCB_t *icmd;
- uint32_t payload_len, cmd;
+ uint32_t payload_len, length, nportid, *cmd;
+ int rscn_cnt = vport->fc_rscn_id_cnt;
+ int rscn_id = 0, hba_id = 0;
int i;
icmd = &cmdiocb->iocb;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
- cmd = *lp++;
- payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
- payload_len -= sizeof (uint32_t); /* take off word 0 */
- cmd &= ELS_CMD_MASK;
+ payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
+ payload_len -= sizeof(uint32_t); /* take off word 0 */
/* RSCN received */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0214 RSCN received Data: x%x x%x x%x x%x\n",
- phba->brd_no,
- phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt);
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0214 RSCN received Data: x%x x%x x%x x%x\n",
+ phba->brd_no, vport->vpi, vport->fc_flag, payload_len,
+ *lp, rscn_cnt);
for (i = 0; i < payload_len/sizeof(uint32_t); i++)
- fc_host_post_event(phba->host, fc_get_event_number(),
+ fc_host_post_event(shost, fc_get_event_number(),
FCH_EVT_RSCN, lp[i]);
/* If we are about to begin discovery, just ACC the RSCN.
* Discovery processing will satisfy it.
*/
- if (phba->hba_state <= LPFC_NS_QRY) {
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
- newnode);
+ if (vport->port_state <= LPFC_NS_QRY) {
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
+ ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
+ newnode);
return 0;
}
+ /* If this RSCN just contains NPortIDs for other vports on this HBA,
+ * just ACC and ignore it.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ !(phba->cfg_peer_port_login)) {
+ i = payload_len;
+ datap = lp;
+ while (i > 0) {
+ nportid = *datap++;
+ nportid = ((be32_to_cpu(nportid)) & Mask_DID);
+ i -= sizeof(uint32_t);
+ rscn_id++;
+ list_for_each_entry(next_vport, &phba->port_list,
+ listentry) {
+ if (nportid == next_vport->fc_myDID) {
+ hba_id++;
+ break;
+ }
+ }
+ }
+ if (rscn_id == hba_id) {
+ /* ALL NPortIDs in RSCN are on HBA */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0214 Ignore RSCN Data: x%x x%x x%x x%x\n",
+ phba->brd_no, vport->vpi, vport->fc_flag, payload_len,
+ *lp, rscn_cnt);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
+ ndlp->nlp_DID, vport->port_state,
+ ndlp->nlp_flag);
+
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
+ ndlp, NULL, newnode);
+ return 0;
+ }
+ }
+
/* If we are already processing an RSCN, save the received
* RSCN payload buffer, cmdiocb->context2 to process later.
*/
- if (phba->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
- if ((phba->fc_rscn_id_cnt < FC_MAX_HOLD_RSCN) &&
- !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_RSCN_MODE;
- spin_unlock_irq(phba->host->host_lock);
- phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
-
- /* If we zero, cmdiocb->context2, the calling
- * routine will not try to free it.
- */
- cmdiocb->context2 = NULL;
+ if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
+ ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+
+ vport->fc_flag |= FC_RSCN_DEFERRED;
+ if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
+ !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_RSCN_MODE;
+ spin_unlock_irq(shost->host_lock);
+ if (rscn_cnt) {
+ cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
+ length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
+ }
+ if ((rscn_cnt) &&
+ (payload_len + length <= LPFC_BPL_SIZE)) {
+ *cmd &= ELS_CMD_MASK;
+ *cmd |= be32_to_cpu(payload_len + length);
+ memcpy(((uint8_t *)cmd) + length, lp,
+ payload_len);
+ } else {
+ vport->fc_rscn_id_list[rscn_cnt] = pcmd;
+ vport->fc_rscn_id_cnt++;
+ /* If we zero, cmdiocb->context2, the calling
+ * routine will not try to free it.
+ */
+ cmdiocb->context2 = NULL;
+ }
/* Deferred RSCN */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0235 Deferred RSCN "
+ "%d (%d):0235 Deferred RSCN "
"Data: x%x x%x x%x\n",
- phba->brd_no, phba->fc_rscn_id_cnt,
- phba->fc_flag, phba->hba_state);
+ phba->brd_no, vport->vpi,
+ vport->fc_rscn_id_cnt, vport->fc_flag,
+ vport->port_state);
} else {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_RSCN_DISCOVERY;
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_RSCN_DISCOVERY;
+ spin_unlock_irq(shost->host_lock);
/* ReDiscovery RSCN */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0234 ReDiscovery RSCN "
+ "%d (%d):0234 ReDiscovery RSCN "
"Data: x%x x%x x%x\n",
- phba->brd_no, phba->fc_rscn_id_cnt,
- phba->fc_flag, phba->hba_state);
+ phba->brd_no, vport->vpi,
+ vport->fc_rscn_id_cnt, vport->fc_flag,
+ vport->port_state);
}
/* Send back ACC */
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
newnode);
/* send RECOVERY event for ALL nodes that match RSCN payload */
- lpfc_rscn_recovery_check(phba);
+ lpfc_rscn_recovery_check(vport);
+ vport->fc_flag &= ~FC_RSCN_DEFERRED;
return 0;
}
- phba->fc_flag |= FC_RSCN_MODE;
- phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RSCN: did:x%x/ste:x%x flg:x%x",
+ ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_RSCN_MODE;
+ spin_unlock_irq(shost->host_lock);
+ vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
/*
* If we zero, cmdiocb->context2, the calling routine will
* not try to free it.
*/
cmdiocb->context2 = NULL;
- lpfc_set_disctmo(phba);
+ lpfc_set_disctmo(vport);
/* Send back ACC */
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode);
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode);
/* send RECOVERY event for ALL nodes that match RSCN payload */
- lpfc_rscn_recovery_check(phba);
+ lpfc_rscn_recovery_check(vport);
- return lpfc_els_handle_rscn(phba);
+ return lpfc_els_handle_rscn(vport);
}
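
A subtlety in the deferred-RSCN branch of lpfc_els_rcv_rscn() above: while discovery is already running, a newly received RSCN is appended onto the previously saved buffer when the combined payload still fits in LPFC_BPL_SIZE, instead of occupying another fc_rscn_id_list slot, and word 0 of the saved buffer is patched to the new length. The sketch below models that coalescing with a simple host-order word count and invented types, ignoring the wire-format and byte-order handling done in the driver.

/* Toy coalescing of two saved RSCN payloads (host order, word counts). */
#include <stdio.h>
#include <string.h>

#define BUF_WORDS 16			/* stand-in for the LPFC_BPL_SIZE limit */

struct rscn_buf {
	unsigned int words;		/* payload length in words, incl. word 0 */
	unsigned int id[BUF_WORDS];	/* affected N_Port IDs */
};

/* Append the IDs of "extra" to "saved"; return 0 if they do not fit. */
static int rscn_coalesce(struct rscn_buf *saved, const struct rscn_buf *extra)
{
	unsigned int add = extra->words - 1;	/* IDs only, skip the header word */

	if (saved->words - 1 + add > BUF_WORDS)
		return 0;
	memcpy(&saved->id[saved->words - 1], extra->id, add * sizeof(unsigned int));
	saved->words += add;			/* patch up the length word */
	return 1;
}

int main(void)
{
	struct rscn_buf a = { .words = 3, .id = { 0x010200, 0x010300 } };
	struct rscn_buf b = { .words = 2, .id = { 0x020500 } };

	if (rscn_coalesce(&a, &b))
		printf("saved RSCN now lists %u ports\n", a.words - 1);
	return 0;
}
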
int
-lpfc_els_handle_rscn(struct lpfc_hba * phba)
+lpfc_els_handle_rscn(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp;
+ struct lpfc_hba *phba = vport->phba;
+
+ /* Ignore RSCN if the port is being torn down. */
+ if (vport->load_flag & FC_UNLOADING) {
+ lpfc_els_flush_rscn(vport);
+ return 0;
+ }
/* Start timer for RSCN processing */
- lpfc_set_disctmo(phba);
+ lpfc_set_disctmo(vport);
/* RSCN processed */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0215 RSCN processed Data: x%x x%x x%x x%x\n",
- phba->brd_no,
- phba->fc_flag, 0, phba->fc_rscn_id_cnt,
- phba->hba_state);
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0215 RSCN processed Data: x%x x%x x%x x%x\n",
+ phba->brd_no, vport->vpi,
+ vport->fc_flag, 0, vport->fc_rscn_id_cnt,
+ vport->port_state);
/* To process RSCN, first compare RSCN data with NameServer */
- phba->fc_ns_retry = 0;
- ndlp = lpfc_findnode_did(phba, NameServer_DID);
+ vport->fc_ns_retry = 0;
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
/* Good ndlp, issue CT Request to NameServer */
- if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == 0) {
+ if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
/* Wait for NameServer query cmpl before we can
continue */
return 1;
- }
} else {
/* If login to NameServer does not exist, issue one */
/* Good status, issue PLOGI to NameServer */
- ndlp = lpfc_findnode_did(phba, NameServer_DID);
- if (ndlp) {
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (ndlp)
/* Wait for NameServer login cmpl before we can
continue */
return 1;
- }
+
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
if (!ndlp) {
- lpfc_els_flush_rscn(phba);
+ lpfc_els_flush_rscn(vport);
return 0;
} else {
- lpfc_nlp_init(phba, ndlp, NameServer_DID);
+ lpfc_nlp_init(vport, ndlp, NameServer_DID);
ndlp->nlp_type |= NLP_FABRIC;
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(phba, NameServer_DID, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, NameServer_DID, 0);
/* Wait for NameServer login cmpl before we can
continue */
return 1;
}
}
- lpfc_els_flush_rscn(phba);
+ lpfc_els_flush_rscn(vport);
return 0;
}
static int
-lpfc_els_rcv_flogi(struct lpfc_hba * phba,
- struct lpfc_iocbq * cmdiocb,
- struct lpfc_nodelist * ndlp, uint8_t newnode)
+lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp, uint8_t newnode)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
uint32_t *lp = (uint32_t *) pcmd->virt;
IOCB_t *icmd = &cmdiocb->iocb;
@@ -2655,7 +3044,7 @@ lpfc_els_rcv_flogi(struct lpfc_hba * phba,
/* FLOGI received */
- lpfc_set_disctmo(phba);
+ lpfc_set_disctmo(vport);
if (phba->fc_topology == TOPOLOGY_LOOP) {
/* We should never receive a FLOGI in loop mode, ignore it */
@@ -2664,33 +3053,34 @@ lpfc_els_rcv_flogi(struct lpfc_hba * phba,
/* An FLOGI ELS command <elsCmd> was received from DID <did> in
Loop Mode */
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
- "%d:0113 An FLOGI ELS command x%x was received "
- "from DID x%x in Loop Mode\n",
- phba->brd_no, cmd, did);
+ "%d (%d):0113 An FLOGI ELS command x%x was "
+ "received from DID x%x in Loop Mode\n",
+ phba->brd_no, vport->vpi, cmd, did);
return 1;
}
did = Fabric_DID;
- if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3))) {
+ if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3))) {
/* For a FLOGI we accept, then if our portname is greater
* than the remote portname we initiate Nport login.
*/
- rc = memcmp(&phba->fc_portname, &sp->portName,
- sizeof (struct lpfc_name));
+ rc = memcmp(&vport->fc_portname, &sp->portName,
+ sizeof(struct lpfc_name));
if (!rc) {
- if ((mbox = mempool_alloc(phba->mbox_mem_pool,
- GFP_KERNEL)) == 0) {
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
return 1;
- }
+
lpfc_linkdown(phba);
lpfc_init_link(phba, mbox,
phba->cfg_topology,
phba->cfg_link_speed);
mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
rc = lpfc_sli_issue_mbox
(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
lpfc_set_loopback_flag(phba);
@@ -2699,31 +3089,34 @@ lpfc_els_rcv_flogi(struct lpfc_hba * phba,
}
return 1;
} else if (rc > 0) { /* greater than */
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_PT2PT_PLOGI;
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_PT2PT_PLOGI;
+ spin_unlock_irq(shost->host_lock);
}
- phba->fc_flag |= FC_PT2PT;
- phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_PT2PT;
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
} else {
/* Reject this request because invalid parameters */
stat.un.b.lsRjtRsvd0 = 0;
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
return 1;
}
/* Send back ACC */
- lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, newnode);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, newnode);
return 0;
}
static int
-lpfc_els_rcv_rnid(struct lpfc_hba * phba,
- struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
+lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
@@ -2746,7 +3139,7 @@ lpfc_els_rcv_rnid(struct lpfc_hba * phba,
case 0:
case RNID_TOPOLOGY_DISC:
/* Send back ACC */
- lpfc_els_rsp_rnid_acc(phba, rn->Format, cmdiocb, ndlp);
+ lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
break;
default:
/* Reject this request because format not supported */
@@ -2754,14 +3147,15 @@ lpfc_els_rcv_rnid(struct lpfc_hba * phba,
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
}
return 0;
}
static int
-lpfc_els_rcv_lirr(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_nodelist *ndlp)
+lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
{
struct ls_rjt stat;
@@ -2770,15 +3164,15 @@ lpfc_els_rcv_lirr(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
return 0;
}
static void
lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_sli *psli;
- struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
MAILBOX_t *mb;
IOCB_t *icmd;
RPS_RSP *rps_rsp;
@@ -2788,8 +3182,6 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
uint16_t xri, status;
uint32_t cmdsize;
- psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING];
mb = &pmb->mb;
ndlp = (struct lpfc_nodelist *) pmb->context2;
@@ -2804,8 +3196,9 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
mempool_free(pmb, phba->mbox_mem_pool);
- elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, lpfc_max_els_tries, ndlp,
- ndlp->nlp_DID, ELS_CMD_ACC);
+ elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+ lpfc_max_els_tries, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
lpfc_nlp_put(ndlp);
if (!elsiocb)
return;
@@ -2815,14 +3208,14 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
- pcmd += sizeof (uint32_t); /* Skip past command */
+ pcmd += sizeof(uint32_t); /* Skip past command */
rps_rsp = (RPS_RSP *)pcmd;
if (phba->fc_topology != TOPOLOGY_LOOP)
status = 0x10;
else
status = 0x8;
- if (phba->fc_flag & FC_FABRIC)
+ if (phba->pport->fc_flag & FC_FABRIC)
status |= 0x4;
rps_rsp->rsvd1 = 0;
@@ -2836,25 +3229,25 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Xmit ELS RPS ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
- "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- phba->brd_no, elsiocb->iotag,
+ "%d (%d):0118 Xmit ELS RPS ACC response tag x%x "
+ "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
+ "rpi x%x\n",
+ phba->brd_no, ndlp->vport->vpi, elsiocb->iotag,
elsiocb->iocb.ulpContext, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
-
- if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR)
lpfc_els_free_iocb(phba, elsiocb);
- }
return;
}
static int
-lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_nodelist * ndlp)
+lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
{
+ struct lpfc_hba *phba = vport->phba;
uint32_t *lp;
uint8_t flag;
LPFC_MBOXQ_t *mbox;
@@ -2868,7 +3261,8 @@ lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
}
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -2878,19 +3272,24 @@ lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
if ((flag == 0) ||
((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
- ((flag == 2) && (memcmp(&rps->un.portName, &phba->fc_portname,
- sizeof (struct lpfc_name)) == 0))) {
- if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
+ ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
+ sizeof(struct lpfc_name)) == 0))) {
+
+ printk("Fix me....\n");
+ dump_stack();
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+ if (mbox) {
lpfc_read_lnk_stat(phba, mbox);
mbox->context1 =
- (void *)((unsigned long)cmdiocb->iocb.ulpContext);
+ (void *)((unsigned long) cmdiocb->iocb.ulpContext);
mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->vport = vport;
mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
if (lpfc_sli_issue_mbox (phba, mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED) {
+ (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED)
/* Mbox completion will send ELS Response */
return 0;
- }
+
lpfc_nlp_put(ndlp);
mempool_free(mbox, phba->mbox_mem_pool);
}
@@ -2899,27 +3298,25 @@ lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
return 0;
}
static int
-lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
- struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
+lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
+ struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
{
- IOCB_t *icmd;
- IOCB_t *oldcmd;
+ struct lpfc_hba *phba = vport->phba;
+ IOCB_t *icmd, *oldcmd;
RPL_RSP rpl_rsp;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli_ring *pring;
- struct lpfc_sli *psli;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
uint8_t *pcmd;
- psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
- elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
- ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
return 1;
@@ -2929,7 +3326,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
- pcmd += sizeof (uint16_t);
+ pcmd += sizeof(uint16_t);
*((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
pcmd += sizeof(uint16_t);
@@ -2937,8 +3334,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
rpl_rsp.listLen = be32_to_cpu(1);
rpl_rsp.index = 0;
rpl_rsp.port_num_blk.portNum = 0;
- rpl_rsp.port_num_blk.portID = be32_to_cpu(phba->fc_myDID);
- memcpy(&rpl_rsp.port_num_blk.portName, &phba->fc_portname,
+ rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
+ memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
sizeof(struct lpfc_name));
memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
@@ -2946,13 +3343,14 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
/* Xmit ELS RPL ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0120 Xmit ELS RPL ACC response tag x%x xri x%x, "
- "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- phba->brd_no, elsiocb->iotag,
+ "%d (%d):0120 Xmit ELS RPL ACC response tag x%x "
+ "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
+ "rpi x%x\n",
+ phba->brd_no, vport->vpi, elsiocb->iotag,
elsiocb->iocb.ulpContext, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
@@ -2963,8 +3361,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
}
static int
-lpfc_els_rcv_rpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_nodelist * ndlp)
+lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
@@ -2979,7 +3377,8 @@ lpfc_els_rcv_rpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
}
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -2996,15 +3395,16 @@ lpfc_els_rcv_rpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
} else {
cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
}
- lpfc_els_rsp_rpl_acc(phba, cmdsize, cmdiocb, ndlp);
+ lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
return 0;
}
static int
-lpfc_els_rcv_farp(struct lpfc_hba * phba,
- struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
+lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
IOCB_t *icmd;
@@ -3020,11 +3420,9 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba,
fp = (FARP *) lp;
/* FARP-REQ received from DID <did> */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_ELS,
- "%d:0601 FARP-REQ received from DID x%x\n",
- phba->brd_no, did);
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d (%d):0601 FARP-REQ received from DID x%x\n",
+ phba->brd_no, vport->vpi, did);
/* We will only support match on WWPN or WWNN */
if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
@@ -3034,15 +3432,15 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba,
cnt = 0;
/* If this FARP command is searching for my portname */
if (fp->Mflags & FARP_MATCH_PORT) {
- if (memcmp(&fp->RportName, &phba->fc_portname,
- sizeof (struct lpfc_name)) == 0)
+ if (memcmp(&fp->RportName, &vport->fc_portname,
+ sizeof(struct lpfc_name)) == 0)
cnt = 1;
}
/* If this FARP command is searching for my nodename */
if (fp->Mflags & FARP_MATCH_NODE) {
- if (memcmp(&fp->RnodeName, &phba->fc_nodename,
- sizeof (struct lpfc_name)) == 0)
+ if (memcmp(&fp->RnodeName, &vport->fc_nodename,
+ sizeof(struct lpfc_name)) == 0)
cnt = 1;
}
@@ -3052,28 +3450,28 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba,
/* Log back into the node before sending the FARP. */
if (fp->Rflags & FARP_REQUEST_PLOGI) {
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp,
+ lpfc_nlp_set_state(vport, ndlp,
NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
}
/* Send a FARP response to that node */
- if (fp->Rflags & FARP_REQUEST_FARPR) {
- lpfc_issue_els_farpr(phba, did, 0);
- }
+ if (fp->Rflags & FARP_REQUEST_FARPR)
+ lpfc_issue_els_farpr(vport, did, 0);
}
}
return 0;
}
static int
-lpfc_els_rcv_farpr(struct lpfc_hba * phba,
- struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
+lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
IOCB_t *icmd;
uint32_t cmd, did;
+ struct lpfc_hba *phba = vport->phba;
icmd = &cmdiocb->iocb;
did = icmd->un.elsreq64.remoteID;
@@ -3082,21 +3480,18 @@ lpfc_els_rcv_farpr(struct lpfc_hba * phba,
cmd = *lp++;
/* FARP-RSP received from DID <did> */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_ELS,
- "%d:0600 FARP-RSP received from DID x%x\n",
- phba->brd_no, did);
-
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d (%d):0600 FARP-RSP received from DID x%x\n",
+ phba->brd_no, vport->vpi, did);
/* ACCEPT the Farp resp request */
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
return 0;
}
static int
-lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_nodelist * fan_ndlp)
+lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *fan_ndlp)
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
@@ -3104,10 +3499,12 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
uint32_t cmd, did;
FAN *fp;
struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct lpfc_hba *phba = vport->phba;
/* FAN received */
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n",
- phba->brd_no);
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d (%d):0265 FAN received\n",
+ phba->brd_no, vport->vpi);
icmd = &cmdiocb->iocb;
did = icmd->un.elsreq64.remoteID;
@@ -3115,11 +3512,11 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
lp = (uint32_t *)pcmd->virt;
cmd = *lp++;
- fp = (FAN *)lp;
+ fp = (FAN *) lp;
/* FAN received; Fan does not have a reply sequence */
- if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
+ if (phba->pport->port_state == LPFC_LOCAL_CFG_LINK) {
if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
sizeof(struct lpfc_name)) != 0) ||
(memcmp(&phba->fc_fabparam.portName, &fp->FportName,
@@ -3130,7 +3527,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
*/
list_for_each_entry_safe(ndlp, next_ndlp,
- &phba->fc_nodes, nlp_listp) {
+ &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state != NLP_STE_NPR_NODE)
continue;
if (ndlp->nlp_type & NLP_FABRIC) {
@@ -3138,24 +3535,24 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
* Clean up old Fabric, Nameserver and
* other NLP_FABRIC logins
*/
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
/* Fail outstanding I/O now since this
* device is marked for PLOGI
*/
- lpfc_unreg_rpi(phba, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
}
}
- phba->hba_state = LPFC_FLOGI;
- lpfc_set_disctmo(phba);
- lpfc_initial_flogi(phba);
+ vport->port_state = LPFC_FLOGI;
+ lpfc_set_disctmo(vport);
+ lpfc_initial_flogi(vport);
return 0;
}
/* Discovery not needed,
* move the nodes to their original state.
*/
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
if (ndlp->nlp_state != NLP_STE_NPR_NODE)
continue;
@@ -3163,13 +3560,13 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
switch (ndlp->nlp_prev_state) {
case NLP_STE_UNMAPPED_NODE:
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(phba, ndlp,
+ lpfc_nlp_set_state(vport, ndlp,
NLP_STE_UNMAPPED_NODE);
break;
case NLP_STE_MAPPED_NODE:
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(phba, ndlp,
+ lpfc_nlp_set_state(vport, ndlp,
NLP_STE_MAPPED_NODE);
break;
@@ -3179,7 +3576,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
}
/* Start discovery - this should just do CLEAR_LA */
- lpfc_disc_start(phba);
+ lpfc_disc_start(vport);
}
return 0;
}
@@ -3187,42 +3584,42 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
void
lpfc_els_timeout(unsigned long ptr)
{
- struct lpfc_hba *phba;
+ struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
+ struct lpfc_hba *phba = vport->phba;
unsigned long iflag;
- phba = (struct lpfc_hba *)ptr;
- if (phba == 0)
- return;
- spin_lock_irqsave(phba->host->host_lock, iflag);
- if (!(phba->work_hba_events & WORKER_ELS_TMO)) {
- phba->work_hba_events |= WORKER_ELS_TMO;
+ spin_lock_irqsave(&vport->work_port_lock, iflag);
+ if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
+ vport->work_port_events |= WORKER_ELS_TMO;
+ spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->work_wait)
- wake_up(phba->work_wait);
+ lpfc_worker_wake_up(phba);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
}
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ else
+ spin_unlock_irqrestore(&vport->work_port_lock, iflag);
return;
}
void
-lpfc_els_timeout_handler(struct lpfc_hba *phba)
+lpfc_els_timeout_handler(struct lpfc_vport *vport)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd = NULL;
struct lpfc_dmabuf *pcmd;
- uint32_t *elscmd;
- uint32_t els_command=0;
+ uint32_t els_command = 0;
uint32_t timeout;
- uint32_t remote_ID;
+ uint32_t remote_ID = 0xffffffff;
- if (phba == 0)
- return;
- spin_lock_irq(phba->host->host_lock);
/* If the timer is already canceled do nothing */
- if (!(phba->work_hba_events & WORKER_ELS_TMO)) {
- spin_unlock_irq(phba->host->host_lock);
+ if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
return;
}
+ spin_lock_irq(&phba->hbalock);
timeout = (uint32_t)(phba->fc_ratov << 1);
pring = &phba->sli.ring[LPFC_ELS_RING];
@@ -3230,63 +3627,70 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba)
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
cmd = &piocb->iocb;
- if ((piocb->iocb_flag & LPFC_IO_LIBDFC) ||
- (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN) ||
- (piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)) {
+ if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
+ piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
+ piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
continue;
- }
+
+ if (piocb->vport != vport)
+ continue;
+
pcmd = (struct lpfc_dmabuf *) piocb->context2;
- if (pcmd) {
- elscmd = (uint32_t *) (pcmd->virt);
- els_command = *elscmd;
- }
+ if (pcmd)
+ els_command = *(uint32_t *) (pcmd->virt);
- if ((els_command == ELS_CMD_FARP)
- || (els_command == ELS_CMD_FARPR)) {
+ if (els_command == ELS_CMD_FARP ||
+ els_command == ELS_CMD_FARPR ||
+ els_command == ELS_CMD_FDISC)
+ continue;
+
+ if (vport != piocb->vport)
continue;
- }
if (piocb->drvrTimeout > 0) {
- if (piocb->drvrTimeout >= timeout) {
+ if (piocb->drvrTimeout >= timeout)
piocb->drvrTimeout -= timeout;
- } else {
+ else
piocb->drvrTimeout = 0;
- }
continue;
}
- if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
- struct lpfc_nodelist *ndlp;
- ndlp = __lpfc_findnode_rpi(phba, cmd->ulpContext);
- remote_ID = ndlp->nlp_DID;
- } else {
+ remote_ID = 0xffffffff;
+ if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
remote_ID = cmd->un.elsreq64.remoteID;
+ else {
+ struct lpfc_nodelist *ndlp;
+ ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
+ if (ndlp)
+ remote_ID = ndlp->nlp_DID;
}
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_ELS,
- "%d:0127 ELS timeout Data: x%x x%x x%x x%x\n",
- phba->brd_no, els_command,
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0127 ELS timeout Data: x%x x%x x%x "
+ "x%x\n",
+ phba->brd_no, vport->vpi, els_command,
remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
lpfc_sli_issue_abort_iotag(phba, pring, piocb);
}
- if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
- mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout);
+ spin_unlock_irq(&phba->hbalock);
- spin_unlock_irq(phba->host->host_lock);
+ if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
+ mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
}
void
-lpfc_els_flush_cmd(struct lpfc_hba *phba)
+lpfc_els_flush_cmd(struct lpfc_vport *vport)
{
LIST_HEAD(completions);
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd = NULL;
- spin_lock_irq(phba->host->host_lock);
+ lpfc_fabric_abort_vport(vport);
+
+ spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
cmd = &piocb->iocb;
@@ -3301,271 +3705,1042 @@ lpfc_els_flush_cmd(struct lpfc_hba *phba)
cmd->ulpCommand == CMD_ABORT_XRI_CN)
continue;
+ if (piocb->vport != vport)
+ continue;
+
list_move_tail(&piocb->list, &completions);
pring->txq_cnt--;
-
}
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- cmd = &piocb->iocb;
-
if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
continue;
}
+ if (piocb->vport != vport)
+ continue;
+
lpfc_sli_issue_abort_iotag(phba, pring, piocb);
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
- while(!list_empty(&completions)) {
+ while (!list_empty(&completions)) {
piocb = list_get_first(&completions, struct lpfc_iocbq, list);
cmd = &piocb->iocb;
- list_del(&piocb->list);
+ list_del_init(&piocb->list);
- if (piocb->iocb_cmpl) {
+ if (!piocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, piocb);
+ else {
cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
(piocb->iocb_cmpl) (phba, piocb, piocb);
- } else
- lpfc_sli_release_iocbq(phba, piocb);
+ }
}
return;
}
-void
-lpfc_els_unsol_event(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring, struct lpfc_iocbq * elsiocb)
+static void
+lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
{
- struct lpfc_sli *psli;
struct lpfc_nodelist *ndlp;
- struct lpfc_dmabuf *mp;
- uint32_t *lp;
- IOCB_t *icmd;
struct ls_rjt stat;
- uint32_t cmd;
- uint32_t did;
- uint32_t newnode;
- uint32_t drop_cmd = 0; /* by default do NOT drop received cmd */
- uint32_t rjt_err = 0;
-
- psli = &phba->sli;
- icmd = &elsiocb->iocb;
-
- if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
- /* Not enough posted buffers; Try posting more buffers */
- phba->fc_stat.NoRcvBuf++;
- lpfc_post_buffer(phba, pring, 0, 1);
- return;
- }
-
- /* If there are no BDEs associated with this IOCB,
- * there is nothing to do.
- */
- if (icmd->ulpBdeCount == 0)
- return;
+ uint32_t *payload;
+ uint32_t cmd, did, newnode, rjt_err = 0;
+ IOCB_t *icmd = &elsiocb->iocb;
- /* type of ELS cmd is first 32bit word in packet */
- mp = lpfc_sli_ringpostbuf_get(phba, pring, getPaddr(icmd->un.
- cont64[0].
- addrHigh,
- icmd->un.
- cont64[0].addrLow));
- if (mp == 0) {
- drop_cmd = 1;
+ if (vport == NULL || elsiocb->context2 == NULL)
goto dropit;
- }
newnode = 0;
- lp = (uint32_t *) mp->virt;
- cmd = *lp++;
- lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], 1, 1);
+ payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
+ cmd = *payload;
+ if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
+ lpfc_post_buffer(phba, pring, 1, 1);
+ did = icmd->un.rcvels.remoteID;
if (icmd->ulpStatus) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- drop_cmd = 1;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV Unsol ELS: status:x%x/x%x did:x%x",
+ icmd->ulpStatus, icmd->un.ulpWord[4], did);
goto dropit;
}
/* Check to see if link went down during discovery */
- if (lpfc_els_chk_latt(phba)) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- drop_cmd = 1;
+ if (lpfc_els_chk_latt(vport))
goto dropit;
- }
- did = icmd->un.rcvels.remoteID;
- ndlp = lpfc_findnode_did(phba, did);
+	/* Ignore traffic received during vport shutdown. */
+ if (vport->load_flag & FC_UNLOADING)
+ goto dropit;
+
+ ndlp = lpfc_findnode_did(vport, did);
if (!ndlp) {
/* Cannot find existing Fabric ndlp, so allocate a new one */
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
- if (!ndlp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- drop_cmd = 1;
+ if (!ndlp)
goto dropit;
- }
- lpfc_nlp_init(phba, ndlp, did);
+ lpfc_nlp_init(vport, ndlp, did);
newnode = 1;
if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
ndlp->nlp_type |= NLP_FABRIC;
}
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
}
phba->fc_stat.elsRcvFrame++;
if (elsiocb->context1)
lpfc_nlp_put(elsiocb->context1);
elsiocb->context1 = lpfc_nlp_get(ndlp);
- elsiocb->context2 = mp;
+ elsiocb->vport = vport;
if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
cmd &= ELS_CMD_MASK;
}
/* ELS command <elsCmd> received from NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0112 ELS command x%x received from NPORT x%x "
- "Data: x%x\n", phba->brd_no, cmd, did, phba->hba_state);
+ "%d (%d):0112 ELS command x%x received from NPORT x%x "
+ "Data: x%x\n", phba->brd_no, vport->vpi, cmd, did,
+ vport->port_state);
switch (cmd) {
case ELS_CMD_PLOGI:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvPLOGI++;
- if (phba->hba_state < LPFC_DISC_AUTH) {
- rjt_err = 1;
+ ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
+
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
break;
}
- ndlp = lpfc_plogi_confirm_nport(phba, mp, ndlp);
- lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
+ lpfc_disc_state_machine(vport, ndlp, elsiocb,
+ NLP_EVT_RCV_PLOGI);
+
break;
case ELS_CMD_FLOGI:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvFLOGI++;
- lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode);
+ lpfc_els_rcv_flogi(vport, elsiocb, ndlp, newnode);
if (newnode)
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
break;
case ELS_CMD_LOGO:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV LOGO: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvLOGO++;
- if (phba->hba_state < LPFC_DISC_AUTH) {
- rjt_err = 1;
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
break;
}
- lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
+ lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
break;
case ELS_CMD_PRLO:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV PRLO: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvPRLO++;
- if (phba->hba_state < LPFC_DISC_AUTH) {
- rjt_err = 1;
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
break;
}
- lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
+ lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
break;
case ELS_CMD_RSCN:
phba->fc_stat.elsRcvRSCN++;
- lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode);
+ lpfc_els_rcv_rscn(vport, elsiocb, ndlp, newnode);
if (newnode)
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
break;
case ELS_CMD_ADISC:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV ADISC: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvADISC++;
- if (phba->hba_state < LPFC_DISC_AUTH) {
- rjt_err = 1;
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
break;
}
- lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_ADISC);
+ lpfc_disc_state_machine(vport, ndlp, elsiocb,
+ NLP_EVT_RCV_ADISC);
break;
case ELS_CMD_PDISC:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV PDISC: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvPDISC++;
- if (phba->hba_state < LPFC_DISC_AUTH) {
- rjt_err = 1;
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
break;
}
- lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PDISC);
+ lpfc_disc_state_machine(vport, ndlp, elsiocb,
+ NLP_EVT_RCV_PDISC);
break;
case ELS_CMD_FARPR:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV FARPR: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvFARPR++;
- lpfc_els_rcv_farpr(phba, elsiocb, ndlp);
+ lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
break;
case ELS_CMD_FARP:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV FARP: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvFARP++;
- lpfc_els_rcv_farp(phba, elsiocb, ndlp);
+ lpfc_els_rcv_farp(vport, elsiocb, ndlp);
break;
case ELS_CMD_FAN:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV FAN: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvFAN++;
- lpfc_els_rcv_fan(phba, elsiocb, ndlp);
+ lpfc_els_rcv_fan(vport, elsiocb, ndlp);
break;
case ELS_CMD_PRLI:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV PRLI: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvPRLI++;
- if (phba->hba_state < LPFC_DISC_AUTH) {
- rjt_err = 1;
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
break;
}
- lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
+ lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
break;
case ELS_CMD_LIRR:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV LIRR: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvLIRR++;
- lpfc_els_rcv_lirr(phba, elsiocb, ndlp);
+ lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
if (newnode)
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
break;
case ELS_CMD_RPS:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RPS: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvRPS++;
- lpfc_els_rcv_rps(phba, elsiocb, ndlp);
+ lpfc_els_rcv_rps(vport, elsiocb, ndlp);
if (newnode)
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
break;
case ELS_CMD_RPL:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RPL: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvRPL++;
- lpfc_els_rcv_rpl(phba, elsiocb, ndlp);
+ lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
if (newnode)
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
break;
case ELS_CMD_RNID:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RNID: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvRNID++;
- lpfc_els_rcv_rnid(phba, elsiocb, ndlp);
+ lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
if (newnode)
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
break;
default:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
+ cmd, did, vport->port_state);
+
/* Unsupported ELS command, reject */
- rjt_err = 1;
+ rjt_err = LSRJT_INVALID_CMD;
/* Unknown ELS command <elsCmd> received from NPORT <did> */
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
- "%d:0115 Unknown ELS command x%x received from "
- "NPORT x%x\n", phba->brd_no, cmd, did);
+ "%d (%d):0115 Unknown ELS command x%x "
+ "received from NPORT x%x\n",
+ phba->brd_no, vport->vpi, cmd, did);
if (newnode)
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
break;
}
	/* Check whether the received ELS cmd needs to be rejected with LS_RJT */
if (rjt_err) {
- stat.un.b.lsRjtRsvd0 = 0;
- stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ memset(&stat, 0, sizeof(stat));
+ stat.un.b.lsRjtRsnCode = rjt_err;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
- stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, elsiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
+ NULL);
+ if (newnode)
+ lpfc_drop_node(vport, ndlp);
+ }
+
+ return;
+
+dropit:
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0111 Dropping received ELS cmd "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, vport ? vport->vpi : 0xffff,
+ icmd->ulpStatus, icmd->un.ulpWord[4],
+ icmd->ulpTimeout);
+ phba->fc_stat.elsRcvDrop++;
+}
+
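+/* Look up the vport on this HBA whose VPI matches vpi; returns NULL if none. */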
+static struct lpfc_vport *
+lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
+{
+ struct lpfc_vport *vport;
+
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ if (vport->vpi == vpi)
+ return vport;
+ }
+ return NULL;
+}
+
+void
+lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *elsiocb)
+{
+ struct lpfc_vport *vport = phba->pport;
+ IOCB_t *icmd = &elsiocb->iocb;
+ dma_addr_t paddr;
+ struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
+ struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
+
+ elsiocb->context2 = NULL;
+ elsiocb->context3 = NULL;
+
+ if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
+ lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
+ } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
+ phba->fc_stat.NoRcvBuf++;
+ /* Not enough posted buffers; Try posting more buffers */
+ if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
+ lpfc_post_buffer(phba, pring, 0, 1);
+ return;
+ }
+
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
+ icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
+ if (icmd->unsli3.rcvsli3.vpi == 0xffff)
+ vport = phba->pport;
+ else {
+ uint16_t vpi = icmd->unsli3.rcvsli3.vpi;
+ vport = lpfc_find_vport_by_vpid(phba, vpi);
+ }
+ }
+ /* If there are no BDEs associated
+ * with this IOCB, there is nothing to do.
+ */
+ if (icmd->ulpBdeCount == 0)
+ return;
+
+ /* type of ELS cmd is first 32bit word
+ * in packet
+ */
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ elsiocb->context2 = bdeBuf1;
+ } else {
+ paddr = getPaddr(icmd->un.cont64[0].addrHigh,
+ icmd->un.cont64[0].addrLow);
+ elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
+ paddr);
}
+ lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
+ /*
+ * The different unsolicited event handlers would tell us
+ * if they are done with "mp" by setting context2 to NULL.
+ */
lpfc_nlp_put(elsiocb->context1);
elsiocb->context1 = NULL;
if (elsiocb->context2) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
+ lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
+ elsiocb->context2 = NULL;
}
-dropit:
- /* check if need to drop received ELS cmd */
- if (drop_cmd == 1) {
+
+	/* RCV_ELS64_CX provides for 2 BDEs - process 2nd if included */
+ if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
+ icmd->ulpBdeCount == 2) {
+ elsiocb->context2 = bdeBuf2;
+ lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
+ /* free mp if we are done with it */
+ if (elsiocb->context2) {
+ lpfc_in_buf_free(phba, elsiocb->context2);
+ elsiocb->context2 = NULL;
+ }
+ }
+}
+
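+/*
+ * Start NameServer login for this vport (and FDMI login if enabled),
+ * allocating the NameServer ndlp if it does not already exist.
+ */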
+void
+lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp, *ndlp_fdmi;
+
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (!ndlp) {
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp) {
+ if (phba->fc_topology == TOPOLOGY_LOOP) {
+ lpfc_disc_start(vport);
+ return;
+ }
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0251 NameServer login: no memory\n",
+ phba->brd_no, vport->vpi);
+ return;
+ }
+ lpfc_nlp_init(vport, ndlp, NameServer_DID);
+ ndlp->nlp_type |= NLP_FABRIC;
+ }
+
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+
+ if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
- "%d:0111 Dropping received ELS cmd "
- "Data: x%x x%x x%x\n", phba->brd_no,
- icmd->ulpStatus, icmd->un.ulpWord[4],
- icmd->ulpTimeout);
- phba->fc_stat.elsRcvDrop++;
+ "%d (%d):0252 Cannot issue NameServer login\n",
+ phba->brd_no, vport->vpi);
+ return;
+ }
+
+ if (phba->cfg_fdmi_on) {
+ ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
+ GFP_KERNEL);
+ if (ndlp_fdmi) {
+ lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
+ ndlp_fdmi->nlp_type |= NLP_FABRIC;
+ ndlp_fdmi->nlp_state =
+ NLP_STE_PLOGI_ISSUE;
+ lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
+ 0);
+ }
+ }
+ return;
+}
+
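+/*
+ * REG_VPI mailbox completion: on failure either give up (vport FAILED) or
+ * retry via unreg_vpi + FDISC; on success continue with the fabric
+ * reglogin (physical port) or NameServer PLOGI (NPIV port).
+ */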
+static void
+lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ MAILBOX_t *mb = &pmb->mb;
+
+ vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+ lpfc_nlp_put(ndlp);
+
+ if (mb->mbxStatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "%d (%d):0915 Register VPI failed: 0x%x\n",
+ phba->brd_no, vport->vpi, mb->mbxStatus);
+
+ switch (mb->mbxStatus) {
+ case 0x11: /* unsupported feature */
+ case 0x9603: /* max_vpi exceeded */
+ /* giving up on vport registration */
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ break;
+ default:
+ /* Try to recover from this error */
+ lpfc_mbx_unreg_vpi(vport);
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ lpfc_initial_fdisc(vport);
+ break;
+ }
+
+ } else {
+ if (vport == phba->pport)
+ lpfc_issue_fabric_reglogin(vport);
+ else
+ lpfc_do_scr_ns_plogi(phba, vport);
}
+ mempool_free(pmb, phba->mbox_mem_pool);
return;
}
+
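+/* Issue a REG_VPI mailbox command for this vport; any failure marks the vport FAILED. */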
+void
+lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp)
+{
+ LPFC_MBOXQ_t *mbox;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
+ mbox->vport = vport;
+ mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
+ if (lpfc_sli_issue_mbox(phba, mbox,
+ MBX_NOWAIT | MBX_STOP_IOCB)
+ == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "%d (%d):0253 Register VPI: Cannot send mbox\n",
+ phba->brd_no, vport->vpi);
+ }
+ } else {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "%d (%d):0254 Register VPI: no memory\n",
+ phba->brd_no, vport->vpi);
+
+ vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+ lpfc_nlp_put(ndlp);
+ }
+}
+
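+/*
+ * FDISC completion handler: retry on error, otherwise record the assigned
+ * NPortID, unregister RPIs/VPI if the DID changed, then register the VPI
+ * or proceed with NameServer login.
+ */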
+static void
+lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_nodelist *np;
+ struct lpfc_nodelist *next_np;
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_iocbq *piocb;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d (%d):0123 FDISC completes. x%x/x%x prevDID: x%x\n",
+ phba->brd_no, vport->vpi,
+ irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
+
+ /* Since all FDISCs are being single threaded, we
+ * must reset the discovery timer for ALL vports
+ * waiting to send FDISC when one completes.
+ */
+ list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
+ lpfc_set_disctmo(piocb->vport);
+ }
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "FDISC cmpl: status:x%x/x%x prevdid:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
+
+ if (irsp->ulpStatus) {
+ /* Check for retry */
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb))
+ goto out;
+
+ /* FDISC failed */
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0124 FDISC failed. (%d/%d)\n",
+ phba->brd_no, vport->vpi,
+ irsp->ulpStatus, irsp->un.ulpWord[4]);
+
+ if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+
+ lpfc_nlp_put(ndlp);
+ /* giving up on FDISC. Cancel discovery timer */
+ lpfc_can_disctmo(vport);
+ } else {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_FABRIC;
+ if (vport->phba->fc_topology == TOPOLOGY_LOOP)
+ vport->fc_flag |= FC_PUBLIC_LOOP;
+ spin_unlock_irq(shost->host_lock);
+
+ vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+ lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
+ if ((vport->fc_prevDID != vport->fc_myDID) &&
+ !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+ /* If our NportID changed, we need to ensure all
+ * remaining NPORTs get unreg_login'ed so we can
+ * issue unreg_vpi.
+ */
+ list_for_each_entry_safe(np, next_np,
+ &vport->fc_nodes, nlp_listp) {
+ if (np->nlp_state != NLP_STE_NPR_NODE
+ || !(np->nlp_flag & NLP_NPR_ADISC))
+ continue;
+ spin_lock_irq(shost->host_lock);
+ np->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_unreg_rpi(vport, np);
+ }
+ lpfc_mbx_unreg_vpi(vport);
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ }
+
+ if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+ lpfc_register_new_vport(phba, vport, ndlp);
+ else
+ lpfc_do_scr_ns_plogi(phba, vport);
+
+ lpfc_nlp_put(ndlp); /* Free Fabric ndlp for vports */
+ }
+
+out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+}
+
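+/* Build and send an FDISC for an NPIV vport via the single-threaded fabric iocb path. */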
+int
+lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ uint8_t retry)
+{
+ struct lpfc_hba *phba = vport->phba;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct serv_parm *sp;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int did = ndlp->nlp_DID;
+ int rc;
+
+ cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
+ ELS_CMD_FDISC);
+ if (!elsiocb) {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0255 Issue FDISC: no IOCB\n",
+ phba->brd_no, vport->vpi);
+ return 1;
+ }
+
+ icmd = &elsiocb->iocb;
+ icmd->un.elsreq64.myID = 0;
+ icmd->un.elsreq64.fl = 1;
+
+	/* For FDISC, let the FDISC rsp set the NPortID for this VPI */
+ icmd->ulpCt_h = 1;
+ icmd->ulpCt_l = 0;
+
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
+ pcmd += sizeof(uint32_t); /* CSP Word 1 */
+ memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
+ sp = (struct serv_parm *) pcmd;
+ /* Setup CSPs accordingly for Fabric */
+ sp->cmn.e_d_tov = 0;
+ sp->cmn.w2.r_a_tov = 0;
+ sp->cls1.classValid = 0;
+ sp->cls2.seqDelivery = 1;
+ sp->cls3.seqDelivery = 1;
+
+ pcmd += sizeof(uint32_t); /* CSP Word 2 */
+ pcmd += sizeof(uint32_t); /* CSP Word 3 */
+ pcmd += sizeof(uint32_t); /* CSP Word 4 */
+ pcmd += sizeof(uint32_t); /* Port Name */
+ memcpy(pcmd, &vport->fc_portname, 8);
+ pcmd += sizeof(uint32_t); /* Node Name */
+ pcmd += sizeof(uint32_t); /* Node Name */
+ memcpy(pcmd, &vport->fc_nodename, 8);
+
+ lpfc_set_disctmo(vport);
+
+ phba->fc_stat.elsXmitFDISC++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue FDISC: did:x%x",
+ did, 0, 0);
+
+ rc = lpfc_issue_fabric_iocb(phba, elsiocb);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0256 Issue FDISC: Cannot send IOCB\n",
+ phba->brd_no, vport->vpi);
+
+ return 1;
+ }
+ lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
+ vport->port_state = LPFC_FDISC;
+ return 0;
+}
+
+static void
+lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ IOCB_t *irsp;
+
+ irsp = &rspiocb->iocb;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "LOGO npiv cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
+
+ lpfc_els_free_iocb(phba, cmdiocb);
+ vport->unreg_vpi_cmpl = VPORT_ERROR;
+}
+
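+/* Send an ELS LOGO carrying this vport's DID and portname when an NPIV vport logs out. */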
+int
+lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+
+ cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
+ ELS_CMD_LOGO);
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
+ pcmd += sizeof(uint32_t);
+
+ /* Fill in LOGO payload */
+ *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
+ pcmd += sizeof(uint32_t);
+ memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue LOGO npiv did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_LOGO_SND;
+ spin_unlock_irq(shost->host_lock);
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_LOGO_SND;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ return 0;
+}
+
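+/* Fabric-block timer callback: post WORKER_FABRIC_BLOCK_TMO and wake the worker thread. */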
+void
+lpfc_fabric_block_timeout(unsigned long ptr)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
+ unsigned long iflags;
+ uint32_t tmo_posted;
+ spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
+ tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
+ if (!tmo_posted)
+ phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
+
+ if (!tmo_posted) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (phba->work_wait)
+ lpfc_worker_wake_up(phba);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
+}
+
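+/*
+ * If no fabric iocb is outstanding, pull the next one off fabric_iocb_list
+ * and issue it on the ELS ring; a send failure completes it with
+ * LOCAL_REJECT/SLI_ABORTED and moves on to the next.
+ */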
+static void
+lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
+{
+ struct lpfc_iocbq *iocb;
+ unsigned long iflags;
+ int ret;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ IOCB_t *cmd;
+
+repeat:
+ iocb = NULL;
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ /* Post any pending iocb to the SLI layer */
+ if (atomic_read(&phba->fabric_iocb_count) == 0) {
+ list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
+ list);
+ if (iocb)
+ atomic_inc(&phba->fabric_iocb_count);
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (iocb) {
+ iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
+ iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
+ iocb->iocb_flag |= LPFC_IO_FABRIC;
+
+ lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
+ "Fabric sched1: ste:x%x",
+ iocb->vport->port_state, 0, 0);
+
+ ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
+
+ if (ret == IOCB_ERROR) {
+ iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
+ iocb->fabric_iocb_cmpl = NULL;
+ iocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ cmd = &iocb->iocb;
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ iocb->iocb_cmpl(phba, iocb, iocb);
+
+ atomic_dec(&phba->fabric_iocb_count);
+ goto repeat;
+ }
+ }
+
+ return;
+}
+
+void
+lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
+{
+ clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+
+ lpfc_resume_fabric_iocbs(phba);
+ return;
+}
+
+static void
+lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
+{
+ int blocked;
+
+ blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+ /* Start a timer to unblock fabric
+ * iocbs after 100ms
+ */
+ if (!blocked)
+ mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
+
+ return;
+}
+
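+/*
+ * Fabric iocb completion: block further fabric iocbs on busy/reject
+ * status, restore the caller's completion routine, and resume any queued
+ * fabric iocbs if not blocked.
+ */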
+static void
+lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct ls_rjt stat;
+
+ if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
+ BUG();
+
+ switch (rspiocb->iocb.ulpStatus) {
+ case IOSTAT_NPORT_RJT:
+ case IOSTAT_FABRIC_RJT:
+ if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+ lpfc_block_fabric_iocbs(phba);
+ }
+ break;
+
+ case IOSTAT_NPORT_BSY:
+ case IOSTAT_FABRIC_BSY:
+ lpfc_block_fabric_iocbs(phba);
+ break;
+
+ case IOSTAT_LS_RJT:
+ stat.un.lsRjtError =
+ be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
+ if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
+ (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
+ lpfc_block_fabric_iocbs(phba);
+ break;
+ }
+
+ if (atomic_read(&phba->fabric_iocb_count) == 0)
+ BUG();
+
+ cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
+ cmdiocb->fabric_iocb_cmpl = NULL;
+ cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
+
+ atomic_dec(&phba->fabric_iocb_count);
+ if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
+ /* Post any pending iocbs to HBA */
+ lpfc_resume_fabric_iocbs(phba);
+ }
+}
+
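+/*
+ * Issue a fabric iocb immediately if none is outstanding and fabric
+ * traffic is not blocked; otherwise queue it on fabric_iocb_list for
+ * later resume.
+ */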
+int
+lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
+{
+ unsigned long iflags;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ int ready;
+ int ret;
+
+ if (atomic_read(&phba->fabric_iocb_count) > 1)
+ BUG();
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
+ !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (ready) {
+ iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
+ iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
+ iocb->iocb_flag |= LPFC_IO_FABRIC;
+
+ lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
+ "Fabric sched2: ste:x%x",
+ iocb->vport->port_state, 0, 0);
+
+ atomic_inc(&phba->fabric_iocb_count);
+ ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
+
+ if (ret == IOCB_ERROR) {
+ iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
+ iocb->fabric_iocb_cmpl = NULL;
+ iocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ atomic_dec(&phba->fabric_iocb_count);
+ }
+ } else {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_add_tail(&iocb->list, &phba->fabric_iocb_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ ret = IOCB_SUCCESS;
+ }
+ return ret;
+}
+
+
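+/* Fail all queued fabric iocbs that belong to this vport. */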
+void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
+{
+ LIST_HEAD(completions);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *tmp_iocb, *piocb;
+ IOCB_t *cmd;
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
+ list) {
+
+ if (piocb->vport != vport)
+ continue;
+
+ list_move_tail(&piocb->list, &completions);
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ while (!list_empty(&completions)) {
+ piocb = list_get_first(&completions, struct lpfc_iocbq, list);
+ list_del_init(&piocb->list);
+
+ cmd = &piocb->iocb;
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (piocb->iocb_cmpl) (phba, piocb, piocb);
+ }
+}
+
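+/* Fail all queued fabric iocbs associated with this ndlp. */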
+void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
+{
+ LIST_HEAD(completions);
+ struct lpfc_hba *phba = ndlp->vport->phba;
+ struct lpfc_iocbq *tmp_iocb, *piocb;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ IOCB_t *cmd;
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
+ list) {
+ if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
+
+ list_move_tail(&piocb->list, &completions);
+ }
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ while (!list_empty(&completions)) {
+ piocb = list_get_first(&completions, struct lpfc_iocbq, list);
+ list_del_init(&piocb->list);
+
+ cmd = &piocb->iocb;
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (piocb->iocb_cmpl) (phba, piocb, piocb);
+ }
+}
+
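+/* Fail every iocb on the HBA's fabric_iocb_list. */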
+void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
+{
+ LIST_HEAD(completions);
+ struct lpfc_iocbq *piocb;
+ IOCB_t *cmd;
+
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(&phba->fabric_iocb_list, &completions);
+ spin_unlock_irq(&phba->hbalock);
+
+ while (!list_empty(&completions)) {
+ piocb = list_get_first(&completions, struct lpfc_iocbq, list);
+ list_del_init(&piocb->list);
+
+ cmd = &piocb->iocb;
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (piocb->iocb_cmpl) (phba, piocb, piocb);
+ }
+}
+
+
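+/* Fail any queued FLOGI (ELS request to Fabric_DID) on the fabric iocb list. */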
+void lpfc_fabric_abort_flogi(struct lpfc_hba *phba)
+{
+ LIST_HEAD(completions);
+ struct lpfc_iocbq *tmp_iocb, *piocb;
+ IOCB_t *cmd;
+ struct lpfc_nodelist *ndlp;
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
+ list) {
+
+ cmd = &piocb->iocb;
+ ndlp = (struct lpfc_nodelist *) piocb->context1;
+ if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
+ ndlp != NULL &&
+ ndlp->nlp_DID == Fabric_DID)
+ list_move_tail(&piocb->list, &completions);
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ while (!list_empty(&completions)) {
+ piocb = list_get_first(&completions, struct lpfc_iocbq, list);
+ list_del_init(&piocb->list);
+
+ cmd = &piocb->iocb;
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (piocb->iocb_cmpl) (phba, piocb, piocb);
+ }
+}
+
+
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 61caa8d379e..f2f4639eab5 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -36,6 +36,8 @@
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
@@ -54,7 +56,7 @@ static uint8_t lpfcAlpaArray[] = {
0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
-static void lpfc_disc_timeout_handler(struct lpfc_hba *);
+static void lpfc_disc_timeout_handler(struct lpfc_vport *);
void
lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -74,14 +76,16 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
return;
}
- phba = ndlp->nlp_phba;
+ phba = ndlp->vport->phba;
+
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
+ "rport terminate: sid:x%x did:x%x flg:x%x",
+ ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
- spin_lock_irq(phba->host->host_lock);
if (ndlp->nlp_sid != NLP_NO_SID) {
lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
}
- spin_unlock_irq(phba->host->host_lock);
return;
}
@@ -94,105 +98,213 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
struct lpfc_rport_data *rdata;
struct lpfc_nodelist * ndlp;
- uint8_t *name;
- int warn_on = 0;
- struct lpfc_hba *phba;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ struct completion devloss_compl;
+ struct lpfc_work_evt *evtp;
rdata = rport->dd_data;
ndlp = rdata->pnode;
if (!ndlp) {
- if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
+ if (rport->scsi_target_id != -1) {
printk(KERN_ERR "Cannot find remote node"
- " for rport in dev_loss_tmo_callbk x%x\n",
- rport->port_id);
+ " for rport in dev_loss_tmo_callbk x%x\n",
+ rport->port_id);
+ }
return;
}
- if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
+ vport = ndlp->vport;
+ phba = vport->phba;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport devlosscb: sid:x%x did:x%x flg:x%x",
+ ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+
+ init_completion(&devloss_compl);
+ evtp = &ndlp->dev_loss_evt;
+
+ if (!list_empty(&evtp->evt_listp))
+ return;
+
+ spin_lock_irq(&phba->hbalock);
+ evtp->evt_arg1 = ndlp;
+ evtp->evt_arg2 = &devloss_compl;
+ evtp->evt = LPFC_EVT_DEV_LOSS;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ if (phba->work_wait)
+ wake_up(phba->work_wait);
+
+ spin_unlock_irq(&phba->hbalock);
+
+ wait_for_completion(&devloss_compl);
+
+ return;
+}
+
+/*
+ * This function is called from the worker thread when dev_loss_tmo
+ * expires.
+ */
+void
+lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_rport_data *rdata;
+ struct fc_rport *rport;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ uint8_t *name;
+ int warn_on = 0;
+
+ rport = ndlp->rport;
+
+ if (!rport)
+ return;
+
+ rdata = rport->dd_data;
+ name = (uint8_t *) &ndlp->nlp_portname;
+ vport = ndlp->vport;
+ phba = vport->phba;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport devlosstmo:did:x%x type:x%x id:x%x",
+ ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
+
+ if (!(vport->load_flag & FC_UNLOADING) &&
+ ndlp->nlp_state == NLP_STE_MAPPED_NODE)
return;
- name = (uint8_t *)&ndlp->nlp_portname;
- phba = ndlp->nlp_phba;
+ if (ndlp->nlp_type & NLP_FABRIC) {
+ int put_node;
+ int put_rport;
- spin_lock_irq(phba->host->host_lock);
+ /* We will clean up these Nodes in linkup */
+ put_node = rdata->pnode != NULL;
+ put_rport = ndlp->rport != NULL;
+ rdata->pnode = NULL;
+ ndlp->rport = NULL;
+ if (put_node)
+ lpfc_nlp_put(ndlp);
+ if (put_rport)
+ put_device(&rport->dev);
+ return;
+ }
if (ndlp->nlp_sid != NLP_NO_SID) {
warn_on = 1;
/* flush the target */
lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
- ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
+ ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
}
- if (phba->fc_flag & FC_UNLOADING)
+ if (vport->load_flag & FC_UNLOADING)
warn_on = 0;
- spin_unlock_irq(phba->host->host_lock);
-
if (warn_on) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0203 Devloss timeout on "
+ "%d (%d):0203 Devloss timeout on "
"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
"NPort x%x Data: x%x x%x x%x\n",
- phba->brd_no,
+ phba->brd_no, vport->vpi,
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0204 Devloss timeout on "
+ "%d (%d):0204 Devloss timeout on "
"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
"NPort x%x Data: x%x x%x x%x\n",
- phba->brd_no,
+ phba->brd_no, vport->vpi,
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
}
- if (!(phba->fc_flag & FC_UNLOADING) &&
+ if (!(vport->load_flag & FC_UNLOADING) &&
!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
!(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
(ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
- lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
+ lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
else {
+ int put_node;
+ int put_rport;
+
+ put_node = rdata->pnode != NULL;
+ put_rport = ndlp->rport != NULL;
rdata->pnode = NULL;
ndlp->rport = NULL;
- lpfc_nlp_put(ndlp);
- put_device(&rport->dev);
+ if (put_node)
+ lpfc_nlp_put(ndlp);
+ if (put_rport)
+ put_device(&rport->dev);
}
+}
+
+void
+lpfc_worker_wake_up(struct lpfc_hba *phba)
+{
+ wake_up(phba->work_wait);
return;
}
static void
-lpfc_work_list_done(struct lpfc_hba * phba)
+lpfc_work_list_done(struct lpfc_hba *phba)
{
struct lpfc_work_evt *evtp = NULL;
struct lpfc_nodelist *ndlp;
+ struct lpfc_vport *vport;
int free_evt;
- spin_lock_irq(phba->host->host_lock);
- while(!list_empty(&phba->work_list)) {
+ spin_lock_irq(&phba->hbalock);
+ while (!list_empty(&phba->work_list)) {
list_remove_head((&phba->work_list), evtp, typeof(*evtp),
evt_listp);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
free_evt = 1;
switch (evtp->evt) {
+ case LPFC_EVT_DEV_LOSS_DELAY:
+ free_evt = 0; /* evt is part of ndlp */
+ ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
+ vport = ndlp->vport;
+ if (!vport)
+ break;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport devlossdly:did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+ if (!(vport->load_flag & FC_UNLOADING) &&
+ !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
+ !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
+ }
+ break;
case LPFC_EVT_ELS_RETRY:
- ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+ ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
lpfc_els_retry_delay_handler(ndlp);
+ free_evt = 0; /* evt is part of ndlp */
+ break;
+ case LPFC_EVT_DEV_LOSS:
+ ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+ lpfc_nlp_get(ndlp);
+ lpfc_dev_loss_tmo_handler(ndlp);
free_evt = 0;
+ complete((struct completion *)(evtp->evt_arg2));
+ lpfc_nlp_put(ndlp);
break;
case LPFC_EVT_ONLINE:
- if (phba->hba_state < LPFC_LINK_DOWN)
- *(int *)(evtp->evt_arg1) = lpfc_online(phba);
+ if (phba->link_state < LPFC_LINK_DOWN)
+ *(int *) (evtp->evt_arg1) = lpfc_online(phba);
else
- *(int *)(evtp->evt_arg1) = 0;
+ *(int *) (evtp->evt_arg1) = 0;
complete((struct completion *)(evtp->evt_arg2));
break;
case LPFC_EVT_OFFLINE_PREP:
- if (phba->hba_state >= LPFC_LINK_DOWN)
+ if (phba->link_state >= LPFC_LINK_DOWN)
lpfc_offline_prep(phba);
*(int *)(evtp->evt_arg1) = 0;
complete((struct completion *)(evtp->evt_arg2));
@@ -218,33 +330,31 @@ lpfc_work_list_done(struct lpfc_hba * phba)
case LPFC_EVT_KILL:
lpfc_offline(phba);
*(int *)(evtp->evt_arg1)
- = (phba->stopped) ? 0 : lpfc_sli_brdkill(phba);
+ = (phba->pport->stopped)
+ ? 0 : lpfc_sli_brdkill(phba);
lpfc_unblock_mgmt_io(phba);
complete((struct completion *)(evtp->evt_arg2));
break;
}
if (free_evt)
kfree(evtp);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
}
-static void
-lpfc_work_done(struct lpfc_hba * phba)
+void
+lpfc_work_done(struct lpfc_hba *phba)
{
struct lpfc_sli_ring *pring;
- int i;
- uint32_t ha_copy;
- uint32_t control;
- uint32_t work_hba_events;
+ uint32_t ha_copy, status, control, work_port_events;
+ struct lpfc_vport *vport;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
ha_copy = phba->work_ha;
phba->work_ha = 0;
- work_hba_events=phba->work_hba_events;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
if (ha_copy & HA_ERATT)
lpfc_handle_eratt(phba);
@@ -255,66 +365,111 @@ lpfc_work_done(struct lpfc_hba * phba)
if (ha_copy & HA_LATT)
lpfc_handle_latt(phba);
- if (work_hba_events & WORKER_DISC_TMO)
- lpfc_disc_timeout_handler(phba);
-
- if (work_hba_events & WORKER_ELS_TMO)
- lpfc_els_timeout_handler(phba);
-
- if (work_hba_events & WORKER_MBOX_TMO)
- lpfc_mbox_timeout_handler(phba);
-
- if (work_hba_events & WORKER_FDMI_TMO)
- lpfc_fdmi_tmo_handler(phba);
-
- spin_lock_irq(phba->host->host_lock);
- phba->work_hba_events &= ~work_hba_events;
- spin_unlock_irq(phba->host->host_lock);
-
- for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
- pring = &phba->sli.ring[i];
- if ((ha_copy & HA_RXATT)
- || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
- if (pring->flag & LPFC_STOP_IOCB_MASK) {
- pring->flag |= LPFC_DEFERRED_RING_EVENT;
- } else {
- lpfc_sli_handle_slow_ring_event(phba, pring,
- (ha_copy &
- HA_RXMASK));
- pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
- }
- /*
- * Turn on Ring interrupts
- */
- spin_lock_irq(phba->host->host_lock);
- control = readl(phba->HCregaddr);
- control |= (HC_R0INT_ENA << i);
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (!scsi_host_get(shost)) {
+ continue;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ work_port_events = vport->work_port_events;
+
+ if (work_port_events & WORKER_DISC_TMO)
+ lpfc_disc_timeout_handler(vport);
+
+ if (work_port_events & WORKER_ELS_TMO)
+ lpfc_els_timeout_handler(vport);
+
+ if (work_port_events & WORKER_HB_TMO)
+ lpfc_hb_timeout_handler(phba);
+
+ if (work_port_events & WORKER_MBOX_TMO)
+ lpfc_mbox_timeout_handler(phba);
+
+ if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
+ lpfc_unblock_fabric_iocbs(phba);
+
+ if (work_port_events & WORKER_FDMI_TMO)
+ lpfc_fdmi_timeout_handler(vport);
+
+ if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
+ lpfc_ramp_down_queue_handler(phba);
+
+ if (work_port_events & WORKER_RAMP_UP_QUEUE)
+ lpfc_ramp_up_queue_handler(phba);
+
+ spin_lock_irq(&vport->work_port_lock);
+ vport->work_port_events &= ~work_port_events;
+ spin_unlock_irq(&vport->work_port_lock);
+ scsi_host_put(shost);
+ spin_lock_irq(&phba->hbalock);
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ pring = &phba->sli.ring[LPFC_ELS_RING];
+ status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
+ status >>= (4*LPFC_ELS_RING);
+ if ((status & HA_RXMASK)
+ || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
+ if (pring->flag & LPFC_STOP_IOCB_MASK) {
+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
+ } else {
+ lpfc_sli_handle_slow_ring_event(phba, pring,
+ (status &
+ HA_RXMASK));
+ pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
+ }
+ /*
+ * Turn on Ring interrupts
+ */
+ spin_lock_irq(&phba->hbalock);
+ control = readl(phba->HCregaddr);
+ if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
+ control |= (HC_R0INT_ENA << LPFC_ELS_RING);
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- spin_unlock_irq(phba->host->host_lock);
}
+ spin_unlock_irq(&phba->hbalock);
}
-
- lpfc_work_list_done (phba);
-
+ lpfc_work_list_done(phba);
}
static int
-check_work_wait_done(struct lpfc_hba *phba) {
+check_work_wait_done(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ struct lpfc_sli_ring *pring;
+ int rc = 0;
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ if (vport->work_port_events) {
+ rc = 1;
+ goto exit;
+ }
+ }
- spin_lock_irq(phba->host->host_lock);
- if (phba->work_ha ||
- phba->work_hba_events ||
- (!list_empty(&phba->work_list)) ||
+ if (phba->work_ha || (!list_empty(&phba->work_list)) ||
kthread_should_stop()) {
- spin_unlock_irq(phba->host->host_lock);
- return 1;
- } else {
- spin_unlock_irq(phba->host->host_lock);
- return 0;
+ rc = 1;
+ goto exit;
}
+
+ pring = &phba->sli.ring[LPFC_ELS_RING];
+ if (pring->flag & LPFC_DEFERRED_RING_EVENT)
+ rc = 1;
+exit:
+ if (rc)
+ phba->work_found++;
+ else
+ phba->work_found = 0;
+
+ spin_unlock_irq(&phba->hbalock);
+ return rc;
}
+
int
lpfc_do_work(void *p)
{
@@ -324,11 +479,13 @@ lpfc_do_work(void *p)
set_user_nice(current, -20);
phba->work_wait = &work_waitq;
+ phba->work_found = 0;
while (1) {
rc = wait_event_interruptible(work_waitq,
- check_work_wait_done(phba));
+ check_work_wait_done(phba));
+
BUG_ON(rc);
if (kthread_should_stop())
@@ -336,6 +493,17 @@ lpfc_do_work(void *p)
lpfc_work_done(phba);
+		/* If there is a lot of slow ring work, like during link up,
+ * check_work_wait_done() may cause this thread to not give
+ * up the CPU for very long periods of time. This may cause
+ * soft lockups or other problems. To avoid these situations
+ * give up the CPU here after LPFC_MAX_WORKER_ITERATION
+ * consecutive iterations.
+ */
+ if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
+ phba->work_found = 0;
+ schedule();
+ }
}
phba->work_wait = NULL;
return 0;
@@ -347,16 +515,17 @@ lpfc_do_work(void *p)
* embedding it in the IOCB.
*/
int
-lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
+lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
uint32_t evt)
{
struct lpfc_work_evt *evtp;
+ unsigned long flags;
/*
* All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
* be queued to worker thread for processing
*/
- evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
+ evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
if (!evtp)
return 0;
@@ -364,136 +533,210 @@ lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
evtp->evt_arg2 = arg2;
evtp->evt = evt;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irqsave(&phba->hbalock, flags);
list_add_tail(&evtp->evt_listp, &phba->work_list);
if (phba->work_wait)
- wake_up(phba->work_wait);
- spin_unlock_irq(phba->host->host_lock);
+ lpfc_worker_wake_up(phba);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return 1;
}
-int
-lpfc_linkdown(struct lpfc_hba *phba)
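+/*
+ * Unregister RPIs as needed and run each node on the vport through
+ * DEVICE_RM or DEVICE_RECOVERY; fabric nodes are left alone unless
+ * 'remove' is set.
+ */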
+void
+lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
- struct lpfc_sli *psli;
- struct lpfc_nodelist *ndlp, *next_ndlp;
- LPFC_MBOXQ_t *mb;
- int rc;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ int rc;
- psli = &phba->sli;
- /* sysfs or selective reset may call this routine to clean up */
- if (phba->hba_state >= LPFC_LINK_DOWN) {
- if (phba->hba_state == LPFC_LINK_DOWN)
- return 0;
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ continue;
- spin_lock_irq(phba->host->host_lock);
- phba->hba_state = LPFC_LINK_DOWN;
- spin_unlock_irq(phba->host->host_lock);
+ if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN)
+ lpfc_unreg_rpi(vport, ndlp);
+
+ /* Leave Fabric nodes alone on link down */
+ if (!remove && ndlp->nlp_type & NLP_FABRIC)
+ continue;
+ rc = lpfc_disc_state_machine(vport, ndlp, NULL,
+ remove
+ ? NLP_EVT_DEVICE_RM
+ : NLP_EVT_DEVICE_RECOVERY);
}
+ if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
+ lpfc_mbx_unreg_vpi(vport);
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ }
+}
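Not part of the patch: lpfc_cleanup_rpis() above hands each node either a DEVICE_RM or a DEVICE_RECOVERY event to the driver's discovery state machine. A minimal, purely illustrative sketch of a (state, event) dispatch table of that general style; the states, events and handlers below are invented for the example and are not the lpfc definitions:

enum node_state { ST_NPR, ST_PLOGI, ST_MAPPED, ST_MAX };
enum node_event { EV_DEVICE_RECOVERY, EV_DEVICE_RM, EV_MAX };

struct node {
	enum node_state state;
};

typedef enum node_state (*node_action_t)(struct node *);

static enum node_state act_recover(struct node *n) { return ST_NPR; }
static enum node_state act_remove(struct node *n)  { return ST_NPR; /* then free */ }

/* One handler per (state, event) pair. */
static node_action_t dispatch[ST_MAX][EV_MAX] = {
	[ST_NPR]    = { act_recover, act_remove },
	[ST_PLOGI]  = { act_recover, act_remove },
	[ST_MAPPED] = { act_recover, act_remove },
};

static int node_state_machine(struct node *n, enum node_event ev)
{
	node_action_t fn = dispatch[n->state][ev];

	if (!fn)
		return -1;
	n->state = fn(n);
	return 0;
}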
+
+static void
+lpfc_linkdown_port(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- fc_host_post_event(phba->host, fc_get_event_number(),
- FCH_EVT_LINKDOWN, 0);
+ fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
- /* Clean up any firmware default rpi's */
- if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
- lpfc_unreg_did(phba, 0xffffffff, mb);
- mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
- if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
- == MBX_NOT_FINISHED) {
- mempool_free( mb, phba->mbox_mem_pool);
- }
- }
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Link Down: state:x%x rtry:x%x flg:x%x",
+ vport->port_state, vport->fc_ns_retry, vport->fc_flag);
/* Cleanup any outstanding RSCN activity */
- lpfc_els_flush_rscn(phba);
+ lpfc_els_flush_rscn(vport);
/* Cleanup any outstanding ELS commands */
- lpfc_els_flush_cmd(phba);
+ lpfc_els_flush_cmd(vport);
- /*
- * Issue a LINK DOWN event to all nodes.
- */
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
- /* free any ndlp's on unused list */
+ lpfc_cleanup_rpis(vport, 0);
+
+ /* free any ndlp's that are in the unused state */
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
- lpfc_drop_node(phba, ndlp);
- else /* otherwise, force node recovery. */
- rc = lpfc_disc_state_machine(phba, ndlp, NULL,
- NLP_EVT_DEVICE_RECOVERY);
+ lpfc_drop_node(vport, ndlp);
+
+ /* Turn off discovery timer if it's running */
+ lpfc_can_disctmo(vport);
+}
+
+int
+lpfc_linkdown(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport = phba->pport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_vport *port_iterator;
+ LPFC_MBOXQ_t *mb;
+
+ if (phba->link_state == LPFC_LINK_DOWN) {
+ return 0;
+ }
+ spin_lock_irq(&phba->hbalock);
+ if (phba->link_state > LPFC_LINK_DOWN) {
+ phba->link_state = LPFC_LINK_DOWN;
+ phba->pport->fc_flag &= ~FC_LBIT;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ list_for_each_entry(port_iterator, &phba->port_list, listentry) {
+
+ /* Issue a LINK DOWN event to all nodes */
+ lpfc_linkdown_port(port_iterator);
+ }
+
+ /* Clean up any firmware default rpi's */
+ mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mb) {
+ lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
+ mb->vport = vport;
+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
+ == MBX_NOT_FINISHED) {
+ mempool_free(mb, phba->mbox_mem_pool);
+ }
}
/* Setup myDID for link up if we are in pt2pt mode */
- if (phba->fc_flag & FC_PT2PT) {
- phba->fc_myDID = 0;
- if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
+ if (phba->pport->fc_flag & FC_PT2PT) {
+ phba->pport->fc_myDID = 0;
+ mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mb) {
lpfc_config_link(phba, mb);
- mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
- if (lpfc_sli_issue_mbox
- (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mb->vport = vport;
+ if (lpfc_sli_issue_mbox(phba, mb,
+ (MBX_NOWAIT | MBX_STOP_IOCB))
== MBX_NOT_FINISHED) {
- mempool_free( mb, phba->mbox_mem_pool);
+ mempool_free(mb, phba->mbox_mem_pool);
}
}
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
+ spin_unlock_irq(shost->host_lock);
}
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_LBIT;
- spin_unlock_irq(phba->host->host_lock);
-
- /* Turn off discovery timer if its running */
- lpfc_can_disctmo(phba);
- /* Must process IOCBs on all rings to handle ABORTed I/Os */
return 0;
}
-static int
-lpfc_linkup(struct lpfc_hba *phba)
+static void
+lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
- struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct lpfc_nodelist *ndlp;
- fc_host_post_event(phba->host, fc_get_event_number(),
- FCH_EVT_LINKUP, 0);
-
- spin_lock_irq(phba->host->host_lock);
- phba->hba_state = LPFC_LINK_UP;
- phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
- FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
- phba->fc_flag |= FC_NDISC_ACTIVE;
- phba->fc_ns_retry = 0;
- spin_unlock_irq(phba->host->host_lock);
-
-
- if (phba->fc_flag & FC_LBIT) {
- list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
- if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) {
- if (ndlp->nlp_type & NLP_FABRIC) {
- /*
- * On Linkup its safe to clean up the
- * ndlp from Fabric connections.
- */
- lpfc_nlp_set_state(phba, ndlp,
- NLP_STE_UNUSED_NODE);
- } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
- /*
- * Fail outstanding IO now since
- * device is marked for PLOGI.
- */
- lpfc_unreg_rpi(phba, ndlp);
- }
- }
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ continue;
+
+ if (ndlp->nlp_type & NLP_FABRIC) {
+ /* On Linkup it's safe to clean up the ndlp
+ * from Fabric connections.
+ */
+ if (ndlp->nlp_DID != Fabric_DID)
+ lpfc_unreg_rpi(vport, ndlp);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
+ /* Fail outstanding IO now since device is
+ * marked for PLOGI.
+ */
+ lpfc_unreg_rpi(vport, ndlp);
}
}
+}
- /* free any ndlp's on unused list */
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
- nlp_listp) {
+static void
+lpfc_linkup_port(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct lpfc_hba *phba = vport->phba;
+
+ if ((vport->load_flag & FC_UNLOADING) != 0)
+ return;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Link Up: top:x%x speed:x%x flg:x%x",
+ phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
+
+ /* If NPIV is not enabled, only bring the physical port up */
+ if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (vport != phba->pport))
+ return;
+
+ fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
+
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
+ FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
+ vport->fc_flag |= FC_NDISC_ACTIVE;
+ vport->fc_ns_retry = 0;
+ spin_unlock_irq(shost->host_lock);
+
+ if (vport->fc_flag & FC_LBIT)
+ lpfc_linkup_cleanup_nodes(vport);
+
+ /* free any ndlp's in unused state */
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+ nlp_listp)
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
+}
+
+static int
+lpfc_linkup(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+
+ phba->link_state = LPFC_LINK_UP;
+
+ /* Unblock fabric iocbs if they are blocked */
+ clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+ del_timer_sync(&phba->fabric_block_timer);
+
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ lpfc_linkup_port(vport);
}
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+ lpfc_issue_clear_la(phba, phba->pport);
return 0;
}
@@ -505,14 +748,14 @@ lpfc_linkup(struct lpfc_hba *phba)
* handed off to the SLI layer.
*/
void
-lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_sli *psli;
- MAILBOX_t *mb;
+ struct lpfc_vport *vport = pmb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_sli *psli = &phba->sli;
+ MAILBOX_t *mb = &pmb->mb;
uint32_t control;
- psli = &phba->sli;
- mb = &pmb->mb;
/* Since we don't do discovery right now, turn these off here */
psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
@@ -522,69 +765,74 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
- "%d:0320 CLEAR_LA mbxStatus error x%x hba "
+ "%d (%d):0320 CLEAR_LA mbxStatus error x%x hba "
"state x%x\n",
- phba->brd_no, mb->mbxStatus, phba->hba_state);
+ phba->brd_no, vport->vpi, mb->mbxStatus,
+ vport->port_state);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
goto out;
}
- if (phba->fc_flag & FC_ABORT_DISCOVERY)
- goto out;
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ phba->link_state = LPFC_HBA_READY;
- phba->num_disc_nodes = 0;
- /* go thru NPR list and issue ELS PLOGIs */
- if (phba->fc_npr_cnt) {
- lpfc_els_disc_plogi(phba);
- }
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag |= LPFC_PROCESS_LA;
+ control = readl(phba->HCregaddr);
+ control |= HC_LAINT_ENA;
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ spin_unlock_irq(&phba->hbalock);
+ return;
+
+ vport->num_disc_nodes = 0;
+ /* go thru NPR nodes and issue ELS PLOGIs */
+ if (vport->fc_npr_cnt)
+ lpfc_els_disc_plogi(vport);
- if (!phba->num_disc_nodes) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
+ if (!vport->num_disc_nodes) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
}
- phba->hba_state = LPFC_HBA_READY;
+ vport->port_state = LPFC_VPORT_READY;
out:
/* Device Discovery completes */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0225 Device Discovery completes\n",
- phba->brd_no);
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0225 Device Discovery completes\n",
+ phba->brd_no, vport->vpi);
- mempool_free( pmb, phba->mbox_mem_pool);
+ mempool_free(pmb, phba->mbox_mem_pool);
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_ABORT_DISCOVERY;
- if (phba->fc_flag & FC_ESTABLISH_LINK) {
- phba->fc_flag &= ~FC_ESTABLISH_LINK;
- }
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK);
+ spin_unlock_irq(shost->host_lock);
del_timer_sync(&phba->fc_estabtmo);
- lpfc_can_disctmo(phba);
+ lpfc_can_disctmo(vport);
/* turn on Link Attention interrupts */
- spin_lock_irq(phba->host->host_lock);
+
+ spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_PROCESS_LA;
control = readl(phba->HCregaddr);
control |= HC_LAINT_ENA;
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return;
}
+
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_sli *psli = &phba->sli;
- int rc;
+ struct lpfc_vport *vport = pmb->vport;
if (pmb->mb.mbxStatus)
goto out;
@@ -592,154 +840,139 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mempool_free(pmb, phba->mbox_mem_pool);
if (phba->fc_topology == TOPOLOGY_LOOP &&
- phba->fc_flag & FC_PUBLIC_LOOP &&
- !(phba->fc_flag & FC_LBIT)) {
+ vport->fc_flag & FC_PUBLIC_LOOP &&
+ !(vport->fc_flag & FC_LBIT)) {
/* Need to wait for FAN - use discovery timer
- * for timeout. hba_state is identically
+ * for timeout. port_state is identically
* LPFC_LOCAL_CFG_LINK while waiting for FAN
*/
- lpfc_set_disctmo(phba);
+ lpfc_set_disctmo(vport);
return;
- }
+ }
- /* Start discovery by sending a FLOGI. hba_state is identically
+ /* Start discovery by sending a FLOGI. port_state is identically
* LPFC_FLOGI while waiting for FLOGI cmpl
*/
- phba->hba_state = LPFC_FLOGI;
- lpfc_set_disctmo(phba);
- lpfc_initial_flogi(phba);
+ if (vport->port_state != LPFC_FLOGI) {
+ vport->port_state = LPFC_FLOGI;
+ lpfc_set_disctmo(vport);
+ lpfc_initial_flogi(vport);
+ }
return;
out:
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
- "%d:0306 CONFIG_LINK mbxStatus error x%x "
+ "%d (%d):0306 CONFIG_LINK mbxStatus error x%x "
"HBA state x%x\n",
- phba->brd_no, pmb->mb.mbxStatus, phba->hba_state);
+ phba->brd_no, vport->vpi, pmb->mb.mbxStatus,
+ vport->port_state);
- lpfc_linkdown(phba);
+ mempool_free(pmb, phba->mbox_mem_pool);
- phba->hba_state = LPFC_HBA_ERROR;
+ lpfc_linkdown(phba);
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0200 CONFIG_LINK bad hba state x%x\n",
- phba->brd_no, phba->hba_state);
+ "%d (%d):0200 CONFIG_LINK bad hba state x%x\n",
+ phba->brd_no, vport->vpi, vport->port_state);
- lpfc_clear_la(phba, pmb);
- pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
- rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(pmb, phba->mbox_mem_pool);
- lpfc_disc_flush_list(phba);
- psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
- phba->hba_state = LPFC_HBA_READY;
- }
+ lpfc_issue_clear_la(phba, vport);
return;
}
static void
-lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_sli *psli = &phba->sli;
MAILBOX_t *mb = &pmb->mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
+ struct lpfc_vport *vport = pmb->vport;
/* Check for error */
if (mb->mbxStatus) {
/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
- "%d:0319 READ_SPARAM mbxStatus error x%x "
+ "%d (%d):0319 READ_SPARAM mbxStatus error x%x "
"hba state x%x>\n",
- phba->brd_no, mb->mbxStatus, phba->hba_state);
+ phba->brd_no, vport->vpi, mb->mbxStatus,
+ vport->port_state);
lpfc_linkdown(phba);
- phba->hba_state = LPFC_HBA_ERROR;
goto out;
}
- memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
+ memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
sizeof (struct serv_parm));
if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
+ u64_to_wwn(phba->cfg_soft_wwnn,
+ vport->fc_sparam.nodeName.u.wwn);
if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
- memcpy((uint8_t *) & phba->fc_nodename,
- (uint8_t *) & phba->fc_sparam.nodeName,
- sizeof (struct lpfc_name));
- memcpy((uint8_t *) & phba->fc_portname,
- (uint8_t *) & phba->fc_sparam.portName,
- sizeof (struct lpfc_name));
+ u64_to_wwn(phba->cfg_soft_wwpn,
+ vport->fc_sparam.portName.u.wwn);
+ memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+ sizeof(vport->fc_nodename));
+ memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+ sizeof(vport->fc_portname));
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
+ memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
+ }
+
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- mempool_free( pmb, phba->mbox_mem_pool);
+ mempool_free(pmb, phba->mbox_mem_pool);
return;
out:
pmb->context1 = NULL;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- if (phba->hba_state != LPFC_CLEAR_LA) {
- lpfc_clear_la(phba, pmb);
- pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
- if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
- == MBX_NOT_FINISHED) {
- mempool_free( pmb, phba->mbox_mem_pool);
- lpfc_disc_flush_list(phba);
- psli->ring[(psli->extra_ring)].flag &=
- ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->fcp_ring)].flag &=
- ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->next_ring)].flag &=
- ~LPFC_STOP_IOCB_EVENT;
- phba->hba_state = LPFC_HBA_READY;
- }
- } else {
- mempool_free( pmb, phba->mbox_mem_pool);
- }
+ lpfc_issue_clear_la(phba, vport);
+ mempool_free(pmb, phba->mbox_mem_pool);
return;
}
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
- int i;
+ struct lpfc_vport *vport = phba->pport;
LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
+ int i;
struct lpfc_dmabuf *mp;
int rc;
sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
switch (la->UlnkSpeed) {
- case LA_1GHZ_LINK:
- phba->fc_linkspeed = LA_1GHZ_LINK;
- break;
- case LA_2GHZ_LINK:
- phba->fc_linkspeed = LA_2GHZ_LINK;
- break;
- case LA_4GHZ_LINK:
- phba->fc_linkspeed = LA_4GHZ_LINK;
- break;
- case LA_8GHZ_LINK:
- phba->fc_linkspeed = LA_8GHZ_LINK;
- break;
- default:
- phba->fc_linkspeed = LA_UNKNW_LINK;
- break;
+ case LA_1GHZ_LINK:
+ phba->fc_linkspeed = LA_1GHZ_LINK;
+ break;
+ case LA_2GHZ_LINK:
+ phba->fc_linkspeed = LA_2GHZ_LINK;
+ break;
+ case LA_4GHZ_LINK:
+ phba->fc_linkspeed = LA_4GHZ_LINK;
+ break;
+ case LA_8GHZ_LINK:
+ phba->fc_linkspeed = LA_8GHZ_LINK;
+ break;
+ default:
+ phba->fc_linkspeed = LA_UNKNW_LINK;
+ break;
}
phba->fc_topology = la->topology;
+ phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
if (phba->fc_topology == TOPOLOGY_LOOP) {
- /* Get Loop Map information */
+ phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
+ /* Get Loop Map information */
if (la->il)
- phba->fc_flag |= FC_LBIT;
+ vport->fc_flag |= FC_LBIT;
- phba->fc_myDID = la->granted_AL_PA;
+ vport->fc_myDID = la->granted_AL_PA;
i = la->un.lilpBde64.tus.f.bdeSize;
if (i == 0) {
@@ -769,29 +1002,35 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
}
/* Link Up Event ALPA map */
lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_LINK_EVENT,
- "%d:1304 Link Up Event "
- "ALPA map Data: x%x "
- "x%x x%x x%x\n",
- phba->brd_no,
- un.pa.wd1, un.pa.wd2,
- un.pa.wd3, un.pa.wd4);
+ KERN_WARNING,
+ LOG_LINK_EVENT,
+ "%d:1304 Link Up Event "
+ "ALPA map Data: x%x "
+ "x%x x%x x%x\n",
+ phba->brd_no,
+ un.pa.wd1, un.pa.wd2,
+ un.pa.wd3, un.pa.wd4);
}
}
}
} else {
- phba->fc_myDID = phba->fc_pref_DID;
- phba->fc_flag |= FC_LBIT;
+ if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
+ if (phba->max_vpi && phba->cfg_npiv_enable &&
+ (phba->sli_rev == 3))
+ phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
+ }
+ vport->fc_myDID = phba->fc_pref_DID;
+ vport->fc_flag |= FC_LBIT;
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
lpfc_linkup(phba);
if (sparam_mbox) {
- lpfc_read_sparam(phba, sparam_mbox);
+ lpfc_read_sparam(phba, sparam_mbox, 0);
+ sparam_mbox->vport = vport;
sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
+ (MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED) {
mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -799,36 +1038,48 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
mempool_free(sparam_mbox, phba->mbox_mem_pool);
if (cfglink_mbox)
mempool_free(cfglink_mbox, phba->mbox_mem_pool);
- return;
+ goto out;
}
}
if (cfglink_mbox) {
- phba->hba_state = LPFC_LOCAL_CFG_LINK;
+ vport->port_state = LPFC_LOCAL_CFG_LINK;
lpfc_config_link(phba, cfglink_mbox);
+ cfglink_mbox->vport = vport;
cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
- if (rc == MBX_NOT_FINISHED)
- mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+ (MBX_NOWAIT | MBX_STOP_IOCB));
+ if (rc != MBX_NOT_FINISHED)
+ return;
+ mempool_free(cfglink_mbox, phba->mbox_mem_pool);
}
+out:
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "%d (%d):0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
+ phba->brd_no, vport->vpi,
+ vport->port_state, sparam_mbox, cfglink_mbox);
+
+ lpfc_issue_clear_la(phba, vport);
+ return;
}
static void
-lpfc_mbx_issue_link_down(struct lpfc_hba *phba) {
+lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
+{
uint32_t control;
struct lpfc_sli *psli = &phba->sli;
lpfc_linkdown(phba);
/* turn on Link Attention interrupts - no CLEAR_LA needed */
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_PROCESS_LA;
control = readl(phba->HCregaddr);
control |= HC_LAINT_ENA;
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
}
/*
@@ -838,22 +1089,21 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba) {
* handed off to the SLI layer.
*/
void
-lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
+ struct lpfc_vport *vport = pmb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
READ_LA_VAR *la;
MAILBOX_t *mb = &pmb->mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
/* Check for error */
if (mb->mbxStatus) {
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_LINK_EVENT,
+ lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"%d:1307 READ_LA mbox error x%x state x%x\n",
- phba->brd_no,
- mb->mbxStatus, phba->hba_state);
+ phba->brd_no, mb->mbxStatus, vport->port_state);
lpfc_mbx_issue_link_down(phba);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
goto lpfc_mbx_cmpl_read_la_free_mbuf;
}
@@ -861,27 +1111,26 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
memcpy(&phba->alpa_map[0], mp->virt, 128);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
if (la->pb)
- phba->fc_flag |= FC_BYPASSED_MODE;
+ vport->fc_flag |= FC_BYPASSED_MODE;
else
- phba->fc_flag &= ~FC_BYPASSED_MODE;
- spin_unlock_irq(phba->host->host_lock);
+ vport->fc_flag &= ~FC_BYPASSED_MODE;
+ spin_unlock_irq(shost->host_lock);
if (((phba->fc_eventTag + 1) < la->eventTag) ||
- (phba->fc_eventTag == la->eventTag)) {
+ (phba->fc_eventTag == la->eventTag)) {
phba->fc_stat.LinkMultiEvent++;
- if (la->attType == AT_LINK_UP) {
+ if (la->attType == AT_LINK_UP)
if (phba->fc_eventTag != 0)
lpfc_linkdown(phba);
- }
}
phba->fc_eventTag = la->eventTag;
if (la->attType == AT_LINK_UP) {
phba->fc_stat.LinkUp++;
- if (phba->fc_flag & FC_LOOPBACK_MODE) {
+ if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"%d:1306 Link Up Event in loop back mode "
"x%x received Data: x%x x%x x%x x%x\n",
@@ -903,7 +1152,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
"%d:1305 Link Down Event x%x received "
"Data: x%x x%x x%x\n",
phba->brd_no, la->eventTag, phba->fc_eventTag,
- phba->hba_state, phba->fc_flag);
+ phba->pport->port_state, vport->fc_flag);
lpfc_mbx_issue_link_down(phba);
}
@@ -921,31 +1170,115 @@ lpfc_mbx_cmpl_read_la_free_mbuf:
* handed off to the SLI layer.
*/
void
-lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_sli *psli;
- MAILBOX_t *mb;
- struct lpfc_dmabuf *mp;
- struct lpfc_nodelist *ndlp;
-
- psli = &phba->sli;
- mb = &pmb->mb;
-
- ndlp = (struct lpfc_nodelist *) pmb->context2;
- mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_vport *vport = pmb->vport;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
pmb->context1 = NULL;
/* Good status, call state machine */
- lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
+ lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- mempool_free( pmb, phba->mbox_mem_pool);
+ mempool_free(pmb, phba->mbox_mem_pool);
lpfc_nlp_put(ndlp);
return;
}
+static void
+lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb = &pmb->mb;
+ struct lpfc_vport *vport = pmb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ switch (mb->mbxStatus) {
+ case 0x0011:
+ case 0x0020:
+ case 0x9700:
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d (%d):0911 cmpl_unreg_vpi, "
+ "mb status = 0x%x\n",
+ phba->brd_no, vport->vpi, mb->mbxStatus);
+ break;
+ }
+ vport->unreg_vpi_cmpl = VPORT_OK;
+ mempool_free(pmb, phba->mbox_mem_pool);
+ /*
+ * This shost reference might have been taken at the beginning of
+ * lpfc_vport_delete()
+ */
+ if (vport->load_flag & FC_UNLOADING)
+ scsi_host_put(shost);
+}
+
+void
+lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return;
+
+ lpfc_unreg_vpi(phba, vport->vpi, mbox);
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
+ rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
+ "%d (%d):1800 Could not issue unreg_vpi\n",
+ phba->brd_no, vport->vpi);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ vport->unreg_vpi_cmpl = VPORT_ERROR;
+ }
+}
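Not part of the patch: lpfc_mbx_unreg_vpi() above follows the driver's recurring ownership rule for asynchronous mailbox commands: on a successful non-blocking submit the completion handler later frees the command, and only a failed submit is freed by the caller. A hypothetical, self-contained sketch of that ownership pattern outside any real API (the "hardware" stub completes the command immediately):

#include <stdlib.h>

struct cmd;
typedef void (*cmd_done_t)(struct cmd *);

struct cmd {
	cmd_done_t done;	/* completion callback, runs later */
	int payload;
};

static void cmd_done(struct cmd *c)
{
	/* ... inspect status ... */
	free(c);		/* success path: the callback owns and frees */
}

/* Stub "hardware": completes the command immediately, returns 0 on queue. */
static int hw_submit(struct cmd *c)
{
	c->done(c);
	return 0;
}

static int issue_async_cmd(int payload)
{
	struct cmd *c = malloc(sizeof(*c));

	if (!c)
		return -1;
	c->payload = payload;
	c->done = cmd_done;

	if (hw_submit(c) != 0) {
		free(c);	/* failed submit: the caller still owns it */
		return -1;
	}
	return 0;		/* queued; cmd_done() will free it */
}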
+
+static void
+lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ MAILBOX_t *mb = &pmb->mb;
+
+ switch (mb->mbxStatus) {
+ case 0x0011:
+ case 0x9601:
+ case 0x9602:
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d (%d):0912 cmpl_reg_vpi, mb status = 0x%x\n",
+ phba->brd_no, vport->vpi, mb->mbxStatus);
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
+ vport->fc_myDID = 0;
+ goto out;
+ }
+
+ vport->num_disc_nodes = 0;
+ /* go thru NPR list and issue ELS PLOGIs */
+ if (vport->fc_npr_cnt)
+ lpfc_els_disc_plogi(vport);
+
+ if (!vport->num_disc_nodes) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ }
+ vport->port_state = LPFC_VPORT_READY;
+
+out:
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+}
+
/*
* This routine handles processing a Fabric REG_LOGIN mailbox
* command upon completion. It is setup in the LPFC_MBOXQ
@@ -953,20 +1286,14 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
* handed off to the SLI layer.
*/
void
-lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_sli *psli;
- MAILBOX_t *mb;
- struct lpfc_dmabuf *mp;
+ struct lpfc_vport *vport = pmb->vport;
+ struct lpfc_vport *next_vport;
+ MAILBOX_t *mb = &pmb->mb;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
struct lpfc_nodelist *ndlp;
- struct lpfc_nodelist *ndlp_fdmi;
-
-
- psli = &phba->sli;
- mb = &pmb->mb;
-
ndlp = (struct lpfc_nodelist *) pmb->context2;
- mp = (struct lpfc_dmabuf *) (pmb->context1);
pmb->context1 = NULL;
pmb->context2 = NULL;
@@ -977,60 +1304,46 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_nlp_put(ndlp);
- /* FLOGI failed, so just use loop map to make discovery list */
- lpfc_disc_list_loopmap(phba);
+ if (phba->fc_topology == TOPOLOGY_LOOP) {
+ /* FLOGI failed, use loop map to make discovery list */
+ lpfc_disc_list_loopmap(vport);
+
+ /* Start discovery */
+ lpfc_disc_start(vport);
+ return;
+ }
+
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "%d (%d):0258 Register Fabric login error: 0x%x\n",
+ phba->brd_no, vport->vpi, mb->mbxStatus);
- /* Start discovery */
- lpfc_disc_start(phba);
return;
}
ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_type |= NLP_FABRIC;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */
- if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
- /* This NPort has been assigned an NPort_ID by the fabric as a
- * result of the completed fabric login. Issue a State Change
- * Registration (SCR) ELS request to the fabric controller
- * (SCR_DID) so that this NPort gets RSCN events from the
- * fabric.
- */
- lpfc_issue_els_scr(phba, SCR_DID, 0);
-
- ndlp = lpfc_findnode_did(phba, NameServer_DID);
- if (!ndlp) {
- /* Allocate a new node instance. If the pool is empty,
- * start the discovery process and skip the Nameserver
- * login process. This is attempted again later on.
- * Otherwise, issue a Port Login (PLOGI) to NameServer.
- */
- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
- if (!ndlp) {
- lpfc_disc_start(phba);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
- return;
- } else {
- lpfc_nlp_init(phba, ndlp, NameServer_DID);
- ndlp->nlp_type |= NLP_FABRIC;
- }
- }
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(phba, NameServer_DID, 0);
- if (phba->cfg_fdmi_on) {
- ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
- GFP_KERNEL);
- if (ndlp_fdmi) {
- lpfc_nlp_init(phba, ndlp_fdmi, FDMI_DID);
- ndlp_fdmi->nlp_type |= NLP_FABRIC;
- ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE;
- lpfc_issue_els_plogi(phba, FDMI_DID, 0);
+ if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
+ list_for_each_entry(next_vport, &phba->port_list, listentry) {
+ if (next_vport->port_type == LPFC_PHYSICAL_PORT)
+ continue;
+
+ if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
+ lpfc_initial_fdisc(next_vport);
+ else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ lpfc_vport_set_state(vport,
+ FC_VPORT_NO_FABRIC_SUPP);
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0259 No NPIV Fabric "
+ "support\n",
+ phba->brd_no, vport->vpi);
}
}
+ lpfc_do_scr_ns_plogi(phba, vport);
}
lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -1046,32 +1359,36 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
* handed off to the SLI layer.
*/
void
-lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_sli *psli;
- MAILBOX_t *mb;
- struct lpfc_dmabuf *mp;
- struct lpfc_nodelist *ndlp;
-
- psli = &phba->sli;
- mb = &pmb->mb;
-
- ndlp = (struct lpfc_nodelist *) pmb->context2;
- mp = (struct lpfc_dmabuf *) (pmb->context1);
+ MAILBOX_t *mb = &pmb->mb;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ struct lpfc_vport *vport = pmb->vport;
if (mb->mbxStatus) {
+out:
lpfc_nlp_put(ndlp);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
- /* RegLogin failed, so just use loop map to make discovery
- list */
- lpfc_disc_list_loopmap(phba);
+ if (phba->fc_topology == TOPOLOGY_LOOP) {
+ /*
+ * RegLogin failed, use loop map to make discovery
+ * list
+ */
+ lpfc_disc_list_loopmap(vport);
- /* Start discovery */
- lpfc_disc_start(phba);
+ /* Start discovery */
+ lpfc_disc_start(vport);
+ return;
+ }
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0260 Register NameServer error: 0x%x\n",
+ phba->brd_no, vport->vpi, mb->mbxStatus);
return;
}
@@ -1079,37 +1396,43 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_type |= NLP_FABRIC;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- if (phba->hba_state < LPFC_HBA_READY) {
- /* Link up discovery requires Fabrib registration. */
- lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
- lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
- lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
- lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFF_ID);
+ if (vport->port_state < LPFC_VPORT_READY) {
+ /* Link up discovery requires Fabric registration. */
+ lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
+ lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
+ lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
+ lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
+ lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
+
+ /* Issue SCR just before NameServer GID_FT Query */
+ lpfc_issue_els_scr(vport, SCR_DID, 0);
}
- phba->fc_ns_retry = 0;
+ vport->fc_ns_retry = 0;
/* Good status, issue CT Request to NameServer */
- if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
+ if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
/* Cannot issue NameServer Query, so finish up discovery */
- lpfc_disc_start(phba);
+ goto out;
}
lpfc_nlp_put(ndlp);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- mempool_free( pmb, phba->mbox_mem_pool);
+ mempool_free(pmb, phba->mbox_mem_pool);
return;
}
static void
-lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- struct fc_rport *rport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct fc_rport *rport;
struct lpfc_rport_data *rdata;
struct fc_rport_identifiers rport_ids;
+ struct lpfc_hba *phba = vport->phba;
/* Remote port has reappeared. Re-register w/ FC transport */
rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
@@ -1125,10 +1448,15 @@ lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
* registered the port.
*/
if (ndlp->rport && ndlp->rport->dd_data &&
- *(struct lpfc_rport_data **) ndlp->rport->dd_data) {
+ ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
lpfc_nlp_put(ndlp);
}
- ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport add: did:x%x flg:x%x type x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+
+ ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
if (!rport || !get_device(&rport->dev)) {
dev_printk(KERN_WARNING, &phba->pcidev->dev,
"Warning: fc_remote_port_add failed\n");
@@ -1151,25 +1479,20 @@ lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
fc_remote_port_rolechg(rport, rport_ids.roles);
if ((rport->scsi_target_id != -1) &&
- (rport->scsi_target_id < LPFC_MAX_TARGET)) {
+ (rport->scsi_target_id < LPFC_MAX_TARGET)) {
ndlp->nlp_sid = rport->scsi_target_id;
}
-
return;
}
static void
-lpfc_unregister_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
struct fc_rport *rport = ndlp->rport;
- struct lpfc_rport_data *rdata = rport->dd_data;
- if (rport->scsi_target_id == -1) {
- ndlp->rport = NULL;
- rdata->pnode = NULL;
- lpfc_nlp_put(ndlp);
- put_device(&rport->dev);
- }
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
+ "rport delete: did:x%x flg:x%x type x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
fc_remote_port_delete(rport);
@@ -1177,42 +1500,46 @@ lpfc_unregister_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
}
static void
-lpfc_nlp_counters(struct lpfc_hba *phba, int state, int count)
+lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
- spin_lock_irq(phba->host->host_lock);
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ spin_lock_irq(shost->host_lock);
switch (state) {
case NLP_STE_UNUSED_NODE:
- phba->fc_unused_cnt += count;
+ vport->fc_unused_cnt += count;
break;
case NLP_STE_PLOGI_ISSUE:
- phba->fc_plogi_cnt += count;
+ vport->fc_plogi_cnt += count;
break;
case NLP_STE_ADISC_ISSUE:
- phba->fc_adisc_cnt += count;
+ vport->fc_adisc_cnt += count;
break;
case NLP_STE_REG_LOGIN_ISSUE:
- phba->fc_reglogin_cnt += count;
+ vport->fc_reglogin_cnt += count;
break;
case NLP_STE_PRLI_ISSUE:
- phba->fc_prli_cnt += count;
+ vport->fc_prli_cnt += count;
break;
case NLP_STE_UNMAPPED_NODE:
- phba->fc_unmap_cnt += count;
+ vport->fc_unmap_cnt += count;
break;
case NLP_STE_MAPPED_NODE:
- phba->fc_map_cnt += count;
+ vport->fc_map_cnt += count;
break;
case NLP_STE_NPR_NODE:
- phba->fc_npr_cnt += count;
+ vport->fc_npr_cnt += count;
break;
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
}
static void
-lpfc_nlp_state_cleanup(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int old_state, int new_state)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
if (new_state == NLP_STE_UNMAPPED_NODE) {
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
@@ -1226,35 +1553,34 @@ lpfc_nlp_state_cleanup(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/* Transport interface */
if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
old_state == NLP_STE_UNMAPPED_NODE)) {
- phba->nport_event_cnt++;
- lpfc_unregister_remote_port(phba, ndlp);
+ vport->phba->nport_event_cnt++;
+ lpfc_unregister_remote_port(ndlp);
}
if (new_state == NLP_STE_MAPPED_NODE ||
new_state == NLP_STE_UNMAPPED_NODE) {
- phba->nport_event_cnt++;
- /*
- * Tell the fc transport about the port, if we haven't
- * already. If we have, and it's a scsi entity, be
- * sure to unblock any attached scsi devices
- */
- lpfc_register_remote_port(phba, ndlp);
+ vport->phba->nport_event_cnt++;
+ /*
+ * Tell the fc transport about the port, if we haven't
+ * already. If we have, and it's a scsi entity, be
+ * sure to unblock any attached scsi devices
+ */
+ lpfc_register_remote_port(vport, ndlp);
}
-
- /*
- * if we added to Mapped list, but the remote port
- * registration failed or assigned a target id outside
- * our presentable range - move the node to the
- * Unmapped List
- */
+ /*
+ * if we added to Mapped list, but the remote port
+ * registration failed or assigned a target id outside
+ * our presentable range - move the node to the
+ * Unmapped List
+ */
if (new_state == NLP_STE_MAPPED_NODE &&
(!ndlp->rport ||
ndlp->rport->scsi_target_id == -1 ||
ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
- spin_unlock_irq(phba->host->host_lock);
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
}
@@ -1280,61 +1606,74 @@ lpfc_nlp_state_name(char *buffer, size_t size, int state)
}
void
-lpfc_nlp_set_state(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int state)
+lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ int state)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
int old_state = ndlp->nlp_state;
char name1[16], name2[16];
- lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
- "%d:0904 NPort state transition x%06x, %s -> %s\n",
- phba->brd_no,
+ lpfc_printf_log(vport->phba, KERN_INFO, LOG_NODE,
+ "%d (%d):0904 NPort state transition x%06x, %s -> %s\n",
+ vport->phba->brd_no, vport->vpi,
ndlp->nlp_DID,
lpfc_nlp_state_name(name1, sizeof(name1), old_state),
lpfc_nlp_state_name(name2, sizeof(name2), state));
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+ "node statechg did:x%x old:%d ste:%d",
+ ndlp->nlp_DID, old_state, state);
+
if (old_state == NLP_STE_NPR_NODE &&
(ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
state != NLP_STE_NPR_NODE)
- lpfc_cancel_retry_delay_tmo(phba, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
if (old_state == NLP_STE_UNMAPPED_NODE) {
ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
ndlp->nlp_type &= ~NLP_FC_NODE;
}
if (list_empty(&ndlp->nlp_listp)) {
- spin_lock_irq(phba->host->host_lock);
- list_add_tail(&ndlp->nlp_listp, &phba->fc_nodes);
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
+ spin_unlock_irq(shost->host_lock);
} else if (old_state)
- lpfc_nlp_counters(phba, old_state, -1);
+ lpfc_nlp_counters(vport, old_state, -1);
ndlp->nlp_state = state;
- lpfc_nlp_counters(phba, state, 1);
- lpfc_nlp_state_cleanup(phba, ndlp, old_state, state);
+ lpfc_nlp_counters(vport, state, 1);
+ lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
void
-lpfc_dequeue_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
- lpfc_cancel_retry_delay_tmo(phba, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
- lpfc_nlp_counters(phba, ndlp->nlp_state, -1);
- spin_lock_irq(phba->host->host_lock);
+ lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
+ spin_lock_irq(shost->host_lock);
list_del_init(&ndlp->nlp_listp);
- spin_unlock_irq(phba->host->host_lock);
- lpfc_nlp_state_cleanup(phba, ndlp, ndlp->nlp_state, 0);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
+ NLP_STE_UNUSED_NODE);
}
void
-lpfc_drop_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
- lpfc_cancel_retry_delay_tmo(phba, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
- lpfc_nlp_counters(phba, ndlp->nlp_state, -1);
- spin_lock_irq(phba->host->host_lock);
+ lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
+ spin_lock_irq(shost->host_lock);
list_del_init(&ndlp->nlp_listp);
- spin_unlock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
+ spin_unlock_irq(shost->host_lock);
lpfc_nlp_put(ndlp);
}
@@ -1342,11 +1681,13 @@ lpfc_drop_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
* Start / ReStart rescue timer for Discovery / RSCN handling
*/
void
-lpfc_set_disctmo(struct lpfc_hba * phba)
+lpfc_set_disctmo(struct lpfc_vport *vport)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
uint32_t tmo;
- if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
+ if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
/* For FAN, timeout should be greater then edtov */
tmo = (((phba->fc_edtov + 999) / 1000) + 1);
} else {
@@ -1356,18 +1697,25 @@ lpfc_set_disctmo(struct lpfc_hba * phba)
tmo = ((phba->fc_ratov * 3) + 3);
}
- mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_DISC_TMO;
- spin_unlock_irq(phba->host->host_lock);
+
+ if (!timer_pending(&vport->fc_disctmo)) {
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "set disc timer: tmo:x%x state:x%x flg:x%x",
+ tmo, vport->port_state, vport->fc_flag);
+ }
+
+ mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_DISC_TMO;
+ spin_unlock_irq(shost->host_lock);
/* Start Discovery Timer state <hba_state> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0247 Start Discovery Timer state x%x "
+ "%d (%d):0247 Start Discovery Timer state x%x "
"Data: x%x x%lx x%x x%x\n",
- phba->brd_no,
- phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
- phba->fc_plogi_cnt, phba->fc_adisc_cnt);
+ phba->brd_no, vport->vpi, vport->port_state, tmo,
+ (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
+ vport->fc_adisc_cnt);
return;
}
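Not part of the patch: lpfc_set_disctmo() above and lpfc_can_disctmo() below show the usual arm/cancel pairing for a rescue timer: mod_timer() with a jiffies-relative expiry plus a flag set under the lock, and del_timer_sync() with the flag cleared before any queued work is cancelled. A small sketch of that pairing, assuming the timer and lock were initialised elsewhere (all names hypothetical):

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>

#define DEMO_DISC_TMO_ACTIVE	0x1

struct demo_disc {
	struct timer_list tmo;	/* initialised elsewhere, e.g. setup_timer() */
	spinlock_t lock;
	unsigned long flags;
};

static void demo_start_disc_timer(struct demo_disc *d, unsigned int seconds)
{
	mod_timer(&d->tmo, jiffies + HZ * seconds);
	spin_lock_irq(&d->lock);
	d->flags |= DEMO_DISC_TMO_ACTIVE;
	spin_unlock_irq(&d->lock);
}

static void demo_cancel_disc_timer(struct demo_disc *d)
{
	unsigned long irqflags;

	spin_lock_irqsave(&d->lock, irqflags);
	if (!(d->flags & DEMO_DISC_TMO_ACTIVE)) {
		spin_unlock_irqrestore(&d->lock, irqflags);
		return;
	}
	d->flags &= ~DEMO_DISC_TMO_ACTIVE;
	spin_unlock_irqrestore(&d->lock, irqflags);

	del_timer_sync(&d->tmo);	/* waits for a running handler to finish */
}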
@@ -1376,23 +1724,34 @@ lpfc_set_disctmo(struct lpfc_hba * phba)
* Cancel rescue timer for Discovery / RSCN handling
*/
int
-lpfc_can_disctmo(struct lpfc_hba * phba)
+lpfc_can_disctmo(struct lpfc_vport *vport)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ unsigned long iflags;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "can disc timer: state:x%x rtry:x%x flg:x%x",
+ vport->port_state, vport->fc_ns_retry, vport->fc_flag);
+
/* Turn off discovery timer if its running */
- if (phba->fc_flag & FC_DISC_TMO) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_DISC_TMO;
- spin_unlock_irq(phba->host->host_lock);
- del_timer_sync(&phba->fc_disctmo);
- phba->work_hba_events &= ~WORKER_DISC_TMO;
+ if (vport->fc_flag & FC_DISC_TMO) {
+ spin_lock_irqsave(shost->host_lock, iflags);
+ vport->fc_flag &= ~FC_DISC_TMO;
+ spin_unlock_irqrestore(shost->host_lock, iflags);
+ del_timer_sync(&vport->fc_disctmo);
+ spin_lock_irqsave(&vport->work_port_lock, iflags);
+ vport->work_port_events &= ~WORKER_DISC_TMO;
+ spin_unlock_irqrestore(&vport->work_port_lock, iflags);
}
/* Cancel Discovery Timer state <hba_state> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0248 Cancel Discovery Timer state x%x "
+ "%d (%d):0248 Cancel Discovery Timer state x%x "
"Data: x%x x%x x%x\n",
- phba->brd_no, phba->hba_state, phba->fc_flag,
- phba->fc_plogi_cnt, phba->fc_adisc_cnt);
+ phba->brd_no, vport->vpi, vport->port_state,
+ vport->fc_flag, vport->fc_plogi_cnt,
+ vport->fc_adisc_cnt);
return 0;
}
@@ -1402,15 +1761,18 @@ lpfc_can_disctmo(struct lpfc_hba * phba)
* Return true if iocb matches the specified nport
*/
int
-lpfc_check_sli_ndlp(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring,
- struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
+lpfc_check_sli_ndlp(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *iocb,
+ struct lpfc_nodelist *ndlp)
{
- struct lpfc_sli *psli;
- IOCB_t *icmd;
+ struct lpfc_sli *psli = &phba->sli;
+ IOCB_t *icmd = &iocb->iocb;
+ struct lpfc_vport *vport = ndlp->vport;
+
+ if (iocb->vport != vport)
+ return 0;
- psli = &phba->sli;
- icmd = &iocb->iocb;
if (pring->ringno == LPFC_ELS_RING) {
switch (icmd->ulpCommand) {
case CMD_GEN_REQUEST64_CR:
@@ -1428,7 +1790,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
} else if (pring->ringno == psli->fcp_ring) {
/* Skip match check if waiting to relogin to FCP target */
if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
- (ndlp->nlp_flag & NLP_DELAY_TMO)) {
+ (ndlp->nlp_flag & NLP_DELAY_TMO)) {
return 0;
}
if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
@@ -1445,7 +1807,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
* associated with nlp_rpi in the LPFC_NODELIST entry.
*/
static int
-lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
struct lpfc_sli *psli;
@@ -1454,6 +1816,8 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
IOCB_t *icmd;
uint32_t rpi, i;
+ lpfc_fabric_abort_nport(ndlp);
+
/*
* Everything that matches on txcmplq will be returned
* by firmware with a no rpi error.
@@ -1465,15 +1829,15 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
- list) {
+ list) {
/*
* Check to see if iocb matches the nport we are
* looking for
*/
- if ((lpfc_check_sli_ndlp
- (phba, pring, iocb, ndlp))) {
+ if ((lpfc_check_sli_ndlp(phba, pring, iocb,
+ ndlp))) {
/* It matches, so deque and call compl
with an error */
list_move_tail(&iocb->list,
@@ -1481,22 +1845,22 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
pring->txq_cnt--;
}
}
- spin_unlock_irq(phba->host->host_lock);
-
+ spin_unlock_irq(&phba->hbalock);
}
}
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
- list_del(&iocb->list);
+ list_del_init(&iocb->list);
- if (iocb->iocb_cmpl) {
+ if (!iocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, iocb);
+ else {
icmd = &iocb->iocb;
icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
- (iocb->iocb_cmpl) (phba, iocb, iocb);
- } else
- lpfc_sli_release_iocbq(phba, iocb);
+ (iocb->iocb_cmpl)(phba, iocb, iocb);
+ }
}
return 0;
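Not part of the patch: lpfc_no_rpi() above uses a common SLI-layer idiom: matching requests are moved onto a private list while the ring lock is held, and their completion handlers run only after the lock is dropped. A minimal, hypothetical sketch of that two-phase flush (names invented for the example):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_req {
	struct list_head list;
	int tag;
	void (*complete)(struct demo_req *req, int status);
};

struct demo_ring {
	/* lock and queue initialised elsewhere */
	spinlock_t lock;
	struct list_head queue;
};

/* Fail every queued request that matches @tag. */
static void demo_flush_tag(struct demo_ring *ring, int tag, int status)
{
	LIST_HEAD(completions);
	struct demo_req *req, *next;

	/* Phase 1: detach matching requests while holding the lock. */
	spin_lock_irq(&ring->lock);
	list_for_each_entry_safe(req, next, &ring->queue, list) {
		if (req->tag == tag)
			list_move_tail(&req->list, &completions);
	}
	spin_unlock_irq(&ring->lock);

	/* Phase 2: run completion callbacks without the lock held. */
	while (!list_empty(&completions)) {
		req = list_entry(completions.next, struct demo_req, list);
		list_del_init(&req->list);
		req->complete(req, status);
	}
}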
@@ -1512,19 +1876,22 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
* we are waiting to PLOGI back to the remote NPort.
*/
int
-lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- LPFC_MBOXQ_t *mbox;
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
int rc;
if (ndlp->nlp_rpi) {
- if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
- lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
- mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
- rc = lpfc_sli_issue_mbox
- (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, mbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED)
- mempool_free( mbox, phba->mbox_mem_pool);
+ mempool_free(mbox, phba->mbox_mem_pool);
}
lpfc_no_rpi(phba, ndlp);
ndlp->nlp_rpi = 0;
@@ -1533,25 +1900,70 @@ lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
return 0;
}
+void
+lpfc_unreg_all_rpis(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, mbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB));
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ }
+}
+
+void
+lpfc_unreg_default_rpis(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, mbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB));
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
+ "%d (%d):1815 Could not issue "
+ "unreg_did (default rpis)\n",
+ phba->brd_no, vport->vpi);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ }
+}
+
/*
* Free resources associated with LPFC_NODELIST entry
* so it can be freed.
*/
static int
-lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- LPFC_MBOXQ_t *mb;
- LPFC_MBOXQ_t *nextmb;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mb, *nextmb;
struct lpfc_dmabuf *mp;
/* Cleanup node for NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
- "%d:0900 Cleanup node for NPort x%x "
+ "%d (%d):0900 Cleanup node for NPort x%x "
"Data: x%x x%x x%x\n",
- phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
+ phba->brd_no, vport->vpi, ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
- lpfc_dequeue_node(phba, ndlp);
+ lpfc_dequeue_node(vport, ndlp);
/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
if ((mb = phba->sli.mbox_active)) {
@@ -1562,13 +1974,13 @@ lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
}
}
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
- (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+ (ndlp == (struct lpfc_nodelist *) mb->context2)) {
mp = (struct lpfc_dmabuf *) (mb->context1);
if (mp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ __lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
list_del(&mb->list);
@@ -1576,20 +1988,27 @@ lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
lpfc_nlp_put(ndlp);
}
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
lpfc_els_abort(phba,ndlp);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_DELAY_TMO;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
ndlp->nlp_last_elscmd = 0;
del_timer_sync(&ndlp->nlp_delayfunc);
if (!list_empty(&ndlp->els_retry_evt.evt_listp))
list_del_init(&ndlp->els_retry_evt.evt_listp);
+ if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
+ list_del_init(&ndlp->dev_loss_evt.evt_listp);
+
- lpfc_unreg_rpi(phba, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
return 0;
}
@@ -1600,18 +2019,22 @@ lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
* machine, defer the free till we reach the end of the state machine.
*/
static void
-lpfc_nlp_remove(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct lpfc_rport_data *rdata;
if (ndlp->nlp_flag & NLP_DELAY_TMO) {
- lpfc_cancel_retry_delay_tmo(phba, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
}
- lpfc_cleanup_node(phba, ndlp);
+ lpfc_cleanup_node(vport, ndlp);
- if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
- put_device(&ndlp->rport->dev);
+ /*
+ * We can get here with a non-NULL ndlp->rport because when we
+ * unregister a rport we don't break the rport/node linkage. If we
+ * do get here, make sure we don't leave any dangling pointers behind.
+ */
+ if (ndlp->rport) {
rdata = ndlp->rport->dd_data;
rdata->pnode = NULL;
ndlp->rport = NULL;
@@ -1619,11 +2042,10 @@ lpfc_nlp_remove(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
}
static int
-lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did)
+lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ uint32_t did)
{
- D_ID mydid;
- D_ID ndlpdid;
- D_ID matchdid;
+ D_ID mydid, ndlpdid, matchdid;
if (did == Bcast_DID)
return 0;
@@ -1637,7 +2059,7 @@ lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did)
return 1;
/* Next check for area/domain identically equals 0 match */
- mydid.un.word = phba->fc_myDID;
+ mydid.un.word = vport->fc_myDID;
if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
return 0;
}
@@ -1669,101 +2091,116 @@ lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did)
}
/* Search for a nodelist entry */
-struct lpfc_nodelist *
-lpfc_findnode_did(struct lpfc_hba *phba, uint32_t did)
+static struct lpfc_nodelist *
+__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
uint32_t data1;
- spin_lock_irq(phba->host->host_lock);
- list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
- if (lpfc_matchdid(phba, ndlp, did)) {
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (lpfc_matchdid(vport, ndlp, did)) {
data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
- "%d:0929 FIND node DID "
+ "%d (%d):0929 FIND node DID "
" Data: x%p x%x x%x x%x\n",
- phba->brd_no,
+ phba->brd_no, vport->vpi,
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
- spin_unlock_irq(phba->host->host_lock);
return ndlp;
}
}
- spin_unlock_irq(phba->host->host_lock);
/* FIND node did <did> NOT FOUND */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
- "%d:0932 FIND node did x%x NOT FOUND.\n",
- phba->brd_no, did);
+ "%d (%d):0932 FIND node did x%x NOT FOUND.\n",
+ phba->brd_no, vport->vpi, did);
return NULL;
}
struct lpfc_nodelist *
-lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
+lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
- ndlp = lpfc_findnode_did(phba, did);
+ spin_lock_irq(shost->host_lock);
+ ndlp = __lpfc_findnode_did(vport, did);
+ spin_unlock_irq(shost->host_lock);
+ return ndlp;
+}
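Not part of the patch: the __lpfc_findnode_did()/lpfc_findnode_did() pair above follows the usual kernel convention that a leading double underscore marks the lock-already-held variant, with a thin wrapper taking the lock for external callers. A small illustrative sketch of that convention with hypothetical names:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/types.h>

struct demo_node {
	struct list_head list;
	u32 id;
};

struct demo_table {
	/* lock and nodes initialised elsewhere */
	spinlock_t lock;
	struct list_head nodes;
};

/* Caller must hold tbl->lock. */
static struct demo_node *__demo_find(struct demo_table *tbl, u32 id)
{
	struct demo_node *n;

	list_for_each_entry(n, &tbl->nodes, list)
		if (n->id == id)
			return n;
	return NULL;
}

/* Locking wrapper for callers that do not hold tbl->lock. */
static struct demo_node *demo_find(struct demo_table *tbl, u32 id)
{
	struct demo_node *n;

	spin_lock_irq(&tbl->lock);
	n = __demo_find(tbl, id);
	spin_unlock_irq(&tbl->lock);
	return n;
}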
+
+struct lpfc_nodelist *
+lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = lpfc_findnode_did(vport, did);
if (!ndlp) {
- if ((phba->fc_flag & FC_RSCN_MODE) &&
- ((lpfc_rscn_payload_check(phba, did) == 0)))
+ if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
+ lpfc_rscn_payload_check(vport, did) == 0)
return NULL;
ndlp = (struct lpfc_nodelist *)
- mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
if (!ndlp)
return NULL;
- lpfc_nlp_init(phba, ndlp, did);
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_init(vport, ndlp, did);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
return ndlp;
}
- if (phba->fc_flag & FC_RSCN_MODE) {
- if (lpfc_rscn_payload_check(phba, did)) {
+ if (vport->fc_flag & FC_RSCN_MODE) {
+ if (lpfc_rscn_payload_check(vport, did)) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
/* Since this node is marked for discovery,
* delay timeout is not needed.
*/
if (ndlp->nlp_flag & NLP_DELAY_TMO)
- lpfc_cancel_retry_delay_tmo(phba, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
} else
ndlp = NULL;
} else {
if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
ndlp->nlp_state == NLP_STE_PLOGI_ISSUE)
return NULL;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
}
return ndlp;
}
/* Build a list of nodes to discover based on the loopmap */
void
-lpfc_disc_list_loopmap(struct lpfc_hba * phba)
+lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
+ struct lpfc_hba *phba = vport->phba;
int j;
uint32_t alpa, index;
- if (phba->hba_state <= LPFC_LINK_DOWN) {
+ if (!lpfc_is_link_up(phba))
return;
- }
- if (phba->fc_topology != TOPOLOGY_LOOP) {
+
+ if (phba->fc_topology != TOPOLOGY_LOOP)
return;
- }
/* Check for loop map present or not */
if (phba->alpa_map[0]) {
for (j = 1; j <= phba->alpa_map[0]; j++) {
alpa = phba->alpa_map[j];
-
- if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
+ if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
continue;
- }
- lpfc_setup_disc_node(phba, alpa);
+ lpfc_setup_disc_node(vport, alpa);
}
} else {
/* No alpamap, so try all alpa's */
@@ -1776,113 +2213,167 @@ lpfc_disc_list_loopmap(struct lpfc_hba * phba)
else
index = FC_MAXLOOP - j - 1;
alpa = lpfcAlpaArray[index];
- if ((phba->fc_myDID & 0xff) == alpa) {
+ if ((vport->fc_myDID & 0xff) == alpa)
continue;
- }
-
- lpfc_setup_disc_node(phba, alpa);
+ lpfc_setup_disc_node(vport, alpa);
}
}
return;
}
-/* Start Link up / RSCN discovery on NPR list */
void
-lpfc_disc_start(struct lpfc_hba * phba)
+lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
- struct lpfc_sli *psli;
LPFC_MBOXQ_t *mbox;
- struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
+ struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
+ struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
+ int rc;
+
+ /*
+ * If it's not a physical port, or if we have already sent
+ * clear_la, then don't send it.
+ */
+ if ((phba->link_state >= LPFC_CLEAR_LA) ||
+ (vport->port_type != LPFC_PHYSICAL_PORT))
+ return;
+
+ /* Link up discovery */
+ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
+ phba->link_state = LPFC_CLEAR_LA;
+ lpfc_clear_la(phba, mbox);
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT |
+ MBX_STOP_IOCB));
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ lpfc_disc_flush_list(vport);
+ extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
+ fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
+ next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
+ phba->link_state = LPFC_HBA_ERROR;
+ }
+ }
+}
+
+/* Reg_vpi to tell firmware to resume normal operations */
+void
+lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+ LPFC_MBOXQ_t *regvpimbox;
+
+ regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (regvpimbox) {
+ lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
+ regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
+ regvpimbox->vport = vport;
+ if (lpfc_sli_issue_mbox(phba, regvpimbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB))
+ == MBX_NOT_FINISHED) {
+ mempool_free(regvpimbox, phba->mbox_mem_pool);
+ }
+ }
+}
+
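The two helpers above share the driver's usual mailbox skeleton: allocate an LPFC_MBOXQ_t from phba->mbox_mem_pool, fill in the command, attach a completion handler and the owning vport, issue it with MBX_NOWAIT, and free it immediately if the SLI layer reports MBX_NOT_FINISHED. A hedged sketch of that skeleton (error handling trimmed; my_mbox_cmpl is a placeholder, the lpfc_* calls are the ones shown above):

	LPFC_MBOXQ_t *mbox;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return;				/* out of mailbox memory */

	/* fill in the command here, e.g. lpfc_clear_la() or lpfc_reg_vpi() */
	mbox->mbox_cmpl = my_mbox_cmpl;		/* hypothetical completion handler */
	mbox->vport = vport;			/* route the completion to the right vport */

	if (lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
	    == MBX_NOT_FINISHED)
		mempool_free(mbox, phba->mbox_mem_pool);	/* never queued, give it back */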
+/* Start Link up / RSCN discovery on NPR nodes */
+void
+lpfc_disc_start(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
uint32_t num_sent;
uint32_t clear_la_pending;
int did_changed;
- int rc;
- psli = &phba->sli;
-
- if (phba->hba_state <= LPFC_LINK_DOWN) {
+ if (!lpfc_is_link_up(phba))
return;
- }
- if (phba->hba_state == LPFC_CLEAR_LA)
+
+ if (phba->link_state == LPFC_CLEAR_LA)
clear_la_pending = 1;
else
clear_la_pending = 0;
- if (phba->hba_state < LPFC_HBA_READY) {
- phba->hba_state = LPFC_DISC_AUTH;
- }
- lpfc_set_disctmo(phba);
+ if (vport->port_state < LPFC_VPORT_READY)
+ vport->port_state = LPFC_DISC_AUTH;
- if (phba->fc_prevDID == phba->fc_myDID) {
+ lpfc_set_disctmo(vport);
+
+ if (vport->fc_prevDID == vport->fc_myDID)
did_changed = 0;
- } else {
+ else
did_changed = 1;
- }
- phba->fc_prevDID = phba->fc_myDID;
- phba->num_disc_nodes = 0;
+
+ vport->fc_prevDID = vport->fc_myDID;
+ vport->num_disc_nodes = 0;
/* Start Discovery state <hba_state> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0202 Start Discovery hba state x%x "
+ "%d (%d):0202 Start Discovery hba state x%x "
"Data: x%x x%x x%x\n",
- phba->brd_no, phba->hba_state, phba->fc_flag,
- phba->fc_plogi_cnt, phba->fc_adisc_cnt);
-
- /* If our did changed, we MUST do PLOGI */
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
- if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
- (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
- did_changed) {
- spin_lock_irq(phba->host->host_lock);
- ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(phba->host->host_lock);
- }
- }
+ phba->brd_no, vport->vpi, vport->port_state,
+ vport->fc_flag, vport->fc_plogi_cnt,
+ vport->fc_adisc_cnt);
/* First do ADISCs - if any */
- num_sent = lpfc_els_disc_adisc(phba);
+ num_sent = lpfc_els_disc_adisc(vport);
if (num_sent)
return;
- if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
+ /*
+ * For SLI3, cmpl_reg_vpi will set port_state to READY, and
+ * continue discovery.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ !(vport->fc_flag & FC_RSCN_MODE)) {
+ lpfc_issue_reg_vpi(phba, vport);
+ return;
+ }
+
+ /*
+ * For SLI2, we need to set port_state to READY and continue
+ * discovery.
+ */
+ if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
/* If we get here, there is nothing to ADISC */
- if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
- phba->hba_state = LPFC_CLEAR_LA;
- lpfc_clear_la(phba, mbox);
- mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
- rc = lpfc_sli_issue_mbox(phba, mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
- if (rc == MBX_NOT_FINISHED) {
- mempool_free( mbox, phba->mbox_mem_pool);
- lpfc_disc_flush_list(phba);
- psli->ring[(psli->extra_ring)].flag &=
- ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->fcp_ring)].flag &=
- ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->next_ring)].flag &=
- ~LPFC_STOP_IOCB_EVENT;
- phba->hba_state = LPFC_HBA_READY;
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ lpfc_issue_clear_la(phba, vport);
+
+ if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+ vport->num_disc_nodes = 0;
+ /* go thru NPR nodes and issue ELS PLOGIs */
+ if (vport->fc_npr_cnt)
+ lpfc_els_disc_plogi(vport);
+
+ if (!vport->num_disc_nodes) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
}
}
+ vport->port_state = LPFC_VPORT_READY;
} else {
/* Next do PLOGIs - if any */
- num_sent = lpfc_els_disc_plogi(phba);
+ num_sent = lpfc_els_disc_plogi(vport);
if (num_sent)
return;
- if (phba->fc_flag & FC_RSCN_MODE) {
+ if (vport->fc_flag & FC_RSCN_MODE) {
/* Check to see if more RSCNs came in while we
* were processing this one.
*/
- if ((phba->fc_rscn_id_cnt == 0) &&
- (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_RSCN_MODE;
- spin_unlock_irq(phba->host->host_lock);
+ if ((vport->fc_rscn_id_cnt == 0) &&
+ (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_RSCN_MODE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
} else
- lpfc_els_handle_rscn(phba);
+ lpfc_els_handle_rscn(vport);
}
}
return;
@@ -1893,7 +2384,7 @@ lpfc_disc_start(struct lpfc_hba * phba)
 * ring that match the specified nodelist.
*/
static void
-lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
struct lpfc_sli *psli;
@@ -1907,7 +2398,7 @@ lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
/* Error matching iocb on txq or txcmplq
* First check the txq.
*/
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
if (iocb->context1 != ndlp) {
continue;
@@ -1927,36 +2418,36 @@ lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
continue;
}
icmd = &iocb->iocb;
- if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
- (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
+ if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
+ icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
lpfc_sli_issue_abort_iotag(phba, pring, iocb);
}
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
- list_del(&iocb->list);
+ list_del_init(&iocb->list);
- if (iocb->iocb_cmpl) {
+ if (!iocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, iocb);
+ else {
icmd = &iocb->iocb;
icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
(iocb->iocb_cmpl) (phba, iocb, iocb);
- } else
- lpfc_sli_release_iocbq(phba, iocb);
+ }
}
-
- return;
}
void
-lpfc_disc_flush_list(struct lpfc_hba * phba)
+lpfc_disc_flush_list(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct lpfc_hba *phba = vport->phba;
- if (phba->fc_plogi_cnt || phba->fc_adisc_cnt) {
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
+ if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
@@ -1967,6 +2458,14 @@ lpfc_disc_flush_list(struct lpfc_hba * phba)
}
}
+void
+lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
+{
+ lpfc_els_flush_rscn(vport);
+ lpfc_els_flush_cmd(vport);
+ lpfc_disc_flush_list(vport);
+}
+
/*****************************************************************************/
/*
* NAME: lpfc_disc_timeout
@@ -1985,158 +2484,154 @@ lpfc_disc_flush_list(struct lpfc_hba * phba)
void
lpfc_disc_timeout(unsigned long ptr)
{
- struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+ struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
+ struct lpfc_hba *phba = vport->phba;
unsigned long flags = 0;
if (unlikely(!phba))
return;
- spin_lock_irqsave(phba->host->host_lock, flags);
- if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
- phba->work_hba_events |= WORKER_DISC_TMO;
+ if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
+ spin_lock_irqsave(&vport->work_port_lock, flags);
+ vport->work_port_events |= WORKER_DISC_TMO;
+ spin_unlock_irqrestore(&vport->work_port_lock, flags);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
if (phba->work_wait)
- wake_up(phba->work_wait);
+ lpfc_worker_wake_up(phba);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
}
- spin_unlock_irqrestore(phba->host->host_lock, flags);
return;
}
static void
-lpfc_disc_timeout_handler(struct lpfc_hba *phba)
+lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
- struct lpfc_sli *psli;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli *psli = &phba->sli;
struct lpfc_nodelist *ndlp, *next_ndlp;
- LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
+ LPFC_MBOXQ_t *initlinkmbox;
int rc, clrlaerr = 0;
- if (unlikely(!phba))
+ if (!(vport->fc_flag & FC_DISC_TMO))
return;
- if (!(phba->fc_flag & FC_DISC_TMO))
- return;
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_DISC_TMO;
+ spin_unlock_irq(shost->host_lock);
- psli = &phba->sli;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "disc timeout: state:x%x rtry:x%x flg:x%x",
+ vport->port_state, vport->fc_ns_retry, vport->fc_flag);
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_DISC_TMO;
- spin_unlock_irq(phba->host->host_lock);
-
- switch (phba->hba_state) {
+ switch (vport->port_state) {
case LPFC_LOCAL_CFG_LINK:
- /* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
- /* FAN timeout */
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_DISCOVERY,
- "%d:0221 FAN timeout\n",
- phba->brd_no);
+ /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
+ * FAN
+ */
+ /* FAN timeout */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
+ "%d (%d):0221 FAN timeout\n",
+ phba->brd_no, vport->vpi);
/* Start discovery by sending FLOGI, clean up old rpis */
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
if (ndlp->nlp_state != NLP_STE_NPR_NODE)
continue;
if (ndlp->nlp_type & NLP_FABRIC) {
/* Clean up the ndlp on Fabric connections */
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
/* Fail outstanding IO now since device
* is marked for PLOGI.
*/
- lpfc_unreg_rpi(phba, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
}
}
- phba->hba_state = LPFC_FLOGI;
- lpfc_set_disctmo(phba);
- lpfc_initial_flogi(phba);
+ if (vport->port_state != LPFC_FLOGI) {
+ vport->port_state = LPFC_FLOGI;
+ lpfc_set_disctmo(vport);
+ lpfc_initial_flogi(vport);
+ }
break;
+ case LPFC_FDISC:
case LPFC_FLOGI:
- /* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
+ /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
/* Initial FLOGI timeout */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_DISCOVERY,
- "%d:0222 Initial FLOGI timeout\n",
- phba->brd_no);
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0222 Initial %s timeout\n",
+ phba->brd_no, vport->vpi,
+ vport->vpi ? "FLOGI" : "FDISC");
/* Assume no Fabric and go on with discovery.
* Check for outstanding ELS FLOGI to abort.
*/
/* FLOGI failed, so just use loop map to make discovery list */
- lpfc_disc_list_loopmap(phba);
+ lpfc_disc_list_loopmap(vport);
/* Start discovery */
- lpfc_disc_start(phba);
+ lpfc_disc_start(vport);
break;
case LPFC_FABRIC_CFG_LINK:
/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
NameServer login */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0223 Timeout while waiting for NameServer "
- "login\n", phba->brd_no);
+ "%d (%d):0223 Timeout while waiting for "
+ "NameServer login\n",
+ phba->brd_no, vport->vpi);
/* Next look for NameServer ndlp */
- ndlp = lpfc_findnode_did(phba, NameServer_DID);
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (ndlp)
lpfc_nlp_put(ndlp);
/* Start discovery */
- lpfc_disc_start(phba);
+ lpfc_disc_start(vport);
break;
case LPFC_NS_QRY:
/* Check for wait for NameServer Rsp timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0224 NameServer Query timeout "
+ "%d (%d):0224 NameServer Query timeout "
"Data: x%x x%x\n",
- phba->brd_no,
- phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
-
- ndlp = lpfc_findnode_did(phba, NameServer_DID);
- if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
- if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
- /* Try it one more time */
- rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
- if (rc == 0)
- break;
- }
- phba->fc_ns_retry = 0;
+ phba->brd_no, vport->vpi,
+ vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
+
+ if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
+ /* Try it one more time */
+ vport->fc_ns_retry++;
+ rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
+ vport->fc_ns_retry, 0);
+ if (rc == 0)
+ break;
}
+ vport->fc_ns_retry = 0;
- /* Nothing to authenticate, so CLEAR_LA right now */
- clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!clearlambox) {
- clrlaerr = 1;
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0226 Device Discovery "
- "completion error\n",
- phba->brd_no);
- phba->hba_state = LPFC_HBA_ERROR;
- break;
- }
-
- phba->hba_state = LPFC_CLEAR_LA;
- lpfc_clear_la(phba, clearlambox);
- clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
- rc = lpfc_sli_issue_mbox(phba, clearlambox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(clearlambox, phba->mbox_mem_pool);
- clrlaerr = 1;
- break;
+ /*
+ * Discovery is over.
+ * set port_state to PORT_READY if SLI2.
+ * cmpl_reg_vpi will set port_state to READY for SLI3.
+ */
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+ lpfc_issue_reg_vpi(phba, vport);
+ else { /* NPIV Not enabled */
+ lpfc_issue_clear_la(phba, vport);
+ vport->port_state = LPFC_VPORT_READY;
}
/* Setup and issue mailbox INITIALIZE LINK command */
initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!initlinkmbox) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0206 Device Discovery "
+ "%d (%d):0206 Device Discovery "
"completion error\n",
- phba->brd_no);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->brd_no, vport->vpi);
+ phba->link_state = LPFC_HBA_ERROR;
break;
}
@@ -2144,6 +2639,8 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
phba->cfg_link_speed);
initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
+ initlinkmbox->vport = vport;
+ initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
(MBX_NOWAIT | MBX_STOP_IOCB));
lpfc_set_loopback_flag(phba);
@@ -2154,67 +2651,81 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
case LPFC_DISC_AUTH:
/* Node Authentication timeout */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_DISCOVERY,
- "%d:0227 Node Authentication timeout\n",
- phba->brd_no);
- lpfc_disc_flush_list(phba);
- clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!clearlambox) {
- clrlaerr = 1;
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0207 Device Discovery "
- "completion error\n",
- phba->brd_no);
- phba->hba_state = LPFC_HBA_ERROR;
- break;
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0227 Node Authentication timeout\n",
+ phba->brd_no, vport->vpi);
+ lpfc_disc_flush_list(vport);
+
+ /*
+ * set port_state to PORT_READY if SLI2.
+ * cmpl_reg_vpi will set port_state to READY for SLI3.
+ */
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+ lpfc_issue_reg_vpi(phba, vport);
+ else { /* NPIV Not enabled */
+ lpfc_issue_clear_la(phba, vport);
+ vport->port_state = LPFC_VPORT_READY;
}
- phba->hba_state = LPFC_CLEAR_LA;
- lpfc_clear_la(phba, clearlambox);
- clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
- rc = lpfc_sli_issue_mbox(phba, clearlambox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(clearlambox, phba->mbox_mem_pool);
- clrlaerr = 1;
+ break;
+
+ case LPFC_VPORT_READY:
+ if (vport->fc_flag & FC_RSCN_MODE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0231 RSCN timeout Data: x%x "
+ "x%x\n",
+ phba->brd_no, vport->vpi,
+ vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
+
+ /* Cleanup any outstanding ELS commands */
+ lpfc_els_flush_cmd(vport);
+
+ lpfc_els_flush_rscn(vport);
+ lpfc_disc_flush_list(vport);
}
break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0229 Unexpected discovery timeout, "
+ "vport State x%x\n",
+ phba->brd_no, vport->vpi, vport->port_state);
+
+ break;
+ }
+
+ switch (phba->link_state) {
case LPFC_CLEAR_LA:
- /* CLEAR LA timeout */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_DISCOVERY,
- "%d:0228 CLEAR LA timeout\n",
- phba->brd_no);
+ /* CLEAR LA timeout */
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0228 CLEAR LA timeout\n",
+ phba->brd_no, vport->vpi);
clrlaerr = 1;
break;
- case LPFC_HBA_READY:
- if (phba->fc_flag & FC_RSCN_MODE) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_DISCOVERY,
- "%d:0231 RSCN timeout Data: x%x x%x\n",
- phba->brd_no,
- phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
-
- /* Cleanup any outstanding ELS commands */
- lpfc_els_flush_cmd(phba);
+ case LPFC_LINK_UNKNOWN:
+ case LPFC_WARM_START:
+ case LPFC_INIT_START:
+ case LPFC_INIT_MBX_CMDS:
+ case LPFC_LINK_DOWN:
+ case LPFC_LINK_UP:
+ case LPFC_HBA_ERROR:
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0230 Unexpected timeout, hba link "
+ "state x%x\n",
+ phba->brd_no, vport->vpi, phba->link_state);
+ clrlaerr = 1;
+ break;
- lpfc_els_flush_rscn(phba);
- lpfc_disc_flush_list(phba);
- }
+ case LPFC_HBA_READY:
break;
}
if (clrlaerr) {
- lpfc_disc_flush_list(phba);
+ lpfc_disc_flush_list(vport);
psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
- phba->hba_state = LPFC_HBA_READY;
+ vport->port_state = LPFC_VPORT_READY;
}
return;
@@ -2227,37 +2738,29 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
* handed off to the SLI layer.
*/
void
-lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_sli *psli;
- MAILBOX_t *mb;
- struct lpfc_dmabuf *mp;
- struct lpfc_nodelist *ndlp;
-
- psli = &phba->sli;
- mb = &pmb->mb;
-
- ndlp = (struct lpfc_nodelist *) pmb->context2;
- mp = (struct lpfc_dmabuf *) (pmb->context1);
+ MAILBOX_t *mb = &pmb->mb;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ struct lpfc_vport *vport = pmb->vport;
pmb->context1 = NULL;
ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_type |= NLP_FABRIC;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- /* Start issuing Fabric-Device Management Interface (FDMI)
- * command to 0xfffffa (FDMI well known port)
+ /*
+ * Start issuing Fabric-Device Management Interface (FDMI) command to
+ * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
+ * fdmi-on=2 (supporting RPA/hostname)
*/
- if (phba->cfg_fdmi_on == 1) {
- lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
- } else {
- /*
- * Delay issuing FDMI command if fdmi-on=2
- * (supporting RPA/hostnmae)
- */
- mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
- }
+
+ if (phba->cfg_fdmi_on == 1)
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
+ else
+ mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
/* Mailbox took a reference to the node */
lpfc_nlp_put(ndlp);
@@ -2283,16 +2786,12 @@ lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
sizeof(ndlp->nlp_portname)) == 0;
}
-/*
- * Search node lists for a remote port matching filter criteria
- * Caller needs to hold host_lock before calling this routine.
- */
struct lpfc_nodelist *
-__lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param)
+__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
struct lpfc_nodelist *ndlp;
- list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state != NLP_STE_UNUSED_NODE &&
filter(ndlp, param))
return ndlp;
@@ -2302,68 +2801,104 @@ __lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param)
/*
* Search node lists for a remote port matching filter criteria
- * This routine is used when the caller does NOT have host_lock.
+ * Caller needs to hold host_lock before calling this routine.
*/
struct lpfc_nodelist *
-lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param)
+lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
- spin_lock_irq(phba->host->host_lock);
- ndlp = __lpfc_find_node(phba, filter, param);
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ ndlp = __lpfc_find_node(vport, filter, param);
+ spin_unlock_irq(shost->host_lock);
return ndlp;
}
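lpfc_find_node() above is the generic entry point for the table-driven lookups that follow: the caller passes a node_filter callback plus an opaque parameter, and the helper walks vport->fc_nodes under the host lock. A rough, hypothetical usage sketch, reusing the lpfc_filter_by_wwpn callback shown earlier in this hunk:

static int port_known(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	/* hypothetical helper: nonzero if a node with this WWPN is tracked */
	return lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn) != NULL;
}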
/*
* This routine looks up the ndlp lists for the given RPI. If rpi found it
- * returns the node list pointer else return NULL.
+ * returns the node list element pointer else return NULL.
*/
struct lpfc_nodelist *
-__lpfc_findnode_rpi(struct lpfc_hba *phba, uint16_t rpi)
+__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
- return __lpfc_find_node(phba, lpfc_filter_by_rpi, &rpi);
+ return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}
struct lpfc_nodelist *
-lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
+lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
- spin_lock_irq(phba->host->host_lock);
- ndlp = __lpfc_findnode_rpi(phba, rpi);
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ ndlp = __lpfc_findnode_rpi(vport, rpi);
+ spin_unlock_irq(shost->host_lock);
return ndlp;
}
/*
* This routine looks up the ndlp lists for the given WWPN. If WWPN found it
- * returns the node list pointer else return NULL.
+ * returns the node list element pointer else return NULL.
*/
struct lpfc_nodelist *
-lpfc_findnode_wwpn(struct lpfc_hba *phba, struct lpfc_name *wwpn)
+lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
- spin_lock_irq(phba->host->host_lock);
- ndlp = __lpfc_find_node(phba, lpfc_filter_by_wwpn, wwpn);
- spin_unlock_irq(phba->host->host_lock);
- return NULL;
+ spin_lock_irq(shost->host_lock);
+ ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
+ spin_unlock_irq(shost->host_lock);
+ return ndlp;
}
void
-lpfc_nlp_init(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did)
+lpfc_dev_loss_delay(unsigned long ptr)
+{
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
+ struct lpfc_vport *vport = ndlp->vport;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_work_evt *evtp = &ndlp->dev_loss_evt;
+ unsigned long flags;
+
+ evtp = &ndlp->dev_loss_evt;
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ if (!list_empty(&evtp->evt_listp)) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return;
+ }
+
+ evtp->evt_arg1 = ndlp;
+ evtp->evt = LPFC_EVT_DEV_LOSS_DELAY;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ if (phba->work_wait)
+ lpfc_worker_wake_up(phba);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return;
+}
+
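lpfc_dev_loss_delay() above shows how timer context hands work off to the driver's worker thread, the same deferral used by lpfc_disc_timeout() earlier in this file: check under hbalock that the event is not already queued, fill in the lpfc_work_evt, add it to phba->work_list, and wake the worker. A stripped-down sketch of that hand-off (the event type is the one added by this patch; everything else mirrors the code above):

	unsigned long flags;
	struct lpfc_work_evt *evtp = &ndlp->dev_loss_evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	if (list_empty(&evtp->evt_listp)) {		/* not already queued */
		evtp->evt_arg1 = ndlp;			/* argument handed to the worker */
		evtp->evt = LPFC_EVT_DEV_LOSS_DELAY;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		if (phba->work_wait)
			lpfc_worker_wake_up(phba);	/* kick the worker thread */
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);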
+void
+lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ uint32_t did)
{
memset(ndlp, 0, sizeof (struct lpfc_nodelist));
INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
+ INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
init_timer(&ndlp->nlp_delayfunc);
ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
ndlp->nlp_DID = did;
- ndlp->nlp_phba = phba;
+ ndlp->vport = vport;
ndlp->nlp_sid = NLP_NO_SID;
INIT_LIST_HEAD(&ndlp->nlp_listp);
kref_init(&ndlp->kref);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+ "node init: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+
return;
}
@@ -2372,8 +2907,13 @@ lpfc_nlp_release(struct kref *kref)
{
struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
kref);
- lpfc_nlp_remove(ndlp->nlp_phba, ndlp);
- mempool_free(ndlp, ndlp->nlp_phba->nlp_mem_pool);
+
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+ "node release: did:x%x flg:x%x type:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+
+ lpfc_nlp_remove(ndlp->vport, ndlp);
+ mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
}
struct lpfc_nodelist *
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 2623a9bc777..c2fb59f595f 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -59,6 +59,12 @@
#define SLI2_IOCB_CMD_R3XTRA_ENTRIES 24
#define SLI2_IOCB_RSP_R3XTRA_ENTRIES 32
+#define SLI2_IOCB_CMD_SIZE 32
+#define SLI2_IOCB_RSP_SIZE 32
+#define SLI3_IOCB_CMD_SIZE 128
+#define SLI3_IOCB_RSP_SIZE 64
+
+
/* Common Transport structures and definitions */
union CtRevisionId {
@@ -79,6 +85,9 @@ union CtCommandResponse {
uint32_t word;
};
+#define FC4_FEATURE_INIT 0x2
+#define FC4_FEATURE_TARGET 0x1
+
struct lpfc_sli_ct_request {
/* Structure is in Big Endian format */
union CtRevisionId RevisionId;
@@ -121,20 +130,6 @@ struct lpfc_sli_ct_request {
uint32_t rsvd[7];
} rft;
- struct rff {
- uint32_t PortId;
- uint8_t reserved[2];
-#ifdef __BIG_ENDIAN_BITFIELD
- uint8_t feature_res:6;
- uint8_t feature_init:1;
- uint8_t feature_tgt:1;
-#else /* __LITTLE_ENDIAN_BITFIELD */
- uint8_t feature_tgt:1;
- uint8_t feature_init:1;
- uint8_t feature_res:6;
-#endif
- uint8_t type_code; /* type=8 for FCP */
- } rff;
struct rnn {
uint32_t PortId; /* For RNN_ID requests */
uint8_t wwnn[8];
@@ -144,15 +139,42 @@ struct lpfc_sli_ct_request {
uint8_t len;
uint8_t symbname[255];
} rsnn;
+ struct rspn { /* For RSPN_ID requests */
+ uint32_t PortId;
+ uint8_t len;
+ uint8_t symbname[255];
+ } rspn;
+ struct gff {
+ uint32_t PortId;
+ } gff;
+ struct gff_acc {
+ uint8_t fbits[128];
+ } gff_acc;
+#define FCP_TYPE_FEATURE_OFFSET 4
+ struct rff {
+ uint32_t PortId;
+ uint8_t reserved[2];
+ uint8_t fbits;
+ uint8_t type_code; /* type=8 for FCP */
+ } rff;
} un;
};
#define SLI_CT_REVISION 1
-#define GID_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 260)
-#define RFT_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 228)
-#define RFF_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 235)
-#define RNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 252)
-#define RSNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request))
+#define GID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct gid))
+#define GFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct gff))
+#define RFT_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct rft))
+#define RFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct rff))
+#define RNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct rnn))
+#define RSNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct rsnn))
+#define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct rspn))
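The request-size macros above switch from subtracting hard-coded byte counts off sizeof(struct lpfc_sli_ct_request) to computing offsetof(..., un) plus the size of the specific union member, so each CT payload size tracks the structure layout automatically. A small, hypothetical illustration of the idiom outside the driver:

#include <stddef.h>
#include <stdint.h>

struct gid  { uint32_t port_id; };
struct rsnn { uint32_t port_id; uint8_t len; uint8_t symbname[255]; };

struct ct_req {
	uint32_t header[4];		/* stands in for the common CT header */
	union {
		struct gid  gid;
		struct rsnn rsnn;
	} un;
};

/* request size = common header + only the union member actually sent */
#define GID_REQ_SZ  (offsetof(struct ct_req, un) + sizeof(struct gid))
#define RSNN_REQ_SZ (offsetof(struct ct_req, un) + sizeof(struct rsnn))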
/*
* FsType Definitions
@@ -227,6 +249,7 @@ struct lpfc_sli_ct_request {
#define SLI_CTNS_GFT_ID 0x0117
#define SLI_CTNS_GSPN_ID 0x0118
#define SLI_CTNS_GPT_ID 0x011A
+#define SLI_CTNS_GFF_ID 0x011F
#define SLI_CTNS_GID_PN 0x0121
#define SLI_CTNS_GID_NN 0x0131
#define SLI_CTNS_GIP_NN 0x0135
@@ -240,9 +263,9 @@ struct lpfc_sli_ct_request {
#define SLI_CTNS_RNN_ID 0x0213
#define SLI_CTNS_RCS_ID 0x0214
#define SLI_CTNS_RFT_ID 0x0217
-#define SLI_CTNS_RFF_ID 0x021F
#define SLI_CTNS_RSPN_ID 0x0218
#define SLI_CTNS_RPT_ID 0x021A
+#define SLI_CTNS_RFF_ID 0x021F
#define SLI_CTNS_RIP_NN 0x0235
#define SLI_CTNS_RIPA_NN 0x0236
#define SLI_CTNS_RSNN_NN 0x0239
@@ -311,9 +334,9 @@ struct csp {
uint8_t bbCreditlsb; /* FC Word 0, byte 3 */
#ifdef __BIG_ENDIAN_BITFIELD
- uint16_t increasingOffset:1; /* FC Word 1, bit 31 */
+ uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
uint16_t randomOffset:1; /* FC Word 1, bit 30 */
- uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */
+ uint16_t response_multiple_NPort:1; /* FC Word 1, bit 29 */
uint16_t fPort:1; /* FC Word 1, bit 28 */
uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
@@ -332,9 +355,9 @@ struct csp {
uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
uint16_t fPort:1; /* FC Word 1, bit 28 */
- uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */
+ uint16_t response_multiple_NPort:1; /* FC Word 1, bit 29 */
uint16_t randomOffset:1; /* FC Word 1, bit 30 */
- uint16_t increasingOffset:1; /* FC Word 1, bit 31 */
+ uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
uint16_t payloadlength:1; /* FC Word 1, bit 16 */
uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
@@ -1255,7 +1278,9 @@ typedef struct { /* FireFly BIU registers */
#define MBX_KILL_BOARD 0x24
#define MBX_CONFIG_FARP 0x25
#define MBX_BEACON 0x2A
+#define MBX_HEARTBEAT 0x31
+#define MBX_CONFIG_HBQ 0x7C
#define MBX_LOAD_AREA 0x81
#define MBX_RUN_BIU_DIAG64 0x84
#define MBX_CONFIG_PORT 0x88
@@ -1263,6 +1288,10 @@ typedef struct { /* FireFly BIU registers */
#define MBX_READ_RPI64 0x8F
#define MBX_REG_LOGIN64 0x93
#define MBX_READ_LA64 0x95
+#define MBX_REG_VPI 0x96
+#define MBX_UNREG_VPI 0x97
+#define MBX_REG_VNPID 0x96
+#define MBX_UNREG_VNPID 0x97
#define MBX_FLASH_WR_ULA 0x98
#define MBX_SET_DEBUG 0x99
@@ -1335,6 +1364,10 @@ typedef struct { /* FireFly BIU registers */
#define CMD_FCP_TRECEIVE64_CX 0xA1
#define CMD_FCP_TRSP64_CX 0xA3
+#define CMD_IOCB_RCV_SEQ64_CX 0xB5
+#define CMD_IOCB_RCV_ELS64_CX 0xB7
+#define CMD_IOCB_RCV_CONT64_CX 0xBB
+
#define CMD_GEN_REQUEST64_CR 0xC2
#define CMD_GEN_REQUEST64_CX 0xC3
@@ -1561,6 +1594,7 @@ typedef struct {
#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */
#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */
#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */
+#define FLAGS_UNREG_LOGIN_ALL 0x08 /* UNREG_LOGIN all on link down */
#define FLAGS_LIRP_LILP 0x80 /* LIRP / LILP is disabled */
#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */
@@ -1744,8 +1778,6 @@ typedef struct {
#define LMT_4Gb 0x040
#define LMT_8Gb 0x080
#define LMT_10Gb 0x100
-
-
uint32_t rsvd2;
uint32_t rsvd3;
uint32_t max_xri;
@@ -1754,7 +1786,10 @@ typedef struct {
uint32_t avail_xri;
uint32_t avail_iocb;
uint32_t avail_rpi;
- uint32_t default_rpi;
+ uint32_t max_vpi;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+ uint32_t avail_vpi;
} READ_CONFIG_VAR;
/* Structure for MB Command READ_RCONFIG (12) */
@@ -1818,6 +1853,13 @@ typedef struct {
structure */
struct ulp_bde64 sp64;
} un;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd3;
+ uint16_t vpi;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t vpi;
+ uint16_t rsvd3;
+#endif
} READ_SPARM_VAR;
/* Structure for MB Command READ_STATUS (14) */
@@ -1918,11 +1960,17 @@ typedef struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t cv:1;
uint32_t rr:1;
- uint32_t rsvd1:29;
+ uint32_t rsvd2:2;
+ uint32_t v3req:1;
+ uint32_t v3rsp:1;
+ uint32_t rsvd1:25;
uint32_t rv:1;
#else /* __LITTLE_ENDIAN_BITFIELD */
uint32_t rv:1;
- uint32_t rsvd1:29;
+ uint32_t rsvd1:25;
+ uint32_t v3rsp:1;
+ uint32_t v3req:1;
+ uint32_t rsvd2:2;
uint32_t rr:1;
uint32_t cv:1;
#endif
@@ -1972,8 +2020,8 @@ typedef struct {
uint8_t sli1FwName[16];
uint32_t sli2FwRev;
uint8_t sli2FwName[16];
- uint32_t rsvd2;
- uint32_t RandomData[7];
+ uint32_t sli3Feat;
+ uint32_t RandomData[6];
} READ_REV_VAR;
/* Structure for MB Command READ_LINK_STAT (18) */
@@ -2013,6 +2061,14 @@ typedef struct {
struct ulp_bde64 sp64;
} un;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd6;
+ uint16_t vpi;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t vpi;
+ uint16_t rsvd6;
+#endif
+
} REG_LOGIN_VAR;
/* Word 30 contents for REG_LOGIN */
@@ -2037,16 +2093,78 @@ typedef struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint16_t rsvd1;
uint16_t rpi;
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+ uint16_t rsvd6;
+ uint16_t vpi;
#else /* __LITTLE_ENDIAN_BITFIELD */
uint16_t rpi;
uint16_t rsvd1;
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+ uint16_t vpi;
+ uint16_t rsvd6;
#endif
} UNREG_LOGIN_VAR;
+/* Structure for MB Command REG_VPI (0x96) */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd1;
+ uint32_t rsvd2:8;
+ uint32_t sid:24;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+ uint16_t rsvd6;
+ uint16_t vpi;
+#else /* __LITTLE_ENDIAN */
+ uint32_t rsvd1;
+ uint32_t sid:24;
+ uint32_t rsvd2:8;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+ uint16_t vpi;
+ uint16_t rsvd6;
+#endif
+} REG_VPI_VAR;
+
+/* Structure for MB Command UNREG_VPI (0x97) */
+typedef struct {
+ uint32_t rsvd1;
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd6;
+ uint16_t vpi;
+#else /* __LITTLE_ENDIAN */
+ uint16_t vpi;
+ uint16_t rsvd6;
+#endif
+} UNREG_VPI_VAR;
+
/* Structure for MB Command UNREG_D_ID (0x23) */
typedef struct {
uint32_t did;
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd6;
+ uint16_t vpi;
+#else
+ uint16_t vpi;
+ uint16_t rsvd6;
+#endif
} UNREG_D_ID_VAR;
/* Structure for MB Command READ_LA (21) */
@@ -2178,13 +2296,240 @@ typedef struct {
#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
-/* Structure for MB Command CONFIG_PORT (0x88) */
+struct hbq_mask {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t tmatch;
+ uint8_t tmask;
+ uint8_t rctlmatch;
+ uint8_t rctlmask;
+#else /* __LITTLE_ENDIAN */
+ uint8_t rctlmask;
+ uint8_t rctlmatch;
+ uint8_t tmask;
+ uint8_t tmatch;
+#endif
+};
+
+
+/* Structure for MB Command CONFIG_HBQ (7c) */
+
+struct config_hbq_var {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd1 :7;
+ uint32_t recvNotify :1; /* Receive Notification */
+ uint32_t numMask :8; /* # Mask Entries */
+ uint32_t profile :8; /* Selection Profile */
+ uint32_t rsvd2 :8;
+#else /* __LITTLE_ENDIAN */
+ uint32_t rsvd2 :8;
+ uint32_t profile :8; /* Selection Profile */
+ uint32_t numMask :8; /* # Mask Entries */
+ uint32_t recvNotify :1; /* Receive Notification */
+ uint32_t rsvd1 :7;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t hbqId :16;
+ uint32_t rsvd3 :12;
+ uint32_t ringMask :4;
+#else /* __LITTLE_ENDIAN */
+ uint32_t ringMask :4;
+ uint32_t rsvd3 :12;
+ uint32_t hbqId :16;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t entry_count :16;
+ uint32_t rsvd4 :8;
+ uint32_t headerLen :8;
+#else /* __LITTLE_ENDIAN */
+ uint32_t headerLen :8;
+ uint32_t rsvd4 :8;
+ uint32_t entry_count :16;
+#endif
+
+ uint32_t hbqaddrLow;
+ uint32_t hbqaddrHigh;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd5 :31;
+ uint32_t logEntry :1;
+#else /* __LITTLE_ENDIAN */
+ uint32_t logEntry :1;
+ uint32_t rsvd5 :31;
+#endif
+
+ uint32_t rsvd6; /* w7 */
+ uint32_t rsvd7; /* w8 */
+ uint32_t rsvd8; /* w9 */
+
+ struct hbq_mask hbqMasks[6];
+
+
+ union {
+ uint32_t allprofiles[12];
+
+ struct {
+ #ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t seqlenoff :16;
+ uint32_t maxlen :16;
+ #else /* __LITTLE_ENDIAN */
+ uint32_t maxlen :16;
+ uint32_t seqlenoff :16;
+ #endif
+ #ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd1 :28;
+ uint32_t seqlenbcnt :4;
+ #else /* __LITTLE_ENDIAN */
+ uint32_t seqlenbcnt :4;
+ uint32_t rsvd1 :28;
+ #endif
+ uint32_t rsvd[10];
+ } profile2;
+
+ struct {
+ #ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t seqlenoff :16;
+ uint32_t maxlen :16;
+ #else /* __LITTLE_ENDIAN */
+ uint32_t maxlen :16;
+ uint32_t seqlenoff :16;
+ #endif
+ #ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cmdcodeoff :28;
+ uint32_t rsvd1 :12;
+ uint32_t seqlenbcnt :4;
+ #else /* __LITTLE_ENDIAN */
+ uint32_t seqlenbcnt :4;
+ uint32_t rsvd1 :12;
+ uint32_t cmdcodeoff :28;
+ #endif
+ uint32_t cmdmatch[8];
+
+ uint32_t rsvd[2];
+ } profile3;
+
+ struct {
+ #ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t seqlenoff :16;
+ uint32_t maxlen :16;
+ #else /* __LITTLE_ENDIAN */
+ uint32_t maxlen :16;
+ uint32_t seqlenoff :16;
+ #endif
+ #ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cmdcodeoff :28;
+ uint32_t rsvd1 :12;
+ uint32_t seqlenbcnt :4;
+ #else /* __LITTLE_ENDIAN */
+ uint32_t seqlenbcnt :4;
+ uint32_t rsvd1 :12;
+ uint32_t cmdcodeoff :28;
+ #endif
+ uint32_t cmdmatch[8];
+
+ uint32_t rsvd[2];
+ } profile5;
+
+ } profiles;
+};
+
+
+
+/* Structure for MB Command CONFIG_PORT (0x88) */
typedef struct {
- uint32_t pcbLen;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cBE : 1;
+ uint32_t cET : 1;
+ uint32_t cHpcb : 1;
+ uint32_t cMA : 1;
+ uint32_t sli_mode : 4;
+ uint32_t pcbLen : 24; /* bit 23:0 of memory based port
+ * config block */
+#else /* __LITTLE_ENDIAN */
+ uint32_t pcbLen : 24; /* bit 23:0 of memory based port
+ * config block */
+ uint32_t sli_mode : 4;
+ uint32_t cMA : 1;
+ uint32_t cHpcb : 1;
+ uint32_t cET : 1;
+ uint32_t cBE : 1;
+#endif
+
uint32_t pcbLow; /* bit 31:0 of memory based port config block */
uint32_t pcbHigh; /* bit 63:32 of memory based port config block */
- uint32_t hbainit[5];
+ uint32_t hbainit[6];
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd : 24; /* Reserved */
+ uint32_t cmv : 1; /* Configure Max VPIs */
+ uint32_t ccrp : 1; /* Config Command Ring Polling */
+ uint32_t csah : 1; /* Configure Synchronous Abort Handling */
+ uint32_t chbs : 1; /* Configure Host Backing store */
+ uint32_t cinb : 1; /* Enable Interrupt Notification Block */
+ uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
+ uint32_t cmx : 1; /* Configure Max XRIs */
+ uint32_t cmr : 1; /* Configure Max RPIs */
+#else /* __LITTLE_ENDIAN */
+ uint32_t cmr : 1; /* Configure Max RPIs */
+ uint32_t cmx : 1; /* Configure Max XRIs */
+ uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
+ uint32_t cinb : 1; /* Enable Interrupt Notification Block */
+ uint32_t chbs : 1; /* Configure Host Backing store */
+ uint32_t csah : 1; /* Configure Synchronous Abort Handling */
+ uint32_t ccrp : 1; /* Config Command Ring Polling */
+ uint32_t cmv : 1; /* Configure Max VPIs */
+ uint32_t rsvd : 24; /* Reserved */
+#endif
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd2 : 24; /* Reserved */
+ uint32_t gmv : 1; /* Grant Max VPIs */
+ uint32_t gcrp : 1; /* Grant Command Ring Polling */
+ uint32_t gsah : 1; /* Grant Synchronous Abort Handling */
+ uint32_t ghbs : 1; /* Grant Host Backing Store */
+ uint32_t ginb : 1; /* Grant Interrupt Notification Block */
+ uint32_t gerbm : 1; /* Grant ERBM Request */
+ uint32_t gmx : 1; /* Grant Max XRIs */
+ uint32_t gmr : 1; /* Grant Max RPIs */
+#else /* __LITTLE_ENDIAN */
+ uint32_t gmr : 1; /* Grant Max RPIs */
+ uint32_t gmx : 1; /* Grant Max XRIs */
+ uint32_t gerbm : 1; /* Grant ERBM Request */
+ uint32_t ginb : 1; /* Grant Interrupt Notification Block */
+ uint32_t ghbs : 1; /* Grant Host Backing Store */
+ uint32_t gsah : 1; /* Grant Synchronous Abort Handling */
+ uint32_t gcrp : 1; /* Grant Command Ring Polling */
+ uint32_t gmv : 1; /* Grant Max VPIs */
+ uint32_t rsvd2 : 24; /* Reserved */
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t max_rpi : 16; /* Max RPIs Port should configure */
+ uint32_t max_xri : 16; /* Max XRIs Port should configure */
+#else /* __LITTLE_ENDIAN */
+ uint32_t max_xri : 16; /* Max XRIs Port should configure */
+ uint32_t max_rpi : 16; /* Max RPIs Port should configure */
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t max_hbq : 16; /* Max HBQs Host expects to configure */
+ uint32_t rsvd3 : 16; /* Reserved */
+#else /* __LITTLE_ENDIAN */
+ uint32_t rsvd3 : 16; /* Reserved */
+ uint32_t max_hbq : 16; /* Max HBQs Host expects to configure */
+#endif
+
+ uint32_t rsvd4; /* Reserved */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd5 : 16; /* Reserved */
+ uint32_t max_vpi : 16; /* Max number of virt N-Ports */
+#else /* __LITTLE_ENDIAN */
+ uint32_t max_vpi : 16; /* Max number of virt N-Ports */
+ uint32_t rsvd5 : 16; /* Reserved */
+#endif
+
} CONFIG_PORT_VAR;
/* SLI-2 Port Control Block */
@@ -2262,33 +2607,40 @@ typedef struct {
#define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t))
typedef union {
- uint32_t varWords[MAILBOX_CMD_WSIZE - 1];
- LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */
- READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */
- WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */
- BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */
- INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */
+ uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
+ * feature/max ring number
+ */
+ LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */
+ READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */
+ WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */
+ BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */
+ INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */
DOWN_LINK_VAR varDwnLnk; /* cmd = 6 (DOWN_LINK) */
- CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */
- PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */
+ CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */
+ PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */
CONFIG_RING_VAR varCfgRing; /* cmd = 9 (CONFIG_RING) */
RESET_RING_VAR varRstRing; /* cmd = 10 (RESET_RING) */
READ_CONFIG_VAR varRdConfig; /* cmd = 11 (READ_CONFIG) */
READ_RCONF_VAR varRdRConfig; /* cmd = 12 (READ_RCONFIG) */
READ_SPARM_VAR varRdSparm; /* cmd = 13 (READ_SPARM(64)) */
READ_STATUS_VAR varRdStatus; /* cmd = 14 (READ_STATUS) */
- READ_RPI_VAR varRdRPI; /* cmd = 15 (READ_RPI(64)) */
- READ_XRI_VAR varRdXRI; /* cmd = 16 (READ_XRI) */
- READ_REV_VAR varRdRev; /* cmd = 17 (READ_REV) */
- READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */
+ READ_RPI_VAR varRdRPI; /* cmd = 15 (READ_RPI(64)) */
+ READ_XRI_VAR varRdXRI; /* cmd = 16 (READ_XRI) */
+ READ_REV_VAR varRdRev; /* cmd = 17 (READ_REV) */
+ READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */
REG_LOGIN_VAR varRegLogin; /* cmd = 19 (REG_LOGIN(64)) */
UNREG_LOGIN_VAR varUnregLogin; /* cmd = 20 (UNREG_LOGIN) */
- READ_LA_VAR varReadLA; /* cmd = 21 (READ_LA(64)) */
+ READ_LA_VAR varReadLA; /* cmd = 21 (READ_LA(64)) */
CLEAR_LA_VAR varClearLA; /* cmd = 22 (CLEAR_LA) */
- DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */
- UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */
- CONFIG_FARP_VAR varCfgFarp; /* cmd = 0x25 (CONFIG_FARP) NEW_FEATURE */
- CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
+ DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */
+ UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */
+ CONFIG_FARP_VAR varCfgFarp; /* cmd = 0x25 (CONFIG_FARP)
+ * NEW_FEATURE
+ */
+ struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */
+ CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
+ REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */
+ UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
} MAILVARIANTS;
/*
@@ -2305,14 +2657,27 @@ struct lpfc_pgp {
__le32 rspPutInx;
};
-typedef struct _SLI2_DESC {
- struct lpfc_hgp host[MAX_RINGS];
+struct sli2_desc {
uint32_t unused1[16];
+ struct lpfc_hgp host[MAX_RINGS];
+ struct lpfc_pgp port[MAX_RINGS];
+};
+
+struct sli3_desc {
+ struct lpfc_hgp host[MAX_RINGS];
+ uint32_t reserved[8];
+ uint32_t hbq_put[16];
+};
+
+struct sli3_pgp {
struct lpfc_pgp port[MAX_RINGS];
-} SLI2_DESC;
+ uint32_t hbq_get[16];
+};
typedef union {
- SLI2_DESC s2;
+ struct sli2_desc s2;
+ struct sli3_desc s3;
+ struct sli3_pgp s3_pgp;
} SLI_VAR;
typedef struct {
@@ -2618,6 +2983,25 @@ typedef struct {
uint32_t fcpt_Length; /* transfer ready for IWRITE */
} FCPT_FIELDS64;
+/* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7)
+ or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
+
+struct rcv_sli3 {
+ uint32_t word8Rsvd;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t vpi;
+ uint16_t word9Rsvd;
+#else /* __LITTLE_ENDIAN */
+ uint16_t word9Rsvd;
+ uint16_t vpi;
+#endif
+ uint32_t word10Rsvd;
+ uint32_t acc_len; /* accumulated length */
+ struct ulp_bde64 bde2;
+};
+
+
+
typedef struct _IOCB { /* IOCB structure */
union {
GENERIC_RSP grsp; /* Generic response */
@@ -2632,8 +3016,8 @@ typedef struct _IOCB { /* IOCB structure */
/* SLI-2 structures */
- struct ulp_bde64 cont64[2]; /* up to 2 64 bit continuation
- bde_64s */
+ struct ulp_bde64 cont64[2]; /* up to 2 64 bit continuation
+ * bde_64s */
ELS_REQUEST64 elsreq64; /* ELS_REQUEST template */
GEN_REQUEST64 genreq64; /* GEN_REQUEST template */
RCV_ELS_REQ64 rcvels64; /* RCV_ELS_REQ template */
@@ -2695,9 +3079,20 @@ typedef struct _IOCB { /* IOCB structure */
uint32_t ulpTimeout:8;
#endif
+ union {
+ struct rcv_sli3 rcvsli3; /* words 8 - 15 */
+ uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
+ } unsli3;
+
+#define ulpCt_h ulpXS
+#define ulpCt_l ulpFCP2Rcvy
+
+#define IOCB_FCP 1 /* IOCB is used for FCP ELS cmds-ulpRsvByte */
+#define IOCB_IP 2 /* IOCB is used for IP ELS cmds */
#define PARM_UNUSED 0 /* PU field (Word 4) not used */
#define PARM_REL_OFF 1 /* PU field (Word 4) = R. O. */
#define PARM_READ_CHECK 2 /* PU field (Word 4) = Data Transfer Length */
+#define PARM_NPIV_DID 3
#define CLASS1 0 /* Class 1 */
#define CLASS2 1 /* Class 2 */
#define CLASS3 2 /* Class 3 */
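The unsli3 union added above grows every IOCB from the 8-word SLI-2 layout to the 32-word SLI-3 layout, which is what the SLI2_IOCB_CMD_SIZE (32) and SLI3_IOCB_CMD_SIZE (128) constants added earlier in this header encode. A hedged compile-time check of that relationship, placed in any function body and assuming BUILD_BUG_ON from <linux/kernel.h> and no extra padding in IOCB_t, might look like:

	/* 8 words of SLI-2 IOCB + 24 extra SLI-3 words = 128 bytes total */
	BUILD_BUG_ON(sizeof(IOCB_t) != SLI3_IOCB_CMD_SIZE);
	BUILD_BUG_ON(SLI3_IOCB_CMD_SIZE - SLI2_IOCB_CMD_SIZE !=
		     sizeof(((IOCB_t *)0)->unsli3));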
@@ -2718,39 +3113,51 @@ typedef struct _IOCB { /* IOCB structure */
#define IOSTAT_RSVD2 0xC
#define IOSTAT_RSVD3 0xD
#define IOSTAT_RSVD4 0xE
-#define IOSTAT_RSVD5 0xF
+#define IOSTAT_NEED_BUFFER 0xF
#define IOSTAT_DRIVER_REJECT 0x10 /* ulpStatus - Driver defined */
#define IOSTAT_DEFAULT 0xF /* Same as rsvd5 for now */
#define IOSTAT_CNT 0x11
} IOCB_t;
+/* Structure used for a single HBQ entry */
+struct lpfc_hbq_entry {
+ struct ulp_bde64 bde;
+ uint32_t buffer_tag;
+};
+
#define SLI1_SLIM_SIZE (4 * 1024)
/* Up to 498 IOCBs will fit into 16k
* 256 (MAILBOX_t) + 140 (PCB_t) + ( 32 (IOCB_t) * 498 ) = < 16384
*/
-#define SLI2_SLIM_SIZE (16 * 1024)
+#define SLI2_SLIM_SIZE (64 * 1024)
/* Maximum IOCBs that will fit in SLI2 slim */
#define MAX_SLI2_IOCB 498
+#define MAX_SLIM_IOCB_SIZE (SLI2_SLIM_SIZE - \
+ (sizeof(MAILBOX_t) + sizeof(PCB_t)))
+
+/* HBQ entries are 4 words each = 4k */
+#define LPFC_TOTAL_HBQ_SIZE (sizeof(struct lpfc_hbq_entry) * \
+ lpfc_sli_hbq_count())
struct lpfc_sli2_slim {
MAILBOX_t mbx;
PCB_t pcb;
- IOCB_t IOCBs[MAX_SLI2_IOCB];
+ IOCB_t IOCBs[MAX_SLIM_IOCB_SIZE];
};
-/*******************************************************************
-This macro check PCI device to allow special handling for LC HBAs.
-
-Parameters:
-device : struct pci_dev 's device field
-
-return 1 => TRUE
- 0 => FALSE
- *******************************************************************/
+/*
+ * This function checks PCI device to allow special handling for LC HBAs.
+ *
+ * Parameters:
+ * device : struct pci_dev 's device field
+ *
+ * return 1 => TRUE
+ * 0 => FALSE
+ */
static inline int
lpfc_is_LC_HBA(unsigned short device)
{
@@ -2766,3 +3173,16 @@ lpfc_is_LC_HBA(unsigned short device)
else
return 0;
}
+
+/*
+ * Determine if an IOCB failed because of a link event or firmware reset.
+ */
+
+static inline int
+lpfc_error_lost_link(IOCB_t *iocbp)
+{
+ return (iocbp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ (iocbp->un.ulpWord[4] == IOERR_SLI_ABORTED ||
+ iocbp->un.ulpWord[4] == IOERR_LINK_DOWN ||
+ iocbp->un.ulpWord[4] == IOERR_SLI_DOWN));
+}
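lpfc_error_lost_link() above centralizes the "did this IOCB fail only because the link went away?" test. A typical, hypothetical use in a completion handler is to skip retries when the failure was just a side effect of a link event or firmware reset (rspiocb stands for the completed struct lpfc_iocbq):

	IOCB_t *irsp = &rspiocb->iocb;

	if (irsp->ulpStatus) {
		if (lpfc_error_lost_link(irsp))
			return;		/* link bounced: retrying is pointless */
		/* otherwise handle the real error or schedule a retry */
	}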
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 955b2e48d04..f81f85ee190 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -27,6 +27,7 @@
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
+#include <linux/ctype.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -40,15 +41,20 @@
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
#include "lpfc_version.h"
+#include "lpfc_vport.h"
static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static struct scsi_transport_template *lpfc_transport_template = NULL;
+static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
+
+
/************************************************************************/
/* */
/* lpfc_config_port_prep */
@@ -61,7 +67,7 @@ static DEFINE_IDR(lpfc_hba_index);
/* */
/************************************************************************/
int
-lpfc_config_port_prep(struct lpfc_hba * phba)
+lpfc_config_port_prep(struct lpfc_hba *phba)
{
lpfc_vpd_t *vp = &phba->vpd;
int i = 0, rc;
@@ -75,12 +81,12 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
return -ENOMEM;
}
mb = &pmb->mb;
- phba->hba_state = LPFC_INIT_MBX_CMDS;
+ phba->link_state = LPFC_INIT_MBX_CMDS;
if (lpfc_is_LC_HBA(phba->pcidev->device)) {
if (init_key) {
@@ -100,9 +106,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"%d:0324 Config Port initialization "
"error, mbxCmd x%x READ_NVPARM, "
"mbxStatus x%x\n",
@@ -112,16 +116,18 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
return -ERESTART;
}
memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
- sizeof (mb->un.varRDnvp.nodename));
+ sizeof(phba->wwnn));
+ memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
+ sizeof(phba->wwpn));
}
+ phba->sli3_options = 0x0;
+
/* Setup and issue mailbox READ REV command */
lpfc_read_rev(phba, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0439 Adapter failed to init, mbxCmd x%x "
"READ_REV, mbxStatus x%x\n",
phba->brd_no,
@@ -130,6 +136,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
return -ERESTART;
}
+
/*
* The value of rr must be 1 since the driver set the cv field to 1.
* This setting requires the FW to set all revision fields.
@@ -144,8 +151,12 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
return -ERESTART;
}
+ if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp)
+ return -EINVAL;
+
/* Save information as VPD data */
vp->rev.rBit = 1;
+ memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
@@ -161,6 +172,13 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
+ /* If the sli feature level is less than 9, we must
+ * tear down all RPIs and VPIs on link down if NPIV
+ * is enabled.
+ */
+ if (vp->rev.feaLevelHigh < 9)
+ phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
+
if (lpfc_is_LC_HBA(phba->pcidev->device))
memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
sizeof (phba->RandomData));
@@ -188,7 +206,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
- mb->un.varDmp.word_cnt);
+ mb->un.varDmp.word_cnt);
offset += mb->un.varDmp.word_cnt;
} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
@@ -212,48 +230,34 @@ out_free_mbox:
/* */
/************************************************************************/
int
-lpfc_config_port_post(struct lpfc_hba * phba)
+lpfc_config_port_post(struct lpfc_hba *phba)
{
+ struct lpfc_vport *vport = phba->pport;
LPFC_MBOXQ_t *pmb;
MAILBOX_t *mb;
struct lpfc_dmabuf *mp;
struct lpfc_sli *psli = &phba->sli;
uint32_t status, timeout;
- int i, j, rc;
+ int i, j;
+ int rc;
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
return -ENOMEM;
}
mb = &pmb->mb;
- lpfc_config_link(phba, pmb);
- rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
- if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
- "%d:0447 Adapter failed init, mbxCmd x%x "
- "CONFIG_LINK mbxStatus x%x\n",
- phba->brd_no,
- mb->mbxCommand, mb->mbxStatus);
- phba->hba_state = LPFC_HBA_ERROR;
- mempool_free( pmb, phba->mbox_mem_pool);
- return -EIO;
- }
-
/* Get login parameters for NID. */
- lpfc_read_sparam(phba, pmb);
+ lpfc_read_sparam(phba, pmb, 0);
+ pmb->vport = vport;
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0448 Adapter failed init, mbxCmd x%x "
"READ_SPARM mbxStatus x%x\n",
phba->brd_no,
mb->mbxCommand, mb->mbxStatus);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
mp = (struct lpfc_dmabuf *) pmb->context1;
mempool_free( pmb, phba->mbox_mem_pool);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -263,25 +267,27 @@ lpfc_config_port_post(struct lpfc_hba * phba)
mp = (struct lpfc_dmabuf *) pmb->context1;
- memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm));
+ memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
pmb->context1 = NULL;
if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
+ u64_to_wwn(phba->cfg_soft_wwnn,
+ vport->fc_sparam.nodeName.u.wwn);
if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
- memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
+ u64_to_wwn(phba->cfg_soft_wwpn,
+ vport->fc_sparam.portName.u.wwn);
+ memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
sizeof (struct lpfc_name));
- memcpy(&phba->fc_portname, &phba->fc_sparam.portName,
+ memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
sizeof (struct lpfc_name));
/* If no serial number in VPD data, use low 6 bytes of WWNN */
/* This should be consolidated into parse_vpd ? - mr */
if (phba->SerialNumber[0] == 0) {
uint8_t *outptr;
- outptr = &phba->fc_nodename.u.s.IEEE[0];
+ outptr = &vport->fc_nodename.u.s.IEEE[0];
for (i = 0; i < 12; i++) {
status = *outptr++;
j = ((status & 0xf0) >> 4);
@@ -303,15 +309,14 @@ lpfc_config_port_post(struct lpfc_hba * phba)
}
lpfc_read_config(phba, pmb);
+ pmb->vport = vport;
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0453 Adapter failed to init, mbxCmd x%x "
"READ_CONFIG, mbxStatus x%x\n",
phba->brd_no,
mb->mbxCommand, mb->mbxStatus);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
mempool_free( pmb, phba->mbox_mem_pool);
return -EIO;
}
@@ -338,9 +343,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
|| ((phba->cfg_link_speed == LINK_SPEED_10G)
&& !(phba->lmt & LMT_10Gb))) {
/* Reset link speed to auto */
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_LINK_EVENT,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
"%d:1302 Invalid speed for this board: "
"Reset link speed to auto: x%x\n",
phba->brd_no,
@@ -348,7 +351,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
phba->cfg_link_speed = LINK_SPEED_AUTO;
}
- phba->hba_state = LPFC_LINK_DOWN;
+ phba->link_state = LPFC_LINK_DOWN;
/* Only process IOCBs on ring 0 till hba_state is READY */
if (psli->ring[psli->extra_ring].cmdringaddr)
@@ -359,10 +362,11 @@ lpfc_config_port_post(struct lpfc_hba * phba)
psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
/* Post receive buffers for desired rings */
- lpfc_post_rcv_buf(phba);
+ if (phba->sli_rev != 3)
+ lpfc_post_rcv_buf(phba);
/* Enable appropriate host interrupts */
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
status = readl(phba->HCregaddr);
status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
if (psli->num_rings > 0)
@@ -380,22 +384,24 @@ lpfc_config_port_post(struct lpfc_hba * phba)
writel(status, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
/*
* Setup the ring 0 (els) timeout handler
*/
timeout = phba->fc_ratov << 1;
- mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout);
+ mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+ mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+ phba->hb_outstanding = 0;
+ phba->last_completion_time = jiffies;
lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ pmb->vport = vport;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
lpfc_set_loopback_flag(phba);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0454 Adapter failed to init, mbxCmd x%x "
"INIT_LINK, mbxStatus x%x\n",
phba->brd_no,
@@ -408,7 +414,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
writel(0xffffffff, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
if (rc != MBX_BUSY)
mempool_free(pmb, phba->mbox_mem_pool);
return -EIO;
@@ -429,18 +435,19 @@ lpfc_config_port_post(struct lpfc_hba * phba)
/* */
/************************************************************************/
int
-lpfc_hba_down_prep(struct lpfc_hba * phba)
+lpfc_hba_down_prep(struct lpfc_hba *phba)
{
+ struct lpfc_vport *vport = phba->pport;
+
/* Disable interrupts */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- /* Cleanup potential discovery resources */
- lpfc_els_flush_rscn(phba);
- lpfc_els_flush_cmd(phba);
- lpfc_disc_flush_list(phba);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ lpfc_cleanup_discovery_resources(vport);
+ }
- return (0);
+ return 0;
}
/************************************************************************/
@@ -453,20 +460,24 @@ lpfc_hba_down_prep(struct lpfc_hba * phba)
/* */
/************************************************************************/
int
-lpfc_hba_down_post(struct lpfc_hba * phba)
+lpfc_hba_down_post(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
struct lpfc_dmabuf *mp, *next_mp;
int i;
- /* Cleanup preposted buffers on the ELS ring */
- pring = &psli->ring[LPFC_ELS_RING];
- list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
- list_del(&mp->list);
- pring->postbufq_cnt--;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
+ lpfc_sli_hbqbuf_free_all(phba);
+ else {
+ /* Cleanup preposted buffers on the ELS ring */
+ pring = &psli->ring[LPFC_ELS_RING];
+ list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
+ list_del(&mp->list);
+ pring->postbufq_cnt--;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
}
for (i = 0; i < psli->num_rings; i++) {
@@ -477,6 +488,119 @@ lpfc_hba_down_post(struct lpfc_hba * phba)
return 0;
}
+/* HBA heart beat timeout handler */
+void
+lpfc_hb_timeout(unsigned long ptr)
+{
+ struct lpfc_hba *phba;
+ unsigned long iflag;
+
+ phba = (struct lpfc_hba *)ptr;
+ spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
+ if (!(phba->pport->work_port_events & WORKER_HB_TMO))
+ phba->pport->work_port_events |= WORKER_HB_TMO;
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+
+ if (phba->work_wait)
+ wake_up(phba->work_wait);
+ return;
+}
+
+static void
+lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
+{
+ unsigned long drvr_flag;
+
+ spin_lock_irqsave(&phba->hbalock, drvr_flag);
+ phba->hb_outstanding = 0;
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
+ !(phba->link_state == LPFC_HBA_ERROR) &&
+ !(phba->pport->fc_flag & FC_UNLOADING))
+ mod_timer(&phba->hb_tmofunc,
+ jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+ return;
+}
+
+void
+lpfc_hb_timeout_handler(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *pmboxq;
+ int retval;
+ struct lpfc_sli *psli = &phba->sli;
+
+ if ((phba->link_state == LPFC_HBA_ERROR) ||
+ (phba->pport->fc_flag & FC_UNLOADING) ||
+ (phba->pport->fc_flag & FC_OFFLINE_MODE))
+ return;
+
+ spin_lock_irq(&phba->pport->work_port_lock);
+ /* If the timer is already canceled do nothing */
+ if (!(phba->pport->work_port_events & WORKER_HB_TMO)) {
+ spin_unlock_irq(&phba->pport->work_port_lock);
+ return;
+ }
+
+ if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
+ jiffies)) {
+ spin_unlock_irq(&phba->pport->work_port_lock);
+ if (!phba->hb_outstanding)
+ mod_timer(&phba->hb_tmofunc,
+ jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+ else
+ mod_timer(&phba->hb_tmofunc,
+ jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+ return;
+ }
+ spin_unlock_irq(&phba->pport->work_port_lock);
+
+ /* If there is no heart beat outstanding, issue a heartbeat command */
+ if (!phba->hb_outstanding) {
+ pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
+ if (!pmboxq) {
+ mod_timer(&phba->hb_tmofunc,
+ jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+ return;
+ }
+
+ lpfc_heart_beat(phba, pmboxq);
+ pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
+ pmboxq->vport = phba->pport;
+ retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+
+ if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ mod_timer(&phba->hb_tmofunc,
+ jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+ return;
+ }
+ mod_timer(&phba->hb_tmofunc,
+ jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+ phba->hb_outstanding = 1;
+ return;
+ } else {
+ /*
+ * If heart beat timeout called with hb_outstanding set we
+ * need to take the HBA offline.
+ */
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0459 Adapter heartbeat failure, taking "
+ "this port offline.\n", phba->brd_no);
+
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+
+ lpfc_offline_prep(phba);
+ lpfc_offline(phba);
+ lpfc_unblock_mgmt_io(phba);
+ phba->link_state = LPFC_HBA_ERROR;
+ lpfc_hba_down_post(phba);
+ }
+}
+
/************************************************************************/
/* */
/* lpfc_handle_eratt */
@@ -486,11 +610,15 @@ lpfc_hba_down_post(struct lpfc_hba * phba)
/* */
/************************************************************************/
void
-lpfc_handle_eratt(struct lpfc_hba * phba)
+lpfc_handle_eratt(struct lpfc_hba *phba)
{
- struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_vport *vport = phba->pport;
+ struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
+ struct lpfc_vport *port_iterator;
uint32_t event_data;
+ struct Scsi_Host *shost;
+
/* If the pci channel is offline, ignore possible errors,
* since we cannot communicate with the pci card anyway. */
if (pci_channel_offline(phba->pcidev))
@@ -504,10 +632,17 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
"Data: x%x x%x x%x\n",
phba->brd_no, phba->work_hs,
phba->work_status[0], phba->work_status[1]);
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_ESTABLISH_LINK;
+ list_for_each_entry(port_iterator, &phba->port_list,
+ listentry) {
+ shost = lpfc_shost_from_vport(port_iterator);
+
+ spin_lock_irq(shost->host_lock);
+ port_iterator->fc_flag |= FC_ESTABLISH_LINK;
+ spin_unlock_irq(shost->host_lock);
+ }
+ spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
/*
* Firmware stops when it triggered erratt with HS_FFER6.
@@ -544,15 +679,18 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
phba->work_status[0], phba->work_status[1]);
event_data = FC_REG_DUMP_EVENT;
- fc_host_post_vendor_event(phba->host, fc_get_event_number(),
+ shost = lpfc_shost_from_vport(vport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(event_data), (char *) &event_data,
SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+ spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
lpfc_offline_prep(phba);
lpfc_offline(phba);
lpfc_unblock_mgmt_io(phba);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
lpfc_hba_down_post(phba);
}
}
@@ -566,9 +704,11 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
/* */
/************************************************************************/
void
-lpfc_handle_latt(struct lpfc_hba * phba)
+lpfc_handle_latt(struct lpfc_hba *phba)
{
- struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_vport *vport = phba->pport;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_vport *port_iterator;
LPFC_MBOXQ_t *pmb;
volatile uint32_t control;
struct lpfc_dmabuf *mp;
@@ -589,20 +729,22 @@ lpfc_handle_latt(struct lpfc_hba * phba)
rc = -EIO;
/* Cleanup any outstanding ELS commands */
- lpfc_els_flush_cmd(phba);
+ list_for_each_entry(port_iterator, &phba->port_list, listentry)
+ lpfc_els_flush_cmd(port_iterator);
psli->slistat.link_event++;
lpfc_read_la(phba, pmb, mp);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
+ pmb->vport = vport;
rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED)
goto lpfc_handle_latt_free_mbuf;
/* Clear Link Attention in HA REG */
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
writel(HA_LATT, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return;
@@ -614,7 +756,7 @@ lpfc_handle_latt_free_pmb:
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
/* Enable Link attention interrupts */
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_PROCESS_LA;
control = readl(phba->HCregaddr);
control |= HC_LAINT_ENA;
@@ -624,15 +766,13 @@ lpfc_handle_latt_err_exit:
/* Clear Link Attention in HA REG */
writel(HA_LATT, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
lpfc_linkdown(phba);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
/* The other case is an error from issue_mbox */
if (rc == -ENOMEM)
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_MBOX,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"%d:0300 READ_LA: no buffers\n",
phba->brd_no);
@@ -646,7 +786,7 @@ lpfc_handle_latt_err_exit:
/* */
/************************************************************************/
static int
-lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len)
+lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
uint8_t lenlo, lenhi;
int Length;
@@ -658,9 +798,7 @@ lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len)
return 0;
/* Vital Product */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"%d:0455 Vital Product Data: x%x x%x x%x x%x\n",
phba->brd_no,
(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
@@ -785,7 +923,7 @@ lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len)
}
static void
-lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
+lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
lpfc_vpd_t *vp;
uint16_t dev_id = phba->pcidev->device;
@@ -943,7 +1081,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
/* Returns the number of buffers NOT posted. */
/**************************************************/
int
-lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
+lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
int type)
{
IOCB_t *icmd;
@@ -955,9 +1093,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
/* While there are buffers to post */
while (cnt > 0) {
/* Allocate buffer for command iocb */
- spin_lock_irq(phba->host->host_lock);
iocb = lpfc_sli_get_iocbq(phba);
- spin_unlock_irq(phba->host->host_lock);
if (iocb == NULL) {
pring->missbufcnt = cnt;
return cnt;
@@ -972,9 +1108,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
&mp1->phys);
if (mp1 == 0 || mp1->virt == 0) {
kfree(mp1);
- spin_lock_irq(phba->host->host_lock);
lpfc_sli_release_iocbq(phba, iocb);
- spin_unlock_irq(phba->host->host_lock);
pring->missbufcnt = cnt;
return cnt;
}
@@ -990,9 +1124,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
kfree(mp2);
lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
kfree(mp1);
- spin_lock_irq(phba->host->host_lock);
lpfc_sli_release_iocbq(phba, iocb);
- spin_unlock_irq(phba->host->host_lock);
pring->missbufcnt = cnt;
return cnt;
}
@@ -1018,7 +1150,6 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
icmd->ulpLe = 1;
- spin_lock_irq(phba->host->host_lock);
if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
kfree(mp1);
@@ -1030,14 +1161,11 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
}
lpfc_sli_release_iocbq(phba, iocb);
pring->missbufcnt = cnt;
- spin_unlock_irq(phba->host->host_lock);
return cnt;
}
- spin_unlock_irq(phba->host->host_lock);
lpfc_sli_ringpostbuf_put(phba, pring, mp1);
- if (mp2) {
+ if (mp2)
lpfc_sli_ringpostbuf_put(phba, pring, mp2);
- }
}
pring->missbufcnt = 0;
return 0;
@@ -1050,7 +1178,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
/* */
/************************************************************************/
static int
-lpfc_post_rcv_buf(struct lpfc_hba * phba)
+lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
@@ -1151,7 +1279,7 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
int t;
uint32_t *HashWorking;
- uint32_t *pwwnn = phba->wwnn;
+ uint32_t *pwwnn = (uint32_t *) phba->wwnn;
HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL);
if (!HashWorking)
@@ -1170,64 +1298,76 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
}
static void
-lpfc_cleanup(struct lpfc_hba * phba)
+lpfc_cleanup(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp, *next_ndlp;
/* clean up phba - lpfc specific */
- lpfc_can_disctmo(phba);
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp)
+ lpfc_can_disctmo(vport);
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
lpfc_nlp_put(ndlp);
-
- INIT_LIST_HEAD(&phba->fc_nodes);
-
return;
}
static void
lpfc_establish_link_tmo(unsigned long ptr)
{
- struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+ struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
+ struct lpfc_vport *vport = phba->pport;
unsigned long iflag;
-
/* Re-establishing Link, timer expired */
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"%d:1300 Re-establishing Link, timer expired "
"Data: x%x x%x\n",
- phba->brd_no, phba->fc_flag, phba->hba_state);
- spin_lock_irqsave(phba->host->host_lock, iflag);
- phba->fc_flag &= ~FC_ESTABLISH_LINK;
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ phba->brd_no, vport->fc_flag,
+ vport->port_state);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ spin_lock_irqsave(shost->host_lock, iflag);
+ vport->fc_flag &= ~FC_ESTABLISH_LINK;
+ spin_unlock_irqrestore(shost->host_lock, iflag);
+ }
}
-static int
-lpfc_stop_timer(struct lpfc_hba * phba)
+void
+lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
- struct lpfc_sli *psli = &phba->sli;
+ del_timer_sync(&vport->els_tmofunc);
+ del_timer_sync(&vport->fc_fdmitmo);
+ lpfc_can_disctmo(vport);
+ return;
+}
+
+static void
+lpfc_stop_phba_timers(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
del_timer_sync(&phba->fcp_poll_timer);
del_timer_sync(&phba->fc_estabtmo);
- del_timer_sync(&phba->fc_disctmo);
- del_timer_sync(&phba->fc_fdmitmo);
- del_timer_sync(&phba->els_tmofunc);
- psli = &phba->sli;
- del_timer_sync(&psli->mbox_tmo);
- return(1);
+ list_for_each_entry(vport, &phba->port_list, listentry)
+ lpfc_stop_vport_timers(vport);
+ del_timer_sync(&phba->sli.mbox_tmo);
+ del_timer_sync(&phba->fabric_block_timer);
+ phba->hb_outstanding = 0;
+ del_timer_sync(&phba->hb_tmofunc);
+ return;
}
int
-lpfc_online(struct lpfc_hba * phba)
+lpfc_online(struct lpfc_hba *phba)
{
+ struct lpfc_vport *vport = phba->pport;
+
if (!phba)
return 0;
- if (!(phba->fc_flag & FC_OFFLINE_MODE))
+ if (!(vport->fc_flag & FC_OFFLINE_MODE))
return 0;
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"%d:0458 Bring Adapter online\n",
phba->brd_no);
@@ -1243,9 +1383,14 @@ lpfc_online(struct lpfc_hba * phba)
return 1;
}
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_OFFLINE_MODE;
- spin_unlock_irq(phba->host->host_lock);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_OFFLINE_MODE;
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
+ }
lpfc_unblock_mgmt_io(phba);
return 0;
@@ -1256,9 +1401,9 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba)
{
unsigned long iflag;
- spin_lock_irqsave(phba->host->host_lock, iflag);
- phba->fc_flag |= FC_BLOCK_MGMT_IO;
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
}
void
@@ -1266,17 +1411,18 @@ lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
{
unsigned long iflag;
- spin_lock_irqsave(phba->host->host_lock, iflag);
- phba->fc_flag &= ~FC_BLOCK_MGMT_IO;
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
}
void
lpfc_offline_prep(struct lpfc_hba * phba)
{
+ struct lpfc_vport *vport = phba->pport;
struct lpfc_nodelist *ndlp, *next_ndlp;
- if (phba->fc_flag & FC_OFFLINE_MODE)
+ if (vport->fc_flag & FC_OFFLINE_MODE)
return;
lpfc_block_mgmt_io(phba);
@@ -1284,39 +1430,49 @@ lpfc_offline_prep(struct lpfc_hba * phba)
lpfc_linkdown(phba);
/* Issue an unreg_login to all nodes */
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp)
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
- lpfc_unreg_rpi(phba, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
lpfc_sli_flush_mbox_queue(phba);
}
void
-lpfc_offline(struct lpfc_hba * phba)
+lpfc_offline(struct lpfc_hba *phba)
{
- unsigned long iflag;
+ struct lpfc_vport *vport = phba->pport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_vport *port_iterator;
- if (phba->fc_flag & FC_OFFLINE_MODE)
+ if (vport->fc_flag & FC_OFFLINE_MODE)
return;
/* stop all timers associated with this hba */
- lpfc_stop_timer(phba);
+ lpfc_stop_phba_timers(phba);
+ list_for_each_entry(port_iterator, &phba->port_list, listentry) {
+ port_iterator->work_port_events = 0;
+ }
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"%d:0460 Bring Adapter offline\n",
phba->brd_no);
/* Bring down the SLI Layer and cleanup. The HBA is offline
now. */
lpfc_sli_hba_down(phba);
- lpfc_cleanup(phba);
- spin_lock_irqsave(phba->host->host_lock, iflag);
- phba->work_hba_events = 0;
+ spin_lock_irq(&phba->hbalock);
phba->work_ha = 0;
- phba->fc_flag |= FC_OFFLINE_MODE;
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ vport->fc_flag |= FC_OFFLINE_MODE;
+ spin_unlock_irq(&phba->hbalock);
+ list_for_each_entry(port_iterator, &phba->port_list, listentry) {
+ shost = lpfc_shost_from_vport(port_iterator);
+
+ lpfc_cleanup(port_iterator);
+ spin_lock_irq(shost->host_lock);
+ vport->work_port_events = 0;
+ vport->fc_flag |= FC_OFFLINE_MODE;
+ spin_unlock_irq(shost->host_lock);
+ }
}
/******************************************************************************
@@ -1326,17 +1482,17 @@ lpfc_offline(struct lpfc_hba * phba)
*
******************************************************************************/
static int
-lpfc_scsi_free(struct lpfc_hba * phba)
+lpfc_scsi_free(struct lpfc_hba *phba)
{
struct lpfc_scsi_buf *sb, *sb_next;
struct lpfc_iocbq *io, *io_next;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
/* Release all the lpfc_scsi_bufs maintained by this host. */
list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
list_del(&sb->list);
pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
- sb->dma_handle);
+ sb->dma_handle);
kfree(sb);
phba->total_scsi_bufs--;
}
@@ -1348,134 +1504,183 @@ lpfc_scsi_free(struct lpfc_hba * phba)
phba->total_iocbq_bufs--;
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return 0;
}
-void lpfc_remove_device(struct lpfc_hba *phba)
-{
- unsigned long iflag;
- lpfc_free_sysfs_attr(phba);
-
- spin_lock_irqsave(phba->host->host_lock, iflag);
- phba->fc_flag |= FC_UNLOADING;
+struct lpfc_vport *
+lpfc_create_port(struct lpfc_hba *phba, int instance, struct fc_vport *fc_vport)
+{
+ struct lpfc_vport *vport;
+ struct Scsi_Host *shost;
+ int error = 0;
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
+ if (!shost)
+ goto out;
- fc_remove_host(phba->host);
- scsi_remove_host(phba->host);
+ vport = (struct lpfc_vport *) shost->hostdata;
+ vport->phba = phba;
- kthread_stop(phba->worker_thread);
+ vport->load_flag |= FC_LOADING;
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ shost->unique_id = instance;
+ shost->max_id = LPFC_MAX_TARGET;
+ shost->max_lun = phba->cfg_max_luns;
+ shost->this_id = -1;
+ shost->max_cmd_len = 16;
/*
- * Bring down the SLI Layer. This step disable all interrupts,
- * clears the rings, discards all mailbox commands, and resets
- * the HBA.
+ * Set initial can_queue value since 0 is no longer supported and
+ * scsi_add_host will fail. This will be adjusted later based on the
+ * max xri value determined in hba setup.
*/
- lpfc_sli_hba_down(phba);
- lpfc_sli_brdrestart(phba);
+ shost->can_queue = phba->cfg_hba_queue_depth - 10;
+ if (fc_vport != NULL) {
+ shost->transportt = lpfc_vport_transport_template;
+ vport->port_type = LPFC_NPIV_PORT;
+ } else {
+ shost->transportt = lpfc_transport_template;
+ vport->port_type = LPFC_PHYSICAL_PORT;
+ }
- /* Release the irq reservation */
- free_irq(phba->pcidev->irq, phba);
- pci_disable_msi(phba->pcidev);
+ /* Initialize all internally managed lists. */
+ INIT_LIST_HEAD(&vport->fc_nodes);
+ spin_lock_init(&vport->work_port_lock);
- lpfc_cleanup(phba);
- lpfc_stop_timer(phba);
- phba->work_hba_events = 0;
+ init_timer(&vport->fc_disctmo);
+ vport->fc_disctmo.function = lpfc_disc_timeout;
+ vport->fc_disctmo.data = (unsigned long)vport;
- /*
- * Call scsi_free before mem_free since scsi bufs are released to their
- * corresponding pools here.
- */
- lpfc_scsi_free(phba);
- lpfc_mem_free(phba);
+ init_timer(&vport->fc_fdmitmo);
+ vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
+ vport->fc_fdmitmo.data = (unsigned long)vport;
- /* Free resources associated with SLI2 interface */
- dma_free_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
- phba->slim2p, phba->slim2p_mapping);
+ init_timer(&vport->els_tmofunc);
+ vport->els_tmofunc.function = lpfc_els_timeout;
+ vport->els_tmofunc.data = (unsigned long)vport;
- /* unmap adapter SLIM and Control Registers */
- iounmap(phba->ctrl_regs_memmap_p);
- iounmap(phba->slim_memmap_p);
+ if (fc_vport != NULL) {
+ error = scsi_add_host(shost, &fc_vport->dev);
+ } else {
+ error = scsi_add_host(shost, &phba->pcidev->dev);
+ }
+ if (error)
+ goto out_put_shost;
- pci_release_regions(phba->pcidev);
- pci_disable_device(phba->pcidev);
+ list_add_tail(&vport->listentry, &phba->port_list);
+ return vport;
- idr_remove(&lpfc_hba_index, phba->brd_no);
- scsi_host_put(phba->host);
+out_put_shost:
+ scsi_host_put(shost);
+out:
+ return NULL;
}
-void lpfc_scan_start(struct Scsi_Host *host)
+void
+destroy_port(struct lpfc_vport *vport)
{
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
- if (lpfc_alloc_sysfs_attr(phba))
- goto error;
+ kfree(vport->vname);
- phba->MBslimaddr = phba->slim_memmap_p;
- phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
- phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
- phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
- phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
+ lpfc_debugfs_terminate(vport);
+ fc_remove_host(shost);
+ scsi_remove_host(shost);
- if (lpfc_sli_hba_setup(phba))
- goto error;
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&vport->listentry);
+ spin_unlock_irq(&phba->hbalock);
- /*
- * hba setup may have changed the hba_queue_depth so we need to adjust
- * the value of can_queue.
- */
- host->can_queue = phba->cfg_hba_queue_depth - 10;
+ lpfc_cleanup(vport);
return;
+}
+
+int
+lpfc_get_instance(void)
+{
+ int instance = 0;
-error:
- lpfc_remove_device(phba);
+ /* Assign an unused number */
+ if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
+ return -1;
+ if (idr_get_new(&lpfc_hba_index, NULL, &instance))
+ return -1;
+ return instance;
}
+/*
+ * Note: there is no scan_start function as adapter initialization
+ * will have asynchronously kicked off the link initialization.
+ */
+
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
- struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int stat = 0;
- if (!phba->host)
- return 1;
- if (time >= 30 * HZ)
+ spin_lock_irq(shost->host_lock);
+
+ if (vport->fc_flag & FC_UNLOADING) {
+ stat = 1;
+ goto finished;
+ }
+ if (time >= 30 * HZ) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "%d:0461 Scanning longer than 30 "
+ "seconds. Continuing initialization\n",
+ phba->brd_no);
+ stat = 1;
+ goto finished;
+ }
+ if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "%d:0465 Link down longer than 15 "
+ "seconds. Continuing initialization\n",
+ phba->brd_no);
+ stat = 1;
goto finished;
+ }
- if (phba->hba_state != LPFC_HBA_READY)
- return 0;
- if (phba->num_disc_nodes || phba->fc_prli_sent)
- return 0;
- if ((phba->fc_map_cnt == 0) && (time < 2 * HZ))
- return 0;
- if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)
- return 0;
- if ((phba->hba_state > LPFC_LINK_DOWN) || (time < 15 * HZ))
- return 0;
+ if (vport->port_state != LPFC_VPORT_READY)
+ goto finished;
+ if (vport->num_disc_nodes || vport->fc_prli_sent)
+ goto finished;
+ if (vport->fc_map_cnt == 0 && time < 2 * HZ)
+ goto finished;
+ if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
+ goto finished;
+
+ stat = 1;
finished:
- if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
- spin_lock_irq(shost->host_lock);
- lpfc_poll_start_timer(phba);
- spin_unlock_irq(shost->host_lock);
- }
+ spin_unlock_irq(shost->host_lock);
+ return stat;
+}
+void lpfc_host_attrib_init(struct Scsi_Host *shost)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
/*
- * set fixed host attributes
- * Must done after lpfc_sli_hba_setup()
+ * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
*/
- fc_host_node_name(shost) = wwn_to_u64(phba->fc_nodename.u.wwn);
- fc_host_port_name(shost) = wwn_to_u64(phba->fc_portname.u.wwn);
+ fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
+ fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
fc_host_supported_classes(shost) = FC_COS_CLASS3;
memset(fc_host_supported_fc4s(shost), 0,
- sizeof(fc_host_supported_fc4s(shost)));
+ sizeof(fc_host_supported_fc4s(shost)));
fc_host_supported_fc4s(shost)[2] = 1;
fc_host_supported_fc4s(shost)[7] = 1;
- lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost));
+ lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
+ sizeof fc_host_symbolic_name(shost));
fc_host_supported_speeds(shost) = 0;
if (phba->lmt & LMT_10Gb)
@@ -1488,31 +1693,31 @@ finished:
fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
fc_host_maxframe_size(shost) =
- ((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
- (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb);
+ (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
+ (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
/* This value is also unchanging */
memset(fc_host_active_fc4s(shost), 0,
- sizeof(fc_host_active_fc4s(shost)));
+ sizeof(fc_host_active_fc4s(shost)));
fc_host_active_fc4s(shost)[2] = 1;
fc_host_active_fc4s(shost)[7] = 1;
+ fc_host_max_npiv_vports(shost) = phba->max_vpi;
spin_lock_irq(shost->host_lock);
- phba->fc_flag &= ~FC_LOADING;
+ vport->fc_flag &= ~FC_LOADING;
spin_unlock_irq(shost->host_lock);
-
- return 1;
}
static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
- struct Scsi_Host *host;
- struct lpfc_hba *phba;
- struct lpfc_sli *psli;
+ struct lpfc_vport *vport = NULL;
+ struct lpfc_hba *phba;
+ struct lpfc_sli *psli;
struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
+ struct Scsi_Host *shost = NULL;
unsigned long bar0map_len, bar2map_len;
- int error = -ENODEV, retval;
+ int error = -ENODEV;
int i;
uint16_t iotag;
@@ -1521,61 +1726,46 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
goto out_disable_device;
- host = scsi_host_alloc(&lpfc_template, sizeof (struct lpfc_hba));
- if (!host)
+ phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL);
+ if (!phba)
goto out_release_regions;
- phba = (struct lpfc_hba*)host->hostdata;
- memset(phba, 0, sizeof (struct lpfc_hba));
- phba->host = host;
+ spin_lock_init(&phba->hbalock);
- phba->fc_flag |= FC_LOADING;
phba->pcidev = pdev;
/* Assign an unused board number */
- if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
- goto out_put_host;
+ if ((phba->brd_no = lpfc_get_instance()) < 0)
+ goto out_free_phba;
- error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no);
- if (error)
- goto out_put_host;
-
- host->unique_id = phba->brd_no;
+ INIT_LIST_HEAD(&phba->port_list);
+ INIT_LIST_HEAD(&phba->hbq_buffer_list);
+ /*
+ * Get all the module params for configuring this host and then
+ * establish the host.
+ */
+ lpfc_get_cfgparam(phba);
+ phba->max_vpi = LPFC_MAX_VPI;
/* Initialize timers used by driver */
init_timer(&phba->fc_estabtmo);
phba->fc_estabtmo.function = lpfc_establish_link_tmo;
phba->fc_estabtmo.data = (unsigned long)phba;
- init_timer(&phba->fc_disctmo);
- phba->fc_disctmo.function = lpfc_disc_timeout;
- phba->fc_disctmo.data = (unsigned long)phba;
-
- init_timer(&phba->fc_fdmitmo);
- phba->fc_fdmitmo.function = lpfc_fdmi_tmo;
- phba->fc_fdmitmo.data = (unsigned long)phba;
- init_timer(&phba->els_tmofunc);
- phba->els_tmofunc.function = lpfc_els_timeout;
- phba->els_tmofunc.data = (unsigned long)phba;
+
+ init_timer(&phba->hb_tmofunc);
+ phba->hb_tmofunc.function = lpfc_hb_timeout;
+ phba->hb_tmofunc.data = (unsigned long)phba;
+
psli = &phba->sli;
init_timer(&psli->mbox_tmo);
psli->mbox_tmo.function = lpfc_mbox_timeout;
- psli->mbox_tmo.data = (unsigned long)phba;
-
+ psli->mbox_tmo.data = (unsigned long) phba;
init_timer(&phba->fcp_poll_timer);
phba->fcp_poll_timer.function = lpfc_poll_timeout;
- phba->fcp_poll_timer.data = (unsigned long)phba;
-
- /*
- * Get all the module params for configuring this host and then
- * establish the host parameters.
- */
- lpfc_get_cfgparam(phba);
-
- host->max_id = LPFC_MAX_TARGET;
- host->max_lun = phba->cfg_max_luns;
- host->this_id = -1;
-
- INIT_LIST_HEAD(&phba->fc_nodes);
+ phba->fcp_poll_timer.data = (unsigned long) phba;
+ init_timer(&phba->fabric_block_timer);
+ phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
+ phba->fabric_block_timer.data = (unsigned long) phba;
pci_set_master(pdev);
pci_try_set_mwi(pdev);
@@ -1620,13 +1810,22 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
memset(phba->slim2p, 0, SLI2_SLIM_SIZE);
+ phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev,
+ lpfc_sli_hbq_size(),
+ &phba->hbqslimp.phys,
+ GFP_KERNEL);
+ if (!phba->hbqslimp.virt)
+ goto out_free_slim;
+
+ memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
+
/* Initialize the SLI Layer to run with lpfc HBAs. */
lpfc_sli_setup(phba);
lpfc_sli_queue_setup(phba);
error = lpfc_mem_alloc(phba);
if (error)
- goto out_free_slim;
+ goto out_free_hbqslimp;
/* Initialize and populate the iocb list per host. */
INIT_LIST_HEAD(&phba->lpfc_iocb_list);
@@ -1650,10 +1849,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
error = -ENOMEM;
goto out_free_iocbq;
}
- spin_lock_irq(phba->host->host_lock);
+
+ spin_lock_irq(&phba->hbalock);
list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
phba->total_iocbq_bufs++;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
}
/* Initialize HBA structure */
@@ -1674,22 +1874,22 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_free_iocbq;
}
- /*
- * Set initial can_queue value since 0 is no longer supported and
- * scsi_add_host will fail. This will be adjusted later based on the
- * max xri value determined in hba setup.
- */
- host->can_queue = phba->cfg_hba_queue_depth - 10;
-
- /* Tell the midlayer we support 16 byte commands */
- host->max_cmd_len = 16;
-
/* Initialize the list of scsi buffers used by driver for scsi IO. */
spin_lock_init(&phba->scsi_buf_list_lock);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
- host->transportt = lpfc_transport_template;
- pci_set_drvdata(pdev, host);
+ /* Initialize list of fabric iocbs */
+ INIT_LIST_HEAD(&phba->fabric_iocb_list);
+
+ vport = lpfc_create_port(phba, phba->brd_no, NULL);
+ if (!vport)
+ goto out_kthread_stop;
+
+ shost = lpfc_shost_from_vport(vport);
+ phba->pport = vport;
+ lpfc_debugfs_initialize(vport);
+
+ pci_set_drvdata(pdev, shost);
if (phba->cfg_use_msi) {
error = pci_enable_msi(phba->pcidev);
@@ -1700,38 +1900,68 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
}
error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
- LPFC_DRIVER_NAME, phba);
+ LPFC_DRIVER_NAME, phba);
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0451 Enable interrupt handler failed\n",
phba->brd_no);
- goto out_kthread_stop;
+ goto out_disable_msi;
}
- error = scsi_add_host(host, &pdev->dev);
- if (error)
+ phba->MBslimaddr = phba->slim_memmap_p;
+ phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
+ phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
+ phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
+ phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
+
+ if (lpfc_alloc_sysfs_attr(vport))
goto out_free_irq;
- scsi_scan_host(host);
+ if (lpfc_sli_hba_setup(phba))
+ goto out_remove_device;
+
+ /*
+ * hba setup may have changed the hba_queue_depth so we need to adjust
+ * the value of can_queue.
+ */
+ shost->can_queue = phba->cfg_hba_queue_depth - 10;
+
+ lpfc_host_attrib_init(shost);
+
+ if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+ spin_lock_irq(shost->host_lock);
+ lpfc_poll_start_timer(phba);
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ scsi_scan_host(shost);
return 0;
+out_remove_device:
+ lpfc_free_sysfs_attr(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_UNLOADING;
+ spin_unlock_irq(shost->host_lock);
out_free_irq:
- lpfc_stop_timer(phba);
- phba->work_hba_events = 0;
+ lpfc_stop_phba_timers(phba);
+ phba->pport->work_port_events = 0;
free_irq(phba->pcidev->irq, phba);
+out_disable_msi:
pci_disable_msi(phba->pcidev);
+ destroy_port(vport);
out_kthread_stop:
kthread_stop(phba->worker_thread);
out_free_iocbq:
list_for_each_entry_safe(iocbq_entry, iocbq_next,
&phba->lpfc_iocb_list, list) {
- spin_lock_irq(phba->host->host_lock);
kfree(iocbq_entry);
phba->total_iocbq_bufs--;
- spin_unlock_irq(phba->host->host_lock);
}
lpfc_mem_free(phba);
+out_free_hbqslimp:
+ dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt,
+ phba->hbqslimp.phys);
out_free_slim:
dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p,
phba->slim2p_mapping);
@@ -1741,27 +1971,85 @@ out_iounmap_slim:
iounmap(phba->slim_memmap_p);
out_idr_remove:
idr_remove(&lpfc_hba_index, phba->brd_no);
-out_put_host:
- phba->host = NULL;
- scsi_host_put(host);
+out_free_phba:
+ kfree(phba);
out_release_regions:
pci_release_regions(pdev);
out_disable_device:
pci_disable_device(pdev);
out:
pci_set_drvdata(pdev, NULL);
+ if (shost)
+ scsi_host_put(shost);
return error;
}
static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
- struct Scsi_Host *host = pci_get_drvdata(pdev);
- struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata;
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_vport *port_iterator;
+ list_for_each_entry(port_iterator, &phba->port_list, listentry)
+ port_iterator->load_flag |= FC_UNLOADING;
+
+ kfree(vport->vname);
+ lpfc_free_sysfs_attr(vport);
+
+ fc_remove_host(shost);
+ scsi_remove_host(shost);
+
+ /*
+ * Bring down the SLI Layer. This step disable all interrupts,
+ * clears the rings, discards all mailbox commands, and resets
+ * the HBA.
+ */
+ lpfc_sli_hba_down(phba);
+ lpfc_sli_brdrestart(phba);
+
+ lpfc_stop_phba_timers(phba);
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&vport->listentry);
+ spin_unlock_irq(&phba->hbalock);
+
- lpfc_remove_device(phba);
+ lpfc_debugfs_terminate(vport);
+ lpfc_cleanup(vport);
+
+ kthread_stop(phba->worker_thread);
+
+ /* Release the irq reservation */
+ free_irq(phba->pcidev->irq, phba);
+ pci_disable_msi(phba->pcidev);
pci_set_drvdata(pdev, NULL);
+ scsi_host_put(shost);
+
+ /*
+ * Call scsi_free before mem_free since scsi bufs are released to their
+ * corresponding pools here.
+ */
+ lpfc_scsi_free(phba);
+ lpfc_mem_free(phba);
+
+ dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt,
+ phba->hbqslimp.phys);
+
+ /* Free resources associated with SLI2 interface */
+ dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+ phba->slim2p, phba->slim2p_mapping);
+
+ /* unmap adapter SLIM and Control Registers */
+ iounmap(phba->ctrl_regs_memmap_p);
+ iounmap(phba->slim_memmap_p);
+
+ idr_remove(&lpfc_hba_index, phba->brd_no);
+
+ kfree(phba);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
}
/**
@@ -1819,10 +2107,13 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
/* Re-establishing Link */
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_ESTABLISH_LINK;
+ spin_lock_irq(host->host_lock);
+ phba->pport->fc_flag |= FC_ESTABLISH_LINK;
+ spin_unlock_irq(host->host_lock);
+
+ spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
/* Take device offline; this will perform cleanup */
@@ -1932,7 +2223,7 @@ static struct pci_driver lpfc_driver = {
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
.remove = __devexit_p(lpfc_pci_remove_one),
- .err_handler = &lpfc_err_handler,
+ .err_handler = &lpfc_err_handler,
};
static int __init
@@ -1945,11 +2236,15 @@ lpfc_init(void)
lpfc_transport_template =
fc_attach_transport(&lpfc_transport_functions);
- if (!lpfc_transport_template)
+ lpfc_vport_transport_template =
+ fc_attach_transport(&lpfc_vport_transport_functions);
+ if (!lpfc_transport_template || !lpfc_vport_transport_template)
return -ENOMEM;
error = pci_register_driver(&lpfc_driver);
- if (error)
+ if (error) {
fc_release_transport(lpfc_transport_template);
+ fc_release_transport(lpfc_vport_transport_template);
+ }
return error;
}
@@ -1959,6 +2254,7 @@ lpfc_exit(void)
{
pci_unregister_driver(&lpfc_driver);
fc_release_transport(lpfc_transport_template);
+ fc_release_transport(lpfc_vport_transport_template);
}
module_init(lpfc_init);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 438cbcd9eb1..8a6ceffeabc 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -30,6 +30,7 @@
#define LOG_SLI 0x800 /* SLI events */
#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
#define LOG_LIBDFC 0x2000 /* Libdfc events */
+#define LOG_VPORT 0x4000 /* NPIV events */
#define LOG_ALL_MSG 0xffff /* LOG all messages */
#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 8041c3f06f7..8f42fbfdd29 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -82,6 +82,22 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
}
/**********************************************/
+/* lpfc_heart_beat Issue a HEART_BEAT */
+/* mailbox command */
+/**********************************************/
+void
+lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+ mb->mbxCommand = MBX_HEARTBEAT;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**********************************************/
/* lpfc_read_la Issue a READ LA */
/* mailbox command */
/**********************************************/
@@ -134,6 +150,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
void
lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
+ struct lpfc_vport *vport = phba->pport;
MAILBOX_t *mb = &pmb->mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -147,7 +164,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
}
- mb->un.varCfgLnk.myId = phba->fc_myDID;
+ mb->un.varCfgLnk.myId = vport->fc_myDID;
mb->un.varCfgLnk.edtov = phba->fc_edtov;
mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
mb->un.varCfgLnk.ratov = phba->fc_ratov;
@@ -239,7 +256,7 @@ lpfc_init_link(struct lpfc_hba * phba,
/* mailbox command */
/**********************************************/
int
-lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
{
struct lpfc_dmabuf *mp;
MAILBOX_t *mb;
@@ -270,6 +287,7 @@ lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
+ mb->un.varRdSparm.vpi = vpi;
/* save address for completion */
pmb->context1 = mp;
@@ -282,7 +300,8 @@ lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
/* mailbox command */
/********************************************/
void
-lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb)
+lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
+ LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
@@ -290,6 +309,7 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb)
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varUnregDID.did = did;
+ mb->un.varUnregDID.vpi = vpi;
mb->mbxCommand = MBX_UNREG_D_ID;
mb->mbxOwner = OWN_HOST;
@@ -335,19 +355,17 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
/* mailbox command */
/********************************************/
int
-lpfc_reg_login(struct lpfc_hba * phba,
- uint32_t did, uint8_t * param, LPFC_MBOXQ_t * pmb, uint32_t flag)
+lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
+ uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
{
+ MAILBOX_t *mb = &pmb->mb;
uint8_t *sparam;
struct lpfc_dmabuf *mp;
- MAILBOX_t *mb;
- struct lpfc_sli *psli;
- psli = &phba->sli;
- mb = &pmb->mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRegLogin.rpi = 0;
+ mb->un.varRegLogin.vpi = vpi;
mb->un.varRegLogin.did = did;
mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
@@ -359,12 +377,10 @@ lpfc_reg_login(struct lpfc_hba * phba,
kfree(mp);
mb->mbxCommand = MBX_REG_LOGIN64;
/* REG_LOGIN: no buffers */
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_MBOX,
- "%d:0302 REG_LOGIN: no buffers Data x%x x%x\n",
- phba->brd_no,
- (uint32_t) did, (uint32_t) flag);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "%d (%d):0302 REG_LOGIN: no buffers, DID x%x, "
+ "flag x%x\n",
+ phba->brd_no, vpi, did, flag);
return (1);
}
INIT_LIST_HEAD(&mp->list);
@@ -389,7 +405,8 @@ lpfc_reg_login(struct lpfc_hba * phba,
/* mailbox command */
/**********************************************/
void
-lpfc_unreg_login(struct lpfc_hba * phba, uint32_t rpi, LPFC_MBOXQ_t * pmb)
+lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
+ LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
@@ -398,12 +415,52 @@ lpfc_unreg_login(struct lpfc_hba * phba, uint32_t rpi, LPFC_MBOXQ_t * pmb)
mb->un.varUnregLogin.rpi = (uint16_t) rpi;
mb->un.varUnregLogin.rsvd1 = 0;
+ mb->un.varUnregLogin.vpi = vpi;
mb->mbxCommand = MBX_UNREG_LOGIN;
mb->mbxOwner = OWN_HOST;
return;
}
+/**************************************************/
+/* lpfc_reg_vpi Issue a REG_VPI */
+/* mailbox command */
+/**************************************************/
+void
+lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
+ LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb = &pmb->mb;
+
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->un.varRegVpi.vpi = vpi;
+ mb->un.varRegVpi.sid = sid;
+
+ mb->mbxCommand = MBX_REG_VPI;
+ mb->mbxOwner = OWN_HOST;
+ return;
+
+}
+
+/**************************************************/
+/* lpfc_unreg_vpi Issue a UNREG_VNPI */
+/* mailbox command */
+/**************************************************/
+void
+lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->un.varUnregVpi.vpi = vpi;
+
+ mb->mbxCommand = MBX_UNREG_VPI;
+ mb->mbxOwner = OWN_HOST;
+ return;
+
+}
+
static void
lpfc_config_pcb_setup(struct lpfc_hba * phba)
{
@@ -412,14 +469,18 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
PCB_t *pcbp = &phba->slim2p->pcb;
dma_addr_t pdma_addr;
uint32_t offset;
- uint32_t iocbCnt;
+ uint32_t iocbCnt = 0;
int i;
pcbp->maxRing = (psli->num_rings - 1);
- iocbCnt = 0;
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
+
+ pring->sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE:
+ SLI2_IOCB_CMD_SIZE;
+ pring->sizeRiocb = phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE:
+ SLI2_IOCB_RSP_SIZE;
/* A ring MUST have both cmd and rsp entries defined to be
valid */
if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) {
@@ -434,20 +495,18 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
continue;
}
/* Command ring setup for ring */
- pring->cmdringaddr =
- (void *)&phba->slim2p->IOCBs[iocbCnt];
+ pring->cmdringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt];
pcbp->rdsc[i].cmdEntries = pring->numCiocb;
- offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
- (uint8_t *)phba->slim2p;
+ offset = (uint8_t *) &phba->slim2p->IOCBs[iocbCnt] -
+ (uint8_t *) phba->slim2p;
pdma_addr = phba->slim2p_mapping + offset;
pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
iocbCnt += pring->numCiocb;
/* Response ring setup for ring */
- pring->rspringaddr =
- (void *)&phba->slim2p->IOCBs[iocbCnt];
+ pring->rspringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt];
pcbp->rdsc[i].rspEntries = pring->numRiocb;
offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
@@ -462,16 +521,108 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
void
lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
- MAILBOX_t *mb;
-
- mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRdRev.cv = 1;
+ mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
mb->mbxCommand = MBX_READ_REV;
mb->mbxOwner = OWN_HOST;
return;
}
+static void
+lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
+ struct lpfc_hbq_init *hbq_desc)
+{
+ hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
+ hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen;
+ hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
+}
+
+static void
+lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
+ struct lpfc_hbq_init *hbq_desc)
+{
+ hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
+ hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen;
+ hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
+ hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff;
+ memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
+ sizeof(hbqmb->profiles.profile3.cmdmatch));
+}
+
+static void
+lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
+ struct lpfc_hbq_init *hbq_desc)
+{
+ hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
+ hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen;
+ hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
+ hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff;
+ memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
+ sizeof(hbqmb->profiles.profile5.cmdmatch));
+}
+
+void
+lpfc_config_hbq(struct lpfc_hba *phba, struct lpfc_hbq_init *hbq_desc,
+ uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
+{
+ int i;
+ MAILBOX_t *mb = &pmb->mb;
+ struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
+
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+ hbqmb->entry_count = hbq_desc->entry_count; /* # entries in HBQ */
+ hbqmb->recvNotify = hbq_desc->rn; /* Receive
+ * Notification */
+ hbqmb->numMask = hbq_desc->mask_count; /* # R_CTL/TYPE masks
+ * # in words 0-19 */
+ hbqmb->profile = hbq_desc->profile; /* Selection profile:
+ * 0 = all,
+ * 7 = logentry */
+ hbqmb->ringMask = hbq_desc->ring_mask; /* Binds HBQ to a ring
+ * e.g. Ring0=b0001,
+ * ring2=b0100 */
+ hbqmb->headerLen = hbq_desc->headerLen; /* 0 if not profile 4
+ * or 5 */
+ hbqmb->logEntry = hbq_desc->logEntry; /* Set to 1 if this
+ * HBQ will be used
+ * for LogEntry
+ * buffers */
+ hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
+ hbq_entry_index * sizeof(struct lpfc_hbq_entry);
+ hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);
+
+ mb->mbxCommand = MBX_CONFIG_HBQ;
+ mb->mbxOwner = OWN_HOST;
+
+ /* Copy info for profiles 2, 3 and 5. For other
+ * profiles this area is reserved
+ */
+ if (hbq_desc->profile == 2)
+ lpfc_build_hbq_profile2(hbqmb, hbq_desc);
+ else if (hbq_desc->profile == 3)
+ lpfc_build_hbq_profile3(hbqmb, hbq_desc);
+ else if (hbq_desc->profile == 5)
+ lpfc_build_hbq_profile5(hbqmb, hbq_desc);
+
+ /* Return if no rctl / type masks for this HBQ */
+ if (!hbq_desc->mask_count)
+ return;
+
+ /* Otherwise we set up specific rctl / type masks for this HBQ */
+ for (i = 0; i < hbq_desc->mask_count; i++) {
+ hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
+ hbqmb->hbqMasks[i].tmask = hbq_desc->hbqMasks[i].tmask;
+ hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
+ hbqmb->hbqMasks[i].rctlmask = hbq_desc->hbqMasks[i].rctlmask;
+ }
+
+ return;
+}
+
+
+
void
lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
{
@@ -514,15 +665,16 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
}
void
-lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
+ MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
MAILBOX_t *mb = &pmb->mb;
dma_addr_t pdma_addr;
uint32_t bar_low, bar_high;
size_t offset;
struct lpfc_hgp hgp;
- void __iomem *to_slim;
int i;
+ uint32_t pgp_offset;
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
mb->mbxCommand = MBX_CONFIG_PORT;
@@ -535,12 +687,29 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
+ /* If HBA supports SLI=3 ask for it */
+
+ if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
+ mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
+ mb->un.varCfgPort.max_hbq = 1; /* Requesting 2 HBQs */
+ if (phba->max_vpi && phba->cfg_npiv_enable &&
+ phba->vpd.sli3Feat.cmv) {
+ mb->un.varCfgPort.max_vpi = phba->max_vpi;
+ mb->un.varCfgPort.cmv = 1;
+ phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
+ } else
+ mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
+ } else
+ phba->sli_rev = 2;
+ mb->un.varCfgPort.sli_mode = phba->sli_rev;
+
/* Now setup pcb */
phba->slim2p->pcb.type = TYPE_NATIVE_SLI2;
phba->slim2p->pcb.feature = FEATURE_INITIAL_SLI2;
/* Setup Mailbox pointers */
- phba->slim2p->pcb.mailBoxSize = sizeof(MAILBOX_t);
+ phba->slim2p->pcb.mailBoxSize = offsetof(MAILBOX_t, us) +
+ sizeof(struct sli2_desc);
offset = (uint8_t *)&phba->slim2p->mbx - (uint8_t *)phba->slim2p;
pdma_addr = phba->slim2p_mapping + offset;
phba->slim2p->pcb.mbAddrHigh = putPaddrHigh(pdma_addr);
@@ -568,29 +737,70 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);
+ /*
+ * Set up HGP - Port Memory
+ *
+ * The port expects the host get/put pointers to reside in memory
+ * following the "non-diagnostic" mode mailbox (32 words, 0x80 bytes)
+ * area of SLIM. In SLI-2 mode, there's an additional 16 reserved
+ * words (0x40 bytes). This area is not reserved if HBQs are
+ * configured in SLI-3.
+ *
+ * CR0Put - SLI2(no HBQs) = 0xc0, With HBQs = 0x80
+ * RR0Get 0xc4 0x84
+ * CR1Put 0xc8 0x88
+ * RR1Get 0xcc 0x8c
+ * CR2Put 0xd0 0x90
+ * RR2Get 0xd4 0x94
+ * CR3Put 0xd8 0x98
+ * RR3Get 0xdc 0x9c
+ *
+ * Reserved 0xa0-0xbf
+ * If HBQs configured:
+ * HBQ 0 Put ptr 0xc0
+ * HBQ 1 Put ptr 0xc4
+ * HBQ 2 Put ptr 0xc8
+ * ......
+ * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
+ *
+ */
+
+ if (phba->sli_rev == 3) {
+ phba->host_gp = &mb_slim->us.s3.host[0];
+ phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
+ } else {
+ phba->host_gp = &mb_slim->us.s2.host[0];
+ phba->hbq_put = NULL;
+ }
/* mask off BAR0's flag bits 0 - 3 */
phba->slim2p->pcb.hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
- (SLIMOFF*sizeof(uint32_t));
+ (void __iomem *) phba->host_gp -
+ (void __iomem *)phba->MBslimaddr;
if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
phba->slim2p->pcb.hgpAddrHigh = bar_high;
else
phba->slim2p->pcb.hgpAddrHigh = 0;
/* write HGP data to SLIM at the required longword offset */
memset(&hgp, 0, sizeof(struct lpfc_hgp));
- to_slim = phba->MBslimaddr + (SLIMOFF*sizeof (uint32_t));
for (i=0; i < phba->sli.num_rings; i++) {
- lpfc_memcpy_to_slim(to_slim, &hgp, sizeof(struct lpfc_hgp));
- to_slim += sizeof (struct lpfc_hgp);
+ lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
+ sizeof(*phba->host_gp));
}
/* Setup Port Group ring pointer */
- offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port -
- (uint8_t *)phba->slim2p;
- pdma_addr = phba->slim2p_mapping + offset;
+ if (phba->sli_rev == 3)
+ pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s3_pgp.port -
+ (uint8_t *)phba->slim2p;
+ else
+ pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port -
+ (uint8_t *)phba->slim2p;
+
+ pdma_addr = phba->slim2p_mapping + pgp_offset;
phba->slim2p->pcb.pgpAddrHigh = putPaddrHigh(pdma_addr);
phba->slim2p->pcb.pgpAddrLow = putPaddrLow(pdma_addr);
+ phba->hbq_get = &phba->slim2p->mbx.us.s3_pgp.hbq_get[0];
	/* Use callback routine to setup rings in the pcb */
lpfc_config_pcb_setup(phba);
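
A minimal standalone sketch of the SLIM offset arithmetic described by the host get/put comment earlier in this hunk; the 32-word mailbox area and the 16 reserved SLI-2 words are taken from that comment, and the snippet is illustrative only, not driver code.

#include <stdio.h>

int main(void)
{
	unsigned int mbox_bytes = 32 * 4;                   /* 0x80: non-diagnostic mailbox area */
	unsigned int sli2_resvd = 16 * 4;                   /* 0x40: reserved only without HBQs  */
	unsigned int hgp_sli2   = mbox_bytes + sli2_resvd;  /* CR0Put at 0xc0 in SLI-2 mode      */
	unsigned int hgp_hbq    = mbox_bytes;               /* CR0Put at 0x80 when HBQs are used */

	printf("host get/put offset: SLI-2 0x%x, with HBQs 0x%x\n",
	       hgp_sli2, hgp_hbq);
	return 0;
}
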
@@ -606,11 +816,7 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
/* Swap PCB if needed */
lpfc_sli_pcimem_bcopy(&phba->slim2p->pcb, &phba->slim2p->pcb,
- sizeof (PCB_t));
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "%d:0405 Service Level Interface (SLI) 2 selected\n",
- phba->brd_no);
+ sizeof(PCB_t));
}
void
@@ -644,15 +850,23 @@ lpfc_mbox_get(struct lpfc_hba * phba)
LPFC_MBOXQ_t *mbq = NULL;
struct lpfc_sli *psli = &phba->sli;
- list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t,
- list);
- if (mbq) {
+ list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
+ if (mbq)
psli->mboxq_cnt--;
- }
return mbq;
}
+void
+lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
+{
+ /* This function expects to be called from interrupt context */
+ spin_lock(&phba->hbalock);
+ list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
+ spin_unlock(&phba->hbalock);
+ return;
+}
+
int
lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
{
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index ec3bbbde6f7..3594c469494 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2005 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2006 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -38,10 +38,13 @@
#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
+
+
int
lpfc_mem_alloc(struct lpfc_hba * phba)
{
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+ int longs;
int i;
phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool",
@@ -80,10 +83,27 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
if (!phba->nlp_mem_pool)
goto fail_free_mbox_pool;
+ phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev,
+ LPFC_BPL_SIZE, 8, 0);
+ if (!phba->lpfc_hbq_pool)
+ goto fail_free_nlp_mem_pool;
+
+ /* vpi zero is reserved for the physical port so add 1 to max */
+ longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
+ if (!phba->vpi_bmask)
+ goto fail_free_hbq_pool;
+
return 0;
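
A minimal sketch of the round-up-to-longs arithmetic used above for the vpi bitmap; the max_vpi value of 100 is an assumed example, and the snippet is illustrative only, not driver code.

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned int bits_per_long = sizeof(unsigned long) * CHAR_BIT;
	unsigned int max_vpi = 100;                    /* assumed example value          */
	unsigned int bits = max_vpi + 1;               /* vpi 0 is the physical port     */
	unsigned int longs = (bits + bits_per_long - 1) / bits_per_long;   /* round up   */

	printf("%u vpi bits -> %u unsigned long(s) in the bitmap\n", bits, longs);
	return 0;
}
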
+ fail_free_hbq_pool:
+ lpfc_sli_hbqbuf_free_all(phba);
+ fail_free_nlp_mem_pool:
+ mempool_destroy(phba->nlp_mem_pool);
+ phba->nlp_mem_pool = NULL;
fail_free_mbox_pool:
mempool_destroy(phba->mbox_mem_pool);
+ phba->mbox_mem_pool = NULL;
fail_free_mbuf_pool:
while (i--)
pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
@@ -91,8 +111,10 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
kfree(pool->elements);
fail_free_lpfc_mbuf_pool:
pci_pool_destroy(phba->lpfc_mbuf_pool);
+ phba->lpfc_mbuf_pool = NULL;
fail_free_dma_buf_pool:
pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
+ phba->lpfc_scsi_dma_buf_pool = NULL;
fail:
return -ENOMEM;
}
@@ -106,6 +128,9 @@ lpfc_mem_free(struct lpfc_hba * phba)
struct lpfc_dmabuf *mp;
int i;
+ kfree(phba->vpi_bmask);
+ lpfc_sli_hbqbuf_free_all(phba);
+
list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
mp = (struct lpfc_dmabuf *) (mbox->context1);
if (mp) {
@@ -115,6 +140,15 @@ lpfc_mem_free(struct lpfc_hba * phba)
list_del(&mbox->list);
mempool_free(mbox, phba->mbox_mem_pool);
}
+ list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
+ mp = (struct lpfc_dmabuf *) (mbox->context1);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ list_del(&mbox->list);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
if (psli->mbox_active) {
@@ -132,13 +166,21 @@ lpfc_mem_free(struct lpfc_hba * phba)
pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
pool->elements[i].phys);
kfree(pool->elements);
+
+ pci_pool_destroy(phba->lpfc_hbq_pool);
mempool_destroy(phba->nlp_mem_pool);
mempool_destroy(phba->mbox_mem_pool);
pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
pci_pool_destroy(phba->lpfc_mbuf_pool);
- /* Free the iocb lookup array */
+ phba->lpfc_hbq_pool = NULL;
+ phba->nlp_mem_pool = NULL;
+ phba->mbox_mem_pool = NULL;
+ phba->lpfc_scsi_dma_buf_pool = NULL;
+ phba->lpfc_mbuf_pool = NULL;
+
+ /* Free the iocb lookup array */
kfree(psli->iocbq_lookup);
psli->iocbq_lookup = NULL;
@@ -148,20 +190,23 @@ void *
lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+ unsigned long iflags;
void *ret;
ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
- if (!ret && ( mem_flags & MEM_PRI) && pool->current_count) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
pool->current_count--;
ret = pool->elements[pool->current_count].virt;
*handle = pool->elements[pool->current_count].phys;
}
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return ret;
}
void
-lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
+__lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
{
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
@@ -174,3 +219,51 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
}
return;
}
+
+void
+lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
+{
+ unsigned long iflags;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ __lpfc_mbuf_free(phba, virt, dma);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return;
+}
+
+void *
+lpfc_hbq_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
+{
+ void *ret;
+ ret = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_ATOMIC, handle);
+ return ret;
+}
+
+void
+lpfc_hbq_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
+{
+ pci_pool_free(phba->lpfc_hbq_pool, virt, dma);
+ return;
+}
+
+void
+lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
+{
+ struct hbq_dmabuf *hbq_entry;
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
+ if (hbq_entry->tag == -1) {
+ lpfc_hbq_free(phba, hbq_entry->dbuf.virt,
+ hbq_entry->dbuf.phys);
+ kfree(hbq_entry);
+ } else {
+ lpfc_sli_free_hbq(phba, hbq_entry);
+ }
+ } else {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ return;
+}
+
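
A minimal sketch of the container_of()-style recovery used in lpfc_in_buf_free() above: given only a pointer to an embedded member, the enclosing structure is found by subtracting the member's offset. The struct names here are simplified stand-ins, not the driver's types.

#include <stdio.h>
#include <stddef.h>

struct dmabuf  { void *virt; unsigned long phys; };
struct hbq_buf { struct dmabuf dbuf; int tag; };      /* stand-in for struct hbq_dmabuf */

int main(void)
{
	struct hbq_buf hbq = { .tag = 7 };
	struct dmabuf *mp = &hbq.dbuf;                /* only the embedded member is known */

	/* Walk back from the member to the containing structure. */
	struct hbq_buf *owner = (struct hbq_buf *)
		((char *)mp - offsetof(struct hbq_buf, dbuf));

	printf("recovered tag = %d\n", owner->tag);
	return 0;
}
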
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index b309841e384..bca2f5c9b4b 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,4 +1,4 @@
-/*******************************************************************
+ /*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2007 Emulex. All rights reserved. *
@@ -35,20 +35,22 @@
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
/* Called to verify a rcv'ed ADISC was intended for us. */
static int
-lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
- struct lpfc_name * nn, struct lpfc_name * pn)
+lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct lpfc_name *nn, struct lpfc_name *pn)
{
/* Compare the ADISC rsp WWNN / WWPN matches our internal node
* table entry for that node.
*/
- if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)) != 0)
+ if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
return 0;
- if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)) != 0)
+ if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
return 0;
/* we match, return success */
@@ -56,11 +58,10 @@ lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
}
int
-lpfc_check_sparm(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, struct serv_parm * sp,
- uint32_t class)
+lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct serv_parm * sp, uint32_t class)
{
- volatile struct serv_parm *hsp = &phba->fc_sparam;
+ volatile struct serv_parm *hsp = &vport->fc_sparam;
uint16_t hsp_value, ssp_value = 0;
/*
@@ -75,12 +76,14 @@ lpfc_check_sparm(struct lpfc_hba * phba,
hsp->cls1.rcvDataSizeLsb;
ssp_value = (sp->cls1.rcvDataSizeMsb << 8) |
sp->cls1.rcvDataSizeLsb;
+ if (!ssp_value)
+ goto bad_service_param;
if (ssp_value > hsp_value) {
sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
}
} else if (class == CLASS1) {
- return 0;
+ goto bad_service_param;
}
if (sp->cls2.classValid) {
@@ -88,12 +91,14 @@ lpfc_check_sparm(struct lpfc_hba * phba,
hsp->cls2.rcvDataSizeLsb;
ssp_value = (sp->cls2.rcvDataSizeMsb << 8) |
sp->cls2.rcvDataSizeLsb;
+ if (!ssp_value)
+ goto bad_service_param;
if (ssp_value > hsp_value) {
sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
}
} else if (class == CLASS2) {
- return 0;
+ goto bad_service_param;
}
if (sp->cls3.classValid) {
@@ -101,12 +106,14 @@ lpfc_check_sparm(struct lpfc_hba * phba,
hsp->cls3.rcvDataSizeLsb;
ssp_value = (sp->cls3.rcvDataSizeMsb << 8) |
sp->cls3.rcvDataSizeLsb;
+ if (!ssp_value)
+ goto bad_service_param;
if (ssp_value > hsp_value) {
sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
}
} else if (class == CLASS3) {
- return 0;
+ goto bad_service_param;
}
/*
@@ -125,12 +132,22 @@ lpfc_check_sparm(struct lpfc_hba * phba,
memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
return 1;
+bad_service_param:
+ lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0207 Device %x "
+ "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
+ "invalid service parameters. Ignoring device.\n",
+ vport->phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID,
+ sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
+ sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
+ sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
+ sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
+ return 0;
}
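
A minimal sketch of the MSB/LSB reassembly used in lpfc_check_sparm() above for the class receive-data-field size; the byte values are assumed examples, and the snippet is illustrative only, not driver code.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t msb = 0x08, lsb = 0x00;                 /* assumed bytes: 2048-byte frames */
	uint16_t size = (uint16_t)((msb << 8) | lsb);   /* same (msb << 8) | lsb combine   */

	printf("receive data field size = %u bytes\n", (unsigned int)size);
	return 0;
}
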
static void *
-lpfc_check_elscmpl_iocb(struct lpfc_hba * phba,
- struct lpfc_iocbq *cmdiocb,
- struct lpfc_iocbq *rspiocb)
+lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
struct lpfc_dmabuf *pcmd, *prsp;
uint32_t *lp;
@@ -168,32 +185,29 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba * phba,
* routine effectively results in a "software abort".
*/
int
-lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
- struct lpfc_sli *psli;
- struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
struct lpfc_iocbq *iocb, *next_iocb;
IOCB_t *cmd;
/* Abort outstanding I/O on NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0205 Abort outstanding I/O on NPort x%x "
+ "%d (%d):0205 Abort outstanding I/O on NPort x%x "
"Data: x%x x%x x%x\n",
- phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_state, ndlp->nlp_rpi);
+ phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
- psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING];
+ lpfc_fabric_abort_nport(ndlp);
/* First check the txq */
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
- /* Check to see if iocb matches the nport we are looking
- for */
+ /* Check to see if iocb matches the nport we are looking for */
if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
- /* It matches, so deque and call compl with an
- error */
+ /* It matches, so dequeue and call compl with an error */
list_move_tail(&iocb->list, &completions);
pring->txq_cnt--;
}
@@ -201,37 +215,39 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
/* Next check the txcmplq */
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
- /* Check to see if iocb matches the nport we are looking
- for */
- if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
+ /* Check to see if iocb matches the nport we are looking for */
+ if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ }
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
cmd = &iocb->iocb;
- list_del(&iocb->list);
+ list_del_init(&iocb->list);
- if (iocb->iocb_cmpl) {
+ if (!iocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, iocb);
+ else {
cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
(iocb->iocb_cmpl) (phba, iocb, iocb);
- } else
- lpfc_sli_release_iocbq(phba, iocb);
+ }
}
/* If we are delaying issuing an ELS command, cancel it */
if (ndlp->nlp_flag & NLP_DELAY_TMO)
- lpfc_cancel_retry_delay_tmo(phba, ndlp);
+ lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
return 0;
}
static int
-lpfc_rcv_plogi(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp,
- struct lpfc_iocbq *cmdiocb)
+lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct lpfc_iocbq *cmdiocb)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
IOCB_t *icmd;
@@ -241,14 +257,14 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
int rc;
memset(&stat, 0, sizeof (struct ls_rjt));
- if (phba->hba_state <= LPFC_FLOGI) {
+ if (vport->port_state <= LPFC_FLOGI) {
/* Before responding to PLOGI, check for pt2pt mode.
* If we are pt2pt, with an outstanding FLOGI, abort
* the FLOGI and resend it first.
*/
- if (phba->fc_flag & FC_PT2PT) {
- lpfc_els_abort_flogi(phba);
- if (!(phba->fc_flag & FC_PT2PT_PLOGI)) {
+ if (vport->fc_flag & FC_PT2PT) {
+ lpfc_els_abort_flogi(phba);
+ if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
/* If the other side is supposed to initiate
* the PLOGI anyway, just ACC it now and
* move on with discovery.
@@ -257,45 +273,42 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
phba->fc_ratov = FF_DEF_RATOV;
/* Start discovery - this should just do
CLEAR_LA */
- lpfc_disc_start(phba);
- } else {
- lpfc_initial_flogi(phba);
- }
+ lpfc_disc_start(vport);
+ } else
+ lpfc_initial_flogi(vport);
} else {
stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb,
- ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+ ndlp, NULL);
return 0;
}
}
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
- if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3) == 0)) {
+ if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) {
/* Reject this request because invalid parameters */
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
return 0;
}
icmd = &cmdiocb->iocb;
/* PLOGI chkparm OK */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_ELS,
- "%d:0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
- phba->brd_no,
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d (%d):0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
+ phba->brd_no, vport->vpi,
ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
ndlp->nlp_rpi);
- if ((phba->cfg_fcp_class == 2) &&
- (sp->cls2.classValid)) {
+ if (phba->cfg_fcp_class == 2 && sp->cls2.classValid)
ndlp->nlp_fcp_info |= CLASS2;
- } else {
+ else
ndlp->nlp_fcp_info |= CLASS3;
- }
+
ndlp->nlp_class_sup = 0;
if (sp->cls1.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -317,35 +330,37 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
case NLP_STE_MAPPED_NODE:
- lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
return 1;
}
- if ((phba->fc_flag & FC_PT2PT)
- && !(phba->fc_flag & FC_PT2PT_PLOGI)) {
+ if ((vport->fc_flag & FC_PT2PT) &&
+ !(vport->fc_flag & FC_PT2PT_PLOGI)) {
/* rcv'ed PLOGI decides what our NPortId will be */
- phba->fc_myDID = icmd->un.rcvels.parmRo;
+ vport->fc_myDID = icmd->un.rcvels.parmRo;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox == NULL)
goto out;
lpfc_config_link(phba, mbox);
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
rc = lpfc_sli_issue_mbox
(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED) {
- mempool_free( mbox, phba->mbox_mem_pool);
+ mempool_free(mbox, phba->mbox_mem_pool);
goto out;
}
- lpfc_can_disctmo(phba);
+ lpfc_can_disctmo(vport);
}
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (mbox == NULL)
+ if (!mbox)
goto out;
- if (lpfc_reg_login(phba, icmd->un.rcvels.remoteID,
- (uint8_t *) sp, mbox, 0)) {
- mempool_free( mbox, phba->mbox_mem_pool);
+ rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID,
+ (uint8_t *) sp, mbox, 0);
+ if (rc) {
+ mempool_free(mbox, phba->mbox_mem_pool);
goto out;
}
@@ -357,7 +372,10 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
* mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
* command issued in lpfc_cmpl_els_acc().
*/
+ mbox->vport = vport;
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
+ spin_unlock_irq(shost->host_lock);
/*
* If there is an outstanding PLOGI issued, abort it before
@@ -373,24 +391,41 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
lpfc_els_abort(phba, ndlp);
}
- lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
+ if ((vport->port_type == LPFC_NPIV_PORT &&
+ phba->cfg_vport_restrict_login)) {
+
+ /* In order to preserve RPIs, we want to cleanup
+ * the default RPI the firmware created to rcv
+ * this ELS request. The only way to do this is
+ * to register, then unregister the RPI.
+ */
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
+ spin_unlock_irq(shost->host_lock);
+ stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+ ndlp, mbox);
+ return 1;
+ }
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
return 1;
out:
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
return 0;
}
static int
-lpfc_rcv_padisc(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp,
+lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_dmabuf *pcmd;
- struct serv_parm *sp;
- struct lpfc_name *pnn, *ppn;
+ struct serv_parm *sp;
+ struct lpfc_name *pnn, *ppn;
struct ls_rjt stat;
ADISC *ap;
IOCB_t *icmd;
@@ -412,13 +447,12 @@ lpfc_rcv_padisc(struct lpfc_hba * phba,
}
icmd = &cmdiocb->iocb;
- if ((icmd->ulpStatus == 0) &&
- (lpfc_check_adisc(phba, ndlp, pnn, ppn))) {
+ if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
if (cmd == ELS_CMD_ADISC) {
- lpfc_els_rsp_adisc_acc(phba, cmdiocb, ndlp);
+ lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
} else {
- lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp,
- NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
+ NULL, 0);
}
return 1;
}
@@ -427,55 +461,57 @@ lpfc_rcv_padisc(struct lpfc_hba * phba,
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
/* 1 sec timeout */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return 0;
}
static int
-lpfc_rcv_logo(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp,
- struct lpfc_iocbq *cmdiocb,
- uint32_t els_cmd)
+lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
{
- /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ /* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
* PLOGIs during LOGO storms from a device.
*/
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
+ spin_unlock_irq(shost->host_lock);
if (els_cmd == ELS_CMD_PRLO)
- lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
else
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
if (!(ndlp->nlp_type & NLP_FABRIC) ||
- (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
+ (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
/* Only try to re-login if this is NOT a Fabric Node */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
} else {
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
}
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
/* The driver has to wait until the ACC completes before it continues
* processing the LOGO. The action will resume in
* lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
@@ -485,9 +521,8 @@ lpfc_rcv_logo(struct lpfc_hba * phba,
}
static void
-lpfc_rcv_prli(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp,
- struct lpfc_iocbq *cmdiocb)
+lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct lpfc_iocbq *cmdiocb)
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
@@ -501,8 +536,7 @@ lpfc_rcv_prli(struct lpfc_hba * phba,
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
- (npr->prliType == PRLI_FCP_TYPE)) {
+ if (npr->prliType == PRLI_FCP_TYPE) {
if (npr->initiatorFunc)
ndlp->nlp_type |= NLP_FCP_INITIATOR;
if (npr->targetFunc)
@@ -517,36 +551,42 @@ lpfc_rcv_prli(struct lpfc_hba * phba,
roles |= FC_RPORT_ROLE_FCP_INITIATOR;
if (ndlp->nlp_type & NLP_FCP_TARGET)
roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport rolechg: role:x%x did:x%x flg:x%x",
+ roles, ndlp->nlp_DID, ndlp->nlp_flag);
+
fc_remote_port_rolechg(rport, roles);
}
}
static uint32_t
-lpfc_disc_set_adisc(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp)
+lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+
/* Check config parameter use-adisc or FCP-2 */
- if ((phba->cfg_use_adisc == 0) &&
- !(phba->fc_flag & FC_RSCN_MODE)) {
- if (!(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
- return 0;
+ if ((phba->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
+ ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
+ return 1;
}
- spin_lock_irq(phba->host->host_lock);
- ndlp->nlp_flag |= NLP_NPR_ADISC;
- spin_unlock_irq(phba->host->host_lock);
- return 1;
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ lpfc_unreg_rpi(vport, ndlp);
+ return 0;
}
static uint32_t
-lpfc_disc_illegal(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-{
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_DISCOVERY,
- "%d:0253 Illegal State Transition: node x%x event x%x, "
- "state x%x Data: x%x x%x\n",
- phba->brd_no,
+lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0253 Illegal State Transition: node x%x "
+ "event x%x, state x%x Data: x%x x%x\n",
+ vport->phba->brd_no, vport->vpi,
ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
ndlp->nlp_flag);
return ndlp->nlp_state;
@@ -555,151 +595,162 @@ lpfc_disc_illegal(struct lpfc_hba * phba,
/* Start of Discovery State Machine routines */
static uint32_t
-lpfc_rcv_plogi_unused_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb;
cmdiocb = (struct lpfc_iocbq *) arg;
- if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
+ if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
return ndlp->nlp_state;
}
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
static uint32_t
-lpfc_rcv_els_unused_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- lpfc_issue_els_logo(phba, ndlp, 0);
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
+ lpfc_issue_els_logo(vport, ndlp, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_logo_unused_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
- spin_unlock_irq(phba->host->host_lock);
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_logo_unused_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
static uint32_t
-lpfc_device_rm_unused_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
static uint32_t
-lpfc_rcv_plogi_plogi_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb = arg;
- struct lpfc_dmabuf *pcmd;
- struct serv_parm *sp;
- uint32_t *lp;
+ struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ uint32_t *lp = (uint32_t *) pcmd->virt;
+ struct serv_parm *sp = (struct serv_parm *) (lp + 1);
struct ls_rjt stat;
int port_cmp;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
- lp = (uint32_t *) pcmd->virt;
- sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
-
memset(&stat, 0, sizeof (struct ls_rjt));
/* For a PLOGI, we only accept if our portname is less
* than the remote portname.
*/
phba->fc_stat.elsLogiCol++;
- port_cmp = memcmp(&phba->fc_portname, &sp->portName,
- sizeof (struct lpfc_name));
+ port_cmp = memcmp(&vport->fc_portname, &sp->portName,
+ sizeof(struct lpfc_name));
if (port_cmp >= 0) {
/* Reject this request because the remote node will accept
ours */
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
} else {
- lpfc_rcv_plogi(phba, ndlp, cmdiocb);
- } /* if our portname was less */
+ lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+ } /* If our portname was less */
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_logo_plogi_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ struct ls_rjt stat;
- cmdiocb = (struct lpfc_iocbq *) arg;
+ memset(&stat, 0, sizeof (struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return ndlp->nlp_state;
+}
- /* software abort outstanding PLOGI */
- lpfc_els_abort(phba, ndlp);
+static uint32_t
+lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(vport->phba, ndlp);
- lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp);
if (evt == NLP_EVT_RCV_LOGO) {
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
} else {
- lpfc_issue_els_logo(phba, ndlp, 0);
+ lpfc_issue_els_logo(vport, ndlp, 0);
}
- /* Put ndlp in npr list set plogi timer for 1 sec */
+ /* Put ndlp in npr state set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
+lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb, *rspiocb;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocb, *rspiocb;
struct lpfc_dmabuf *pcmd, *prsp, *mp;
uint32_t *lp;
IOCB_t *irsp;
@@ -721,31 +772,26 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
- prsp = list_get_first(&pcmd->list,
- struct lpfc_dmabuf,
- list);
- lp = (uint32_t *) prsp->virt;
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+ lp = (uint32_t *) prsp->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
- if (!lpfc_check_sparm(phba, ndlp, sp, CLASS3))
+ if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3))
goto out;
/* PLOGI chkparm OK */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_ELS,
- "%d:0121 PLOGI chkparm OK "
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d (%d):0121 PLOGI chkparm OK "
"Data: x%x x%x x%x x%x\n",
- phba->brd_no,
+ phba->brd_no, vport->vpi,
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_flag, ndlp->nlp_rpi);
- if ((phba->cfg_fcp_class == 2) &&
- (sp->cls2.classValid)) {
+ if (phba->cfg_fcp_class == 2 && (sp->cls2.classValid))
ndlp->nlp_fcp_info |= CLASS2;
- } else {
+ else
ndlp->nlp_fcp_info |= CLASS3;
- }
+
ndlp->nlp_class_sup = 0;
if (sp->cls1.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -756,16 +802,23 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
if (sp->cls4.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe =
- ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
- sp->cmn.bbRcvSizeLsb;
+ ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
- if (!(mbox = mempool_alloc(phba->mbox_mem_pool,
- GFP_KERNEL)))
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0133 PLOGI: no memory for reg_login "
+ "Data: x%x x%x x%x x%x\n",
+ phba->brd_no, vport->vpi,
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_flag, ndlp->nlp_rpi);
goto out;
+ }
- lpfc_unreg_rpi(phba, ndlp);
- if (lpfc_reg_login(phba, irsp->un.elsreq64.remoteID, (uint8_t *) sp,
- mbox, 0) == 0) {
+ lpfc_unreg_rpi(vport, ndlp);
+
+ if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID,
+ (uint8_t *) sp, mbox, 0) == 0) {
switch (ndlp->nlp_DID) {
case NameServer_DID:
mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
@@ -777,68 +830,104 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
}
mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->vport = vport;
if (lpfc_sli_issue_mbox(phba, mbox,
(MBX_NOWAIT | MBX_STOP_IOCB))
!= MBX_NOT_FINISHED) {
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_REG_LOGIN_ISSUE);
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_REG_LOGIN_ISSUE);
return ndlp->nlp_state;
}
lpfc_nlp_put(ndlp);
- mp = (struct lpfc_dmabuf *)mbox->context1;
+ mp = (struct lpfc_dmabuf *) mbox->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(mbox, phba->mbox_mem_pool);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0134 PLOGI: cannot issue reg_login "
+ "Data: x%x x%x x%x x%x\n",
+ phba->brd_no, vport->vpi,
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_flag, ndlp->nlp_rpi);
} else {
mempool_free(mbox, phba->mbox_mem_pool);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0135 PLOGI: cannot format reg_login "
+ "Data: x%x x%x x%x x%x\n",
+ phba->brd_no, vport->vpi,
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_flag, ndlp->nlp_rpi);
}
- out:
+out:
+ if (ndlp->nlp_DID == NameServer_DID) {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0261 Cannot Register NameServer login\n",
+ phba->brd_no, vport->vpi);
+ }
+
/* Free this node since the driver cannot login or has the wrong
sparm */
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
static uint32_t
-lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ spin_unlock_irq(shost->host_lock);
return ndlp->nlp_state;
- }
- else {
+ } else {
/* software abort outstanding PLOGI */
- lpfc_els_abort(phba, ndlp);
+ lpfc_els_abort(vport->phba, ndlp);
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
}
static uint32_t
-lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+
+ /* Don't do anything that will mess up processing of the
+ * previous RSCN.
+ */
+ if (vport->fc_flag & FC_RSCN_DEFERRED)
+ return ndlp->nlp_state;
+
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp);
ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(phba->host->host_lock);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb;
/* software abort outstanding ADISC */
@@ -846,34 +935,31 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
cmdiocb = (struct lpfc_iocbq *) arg;
- if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
+ if (lpfc_rcv_plogi(vport, ndlp, cmdiocb))
return ndlp->nlp_state;
- }
+
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prli_adisc_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb;
cmdiocb = (struct lpfc_iocbq *) arg;
@@ -881,42 +967,43 @@ lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
/* software abort outstanding ADISC */
lpfc_els_abort(phba, ndlp);
- lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_padisc_adisc_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb;
cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ lpfc_rcv_padisc(vport, ndlp, cmdiocb);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb;
cmdiocb = (struct lpfc_iocbq *) arg;
/* Treat like rcv logo */
- lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO);
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
ADISC *ap;
@@ -928,101 +1015,112 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba,
irsp = &rspiocb->iocb;
if ((irsp->ulpStatus) ||
- (!lpfc_check_adisc(phba, ndlp, &ap->nodeName, &ap->portName))) {
+ (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
/* 1 sec timeout */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
- memset(&ndlp->nlp_nodename, 0, sizeof (struct lpfc_name));
- memset(&ndlp->nlp_portname, 0, sizeof (struct lpfc_name));
+ memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
+ memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
- lpfc_unreg_rpi(phba, ndlp);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ lpfc_unreg_rpi(vport, ndlp);
return ndlp->nlp_state;
}
if (ndlp->nlp_type & NLP_FCP_TARGET) {
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_MAPPED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
} else {
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
return ndlp->nlp_state;
}
static uint32_t
-lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ spin_unlock_irq(shost->host_lock);
return ndlp->nlp_state;
- }
- else {
+ } else {
/* software abort outstanding ADISC */
- lpfc_els_abort(phba, ndlp);
+ lpfc_els_abort(vport->phba, ndlp);
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
}
static uint32_t
-lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+
+ /* Don't do anything that will mess up processing of the
+ * previous RSCN.
+ */
+ if (vport->fc_flag & FC_RSCN_DEFERRED)
+ return ndlp->nlp_state;
+
/* software abort outstanding ADISC */
lpfc_els_abort(phba, ndlp);
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(phba->host->host_lock);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- ndlp->nlp_flag |= NLP_NPR_ADISC;
- spin_unlock_irq(phba->host->host_lock);
-
+ spin_unlock_irq(shost->host_lock);
+ lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_plogi_reglogin_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
+lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
-
- lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+ lpfc_rcv_plogi(vport, ndlp, cmdiocb);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prli_reglogin_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
+lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
-
- lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
+lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
LPFC_MBOXQ_t *mb;
LPFC_MBOXQ_t *nextmb;
struct lpfc_dmabuf *mp;
@@ -1033,12 +1131,13 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
if ((mb = phba->sli.mbox_active)) {
if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
+ lpfc_nlp_put(ndlp);
mb->context2 = NULL;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
}
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
@@ -1047,61 +1146,61 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
+ lpfc_nlp_put(ndlp);
list_del(&mb->list);
mempool_free(mb, phba->mbox_mem_pool);
}
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
- lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_padisc_reglogin_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
+lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
-
- lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ lpfc_rcv_padisc(vport, ndlp, cmdiocb);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
+lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
uint32_t evt)
{
struct lpfc_iocbq *cmdiocb;
cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp,
- void *arg, uint32_t evt)
+lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
{
- LPFC_MBOXQ_t *pmb;
- MAILBOX_t *mb;
- uint32_t did;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+ MAILBOX_t *mb = &pmb->mb;
+ uint32_t did = mb->un.varWords[1];
- pmb = (LPFC_MBOXQ_t *) arg;
- mb = &pmb->mb;
- did = mb->un.varWords[1];
if (mb->mbxStatus) {
/* RegLogin failed */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_DISCOVERY,
- "%d:0246 RegLogin failed Data: x%x x%x x%x\n",
- phba->brd_no,
- did, mb->mbxStatus, phba->hba_state);
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0246 RegLogin failed Data: x%x x%x "
+ "x%x\n",
+ phba->brd_no, vport->vpi,
+ did, mb->mbxStatus, vport->port_state);
/*
* If RegLogin failed due to lack of HBA resources do not
@@ -1109,20 +1208,20 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
*/
if (mb->mbxStatus == MBXERR_RPI_FULL) {
ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
return ndlp->nlp_state;
}
- /* Put ndlp in npr list set plogi timer for 1 sec */
+ /* Put ndlp in npr state set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
- lpfc_issue_els_logo(phba, ndlp, 0);
+ lpfc_issue_els_logo(vport, ndlp, 0);
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return ndlp->nlp_state;
}
@@ -1131,91 +1230,99 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
/* Only if we are not a fabric nport do we issue PRLI */
if (!(ndlp->nlp_type & NLP_FABRIC)) {
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE);
- lpfc_issue_els_prli(phba, ndlp, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+ lpfc_issue_els_prli(vport, ndlp, 0);
} else {
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
return ndlp->nlp_state;
}
static uint32_t
-lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
+lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
uint32_t evt)
{
- if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ spin_unlock_irq(shost->host_lock);
return ndlp->nlp_state;
- }
- else {
- lpfc_drop_node(phba, ndlp);
+ } else {
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
}
static uint32_t
-lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ /* Don't do anything that will mess up processing of the
+ * previous RSCN.
+ */
+ if (vport->fc_flag & FC_RSCN_DEFERRED)
+ return ndlp->nlp_state;
+
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(phba->host->host_lock);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_plogi_prli_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb;
cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+ lpfc_rcv_plogi(vport, ndlp, cmdiocb);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prli_prli_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* Software abort outstanding PRLI before sending acc */
- lpfc_els_abort(phba, ndlp);
+ lpfc_els_abort(vport->phba, ndlp);
- lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ lpfc_rcv_padisc(vport, ndlp, cmdiocb);
return ndlp->nlp_state;
}
@@ -1225,21 +1332,22 @@ lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba,
* NEXT STATE = PRLI_ISSUE
*/
static uint32_t
-lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb, *rspiocb;
+ struct lpfc_hba *phba = vport->phba;
IOCB_t *irsp;
PRLI *npr;
@@ -1249,8 +1357,12 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
irsp = &rspiocb->iocb;
if (irsp->ulpStatus) {
+ if ((vport->port_type == LPFC_NPIV_PORT) &&
+ phba->cfg_vport_restrict_login) {
+ goto out;
+ }
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
return ndlp->nlp_state;
}
@@ -1266,319 +1378,329 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
if (npr->Retry)
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
}
+ if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
+ (vport->port_type == LPFC_NPIV_PORT) &&
+ phba->cfg_vport_restrict_login) {
+out:
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_TARGET_REMOVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_issue_els_logo(vport, ndlp, 0);
+
+ ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
+ return ndlp->nlp_state;
+ }
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_MAPPED_NODE);
+ if (ndlp->nlp_type & NLP_FCP_TARGET)
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
+ else
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
return ndlp->nlp_state;
}
/*! lpfc_device_rm_prli_issue
- *
- * \pre
- * \post
- * \param phba
- * \param ndlp
- * \param arg
- * \param evt
- * \return uint32_t
- *
- * \b Description:
- * This routine is envoked when we a request to remove a nport we are in the
- * process of PRLIing. We should software abort outstanding prli, unreg
- * login, send a logout. We will change node state to UNUSED_NODE, put it
- * on plogi list so it can be freed when LOGO completes.
- *
- */
+ *
+ * \pre
+ * \post
+ * \param phba
+ * \param ndlp
+ * \param arg
+ * \param evt
+ * \return uint32_t
+ *
+ * \b Description:
+ * This routine is invoked when we receive a request to remove an nport we
+ * are in the process of PRLIing. We should software abort the outstanding
+ * PRLI, unreg login, and send a logout. We will change the node state to
+ * UNUSED_NODE and put it on the plogi list so it can be freed when the LOGO
+ * completes.
+ *
+ */
+
static uint32_t
-lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ spin_unlock_irq(shost->host_lock);
return ndlp->nlp_state;
- }
- else {
+ } else {
/* software abort outstanding PLOGI */
- lpfc_els_abort(phba, ndlp);
+ lpfc_els_abort(vport->phba, ndlp);
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
}
/*! lpfc_device_recov_prli_issue
- *
- * \pre
- * \post
- * \param phba
- * \param ndlp
- * \param arg
- * \param evt
- * \return uint32_t
- *
- * \b Description:
- * The routine is envoked when the state of a device is unknown, like
- * during a link down. We should remove the nodelist entry from the
- * unmapped list, issue a UNREG_LOGIN, do a software abort of the
- * outstanding PRLI command, then free the node entry.
- */
+ *
+ * \pre
+ * \post
+ * \param phba
+ * \param ndlp
+ * \param arg
+ * \param evt
+ * \return uint32_t
+ *
+ * \b Description:
+ * The routine is invoked when the state of a device is unknown, like
+ * during a link down. We should remove the nodelist entry from the
+ * unmapped list, issue a UNREG_LOGIN, do a software abort of the
+ * outstanding PRLI command, then free the node entry.
+ */
static uint32_t
-lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+
+ /* Don't do anything that will mess up processing of the
+ * previous RSCN.
+ */
+ if (vport->fc_flag & FC_RSCN_DEFERRED)
+ return ndlp->nlp_state;
+
/* software abort outstanding PRLI */
lpfc_els_abort(phba, ndlp);
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(phba->host->host_lock);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_plogi_unmap_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+ lpfc_rcv_plogi(vport, ndlp, cmdiocb);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prli_unmap_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
-
- lpfc_rcv_prli(phba, ndlp, cmdiocb);
- lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ lpfc_rcv_prli(vport, ndlp, cmdiocb);
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
-
- lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_padisc_unmap_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
-
- lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ lpfc_rcv_padisc(vport, ndlp, cmdiocb);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- lpfc_disc_set_adisc(phba, ndlp);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_plogi_mapped_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+ lpfc_rcv_plogi(vport, ndlp, cmdiocb);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prli_mapped_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
-
- lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
-
- lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_padisc_mapped_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ lpfc_rcv_padisc(vport, ndlp, cmdiocb);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* flush the target */
- spin_lock_irq(phba->host->host_lock);
lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
- ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
- spin_unlock_irq(phba->host->host_lock);
+ ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
/* Treat like rcv logo */
- lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO);
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(phba->host->host_lock);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(phba->host->host_lock);
- lpfc_disc_set_adisc(phba, ndlp);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_plogi_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* Ignore PLOGI if we have an outstanding LOGO */
- if (ndlp->nlp_flag & NLP_LOGO_SND) {
+ if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) {
return ndlp->nlp_state;
}
- if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
- spin_lock_irq(phba->host->host_lock);
+ if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
return ndlp->nlp_state;
}
/* send PLOGI immediately, move to PLOGI issue state */
if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
}
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prli_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
- struct ls_rjt stat;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ struct ls_rjt stat;
memset(&stat, 0, sizeof (struct ls_rjt));
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
if (ndlp->nlp_flag & NLP_NPR_ADISC) {
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(phba->host->host_lock);
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
- lpfc_issue_els_adisc(phba, ndlp, 0);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+ lpfc_issue_els_adisc(vport, ndlp, 0);
} else {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
}
}
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_logo_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
-
- lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ lpfc_rcv_padisc(vport, ndlp, cmdiocb);
/*
* Do not start discovery if discovery is about to start
@@ -1586,53 +1708,52 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
* here will affect the counting of discovery threads.
*/
if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
- !(ndlp->nlp_flag & NLP_NPR_2B_DISC)){
+ !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
if (ndlp->nlp_flag & NLP_NPR_ADISC) {
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
- lpfc_issue_els_adisc(phba, ndlp, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+ lpfc_issue_els_adisc(vport, ndlp, 0);
} else {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
}
}
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prlo_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
- if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+ if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
} else {
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
}
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
@@ -1642,15 +1763,15 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba,
irsp = &rspiocb->iocb;
if (irsp->ulpStatus) {
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
@@ -1660,25 +1781,24 @@ lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba,
irsp = &rspiocb->iocb;
if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_logo_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- lpfc_unreg_rpi(phba, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
/* This routine does nothing, just return the current state */
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
@@ -1688,28 +1808,25 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba,
irsp = &rspiocb->iocb;
if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- LPFC_MBOXQ_t *pmb;
- MAILBOX_t *mb;
-
- pmb = (LPFC_MBOXQ_t *) arg;
- mb = &pmb->mb;
+ LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+ MAILBOX_t *mb = &pmb->mb;
if (!mb->mbxStatus)
ndlp->nlp_rpi = mb->un.varWords[0];
else {
if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
}
@@ -1717,28 +1834,38 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
}
static uint32_t
-lpfc_device_rm_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ spin_unlock_irq(shost->host_lock);
return ndlp->nlp_state;
}
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
static uint32_t
-lpfc_device_recov_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- spin_lock_irq(phba->host->host_lock);
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ /* Don't do anything that will mess up processing of the
+ * previous RSCN.
+ */
+ if (vport->fc_flag & FC_RSCN_DEFERRED)
+ return ndlp->nlp_state;
+
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
if (ndlp->nlp_flag & NLP_DELAY_TMO) {
- lpfc_cancel_retry_delay_tmo(phba, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
}
return ndlp->nlp_state;
}
@@ -1801,7 +1928,7 @@ lpfc_device_recov_npr_node(struct lpfc_hba * phba,
*/
static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
- (struct lpfc_hba *, struct lpfc_nodelist *, void *, uint32_t) = {
+ (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
/* Action routine Event Current State */
lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */
lpfc_rcv_els_unused_node, /* RCV_PRLI */
@@ -1818,7 +1945,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
lpfc_disc_illegal, /* DEVICE_RECOVERY */
lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
- lpfc_rcv_els_plogi_issue, /* RCV_PRLI */
+ lpfc_rcv_prli_plogi_issue, /* RCV_PRLI */
lpfc_rcv_logo_plogi_issue, /* RCV_LOGO */
lpfc_rcv_els_plogi_issue, /* RCV_ADISC */
lpfc_rcv_els_plogi_issue, /* RCV_PDISC */
@@ -1917,35 +2044,41 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
};
int
-lpfc_disc_state_machine(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
uint32_t cur_state, rc;
- uint32_t(*func) (struct lpfc_hba *, struct lpfc_nodelist *, void *,
+ uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
uint32_t);
lpfc_nlp_get(ndlp);
cur_state = ndlp->nlp_state;
/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0211 DSM in event x%x on NPort x%x in state %d "
- "Data: x%x\n",
- phba->brd_no,
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0211 DSM in event x%x on NPort x%x in "
+ "state %d Data: x%x\n",
+ phba->brd_no, vport->vpi,
evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
+ "DSM in: evt:%d ste:%d did:x%x",
+ evt, cur_state, ndlp->nlp_DID);
+
func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
- rc = (func) (phba, ndlp, arg, evt);
+ rc = (func) (vport, ndlp, arg, evt);
/* DSM out state <rc> on NPort <nlp_DID> */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0212 DSM out state %d on NPort x%x Data: x%x\n",
- phba->brd_no,
- rc, ndlp->nlp_DID, ndlp->nlp_flag);
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0212 DSM out state %d on NPort x%x "
+ "Data: x%x\n",
+ phba->brd_no, vport->vpi,
+ rc, ndlp->nlp_DID, ndlp->nlp_flag);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
+ "DSM out: ste:%d did:x%x flg:x%x",
+ rc, ndlp->nlp_DID, ndlp->nlp_flag);
lpfc_nlp_put(ndlp);
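For readers following the dispatch above: lpfc_disc_action is a flat array of NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT handler pointers, indexed as (cur_state * NLP_EVT_MAX_EVENT) + evt. A minimal stand-alone sketch of the same flat-table indexing, with made-up names and sizes (not lpfc code):

#include <stdint.h>
#include <stdio.h>

#define N_STATES 3		/* stands in for NLP_STE_MAX_STATE */
#define N_EVENTS 4		/* stands in for NLP_EVT_MAX_EVENT */

typedef uint32_t (*action_fn)(int node);

static uint32_t ignore(int node) { (void)node; return 0; }
static uint32_t accept(int node) { (void)node; return 1; }

/* Row = current state, column = event, stored as one flat array. */
static action_fn actions[N_STATES * N_EVENTS] = {
	ignore, accept, ignore, ignore,		/* state 0 */
	accept, ignore, accept, ignore,		/* state 1 */
	ignore, ignore, ignore, accept,		/* state 2 */
};

static uint32_t dispatch(int state, int evt, int node)
{
	/* same indexing scheme as (cur_state * NLP_EVT_MAX_EVENT) + evt */
	return actions[state * N_EVENTS + evt](node);
}

int main(void)
{
	printf("%u\n", dispatch(1, 2, 42));	/* calls accept(), prints 1 */
	return 0;
}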
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 9a12d05e99e..8f45bbc4212 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -37,10 +37,158 @@
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
#define LPFC_RESET_WAIT 2
#define LPFC_ABORT_WAIT 2
+/*
+ * This function is called with no lock held when there is a resource
+ * error in driver or in firmware.
+ */
+void
+lpfc_adjust_queue_depth(struct lpfc_hba *phba)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ atomic_inc(&phba->num_rsrc_err);
+ phba->last_rsrc_error_time = jiffies;
+
+ if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return;
+ }
+
+ phba->last_ramp_down_time = jiffies;
+
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ spin_lock_irqsave(&phba->pport->work_port_lock, flags);
+ if ((phba->pport->work_port_events &
+ WORKER_RAMP_DOWN_QUEUE) == 0) {
+ phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
+ }
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ if (phba->work_wait)
+ wake_up(phba->work_wait);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ return;
+}
+
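lpfc_adjust_queue_depth above rate-limits itself: it only queues a ramp-down event if QUEUE_RAMP_DOWN_INTERVAL has passed since the last one, otherwise it just records the error and returns. A minimal userspace sketch of that rate-limiting idea, assuming seconds in place of jiffies and made-up names:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define RAMP_DOWN_INTERVAL 1	/* seconds; stands in for QUEUE_RAMP_DOWN_INTERVAL */

static time_t last_ramp_down;

static bool try_ramp_down(void)
{
	time_t now = time(NULL);

	if (last_ramp_down + RAMP_DOWN_INTERVAL > now)
		return false;	/* too soon, skip this request */

	last_ramp_down = now;
	return true;		/* caller may now queue the ramp-down work */
}

int main(void)
{
	printf("%d\n", try_ramp_down());	/* 1: first call goes through        */
	printf("%d\n", try_ramp_down());	/* 0: second call, same second, skipped */
	return 0;
}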
+/*
+ * This function is called with no lock held when there is a successful
+ * SCSI command completion.
+ */
+static inline void
+lpfc_rampup_queue_depth(struct lpfc_hba *phba,
+ struct scsi_device *sdev)
+{
+ unsigned long flags;
+ atomic_inc(&phba->num_cmd_success);
+
+ if (phba->cfg_lun_queue_depth <= sdev->queue_depth)
+ return;
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
+ ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return;
+ }
+
+ phba->last_ramp_up_time = jiffies;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ spin_lock_irqsave(&phba->pport->work_port_lock, flags);
+ if ((phba->pport->work_port_events &
+ WORKER_RAMP_UP_QUEUE) == 0) {
+ phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
+ }
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ if (phba->work_wait)
+ wake_up(phba->work_wait);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+}
+
+void
+lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ struct Scsi_Host *host;
+ struct scsi_device *sdev;
+ unsigned long new_queue_depth;
+ unsigned long num_rsrc_err, num_cmd_success;
+
+ num_rsrc_err = atomic_read(&phba->num_rsrc_err);
+ num_cmd_success = atomic_read(&phba->num_cmd_success);
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ host = lpfc_shost_from_vport(vport);
+ if (!scsi_host_get(host))
+ continue;
+
+ spin_unlock_irq(&phba->hbalock);
+
+ shost_for_each_device(sdev, host) {
+ new_queue_depth = sdev->queue_depth * num_rsrc_err /
+ (num_rsrc_err + num_cmd_success);
+ if (!new_queue_depth)
+ new_queue_depth = sdev->queue_depth - 1;
+ else
+ new_queue_depth =
+ sdev->queue_depth - new_queue_depth;
+
+ if (sdev->ordered_tags)
+ scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
+ new_queue_depth);
+ else
+ scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
+ new_queue_depth);
+ }
+ spin_lock_irq(&phba->hbalock);
+ scsi_host_put(host);
+ }
+ spin_unlock_irq(&phba->hbalock);
+ atomic_set(&phba->num_rsrc_err, 0);
+ atomic_set(&phba->num_cmd_success, 0);
+}
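The arithmetic above shrinks each LUN queue depth in proportion to the share of resource errors among recent completions, and always drops it by at least one; the handler only runs after at least one resource error has been counted, so the divisor is non-zero. A stand-alone sketch of just that calculation, with illustrative numbers:

#include <stdio.h>

static unsigned long ramp_down_depth(unsigned long depth,
				     unsigned long num_rsrc_err,
				     unsigned long num_cmd_success)
{
	/* fraction of recent completions that hit a resource error */
	unsigned long cut = depth * num_rsrc_err /
			    (num_rsrc_err + num_cmd_success);

	return cut ? depth - cut : depth - 1;
}

int main(void)
{
	/* depth 30, 10 errors vs 90 successes -> cut 3 -> new depth 27 */
	printf("%lu\n", ramp_down_depth(30, 10, 90));
	return 0;
}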
+
+void
+lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ struct Scsi_Host *host;
+ struct scsi_device *sdev;
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ host = lpfc_shost_from_vport(vport);
+ if (!scsi_host_get(host))
+ continue;
+
+ spin_unlock_irq(&phba->hbalock);
+ shost_for_each_device(sdev, host) {
+ if (sdev->ordered_tags)
+ scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
+ sdev->queue_depth+1);
+ else
+ scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
+ sdev->queue_depth+1);
+ }
+ spin_lock_irq(&phba->hbalock);
+ scsi_host_put(host);
+ }
+ spin_unlock_irq(&phba->hbalock);
+ atomic_set(&phba->num_rsrc_err, 0);
+ atomic_set(&phba->num_cmd_success, 0);
+}
/*
* This routine allocates a scsi buffer, which contains all the necessary
@@ -51,8 +199,9 @@
* and the BPL BDE is setup in the IOCB.
*/
static struct lpfc_scsi_buf *
-lpfc_new_scsi_buf(struct lpfc_hba * phba)
+lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_scsi_buf *psb;
struct ulp_bde64 *bpl;
IOCB_t *iocb;
@@ -63,7 +212,6 @@ lpfc_new_scsi_buf(struct lpfc_hba * phba)
if (!psb)
return NULL;
memset(psb, 0, sizeof (struct lpfc_scsi_buf));
- psb->scsi_hba = phba;
/*
* Get memory from the pci pool to map the virt space to pci bus space
@@ -155,7 +303,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
}
static void
-lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
+lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
unsigned long iflag = 0;
@@ -166,7 +314,7 @@ lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
}
static int
-lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
+lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct scatterlist *sgel = NULL;
@@ -175,8 +323,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
dma_addr_t physaddr;
uint32_t i, num_bde = 0;
- int datadir = scsi_cmnd->sc_data_direction;
- int dma_error;
+ int nseg, datadir = scsi_cmnd->sc_data_direction;
/*
* There are three possibilities here - use scatter-gather segment, use
@@ -185,26 +332,26 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
* data bde entry.
*/
bpl += 2;
- if (scsi_cmnd->use_sg) {
+ if (scsi_sg_count(scsi_cmnd)) {
/*
* The driver stores the segment count returned from pci_map_sg
 * because this is a count of dma-mappings used to map the use_sg
* pages. They are not guaranteed to be the same for those
* architectures that implement an IOMMU.
*/
- sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
- lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
- scsi_cmnd->use_sg, datadir);
- if (lpfc_cmd->seg_cnt == 0)
+
+ nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
+ scsi_sg_count(scsi_cmnd), datadir);
+ if (unlikely(!nseg))
return 1;
+ lpfc_cmd->seg_cnt = nseg;
if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
printk(KERN_ERR "%s: Too many sg segments from "
"dma_map_sg. Config %d, seg_cnt %d",
__FUNCTION__, phba->cfg_sg_seg_cnt,
lpfc_cmd->seg_cnt);
- dma_unmap_sg(&phba->pcidev->dev, sgel,
- lpfc_cmd->seg_cnt, datadir);
+ scsi_dma_unmap(scsi_cmnd);
return 1;
}
@@ -214,7 +361,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
* single scsi command. Just run through the seg_cnt and format
* the bde's.
*/
- for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
+ scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
physaddr = sg_dma_address(sgel);
bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
@@ -225,34 +372,8 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
bpl->tus.f.bdeFlags = BUFF_USE_RCV;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
bpl++;
- sgel++;
num_bde++;
}
- } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
- physaddr = dma_map_single(&phba->pcidev->dev,
- scsi_cmnd->request_buffer,
- scsi_cmnd->request_bufflen,
- datadir);
- dma_error = dma_mapping_error(physaddr);
- if (dma_error) {
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "%d:0718 Unable to dma_map_single "
- "request_buffer: x%x\n",
- phba->brd_no, dma_error);
- return 1;
- }
-
- lpfc_cmd->nonsg_phys = physaddr;
- bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
- bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
- if (datadir == DMA_TO_DEVICE)
- bpl->tus.f.bdeFlags = 0;
- else
- bpl->tus.f.bdeFlags = BUFF_USE_RCV;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
- num_bde = 1;
- bpl++;
}
/*
@@ -266,7 +387,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
(num_bde * sizeof (struct ulp_bde64));
iocb_cmd->ulpBdeCount = 1;
iocb_cmd->ulpLe = 1;
- fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
+ fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd));
return 0;
}
@@ -279,26 +400,20 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
* a request buffer, but did not request use_sg. There is a third
* case, but it does not require resource deallocation.
*/
- if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
- dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
- psb->seg_cnt, psb->pCmd->sc_data_direction);
- } else {
- if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
- dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
- psb->pCmd->request_bufflen,
- psb->pCmd->sc_data_direction);
- }
- }
+ if (psb->seg_cnt > 0)
+ scsi_dma_unmap(psb->pCmd);
}
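This hunk is part of the merge-wide conversion to the SCSI data buffer accessors: open-coded use_sg/request_buffer/request_bufflen handling gives way to scsi_sg_count(), scsi_sglist(), scsi_bufflen() and scsi_dma_unmap(). A hedged, generic sketch of the accessor pattern as a low-level driver might use it (not the lpfc code itself, which still calls dma_map_sg() directly; record_segment() is a hypothetical per-driver helper, and this only compiles in a kernel-module context):

#include <linux/types.h>
#include <scsi/scsi_cmnd.h>

static void record_segment(dma_addr_t addr, unsigned int len)
{
	/* driver-specific: fill one BDE / SG descriptor for the HBA */
}

static int example_prep_dma(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int i, nseg;

	nseg = scsi_dma_map(cmd);	/* replaces open-coded dma_map_sg() */
	if (nseg < 0)
		return nseg;		/* mapping failed */

	scsi_for_each_sg(cmd, sg, nseg, i)
		record_segment(sg_dma_address(sg), sg_dma_len(sg));

	/* total transfer length, replaces cmd->request_bufflen */
	return scsi_bufflen(cmd);
	/* on completion or error the driver calls scsi_dma_unmap(cmd) */
}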
static void
-lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
+lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+ struct lpfc_iocbq *rsp_iocb)
{
struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
- struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
+ struct lpfc_hba *phba = vport->phba;
uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
+ uint32_t vpi = vport->vpi;
uint32_t resp_info = fcprsp->rspStatus2;
uint32_t scsi_status = fcprsp->rspStatus3;
uint32_t *lp;
@@ -331,9 +446,9 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
logit = LOG_FCP;
lpfc_printf_log(phba, KERN_WARNING, logit,
- "%d:0730 FCP command x%x failed: x%x SNS x%x x%x "
+ "%d (%d):0730 FCP command x%x failed: x%x SNS x%x x%x "
"Data: x%x x%x x%x x%x x%x\n",
- phba->brd_no, cmnd->cmnd[0], scsi_status,
+ phba->brd_no, vpi, cmnd->cmnd[0], scsi_status,
be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
be32_to_cpu(fcprsp->rspResId),
be32_to_cpu(fcprsp->rspSnsLen),
@@ -349,15 +464,16 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
}
}
- cmnd->resid = 0;
+ scsi_set_resid(cmnd, 0);
if (resp_info & RESID_UNDER) {
- cmnd->resid = be32_to_cpu(fcprsp->rspResId);
+ scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
- "%d:0716 FCP Read Underrun, expected %d, "
- "residual %d Data: x%x x%x x%x\n", phba->brd_no,
- be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
- fcpi_parm, cmnd->cmnd[0], cmnd->underflow);
+ "%d (%d):0716 FCP Read Underrun, expected %d, "
+ "residual %d Data: x%x x%x x%x\n",
+ phba->brd_no, vpi, be32_to_cpu(fcpcmd->fcpDl),
+ scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
+ cmnd->underflow);
/*
* If there is an under run check if under run reported by
@@ -366,15 +482,16 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
*/
if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
fcpi_parm &&
- (cmnd->resid != fcpi_parm)) {
+ (scsi_get_resid(cmnd) != fcpi_parm)) {
lpfc_printf_log(phba, KERN_WARNING,
- LOG_FCP | LOG_FCP_ERROR,
- "%d:0735 FCP Read Check Error and Underrun "
- "Data: x%x x%x x%x x%x\n", phba->brd_no,
- be32_to_cpu(fcpcmd->fcpDl),
- cmnd->resid,
- fcpi_parm, cmnd->cmnd[0]);
- cmnd->resid = cmnd->request_bufflen;
+ LOG_FCP | LOG_FCP_ERROR,
+ "%d (%d):0735 FCP Read Check Error "
+ "and Underrun Data: x%x x%x x%x x%x\n",
+ phba->brd_no, vpi,
+ be32_to_cpu(fcpcmd->fcpDl),
+ scsi_get_resid(cmnd), fcpi_parm,
+ cmnd->cmnd[0]);
+ scsi_set_resid(cmnd, scsi_bufflen(cmnd));
host_status = DID_ERROR;
}
/*
@@ -385,22 +502,23 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
*/
if (!(resp_info & SNS_LEN_VALID) &&
(scsi_status == SAM_STAT_GOOD) &&
- (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
+ (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
+ < cmnd->underflow)) {
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
- "%d:0717 FCP command x%x residual "
+ "%d (%d):0717 FCP command x%x residual "
"underrun converted to error "
- "Data: x%x x%x x%x\n", phba->brd_no,
- cmnd->cmnd[0], cmnd->request_bufflen,
- cmnd->resid, cmnd->underflow);
-
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, vpi, cmnd->cmnd[0],
+ scsi_bufflen(cmnd),
+ scsi_get_resid(cmnd), cmnd->underflow);
host_status = DID_ERROR;
}
} else if (resp_info & RESID_OVER) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
- "%d:0720 FCP command x%x residual "
+ "%d (%d):0720 FCP command x%x residual "
"overrun error. Data: x%x x%x \n",
- phba->brd_no, cmnd->cmnd[0],
- cmnd->request_bufflen, cmnd->resid);
+ phba->brd_no, vpi, cmnd->cmnd[0],
+ scsi_bufflen(cmnd), scsi_get_resid(cmnd));
host_status = DID_ERROR;
/*
@@ -410,13 +528,14 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
- "%d:0734 FCP Read Check Error Data: "
- "x%x x%x x%x x%x\n", phba->brd_no,
- be32_to_cpu(fcpcmd->fcpDl),
- be32_to_cpu(fcprsp->rspResId),
- fcpi_parm, cmnd->cmnd[0]);
+ "%d (%d):0734 FCP Read Check Error Data: "
+ "x%x x%x x%x x%x\n",
+ phba->brd_no, vpi,
+ be32_to_cpu(fcpcmd->fcpDl),
+ be32_to_cpu(fcprsp->rspResId),
+ fcpi_parm, cmnd->cmnd[0]);
host_status = DID_ERROR;
- cmnd->resid = cmnd->request_bufflen;
+ scsi_set_resid(cmnd, scsi_bufflen(cmnd));
}
out:
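The underrun path above is plain arithmetic on the residual: rspResId is the number of requested bytes the target did not transfer, and if the bytes that did move fall below cmnd->underflow while the SCSI status is GOOD, the command is turned into an error. A stand-alone sketch with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int bufflen   = 4096;	/* scsi_bufflen(cmnd)          */
	unsigned int resid     = 3584;	/* rspResId reported by target */
	unsigned int underflow = 1024;	/* cmnd->underflow             */

	unsigned int transferred = bufflen - resid;	/* 512 bytes */

	if (transferred < underflow)
		printf("underrun converted to error (%u < %u)\n",
		       transferred, underflow);
	return 0;
}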
@@ -429,9 +548,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
{
struct lpfc_scsi_buf *lpfc_cmd =
(struct lpfc_scsi_buf *) pIocbIn->context1;
+ struct lpfc_vport *vport = pIocbIn->vport;
struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *pnode = rdata->pnode;
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+ uint32_t vpi = (lpfc_cmd->cur_iocbq.vport
+ ? lpfc_cmd->cur_iocbq.vport->vpi
+ : 0);
int result;
struct scsi_device *sdev, *tmp_sdev;
int depth = 0;
@@ -447,22 +570,31 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->status = IOSTAT_DEFAULT;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
- "%d:0729 FCP cmd x%x failed <%d/%d> status: "
- "x%x result: x%x Data: x%x x%x\n",
- phba->brd_no, cmd->cmnd[0], cmd->device->id,
- cmd->device->lun, lpfc_cmd->status,
- lpfc_cmd->result, pIocbOut->iocb.ulpContext,
+ "%d (%d):0729 FCP cmd x%x failed <%d/%d> "
+ "status: x%x result: x%x Data: x%x x%x\n",
+ phba->brd_no, vpi, cmd->cmnd[0],
+ cmd->device ? cmd->device->id : 0xffff,
+ cmd->device ? cmd->device->lun : 0xffff,
+ lpfc_cmd->status, lpfc_cmd->result,
+ pIocbOut->iocb.ulpContext,
lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
switch (lpfc_cmd->status) {
case IOSTAT_FCP_RSP_ERROR:
/* Call FCP RSP handler to determine result */
- lpfc_handle_fcp_err(lpfc_cmd,pIocbOut);
+ lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
break;
case IOSTAT_NPORT_BSY:
case IOSTAT_FABRIC_BSY:
cmd->result = ScsiResult(DID_BUS_BUSY, 0);
break;
+ case IOSTAT_LOCAL_REJECT:
+ if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
+ lpfc_cmd->result == IOERR_NO_RESOURCES ||
+ lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
+ cmd->result = ScsiResult(DID_REQUEUE, 0);
+ break;
+ } /* else: fall through */
default:
cmd->result = ScsiResult(DID_ERROR, 0);
break;
@@ -479,11 +611,12 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
uint32_t *lp = (uint32_t *)cmd->sense_buffer;
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
- "%d:0710 Iodone <%d/%d> cmd %p, error x%x "
- "SNS x%x x%x Data: x%x x%x\n",
- phba->brd_no, cmd->device->id,
+ "%d (%d):0710 Iodone <%d/%d> cmd %p, error "
+ "x%x SNS x%x x%x Data: x%x x%x\n",
+ phba->brd_no, vpi, cmd->device->id,
cmd->device->lun, cmd, cmd->result,
- *lp, *(lp + 3), cmd->retries, cmd->resid);
+ *lp, *(lp + 3), cmd->retries,
+ scsi_get_resid(cmd));
}
result = cmd->result;
@@ -496,6 +629,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
return;
}
+
+ if (!result)
+ lpfc_rampup_queue_depth(phba, sdev);
+
if (!result && pnode != NULL &&
((jiffies - pnode->last_ramp_up_time) >
LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
@@ -534,7 +671,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
tmp_sdev->queue_depth - 1);
}
/*
- * The queue depth cannot be lowered any more.
+ * The queue depth cannot be lowered any more.
* Modify the returned error code to store
* the final depth value set by
* scsi_track_queue_full.
@@ -544,8 +681,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
if (depth) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
- "%d:0711 detected queue full - lun queue depth "
- " adjusted to %d.\n", phba->brd_no, depth);
+ "%d (%d):0711 detected queue full - "
+ "lun queue depth adjusted to %d.\n",
+ phba->brd_no, vpi, depth);
}
}
@@ -553,9 +691,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
static void
-lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
- struct lpfc_nodelist *pnode)
+lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+ struct lpfc_nodelist *pnode)
{
+ struct lpfc_hba *phba = vport->phba;
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
@@ -592,7 +731,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
* bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
* data bde entry.
*/
- if (scsi_cmnd->use_sg) {
+ if (scsi_sg_count(scsi_cmnd)) {
if (datadir == DMA_TO_DEVICE) {
iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
iocb_cmd->un.fcpi.fcpi_parm = 0;
@@ -602,23 +741,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
} else {
iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
iocb_cmd->ulpPU = PARM_READ_CHECK;
- iocb_cmd->un.fcpi.fcpi_parm =
- scsi_cmnd->request_bufflen;
- fcp_cmnd->fcpCntl3 = READ_DATA;
- phba->fc4InputRequests++;
- }
- } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
- if (datadir == DMA_TO_DEVICE) {
- iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
- iocb_cmd->un.fcpi.fcpi_parm = 0;
- iocb_cmd->ulpPU = 0;
- fcp_cmnd->fcpCntl3 = WRITE_DATA;
- phba->fc4OutputRequests++;
- } else {
- iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
- iocb_cmd->ulpPU = PARM_READ_CHECK;
- iocb_cmd->un.fcpi.fcpi_parm =
- scsi_cmnd->request_bufflen;
+ iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
fcp_cmnd->fcpCntl3 = READ_DATA;
phba->fc4InputRequests++;
}
@@ -642,15 +765,15 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
piocbq->context1 = lpfc_cmd;
piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
+ piocbq->vport = vport;
}
static int
-lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
+lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
struct lpfc_scsi_buf *lpfc_cmd,
unsigned int lun,
uint8_t task_mgmt_cmd)
{
- struct lpfc_sli *psli;
struct lpfc_iocbq *piocbq;
IOCB_t *piocb;
struct fcp_cmnd *fcp_cmnd;
@@ -661,8 +784,9 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
return 0;
}
- psli = &phba->sli;
piocbq = &(lpfc_cmd->cur_iocbq);
+ piocbq->vport = vport;
+
piocb = &piocbq->iocb;
fcp_cmnd = lpfc_cmd->fcp_cmnd;
@@ -688,7 +812,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
piocb->ulpTimeout = lpfc_cmd->timeout;
}
- return (1);
+ return 1;
}
static void
@@ -704,10 +828,11 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
}
static int
-lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
+lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
unsigned tgt_id, unsigned int lun,
struct lpfc_rport_data *rdata)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *iocbqrsp;
int ret;
@@ -716,12 +841,11 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
return FAILED;
lpfc_cmd->rdata = rdata;
- ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
+ ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
FCP_TARGET_RESET);
if (!ret)
return FAILED;
- lpfc_cmd->scsi_hba = phba;
iocbq = &lpfc_cmd->cur_iocbq;
iocbqrsp = lpfc_sli_get_iocbq(phba);
@@ -730,10 +854,10 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
/* Issue Target Reset to TGT <num> */
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
- "%d:0702 Issue Target Reset to TGT %d "
+ "%d (%d):0702 Issue Target Reset to TGT %d "
"Data: x%x x%x\n",
- phba->brd_no, tgt_id, rdata->pnode->nlp_rpi,
- rdata->pnode->nlp_flag);
+ phba->brd_no, vport->vpi, tgt_id,
+ rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
ret = lpfc_sli_issue_iocb_wait(phba,
&phba->sli.ring[phba->sli.fcp_ring],
@@ -758,7 +882,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
const char *
lpfc_info(struct Scsi_Host *host)
{
- struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata;
+ struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
+ struct lpfc_hba *phba = vport->phba;
int len;
static char lpfcinfobuf[384];
@@ -800,26 +925,22 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba)
void lpfc_poll_timeout(unsigned long ptr)
{
- struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
- unsigned long iflag;
-
- spin_lock_irqsave(phba->host->host_lock, iflag);
+ struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_poll_fcp_ring (phba);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_poll_rearm_timer(phba);
}
-
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
}
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
- struct lpfc_hba *phba =
- (struct lpfc_hba *) cmnd->device->host->hostdata;
- struct lpfc_sli *psli = &phba->sli;
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli *psli = &phba->sli;
struct lpfc_rport_data *rdata = cmnd->device->hostdata;
struct lpfc_nodelist *ndlp = rdata->pnode;
struct lpfc_scsi_buf *lpfc_cmd;
@@ -840,11 +961,14 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
goto out_fail_command;
}
- lpfc_cmd = lpfc_get_scsi_buf (phba);
+ lpfc_cmd = lpfc_get_scsi_buf(phba);
if (lpfc_cmd == NULL) {
+ lpfc_adjust_queue_depth(phba);
+
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
- "%d:0707 driver's buffer pool is empty, "
- "IO busied\n", phba->brd_no);
+ "%d (%d):0707 driver's buffer pool is empty, "
+ "IO busied\n",
+ phba->brd_no, vport->vpi);
goto out_host_busy;
}
@@ -862,10 +986,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
if (err)
goto out_host_busy_free_buf;
- lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);
+ lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
- &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
+ &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
if (err)
goto out_host_busy_free_buf;
@@ -907,8 +1031,9 @@ lpfc_block_error_handler(struct scsi_cmnd *cmnd)
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
- struct Scsi_Host *shost = cmnd->device->host;
- struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
struct lpfc_iocbq *iocb;
struct lpfc_iocbq *abtsiocb;
@@ -918,8 +1043,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
int ret = SUCCESS;
lpfc_block_error_handler(cmnd);
- spin_lock_irq(shost->host_lock);
-
lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
BUG_ON(!lpfc_cmd);
@@ -956,12 +1079,13 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
icmd->ulpLe = 1;
icmd->ulpClass = cmd->ulpClass;
- if (phba->hba_state >= LPFC_LINK_UP)
+ if (lpfc_is_link_up(phba))
icmd->ulpCommand = CMD_ABORT_XRI_CN;
else
icmd->ulpCommand = CMD_CLOSE_XRI_CN;
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+ abtsiocb->vport = vport;
if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
lpfc_sli_release_iocbq(phba, abtsiocb);
ret = FAILED;
@@ -977,9 +1101,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_poll_fcp_ring (phba);
- spin_unlock_irq(phba->host->host_lock);
- schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
- spin_lock_irq(phba->host->host_lock);
+ schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ);
if (++loop_count
> (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
break;
@@ -988,30 +1110,30 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (lpfc_cmd->pCmd == cmnd) {
ret = FAILED;
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "%d:0748 abort handler timed out waiting for "
- "abort to complete: ret %#x, ID %d, LUN %d, "
- "snum %#lx\n",
- phba->brd_no, ret, cmnd->device->id,
- cmnd->device->lun, cmnd->serial_number);
+ "%d (%d):0748 abort handler timed out waiting "
+ "for abort to complete: ret %#x, ID %d, "
+ "LUN %d, snum %#lx\n",
+ phba->brd_no, vport->vpi, ret,
+ cmnd->device->id, cmnd->device->lun,
+ cmnd->serial_number);
}
out:
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
- "%d:0749 SCSI Layer I/O Abort Request "
+ "%d (%d):0749 SCSI Layer I/O Abort Request "
"Status x%x ID %d LUN %d snum %#lx\n",
- phba->brd_no, ret, cmnd->device->id,
+ phba->brd_no, vport->vpi, ret, cmnd->device->id,
cmnd->device->lun, cmnd->serial_number);
- spin_unlock_irq(shost->host_lock);
-
return ret;
}
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
- struct Scsi_Host *shost = cmnd->device->host;
- struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_scsi_buf *lpfc_cmd;
struct lpfc_iocbq *iocbq, *iocbqrsp;
struct lpfc_rport_data *rdata = cmnd->device->hostdata;
@@ -1022,28 +1144,26 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
int cnt, loopcnt;
lpfc_block_error_handler(cmnd);
- spin_lock_irq(shost->host_lock);
loopcnt = 0;
/*
* If target is not in a MAPPED state, delay the reset until
* target is rediscovered or devloss timeout expires.
*/
- while ( 1 ) {
+ while (1) {
if (!pnode)
goto out;
if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
- spin_unlock_irq(phba->host->host_lock);
schedule_timeout_uninterruptible(msecs_to_jiffies(500));
- spin_lock_irq(phba->host->host_lock);
loopcnt++;
rdata = cmnd->device->hostdata;
if (!rdata ||
(loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "%d:0721 LUN Reset rport failure:"
- " cnt x%x rdata x%p\n",
- phba->brd_no, loopcnt, rdata);
+ "%d (%d):0721 LUN Reset rport "
+ "failure: cnt x%x rdata x%p\n",
+ phba->brd_no, vport->vpi,
+ loopcnt, rdata);
goto out;
}
pnode = rdata->pnode;
@@ -1054,15 +1174,14 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
break;
}
- lpfc_cmd = lpfc_get_scsi_buf (phba);
+ lpfc_cmd = lpfc_get_scsi_buf(phba);
if (lpfc_cmd == NULL)
goto out;
lpfc_cmd->timeout = 60;
- lpfc_cmd->scsi_hba = phba;
lpfc_cmd->rdata = rdata;
- ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun,
+ ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
FCP_TARGET_RESET);
if (!ret)
goto out_free_scsi_buf;
@@ -1075,8 +1194,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
goto out_free_scsi_buf;
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
- "%d:0703 Issue target reset to TGT %d LUN %d rpi x%x "
- "nlp_flag x%x\n", phba->brd_no, cmnd->device->id,
+ "%d (%d):0703 Issue target reset to TGT %d LUN %d "
+ "rpi x%x nlp_flag x%x\n",
+ phba->brd_no, vport->vpi, cmnd->device->id,
cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
iocb_status = lpfc_sli_issue_iocb_wait(phba,
@@ -1111,9 +1231,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
0, LPFC_CTX_LUN);
loopcnt = 0;
while(cnt) {
- spin_unlock_irq(phba->host->host_lock);
schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
- spin_lock_irq(phba->host->host_lock);
if (++loopcnt
> (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
@@ -1127,8 +1245,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
if (cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "%d:0719 device reset I/O flush failure: cnt x%x\n",
- phba->brd_no, cnt);
+ "%d (%d):0719 device reset I/O flush failure: "
+ "cnt x%x\n",
+ phba->brd_no, vport->vpi, cnt);
ret = FAILED;
}
@@ -1137,21 +1256,21 @@ out_free_scsi_buf:
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "%d:0713 SCSI layer issued device reset (%d, %d) "
+ "%d (%d):0713 SCSI layer issued device reset (%d, %d) "
"return x%x status x%x result x%x\n",
- phba->brd_no, cmnd->device->id, cmnd->device->lun,
- ret, cmd_status, cmd_result);
+ phba->brd_no, vport->vpi, cmnd->device->id,
+ cmnd->device->lun, ret, cmd_status, cmd_result);
out:
- spin_unlock_irq(shost->host_lock);
return ret;
}
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
- struct Scsi_Host *shost = cmnd->device->host;
- struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
int match;
int ret = FAILED, i, err_count = 0;
@@ -1159,7 +1278,6 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
struct lpfc_scsi_buf * lpfc_cmd;
lpfc_block_error_handler(cmnd);
- spin_lock_irq(shost->host_lock);
lpfc_cmd = lpfc_get_scsi_buf(phba);
if (lpfc_cmd == NULL)
@@ -1167,7 +1285,6 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
/* The lpfc_cmd storage is reused. Set all loop invariants. */
lpfc_cmd->timeout = 60;
- lpfc_cmd->scsi_hba = phba;
/*
* Since the driver manages a single bus device, reset all
@@ -1177,7 +1294,8 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
for (i = 0; i < LPFC_MAX_TARGET; i++) {
/* Search for mapped node by target ID */
match = 0;
- list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
i == ndlp->nlp_sid &&
ndlp->rport) {
@@ -1185,15 +1303,18 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
break;
}
}
+ spin_unlock_irq(shost->host_lock);
if (!match)
continue;
- ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun,
+ ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
+ cmnd->device->lun,
ndlp->rport->dd_data);
if (ret != SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "%d:0700 Bus Reset on target %d failed\n",
- phba->brd_no, i);
+ "%d (%d):0700 Bus Reset on target %d "
+ "failed\n",
+ phba->brd_no, vport->vpi, i);
err_count++;
break;
}
@@ -1219,9 +1340,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
0, 0, 0, LPFC_CTX_HOST);
loopcnt = 0;
while(cnt) {
- spin_unlock_irq(phba->host->host_lock);
schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
- spin_lock_irq(phba->host->host_lock);
if (++loopcnt
> (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
@@ -1234,25 +1353,24 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
if (cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
- phba->brd_no, cnt, i);
+ "%d (%d):0715 Bus Reset I/O flush failure: "
+ "cnt x%x left x%x\n",
+ phba->brd_no, vport->vpi, cnt, i);
ret = FAILED;
}
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_FCP,
- "%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
- phba->brd_no, ret);
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "%d (%d):0714 SCSI layer issued Bus Reset Data: x%x\n",
+ phba->brd_no, vport->vpi, ret);
out:
- spin_unlock_irq(shost->host_lock);
return ret;
}
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
- struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata;
+ struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_scsi_buf *scsi_buf = NULL;
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
uint32_t total = 0, i;
@@ -1273,27 +1391,35 @@ lpfc_slave_alloc(struct scsi_device *sdev)
*/
total = phba->total_scsi_bufs;
num_to_alloc = phba->cfg_lun_queue_depth + 2;
- if (total >= phba->cfg_hba_queue_depth) {
+
+ /* Allow some exchanges to be available always to complete discovery */
+ if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
- "%d:0704 At limitation of %d preallocated "
- "command buffers\n", phba->brd_no, total);
+ "%d (%d):0704 At limitation of %d "
+ "preallocated command buffers\n",
+ phba->brd_no, vport->vpi, total);
return 0;
- } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
+
+ /* Allow some exchanges to be available always to complete discovery */
+ } else if (total + num_to_alloc >
+ phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
- "%d:0705 Allocation request of %d command "
- "buffers will exceed max of %d. Reducing "
- "allocation request to %d.\n", phba->brd_no,
- num_to_alloc, phba->cfg_hba_queue_depth,
+ "%d (%d):0705 Allocation request of %d "
+ "command buffers will exceed max of %d. "
+ "Reducing allocation request to %d.\n",
+ phba->brd_no, vport->vpi, num_to_alloc,
+ phba->cfg_hba_queue_depth,
(phba->cfg_hba_queue_depth - total));
num_to_alloc = phba->cfg_hba_queue_depth - total;
}
for (i = 0; i < num_to_alloc; i++) {
- scsi_buf = lpfc_new_scsi_buf(phba);
+ scsi_buf = lpfc_new_scsi_buf(vport);
if (!scsi_buf) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "%d:0706 Failed to allocate command "
- "buffer\n", phba->brd_no);
+ "%d (%d):0706 Failed to allocate "
+ "command buffer\n",
+ phba->brd_no, vport->vpi);
break;
}
@@ -1308,8 +1434,9 @@ lpfc_slave_alloc(struct scsi_device *sdev)
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
- struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata;
- struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
+ struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
if (sdev->tagged_supported)
scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
@@ -1340,6 +1467,7 @@ lpfc_slave_destroy(struct scsi_device *sdev)
return;
}
+
struct scsi_host_template lpfc_template = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
@@ -1352,11 +1480,10 @@ struct scsi_host_template lpfc_template = {
.slave_configure = lpfc_slave_configure,
.slave_destroy = lpfc_slave_destroy,
.scan_finished = lpfc_scan_finished,
- .scan_start = lpfc_scan_start,
.this_id = -1,
.sg_tablesize = LPFC_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
- .shost_attrs = lpfc_host_attrs,
+ .shost_attrs = lpfc_hba_attrs,
.max_sectors = 0xFFFF,
};
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index cdcd2535803..31787bb6d53 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2005 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2006 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -110,7 +110,6 @@ struct fcp_cmnd {
struct lpfc_scsi_buf {
struct list_head list;
struct scsi_cmnd *pCmd;
- struct lpfc_hba *scsi_hba;
struct lpfc_rport_data *rdata;
uint32_t timeout;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index a1e721459e2..f4d5a6b00fd 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -38,23 +38,25 @@
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
+#include "lpfc_debugfs.h"
/*
* Define macro to log: Mailbox command x%x cannot issue Data
* This allows multiple uses of lpfc_msgBlk0311
* w/o perturbing log msg utility.
*/
-#define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \
+#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
lpfc_printf_log(phba, \
KERN_INFO, \
LOG_MBOX | LOG_SLI, \
- "%d:0311 Mailbox command x%x cannot issue " \
- "Data: x%x x%x x%x\n", \
+ "%d (%d):0311 Mailbox command x%x cannot " \
+ "issue Data: x%x x%x x%x\n", \
phba->brd_no, \
- mb->mbxCommand, \
- phba->hba_state, \
+ pmbox->vport ? pmbox->vport->vpi : 0, \
+ pmbox->mb.mbxCommand, \
+ phba->pport->port_state, \
psli->sli_flag, \
- flag);
+ flag)
/* There are only four IOCB completion types. */
@@ -65,8 +67,26 @@ typedef enum _lpfc_iocb_type {
LPFC_ABORT_IOCB
} lpfc_iocb_type;
-struct lpfc_iocbq *
-lpfc_sli_get_iocbq(struct lpfc_hba * phba)
+ /* SLI-2/SLI-3 provide different sized iocbs. Given a pointer
+ * to the start of the ring, and the slot number of the
+ * desired iocb entry, calc a pointer to that entry.
+ */
+static inline IOCB_t *
+lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ return (IOCB_t *) (((char *) pring->cmdringaddr) +
+ pring->cmdidx * phba->iocb_cmd_size);
+}
+
+static inline IOCB_t *
+lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ return (IOCB_t *) (((char *) pring->rspringaddr) +
+ pring->rspidx * phba->iocb_rsp_size);
+}
+
+static struct lpfc_iocbq *
+__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
struct lpfc_iocbq * iocbq = NULL;
@@ -75,10 +95,22 @@ lpfc_sli_get_iocbq(struct lpfc_hba * phba)
return iocbq;
}
+struct lpfc_iocbq *
+lpfc_sli_get_iocbq(struct lpfc_hba *phba)
+{
+ struct lpfc_iocbq * iocbq = NULL;
+ unsigned long iflags;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ iocbq = __lpfc_sli_get_iocbq(phba);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return iocbq;
+}
+
void
-lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
+__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
- size_t start_clean = (size_t)(&((struct lpfc_iocbq *)NULL)->iocb);
+ size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
/*
* Clean all volatile data fields, preserve iotag and node struct.
@@ -87,6 +119,19 @@ lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
+void
+lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ unsigned long iflags;
+
+ /*
+ * Clean all volatile data fields, preserve iotag and node struct.
+ */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ __lpfc_sli_release_iocbq(phba, iocbq);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+}
+
/*
* Translate the iocb command to an iocb command type used to decide the final
* disposition of each completed IOCB.
@@ -155,6 +200,9 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
case CMD_RCV_ELS_REQ_CX:
case CMD_RCV_SEQUENCE64_CX:
case CMD_RCV_ELS_REQ64_CX:
+ case CMD_IOCB_RCV_SEQ64_CX:
+ case CMD_IOCB_RCV_ELS64_CX:
+ case CMD_IOCB_RCV_CONT64_CX:
type = LPFC_UNSOL_IOCB;
break;
default:
@@ -166,73 +214,77 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
}
static int
-lpfc_sli_ring_map(struct lpfc_hba * phba, LPFC_MBOXQ_t *pmb)
+lpfc_sli_ring_map(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
- MAILBOX_t *pmbox = &pmb->mb;
- int i, rc;
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *pmbox;
+ int i, rc, ret = 0;
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb)
+ return -ENOMEM;
+ pmbox = &pmb->mb;
+ phba->link_state = LPFC_INIT_MBX_CMDS;
for (i = 0; i < psli->num_rings; i++) {
- phba->hba_state = LPFC_INIT_MBX_CMDS;
lpfc_config_ring(phba, i, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
- "%d:0446 Adapter failed to init, "
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0446 Adapter failed to init (%d), "
"mbxCmd x%x CFG_RING, mbxStatus x%x, "
"ring %d\n",
- phba->brd_no,
+ phba->brd_no, rc,
pmbox->mbxCommand,
pmbox->mbxStatus,
i);
- phba->hba_state = LPFC_HBA_ERROR;
- return -ENXIO;
+ phba->link_state = LPFC_HBA_ERROR;
+ ret = -ENXIO;
+ break;
}
}
- return 0;
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return ret;
}
static int
-lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
+lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *piocb)
{
list_add_tail(&piocb->list, &pring->txcmplq);
pring->txcmplq_cnt++;
- if (unlikely(pring->ringno == LPFC_ELS_RING))
- mod_timer(&phba->els_tmofunc,
- jiffies + HZ * (phba->fc_ratov << 1));
+ if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
+ (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
+ (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+ if (!piocb->vport)
+ BUG();
+ else
+ mod_timer(&piocb->vport->els_tmofunc,
+ jiffies + HZ * (phba->fc_ratov << 1));
+ }
- return (0);
+
+ return 0;
}
static struct lpfc_iocbq *
-lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
+lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
- struct list_head *dlp;
struct lpfc_iocbq *cmd_iocb;
- dlp = &pring->txq;
- cmd_iocb = NULL;
- list_remove_head((&pring->txq), cmd_iocb,
- struct lpfc_iocbq,
- list);
- if (cmd_iocb) {
- /* If the first ptr is not equal to the list header,
- * deque the IOCBQ_t and return it.
- */
+ list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
+ if (cmd_iocb != NULL)
pring->txq_cnt--;
- }
- return (cmd_iocb);
+ return cmd_iocb;
}
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
- struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
+ struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
+ &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
+ &phba->slim2p->mbx.us.s2.port[pring->ringno];
uint32_t max_cmd_idx = pring->numCiocb;
- IOCB_t *iocb = NULL;
if ((pring->next_cmdidx == pring->cmdidx) &&
(++pring->next_cmdidx >= max_cmd_idx))
@@ -249,15 +301,17 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
phba->brd_no, pring->ringno,
pring->local_getidx, max_cmd_idx);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
/*
* All error attention handlers are posted to
* worker thread
*/
phba->work_ha |= HA_ERATT;
phba->work_hs = HS_FFER3;
+
+ /* hbalock should already be held */
if (phba->work_wait)
- wake_up(phba->work_wait);
+ lpfc_worker_wake_up(phba);
return NULL;
}
@@ -266,39 +320,34 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
return NULL;
}
- iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx);
-
- return iocb;
+ return lpfc_cmd_iocb(phba, pring);
}
uint16_t
-lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
+lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
- struct lpfc_iocbq ** new_arr;
- struct lpfc_iocbq ** old_arr;
+ struct lpfc_iocbq **new_arr;
+ struct lpfc_iocbq **old_arr;
size_t new_len;
struct lpfc_sli *psli = &phba->sli;
uint16_t iotag;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
iotag = psli->last_iotag;
if(++iotag < psli->iocbq_lookup_len) {
psli->last_iotag = iotag;
psli->iocbq_lookup[iotag] = iocbq;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
iocbq->iotag = iotag;
return iotag;
- }
- else if (psli->iocbq_lookup_len < (0xffff
+ } else if (psli->iocbq_lookup_len < (0xffff
- LPFC_IOCBQ_LOOKUP_INCREMENT)) {
new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
- spin_unlock_irq(phba->host->host_lock);
- new_arr = kmalloc(new_len * sizeof (struct lpfc_iocbq *),
+ spin_unlock_irq(&phba->hbalock);
+ new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
GFP_KERNEL);
if (new_arr) {
- memset((char *)new_arr, 0,
- new_len * sizeof (struct lpfc_iocbq *));
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
old_arr = psli->iocbq_lookup;
if (new_len <= psli->iocbq_lookup_len) {
/* highly improbable case */
@@ -307,11 +356,11 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
if(++iotag < psli->iocbq_lookup_len) {
psli->last_iotag = iotag;
psli->iocbq_lookup[iotag] = iocbq;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
iocbq->iotag = iotag;
return iotag;
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return 0;
}
if (psli->iocbq_lookup)
@@ -322,13 +371,13 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
psli->iocbq_lookup_len = new_len;
psli->last_iotag = iotag;
psli->iocbq_lookup[iotag] = iocbq;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
iocbq->iotag = iotag;
kfree(old_arr);
return iotag;
}
} else
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
"%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n",
@@ -349,7 +398,7 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/*
* Issue iocb command to adapter
*/
- lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t));
+ lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
wmb();
pring->stats.iocb_cmd++;
@@ -361,20 +410,18 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (nextiocb->iocb_cmpl)
lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
else
- lpfc_sli_release_iocbq(phba, nextiocb);
+ __lpfc_sli_release_iocbq(phba, nextiocb);
/*
* Let the HBA know what IOCB slot will be the next one the
* driver will put a command into.
*/
pring->cmdidx = pring->next_cmdidx;
- writel(pring->cmdidx, phba->MBslimaddr
- + (SLIMOFF + (pring->ringno * 2)) * 4);
+ writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
static void
-lpfc_sli_update_full_ring(struct lpfc_hba * phba,
- struct lpfc_sli_ring *pring)
+lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
int ringno = pring->ringno;
@@ -393,8 +440,7 @@ lpfc_sli_update_full_ring(struct lpfc_hba * phba,
}
static void
-lpfc_sli_update_ring(struct lpfc_hba * phba,
- struct lpfc_sli_ring *pring)
+lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
int ringno = pring->ringno;
@@ -407,7 +453,7 @@ lpfc_sli_update_ring(struct lpfc_hba * phba,
}
static void
-lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
+lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
IOCB_t *iocb;
struct lpfc_iocbq *nextiocb;
@@ -420,7 +466,7 @@ lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
* (d) IOCB processing is not blocked by the outstanding mbox command.
*/
if (pring->txq_cnt &&
- (phba->hba_state > LPFC_LINK_DOWN) &&
+ lpfc_is_link_up(phba) &&
(pring->ringno != phba->sli.fcp_ring ||
phba->sli.sli_flag & LPFC_PROCESS_LA) &&
!(pring->flag & LPFC_STOP_IOCB_MBX)) {
@@ -440,11 +486,15 @@ lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
static void
-lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
+lpfc_sli_turn_on_ring(struct lpfc_hba *phba, int ringno)
{
- struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[ringno];
+ struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
+ &phba->slim2p->mbx.us.s3_pgp.port[ringno] :
+ &phba->slim2p->mbx.us.s2.port[ringno];
+ unsigned long iflags;
/* If the ring is active, flag it */
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (phba->sli.ring[ringno].cmdringaddr) {
if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
@@ -453,11 +503,176 @@ lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
*/
phba->sli.ring[ringno].local_getidx
= le32_to_cpu(pgp->cmdGetInx);
- spin_lock_irq(phba->host->host_lock);
lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
- spin_unlock_irq(phba->host->host_lock);
}
}
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+}
+
+struct lpfc_hbq_entry *
+lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
+{
+ struct hbq_s *hbqp = &phba->hbqs[hbqno];
+
+ if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
+ ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
+ hbqp->next_hbqPutIdx = 0;
+
+ if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
+ uint32_t raw_index = phba->hbq_get[hbqno];
+ uint32_t getidx = le32_to_cpu(raw_index);
+
+ hbqp->local_hbqGetIdx = getidx;
+
+ if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_SLI | LOG_VPORT,
+ "%d:1802 HBQ %d: local_hbqGetIdx "
+ "%u is > than hbqp->entry_count %u\n",
+ phba->brd_no, hbqno,
+ hbqp->local_hbqGetIdx,
+ hbqp->entry_count);
+
+ phba->link_state = LPFC_HBA_ERROR;
+ return NULL;
+ }
+
+ if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
+ return NULL;
+ }
+
+ return (struct lpfc_hbq_entry *) phba->hbqslimp.virt + hbqp->hbqPutIdx;
+}
+
+void
+lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
+{
+ struct lpfc_dmabuf *dmabuf, *next_dmabuf;
+ struct hbq_dmabuf *hbq_buf;
+
+ /* Return all memory used by all HBQs */
+ list_for_each_entry_safe(dmabuf, next_dmabuf,
+ &phba->hbq_buffer_list, list) {
+ hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
+ list_del(&hbq_buf->dbuf.list);
+ lpfc_hbq_free(phba, hbq_buf->dbuf.virt, hbq_buf->dbuf.phys);
+ kfree(hbq_buf);
+ }
+}
+
+static void
+lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
+ struct hbq_dmabuf *hbq_buf)
+{
+ struct lpfc_hbq_entry *hbqe;
+ dma_addr_t physaddr = hbq_buf->dbuf.phys;
+
+ /* Get next HBQ entry slot to use */
+ hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
+ if (hbqe) {
+ struct hbq_s *hbqp = &phba->hbqs[hbqno];
+
+ hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
+ hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
+ hbqe->bde.tus.f.bdeSize = FCELSSIZE;
+ hbqe->bde.tus.f.bdeFlags = 0;
+ hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
+ hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
+ /* Sync SLIM */
+ hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
+ writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
+ /* flush */
+ readl(phba->hbq_put + hbqno);
+ list_add_tail(&hbq_buf->dbuf.list, &phba->hbq_buffer_list);
+ }
+}
+
+static struct lpfc_hbq_init lpfc_els_hbq = {
+ .rn = 1,
+ .entry_count = 200,
+ .mask_count = 0,
+ .profile = 0,
+ .ring_mask = 1 << LPFC_ELS_RING,
+ .buffer_count = 0,
+ .init_count = 20,
+ .add_count = 5,
+};
+
+static struct lpfc_hbq_init *lpfc_hbq_defs[] = {
+ &lpfc_els_hbq,
+};
+
+int
+lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
+{
+ uint32_t i, start, end;
+ struct hbq_dmabuf *hbq_buffer;
+
+ start = lpfc_hbq_defs[hbqno]->buffer_count;
+ end = count + lpfc_hbq_defs[hbqno]->buffer_count;
+ if (end > lpfc_hbq_defs[hbqno]->entry_count) {
+ end = lpfc_hbq_defs[hbqno]->entry_count;
+ }
+
+ /* Populate HBQ entries */
+ for (i = start; i < end; i++) {
+ hbq_buffer = kmalloc(sizeof(struct hbq_dmabuf),
+ GFP_KERNEL);
+ if (!hbq_buffer)
+ return 1;
+ hbq_buffer->dbuf.virt = lpfc_hbq_alloc(phba, MEM_PRI,
+ &hbq_buffer->dbuf.phys);
+ if (hbq_buffer->dbuf.virt == NULL)
+ return 1;
+ hbq_buffer->tag = (i | (hbqno << 16));
+ lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer);
+ lpfc_hbq_defs[hbqno]->buffer_count++;
+ }
+ return 0;
+}
+
+int
+lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
+{
+ return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->add_count));
+}
+
+int
+lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
+{
+ return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->init_count));
+}
+
+struct hbq_dmabuf *
+lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
+{
+ struct lpfc_dmabuf *d_buf;
+ struct hbq_dmabuf *hbq_buf;
+
+ list_for_each_entry(d_buf, &phba->hbq_buffer_list, list) {
+ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ if ((hbq_buf->tag & 0xffff) == tag) {
+ return hbq_buf;
+ }
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
+ "%d:1803 Bad hbq tag. Data: x%x x%x\n",
+ phba->brd_no, tag,
+ lpfc_hbq_defs[tag >> 16]->buffer_count);
+ return NULL;
+}
+
+void
+lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *sp)
+{
+ uint32_t hbqno;
+
+ if (sp) {
+ hbqno = sp->tag >> 16;
+ lpfc_sli_hbq_to_firmware(phba, hbqno, sp);
+ }
}
static int
@@ -511,32 +726,38 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
case MBX_FLASH_WR_ULA:
case MBX_SET_DEBUG:
case MBX_LOAD_EXP_ROM:
+ case MBX_REG_VPI:
+ case MBX_UNREG_VPI:
+ case MBX_HEARTBEAT:
ret = mbxCommand;
break;
default:
ret = MBX_SHUTDOWN;
break;
}
- return (ret);
+ return ret;
}
static void
-lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
+lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
wait_queue_head_t *pdone_q;
+ unsigned long drvr_flag;
/*
* If pdone_q is empty, the driver thread gave up waiting and
* continued running.
*/
pmboxq->mbox_flag |= LPFC_MBX_WAKE;
+ spin_lock_irqsave(&phba->hbalock, drvr_flag);
pdone_q = (wait_queue_head_t *) pmboxq->context1;
if (pdone_q)
wake_up_interruptible(pdone_q);
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
return;
}
void
-lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_dmabuf *mp;
uint16_t rpi;
@@ -553,79 +774,64 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
* If a REG_LOGIN succeeded after node is destroyed or node
* is in re-discovery driver need to cleanup the RPI.
*/
- if (!(phba->fc_flag & FC_UNLOADING) &&
- (pmb->mb.mbxCommand == MBX_REG_LOGIN64) &&
- (!pmb->mb.mbxStatus)) {
+ if (!(phba->pport->load_flag & FC_UNLOADING) &&
+ pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
+ !pmb->mb.mbxStatus) {
rpi = pmb->mb.un.varWords[0];
- lpfc_unreg_login(phba, rpi, pmb);
- pmb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
+ lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_NOT_FINISHED)
return;
}
- mempool_free( pmb, phba->mbox_mem_pool);
+ mempool_free(pmb, phba->mbox_mem_pool);
return;
}
int
-lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
+lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
- MAILBOX_t *mbox;
MAILBOX_t *pmbox;
LPFC_MBOXQ_t *pmb;
- struct lpfc_sli *psli;
- int i, rc;
- uint32_t process_next;
-
- psli = &phba->sli;
- /* We should only get here if we are in SLI2 mode */
- if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
- return (1);
- }
+ int rc;
+ LIST_HEAD(cmplq);
phba->sli.slistat.mbox_event++;
+ /* Get all completed mailbox buffers into the cmplq */
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
+ spin_unlock_irq(&phba->hbalock);
+
/* Get a Mailbox buffer to setup mailbox commands for callback */
- if ((pmb = phba->sli.mbox_active)) {
- pmbox = &pmb->mb;
- mbox = &phba->slim2p->mbx;
+ do {
+ list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
+ if (pmb == NULL)
+ break;
- /* First check out the status word */
- lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));
+ pmbox = &pmb->mb;
- /* Sanity check to ensure the host owns the mailbox */
- if (pmbox->mbxOwner != OWN_HOST) {
- /* Lets try for a while */
- for (i = 0; i < 10240; i++) {
- /* First copy command data */
- lpfc_sli_pcimem_bcopy(mbox, pmbox,
- sizeof (uint32_t));
- if (pmbox->mbxOwner == OWN_HOST)
- goto mbout;
+ if (pmbox->mbxCommand != MBX_HEARTBEAT) {
+ if (pmb->vport) {
+ lpfc_debugfs_disc_trc(pmb->vport,
+ LPFC_DISC_TRC_MBOX_VPORT,
+ "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
+ (uint32_t)pmbox->mbxCommand,
+ pmbox->un.varWords[0],
+ pmbox->un.varWords[1]);
+ }
+ else {
+ lpfc_debugfs_disc_trc(phba->pport,
+ LPFC_DISC_TRC_MBOX,
+ "MBOX cmpl: cmd:x%x mb:x%x x%x",
+ (uint32_t)pmbox->mbxCommand,
+ pmbox->un.varWords[0],
+ pmbox->un.varWords[1]);
}
- /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
- <status> */
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_MBOX | LOG_SLI,
- "%d:0304 Stray Mailbox Interrupt "
- "mbxCommand x%x mbxStatus x%x\n",
- phba->brd_no,
- pmbox->mbxCommand,
- pmbox->mbxStatus);
-
- spin_lock_irq(phba->host->host_lock);
- phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
- return (1);
}
- mbout:
- del_timer_sync(&phba->sli.mbox_tmo);
- phba->work_hba_events &= ~WORKER_MBOX_TMO;
-
/*
* It is a fatal error if an unknown mbox command completion occurs.
*/
@@ -633,51 +839,50 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
MBX_SHUTDOWN) {
/* Unknown mailbox command compl */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_MBOX | LOG_SLI,
- "%d:0323 Unknown Mailbox command %x Cmpl\n",
- phba->brd_no,
- pmbox->mbxCommand);
- phba->hba_state = LPFC_HBA_ERROR;
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "%d (%d):0323 Unknown Mailbox command "
+ "%x Cmpl\n",
+ phba->brd_no,
+ pmb->vport ? pmb->vport->vpi : 0,
+ pmbox->mbxCommand);
+ phba->link_state = LPFC_HBA_ERROR;
phba->work_hs = HS_FFER3;
lpfc_handle_eratt(phba);
- return (0);
+ continue;
}
- phba->sli.mbox_active = NULL;
if (pmbox->mbxStatus) {
phba->sli.slistat.mbox_stat_err++;
if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
/* Mbox cmd cmpl error - RETRYing */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_MBOX | LOG_SLI,
- "%d:0305 Mbox cmd cmpl error - "
- "RETRYing Data: x%x x%x x%x x%x\n",
- phba->brd_no,
- pmbox->mbxCommand,
- pmbox->mbxStatus,
- pmbox->un.varWords[0],
- phba->hba_state);
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_MBOX | LOG_SLI,
+ "%d (%d):0305 Mbox cmd cmpl "
+ "error - RETRYing Data: x%x "
+ "x%x x%x x%x\n",
+ phba->brd_no,
+ pmb->vport ? pmb->vport->vpi :0,
+ pmbox->mbxCommand,
+ pmbox->mbxStatus,
+ pmbox->un.varWords[0],
+ pmb->vport->port_state);
pmbox->mbxStatus = 0;
pmbox->mbxOwner = OWN_HOST;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc == MBX_SUCCESS)
- return (0);
+ continue;
}
}
/* Mailbox cmd <cmd> Cmpl <cmpl> */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_MBOX | LOG_SLI,
- "%d:0307 Mailbox cmd x%x Cmpl x%p "
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "%d (%d):0307 Mailbox cmd x%x Cmpl x%p "
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
phba->brd_no,
+ pmb->vport ? pmb->vport->vpi : 0,
pmbox->mbxCommand,
pmb->mbox_cmpl,
*((uint32_t *) pmbox),
@@ -690,39 +895,35 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
pmbox->un.varWords[6],
pmbox->un.varWords[7]);
- if (pmb->mbox_cmpl) {
- lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
+ if (pmb->mbox_cmpl)
pmb->mbox_cmpl(phba,pmb);
- }
- }
-
-
- do {
- process_next = 0; /* by default don't loop */
- spin_lock_irq(phba->host->host_lock);
- phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-
- /* Process next mailbox command if there is one */
- if ((pmb = lpfc_mbox_get(phba))) {
- spin_unlock_irq(phba->host->host_lock);
- rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- pmb->mb.mbxStatus = MBX_NOT_FINISHED;
- pmb->mbox_cmpl(phba,pmb);
- process_next = 1;
- continue; /* loop back */
- }
- } else {
- spin_unlock_irq(phba->host->host_lock);
- /* Turn on IOCB processing */
- for (i = 0; i < phba->sli.num_rings; i++)
- lpfc_sli_turn_on_ring(phba, i);
- }
-
- } while (process_next);
+ } while (1);
+ return 0;
+}
- return (0);
+static struct lpfc_dmabuf *
+lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
+{
+ struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
+
+ hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
+ if (hbq_entry == NULL)
+ return NULL;
+ list_del(&hbq_entry->dbuf.list);
+ new_hbq_entry = kmalloc(sizeof(struct hbq_dmabuf), GFP_ATOMIC);
+ if (new_hbq_entry == NULL)
+ return &hbq_entry->dbuf;
+ new_hbq_entry->dbuf = hbq_entry->dbuf;
+ new_hbq_entry->tag = -1;
+ hbq_entry->dbuf.virt = lpfc_hbq_alloc(phba, 0, &hbq_entry->dbuf.phys);
+ if (hbq_entry->dbuf.virt == NULL) {
+ kfree(new_hbq_entry);
+ return &hbq_entry->dbuf;
+ }
+ lpfc_sli_free_hbq(phba, hbq_entry);
+ return &new_hbq_entry->dbuf;
}
+
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq)
@@ -735,7 +936,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
match = 0;
irsp = &(saveq->iocb);
if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
- || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) {
+ || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)
+ || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)
+ || (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX)) {
Rctl = FC_ELS_REQ;
Type = FC_ELS_DATA;
} else {
@@ -747,13 +950,24 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* Firmware Workaround */
if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
- (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) {
+ (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
+ irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
Rctl = FC_ELS_REQ;
Type = FC_ELS_DATA;
w5p->hcsw.Rctl = Rctl;
w5p->hcsw.Type = Type;
}
}
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ if (irsp->ulpBdeCount != 0)
+ saveq->context2 = lpfc_sli_replace_hbqbuff(phba,
+ irsp->un.ulpWord[3]);
+ if (irsp->ulpBdeCount == 2)
+ saveq->context3 = lpfc_sli_replace_hbqbuff(phba,
+ irsp->un.ulpWord[15]);
+ }
+
/* unSolicited Responses */
if (pring->prt[0].profile) {
if (pring->prt[0].lpfc_sli_rcv_unsol_event)
@@ -781,23 +995,21 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* Unexpected Rctl / Type received */
/* Ring <ringno> handler: unexpected
Rctl <Rctl> Type <Type> received */
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_SLI,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"%d:0313 Ring %d handler: unexpected Rctl x%x "
- "Type x%x received \n",
+ "Type x%x received\n",
phba->brd_no,
pring->ringno,
Rctl,
Type);
}
- return(1);
+ return 1;
}
static struct lpfc_iocbq *
-lpfc_sli_iocbq_lookup(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring,
- struct lpfc_iocbq * prspiocb)
+lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *prspiocb)
{
struct lpfc_iocbq *cmd_iocb = NULL;
uint16_t iotag;
@@ -806,7 +1018,7 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba * phba,
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
- list_del(&cmd_iocb->list);
+ list_del_init(&cmd_iocb->list);
pring->txcmplq_cnt--;
return cmd_iocb;
}
@@ -821,16 +1033,18 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba * phba,
}
static int
-lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
+lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq)
{
- struct lpfc_iocbq * cmdiocbp;
+ struct lpfc_iocbq *cmdiocbp;
int rc = 1;
unsigned long iflag;
/* Based on the iotag field, get the cmd IOCB from the txcmplq */
- spin_lock_irqsave(phba->host->host_lock, iflag);
+ spin_lock_irqsave(&phba->hbalock, iflag);
cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+
if (cmdiocbp) {
if (cmdiocbp->iocb_cmpl) {
/*
@@ -846,17 +1060,8 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
saveq->iocb.un.ulpWord[4] =
IOERR_SLI_ABORTED;
}
- spin_unlock_irqrestore(phba->host->host_lock,
- iflag);
- (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
- spin_lock_irqsave(phba->host->host_lock, iflag);
- }
- else {
- spin_unlock_irqrestore(phba->host->host_lock,
- iflag);
- (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
- spin_lock_irqsave(phba->host->host_lock, iflag);
}
+ (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
} else
lpfc_sli_release_iocbq(phba, cmdiocbp);
} else {
@@ -870,29 +1075,30 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
* Ring <ringno> handler: unexpected completion IoTag
* <IoTag>
*/
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_SLI,
- "%d:0322 Ring %d handler: unexpected "
- "completion IoTag x%x Data: x%x x%x x%x x%x\n",
- phba->brd_no,
- pring->ringno,
- saveq->iocb.ulpIoTag,
- saveq->iocb.ulpStatus,
- saveq->iocb.un.ulpWord[4],
- saveq->iocb.ulpCommand,
- saveq->iocb.ulpContext);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "%d (%d):0322 Ring %d handler: "
+ "unexpected completion IoTag x%x "
+ "Data: x%x x%x x%x x%x\n",
+ phba->brd_no,
+ cmdiocbp->vport->vpi,
+ pring->ringno,
+ saveq->iocb.ulpIoTag,
+ saveq->iocb.ulpStatus,
+ saveq->iocb.un.ulpWord[4],
+ saveq->iocb.ulpCommand,
+ saveq->iocb.ulpContext);
}
}
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
return rc;
}
-static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring)
+static void
+lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
- struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
+ struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
+ &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
+ &phba->slim2p->mbx.us.s2.port[pring->ringno];
/*
* Ring <ringno> handler: portRspPut <portRspPut> is bigger than
* rsp ring <portRspMax>
@@ -904,7 +1110,7 @@ static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba,
le32_to_cpu(pgp->rspPutInx),
pring->numRiocb);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
/*
* All error attention handlers are posted to
@@ -912,16 +1118,18 @@ static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba,
*/
phba->work_ha |= HA_ERATT;
phba->work_hs = HS_FFER3;
+
+ /* hbalock should already be held */
if (phba->work_wait)
- wake_up(phba->work_wait);
+ lpfc_worker_wake_up(phba);
return;
}
-void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
+void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
{
- struct lpfc_sli * psli = &phba->sli;
- struct lpfc_sli_ring * pring = &psli->ring[LPFC_FCP_RING];
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
IOCB_t *irsp = NULL;
IOCB_t *entry = NULL;
struct lpfc_iocbq *cmdiocbq = NULL;
@@ -931,13 +1139,15 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
uint32_t portRspPut, portRspMax;
int type;
uint32_t rsp_cmpl = 0;
- void __iomem *to_slim;
uint32_t ha_copy;
+ unsigned long iflags;
pring->stats.iocb_event++;
- /* The driver assumes SLI-2 mode */
- pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
+ pgp = (phba->sli_rev == 3) ?
+ &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
+ &phba->slim2p->mbx.us.s2.port[pring->ringno];
+
/*
* The next available response entry should never exceed the maximum
@@ -952,15 +1162,13 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
rmb();
while (pring->rspidx != portRspPut) {
-
- entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
-
+ entry = lpfc_resp_iocb(phba, pring);
if (++pring->rspidx >= portRspMax)
pring->rspidx = 0;
lpfc_sli_pcimem_bcopy((uint32_t *) entry,
(uint32_t *) &rspiocbq.iocb,
- sizeof (IOCB_t));
+ phba->iocb_rsp_size);
irsp = &rspiocbq.iocb;
type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
pring->stats.iocb_rsp++;
@@ -998,8 +1206,10 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
break;
}
+ spin_lock_irqsave(&phba->hbalock, iflags);
cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
&rspiocbq);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
&rspiocbq);
@@ -1033,9 +1243,7 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
* been updated, sync the pgp->rspPutInx and fetch the new port
* response put pointer.
*/
- to_slim = phba->MBslimaddr +
- (SLIMOFF + (pring->ringno * 2) + 1) * 4;
- writeb(pring->rspidx, to_slim);
+ writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
if (pring->rspidx == portRspPut)
portRspPut = le32_to_cpu(pgp->rspPutInx);
@@ -1045,13 +1253,16 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
ha_copy >>= (LPFC_FCP_RING * 4);
if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
pring->stats.iocb_rsp_full++;
status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
writel(status, phba->CAregaddr);
readl(phba->CAregaddr);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
}
if ((ha_copy & HA_R0CE_RSP) &&
(pring->flag & LPFC_CALL_RING_AVAILABLE)) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
pring->stats.iocb_cmd_empty++;
@@ -1062,6 +1273,7 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
if ((pring->lpfc_sli_cmd_available))
(pring->lpfc_sli_cmd_available) (phba, pring);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
}
return;
@@ -1072,10 +1284,12 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
* to check it explicitly.
*/
static int
-lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring, uint32_t mask)
+lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring, uint32_t mask)
{
- struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
+ struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
+ &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
+ &phba->slim2p->mbx.us.s2.port[pring->ringno];
IOCB_t *irsp = NULL;
IOCB_t *entry = NULL;
struct lpfc_iocbq *cmdiocbq = NULL;
@@ -1086,9 +1300,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
lpfc_iocb_type type;
unsigned long iflag;
uint32_t rsp_cmpl = 0;
- void __iomem *to_slim;
- spin_lock_irqsave(phba->host->host_lock, iflag);
+ spin_lock_irqsave(&phba->hbalock, iflag);
pring->stats.iocb_event++;
/*
@@ -1099,7 +1312,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
portRspPut = le32_to_cpu(pgp->rspPutInx);
if (unlikely(portRspPut >= portRspMax)) {
lpfc_sli_rsp_pointers_error(phba, pring);
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
return 1;
}
@@ -1110,14 +1323,15 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
* structure. The copy involves a byte-swap since the
* network byte order and pci byte orders are different.
*/
- entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
+ entry = lpfc_resp_iocb(phba, pring);
+ phba->last_completion_time = jiffies;
if (++pring->rspidx >= portRspMax)
pring->rspidx = 0;
lpfc_sli_pcimem_bcopy((uint32_t *) entry,
(uint32_t *) &rspiocbq.iocb,
- sizeof (IOCB_t));
+ phba->iocb_rsp_size);
INIT_LIST_HEAD(&(rspiocbq.list));
irsp = &rspiocbq.iocb;
@@ -1126,16 +1340,30 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
rsp_cmpl++;
if (unlikely(irsp->ulpStatus)) {
+ /*
+ * If resource errors reported from HBA, reduce
+ * queuedepths of the SCSI device.
+ */
+ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ lpfc_adjust_queue_depth(phba);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ }
+
/* Rsp ring <ringno> error: IOCB */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "%d:0336 Rsp Ring %d error: IOCB Data: "
- "x%x x%x x%x x%x x%x x%x x%x x%x\n",
- phba->brd_no, pring->ringno,
- irsp->un.ulpWord[0], irsp->un.ulpWord[1],
- irsp->un.ulpWord[2], irsp->un.ulpWord[3],
- irsp->un.ulpWord[4], irsp->un.ulpWord[5],
- *(((uint32_t *) irsp) + 6),
- *(((uint32_t *) irsp) + 7));
+ "%d:0336 Rsp Ring %d error: IOCB Data: "
+ "x%x x%x x%x x%x x%x x%x x%x x%x\n",
+ phba->brd_no, pring->ringno,
+ irsp->un.ulpWord[0],
+ irsp->un.ulpWord[1],
+ irsp->un.ulpWord[2],
+ irsp->un.ulpWord[3],
+ irsp->un.ulpWord[4],
+ irsp->un.ulpWord[5],
+ *(((uint32_t *) irsp) + 6),
+ *(((uint32_t *) irsp) + 7));
}
switch (type) {
@@ -1149,7 +1377,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0333 IOCB cmd 0x%x"
" processed. Skipping"
- " completion\n", phba->brd_no,
+ " completion\n",
+ phba->brd_no,
irsp->ulpCommand);
break;
}
@@ -1161,19 +1390,19 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
&rspiocbq);
} else {
- spin_unlock_irqrestore(
- phba->host->host_lock, iflag);
+ spin_unlock_irqrestore(&phba->hbalock,
+ iflag);
(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
&rspiocbq);
- spin_lock_irqsave(phba->host->host_lock,
+ spin_lock_irqsave(&phba->hbalock,
iflag);
}
}
break;
case LPFC_UNSOL_IOCB:
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
- spin_lock_irqsave(phba->host->host_lock, iflag);
+ spin_lock_irqsave(&phba->hbalock, iflag);
break;
default:
if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
@@ -1186,11 +1415,13 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
} else {
/* Unknown IOCB command */
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "%d:0334 Unknown IOCB command "
- "Data: x%x, x%x x%x x%x x%x\n",
- phba->brd_no, type, irsp->ulpCommand,
- irsp->ulpStatus, irsp->ulpIoTag,
- irsp->ulpContext);
+ "%d:0334 Unknown IOCB command "
+ "Data: x%x, x%x x%x x%x x%x\n",
+ phba->brd_no, type,
+ irsp->ulpCommand,
+ irsp->ulpStatus,
+ irsp->ulpIoTag,
+ irsp->ulpContext);
}
break;
}
@@ -1201,9 +1432,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
* been updated, sync the pgp->rspPutInx and fetch the new port
* response put pointer.
*/
- to_slim = phba->MBslimaddr +
- (SLIMOFF + (pring->ringno * 2) + 1) * 4;
- writel(pring->rspidx, to_slim);
+ writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
if (pring->rspidx == portRspPut)
portRspPut = le32_to_cpu(pgp->rspPutInx);
@@ -1228,31 +1457,31 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
}
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
return rc;
}
-
int
-lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring, uint32_t mask)
+lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring, uint32_t mask)
{
+ struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
+ &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
+ &phba->slim2p->mbx.us.s2.port[pring->ringno];
IOCB_t *entry;
IOCB_t *irsp = NULL;
struct lpfc_iocbq *rspiocbp = NULL;
struct lpfc_iocbq *next_iocb;
struct lpfc_iocbq *cmdiocbp;
struct lpfc_iocbq *saveq;
- struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
uint8_t iocb_cmd_type;
lpfc_iocb_type type;
uint32_t status, free_saveq;
uint32_t portRspPut, portRspMax;
int rc = 1;
unsigned long iflag;
- void __iomem *to_slim;
- spin_lock_irqsave(phba->host->host_lock, iflag);
+ spin_lock_irqsave(&phba->hbalock, iflag);
pring->stats.iocb_event++;
/*
@@ -1266,16 +1495,14 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
* Ring <ringno> handler: portRspPut <portRspPut> is bigger than
* rsp ring <portRspMax>
*/
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0303 Ring %d handler: portRspPut %d "
"is bigger then rsp ring %d\n",
- phba->brd_no,
- pring->ringno, portRspPut, portRspMax);
+ phba->brd_no, pring->ringno, portRspPut,
+ portRspMax);
- phba->hba_state = LPFC_HBA_ERROR;
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ phba->link_state = LPFC_HBA_ERROR;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
phba->work_hs = HS_FFER3;
lpfc_handle_eratt(phba);
@@ -1298,23 +1525,24 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
* the ulpLe field is set, the entire Command has been
* received.
*/
- entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
- rspiocbp = lpfc_sli_get_iocbq(phba);
+ entry = lpfc_resp_iocb(phba, pring);
+
+ phba->last_completion_time = jiffies;
+ rspiocbp = __lpfc_sli_get_iocbq(phba);
if (rspiocbp == NULL) {
printk(KERN_ERR "%s: out of buffers! Failing "
"completion.\n", __FUNCTION__);
break;
}
- lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof (IOCB_t));
+ lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
+ phba->iocb_rsp_size);
irsp = &rspiocbp->iocb;
if (++pring->rspidx >= portRspMax)
pring->rspidx = 0;
- to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2)
- + 1) * 4;
- writel(pring->rspidx, to_slim);
+ writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
if (list_empty(&(pring->iocb_continueq))) {
list_add(&rspiocbp->list, &(pring->iocb_continueq));
@@ -1338,23 +1566,44 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
pring->stats.iocb_rsp++;
+ /*
+ * If resource errors reported from HBA, reduce
+ * queuedepths of the SCSI device.
+ */
+ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ lpfc_adjust_queue_depth(phba);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ }
+
if (irsp->ulpStatus) {
/* Rsp ring <ringno> error: IOCB */
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_SLI,
- "%d:0328 Rsp Ring %d error: IOCB Data: "
- "x%x x%x x%x x%x x%x x%x x%x x%x\n",
- phba->brd_no,
- pring->ringno,
- irsp->un.ulpWord[0],
- irsp->un.ulpWord[1],
- irsp->un.ulpWord[2],
- irsp->un.ulpWord[3],
- irsp->un.ulpWord[4],
- irsp->un.ulpWord[5],
- *(((uint32_t *) irsp) + 6),
- *(((uint32_t *) irsp) + 7));
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "%d:0328 Rsp Ring %d error: "
+ "IOCB Data: "
+ "x%x x%x x%x x%x "
+ "x%x x%x x%x x%x "
+ "x%x x%x x%x x%x "
+ "x%x x%x x%x x%x\n",
+ phba->brd_no,
+ pring->ringno,
+ irsp->un.ulpWord[0],
+ irsp->un.ulpWord[1],
+ irsp->un.ulpWord[2],
+ irsp->un.ulpWord[3],
+ irsp->un.ulpWord[4],
+ irsp->un.ulpWord[5],
+ *(((uint32_t *) irsp) + 6),
+ *(((uint32_t *) irsp) + 7),
+ *(((uint32_t *) irsp) + 8),
+ *(((uint32_t *) irsp) + 9),
+ *(((uint32_t *) irsp) + 10),
+ *(((uint32_t *) irsp) + 11),
+ *(((uint32_t *) irsp) + 12),
+ *(((uint32_t *) irsp) + 13),
+ *(((uint32_t *) irsp) + 14),
+ *(((uint32_t *) irsp) + 15));
}
/*
@@ -1366,17 +1615,17 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
if (type == LPFC_SOL_IOCB) {
- spin_unlock_irqrestore(phba->host->host_lock,
+ spin_unlock_irqrestore(&phba->hbalock,
iflag);
rc = lpfc_sli_process_sol_iocb(phba, pring,
- saveq);
- spin_lock_irqsave(phba->host->host_lock, iflag);
+ saveq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
} else if (type == LPFC_UNSOL_IOCB) {
- spin_unlock_irqrestore(phba->host->host_lock,
+ spin_unlock_irqrestore(&phba->hbalock,
iflag);
rc = lpfc_sli_process_unsol_iocb(phba, pring,
- saveq);
- spin_lock_irqsave(phba->host->host_lock, iflag);
+ saveq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
} else if (type == LPFC_ABORT_IOCB) {
if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
((cmdiocbp =
@@ -1386,15 +1635,15 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
routine */
if (cmdiocbp->iocb_cmpl) {
spin_unlock_irqrestore(
- phba->host->host_lock,
+ &phba->hbalock,
iflag);
(cmdiocbp->iocb_cmpl) (phba,
cmdiocbp, saveq);
spin_lock_irqsave(
- phba->host->host_lock,
+ &phba->hbalock,
iflag);
} else
- lpfc_sli_release_iocbq(phba,
+ __lpfc_sli_release_iocbq(phba,
cmdiocbp);
}
} else if (type == LPFC_UNKNOWN_IOCB) {
@@ -1411,32 +1660,28 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
phba->brd_no, adaptermsg);
} else {
/* Unknown IOCB command */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_SLI,
- "%d:0335 Unknown IOCB command "
- "Data: x%x x%x x%x x%x\n",
- phba->brd_no,
- irsp->ulpCommand,
- irsp->ulpStatus,
- irsp->ulpIoTag,
- irsp->ulpContext);
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "%d:0335 Unknown IOCB "
+ "command Data: x%x "
+ "x%x x%x x%x\n",
+ phba->brd_no,
+ irsp->ulpCommand,
+ irsp->ulpStatus,
+ irsp->ulpIoTag,
+ irsp->ulpContext);
}
}
if (free_saveq) {
- if (!list_empty(&saveq->list)) {
- list_for_each_entry_safe(rspiocbp,
- next_iocb,
- &saveq->list,
- list) {
- list_del(&rspiocbp->list);
- lpfc_sli_release_iocbq(phba,
- rspiocbp);
- }
+ list_for_each_entry_safe(rspiocbp, next_iocb,
+ &saveq->list, list) {
+ list_del(&rspiocbp->list);
+ __lpfc_sli_release_iocbq(phba,
+ rspiocbp);
}
- lpfc_sli_release_iocbq(phba, saveq);
+ __lpfc_sli_release_iocbq(phba, saveq);
}
+ rspiocbp = NULL;
}
/*
@@ -1449,7 +1694,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
}
} /* while (pring->rspidx != portRspPut) */
- if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) {
+ if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
/* At least one response entry has been freed */
pring->stats.iocb_rsp_full++;
/* SET RxRE_RSP in Chip Att register */
@@ -1470,24 +1715,25 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
}
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
return rc;
}
-int
+void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
LIST_HEAD(completions);
struct lpfc_iocbq *iocb, *next_iocb;
IOCB_t *cmd = NULL;
- int errcnt;
- errcnt = 0;
+ if (pring->ringno == LPFC_ELS_RING) {
+ lpfc_fabric_abort_hba(phba);
+ }
/* Error everything on txq and txcmplq
* First do the txq.
*/
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
list_splice_init(&pring->txq, &completions);
pring->txq_cnt = 0;
@@ -1495,26 +1741,25 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
lpfc_sli_issue_abort_iotag(phba, pring, iocb);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
cmd = &iocb->iocb;
- list_del(&iocb->list);
+ list_del_init(&iocb->list);
- if (iocb->iocb_cmpl) {
+ if (!iocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, iocb);
+ else {
cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
(iocb->iocb_cmpl) (phba, iocb, iocb);
- } else
- lpfc_sli_release_iocbq(phba, iocb);
+ }
}
-
- return errcnt;
}
int
-lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
+lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
uint32_t status;
int i = 0;
@@ -1541,7 +1786,8 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
msleep(2500);
if (i == 15) {
- phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */
+ /* Do post */
+ phba->pport->port_state = LPFC_VPORT_UNKNOWN;
lpfc_sli_brdrestart(phba);
}
/* Read the HBA Host Status Register */
@@ -1550,7 +1796,7 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
/* Check to see if any errors occurred during init */
if ((status & HS_FFERM) || (i >= 20)) {
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
retval = 1;
}
@@ -1559,7 +1805,7 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
#define BARRIER_TEST_PATTERN (0xdeadbeef)
-void lpfc_reset_barrier(struct lpfc_hba * phba)
+void lpfc_reset_barrier(struct lpfc_hba *phba)
{
uint32_t __iomem *resp_buf;
uint32_t __iomem *mbox_buf;
@@ -1584,12 +1830,12 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
hc_copy = readl(phba->HCregaddr);
writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- phba->fc_flag |= FC_IGNORE_ERATT;
+ phba->link_flag |= LS_IGNORE_ERATT;
if (readl(phba->HAregaddr) & HA_ERATT) {
/* Clear Chip error bit */
writel(HA_ERATT, phba->HAregaddr);
- phba->stopped = 1;
+ phba->pport->stopped = 1;
}
mbox = 0;
@@ -1606,7 +1852,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
- phba->stopped)
+ phba->pport->stopped)
goto restore_hc;
else
goto clear_errat;
@@ -1623,17 +1869,17 @@ clear_errat:
if (readl(phba->HAregaddr) & HA_ERATT) {
writel(HA_ERATT, phba->HAregaddr);
- phba->stopped = 1;
+ phba->pport->stopped = 1;
}
restore_hc:
- phba->fc_flag &= ~FC_IGNORE_ERATT;
+ phba->link_flag &= ~LS_IGNORE_ERATT;
writel(hc_copy, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
}
int
-lpfc_sli_brdkill(struct lpfc_hba * phba)
+lpfc_sli_brdkill(struct lpfc_hba *phba)
{
struct lpfc_sli *psli;
LPFC_MBOXQ_t *pmb;
@@ -1645,26 +1891,22 @@ lpfc_sli_brdkill(struct lpfc_hba * phba)
psli = &phba->sli;
/* Kill HBA */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_SLI,
- "%d:0329 Kill HBA Data: x%x x%x\n",
- phba->brd_no,
- phba->hba_state,
- psli->sli_flag);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "%d:0329 Kill HBA Data: x%x x%x\n",
+ phba->brd_no, phba->pport->port_state, psli->sli_flag);
if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL)) == 0)
return 1;
/* Disable the error attention */
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
status = readl(phba->HCregaddr);
status &= ~HC_ERINT_ENA;
writel(status, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- phba->fc_flag |= FC_IGNORE_ERATT;
- spin_unlock_irq(phba->host->host_lock);
+ phba->link_flag |= LS_IGNORE_ERATT;
+ spin_unlock_irq(&phba->hbalock);
lpfc_kill_board(phba, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -1673,9 +1915,9 @@ lpfc_sli_brdkill(struct lpfc_hba * phba)
if (retval != MBX_SUCCESS) {
if (retval != MBX_BUSY)
mempool_free(pmb, phba->mbox_mem_pool);
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_IGNORE_ERATT;
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag &= ~LS_IGNORE_ERATT;
+ spin_unlock_irq(&phba->hbalock);
return 1;
}
@@ -1698,22 +1940,22 @@ lpfc_sli_brdkill(struct lpfc_hba * phba)
del_timer_sync(&psli->mbox_tmo);
if (ha_copy & HA_ERATT) {
writel(HA_ERATT, phba->HAregaddr);
- phba->stopped = 1;
+ phba->pport->stopped = 1;
}
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- phba->fc_flag &= ~FC_IGNORE_ERATT;
- spin_unlock_irq(phba->host->host_lock);
+ phba->link_flag &= ~LS_IGNORE_ERATT;
+ spin_unlock_irq(&phba->hbalock);
psli->mbox_active = NULL;
lpfc_hba_down_post(phba);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
- return (ha_copy & HA_ERATT ? 0 : 1);
+ return ha_copy & HA_ERATT ? 0 : 1;
}
int
-lpfc_sli_brdreset(struct lpfc_hba * phba)
+lpfc_sli_brdreset(struct lpfc_hba *phba)
{
struct lpfc_sli *psli;
struct lpfc_sli_ring *pring;
@@ -1725,12 +1967,12 @@ lpfc_sli_brdreset(struct lpfc_hba * phba)
/* Reset HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0325 Reset HBA Data: x%x x%x\n", phba->brd_no,
- phba->hba_state, psli->sli_flag);
+ phba->pport->port_state, psli->sli_flag);
/* perform board reset */
phba->fc_eventTag = 0;
- phba->fc_myDID = 0;
- phba->fc_prevDID = 0;
+ phba->pport->fc_myDID = 0;
+ phba->pport->fc_prevDID = 0;
/* Turn off parity checking and serr during the physical reset */
pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
@@ -1760,12 +2002,12 @@ lpfc_sli_brdreset(struct lpfc_hba * phba)
pring->missbufcnt = 0;
}
- phba->hba_state = LPFC_WARM_START;
+ phba->link_state = LPFC_WARM_START;
return 0;
}
int
-lpfc_sli_brdrestart(struct lpfc_hba * phba)
+lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
MAILBOX_t *mb;
struct lpfc_sli *psli;
@@ -1773,14 +2015,14 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
volatile uint32_t word0;
void __iomem *to_slim;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
psli = &phba->sli;
/* Restart HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no,
- phba->hba_state, psli->sli_flag);
+ phba->pport->port_state, psli->sli_flag);
word0 = 0;
mb = (MAILBOX_t *) &word0;
@@ -1794,7 +2036,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
readl(to_slim); /* flush */
/* Only skip post after fc_ffinit is completed */
- if (phba->hba_state) {
+ if (phba->pport->port_state) {
skip_post = 1;
word0 = 1; /* This is really setting up word1 */
} else {
@@ -1806,10 +2048,10 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
readl(to_slim); /* flush */
lpfc_sli_brdreset(phba);
- phba->stopped = 0;
- phba->hba_state = LPFC_INIT_START;
+ phba->pport->stopped = 0;
+ phba->link_state = LPFC_INIT_START;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
psli->stats_start = get_seconds();
@@ -1843,14 +2085,11 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
if (i++ >= 20) {
/* Adapter failed to init, timeout, status reg
<status> */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0436 Adapter failed to init, "
"timeout, status reg x%x\n",
- phba->brd_no,
- status);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->brd_no, status);
+ phba->link_state = LPFC_HBA_ERROR;
return -ETIMEDOUT;
}
@@ -1859,14 +2098,12 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
/* ERROR: During chipset initialization */
/* Adapter failed to init, chipset, status reg
<status> */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0437 Adapter failed to init, "
"chipset, status reg x%x\n",
phba->brd_no,
status);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
return -EIO;
}
@@ -1879,7 +2116,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
}
if (i == 15) {
- phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */
+ /* Do post */
+ phba->pport->port_state = LPFC_VPORT_UNKNOWN;
lpfc_sli_brdrestart(phba);
}
/* Read the HBA Host Status Register */
@@ -1890,14 +2128,12 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
if (status & HS_FFERM) {
/* ERROR: During chipset initialization */
/* Adapter failed to init, chipset, status reg <status> */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0438 Adapter failed to init, chipset, "
"status reg x%x\n",
phba->brd_no,
status);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
return -EIO;
}
@@ -1911,80 +2147,253 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
return 0;
}
+static int
+lpfc_sli_hbq_count(void)
+{
+ return ARRAY_SIZE(lpfc_hbq_defs);
+}
+
+static int
+lpfc_sli_hbq_entry_count(void)
+{
+ int hbq_count = lpfc_sli_hbq_count();
+ int count = 0;
+ int i;
+
+ for (i = 0; i < hbq_count; ++i)
+ count += lpfc_hbq_defs[i]->entry_count;
+ return count;
+}
+
int
-lpfc_sli_hba_setup(struct lpfc_hba * phba)
+lpfc_sli_hbq_size(void)
+{
+ return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
+}
+
+static int
+lpfc_sli_hbq_setup(struct lpfc_hba *phba)
+{
+ int hbq_count = lpfc_sli_hbq_count();
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *pmbox;
+ uint32_t hbqno;
+ uint32_t hbq_entry_index;
+
+ /* Get a Mailbox buffer to setup mailbox
+ * commands for HBA initialization
+ */
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+ if (!pmb)
+ return -ENOMEM;
+
+ pmbox = &pmb->mb;
+
+ /* Initialize the struct lpfc_sli_hbq structure for each hbq */
+ phba->link_state = LPFC_INIT_MBX_CMDS;
+
+ hbq_entry_index = 0;
+ for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
+ phba->hbqs[hbqno].next_hbqPutIdx = 0;
+ phba->hbqs[hbqno].hbqPutIdx = 0;
+ phba->hbqs[hbqno].local_hbqGetIdx = 0;
+ phba->hbqs[hbqno].entry_count =
+ lpfc_hbq_defs[hbqno]->entry_count;
+ lpfc_config_hbq(phba, lpfc_hbq_defs[hbqno], hbq_entry_index,
+ pmb);
+ hbq_entry_index += phba->hbqs[hbqno].entry_count;
+
+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+ /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
+ mbxStatus <status>, ring <num> */
+
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_SLI | LOG_VPORT,
+ "%d:1805 Adapter failed to init. "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, pmbox->mbxCommand,
+ pmbox->mbxStatus, hbqno);
+
+ phba->link_state = LPFC_HBA_ERROR;
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return ENXIO;
+ }
+ }
+ phba->hbq_count = hbq_count;
+
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* Initially populate or replenish the HBQs */
+ for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
+ if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno))
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int
+lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
{
LPFC_MBOXQ_t *pmb;
uint32_t resetcount = 0, rc = 0, done = 0;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
return -ENOMEM;
}
+ phba->sli_rev = sli_mode;
while (resetcount < 2 && !done) {
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
- phba->hba_state = LPFC_STATE_UNKNOWN;
+ spin_unlock_irq(&phba->hbalock);
+ phba->pport->port_state = LPFC_VPORT_UNKNOWN;
lpfc_sli_brdrestart(phba);
msleep(2500);
rc = lpfc_sli_chipset_init(phba);
if (rc)
break;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
resetcount++;
- /* Call pre CONFIG_PORT mailbox command initialization. A value of 0
- * means the call was successful. Any other nonzero value is a failure,
- * but if ERESTART is returned, the driver may reset the HBA and try
- * again.
- */
+ /* Call pre CONFIG_PORT mailbox command initialization. A
+ * value of 0 means the call was successful. Any other
+ * nonzero value is a failure, but if ERESTART is returned,
+ * the driver may reset the HBA and try again.
+ */
rc = lpfc_config_port_prep(phba);
if (rc == -ERESTART) {
- phba->hba_state = 0;
+ phba->link_state = LPFC_LINK_UNKNOWN;
continue;
} else if (rc) {
break;
}
- phba->hba_state = LPFC_INIT_MBX_CMDS;
+ phba->link_state = LPFC_INIT_MBX_CMDS;
lpfc_config_port(phba, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
- if (rc == MBX_SUCCESS)
- done = 1;
- else {
+ if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0442 Adapter failed to init, mbxCmd x%x "
"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
phba->brd_no, pmb->mb.mbxCommand,
pmb->mb.mbxStatus, 0);
+ spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+ rc = -ENXIO;
+ } else {
+ done = 1;
+ phba->max_vpi = (phba->max_vpi &&
+ pmb->mb.un.varCfgPort.gmv) != 0
+ ? pmb->mb.un.varCfgPort.max_vpi
+ : 0;
+ }
+ }
+
+ if (!done) {
+ rc = -EINVAL;
+ goto do_prep_failed;
+ }
+
+ if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
+ (!pmb->mb.un.varCfgPort.cMA)) {
+ rc = -ENXIO;
+ goto do_prep_failed;
+ }
+ return rc;
+
+do_prep_failed:
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return rc;
+}
+
+int
+lpfc_sli_hba_setup(struct lpfc_hba *phba)
+{
+ uint32_t rc;
+ int mode = 3;
+
+ switch (lpfc_sli_mode) {
+ case 2:
+ if (phba->cfg_npiv_enable) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
+ "%d:1824 NPIV enabled: Override lpfc_sli_mode "
+ "parameter (%d) to auto (0).\n",
+ phba->brd_no, lpfc_sli_mode);
+ break;
}
+ mode = 2;
+ break;
+ case 0:
+ case 3:
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
+ "%d:1819 Unrecognized lpfc_sli_mode "
+ "parameter: %d.\n",
+ phba->brd_no, lpfc_sli_mode);
+
+ break;
}
- if (!done)
+
+ rc = lpfc_do_config_port(phba, mode);
+ if (rc && lpfc_sli_mode == 3)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
+ "%d:1820 Unable to select SLI-3. "
+ "Not supported by adapter.\n",
+ phba->brd_no);
+ if (rc && mode != 2)
+ rc = lpfc_do_config_port(phba, 2);
+ if (rc)
goto lpfc_sli_hba_setup_error;
- rc = lpfc_sli_ring_map(phba, pmb);
+ if (phba->sli_rev == 3) {
+ phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
+ phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
+ phba->sli3_options |= LPFC_SLI3_ENABLED;
+ phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
+
+ } else {
+ phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
+ phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
+ phba->sli3_options = 0;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "%d:0444 Firmware in SLI %x mode. Max_vpi %d\n",
+ phba->brd_no, phba->sli_rev, phba->max_vpi);
+ rc = lpfc_sli_ring_map(phba);
if (rc)
goto lpfc_sli_hba_setup_error;
+ /* Init HBQs */
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ rc = lpfc_sli_hbq_setup(phba);
+ if (rc)
+ goto lpfc_sli_hba_setup_error;
+ }
+
phba->sli.sli_flag |= LPFC_PROCESS_LA;
rc = lpfc_config_port_post(phba);
if (rc)
goto lpfc_sli_hba_setup_error;
- goto lpfc_sli_hba_setup_exit;
+ return rc;
+
lpfc_sli_hba_setup_error:
- phba->hba_state = LPFC_HBA_ERROR;
-lpfc_sli_hba_setup_exit:
- mempool_free(pmb, phba->mbox_mem_pool);
+ phba->link_state = LPFC_HBA_ERROR;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "%d:0445 Firmware initialization failed\n",
+ phba->brd_no);
return rc;
}
@@ -2004,56 +2413,58 @@ lpfc_sli_hba_setup_exit:
void
lpfc_mbox_timeout(unsigned long ptr)
{
- struct lpfc_hba *phba;
+ struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
unsigned long iflag;
+ uint32_t tmo_posted;
- phba = (struct lpfc_hba *)ptr;
- spin_lock_irqsave(phba->host->host_lock, iflag);
- if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
- phba->work_hba_events |= WORKER_MBOX_TMO;
+ spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
+ tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
+ if (!tmo_posted)
+ phba->pport->work_port_events |= WORKER_MBOX_TMO;
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+
+ if (!tmo_posted) {
+ spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->work_wait)
- wake_up(phba->work_wait);
+ lpfc_worker_wake_up(phba);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
}
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
}
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
- LPFC_MBOXQ_t *pmbox;
- MAILBOX_t *mb;
+ LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
+ MAILBOX_t *mb = &pmbox->mb;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
- spin_lock_irq(phba->host->host_lock);
- if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
- spin_unlock_irq(phba->host->host_lock);
+ if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
return;
}
- pmbox = phba->sli.mbox_active;
- mb = &pmbox->mb;
-
/* Mbox cmd <mbxCommand> timeout */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_MBOX | LOG_SLI,
- "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
- phba->brd_no,
- mb->mbxCommand,
- phba->hba_state,
- phba->sli.sli_flag,
- phba->sli.mbox_active);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "%d:0310 Mailbox command x%x timeout Data: x%x x%x "
+ "x%p\n",
+ phba->brd_no,
+ mb->mbxCommand,
+ phba->pport->port_state,
+ phba->sli.sli_flag,
+ phba->sli.mbox_active);
/* Setting state unknown so lpfc_sli_abort_iocb_ring
* would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
	 * it to fail all outstanding SCSI IO.
*/
- phba->hba_state = LPFC_STATE_UNKNOWN;
- phba->work_hba_events &= ~WORKER_MBOX_TMO;
- phba->fc_flag |= FC_ESTABLISH_LINK;
+ spin_lock_irq(&phba->pport->work_port_lock);
+ phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+ spin_unlock_irq(&phba->pport->work_port_lock);
+ spin_lock_irq(&phba->hbalock);
+ phba->link_state = LPFC_LINK_UNKNOWN;
+ phba->pport->fc_flag |= FC_ESTABLISH_LINK;
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
pring = &psli->ring[psli->fcp_ring];
lpfc_sli_abort_iocb_ring(phba, pring);
@@ -2075,10 +2486,10 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
}
int
-lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
+lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
MAILBOX_t *mb;
- struct lpfc_sli *psli;
+ struct lpfc_sli *psli = &phba->sli;
uint32_t status, evtctr;
uint32_t ha_copy;
int i;
@@ -2086,31 +2497,44 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
volatile uint32_t word0, ldata;
void __iomem *to_slim;
+ if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
+ pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
+ if(!pmbox->vport) {
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_MBOX | LOG_VPORT,
+ "%d:1806 Mbox x%x failed. No vport\n",
+ phba->brd_no,
+ pmbox->mb.mbxCommand);
+ dump_stack();
+ return MBXERR_ERROR;
+ }
+ }
+
+
/* If the PCI channel is in offline state, do not post mbox. */
if (unlikely(pci_channel_offline(phba->pcidev)))
return MBX_NOT_FINISHED;
+ spin_lock_irqsave(&phba->hbalock, drvr_flag);
psli = &phba->sli;
- spin_lock_irqsave(phba->host->host_lock, drvr_flag);
-
mb = &pmbox->mb;
status = MBX_SUCCESS;
- if (phba->hba_state == LPFC_HBA_ERROR) {
- spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
+ if (phba->link_state == LPFC_HBA_ERROR) {
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
- LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
- return (MBX_NOT_FINISHED);
+ LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
+ return MBX_NOT_FINISHED;
}
if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
!(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
- spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
- LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
- return (MBX_NOT_FINISHED);
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
+ return MBX_NOT_FINISHED;
}
if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
@@ -2120,20 +2544,18 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
*/
if (flag & MBX_POLL) {
- spin_unlock_irqrestore(phba->host->host_lock,
- drvr_flag);
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
- LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
- return (MBX_NOT_FINISHED);
+ LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+ return MBX_NOT_FINISHED;
}
if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
- spin_unlock_irqrestore(phba->host->host_lock,
- drvr_flag);
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
- LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
- return (MBX_NOT_FINISHED);
+ LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+ return MBX_NOT_FINISHED;
}
/* Handle STOP IOCB processing flag. This is only meaningful
@@ -2157,21 +2579,33 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
lpfc_mbox_put(phba, pmbox);
/* Mbox cmd issue - BUSY */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_MBOX | LOG_SLI,
- "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n",
- phba->brd_no,
- mb->mbxCommand,
- phba->hba_state,
- psli->sli_flag,
- flag);
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "%d (%d):0308 Mbox cmd issue - BUSY Data: "
+ "x%x x%x x%x x%x\n",
+ phba->brd_no,
+ pmbox->vport ? pmbox->vport->vpi : 0xffffff,
+ mb->mbxCommand, phba->pport->port_state,
+ psli->sli_flag, flag);
psli->slistat.mbox_busy++;
- spin_unlock_irqrestore(phba->host->host_lock,
- drvr_flag);
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+
+ if (pmbox->vport) {
+ lpfc_debugfs_disc_trc(pmbox->vport,
+ LPFC_DISC_TRC_MBOX_VPORT,
+ "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
+ (uint32_t)mb->mbxCommand,
+ mb->un.varWords[0], mb->un.varWords[1]);
+ }
+ else {
+ lpfc_debugfs_disc_trc(phba->pport,
+ LPFC_DISC_TRC_MBOX,
+ "MBOX Bsy: cmd:x%x mb:x%x x%x",
+ (uint32_t)mb->mbxCommand,
+ mb->un.varWords[0], mb->un.varWords[1]);
+ }
- return (MBX_BUSY);
+ return MBX_BUSY;
}
/* Handle STOP IOCB processing flag. This is only meaningful
@@ -2198,11 +2632,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
(mb->mbxCommand != MBX_KILL_BOARD)) {
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- spin_unlock_irqrestore(phba->host->host_lock,
- drvr_flag);
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
- LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
- return (MBX_NOT_FINISHED);
+ LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+ return MBX_NOT_FINISHED;
}
/* timeout active mbox command */
mod_timer(&psli->mbox_tmo, (jiffies +
@@ -2210,15 +2643,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
}
/* Mailbox cmd <cmd> issue */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_MBOX | LOG_SLI,
- "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n",
- phba->brd_no,
- mb->mbxCommand,
- phba->hba_state,
- psli->sli_flag,
- flag);
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "%d (%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
+ "x%x\n",
+ phba->brd_no, pmbox->vport ? pmbox->vport->vpi : 0,
+ mb->mbxCommand, phba->pport->port_state,
+ psli->sli_flag, flag);
+
+ if (mb->mbxCommand != MBX_HEARTBEAT) {
+ if (pmbox->vport) {
+ lpfc_debugfs_disc_trc(pmbox->vport,
+ LPFC_DISC_TRC_MBOX_VPORT,
+ "MBOX Send vport: cmd:x%x mb:x%x x%x",
+ (uint32_t)mb->mbxCommand,
+ mb->un.varWords[0], mb->un.varWords[1]);
+ }
+ else {
+ lpfc_debugfs_disc_trc(phba->pport,
+ LPFC_DISC_TRC_MBOX,
+ "MBOX Send: cmd:x%x mb:x%x x%x",
+ (uint32_t)mb->mbxCommand,
+ mb->un.varWords[0], mb->un.varWords[1]);
+ }
+ }
psli->slistat.mbox_cmd++;
evtctr = psli->slistat.mbox_event;
@@ -2233,7 +2680,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
if (mb->mbxCommand == MBX_CONFIG_PORT) {
/* copy command data into host mbox for cmpl */
lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
- MAILBOX_CMD_SIZE);
+ MAILBOX_CMD_SIZE);
}
/* First copy mbox command data to HBA SLIM, skip past first
@@ -2285,12 +2732,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
/* Wait for command to complete */
while (((word0 & OWN_CHIP) == OWN_CHIP) ||
(!(ha_copy & HA_MBATT) &&
- (phba->hba_state > LPFC_WARM_START))) {
+ (phba->link_state > LPFC_WARM_START))) {
if (i-- <= 0) {
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- spin_unlock_irqrestore(phba->host->host_lock,
+ spin_unlock_irqrestore(&phba->hbalock,
drvr_flag);
- return (MBX_NOT_FINISHED);
+ return MBX_NOT_FINISHED;
}
/* Check if we took a mbox interrupt while we were
@@ -2299,12 +2746,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
&& (evtctr != psli->slistat.mbox_event))
break;
- spin_unlock_irqrestore(phba->host->host_lock,
+ spin_unlock_irqrestore(&phba->hbalock,
drvr_flag);
msleep(1);
- spin_lock_irqsave(phba->host->host_lock, drvr_flag);
+ spin_lock_irqsave(&phba->hbalock, drvr_flag);
if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
/* First copy command data */
@@ -2335,7 +2782,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
/* copy results back to user */
lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
- MAILBOX_CMD_SIZE);
+ MAILBOX_CMD_SIZE);
} else {
/* First copy command data */
lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
@@ -2355,23 +2802,25 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
status = mb->mbxStatus;
}
- spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
- return (status);
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ return status;
}
-static int
-lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
- struct lpfc_iocbq * piocb)
+/*
+ * Caller needs to hold lock.
+ */
+static void
+__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *piocb)
{
/* Insert the caller's iocb in the txq tail for later processing. */
list_add_tail(&piocb->list, &pring->txq);
pring->txq_cnt++;
- return (0);
}
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
- struct lpfc_iocbq ** piocb)
+ struct lpfc_iocbq **piocb)
{
struct lpfc_iocbq * nextiocb;
@@ -2384,13 +2833,29 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return nextiocb;
}
+/*
+ * Lockless version of lpfc_sli_issue_iocb.
+ */
int
-lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_iocbq *nextiocb;
IOCB_t *iocb;
+ if (piocb->iocb_cmpl && (!piocb->vport) &&
+ (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
+ (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_SLI | LOG_VPORT,
+ "%d:1807 IOCB x%x failed. No vport\n",
+ phba->brd_no,
+ piocb->iocb.ulpCommand);
+ dump_stack();
+ return IOCB_ERROR;
+ }
+
+
/* If the PCI channel is in offline state, do not post iocbs. */
if (unlikely(pci_channel_offline(phba->pcidev)))
return IOCB_ERROR;
@@ -2398,7 +2863,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/*
* We should never get an IOCB if we are in a < LINK_DOWN state
*/
- if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
+ if (unlikely(phba->link_state < LPFC_LINK_DOWN))
return IOCB_ERROR;
/*
@@ -2408,7 +2873,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
goto iocb_busy;
- if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
+ if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
/*
* Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
* can be issued if the link is not up.
@@ -2436,8 +2901,9 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* attention events.
*/
} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
- !(phba->sli.sli_flag & LPFC_PROCESS_LA)))
+ !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
goto iocb_busy;
+ }
while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
(nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
@@ -2459,13 +2925,28 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
out_busy:
if (!(flag & SLI_IOCB_RET_IOCB)) {
- lpfc_sli_ringtx_put(phba, pring, piocb);
+ __lpfc_sli_ringtx_put(phba, pring, piocb);
return IOCB_SUCCESS;
}
return IOCB_BUSY;
}
+
+int
+lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ unsigned long iflags;
+ int rc;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ return rc;
+}
+
static int
lpfc_extra_ring_setup( struct lpfc_hba *phba)
{
@@ -2504,7 +2985,7 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
- int i, totiocb = 0;
+ int i, totiocbsize = 0;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
@@ -2529,6 +3010,12 @@ lpfc_sli_setup(struct lpfc_hba *phba)
pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+ pring->sizeCiocb = (phba->sli_rev == 3) ?
+ SLI3_IOCB_CMD_SIZE :
+ SLI2_IOCB_CMD_SIZE;
+ pring->sizeRiocb = (phba->sli_rev == 3) ?
+ SLI3_IOCB_RSP_SIZE :
+ SLI2_IOCB_RSP_SIZE;
pring->iotag_ctr = 0;
pring->iotag_max =
(phba->cfg_hba_queue_depth * 2);
@@ -2539,12 +3026,25 @@ lpfc_sli_setup(struct lpfc_hba *phba)
/* numCiocb and numRiocb are used in config_port */
pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
+ pring->sizeCiocb = (phba->sli_rev == 3) ?
+ SLI3_IOCB_CMD_SIZE :
+ SLI2_IOCB_CMD_SIZE;
+ pring->sizeRiocb = (phba->sli_rev == 3) ?
+ SLI3_IOCB_RSP_SIZE :
+ SLI2_IOCB_RSP_SIZE;
+ pring->iotag_max = phba->cfg_hba_queue_depth;
pring->num_mask = 0;
break;
case LPFC_ELS_RING: /* ring 2 - ELS / CT */
/* numCiocb and numRiocb are used in config_port */
pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
+ pring->sizeCiocb = (phba->sli_rev == 3) ?
+ SLI3_IOCB_CMD_SIZE :
+ SLI2_IOCB_CMD_SIZE;
+ pring->sizeRiocb = (phba->sli_rev == 3) ?
+ SLI3_IOCB_RSP_SIZE :
+ SLI2_IOCB_RSP_SIZE;
pring->fast_iotag = 0;
pring->iotag_ctr = 0;
pring->iotag_max = 4096;
@@ -2575,14 +3075,16 @@ lpfc_sli_setup(struct lpfc_hba *phba)
lpfc_ct_unsol_event;
break;
}
- totiocb += (pring->numCiocb + pring->numRiocb);
+ totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
+ (pring->numRiocb * pring->sizeRiocb);
}
- if (totiocb > MAX_SLI2_IOCB) {
+ if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
/* Too many cmd / rsp ring entries in SLI2 SLIM */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0462 Too many cmd / rsp ring entries in "
- "SLI2 SLIM Data: x%x x%x\n",
- phba->brd_no, totiocb, MAX_SLI2_IOCB);
+ "SLI2 SLIM Data: x%x x%lx\n",
+ phba->brd_no, totiocbsize,
+ (unsigned long) MAX_SLIM_IOCB_SIZE);
}
if (phba->cfg_multi_ring_support == 2)
lpfc_extra_ring_setup(phba);
@@ -2591,15 +3093,16 @@ lpfc_sli_setup(struct lpfc_hba *phba)
}
int
-lpfc_sli_queue_setup(struct lpfc_hba * phba)
+lpfc_sli_queue_setup(struct lpfc_hba *phba)
{
struct lpfc_sli *psli;
struct lpfc_sli_ring *pring;
int i;
psli = &phba->sli;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
INIT_LIST_HEAD(&psli->mboxq);
+ INIT_LIST_HEAD(&psli->mboxq_cmpl);
/* Initialize list headers for txq and txcmplq as double linked lists */
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
@@ -2612,15 +3115,73 @@ lpfc_sli_queue_setup(struct lpfc_hba * phba)
INIT_LIST_HEAD(&pring->iocb_continueq);
INIT_LIST_HEAD(&pring->postbufq);
}
- spin_unlock_irq(phba->host->host_lock);
- return (1);
+ spin_unlock_irq(&phba->hbalock);
+ return 1;
}
int
-lpfc_sli_hba_down(struct lpfc_hba * phba)
+lpfc_sli_host_down(struct lpfc_vport *vport)
{
LIST_HEAD(completions);
- struct lpfc_sli *psli;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ int i;
+ unsigned long flags = 0;
+ uint16_t prev_pring_flag;
+
+ lpfc_cleanup_discovery_resources(vport);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+ prev_pring_flag = pring->flag;
+ if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
+ /*
+ * Error everything on the txq since these iocbs have not been
+ * given to the FW yet.
+ */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ if (iocb->vport != vport)
+ continue;
+ list_move_tail(&iocb->list, &completions);
+ pring->txq_cnt--;
+ }
+
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
+ list) {
+ if (iocb->vport != vport)
+ continue;
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ }
+
+ pring->flag = prev_pring_flag;
+ }
+
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ while (!list_empty(&completions)) {
+ list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
+
+ if (!iocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, iocb);
+ else {
+ iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
+ iocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN;
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ }
+ }
+ return 1;
+}
+
+int
+lpfc_sli_hba_down(struct lpfc_hba *phba)
+{
+ LIST_HEAD(completions);
+ struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
LPFC_MBOXQ_t *pmb;
struct lpfc_iocbq *iocb;
@@ -2628,13 +3189,15 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
int i;
unsigned long flags = 0;
- psli = &phba->sli;
lpfc_hba_down_prep(phba);
- spin_lock_irqsave(phba->host->host_lock, flags);
+ lpfc_fabric_abort_hba(phba);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
- pring->flag |= LPFC_DEFERRED_RING_EVENT;
+ if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
/*
* Error everything on the txq since these iocbs have not been
@@ -2644,51 +3207,50 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
pring->txq_cnt = 0;
}
- spin_unlock_irqrestore(phba->host->host_lock, flags);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
while (!list_empty(&completions)) {
- iocb = list_get_first(&completions, struct lpfc_iocbq, list);
+ list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
cmd = &iocb->iocb;
- list_del(&iocb->list);
- if (iocb->iocb_cmpl) {
+ if (!iocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, iocb);
+ else {
cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
(iocb->iocb_cmpl) (phba, iocb, iocb);
- } else
- lpfc_sli_release_iocbq(phba, iocb);
+ }
}
/* Return any active mbox cmds */
del_timer_sync(&psli->mbox_tmo);
- spin_lock_irqsave(phba->host->host_lock, flags);
- phba->work_hba_events &= ~WORKER_MBOX_TMO;
+ spin_lock_irqsave(&phba->hbalock, flags);
+
+ spin_lock(&phba->pport->work_port_lock);
+ phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+ spin_unlock(&phba->pport->work_port_lock);
+
if (psli->mbox_active) {
- pmb = psli->mbox_active;
- pmb->mb.mbxStatus = MBX_NOT_FINISHED;
- if (pmb->mbox_cmpl) {
- spin_unlock_irqrestore(phba->host->host_lock, flags);
- pmb->mbox_cmpl(phba,pmb);
- spin_lock_irqsave(phba->host->host_lock, flags);
- }
+ list_add_tail(&psli->mbox_active->list, &completions);
+ psli->mbox_active = NULL;
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
}
- psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- psli->mbox_active = NULL;
- /* Return any pending mbox cmds */
- while ((pmb = lpfc_mbox_get(phba)) != NULL) {
+ /* Return any pending or completed mbox cmds */
+ list_splice_init(&phba->sli.mboxq, &completions);
+ list_splice_init(&phba->sli.mboxq_cmpl, &completions);
+ INIT_LIST_HEAD(&psli->mboxq);
+ INIT_LIST_HEAD(&psli->mboxq_cmpl);
+
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ while (!list_empty(&completions)) {
+ list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
pmb->mb.mbxStatus = MBX_NOT_FINISHED;
if (pmb->mbox_cmpl) {
- spin_unlock_irqrestore(phba->host->host_lock, flags);
pmb->mbox_cmpl(phba,pmb);
- spin_lock_irqsave(phba->host->host_lock, flags);
}
}
-
- INIT_LIST_HEAD(&psli->mboxq);
-
- spin_unlock_irqrestore(phba->host->host_lock, flags);
-
return 1;
}
@@ -2710,14 +3272,15 @@ lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
}
int
-lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
- struct lpfc_dmabuf * mp)
+lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_dmabuf *mp)
{
/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
later */
+ spin_lock_irq(&phba->hbalock);
list_add_tail(&mp->list, &pring->postbufq);
-
pring->postbufq_cnt++;
+ spin_unlock_irq(&phba->hbalock);
return 0;
}
@@ -2730,14 +3293,17 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct list_head *slp = &pring->postbufq;
/* Search postbufq, from the begining, looking for a match on phys */
+ spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
if (mp->phys == phys) {
list_del_init(&mp->list);
pring->postbufq_cnt--;
+ spin_unlock_irq(&phba->hbalock);
return mp;
}
}
+ spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0410 Cannot find virtual addr for mapped buf on "
"ring %d Data x%llx x%p x%p x%x\n",
@@ -2747,92 +3313,110 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
static void
-lpfc_sli_abort_els_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp;
+ IOCB_t *irsp = &rspiocb->iocb;
uint16_t abort_iotag, abort_context;
- struct lpfc_iocbq *abort_iocb, *rsp_ab_iocb;
+ struct lpfc_iocbq *abort_iocb;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
abort_iocb = NULL;
- irsp = &rspiocb->iocb;
-
- spin_lock_irq(phba->host->host_lock);
if (irsp->ulpStatus) {
abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
+ spin_lock_irq(&phba->hbalock);
if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "%d:0327 Cannot abort els iocb %p"
- " with tag %x context %x\n",
- phba->brd_no, abort_iocb,
- abort_iotag, abort_context);
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
+ "%d:0327 Cannot abort els iocb %p "
+ "with tag %x context %x, abort status %x, "
+ "abort code %x\n",
+ phba->brd_no, abort_iocb, abort_iotag,
+ abort_context, irsp->ulpStatus,
+ irsp->un.ulpWord[4]);
/*
* make sure we have the right iocbq before taking it
* off the txcmplq and try to call completion routine.
*/
- if (abort_iocb &&
- abort_iocb->iocb.ulpContext == abort_context &&
- abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
- list_del(&abort_iocb->list);
+ if (!abort_iocb ||
+ abort_iocb->iocb.ulpContext != abort_context ||
+ (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
+ spin_unlock_irq(&phba->hbalock);
+ else {
+ list_del_init(&abort_iocb->list);
pring->txcmplq_cnt--;
+ spin_unlock_irq(&phba->hbalock);
- rsp_ab_iocb = lpfc_sli_get_iocbq(phba);
- if (rsp_ab_iocb == NULL)
- lpfc_sli_release_iocbq(phba, abort_iocb);
- else {
- abort_iocb->iocb_flag &=
- ~LPFC_DRIVER_ABORTED;
- rsp_ab_iocb->iocb.ulpStatus =
- IOSTAT_LOCAL_REJECT;
- rsp_ab_iocb->iocb.un.ulpWord[4] =
- IOERR_SLI_ABORTED;
- spin_unlock_irq(phba->host->host_lock);
- (abort_iocb->iocb_cmpl)
- (phba, abort_iocb, rsp_ab_iocb);
- spin_lock_irq(phba->host->host_lock);
- lpfc_sli_release_iocbq(phba, rsp_ab_iocb);
- }
+ abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
+ abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
}
}
lpfc_sli_release_iocbq(phba, cmdiocb);
- spin_unlock_irq(phba->host->host_lock);
+ return;
+}
+
+static void
+lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ IOCB_t *irsp = &rspiocb->iocb;
+
+ /* ELS cmd tag <ulpIoTag> completes */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d (X):0133 Ignoring ELS cmd tag x%x completion Data: "
+ "x%x x%x x%x\n",
+ phba->brd_no, irsp->ulpIoTag, irsp->ulpStatus,
+ irsp->un.ulpWord[4], irsp->ulpTimeout);
+ if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ else
+ lpfc_els_free_iocb(phba, cmdiocb);
return;
}
int
-lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring,
- struct lpfc_iocbq * cmdiocb)
+lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *cmdiocb)
{
+ struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_iocbq *abtsiocbp;
IOCB_t *icmd = NULL;
IOCB_t *iabt = NULL;
int retval = IOCB_ERROR;
- /* There are certain command types we don't want
- * to abort.
+ /*
+ * There are certain command types we don't want to abort. And we
+ * don't want to abort commands that are already in the process of
+ * being aborted.
*/
icmd = &cmdiocb->iocb;
- if ((icmd->ulpCommand == CMD_ABORT_XRI_CN) ||
- (icmd->ulpCommand == CMD_CLOSE_XRI_CN))
+ if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+ icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+ (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
return 0;
- /* If we're unloading, interrupts are disabled so we
- * need to cleanup the iocb here.
+ /* If we're unloading, don't abort iocb on the ELS ring, but change the
+ * callback so that nothing happens when it finishes.
*/
- if (phba->fc_flag & FC_UNLOADING)
+ if ((vport->load_flag & FC_UNLOADING) &&
+ (pring->ringno == LPFC_ELS_RING)) {
+ if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ else
+ cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
goto abort_iotag_exit;
+ }
/* issue ABTS for this IOCB based on iotag */
- abtsiocbp = lpfc_sli_get_iocbq(phba);
+ abtsiocbp = __lpfc_sli_get_iocbq(phba);
if (abtsiocbp == NULL)
return 0;
@@ -2848,7 +3432,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba,
iabt->ulpLe = 1;
iabt->ulpClass = icmd->ulpClass;
- if (phba->hba_state >= LPFC_LINK_UP)
+ if (phba->link_state >= LPFC_LINK_UP)
iabt->ulpCommand = CMD_ABORT_XRI_CN;
else
iabt->ulpCommand = CMD_CLOSE_XRI_CN;
@@ -2856,32 +3440,20 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba,
abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "%d:0339 Abort xri x%x, original iotag x%x, abort "
- "cmd iotag x%x\n",
- phba->brd_no, iabt->un.acxri.abortContextTag,
+ "%d (%d):0339 Abort xri x%x, original iotag x%x, "
+ "abort cmd iotag x%x\n",
+ phba->brd_no, vport->vpi,
+ iabt->un.acxri.abortContextTag,
iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
- retval = lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
+ retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
abort_iotag_exit:
-
- /* If we could not issue an abort dequeue the iocb and handle
- * the completion here.
+ /*
+ * Caller to this routine should check for IOCB_ERROR
+ * and handle it properly. This routine no longer removes
+ * iocb off txcmplq and call compl in case of IOCB_ERROR.
*/
- if (retval == IOCB_ERROR) {
- list_del(&cmdiocb->list);
- pring->txcmplq_cnt--;
-
- if (cmdiocb->iocb_cmpl) {
- icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
- icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
- spin_unlock_irq(phba->host->host_lock);
- (cmdiocb->iocb_cmpl) (phba, cmdiocb, cmdiocb);
- spin_lock_irq(phba->host->host_lock);
- } else
- lpfc_sli_release_iocbq(phba, cmdiocb);
- }
-
- return 1;
+ return retval;
}
static int
@@ -2930,7 +3502,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
int
lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
- uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
+ uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
{
struct lpfc_iocbq *iocbq;
int sum, i;
@@ -2947,14 +3519,10 @@ lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
void
-lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
- unsigned long iflags;
-
- spin_lock_irqsave(phba->host->host_lock, iflags);
lpfc_sli_release_iocbq(phba, cmdiocb);
- spin_unlock_irqrestore(phba->host->host_lock, iflags);
return;
}
@@ -2972,8 +3540,8 @@ lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
- if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id,
- 0, abort_cmd) != 0)
+ if (lpfc_sli_validate_fcp_iocb(iocbq, tgt_id, lun_id, 0,
+ abort_cmd) != 0)
continue;
/* issue ABTS for this IOCB based on iotag */
@@ -2989,8 +3557,9 @@ lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
abtsiocb->iocb.ulpLe = 1;
abtsiocb->iocb.ulpClass = cmd->ulpClass;
+ abtsiocb->vport = phba->pport;
- if (phba->hba_state >= LPFC_LINK_UP)
+ if (lpfc_is_link_up(phba))
abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
else
abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
@@ -3016,16 +3585,16 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
wait_queue_head_t *pdone_q;
unsigned long iflags;
- spin_lock_irqsave(phba->host->host_lock, iflags);
+ spin_lock_irqsave(&phba->hbalock, iflags);
cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
if (cmdiocbq->context2 && rspiocbq)
memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
&rspiocbq->iocb, sizeof(IOCB_t));
pdone_q = cmdiocbq->context_un.wait_queue;
- spin_unlock_irqrestore(phba->host->host_lock, iflags);
if (pdone_q)
wake_up(pdone_q);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
@@ -3035,11 +3604,12 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
* lpfc_sli_issue_call since the wake routine sets a unique value and by
* definition this is a wait function.
*/
+
int
-lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring,
- struct lpfc_iocbq * piocb,
- struct lpfc_iocbq * prspiocbq,
+lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *piocb,
+ struct lpfc_iocbq *prspiocbq,
uint32_t timeout)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
@@ -3071,11 +3641,9 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
if (retval == IOCB_SUCCESS) {
timeout_req = timeout * HZ;
- spin_unlock_irq(phba->host->host_lock);
timeleft = wait_event_timeout(done_q,
piocb->iocb_flag & LPFC_IO_WAKE,
timeout_req);
- spin_lock_irq(phba->host->host_lock);
if (piocb->iocb_flag & LPFC_IO_WAKE) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3117,16 +3685,16 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
}
int
-lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
+lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
uint32_t timeout)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
int retval;
+ unsigned long flag;
/* The caller must leave context1 empty. */
- if (pmboxq->context1 != 0) {
- return (MBX_NOT_FINISHED);
- }
+ if (pmboxq->context1 != 0)
+ return MBX_NOT_FINISHED;
/* setup wake call as IOCB callback */
pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
@@ -3141,6 +3709,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
pmboxq->mbox_flag & LPFC_MBX_WAKE,
timeout * HZ);
+ spin_lock_irqsave(&phba->hbalock, flag);
pmboxq->context1 = NULL;
/*
* if LPFC_MBX_WAKE flag is set the mailbox is completed
@@ -3148,8 +3717,11 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
*/
if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
retval = MBX_SUCCESS;
- else
+ else {
retval = MBX_TIMEOUT;
+ pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flag);
}
return retval;
@@ -3158,14 +3730,27 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
int
lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
{
+ struct lpfc_vport *vport = phba->pport;
int i = 0;
+ uint32_t ha_copy;
- while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) {
+ while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
if (i++ > LPFC_MBOX_TMO * 1000)
return 1;
- if (lpfc_sli_handle_mb_event(phba) == 0)
- i = 0;
+ /*
+ * Call lpfc_sli_handle_mb_event only if a mailbox cmd
+ * did finish. This way we won't get the misleading
+ * "Stray Mailbox Interrupt" message.
+ */
+ spin_lock_irq(&phba->hbalock);
+ ha_copy = phba->work_ha;
+ phba->work_ha &= ~HA_MBATT;
+ spin_unlock_irq(&phba->hbalock);
+
+ if (ha_copy & HA_MBATT)
+ if (lpfc_sli_handle_mb_event(phba) == 0)
+ i = 0;
msleep(1);
}
@@ -3176,13 +3761,20 @@ lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
irqreturn_t
lpfc_intr_handler(int irq, void *dev_id)
{
- struct lpfc_hba *phba;
+ struct lpfc_hba *phba;
uint32_t ha_copy;
uint32_t work_ha_copy;
unsigned long status;
int i;
uint32_t control;
+ MAILBOX_t *mbox, *pmbox;
+ struct lpfc_vport *vport;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_dmabuf *mp;
+ LPFC_MBOXQ_t *pmb;
+ int rc;
+
/*
* Get the driver's phba structure from the dev_id and
* assume the HBA is not interrupting.
@@ -3204,7 +3796,7 @@ lpfc_intr_handler(int irq, void *dev_id)
*/
/* Ignore all interrupts during initialization. */
- if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
+ if (unlikely(phba->link_state < LPFC_LINK_DOWN))
return IRQ_NONE;
/*
@@ -3212,16 +3804,16 @@ lpfc_intr_handler(int irq, void *dev_id)
* Clear Attention Sources, except Error Attention (to
* preserve status) and Link Attention
*/
- spin_lock(phba->host->host_lock);
+ spin_lock(&phba->hbalock);
ha_copy = readl(phba->HAregaddr);
/* If somebody is waiting to handle an eratt don't process it
* here. The brdkill function will do this.
*/
- if (phba->fc_flag & FC_IGNORE_ERATT)
+ if (phba->link_flag & LS_IGNORE_ERATT)
ha_copy &= ~HA_ERATT;
writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
- spin_unlock(phba->host->host_lock);
+ spin_unlock(&phba->hbalock);
if (unlikely(!ha_copy))
return IRQ_NONE;
@@ -3235,36 +3827,41 @@ lpfc_intr_handler(int irq, void *dev_id)
* Turn off Link Attention interrupts
* until CLEAR_LA done
*/
- spin_lock(phba->host->host_lock);
+ spin_lock(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
control = readl(phba->HCregaddr);
control &= ~HC_LAINT_ENA;
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- spin_unlock(phba->host->host_lock);
+ spin_unlock(&phba->hbalock);
}
else
work_ha_copy &= ~HA_LATT;
}
if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
- for (i = 0; i < phba->sli.num_rings; i++) {
- if (work_ha_copy & (HA_RXATT << (4*i))) {
- /*
- * Turn off Slow Rings interrupts
- */
- spin_lock(phba->host->host_lock);
- control = readl(phba->HCregaddr);
- control &= ~(HC_R0INT_ENA << i);
+ /*
+ * Turn off Slow Rings interrupts, LPFC_ELS_RING is
+ * the only slow ring.
+ */
+ status = (work_ha_copy &
+ (HA_RXMASK << (4*LPFC_ELS_RING)));
+ status >>= (4*LPFC_ELS_RING);
+ if (status & HA_RXMASK) {
+ spin_lock(&phba->hbalock);
+ control = readl(phba->HCregaddr);
+ if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
+ control &=
+ ~(HC_R0INT_ENA << LPFC_ELS_RING);
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- spin_unlock(phba->host->host_lock);
}
+ spin_unlock(&phba->hbalock);
}
}
if (work_ha_copy & HA_ERATT) {
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
/*
* There was a link/board error. Read the
* status register to retrieve the error event
@@ -3279,14 +3876,108 @@ lpfc_intr_handler(int irq, void *dev_id)
/* Clear Chip error bit */
writel(HA_ERATT, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
- phba->stopped = 1;
+ phba->pport->stopped = 1;
+ }
+
+ if ((work_ha_copy & HA_MBATT) &&
+ (phba->sli.mbox_active)) {
+ pmb = phba->sli.mbox_active;
+ pmbox = &pmb->mb;
+ mbox = &phba->slim2p->mbx;
+ vport = pmb->vport;
+
+ /* First check out the status word */
+ lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
+ if (pmbox->mbxOwner != OWN_HOST) {
+ /*
+ * Stray Mailbox Interrupt, mbxCommand <cmd>
+ * mbxStatus <status>
+ */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX |
+ LOG_SLI,
+ "%d (%d):0304 Stray Mailbox "
+ "Interrupt mbxCommand x%x "
+ "mbxStatus x%x\n",
+ phba->brd_no,
+ (vport
+ ? vport->vpi : 0),
+ pmbox->mbxCommand,
+ pmbox->mbxStatus);
+ }
+ phba->last_completion_time = jiffies;
+ del_timer_sync(&phba->sli.mbox_tmo);
+
+ phba->sli.mbox_active = NULL;
+ if (pmb->mbox_cmpl) {
+ lpfc_sli_pcimem_bcopy(mbox, pmbox,
+ MAILBOX_CMD_SIZE);
+ }
+ if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
+ pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
+
+ lpfc_debugfs_disc_trc(vport,
+ LPFC_DISC_TRC_MBOX_VPORT,
+				"MBOX dflt rpi: status:x%x rpi:x%x",
+ (uint32_t)pmbox->mbxStatus,
+ pmbox->un.varWords[0], 0);
+
+ if ( !pmbox->mbxStatus) {
+ mp = (struct lpfc_dmabuf *)
+ (pmb->context1);
+ ndlp = (struct lpfc_nodelist *)
+ pmb->context2;
+
+ /* Reg_LOGIN of dflt RPI was successful.
+				 * now let's get rid of the RPI using the
+ * same mbox buffer.
+ */
+ lpfc_unreg_login(phba, vport->vpi,
+ pmbox->un.varWords[0], pmb);
+ pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+ pmb->context1 = mp;
+ pmb->context2 = ndlp;
+ pmb->vport = vport;
+ spin_lock(&phba->hbalock);
+ phba->sli.sli_flag &=
+ ~LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock(&phba->hbalock);
+ goto send_current_mbox;
+ }
+ }
+ spin_lock(&phba->pport->work_port_lock);
+ phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+ spin_unlock(&phba->pport->work_port_lock);
+ lpfc_mbox_cmpl_put(phba, pmb);
+ }
+ if ((work_ha_copy & HA_MBATT) &&
+ (phba->sli.mbox_active == NULL)) {
+send_next_mbox:
+ spin_lock(&phba->hbalock);
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ pmb = lpfc_mbox_get(phba);
+ spin_unlock(&phba->hbalock);
+send_current_mbox:
+ /* Process next mailbox command if there is one */
+ if (pmb != NULL) {
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ pmb->mb.mbxStatus = MBX_NOT_FINISHED;
+ lpfc_mbox_cmpl_put(phba, pmb);
+ goto send_next_mbox;
+ }
+ } else {
+ /* Turn on IOCB processing */
+ for (i = 0; i < phba->sli.num_rings; i++)
+ lpfc_sli_turn_on_ring(phba, i);
+ }
+
}
- spin_lock(phba->host->host_lock);
+ spin_lock(&phba->hbalock);
phba->work_ha |= work_ha_copy;
if (phba->work_wait)
- wake_up(phba->work_wait);
- spin_unlock(phba->host->host_lock);
+ lpfc_worker_wake_up(phba);
+ spin_unlock(&phba->hbalock);
}
ha_copy &= ~(phba->work_ha_mask);
@@ -3298,7 +3989,7 @@ lpfc_intr_handler(int irq, void *dev_id)
*/
status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
status >>= (4*LPFC_FCP_RING);
- if (status & HA_RXATT)
+ if (status & HA_RXMASK)
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.ring[LPFC_FCP_RING],
status);
@@ -3311,7 +4002,7 @@ lpfc_intr_handler(int irq, void *dev_id)
*/
status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
status >>= (4*LPFC_EXTRA_RING);
- if (status & HA_RXATT) {
+ if (status & HA_RXMASK) {
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.ring[LPFC_EXTRA_RING],
status);
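
The lpfc_sli.c changes above move locking from the midlayer's host_lock to the driver-private hbalock and split iocb submission into a lockless __lpfc_sli_issue_iocb() plus a thin wrapper that takes the lock. Below is a minimal, self-contained userspace sketch of that locked/lockless split; the struct and function names are illustrative only, not part of the lpfc API.

#include <pthread.h>
#include <stdio.h>

struct ring {
	pthread_mutex_t lock;	/* stands in for phba->hbalock */
	int txq_cnt;		/* entries queued for the hardware */
};

/* Double-underscore variant: caller already holds ring->lock. */
static int __ring_queue(struct ring *r)
{
	r->txq_cnt++;
	return 0;
}

/* Locking wrapper: take the lock, delegate, release. */
static int ring_queue(struct ring *r)
{
	int rc;

	pthread_mutex_lock(&r->lock);
	rc = __ring_queue(r);
	pthread_mutex_unlock(&r->lock);
	return rc;
}

int main(void)
{
	struct ring r = { .lock = PTHREAD_MUTEX_INITIALIZER, .txq_cnt = 0 };

	ring_queue(&r);
	printf("queued, txq_cnt=%d\n", r.txq_cnt);
	return 0;
}

Paths that already run under hbalock, such as lpfc_sli_host_down() issuing aborts above, call the __ variants directly and so avoid taking the lock twice.
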
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 41c38d324ab..76058505795 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -20,6 +20,7 @@
/* forward declaration for LPFC_IOCB_t's use */
struct lpfc_hba;
+struct lpfc_vport;
/* Define the context types that SLI handles for abort and sums. */
typedef enum _lpfc_ctx_cmd {
@@ -43,10 +44,12 @@ struct lpfc_iocbq {
#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */
#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
+#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
uint8_t abort_count;
uint8_t rsvd2;
uint32_t drvrTimeout; /* driver timeout in seconds */
+ struct lpfc_vport *vport;/* virtual port pointer */
void *context1; /* caller context information */
void *context2; /* caller context information */
void *context3; /* caller context information */
@@ -56,6 +59,8 @@ struct lpfc_iocbq {
struct lpfcMboxq *mbox;
} context_un;
+ void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
@@ -68,12 +73,14 @@ struct lpfc_iocbq {
#define IOCB_ERROR 2
#define IOCB_TIMEDOUT 3
-#define LPFC_MBX_WAKE 1
+#define LPFC_MBX_WAKE 1
+#define LPFC_MBX_IMED_UNREG 2
typedef struct lpfcMboxq {
/* MBOXQs are used in single linked lists */
struct list_head list; /* ptr to next mailbox command */
MAILBOX_t mb; /* Mailbox cmd */
+	struct lpfc_vport *vport;/* virtual port pointer */
void *context1; /* caller context information */
void *context2; /* caller context information */
@@ -135,6 +142,8 @@ struct lpfc_sli_ring {
uint8_t ringno; /* ring number */
uint16_t numCiocb; /* number of command iocb's per ring */
uint16_t numRiocb; /* number of rsp iocb's per ring */
+ uint16_t sizeCiocb; /* Size of command iocb's in this ring */
+ uint16_t sizeRiocb; /* Size of response iocb's in this ring */
uint32_t fast_iotag; /* max fastlookup based iotag */
uint32_t iotag_ctr; /* keeps track of the next iotag to use */
@@ -165,6 +174,34 @@ struct lpfc_sli_ring {
struct lpfc_sli_ring *);
};
+/* Structure used for configuring rings to a specific profile or rctl / type */
+struct lpfc_hbq_init {
+ uint32_t rn; /* Receive buffer notification */
+ uint32_t entry_count; /* max # of entries in HBQ */
+ uint32_t headerLen; /* 0 if not profile 4 or 5 */
+ uint32_t logEntry; /* Set to 1 if this HBQ used for LogEntry */
+ uint32_t profile; /* Selection profile 0=all, 7=logentry */
+ uint32_t ring_mask; /* Binds HBQ to a ring e.g. Ring0=b0001,
+ * ring2=b0100 */
+ uint32_t hbq_index; /* index of this hbq in ring .HBQs[] */
+
+ uint32_t seqlenoff;
+ uint32_t maxlen;
+ uint32_t seqlenbcnt;
+ uint32_t cmdcodeoff;
+ uint32_t cmdmatch[8];
+ uint32_t mask_count; /* number of mask entries in prt array */
+ struct hbq_mask hbqMasks[6];
+
+ /* Non-config rings fields to keep track of buffer allocations */
+ uint32_t buffer_count; /* number of buffers allocated */
+ uint32_t init_count; /* number to allocate when initialized */
+ uint32_t add_count; /* number to allocate when starved */
+} ;
+
+#define LPFC_MAX_HBQ 16
+
+
/* Structure used to hold SLI statistical counters and info */
struct lpfc_sli_stat {
uint64_t mbox_stat_err; /* Mbox cmds completed status error */
@@ -197,6 +234,7 @@ struct lpfc_sli {
#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */
#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */
#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
+#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
struct lpfc_sli_ring ring[LPFC_MAX_RING];
int fcp_ring; /* ring used for FCP initiator commands */
@@ -209,6 +247,7 @@ struct lpfc_sli {
uint16_t mboxq_cnt; /* current length of queue */
uint16_t mboxq_max; /* max length */
LPFC_MBOXQ_t *mbox_active; /* active mboxq information */
+ struct list_head mboxq_cmpl;
struct timer_list mbox_tmo; /* Hold clk to timeout active mbox
cmd */
@@ -221,12 +260,6 @@ struct lpfc_sli {
struct lpfc_lnk_stat lnk_stat_offsets;
};
-/* Given a pointer to the start of the ring, and the slot number of
- * the desired iocb entry, calc a pointer to that entry.
- * (assume iocb entry size is 32 bytes, or 8 words)
- */
-#define IOCB_ENTRY(ring,slot) ((IOCB_t *)(((char *)(ring)) + ((slot) * 32)))
-
#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
command */
#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
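
The lpfc_sli.h hunk above drops the fixed IOCB_ENTRY() macro, which assumed every ring entry was 32 bytes, and adds per-ring sizeCiocb/sizeRiocb fields so SLI-3 rings can use larger entries. A small standalone sketch of addressing a slot by a variable, per-ring entry size follows; the 128-byte SLI-3 figure is only an assumed example, not the real value.

#include <stdio.h>
#include <stdlib.h>

#define SLI2_ENTRY_SIZE  32	/* the old fixed assumption: 8 words per IOCB */
#define SLI3_ENTRY_SIZE 128	/* assumed example of a larger SLI-3 style entry */

/* Compute a slot address when the entry size is a per-ring value. */
static void *ring_entry(void *base, unsigned int slot, unsigned int entry_size)
{
	return (char *)base + (size_t)slot * entry_size;
}

int main(void)
{
	unsigned int entry_size = SLI3_ENTRY_SIZE;	/* chosen per ring, not hard-coded */
	void *ring = calloc(16, entry_size);

	printf("slot 3 begins at byte offset %zu\n",
	       (size_t)((char *)ring_entry(ring, 3, entry_size) - (char *)ring));
	free(ring);
	return 0;
}
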
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 92a9107019d..a5bc79eef05 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.1.12"
+#define LPFC_DRIVER_VERSION "8.2.1"
#define LPFC_DRIVER_NAME "lpfc"
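
The new lpfc_vport.c that follows hands out virtual port indices (VPIs) from a bitmap: index 0 stays reserved for the physical port, lpfc_alloc_vpi() takes the first clear bit under hbalock and returns 0 when the pool set by CONFIG_PORT is exhausted, and lpfc_free_vpi() clears the bit again. A minimal userspace sketch of that allocator, with an assumed limit and illustrative names:

#include <stdio.h>

#define MAX_VPI 64	/* assumed limit; the driver reads it from CONFIG_PORT */

static unsigned long vpi_bmask[(MAX_VPI + 64) / 64];

/* Return a free index in 1..MAX_VPI, or 0 if none (0 is the physical port).
 * Locking is omitted here; the driver does this under hbalock. */
static int alloc_vpi(void)
{
	for (int vpi = 1; vpi <= MAX_VPI; vpi++) {
		if (!(vpi_bmask[vpi / 64] & (1UL << (vpi % 64)))) {
			vpi_bmask[vpi / 64] |= 1UL << (vpi % 64);
			return vpi;
		}
	}
	return 0;
}

static void free_vpi(int vpi)
{
	vpi_bmask[vpi / 64] &= ~(1UL << (vpi % 64));
}

int main(void)
{
	int a = alloc_vpi(), b = alloc_vpi();

	printf("allocated vpi %d and vpi %d\n", a, b);
	free_vpi(a);
	printf("vpi %d is handed out again: %d\n", a, alloc_vpi());
	return 0;
}
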
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
new file mode 100644
index 00000000000..85797dbf547
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -0,0 +1,523 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2006 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_version.h"
+#include "lpfc_vport.h"
+
+inline void lpfc_vport_set_state(struct lpfc_vport *vport,
+ enum fc_vport_state new_state)
+{
+ struct fc_vport *fc_vport = vport->fc_vport;
+
+ if (fc_vport) {
+ /*
+	 * When the transport defines fc_vport_set_state we will replace
+ * this code with the following line
+ */
+ /* fc_vport_set_state(fc_vport, new_state); */
+ if (new_state != FC_VPORT_INITIALIZING)
+ fc_vport->vport_last_state = fc_vport->vport_state;
+ fc_vport->vport_state = new_state;
+ }
+
+	/* for all the error states we will set the internal state to FAILED */
+ switch (new_state) {
+ case FC_VPORT_NO_FABRIC_SUPP:
+ case FC_VPORT_NO_FABRIC_RSCS:
+ case FC_VPORT_FABRIC_LOGOUT:
+ case FC_VPORT_FABRIC_REJ_WWN:
+ case FC_VPORT_FAILED:
+ vport->port_state = LPFC_VPORT_FAILED;
+ break;
+ case FC_VPORT_LINKDOWN:
+ vport->port_state = LPFC_VPORT_UNKNOWN;
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+}
+
+static int
+lpfc_alloc_vpi(struct lpfc_hba *phba)
+{
+ int vpi;
+
+ spin_lock_irq(&phba->hbalock);
+ /* Start at bit 1 because vpi zero is reserved for the physical port */
+ vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1);
+ if (vpi > phba->max_vpi)
+ vpi = 0;
+ else
+ set_bit(vpi, phba->vpi_bmask);
+ spin_unlock_irq(&phba->hbalock);
+ return vpi;
+}
+
+static void
+lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
+{
+ spin_lock_irq(&phba->hbalock);
+ clear_bit(vpi, phba->vpi_bmask);
+ spin_unlock_irq(&phba->hbalock);
+}
+
+static int
+lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *mb;
+ struct lpfc_dmabuf *mp;
+ int rc;
+
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ return -ENOMEM;
+ }
+ mb = &pmb->mb;
+
+ lpfc_read_sparam(phba, pmb, vport->vpi);
+ /*
+ * Grab buffer pointer and clear context1 so we can use
+	 * lpfc_sli_issue_mbox_wait
+ */
+ mp = (struct lpfc_dmabuf *) pmb->context1;
+ pmb->context1 = NULL;
+
+ pmb->vport = vport;
+ rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
+ "%d (%d):1818 VPort failed init, mbxCmd x%x "
+ "READ_SPARM mbxStatus x%x, rc = x%x\n",
+ phba->brd_no, vport->vpi,
+ mb->mbxCommand, mb->mbxStatus, rc);
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+
+ memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
+ memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+ sizeof (struct lpfc_name));
+ memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+ sizeof (struct lpfc_name));
+
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ return 0;
+}
+
+static int
+lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
+ const char *name_type)
+{
+ /* ensure that IEEE format 1 addresses
+ * contain zeros in bits 59-48
+ */
+ if (!((wwn->u.wwn[0] >> 4) == 1 &&
+ ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
+ return 1;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "%d:1822 Invalid %s: %02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x\n",
+ phba->brd_no, name_type,
+ wwn->u.wwn[0], wwn->u.wwn[1],
+ wwn->u.wwn[2], wwn->u.wwn[3],
+ wwn->u.wwn[4], wwn->u.wwn[5],
+ wwn->u.wwn[6], wwn->u.wwn[7]);
+ return 0;
+}
+
+static int
+lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
+{
+ struct lpfc_vport *vport;
+
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ if (vport == new_vport)
+ continue;
+ /* If they match, return not unique */
+ if (memcmp(&vport->fc_sparam.portName,
+ &new_vport->fc_sparam.portName,
+ sizeof(struct lpfc_name)) == 0)
+ return 0;
+ }
+ return 1;
+}
+
+int
+lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_vport *pport =
+ (struct lpfc_vport *) fc_vport->shost->hostdata;
+ struct lpfc_hba *phba = pport->phba;
+ struct lpfc_vport *vport = NULL;
+ int instance;
+ int vpi;
+ int rc = VPORT_ERROR;
+
+ if ((phba->sli_rev < 3) ||
+ !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "%d:1808 Create VPORT failed: "
+ "NPIV is not enabled: SLImode:%d\n",
+ phba->brd_no, phba->sli_rev);
+ rc = VPORT_INVAL;
+ goto error_out;
+ }
+
+ vpi = lpfc_alloc_vpi(phba);
+ if (vpi == 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "%d:1809 Create VPORT failed: "
+ "Max VPORTs (%d) exceeded\n",
+ phba->brd_no, phba->max_vpi);
+ rc = VPORT_NORESOURCES;
+ goto error_out;
+ }
+
+ /* Assign an unused board number */
+ if ((instance = lpfc_get_instance()) < 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "%d:1810 Create VPORT failed: Cannot get "
+ "instance number\n", phba->brd_no);
+ lpfc_free_vpi(phba, vpi);
+ rc = VPORT_NORESOURCES;
+ goto error_out;
+ }
+
+ vport = lpfc_create_port(phba, instance, fc_vport);
+ if (!vport) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "%d:1811 Create VPORT failed: vpi x%x\n",
+ phba->brd_no, vpi);
+ lpfc_free_vpi(phba, vpi);
+ rc = VPORT_NORESOURCES;
+ goto error_out;
+ }
+
+ vport->vpi = vpi;
+ lpfc_debugfs_initialize(vport);
+
+ if (lpfc_vport_sparm(phba, vport)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "%d:1813 Create VPORT failed: vpi:%d "
+ "Cannot get sparam\n",
+ phba->brd_no, vpi);
+ lpfc_free_vpi(phba, vpi);
+ destroy_port(vport);
+ rc = VPORT_NORESOURCES;
+ goto error_out;
+ }
+
+ memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
+ memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
+
+ if (fc_vport->node_name != 0)
+ u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
+ if (fc_vport->port_name != 0)
+ u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
+
+ memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
+ memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);
+
+ if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
+ !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "%d:1821 Create VPORT failed: vpi:%d "
+ "Invalid WWN format\n",
+ phba->brd_no, vpi);
+ lpfc_free_vpi(phba, vpi);
+ destroy_port(vport);
+ rc = VPORT_INVAL;
+ goto error_out;
+ }
+
+ if (!lpfc_unique_wwpn(phba, vport)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "%d:1823 Create VPORT failed: vpi:%d "
+ "Duplicate WWN on HBA\n",
+ phba->brd_no, vpi);
+ lpfc_free_vpi(phba, vpi);
+ destroy_port(vport);
+ rc = VPORT_INVAL;
+ goto error_out;
+ }
+
+ *(struct lpfc_vport **)fc_vport->dd_data = vport;
+ vport->fc_vport = fc_vport;
+
+ if ((phba->link_state < LPFC_LINK_UP) ||
+ (phba->fc_topology == TOPOLOGY_LOOP)) {
+ lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+ rc = VPORT_OK;
+ goto out;
+ }
+
+ if (disable) {
+ rc = VPORT_OK;
+ goto out;
+ }
+
+ /* Use the Physical node's Fabric NDLP to determine if the link is
+ * up and ready to FDISC.
+ */
+ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+ if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
+ lpfc_set_disctmo(vport);
+ lpfc_initial_fdisc(vport);
+ } else {
+ lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0262 No NPIV Fabric "
+ "support\n",
+ phba->brd_no, vport->vpi);
+ }
+ } else {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ }
+ rc = VPORT_OK;
+
+out:
+ lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
+error_out:
+ return rc;
+}
+
+int
+disable_vport(struct fc_vport *fc_vport)
+{
+ struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+ long timeout;
+
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (ndlp && phba->link_state >= LPFC_LINK_UP) {
+ vport->unreg_vpi_cmpl = VPORT_INVAL;
+ timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
+ if (!lpfc_issue_els_npiv_logo(vport, ndlp))
+ while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
+ timeout = schedule_timeout(timeout);
+ }
+
+ lpfc_sli_host_down(vport);
+
+ /* Mark all nodes for discovery so we can remove them by
+ * calling lpfc_cleanup_rpis(vport, 1)
+ */
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ continue;
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+ }
+ lpfc_cleanup_rpis(vport, 1);
+
+ lpfc_stop_vport_timers(vport);
+ lpfc_unreg_all_rpis(vport);
+ lpfc_unreg_default_rpis(vport);
+ /*
+ * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
+ * scsi_host_put() to release the vport.
+ */
+ lpfc_mbx_unreg_vpi(vport);
+
+ lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
+ return VPORT_OK;
+}
+
+int
+enable_vport(struct fc_vport *fc_vport)
+{
+ struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp = NULL;
+
+ if ((phba->link_state < LPFC_LINK_UP) ||
+ (phba->fc_topology == TOPOLOGY_LOOP)) {
+ lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+ return VPORT_OK;
+ }
+
+ vport->load_flag |= FC_LOADING;
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+
+ /* Use the Physical node's Fabric NDLP to determine if the link is
+ * up and ready to FDISC.
+ */
+ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+ if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
+ lpfc_set_disctmo(vport);
+ lpfc_initial_fdisc(vport);
+ } else {
+ lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0264 No NPIV Fabric "
+ "support\n",
+ phba->brd_no, vport->vpi);
+ }
+ } else {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ }
+
+ return VPORT_OK;
+}
+
+int
+lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+ if (disable)
+ return disable_vport(fc_vport);
+ else
+ return enable_vport(fc_vport);
+}
+
+
+int
+lpfc_vport_delete(struct fc_vport *fc_vport)
+{
+ struct lpfc_nodelist *ndlp = NULL;
+ struct lpfc_nodelist *next_ndlp;
+ struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost;
+ struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+ struct lpfc_hba *phba = vport->phba;
+ long timeout;
+ int rc = VPORT_ERROR;
+
+ /*
+ * This is a bit of a mess. We want to ensure the shost doesn't get
+ * torn down until we're done with the embedded lpfc_vport structure.
+ *
+ * Beyond holding a reference for this function, we also need a
+ * reference for outstanding I/O requests we schedule during delete
+ * processing. But once we call scsi_remove_host() we can no longer
+ * obtain a reference through scsi_host_get().
+ *
+ * So we take two references here. We release one reference at the
+ * bottom of the function -- after delinking the vport. And we
+ * release the other at the completion of the unreg_vpi that gets
+ * initiated after we've disposed of all other resources associated
+ * with the port.
+ */
+ if (!scsi_host_get(shost) || !scsi_host_get(shost))
+ return VPORT_INVAL;
+
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "%d:1812 vport_delete failed: Cannot delete "
+ "physical host\n", phba->brd_no);
+ goto out;
+ }
+
+ vport->load_flag |= FC_UNLOADING;
+
+ kfree(vport->vname);
+ lpfc_debugfs_terminate(vport);
+ fc_remove_host(lpfc_shost_from_vport(vport));
+ scsi_remove_host(lpfc_shost_from_vport(vport));
+
+ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+ if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
+ phba->link_state >= LPFC_LINK_UP) {
+
+ /* First look for the Fabric ndlp */
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp) {
+ /* Cannot find existing Fabric ndlp, allocate one */
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ goto skip_logo;
+ lpfc_nlp_init(vport, ndlp, Fabric_DID);
+ } else {
+ lpfc_dequeue_node(vport, ndlp);
+ }
+ vport->unreg_vpi_cmpl = VPORT_INVAL;
+ timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
+ if (!lpfc_issue_els_npiv_logo(vport, ndlp))
+ while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
+ timeout = schedule_timeout(timeout);
+ }
+
+skip_logo:
+ lpfc_sli_host_down(vport);
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
+ }
+
+ lpfc_stop_vport_timers(vport);
+ lpfc_unreg_all_rpis(vport);
+ lpfc_unreg_default_rpis(vport);
+ /*
+ * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
+ * scsi_host_put() to release the vport.
+ */
+ lpfc_mbx_unreg_vpi(vport);
+
+ lpfc_free_vpi(phba, vport->vpi);
+ vport->work_port_events = 0;
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&vport->listentry);
+ spin_unlock_irq(&phba->hbalock);
+
+ rc = VPORT_OK;
+out:
+ scsi_host_put(shost);
+ return rc;
+}
+
+
+EXPORT_SYMBOL(lpfc_vport_create);
+EXPORT_SYMBOL(lpfc_vport_delete);
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
new file mode 100644
index 00000000000..f223550f8cb
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -0,0 +1,113 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2006 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#ifndef _H_LPFC_VPORT
+#define _H_LPFC_VPORT
+
+/* API version values (each will be an individual bit) */
+#define VPORT_API_VERSION_1 0x01
+
+/* Values returned via lpfc_vport_getinfo() */
+struct vport_info {
+
+ uint32_t api_versions;
+ uint8_t linktype;
+#define VPORT_TYPE_PHYSICAL 0
+#define VPORT_TYPE_VIRTUAL 1
+
+ uint8_t state;
+#define VPORT_STATE_OFFLINE 0
+#define VPORT_STATE_ACTIVE 1
+#define VPORT_STATE_FAILED 2
+
+ uint8_t fail_reason;
+ uint8_t prev_fail_reason;
+#define VPORT_FAIL_UNKNOWN 0
+#define VPORT_FAIL_LINKDOWN 1
+#define VPORT_FAIL_FAB_UNSUPPORTED 2
+#define VPORT_FAIL_FAB_NORESOURCES 3
+#define VPORT_FAIL_FAB_LOGOUT 4
+#define VPORT_FAIL_ADAP_NORESOURCES 5
+
+ uint8_t node_name[8]; /* WWNN */
+ uint8_t port_name[8]; /* WWPN */
+
+ struct Scsi_Host *shost;
+
+/* Following values are valid only on physical links */
+ uint32_t vports_max;
+ uint32_t vports_inuse;
+ uint32_t rpi_max;
+ uint32_t rpi_inuse;
+#define VPORT_CNT_INVALID 0xFFFFFFFF
+};
+
+/* data used in link creation */
+struct vport_data {
+ uint32_t api_version;
+
+ uint32_t options;
+#define VPORT_OPT_AUTORETRY 0x01
+
+ uint8_t node_name[8]; /* WWNN */
+ uint8_t port_name[8]; /* WWPN */
+
+/*
+ * Upon successful creation, vport_shost will point to the new Scsi_Host
+ * structure for the new virtual link.
+ */
+ struct Scsi_Host *vport_shost;
+};
+
+/* API function return codes */
+#define VPORT_OK 0
+#define VPORT_ERROR -1
+#define VPORT_INVAL -2
+#define VPORT_NOMEM -3
+#define VPORT_NORESOURCES -4
+
+int lpfc_vport_create(struct fc_vport *, bool);
+int lpfc_vport_delete(struct fc_vport *);
+int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
+int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
+
+/*
+ * queuecommand VPORT-specific return codes. Specified in the host byte code.
+ * Returned when the virtual link has failed or is not active.
+ */
+#define DID_VPORT_ERROR 0x0f
+
+#define VPORT_INFO 0x1
+#define VPORT_CREATE 0x2
+#define VPORT_DELETE 0x4
+
+struct vport_cmd_tag {
+ uint32_t cmd;
+ struct vport_data cdata;
+ struct vport_info cinfo;
+ void *vport;
+ int vport_num;
+};
+
+void lpfc_vport_set_state(struct lpfc_vport *vport,
+ enum fc_vport_state new_state);
+
+#endif /* _H_LPFC_VPORT */
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 5806ede120a..b12ad7c7c67 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -77,7 +77,7 @@ static int mac53c94_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *
for (i = 0; i < cmd->cmd_len; ++i)
printk(" %.2x", cmd->cmnd[i]);
printk("\n" KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n",
- cmd->use_sg, cmd->request_bufflen, cmd->request_buffer);
+ scsi_sg_count(cmd), scsi_bufflen(cmd), scsi_sglist(cmd));
}
#endif
@@ -173,8 +173,7 @@ static void mac53c94_start(struct fsc_state *state)
writeb(CMD_SELECT, &regs->command);
state->phase = selecting;
- if (cmd->use_sg > 0 || cmd->request_bufflen != 0)
- set_dma_cmds(state, cmd);
+ set_dma_cmds(state, cmd);
}
static irqreturn_t do_mac53c94_interrupt(int irq, void *dev_id)
@@ -262,7 +261,7 @@ static void mac53c94_interrupt(int irq, void *dev_id)
writeb(CMD_NOP, &regs->command);
/* set DMA controller going if any data to transfer */
if ((stat & (STAT_MSG|STAT_CD)) == 0
- && (cmd->use_sg > 0 || cmd->request_bufflen != 0)) {
+ && (scsi_sg_count(cmd) > 0 || scsi_bufflen(cmd))) {
nb = cmd->SCp.this_residual;
if (nb > 0xfff0)
nb = 0xfff0;
@@ -310,14 +309,7 @@ static void mac53c94_interrupt(int irq, void *dev_id)
printk(KERN_DEBUG "intr %x before data xfer complete\n", intr);
}
writel(RUN << 16, &dma->control); /* stop dma */
- if (cmd->use_sg != 0) {
- pci_unmap_sg(state->pdev,
- (struct scatterlist *)cmd->request_buffer,
- cmd->use_sg, cmd->sc_data_direction);
- } else {
- pci_unmap_single(state->pdev, state->dma_addr,
- cmd->request_bufflen, cmd->sc_data_direction);
- }
+ scsi_dma_unmap(cmd);
/* should check dma status */
writeb(CMD_I_COMPLETE, &regs->command);
state->phase = completing;
@@ -365,47 +357,35 @@ static void cmd_done(struct fsc_state *state, int result)
*/
static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd)
{
- int i, dma_cmd, total;
+ int i, dma_cmd, total, nseg;
struct scatterlist *scl;
struct dbdma_cmd *dcmds;
dma_addr_t dma_addr;
u32 dma_len;
+ nseg = scsi_dma_map(cmd);
+ BUG_ON(nseg < 0);
+ if (!nseg)
+ return;
+
dma_cmd = cmd->sc_data_direction == DMA_TO_DEVICE ?
OUTPUT_MORE : INPUT_MORE;
dcmds = state->dma_cmds;
- if (cmd->use_sg > 0) {
- int nseg;
-
- total = 0;
- scl = (struct scatterlist *) cmd->request_buffer;
- nseg = pci_map_sg(state->pdev, scl, cmd->use_sg,
- cmd->sc_data_direction);
- for (i = 0; i < nseg; ++i) {
- dma_addr = sg_dma_address(scl);
- dma_len = sg_dma_len(scl);
- if (dma_len > 0xffff)
- panic("mac53c94: scatterlist element >= 64k");
- total += dma_len;
- st_le16(&dcmds->req_count, dma_len);
- st_le16(&dcmds->command, dma_cmd);
- st_le32(&dcmds->phy_addr, dma_addr);
- dcmds->xfer_status = 0;
- ++scl;
- ++dcmds;
- }
- } else {
- total = cmd->request_bufflen;
- if (total > 0xffff)
- panic("mac53c94: transfer size >= 64k");
- dma_addr = pci_map_single(state->pdev, cmd->request_buffer,
- total, cmd->sc_data_direction);
- state->dma_addr = dma_addr;
- st_le16(&dcmds->req_count, total);
+ total = 0;
+
+ scsi_for_each_sg(cmd, scl, nseg, i) {
+ dma_addr = sg_dma_address(scl);
+ dma_len = sg_dma_len(scl);
+ if (dma_len > 0xffff)
+ panic("mac53c94: scatterlist element >= 64k");
+ total += dma_len;
+ st_le16(&dcmds->req_count, dma_len);
+ st_le16(&dcmds->command, dma_cmd);
st_le32(&dcmds->phy_addr, dma_addr);
dcmds->xfer_status = 0;
++dcmds;
}
+
dma_cmd += OUTPUT_LAST - OUTPUT_MORE;
st_le16(&dcmds[-1].command, dma_cmd);
st_le16(&dcmds->command, DBDMA_STOP);
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 3cce75d7026..3907f6718ed 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -523,10 +523,8 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
/*
* filter the internal and ioctl commands
*/
- if((cmd->cmnd[0] == MEGA_INTERNAL_CMD)) {
- return cmd->request_buffer;
- }
-
+ if((cmd->cmnd[0] == MEGA_INTERNAL_CMD))
+ return (scb_t *)cmd->host_scribble;
/*
* We know what channels our logical drives are on - mega_find_card()
@@ -657,22 +655,14 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
case MODE_SENSE: {
char *buf;
+ struct scatterlist *sg;
- if (cmd->use_sg) {
- struct scatterlist *sg;
+ sg = scsi_sglist(cmd);
+ buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
- sg = (struct scatterlist *)cmd->request_buffer;
- buf = kmap_atomic(sg->page, KM_IRQ0) +
- sg->offset;
- } else
- buf = cmd->request_buffer;
memset(buf, 0, cmd->cmnd[4]);
- if (cmd->use_sg) {
- struct scatterlist *sg;
+ kunmap_atomic(buf - sg->offset, KM_IRQ0);
- sg = (struct scatterlist *)cmd->request_buffer;
- kunmap_atomic(buf - sg->offset, KM_IRQ0);
- }
cmd->result = (DID_OK << 16);
cmd->scsi_done(cmd);
return NULL;
@@ -1551,23 +1541,15 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
islogical = adapter->logdrv_chan[cmd->device->channel];
if( cmd->cmnd[0] == INQUIRY && !islogical ) {
- if( cmd->use_sg ) {
- sgl = (struct scatterlist *)
- cmd->request_buffer;
-
- if( sgl->page ) {
- c = *(unsigned char *)
+ sgl = scsi_sglist(cmd);
+ if( sgl->page ) {
+ c = *(unsigned char *)
page_address((&sgl[0])->page) +
(&sgl[0])->offset;
- }
- else {
- printk(KERN_WARNING
- "megaraid: invalid sg.\n");
- c = 0;
- }
- }
- else {
- c = *(u8 *)cmd->request_buffer;
+ } else {
+ printk(KERN_WARNING
+ "megaraid: invalid sg.\n");
+ c = 0;
}
if(IS_RAID_CH(adapter, cmd->device->channel) &&
@@ -1704,30 +1686,14 @@ mega_rundoneq (adapter_t *adapter)
static void
mega_free_scb(adapter_t *adapter, scb_t *scb)
{
- unsigned long length;
-
switch( scb->dma_type ) {
case MEGA_DMA_TYPE_NONE:
break;
- case MEGA_BULK_DATA:
- if (scb->cmd->use_sg == 0)
- length = scb->cmd->request_bufflen;
- else {
- struct scatterlist *sgl =
- (struct scatterlist *)scb->cmd->request_buffer;
- length = sgl->length;
- }
- pci_unmap_page(adapter->dev, scb->dma_h_bulkdata,
- length, scb->dma_direction);
- break;
-
case MEGA_SGLIST:
- pci_unmap_sg(adapter->dev, scb->cmd->request_buffer,
- scb->cmd->use_sg, scb->dma_direction);
+ scsi_dma_unmap(scb->cmd);
break;
-
default:
break;
}
@@ -1767,80 +1733,33 @@ __mega_busywait_mbox (adapter_t *adapter)
static int
mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
{
- struct scatterlist *sgl;
- struct page *page;
- unsigned long offset;
- unsigned int length;
+ struct scatterlist *sg;
Scsi_Cmnd *cmd;
int sgcnt;
int idx;
cmd = scb->cmd;
- /* Scatter-gather not used */
- if( cmd->use_sg == 0 || (cmd->use_sg == 1 &&
- !adapter->has_64bit_addr)) {
-
- if (cmd->use_sg == 0) {
- page = virt_to_page(cmd->request_buffer);
- offset = offset_in_page(cmd->request_buffer);
- length = cmd->request_bufflen;
- } else {
- sgl = (struct scatterlist *)cmd->request_buffer;
- page = sgl->page;
- offset = sgl->offset;
- length = sgl->length;
- }
-
- scb->dma_h_bulkdata = pci_map_page(adapter->dev,
- page, offset,
- length,
- scb->dma_direction);
- scb->dma_type = MEGA_BULK_DATA;
-
- /*
- * We need to handle special 64-bit commands that need a
- * minimum of 1 SG
- */
- if( adapter->has_64bit_addr ) {
- scb->sgl64[0].address = scb->dma_h_bulkdata;
- scb->sgl64[0].length = length;
- *buf = (u32)scb->sgl_dma_addr;
- *len = (u32)length;
- return 1;
- }
- else {
- *buf = (u32)scb->dma_h_bulkdata;
- *len = (u32)length;
- }
- return 0;
- }
-
- sgl = (struct scatterlist *)cmd->request_buffer;
-
/*
* Copy Scatter-Gather list info into controller structure.
*
* The number of sg elements returned must not exceed our limit
*/
- sgcnt = pci_map_sg(adapter->dev, sgl, cmd->use_sg,
- scb->dma_direction);
+ sgcnt = scsi_dma_map(cmd);
scb->dma_type = MEGA_SGLIST;
- BUG_ON(sgcnt > adapter->sglen);
+ BUG_ON(sgcnt > adapter->sglen || sgcnt < 0);
*len = 0;
- for( idx = 0; idx < sgcnt; idx++, sgl++ ) {
-
- if( adapter->has_64bit_addr ) {
- scb->sgl64[idx].address = sg_dma_address(sgl);
- *len += scb->sgl64[idx].length = sg_dma_len(sgl);
- }
- else {
- scb->sgl[idx].address = sg_dma_address(sgl);
- *len += scb->sgl[idx].length = sg_dma_len(sgl);
+ scsi_for_each_sg(cmd, sg, sgcnt, idx) {
+ if (adapter->has_64bit_addr) {
+ scb->sgl64[idx].address = sg_dma_address(sg);
+ *len += scb->sgl64[idx].length = sg_dma_len(sg);
+ } else {
+ scb->sgl[idx].address = sg_dma_address(sg);
+ *len += scb->sgl[idx].length = sg_dma_len(sg);
}
}
@@ -3571,7 +3490,7 @@ megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
/*
* The user passthru structure
*/
- upthru = (mega_passthru __user *)MBOX(uioc)->xferaddr;
+ upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr;
/*
* Copy in the user passthru here.
@@ -3623,7 +3542,7 @@ megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
/*
* Get the user data
*/
- if( copy_from_user(data, (char __user *)uxferaddr,
+ if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
pthru->dataxferlen) ) {
rval = (-EFAULT);
goto freemem_and_return;
@@ -3649,7 +3568,7 @@ megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
* Is data going up-stream
*/
if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) {
- if( copy_to_user((char __user *)uxferaddr, data,
+ if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
pthru->dataxferlen) ) {
rval = (-EFAULT);
}
@@ -3702,7 +3621,7 @@ freemem_and_return:
/*
* Get the user data
*/
- if( copy_from_user(data, (char __user *)uxferaddr,
+ if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
uioc.xferlen) ) {
pci_free_consistent(pdev,
@@ -3742,7 +3661,7 @@ freemem_and_return:
* Is data going up-stream
*/
if( uioc.xferlen && (uioc.flags & UIOC_RD) ) {
- if( copy_to_user((char __user *)uxferaddr, data,
+ if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
uioc.xferlen) ) {
rval = (-EFAULT);
@@ -4494,7 +4413,7 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
scmd->device = sdev;
scmd->device->host = adapter->host;
- scmd->request_buffer = (void *)scb;
+ scmd->host_scribble = (void *)scb;
scmd->cmnd[0] = MEGA_INTERNAL_CMD;
scb->state |= SCB_ACTIVE;
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index 26e1e6c5565..fef9ac95875 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 04d0b6918c6..c46685a03a9 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -1378,8 +1378,6 @@ megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
{
struct scatterlist *sgl;
mbox_ccb_t *ccb;
- struct page *page;
- unsigned long offset;
struct scsi_cmnd *scp;
int sgcnt;
int i;
@@ -1388,48 +1386,16 @@ megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
scp = scb->scp;
ccb = (mbox_ccb_t *)scb->ccb;
+ sgcnt = scsi_dma_map(scp);
+ BUG_ON(sgcnt < 0 || sgcnt > adapter->sglen);
+
// no mapping required if no data to be transferred
- if (!scp->request_buffer || !scp->request_bufflen)
+ if (!sgcnt)
return 0;
- if (!scp->use_sg) { /* scatter-gather list not used */
-
- page = virt_to_page(scp->request_buffer);
-
- offset = ((unsigned long)scp->request_buffer & ~PAGE_MASK);
-
- ccb->buf_dma_h = pci_map_page(adapter->pdev, page, offset,
- scp->request_bufflen,
- scb->dma_direction);
- scb->dma_type = MRAID_DMA_WBUF;
-
- /*
- * We need to handle special 64-bit commands that need a
- * minimum of 1 SG
- */
- sgcnt = 1;
- ccb->sgl64[0].address = ccb->buf_dma_h;
- ccb->sgl64[0].length = scp->request_bufflen;
-
- return sgcnt;
- }
-
- sgl = (struct scatterlist *)scp->request_buffer;
-
- // The number of sg elements returned must not exceed our limit
- sgcnt = pci_map_sg(adapter->pdev, sgl, scp->use_sg,
- scb->dma_direction);
-
- if (sgcnt > adapter->sglen) {
- con_log(CL_ANN, (KERN_CRIT
- "megaraid critical: too many sg elements:%d\n",
- sgcnt));
- BUG();
- }
-
scb->dma_type = MRAID_DMA_WSG;
- for (i = 0; i < sgcnt; i++, sgl++) {
+ scsi_for_each_sg(scp, sgl, sgcnt, i) {
ccb->sgl64[i].address = sg_dma_address(sgl);
ccb->sgl64[i].length = sg_dma_len(sgl);
}
@@ -1489,19 +1455,11 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
adapter->outstanding_cmds++;
- if (scb->dma_direction == PCI_DMA_TODEVICE) {
- if (!scb->scp->use_sg) { // sg list not used
- pci_dma_sync_single_for_device(adapter->pdev,
- ccb->buf_dma_h,
- scb->scp->request_bufflen,
- PCI_DMA_TODEVICE);
- }
- else {
- pci_dma_sync_sg_for_device(adapter->pdev,
- scb->scp->request_buffer,
- scb->scp->use_sg, PCI_DMA_TODEVICE);
- }
- }
+ if (scb->dma_direction == PCI_DMA_TODEVICE)
+ pci_dma_sync_sg_for_device(adapter->pdev,
+ scsi_sglist(scb->scp),
+ scsi_sg_count(scb->scp),
+ PCI_DMA_TODEVICE);
mbox->busy = 1; // Set busy
mbox->poll = 0;
@@ -1624,29 +1582,26 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
return scb;
case MODE_SENSE:
- if (scp->use_sg) {
- struct scatterlist *sgl;
- caddr_t vaddr;
+ {
+ struct scatterlist *sgl;
+ caddr_t vaddr;
- sgl = (struct scatterlist *)scp->request_buffer;
- if (sgl->page) {
- vaddr = (caddr_t)
- (page_address((&sgl[0])->page)
- + (&sgl[0])->offset);
+ sgl = scsi_sglist(scp);
+ if (sgl->page) {
+ vaddr = (caddr_t)
+ (page_address((&sgl[0])->page)
+ + (&sgl[0])->offset);
- memset(vaddr, 0, scp->cmnd[4]);
- }
- else {
- con_log(CL_ANN, (KERN_WARNING
- "megaraid mailbox: invalid sg:%d\n",
- __LINE__));
- }
+ memset(vaddr, 0, scp->cmnd[4]);
}
else {
- memset(scp->request_buffer, 0, scp->cmnd[4]);
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mailbox: invalid sg:%d\n",
+ __LINE__));
}
- scp->result = (DID_OK << 16);
- return NULL;
+ }
+ scp->result = (DID_OK << 16);
+ return NULL;
case INQUIRY:
/*
@@ -1716,7 +1671,7 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
mbox->cmd = MBOXCMD_PASSTHRU64;
scb->dma_direction = scp->sc_data_direction;
- pthru->dataxferlen = scp->request_bufflen;
+ pthru->dataxferlen = scsi_bufflen(scp);
pthru->dataxferaddr = ccb->sgl_dma_h;
pthru->numsge = megaraid_mbox_mksgl(adapter,
scb);
@@ -2050,8 +2005,8 @@ megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
- if (scp->request_bufflen) {
- pthru->dataxferlen = scp->request_bufflen;
+ if (scsi_bufflen(scp)) {
+ pthru->dataxferlen = scsi_bufflen(scp);
pthru->dataxferaddr = ccb->sgl_dma_h;
pthru->numsge = megaraid_mbox_mksgl(adapter, scb);
}
@@ -2099,8 +2054,8 @@ megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
memcpy(epthru->cdb, scp->cmnd, scp->cmd_len);
- if (scp->request_bufflen) {
- epthru->dataxferlen = scp->request_bufflen;
+ if (scsi_bufflen(scp)) {
+ epthru->dataxferlen = scsi_bufflen(scp);
epthru->dataxferaddr = ccb->sgl_dma_h;
epthru->numsge = megaraid_mbox_mksgl(adapter, scb);
}
@@ -2266,37 +2221,13 @@ megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
ccb = (mbox_ccb_t *)scb->ccb;
- switch (scb->dma_type) {
-
- case MRAID_DMA_WBUF:
- if (scb->dma_direction == PCI_DMA_FROMDEVICE) {
- pci_dma_sync_single_for_cpu(adapter->pdev,
- ccb->buf_dma_h,
- scb->scp->request_bufflen,
+ if (scb->dma_direction == PCI_DMA_FROMDEVICE)
+ pci_dma_sync_sg_for_cpu(adapter->pdev,
+ scsi_sglist(scb->scp),
+ scsi_sg_count(scb->scp),
PCI_DMA_FROMDEVICE);
- }
-
- pci_unmap_page(adapter->pdev, ccb->buf_dma_h,
- scb->scp->request_bufflen, scb->dma_direction);
-
- break;
-
- case MRAID_DMA_WSG:
- if (scb->dma_direction == PCI_DMA_FROMDEVICE) {
- pci_dma_sync_sg_for_cpu(adapter->pdev,
- scb->scp->request_buffer,
- scb->scp->use_sg, PCI_DMA_FROMDEVICE);
- }
-
- pci_unmap_sg(adapter->pdev, scb->scp->request_buffer,
- scb->scp->use_sg, scb->dma_direction);
-
- break;
-
- default:
- break;
- }
+ scsi_dma_unmap(scb->scp);
return;
}
@@ -2399,24 +2330,16 @@ megaraid_mbox_dpc(unsigned long devp)
if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0
&& IS_RAID_CH(raid_dev, scb->dev_channel)) {
- if (scp->use_sg) {
- sgl = (struct scatterlist *)
- scp->request_buffer;
-
- if (sgl->page) {
- c = *(unsigned char *)
+ sgl = scsi_sglist(scp);
+ if (sgl->page) {
+ c = *(unsigned char *)
(page_address((&sgl[0])->page) +
- (&sgl[0])->offset);
- }
- else {
- con_log(CL_ANN, (KERN_WARNING
- "megaraid mailbox: invalid sg:%d\n",
- __LINE__));
- c = 0;
- }
- }
- else {
- c = *(uint8_t *)scp->request_buffer;
+ (&sgl[0])->offset);
+ } else {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mailbox: invalid sg:%d\n",
+ __LINE__));
+ c = 0;
}
if ((c & 0x1F ) == TYPE_DISK) {
@@ -3957,7 +3880,7 @@ megaraid_sysfs_alloc_resources(adapter_t *adapter)
megaraid_sysfs_free_resources(adapter);
}
- sema_init(&raid_dev->sysfs_sem, 1);
+ mutex_init(&raid_dev->sysfs_mtx);
init_waitqueue_head(&raid_dev->sysfs_wait_q);
@@ -4058,7 +3981,7 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
/*
* Allow only one read at a time to go through the sysfs attributes
*/
- down(&raid_dev->sysfs_sem);
+ mutex_lock(&raid_dev->sysfs_mtx);
uioc = raid_dev->sysfs_uioc;
mbox64 = raid_dev->sysfs_mbox64;
@@ -4134,7 +4057,7 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
del_timer_sync(timerp);
- up(&raid_dev->sysfs_sem);
+ mutex_unlock(&raid_dev->sysfs_mtx);
return rval;
}
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 9de803cebd4..626459d1e90 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -168,7 +168,7 @@ typedef struct {
* @hw_error : set if FW not responding
* @fast_load : If set, skip physical device scanning
* @channel_class : channel class, RAID or SCSI
- * @sysfs_sem : semaphore to serialize access to sysfs res.
+ * @sysfs_mtx : mutex to serialize access to sysfs res.
* @sysfs_uioc : management packet to issue FW calls from sysfs
* @sysfs_mbox64 : mailbox packet to issue FW calls from sysfs
* @sysfs_buffer : data buffer for FW commands issued from sysfs
@@ -208,7 +208,7 @@ typedef struct {
int hw_error;
int fast_load;
uint8_t channel_class;
- struct semaphore sysfs_sem;
+ struct mutex sysfs_mtx;
uioc_t *sysfs_uioc;
mbox64_t *sysfs_mbox64;
caddr_t sysfs_buffer;
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index e2cf12ef368..b7f2e613c90 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -433,34 +433,15 @@ megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
int sge_count;
struct scatterlist *os_sgl;
- /*
- * Return 0 if there is no data transfer
- */
- if (!scp->request_buffer || !scp->request_bufflen)
- return 0;
+ sge_count = scsi_dma_map(scp);
+ BUG_ON(sge_count < 0);
- if (!scp->use_sg) {
- mfi_sgl->sge32[0].phys_addr = pci_map_single(instance->pdev,
- scp->
- request_buffer,
- scp->
- request_bufflen,
- scp->
- sc_data_direction);
- mfi_sgl->sge32[0].length = scp->request_bufflen;
-
- return 1;
- }
-
- os_sgl = (struct scatterlist *)scp->request_buffer;
- sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg,
- scp->sc_data_direction);
-
- for (i = 0; i < sge_count; i++, os_sgl++) {
- mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
- mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
+ if (sge_count) {
+ scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+ mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
+ mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
+ }
}
-
return sge_count;
}
@@ -481,35 +462,15 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
int sge_count;
struct scatterlist *os_sgl;
- /*
- * Return 0 if there is no data transfer
- */
- if (!scp->request_buffer || !scp->request_bufflen)
- return 0;
-
- if (!scp->use_sg) {
- mfi_sgl->sge64[0].phys_addr = pci_map_single(instance->pdev,
- scp->
- request_buffer,
- scp->
- request_bufflen,
- scp->
- sc_data_direction);
-
- mfi_sgl->sge64[0].length = scp->request_bufflen;
-
- return 1;
- }
-
- os_sgl = (struct scatterlist *)scp->request_buffer;
- sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg,
- scp->sc_data_direction);
+ sge_count = scsi_dma_map(scp);
+ BUG_ON(sge_count < 0);
- for (i = 0; i < sge_count; i++, os_sgl++) {
- mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
- mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
+ if (sge_count) {
+ scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+ mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
+ mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
+ }
}
-
return sge_count;
}
@@ -593,7 +554,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
pthru->cdb_len = scp->cmd_len;
pthru->timeout = 0;
pthru->flags = flags;
- pthru->data_xfer_len = scp->request_bufflen;
+ pthru->data_xfer_len = scsi_bufflen(scp);
memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
@@ -1195,45 +1156,6 @@ megasas_complete_abort(struct megasas_instance *instance,
}
/**
- * megasas_unmap_sgbuf - Unmap SG buffers
- * @instance: Adapter soft state
- * @cmd: Completed command
- */
-static void
-megasas_unmap_sgbuf(struct megasas_instance *instance, struct megasas_cmd *cmd)
-{
- dma_addr_t buf_h;
- u8 opcode;
-
- if (cmd->scmd->use_sg) {
- pci_unmap_sg(instance->pdev, cmd->scmd->request_buffer,
- cmd->scmd->use_sg, cmd->scmd->sc_data_direction);
- return;
- }
-
- if (!cmd->scmd->request_bufflen)
- return;
-
- opcode = cmd->frame->hdr.cmd;
-
- if ((opcode == MFI_CMD_LD_READ) || (opcode == MFI_CMD_LD_WRITE)) {
- if (IS_DMA64)
- buf_h = cmd->frame->io.sgl.sge64[0].phys_addr;
- else
- buf_h = cmd->frame->io.sgl.sge32[0].phys_addr;
- } else {
- if (IS_DMA64)
- buf_h = cmd->frame->pthru.sgl.sge64[0].phys_addr;
- else
- buf_h = cmd->frame->pthru.sgl.sge32[0].phys_addr;
- }
-
- pci_unmap_single(instance->pdev, buf_h, cmd->scmd->request_bufflen,
- cmd->scmd->sc_data_direction);
- return;
-}
-
-/**
* megasas_complete_cmd - Completes a command
* @instance: Adapter soft state
* @cmd: Command to be completed
@@ -1281,7 +1203,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
atomic_dec(&instance->fw_outstanding);
- megasas_unmap_sgbuf(instance, cmd);
+ scsi_dma_unmap(cmd->scmd);
cmd->scmd->scsi_done(cmd->scmd);
megasas_return_cmd(instance, cmd);
@@ -1329,7 +1251,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
atomic_dec(&instance->fw_outstanding);
- megasas_unmap_sgbuf(instance, cmd);
+ scsi_dma_unmap(cmd->scmd);
cmd->scmd->scsi_done(cmd->scmd);
megasas_return_cmd(instance, cmd);
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index e64d1a19d8d..651d09b08f2 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -421,7 +421,7 @@ static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd)
for (i = 0; i < cmd->cmd_len; ++i)
printk(" %x", cmd->cmnd[i]);
printk(" use_sg=%d buffer=%p bufflen=%u\n",
- cmd->use_sg, cmd->request_buffer, cmd->request_bufflen);
+ scsi_sg_count(cmd), scsi_sglist(cmd), scsi_bufflen(cmd));
}
#endif
if (ms->dma_started)
@@ -602,13 +602,16 @@ static void mesh_done(struct mesh_state *ms, int start_next)
cmd->result += (cmd->SCp.Message << 8);
if (DEBUG_TARGET(cmd)) {
printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n",
- cmd->result, ms->data_ptr, cmd->request_bufflen);
+ cmd->result, ms->data_ptr, scsi_bufflen(cmd));
+#if 0
+ /* needs to use sg? */
if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12 || cmd->cmnd[0] == 3)
&& cmd->request_buffer != 0) {
unsigned char *b = cmd->request_buffer;
printk(KERN_DEBUG "buffer = %x %x %x %x %x %x %x %x\n",
b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
}
+#endif
}
cmd->SCp.this_residual -= ms->data_ptr;
mesh_completed(ms, cmd);
@@ -1265,15 +1268,18 @@ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
dcmds = ms->dma_cmds;
dtot = 0;
if (cmd) {
- cmd->SCp.this_residual = cmd->request_bufflen;
- if (cmd->use_sg > 0) {
- int nseg;
+ int nseg;
+
+ cmd->SCp.this_residual = scsi_bufflen(cmd);
+
+ nseg = scsi_dma_map(cmd);
+ BUG_ON(nseg < 0);
+
+ if (nseg) {
total = 0;
- scl = (struct scatterlist *) cmd->request_buffer;
off = ms->data_ptr;
- nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg,
- cmd->sc_data_direction);
- for (i = 0; i <nseg; ++i, ++scl) {
+
+ scsi_for_each_sg(cmd, scl, nseg, i) {
u32 dma_addr = sg_dma_address(scl);
u32 dma_len = sg_dma_len(scl);
@@ -1292,16 +1298,6 @@ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
dtot += dma_len - off;
off = 0;
}
- } else if (ms->data_ptr < cmd->request_bufflen) {
- dtot = cmd->request_bufflen - ms->data_ptr;
- if (dtot > 0xffff)
- panic("mesh: transfer size >= 64k");
- st_le16(&dcmds->req_count, dtot);
- /* XXX Use pci DMA API here ... */
- st_le32(&dcmds->phy_addr,
- virt_to_phys(cmd->request_buffer) + ms->data_ptr);
- dcmds->xfer_status = 0;
- ++dcmds;
}
}
if (dtot == 0) {
@@ -1356,18 +1352,14 @@ static void halt_dma(struct mesh_state *ms)
dumplog(ms, ms->conn_tgt);
dumpslog(ms);
#endif /* MESH_DBG */
- } else if (cmd && cmd->request_bufflen != 0 &&
- ms->data_ptr > cmd->request_bufflen) {
+ } else if (cmd && scsi_bufflen(cmd) &&
+ ms->data_ptr > scsi_bufflen(cmd)) {
printk(KERN_DEBUG "mesh: target %d overrun, "
"data_ptr=%x total=%x goes_out=%d\n",
- ms->conn_tgt, ms->data_ptr, cmd->request_bufflen,
+ ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd),
ms->tgts[ms->conn_tgt].data_goes_out);
}
- if (cmd->use_sg != 0) {
- struct scatterlist *sg;
- sg = (struct scatterlist *)cmd->request_buffer;
- pci_unmap_sg(ms->pdev, sg, cmd->use_sg, cmd->sc_data_direction);
- }
+ scsi_dma_unmap(cmd);
ms->dma_started = 0;
}
diff --git a/drivers/scsi/mvme16x.c b/drivers/scsi/mvme16x.c
deleted file mode 100644
index 575fe6f7e0e..00000000000
--- a/drivers/scsi/mvme16x.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Detection routine for the NCR53c710 based MVME16x SCSI Controllers for Linux.
- *
- * Based on work by Alan Hourihane
- */
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/blkdev.h>
-
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/mvme16xhw.h>
-#include <asm/irq.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "53c7xx.h"
-#include "mvme16x.h"
-
-#include<linux/stat.h>
-
-
-int mvme16x_scsi_detect(struct scsi_host_template *tpnt)
-{
- static unsigned char called = 0;
- int clock;
- long long options;
-
- if (!MACH_IS_MVME16x)
- return 0;
- if (mvme16x_config & MVME16x_CONFIG_NO_SCSICHIP) {
- printk ("SCSI detection disabled, SCSI chip not present\n");
- return 0;
- }
- if (called)
- return 0;
-
- tpnt->proc_name = "MVME16x";
-
- options = OPTION_MEMORY_MAPPED|OPTION_DEBUG_TEST1|OPTION_INTFLY|OPTION_SYNCHRONOUS|OPTION_ALWAYS_SYNCHRONOUS|OPTION_DISCONNECT;
-
- clock = 66000000; /* 66MHz SCSI Clock */
-
- ncr53c7xx_init(tpnt, 0, 710, (unsigned long)0xfff47000,
- 0, MVME16x_IRQ_SCSI, DMA_NONE,
- options, clock);
- called = 1;
- return 1;
-}
-
-static int mvme16x_scsi_release(struct Scsi_Host *shost)
-{
- if (shost->irq)
- free_irq(shost->irq, NULL);
- if (shost->dma_channel != 0xff)
- free_dma(shost->dma_channel);
- if (shost->io_port && shost->n_io_port)
- release_region(shost->io_port, shost->n_io_port);
- scsi_unregister(shost);
- return 0;
-}
-
-static struct scsi_host_template driver_template = {
- .name = "MVME16x NCR53c710 SCSI",
- .detect = mvme16x_scsi_detect,
- .release = mvme16x_scsi_release,
- .queuecommand = NCR53c7xx_queue_command,
- .abort = NCR53c7xx_abort,
- .reset = NCR53c7xx_reset,
- .can_queue = 24,
- .this_id = 7,
- .sg_tablesize = 63,
- .cmd_per_lun = 3,
- .use_clustering = DISABLE_CLUSTERING
-};
-
-
-#include "scsi_module.c"
diff --git a/drivers/scsi/mvme16x.h b/drivers/scsi/mvme16x.h
deleted file mode 100644
index 73e33b37a3f..00000000000
--- a/drivers/scsi/mvme16x.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef MVME16x_SCSI_H
-#define MVME16x_SCSI_H
-
-#include <linux/types.h>
-
-int mvme16x_scsi_detect(struct scsi_host_template *);
-const char *NCR53c7x0_info(void);
-int NCR53c7xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
-int NCR53c7xx_abort(Scsi_Cmnd *);
-int NCR53c7x0_release (struct Scsi_Host *);
-int NCR53c7xx_reset(Scsi_Cmnd *, unsigned int);
-void NCR53c7x0_intr(int irq, void *dev_id);
-
-#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 3
-#endif
-
-#ifndef CAN_QUEUE
-#define CAN_QUEUE 24
-#endif
-
-#include <scsi/scsicam.h>
-
-#endif /* MVME16x_SCSI_H */
diff --git a/drivers/scsi/mvme16x_scsi.c b/drivers/scsi/mvme16x_scsi.c
new file mode 100644
index 00000000000..d6ef22a941c
--- /dev/null
+++ b/drivers/scsi/mvme16x_scsi.c
@@ -0,0 +1,158 @@
+/*
+ * Detection routine for the NCR53c710 based MVME16x SCSI Controllers for Linux.
+ *
+ * Based on work by Alan Hourihane
+ *
+ * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
+ */
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/mvme16xhw.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "53c700.h"
+
+MODULE_AUTHOR("Kars de Jong <jongk@linux-m68k.org>");
+MODULE_DESCRIPTION("MVME16x NCR53C710 driver");
+MODULE_LICENSE("GPL");
+
+static struct scsi_host_template mvme16x_scsi_driver_template = {
+ .name = "MVME16x NCR53c710 SCSI",
+ .proc_name = "MVME16x",
+ .this_id = 7,
+ .module = THIS_MODULE,
+};
+
+static struct platform_device *mvme16x_scsi_device;
+
+static __devinit int
+mvme16x_probe(struct device *dev)
+{
+ struct Scsi_Host * host = NULL;
+ struct NCR_700_Host_Parameters *hostdata;
+
+ if (!MACH_IS_MVME16x)
+ goto out;
+
+ if (mvme16x_config & MVME16x_CONFIG_NO_SCSICHIP) {
+ printk(KERN_INFO "mvme16x-scsi: detection disabled, "
+ "SCSI chip not present\n");
+ goto out;
+ }
+
+ hostdata = kmalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
+ if (hostdata == NULL) {
+ printk(KERN_ERR "mvme16x-scsi: "
+ "Failed to allocate host data\n");
+ goto out;
+ }
+ memset(hostdata, 0, sizeof(struct NCR_700_Host_Parameters));
+
+ /* Fill in the required pieces of hostdata */
+ hostdata->base = (void __iomem *)0xfff47000UL;
+ hostdata->clock = 50; /* XXX - depends on the CPU clock! */
+ hostdata->chip710 = 1;
+ hostdata->dmode_extra = DMODE_FC2;
+ hostdata->dcntl_extra = EA_710;
+ hostdata->ctest7_extra = CTEST7_TT1;
+
+ /* and register the chip */
+ host = NCR_700_detect(&mvme16x_scsi_driver_template, hostdata, dev);
+ if (!host) {
+ printk(KERN_ERR "mvme16x-scsi: No host detected; "
+ "board configuration problem?\n");
+ goto out_free;
+ }
+ host->this_id = 7;
+ host->base = 0xfff47000UL;
+ host->irq = MVME16x_IRQ_SCSI;
+ if (request_irq(host->irq, NCR_700_intr, 0, "mvme16x-scsi", host)) {
+ printk(KERN_ERR "mvme16x-scsi: request_irq failed\n");
+ goto out_put_host;
+ }
+
+ /* Enable scsi chip ints */
+ {
+ volatile unsigned long v;
+
+ /* Enable scsi interrupts at level 4 in PCCchip2 */
+ v = in_be32(0xfff4202c);
+ v = (v & ~0xff) | 0x10 | 4;
+ out_be32(0xfff4202c, v);
+ }
+
+ scsi_scan_host(host);
+
+ return 0;
+
+ out_put_host:
+ scsi_host_put(host);
+ out_free:
+ kfree(hostdata);
+ out:
+ return -ENODEV;
+}
+
+static __devexit int
+mvme16x_device_remove(struct device *dev)
+{
+ struct Scsi_Host *host = dev_to_shost(dev);
+ struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
+
+ /* Disable scsi chip ints */
+ {
+ volatile unsigned long v;
+
+ v = in_be32(0xfff4202c);
+ v &= ~0x10;
+ out_be32(0xfff4202c, v);
+ }
+ scsi_remove_host(host);
+ NCR_700_release(host);
+ kfree(hostdata);
+ free_irq(host->irq, host);
+
+ return 0;
+}
+
+static struct device_driver mvme16x_scsi_driver = {
+ .name = "mvme16x-scsi",
+ .bus = &platform_bus_type,
+ .probe = mvme16x_probe,
+ .remove = __devexit_p(mvme16x_device_remove),
+};
+
+static int __init mvme16x_scsi_init(void)
+{
+ int err;
+
+ err = driver_register(&mvme16x_scsi_driver);
+ if (err)
+ return err;
+
+ mvme16x_scsi_device = platform_device_register_simple("mvme16x-scsi",
+ -1, NULL, 0);
+ if (IS_ERR(mvme16x_scsi_device)) {
+ driver_unregister(&mvme16x_scsi_driver);
+ return PTR_ERR(mvme16x_scsi_device);
+ }
+
+ return 0;
+}
+
+static void __exit mvme16x_scsi_exit(void)
+{
+ platform_device_unregister(mvme16x_scsi_device);
+ driver_unregister(&mvme16x_scsi_driver);
+}
+
+module_init(mvme16x_scsi_init);
+module_exit(mvme16x_scsi_exit);
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index bbf521cbc55..030ba49f33f 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -529,43 +529,20 @@ static void __unmap_scsi_data(struct device *dev, struct scsi_cmnd *cmd)
{
switch(cmd->__data_mapped) {
case 2:
- dma_unmap_sg(dev, cmd->request_buffer, cmd->use_sg,
- cmd->sc_data_direction);
- break;
- case 1:
- dma_unmap_single(dev, cmd->__data_mapping,
- cmd->request_bufflen,
- cmd->sc_data_direction);
+ scsi_dma_unmap(cmd);
break;
}
cmd->__data_mapped = 0;
}
-static u_long __map_scsi_single_data(struct device *dev, struct scsi_cmnd *cmd)
-{
- dma_addr_t mapping;
-
- if (cmd->request_bufflen == 0)
- return 0;
-
- mapping = dma_map_single(dev, cmd->request_buffer,
- cmd->request_bufflen,
- cmd->sc_data_direction);
- cmd->__data_mapped = 1;
- cmd->__data_mapping = mapping;
-
- return mapping;
-}
-
static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd)
{
int use_sg;
- if (cmd->use_sg == 0)
+ use_sg = scsi_dma_map(cmd);
+ if (!use_sg)
return 0;
- use_sg = dma_map_sg(dev, cmd->request_buffer, cmd->use_sg,
- cmd->sc_data_direction);
cmd->__data_mapped = 2;
cmd->__data_mapping = use_sg;
@@ -573,7 +550,6 @@ static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd)
}
#define unmap_scsi_data(np, cmd) __unmap_scsi_data(np->dev, cmd)
-#define map_scsi_single_data(np, cmd) __map_scsi_single_data(np->dev, cmd)
#define map_scsi_sg_data(np, cmd) __map_scsi_sg_data(np->dev, cmd)
/*==========================================================
@@ -7667,39 +7643,16 @@ fail:
** sizes to the data segment array.
*/
-static int ncr_scatter_no_sglist(struct ncb *np, struct ccb *cp, struct scsi_cmnd *cmd)
-{
- struct scr_tblmove *data = &cp->phys.data[MAX_SCATTER - 1];
- int segment;
-
- cp->data_len = cmd->request_bufflen;
-
- if (cmd->request_bufflen) {
- dma_addr_t baddr = map_scsi_single_data(np, cmd);
- if (baddr) {
- ncr_build_sge(np, data, baddr, cmd->request_bufflen);
- segment = 1;
- } else {
- segment = -2;
- }
- } else {
- segment = 0;
- }
-
- return segment;
-}
-
static int ncr_scatter(struct ncb *np, struct ccb *cp, struct scsi_cmnd *cmd)
{
int segment = 0;
- int use_sg = (int) cmd->use_sg;
+ int use_sg = scsi_sg_count(cmd);
cp->data_len = 0;
- if (!use_sg)
- segment = ncr_scatter_no_sglist(np, cp, cmd);
- else if ((use_sg = map_scsi_sg_data(np, cmd)) > 0) {
- struct scatterlist *scatter = (struct scatterlist *)cmd->request_buffer;
+ use_sg = map_scsi_sg_data(np, cmd);
+ if (use_sg > 0) {
+ struct scatterlist *sg;
struct scr_tblmove *data;
if (use_sg > MAX_SCATTER) {
@@ -7709,16 +7662,15 @@ static int ncr_scatter(struct ncb *np, struct ccb *cp, struct scsi_cmnd *cmd)
data = &cp->phys.data[MAX_SCATTER - use_sg];
- for (segment = 0; segment < use_sg; segment++) {
- dma_addr_t baddr = sg_dma_address(&scatter[segment]);
- unsigned int len = sg_dma_len(&scatter[segment]);
+ scsi_for_each_sg(cmd, sg, use_sg, segment) {
+ dma_addr_t baddr = sg_dma_address(sg);
+ unsigned int len = sg_dma_len(sg);
ncr_build_sge(np, &data[segment], baddr, len);
cp->data_len += len;
}
- } else {
+ } else
segment = -2;
- }
return segment;
}
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 3e9765f0281..7fed3537215 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -49,10 +49,6 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
-# include <linux/blk.h>
-#endif
-
#include "nsp32.h"
@@ -199,17 +195,9 @@ static int __init init_nsp32 (void);
static void __exit exit_nsp32 (void);
/* struct struct scsi_host_template */
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int);
-#else
-static int nsp32_proc_info (char *, char **, off_t, int, int, int);
-#endif
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
static int nsp32_detect (struct pci_dev *pdev);
-#else
-static int nsp32_detect (struct scsi_host_template *);
-#endif
static int nsp32_queuecommand(struct scsi_cmnd *,
void (*done)(struct scsi_cmnd *));
static const char *nsp32_info (struct Scsi_Host *);
@@ -296,15 +284,7 @@ static struct scsi_host_template nsp32_template = {
.eh_abort_handler = nsp32_eh_abort,
.eh_bus_reset_handler = nsp32_eh_bus_reset,
.eh_host_reset_handler = nsp32_eh_host_reset,
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,74))
- .detect = nsp32_detect,
- .release = nsp32_release,
-#endif
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,2))
- .use_new_eh_code = 1,
-#else
/* .highmem_io = 1, */
-#endif
};
#include "nsp32_io.h"
@@ -739,7 +719,7 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
command = 0;
command |= (TRANSFER_GO | ALL_COUNTER_CLR);
if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
- if (SCpnt->request_bufflen > 0) {
+ if (scsi_bufflen(SCpnt) > 0) {
command |= BM_START;
}
} else if (data->trans_method & NSP32_TRANSFER_MMIO) {
@@ -888,31 +868,28 @@ static int nsp32_reselection(struct scsi_cmnd *SCpnt, unsigned char newlun)
static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- struct scatterlist *sgl;
+ struct scatterlist *sg;
nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
int num, i;
u32_le l;
- if (SCpnt->request_bufflen == 0) {
- return TRUE;
- }
-
if (sgt == NULL) {
nsp32_dbg(NSP32_DEBUG_SGLIST, "SGT == null");
return FALSE;
}
- if (SCpnt->use_sg) {
- sgl = (struct scatterlist *)SCpnt->request_buffer;
- num = pci_map_sg(data->Pci, sgl, SCpnt->use_sg,
- SCpnt->sc_data_direction);
- for (i = 0; i < num; i++) {
+ num = scsi_dma_map(SCpnt);
+ if (!num)
+ return TRUE;
+ else if (num < 0)
+ return FALSE;
+ else {
+ scsi_for_each_sg(SCpnt, sg, num, i) {
/*
* Build nsp32_sglist, substitute sg dma addresses.
*/
- sgt[i].addr = cpu_to_le32(sg_dma_address(sgl));
- sgt[i].len = cpu_to_le32(sg_dma_len(sgl));
- sgl++;
+ sgt[i].addr = cpu_to_le32(sg_dma_address(sg));
+ sgt[i].len = cpu_to_le32(sg_dma_len(sg));
if (le32_to_cpu(sgt[i].len) > 0x10000) {
nsp32_msg(KERN_ERR,
@@ -929,23 +906,6 @@ static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
/* set end mark */
l = le32_to_cpu(sgt[num-1].len);
sgt[num-1].len = cpu_to_le32(l | SGTEND);
-
- } else {
- SCpnt->SCp.have_data_in = pci_map_single(data->Pci,
- SCpnt->request_buffer, SCpnt->request_bufflen,
- SCpnt->sc_data_direction);
-
- sgt[0].addr = cpu_to_le32(SCpnt->SCp.have_data_in);
- sgt[0].len = cpu_to_le32(SCpnt->request_bufflen | SGTEND); /* set end mark */
-
- if (SCpnt->request_bufflen > 0x10000) {
- nsp32_msg(KERN_ERR,
- "can't transfer over 64KB at a time, size=0x%lx", SCpnt->request_bufflen);
- return FALSE;
- }
- nsp32_dbg(NSP32_DEBUG_SGLIST, "single : addr 0x%lx len=0x%lx",
- le32_to_cpu(sgt[0].addr),
- le32_to_cpu(sgt[0].len ));
}
return TRUE;
@@ -962,7 +922,7 @@ static int nsp32_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
"enter. target: 0x%x LUN: 0x%x cmnd: 0x%x cmndlen: 0x%x "
"use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x",
SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len,
- SCpnt->use_sg, SCpnt->request_buffer, SCpnt->request_bufflen);
+ scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt));
if (data->CurrentSC != NULL) {
nsp32_msg(KERN_ERR, "Currentsc != NULL. Cancel this command request");
@@ -994,10 +954,10 @@ static int nsp32_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
data->CurrentSC = SCpnt;
SCpnt->SCp.Status = CHECK_CONDITION;
SCpnt->SCp.Message = 0;
- SCpnt->resid = SCpnt->request_bufflen;
+ scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
- SCpnt->SCp.ptr = (char *) SCpnt->request_buffer;
- SCpnt->SCp.this_residual = SCpnt->request_bufflen;
+ SCpnt->SCp.ptr = (char *)scsi_sglist(SCpnt);
+ SCpnt->SCp.this_residual = scsi_bufflen(SCpnt);
SCpnt->SCp.buffer = NULL;
SCpnt->SCp.buffers_residual = 0;
@@ -1210,13 +1170,9 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
unsigned long flags;
int ret;
int handled = 0;
-
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
struct Scsi_Host *host = data->Host;
+
spin_lock_irqsave(host->host_lock, flags);
-#else
- spin_lock_irqsave(&io_request_lock, flags);
-#endif
/*
* IRQ check, then enable IRQ mask
@@ -1312,7 +1268,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
}
if ((auto_stat & DATA_IN_PHASE) &&
- (SCpnt->resid > 0) &&
+ (scsi_get_resid(SCpnt) > 0) &&
((nsp32_read2(base, FIFO_REST_CNT) & FIFO_REST_MASK) != 0)) {
printk( "auto+fifo\n");
//nsp32_pio_read(SCpnt);
@@ -1333,7 +1289,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx",
nsp32_read4(base, SAVED_SACK_CNT));
- SCpnt->resid = 0; /* all data transfered! */
+ scsi_set_resid(SCpnt, 0); /* all data transferred! */
}
/*
@@ -1480,11 +1436,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
nsp32_write2(base, IRQ_CONTROL, 0);
out2:
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
spin_unlock_irqrestore(host->host_lock, flags);
-#else
- spin_unlock_irqrestore(&io_request_lock, flags);
-#endif
nsp32_dbg(NSP32_DEBUG_INTR, "exit");
@@ -1499,28 +1451,15 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
nsp32_dbg(NSP32_DEBUG_PROC, "buffer=0x%p pos=0x%p length=%d %d\n", buffer, pos, length, length - (pos - buffer));\
} \
} while(0)
-static int nsp32_proc_info(
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
- struct Scsi_Host *host,
-#endif
- char *buffer,
- char **start,
- off_t offset,
- int length,
-#if !(LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
- int hostno,
-#endif
- int inout)
+
+static int nsp32_proc_info(struct Scsi_Host *host, char *buffer, char **start,
+ off_t offset, int length, int inout)
{
char *pos = buffer;
int thislength;
unsigned long flags;
nsp32_hw_data *data;
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
int hostno;
-#else
- struct Scsi_Host *host;
-#endif
unsigned int base;
unsigned char mode_reg;
int id, speed;
@@ -1531,15 +1470,7 @@ static int nsp32_proc_info(
return -EINVAL;
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
hostno = host->host_no;
-#else
- /* search this HBA host */
- host = scsi_host_hn_get(hostno);
- if (host == NULL) {
- return -ESRCH;
- }
-#endif
data = (nsp32_hw_data *)host->hostdata;
base = host->io_port;
@@ -1626,25 +1557,8 @@ static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
unsigned int base = SCpnt->device->host->io_port;
- /*
- * unmap pci
- */
- if (SCpnt->request_bufflen == 0) {
- goto skip;
- }
+ scsi_dma_unmap(SCpnt);
- if (SCpnt->use_sg) {
- pci_unmap_sg(data->Pci,
- (struct scatterlist *)SCpnt->request_buffer,
- SCpnt->use_sg, SCpnt->sc_data_direction);
- } else {
- pci_unmap_single(data->Pci,
- (u32)SCpnt->SCp.have_data_in,
- SCpnt->request_bufflen,
- SCpnt->sc_data_direction);
- }
-
- skip:
/*
* clear TRANSFERCONTROL_BM_START
*/
@@ -1800,7 +1714,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
SCpnt->SCp.Message = 0;
nsp32_dbg(NSP32_DEBUG_BUSFREE,
"normal end stat=0x%x resid=0x%x\n",
- SCpnt->SCp.Status, SCpnt->resid);
+ SCpnt->SCp.Status, scsi_get_resid(SCpnt));
SCpnt->result = (DID_OK << 16) |
(SCpnt->SCp.Message << 8) |
(SCpnt->SCp.Status << 0);
@@ -1844,7 +1758,7 @@ static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen
unsigned int restlen, sentlen;
u32_le len, addr;
- nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", SCpnt->resid);
+ nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", scsi_get_resid(SCpnt));
/* adjust saved SACK count with 4 byte start address boundary */
s_sacklen -= le32_to_cpu(sgt[old_entry].addr) & 3;
@@ -1888,12 +1802,12 @@ static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen
return;
last:
- if (SCpnt->resid < sentlen) {
+ if (scsi_get_resid(SCpnt) < sentlen) {
nsp32_msg(KERN_ERR, "resid underflow");
}
- SCpnt->resid -= sentlen;
- nsp32_dbg(NSP32_DEBUG_SGLIST, "new resid=0x%x", SCpnt->resid);
+ scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) - sentlen);
+ nsp32_dbg(NSP32_DEBUG_SGLIST, "new resid=0x%x", scsi_get_resid(SCpnt));
/* update hostdata and lun */
@@ -2022,7 +1936,7 @@ static void nsp32_restart_autoscsi(struct scsi_cmnd *SCpnt, unsigned short comma
transfer = 0;
transfer |= (TRANSFER_GO | ALL_COUNTER_CLR);
if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
- if (SCpnt->request_bufflen > 0) {
+ if (scsi_bufflen(SCpnt) > 0) {
transfer |= BM_START;
}
} else if (data->trans_method & NSP32_TRANSFER_MMIO) {
@@ -2674,17 +2588,7 @@ static void nsp32_sack_negate(nsp32_hw_data *data)
* 0x900-0xbff: (map same 0x800-0x8ff I/O port image repeatedly)
* 0xc00-0xfff: CardBus status registers
*/
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
-#define DETECT_OK 0
-#define DETECT_NG 1
-#define PCIDEV pdev
static int nsp32_detect(struct pci_dev *pdev)
-#else
-#define DETECT_OK 1
-#define DETECT_NG 0
-#define PCIDEV (data->Pci)
-static int nsp32_detect(struct scsi_host_template *sht)
-#endif
{
struct Scsi_Host *host; /* registered host structure */
struct resource *res;
@@ -2697,11 +2601,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
/*
* register this HBA as SCSI device
*/
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
host = scsi_host_alloc(&nsp32_template, sizeof(nsp32_hw_data));
-#else
- host = scsi_register(sht, sizeof(nsp32_hw_data));
-#endif
if (host == NULL) {
nsp32_msg (KERN_ERR, "failed to scsi register");
goto err;
@@ -2719,9 +2619,6 @@ static int nsp32_detect(struct scsi_host_template *sht)
host->unique_id = data->BaseAddress;
host->n_io_port = data->NumAddress;
host->base = (unsigned long)data->MmioAddress;
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,63))
- scsi_set_pci_device(host, PCIDEV);
-#endif
data->Host = host;
spin_lock_init(&(data->Lock));
@@ -2776,7 +2673,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
/*
* setup DMA
*/
- if (pci_set_dma_mask(PCIDEV, DMA_32BIT_MASK) != 0) {
+ if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
goto scsi_unregister;
}
@@ -2784,7 +2681,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
/*
* allocate autoparam DMA resource.
*/
- data->autoparam = pci_alloc_consistent(PCIDEV, sizeof(nsp32_autoparam), &(data->auto_paddr));
+ data->autoparam = pci_alloc_consistent(pdev, sizeof(nsp32_autoparam), &(data->auto_paddr));
if (data->autoparam == NULL) {
nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
goto scsi_unregister;
@@ -2793,7 +2690,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
/*
* allocate scatter-gather DMA resource.
*/
- data->sg_list = pci_alloc_consistent(PCIDEV, NSP32_SG_TABLE_SIZE,
+ data->sg_list = pci_alloc_consistent(pdev, NSP32_SG_TABLE_SIZE,
&(data->sg_paddr));
if (data->sg_list == NULL) {
nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
@@ -2883,16 +2780,14 @@ static int nsp32_detect(struct scsi_host_template *sht)
goto free_irq;
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
- ret = scsi_add_host(host, &PCIDEV->dev);
+ ret = scsi_add_host(host, &pdev->dev);
if (ret) {
nsp32_msg(KERN_ERR, "failed to add scsi host");
goto free_region;
}
scsi_scan_host(host);
-#endif
- pci_set_drvdata(PCIDEV, host);
- return DETECT_OK;
+ pci_set_drvdata(pdev, host);
+ return 0;
free_region:
release_region(host->io_port, host->n_io_port);
@@ -2901,22 +2796,19 @@ static int nsp32_detect(struct scsi_host_template *sht)
free_irq(host->irq, data);
free_sg_list:
- pci_free_consistent(PCIDEV, NSP32_SG_TABLE_SIZE,
+ pci_free_consistent(pdev, NSP32_SG_TABLE_SIZE,
data->sg_list, data->sg_paddr);
free_autoparam:
- pci_free_consistent(PCIDEV, sizeof(nsp32_autoparam),
+ pci_free_consistent(pdev, sizeof(nsp32_autoparam),
data->autoparam, data->auto_paddr);
scsi_unregister:
scsi_host_put(host);
err:
- return DETECT_NG;
+ return 1;
}
-#undef DETECT_OK
-#undef DETECT_NG
-#undef PCIDEV
static int nsp32_release(struct Scsi_Host *host)
{
@@ -3516,11 +3408,7 @@ static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_i
pci_set_master(pdev);
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
ret = nsp32_detect(pdev);
-#else
- ret = scsi_register_host(&nsp32_template);
-#endif
nsp32_msg(KERN_INFO, "irq: %i mmio: %p+0x%lx slot: %s model: %s",
pdev->irq,
@@ -3535,25 +3423,17 @@ static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_i
static void __devexit nsp32_remove(struct pci_dev *pdev)
{
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
struct Scsi_Host *host = pci_get_drvdata(pdev);
-#endif
nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
scsi_remove_host(host);
nsp32_release(host);
scsi_host_put(host);
-#else
- scsi_unregister_host(&nsp32_template);
-#endif
}
-
-
static struct pci_driver nsp32_driver = {
.name = "nsp32",
.id_table = nsp32_pci_table,
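
The nsp32 hunks above are a straight conversion to the mid-layer data buffer accessors: the driver no longer reads cmd->use_sg / request_buffer / request_bufflen or calls pci_map_sg()/pci_unmap_sg() itself. A minimal sketch of the resulting pattern in a hypothetical LLD (the example_* names are placeholders, not nsp32 functions):

#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* Stand-in for programming one DMA descriptor on the hypothetical HBA. */
static void example_load_segment(dma_addr_t addr, unsigned int len)
{
}

static int example_queue_dma(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int nseg, i;

	nseg = scsi_dma_map(cmd);		/* replaces pci_map_sg()/pci_map_single() */
	if (nseg < 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	scsi_for_each_sg(cmd, sg, nseg, i)	/* replaces the use_sg/request_buffer walk */
		example_load_segment(sg_dma_address(sg), sg_dma_len(sg));

	return 0;
}

static void example_complete(struct scsi_cmnd *cmd, unsigned int bytes_not_done)
{
	scsi_dma_unmap(cmd);			/* replaces pci_unmap_sg()/pci_unmap_single() */
	scsi_set_resid(cmd, bytes_not_done);	/* residual via accessor, not cmd->resid */
}

scsi_dma_map() hides whether the command carries a scatterlist at all, which is why the converted drivers can drop their single-buffer special cases and the matching unmap branches.
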
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index ffe75c431b2..2695b7187b2 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -370,8 +370,6 @@ SYM53C500_intr(int irq, void *dev_id)
DEB(unsigned char seq_reg;)
unsigned char status, int_reg;
unsigned char pio_status;
- struct scatterlist *sglist;
- unsigned int sgcount;
int port_base = dev->io_port;
struct sym53c500_data *data =
(struct sym53c500_data *)dev->hostdata;
@@ -434,20 +432,19 @@ SYM53C500_intr(int irq, void *dev_id)
switch (status & 0x07) { /* scsi phase */
case 0x00: /* DATA-OUT */
if (int_reg & 0x10) { /* Target requesting info transfer */
+ struct scatterlist *sg;
+ int i;
+
curSC->SCp.phase = data_out;
VDEB(printk("SYM53C500: Data-Out phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
- LOAD_DMA_COUNT(port_base, curSC->request_bufflen); /* Max transfer size */
+ LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG);
- if (!curSC->use_sg) /* Don't use scatter-gather */
- SYM53C500_pio_write(fast_pio, port_base, curSC->request_buffer, curSC->request_bufflen);
- else { /* use scatter-gather */
- sgcount = curSC->use_sg;
- sglist = curSC->request_buffer;
- while (sgcount--) {
- SYM53C500_pio_write(fast_pio, port_base, page_address(sglist->page) + sglist->offset, sglist->length);
- sglist++;
- }
+
+ scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) {
+ SYM53C500_pio_write(fast_pio, port_base,
+ page_address(sg->page) + sg->offset,
+ sg->length);
}
REG0(port_base);
}
@@ -455,20 +452,19 @@ SYM53C500_intr(int irq, void *dev_id)
case 0x01: /* DATA-IN */
if (int_reg & 0x10) { /* Target requesting info transfer */
+ struct scatterlist *sg;
+ int i;
+
curSC->SCp.phase = data_in;
VDEB(printk("SYM53C500: Data-In phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
- LOAD_DMA_COUNT(port_base, curSC->request_bufflen); /* Max transfer size */
+ LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG);
- if (!curSC->use_sg) /* Don't use scatter-gather */
- SYM53C500_pio_read(fast_pio, port_base, curSC->request_buffer, curSC->request_bufflen);
- else { /* Use scatter-gather */
- sgcount = curSC->use_sg;
- sglist = curSC->request_buffer;
- while (sgcount--) {
- SYM53C500_pio_read(fast_pio, port_base, page_address(sglist->page) + sglist->offset, sglist->length);
- sglist++;
- }
+
+ scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) {
+ SYM53C500_pio_read(fast_pio, port_base,
+ page_address(sg->page) + sg->offset,
+ sg->length);
}
REG0(port_base);
}
@@ -578,7 +574,7 @@ SYM53C500_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n",
SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->device->id,
- SCpnt->device->lun, SCpnt->request_bufflen));
+ SCpnt->device->lun, scsi_bufflen(SCpnt)));
VDEB(for (i = 0; i < SCpnt->cmd_len; i++)
printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i]));
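
sym53c500_cs takes the same accessor approach for its PIO path: it walks the scatter-gather list and touches each segment through its kernel mapping rather than a DMA address. A rough sketch of that loop, assuming the scatterlist layout of this kernel where ->page, ->offset and ->length are still direct members (example_pio_write stands in for the chip-specific PIO helper):

#include <linux/mm.h>
#include <scsi/scsi_cmnd.h>

/* Placeholder for the chip-specific "push these bytes out the port" helper. */
static void example_pio_write(void *buf, unsigned int len)
{
}

static void example_pio_data_out(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int i;

	/* One pass per S/G element; page_address() assumes lowmem buffers,
	 * which is what these old PIO-only drivers rely on. */
	scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i)
		example_pio_write(page_address(sg->page) + sg->offset, sg->length);
}
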
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 584ba4d6e03..2f1fa1eb7e9 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -129,11 +129,11 @@ static inline int ppa_proc_write(ppa_struct *dev, char *buffer, int length)
if ((length > 10) && (strncmp(buffer, "recon_tmo=", 10) == 0)) {
x = simple_strtoul(buffer + 10, NULL, 0);
dev->recon_tmo = x;
- printk("ppa: recon_tmo set to %ld\n", x);
+ printk(KERN_INFO "ppa: recon_tmo set to %ld\n", x);
return length;
}
- printk("ppa /proc: invalid variable\n");
- return (-EINVAL);
+ printk(KERN_WARNING "ppa /proc: invalid variable\n");
+ return -EINVAL;
}
static int ppa_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int inout)
@@ -216,7 +216,7 @@ static unsigned char ppa_wait(ppa_struct *dev)
/* Counter expired - Time out occurred */
ppa_fail(dev, DID_TIME_OUT);
- printk("ppa timeout in ppa_wait\n");
+ printk(KERN_WARNING "ppa timeout in ppa_wait\n");
return 0; /* command timed out */
}
@@ -248,7 +248,7 @@ static inline void ecp_sync(ppa_struct *dev)
return;
udelay(5);
}
- printk("ppa: ECP sync failed as data still present in FIFO.\n");
+ printk(KERN_WARNING "ppa: ECP sync failed as data still present in FIFO.\n");
}
}
@@ -328,7 +328,7 @@ static int ppa_out(ppa_struct *dev, char *buffer, int len)
break;
default:
- printk("PPA: bug in ppa_out()\n");
+ printk(KERN_ERR "PPA: bug in ppa_out()\n");
r = 0;
}
return r;
@@ -381,7 +381,7 @@ static int ppa_in(ppa_struct *dev, char *buffer, int len)
break;
default:
- printk("PPA: bug in ppa_ins()\n");
+ printk(KERN_ERR "PPA: bug in ppa_ins()\n");
r = 0;
break;
}
@@ -633,7 +633,7 @@ static void ppa_interrupt(struct work_struct *work)
struct scsi_cmnd *cmd = dev->cur_cmd;
if (!cmd) {
- printk("PPA: bug in ppa_interrupt\n");
+ printk(KERN_ERR "PPA: bug in ppa_interrupt\n");
return;
}
if (ppa_engine(dev, cmd)) {
@@ -646,31 +646,31 @@ static void ppa_interrupt(struct work_struct *work)
case DID_OK:
break;
case DID_NO_CONNECT:
- printk("ppa: no device at SCSI ID %i\n", cmd->device->target);
+ printk(KERN_DEBUG "ppa: no device at SCSI ID %i\n", cmd->device->target);
break;
case DID_BUS_BUSY:
- printk("ppa: BUS BUSY - EPP timeout detected\n");
+ printk(KERN_DEBUG "ppa: BUS BUSY - EPP timeout detected\n");
break;
case DID_TIME_OUT:
- printk("ppa: unknown timeout\n");
+ printk(KERN_DEBUG "ppa: unknown timeout\n");
break;
case DID_ABORT:
- printk("ppa: told to abort\n");
+ printk(KERN_DEBUG "ppa: told to abort\n");
break;
case DID_PARITY:
- printk("ppa: parity error (???)\n");
+ printk(KERN_DEBUG "ppa: parity error (???)\n");
break;
case DID_ERROR:
- printk("ppa: internal driver error\n");
+ printk(KERN_DEBUG "ppa: internal driver error\n");
break;
case DID_RESET:
- printk("ppa: told to reset device\n");
+ printk(KERN_DEBUG "ppa: told to reset device\n");
break;
case DID_BAD_INTR:
- printk("ppa: bad interrupt (???)\n");
+ printk(KERN_WARNING "ppa: bad interrupt (???)\n");
break;
default:
- printk("ppa: bad return code (%02x)\n",
+ printk(KERN_WARNING "ppa: bad return code (%02x)\n",
(cmd->result >> 16) & 0xff);
}
#endif
@@ -724,8 +724,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
if (retv) {
if (time_after(jiffies, dev->jstart + (1 * HZ))) {
- printk
- ("ppa: Parallel port cable is unplugged!!\n");
+ printk(KERN_ERR "ppa: Parallel port cable is unplugged.\n");
ppa_fail(dev, DID_BUS_BUSY);
return 0;
} else {
@@ -755,11 +754,9 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
case 4: /* Phase 4 - Setup scatter/gather buffers */
if (cmd->use_sg) {
/* if many buffers are available, start filling the first */
- cmd->SCp.buffer =
- (struct scatterlist *) cmd->request_buffer;
+ cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr =
- page_address(cmd->SCp.buffer->page) +
+ cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
cmd->SCp.buffer->offset;
} else {
/* else fill the only available buffer */
@@ -800,7 +797,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
break;
default:
- printk("ppa: Invalid scsi phase\n");
+ printk(KERN_ERR "ppa: Invalid scsi phase\n");
}
return 0;
}
@@ -811,7 +808,7 @@ static int ppa_queuecommand(struct scsi_cmnd *cmd,
ppa_struct *dev = ppa_dev(cmd->device->host);
if (dev->cur_cmd) {
- printk("PPA: bug in ppa_queuecommand\n");
+ printk(KERN_ERR "PPA: bug in ppa_queuecommand\n");
return 0;
}
dev->failed = 0;
@@ -899,7 +896,7 @@ static int device_check(ppa_struct *dev)
/* This routine looks for a device and then attempts to use EPP
to send a command. If all goes as planned then EPP is available. */
- static char cmd[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ static u8 cmd[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
int loop, old_mode, status, k, ppb = dev->base;
unsigned char l;
@@ -909,14 +906,14 @@ static int device_check(ppa_struct *dev)
if ((ppb & 0x0007) == 0x0000)
dev->mode = PPA_EPP_32;
- second_pass:
+second_pass:
ppa_connect(dev, CONNECT_EPP_MAYBE);
/* Select SCSI device */
if (!ppa_select(dev, loop)) {
ppa_disconnect(dev);
continue;
}
- printk("ppa: Found device at ID %i, Attempting to use %s\n",
+ printk(KERN_INFO "ppa: Found device at ID %i, Attempting to use %s\n",
loop, PPA_MODE_STRING[dev->mode]);
/* Send SCSI command */
@@ -965,7 +962,7 @@ static int device_check(ppa_struct *dev)
return -EIO;
}
ppa_disconnect(dev);
- printk("ppa: Communication established with ID %i using %s\n",
+ printk(KERN_INFO "ppa: Communication established with ID %i using %s\n",
loop, PPA_MODE_STRING[dev->mode]);
ppa_connect(dev, CONNECT_EPP_MAYBE);
ppa_reset_pulse(ppb);
@@ -1140,7 +1137,7 @@ static struct parport_driver ppa_driver = {
static int __init ppa_driver_init(void)
{
- printk("ppa: Version %s\n", PPA_VERSION);
+ printk(KERN_INFO "ppa: Version %s\n", PPA_VERSION);
return parport_register_driver(&ppa_driver);
}
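
The ppa changes are pure coding-style cleanup: consistent label placement and an explicit KERN_* level on every printk() so each message is logged at its intended severity rather than the console default. Illustrative only (the example_ function is hypothetical, not ppa code):

#include <linux/kernel.h>

static void example_log_result(int err)
{
	if (!err)
		printk(KERN_INFO "example: transfer complete\n");		/* routine status */
	else
		printk(KERN_WARNING "example: transfer failed (%d)\n", err);	/* needs attention */
}
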
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 411663af7bb..71ddb5db494 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,4 +1,4 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
- qla_dbg.o qla_sup.o qla_attr.o
+ qla_dbg.o qla_sup.o qla_attr.o qla_mid.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 942db9de785..3eb2208675a 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -6,8 +6,11 @@
*/
#include "qla_def.h"
+#include <linux/kthread.h>
#include <linux/vmalloc.h>
+int qla24xx_vport_disable(struct fc_vport *, bool);
+
/* SYSFS attributes --------------------------------------------------------- */
static ssize_t
@@ -963,6 +966,122 @@ qla2x00_get_host_port_state(struct Scsi_Host *shost)
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
}
+static int
+qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+ int ret = 0;
+ scsi_qla_host_t *ha = (scsi_qla_host_t *) fc_vport->shost->hostdata;
+ scsi_qla_host_t *vha;
+
+ ret = qla24xx_vport_create_req_sanity_check(fc_vport);
+ if (ret) {
+ DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, "
+ "status %x\n", ret));
+ return (ret);
+ }
+
+ vha = qla24xx_create_vhost(fc_vport);
+ if (vha == NULL) {
+ DEBUG15(printk ("qla24xx_create_vhost failed, vha = %p\n",
+ vha));
+ return FC_VPORT_FAILED;
+ }
+ if (disable) {
+ atomic_set(&vha->vp_state, VP_OFFLINE);
+ fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
+ } else
+ atomic_set(&vha->vp_state, VP_FAILED);
+
+ /* ready to create vport */
+ qla_printk(KERN_INFO, vha, "VP entry id %d assigned.\n", vha->vp_idx);
+
+ /* initialized vport states */
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+	vha->vp_err_state = VP_ERR_PORTDWN;
+	vha->vp_prev_err_state = VP_ERR_UNKWN;
+ /* Check if physical ha port is Up */
+ if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
+ atomic_read(&ha->loop_state) == LOOP_DEAD) {
+ /* Don't retry or attempt login of this virtual port */
+ DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
+ vha->host_no));
+ atomic_set(&vha->loop_state, LOOP_DEAD);
+ if (!disable)
+ fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
+ }
+
+ if (scsi_add_host(vha->host, &fc_vport->dev)) {
+ DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
+ vha->host_no, vha->vp_idx));
+ goto vport_create_failed_2;
+ }
+
+ /* initialize attributes */
+ fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
+ fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
+ fc_host_supported_classes(vha->host) =
+ fc_host_supported_classes(ha->host);
+ fc_host_supported_speeds(vha->host) =
+ fc_host_supported_speeds(ha->host);
+
+ qla24xx_vport_disable(fc_vport, disable);
+
+ return 0;
+vport_create_failed_2:
+ qla24xx_disable_vp(vha);
+ qla24xx_deallocate_vp_id(vha);
+ kfree(vha->port_name);
+ kfree(vha->node_name);
+ scsi_host_put(vha->host);
+ return FC_VPORT_FAILED;
+}
+
+int
+qla24xx_vport_delete(struct fc_vport *fc_vport)
+{
+ scsi_qla_host_t *ha = (scsi_qla_host_t *) fc_vport->shost->hostdata;
+ scsi_qla_host_t *vha = fc_vport->dd_data;
+
+ qla24xx_disable_vp(vha);
+ qla24xx_deallocate_vp_id(vha);
+
+ down(&ha->vport_sem);
+ ha->cur_vport_count--;
+ clear_bit(vha->vp_idx, (unsigned long *)ha->vp_idx_map);
+ up(&ha->vport_sem);
+
+ kfree(vha->node_name);
+ kfree(vha->port_name);
+
+ if (vha->timer_active) {
+ qla2x00_vp_stop_timer(vha);
+ DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
+ "has stopped\n",
+ vha->host_no, vha->vp_idx, vha));
+ }
+
+ fc_remove_host(vha->host);
+
+ scsi_remove_host(vha->host);
+
+ scsi_host_put(vha->host);
+
+ return 0;
+}
+
+int
+qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+ scsi_qla_host_t *vha = fc_vport->dd_data;
+
+ if (disable)
+ qla24xx_disable_vp(vha);
+ else
+ qla24xx_enable_vp(vha);
+
+ return 0;
+}
+
struct fc_function_template qla2xxx_transport_functions = {
.show_host_node_name = 1,
@@ -1000,6 +1119,49 @@ struct fc_function_template qla2xxx_transport_functions = {
.issue_fc_host_lip = qla2x00_issue_lip,
.get_fc_host_stats = qla2x00_get_fc_host_stats,
+
+ .vport_create = qla24xx_vport_create,
+ .vport_disable = qla24xx_vport_disable,
+ .vport_delete = qla24xx_vport_delete,
+};
+
+struct fc_function_template qla2xxx_transport_vport_functions = {
+
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+
+ .get_host_port_id = qla2x00_get_host_port_id,
+ .show_host_port_id = 1,
+ .get_host_speed = qla2x00_get_host_speed,
+ .show_host_speed = 1,
+ .get_host_port_type = qla2x00_get_host_port_type,
+ .show_host_port_type = 1,
+ .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
+ .show_host_symbolic_name = 1,
+ .set_host_system_hostname = qla2x00_set_host_system_hostname,
+ .show_host_system_hostname = 1,
+ .get_host_fabric_name = qla2x00_get_host_fabric_name,
+ .show_host_fabric_name = 1,
+ .get_host_port_state = qla2x00_get_host_port_state,
+ .show_host_port_state = 1,
+
+ .dd_fcrport_size = sizeof(struct fc_port *),
+ .show_rport_supported_classes = 1,
+
+ .get_starget_node_name = qla2x00_get_starget_node_name,
+ .show_starget_node_name = 1,
+ .get_starget_port_name = qla2x00_get_starget_port_name,
+ .show_starget_port_name = 1,
+ .get_starget_port_id = qla2x00_get_starget_port_id,
+ .show_starget_port_id = 1,
+
+ .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
+ .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .issue_fc_host_lip = qla2x00_issue_lip,
+ .get_fc_host_stats = qla2x00_get_fc_host_stats,
};
void
@@ -1008,4 +1170,6 @@ qla2x00_init_host_attr(scsi_qla_host_t *ha)
fc_host_node_name(ha->host) = wwn_to_u64(ha->node_name);
fc_host_port_name(ha->host) = wwn_to_u64(ha->port_name);
fc_host_supported_classes(ha->host) = FC_COS_CLASS3;
+ fc_host_max_npiv_vports(ha->host) = MAX_NUM_VPORT_FABRIC;
+ fc_host_npiv_vports_inuse(ha->host) = ha->cur_vport_count;
}
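
The qla_attr.c additions hook the driver into the FC transport's NPIV interface: userspace creates or deletes a virtual port through the transport class, which calls back into the LLD via the vport_create/vport_disable/vport_delete methods of its fc_function_template. A bare-bones sketch of that wiring for a hypothetical driver (not the qla2xxx implementation):

#include <scsi/scsi_transport_fc.h>

static int example_vport_create(struct fc_vport *vport, bool disable)
{
	/* allocate and register a virtual SCSI host for this WWPN/WWNN */
	return 0;
}

static int example_vport_delete(struct fc_vport *vport)
{
	/* tear down the virtual host created above */
	return 0;
}

static int example_vport_disable(struct fc_vport *vport, bool disable)
{
	/* toggle the virtual port on or off without destroying it */
	return 0;
}

static struct fc_function_template example_transport_functions = {
	.vport_create	= example_vport_create,
	.vport_disable	= example_vport_disable,
	.vport_delete	= example_vport_delete,
};

Note that qla2xxx also registers a second, reduced template for the virtual hosts themselves (qla2xxx_transport_vport_functions above), which carries no vport_create method since a vport cannot create further vports.
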
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index f6ed6962bc2..996c47a6307 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1411,9 +1411,9 @@ qla2x00_print_scsi_cmd(struct scsi_cmnd * cmd)
printk("0x%02x ", cmd->cmnd[i]);
}
printk("\n seg_cnt=%d, allowed=%d, retries=%d\n",
- cmd->use_sg, cmd->allowed, cmd->retries);
+ scsi_sg_count(cmd), cmd->allowed, cmd->retries);
printk(" request buffer=0x%p, request buffer len=0x%x\n",
- cmd->request_buffer, cmd->request_bufflen);
+ scsi_sglist(cmd), scsi_bufflen(cmd));
printk(" tag=%d, transfersize=0x%x\n",
cmd->tag, cmd->transfersize);
printk(" serial_number=%lx, SP=%p\n", cmd->serial_number, sp);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 5b12278968e..49dffeb7851 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -21,6 +21,7 @@
/* #define QL_DEBUG_LEVEL_12 */ /* Output IP trace msgs */
/* #define QL_DEBUG_LEVEL_13 */ /* Output fdmi function trace msgs */
/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
+/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
/*
* Local Macro Definitions.
*/
@@ -30,7 +31,8 @@
defined(QL_DEBUG_LEVEL_7) || defined(QL_DEBUG_LEVEL_8) || \
defined(QL_DEBUG_LEVEL_9) || defined(QL_DEBUG_LEVEL_10) || \
defined(QL_DEBUG_LEVEL_11) || defined(QL_DEBUG_LEVEL_12) || \
- defined(QL_DEBUG_LEVEL_13) || defined(QL_DEBUG_LEVEL_14)
+ defined(QL_DEBUG_LEVEL_13) || defined(QL_DEBUG_LEVEL_14) || \
+ defined(QL_DEBUG_LEVEL_15)
#define QL_DEBUG_ROUTINES
#endif
@@ -125,6 +127,12 @@
#define DEBUG14(x) do {} while (0)
#endif
+#if defined(QL_DEBUG_LEVEL_15)
+#define DEBUG15(x) do {x;} while (0)
+#else
+#define DEBUG15(x) do {} while (0)
+#endif
+
/*
* Firmware Dump structure definition
*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e8948b679f5..a1ca590ba44 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1551,6 +1551,9 @@ typedef struct fc_port {
unsigned long last_queue_full;
unsigned long last_ramp_up;
+
+ struct list_head vp_fcport;
+ uint16_t vp_idx;
} fc_port_t;
/*
@@ -1999,6 +2002,36 @@ struct gid_list_info {
};
#define GID_LIST_SIZE (sizeof(struct gid_list_info) * MAX_FIBRE_DEVICES)
+/* NPIV */
+typedef struct vport_info {
+ uint8_t port_name[WWN_SIZE];
+ uint8_t node_name[WWN_SIZE];
+ int vp_id;
+ uint16_t loop_id;
+ unsigned long host_no;
+ uint8_t port_id[3];
+ int loop_state;
+} vport_info_t;
+
+typedef struct vport_params {
+ uint8_t port_name[WWN_SIZE];
+ uint8_t node_name[WWN_SIZE];
+ uint32_t options;
+#define VP_OPTS_RETRY_ENABLE BIT_0
+#define VP_OPTS_VP_DISABLE BIT_1
+} vport_params_t;
+
+/* NPIV - return codes of VP create and modify */
+#define VP_RET_CODE_OK 0
+#define VP_RET_CODE_FATAL 1
+#define VP_RET_CODE_WRONG_ID 2
+#define VP_RET_CODE_WWPN 3
+#define VP_RET_CODE_RESOURCES 4
+#define VP_RET_CODE_NO_MEM 5
+#define VP_RET_CODE_NOT_FOUND 6
+
+#define to_qla_parent(x) (((x)->parent) ? (x)->parent : (x))
+
/*
* ISP operations
*/
@@ -2073,6 +2106,16 @@ struct qla_msix_entry {
uint16_t msix_entry;
};
+#define WATCH_INTERVAL 1 /* number of seconds */
+
+/* NPIV */
+#define MAX_MULTI_ID_LOOP 126
+#define MAX_MULTI_ID_FABRIC 64
+#define MAX_NUM_VPORT_LOOP (MAX_MULTI_ID_LOOP - 1)
+#define MAX_NUM_VPORT_FABRIC (MAX_MULTI_ID_FABRIC - 1)
+#define MAX_NUM_VHBA_LOOP (MAX_MULTI_ID_LOOP - 1)
+#define MAX_NUM_VHBA_FABRIC (MAX_MULTI_ID_FABRIC - 1)
+
/*
* Linux Host Adapter structure
*/
@@ -2108,6 +2151,8 @@ typedef struct scsi_qla_host {
uint32_t msix_enabled :1;
uint32_t disable_serdes :1;
uint32_t gpsc_supported :1;
+ uint32_t vsan_enabled :1;
+ uint32_t npiv_supported :1;
} flags;
atomic_t loop_state;
@@ -2147,6 +2192,7 @@ typedef struct scsi_qla_host {
#define BEACON_BLINK_NEEDED 25
#define REGISTER_FDMI_NEEDED 26
#define FCPORT_UPDATE_NEEDED 27
+#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
uint32_t device_flags;
#define DFLG_LOCAL_DEVICES BIT_0
@@ -2237,6 +2283,11 @@ typedef struct scsi_qla_host {
/* ISP configuration data. */
uint16_t loop_id; /* Host adapter loop id */
+ uint16_t switch_cap;
+#define FLOGI_SEQ_DEL BIT_8
+#define FLOGI_MID_SUPPORT BIT_10
+#define FLOGI_VSAN_SUPPORT BIT_12
+#define FLOGI_SP_SUPPORT BIT_13
uint16_t fb_rev;
port_id_t d_id; /* Host adapter port id */
@@ -2344,6 +2395,7 @@ typedef struct scsi_qla_host {
#define MBX_UPDATE_FLASH_ACTIVE 3
struct semaphore mbx_cmd_sem; /* Serialialize mbx access */
+ struct semaphore vport_sem; /* Virtual port synchronization */
struct semaphore mbx_intr_sem; /* Used for completion notification */
uint32_t mbx_flags;
@@ -2428,6 +2480,37 @@ typedef struct scsi_qla_host {
struct fc_host_statistics fc_host_stat;
struct qla_msix_entry msix_entries[QLA_MSIX_ENTRIES];
+
+ struct list_head vp_list; /* list of VP */
+ struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
+ uint8_t vp_idx_map[16];
+ uint16_t num_vhosts; /* number of vports created */
+ uint16_t num_vsans; /* number of vsan created */
+ uint16_t vp_idx; /* vport ID */
+
+ struct scsi_qla_host *parent; /* holds pport */
+ unsigned long vp_flags;
+ struct list_head vp_fcports; /* list of fcports */
+#define VP_IDX_ACQUIRED 0 /* bit no 0 */
+#define VP_CREATE_NEEDED 1
+#define VP_BIND_NEEDED 2
+#define VP_DELETE_NEEDED 3
+#define VP_SCR_NEEDED 4 /* State Change Request registration */
+ atomic_t vp_state;
+#define VP_OFFLINE 0
+#define VP_ACTIVE 1
+#define VP_FAILED 2
+// #define VP_DISABLE 3
+ uint16_t vp_err_state;
+ uint16_t vp_prev_err_state;
+#define VP_ERR_UNKWN 0
+#define VP_ERR_PORTDWN 1
+#define VP_ERR_FAB_UNSUPPORTED 2
+#define VP_ERR_FAB_NORESOURCES 3
+#define VP_ERR_FAB_LOGOUT 4
+#define VP_ERR_ADAP_NORESOURCES 5
+	int		max_npiv_vports;	/* 63 or 125 per topology */
+ int cur_vport_count;
} scsi_qla_host_t;
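
With the new fields, a scsi_qla_host is either a physical port (parent == NULL) or a virtual port pointing back at its parent. The to_qla_parent() macro added above is the idiom the rest of the series uses whenever adapter-wide state is needed from code that may run on a vport; per-vport identity (vp_idx, vp_state, the vp_fcports list) stays on the vport itself. A sketch of the pattern, assuming the driver-internal definitions from qla_def.h (example_ name is illustrative):

static int example_port_retry_count(scsi_qla_host_t *ha)
{
	scsi_qla_host_t *pha = to_qla_parent(ha);	/* physical adapter for a vport */

	/* adapter-wide tunables live on the parent; ha keeps only its vp_idx */
	return pha->port_down_retry_count * PORT_RETRY_TIME;
}

The qla_init.c hunks below use exactly this shape when updating fcport retry counters.
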
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index a0a722cf423..63a11fef5d1 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -69,6 +69,16 @@ struct port_database_24xx {
uint8_t reserved_3[24];
};
+struct vp_database_24xx {
+ uint16_t vp_status;
+ uint8_t options;
+ uint8_t id;
+ uint8_t port_name[WWN_SIZE];
+ uint8_t node_name[WWN_SIZE];
+ uint16_t port_id_low;
+ uint16_t port_id_high;
+};
+
struct nvram_24xx {
/* NVRAM header. */
uint8_t id[4];
@@ -962,6 +972,25 @@ struct mid_db_24xx {
struct mid_db_entry_24xx entries[MAX_MID_VPS];
};
+ /*
+ * Virtual Fabric ID type definition.
+ */
+typedef struct vf_id {
+ uint16_t id : 12;
+ uint16_t priority : 4;
+} vf_id_t;
+
+/*
+ * Virtual Fabric HopCt type definition.
+ */
+typedef struct vf_hopct {
+ uint16_t reserved : 8;
+ uint16_t hopct : 8;
+} vf_hopct_t;
+
+/*
+ * Virtual Port Control IOCB
+ */
#define VP_CTRL_IOCB_TYPE 0x30 /* Vitual Port Control entry. */
struct vp_ctrl_entry_24xx {
uint8_t entry_type; /* Entry type. */
@@ -974,6 +1003,7 @@ struct vp_ctrl_entry_24xx {
uint16_t vp_idx_failed;
uint16_t comp_status; /* Completion status. */
+#define CS_VCE_IOCB_ERROR 0x01 /* Error processing IOCB */
#define CS_VCE_ACQ_ID_ERROR 0x02 /* Error while acquireing ID. */
#define CS_VCE_BUSY 0x05 /* Firmware not ready to accept cmd. */
@@ -982,24 +1012,34 @@ struct vp_ctrl_entry_24xx {
#define VCE_COMMAND_DISABLE_VPS 0x08 /* Disable VPs. */
#define VCE_COMMAND_DISABLE_VPS_REINIT 0x09 /* Disable VPs and reinit link. */
#define VCE_COMMAND_DISABLE_VPS_LOGO 0x0a /* Disable VPs and LOGO ports. */
+#define VCE_COMMAND_DISABLE_VPS_LOGO_ALL 0x0b /* Disable VPs and LOGO ports. */
uint16_t vp_count;
uint8_t vp_idx_map[16];
-
- uint8_t reserved_4[32];
+ uint16_t flags;
+ struct vf_id id;
+ uint16_t reserved_4;
+ struct vf_hopct hopct;
+ uint8_t reserved_5[8];
};
+/*
+ * Modify Virtual Port Configuration IOCB
+ */
#define VP_CONFIG_IOCB_TYPE 0x31 /* Vitual Port Config entry. */
struct vp_config_entry_24xx {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
- uint8_t sys_define; /* System defined. */
+ uint8_t handle_count;
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t reserved_1;
+ uint16_t flags;
+#define CS_VF_BIND_VPORTS_TO_VF BIT_0
+#define CS_VF_SET_QOS_OF_VPORTS BIT_1
+#define CS_VF_SET_HOPS_OF_VPORTS BIT_2
uint16_t comp_status; /* Completion status. */
#define CS_VCT_STS_ERROR 0x01 /* Specified VPs were not disabled. */
@@ -1009,27 +1049,29 @@ struct vp_config_entry_24xx {
#define CS_VCT_BUSY 0x05 /* Firmware not ready to accept cmd. */
uint8_t command;
-#define VCT_COMMAND_MOD_VPS 0x00 /* Enable VPs. */
-#define VCT_COMMAND_MOD_ENABLE_VPS 0x08 /* Disable VPs. */
+#define VCT_COMMAND_MOD_VPS 0x00 /* Modify VP configurations. */
+#define VCT_COMMAND_MOD_ENABLE_VPS 0x01 /* Modify configuration & enable VPs. */
uint8_t vp_count;
- uint8_t vp_idx1;
- uint8_t vp_idx2;
+ uint8_t vp_index1;
+ uint8_t vp_index2;
uint8_t options_idx1;
uint8_t hard_address_idx1;
- uint16_t reserved_2;
+ uint16_t reserved_vp1;
uint8_t port_name_idx1[WWN_SIZE];
uint8_t node_name_idx1[WWN_SIZE];
uint8_t options_idx2;
uint8_t hard_address_idx2;
- uint16_t reserved_3;
+ uint16_t reserved_vp2;
uint8_t port_name_idx2[WWN_SIZE];
uint8_t node_name_idx2[WWN_SIZE];
-
- uint8_t reserved_4[8];
+ struct vf_id id;
+ uint16_t reserved_4;
+ struct vf_hopct hopct;
+ uint8_t reserved_5;
};
#define VP_RPT_ID_IOCB_TYPE 0x32 /* Report ID Acquisition entry. */
@@ -1054,5 +1096,30 @@ struct vp_rpt_id_entry_24xx {
uint8_t reserved_4[32];
};
+#define VF_EVFP_IOCB_TYPE 0x26 /* Exchange Virtual Fabric Parameters entry. */
+struct vf_evfp_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+ uint16_t comp_status; /* Completion status. */
+ uint16_t timeout; /* timeout */
+ uint16_t adim_tagging_mode;
+
+ uint16_t vfport_id;
+ uint32_t exch_addr;
+
+ uint16_t nport_handle; /* N_PORT handle. */
+ uint16_t control_flags;
+ uint32_t io_parameter_0;
+ uint32_t io_parameter_1;
+ uint32_t tx_address[2]; /* Data segment 0 address. */
+ uint32_t tx_len; /* Data segment 0 length. */
+ uint32_t rx_address[2]; /* Data segment 1 address. */
+ uint32_t rx_len; /* Data segment 1 length. */
+};
+
/* END MID Support ***********************************************************/
#endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 74544ae4b0e..b44eff2803c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -62,6 +62,38 @@ extern int ql2xfdmienable;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xqfullrampup;
+extern int num_hosts;
+
+/*
+ * Global Functions in qla_mid.c source file.
+ */
+extern struct scsi_host_template qla2x00_driver_template;
+extern struct scsi_host_template qla24xx_driver_template;
+extern struct scsi_transport_template *qla2xxx_transport_vport_template;
+extern uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
+extern void qla2x00_timer(scsi_qla_host_t *);
+extern void qla2x00_start_timer(scsi_qla_host_t *, void *, unsigned long);
+extern void qla2x00_stop_timer(scsi_qla_host_t *);
+extern uint32_t qla24xx_allocate_vp_id(scsi_qla_host_t *);
+extern void qla24xx_deallocate_vp_id(scsi_qla_host_t *);
+extern int qla24xx_disable_vp (scsi_qla_host_t *);
+extern int qla24xx_enable_vp (scsi_qla_host_t *);
+extern void qla2x00_mem_free(scsi_qla_host_t *);
+extern int qla24xx_control_vp(scsi_qla_host_t *, int );
+extern int qla24xx_modify_vp_config(scsi_qla_host_t *);
+extern int qla2x00_send_change_request(scsi_qla_host_t *, uint16_t, uint16_t);
+extern void qla2x00_vp_stop_timer(scsi_qla_host_t *);
+extern int qla24xx_configure_vhba (scsi_qla_host_t *);
+extern int qla24xx_get_vp_entry(scsi_qla_host_t *, uint16_t, int);
+extern int qla24xx_get_vp_database(scsi_qla_host_t *, uint16_t);
+extern int qla2x00_do_dpc_vp(scsi_qla_host_t *);
+extern void qla24xx_report_id_acquisition(scsi_qla_host_t *,
+ struct vp_rpt_id_entry_24xx *);
+extern scsi_qla_host_t * qla24xx_find_vhost_by_name(scsi_qla_host_t *,
+ uint8_t *);
+extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
+extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
+extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *);
@@ -77,6 +109,10 @@ extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
extern void qla2xxx_wake_dpc(scsi_qla_host_t *);
+extern void qla2x00_alert_all_vps(scsi_qla_host_t *, uint16_t *);
+extern void qla2x00_async_event(scsi_qla_host_t *, uint16_t *);
+extern void qla2x00_vp_abort_isp(scsi_qla_host_t *);
+extern int qla24xx_vport_delete(struct fc_vport *);
/*
* Global Function Prototypes in qla_iocb.c source file.
@@ -128,7 +164,7 @@ qla2x00_abort_target(fc_port_t *);
extern int
qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
- uint8_t *, uint16_t *);
+ uint8_t *, uint16_t *, uint16_t *);
extern int
qla2x00_get_retry_cnt(scsi_qla_host_t *, uint8_t *, uint8_t *, uint16_t *);
@@ -303,6 +339,7 @@ struct class_device_attribute;
extern struct class_device_attribute *qla2x00_host_attrs[];
struct fc_function_template;
extern struct fc_function_template qla2xxx_transport_functions;
+extern struct fc_function_template qla2xxx_transport_vport_functions;
extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
extern void qla2x00_init_host_attr(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index ec5b2dd90d6..a086b3f0df6 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -88,6 +88,7 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
+ ct_pkt->vp_index = ha->vp_idx;
return (ct_pkt);
}
@@ -1186,6 +1187,7 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
+ ct_pkt->vp_index = ha->vp_idx;
return ct_pkt;
}
@@ -1746,6 +1748,7 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size,
ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
+ ct_pkt->vp_index = ha->vp_idx;
return ct_pkt;
}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index cf94f8636ba..cc6ebb609e9 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -899,6 +899,10 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
&ha->fw_subminor_version,
&ha->fw_attributes, &ha->fw_memory_size);
qla2x00_resize_request_q(ha);
+ ha->flags.npiv_supported = 0;
+ if (IS_QLA24XX(ha) &&
+ (ha->fw_attributes & BIT_2))
+ ha->flags.npiv_supported = 1;
if (ql2xallocfwdump)
qla2x00_alloc_fw_dump(ha);
@@ -1101,6 +1105,8 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
int rval;
unsigned long flags = 0;
int cnt;
+ struct mid_init_cb_24xx *mid_init_cb =
+ (struct mid_init_cb_24xx *) ha->init_cb;
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1132,6 +1138,10 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
ha->isp_ops.update_fw_options(ha);
DEBUG(printk("scsi(%ld): Issue init firmware.\n", ha->host_no));
+
+ mid_init_cb->count = MAX_NUM_VPORT_FABRIC;
+ ha->max_npiv_vports = MAX_NUM_VPORT_FABRIC;
+
rval = qla2x00_init_firmware(ha, ha->init_cb_size);
if (rval) {
DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
@@ -1263,6 +1273,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
int rval;
uint16_t loop_id;
uint16_t topo;
+ uint16_t sw_cap;
uint8_t al_pa;
uint8_t area;
uint8_t domain;
@@ -1270,7 +1281,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
/* Get host addresses. */
rval = qla2x00_get_adapter_id(ha,
- &loop_id, &al_pa, &area, &domain, &topo);
+ &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
if (rval != QLA_SUCCESS) {
if (LOOP_TRANSITION(ha) || atomic_read(&ha->loop_down_timer) ||
(rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
@@ -1295,6 +1306,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
/* initialize */
ha->min_external_loopid = SNS_FIRST_LOOP_ID;
ha->operating_mode = LOOP;
+ ha->switch_cap = 0;
switch (topo) {
case 0:
@@ -1307,6 +1319,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
case 1:
DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
ha->host_no));
+ ha->switch_cap = sw_cap;
ha->current_topology = ISP_CFG_FL;
strcpy(connect_type, "(FL_Port)");
break;
@@ -1322,6 +1335,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
case 3:
DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
ha->host_no));
+ ha->switch_cap = sw_cap;
ha->operating_mode = P2P;
ha->current_topology = ISP_CFG_F;
strcpy(connect_type, "(F_Port)");
@@ -1743,7 +1757,6 @@ qla2x00_rport_del(void *data)
spin_unlock_irqrestore(&fcport->rport_lock, flags);
if (rport)
fc_remote_port_delete(rport);
-
}
/**
@@ -1765,6 +1778,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
/* Setup fcport template structure. */
memset(fcport, 0, sizeof (fc_port_t));
fcport->ha = ha;
+ fcport->vp_idx = ha->vp_idx;
fcport->port_type = FCT_UNKNOWN;
fcport->loop_id = FC_NO_LOOP_ID;
atomic_set(&fcport->state, FCS_UNCONFIGURED);
@@ -1911,6 +1925,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
char *id_iter;
uint16_t loop_id;
uint8_t domain, area, al_pa;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
found_devs = 0;
new_fcport = NULL;
@@ -1942,7 +1957,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
/*
* Mark local devices that were present with FCF_DEVICE_LOST for now.
*/
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &pha->fcports, list) {
+ if (fcport->vp_idx != ha->vp_idx)
+ continue;
+
if (atomic_read(&fcport->state) == FCS_ONLINE &&
fcport->port_type != FCT_BROADCAST &&
(fcport->flags & FCF_FABRIC_DEVICE) == 0) {
@@ -1988,6 +2006,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
new_fcport->d_id.b.area = area;
new_fcport->d_id.b.al_pa = al_pa;
new_fcport->loop_id = loop_id;
+ new_fcport->vp_idx = ha->vp_idx;
rval2 = qla2x00_get_port_database(ha, new_fcport, 0);
if (rval2 != QLA_SUCCESS) {
DEBUG2(printk("scsi(%ld): Failed to retrieve fcport "
@@ -2003,7 +2022,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
/* Check for matching device in port list. */
found = 0;
fcport = NULL;
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &pha->fcports, list) {
+ if (fcport->vp_idx != ha->vp_idx)
+ continue;
+
if (memcmp(new_fcport->port_name, fcport->port_name,
WWN_SIZE))
continue;
@@ -2023,7 +2045,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
if (!found) {
/* New device, add to fcports list. */
new_fcport->flags &= ~FCF_PERSISTENT_BOUND;
- list_add_tail(&new_fcport->list, &ha->fcports);
+ if (ha->parent) {
+ new_fcport->ha = ha;
+ new_fcport->vp_idx = ha->vp_idx;
+ list_add_tail(&new_fcport->vp_fcport,
+ &ha->vp_fcports);
+ }
+ list_add_tail(&new_fcport->list, &pha->fcports);
/* Allocate a new replacement fcport. */
fcport = new_fcport;
@@ -2199,11 +2227,13 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
void
qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
{
+ scsi_qla_host_t *pha = to_qla_parent(ha);
+
fcport->ha = ha;
fcport->login_retry = 0;
- fcport->port_login_retry_count = ha->port_down_retry_count *
+ fcport->port_login_retry_count = pha->port_down_retry_count *
PORT_RETRY_TIME;
- atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
+ atomic_set(&fcport->port_down_timer, pha->port_down_retry_count *
PORT_RETRY_TIME);
fcport->flags &= ~FCF_LOGIN_NEEDED;
@@ -2234,6 +2264,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint16_t loop_id;
LIST_HEAD(new_fcports);
+ scsi_qla_host_t *pha = to_qla_parent(ha);
/* If FL port exists, then SNS is present */
if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
@@ -2307,7 +2338,10 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
* Logout all previous fabric devices marked lost, except
* tape devices.
*/
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &pha->fcports, list) {
+		if (fcport->vp_idx != ha->vp_idx)
+ continue;
+
if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
break;
@@ -2332,13 +2366,16 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
}
/* Starting free loop ID. */
- next_loopid = ha->min_external_loopid;
+ next_loopid = pha->min_external_loopid;
/*
* Scan through our port list and login entries that need to be
* logged in.
*/
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &pha->fcports, list) {
+ if (fcport->vp_idx != ha->vp_idx)
+ continue;
+
if (atomic_read(&ha->loop_down_timer) ||
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
break;
@@ -2380,11 +2417,18 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
break;
}
- /* Remove device from the new list and add it to DB */
- list_move_tail(&fcport->list, &ha->fcports);
-
/* Login and update database */
qla2x00_fabric_dev_login(ha, fcport, &next_loopid);
+
+ if (ha->parent) {
+ fcport->ha = ha;
+ fcport->vp_idx = ha->vp_idx;
+ list_add_tail(&fcport->vp_fcport,
+ &ha->vp_fcports);
+ list_move_tail(&fcport->list,
+ &ha->parent->fcports);
+ } else
+ list_move_tail(&fcport->list, &ha->fcports);
}
} while (0);
@@ -2428,6 +2472,11 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
int swl_idx;
int first_dev, last_dev;
port_id_t wrap, nxt_d_id;
+ int vp_index;
+ int empty_vp_index;
+ int found_vp;
+ scsi_qla_host_t *vha;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
rval = QLA_SUCCESS;
@@ -2461,13 +2510,13 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
return (QLA_MEMORY_ALLOC_FAILED);
}
new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
-
+ new_fcport->vp_idx = ha->vp_idx;
/* Set start port ID scan at adapter ID. */
first_dev = 1;
last_dev = 0;
/* Starting free loop ID. */
- loop_id = ha->min_external_loopid;
+ loop_id = pha->min_external_loopid;
for (; loop_id <= ha->last_loop_id; loop_id++) {
if (qla2x00_is_reserved_id(ha, loop_id))
continue;
@@ -2521,10 +2570,42 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
break;
}
- /* Bypass if host adapter. */
- if (new_fcport->d_id.b24 == ha->d_id.b24)
+ /* Bypass if same physical adapter. */
+ if (new_fcport->d_id.b24 == pha->d_id.b24)
continue;
+ /* Bypass virtual ports of the same host. */
+ if (pha->num_vhosts) {
+ vp_index = find_next_bit(
+ (unsigned long *)pha->vp_idx_map,
+ MAX_MULTI_ID_FABRIC + 1, 1);
+
+ for (;vp_index <= MAX_MULTI_ID_FABRIC;
+ vp_index = find_next_bit(
+ (unsigned long *)pha->vp_idx_map,
+ MAX_MULTI_ID_FABRIC + 1, vp_index + 1)) {
+ empty_vp_index = 1;
+ found_vp = 0;
+ list_for_each_entry(vha, &pha->vp_list,
+ vp_list) {
+ if (vp_index == vha->vp_idx) {
+ empty_vp_index = 0;
+ found_vp = 1;
+ break;
+ }
+ }
+
+ if (empty_vp_index)
+ continue;
+
+ if (found_vp &&
+ new_fcport->d_id.b24 == vha->d_id.b24)
+ break;
+ }
+ if (vp_index <= MAX_MULTI_ID_FABRIC)
+ continue;
+ }
+
/* Bypass if same domain and area of adapter. */
if (((new_fcport->d_id.b24 & 0xffff00) ==
(ha->d_id.b24 & 0xffff00)) && ha->current_topology ==
@@ -2537,7 +2618,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
/* Locate matching device in database. */
found = 0;
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &pha->fcports, list) {
+ if (new_fcport->vp_idx != fcport->vp_idx)
+ continue;
if (memcmp(new_fcport->port_name, fcport->port_name,
WWN_SIZE))
continue;
@@ -2605,6 +2688,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
}
new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
new_fcport->d_id.b24 = nxt_d_id.b24;
+ new_fcport->vp_idx = ha->vp_idx;
}
kfree(swl);
@@ -2637,6 +2721,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
int found;
fc_port_t *fcport;
uint16_t first_loop_id;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
rval = QLA_SUCCESS;
@@ -2663,7 +2748,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
/* Check for loop ID being already in use. */
found = 0;
fcport = NULL;
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &pha->fcports, list) {
if (fcport->loop_id == dev->loop_id && fcport != dev) {
/* ID possibly in use */
found++;
@@ -2710,6 +2795,7 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
uint8_t rscn_out_iter;
uint8_t format;
port_id_t d_id;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
rval = QLA_RSCNS_HANDLED;
@@ -2776,7 +2862,10 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
rval = QLA_SUCCESS;
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &pha->fcports, list) {
+ if (fcport->vp_idx != ha->vp_idx)
+ continue;
+
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
(fcport->d_id.b24 & mask) != d_id.b24 ||
fcport->port_type == FCT_BROADCAST)
@@ -3940,3 +4029,40 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
ret = qla2x00_stop_firmware(ha);
}
}
+
+int
+qla24xx_configure_vhba(scsi_qla_host_t *ha)
+{
+ int rval = QLA_SUCCESS;
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+
+ if (!ha->parent)
+ return -EINVAL;
+
+ rval = qla2x00_fw_ready(ha);
+ if (rval == QLA_SUCCESS) {
+ clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
+ qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
+ }
+
+ ha->flags.management_server_logged_in = 0;
+
+ /* Login to SNS first */
+ qla24xx_login_fabric(ha, NPH_SNS, 0xff, 0xff, 0xfc,
+ mb, BIT_1);
+ if (mb[0] != MBS_COMMAND_COMPLETE) {
+ DEBUG15(qla_printk(KERN_INFO, ha,
+ "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
+ "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS,
+ mb[0], mb[1], mb[2], mb[6], mb[7]));
+ return (QLA_FUNCTION_FAILED);
+ }
+
+ atomic_set(&ha->loop_down_timer, 0);
+ atomic_set(&ha->loop_state, LOOP_UP);
+ set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
+ rval = qla2x00_loop_resync(ha);
+
+ return rval;
+}
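
Throughout qla_init.c the fcport list now lives on the physical adapter, with each entry tagged by vp_idx; loops that used to walk ha->fcports walk the parent's list and skip ports owned by other virtual hosts. The recurring pattern, roughly, again assuming the qla_def.h types (example_handle_fcport is a placeholder):

#include <linux/list.h>

static void example_handle_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
{
	/* per-port work: login, state update, etc. */
}

static void example_for_each_own_fcport(scsi_qla_host_t *ha)
{
	scsi_qla_host_t *pha = to_qla_parent(ha);
	fc_port_t *fcport;

	list_for_each_entry(fcport, &pha->fcports, list) {
		if (fcport->vp_idx != ha->vp_idx)	/* belongs to another vport */
			continue;
		example_handle_fcport(ha, fcport);
	}
}
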
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c5b3c610a32..c71863ff548 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -155,6 +155,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
uint32_t *cur_dsd;
scsi_qla_host_t *ha;
struct scsi_cmnd *cmd;
+ struct scatterlist *sg;
+ int i;
cmd = sp->cmd;
@@ -163,7 +165,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
__constant_cpu_to_le32(COMMAND_TYPE);
/* No data transfer */
- if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
return;
}
@@ -177,35 +179,23 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
/* Load data segments */
- if (cmd->use_sg != 0) {
- struct scatterlist *cur_seg;
- struct scatterlist *end_seg;
-
- cur_seg = (struct scatterlist *)cmd->request_buffer;
- end_seg = cur_seg + tot_dsds;
- while (cur_seg < end_seg) {
- cont_entry_t *cont_pkt;
-
- /* Allocate additional continuation packets? */
- if (avail_dsds == 0) {
- /*
- * Seven DSDs are available in the Continuation
- * Type 0 IOCB.
- */
- cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
- cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
- avail_dsds = 7;
- }
-
- *cur_dsd++ = cpu_to_le32(sg_dma_address(cur_seg));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
- avail_dsds--;
-
- cur_seg++;
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+ cont_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Seven DSDs are available in the Continuation
+ * Type 0 IOCB.
+ */
+ cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
+ cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
+ avail_dsds = 7;
}
- } else {
- *cur_dsd++ = cpu_to_le32(sp->dma_handle);
- *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
+
+ *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
}
}
@@ -224,6 +214,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
uint32_t *cur_dsd;
scsi_qla_host_t *ha;
struct scsi_cmnd *cmd;
+ struct scatterlist *sg;
+ int i;
cmd = sp->cmd;
@@ -232,7 +224,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
__constant_cpu_to_le32(COMMAND_A64_TYPE);
/* No data transfer */
- if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
return;
}
@@ -246,39 +238,26 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
/* Load data segments */
- if (cmd->use_sg != 0) {
- struct scatterlist *cur_seg;
- struct scatterlist *end_seg;
-
- cur_seg = (struct scatterlist *)cmd->request_buffer;
- end_seg = cur_seg + tot_dsds;
- while (cur_seg < end_seg) {
- dma_addr_t sle_dma;
- cont_a64_entry_t *cont_pkt;
-
- /* Allocate additional continuation packets? */
- if (avail_dsds == 0) {
- /*
- * Five DSDs are available in the Continuation
- * Type 1 IOCB.
- */
- cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
- cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
- avail_dsds = 5;
- }
-
- sle_dma = sg_dma_address(cur_seg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
- avail_dsds--;
-
- cur_seg++;
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Continuation
+ * Type 1 IOCB.
+ */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+ cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+ avail_dsds = 5;
}
- } else {
- *cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
- *cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
- *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
+
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
}
}
@@ -291,7 +270,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
int
qla2x00_start_scsi(srb_t *sp)
{
- int ret;
+ int ret, nseg;
unsigned long flags;
scsi_qla_host_t *ha;
struct scsi_cmnd *cmd;
@@ -299,7 +278,6 @@ qla2x00_start_scsi(srb_t *sp)
uint32_t index;
uint32_t handle;
cmd_entry_t *cmd_pkt;
- struct scatterlist *sg;
uint16_t cnt;
uint16_t req_cnt;
uint16_t tot_dsds;
@@ -337,23 +315,15 @@ qla2x00_start_scsi(srb_t *sp)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
- if (cmd->use_sg) {
- sg = (struct scatterlist *) cmd->request_buffer;
- tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
- cmd->sc_data_direction);
- if (tot_dsds == 0)
- goto queuing_error;
- } else if (cmd->request_bufflen) {
- dma_addr_t req_dma;
-
- req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
- cmd->request_bufflen, cmd->sc_data_direction);
- if (dma_mapping_error(req_dma))
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
goto queuing_error;
+ } else
+ nseg = 0;
- sp->dma_handle = req_dma;
- tot_dsds = 1;
- }
+ tot_dsds = nseg;
/* Calculate the number of request entries needed. */
req_cnt = ha->isp_ops.calc_req_entries(tot_dsds);
@@ -391,7 +361,7 @@ qla2x00_start_scsi(srb_t *sp)
/* Load SCSI command packet. */
memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
- cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
/* Build IOCB segments */
ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds);
@@ -423,14 +393,9 @@ qla2x00_start_scsi(srb_t *sp)
return (QLA_SUCCESS);
queuing_error:
- if (cmd->use_sg && tot_dsds) {
- sg = (struct scatterlist *) cmd->request_buffer;
- pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
- cmd->sc_data_direction);
- } else if (tot_dsds) {
- pci_unmap_single(ha->pdev, sp->dma_handle,
- cmd->request_bufflen, cmd->sc_data_direction);
- }
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return (QLA_FUNCTION_FAILED);
@@ -453,9 +418,10 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
{
mrk_entry_t *mrk;
struct mrk_entry_24xx *mrk24;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
mrk24 = NULL;
- mrk = (mrk_entry_t *)qla2x00_req_pkt(ha);
+ mrk = (mrk_entry_t *)qla2x00_req_pkt(pha);
if (mrk == NULL) {
DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
__func__, ha->host_no));
@@ -472,6 +438,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
mrk24->lun[1] = LSB(lun);
mrk24->lun[2] = MSB(lun);
host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
+ mrk24->vp_index = ha->vp_idx;
} else {
SET_TARGET_ID(ha, mrk->target, loop_id);
mrk->lun = cpu_to_le16(lun);
@@ -479,7 +446,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
}
wmb();
- qla2x00_isp_cmd(ha);
+ qla2x00_isp_cmd(pha);
return (QLA_SUCCESS);
}
@@ -642,6 +609,8 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
uint32_t *cur_dsd;
scsi_qla_host_t *ha;
struct scsi_cmnd *cmd;
+ struct scatterlist *sg;
+ int i;
cmd = sp->cmd;
@@ -650,7 +619,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
__constant_cpu_to_le32(COMMAND_TYPE_7);
/* No data transfer */
- if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
return;
}
@@ -670,39 +639,27 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
/* Load data segments */
- if (cmd->use_sg != 0) {
- struct scatterlist *cur_seg;
- struct scatterlist *end_seg;
-
- cur_seg = (struct scatterlist *)cmd->request_buffer;
- end_seg = cur_seg + tot_dsds;
- while (cur_seg < end_seg) {
- dma_addr_t sle_dma;
- cont_a64_entry_t *cont_pkt;
-
- /* Allocate additional continuation packets? */
- if (avail_dsds == 0) {
- /*
- * Five DSDs are available in the Continuation
- * Type 1 IOCB.
- */
- cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
- cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
- avail_dsds = 5;
- }
-
- sle_dma = sg_dma_address(cur_seg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
- avail_dsds--;
-
- cur_seg++;
+
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Continuation
+ * Type 1 IOCB.
+ */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+ cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+ avail_dsds = 5;
}
- } else {
- *cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
- *cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
- *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
+
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
}
}
@@ -716,7 +673,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
int
qla24xx_start_scsi(srb_t *sp)
{
- int ret;
+ int ret, nseg;
unsigned long flags;
scsi_qla_host_t *ha;
struct scsi_cmnd *cmd;
@@ -724,7 +681,6 @@ qla24xx_start_scsi(srb_t *sp)
uint32_t index;
uint32_t handle;
struct cmd_type_7 *cmd_pkt;
- struct scatterlist *sg;
uint16_t cnt;
uint16_t req_cnt;
uint16_t tot_dsds;
@@ -762,23 +718,15 @@ qla24xx_start_scsi(srb_t *sp)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
- if (cmd->use_sg) {
- sg = (struct scatterlist *) cmd->request_buffer;
- tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
- cmd->sc_data_direction);
- if (tot_dsds == 0)
- goto queuing_error;
- } else if (cmd->request_bufflen) {
- dma_addr_t req_dma;
-
- req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
- cmd->request_bufflen, cmd->sc_data_direction);
- if (dma_mapping_error(req_dma))
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
goto queuing_error;
+ } else
+ nseg = 0;
- sp->dma_handle = req_dma;
- tot_dsds = 1;
- }
+ tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(tot_dsds);
if (ha->req_q_cnt < (req_cnt + 2)) {
@@ -813,6 +761,7 @@ qla24xx_start_scsi(srb_t *sp)
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vp_idx;
int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
@@ -821,7 +770,7 @@ qla24xx_start_scsi(srb_t *sp)
memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
- cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
/* Build IOCB segments */
qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
@@ -853,14 +802,9 @@ qla24xx_start_scsi(srb_t *sp)
return QLA_SUCCESS;
queuing_error:
- if (cmd->use_sg && tot_dsds) {
- sg = (struct scatterlist *) cmd->request_buffer;
- pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
- cmd->sc_data_direction);
- } else if (tot_dsds) {
- pci_unmap_single(ha->pdev, sp->dma_handle,
- cmd->request_bufflen, cmd->sc_data_direction);
- }
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_FUNCTION_FAILED;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ca463469063..0ba4c8d3787 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -9,7 +9,6 @@
#include <scsi/scsi_tcq.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
-static void qla2x00_async_event(scsi_qla_host_t *, uint16_t *);
static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, void *);
static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
@@ -244,7 +243,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
* @ha: SCSI driver HA context
* @mb: Mailbox registers (0 - 3)
*/
-static void
+void
qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
{
#define LS_UNKNOWN 2
@@ -386,6 +385,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
qla2x00_mark_all_devices_lost(ha, 1);
}
+ if (ha->parent) {
+ atomic_set(&ha->vp_state, VP_FAILED);
+ fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+ }
+
set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
ha->flags.management_server_logged_in = 0;
@@ -422,6 +426,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
qla2x00_mark_all_devices_lost(ha, 1);
}
+ if (ha->parent) {
+ atomic_set(&ha->vp_state, VP_FAILED);
+ fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+ }
+
ha->flags.management_server_logged_in = 0;
ha->link_data_rate = PORT_SPEED_UNKNOWN;
if (ql2xfdmienable)
@@ -440,6 +449,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
qla2x00_mark_all_devices_lost(ha, 1);
}
+ if (ha->parent) {
+ atomic_set(&ha->vp_state, VP_FAILED);
+ fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+ }
+
set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
ha->operating_mode = LOOP;
@@ -465,6 +479,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
qla2x00_mark_all_devices_lost(ha, 1);
}
+ if (ha->parent) {
+ atomic_set(&ha->vp_state, VP_FAILED);
+ fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+ }
+
if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
}
@@ -491,6 +510,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
qla2x00_mark_all_devices_lost(ha, 1);
}
+ if (ha->parent) {
+ atomic_set(&ha->vp_state, VP_FAILED);
+ fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+ }
+
set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
break;
@@ -530,6 +554,10 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
break;
case MBA_RSCN_UPDATE: /* State Change Registration */
+ /* Check if the Vport has issued a SCR */
+ if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags))
+ break;
+
DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
ha->host_no));
DEBUG(printk(KERN_INFO
@@ -589,6 +617,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
ha->host_no, mb[1], mb[2]));
break;
}
+
+ if (!ha->parent && ha->num_vhosts)
+ qla2x00_alert_all_vps(ha, mb);
}
static void
@@ -889,19 +920,19 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
}
if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
resid = resid_len;
- cp->resid = resid;
+ scsi_set_resid(cp, resid);
CMD_RESID_LEN(cp) = resid;
if (!lscsi_status &&
- ((unsigned)(cp->request_bufflen - resid) <
+ ((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d:%d): Mid-layer underflow "
- "detected (%x of %x bytes)...returning "
- "error status.\n", ha->host_no,
- cp->device->channel, cp->device->id,
- cp->device->lun, resid,
- cp->request_bufflen);
+ "scsi(%ld:%d:%d:%d): Mid-layer underflow "
+ "detected (%x of %x bytes)...returning "
+ "error status.\n", ha->host_no,
+ cp->device->channel, cp->device->id,
+ cp->device->lun, resid,
+ scsi_bufflen(cp));
cp->result = DID_ERROR << 16;
break;
@@ -963,7 +994,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
resid = fw_resid_len;
if (scsi_status & SS_RESIDUAL_UNDER) {
- cp->resid = resid;
+ scsi_set_resid(cp, resid);
CMD_RESID_LEN(cp) = resid;
} else {
DEBUG2(printk(KERN_INFO
@@ -1042,26 +1073,26 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
*/
if (!(scsi_status & SS_RESIDUAL_UNDER)) {
DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
- "frame(s) detected (%x of %x bytes)..."
- "retrying command.\n", ha->host_no,
- cp->device->channel, cp->device->id,
- cp->device->lun, resid,
- cp->request_bufflen));
+ "frame(s) detected (%x of %x bytes)..."
+ "retrying command.\n", ha->host_no,
+ cp->device->channel, cp->device->id,
+ cp->device->lun, resid,
+ scsi_bufflen(cp)));
cp->result = DID_BUS_BUSY << 16;
break;
}
/* Handle mid-layer underflow */
- if ((unsigned)(cp->request_bufflen - resid) <
+ if ((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow) {
qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d:%d): Mid-layer underflow "
- "detected (%x of %x bytes)...returning "
- "error status.\n", ha->host_no,
- cp->device->channel, cp->device->id,
- cp->device->lun, resid,
- cp->request_bufflen);
+ "scsi(%ld:%d:%d:%d): Mid-layer underflow "
+ "detected (%x of %x bytes)...returning "
+ "error status.\n", ha->host_no,
+ cp->device->channel, cp->device->id,
+ cp->device->lun, resid,
+ scsi_bufflen(cp));
cp->result = DID_ERROR << 16;
break;
@@ -1084,7 +1115,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
DEBUG2(printk(KERN_INFO
"PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
"status!\n",
- cp->serial_number, cp->request_bufflen, resid_len));
+ cp->serial_number, scsi_bufflen(cp), resid_len));
cp->result = DID_ERROR << 16;
break;
@@ -1393,6 +1424,10 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
case MS_IOCB_TYPE:
qla24xx_ms_entry(ha, (struct ct_entry_24xx *)pkt);
break;
+ case VP_RPT_ID_IOCB_TYPE:
+ qla24xx_report_id_acquisition(ha,
+ (struct vp_rpt_id_entry_24xx *)pkt);
+ break;
default:
/* Type Not Supported. */
DEBUG4(printk(KERN_WARNING
@@ -1633,7 +1668,7 @@ struct qla_init_msix_entry {
uint16_t entry;
uint16_t index;
const char *name;
- irqreturn_t (*handler)(int, void *);
+ irq_handler_t handler;
};
static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = {
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 71e32a24852..2cd0cff2592 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -42,25 +42,29 @@ qla2x00_mbx_sem_timeout(unsigned long data)
* Kernel context.
*/
static int
-qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
+qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
{
int rval;
unsigned long flags = 0;
- device_reg_t __iomem *reg = ha->iobase;
+ device_reg_t __iomem *reg;
struct timer_list tmp_intr_timer;
uint8_t abort_active;
- uint8_t io_lock_on = ha->flags.init_done;
+ uint8_t io_lock_on;
uint16_t command;
uint16_t *iptr;
uint16_t __iomem *optr;
uint32_t cnt;
uint32_t mboxes;
unsigned long wait_time;
+ scsi_qla_host_t *ha = to_qla_parent(pvha);
+
+ reg = ha->iobase;
+ io_lock_on = ha->flags.init_done;
rval = QLA_SUCCESS;
abort_active = test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, pvha->host_no));
/*
* Wait for active mailbox commands to finish by waiting at most tov
@@ -889,7 +893,7 @@ qla2x00_abort_target(fc_port_t *fcport)
*/
int
qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
- uint8_t *area, uint8_t *domain, uint16_t *top)
+ uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
{
int rval;
mbx_cmd_t mc;
@@ -899,8 +903,9 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
ha->host_no));
mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
+ mcp->mb[9] = ha->vp_idx;
mcp->out_mb = MBX_0;
- mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->tov = 30;
mcp->flags = 0;
rval = qla2x00_mailbox_command(ha, mcp);
@@ -913,6 +918,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
*area = MSB(mcp->mb[2]);
*domain = LSB(mcp->mb[3]);
*top = mcp->mb[6];
+ *sw_cap = mcp->mb[7];
if (rval != QLA_SUCCESS) {
/*EMPTY*/
@@ -1009,7 +1015,11 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
ha->host_no));
- mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
+ if (ha->flags.npiv_supported)
+ mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
+ else
+ mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
+
mcp->mb[2] = MSW(ha->init_cb_dma);
mcp->mb[3] = LSW(ha->init_cb_dma);
mcp->mb[4] = 0;
@@ -1081,7 +1091,8 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
mcp->mb[3] = LSW(pd_dma);
mcp->mb[6] = MSW(MSD(pd_dma));
mcp->mb[7] = LSW(MSD(pd_dma));
- mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
+ mcp->mb[9] = ha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
mcp->in_mb = MBX_0;
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
mcp->mb[1] = fcport->loop_id;
@@ -1259,7 +1270,8 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
ha->host_no));
mcp->mb[0] = MBC_GET_PORT_NAME;
- mcp->out_mb = MBX_1|MBX_0;
+ mcp->mb[9] = ha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_1|MBX_0;
if (HAS_EXTENDED_IDS(ha)) {
mcp->mb[1] = loop_id;
mcp->mb[10] = opt;
@@ -1447,6 +1459,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
lg->port_id[0] = al_pa;
lg->port_id[1] = area;
lg->port_id[2] = domain;
+ lg->vp_index = cpu_to_le16(ha->vp_idx);
rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB "
@@ -1701,6 +1714,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
lg->port_id[0] = al_pa;
lg->port_id[1] = area;
lg->port_id[2] = domain;
+ lg->vp_index = cpu_to_le16(ha->vp_idx);
rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB "
@@ -1863,7 +1877,8 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
mcp->mb[6] = MSW(MSD(id_list_dma));
mcp->mb[7] = LSW(MSD(id_list_dma));
mcp->mb[8] = 0;
- mcp->out_mb |= MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
+ mcp->mb[9] = ha->vp_idx;
+ mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
} else {
mcp->mb[1] = MSW(id_list_dma);
mcp->mb[2] = LSW(id_list_dma);
@@ -2212,6 +2227,7 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
abt->port_id[0] = fcport->d_id.b.al_pa;
abt->port_id[1] = fcport->d_id.b.area;
abt->port_id[2] = fcport->d_id.b.domain;
+ abt->vp_index = fcport->vp_idx;
rval = qla2x00_issue_iocb(ha, abt, abt_dma, 0);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n",
@@ -2249,7 +2265,7 @@ qla24xx_abort_target(fc_port_t *fcport)
int rval;
struct tsk_mgmt_cmd *tsk;
dma_addr_t tsk_dma;
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *ha, *pha;
if (fcport == NULL)
return 0;
@@ -2257,7 +2273,8 @@ qla24xx_abort_target(fc_port_t *fcport)
DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no));
ha = fcport->ha;
- tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
+ pha = to_qla_parent(ha);
+ tsk = dma_pool_alloc(pha->s_dma_pool, GFP_KERNEL, &tsk_dma);
if (tsk == NULL) {
DEBUG2_3(printk("%s(%ld): failed to allocate Task Management "
"IOCB.\n", __func__, ha->host_no));
@@ -2273,6 +2290,8 @@ qla24xx_abort_target(fc_port_t *fcport)
tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
+ tsk->p.tsk.vp_index = fcport->vp_idx;
+
rval = qla2x00_issue_iocb(ha, tsk, tsk_dma, 0);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue Target Reset IOCB "
@@ -2303,7 +2322,7 @@ qla24xx_abort_target(fc_port_t *fcport)
}
atarget_done:
- dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
+ dma_pool_free(pha->s_dma_pool, tsk, tsk_dma);
return rval;
}
@@ -2610,3 +2629,354 @@ qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
return rval;
}
+
+/*
+ * qla24xx_get_vp_database
+ * Get the VP's database for all configured ports.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * size = size of initialization control block.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_get_vp_database(scsi_qla_host_t *ha, uint16_t size)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ DEBUG11(printk("scsi(%ld):%s - entered.\n",
+ ha->host_no, __func__));
+
+ mcp->mb[0] = MBC_MID_GET_VP_DATABASE;
+ mcp->mb[2] = MSW(ha->init_cb_dma);
+ mcp->mb[3] = LSW(ha->init_cb_dma);
+ mcp->mb[4] = 0;
+ mcp->mb[5] = 0;
+ mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
+ mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->buf_size = size;
+ mcp->flags = MBX_DMA_OUT;
+ mcp->tov = MBX_TOV_SECONDS;
+ rval = qla2x00_mailbox_command(ha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ DEBUG2_3_11(printk("%s(%ld): failed=%x "
+ "mb0=%x.\n",
+ __func__, ha->host_no, rval, mcp->mb[0]));
+ } else {
+ /*EMPTY*/
+ DEBUG11(printk("%s(%ld): done.\n",
+ __func__, ha->host_no));
+ }
+
+ return rval;
+}
+
+int
+qla24xx_get_vp_entry(scsi_qla_host_t *ha, uint16_t size, int vp_id)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+
+ mcp->mb[0] = MBC_MID_GET_VP_ENTRY;
+ mcp->mb[2] = MSW(ha->init_cb_dma);
+ mcp->mb[3] = LSW(ha->init_cb_dma);
+ mcp->mb[4] = 0;
+ mcp->mb[5] = 0;
+ mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
+ mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
+ mcp->mb[9] = vp_id;
+ mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->buf_size = size;
+ mcp->flags = MBX_DMA_OUT;
+ mcp->tov = 30;
+ rval = qla2x00_mailbox_command(ha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ DEBUG2_3_11(printk("qla24xx_get_vp_entry(%ld): failed=%x "
+ "mb0=%x.\n",
+ ha->host_no, rval, mcp->mb[0]));
+ } else {
+ /*EMPTY*/
+ DEBUG11(printk("qla24xx_get_vp_entry(%ld): done.\n",
+ ha->host_no));
+ }
+
+ return rval;
+}
+
+void
+qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
+ struct vp_rpt_id_entry_24xx *rptid_entry)
+{
+ uint8_t vp_idx;
+ scsi_qla_host_t *vha;
+
+ if (rptid_entry->entry_status != 0)
+ return;
+ if (rptid_entry->entry_status != __constant_cpu_to_le16(CS_COMPLETE))
+ return;
+
+ if (rptid_entry->format == 0) {
+ DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d,"
+ " number of VPs acquired %d\n", __func__, ha->host_no,
+ MSB(rptid_entry->vp_count), LSB(rptid_entry->vp_count)));
+ DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__,
+ rptid_entry->port_id[2], rptid_entry->port_id[1],
+ rptid_entry->port_id[0]));
+ } else if (rptid_entry->format == 1) {
+ vp_idx = LSB(rptid_entry->vp_idx);
+ DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled "
+ "- status %d - "
+ "with port id %02x%02x%02x\n",__func__,ha->host_no,
+ vp_idx, MSB(rptid_entry->vp_idx),
+ rptid_entry->port_id[2], rptid_entry->port_id[1],
+ rptid_entry->port_id[0]));
+ if (vp_idx == 0)
+ return;
+
+ if (MSB(rptid_entry->vp_idx) == 1)
+ return;
+
+ list_for_each_entry(vha, &ha->vp_list, vp_list)
+ if (vp_idx == vha->vp_idx)
+ break;
+
+ if (!vha)
+ return;
+
+ vha->d_id.b.domain = rptid_entry->port_id[2];
+ vha->d_id.b.area = rptid_entry->port_id[1];
+ vha->d_id.b.al_pa = rptid_entry->port_id[0];
+
+ /*
+ * Cannot configure here as we are still sitting on the
+ * response queue. Handle it in dpc context.
+ */
+ set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
+ set_bit(VP_DPC_NEEDED, &ha->dpc_flags);
+
+ wake_up_process(ha->dpc_thread);
+ }
+}
+
+/*
+ * qla24xx_modify_vp_config
+ * Change VP configuration for vha
+ *
+ * Input:
+ * vha = adapter block pointer.
+ *
+ * Returns:
+ * qla2xxx local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_modify_vp_config(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct vp_config_entry_24xx *vpmod;
+ dma_addr_t vpmod_dma;
+ scsi_qla_host_t *pha;
+
+ /* This can be called by the parent */
+ pha = to_qla_parent(vha);
+
+ vpmod = dma_pool_alloc(pha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
+ if (!vpmod) {
+ DEBUG2_3(printk("%s(%ld): failed to allocate Modify VP "
+ "IOCB.\n", __func__, pha->host_no));
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memset(vpmod, 0, sizeof(struct vp_config_entry_24xx));
+ vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
+ vpmod->entry_count = 1;
+ vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
+ vpmod->vp_count = 1;
+ vpmod->vp_index1 = vha->vp_idx;
+ vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
+ memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
+ memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
+ vpmod->entry_count = 1;
+
+ rval = qla2x00_issue_iocb(pha, vpmod, vpmod_dma, 0);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(printk("%s(%ld): failed to issue VP config IOCB"
+ "(%x).\n", __func__, pha->host_no, rval));
+ } else if (vpmod->comp_status != 0) {
+ DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
+ "-- error status (%x).\n", __func__, pha->host_no,
+ vpmod->comp_status));
+ rval = QLA_FUNCTION_FAILED;
+ } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+ DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
+ "-- completion status (%x).\n", __func__, pha->host_no,
+ le16_to_cpu(vpmod->comp_status)));
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ /* EMPTY */
+ DEBUG11(printk("%s(%ld): done.\n", __func__, pha->host_no));
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
+ }
+ dma_pool_free(pha->s_dma_pool, vpmod, vpmod_dma);
+
+ return rval;
+}
+
+/*
+ * qla24xx_control_vp
+ * Enable or disable a virtual port for the given host
+ *
+ * Input:
+ * vha = virtual adapter block pointer.
+ * cmd = VP control command (enable or disable).
+ *
+ * Returns:
+ * qla2xxx local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
+{
+ int rval;
+ int map, pos;
+ struct vp_ctrl_entry_24xx *vce;
+ dma_addr_t vce_dma;
+ scsi_qla_host_t *ha = vha->parent;
+ int vp_index = vha->vp_idx;
+
+ DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__,
+ ha->host_no, vp_index));
+
+ if (vp_index == 0 || vp_index >= MAX_MULTI_ID_LOOP)
+ return QLA_PARAMETER_ERROR;
+
+ vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
+ if (!vce) {
+ DEBUG2_3(printk("%s(%ld): "
+ "failed to allocate VP Control IOCB.\n", __func__,
+ ha->host_no));
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+ memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
+
+ vce->entry_type = VP_CTRL_IOCB_TYPE;
+ vce->entry_count = 1;
+ vce->command = cpu_to_le16(cmd);
+ vce->vp_count = __constant_cpu_to_le16(1);
+
+ /* The index map in firmware starts at 1, so decrement the index;
+ * this is safe because index 0 is never used.
+ */
+ map = (vp_index - 1) / 8;
+ pos = (vp_index - 1) & 7;
+ down(&ha->vport_sem);
+ vce->vp_idx_map[map] |= 1 << pos;
+ up(&ha->vport_sem);
+
+ rval = qla2x00_issue_iocb(ha, vce, vce_dma, 0);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(printk("%s(%ld): failed to issue VP control IOCB"
+ "(%x).\n", __func__, ha->host_no, rval));
+ printk("%s(%ld): failed to issue VP control IOCB"
+ "(%x).\n", __func__, ha->host_no, rval);
+ } else if (vce->entry_status != 0) {
+ DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
+ "-- error status (%x).\n", __func__, ha->host_no,
+ vce->entry_status));
+ printk("%s(%ld): failed to complete IOCB "
+ "-- error status (%x).\n", __func__, ha->host_no,
+ vce->entry_status);
+ rval = QLA_FUNCTION_FAILED;
+ } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+ DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
+ "-- completion status (%x).\n", __func__, ha->host_no,
+ le16_to_cpu(vce->comp_status)));
+ printk("%s(%ld): failed to complete IOCB "
+ "-- completion status (%x).\n", __func__, ha->host_no,
+ le16_to_cpu(vce->comp_status));
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ DEBUG2(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ }
+
+ dma_pool_free(ha->s_dma_pool, vce, vce_dma);
+
+ return rval;
+}
+
+/*
+ * qla2x00_send_change_request
+ * Enable or disable receiving RSCN requests from the fabric controller
+ *
+ * Input:
+ * ha = adapter block pointer
+ * format = registration format:
+ * 0 - Reserved
+ * 1 - Fabric detected registration
+ * 2 - N_port detected registration
+ * 3 - Full registration
+ * FF - clear registration
+ * vp_idx = Virtual port index
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel Context
+ */
+
+int
+qla2x00_send_change_request(scsi_qla_host_t *ha, uint16_t format,
+ uint16_t vp_idx)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ /*
+ * This command is implicitly executed by firmware during login for the
+ * physical hosts
+ */
+ if (vp_idx == 0)
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
+ mcp->mb[1] = format;
+ mcp->mb[9] = vp_idx;
+ mcp->out_mb = MBX_9|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0|MBX_1;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(ha, mcp);
+
+ if (rval == QLA_SUCCESS) {
+ if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
+ rval = BIT_1;
+ }
+ } else
+ rval = BIT_1;
+
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
new file mode 100644
index 00000000000..54dc415d8b5
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -0,0 +1,497 @@
+/*
+ * QLOGIC LINUX SOFTWARE
+ *
+ * QLogic ISP2x00 device driver for Linux 2.6.x
+ * Copyright (C) 2003-2005 QLogic Corporation
+ * (www.qlogic.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+#include "qla_def.h"
+
+#include <linux/version.h>
+#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
+#include <linux/smp_lock.h>
+#include <linux/list.h>
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+#include <linux/delay.h>
+
+void qla2x00_vp_stop_timer(scsi_qla_host_t *);
+
+void
+qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
+{
+ if (vha->parent && vha->timer_active) {
+ del_timer_sync(&vha->timer);
+ vha->timer_active = 0;
+ }
+}
+
+uint32_t
+qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
+{
+ uint32_t vp_id;
+ scsi_qla_host_t *ha = vha->parent;
+
+ /* Find an empty slot and assign a vp_id */
+ down(&ha->vport_sem);
+ vp_id = find_first_zero_bit((unsigned long *)ha->vp_idx_map,
+ MAX_MULTI_ID_FABRIC);
+ if (vp_id > MAX_MULTI_ID_FABRIC) {
+ DEBUG15(printk ("vp_id %d is bigger than MAX_MULTI_ID_FABRID\n",
+ vp_id));
+ up(&ha->vport_sem);
+ return vp_id;
+ }
+
+ set_bit(vp_id, (unsigned long *)ha->vp_idx_map);
+ ha->num_vhosts++;
+ vha->vp_idx = vp_id;
+ list_add_tail(&vha->vp_list, &ha->vp_list);
+ up(&ha->vport_sem);
+ return vp_id;
+}
+
+void
+qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
+{
+ uint16_t vp_id;
+ scsi_qla_host_t *ha = vha->parent;
+
+ down(&ha->vport_sem);
+ vp_id = vha->vp_idx;
+ ha->num_vhosts--;
+ clear_bit(vp_id, (unsigned long *)ha->vp_idx_map);
+ list_del(&vha->vp_list);
+ up(&ha->vport_sem);
+}
+
+scsi_qla_host_t *
+qla24xx_find_vhost_by_name(scsi_qla_host_t *ha, uint8_t *port_name)
+{
+ scsi_qla_host_t *vha;
+
+ /* Locate matching device in database. */
+ list_for_each_entry(vha, &ha->vp_list, vp_list) {
+ if (!memcmp(port_name, vha->port_name, WWN_SIZE))
+ return vha;
+ }
+ return NULL;
+}
+
+/*
+ * qla2x00_mark_vp_devices_dead
+ * Updates fcport state when device goes offline.
+ *
+ * Input:
+ * vha = virtual adapter block pointer.
+ *
+ * Return:
+ * None.
+ *
+ * Context:
+ */
+void
+qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
+{
+ fc_port_t *fcport;
+ scsi_qla_host_t *pha = to_qla_parent(vha);
+
+ list_for_each_entry(fcport, &pha->fcports, list) {
+ if (fcport->vp_idx != vha->vp_idx)
+ continue;
+
+ DEBUG15(printk("scsi(%ld): Marking port dead, "
+ "loop_id=0x%04x :%x\n",
+ vha->host_no, fcport->loop_id, fcport->vp_idx));
+
+ atomic_set(&fcport->state, FCS_DEVICE_DEAD);
+ qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ }
+}
+
+int
+qla24xx_disable_vp(scsi_qla_host_t *vha)
+{
+ int ret;
+
+ ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+
+ /* Delete all vp's fcports from parent's list */
+ qla2x00_mark_vp_devices_dead(vha);
+ atomic_set(&vha->vp_state, VP_FAILED);
+ vha->flags.management_server_logged_in = 0;
+ if (ret == QLA_SUCCESS) {
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
+ } else {
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
+ return -1;
+ }
+ return 0;
+}
+
+int
+qla24xx_enable_vp(scsi_qla_host_t *vha)
+{
+ int ret;
+ scsi_qla_host_t *ha = vha->parent;
+
+ /* Check if physical ha port is Up */
+ if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
+ atomic_read(&ha->loop_state) == LOOP_DEAD ) {
+ vha->vp_err_state = VP_ERR_PORTDWN;
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
+ goto enable_failed;
+ }
+
+ /* Initialize the new vport unless it is a persistent port */
+ down(&ha->vport_sem);
+ ret = qla24xx_modify_vp_config(vha);
+ up(&ha->vport_sem);
+
+ if (ret != QLA_SUCCESS) {
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
+ goto enable_failed;
+ }
+
+ DEBUG15(qla_printk(KERN_INFO, ha,
+ "Virtual port with id: %d - Enabled\n", vha->vp_idx));
+ return 0;
+
+enable_failed:
+ DEBUG15(qla_printk(KERN_INFO, ha,
+ "Virtual port with id: %d - Disabled\n", vha->vp_idx));
+ return 1;
+}
+
+/**
+ * qla24xx_modify_vhba() - Modifies the virtual fabric port's configuration
+ * @ha: HA context
+ * @vp: pointer to buffer of virtual port parameters
+ * @vp_id: returns the virtual port index, or MAX_NUM_VPORT_LOOP on failure
+ *
+ * Returns a VP_RET_CODE_* status code.
+ */
+uint32_t
+qla24xx_modify_vhba(scsi_qla_host_t *ha, vport_params_t *vp, uint32_t *vp_id)
+{
+ scsi_qla_host_t *vha;
+
+ vha = qla24xx_find_vhost_by_name(ha, vp->port_name);
+ if (!vha) {
+ *vp_id = MAX_NUM_VPORT_LOOP;
+ return VP_RET_CODE_WWPN;
+ }
+
+ if (qla24xx_enable_vp(vha)) {
+ scsi_host_put(vha->host);
+ qla2x00_mem_free(vha);
+ *vp_id = MAX_NUM_VPORT_LOOP;
+ return VP_RET_CODE_RESOURCES;
+ }
+
+ *vp_id = vha->vp_idx;
+ return VP_RET_CODE_OK;
+}
+
+void
+qla24xx_configure_vp(scsi_qla_host_t *vha)
+{
+ struct fc_vport *fc_vport;
+ int ret;
+
+ fc_vport = vha->fc_vport;
+
+ DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n",
+ vha->host_no, __func__));
+ ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
+ if (ret != QLA_SUCCESS) {
+ DEBUG15(qla_printk(KERN_ERR, vha, "Failed to enable receiving"
+ " of RSCN requests: 0x%x\n", ret));
+ return;
+ } else {
+ /* Corresponds to SCR enabled */
+ clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
+ }
+
+ vha->flags.online = 1;
+ if (qla24xx_configure_vhba(vha))
+ return;
+
+ atomic_set(&vha->vp_state, VP_ACTIVE);
+ fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
+}
+
+void
+qla2x00_alert_all_vps(scsi_qla_host_t *ha, uint16_t *mb)
+{
+ int i, vp_idx_matched;
+ scsi_qla_host_t *vha;
+
+ if (ha->parent)
+ return;
+
+ i = find_next_bit((unsigned long *)ha->vp_idx_map,
+ MAX_MULTI_ID_FABRIC + 1, 1);
+ for (;i <= MAX_MULTI_ID_FABRIC;
+ i = find_next_bit((unsigned long *)ha->vp_idx_map,
+ MAX_MULTI_ID_FABRIC + 1, i + 1)) {
+ vp_idx_matched = 0;
+
+ list_for_each_entry(vha, &ha->vp_list, vp_list) {
+ if (i == vha->vp_idx) {
+ vp_idx_matched = 1;
+ break;
+ }
+ }
+
+ if (vp_idx_matched) {
+ switch (mb[0]) {
+ case MBA_LIP_OCCURRED:
+ case MBA_LOOP_UP:
+ case MBA_LOOP_DOWN:
+ case MBA_LIP_RESET:
+ case MBA_POINT_TO_POINT:
+ case MBA_CHG_IN_CONNECTION:
+ case MBA_PORT_UPDATE:
+ case MBA_RSCN_UPDATE:
+ DEBUG15(printk("scsi(%ld)%s: Async_event for"
+ " VP[%d], mb = 0x%x, vha=%p\n",
+ vha->host_no, __func__,i, *mb, vha));
+ qla2x00_async_event(vha, mb);
+ break;
+ }
+ }
+ }
+}
+
+void
+qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
+{
+ /*
+ * Physical port will do most of the abort and recovery work. We can
+ * just treat it as a loop down
+ */
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ qla2x00_mark_all_devices_lost(vha, 0);
+ } else {
+ if (!atomic_read(&vha->loop_down_timer))
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ }
+
+ DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
+ vha->host_no, vha->vp_idx));
+ qla24xx_enable_vp(vha);
+}
+
+int
+qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
+{
+ if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
+ /* VP acquired. complete port configuration */
+ qla24xx_configure_vp(vha);
+ return 0;
+ }
+
+ if (test_and_clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
+ qla2x00_vp_abort_isp(vha);
+
+ if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
+ (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
+ clear_bit(RESET_ACTIVE, &vha->dpc_flags);
+ }
+
+ if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+ if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
+ qla2x00_loop_resync(vha);
+ clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
+ }
+ }
+
+ return 0;
+}
+
+void
+qla2x00_do_dpc_all_vps(scsi_qla_host_t *ha)
+{
+ int ret;
+ int i, vp_idx_matched;
+ scsi_qla_host_t *vha;
+
+ if (ha->parent)
+ return;
+ if (list_empty(&ha->vp_list))
+ return;
+
+ clear_bit(VP_DPC_NEEDED, &ha->dpc_flags);
+
+ i = find_next_bit((unsigned long *)ha->vp_idx_map,
+ MAX_MULTI_ID_FABRIC + 1, 1);
+ for (;i <= MAX_MULTI_ID_FABRIC;
+ i = find_next_bit((unsigned long *)ha->vp_idx_map,
+ MAX_MULTI_ID_FABRIC + 1, i + 1)) {
+ vp_idx_matched = 0;
+
+ list_for_each_entry(vha, &ha->vp_list, vp_list) {
+ if (i == vha->vp_idx) {
+ vp_idx_matched = 1;
+ break;
+ }
+ }
+
+ if (vp_idx_matched)
+ ret = qla2x00_do_dpc_vp(vha);
+ }
+}
+
+int
+qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
+{
+ scsi_qla_host_t *ha = (scsi_qla_host_t *) fc_vport->shost->hostdata;
+ scsi_qla_host_t *vha;
+ uint8_t port_name[WWN_SIZE];
+
+ if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
+ return VPCERR_UNSUPPORTED;
+
+ /* Check that the F/W and H/W support NPIV */
+ if (!ha->flags.npiv_supported)
+ return VPCERR_UNSUPPORTED;
+
+ /* Check whether an NPIV-capable switch is present */
+ if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
+ return VPCERR_NO_FABRIC_SUPP;
+
+ /* Check that the WWPN is unique */
+ u64_to_wwn(fc_vport->port_name, port_name);
+ vha = qla24xx_find_vhost_by_name(ha, port_name);
+ if (vha)
+ return VPCERR_BAD_WWN;
+
+ /* Check against the maximum number of supported NPIV vports */
+ if (ha->num_vhosts > ha->max_npiv_vports) {
+ DEBUG15(printk("scsi(%ld): num_vhosts %d is bigger than "
+ "max_npv_vports %d.\n", ha->host_no,
+ (uint16_t) ha->num_vhosts, (int) ha->max_npiv_vports));
+ return VPCERR_UNSUPPORTED;
+ }
+ return 0;
+}
+
+scsi_qla_host_t *
+qla24xx_create_vhost(struct fc_vport *fc_vport)
+{
+ scsi_qla_host_t *ha = (scsi_qla_host_t *) fc_vport->shost->hostdata;
+ scsi_qla_host_t *vha;
+ struct Scsi_Host *host;
+
+ host = scsi_host_alloc(&qla24xx_driver_template,
+ sizeof(scsi_qla_host_t));
+ if (!host) {
+ printk(KERN_WARNING
+ "qla2xxx: scsi_host_alloc() failed for vport\n");
+ return(NULL);
+ }
+
+ vha = (scsi_qla_host_t *)host->hostdata;
+
+ /* clone the parent hba */
+ memcpy(vha, ha, sizeof (scsi_qla_host_t));
+
+ fc_vport->dd_data = vha;
+
+ vha->node_name = kmalloc(WWN_SIZE * sizeof(char), GFP_KERNEL);
+ if (!vha->node_name)
+ goto create_vhost_failed_1;
+
+ vha->port_name = kmalloc(WWN_SIZE * sizeof(char), GFP_KERNEL);
+ if (!vha->port_name)
+ goto create_vhost_failed_2;
+
+ /* New host info */
+ u64_to_wwn(fc_vport->node_name, vha->node_name);
+ u64_to_wwn(fc_vport->port_name, vha->port_name);
+
+ vha->host = host;
+ vha->host_no = host->host_no;
+ vha->parent = ha;
+ vha->fc_vport = fc_vport;
+ vha->device_flags = 0;
+ vha->instance = num_hosts;
+ vha->vp_idx = qla24xx_allocate_vp_id(vha);
+ if (vha->vp_idx > ha->max_npiv_vports) {
+ DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
+ vha->host_no));
+ goto create_vhost_failed_3;
+ }
+ vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
+
+ init_MUTEX(&vha->mbx_cmd_sem);
+ init_MUTEX_LOCKED(&vha->mbx_intr_sem);
+
+ INIT_LIST_HEAD(&vha->list);
+ INIT_LIST_HEAD(&vha->fcports);
+ INIT_LIST_HEAD(&vha->vp_fcports);
+
+ vha->dpc_flags = 0L;
+ set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+ set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
+
+ /*
+ * Avoid processing a parent's RSCN for the vport before its own SCR
+ * has completed.
+ */
+ set_bit(VP_SCR_NEEDED, &vha->vp_flags);
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+
+ qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
+
+ host->can_queue = vha->request_q_length + 128;
+ host->this_id = 255;
+ host->cmd_per_lun = 3;
+ host->max_cmd_len = MAX_CMDSZ;
+ host->max_channel = MAX_BUSES - 1;
+ host->max_lun = MAX_LUNS;
+ host->unique_id = vha->instance;
+ host->max_id = MAX_TARGETS_2200;
+ host->transportt = qla2xxx_transport_vport_template;
+
+ DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n",
+ vha->host_no, vha));
+
+ vha->flags.init_done = 1;
+ num_hosts++;
+
+ down(&ha->vport_sem);
+ set_bit(vha->vp_idx, (unsigned long *)ha->vp_idx_map);
+ ha->cur_vport_count++;
+ up(&ha->vport_sem);
+
+ return vha;
+
+create_vhost_failed_3:
+ kfree(vha->port_name);
+
+create_vhost_failed_2:
+ kfree(vha->node_name);
+
+create_vhost_failed_1:
+ return NULL;
+}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index b98136adaaa..b5a77b0c0de 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -29,8 +29,7 @@ static struct kmem_cache *srb_cachep;
/*
* Ioctl related information.
*/
-static int num_hosts;
-
+int num_hosts;
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xlogintimeout,
@@ -112,7 +111,7 @@ static int qla2x00_device_reset(scsi_qla_host_t *, fc_port_t *);
static int qla2x00_change_queue_depth(struct scsi_device *, int);
static int qla2x00_change_queue_type(struct scsi_device *, int);
-static struct scsi_host_template qla2x00_driver_template = {
+struct scsi_host_template qla2x00_driver_template = {
.module = THIS_MODULE,
.name = QLA2XXX_DRIVER_NAME,
.queuecommand = qla2x00_queuecommand,
@@ -143,7 +142,7 @@ static struct scsi_host_template qla2x00_driver_template = {
.shost_attrs = qla2x00_host_attrs,
};
-static struct scsi_host_template qla24xx_driver_template = {
+struct scsi_host_template qla24xx_driver_template = {
.module = THIS_MODULE,
.name = QLA2XXX_DRIVER_NAME,
.queuecommand = qla24xx_queuecommand,
@@ -171,21 +170,21 @@ static struct scsi_host_template qla24xx_driver_template = {
};
static struct scsi_transport_template *qla2xxx_transport_template = NULL;
+struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
/* TODO Convert to inlines
*
* Timer routines
*/
-#define WATCH_INTERVAL 1 /* number of seconds */
-static void qla2x00_timer(scsi_qla_host_t *);
+void qla2x00_timer(scsi_qla_host_t *);
-static __inline__ void qla2x00_start_timer(scsi_qla_host_t *,
+__inline__ void qla2x00_start_timer(scsi_qla_host_t *,
void *, unsigned long);
static __inline__ void qla2x00_restart_timer(scsi_qla_host_t *, unsigned long);
-static __inline__ void qla2x00_stop_timer(scsi_qla_host_t *);
+__inline__ void qla2x00_stop_timer(scsi_qla_host_t *);
-static inline void
+__inline__ void
qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval)
{
init_timer(&ha->timer);
@@ -202,7 +201,7 @@ qla2x00_restart_timer(scsi_qla_host_t *ha, unsigned long interval)
mod_timer(&ha->timer, jiffies + interval * HZ);
}
-static __inline__ void
+__inline__ void
qla2x00_stop_timer(scsi_qla_host_t *ha)
{
del_timer_sync(&ha->timer);
@@ -213,8 +212,8 @@ static int qla2x00_do_dpc(void *data);
static void qla2x00_rst_aen(scsi_qla_host_t *);
-static uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
-static void qla2x00_mem_free(scsi_qla_host_t *ha);
+uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
+void qla2x00_mem_free(scsi_qla_host_t *ha);
static int qla2x00_allocate_sp_pool( scsi_qla_host_t *ha);
static void qla2x00_free_sp_pool(scsi_qla_host_t *ha);
static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *);
@@ -438,6 +437,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
srb_t *sp;
int rval;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
rval = fc_remote_port_chkready(rport);
if (rval) {
@@ -453,7 +453,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
if (atomic_read(&fcport->state) != FCS_ONLINE) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
- atomic_read(&ha->loop_state) == LOOP_DEAD) {
+ atomic_read(&pha->loop_state) == LOOP_DEAD) {
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
@@ -462,7 +462,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
spin_unlock_irq(ha->host->host_lock);
- sp = qla2x00_get_new_sp(ha, fcport, cmd, done);
+ sp = qla2x00_get_new_sp(pha, fcport, cmd, done);
if (!sp)
goto qc24_host_busy_lock;
@@ -475,8 +475,8 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
return 0;
qc24_host_busy_free_sp:
- qla2x00_sp_free_dma(ha, sp);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_sp_free_dma(pha, sp);
+ mempool_free(sp, pha->srb_mempool);
qc24_host_busy_lock:
spin_lock_irq(ha->host->host_lock);
@@ -548,16 +548,17 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *ha)
{
int return_status;
unsigned long wait_online;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
- while (((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) ||
- test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &ha->dpc_flags) ||
- ha->dpc_active) && time_before(jiffies, wait_online)) {
+ while (((test_bit(ISP_ABORT_NEEDED, &pha->dpc_flags)) ||
+ test_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &pha->dpc_flags) ||
+ pha->dpc_active) && time_before(jiffies, wait_online)) {
msleep(1000);
}
- if (ha->flags.online)
+ if (pha->flags.online)
return_status = QLA_SUCCESS;
else
return_status = QLA_FUNCTION_FAILED;
@@ -588,14 +589,15 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
{
int return_status = QLA_SUCCESS;
unsigned long loop_timeout ;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
/* wait for 5 min at the max for loop to be ready */
loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
- while ((!atomic_read(&ha->loop_down_timer) &&
- atomic_read(&ha->loop_state) == LOOP_DOWN) ||
- atomic_read(&ha->loop_state) != LOOP_READY) {
- if (atomic_read(&ha->loop_state) == LOOP_DEAD) {
+ while ((!atomic_read(&pha->loop_down_timer) &&
+ atomic_read(&pha->loop_state) == LOOP_DOWN) ||
+ atomic_read(&pha->loop_state) != LOOP_READY) {
+ if (atomic_read(&pha->loop_state) == LOOP_DEAD) {
return_status = QLA_FUNCTION_FAILED;
break;
}
@@ -650,6 +652,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
unsigned long serial;
unsigned long flags;
int wait = 0;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
qla2x00_block_error_handler(cmd);
@@ -663,9 +666,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
serial = cmd->serial_number;
/* Check active list for the command. */
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&pha->hardware_lock, flags);
for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
- sp = ha->outstanding_cmds[i];
+ sp = pha->outstanding_cmds[i];
if (sp == NULL)
continue;
@@ -677,7 +680,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
__func__, ha->host_no, sp, serial));
DEBUG3(qla2x00_print_scsi_cmd(cmd));
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
if (ha->isp_ops.abort_command(ha, sp)) {
DEBUG2(printk("%s(%ld): abort_command "
"mbx failed.\n", __func__, ha->host_no));
@@ -686,11 +689,11 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
"mbx success.\n", __func__, ha->host_no));
wait = 1;
}
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&pha->hardware_lock, flags);
break;
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
/* Wait for the command to be returned. */
if (wait) {
@@ -731,6 +734,7 @@ qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t)
srb_t *sp;
struct scsi_cmnd *cmd;
unsigned long flags;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
status = 0;
@@ -739,19 +743,20 @@ qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t)
* array
*/
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
- spin_lock_irqsave(&ha->hardware_lock, flags);
- sp = ha->outstanding_cmds[cnt];
+ spin_lock_irqsave(&pha->hardware_lock, flags);
+ sp = pha->outstanding_cmds[cnt];
if (sp) {
cmd = sp->cmd;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (cmd->device->id == t) {
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+ if (cmd->device->id == t &&
+ ha->vp_idx == sp->ha->vp_idx) {
if (!qla2x00_eh_wait_on_command(ha, cmd)) {
status = 1;
break;
}
}
} else {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
}
}
return (status);
@@ -782,14 +787,12 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
- int ret;
+ int ret = FAILED;
unsigned int id, lun;
unsigned long serial;
qla2x00_block_error_handler(cmd);
- ret = FAILED;
-
id = cmd->device->id;
lun = cmd->device->lun;
serial = cmd->serial_number;
@@ -912,15 +915,14 @@ static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
+ scsi_qla_host_t *pha = to_qla_parent(ha);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
- int ret;
+ int ret = FAILED;
unsigned int id, lun;
unsigned long serial;
qla2x00_block_error_handler(cmd);
- ret = FAILED;
-
id = cmd->device->id;
lun = cmd->device->lun;
serial = cmd->serial_number;
@@ -944,7 +946,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
goto eh_bus_reset_done;
/* Flush outstanding commands. */
- if (!qla2x00_eh_wait_for_pending_commands(ha))
+ if (!qla2x00_eh_wait_for_pending_commands(pha))
ret = FAILED;
eh_bus_reset_done:
@@ -974,14 +976,13 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
- int ret;
+ int ret = FAILED;
unsigned int id, lun;
unsigned long serial;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
qla2x00_block_error_handler(cmd);
- ret = FAILED;
-
id = cmd->device->id;
lun = cmd->device->lun;
serial = cmd->serial_number;
@@ -1004,21 +1005,24 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
* while dpc is stuck for the mailbox to complete.
*/
qla2x00_wait_for_loop_ready(ha);
- set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
- if (qla2x00_abort_isp(ha)) {
- clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
+ set_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
+ if (qla2x00_abort_isp(pha)) {
+ clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
/* failed. schedule dpc to try */
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ set_bit(ISP_ABORT_NEEDED, &pha->dpc_flags);
if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
goto eh_host_reset_lock;
}
- clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
+ clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
/* Waiting for our command in done_queue to be returned to OS.*/
- if (qla2x00_eh_wait_for_pending_commands(ha))
+ if (qla2x00_eh_wait_for_pending_commands(pha))
ret = SUCCESS;
+ if (ha->parent)
+ qla2x00_vp_abort_isp(ha);
+
eh_host_reset_lock:
qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
(ret == FAILED) ? "failed" : "succeded");
@@ -1435,6 +1439,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->host = host;
ha->host_no = host->host_no;
sprintf(ha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, ha->host_no);
+ ha->parent = NULL;
/* Set ISP-type information. */
qla2x00_set_isp_flags(ha);
@@ -1452,7 +1457,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->prev_topology = 0;
ha->init_cb_size = sizeof(init_cb_t);
- ha->mgmt_svr_loop_id = MANAGEMENT_SERVER;
+ ha->mgmt_svr_loop_id = MANAGEMENT_SERVER + ha->vp_idx;
ha->link_data_rate = PORT_SPEED_UNKNOWN;
ha->optrom_size = OPTROM_SIZE_2300;
@@ -1524,8 +1529,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->request_q_length = REQUEST_ENTRY_CNT_24XX;
ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
- ha->init_cb_size = sizeof(struct init_cb_24xx);
- ha->mgmt_svr_loop_id = 10;
+ ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
+ ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
ha->isp_ops.pci_config = qla24xx_pci_config;
ha->isp_ops.reset_chip = qla24xx_reset_chip;
ha->isp_ops.chip_diag = qla24xx_chip_diag;
@@ -1563,10 +1568,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->instance = num_hosts;
init_MUTEX(&ha->mbx_cmd_sem);
+ init_MUTEX(&ha->vport_sem);
init_MUTEX_LOCKED(&ha->mbx_intr_sem);
INIT_LIST_HEAD(&ha->list);
INIT_LIST_HEAD(&ha->fcports);
+ INIT_LIST_HEAD(&ha->vp_list);
+
+ set_bit(0, (unsigned long *) ha->vp_idx_map);
qla2x00_config_dma_addressing(ha);
if (qla2x00_mem_alloc(ha)) {
@@ -1789,7 +1798,8 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
int do_login, int defer)
{
- if (atomic_read(&fcport->state) == FCS_ONLINE)
+ if (atomic_read(&fcport->state) == FCS_ONLINE &&
+ ha->vp_idx == fcport->vp_idx)
qla2x00_schedule_rport_del(ha, fcport, defer);
/*
@@ -1840,19 +1850,23 @@ void
qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
{
fc_port_t *fcport;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
- list_for_each_entry(fcport, &ha->fcports, list) {
- if (fcport->port_type != FCT_TARGET)
+ list_for_each_entry(fcport, &pha->fcports, list) {
+ if (ha->vp_idx != 0 && ha->vp_idx != fcport->vp_idx)
continue;
-
/*
* No point in marking the device as lost, if the device is
* already DEAD.
*/
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
continue;
- if (atomic_read(&fcport->state) == FCS_ONLINE)
- qla2x00_schedule_rport_del(ha, fcport, defer);
+ if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ if (defer)
+ qla2x00_schedule_rport_del(ha, fcport, defer);
+ else if (ha->vp_idx == fcport->vp_idx)
+ qla2x00_schedule_rport_del(ha, fcport, defer);
+ }
atomic_set(&fcport->state, FCS_DEVICE_LOST);
}
@@ -1868,7 +1882,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
* 0 = success.
* 1 = failure.
*/
-static uint8_t
+uint8_t
qla2x00_mem_alloc(scsi_qla_host_t *ha)
{
char name[16];
@@ -1920,33 +1934,33 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
continue;
}
- snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME,
- ha->host_no);
- ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
- DMA_POOL_SIZE, 8, 0);
- if (ha->s_dma_pool == NULL) {
+ /* get consistent memory allocated for init control block */
+ ha->init_cb = dma_alloc_coherent(&ha->pdev->dev,
+ ha->init_cb_size, &ha->init_cb_dma, GFP_KERNEL);
+ if (ha->init_cb == NULL) {
qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - s_dma_pool\n");
+ "Memory Allocation failed - init_cb\n");
qla2x00_mem_free(ha);
msleep(100);
continue;
}
+ memset(ha->init_cb, 0, ha->init_cb_size);
- /* get consistent memory allocated for init control block */
- ha->init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
- &ha->init_cb_dma);
- if (ha->init_cb == NULL) {
+ snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME,
+ ha->host_no);
+ ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+ DMA_POOL_SIZE, 8, 0);
+ if (ha->s_dma_pool == NULL) {
qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - init_cb\n");
+ "Memory Allocation failed - s_dma_pool\n");
qla2x00_mem_free(ha);
msleep(100);
continue;
}
- memset(ha->init_cb, 0, ha->init_cb_size);
if (qla2x00_allocate_sp_pool(ha)) {
qla_printk(KERN_WARNING, ha,
@@ -2052,7 +2066,7 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
* Input:
* ha = adapter block pointer.
*/
-static void
+void
qla2x00_mem_free(scsi_qla_host_t *ha)
{
struct list_head *fcpl, *fcptemp;
@@ -2088,12 +2102,13 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
if (ha->ms_iocb)
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
- if (ha->init_cb)
- dma_pool_free(ha->s_dma_pool, ha->init_cb, ha->init_cb_dma);
-
if (ha->s_dma_pool)
dma_pool_destroy(ha->s_dma_pool);
+ if (ha->init_cb)
+ dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
+ ha->init_cb, ha->init_cb_dma);
+
if (ha->gid_list)
dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
ha->gid_list_dma);
@@ -2199,6 +2214,7 @@ qla2x00_free_sp_pool( scsi_qla_host_t *ha)
static int
qla2x00_do_dpc(void *data)
{
+ int rval;
scsi_qla_host_t *ha;
fc_port_t *fcport;
uint8_t status;
@@ -2347,7 +2363,7 @@ qla2x00_do_dpc(void *data)
if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
&ha->dpc_flags))) {
- qla2x00_loop_resync(ha);
+ rval = qla2x00_loop_resync(ha);
clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
}
@@ -2374,6 +2390,8 @@ qla2x00_do_dpc(void *data)
if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags))
ha->isp_ops.beacon_blink(ha);
+ qla2x00_do_dpc_all_vps(ha);
+
ha->dpc_active = 0;
} /* End of while(1) */
@@ -2426,13 +2444,7 @@ qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp)
struct scsi_cmnd *cmd = sp->cmd;
if (sp->flags & SRB_DMA_VALID) {
- if (cmd->use_sg) {
- dma_unmap_sg(&ha->pdev->dev, cmd->request_buffer,
- cmd->use_sg, cmd->sc_data_direction);
- } else if (cmd->request_bufflen) {
- dma_unmap_single(&ha->pdev->dev, sp->dma_handle,
- cmd->request_bufflen, cmd->sc_data_direction);
- }
+ scsi_dma_unmap(cmd);
sp->flags &= ~SRB_DMA_VALID;
}
CMD_SP(cmd) = NULL;
@@ -2458,7 +2470,7 @@ qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp)
*
* Context: Interrupt
***************************************************************************/
-static void
+void
qla2x00_timer(scsi_qla_host_t *ha)
{
unsigned long cpu_flags = 0;
@@ -2467,6 +2479,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
int index;
srb_t *sp;
int t;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
/*
* Ports - Port down timer.
@@ -2512,23 +2525,29 @@ qla2x00_timer(scsi_qla_host_t *ha)
atomic_set(&ha->loop_state, LOOP_DEAD);
/* Schedule an ISP abort to return any tape commands. */
- spin_lock_irqsave(&ha->hardware_lock, cpu_flags);
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS;
- index++) {
- fc_port_t *sfcp;
+ /* NPIV - scan physical port only */
+ if (!ha->parent) {
+ spin_lock_irqsave(&ha->hardware_lock,
+ cpu_flags);
+ for (index = 1;
+ index < MAX_OUTSTANDING_COMMANDS;
+ index++) {
+ fc_port_t *sfcp;
+
+ sp = ha->outstanding_cmds[index];
+ if (!sp)
+ continue;
+ sfcp = sp->fcport;
+ if (!(sfcp->flags & FCF_TAPE_PRESENT))
+ continue;
- sp = ha->outstanding_cmds[index];
- if (!sp)
- continue;
- sfcp = sp->fcport;
- if (!(sfcp->flags & FCF_TAPE_PRESENT))
- continue;
-
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
- break;
+ set_bit(ISP_ABORT_NEEDED,
+ &ha->dpc_flags);
+ break;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ cpu_flags);
}
- spin_unlock_irqrestore(&ha->hardware_lock, cpu_flags);
-
set_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags);
start_dpc++;
}
@@ -2572,8 +2591,9 @@ qla2x00_timer(scsi_qla_host_t *ha)
test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) ||
test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) ||
test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) ||
+ test_bit(VP_DPC_NEEDED, &ha->dpc_flags) ||
test_bit(RELOGIN_NEEDED, &ha->dpc_flags)))
- qla2xxx_wake_dpc(ha);
+ qla2xxx_wake_dpc(pha);
qla2x00_restart_timer(ha, WATCH_INTERVAL);
}
@@ -2717,14 +2737,24 @@ qla2x00_module_init(void)
qla2xxx_transport_template =
fc_attach_transport(&qla2xxx_transport_functions);
- if (!qla2xxx_transport_template)
+ if (!qla2xxx_transport_template) {
+ kmem_cache_destroy(srb_cachep);
return -ENODEV;
+ }
+ qla2xxx_transport_vport_template =
+ fc_attach_transport(&qla2xxx_transport_vport_functions);
+ if (!qla2xxx_transport_vport_template) {
+ kmem_cache_destroy(srb_cachep);
+ fc_release_transport(qla2xxx_transport_template);
+ return -ENODEV;
+ }
printk(KERN_INFO "QLogic Fibre Channel HBA Driver\n");
ret = pci_register_driver(&qla2xxx_pci_driver);
if (ret) {
kmem_cache_destroy(srb_cachep);
fc_release_transport(qla2xxx_transport_template);
+ fc_release_transport(qla2xxx_transport_vport_template);
}
return ret;
}
@@ -2739,6 +2769,7 @@ qla2x00_module_exit(void)
qla2x00_release_firmware();
kmem_cache_destroy(srb_cachep);
fc_release_transport(qla2xxx_transport_template);
+ fc_release_transport(qla2xxx_transport_vport_template);
}
module_init(qla2x00_module_init);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index c375a4efbc7..fd2f10a2534 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.01.07-k7"
+#define QLA2XXX_VERSION "8.02.00-k1"
#define QLA_DRIVER_MAJOR_VER 8
-#define QLA_DRIVER_MINOR_VER 1
-#define QLA_DRIVER_PATCH_VER 7
+#define QLA_DRIVER_MINOR_VER 2
+#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index 6437d024b0d..fcc184cd066 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -6,176 +6,9 @@
*/
#include "ql4_def.h"
-#include <scsi/scsi_dbg.h>
-
-#if 0
-
-static void qla4xxx_print_srb_info(struct srb * srb)
-{
- printk("%s: srb = 0x%p, flags=0x%02x\n", __func__, srb, srb->flags);
- printk("%s: cmd = 0x%p, saved_dma_handle = 0x%lx\n",
- __func__, srb->cmd, (unsigned long) srb->dma_handle);
- printk("%s: fw_ddb_index = %d, lun = %d\n",
- __func__, srb->fw_ddb_index, srb->cmd->device->lun);
- printk("%s: iocb_tov = %d\n",
- __func__, srb->iocb_tov);
- printk("%s: cc_stat = 0x%x, r_start = 0x%lx, u_start = 0x%lx\n\n",
- __func__, srb->cc_stat, srb->r_start, srb->u_start);
-}
-
-void qla4xxx_print_scsi_cmd(struct scsi_cmnd *cmd)
-{
- printk("SCSI Command = 0x%p, Handle=0x%p\n", cmd, cmd->host_scribble);
- printk(" b=%d, t=%02xh, l=%02xh, cmd_len = %02xh\n",
- cmd->device->channel, cmd->device->id, cmd->device->lun,
- cmd->cmd_len);
- scsi_print_command(cmd);
- printk(" seg_cnt = %d\n", cmd->use_sg);
- printk(" request buffer = 0x%p, request buffer len = 0x%x\n",
- cmd->request_buffer, cmd->request_bufflen);
- if (cmd->use_sg) {
- struct scatterlist *sg;
- sg = (struct scatterlist *)cmd->request_buffer;
- printk(" SG buffer: \n");
- qla4xxx_dump_buffer((caddr_t) sg,
- (cmd->use_sg * sizeof(*sg)));
- }
- printk(" tag = %d, transfersize = 0x%x \n", cmd->tag,
- cmd->transfersize);
- printk(" Pid = %d, SP = 0x%p\n", (int)cmd->pid, cmd->SCp.ptr);
- printk(" underflow size = 0x%x, direction=0x%x\n", cmd->underflow,
- cmd->sc_data_direction);
- printk(" Current time (jiffies) = 0x%lx, "
- "timeout expires = 0x%lx\n", jiffies, cmd->eh_timeout.expires);
- qla4xxx_print_srb_info((struct srb *) cmd->SCp.ptr);
-}
-
-void __dump_registers(struct scsi_qla_host *ha)
-{
- uint8_t i;
- for (i = 0; i < MBOX_REG_COUNT; i++) {
- printk(KERN_INFO "0x%02X mailbox[%d] = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, mailbox[i]), i,
- readw(&ha->reg->mailbox[i]));
- }
- printk(KERN_INFO "0x%02X flash_address = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, flash_address),
- readw(&ha->reg->flash_address));
- printk(KERN_INFO "0x%02X flash_data = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, flash_data),
- readw(&ha->reg->flash_data));
- printk(KERN_INFO "0x%02X ctrl_status = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, ctrl_status),
- readw(&ha->reg->ctrl_status));
- if (is_qla4010(ha)) {
- printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram),
- readw(&ha->reg->u1.isp4010.nvram));
- }
-
- else if (is_qla4022(ha) | is_qla4032(ha)) {
- printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u1.isp4022.intr_mask),
- readw(&ha->reg->u1.isp4022.intr_mask));
- printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram),
- readw(&ha->reg->u1.isp4022.nvram));
- printk(KERN_INFO "0x%02X semaphore = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u1.isp4022.semaphore),
- readw(&ha->reg->u1.isp4022.semaphore));
- }
- printk(KERN_INFO "0x%02X req_q_in = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, req_q_in),
- readw(&ha->reg->req_q_in));
- printk(KERN_INFO "0x%02X rsp_q_out = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, rsp_q_out),
- readw(&ha->reg->rsp_q_out));
- if (is_qla4010(ha)) {
- printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4010.ext_hw_conf),
- readw(&ha->reg->u2.isp4010.ext_hw_conf));
- printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4010.port_ctrl),
- readw(&ha->reg->u2.isp4010.port_ctrl));
- printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4010.port_status),
- readw(&ha->reg->u2.isp4010.port_status));
- printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4010.req_q_out),
- readw(&ha->reg->u2.isp4010.req_q_out));
- printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out),
- readw(&ha->reg->u2.isp4010.gp_out));
- printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in),
- readw(&ha->reg->u2.isp4010.gp_in));
- printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4010.port_err_status),
- readw(&ha->reg->u2.isp4010.port_err_status));
- }
-
- else if (is_qla4022(ha) | is_qla4032(ha)) {
- printk(KERN_INFO "Page 0 Registers:\n");
- printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4022.p0.ext_hw_conf),
- readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));
- printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4022.p0.port_ctrl),
- readw(&ha->reg->u2.isp4022.p0.port_ctrl));
- printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4022.p0.port_status),
- readw(&ha->reg->u2.isp4022.p0.port_status));
- printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4022.p0.gp_out),
- readw(&ha->reg->u2.isp4022.p0.gp_out));
- printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),
- readw(&ha->reg->u2.isp4022.p0.gp_in));
- printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4022.p0.port_err_status),
- readw(&ha->reg->u2.isp4022.p0.port_err_status));
- printk(KERN_INFO "Page 1 Registers:\n");
- writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
- &ha->reg->ctrl_status);
- printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4022.p1.req_q_out),
- readw(&ha->reg->u2.isp4022.p1.req_q_out));
- writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
- &ha->reg->ctrl_status);
- }
-}
-
-void qla4xxx_dump_mbox_registers(struct scsi_qla_host *ha)
-{
- unsigned long flags = 0;
- int i = 0;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- for (i = 1; i < MBOX_REG_COUNT; i++)
- printk(KERN_INFO " Mailbox[%d] = %08x\n", i,
- readw(&ha->reg->mailbox[i]));
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-}
-
-void qla4xxx_dump_registers(struct scsi_qla_host *ha)
-{
- unsigned long flags = 0;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- __dump_registers(ha);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-}
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
void qla4xxx_dump_buffer(void *b, uint32_t size)
{
@@ -198,4 +31,3 @@ void qla4xxx_dump_buffer(void *b, uint32_t size)
printk(KERN_DEBUG "\n");
}
-#endif /* 0 */
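With the dead #if 0 block removed, ql4_dbg.c keeps only qla4xxx_dump_buffer() plus the ql4_glbl.h/ql4_dbg.h/ql4_inline.h includes that each source file now pulls in for itself. A rough userspace approximation of a 16-bytes-per-line hex dump in the same spirit (the driver's exact output format may differ):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Print a buffer as hex, 16 bytes per line, roughly like qla4xxx_dump_buffer(). */
static void dump_buffer(const void *b, uint32_t size)
{
	const uint8_t *c = b;
	uint32_t i;

	for (i = 0; i < size; i++) {
		printf("%02x  ", c[i]);
		if ((i + 1) % 16 == 0)
			printf("\n");
	}
	if (size % 16)
		printf("\n");
}

int main(void)
{
	uint8_t sample[40];

	memset(sample, 0xab, sizeof(sample));
	dump_buffer(sample, sizeof(sample));
	return 0;
}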
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 6f4cf2dd2f4..accaf690eaf 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -122,8 +122,7 @@
#define ISCSI_IPADDR_SIZE 4 /* IP address size */
#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alais name size */
-#define ISCSI_NAME_SIZE 255 /* ISCSI Name size -
- * usually a string */
+#define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */
#define LSDW(x) ((u32)((u64)(x)))
#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
@@ -187,9 +186,21 @@ struct srb {
u_long u_start; /* Time when we handed the cmd to F/W */
};
- /*
- * Device Database (DDB) structure
- */
+/*
+ * Asynchronous Event Queue structure
+ */
+struct aen {
+ uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
+};
+
+struct ql4_aen_log {
+ int count;
+ struct aen entry[MAX_AEN_ENTRIES];
+};
+
+/*
+ * Device Database (DDB) structure
+ */
struct ddb_entry {
struct list_head list; /* ddb list */
struct scsi_qla_host *ha;
@@ -254,13 +265,6 @@ struct ddb_entry {
#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
#define DF_FO_MASKED 3
-/*
- * Asynchronous Event Queue structure
- */
-struct aen {
- uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
-};
-
#include "ql4_fw.h"
#include "ql4_nvram.h"
@@ -270,31 +274,31 @@ struct aen {
*/
struct scsi_qla_host {
/* Linux adapter configuration data */
- struct Scsi_Host *host; /* pointer to host data */
- uint32_t tot_ddbs;
unsigned long flags;
-#define AF_ONLINE 0 /* 0x00000001 */
-#define AF_INIT_DONE 1 /* 0x00000002 */
-#define AF_MBOX_COMMAND 2 /* 0x00000004 */
-#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
-#define AF_INTERRUPTS_ON 6 /* 0x00000040 Not Used */
-#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
-#define AF_LINK_UP 8 /* 0x00000100 */
-#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
-#define AF_ISNS_CMD_IN_PROCESS 12 /* 0x00001000 */
-#define AF_ISNS_CMD_DONE 13 /* 0x00002000 */
+#define AF_ONLINE 0 /* 0x00000001 */
+#define AF_INIT_DONE 1 /* 0x00000002 */
+#define AF_MBOX_COMMAND 2 /* 0x00000004 */
+#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
+#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
+#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
+#define AF_LINK_UP 8 /* 0x00000100 */
+#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
+#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */
unsigned long dpc_flags;
-#define DPC_RESET_HA 1 /* 0x00000002 */
-#define DPC_RETRY_RESET_HA 2 /* 0x00000004 */
-#define DPC_RELOGIN_DEVICE 3 /* 0x00000008 */
-#define DPC_RESET_HA_DESTROY_DDB_LIST 4 /* 0x00000010 */
-#define DPC_RESET_HA_INTR 5 /* 0x00000020 */
-#define DPC_ISNS_RESTART 7 /* 0x00000080 */
-#define DPC_AEN 9 /* 0x00000200 */
-#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
+#define DPC_RESET_HA 1 /* 0x00000002 */
+#define DPC_RETRY_RESET_HA 2 /* 0x00000004 */
+#define DPC_RELOGIN_DEVICE 3 /* 0x00000008 */
+#define DPC_RESET_HA_DESTROY_DDB_LIST 4 /* 0x00000010 */
+#define DPC_RESET_HA_INTR 5 /* 0x00000020 */
+#define DPC_ISNS_RESTART 7 /* 0x00000080 */
+#define DPC_AEN 9 /* 0x00000200 */
+#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
+
+ struct Scsi_Host *host; /* pointer to host data */
+ uint32_t tot_ddbs;
uint16_t iocb_cnt;
uint16_t iocb_hiwat;
@@ -344,6 +348,7 @@ struct scsi_qla_host {
uint32_t firmware_version[2];
uint32_t patch_number;
uint32_t build_number;
+ uint32_t board_id;
/* --- From Init_FW --- */
/* init_cb_t *init_cb; */
@@ -363,7 +368,6 @@ struct scsi_qla_host {
/* --- From GetFwState --- */
uint32_t firmware_state;
- uint32_t board_id;
uint32_t addl_fw_state;
/* Linux kernel thread */
@@ -414,6 +418,8 @@ struct scsi_qla_host {
uint16_t aen_out;
struct aen aen_q[MAX_AEN_ENTRIES];
+ struct ql4_aen_log aen_log;/* tracks all aens */
+
/* This mutex protects several threads to do mailbox commands
* concurrently.
*/
@@ -585,10 +591,4 @@ static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
#define FLUSH_DDB_CHANGED_AENS 1
#define RELOGIN_DDB_CHANGED_AENS 2
-#include "ql4_version.h"
-#include "ql4_glbl.h"
-#include "ql4_dbg.h"
-#include "ql4_inline.h"
-
-
#endif /*_QLA4XXX_H */
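ql4_def.h gains a ql4_aen_log structure next to the existing aen_q ring, giving the driver a flat record of every asynchronous event it has seen. A standalone sketch of that append-only log; MAX_AEN_ENTRIES and MBOX_AEN_REG_COUNT are placeholder values here, the real constants are defined elsewhere in the driver:

#include <stdint.h>
#include <stdio.h>

#define MBOX_AEN_REG_COUNT	5	/* placeholder: mailbox words captured per AEN */
#define MAX_AEN_ENTRIES		64	/* placeholder: capacity of the log */

struct aen {
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
};

struct ql4_aen_log {
	int count;
	struct aen entry[MAX_AEN_ENTRIES];
};

/* Append one event; once the log is full new events are silently dropped,
 * matching the count check the interrupt handler does before copying. */
static void aen_log_add(struct ql4_aen_log *log, const uint32_t *mbox_sts)
{
	int i;

	if (log->count >= MAX_AEN_ENTRIES)
		return;
	for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
		log->entry[log->count].mbox_sts[i] = mbox_sts[i];
	log->count++;
}

int main(void)
{
	struct ql4_aen_log log = { 0 };
	uint32_t sts[MBOX_AEN_REG_COUNT] = { 0x8014, 1, 2, 3, 4 };

	aen_log_add(&log, sts);
	printf("logged %d AEN(s), first status 0x%04x\n",
	       log.count, (unsigned)log.entry[0].mbox_sts[0]);
	return 0;
}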
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 4eea8c57191..9bb3d1d2a92 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -20,143 +20,23 @@
*************************************************************************/
struct port_ctrl_stat_regs {
- __le32 ext_hw_conf; /* 80 x50 R/W */
- __le32 intChipConfiguration; /* 84 x54 */
- __le32 port_ctrl; /* 88 x58 */
- __le32 port_status; /* 92 x5c */
- __le32 HostPrimMACHi; /* 96 x60 */
- __le32 HostPrimMACLow; /* 100 x64 */
- __le32 HostSecMACHi; /* 104 x68 */
- __le32 HostSecMACLow; /* 108 x6c */
- __le32 EPPrimMACHi; /* 112 x70 */
- __le32 EPPrimMACLow; /* 116 x74 */
- __le32 EPSecMACHi; /* 120 x78 */
- __le32 EPSecMACLow; /* 124 x7c */
- __le32 HostPrimIPHi; /* 128 x80 */
- __le32 HostPrimIPMidHi; /* 132 x84 */
- __le32 HostPrimIPMidLow; /* 136 x88 */
- __le32 HostPrimIPLow; /* 140 x8c */
- __le32 HostSecIPHi; /* 144 x90 */
- __le32 HostSecIPMidHi; /* 148 x94 */
- __le32 HostSecIPMidLow; /* 152 x98 */
- __le32 HostSecIPLow; /* 156 x9c */
- __le32 EPPrimIPHi; /* 160 xa0 */
- __le32 EPPrimIPMidHi; /* 164 xa4 */
- __le32 EPPrimIPMidLow; /* 168 xa8 */
- __le32 EPPrimIPLow; /* 172 xac */
- __le32 EPSecIPHi; /* 176 xb0 */
- __le32 EPSecIPMidHi; /* 180 xb4 */
- __le32 EPSecIPMidLow; /* 184 xb8 */
- __le32 EPSecIPLow; /* 188 xbc */
- __le32 IPReassemblyTimeout; /* 192 xc0 */
- __le32 EthMaxFramePayload; /* 196 xc4 */
- __le32 TCPMaxWindowSize; /* 200 xc8 */
- __le32 TCPCurrentTimestampHi; /* 204 xcc */
- __le32 TCPCurrentTimestampLow; /* 208 xd0 */
- __le32 LocalRAMAddress; /* 212 xd4 */
- __le32 LocalRAMData; /* 216 xd8 */
- __le32 PCSReserved1; /* 220 xdc */
- __le32 gp_out; /* 224 xe0 */
- __le32 gp_in; /* 228 xe4 */
- __le32 ProbeMuxAddr; /* 232 xe8 */
- __le32 ProbeMuxData; /* 236 xec */
- __le32 ERMQueueBaseAddr0; /* 240 xf0 */
- __le32 ERMQueueBaseAddr1; /* 244 xf4 */
- __le32 MACConfiguration; /* 248 xf8 */
- __le32 port_err_status; /* 252 xfc COR */
+ __le32 ext_hw_conf; /* 0x50 R/W */
+ __le32 rsrvd0; /* 0x54 */
+ __le32 port_ctrl; /* 0x58 */
+ __le32 port_status; /* 0x5c */
+ __le32 rsrvd1[32]; /* 0x60-0xdf */
+ __le32 gp_out; /* 0xe0 */
+ __le32 gp_in; /* 0xe4 */
+ __le32 rsrvd2[5]; /* 0xe8-0xfb */
+ __le32 port_err_status; /* 0xfc */
};
struct host_mem_cfg_regs {
- __le32 NetRequestQueueOut; /* 80 x50 */
- __le32 NetRequestQueueOutAddrHi; /* 84 x54 */
- __le32 NetRequestQueueOutAddrLow; /* 88 x58 */
- __le32 NetRequestQueueBaseAddrHi; /* 92 x5c */
- __le32 NetRequestQueueBaseAddrLow; /* 96 x60 */
- __le32 NetRequestQueueLength; /* 100 x64 */
- __le32 NetResponseQueueIn; /* 104 x68 */
- __le32 NetResponseQueueInAddrHi; /* 108 x6c */
- __le32 NetResponseQueueInAddrLow; /* 112 x70 */
- __le32 NetResponseQueueBaseAddrHi; /* 116 x74 */
- __le32 NetResponseQueueBaseAddrLow; /* 120 x78 */
- __le32 NetResponseQueueLength; /* 124 x7c */
- __le32 req_q_out; /* 128 x80 */
- __le32 RequestQueueOutAddrHi; /* 132 x84 */
- __le32 RequestQueueOutAddrLow; /* 136 x88 */
- __le32 RequestQueueBaseAddrHi; /* 140 x8c */
- __le32 RequestQueueBaseAddrLow; /* 144 x90 */
- __le32 RequestQueueLength; /* 148 x94 */
- __le32 ResponseQueueIn; /* 152 x98 */
- __le32 ResponseQueueInAddrHi; /* 156 x9c */
- __le32 ResponseQueueInAddrLow; /* 160 xa0 */
- __le32 ResponseQueueBaseAddrHi; /* 164 xa4 */
- __le32 ResponseQueueBaseAddrLow; /* 168 xa8 */
- __le32 ResponseQueueLength; /* 172 xac */
- __le32 NetRxLargeBufferQueueOut; /* 176 xb0 */
- __le32 NetRxLargeBufferQueueBaseAddrHi; /* 180 xb4 */
- __le32 NetRxLargeBufferQueueBaseAddrLow; /* 184 xb8 */
- __le32 NetRxLargeBufferQueueLength; /* 188 xbc */
- __le32 NetRxLargeBufferLength; /* 192 xc0 */
- __le32 NetRxSmallBufferQueueOut; /* 196 xc4 */
- __le32 NetRxSmallBufferQueueBaseAddrHi; /* 200 xc8 */
- __le32 NetRxSmallBufferQueueBaseAddrLow; /* 204 xcc */
- __le32 NetRxSmallBufferQueueLength; /* 208 xd0 */
- __le32 NetRxSmallBufferLength; /* 212 xd4 */
- __le32 HMCReserved0[10]; /* 216 xd8 */
+ __le32 rsrvd0[12]; /* 0x50-0x79 */
+ __le32 req_q_out; /* 0x80 */
+ __le32 rsrvd1[31]; /* 0x84-0xFF */
};
-struct local_ram_cfg_regs {
- __le32 BufletSize; /* 80 x50 */
- __le32 BufletMaxCount; /* 84 x54 */
- __le32 BufletCurrCount; /* 88 x58 */
- __le32 BufletPauseThresholdCount; /* 92 x5c */
- __le32 BufletTCPWinThresholdHi; /* 96 x60 */
- __le32 BufletTCPWinThresholdLow; /* 100 x64 */
- __le32 IPHashTableBaseAddr; /* 104 x68 */
- __le32 IPHashTableSize; /* 108 x6c */
- __le32 TCPHashTableBaseAddr; /* 112 x70 */
- __le32 TCPHashTableSize; /* 116 x74 */
- __le32 NCBAreaBaseAddr; /* 120 x78 */
- __le32 NCBMaxCount; /* 124 x7c */
- __le32 NCBCurrCount; /* 128 x80 */
- __le32 DRBAreaBaseAddr; /* 132 x84 */
- __le32 DRBMaxCount; /* 136 x88 */
- __le32 DRBCurrCount; /* 140 x8c */
- __le32 LRCReserved[28]; /* 144 x90 */
-};
-
-struct prot_stat_regs {
- __le32 MACTxFrameCount; /* 80 x50 R */
- __le32 MACTxByteCount; /* 84 x54 R */
- __le32 MACRxFrameCount; /* 88 x58 R */
- __le32 MACRxByteCount; /* 92 x5c R */
- __le32 MACCRCErrCount; /* 96 x60 R */
- __le32 MACEncErrCount; /* 100 x64 R */
- __le32 MACRxLengthErrCount; /* 104 x68 R */
- __le32 IPTxPacketCount; /* 108 x6c R */
- __le32 IPTxByteCount; /* 112 x70 R */
- __le32 IPTxFragmentCount; /* 116 x74 R */
- __le32 IPRxPacketCount; /* 120 x78 R */
- __le32 IPRxByteCount; /* 124 x7c R */
- __le32 IPRxFragmentCount; /* 128 x80 R */
- __le32 IPDatagramReassemblyCount; /* 132 x84 R */
- __le32 IPV6RxPacketCount; /* 136 x88 R */
- __le32 IPErrPacketCount; /* 140 x8c R */
- __le32 IPReassemblyErrCount; /* 144 x90 R */
- __le32 TCPTxSegmentCount; /* 148 x94 R */
- __le32 TCPTxByteCount; /* 152 x98 R */
- __le32 TCPRxSegmentCount; /* 156 x9c R */
- __le32 TCPRxByteCount; /* 160 xa0 R */
- __le32 TCPTimerExpCount; /* 164 xa4 R */
- __le32 TCPRxAckCount; /* 168 xa8 R */
- __le32 TCPTxAckCount; /* 172 xac R */
- __le32 TCPRxErrOOOCount; /* 176 xb0 R */
- __le32 PSReserved0; /* 180 xb4 */
- __le32 TCPRxWindowProbeUpdateCount; /* 184 xb8 R */
- __le32 ECCErrCorrectionCount; /* 188 xbc R */
- __le32 PSReserved1[16]; /* 192 xc0 */
-};
-
-
/* remote register set (access via PCI memory read/write) */
struct isp_reg {
#define MBOX_REG_COUNT 8
@@ -207,11 +87,7 @@ struct isp_reg {
union {
struct port_ctrl_stat_regs p0;
struct host_mem_cfg_regs p1;
- struct local_ram_cfg_regs p2;
- struct prot_stat_regs p3;
- __le32 r_union[44];
};
-
} __attribute__ ((packed)) isp4022;
} u2;
}; /* 256 x100 */
@@ -296,6 +172,7 @@ static inline uint32_t clr_rmask(uint32_t val)
/* ISP Semaphore definitions */
/* ISP General Purpose Output definitions */
+#define GPOR_TOPCAT_RESET 0x00000004
/* shadow registers (DMA'd from HA to system memory. read only) */
struct shadow_regs {
@@ -337,6 +214,7 @@ union external_hw_config_reg {
/* Mailbox command definitions */
#define MBOX_CMD_ABOUT_FW 0x0009
+#define MBOX_CMD_PING 0x000B
#define MBOX_CMD_LUN_RESET 0x0016
#define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E
#define MBOX_CMD_GET_FW_STATUS 0x001F
@@ -364,6 +242,17 @@ union external_hw_config_reg {
#define MBOX_CMD_GET_FW_STATE 0x0069
#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A
#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS 0x0087
+#define MBOX_CMD_SET_ACB 0x0088
+#define MBOX_CMD_GET_ACB 0x0089
+#define MBOX_CMD_DISABLE_ACB 0x008A
+#define MBOX_CMD_GET_IPV6_NEIGHBOR_CACHE 0x008B
+#define MBOX_CMD_GET_IPV6_DEST_CACHE 0x008C
+#define MBOX_CMD_GET_IPV6_DEF_ROUTER_LIST 0x008D
+#define MBOX_CMD_GET_IPV6_LCL_PREFIX_LIST 0x008E
+#define MBOX_CMD_SET_IPV6_NEIGHBOR_CACHE 0x0090
+#define MBOX_CMD_GET_IP_ADDR_STATE 0x0091
+#define MBOX_CMD_SEND_IPV6_ROUTER_SOL 0x0092
+#define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR 0x0093
/* Mailbox 1 */
#define FW_STATE_READY 0x0000
@@ -409,6 +298,16 @@ union external_hw_config_reg {
#define MBOX_ASTS_DHCP_LEASE_EXPIRED 0x801D
#define MBOX_ASTS_DHCP_LEASE_ACQUIRED 0x801F
#define MBOX_ASTS_ISNS_UNSOLICITED_PDU_RECEIVED 0x8021
+#define MBOX_ASTS_DUPLICATE_IP 0x8025
+#define MBOX_ASTS_ARP_COMPLETE 0x8026
+#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027
+#define MBOX_ASTS_RESPONSE_QUEUE_FULL 0x8028
+#define MBOX_ASTS_IP_ADDR_STATE_CHANGED 0x8029
+#define MBOX_ASTS_IPV6_PREFIX_EXPIRED 0x802B
+#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C
+#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
+#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
+
#define ISNS_EVENT_DATA_RECEIVED 0x0000
#define ISNS_EVENT_CONNECTION_OPENED 0x0001
#define ISNS_EVENT_CONNECTION_FAILED 0x0002
@@ -418,137 +317,166 @@ union external_hw_config_reg {
/*************************************************************************/
/* Host Adapter Initialization Control Block (from host) */
-struct init_fw_ctrl_blk {
- uint8_t Version; /* 00 */
- uint8_t Control; /* 01 */
+struct addr_ctrl_blk {
+ uint8_t version; /* 00 */
+ uint8_t control; /* 01 */
- uint16_t FwOptions; /* 02-03 */
+ uint16_t fw_options; /* 02-03 */
#define FWOPT_HEARTBEAT_ENABLE 0x1000
#define FWOPT_SESSION_MODE 0x0040
#define FWOPT_INITIATOR_MODE 0x0020
#define FWOPT_TARGET_MODE 0x0010
- uint16_t ExecThrottle; /* 04-05 */
- uint8_t RetryCount; /* 06 */
- uint8_t RetryDelay; /* 07 */
- uint16_t MaxEthFrPayloadSize; /* 08-09 */
- uint16_t AddFwOptions; /* 0A-0B */
-
- uint8_t HeartbeatInterval; /* 0C */
- uint8_t InstanceNumber; /* 0D */
- uint16_t RES2; /* 0E-0F */
- uint16_t ReqQConsumerIndex; /* 10-11 */
- uint16_t ComplQProducerIndex; /* 12-13 */
- uint16_t ReqQLen; /* 14-15 */
- uint16_t ComplQLen; /* 16-17 */
- uint32_t ReqQAddrLo; /* 18-1B */
- uint32_t ReqQAddrHi; /* 1C-1F */
- uint32_t ComplQAddrLo; /* 20-23 */
- uint32_t ComplQAddrHi; /* 24-27 */
- uint32_t ShadowRegBufAddrLo; /* 28-2B */
- uint32_t ShadowRegBufAddrHi; /* 2C-2F */
-
- uint16_t iSCSIOptions; /* 30-31 */
-
- uint16_t TCPOptions; /* 32-33 */
-
- uint16_t IPOptions; /* 34-35 */
-
- uint16_t MaxPDUSize; /* 36-37 */
- uint16_t RcvMarkerInt; /* 38-39 */
- uint16_t SndMarkerInt; /* 3A-3B */
- uint16_t InitMarkerlessInt; /* 3C-3D */
- uint16_t FirstBurstSize; /* 3E-3F */
- uint16_t DefaultTime2Wait; /* 40-41 */
- uint16_t DefaultTime2Retain; /* 42-43 */
- uint16_t MaxOutStndngR2T; /* 44-45 */
- uint16_t KeepAliveTimeout; /* 46-47 */
- uint16_t PortNumber; /* 48-49 */
- uint16_t MaxBurstSize; /* 4A-4B */
- uint32_t RES4; /* 4C-4F */
- uint8_t IPAddr[4]; /* 50-53 */
- uint8_t RES5[12]; /* 54-5F */
- uint8_t SubnetMask[4]; /* 60-63 */
- uint8_t RES6[12]; /* 64-6F */
- uint8_t GatewayIPAddr[4]; /* 70-73 */
- uint8_t RES7[12]; /* 74-7F */
- uint8_t PriDNSIPAddr[4]; /* 80-83 */
- uint8_t SecDNSIPAddr[4]; /* 84-87 */
- uint8_t RES8[8]; /* 88-8F */
- uint8_t Alias[32]; /* 90-AF */
- uint8_t TargAddr[8]; /* B0-B7 *//* /FIXME: Remove?? */
- uint8_t CHAPNameSecretsTable[8]; /* B8-BF */
- uint8_t EthernetMACAddr[6]; /* C0-C5 */
- uint16_t TargetPortalGroup; /* C6-C7 */
- uint8_t SendScale; /* C8 */
- uint8_t RecvScale; /* C9 */
- uint8_t TypeOfService; /* CA */
- uint8_t Time2Live; /* CB */
- uint16_t VLANPriority; /* CC-CD */
- uint16_t Reserved8; /* CE-CF */
- uint8_t SecIPAddr[4]; /* D0-D3 */
- uint8_t Reserved9[12]; /* D4-DF */
- uint8_t iSNSIPAddr[4]; /* E0-E3 */
- uint16_t iSNSServerPortNumber; /* E4-E5 */
- uint8_t Reserved10[10]; /* E6-EF */
- uint8_t SLPDAIPAddr[4]; /* F0-F3 */
- uint8_t Reserved11[12]; /* F4-FF */
- uint8_t iSCSINameString[256]; /* 100-1FF */
+ uint16_t exec_throttle; /* 04-05 */
+ uint8_t zio_count; /* 06 */
+ uint8_t res0; /* 07 */
+ uint16_t eth_mtu_size; /* 08-09 */
+ uint16_t add_fw_options; /* 0A-0B */
+
+ uint8_t hb_interval; /* 0C */
+ uint8_t inst_num; /* 0D */
+ uint16_t res1; /* 0E-0F */
+ uint16_t rqq_consumer_idx; /* 10-11 */
+ uint16_t compq_producer_idx; /* 12-13 */
+ uint16_t rqq_len; /* 14-15 */
+ uint16_t compq_len; /* 16-17 */
+ uint32_t rqq_addr_lo; /* 18-1B */
+ uint32_t rqq_addr_hi; /* 1C-1F */
+ uint32_t compq_addr_lo; /* 20-23 */
+ uint32_t compq_addr_hi; /* 24-27 */
+ uint32_t shdwreg_addr_lo; /* 28-2B */
+ uint32_t shdwreg_addr_hi; /* 2C-2F */
+
+ uint16_t iscsi_opts; /* 30-31 */
+ uint16_t ipv4_tcp_opts; /* 32-33 */
+ uint16_t ipv4_ip_opts; /* 34-35 */
+
+ uint16_t iscsi_max_pdu_size; /* 36-37 */
+ uint8_t ipv4_tos; /* 38 */
+ uint8_t ipv4_ttl; /* 39 */
+ uint8_t acb_version; /* 3A */
+ uint8_t res2; /* 3B */
+ uint16_t def_timeout; /* 3C-3D */
+ uint16_t iscsi_fburst_len; /* 3E-3F */
+ uint16_t iscsi_def_time2wait; /* 40-41 */
+ uint16_t iscsi_def_time2retain; /* 42-43 */
+ uint16_t iscsi_max_outstnd_r2t; /* 44-45 */
+ uint16_t conn_ka_timeout; /* 46-47 */
+ uint16_t ipv4_port; /* 48-49 */
+ uint16_t iscsi_max_burst_len; /* 4A-4B */
+ uint32_t res5; /* 4C-4F */
+ uint8_t ipv4_addr[4]; /* 50-53 */
+ uint16_t ipv4_vlan_tag; /* 54-55 */
+ uint8_t ipv4_addr_state; /* 56 */
+ uint8_t ipv4_cacheid; /* 57 */
+ uint8_t res6[8]; /* 58-5F */
+ uint8_t ipv4_subnet[4]; /* 60-63 */
+ uint8_t res7[12]; /* 64-6F */
+ uint8_t ipv4_gw_addr[4]; /* 70-73 */
+ uint8_t res8[0xc]; /* 74-7F */
+ uint8_t pri_dns_srvr_ip[4];/* 80-83 */
+ uint8_t sec_dns_srvr_ip[4];/* 84-87 */
+ uint16_t min_eph_port; /* 88-89 */
+ uint16_t max_eph_port; /* 8A-8B */
+ uint8_t res9[4]; /* 8C-8F */
+ uint8_t iscsi_alias[32];/* 90-AF */
+ uint8_t res9_1[0x16]; /* B0-C5 */
+ uint16_t tgt_portal_grp;/* C6-C7 */
+ uint8_t abort_timer; /* C8 */
+ uint8_t ipv4_tcp_wsf; /* C9 */
+ uint8_t res10[6]; /* CA-CF */
+ uint8_t ipv4_sec_ip_addr[4]; /* D0-D3 */
+ uint8_t ipv4_dhcp_vid_len; /* D4 */
+ uint8_t ipv4_dhcp_vid[11]; /* D5-DF */
+ uint8_t res11[20]; /* E0-F3 */
+ uint8_t ipv4_dhcp_alt_cid_len; /* F4 */
+ uint8_t ipv4_dhcp_alt_cid[11]; /* F5-FF */
+ uint8_t iscsi_name[224]; /* 100-1DF */
+ uint8_t res12[32]; /* 1E0-1FF */
+ uint32_t cookie; /* 200-203 */
+ uint16_t ipv6_port; /* 204-205 */
+ uint16_t ipv6_opts; /* 206-207 */
+ uint16_t ipv6_addtl_opts; /* 208-209 */
+ uint16_t ipv6_tcp_opts; /* 20A-20B */
+ uint8_t ipv6_tcp_wsf; /* 20C */
+ uint16_t ipv6_flow_lbl; /* 20D-20F */
+ uint8_t ipv6_gw_addr[16]; /* 210-21F */
+ uint16_t ipv6_vlan_tag; /* 220-221 */
+ uint8_t ipv6_lnk_lcl_addr_state;/* 222 */
+ uint8_t ipv6_addr0_state; /* 223 */
+ uint8_t ipv6_addr1_state; /* 224 */
+ uint8_t ipv6_gw_state; /* 225 */
+ uint8_t ipv6_traffic_class; /* 226 */
+ uint8_t ipv6_hop_limit; /* 227 */
+ uint8_t ipv6_if_id[8]; /* 228-22F */
+ uint8_t ipv6_addr0[16]; /* 230-23F */
+ uint8_t ipv6_addr1[16]; /* 240-24F */
+ uint32_t ipv6_nd_reach_time; /* 250-253 */
+ uint32_t ipv6_nd_rexmit_timer; /* 254-257 */
+ uint32_t ipv6_nd_stale_timeout; /* 258-25B */
+ uint8_t ipv6_dup_addr_detect_count; /* 25C */
+ uint8_t ipv6_cache_id; /* 25D */
+ uint8_t res13[18]; /* 25E-26F */
+ uint32_t ipv6_gw_advrt_mtu; /* 270-273 */
+ uint8_t res14[140]; /* 274-2FF */
+};
+
+struct init_fw_ctrl_blk {
+ struct addr_ctrl_blk pri;
+ struct addr_ctrl_blk sec;
};
/*************************************************************************/
struct dev_db_entry {
- uint8_t options; /* 00 */
+ uint16_t options; /* 00-01 */
#define DDB_OPT_DISC_SESSION 0x10
#define DDB_OPT_TARGET 0x02 /* device is a target */
- uint8_t control; /* 01 */
-
- uint16_t exeThrottle; /* 02-03 */
- uint16_t exeCount; /* 04-05 */
- uint8_t retryCount; /* 06 */
- uint8_t retryDelay; /* 07 */
- uint16_t iSCSIOptions; /* 08-09 */
-
- uint16_t TCPOptions; /* 0A-0B */
-
- uint16_t IPOptions; /* 0C-0D */
-
- uint16_t maxPDUSize; /* 0E-0F */
- uint16_t rcvMarkerInt; /* 10-11 */
- uint16_t sndMarkerInt; /* 12-13 */
- uint16_t iSCSIMaxSndDataSegLen; /* 14-15 */
- uint16_t firstBurstSize; /* 16-17 */
- uint16_t minTime2Wait; /* 18-19 : RA :default_time2wait */
- uint16_t maxTime2Retain; /* 1A-1B */
- uint16_t maxOutstndngR2T; /* 1C-1D */
- uint16_t keepAliveTimeout; /* 1E-1F */
- uint8_t ISID[6]; /* 20-25 big-endian, must be converted
+ uint16_t exec_throttle; /* 02-03 */
+ uint16_t exec_count; /* 04-05 */
+ uint16_t res0; /* 06-07 */
+ uint16_t iscsi_options; /* 08-09 */
+ uint16_t tcp_options; /* 0A-0B */
+ uint16_t ip_options; /* 0C-0D */
+ uint16_t iscsi_max_rcv_data_seg_len; /* 0E-0F */
+ uint32_t res1; /* 10-13 */
+ uint16_t iscsi_max_snd_data_seg_len; /* 14-15 */
+ uint16_t iscsi_first_burst_len; /* 16-17 */
+ uint16_t iscsi_def_time2wait; /* 18-19 */
+ uint16_t iscsi_def_time2retain; /* 1A-1B */
+ uint16_t iscsi_max_outsnd_r2t; /* 1C-1D */
+ uint16_t ka_timeout; /* 1E-1F */
+ uint8_t isid[6]; /* 20-25 big-endian, must be converted
* to little-endian */
- uint16_t TSID; /* 26-27 */
- uint16_t portNumber; /* 28-29 */
- uint16_t maxBurstSize; /* 2A-2B */
- uint16_t taskMngmntTimeout; /* 2C-2D */
- uint16_t reserved1; /* 2E-2F */
- uint8_t ipAddr[0x10]; /* 30-3F */
- uint8_t iSCSIAlias[0x20]; /* 40-5F */
- uint8_t targetAddr[0x20]; /* 60-7F */
- uint8_t userID[0x20]; /* 80-9F */
- uint8_t password[0x20]; /* A0-BF */
- uint8_t iscsiName[0x100]; /* C0-1BF : xxzzy Make this a
+ uint16_t tsid; /* 26-27 */
+ uint16_t port; /* 28-29 */
+ uint16_t iscsi_max_burst_len; /* 2A-2B */
+ uint16_t def_timeout; /* 2C-2D */
+ uint16_t res2; /* 2E-2F */
+ uint8_t ip_addr[0x10]; /* 30-3F */
+ uint8_t iscsi_alias[0x20]; /* 40-5F */
+ uint8_t tgt_addr[0x20]; /* 60-7F */
+ uint16_t mss; /* 80-81 */
+ uint16_t res3; /* 82-83 */
+ uint16_t lcl_port; /* 84-85 */
+ uint8_t ipv4_tos; /* 86 */
+ uint16_t ipv6_flow_lbl; /* 87-89 */
+ uint8_t res4[0x36]; /* 8A-BF */
+ uint8_t iscsi_name[0xE0]; /* C0-19F : xxzzy Make this a
* pointer to a string so we
* don't have to reserve soooo
* much RAM */
- uint16_t ddbLink; /* 1C0-1C1 */
- uint16_t CHAPTableIndex; /* 1C2-1C3 */
- uint16_t TargetPortalGroup; /* 1C4-1C5 */
- uint16_t reserved2[2]; /* 1C6-1C7 */
- uint32_t statSN; /* 1C8-1CB */
- uint32_t expStatSN; /* 1CC-1CF */
- uint16_t reserved3[0x2C]; /* 1D0-1FB */
- uint16_t ddbValidCookie; /* 1FC-1FD */
- uint16_t ddbValidSize; /* 1FE-1FF */
+ uint8_t ipv6_addr[0x10];/* 1A0-1AF */
+ uint8_t res5[0x10]; /* 1B0-1BF */
+ uint16_t ddb_link; /* 1C0-1C1 */
+ uint16_t chap_tbl_idx; /* 1C2-1C3 */
+ uint16_t tgt_portal_grp; /* 1C4-1C5 */
+ uint8_t tcp_xmt_wsf; /* 1C6 */
+ uint8_t tcp_rcv_wsf; /* 1C7 */
+ uint32_t stat_sn; /* 1C8-1CB */
+ uint32_t exp_stat_sn; /* 1CC-1CF */
+ uint8_t res6[0x30]; /* 1D0-1FF */
};
/*************************************************************************/
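The slimmed-down ql4_fw.h register structures above replace dozens of named but unused fields with rsrvdN padding arrays, which only works if the padding keeps the remaining registers at their original byte offsets. A small host-side layout check, using plain uint32_t in place of __le32 and the 0x50 base at which these register pages start (per the offset comments above):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace copies of the trimmed layouts; uint32_t stands in for __le32. */
struct port_ctrl_stat_regs {
	uint32_t ext_hw_conf;		/* 0x50 */
	uint32_t rsrvd0;		/* 0x54 */
	uint32_t port_ctrl;		/* 0x58 */
	uint32_t port_status;		/* 0x5c */
	uint32_t rsrvd1[32];		/* 0x60-0xdf */
	uint32_t gp_out;		/* 0xe0 */
	uint32_t gp_in;			/* 0xe4 */
	uint32_t rsrvd2[5];		/* 0xe8-0xfb */
	uint32_t port_err_status;	/* 0xfc */
};

struct host_mem_cfg_regs {
	uint32_t rsrvd0[12];		/* 0x50-0x7f */
	uint32_t req_q_out;		/* 0x80 */
	uint32_t rsrvd1[31];		/* 0x84-0xff */
};

#define REG_BASE 0x50	/* both register pages begin at offset 0x50 */

int main(void)
{
	/* The padding arrays must land the live registers on their documented offsets. */
	assert(REG_BASE + offsetof(struct port_ctrl_stat_regs, gp_out) == 0xe0);
	assert(REG_BASE + offsetof(struct port_ctrl_stat_regs, port_err_status) == 0xfc);
	assert(REG_BASE + offsetof(struct host_mem_cfg_regs, req_q_out) == 0x80);
	assert(sizeof(struct port_ctrl_stat_regs) == 0xb0);
	assert(sizeof(struct host_mem_cfg_regs) == 0xb0);
	printf("register offsets preserved\n");
	return 0;
}

If any of these asserts fired, the padding would have been mis-sized and the driver would be reading the wrong registers through the p0/p1 union.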
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 5b00cb04e7c..a3608e028bf 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -8,6 +8,9 @@
#ifndef __QLA4x_GBL_H
#define __QLA4x_GBL_H
+struct iscsi_cls_conn;
+
+void qla4xxx_hw_reset(struct scsi_qla_host *ha);
int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb);
@@ -58,11 +61,13 @@ int qla4xxx_get_fw_version(struct scsi_qla_host * ha);
void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
uint32_t intr_status);
int qla4xxx_init_rings(struct scsi_qla_host * ha);
-struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index);
+struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
+ uint32_t index);
void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb);
int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha);
int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha,
uint32_t fw_ddb_index, uint32_t state);
+void qla4xxx_dump_buffer(void *b, uint32_t size);
extern int ql4xextended_error_logging;
extern int ql4xdiscoverywait;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 6365df26861..1e29f51d596 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -6,6 +6,9 @@
*/
#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
uint32_t fw_ddb_index);
@@ -300,12 +303,12 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
if (!qla4xxx_fw_ready(ha))
return status;
- set_bit(AF_ONLINE, &ha->flags);
return qla4xxx_get_firmware_status(ha);
}
static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
- uint32_t fw_ddb_index)
+ uint32_t fw_ddb_index,
+ uint32_t *new_tgt)
{
struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_entry_dma;
@@ -313,6 +316,7 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
int found = 0;
uint32_t device_state;
+ *new_tgt = 0;
/* Make sure the dma buffer is valid */
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
sizeof(*fw_ddb_entry),
@@ -337,7 +341,7 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
DEBUG2(printk("scsi%ld: %s: Looking for ddb[%d]\n", ha->host_no,
__func__, fw_ddb_index));
list_for_each_entry(ddb_entry, &ha->ddb_list, list) {
- if (memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsiName,
+ if (memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsi_name,
ISCSI_NAME_SIZE) == 0) {
found++;
break;
@@ -348,6 +352,7 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
DEBUG2(printk("scsi%ld: %s: ddb[%d] not found - allocating "
"new ddb\n", ha->host_no, __func__,
fw_ddb_index));
+ *new_tgt = 1;
ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
}
@@ -409,26 +414,26 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
}
status = QLA_SUCCESS;
- ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->TSID);
+ ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->tsid);
ddb_entry->task_mgmt_timeout =
- le16_to_cpu(fw_ddb_entry->taskMngmntTimeout);
+ le16_to_cpu(fw_ddb_entry->def_timeout);
ddb_entry->CmdSn = 0;
- ddb_entry->exe_throttle = le16_to_cpu(fw_ddb_entry->exeThrottle);
+ ddb_entry->exe_throttle = le16_to_cpu(fw_ddb_entry->exec_throttle);
ddb_entry->default_relogin_timeout =
- le16_to_cpu(fw_ddb_entry->taskMngmntTimeout);
- ddb_entry->default_time2wait = le16_to_cpu(fw_ddb_entry->minTime2Wait);
+ le16_to_cpu(fw_ddb_entry->def_timeout);
+ ddb_entry->default_time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
/* Update index in case it changed */
ddb_entry->fw_ddb_index = fw_ddb_index;
ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
- ddb_entry->port = le16_to_cpu(fw_ddb_entry->portNumber);
- ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->TargetPortalGroup);
- memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsiName[0],
+ ddb_entry->port = le16_to_cpu(fw_ddb_entry->port);
+ ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+ memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
min(sizeof(ddb_entry->iscsi_name),
- sizeof(fw_ddb_entry->iscsiName)));
- memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ipAddr[0],
- min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ipAddr)));
+ sizeof(fw_ddb_entry->iscsi_name)));
+ memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ip_addr[0],
+ min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ip_addr)));
DEBUG2(printk("scsi%ld: %s: ddb[%d] - State= %x status= %d.\n",
ha->host_no, __func__, fw_ddb_index,
@@ -495,6 +500,7 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
uint32_t ddb_state;
uint32_t conn_err, err_code;
struct ddb_entry *ddb_entry;
+ uint32_t new_tgt;
dev_info(&ha->pdev->dev, "Initializing DDBs ...\n");
for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES;
@@ -526,8 +532,19 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
"completed "
"or access denied failure\n",
ha->host_no, __func__));
- } else
+ } else {
qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0);
+ if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index,
+ NULL, 0, NULL, &next_fw_ddb_index,
+ &ddb_state, &conn_err, NULL, NULL)
+ == QLA_ERROR) {
+ DEBUG2(printk("scsi%ld: %s:"
+ "get_ddb_entry %d failed\n",
+ ha->host_no,
+ __func__, fw_ddb_index));
+ return QLA_ERROR;
+ }
+ }
}
if (ddb_state != DDB_DS_SESSION_ACTIVE)
@@ -540,7 +557,7 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
ha->host_no, __func__, fw_ddb_index));
/* Add DDB to internal our ddb list. */
- ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index);
+ ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index, &new_tgt);
if (ddb_entry == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate memory "
"for device at fw_ddb_index %d\n",
@@ -865,21 +882,20 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
static void qla4x00_pci_config(struct scsi_qla_host *ha)
{
- uint16_t w, mwi;
+ uint16_t w;
+ int status;
dev_info(&ha->pdev->dev, "Configuring PCI space...\n");
pci_set_master(ha->pdev);
- mwi = 0;
- if (pci_set_mwi(ha->pdev))
- mwi = PCI_COMMAND_INVALIDATE;
+ status = pci_set_mwi(ha->pdev);
/*
* We want to respect framework's setting of PCI configuration space
* command register and also want to make sure that all bits of
* interest to us are properly set in command register.
*/
pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
- w |= mwi | (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+ w |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
w &= ~PCI_COMMAND_INTX_DISABLE;
pci_write_config_word(ha->pdev, PCI_COMMAND, w);
}
@@ -911,6 +927,9 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
writel(set_rmask(NVR_WRITE_ENABLE),
&ha->reg->u1.isp4022.nvram);
+ writel(2, &ha->reg->mailbox[6]);
+ readl(&ha->reg->mailbox[6]);
+
writel(set_rmask(CSR_BOOT_ENABLE), &ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -958,25 +977,25 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
return status;
}
-int ql4xxx_lock_drvr_wait(struct scsi_qla_host *ha)
+int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
{
-#define QL4_LOCK_DRVR_WAIT 30
+#define QL4_LOCK_DRVR_WAIT 60
#define QL4_LOCK_DRVR_SLEEP 1
int drvr_wait = QL4_LOCK_DRVR_WAIT;
while (drvr_wait) {
- if (ql4xxx_lock_drvr(ha) == 0) {
+ if (ql4xxx_lock_drvr(a) == 0) {
ssleep(QL4_LOCK_DRVR_SLEEP);
if (drvr_wait) {
DEBUG2(printk("scsi%ld: %s: Waiting for "
- "Global Init Semaphore(%d)...n",
- ha->host_no,
+ "Global Init Semaphore(%d)...\n",
+ a->host_no,
__func__, drvr_wait));
}
drvr_wait -= QL4_LOCK_DRVR_SLEEP;
} else {
DEBUG2(printk("scsi%ld: %s: Global Init Semaphore "
- "acquired.n", ha->host_no, __func__));
+ "acquired\n", a->host_no, __func__));
return QLA_SUCCESS;
}
}
@@ -1125,17 +1144,17 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
/* Initialize the Host adapter request/response queues and firmware */
if (qla4xxx_start_firmware(ha) == QLA_ERROR)
- return status;
+ goto exit_init_hba;
if (qla4xxx_validate_mac_address(ha) == QLA_ERROR)
- return status;
+ goto exit_init_hba;
if (qla4xxx_init_local_data(ha) == QLA_ERROR)
- return status;
+ goto exit_init_hba;
status = qla4xxx_init_firmware(ha);
if (status == QLA_ERROR)
- return status;
+ goto exit_init_hba;
/*
* FW is waiting to get an IP address from DHCP server: Skip building
@@ -1143,12 +1162,12 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
* followed by 0x8014 aen" to trigger the tgt discovery process.
*/
if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS)
- return status;
+ goto exit_init_online;
/* Skip device discovery if ip and subnet is zero */
if (memcmp(ha->ip_address, ip_address, IP_ADDR_LEN) == 0 ||
memcmp(ha->subnet_mask, ip_address, IP_ADDR_LEN) == 0)
- return status;
+ goto exit_init_online;
if (renew_ddb_list == PRESERVE_DDB_LIST) {
/*
@@ -1177,9 +1196,10 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
ha->host_no));
}
- exit_init_hba:
+exit_init_online:
+ set_bit(AF_ONLINE, &ha->flags);
+exit_init_hba:
return status;
-
}
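In qla4xxx_initialize_adapter() the scattered early returns above are funneled through two labels, exit_init_online and exit_init_hba, so the adapter is flagged online in exactly one place and only after the earlier stages have succeeded. A compact sketch of that staged-init shape, with invented stage names standing in for the firmware, MAC and DDB steps:

#include <stdio.h>

enum { OK = 0, ERR = -1 };

static int start_firmware(void)    { return OK; }
static int validate_mac(void)      { return OK; }
static int build_device_list(void) { return OK; }

static int online;	/* stands in for the AF_ONLINE flag bit */

static int initialize_adapter(int dhcp_in_progress)
{
	int status = ERR;

	if (start_firmware() != OK)
		goto exit_init_hba;
	if (validate_mac() != OK)
		goto exit_init_hba;

	status = OK;

	/* Firmware still waiting on DHCP: go online, skip device discovery. */
	if (dhcp_in_progress)
		goto exit_init_online;

	build_device_list();	/* discovery problems are handled later, not fatal here */

exit_init_online:
	online = 1;		/* set only once the init path has succeeded */
exit_init_hba:
	return status;
}

int main(void)
{
	int status = initialize_adapter(0);

	printf("init status %d, online %d\n", status, online);
	return status ? 1 : 0;
}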
/**
@@ -1193,9 +1213,10 @@ static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha,
uint32_t fw_ddb_index)
{
struct ddb_entry * ddb_entry;
+ uint32_t new_tgt;
/* First allocate a device structure */
- ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index);
+ ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index, &new_tgt);
if (ddb_entry == NULL) {
DEBUG2(printk(KERN_WARNING
"scsi%ld: Unable to allocate memory to add "
@@ -1203,6 +1224,18 @@ static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha,
return;
}
+ if (!new_tgt && (ddb_entry->fw_ddb_index != fw_ddb_index)) {
+ /* Target has been bound to a new fw_ddb_index */
+ qla4xxx_free_ddb(ha, ddb_entry);
+ ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
+ if (ddb_entry == NULL) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi%ld: Unable to allocate memory"
+ " to add fw_ddb_index %d\n",
+ ha->host_no, fw_ddb_index));
+ return;
+ }
+ }
if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) ==
QLA_ERROR) {
ha->fw_ddb_index_map[fw_ddb_index] =
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index a216a1781af..5006ecb3ef5 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -6,6 +6,10 @@
*/
#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
#include <scsi/scsi_tcq.h>
@@ -141,11 +145,13 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
uint16_t avail_dsds;
struct data_seg_a64 *cur_dsd;
struct scsi_cmnd *cmd;
+ struct scatterlist *sg;
+ int i;
cmd = srb->cmd;
ha = srb->ha;
- if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
/* No data being transferred */
cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
return;
@@ -154,40 +160,27 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
avail_dsds = COMMAND_SEG;
cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]);
- /* Load data segments */
- if (cmd->use_sg) {
- struct scatterlist *cur_seg;
- struct scatterlist *end_seg;
-
- cur_seg = (struct scatterlist *)cmd->request_buffer;
- end_seg = cur_seg + tot_dsds;
- while (cur_seg < end_seg) {
- dma_addr_t sle_dma;
-
- /* Allocate additional continuation packets? */
- if (avail_dsds == 0) {
- struct continuation_t1_entry *cont_entry;
-
- cont_entry = qla4xxx_alloc_cont_entry(ha);
- cur_dsd =
- (struct data_seg_a64 *)
- &cont_entry->dataseg[0];
- avail_dsds = CONTINUE_SEG;
- }
-
- sle_dma = sg_dma_address(cur_seg);
- cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
- cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
- cur_dsd->count = cpu_to_le32(sg_dma_len(cur_seg));
- avail_dsds--;
-
- cur_dsd++;
- cur_seg++;
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ struct continuation_t1_entry *cont_entry;
+
+ cont_entry = qla4xxx_alloc_cont_entry(ha);
+ cur_dsd =
+ (struct data_seg_a64 *)
+ &cont_entry->dataseg[0];
+ avail_dsds = CONTINUE_SEG;
}
- } else {
- cur_dsd->base.addrLow = cpu_to_le32(LSDW(srb->dma_handle));
- cur_dsd->base.addrHigh = cpu_to_le32(MSDW(srb->dma_handle));
- cur_dsd->count = cpu_to_le32(cmd->request_bufflen);
+
+ sle_dma = sg_dma_address(sg);
+ cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
+ cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
+ cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+
+ cur_dsd++;
}
}
@@ -204,8 +197,8 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
struct scsi_cmnd *cmd = srb->cmd;
struct ddb_entry *ddb_entry;
struct command_t3_entry *cmd_entry;
- struct scatterlist *sg = NULL;
+ int nseg;
uint16_t tot_dsds;
uint16_t req_cnt;
@@ -233,24 +226,11 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
index = (uint32_t)cmd->request->tag;
/* Calculate the number of request entries needed. */
- if (cmd->use_sg) {
- sg = (struct scatterlist *)cmd->request_buffer;
- tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
- cmd->sc_data_direction);
- if (tot_dsds == 0)
- goto queuing_error;
- } else if (cmd->request_bufflen) {
- dma_addr_t req_dma;
-
- req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
- cmd->request_bufflen,
- cmd->sc_data_direction);
- if (dma_mapping_error(req_dma))
- goto queuing_error;
-
- srb->dma_handle = req_dma;
- tot_dsds = 1;
- }
+ nseg = scsi_dma_map(cmd);
+ if (nseg < 0)
+ goto queuing_error;
+ tot_dsds = nseg;
+
req_cnt = qla4xxx_calc_request_entries(tot_dsds);
if (ha->req_q_count < (req_cnt + 2)) {
@@ -279,7 +259,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
- cmd_entry->ttlByteCnt = cpu_to_le32(cmd->request_bufflen);
+ cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
cmd_entry->hdr.entryCount = req_cnt;
@@ -289,13 +269,13 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
* transferred, as the data direction bit is sometimed filled
* in when there is no data to be transferred */
cmd_entry->control_flags = CF_NO_DATA;
- if (cmd->request_bufflen) {
+ if (scsi_bufflen(cmd)) {
if (cmd->sc_data_direction == DMA_TO_DEVICE)
cmd_entry->control_flags = CF_WRITE;
else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
cmd_entry->control_flags = CF_READ;
- ha->bytes_xfered += cmd->request_bufflen;
+ ha->bytes_xfered += scsi_bufflen(cmd);
if (ha->bytes_xfered & ~0xFFFFF){
ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
ha->bytes_xfered &= 0xFFFFF;
@@ -359,14 +339,9 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
return QLA_SUCCESS;
queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
- if (cmd->use_sg && tot_dsds) {
- sg = (struct scatterlist *) cmd->request_buffer;
- pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
- cmd->sc_data_direction);
- } else if (tot_dsds)
- pci_unmap_single(ha->pdev, srb->dma_handle,
- cmd->request_bufflen, cmd->sc_data_direction);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_ERROR;
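The ql4_iocb.c hunks replace the open-coded use_sg/request_buffer handling with scsi_dma_map() and scsi_for_each_sg(); the IOCB builder then simply walks the mapped segments and spills into a continuation entry whenever the current entry's descriptor slots are exhausted. A userspace model of that spill logic; the segment type and the COMMAND_SEG/CONTINUE_SEG values are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define COMMAND_SEG	4	/* descriptor slots in the command entry (illustrative) */
#define CONTINUE_SEG	7	/* slots in each continuation entry (illustrative) */

struct segment { uint64_t dma; uint32_t len; };	/* stands in for a mapped sg element */

/* Count how many continuation entries a transfer of nseg segments needs. */
static int continuation_entries(int nseg)
{
	int avail = COMMAND_SEG, extra = 0, i;

	for (i = 0; i < nseg; i++) {
		if (avail == 0) {		/* current entry is full: allocate another */
			extra++;
			avail = CONTINUE_SEG;
		}
		avail--;			/* one descriptor consumed per segment */
	}
	return extra;
}

int main(void)
{
	int nseg;

	for (nseg = 1; nseg <= 20; nseg++)
		printf("%2d segments -> %d continuation entries\n",
		       nseg, continuation_entries(nseg));
	return 0;
}

The same accessor conversion is what lets the queuing_error path collapse to a single scsi_dma_unmap() call above.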
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 35b9e36a0e8..4a154beb0d3 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -6,6 +6,9 @@
*/
#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
/**
* qla2x00_process_completed_request() - Process a Fast Post response.
@@ -90,9 +93,29 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
break;
}
- if (sts_entry->iscsiFlags &
- (ISCSI_FLAG_RESIDUAL_OVER|ISCSI_FLAG_RESIDUAL_UNDER))
- cmd->resid = residual;
+ if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
+ cmd->result = DID_ERROR << 16;
+ break;
+ }
+
+ if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) {
+ scsi_set_resid(cmd, residual);
+ if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
+ cmd->underflow)) {
+
+ cmd->result = DID_ERROR << 16;
+
+ DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
+ "Mid-layer Data underrun0, "
+ "xferlen = 0x%x, "
+ "residual = 0x%x\n", ha->host_no,
+ cmd->device->channel,
+ cmd->device->id,
+ cmd->device->lun, __func__,
+ scsi_bufflen(cmd), residual));
+ break;
+ }
+ }
cmd->result = DID_OK << 16 | scsi_status;
@@ -161,7 +184,8 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
case SCS_DATA_UNDERRUN:
case SCS_DATA_OVERRUN:
- if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
+ if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
+ (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun, "
"residual = 0x%x\n", ha->host_no,
cmd->device->channel, cmd->device->id,
@@ -171,21 +195,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
break;
}
- if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
- /*
- * Firmware detected a SCSI transport underrun
- * condition
- */
- cmd->resid = residual;
- DEBUG2(printk("scsi%ld:%d:%d:%d: %s: UNDERRUN status "
- "detected, xferlen = 0x%x, residual = "
- "0x%x\n",
- ha->host_no, cmd->device->channel,
- cmd->device->id,
- cmd->device->lun, __func__,
- cmd->request_bufflen,
- residual));
- }
+ scsi_set_resid(cmd, residual);
/*
* If there is scsi_status, it takes precedense over
@@ -227,7 +237,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
if ((sts_entry->iscsiFlags &
ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
cmd->result = DID_BUS_BUSY << 16;
- } else if ((cmd->request_bufflen - residual) <
+ } else if ((scsi_bufflen(cmd) - residual) <
cmd->underflow) {
/*
* Handle mid-layer underflow???
@@ -242,13 +252,13 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
* will return DID_ERROR.
*/
DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
- "Mid-layer Data underrun, "
- "xferlen = 0x%x, "
- "residual = 0x%x\n", ha->host_no,
- cmd->device->channel,
- cmd->device->id,
- cmd->device->lun, __func__,
- cmd->request_bufflen, residual));
+ "Mid-layer Data underrun1, "
+ "xferlen = 0x%x, "
+ "residual = 0x%x\n", ha->host_no,
+ cmd->device->channel,
+ cmd->device->id,
+ cmd->device->lun, __func__,
+ scsi_bufflen(cmd), residual));
cmd->result = DID_ERROR << 16;
} else {
@@ -417,6 +427,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
uint32_t mbox_status)
{
int i;
+ uint32_t mbox_stat2, mbox_stat3;
if ((mbox_status == MBOX_STS_BUSY) ||
(mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
@@ -437,6 +448,12 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
/* Immediately process the AENs that don't require much work.
* Only queue the database_changed AENs */
+ if (ha->aen_log.count < MAX_AEN_ENTRIES) {
+ for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
+ ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
+ readl(&ha->reg->mailbox[i]);
+ ha->aen_log.count++;
+ }
switch (mbox_status) {
case MBOX_ASTS_SYSTEM_ERROR:
/* Log Mailbox registers */
@@ -493,6 +510,16 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
mbox_status));
break;
+ case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
+ mbox_stat2 = readl(&ha->reg->mailbox[2]);
+ mbox_stat3 = readl(&ha->reg->mailbox[3]);
+
+ if ((mbox_stat3 == 5) && (mbox_stat2 == 3))
+ set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
+ else if ((mbox_stat3 == 2) && (mbox_stat2 == 5))
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ break;
+
case MBOX_ASTS_MAC_ADDRESS_CHANGED:
case MBOX_ASTS_DNS:
/* No action */
@@ -518,11 +545,6 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
/* Queue AEN information and process it in the DPC
* routine */
if (ha->aen_q_count > 0) {
- /* advance pointer */
- if (ha->aen_in == (MAX_AEN_ENTRIES - 1))
- ha->aen_in = 0;
- else
- ha->aen_in++;
/* decrement available counter */
ha->aen_q_count--;
@@ -542,6 +564,10 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
ha->aen_q[ha->aen_in].mbox_sts[2],
ha->aen_q[ha->aen_in].mbox_sts[3],
ha->aen_q[ha->aen_in]. mbox_sts[4]));
+ /* advance pointer */
+ ha->aen_in++;
+ if (ha->aen_in == MAX_AEN_ENTRIES)
+ ha->aen_in = 0;
/* The DPC routine will process the aen */
set_bit(DPC_AEN, &ha->dpc_flags);
@@ -724,25 +750,24 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
spin_lock_irqsave(&ha->hardware_lock, flags);
while (ha->aen_out != ha->aen_in) {
- /* Advance pointers for next entry */
- if (ha->aen_out == (MAX_AEN_ENTRIES - 1))
- ha->aen_out = 0;
- else
- ha->aen_out++;
-
- ha->aen_q_count++;
aen = &ha->aen_q[ha->aen_out];
-
/* copy aen information to local structure */
for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
mbox_sts[i] = aen->mbox_sts[i];
+ ha->aen_q_count++;
+ ha->aen_out++;
+
+ if (ha->aen_out == MAX_AEN_ENTRIES)
+ ha->aen_out = 0;
+
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- DEBUG(printk("scsi%ld: AEN[%d] %04x, index [%d] state=%04x "
- "mod=%x conerr=%08x \n", ha->host_no, ha->aen_out,
- mbox_sts[0], mbox_sts[2], mbox_sts[3],
- mbox_sts[1], mbox_sts[4]));
+ DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
+ " mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
+ (ha->aen_out ? (ha->aen_out-1): (MAX_AEN_ENTRIES-1)),
+ mbox_sts[0], mbox_sts[1], mbox_sts[2],
+ mbox_sts[3], mbox_sts[4]));
switch (mbox_sts[0]) {
case MBOX_ASTS_DATABASE_CHANGED:
@@ -792,6 +817,5 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
spin_lock_irqsave(&ha->hardware_lock, flags);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
}
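The ql4_isr.c changes above tighten residual handling: an overrun now fails the command outright, and an underrun records the residual via scsi_set_resid() and escalates to DID_ERROR only when no SCSI status was returned and the bytes actually transferred fall short of the command's underflow threshold. That last test, pulled out as a standalone predicate:

#include <stdint.h>
#include <stdio.h>

/*
 * Mirror of the underrun test in qla4xxx_status_entry(): report an error
 * only if the target returned no SCSI status and the transfer came up
 * short of the midlayer's underflow requirement.
 */
static int underrun_is_error(uint32_t bufflen, uint32_t residual,
			     uint32_t underflow, uint8_t scsi_status)
{
	return !scsi_status && (bufflen - residual) < underflow;
}

int main(void)
{
	/* 4096-byte request, 512 bytes short, midlayer requires at least 4096. */
	printf("short read, no status : %s\n",
	       underrun_is_error(4096, 512, 4096, 0) ? "DID_ERROR" : "ok");
	/* Same shortfall but a SCSI status is present: the status takes precedence. */
	printf("short read, status 02h: %s\n",
	       underrun_is_error(4096, 512, 4096, 0x02) ? "DID_ERROR" : "ok");
	return 0;
}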
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index f116ff91723..35cd73c72a6 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -6,6 +6,9 @@
*/
#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
/**
@@ -169,84 +172,6 @@ mbox_exit:
return status;
}
-
-#if 0
-
-/**
- * qla4xxx_issue_iocb - issue mailbox iocb command
- * @ha: adapter state pointer.
- * @buffer: buffer pointer.
- * @phys_addr: physical address of buffer.
- * @size: size of buffer.
- *
- * Issues iocbs via mailbox commands.
- * TARGET_QUEUE_LOCK must be released.
- * ADAPTER_STATE_LOCK must be released.
- **/
-int
-qla4xxx_issue_iocb(struct scsi_qla_host * ha, void *buffer,
- dma_addr_t phys_addr, size_t size)
-{
- uint32_t mbox_cmd[MBOX_REG_COUNT];
- uint32_t mbox_sts[MBOX_REG_COUNT];
- int status;
-
- memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_sts));
- mbox_cmd[0] = MBOX_CMD_EXECUTE_IOCB_A64;
- mbox_cmd[1] = 0;
- mbox_cmd[2] = LSDW(phys_addr);
- mbox_cmd[3] = MSDW(phys_addr);
- status = qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]);
- return status;
-}
-
-int qla4xxx_conn_close_sess_logout(struct scsi_qla_host * ha,
- uint16_t fw_ddb_index,
- uint16_t connection_id,
- uint16_t option)
-{
- uint32_t mbox_cmd[MBOX_REG_COUNT];
- uint32_t mbox_sts[MBOX_REG_COUNT];
-
- memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_sts));
- mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
- mbox_cmd[1] = fw_ddb_index;
- mbox_cmd[2] = connection_id;
- mbox_cmd[3] = LOGOUT_OPTION_RELOGIN;
- if (qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]) !=
- QLA_SUCCESS) {
- DEBUG2(printk("scsi%ld: %s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
- "option %04x failed sts %04X %04X",
- ha->host_no, __func__,
- option, mbox_sts[0], mbox_sts[1]));
- if (mbox_sts[0] == 0x4005)
- DEBUG2(printk("%s reason %04X\n", __func__,
- mbox_sts[1]));
- }
- return QLA_SUCCESS;
-}
-
-int qla4xxx_clear_database_entry(struct scsi_qla_host * ha,
- uint16_t fw_ddb_index)
-{
- uint32_t mbox_cmd[MBOX_REG_COUNT];
- uint32_t mbox_sts[MBOX_REG_COUNT];
-
- memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_sts));
- mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY;
- mbox_cmd[1] = fw_ddb_index;
- if (qla4xxx_mailbox_command(ha, 2, 5, &mbox_cmd[0], &mbox_sts[0]) !=
- QLA_SUCCESS)
- return QLA_ERROR;
-
- return QLA_SUCCESS;
-}
-
-#endif /* 0 */
-
/**
* qla4xxx_initialize_fw_cb - initializes firmware control block.
* @ha: Pointer to host adapter structure.
@@ -272,10 +197,13 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
/* Get Initialize Firmware Control Block. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
+
mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
mbox_cmd[2] = LSDW(init_fw_cb_dma);
mbox_cmd[3] = MSDW(init_fw_cb_dma);
- if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) !=
+ mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
dma_free_coherent(&ha->pdev->dev,
sizeof(struct init_fw_ctrl_blk),
@@ -287,51 +215,56 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
qla4xxx_init_rings(ha);
/* Fill in the request and response queue information. */
- init_fw_cb->ReqQConsumerIndex = cpu_to_le16(ha->request_out);
- init_fw_cb->ComplQProducerIndex = cpu_to_le16(ha->response_in);
- init_fw_cb->ReqQLen = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
- init_fw_cb->ComplQLen = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
- init_fw_cb->ReqQAddrLo = cpu_to_le32(LSDW(ha->request_dma));
- init_fw_cb->ReqQAddrHi = cpu_to_le32(MSDW(ha->request_dma));
- init_fw_cb->ComplQAddrLo = cpu_to_le32(LSDW(ha->response_dma));
- init_fw_cb->ComplQAddrHi = cpu_to_le32(MSDW(ha->response_dma));
- init_fw_cb->ShadowRegBufAddrLo =
+ init_fw_cb->pri.rqq_consumer_idx = cpu_to_le16(ha->request_out);
+ init_fw_cb->pri.compq_producer_idx = cpu_to_le16(ha->response_in);
+ init_fw_cb->pri.rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
+ init_fw_cb->pri.compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
+ init_fw_cb->pri.rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
+ init_fw_cb->pri.rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
+ init_fw_cb->pri.compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
+ init_fw_cb->pri.compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
+ init_fw_cb->pri.shdwreg_addr_lo =
cpu_to_le32(LSDW(ha->shadow_regs_dma));
- init_fw_cb->ShadowRegBufAddrHi =
+ init_fw_cb->pri.shdwreg_addr_hi =
cpu_to_le32(MSDW(ha->shadow_regs_dma));
/* Set up required options. */
- init_fw_cb->FwOptions |=
+ init_fw_cb->pri.fw_options |=
__constant_cpu_to_le16(FWOPT_SESSION_MODE |
FWOPT_INITIATOR_MODE);
- init_fw_cb->FwOptions &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
+ init_fw_cb->pri.fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
/* Save some info in adapter structure. */
- ha->firmware_options = le16_to_cpu(init_fw_cb->FwOptions);
- ha->tcp_options = le16_to_cpu(init_fw_cb->TCPOptions);
- ha->heartbeat_interval = init_fw_cb->HeartbeatInterval;
- memcpy(ha->ip_address, init_fw_cb->IPAddr,
- min(sizeof(ha->ip_address), sizeof(init_fw_cb->IPAddr)));
- memcpy(ha->subnet_mask, init_fw_cb->SubnetMask,
- min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->SubnetMask)));
- memcpy(ha->gateway, init_fw_cb->GatewayIPAddr,
- min(sizeof(ha->gateway), sizeof(init_fw_cb->GatewayIPAddr)));
- memcpy(ha->name_string, init_fw_cb->iSCSINameString,
+ ha->firmware_options = le16_to_cpu(init_fw_cb->pri.fw_options);
+ ha->tcp_options = le16_to_cpu(init_fw_cb->pri.ipv4_tcp_opts);
+ ha->heartbeat_interval = init_fw_cb->pri.hb_interval;
+ memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr,
+ min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr)));
+ memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet,
+ min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet)));
+ memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr,
+ min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr)));
+ memcpy(ha->name_string, init_fw_cb->pri.iscsi_name,
min(sizeof(ha->name_string),
- sizeof(init_fw_cb->iSCSINameString)));
- memcpy(ha->alias, init_fw_cb->Alias,
- min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));
+ sizeof(init_fw_cb->pri.iscsi_name)));
+ /*memcpy(ha->alias, init_fw_cb->Alias,
+ min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
/* Save Command Line Paramater info */
- ha->port_down_retry_count = le16_to_cpu(init_fw_cb->KeepAliveTimeout);
+ ha->port_down_retry_count = le16_to_cpu(init_fw_cb->pri.conn_ka_timeout);
ha->discovery_wait = ql4xdiscoverywait;
/* Send Initialize Firmware Control Block. */
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
mbox_cmd[1] = 0;
mbox_cmd[2] = LSDW(init_fw_cb_dma);
mbox_cmd[3] = MSDW(init_fw_cb_dma);
- if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) ==
+ mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) ==
QLA_SUCCESS)
status = QLA_SUCCESS;
else {
@@ -368,12 +301,14 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
/* Get Initialize Firmware Control Block. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
+
memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
mbox_cmd[2] = LSDW(init_fw_cb_dma);
mbox_cmd[3] = MSDW(init_fw_cb_dma);
+ mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
- if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) !=
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
ha->host_no, __func__));
@@ -384,12 +319,12 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
}
/* Save IP Address. */
- memcpy(ha->ip_address, init_fw_cb->IPAddr,
- min(sizeof(ha->ip_address), sizeof(init_fw_cb->IPAddr)));
- memcpy(ha->subnet_mask, init_fw_cb->SubnetMask,
- min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->SubnetMask)));
- memcpy(ha->gateway, init_fw_cb->GatewayIPAddr,
- min(sizeof(ha->gateway), sizeof(init_fw_cb->GatewayIPAddr)));
+ memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr,
+ min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr)));
+ memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet,
+ min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet)));
+ memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr,
+ min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr)));
dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
init_fw_cb, init_fw_cb_dma);
@@ -409,8 +344,10 @@ int qla4xxx_get_firmware_state(struct scsi_qla_host * ha)
/* Get firmware state. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
+
mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;
- if (qla4xxx_mailbox_command(ha, 1, 4, &mbox_cmd[0], &mbox_sts[0]) !=
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 4, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ "
"status %04X\n", ha->host_no, __func__,
@@ -438,8 +375,10 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
/* Get firmware status. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
+
mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS;
- if (qla4xxx_mailbox_command(ha, 1, 3, &mbox_cmd[0], &mbox_sts[0]) !=
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ "
"status %04X\n", ha->host_no, __func__,
@@ -491,11 +430,14 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
}
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
+
mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY;
mbox_cmd[1] = (uint32_t) fw_ddb_index;
mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
- if (qla4xxx_mailbox_command(ha, 4, 7, &mbox_cmd[0], &mbox_sts[0]) ==
+ mbox_cmd[4] = sizeof(struct dev_db_entry);
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 7, &mbox_cmd[0], &mbox_sts[0]) ==
QLA_ERROR) {
DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed"
" with status 0x%04X\n", ha->host_no, __func__,
@@ -512,11 +454,11 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
dev_info(&ha->pdev->dev, "DDB[%d] MB0 %04x Tot %d Next %d "
"State %04x ConnErr %08x %d.%d.%d.%d:%04d \"%s\"\n",
fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3],
- mbox_sts[4], mbox_sts[5], fw_ddb_entry->ipAddr[0],
- fw_ddb_entry->ipAddr[1], fw_ddb_entry->ipAddr[2],
- fw_ddb_entry->ipAddr[3],
- le16_to_cpu(fw_ddb_entry->portNumber),
- fw_ddb_entry->iscsiName);
+ mbox_sts[4], mbox_sts[5], fw_ddb_entry->ip_addr[0],
+ fw_ddb_entry->ip_addr[1], fw_ddb_entry->ip_addr[2],
+ fw_ddb_entry->ip_addr[3],
+ le16_to_cpu(fw_ddb_entry->port),
+ fw_ddb_entry->iscsi_name);
}
if (num_valid_ddb_entries)
*num_valid_ddb_entries = mbox_sts[2];
@@ -571,35 +513,10 @@ int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
mbox_cmd[1] = (uint32_t) fw_ddb_index;
mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
- return qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]);
-}
+ mbox_cmd[4] = sizeof(struct dev_db_entry);
-#if 0
-int qla4xxx_conn_open_session_login(struct scsi_qla_host * ha,
- uint16_t fw_ddb_index)
-{
- int status = QLA_ERROR;
- uint32_t mbox_cmd[MBOX_REG_COUNT];
- uint32_t mbox_sts[MBOX_REG_COUNT];
-
- /* Do not wait for completion. The firmware will send us an
- * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
- */
- memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_sts));
- mbox_cmd[0] = MBOX_CMD_CONN_OPEN_SESS_LOGIN;
- mbox_cmd[1] = (uint32_t) fw_ddb_index;
- mbox_cmd[2] = 0;
- mbox_cmd[3] = 0;
- mbox_cmd[4] = 0;
- status = qla4xxx_mailbox_command(ha, 4, 0, &mbox_cmd[0], &mbox_sts[0]);
- DEBUG2(printk("%s fw_ddb_index=%d status=%d mbx0_1=0x%x :0x%x\n",
- __func__, fw_ddb_index, status, mbox_sts[0],
- mbox_sts[1]);)
-
- return status;
+ return qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]);
}
-#endif /* 0 */
/**
* qla4xxx_get_crash_record - retrieves crash record.
@@ -614,12 +531,14 @@ void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
struct crash_record *crash_record = NULL;
dma_addr_t crash_record_dma = 0;
uint32_t crash_record_size = 0;
+
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_cmd));
/* Get size of crash record. */
mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
- if (qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve size!\n",
ha->host_no, __func__));
@@ -639,11 +558,15 @@ void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
goto exit_get_crash_record;
/* Get Crash Record. */
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
mbox_cmd[2] = LSDW(crash_record_dma);
mbox_cmd[3] = MSDW(crash_record_dma);
mbox_cmd[4] = crash_record_size;
- if (qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS)
goto exit_get_crash_record;
@@ -655,7 +578,6 @@ exit_get_crash_record:
crash_record, crash_record_dma);
}
-#if 0
/**
* qla4xxx_get_conn_event_log - retrieves connection event log
* @ha: Pointer to host adapter structure.
@@ -678,7 +600,8 @@ void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
/* Get size of connection event log. */
mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
- if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS)
goto exit_get_event_log;
@@ -693,10 +616,14 @@ void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
goto exit_get_event_log;
/* Get Connection Event Log. */
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
mbox_cmd[2] = LSDW(event_log_dma);
mbox_cmd[3] = MSDW(event_log_dma);
- if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve event "
"log!\n", ha->host_no, __func__));
@@ -745,7 +672,6 @@ exit_get_event_log:
dma_free_coherent(&ha->pdev->dev, event_log_size, event_log,
event_log_dma);
}
-#endif /* 0 */
/**
* qla4xxx_reset_lun - issues LUN Reset
@@ -773,11 +699,13 @@ int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
*/
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
+
mbox_cmd[0] = MBOX_CMD_LUN_RESET;
mbox_cmd[1] = ddb_entry->fw_ddb_index;
mbox_cmd[2] = lun << 8;
mbox_cmd[5] = 0x01; /* Immediate Command Enable */
- qla4xxx_mailbox_command(ha, 6, 1, &mbox_cmd[0], &mbox_sts[0]);
+
+ qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]);
if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
status = QLA_ERROR;
@@ -794,12 +722,14 @@ int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
+
mbox_cmd[0] = MBOX_CMD_READ_FLASH;
mbox_cmd[1] = LSDW(dma_addr);
mbox_cmd[2] = MSDW(dma_addr);
mbox_cmd[3] = offset;
mbox_cmd[4] = len;
- if (qla4xxx_mailbox_command(ha, 5, 2, &mbox_cmd[0], &mbox_sts[0]) !=
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ "
"status %04X %04X, offset %08x, len %08x\n", ha->host_no,
@@ -825,8 +755,10 @@ int qla4xxx_get_fw_version(struct scsi_qla_host * ha)
/* Get firmware version. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
+
mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
- if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: MBOX_CMD_ABOUT_FW failed w/ "
"status %04X\n", ha->host_no, __func__, mbox_sts[0]));
@@ -855,7 +787,7 @@ static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
mbox_cmd[2] = LSDW(dma_addr);
mbox_cmd[3] = MSDW(dma_addr);
- if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) !=
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
ha->host_no, __func__, mbox_sts[0]));
@@ -875,7 +807,7 @@ static int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t *ddb_index)
mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY;
mbox_cmd[1] = MAX_PRST_DEV_DB_ENTRIES;
- if (qla4xxx_mailbox_command(ha, 2, 3, &mbox_cmd[0], &mbox_sts[0]) !=
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
if (mbox_sts[0] == MBOX_STS_COMMAND_ERROR) {
*ddb_index = mbox_sts[2];
@@ -918,23 +850,23 @@ int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port)
if (ret_val != QLA_SUCCESS)
goto qla4xxx_send_tgts_exit;
- memset((void *)fw_ddb_entry->iSCSIAlias, 0,
- sizeof(fw_ddb_entry->iSCSIAlias));
+ memset(fw_ddb_entry->iscsi_alias, 0,
+ sizeof(fw_ddb_entry->iscsi_alias));
- memset((void *)fw_ddb_entry->iscsiName, 0,
- sizeof(fw_ddb_entry->iscsiName));
+ memset(fw_ddb_entry->iscsi_name, 0,
+ sizeof(fw_ddb_entry->iscsi_name));
- memset((void *)fw_ddb_entry->ipAddr, 0, sizeof(fw_ddb_entry->ipAddr));
- memset((void *)fw_ddb_entry->targetAddr, 0,
- sizeof(fw_ddb_entry->targetAddr));
+ memset(fw_ddb_entry->ip_addr, 0, sizeof(fw_ddb_entry->ip_addr));
+ memset(fw_ddb_entry->tgt_addr, 0,
+ sizeof(fw_ddb_entry->tgt_addr));
fw_ddb_entry->options = (DDB_OPT_DISC_SESSION | DDB_OPT_TARGET);
- fw_ddb_entry->portNumber = cpu_to_le16(ntohs(port));
+ fw_ddb_entry->port = cpu_to_le16(ntohs(port));
- fw_ddb_entry->ipAddr[0] = *ip;
- fw_ddb_entry->ipAddr[1] = *(ip + 1);
- fw_ddb_entry->ipAddr[2] = *(ip + 2);
- fw_ddb_entry->ipAddr[3] = *(ip + 3);
+ fw_ddb_entry->ip_addr[0] = *ip;
+ fw_ddb_entry->ip_addr[1] = *(ip + 1);
+ fw_ddb_entry->ip_addr[2] = *(ip + 2);
+ fw_ddb_entry->ip_addr[3] = *(ip + 3);
ret_val = qla4xxx_set_ddb_entry(ha, ddb_index, fw_ddb_entry_dma);
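
The ql4_mbx.c hunks above all converge on one calling convention: clear both register arrays, put the opcode in mbox_cmd[0], pass any DMA buffer as LSDW/MSDW halves in mbox_cmd[2]/mbox_cmd[3] with its length in mbox_cmd[4], and always hand qla4xxx_mailbox_command() the full MBOX_REG_COUNT input registers. A minimal sketch of that pattern follows; the wrapper itself is illustrative only and is not a function added by this series.

static int example_get_ddb(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
			   dma_addr_t fw_ddb_entry_dma)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	/* Both register arrays are cleared before every command. */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY;
	mbox_cmd[1] = (uint32_t) fw_ddb_index;
	mbox_cmd[2] = LSDW(fw_ddb_entry_dma);		/* DMA address, low 32 bits */
	mbox_cmd[3] = MSDW(fw_ddb_entry_dma);		/* DMA address, high 32 bits */
	mbox_cmd[4] = sizeof(struct dev_db_entry);	/* buffer length, new in this series */

	return qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 7,
				       &mbox_cmd[0], &mbox_sts[0]);
}
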
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index 58afd135aa1..7fe0482ecf0 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -6,6 +6,9 @@
*/
#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha)
{
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index da21f5fbbf8..e69160a7bc6 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -10,6 +10,10 @@
#include <scsi/scsicam.h>
#include "ql4_def.h"
+#include "ql4_version.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
/*
* Driver version
@@ -50,12 +54,15 @@ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
/*
* iSCSI template entry points
*/
-static int qla4xxx_tgt_dscvr(enum iscsi_tgt_dscvr type, uint32_t host_no,
- uint32_t enable, struct sockaddr *dst_addr);
+static int qla4xxx_tgt_dscvr(struct Scsi_Host *shost,
+ enum iscsi_tgt_dscvr type, uint32_t enable,
+ struct sockaddr *dst_addr);
static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
enum iscsi_param param, char *buf);
static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
enum iscsi_param param, char *buf);
+static int qla4xxx_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf);
static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag);
static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session);
@@ -95,16 +102,20 @@ static struct scsi_host_template qla4xxx_driver_template = {
static struct iscsi_transport qla4xxx_iscsi_transport = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
- .param_mask = ISCSI_CONN_PORT |
- ISCSI_CONN_ADDRESS |
- ISCSI_TARGET_NAME |
- ISCSI_TPGT,
+ .caps = CAP_FW_DB | CAP_SENDTARGETS_OFFLOAD |
+ CAP_DATA_PATH_OFFLOAD,
+ .param_mask = ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS |
+ ISCSI_TARGET_NAME | ISCSI_TPGT,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+ ISCSI_HOST_IPADDRESS |
+ ISCSI_HOST_INITIATOR_NAME,
.sessiondata_size = sizeof(struct ddb_entry),
.host_template = &qla4xxx_driver_template,
.tgt_dscvr = qla4xxx_tgt_dscvr,
.get_conn_param = qla4xxx_conn_get_param,
.get_session_param = qla4xxx_sess_get_param,
+ .get_host_param = qla4xxx_host_get_param,
.start_conn = qla4xxx_conn_start,
.stop_conn = qla4xxx_conn_stop,
.session_recovery_timedout = qla4xxx_recovery_timedout,
@@ -161,6 +172,43 @@ static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag)
printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag);
}
+static ssize_t format_addr(char *buf, const unsigned char *addr, int len)
+{
+ int i;
+ char *cp = buf;
+
+ for (i = 0; i < len; i++)
+ cp += sprintf(cp, "%02x%c", addr[i],
+ i == (len - 1) ? '\n' : ':');
+ return cp - buf;
+}
+
+
+static int qla4xxx_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ int len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ len = format_addr(buf, ha->my_mac, MAC_ADDR_LEN);
+ break;
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ len = sprintf(buf, "%d.%d.%d.%d\n", ha->ip_address[0],
+ ha->ip_address[1], ha->ip_address[2],
+ ha->ip_address[3]);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ len = sprintf(buf, "%s\n", ha->name_string);
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return len;
+}
+
static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
enum iscsi_param param, char *buf)
{
@@ -208,21 +256,15 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
return len;
}
-static int qla4xxx_tgt_dscvr(enum iscsi_tgt_dscvr type, uint32_t host_no,
- uint32_t enable, struct sockaddr *dst_addr)
+static int qla4xxx_tgt_dscvr(struct Scsi_Host *shost,
+ enum iscsi_tgt_dscvr type, uint32_t enable,
+ struct sockaddr *dst_addr)
{
struct scsi_qla_host *ha;
- struct Scsi_Host *shost;
struct sockaddr_in *addr;
struct sockaddr_in6 *addr6;
int ret = 0;
- shost = scsi_host_lookup(host_no);
- if (IS_ERR(shost)) {
- printk(KERN_ERR "Could not find host no %u\n", host_no);
- return -ENODEV;
- }
-
ha = (struct scsi_qla_host *) shost->hostdata;
switch (type) {
@@ -246,8 +288,6 @@ static int qla4xxx_tgt_dscvr(enum iscsi_tgt_dscvr type, uint32_t host_no,
default:
ret = -ENOSYS;
}
-
- scsi_host_put(shost);
return ret;
}
@@ -369,14 +409,7 @@ static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
struct scsi_cmnd *cmd = srb->cmd;
if (srb->flags & SRB_DMA_VALID) {
- if (cmd->use_sg) {
- pci_unmap_sg(ha->pdev, cmd->request_buffer,
- cmd->use_sg, cmd->sc_data_direction);
- } else if (cmd->request_bufflen) {
- pci_unmap_single(ha->pdev, srb->dma_handle,
- cmd->request_bufflen,
- cmd->sc_data_direction);
- }
+ scsi_dma_unmap(cmd);
srb->flags &= ~SRB_DMA_VALID;
}
cmd->SCp.ptr = NULL;
@@ -711,7 +744,7 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
return stat;
}
-static void qla4xxx_hw_reset(struct scsi_qla_host *ha)
+void qla4xxx_hw_reset(struct scsi_qla_host *ha)
{
uint32_t ctrl_status;
unsigned long flags = 0;
@@ -1081,13 +1114,13 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
if (ha->timer_active)
qla4xxx_stop_timer(ha);
- /* free extra memory */
- qla4xxx_mem_free(ha);
-
/* Detach interrupts */
if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
free_irq(ha->pdev->irq, ha);
+ /* free extra memory */
+ qla4xxx_mem_free(ha);
+
pci_disable_device(ha->pdev);
}
@@ -1332,6 +1365,11 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
ha = pci_get_drvdata(pdev);
+ qla4xxx_disable_intrs(ha);
+
+ while (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags))
+ ssleep(1);
+
/* remove devs from iscsi_sessions to scsi_devices */
qla4xxx_free_ddb_list(ha);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index e5183a697d1..ab984cb89ce 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,5 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.00.07-k1"
+#define QLA4XXX_DRIVER_VERSION "5.01.00-k8"
+
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index 2e7db18f5ae..2bfbf26c00e 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -265,8 +265,6 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
unsigned int message; /* scsi returned message */
unsigned int phase; /* recorded scsi phase */
unsigned int reqlen; /* total length of transfer */
- struct scatterlist *sglist; /* scatter-gather list pointer */
- unsigned int sgcount; /* sg counter */
char *buf;
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
int qbase = priv->qbase;
@@ -301,9 +299,10 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
if (inb(qbase + 7) & 0x1f) /* if some bytes in fifo */
outb(1, qbase + 3); /* clear fifo */
/* note that request_bufflen is the total xfer size when sg is used */
- reqlen = cmd->request_bufflen;
+ reqlen = scsi_bufflen(cmd);
/* note that it won't work if transfers > 16M are requested */
if (reqlen && !((phase = inb(qbase + 4)) & 6)) { /* data phase */
+ struct scatterlist *sg;
rtrc(2)
outb(reqlen, qbase); /* low-mid xfer cnt */
outb(reqlen >> 8, qbase + 1); /* low-mid xfer cnt */
@@ -311,23 +310,16 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
outb(0x90, qbase + 3); /* command do xfer */
/* PIO pseudo DMA to buffer or sglist */
REG1;
- if (!cmd->use_sg)
- ql_pdma(priv, phase, cmd->request_buffer,
- cmd->request_bufflen);
- else {
- sgcount = cmd->use_sg;
- sglist = cmd->request_buffer;
- while (sgcount--) {
- if (priv->qabort) {
- REG0;
- return ((priv->qabort == 1 ?
- DID_ABORT : DID_RESET) << 16);
- }
- buf = page_address(sglist->page) + sglist->offset;
- if (ql_pdma(priv, phase, buf, sglist->length))
- break;
- sglist++;
+
+ scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
+ if (priv->qabort) {
+ REG0;
+ return ((priv->qabort == 1 ?
+ DID_ABORT : DID_RESET) << 16);
}
+ buf = page_address(sg->page) + sg->offset;
+ if (ql_pdma(priv, phase, buf, sg->length))
+ break;
}
REG0;
rtrc(2)
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 4c1e3133476..a691dda40d2 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -368,7 +368,7 @@ void scsi_log_send(struct scsi_cmnd *cmd)
if (level > 3) {
printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
" done = 0x%p, queuecommand 0x%p\n",
- cmd->request_buffer, cmd->request_bufflen,
+ scsi_sglist(cmd), scsi_bufflen(cmd),
cmd->done,
cmd->device->host->hostt->queuecommand);
@@ -1016,52 +1016,6 @@ struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
}
EXPORT_SYMBOL(scsi_device_lookup);
-/**
- * scsi_device_cancel - cancel outstanding IO to this device
- * @sdev: Pointer to struct scsi_device
- * @recovery: Boolean instructing function to recover device or not.
- *
- **/
-int scsi_device_cancel(struct scsi_device *sdev, int recovery)
-{
- struct scsi_cmnd *scmd;
- LIST_HEAD(active_list);
- struct list_head *lh, *lh_sf;
- unsigned long flags;
-
- scsi_device_set_state(sdev, SDEV_CANCEL);
-
- spin_lock_irqsave(&sdev->list_lock, flags);
- list_for_each_entry(scmd, &sdev->cmd_list, list) {
- if (scmd->request) {
- /*
- * If we are unable to remove the timer, it means
- * that the command has already timed out or
- * finished.
- */
- if (!scsi_delete_timer(scmd))
- continue;
- list_add_tail(&scmd->eh_entry, &active_list);
- }
- }
- spin_unlock_irqrestore(&sdev->list_lock, flags);
-
- if (!list_empty(&active_list)) {
- list_for_each_safe(lh, lh_sf, &active_list) {
- scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
- list_del_init(lh);
- if (recovery &&
- !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
- scmd->result = (DID_ABORT << 16);
- scsi_finish_command(scmd);
- }
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL(scsi_device_cancel);
-
MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 06229f225ee..4cd9c58efef 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2405,7 +2405,7 @@ MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
-MODULE_PARM_DESC(every_nth, "timeout every nth command(def=100)");
+MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 18dd5cc4d7c..19c44f0781f 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -128,6 +128,7 @@ static struct {
{"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN},
{"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN},
{"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36},
+ {"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN},
{"BROWNIE", "1600U3P", NULL, BLIST_NOREPORTLUN},
{"CANON", "IPUBJD", NULL, BLIST_SPARSELUN},
{"CBOX3", "USB Storage-SMC", "300A", BLIST_FORCELUN | BLIST_INQUIRY_36},
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index e8350c562d2..9adb64ac054 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -18,12 +18,12 @@
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
-#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
+#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -640,16 +640,8 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
memcpy(scmd->cmnd, cmnd, cmnd_size);
if (copy_sense) {
- gfp_t gfp_mask = GFP_ATOMIC;
-
- if (shost->hostt->unchecked_isa_dma)
- gfp_mask |= __GFP_DMA;
-
- sgl.page = alloc_page(gfp_mask);
- if (!sgl.page)
- return FAILED;
- sgl.offset = 0;
- sgl.length = 252;
+ sg_init_one(&sgl, scmd->sense_buffer,
+ sizeof(scmd->sense_buffer));
scmd->sc_data_direction = DMA_FROM_DEVICE;
scmd->request_bufflen = sgl.length;
@@ -720,18 +712,6 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
/*
- * Last chance to have valid sense data.
- */
- if (copy_sense) {
- if (!SCSI_SENSE_VALID(scmd)) {
- memcpy(scmd->sense_buffer, page_address(sgl.page),
- sizeof(scmd->sense_buffer));
- }
- __free_page(sgl.page);
- }
-
-
- /*
* Restore original data
*/
scmd->request_buffer = old_buffer;
diff --git a/drivers/scsi/scsi_lib_dma.c b/drivers/scsi/scsi_lib_dma.c
new file mode 100644
index 00000000000..ac6855cd265
--- /dev/null
+++ b/drivers/scsi/scsi_lib_dma.c
@@ -0,0 +1,50 @@
+/*
+ * SCSI library functions depending on DMA
+ */
+
+#include <linux/blkdev.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+/**
+ * scsi_dma_map - perform DMA mapping against command's sg lists
+ * @cmd: scsi command
+ *
+ * Returns the number of sg entries actually mapped, zero if the command
+ * has no sg list, or -ENOMEM if the mapping failed.
+ */
+int scsi_dma_map(struct scsi_cmnd *cmd)
+{
+ int nseg = 0;
+
+ if (scsi_sg_count(cmd)) {
+ struct device *dev = cmd->device->host->shost_gendev.parent;
+
+ nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
+ cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ return -ENOMEM;
+ }
+ return nseg;
+}
+EXPORT_SYMBOL(scsi_dma_map);
+
+/**
+ * scsi_dma_unmap - unmap command's sg lists mapped by scsi_dma_map
+ * @cmd: scsi command
+ */
+void scsi_dma_unmap(struct scsi_cmnd *cmd)
+{
+ if (scsi_sg_count(cmd)) {
+ struct device *dev = cmd->device->host->shost_gendev.parent;
+
+ dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
+ cmd->sc_data_direction);
+ }
+}
+EXPORT_SYMBOL(scsi_dma_unmap);
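
These two helpers are what the per-driver conversions elsewhere in this merge (the qla4xxx srb_free_dma and qlogicfas408 hunks above, for example) build on: instead of touching cmd->request_buffer and cmd->use_sg directly, a low-level driver maps the command with scsi_dma_map(), walks the mapped segments with scsi_for_each_sg(), and releases the mapping with scsi_dma_unmap() on completion. A minimal sketch of that flow, assuming a hypothetical driver; exampledrv_program_sg() stands in for the hardware-specific step and is not part of this series.

#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical hardware hook, for illustration only. */
static void exampledrv_program_sg(struct scsi_cmnd *cmd, int idx,
				  dma_addr_t addr, unsigned int len);

static int exampledrv_map_command(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int nseg, i;

	nseg = scsi_dma_map(cmd);	/* 0 = no data, < 0 = mapping failed */
	if (nseg < 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* Program each mapped segment into the (hypothetical) hardware. */
	scsi_for_each_sg(cmd, sg, nseg, i)
		exampledrv_program_sg(cmd, i, sg_dma_address(sg),
				      sg_dma_len(sg));

	return 0;
}

/* On completion the driver undoes the mapping with scsi_dma_unmap(cmd). */
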
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 662577fbe7a..a86e62f4b3b 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -703,16 +703,14 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
/**
* scsi_add_lun - allocate and fully initialize a scsi_device
- * @sdevscan: holds information to be stored in the new scsi_device
- * @sdevnew: store the address of the newly allocated scsi_device
+ * @sdev: holds information to be stored in the new scsi_device
* @inq_result: holds the result of a previous INQUIRY to the LUN
* @bflags: black/white list flag
+ * @async: 1 if this device is being scanned asynchronously
*
* Description:
- * Allocate and initialize a scsi_device matching sdevscan. Optionally
- * set fields based on values in *@bflags. If @sdevnew is not
- * NULL, store the address of the new scsi_device in *@sdevnew (needed
- * when scanning a particular LUN).
+ * Initialize the scsi_device @sdev. Optionally set fields based
+ * on values in *@bflags.
*
* Return:
* SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
@@ -752,25 +750,15 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->rev = (char *) (sdev->inquiry + 32);
if (*bflags & BLIST_ISROM) {
- /*
- * It would be better to modify sdev->type, and set
- * sdev->removable; this can now be done since
- * print_inquiry has gone away.
- */
- inq_result[0] = TYPE_ROM;
- inq_result[1] |= 0x80; /* removable */
- } else if (*bflags & BLIST_NO_ULD_ATTACH)
- sdev->no_uld_attach = 1;
+ sdev->type = TYPE_ROM;
+ sdev->removable = 1;
+ } else {
+ sdev->type = (inq_result[0] & 0x1f);
+ sdev->removable = (inq_result[1] & 0x80) >> 7;
+ }
- switch (sdev->type = (inq_result[0] & 0x1f)) {
+ switch (sdev->type) {
case TYPE_RBC:
- /* RBC devices can return SCSI-3 compliance and yet
- * still not support REPORT LUNS, so make them act as
- * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
- * specifically set */
- if ((*bflags & BLIST_REPORTLUN2) == 0)
- *bflags |= BLIST_NOREPORTLUN;
- /* fall through */
case TYPE_TAPE:
case TYPE_DISK:
case TYPE_PRINTER:
@@ -784,13 +772,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->writeable = 1;
break;
case TYPE_ROM:
- /* MMC devices can return SCSI-3 compliance and yet
- * still not support REPORT LUNS, so make them act as
- * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
- * specifically set */
- if ((*bflags & BLIST_REPORTLUN2) == 0)
- *bflags |= BLIST_NOREPORTLUN;
- /* fall through */
case TYPE_WORM:
sdev->writeable = 0;
break;
@@ -798,6 +779,15 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
printk(KERN_INFO "scsi: unknown device type %d\n", sdev->type);
}
+ if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
+ /* RBC and MMC devices can return SCSI-3 compliance and yet
+ * still not support REPORT LUNS, so make them act as
+ * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
+ * specifically set */
+ if ((*bflags & BLIST_REPORTLUN2) == 0)
+ *bflags |= BLIST_NOREPORTLUN;
+ }
+
/*
* For a peripheral qualifier (PQ) value of 1 (001b), the SCSI
* spec says: The device server is capable of supporting the
@@ -815,12 +805,11 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
*/
sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
- sdev->removable = (0x80 & inq_result[1]) >> 7;
sdev->lockable = sdev->removable;
sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);
- if (sdev->scsi_level >= SCSI_3 || (sdev->inquiry_len > 56 &&
- inq_result[56] & 0x04))
+ if (sdev->scsi_level >= SCSI_3 ||
+ (sdev->inquiry_len > 56 && inq_result[56] & 0x04))
sdev->ppr = 1;
if (inq_result[7] & 0x60)
sdev->wdtr = 1;
@@ -833,13 +822,10 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->inq_periph_qual, inq_result[2] & 0x07,
(inq_result[3] & 0x0f) == 1 ? " CCS" : "");
- /*
- * End sysfs code.
- */
-
if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
!(*bflags & BLIST_NOTQ))
sdev->tagged_supported = 1;
+
/*
* Some devices (Texel CD ROM drives) have handshaking problems
* when used with the Seagate controllers. borken is initialized
@@ -848,6 +834,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
if ((*bflags & BLIST_BORKEN) == 0)
sdev->borken = 0;
+ if (*bflags & BLIST_NO_ULD_ATTACH)
+ sdev->no_uld_attach = 1;
+
/*
* Apparently some really broken devices (contrary to the SCSI
* standards) need to be selected without asserting ATN
@@ -872,7 +861,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (*bflags & BLIST_SINGLELUN)
sdev->single_lun = 1;
-
sdev->use_10_for_rw = 1;
if (*bflags & BLIST_MS_SKIP_PAGE_08)
@@ -1213,7 +1201,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
* Given a struct scsi_lun of: 0a 04 0b 03 00 00 00 00, this function returns
* the integer: 0x0b030a04
**/
-static int scsilun_to_int(struct scsi_lun *scsilun)
+int scsilun_to_int(struct scsi_lun *scsilun)
{
int i;
unsigned int lun;
@@ -1224,6 +1212,7 @@ static int scsilun_to_int(struct scsi_lun *scsilun)
scsilun->scsi_lun[i + 1]) << (i * 8));
return lun;
}
+EXPORT_SYMBOL(scsilun_to_int);
/**
* int_to_scsilun: reverts an int into a scsi_lun
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 67a38a1409b..ed720863ab9 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -293,30 +293,18 @@ static int scsi_bus_suspend(struct device * dev, pm_message_t state)
{
struct device_driver *drv = dev->driver;
struct scsi_device *sdev = to_scsi_device(dev);
- struct scsi_host_template *sht = sdev->host->hostt;
int err;
err = scsi_device_quiesce(sdev);
if (err)
return err;
- /* call HLD suspend first */
if (drv && drv->suspend) {
err = drv->suspend(dev, state);
if (err)
return err;
}
- /* then, call host suspend */
- if (sht->suspend) {
- err = sht->suspend(sdev, state);
- if (err) {
- if (drv && drv->resume)
- drv->resume(dev);
- return err;
- }
- }
-
return 0;
}
@@ -324,21 +312,14 @@ static int scsi_bus_resume(struct device * dev)
{
struct device_driver *drv = dev->driver;
struct scsi_device *sdev = to_scsi_device(dev);
- struct scsi_host_template *sht = sdev->host->hostt;
- int err = 0, err2 = 0;
-
- /* call host resume first */
- if (sht->resume)
- err = sht->resume(sdev);
+ int err = 0;
- /* then, call HLD resume */
if (drv && drv->resume)
- err2 = drv->resume(dev);
+ err = drv->resume(dev);
scsi_device_resume(sdev);
- /* favor LLD failure */
- return err ? err : err2;;
+ return err;
}
struct bus_type scsi_bus_type = {
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index b4d1ece46f7..e8825709797 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1,4 +1,4 @@
-/*
+/*
* FiberChannel transport specific attributes exported to sysfs.
*
* Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved.
@@ -19,9 +19,10 @@
*
* ========
*
- * Copyright (C) 2004-2005 James Smart, Emulex Corporation
+ * Copyright (C) 2004-2007 James Smart, Emulex Corporation
* Rewrite for host, target, device, and remote port attributes,
* statistics, and service functions...
+ * Add vports, etc
*
*/
#include <linux/module.h>
@@ -37,6 +38,34 @@
#include "scsi_priv.h"
static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
+static void fc_vport_sched_delete(struct work_struct *work);
+
+/*
+ * This is a temporary carrier for creating a vport. It will eventually
+ * be replaced by a real message definition for sgio or netlink.
+ *
+ * fc_vport_identifiers: This set of data contains all elements
+ * to uniquely identify and instantiate a FC virtual port.
+ *
+ * Notes:
+ * symbolic_name: The driver is to append the symbolic_name string data
+ * to the symbolic_node_name data that it generates by default.
+ * the resulting combination should then be registered with the switch.
+ * It is expected that things like Xen may stuff a VM title into
+ * this field.
+ */
+struct fc_vport_identifiers {
+ u64 node_name;
+ u64 port_name;
+ u32 roles;
+ bool disable;
+ enum fc_port_type vport_type; /* only FC_PORTTYPE_NPIV allowed */
+ char symbolic_name[FC_VPORT_SYMBOLIC_NAMELEN];
+};
+
+static int fc_vport_create(struct Scsi_Host *shost, int channel,
+ struct device *pdev, struct fc_vport_identifiers *ids,
+ struct fc_vport **vport);
/*
* Redefine so that we can have same named attributes in the
@@ -90,10 +119,14 @@ static struct {
{ FC_PORTTYPE_NLPORT, "NLPort (fabric via loop)" },
{ FC_PORTTYPE_LPORT, "LPort (private loop)" },
{ FC_PORTTYPE_PTP, "Point-To-Point (direct nport connection)" },
+ { FC_PORTTYPE_NPIV, "NPIV VPORT" },
};
fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
#define FC_PORTTYPE_MAX_NAMELEN 50
+/* Reuse fc_port_type enum function for vport_type */
+#define get_fc_vport_type_name get_fc_port_type_name
+
/* Convert fc_host_event_code values to ascii string name */
static const struct {
@@ -139,6 +172,29 @@ fc_enum_name_search(port_state, fc_port_state, fc_port_state_names)
#define FC_PORTSTATE_MAX_NAMELEN 20
+/* Convert fc_vport_state values to ascii string name */
+static struct {
+ enum fc_vport_state value;
+ char *name;
+} fc_vport_state_names[] = {
+ { FC_VPORT_UNKNOWN, "Unknown" },
+ { FC_VPORT_ACTIVE, "Active" },
+ { FC_VPORT_DISABLED, "Disabled" },
+ { FC_VPORT_LINKDOWN, "Linkdown" },
+ { FC_VPORT_INITIALIZING, "Initializing" },
+ { FC_VPORT_NO_FABRIC_SUPP, "No Fabric Support" },
+ { FC_VPORT_NO_FABRIC_RSCS, "No Fabric Resources" },
+ { FC_VPORT_FABRIC_LOGOUT, "Fabric Logout" },
+ { FC_VPORT_FABRIC_REJ_WWN, "Fabric Rejected WWN" },
+ { FC_VPORT_FAILED, "VPort Failed" },
+};
+fc_enum_name_search(vport_state, fc_vport_state, fc_vport_state_names)
+#define FC_VPORTSTATE_MAX_NAMELEN 24
+
+/* Reuse fc_vport_state enum function for vport_last_state */
+#define get_fc_vport_last_state_name get_fc_vport_state_name
+
+
/* Convert fc_tgtid_binding_type values to ascii string name */
static const struct {
enum fc_tgtid_binding_type value;
@@ -219,16 +275,16 @@ show_fc_fc4s (char *buf, u8 *fc4_list)
}
-/* Convert FC_RPORT_ROLE bit values to ascii string name */
+/* Convert FC_PORT_ROLE bit values to ascii string name */
static const struct {
u32 value;
char *name;
-} fc_remote_port_role_names[] = {
- { FC_RPORT_ROLE_FCP_TARGET, "FCP Target" },
- { FC_RPORT_ROLE_FCP_INITIATOR, "FCP Initiator" },
- { FC_RPORT_ROLE_IP_PORT, "IP Port" },
+} fc_port_role_names[] = {
+ { FC_PORT_ROLE_FCP_TARGET, "FCP Target" },
+ { FC_PORT_ROLE_FCP_INITIATOR, "FCP Initiator" },
+ { FC_PORT_ROLE_IP_PORT, "IP Port" },
};
-fc_bitfield_name_search(remote_port_roles, fc_remote_port_role_names)
+fc_bitfield_name_search(port_roles, fc_port_role_names)
/*
* Define roles that are specific to port_id. Values are relative to ROLE_MASK.
@@ -252,7 +308,8 @@ static void fc_scsi_scan_rport(struct work_struct *work);
*/
#define FC_STARGET_NUM_ATTRS 3
#define FC_RPORT_NUM_ATTRS 10
-#define FC_HOST_NUM_ATTRS 17
+#define FC_VPORT_NUM_ATTRS 9
+#define FC_HOST_NUM_ATTRS 21
struct fc_internal {
struct scsi_transport_template t;
@@ -278,6 +335,10 @@ struct fc_internal {
struct transport_container rport_attr_cont;
struct class_device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS];
struct class_device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1];
+
+ struct transport_container vport_attr_cont;
+ struct class_device_attribute private_vport_attrs[FC_VPORT_NUM_ATTRS];
+ struct class_device_attribute *vport_attrs[FC_VPORT_NUM_ATTRS + 1];
};
#define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t)
@@ -318,7 +379,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
struct Scsi_Host *shost = dev_to_shost(dev);
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
- /*
+ /*
* Set default values easily detected by the midlayer as
* failure cases. The scsi lldd is responsible for initializing
* all transport attributes to valid values per host.
@@ -331,6 +392,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
sizeof(fc_host->supported_fc4s));
fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN;
fc_host->maxframe_size = -1;
+ fc_host->max_npiv_vports = 0;
memset(fc_host->serial_number, 0,
sizeof(fc_host->serial_number));
@@ -348,8 +410,11 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
INIT_LIST_HEAD(&fc_host->rports);
INIT_LIST_HEAD(&fc_host->rport_bindings);
+ INIT_LIST_HEAD(&fc_host->vports);
fc_host->next_rport_number = 0;
fc_host->next_target_id = 0;
+ fc_host->next_vport_number = 0;
+ fc_host->npiv_vports_inuse = 0;
snprintf(fc_host->work_q_name, KOBJ_NAME_LEN, "fc_wq_%d",
shost->host_no);
@@ -388,6 +453,16 @@ static DECLARE_TRANSPORT_CLASS(fc_rport_class,
NULL);
/*
+ * Setup and Remove actions for virtual ports are handled
+ * in the service functions below.
+ */
+static DECLARE_TRANSPORT_CLASS(fc_vport_class,
+ "fc_vports",
+ NULL,
+ NULL,
+ NULL);
+
+/*
* Module Parameters
*/
@@ -585,6 +660,9 @@ static __init int fc_transport_init(void)
error = transport_class_register(&fc_host_class);
if (error)
return error;
+ error = transport_class_register(&fc_vport_class);
+ if (error)
+ return error;
error = transport_class_register(&fc_rport_class);
if (error)
return error;
@@ -596,6 +674,7 @@ static void __exit fc_transport_exit(void)
transport_class_unregister(&fc_transport_class);
transport_class_unregister(&fc_rport_class);
transport_class_unregister(&fc_host_class);
+ transport_class_unregister(&fc_vport_class);
}
/*
@@ -800,9 +879,9 @@ show_fc_rport_roles (struct class_device *cdev, char *buf)
return snprintf(buf, 30, "Unknown Fabric Entity\n");
}
} else {
- if (rport->roles == FC_RPORT_ROLE_UNKNOWN)
+ if (rport->roles == FC_PORT_ROLE_UNKNOWN)
return snprintf(buf, 20, "unknown\n");
- return get_fc_remote_port_roles_names(rport->roles, buf);
+ return get_fc_port_roles_names(rport->roles, buf);
}
}
static FC_CLASS_DEVICE_ATTR(rport, roles, S_IRUGO,
@@ -857,7 +936,7 @@ static FC_CLASS_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
/*
* Note: in the target show function we recognize when the remote
- * port is in the hierarchy and do not allow the driver to get
+ * port is in the hierarchy and do not allow the driver to get
* involved in sysfs functions. The driver only gets involved if
* it's the "old" style that doesn't use rports.
*/
@@ -912,6 +991,257 @@ fc_starget_rd_attr(port_id, "0x%06x\n", 20);
/*
+ * FC Virtual Port Attribute Management
+ */
+
+#define fc_vport_show_function(field, format_string, sz, cast) \
+static ssize_t \
+show_fc_vport_##field (struct class_device *cdev, char *buf) \
+{ \
+ struct fc_vport *vport = transport_class_to_vport(cdev); \
+ struct Scsi_Host *shost = vport_to_shost(vport); \
+ struct fc_internal *i = to_fc_internal(shost->transportt); \
+ if ((i->f->get_vport_##field) && \
+ !(vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))) \
+ i->f->get_vport_##field(vport); \
+ return snprintf(buf, sz, format_string, cast vport->field); \
+}
+
+#define fc_vport_store_function(field) \
+static ssize_t \
+store_fc_vport_##field(struct class_device *cdev, const char *buf, \
+ size_t count) \
+{ \
+ int val; \
+ struct fc_vport *vport = transport_class_to_vport(cdev); \
+ struct Scsi_Host *shost = vport_to_shost(vport); \
+ struct fc_internal *i = to_fc_internal(shost->transportt); \
+ char *cp; \
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \
+ return -EBUSY; \
+ val = simple_strtoul(buf, &cp, 0); \
+ if (*cp && (*cp != '\n')) \
+ return -EINVAL; \
+ i->f->set_vport_##field(vport, val); \
+ return count; \
+}
+
+#define fc_vport_store_str_function(field, slen) \
+static ssize_t \
+store_fc_vport_##field(struct class_device *cdev, const char *buf, \
+ size_t count) \
+{ \
+ struct fc_vport *vport = transport_class_to_vport(cdev); \
+ struct Scsi_Host *shost = vport_to_shost(vport); \
+ struct fc_internal *i = to_fc_internal(shost->transportt); \
+ unsigned int cnt=count; \
+ \
+ /* count may include a LF at end of string */ \
+ if (buf[cnt-1] == '\n') \
+ cnt--; \
+ if (cnt > ((slen) - 1)) \
+ return -EINVAL; \
+ memcpy(vport->field, buf, cnt); \
+ i->f->set_vport_##field(vport); \
+ return count; \
+}
+
+#define fc_vport_rd_attr(field, format_string, sz) \
+ fc_vport_show_function(field, format_string, sz, ) \
+static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO, \
+ show_fc_vport_##field, NULL)
+
+#define fc_vport_rd_attr_cast(field, format_string, sz, cast) \
+ fc_vport_show_function(field, format_string, sz, (cast)) \
+static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO, \
+ show_fc_vport_##field, NULL)
+
+#define fc_vport_rw_attr(field, format_string, sz) \
+ fc_vport_show_function(field, format_string, sz, ) \
+ fc_vport_store_function(field) \
+static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \
+ show_fc_vport_##field, \
+ store_fc_vport_##field)
+
+#define fc_private_vport_show_function(field, format_string, sz, cast) \
+static ssize_t \
+show_fc_vport_##field (struct class_device *cdev, char *buf) \
+{ \
+ struct fc_vport *vport = transport_class_to_vport(cdev); \
+ return snprintf(buf, sz, format_string, cast vport->field); \
+}
+
+#define fc_private_vport_store_u32_function(field) \
+static ssize_t \
+store_fc_vport_##field(struct class_device *cdev, const char *buf, \
+ size_t count) \
+{ \
+ u32 val; \
+ struct fc_vport *vport = transport_class_to_vport(cdev); \
+ char *cp; \
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \
+ return -EBUSY; \
+ val = simple_strtoul(buf, &cp, 0); \
+ if (*cp && (*cp != '\n')) \
+ return -EINVAL; \
+ vport->field = val; \
+ return count; \
+}
+
+
+#define fc_private_vport_rd_attr(field, format_string, sz) \
+ fc_private_vport_show_function(field, format_string, sz, ) \
+static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO, \
+ show_fc_vport_##field, NULL)
+
+#define fc_private_vport_rd_attr_cast(field, format_string, sz, cast) \
+ fc_private_vport_show_function(field, format_string, sz, (cast)) \
+static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO, \
+ show_fc_vport_##field, NULL)
+
+#define fc_private_vport_rw_u32_attr(field, format_string, sz) \
+ fc_private_vport_show_function(field, format_string, sz, ) \
+ fc_private_vport_store_u32_function(field) \
+static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \
+ show_fc_vport_##field, \
+ store_fc_vport_##field)
+
+
+#define fc_private_vport_rd_enum_attr(title, maxlen) \
+static ssize_t \
+show_fc_vport_##title (struct class_device *cdev, char *buf) \
+{ \
+ struct fc_vport *vport = transport_class_to_vport(cdev); \
+ const char *name; \
+ name = get_fc_##title##_name(vport->title); \
+ if (!name) \
+ return -EINVAL; \
+ return snprintf(buf, maxlen, "%s\n", name); \
+} \
+static FC_CLASS_DEVICE_ATTR(vport, title, S_IRUGO, \
+ show_fc_vport_##title, NULL)
+
+
+#define SETUP_VPORT_ATTRIBUTE_RD(field) \
+ i->private_vport_attrs[count] = class_device_attr_vport_##field; \
+ i->private_vport_attrs[count].attr.mode = S_IRUGO; \
+ i->private_vport_attrs[count].store = NULL; \
+ i->vport_attrs[count] = &i->private_vport_attrs[count]; \
+ if (i->f->get_##field) \
+ count++
+ /* NOTE: Above MACRO differs: checks function not show bit */
+
+#define SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(field) \
+ i->private_vport_attrs[count] = class_device_attr_vport_##field; \
+ i->private_vport_attrs[count].attr.mode = S_IRUGO; \
+ i->private_vport_attrs[count].store = NULL; \
+ i->vport_attrs[count] = &i->private_vport_attrs[count]; \
+ count++
+
+#define SETUP_VPORT_ATTRIBUTE_WR(field) \
+ i->private_vport_attrs[count] = class_device_attr_vport_##field; \
+ i->vport_attrs[count] = &i->private_vport_attrs[count]; \
+ if (i->f->field) \
+ count++
+ /* NOTE: Above MACRO differs: checks function */
+
+#define SETUP_VPORT_ATTRIBUTE_RW(field) \
+ i->private_vport_attrs[count] = class_device_attr_vport_##field; \
+ if (!i->f->set_vport_##field) { \
+ i->private_vport_attrs[count].attr.mode = S_IRUGO; \
+ i->private_vport_attrs[count].store = NULL; \
+ } \
+ i->vport_attrs[count] = &i->private_vport_attrs[count]; \
+ count++
+ /* NOTE: Above MACRO differs: does not check show bit */
+
+#define SETUP_PRIVATE_VPORT_ATTRIBUTE_RW(field) \
+{ \
+ i->private_vport_attrs[count] = class_device_attr_vport_##field; \
+ i->vport_attrs[count] = &i->private_vport_attrs[count]; \
+ count++; \
+}
+
+
+/* The FC Transport Virtual Port Attributes: */
+
+/* Fixed Virtual Port Attributes */
+
+/* Dynamic Virtual Port Attributes */
+
+/* Private Virtual Port Attributes */
+
+fc_private_vport_rd_enum_attr(vport_state, FC_VPORTSTATE_MAX_NAMELEN);
+fc_private_vport_rd_enum_attr(vport_last_state, FC_VPORTSTATE_MAX_NAMELEN);
+fc_private_vport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
+fc_private_vport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
+
+static ssize_t
+show_fc_vport_roles (struct class_device *cdev, char *buf)
+{
+ struct fc_vport *vport = transport_class_to_vport(cdev);
+
+ if (vport->roles == FC_PORT_ROLE_UNKNOWN)
+ return snprintf(buf, 20, "unknown\n");
+ return get_fc_port_roles_names(vport->roles, buf);
+}
+static FC_CLASS_DEVICE_ATTR(vport, roles, S_IRUGO, show_fc_vport_roles, NULL);
+
+fc_private_vport_rd_enum_attr(vport_type, FC_PORTTYPE_MAX_NAMELEN);
+
+fc_private_vport_show_function(symbolic_name, "%s\n",
+ FC_VPORT_SYMBOLIC_NAMELEN + 1, )
+fc_vport_store_str_function(symbolic_name, FC_VPORT_SYMBOLIC_NAMELEN)
+static FC_CLASS_DEVICE_ATTR(vport, symbolic_name, S_IRUGO | S_IWUSR,
+ show_fc_vport_symbolic_name, store_fc_vport_symbolic_name);
+
+static ssize_t
+store_fc_vport_delete(struct class_device *cdev, const char *buf,
+ size_t count)
+{
+ struct fc_vport *vport = transport_class_to_vport(cdev);
+ struct Scsi_Host *shost = vport_to_shost(vport);
+
+ fc_queue_work(shost, &vport->vport_delete_work);
+ return count;
+}
+static FC_CLASS_DEVICE_ATTR(vport, vport_delete, S_IWUSR,
+ NULL, store_fc_vport_delete);
+
+
+/*
+ * Enable/Disable vport
+ * Write "1" to disable, write "0" to enable
+ */
+static ssize_t
+store_fc_vport_disable(struct class_device *cdev, const char *buf,
+ size_t count)
+{
+ struct fc_vport *vport = transport_class_to_vport(cdev);
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ int stat;
+
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
+ return -EBUSY;
+
+ if (*buf == '0') {
+ if (vport->vport_state != FC_VPORT_DISABLED)
+ return -EALREADY;
+ } else if (*buf == '1') {
+ if (vport->vport_state == FC_VPORT_DISABLED)
+ return -EALREADY;
+ } else
+ return -EINVAL;
+
+ stat = i->f->vport_disable(vport, ((*buf == '0') ? false : true));
+ return stat ? stat : count;
+}
+static FC_CLASS_DEVICE_ATTR(vport, vport_disable, S_IWUSR,
+ NULL, store_fc_vport_disable);
+
+
+/*
* Host Attribute Management
*/
@@ -1003,6 +1333,13 @@ static FC_CLASS_DEVICE_ATTR(host, title, S_IRUGO, show_fc_host_##title, NULL)
if (i->f->show_host_##field) \
count++
+#define SETUP_HOST_ATTRIBUTE_RD_NS(field) \
+ i->private_host_attrs[count] = class_device_attr_host_##field; \
+ i->private_host_attrs[count].attr.mode = S_IRUGO; \
+ i->private_host_attrs[count].store = NULL; \
+ i->host_attrs[count] = &i->private_host_attrs[count]; \
+ count++
+
#define SETUP_HOST_ATTRIBUTE_RW(field) \
i->private_host_attrs[count] = class_device_attr_host_##field; \
if (!i->f->set_host_##field) { \
@@ -1090,6 +1427,7 @@ fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20,
unsigned long long);
fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20);
+fc_private_host_rd_attr(max_npiv_vports, "%u\n", 20);
fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1));
@@ -1210,6 +1548,9 @@ store_fc_private_host_issue_lip(struct class_device *cdev,
static FC_CLASS_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL,
store_fc_private_host_issue_lip);
+fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20);
+
+
/*
* Host Statistics Management
*/
@@ -1285,7 +1626,6 @@ fc_reset_statistics(struct class_device *cdev, const char *buf,
static FC_CLASS_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL,
fc_reset_statistics);
-
static struct attribute *fc_statistics_attrs[] = {
&class_device_attr_host_seconds_since_last_reset.attr,
&class_device_attr_host_tx_frames.attr,
@@ -1316,6 +1656,142 @@ static struct attribute_group fc_statistics_group = {
.attrs = fc_statistics_attrs,
};
+
+/* Host Vport Attributes */
+
+static int
+fc_parse_wwn(const char *ns, u64 *nm)
+{
+ unsigned int i, j;
+ u8 wwn[8];
+
+ memset(wwn, 0, sizeof(wwn));
+
+ /* Validate and store the new name */
+ for (i=0, j=0; i < 16; i++) {
+ if ((*ns >= 'a') && (*ns <= 'f'))
+ j = ((j << 4) | ((*ns++ -'a') + 10));
+ else if ((*ns >= 'A') && (*ns <= 'F'))
+ j = ((j << 4) | ((*ns++ -'A') + 10));
+ else if ((*ns >= '0') && (*ns <= '9'))
+ j = ((j << 4) | (*ns++ -'0'));
+ else
+ return -EINVAL;
+ if (i % 2) {
+ wwn[i/2] = j & 0xff;
+ j = 0;
+ }
+ }
+
+ *nm = wwn_to_u64(wwn);
+
+ return 0;
+}
+
+
+/*
+ * "Short-cut" sysfs variable to create a new vport on a FC Host.
+ * Input is a string of the form "<WWPN>:<WWNN>". Other attributes
+ * will default to an NPIV-based FCP_Initiator. The WWNs are specified
+ * as hex characters, and may *not* contain any prefixes (e.g. 0x, x, etc)
+ */
+static ssize_t
+store_fc_host_vport_create(struct class_device *cdev, const char *buf,
+ size_t count)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(cdev);
+ struct fc_vport_identifiers vid;
+ struct fc_vport *vport;
+ unsigned int cnt=count;
+ int stat;
+
+ memset(&vid, 0, sizeof(vid));
+
+ /* count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
+ /* validate we have enough characters for WWPN:WWNN */
+ if ((cnt != (16+1+16)) || (buf[16] != ':'))
+ return -EINVAL;
+
+ stat = fc_parse_wwn(&buf[0], &vid.port_name);
+ if (stat)
+ return stat;
+
+ stat = fc_parse_wwn(&buf[17], &vid.node_name);
+ if (stat)
+ return stat;
+
+ vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
+ vid.vport_type = FC_PORTTYPE_NPIV;
+ /* vid.symbolic_name is already zero/NULL's */
+ vid.disable = false; /* always enabled */
+
+ /* we only allow support on Channel 0 !!! */
+ stat = fc_vport_create(shost, 0, &shost->shost_gendev, &vid, &vport);
+ return stat ? stat : count;
+}
+static FC_CLASS_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
+ store_fc_host_vport_create);
+
+
+/*
+ * "Short-cut" sysfs variable to delete a vport on a FC Host.
+ * Vport is identified by a string containing "<WWPN>:<WWNN>".
+ * The WWNs are specified as hex characters, and may *not* contain
+ * any prefixes (e.g. 0x, x, etc)
+ */
+static ssize_t
+store_fc_host_vport_delete(struct class_device *cdev, const char *buf,
+ size_t count)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(cdev);
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ struct fc_vport *vport;
+ u64 wwpn, wwnn;
+ unsigned long flags;
+ unsigned int cnt=count;
+ int stat, match;
+
+ /* count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
+ /* validate we have enough characters for WWPN:WWNN */
+ if ((cnt != (16+1+16)) || (buf[16] != ':'))
+ return -EINVAL;
+
+ stat = fc_parse_wwn(&buf[0], &wwpn);
+ if (stat)
+ return stat;
+
+ stat = fc_parse_wwn(&buf[17], &wwnn);
+ if (stat)
+ return stat;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ match = 0;
+ /* we only allow support on Channel 0 !!! */
+ list_for_each_entry(vport, &fc_host->vports, peers) {
+ if ((vport->channel == 0) &&
+ (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
+ match = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (!match)
+ return -ENODEV;
+
+ stat = fc_vport_terminate(vport);
+ return stat ? stat : count;
+}
+static FC_CLASS_DEVICE_ATTR(host, vport_delete, S_IWUSR, NULL,
+ store_fc_host_vport_delete);
+
+
static int fc_host_match(struct attribute_container *cont,
struct device *dev)
{
@@ -1387,6 +1863,40 @@ static int fc_rport_match(struct attribute_container *cont,
}
+static void fc_vport_dev_release(struct device *dev)
+{
+ struct fc_vport *vport = dev_to_vport(dev);
+ put_device(dev->parent); /* release kobj parent */
+ kfree(vport);
+}
+
+int scsi_is_fc_vport(const struct device *dev)
+{
+ return dev->release == fc_vport_dev_release;
+}
+EXPORT_SYMBOL(scsi_is_fc_vport);
+
+static int fc_vport_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct fc_vport *vport;
+ struct Scsi_Host *shost;
+ struct fc_internal *i;
+
+ if (!scsi_is_fc_vport(dev))
+ return 0;
+ vport = dev_to_vport(dev);
+
+ shost = vport_to_shost(vport);
+ if (!shost->transportt || shost->transportt->host_attrs.ac.class
+ != &fc_host_class.class)
+ return 0;
+
+ i = to_fc_internal(shost->transportt);
+ return &i->vport_attr_cont.ac == cont;
+}
+
+
/**
* fc_timed_out - FC Transport I/O timeout intercept handler
*
@@ -1433,6 +1943,9 @@ static int fc_user_scan(struct Scsi_Host *shost, uint channel,
if (rport->scsi_target_id == -1)
continue;
+ if (rport->port_state != FC_PORTSTATE_ONLINE)
+ continue;
+
if ((channel == SCAN_WILD_CARD || channel == rport->channel) &&
(id == SCAN_WILD_CARD || id == rport->scsi_target_id)) {
scsi_scan_target(&rport->dev, rport->channel,
@@ -1472,6 +1985,11 @@ fc_attach_transport(struct fc_function_template *ft)
i->rport_attr_cont.ac.match = fc_rport_match;
transport_container_register(&i->rport_attr_cont);
+ i->vport_attr_cont.ac.attrs = &i->vport_attrs[0];
+ i->vport_attr_cont.ac.class = &fc_vport_class.class;
+ i->vport_attr_cont.ac.match = fc_vport_match;
+ transport_container_register(&i->vport_attr_cont);
+
i->f = ft;
/* Transport uses the shost workq for scsi scanning */
@@ -1480,7 +1998,7 @@ fc_attach_transport(struct fc_function_template *ft)
i->t.eh_timed_out = fc_timed_out;
i->t.user_scan = fc_user_scan;
-
+
/*
* Setup SCSI Target Attributes.
*/
@@ -1505,6 +2023,10 @@ fc_attach_transport(struct fc_function_template *ft)
SETUP_HOST_ATTRIBUTE_RD(supported_fc4s);
SETUP_HOST_ATTRIBUTE_RD(supported_speeds);
SETUP_HOST_ATTRIBUTE_RD(maxframe_size);
+ if (ft->vport_create) {
+ SETUP_HOST_ATTRIBUTE_RD_NS(max_npiv_vports);
+ SETUP_HOST_ATTRIBUTE_RD_NS(npiv_vports_inuse);
+ }
SETUP_HOST_ATTRIBUTE_RD(serial_number);
SETUP_HOST_ATTRIBUTE_RD(port_id);
@@ -1520,6 +2042,10 @@ fc_attach_transport(struct fc_function_template *ft)
SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type);
if (ft->issue_fc_host_lip)
SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip);
+ if (ft->vport_create)
+ SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_create);
+ if (ft->vport_delete)
+ SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_delete);
BUG_ON(count > FC_HOST_NUM_ATTRS);
@@ -1545,6 +2071,24 @@ fc_attach_transport(struct fc_function_template *ft)
i->rport_attrs[count] = NULL;
+ /*
+ * Setup Virtual Port Attributes.
+ */
+ count=0;
+ SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_state);
+ SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_last_state);
+ SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(node_name);
+ SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(port_name);
+ SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(roles);
+ SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_type);
+ SETUP_VPORT_ATTRIBUTE_RW(symbolic_name);
+ SETUP_VPORT_ATTRIBUTE_WR(vport_delete);
+ SETUP_VPORT_ATTRIBUTE_WR(vport_disable);
+
+ BUG_ON(count > FC_VPORT_NUM_ATTRS);
+
+ i->vport_attrs[count] = NULL;
+
return &i->t;
}
EXPORT_SYMBOL(fc_attach_transport);
@@ -1556,6 +2100,7 @@ void fc_release_transport(struct scsi_transport_template *t)
transport_container_unregister(&i->t.target_attrs);
transport_container_unregister(&i->t.host_attrs);
transport_container_unregister(&i->rport_attr_cont);
+ transport_container_unregister(&i->vport_attr_cont);
kfree(i);
}
@@ -1667,9 +2212,17 @@ fc_flush_devloss(struct Scsi_Host *shost)
void
fc_remove_host(struct Scsi_Host *shost)
{
- struct fc_rport *rport, *next_rport;
+ struct fc_vport *vport = NULL, *next_vport = NULL;
+ struct fc_rport *rport = NULL, *next_rport = NULL;
struct workqueue_struct *work_q;
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+
+ /* Remove any vports */
+ list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers)
+ fc_queue_work(shost, &vport->vport_delete_work);
/* Remove any remote ports */
list_for_each_entry_safe(rport, next_rport,
@@ -1686,6 +2239,8 @@ fc_remove_host(struct Scsi_Host *shost)
fc_queue_work(shost, &rport->rport_delete_work);
}
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
/* flush all scan work items */
scsi_flush_work(shost);
@@ -1744,7 +2299,7 @@ fc_rport_final_delete(struct work_struct *work)
unsigned long flags;
/*
- * if a scan is pending, flush the SCSI Host work_q so that
+ * if a scan is pending, flush the SCSI Host work_q so
* that we can reclaim the rport scan work element.
*/
if (rport->flags & FC_RPORT_SCAN_PENDING)
@@ -1844,7 +2399,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
spin_lock_irqsave(shost->host_lock, flags);
rport->number = fc_host->next_rport_number++;
- if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
+ if (rport->roles & FC_PORT_ROLE_FCP_TARGET)
rport->scsi_target_id = fc_host->next_target_id++;
else
rport->scsi_target_id = -1;
@@ -1869,7 +2424,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
transport_add_device(dev);
transport_configure_device(dev);
- if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) {
+ if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
/* initiate a scan of the target */
rport->flags |= FC_RPORT_SCAN_PENDING;
scsi_queue_work(shost, &rport->scan_work);
@@ -2003,7 +2558,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
/* was a target, not in roles */
if ((rport->scsi_target_id != -1) &&
- (!(ids->roles & FC_RPORT_ROLE_FCP_TARGET)))
+ (!(ids->roles & FC_PORT_ROLE_FCP_TARGET)))
return rport;
/*
@@ -2086,7 +2641,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
memset(rport->dd_data, 0,
fci->f->dd_fcrport_size);
- if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) {
+ if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
/* initiate a scan of the target */
rport->flags |= FC_RPORT_SCAN_PENDING;
scsi_queue_work(shost, &rport->scan_work);
@@ -2243,11 +2798,11 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
int create = 0;
spin_lock_irqsave(shost->host_lock, flags);
- if (roles & FC_RPORT_ROLE_FCP_TARGET) {
+ if (roles & FC_PORT_ROLE_FCP_TARGET) {
if (rport->scsi_target_id == -1) {
rport->scsi_target_id = fc_host->next_target_id++;
create = 1;
- } else if (!(rport->roles & FC_RPORT_ROLE_FCP_TARGET))
+ } else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET))
create = 1;
}
@@ -2294,7 +2849,7 @@ EXPORT_SYMBOL(fc_remote_port_rolechg);
* fc_timeout_deleted_rport - Timeout handler for a deleted remote port,
* which we blocked, and has now failed to return
* in the allotted time.
- *
+ *
* @work: rport target that failed to reappear in the allotted time.
**/
static void
@@ -2317,7 +2872,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
*/
if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
(rport->scsi_target_id != -1) &&
- !(rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
+ !(rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
dev_printk(KERN_ERR, &rport->dev,
"blocked FC remote port time out: no longer"
" a FCP target, removing starget\n");
@@ -2367,7 +2922,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
*/
rport->maxframe_size = -1;
rport->supported_classes = FC_COS_UNSPECIFIED;
- rport->roles = FC_RPORT_ROLE_UNKNOWN;
+ rport->roles = FC_PORT_ROLE_UNKNOWN;
rport->port_state = FC_PORTSTATE_NOTPRESENT;
/* remove the identifiers that aren't used in the consistent binding */
@@ -2436,7 +2991,7 @@ fc_scsi_scan_rport(struct work_struct *work)
unsigned long flags;
if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
- (rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
+ (rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
scsi_scan_target(&rport->dev, rport->channel,
rport->scsi_target_id, SCAN_WILD_CARD, 1);
}
@@ -2447,7 +3002,227 @@ fc_scsi_scan_rport(struct work_struct *work)
}
-MODULE_AUTHOR("Martin Hicks");
+/**
+ * fc_vport_create - allocates and creates a FC virtual port.
+ * @shost: scsi host the virtual port is connected to.
+ * @channel: Channel on shost the vport is connected to.
+ * @pdev: parent device for vport
+ * @ids: The world wide names, FC4 port roles, etc for
+ * the virtual port.
+ * @ret_vport: The pointer to the created vport.
+ *
+ * Allocates and creates the vport structure, calls the parent host
+ * to instantiate the vport, then completes with class and sysfs creation.
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ **/
+static int
+fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
+ struct fc_vport_identifiers *ids, struct fc_vport **ret_vport)
+{
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ struct fc_internal *fci = to_fc_internal(shost->transportt);
+ struct fc_vport *vport;
+ struct device *dev;
+ unsigned long flags;
+ size_t size;
+ int error;
+
+ *ret_vport = NULL;
+
+ if (!fci->f->vport_create)
+ return -ENOENT;
+
+ size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size);
+ vport = kzalloc(size, GFP_KERNEL);
+ if (unlikely(!vport)) {
+ printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__);
+ return -ENOMEM;
+ }
+
+ vport->vport_state = FC_VPORT_UNKNOWN;
+ vport->vport_last_state = FC_VPORT_UNKNOWN;
+ vport->node_name = ids->node_name;
+ vport->port_name = ids->port_name;
+ vport->roles = ids->roles;
+ vport->vport_type = ids->vport_type;
+ if (fci->f->dd_fcvport_size)
+ vport->dd_data = &vport[1];
+ vport->shost = shost;
+ vport->channel = channel;
+ vport->flags = FC_VPORT_CREATING;
+ INIT_WORK(&vport->vport_delete_work, fc_vport_sched_delete);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+
+ if (fc_host->npiv_vports_inuse >= fc_host->max_npiv_vports) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ kfree(vport);
+ return -ENOSPC;
+ }
+ fc_host->npiv_vports_inuse++;
+ vport->number = fc_host->next_vport_number++;
+ list_add_tail(&vport->peers, &fc_host->vports);
+ get_device(&shost->shost_gendev); /* for fc_host->vport list */
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ dev = &vport->dev;
+ device_initialize(dev); /* takes self reference */
+ dev->parent = get_device(pdev); /* takes parent reference */
+ dev->release = fc_vport_dev_release;
+ sprintf(dev->bus_id, "vport-%d:%d-%d",
+ shost->host_no, channel, vport->number);
+ transport_setup_device(dev);
+
+ error = device_add(dev);
+ if (error) {
+ printk(KERN_ERR "FC Virtual Port device_add failed\n");
+ goto delete_vport;
+ }
+ transport_add_device(dev);
+ transport_configure_device(dev);
+
+ error = fci->f->vport_create(vport, ids->disable);
+ if (error) {
+ printk(KERN_ERR "FC Virtual Port LLDD Create failed\n");
+ goto delete_vport_all;
+ }
+
+ /*
+ * if the parent isn't the physical adapter's Scsi_Host, ensure
+ * the Scsi_Host at least contains a symlink to the vport.
+ */
+ if (pdev != &shost->shost_gendev) {
+ error = sysfs_create_link(&shost->shost_gendev.kobj,
+ &dev->kobj, dev->bus_id);
+ if (error)
+ printk(KERN_ERR
+ "%s: Cannot create vport symlinks for "
+ "%s, err=%d\n",
+ __FUNCTION__, dev->bus_id, error);
+ }
+ spin_lock_irqsave(shost->host_lock, flags);
+ vport->flags &= ~FC_VPORT_CREATING;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ dev_printk(KERN_NOTICE, pdev,
+ "%s created via shost%d channel %d\n", dev->bus_id,
+ shost->host_no, channel);
+
+ *ret_vport = vport;
+
+ return 0;
+
+delete_vport_all:
+ transport_remove_device(dev);
+ device_del(dev);
+delete_vport:
+ transport_destroy_device(dev);
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_del(&vport->peers);
+ put_device(&shost->shost_gendev); /* for fc_host->vport list */
+ fc_host->npiv_vports_inuse--;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ put_device(dev->parent);
+ kfree(vport);
+
+ return error;
+}
+
+
+/**
+ * fc_vport_terminate - Admin App or LLDD requests termination of a vport
+ * @vport: fc_vport to be terminated
+ *
+ * Calls the LLDD vport_delete() function, then deallocates and removes
+ * the vport from the shost and object tree.
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ **/
+int
+fc_vport_terminate(struct fc_vport *vport)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct device *dev = &vport->dev;
+ unsigned long flags;
+ int stat;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (vport->flags & FC_VPORT_CREATING) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return -EBUSY;
+ }
+ if (vport->flags & (FC_VPORT_DEL)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return -EALREADY;
+ }
+ vport->flags |= FC_VPORT_DELETING;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (i->f->vport_delete)
+ stat = i->f->vport_delete(vport);
+ else
+ stat = -ENOENT;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ vport->flags &= ~FC_VPORT_DELETING;
+ if (!stat) {
+ vport->flags |= FC_VPORT_DELETED;
+ list_del(&vport->peers);
+ fc_host->npiv_vports_inuse--;
+ put_device(&shost->shost_gendev); /* for fc_host->vport list */
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (stat)
+ return stat;
+
+ if (dev->parent != &shost->shost_gendev)
+ sysfs_remove_link(&shost->shost_gendev.kobj, dev->bus_id);
+ transport_remove_device(dev);
+ device_del(dev);
+ transport_destroy_device(dev);
+
+ /*
+ * Removing our self-reference should mean our
+ * release function gets called, which will drop the remaining
+ * parent reference and free the data structure.
+ */
+ put_device(dev); /* for self-reference */
+
+ return 0; /* SUCCESS */
+}
+EXPORT_SYMBOL(fc_vport_terminate);
+
+/**
+ * fc_vport_sched_delete - workq-based delete request for a vport
+ *
+ * @work: vport to be deleted.
+ **/
+static void
+fc_vport_sched_delete(struct work_struct *work)
+{
+ struct fc_vport *vport =
+ container_of(work, struct fc_vport, vport_delete_work);
+ int stat;
+
+ stat = fc_vport_terminate(vport);
+ if (stat)
+ dev_printk(KERN_ERR, vport->dev.parent,
+ "%s: %s could not be deleted created via "
+ "shost%d channel %d - error %d\n", __FUNCTION__,
+ vport->dev.bus_id, vport->shost->host_no,
+ vport->channel, stat);
+}
+
+
+/* Original Author: Martin Hicks */
+MODULE_AUTHOR("James Smart");
MODULE_DESCRIPTION("FC Transport Attributes");
MODULE_LICENSE("GPL");
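
[Editor's illustration, not part of this commit: the hunk above adds the NPIV virtual-port plumbing to the FC transport class. A low-level driver might hook into it roughly as sketched below. The foo_* names and struct foo_vport are hypothetical; only the callback shapes follow the fc_vport_create()/fc_vport_terminate() call sites and the dd_fcvport_size handling visible in this diff.]

#include <linux/types.h>
#include <scsi/scsi_transport_fc.h>

struct foo_vport {			/* hypothetical per-vport LLDD data */
	u64 wwpn;
	u64 wwnn;
};

/* invoked from fc_vport_create() as fci->f->vport_create(vport, ids->disable) */
static int foo_vport_create(struct fc_vport *vport, bool disable)
{
	struct foo_vport *fv = vport->dd_data;	/* dd_fcvport_size bytes, zeroed */

	fv->wwpn = vport->port_name;
	fv->wwnn = vport->node_name;

	/* a real LLDD would perform the NPIV fabric login here and,
	 * unless 'disable' is set, bring the virtual N_Port online */
	return 0;
}

/* invoked from fc_vport_terminate() as i->f->vport_delete(vport) */
static int foo_vport_delete(struct fc_vport *vport)
{
	/* a real LLDD would log the virtual N_Port out of the fabric */
	return 0;
}

static struct fc_function_template foo_fc_functions = {
	/* ... existing rport/host handlers ... */
	.vport_create	 = foo_vport_create,
	.vport_delete	 = foo_vport_delete,
	.dd_fcvport_size = sizeof(struct foo_vport),
};

[With vport_create set, fc_attach_transport() above also exposes the max_npiv_vports/npiv_vports_inuse host attributes and the vport_create/vport_delete sysfs controls.]
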
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index caf1836bbec..34c1860a259 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -30,9 +30,9 @@
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/iscsi_if.h>
-#define ISCSI_SESSION_ATTRS 11
+#define ISCSI_SESSION_ATTRS 15
#define ISCSI_CONN_ATTRS 11
-#define ISCSI_HOST_ATTRS 0
+#define ISCSI_HOST_ATTRS 4
#define ISCSI_TRANSPORT_VERSION "2.0-724"
struct iscsi_internal {
@@ -609,12 +609,10 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
int t = done ? NLMSG_DONE : type;
skb = alloc_skb(len, GFP_ATOMIC);
- /*
- * FIXME:
- * user is supposed to react on iferror == -ENOMEM;
- * see iscsi_if_rx().
- */
- BUG_ON(!skb);
+ if (!skb) {
+ printk(KERN_ERR "Could not allocate skb to send reply.\n");
+ return -ENOMEM;
+ }
nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0);
nlh->nlmsg_flags = flags;
@@ -816,6 +814,8 @@ iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
uint32_t hostno;
session = transport->create_session(transport, &priv->t,
+ ev->u.c_session.cmds_max,
+ ev->u.c_session.queue_depth,
ev->u.c_session.initial_cmdsn,
&hostno);
if (!session)
@@ -947,15 +947,50 @@ static int
iscsi_tgt_dscvr(struct iscsi_transport *transport,
struct iscsi_uevent *ev)
{
+ struct Scsi_Host *shost;
struct sockaddr *dst_addr;
+ int err;
if (!transport->tgt_dscvr)
return -EINVAL;
+ shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no);
+ if (IS_ERR(shost)) {
+ printk(KERN_ERR "target discovery could not find host no %u\n",
+ ev->u.tgt_dscvr.host_no);
+ return -ENODEV;
+ }
+
+
dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
- return transport->tgt_dscvr(ev->u.tgt_dscvr.type,
- ev->u.tgt_dscvr.host_no,
- ev->u.tgt_dscvr.enable, dst_addr);
+ err = transport->tgt_dscvr(shost, ev->u.tgt_dscvr.type,
+ ev->u.tgt_dscvr.enable, dst_addr);
+ scsi_host_put(shost);
+ return err;
+}
+
+static int
+iscsi_set_host_param(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+{
+ char *data = (char*)ev + sizeof(*ev);
+ struct Scsi_Host *shost;
+ int err;
+
+ if (!transport->set_host_param)
+ return -ENOSYS;
+
+ shost = scsi_host_lookup(ev->u.set_host_param.host_no);
+ if (IS_ERR(shost)) {
+ printk(KERN_ERR "set_host_param could not find host no %u\n",
+ ev->u.set_host_param.host_no);
+ return -ENODEV;
+ }
+
+ err = transport->set_host_param(shost, ev->u.set_host_param.param,
+ data, ev->u.set_host_param.len);
+ scsi_host_put(shost);
+ return err;
}
static int
@@ -1049,8 +1084,11 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
case ISCSI_UEVENT_TGT_DSCVR:
err = iscsi_tgt_dscvr(transport, ev);
break;
+ case ISCSI_UEVENT_SET_HOST_PARAM:
+ err = iscsi_set_host_param(transport, ev);
+ break;
default:
- err = -EINVAL;
+ err = -ENOSYS;
break;
}
@@ -1160,30 +1198,37 @@ iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
/*
* iSCSI session attrs
*/
-#define iscsi_session_attr_show(param) \
+#define iscsi_session_attr_show(param, perm) \
static ssize_t \
show_session_param_##param(struct class_device *cdev, char *buf) \
{ \
struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
struct iscsi_transport *t = session->transport; \
+ \
+ if (perm && !capable(CAP_SYS_ADMIN)) \
+ return -EACCES; \
return t->get_session_param(session, param, buf); \
}
-#define iscsi_session_attr(field, param) \
- iscsi_session_attr_show(param) \
+#define iscsi_session_attr(field, param, perm) \
+ iscsi_session_attr_show(param, perm) \
static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \
NULL);
-iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME);
-iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN);
-iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T);
-iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN);
-iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST);
-iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST);
-iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN);
-iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN);
-iscsi_session_attr(erl, ISCSI_PARAM_ERL);
-iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT);
+iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME, 0);
+iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, 0);
+iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, 0);
+iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN, 0);
+iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST, 0);
+iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST, 0);
+iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN, 0);
+iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN, 0);
+iscsi_session_attr(erl, ISCSI_PARAM_ERL, 0);
+iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT, 0);
+iscsi_session_attr(username, ISCSI_PARAM_USERNAME, 1);
+iscsi_session_attr(username_in, ISCSI_PARAM_USERNAME_IN, 1);
+iscsi_session_attr(password, ISCSI_PARAM_PASSWORD, 1);
+iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
#define iscsi_priv_session_attr_show(field, format) \
static ssize_t \
@@ -1199,6 +1244,28 @@ static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO, show_priv_session_##field, \
NULL)
iscsi_priv_session_attr(recovery_tmo, "%d");
+/*
+ * iSCSI host attrs
+ */
+#define iscsi_host_attr_show(param) \
+static ssize_t \
+show_host_param_##param(struct class_device *cdev, char *buf) \
+{ \
+ struct Scsi_Host *shost = transport_class_to_shost(cdev); \
+ struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \
+ return priv->iscsi_transport->get_host_param(shost, param, buf); \
+}
+
+#define iscsi_host_attr(field, param) \
+ iscsi_host_attr_show(param) \
+static ISCSI_CLASS_ATTR(host, field, S_IRUGO, show_host_param_##param, \
+ NULL);
+
+iscsi_host_attr(netdev, ISCSI_HOST_PARAM_NETDEV_NAME);
+iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS);
+iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS);
+iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
+
#define SETUP_PRIV_SESSION_RD_ATTR(field) \
do { \
priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \
@@ -1222,6 +1289,14 @@ do { \
} \
} while (0)
+#define SETUP_HOST_RD_ATTR(field, param_flag) \
+do { \
+ if (tt->host_param_mask & param_flag) { \
+ priv->host_attrs[count] = &class_device_attr_host_##field; \
+ count++; \
+ } \
+} while (0)
+
static int iscsi_session_match(struct attribute_container *cont,
struct device *dev)
{
@@ -1323,9 +1398,16 @@ iscsi_register_transport(struct iscsi_transport *tt)
priv->t.host_attrs.ac.class = &iscsi_host_class.class;
priv->t.host_attrs.ac.match = iscsi_host_match;
priv->t.host_size = sizeof(struct iscsi_host);
- priv->host_attrs[0] = NULL;
transport_container_register(&priv->t.host_attrs);
+ SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
+ SETUP_HOST_RD_ATTR(ipaddress, ISCSI_HOST_IPADDRESS);
+ SETUP_HOST_RD_ATTR(hwaddress, ISCSI_HOST_HWADDRESS);
+ SETUP_HOST_RD_ATTR(initiatorname, ISCSI_HOST_INITIATOR_NAME);
+ BUG_ON(count > ISCSI_HOST_ATTRS);
+ priv->host_attrs[count] = NULL;
+ count = 0;
+
/* connection parameters */
priv->conn_cont.ac.attrs = &priv->conn_attrs[0];
priv->conn_cont.ac.class = &iscsi_connection_class.class;
@@ -1364,6 +1446,10 @@ iscsi_register_transport(struct iscsi_transport *tt)
SETUP_SESSION_RD_ATTR(erl, ISCSI_ERL);
SETUP_SESSION_RD_ATTR(targetname, ISCSI_TARGET_NAME);
SETUP_SESSION_RD_ATTR(tpgt, ISCSI_TPGT);
+ SETUP_SESSION_RD_ATTR(username, ISCSI_USERNAME);
+ SETUP_SESSION_RD_ATTR(username_in, ISCSI_USERNAME_IN);
+ SETUP_SESSION_RD_ATTR(password, ISCSI_PASSWORD);
+ SETUP_SESSION_RD_ATTR(password_in, ISCSI_PASSWORD_IN);
SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
BUG_ON(count > ISCSI_SESSION_ATTRS);
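
[Editor's illustration, not part of this commit: the new host-parameter plumbing above is consumed by a transport driver roughly as in this sketch. struct foo_host and its fields are hypothetical; the prototype and mask usage mirror the get_host_param() call in the iscsi_host_attr() macros and the host_param_mask test in SETUP_HOST_RD_ATTR().]

#include <scsi/scsi_host.h>
#include <scsi/iscsi_if.h>
#include <scsi/scsi_transport_iscsi.h>

struct foo_host {			/* hypothetical per-host private data */
	char initiator_name[224];
	char netdev_name[16];
};

static int foo_get_host_param(struct Scsi_Host *shost,
			      enum iscsi_host_param param, char *buf)
{
	struct foo_host *fh = shost_priv(shost);

	switch (param) {
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		return sprintf(buf, "%s\n", fh->netdev_name);
	case ISCSI_HOST_PARAM_INITIATOR_NAME:
		return sprintf(buf, "%s\n", fh->initiator_name);
	default:
		return -ENOSYS;
	}
}

static struct iscsi_transport foo_iscsi_transport = {
	/* ... */
	.host_param_mask = ISCSI_HOST_NETDEV_NAME | ISCSI_HOST_INITIATOR_NAME,
	.get_host_param	 = foo_get_host_param,
};

[Only the attributes whose bits are set in host_param_mask are registered, so userspace sees exactly the parameters the driver can report.]
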
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3d8c9cb24f9..448d316f12d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1515,7 +1515,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (!scsi_device_online(sdp))
goto out;
- buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL | __GFP_DMA);
+ buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
if (!buffer) {
sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
"allocation failure.\n");
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0c691a60a75..85d38940a6c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1842,7 +1842,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
int blk_size = buff_size;
struct page *p = NULL;
- if ((blk_size < 0) || (!sfp))
+ if (blk_size < 0)
return -EFAULT;
if (0 == blk_size)
++blk_size; /* don't know why */
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index a7dfb65fb84..0a6b45b1b00 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -84,7 +84,7 @@ static int __init snirm710_probe(struct platform_device *dev)
hostdata->dev = &dev->dev;
dma_set_mask(&dev->dev, DMA_32BIT_MASK);
- hostdata->base = ioremap_nocache(CPHYSADDR(base), 0x100);
+ hostdata->base = ioremap_nocache(base, 0x100);
hostdata->differential = 0;
hostdata->clock = SNIRM710_CLOCK;
@@ -141,13 +141,7 @@ static struct platform_driver snirm710_driver = {
static int __init snirm710_init(void)
{
- int err;
-
- if ((err = platform_driver_register(&snirm710_driver))) {
- printk(KERN_ERR "Driver registration failed\n");
- return err;
- }
- return 0;
+ return platform_driver_register(&snirm710_driver);
}
static void __exit snirm710_exit(void)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index f9a52af7f5b..5143c899084 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -885,7 +885,11 @@ static int __init init_sr(void)
rc = register_blkdev(SCSI_CDROM_MAJOR, "sr");
if (rc)
return rc;
- return scsi_register_driver(&sr_template.gendrv);
+ rc = scsi_register_driver(&sr_template.gendrv);
+ if (rc)
+ unregister_blkdev(SCSI_CDROM_MAJOR, "sr");
+
+ return rc;
}
static void __exit exit_sr(void)
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 9ac83abc402..72f6d801535 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -395,53 +395,34 @@ static struct req_msg *stex_alloc_req(struct st_hba *hba)
static int stex_map_sg(struct st_hba *hba,
struct req_msg *req, struct st_ccb *ccb)
{
- struct pci_dev *pdev = hba->pdev;
struct scsi_cmnd *cmd;
- dma_addr_t dma_handle;
- struct scatterlist *src;
+ struct scatterlist *sg;
struct st_sgtable *dst;
- int i;
+ int i, nseg;
cmd = ccb->cmd;
dst = (struct st_sgtable *)req->variable;
dst->max_sg_count = cpu_to_le16(ST_MAX_SG);
- dst->sz_in_byte = cpu_to_le32(cmd->request_bufflen);
-
- if (cmd->use_sg) {
- int n_elem;
+ dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));
- src = (struct scatterlist *) cmd->request_buffer;
- n_elem = pci_map_sg(pdev, src,
- cmd->use_sg, cmd->sc_data_direction);
- if (n_elem <= 0)
- return -EIO;
+ nseg = scsi_dma_map(cmd);
+ if (nseg < 0)
+ return -EIO;
+ if (nseg) {
+ ccb->sg_count = nseg;
+ dst->sg_count = cpu_to_le16((u16)nseg);
- ccb->sg_count = n_elem;
- dst->sg_count = cpu_to_le16((u16)n_elem);
-
- for (i = 0; i < n_elem; i++, src++) {
- dst->table[i].count = cpu_to_le32((u32)sg_dma_len(src));
+ scsi_for_each_sg(cmd, sg, nseg, i) {
+ dst->table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
dst->table[i].addr =
- cpu_to_le32(sg_dma_address(src) & 0xffffffff);
+ cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
dst->table[i].addr_hi =
- cpu_to_le32((sg_dma_address(src) >> 16) >> 16);
+ cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST;
}
dst->table[--i].ctrl |= SG_CF_EOT;
- return 0;
}
- dma_handle = pci_map_single(pdev, cmd->request_buffer,
- cmd->request_bufflen, cmd->sc_data_direction);
- cmd->SCp.dma_handle = dma_handle;
-
- ccb->sg_count = 1;
- dst->sg_count = cpu_to_le16(1);
- dst->table[0].addr = cpu_to_le32(dma_handle & 0xffffffff);
- dst->table[0].addr_hi = cpu_to_le32((dma_handle >> 16) >> 16);
- dst->table[0].count = cpu_to_le32((u32)cmd->request_bufflen);
- dst->table[0].ctrl = SG_CF_EOT | SG_CF_64B | SG_CF_HOST;
-
return 0;
}
@@ -451,24 +432,24 @@ static void stex_internal_copy(struct scsi_cmnd *cmd,
size_t lcount;
size_t len;
void *s, *d, *base = NULL;
- if (*count > cmd->request_bufflen)
- *count = cmd->request_bufflen;
+ size_t offset;
+
+ if (*count > scsi_bufflen(cmd))
+ *count = scsi_bufflen(cmd);
lcount = *count;
while (lcount) {
len = lcount;
s = (void *)src;
- if (cmd->use_sg) {
- size_t offset = *count - lcount;
- s += offset;
- base = scsi_kmap_atomic_sg(cmd->request_buffer,
- sg_count, &offset, &len);
- if (base == NULL) {
- *count -= lcount;
- return;
- }
- d = base + offset;
- } else
- d = cmd->request_buffer;
+
+ offset = *count - lcount;
+ s += offset;
+ base = scsi_kmap_atomic_sg(scsi_sglist(cmd),
+ sg_count, &offset, &len);
+ if (!base) {
+ *count -= lcount;
+ return;
+ }
+ d = base + offset;
if (direction == ST_TO_CMD)
memcpy(d, s, len);
@@ -476,30 +457,24 @@ static void stex_internal_copy(struct scsi_cmnd *cmd,
memcpy(s, d, len);
lcount -= len;
- if (cmd->use_sg)
- scsi_kunmap_atomic_sg(base);
+ scsi_kunmap_atomic_sg(base);
}
}
static int stex_direct_copy(struct scsi_cmnd *cmd,
const void *src, size_t count)
{
- struct st_hba *hba = (struct st_hba *) &cmd->device->host->hostdata[0];
size_t cp_len = count;
int n_elem = 0;
- if (cmd->use_sg) {
- n_elem = pci_map_sg(hba->pdev, cmd->request_buffer,
- cmd->use_sg, cmd->sc_data_direction);
- if (n_elem <= 0)
- return 0;
- }
+ n_elem = scsi_dma_map(cmd);
+ if (n_elem < 0)
+ return 0;
stex_internal_copy(cmd, src, &cp_len, n_elem, ST_TO_CMD);
- if (cmd->use_sg)
- pci_unmap_sg(hba->pdev, cmd->request_buffer,
- cmd->use_sg, cmd->sc_data_direction);
+ scsi_dma_unmap(cmd);
+
return cp_len == count;
}
@@ -678,18 +653,6 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
return 0;
}
-static void stex_unmap_sg(struct st_hba *hba, struct scsi_cmnd *cmd)
-{
- if (cmd->sc_data_direction != DMA_NONE) {
- if (cmd->use_sg)
- pci_unmap_sg(hba->pdev, cmd->request_buffer,
- cmd->use_sg, cmd->sc_data_direction);
- else
- pci_unmap_single(hba->pdev, cmd->SCp.dma_handle,
- cmd->request_bufflen, cmd->sc_data_direction);
- }
-}
-
static void stex_scsi_done(struct st_ccb *ccb)
{
struct scsi_cmnd *cmd = ccb->cmd;
@@ -756,8 +719,8 @@ static void stex_ys_commands(struct st_hba *hba,
if (ccb->cmd->cmnd[0] == MGT_CMD &&
resp->scsi_status != SAM_STAT_CHECK_CONDITION) {
- ccb->cmd->request_bufflen =
- le32_to_cpu(*(__le32 *)&resp->variable[0]);
+ scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) -
+ le32_to_cpu(*(__le32 *)&resp->variable[0]));
return;
}
@@ -855,7 +818,7 @@ static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
stex_controller_info(hba, ccb);
- stex_unmap_sg(hba, ccb->cmd);
+ scsi_dma_unmap(ccb->cmd);
stex_scsi_done(ccb);
hba->out_req_cnt--;
} else if (ccb->req_type & PASSTHRU_REQ_TYPE) {
@@ -1028,7 +991,7 @@ static int stex_abort(struct scsi_cmnd *cmd)
}
fail_out:
- stex_unmap_sg(hba, cmd);
+ scsi_dma_unmap(cmd);
hba->wait_ccb->req = NULL; /* nullify the req's future return */
hba->wait_ccb = NULL;
result = FAILED;
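
[Editor's illustration, not part of this commit: the stex conversion above follows the same "data buffer accessors" pattern used throughout this merge. A generic sketch of that pattern is below; foo_build_sg(), foo_complete() and struct foo_sge are hypothetical, while the accessor calls (scsi_dma_map, scsi_for_each_sg, scsi_dma_unmap, scsi_bufflen, scsi_set_resid) are the ones the converted drivers rely on.]

#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

struct foo_sge {			/* hypothetical HW S/G descriptor */
	dma_addr_t addr;
	u32 len;
};

static int foo_build_sg(struct scsi_cmnd *cmd, struct foo_sge *sge)
{
	struct scatterlist *sg;
	int i, nseg;

	nseg = scsi_dma_map(cmd);	/* maps scsi_sglist(cmd); may return 0 */
	if (nseg < 0)
		return -EIO;		/* mapping failure */

	scsi_for_each_sg(cmd, sg, nseg, i) {
		sge[i].addr = sg_dma_address(sg);
		sge[i].len  = sg_dma_len(sg);
	}
	return nseg;
}

static void foo_complete(struct scsi_cmnd *cmd, unsigned int xferred)
{
	scsi_dma_unmap(cmd);
	/* under-/over-runs are reported through the residual now that
	 * drivers no longer rewrite request_bufflen directly */
	scsi_set_resid(cmd, scsi_bufflen(cmd) - xferred);
}

[The accessors remove the use_sg/request_buffer special cases, which is why the single-segment mapping paths disappear in the driver hunks above and below.]
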
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index bbeb2451d32..2c87db98cdf 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -493,7 +493,7 @@ static int __devinit esp_sbus_probe_one(struct device *dev,
goto fail;
host->max_id = (hme ? 16 : 8);
- esp = host_to_esp(host);
+ esp = shost_priv(host);
esp->host = host;
esp->dev = esp_dev;
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index 2ca950582bc..92bfaeafe30 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -332,8 +332,7 @@ static irqreturn_t sym53c416_intr_handle(int irq, void *dev_id)
int i;
unsigned long flags = 0;
unsigned char status_reg, pio_int_reg, int_reg;
- struct scatterlist *sglist;
- unsigned int sgcount;
+ struct scatterlist *sg;
unsigned int tot_trans = 0;
/* We search the base address of the host adapter which caused the interrupt */
@@ -429,19 +428,15 @@ static irqreturn_t sym53c416_intr_handle(int irq, void *dev_id)
{
current_command->SCp.phase = data_out;
outb(FLUSH_FIFO, base + COMMAND_REG);
- sym53c416_set_transfer_counter(base, current_command->request_bufflen);
+ sym53c416_set_transfer_counter(base,
+ scsi_bufflen(current_command));
outb(TRANSFER_INFORMATION | PIO_MODE, base + COMMAND_REG);
- if(!current_command->use_sg)
- tot_trans = sym53c416_write(base, current_command->request_buffer, current_command->request_bufflen);
- else
- {
- sgcount = current_command->use_sg;
- sglist = current_command->request_buffer;
- while(sgcount--)
- {
- tot_trans += sym53c416_write(base, SG_ADDRESS(sglist), sglist->length);
- sglist++;
- }
+
+ scsi_for_each_sg(current_command,
+ sg, scsi_sg_count(current_command), i) {
+ tot_trans += sym53c416_write(base,
+ SG_ADDRESS(sg),
+ sg->length);
}
if(tot_trans < current_command->underflow)
printk(KERN_WARNING "sym53c416: Underflow, wrote %d bytes, request for %d bytes.\n", tot_trans, current_command->underflow);
@@ -455,19 +450,16 @@ static irqreturn_t sym53c416_intr_handle(int irq, void *dev_id)
{
current_command->SCp.phase = data_in;
outb(FLUSH_FIFO, base + COMMAND_REG);
- sym53c416_set_transfer_counter(base, current_command->request_bufflen);
+ sym53c416_set_transfer_counter(base,
+ scsi_bufflen(current_command));
+
outb(TRANSFER_INFORMATION | PIO_MODE, base + COMMAND_REG);
- if(!current_command->use_sg)
- tot_trans = sym53c416_read(base, current_command->request_buffer, current_command->request_bufflen);
- else
- {
- sgcount = current_command->use_sg;
- sglist = current_command->request_buffer;
- while(sgcount--)
- {
- tot_trans += sym53c416_read(base, SG_ADDRESS(sglist), sglist->length);
- sglist++;
- }
+
+ scsi_for_each_sg(current_command,
+ sg, scsi_sg_count(current_command), i) {
+ tot_trans += sym53c416_read(base,
+ SG_ADDRESS(sg),
+ sg->length);
}
if(tot_trans < current_command->underflow)
printk(KERN_WARNING "sym53c416: Underflow, read %d bytes, request for %d bytes.\n", tot_trans, current_command->underflow);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 4d78c7e87cc..15a51459c81 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -146,41 +146,17 @@ struct sym_ucmd { /* Override the SCSI pointer structure */
static void __unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
{
- int dma_dir = cmd->sc_data_direction;
+ if (SYM_UCMD_PTR(cmd)->data_mapped)
+ scsi_dma_unmap(cmd);
- switch(SYM_UCMD_PTR(cmd)->data_mapped) {
- case 2:
- pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg, dma_dir);
- break;
- case 1:
- pci_unmap_single(pdev, SYM_UCMD_PTR(cmd)->data_mapping,
- cmd->request_bufflen, dma_dir);
- break;
- }
SYM_UCMD_PTR(cmd)->data_mapped = 0;
}
-static dma_addr_t __map_scsi_single_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
-{
- dma_addr_t mapping;
- int dma_dir = cmd->sc_data_direction;
-
- mapping = pci_map_single(pdev, cmd->request_buffer,
- cmd->request_bufflen, dma_dir);
- if (mapping) {
- SYM_UCMD_PTR(cmd)->data_mapped = 1;
- SYM_UCMD_PTR(cmd)->data_mapping = mapping;
- }
-
- return mapping;
-}
-
static int __map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
{
int use_sg;
- int dma_dir = cmd->sc_data_direction;
- use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg, dma_dir);
+ use_sg = scsi_dma_map(cmd);
if (use_sg > 0) {
SYM_UCMD_PTR(cmd)->data_mapped = 2;
SYM_UCMD_PTR(cmd)->data_mapping = use_sg;
@@ -191,8 +167,6 @@ static int __map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
#define unmap_scsi_data(np, cmd) \
__unmap_scsi_data(np->s.device, cmd)
-#define map_scsi_single_data(np, cmd) \
- __map_scsi_single_data(np->s.device, cmd)
#define map_scsi_sg_data(np, cmd) \
__map_scsi_sg_data(np->s.device, cmd)
/*
@@ -322,55 +296,20 @@ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
*/
cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status);
}
- cmd->resid = resid;
+ scsi_set_resid(cmd, resid);
cmd->result = (drv_status << 24) + (cam_status << 16) + scsi_status;
}
-
-/*
- * Build the scatter/gather array for an I/O.
- */
-
-static int sym_scatter_no_sglist(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
-{
- struct sym_tblmove *data = &cp->phys.data[SYM_CONF_MAX_SG-1];
- int segment;
- unsigned int len = cmd->request_bufflen;
-
- if (len) {
- dma_addr_t baddr = map_scsi_single_data(np, cmd);
- if (baddr) {
- if (len & 1) {
- struct sym_tcb *tp = &np->target[cp->target];
- if (tp->head.wval & EWS) {
- len++;
- cp->odd_byte_adjustment++;
- }
- }
- cp->data_len = len;
- sym_build_sge(np, data, baddr, len);
- segment = 1;
- } else {
- segment = -2;
- }
- } else {
- segment = 0;
- }
-
- return segment;
-}
-
static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
{
int segment;
- int use_sg = (int) cmd->use_sg;
+ int use_sg;
cp->data_len = 0;
- if (!use_sg)
- segment = sym_scatter_no_sglist(np, cp, cmd);
- else if ((use_sg = map_scsi_sg_data(np, cmd)) > 0) {
- struct scatterlist *scatter = (struct scatterlist *)cmd->request_buffer;
+ use_sg = map_scsi_sg_data(np, cmd);
+ if (use_sg > 0) {
+ struct scatterlist *sg;
struct sym_tcb *tp = &np->target[cp->target];
struct sym_tblmove *data;
@@ -381,9 +320,9 @@ static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd
data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg];
- for (segment = 0; segment < use_sg; segment++) {
- dma_addr_t baddr = sg_dma_address(&scatter[segment]);
- unsigned int len = sg_dma_len(&scatter[segment]);
+ scsi_for_each_sg(cmd, sg, use_sg, segment) {
+ dma_addr_t baddr = sg_dma_address(sg);
+ unsigned int len = sg_dma_len(sg);
if ((len & 1) && (tp->head.wval & EWS)) {
len++;
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h
index e022d3c71b5..0f097ba4f71 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.h
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.h
@@ -255,7 +255,7 @@ sym_get_cam_status(struct scsi_cmnd *cmd)
*/
static __inline void sym_set_cam_result_ok(struct sym_ccb *cp, struct scsi_cmnd *cmd, int resid)
{
- cmd->resid = resid;
+ scsi_set_resid(cmd, resid);
cmd->result = (((DID_OK) << 16) + ((cp->ssss_status) & 0x7f));
}
void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid);
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index e7b85e832eb..14cba1ca38b 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -457,28 +457,21 @@ static int dc390_pci_map (struct dc390_srb* pSRB)
error = 1;
DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __FUNCTION__, pcmd->sense_buffer, cmdp->saved_dma_handle));
/* Map SG list */
- } else if (pcmd->use_sg) {
- pSRB->pSegmentList = (struct scatterlist *) pcmd->request_buffer;
- pSRB->SGcount = pci_map_sg(pdev, pSRB->pSegmentList, pcmd->use_sg,
- pcmd->sc_data_direction);
+ } else if (scsi_sg_count(pcmd)) {
+ int nseg;
+
+ nseg = scsi_dma_map(pcmd);
+
+ pSRB->pSegmentList = scsi_sglist(pcmd);
+ pSRB->SGcount = nseg;
+
/* TODO: error handling */
- if (!pSRB->SGcount)
+ if (nseg < 0)
error = 1;
DEBUG1(printk("%s(): Mapped SG %p with %d (%d) elements\n",\
- __FUNCTION__, pcmd->request_buffer, pSRB->SGcount, pcmd->use_sg));
+ __FUNCTION__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd)));
/* Map single segment */
- } else if (pcmd->request_buffer && pcmd->request_bufflen) {
- pSRB->pSegmentList = dc390_sg_build_single(&pSRB->Segmentx, pcmd->request_buffer, pcmd->request_bufflen);
- pSRB->SGcount = pci_map_sg(pdev, pSRB->pSegmentList, 1,
- pcmd->sc_data_direction);
- cmdp->saved_dma_handle = sg_dma_address(pSRB->pSegmentList);
-
- /* TODO: error handling */
- if (pSRB->SGcount != 1)
- error = 1;
- DEBUG1(printk("%s(): Mapped request buffer %p at %x\n", __FUNCTION__, pcmd->request_buffer, cmdp->saved_dma_handle));
- /* No mapping !? */
- } else
+ } else
pSRB->SGcount = 0;
return error;
@@ -494,12 +487,10 @@ static void dc390_pci_unmap (struct dc390_srb* pSRB)
if (pSRB->SRBFlag) {
pci_unmap_sg(pdev, &pSRB->Segmentx, 1, DMA_FROM_DEVICE);
DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __FUNCTION__, cmdp->saved_dma_handle));
- } else if (pcmd->use_sg) {
- pci_unmap_sg(pdev, pcmd->request_buffer, pcmd->use_sg, pcmd->sc_data_direction);
- DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n", __FUNCTION__, pcmd->request_buffer, pcmd->use_sg));
- } else if (pcmd->request_buffer && pcmd->request_bufflen) {
- pci_unmap_sg(pdev, &pSRB->Segmentx, 1, pcmd->sc_data_direction);
- DEBUG1(printk("%s(): Unmapped request buffer at %x\n", __FUNCTION__, cmdp->saved_dma_handle));
+ } else {
+ scsi_dma_unmap(pcmd);
+ DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n",
+ __FUNCTION__, scsi_sglist(pcmd), scsi_sg_count(pcmd)));
}
}
@@ -1153,9 +1144,9 @@ dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB)
struct scatterlist *psgl;
pSRB->TotalXferredLen = 0;
pSRB->SGIndex = 0;
- if (pcmd->use_sg) {
+ if (scsi_sg_count(pcmd)) {
size_t saved;
- pSRB->pSegmentList = (struct scatterlist *)pcmd->request_buffer;
+ pSRB->pSegmentList = scsi_sglist(pcmd);
psgl = pSRB->pSegmentList;
//dc390_pci_sync(pSRB);
@@ -1179,12 +1170,6 @@ dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB)
printk (KERN_INFO "DC390: Pointer restored. Segment %i, Total %li, Bus %08lx\n",
pSRB->SGIndex, pSRB->Saved_Ptr, pSRB->SGBusAddr);
- } else if(pcmd->request_buffer) {
- //dc390_pci_sync(pSRB);
-
- sg_dma_len(&pSRB->Segmentx) = pcmd->request_bufflen - pSRB->Saved_Ptr;
- pSRB->SGcount = 1;
- pSRB->pSegmentList = (struct scatterlist *) &pSRB->Segmentx;
} else {
pSRB->SGcount = 0;
printk (KERN_INFO "DC390: RESTORE_PTR message for Transfer without Scatter-Gather ??\n");
@@ -1579,7 +1564,8 @@ dc390_Disconnect( struct dc390_acb* pACB )
if( (pSRB->SRBState & (SRB_START_+SRB_MSGOUT)) ||
!(pSRB->SRBState & (SRB_DISCONNECT+SRB_COMPLETED)) )
{ /* Selection time out */
- pSRB->TargetStatus = SCSI_STAT_SEL_TIMEOUT;
+ pSRB->AdaptStatus = H_SEL_TIMEOUT;
+ pSRB->TargetStatus = 0;
goto disc1;
}
else if (!(pSRB->SRBState & SRB_DISCONNECT) && (pSRB->SRBState & SRB_COMPLETED))
@@ -1612,7 +1598,7 @@ dc390_Reselect( struct dc390_acb* pACB )
if( !( pACB->scan_devices ) )
{
struct scsi_cmnd *pcmd = pSRB->pcmd;
- pcmd->resid = pcmd->request_bufflen;
+ scsi_set_resid(pcmd, scsi_bufflen(pcmd));
SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
dc390_Going_remove(pDCB, pSRB);
dc390_Free_insert(pACB, pSRB);
@@ -1695,7 +1681,6 @@ dc390_RequestSense(struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_
pcmd->cmnd[0], pDCB->TargetID, pDCB->TargetLUN));
pSRB->SRBFlag |= AUTO_REQSENSE;
- pSRB->SavedSGCount = pcmd->use_sg;
pSRB->SavedTotXLen = pSRB->TotalXferredLen;
pSRB->AdaptStatus = 0;
pSRB->TargetStatus = 0; /* CHECK_CONDITION<<1; */
@@ -1728,22 +1713,21 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
{ /* Last command was a Request Sense */
pSRB->SRBFlag &= ~AUTO_REQSENSE;
pSRB->AdaptStatus = 0;
- pSRB->TargetStatus = CHECK_CONDITION << 1;
+ pSRB->TargetStatus = SAM_STAT_CHECK_CONDITION;
//pcmd->result = MK_RES(DRIVER_SENSE,DID_OK,0,status);
- if (status == (CHECK_CONDITION << 1))
+ if (status == SAM_STAT_CHECK_CONDITION)
pcmd->result = MK_RES_LNX(0, DID_BAD_TARGET, 0, /*CHECK_CONDITION*/0);
else /* Retry */
{
if( pSRB->pcmd->cmnd[0] == TEST_UNIT_READY /* || pSRB->pcmd->cmnd[0] == START_STOP */)
{
/* Don't retry on TEST_UNIT_READY */
- pcmd->result = MK_RES_LNX(DRIVER_SENSE,DID_OK,0,CHECK_CONDITION);
+ pcmd->result = MK_RES_LNX(DRIVER_SENSE, DID_OK, 0, SAM_STAT_CHECK_CONDITION);
REMOVABLEDEBUG(printk(KERN_INFO "Cmd=%02x, Result=%08x, XferL=%08x\n",pSRB->pcmd->cmnd[0],\
(u32) pcmd->result, (u32) pSRB->TotalXferredLen));
} else {
SET_RES_DRV(pcmd->result, DRIVER_SENSE);
- pcmd->use_sg = pSRB->SavedSGCount;
//pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8);
DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->pid, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun));
pSRB->TotalXferredLen = 0;
@@ -1754,7 +1738,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
}
if( status )
{
- if( status_byte(status) == CHECK_CONDITION )
+ if (status == SAM_STAT_CHECK_CONDITION)
{
if (dc390_RequestSense(pACB, pDCB, pSRB)) {
SET_RES_DID(pcmd->result, DID_ERROR);
@@ -1762,22 +1746,14 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
}
return;
}
- else if( status_byte(status) == QUEUE_FULL )
+ else if (status == SAM_STAT_TASK_SET_FULL)
{
scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1);
- pcmd->use_sg = pSRB->SavedSGCount;
DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->pid, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun));
pSRB->TotalXferredLen = 0;
SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
}
- else if(status == SCSI_STAT_SEL_TIMEOUT)
- {
- pSRB->AdaptStatus = H_SEL_TIMEOUT;
- pSRB->TargetStatus = 0;
- pcmd->result = MK_RES(0,DID_NO_CONNECT,0,0);
- /* Devices are removed below ... */
- }
- else if (status_byte(status) == BUSY &&
+ else if (status == SAM_STAT_BUSY &&
(pcmd->cmnd[0] == TEST_UNIT_READY || pcmd->cmnd[0] == INQUIRY) &&
pACB->scan_devices)
{
@@ -1795,12 +1771,17 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
else
{ /* Target status == 0 */
status = pSRB->AdaptStatus;
- if(status & H_OVER_UNDER_RUN)
+ if (status == H_OVER_UNDER_RUN)
{
pSRB->TargetStatus = 0;
SET_RES_DID(pcmd->result,DID_OK);
SET_RES_MSG(pcmd->result,pSRB->EndMessage);
}
+ else if (status == H_SEL_TIMEOUT)
+ {
+ pcmd->result = MK_RES(0, DID_NO_CONNECT, 0, 0);
+ /* Devices are removed below ... */
+ }
else if( pSRB->SRBStatus & PARITY_ERROR)
{
//pcmd->result = MK_RES(0,DID_PARITY,pSRB->EndMessage,0);
@@ -1816,7 +1797,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
}
cmd_done:
- pcmd->resid = pcmd->request_bufflen - pSRB->TotalXferredLen;
+ scsi_set_resid(pcmd, scsi_bufflen(pcmd) - pSRB->TotalXferredLen);
dc390_Going_remove (pDCB, pSRB);
/* Add to free list */
diff --git a/drivers/scsi/tmscsim.h b/drivers/scsi/tmscsim.h
index c3d8c80cfb3..77adc54dbd1 100644
--- a/drivers/scsi/tmscsim.h
+++ b/drivers/scsi/tmscsim.h
@@ -57,7 +57,6 @@ u8 SGcount;
u8 MsgCnt;
u8 EndMessage;
-u8 SavedSGCount;
u8 MsgInBuf[6];
u8 MsgOutBuf[6];
@@ -258,13 +257,6 @@ struct dc390_srb SRB_array[MAX_SRB_CNT]; /* 50 SRBs */
#define H_BAD_CCB_OR_SG 0x1A
#define H_ABORT 0x0FF
-/*; SCSI Status byte codes*/
-/* The values defined in include/scsi/scsi.h, to be shifted << 1 */
-
-#define SCSI_STAT_UNEXP_BUS_F 0xFD /*; Unexpect Bus Free */
-#define SCSI_STAT_BUS_RST_DETECT 0xFE /*; Scsi Bus Reset detected */
-#define SCSI_STAT_SEL_TIMEOUT 0xFF /*; Selection Time out */
-
/* cmd->result */
#define RES_TARGET 0x000000FF /* Target State */
#define RES_TARGET_LNX STATUS_MASK /* Only official ... */
@@ -273,7 +265,7 @@ struct dc390_srb SRB_array[MAX_SRB_CNT]; /* 50 SRBs */
#define RES_DRV 0xFF000000 /* DRIVER_ codes */
#define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
-#define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1)
+#define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
#define SET_RES_TARGET(who, tgt) do { who &= ~RES_TARGET; who |= (int)(tgt); } while (0)
#define SET_RES_TARGET_LNX(who, tgt) do { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; } while (0)
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 3de08a15de4..9e8232a1f16 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1111,7 +1111,7 @@ static int u14_34f_detect(struct scsi_host_template *tpnt) {
static void map_dma(unsigned int i, unsigned int j) {
unsigned int data_len = 0;
unsigned int k, count, pci_dir;
- struct scatterlist *sgpnt;
+ struct scatterlist *sg;
struct mscp *cpp;
struct scsi_cmnd *SCpnt;
@@ -1124,33 +1124,28 @@ static void map_dma(unsigned int i, unsigned int j) {
cpp->sense_len = sizeof SCpnt->sense_buffer;
- if (!SCpnt->use_sg) {
-
- /* If we get here with PCI_DMA_NONE, pci_map_single triggers a BUG() */
- if (!SCpnt->request_bufflen) pci_dir = PCI_DMA_BIDIRECTIONAL;
-
- if (SCpnt->request_buffer)
- cpp->data_address = H2DEV(pci_map_single(HD(j)->pdev,
- SCpnt->request_buffer, SCpnt->request_bufflen, pci_dir));
-
- cpp->data_len = H2DEV(SCpnt->request_bufflen);
- return;
- }
-
- sgpnt = (struct scatterlist *) SCpnt->request_buffer;
- count = pci_map_sg(HD(j)->pdev, sgpnt, SCpnt->use_sg, pci_dir);
-
- for (k = 0; k < count; k++) {
- cpp->sglist[k].address = H2DEV(sg_dma_address(&sgpnt[k]));
- cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(&sgpnt[k]));
- data_len += sgpnt[k].length;
- }
-
- cpp->sg = TRUE;
- cpp->use_sg = SCpnt->use_sg;
- cpp->data_address = H2DEV(pci_map_single(HD(j)->pdev, cpp->sglist,
- SCpnt->use_sg * sizeof(struct sg_list), pci_dir));
- cpp->data_len = H2DEV(data_len);
+ if (scsi_bufflen(SCpnt)) {
+ count = scsi_dma_map(SCpnt);
+ BUG_ON(count < 0);
+
+ scsi_for_each_sg(SCpnt, sg, count, k) {
+ cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
+ cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
+ data_len += sg->length;
+ }
+
+ cpp->sg = TRUE;
+ cpp->use_sg = scsi_sg_count(SCpnt);
+ cpp->data_address =
+ H2DEV(pci_map_single(HD(j)->pdev, cpp->sglist,
+ cpp->use_sg * sizeof(struct sg_list),
+ pci_dir));
+ cpp->data_len = H2DEV(data_len);
+
+ } else {
+ pci_dir = PCI_DMA_BIDIRECTIONAL;
+ cpp->data_len = H2DEV(scsi_bufflen(SCpnt));
+ }
}
static void unmap_dma(unsigned int i, unsigned int j) {
@@ -1165,8 +1160,7 @@ static void unmap_dma(unsigned int i, unsigned int j) {
pci_unmap_single(HD(j)->pdev, DEV2H(cpp->sense_addr),
DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
- if (SCpnt->use_sg)
- pci_unmap_sg(HD(j)->pdev, SCpnt->request_buffer, SCpnt->use_sg, pci_dir);
+ scsi_dma_unmap(SCpnt);
if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL;
@@ -1187,9 +1181,9 @@ static void sync_dma(unsigned int i, unsigned int j) {
pci_dma_sync_single_for_cpu(HD(j)->pdev, DEV2H(cpp->sense_addr),
DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
- if (SCpnt->use_sg)
- pci_dma_sync_sg_for_cpu(HD(j)->pdev, SCpnt->request_buffer,
- SCpnt->use_sg, pci_dir);
+ if (scsi_sg_count(SCpnt))
+ pci_dma_sync_sg_for_cpu(HD(j)->pdev, scsi_sglist(SCpnt),
+ scsi_sg_count(SCpnt), pci_dir);
if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL;
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index 56906aba5ee..c08235d5afc 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -675,16 +675,15 @@ static const char *ultrastor_info(struct Scsi_Host * shpnt)
static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt)
{
- struct scatterlist *sl;
+ struct scatterlist *sg;
long transfer_length = 0;
int i, max;
- sl = (struct scatterlist *) SCpnt->request_buffer;
- max = SCpnt->use_sg;
- for (i = 0; i < max; i++) {
- mscp->sglist[i].address = isa_page_to_bus(sl[i].page) + sl[i].offset;
- mscp->sglist[i].num_bytes = sl[i].length;
- transfer_length += sl[i].length;
+ max = scsi_sg_count(SCpnt);
+ scsi_for_each_sg(SCpnt, sg, max, i) {
+ mscp->sglist[i].address = isa_page_to_bus(sg->page) + sg->offset;
+ mscp->sglist[i].num_bytes = sg->length;
+ transfer_length += sg->length;
}
mscp->number_of_sg_list = max;
mscp->transfer_data = isa_virt_to_bus(mscp->sglist);
@@ -730,15 +729,15 @@ static int ultrastor_queuecommand(struct scsi_cmnd *SCpnt,
my_mscp->target_id = SCpnt->device->id;
my_mscp->ch_no = 0;
my_mscp->lun = SCpnt->device->lun;
- if (SCpnt->use_sg) {
+ if (scsi_sg_count(SCpnt)) {
/* Set scatter/gather flag in SCSI command packet */
my_mscp->sg = TRUE;
build_sg_list(my_mscp, SCpnt);
} else {
/* Unset scatter/gather flag in SCSI command packet */
my_mscp->sg = FALSE;
- my_mscp->transfer_data = isa_virt_to_bus(SCpnt->request_buffer);
- my_mscp->transfer_data_length = SCpnt->request_bufflen;
+ my_mscp->transfer_data = isa_virt_to_bus(scsi_sglist(SCpnt));
+ my_mscp->transfer_data_length = scsi_bufflen(SCpnt);
}
my_mscp->command_link = 0; /*???*/
my_mscp->scsi_command_link_id = 0; /*???*/
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index 30be76514c4..d6fd4259c56 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1091,6 +1091,7 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt,
unchar *cdb = (unchar *) SCpnt->cmnd;
unchar idlun;
short cdblen;
+ int nseg;
Adapter *host = (Adapter *) SCpnt->device->host->hostdata;
cdblen = SCpnt->cmd_len;
@@ -1106,28 +1107,29 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt,
SCpnt->host_scribble = (unchar *) scb;
scb->host = host;
- if (SCpnt->use_sg) {
- struct scatterlist *sg = (struct scatterlist *) SCpnt->request_buffer;
+ nseg = scsi_sg_count(SCpnt);
+ if (nseg) {
+ struct scatterlist *sg;
unsigned i;
if (SCpnt->device->host->sg_tablesize == SG_NONE) {
panic("wd7000_queuecommand: scatter/gather not supported.\n");
}
- dprintk("Using scatter/gather with %d elements.\n", SCpnt->use_sg);
+ dprintk("Using scatter/gather with %d elements.\n", nseg);
sgb = scb->sgb;
scb->op = 1;
any2scsi(scb->dataptr, (int) sgb);
- any2scsi(scb->maxlen, SCpnt->use_sg * sizeof(Sgb));
+ any2scsi(scb->maxlen, nseg * sizeof(Sgb));
- for (i = 0; i < SCpnt->use_sg; i++) {
- any2scsi(sgb[i].ptr, isa_page_to_bus(sg[i].page) + sg[i].offset);
- any2scsi(sgb[i].len, sg[i].length);
+ scsi_for_each_sg(SCpnt, sg, nseg, i) {
+ any2scsi(sgb[i].ptr, isa_page_to_bus(sg->page) + sg->offset);
+ any2scsi(sgb[i].len, sg->length);
}
} else {
scb->op = 0;
- any2scsi(scb->dataptr, isa_virt_to_bus(SCpnt->request_buffer));
- any2scsi(scb->maxlen, SCpnt->request_bufflen);
+ any2scsi(scb->dataptr, isa_virt_to_bus(scsi_sglist(SCpnt)));
+ any2scsi(scb->maxlen, scsi_bufflen(SCpnt));
}
/* FIXME: drop lock and yield here ? */
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
new file mode 100644
index 00000000000..50703877a58
--- /dev/null
+++ b/drivers/scsi/zorro7xx.c
@@ -0,0 +1,180 @@
+/*
+ * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
+ * Amiga MacroSystemUS WarpEngine SCSI controller.
+ * Amiga Technologies/DKB A4091 SCSI controller.
+ *
+ * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
+ * plus modifications of the 53c7xx.c driver to support the Amiga.
+ *
+ * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/zorro.h>
+#include <asm/amigaints.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "53c700.h"
+
+MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / Kars de Jong <jongk@linux-m68k.org>");
+MODULE_DESCRIPTION("Amiga Zorro NCR53C710 driver");
+MODULE_LICENSE("GPL");
+
+
+static struct scsi_host_template zorro7xx_scsi_driver_template = {
+ .proc_name = "zorro7xx",
+ .this_id = 7,
+ .module = THIS_MODULE,
+};
+
+static struct zorro_driver_data {
+ const char *name;
+ unsigned long offset;
+ int absolute; /* offset is absolute address */
+} zorro7xx_driver_data[] __devinitdata = {
+ { .name = "PowerUP 603e+", .offset = 0xf40000, .absolute = 1 },
+ { .name = "WarpEngine 40xx", .offset = 0x40000 },
+ { .name = "A4091", .offset = 0x800000 },
+ { .name = "GForce 040/060", .offset = 0x40000 },
+ { 0 }
+};
+
+static struct zorro_device_id zorro7xx_zorro_tbl[] __devinitdata = {
+ {
+ .id = ZORRO_PROD_PHASE5_BLIZZARD_603E_PLUS,
+ .driver_data = (unsigned long)&zorro7xx_driver_data[0],
+ },
+ {
+ .id = ZORRO_PROD_MACROSYSTEMS_WARP_ENGINE_40xx,
+ .driver_data = (unsigned long)&zorro7xx_driver_data[1],
+ },
+ {
+ .id = ZORRO_PROD_CBM_A4091_1,
+ .driver_data = (unsigned long)&zorro7xx_driver_data[2],
+ },
+ {
+ .id = ZORRO_PROD_CBM_A4091_2,
+ .driver_data = (unsigned long)&zorro7xx_driver_data[2],
+ },
+ {
+ .id = ZORRO_PROD_GVP_GFORCE_040_060,
+ .driver_data = (unsigned long)&zorro7xx_driver_data[3],
+ },
+ { 0 }
+};
+
+static int __devinit zorro7xx_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ struct Scsi_Host * host = NULL;
+ struct NCR_700_Host_Parameters *hostdata;
+ struct zorro_driver_data *zdd;
+ unsigned long board, ioaddr;
+
+ board = zorro_resource_start(z);
+ zdd = (struct zorro_driver_data *)ent->driver_data;
+
+ if (zdd->absolute) {
+ ioaddr = zdd->offset;
+ } else {
+ ioaddr = board + zdd->offset;
+ }
+
+ if (!zorro_request_device(z, zdd->name)) {
+ printk(KERN_ERR "zorro7xx: cannot reserve region 0x%lx, abort\n",
+ board);
+ return -EBUSY;
+ }
+
+ hostdata = kmalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
+ if (hostdata == NULL) {
+ printk(KERN_ERR "zorro7xx: Failed to allocate host data\n");
+ goto out_release;
+ }
+
+ memset(hostdata, 0, sizeof(struct NCR_700_Host_Parameters));
+
+ /* Fill in the required pieces of hostdata */
+ if (ioaddr > 0x01000000)
+ hostdata->base = ioremap(ioaddr, zorro_resource_len(z));
+ else
+ hostdata->base = (void __iomem *)ZTWO_VADDR(ioaddr);
+
+ hostdata->clock = 50;
+ hostdata->chip710 = 1;
+
+ /* Settings for at least WarpEngine 40xx */
+ hostdata->ctest7_extra = CTEST7_TT1;
+
+ zorro7xx_scsi_driver_template.name = zdd->name;
+
+ /* and register the chip */
+ host = NCR_700_detect(&zorro7xx_scsi_driver_template, hostdata,
+ &z->dev);
+ if (!host) {
+ printk(KERN_ERR "zorro7xx: No host detected; "
+ "board configuration problem?\n");
+ goto out_free;
+ }
+
+ host->this_id = 7;
+ host->base = ioaddr;
+ host->irq = IRQ_AMIGA_PORTS;
+
+ if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "zorro7xx-scsi",
+ host)) {
+ printk(KERN_ERR "zorro7xx: request_irq failed\n");
+ goto out_put_host;
+ }
+
+ scsi_scan_host(host);
+
+ return 0;
+
+ out_put_host:
+ scsi_host_put(host);
+ out_free:
+ if (ioaddr > 0x01000000)
+ iounmap(hostdata->base);
+ kfree(hostdata);
+ out_release:
+ zorro_release_device(z);
+
+ return -ENODEV;
+}
+
+static __devexit void zorro7xx_remove_one(struct zorro_dev *z)
+{
+ struct Scsi_Host *host = dev_to_shost(&z->dev);
+ struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
+
+ scsi_remove_host(host);
+
+ NCR_700_release(host);
+ kfree(hostdata);
+ free_irq(host->irq, host);
+ zorro_release_device(z);
+}
+
+static struct zorro_driver zorro7xx_driver = {
+ .name = "zorro7xx-scsi",
+ .id_table = zorro7xx_zorro_tbl,
+ .probe = zorro7xx_init_one,
+ .remove = __devexit_p(zorro7xx_remove_one),
+};
+
+static int __init zorro7xx_scsi_init(void)
+{
+ return zorro_register_driver(&zorro7xx_driver);
+}
+
+static void __exit zorro7xx_scsi_exit(void)
+{
+ zorro_unregister_driver(&zorro7xx_driver);
+}
+
+module_init(zorro7xx_scsi_init);
+module_exit(zorro7xx_scsi_exit);
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 55ebf035e62..50e907f4204 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -48,6 +48,7 @@ enum iscsi_uevent_e {
ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT = UEVENT_BASE + 14,
ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15,
+ ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16,
/* up events */
ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
@@ -71,6 +72,8 @@ struct iscsi_uevent {
/* messages u -> k */
struct msg_create_session {
uint32_t initial_cmdsn;
+ uint16_t cmds_max;
+ uint16_t queue_depth;
} c_session;
struct msg_destroy_session {
uint32_t sid;
@@ -136,6 +139,11 @@ struct iscsi_uevent {
*/
uint32_t enable;
} tgt_dscvr;
+ struct msg_set_host_param {
+ uint32_t host_no;
+ uint32_t param; /* enum iscsi_host_param */
+ uint32_t len;
+ } set_host_param;
} u;
union {
/* messages k -> u */
@@ -223,6 +231,11 @@ enum iscsi_param {
ISCSI_PARAM_CONN_PORT,
ISCSI_PARAM_CONN_ADDRESS,
+ ISCSI_PARAM_USERNAME,
+ ISCSI_PARAM_USERNAME_IN,
+ ISCSI_PARAM_PASSWORD,
+ ISCSI_PARAM_PASSWORD_IN,
+
/* must always be last */
ISCSI_PARAM_MAX,
};
@@ -249,6 +262,24 @@ enum iscsi_param {
#define ISCSI_SESS_RECOVERY_TMO (1 << ISCSI_PARAM_SESS_RECOVERY_TMO)
#define ISCSI_CONN_PORT (1 << ISCSI_PARAM_CONN_PORT)
#define ISCSI_CONN_ADDRESS (1 << ISCSI_PARAM_CONN_ADDRESS)
+#define ISCSI_USERNAME (1 << ISCSI_PARAM_USERNAME)
+#define ISCSI_USERNAME_IN (1 << ISCSI_PARAM_USERNAME_IN)
+#define ISCSI_PASSWORD (1 << ISCSI_PARAM_PASSWORD)
+#define ISCSI_PASSWORD_IN (1 << ISCSI_PARAM_PASSWORD_IN)
+
+/* iSCSI HBA params */
+enum iscsi_host_param {
+ ISCSI_HOST_PARAM_HWADDRESS,
+ ISCSI_HOST_PARAM_INITIATOR_NAME,
+ ISCSI_HOST_PARAM_NETDEV_NAME,
+ ISCSI_HOST_PARAM_IPADDRESS,
+ ISCSI_HOST_PARAM_MAX,
+};
+
+#define ISCSI_HOST_HWADDRESS (1 << ISCSI_HOST_PARAM_HWADDRESS)
+#define ISCSI_HOST_INITIATOR_NAME (1 << ISCSI_HOST_PARAM_INITIATOR_NAME)
+#define ISCSI_HOST_NETDEV_NAME (1 << ISCSI_HOST_PARAM_NETDEV_NAME)
+#define ISCSI_HOST_IPADDRESS (1 << ISCSI_HOST_PARAM_IPADDRESS)
#define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
#define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
@@ -272,6 +303,9 @@ enum iscsi_param {
#define CAP_MULTI_CONN 0x40
#define CAP_TEXT_NEGO 0x80
#define CAP_MARKERS 0x100
+#define CAP_FW_DB 0x200
+#define CAP_SENDTARGETS_OFFLOAD 0x400
+#define CAP_DATA_PATH_OFFLOAD 0x800
/*
* These flags describe the reason for the stop_conn() call
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index ea0816d4904..3f631b08a1a 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -48,9 +48,8 @@ struct iscsi_nopin;
#define debug_scsi(fmt...)
#endif
-#define ISCSI_XMIT_CMDS_MAX 128 /* must be power of 2 */
-#define ISCSI_MGMT_CMDS_MAX 32 /* must be power of 2 */
-#define ISCSI_CONN_MAX 1
+#define ISCSI_DEF_XMIT_CMDS_MAX 128 /* must be power of 2 */
+#define ISCSI_MGMT_CMDS_MAX 16 /* must be power of 2 */
#define ISCSI_MGMT_ITT_OFFSET 0xa00
@@ -73,6 +72,8 @@ struct iscsi_nopin;
#define ISCSI_AGE_SHIFT 28
#define ISCSI_AGE_MASK (0xf << ISCSI_AGE_SHIFT)
+#define ISCSI_ADDRESS_BUF_LEN 64
+
struct iscsi_mgmt_task {
/*
* Because LLDs allocate their hdr differently, this is a pointer to
@@ -80,7 +81,7 @@ struct iscsi_mgmt_task {
*/
struct iscsi_hdr *hdr;
char *data; /* mgmt payload */
- int data_count; /* counts data to be sent */
+ unsigned data_count; /* counts data to be sent */
uint32_t itt; /* this ITT */
void *dd_data; /* driver/transport data */
struct list_head running;
@@ -90,6 +91,7 @@ enum {
ISCSI_TASK_COMPLETED,
ISCSI_TASK_PENDING,
ISCSI_TASK_RUNNING,
+ ISCSI_TASK_ABORTING,
};
struct iscsi_cmd_task {
@@ -99,16 +101,14 @@ struct iscsi_cmd_task {
*/
struct iscsi_cmd *hdr;
int itt; /* this ITT */
- int datasn; /* DataSN */
uint32_t unsol_datasn;
- int imm_count; /* imm-data (bytes) */
- int unsol_count; /* unsolicited (bytes)*/
+ unsigned imm_count; /* imm-data (bytes) */
+ unsigned unsol_count; /* unsolicited (bytes)*/
/* offset in unsolicited stream (bytes); */
- int unsol_offset;
- int data_count; /* remaining Data-Out */
+ unsigned unsol_offset;
+ unsigned data_count; /* remaining Data-Out */
struct scsi_cmnd *sc; /* associated SCSI cmd*/
- int total_length;
struct iscsi_conn *conn; /* used connection */
struct iscsi_mgmt_task *mtask; /* tmf mtask in progr */
@@ -152,18 +152,11 @@ struct iscsi_conn {
struct iscsi_cmd_task *ctask; /* xmit ctask in progress */
/* xmit */
- struct kfifo *immqueue; /* immediate xmit queue */
struct kfifo *mgmtqueue; /* mgmt (control) xmit queue */
struct list_head mgmt_run_list; /* list of control tasks */
struct list_head xmitqueue; /* data-path cmd queue */
struct list_head run_list; /* list of cmds in progress */
struct work_struct xmitwork; /* per-conn. xmit workqueue */
- /*
- * serializes connection xmit, access to kfifos:
- * xmitqueue, immqueue, mgmtqueue
- */
- struct mutex xmitmutex;
-
unsigned long suspend_tx; /* suspend Tx */
unsigned long suspend_rx; /* suspend Rx */
@@ -174,8 +167,8 @@ struct iscsi_conn {
int tmabort_state; /* see TMABORT_INITIAL, etc.*/
/* negotiated params */
- int max_recv_dlength; /* initiator_max_recv_dsl*/
- int max_xmit_dlength; /* target_max_recv_dsl */
+ unsigned max_recv_dlength; /* initiator_max_recv_dsl*/
+ unsigned max_xmit_dlength; /* target_max_recv_dsl */
int hdrdgst_en;
int datadgst_en;
int ifmarker_en;
@@ -183,6 +176,12 @@ struct iscsi_conn {
/* values userspace uses to id a conn */
int persistent_port;
char *persistent_address;
+ /* remote portal currently connected to */
+ int portal_port;
+ char portal_address[ISCSI_ADDRESS_BUF_LEN];
+ /* local address */
+ int local_port;
+ char local_address[ISCSI_ADDRESS_BUF_LEN];
/* MIB-statistics */
uint64_t txdata_octets;
@@ -213,18 +212,25 @@ struct iscsi_session {
/* configuration */
int initial_r2t_en;
- int max_r2t;
+ unsigned max_r2t;
int imm_data_en;
- int first_burst;
- int max_burst;
+ unsigned first_burst;
+ unsigned max_burst;
int time2wait;
int time2retain;
int pdu_inorder_en;
int dataseq_inorder_en;
int erl;
int tpgt;
+ char *username;
+ char *username_in;
+ char *password;
+ char *password_in;
char *targetname;
-
+ char *initiatorname;
+ /* hw address or netdev iscsi connection is bound to */
+ char *hwaddress;
+ char *netdev;
/* control data */
struct iscsi_transport *tt;
struct Scsi_Host *host;
@@ -255,12 +261,22 @@ extern int iscsi_eh_host_reset(struct scsi_cmnd *sc);
extern int iscsi_queuecommand(struct scsi_cmnd *sc,
void (*done)(struct scsi_cmnd *));
+
+/*
+ * iSCSI host helpers.
+ */
+extern int iscsi_host_set_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf,
+ int buflen);
+extern int iscsi_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf);
+
/*
* session management
*/
extern struct iscsi_cls_session *
iscsi_session_setup(struct iscsi_transport *, struct scsi_transport_template *,
- int, int, uint32_t, uint32_t *);
+ uint16_t, uint16_t, int, int, uint32_t, uint32_t *);
extern void iscsi_session_teardown(struct iscsi_cls_session *);
extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *);
extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
@@ -289,8 +305,7 @@ extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
/*
* pdu and task processing
*/
-extern int iscsi_check_assign_cmdsn(struct iscsi_session *,
- struct iscsi_nopin *);
+extern void iscsi_update_cmdsn(struct iscsi_session *, struct iscsi_nopin *);
extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *,
struct iscsi_data *hdr);
extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
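The new iscsi_host_set_param()/iscsi_host_get_param() helpers let libiscsi-based LLDs serve the host-level parameters that struct iscsi_session now carries (hwaddress, netdev, initiatorname). Below is a hedged sketch of a driver callback delegating to the helper, assuming the helper serves the session fields added above; my_iscsi_get_host_param() is a hypothetical LLD function, not part of this patch.

#include <linux/errno.h>
#include <scsi/scsi_host.h>
#include <scsi/iscsi_if.h>
#include <scsi/libiscsi.h>

/* Hypothetical LLD ->get_host_param callback: values libiscsi already
 * tracks are simply handed back by the new helper. */
static int my_iscsi_get_host_param(struct Scsi_Host *shost,
                                   enum iscsi_host_param param, char *buf)
{
        switch (param) {
        case ISCSI_HOST_PARAM_HWADDRESS:
        case ISCSI_HOST_PARAM_NETDEV_NAME:
        case ISCSI_HOST_PARAM_INITIATOR_NAME:
                return iscsi_host_get_param(shost, param, buf);
        default:
                return -ENOSYS;
        }
}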
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index a2e0c103249..53e170586c2 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -135,4 +135,24 @@ extern void scsi_kunmap_atomic_sg(void *virt);
extern struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
extern void scsi_free_sgtable(struct scatterlist *, int);
+extern int scsi_dma_map(struct scsi_cmnd *cmd);
+extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
+
+#define scsi_sg_count(cmd) ((cmd)->use_sg)
+#define scsi_sglist(cmd) ((struct scatterlist *)(cmd)->request_buffer)
+#define scsi_bufflen(cmd) ((cmd)->request_bufflen)
+
+static inline void scsi_set_resid(struct scsi_cmnd *cmd, int resid)
+{
+ cmd->resid = resid;
+}
+
+static inline int scsi_get_resid(struct scsi_cmnd *cmd)
+{
+ return cmd->resid;
+}
+
+#define scsi_for_each_sg(cmd, sg, nseg, __i) \
+ for (__i = 0, sg = scsi_sglist(cmd); __i < (nseg); __i++, (sg)++)
+
#endif /* _SCSI_SCSI_CMND_H */
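These accessors are what the "convert to use the data buffer accessors" patches in this series build on: drivers stop touching use_sg/request_buffer/request_bufflen directly and go through scsi_dma_map(), scsi_for_each_sg() and scsi_set_resid() instead. A hedged driver-side sketch follows; my_queue_dma() and my_hw_add_segment() are hypothetical and not from this series.

#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

static void my_hw_add_segment(dma_addr_t addr, unsigned int len); /* hypothetical */

/* Map the command's data buffer and program one descriptor per segment. */
static int my_queue_dma(struct scsi_cmnd *cmd)
{
        struct scatterlist *sg;
        int i, nseg;

        nseg = scsi_dma_map(cmd);
        if (nseg < 0)
                return nseg;                    /* mapping failed */

        scsi_for_each_sg(cmd, sg, nseg, i)
                my_hw_add_segment(sg_dma_address(sg), sg_dma_len(sg));

        scsi_set_resid(cmd, 0);                 /* expect a full transfer */
        return 0;
}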
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 2f3c5b8b1d6..d5057bc338f 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -209,7 +209,6 @@ extern struct scsi_device *__scsi_add_device(struct Scsi_Host *,
extern int scsi_add_device(struct Scsi_Host *host, uint channel,
uint target, uint lun);
extern void scsi_remove_device(struct scsi_device *);
-extern int scsi_device_cancel(struct scsi_device *, int);
extern int scsi_device_get(struct scsi_device *);
extern void scsi_device_put(struct scsi_device *);
@@ -287,6 +286,7 @@ extern void scsi_target_block(struct device *);
extern void scsi_target_unblock(struct device *);
extern void scsi_remove_target(struct device *);
extern void int_to_scsilun(unsigned int, struct scsi_lun *);
+extern int scsilun_to_int(struct scsi_lun *);
extern const char *scsi_device_state_name(enum scsi_device_state);
extern int scsi_is_sdev_device(const struct device *);
extern int scsi_is_target_device(const struct device *);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 68f461b7a83..ba07cf7c04b 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -339,12 +339,6 @@ struct scsi_host_template {
enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
/*
- * suspend support
- */
- int (*resume)(struct scsi_device *);
- int (*suspend)(struct scsi_device *, pm_message_t state);
-
- /*
* Name of proc directory
*/
char *proc_name;
@@ -677,6 +671,10 @@ struct Scsi_Host {
#define shost_printk(prefix, shost, fmt, a...) \
dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)
+static inline void *shost_priv(struct Scsi_Host *shost)
+{
+ return (void *)shost->hostdata;
+}
int scsi_is_host_device(const struct device *);
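shost_priv() is a small convenience for the common pattern of keeping per-host driver state in the hostdata[] area allocated by scsi_host_alloc(). A hedged sketch, with struct my_hba and my_alloc_host() as hypothetical names:

#include <linux/spinlock.h>
#include <scsi/scsi_host.h>

struct my_hba {                         /* hypothetical per-host state */
        spinlock_t lock;
        unsigned int active_cmds;
};

static struct Scsi_Host *my_alloc_host(struct scsi_host_template *tmpl)
{
        struct Scsi_Host *shost;
        struct my_hba *hba;

        /* hostdata[] is sized by the second argument, so shost_priv()
         * returns the same area without open-coded casts. */
        shost = scsi_host_alloc(tmpl, sizeof(*hba));
        if (!shost)
                return NULL;

        hba = shost_priv(shost);
        spin_lock_init(&hba->lock);
        hba->active_cmds = 0;
        return shost;
}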
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 1e797308640..a0d80bcaa93 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -1,4 +1,4 @@
-/*
+/*
* FiberChannel transport specific attributes exported to sysfs.
*
* Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved.
@@ -19,7 +19,7 @@
*
* ========
*
- * Copyright (C) 2004-2005 James Smart, Emulex Corporation
+ * Copyright (C) 2004-2007 James Smart, Emulex Corporation
* Rewrite for host, target, device, and remote port attributes,
* statistics, and service functions...
*
@@ -62,8 +62,10 @@ enum fc_port_type {
FC_PORTTYPE_NLPORT, /* (Public) Loop w/ FLPort */
FC_PORTTYPE_LPORT, /* (Private) Loop w/o FLPort */
FC_PORTTYPE_PTP, /* Point to Point w/ another NPort */
+ FC_PORTTYPE_NPIV, /* VPORT based on NPIV */
};
+
/*
* fc_port_state: If you alter this, you also need to alter scsi_transport_fc.c
* (for the ascii descriptions).
@@ -83,7 +85,26 @@ enum fc_port_state {
};
-/*
+/*
+ * fc_vport_state: If you alter this, you also need to alter
+ * scsi_transport_fc.c (for the ascii descriptions).
+ */
+enum fc_vport_state {
+ FC_VPORT_UNKNOWN,
+ FC_VPORT_ACTIVE,
+ FC_VPORT_DISABLED,
+ FC_VPORT_LINKDOWN,
+ FC_VPORT_INITIALIZING,
+ FC_VPORT_NO_FABRIC_SUPP,
+ FC_VPORT_NO_FABRIC_RSCS,
+ FC_VPORT_FABRIC_LOGOUT,
+ FC_VPORT_FABRIC_REJ_WWN,
+ FC_VPORT_FAILED,
+};
+
+
+
+/*
* FC Classes of Service
* Note: values are not enumerated, as they can be "or'd" together
* for reporting (e.g. report supported_classes). If you alter this list,
@@ -96,7 +117,7 @@ enum fc_port_state {
#define FC_COS_CLASS4 0x10
#define FC_COS_CLASS6 0x40
-/*
+/*
* FC Port Speeds
* Note: values are not enumerated, as they can be "or'd" together
* for reporting (e.g. report supported_speeds). If you alter this list,
@@ -124,16 +145,114 @@ enum fc_tgtid_binding_type {
};
/*
- * FC Remote Port Roles
+ * FC Port Roles
* Note: values are not enumerated, as they can be "or'd" together
* for reporting (e.g. report roles). If you alter this list,
* you also need to alter scsi_transport_fc.c (for the ascii descriptions).
*/
-#define FC_RPORT_ROLE_UNKNOWN 0x00
-#define FC_RPORT_ROLE_FCP_TARGET 0x01
-#define FC_RPORT_ROLE_FCP_INITIATOR 0x02
-#define FC_RPORT_ROLE_IP_PORT 0x04
+#define FC_PORT_ROLE_UNKNOWN 0x00
+#define FC_PORT_ROLE_FCP_TARGET 0x01
+#define FC_PORT_ROLE_FCP_INITIATOR 0x02
+#define FC_PORT_ROLE_IP_PORT 0x04
+
+/* The following are for compatibility */
+#define FC_RPORT_ROLE_UNKNOWN FC_PORT_ROLE_UNKNOWN
+#define FC_RPORT_ROLE_FCP_TARGET FC_PORT_ROLE_FCP_TARGET
+#define FC_RPORT_ROLE_FCP_INITIATOR FC_PORT_ROLE_FCP_INITIATOR
+#define FC_RPORT_ROLE_IP_PORT FC_PORT_ROLE_IP_PORT
+
+
+/* Macro for use in defining Virtual Port attributes */
+#define FC_VPORT_ATTR(_name,_mode,_show,_store) \
+struct class_device_attribute class_device_attr_vport_##_name = \
+ __ATTR(_name,_mode,_show,_store)
+
+
+/*
+ * FC Virtual Port Attributes
+ *
+ * This structure exists for each FC port that is a virtual FC port. Virtual
+ * ports share the physical link with the physical port. Each virtual
+ * port has a unique presence on the SAN, and may be instantiated via
+ * NPIV, Virtual Fabrics, or via additional ALPAs. As the vport is a
+ * unique presence, each vport has its own view of the fabric,
+ * authentication privilege, and priorities.
+ *
+ * A virtual port may support one or more FC4 roles. Typically it is an
+ * FCP Initiator. It could be an FCP Target, or exist solely for an IP over FC
+ * role. FC port attributes for the vport will be reported on any
+ * fc_host class object allocated for an FCP Initiator.
+ *
+ * --
+ *
+ * Fixed attributes are not expected to change. The driver is
+ * expected to set these values after receiving the fc_vport structure
+ * via the vport_create() call from the transport.
+ * The transport fully manages all get functions w/o driver interaction.
+ *
+ * Dynamic attributes are expected to change. The driver participates
+ * in all get/set operations via functions provided by the driver.
+ *
+ * Private attributes are transport-managed values. They are fully
+ * managed by the transport w/o driver interaction.
+ */
+#define FC_VPORT_SYMBOLIC_NAMELEN 64
+struct fc_vport {
+ /* Fixed Attributes */
+
+ /* Dynamic Attributes */
+
+ /* Private (Transport-managed) Attributes */
+ enum fc_vport_state vport_state;
+ enum fc_vport_state vport_last_state;
+ u64 node_name;
+ u64 port_name;
+ u32 roles;
+ u32 vport_id; /* Admin Identifier for the vport */
+ enum fc_port_type vport_type;
+ char symbolic_name[FC_VPORT_SYMBOLIC_NAMELEN];
+
+ /* exported data */
+ void *dd_data; /* Used for driver-specific storage */
+
+ /* internal data */
+ struct Scsi_Host *shost; /* Physical Port Parent */
+ unsigned int channel;
+ u32 number;
+ u8 flags;
+ struct list_head peers;
+ struct device dev;
+ struct work_struct vport_delete_work;
+} __attribute__((aligned(sizeof(unsigned long))));
+
+/* bit field values for struct fc_vport "flags" field: */
+#define FC_VPORT_CREATING 0x01
+#define FC_VPORT_DELETING 0x02
+#define FC_VPORT_DELETED 0x04
+#define FC_VPORT_DEL 0x06 /* Any DELETE state */
+
+#define dev_to_vport(d) \
+ container_of(d, struct fc_vport, dev)
+#define transport_class_to_vport(classdev) \
+ dev_to_vport(classdev->dev)
+#define vport_to_shost(v) \
+ (v->shost)
+#define vport_to_shost_channel(v) \
+ (v->channel)
+#define vport_to_parent(v) \
+ (v->dev.parent)
+
+
+/* Error return codes for vport_create() callback */
+#define VPCERR_UNSUPPORTED -ENOSYS /* no driver/adapter
+ support */
+#define VPCERR_BAD_WWN -ENOTUNIQ /* driver validation
+ of WWNs failed */
+#define VPCERR_NO_FABRIC_SUPP -EOPNOTSUPP /* Fabric connection
+ is loop or the
+ Fabric Port does
+ not support NPIV */
/*
* fc_rport_identifiers: This set of data contains all elements
@@ -149,6 +268,7 @@ struct fc_rport_identifiers {
u32 roles;
};
+
/* Macro for use in defining Remote Port attributes */
#define FC_RPORT_ATTR(_name,_mode,_show,_store) \
struct class_device_attribute class_device_attr_rport_##_name = \
@@ -278,7 +398,7 @@ struct fc_host_statistics {
u64 prim_seq_protocol_err_count;
u64 invalid_tx_word_count;
u64 invalid_crc_count;
-
+
/* fc4 statistics (only FCP supported currently) */
u64 fcp_input_requests;
u64 fcp_output_requests;
@@ -343,6 +463,7 @@ struct fc_host_attrs {
u8 supported_fc4s[FC_FC4_LIST_SIZE];
u32 supported_speeds;
u32 maxframe_size;
+ u16 max_npiv_vports;
char serial_number[FC_SERIAL_NUMBER_SIZE];
/* Dynamic Attributes */
@@ -361,8 +482,11 @@ struct fc_host_attrs {
/* internal data */
struct list_head rports;
struct list_head rport_bindings;
+ struct list_head vports;
u32 next_rport_number;
u32 next_target_id;
+ u32 next_vport_number;
+ u16 npiv_vports_inuse;
/* work queues for rport state manipulation */
char work_q_name[KOBJ_NAME_LEN];
@@ -388,6 +512,8 @@ struct fc_host_attrs {
(((struct fc_host_attrs *)(x)->shost_data)->supported_speeds)
#define fc_host_maxframe_size(x) \
(((struct fc_host_attrs *)(x)->shost_data)->maxframe_size)
+#define fc_host_max_npiv_vports(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->max_npiv_vports)
#define fc_host_serial_number(x) \
(((struct fc_host_attrs *)(x)->shost_data)->serial_number)
#define fc_host_port_id(x) \
@@ -412,10 +538,16 @@ struct fc_host_attrs {
(((struct fc_host_attrs *)(x)->shost_data)->rports)
#define fc_host_rport_bindings(x) \
(((struct fc_host_attrs *)(x)->shost_data)->rport_bindings)
+#define fc_host_vports(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->vports)
#define fc_host_next_rport_number(x) \
(((struct fc_host_attrs *)(x)->shost_data)->next_rport_number)
#define fc_host_next_target_id(x) \
(((struct fc_host_attrs *)(x)->shost_data)->next_target_id)
+#define fc_host_next_vport_number(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->next_vport_number)
+#define fc_host_npiv_vports_inuse(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->npiv_vports_inuse)
#define fc_host_work_q_name(x) \
(((struct fc_host_attrs *)(x)->shost_data)->work_q_name)
#define fc_host_work_q(x) \
@@ -452,14 +584,20 @@ struct fc_function_template {
void (*dev_loss_tmo_callbk)(struct fc_rport *);
void (*terminate_rport_io)(struct fc_rport *);
+ void (*set_vport_symbolic_name)(struct fc_vport *);
+ int (*vport_create)(struct fc_vport *, bool);
+ int (*vport_disable)(struct fc_vport *, bool);
+ int (*vport_delete)(struct fc_vport *);
+
/* allocation lengths for host-specific data */
u32 dd_fcrport_size;
+ u32 dd_fcvport_size;
- /*
+ /*
* The driver sets these to tell the transport class it
* wants the attributes displayed in sysfs. If the show_ flag
* is not set, the attribute will be private to the transport
- * class
+ * class
*/
/* remote port fixed attributes */
@@ -512,7 +650,7 @@ fc_remote_port_chkready(struct fc_rport *rport)
switch (rport->port_state) {
case FC_PORTSTATE_ONLINE:
- if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
+ if (rport->roles & FC_PORT_ROLE_FCP_TARGET)
result = 0;
else if (rport->flags & FC_RPORT_DEVLOSS_PENDING)
result = DID_IMM_RETRY << 16;
@@ -549,6 +687,27 @@ static inline void u64_to_wwn(u64 inm, u8 *wwn)
wwn[7] = inm & 0xff;
}
+/**
+ * fc_vport_set_state() - called to set a vport's state. Saves the old state,
+ * excepting the transitory states of initializing and sending the ELS
+ * traffic to instantiate the vport on the link.
+ *
+ * Assumes the driver has surrounded this with the proper locking to ensure
+ * a coherent state change.
+ *
+ * @vport: virtual port whose state is changing
+ * @new_state: new state
+ **/
+static inline void
+fc_vport_set_state(struct fc_vport *vport, enum fc_vport_state new_state)
+{
+ if ((new_state != FC_VPORT_UNKNOWN) &&
+ (new_state != FC_VPORT_INITIALIZING))
+ vport->vport_last_state = vport->vport_state;
+ vport->vport_state = new_state;
+}
+
+
struct scsi_transport_template *fc_attach_transport(
struct fc_function_template *);
void fc_release_transport(struct scsi_transport_template *);
@@ -567,5 +726,6 @@ void fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
* be sure to read the Vendor Type and ID formatting requirements
* specified in scsi_netlink.h
*/
+int fc_vport_terminate(struct fc_vport *vport);
#endif /* SCSI_TRANSPORT_FC_H */
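The NPIV additions give the FC transport a struct fc_vport plus create/disable/delete hooks in fc_function_template; the driver fills the fixed attributes it owns and reports progress through fc_vport_set_state(). A hedged sketch of how an LLD might wire this up; the my_* names are hypothetical, and treating the bool argument to vport_create() as "create in a disabled state" is an assumption.

#include <linux/types.h>
#include <scsi/scsi_transport_fc.h>

struct my_vport_priv {                  /* sized via dd_fcvport_size */
        u64 wwpn;
};

static int my_hw_login_vport(struct fc_vport *vport);          /* hypothetical */
static int my_vport_delete(struct fc_vport *vport);            /* hypothetical */
static int my_vport_disable(struct fc_vport *vport, bool disable);

static int my_vport_create(struct fc_vport *vport, bool disable)
{
        struct my_vport_priv *vp = vport->dd_data;

        vp->wwpn = vport->port_name;

        if (disable) {                  /* assumed: start out disabled */
                fc_vport_set_state(vport, FC_VPORT_DISABLED);
                return 0;
        }

        fc_vport_set_state(vport, FC_VPORT_INITIALIZING);
        if (my_hw_login_vport(vport))   /* e.g. FDISC to the fabric */
                return VPCERR_NO_FABRIC_SUPP;

        fc_vport_set_state(vport, FC_VPORT_ACTIVE);
        return 0;
}

static struct fc_function_template my_fc_template = {
        .vport_create    = my_vport_create,
        .vport_disable   = my_vport_disable,
        .vport_delete    = my_vport_delete,
        .dd_fcvport_size = sizeof(struct my_vport_priv),
};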
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index d5c218ddc52..706c0cd36c1 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -79,7 +79,8 @@ struct iscsi_transport {
char *name;
unsigned int caps;
/* LLD sets this to indicate what values it can export to sysfs */
- unsigned int param_mask;
+ uint64_t param_mask;
+ uint64_t host_param_mask;
struct scsi_host_template *host_template;
/* LLD connection data size */
int conndata_size;
@@ -89,7 +90,8 @@ struct iscsi_transport {
unsigned int max_conn;
unsigned int max_cmd_len;
struct iscsi_cls_session *(*create_session) (struct iscsi_transport *it,
- struct scsi_transport_template *t, uint32_t sn, uint32_t *hn);
+ struct scsi_transport_template *t, uint16_t, uint16_t,
+ uint32_t sn, uint32_t *hn);
void (*destroy_session) (struct iscsi_cls_session *session);
struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
uint32_t cid);
@@ -105,14 +107,18 @@ struct iscsi_transport {
enum iscsi_param param, char *buf);
int (*get_session_param) (struct iscsi_cls_session *session,
enum iscsi_param param, char *buf);
+ int (*get_host_param) (struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf);
+ int (*set_host_param) (struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf,
+ int buflen);
int (*send_pdu) (struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
char *data, uint32_t data_size);
void (*get_stats) (struct iscsi_cls_conn *conn,
struct iscsi_stats *stats);
void (*init_cmd_task) (struct iscsi_cmd_task *ctask);
void (*init_mgmt_task) (struct iscsi_conn *conn,
- struct iscsi_mgmt_task *mtask,
- char *data, uint32_t data_size);
+ struct iscsi_mgmt_task *mtask);
int (*xmit_cmd_task) (struct iscsi_conn *conn,
struct iscsi_cmd_task *ctask);
void (*cleanup_cmd_task) (struct iscsi_conn *conn,
@@ -124,7 +130,7 @@ struct iscsi_transport {
uint64_t *ep_handle);
int (*ep_poll) (uint64_t ep_handle, int timeout_ms);
void (*ep_disconnect) (uint64_t ep_handle);
- int (*tgt_dscvr) (enum iscsi_tgt_dscvr type, uint32_t host_no,
+ int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
uint32_t enable, struct sockaddr *dst_addr);
};
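Correspondingly, an iscsi_transport now advertises which host-level parameters it exports via host_param_mask and serves them through the new get_host_param/set_host_param hooks. A hedged registration fragment, with my_* names hypothetical and only the fields touched by this patch filled in:

#include <scsi/iscsi_if.h>
#include <scsi/scsi_transport_iscsi.h>

static int my_iscsi_get_host_param(struct Scsi_Host *shost,
                                   enum iscsi_host_param param, char *buf);
static int my_iscsi_set_host_param(struct Scsi_Host *shost,
                                   enum iscsi_host_param param, char *buf,
                                   int buflen);

static struct iscsi_transport my_iscsi_transport = {
        .name            = "my_iscsi",
        /* session/connection parameters exported to sysfs */
        .param_mask      = ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS |
                           ISCSI_USERNAME | ISCSI_PASSWORD,
        /* new: host-level parameters exported to sysfs */
        .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME |
                           ISCSI_HOST_INITIATOR_NAME,
        .get_host_param  = my_iscsi_get_host_param,
        .set_host_param  = my_iscsi_set_host_param,
};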