-rw-r--r-- Makefile | 2
-rw-r--r-- arch/alpha/kernel/sys_dp264.c | 41
-rw-r--r-- drivers/message/fusion/Kconfig | 17
-rw-r--r-- drivers/message/fusion/Makefile | 1
-rw-r--r-- drivers/message/fusion/mptbase.c | 963
-rw-r--r-- drivers/message/fusion/mptbase.h | 56
-rw-r--r-- drivers/message/fusion/mptctl.c | 4
-rw-r--r-- drivers/message/fusion/mptfc.c | 2
-rw-r--r-- drivers/message/fusion/mptlan.c | 7
-rw-r--r-- drivers/message/fusion/mptsas.c | 1235
-rw-r--r-- drivers/message/fusion/mptscsih.c | 463
-rw-r--r-- drivers/message/fusion/mptscsih.h | 7
-rw-r--r-- drivers/message/fusion/mptspi.c | 2
-rw-r--r-- drivers/net/wan/hdlc_cisco.c | 2
-rw-r--r-- drivers/s390/scsi/Makefile | 2
-rw-r--r-- drivers/s390/scsi/zfcp_aux.c | 184
-rw-r--r-- drivers/s390/scsi/zfcp_ccw.c | 10
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.c | 995
-rw-r--r-- drivers/s390/scsi/zfcp_def.h | 307
-rw-r--r-- drivers/s390/scsi/zfcp_erp.c | 135
-rw-r--r-- drivers/s390/scsi/zfcp_ext.h | 30
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 769
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.h | 54
-rw-r--r-- drivers/s390/scsi/zfcp_qdio.c | 30
-rw-r--r-- drivers/s390/scsi/zfcp_scsi.c | 297
-rw-r--r-- drivers/s390/scsi/zfcp_sysfs_adapter.c | 14
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_osm.c | 9
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_osm.h | 2
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | 8
-rw-r--r-- drivers/scsi/atp870u.c | 6
-rw-r--r-- drivers/scsi/atp870u.h | 5
-rw-r--r-- drivers/scsi/fd_mcs.c | 2
-rw-r--r-- drivers/scsi/hosts.c | 35
-rw-r--r-- drivers/scsi/ibmmca.c | 2
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvscsi.c | 10
-rw-r--r-- drivers/scsi/scsi.c | 5
-rw-r--r-- drivers/scsi/scsi_devinfo.c | 1
-rw-r--r-- drivers/scsi/scsi_error.c | 78
-rw-r--r-- drivers/scsi/scsi_ioctl.c | 2
-rw-r--r-- drivers/scsi/scsi_lib.c | 12
-rw-r--r-- drivers/scsi/scsi_scan.c | 20
-rw-r--r-- drivers/scsi/scsi_sysfs.c | 17
-rw-r--r-- drivers/scsi/sd.c | 1
-rw-r--r-- drivers/scsi/sg.c | 2
-rw-r--r-- drivers/scsi/sr.c | 1
-rw-r--r-- drivers/scsi/st.c | 1
-rw-r--r-- drivers/video/Kconfig | 1
-rw-r--r-- drivers/video/fbcvt.c | 8
-rw-r--r-- drivers/video/nvidia/nvidia.c | 5
-rw-r--r-- fs/dcache.c | 3
-rw-r--r-- fs/ntfs/ChangeLog | 2
-rw-r--r-- fs/ntfs/aops.c | 122
-rw-r--r-- fs/ntfs/inode.c | 9
-rw-r--r-- fs/ntfs/malloc.h | 2
-rw-r--r-- fs/ntfs/runlist.c | 169
-rw-r--r-- include/linux/if_vlan.h | 8
-rw-r--r-- include/linux/netfilter_ipv4/ip_conntrack.h | 14
-rw-r--r-- include/linux/netfilter_ipv4/ip_conntrack_pptp.h | 332
-rw-r--r-- include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h | 114
-rw-r--r-- include/linux/netfilter_ipv4/ip_conntrack_tuple.h | 7
-rw-r--r-- include/linux/netfilter_ipv4/ip_nat_pptp.h | 11
-rw-r--r-- include/linux/netfilter_ipv6/ip6_tables.h | 3
-rw-r--r-- include/scsi/scsi_host.h | 11
-rw-r--r-- include/scsi/scsi_transport_fc.h | 4
-rw-r--r-- net/8021q/vlan_dev.c | 2
-rw-r--r-- net/ipv4/fib_trie.c | 34
-rw-r--r-- net/ipv4/netfilter/Kconfig | 22
-rw-r--r-- net/ipv4/netfilter/Makefile | 5
-rw-r--r-- net/ipv4/netfilter/ip_conntrack_core.c | 2
-rw-r--r-- net/ipv4/netfilter/ip_conntrack_helper_pptp.c | 805
-rw-r--r-- net/ipv4/netfilter/ip_conntrack_netlink.c | 4
-rw-r--r-- net/ipv4/netfilter/ip_conntrack_proto_gre.c | 327
-rw-r--r-- net/ipv4/netfilter/ip_conntrack_standalone.c | 4
-rw-r--r-- net/ipv4/netfilter/ip_nat_core.c | 2
-rw-r--r-- net/ipv4/netfilter/ip_nat_helper_pptp.c | 401
-rw-r--r-- net/ipv4/netfilter/ip_nat_proto_gre.c | 214
-rw-r--r-- net/ipv4/raw.c | 2
-rw-r--r-- net/ipv4/tcp_output.c | 10
-rw-r--r-- net/ipv6/netfilter/ip6_tables.c | 52
-rw-r--r-- net/ipv6/netfilter/ip6t_ah.c | 81
-rw-r--r-- net/ipv6/netfilter/ip6t_dst.c | 88
-rw-r--r-- net/ipv6/netfilter/ip6t_esp.c | 73
-rw-r--r-- net/ipv6/netfilter/ip6t_frag.c | 90
-rw-r--r-- net/ipv6/netfilter/ip6t_hbh.c | 88
-rw-r--r-- net/ipv6/netfilter/ip6t_rt.c | 83
-rw-r--r-- net/ipv6/raw.c | 2
86 files changed, 6835 insertions, 2229 deletions
diff --git a/Makefile b/Makefile
index 4e0d7c68d22..8cf6becf68d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 14
-EXTRAVERSION =-rc1
+EXTRAVERSION =-rc2
NAME=Affluent Albatross
# *DOCUMENTATION*
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 9e36b07fa94..d5da6b1b28e 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -395,6 +395,22 @@ clipper_init_irq(void)
*/
static int __init
+isa_irq_fixup(struct pci_dev *dev, int irq)
+{
+ u8 irq8;
+
+ if (irq > 0)
+ return irq;
+
+ /* This interrupt is routed via ISA bridge, so we'll
+ just have to trust whatever value the console might
+ have assigned. */
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq8);
+
+ return irq8 & 0xf;
+}
+
+static int __init
dp264_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[6][5] __initdata = {
@@ -407,25 +423,13 @@ dp264_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{ 16+ 3, 16+ 3, 16+ 2, 16+ 1, 16+ 0} /* IdSel 10 slot 3 */
};
const long min_idsel = 5, max_idsel = 10, irqs_per_slot = 5;
-
struct pci_controller *hose = dev->sysdata;
int irq = COMMON_TABLE_LOOKUP;
- if (irq > 0) {
+ if (irq > 0)
irq += 16 * hose->index;
- } else {
- /* ??? The Contaq IDE controller on the ISA bridge uses
- "legacy" interrupts 14 and 15. I don't know if anything
- can wind up at the same slot+pin on hose1, so we'll
- just have to trust whatever value the console might
- have assigned. */
-
- u8 irq8;
- pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq8);
- irq = irq8;
- }
- return irq;
+ return isa_irq_fixup(dev, irq);
}
static int __init
@@ -453,7 +457,8 @@ monet_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{ 24, 24, 25, 26, 27} /* IdSel 15 slot 5 PCI2*/
};
const long min_idsel = 3, max_idsel = 15, irqs_per_slot = 5;
- return COMMON_TABLE_LOOKUP;
+
+ return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP);
}
static u8 __init
@@ -507,7 +512,8 @@ webbrick_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{ 47, 47, 46, 45, 44}, /* IdSel 17 slot 3 */
};
const long min_idsel = 7, max_idsel = 17, irqs_per_slot = 5;
- return COMMON_TABLE_LOOKUP;
+
+ return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP);
}
static int __init
@@ -524,14 +530,13 @@ clipper_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{ -1, -1, -1, -1, -1} /* IdSel 7 ISA Bridge */
};
const long min_idsel = 1, max_idsel = 7, irqs_per_slot = 5;
-
struct pci_controller *hose = dev->sysdata;
int irq = COMMON_TABLE_LOOKUP;
if (irq > 0)
irq += 16 * hose->index;
- return irq;
+ return isa_irq_fixup(dev, irq);
}
static void __init
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig
index 33f209a39cb..1883d22cffe 100644
--- a/drivers/message/fusion/Kconfig
+++ b/drivers/message/fusion/Kconfig
@@ -35,6 +35,23 @@ config FUSION_FC
LSIFC929X
LSIFC929XL
+config FUSION_SAS
+ tristate "Fusion MPT ScsiHost drivers for SAS"
+ depends on PCI && SCSI
+ select FUSION
+ select SCSI_SAS_ATTRS
+ ---help---
+ SCSI HOST support for SAS host adapters.
+
+ List of supported controllers:
+
+ LSISAS1064
+ LSISAS1066
+ LSISAS1068
+ LSISAS1064E
+ LSISAS1066E
+ LSISAS1068E
+
config FUSION_MAX_SGE
int "Maximum number of scatter gather entries (16 - 128)"
depends on FUSION
diff --git a/drivers/message/fusion/Makefile b/drivers/message/fusion/Makefile
index 1d2f9db813c..8a2e2657f4c 100644
--- a/drivers/message/fusion/Makefile
+++ b/drivers/message/fusion/Makefile
@@ -34,5 +34,6 @@
obj-$(CONFIG_FUSION_SPI) += mptbase.o mptscsih.o mptspi.o
obj-$(CONFIG_FUSION_FC) += mptbase.o mptscsih.o mptfc.o
+obj-$(CONFIG_FUSION_SAS) += mptbase.o mptscsih.o mptsas.o
obj-$(CONFIG_FUSION_CTL) += mptctl.o
obj-$(CONFIG_FUSION_LAN) += mptlan.o
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index f517d0692d5..790a2932ded 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -135,13 +135,12 @@ static void mpt_adapter_dispose(MPT_ADAPTER *ioc);
static void MptDisplayIocCapabilities(MPT_ADAPTER *ioc);
static int MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag);
-//static u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
static int GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason);
static int GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
static int SendIocInit(MPT_ADAPTER *ioc, int sleepFlag);
static int SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
static int mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag);
-static int mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag);
+static int mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag);
static int mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
static int KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag);
@@ -152,6 +151,7 @@ static int WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
static int GetLanConfigPages(MPT_ADAPTER *ioc);
static int GetFcPortPage0(MPT_ADAPTER *ioc, int portnum);
static int GetIoUnitPage2(MPT_ADAPTER *ioc);
+int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
@@ -159,6 +159,8 @@ static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
static void mpt_timer_expired(unsigned long data);
static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch);
static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
+static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
+static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);
#ifdef CONFIG_PROC_FS
static int procmpt_summary_read(char *buf, char **start, off_t offset,
@@ -175,6 +177,7 @@ static int ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *
static void mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
static void mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info);
+static void mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info);
/* module entry point */
static int __init fusion_init (void);
@@ -206,6 +209,144 @@ pci_enable_io_access(struct pci_dev *pdev)
pci_write_config_word(pdev, PCI_COMMAND, command_reg);
}
+/*
+ * Process turbo (context) reply...
+ */
+static void
+mpt_turbo_reply(MPT_ADAPTER *ioc, u32 pa)
+{
+ MPT_FRAME_HDR *mf = NULL;
+ MPT_FRAME_HDR *mr = NULL;
+ int req_idx = 0;
+ int cb_idx;
+
+ dmfprintk((MYIOC_s_INFO_FMT "Got TURBO reply req_idx=%08x\n",
+ ioc->name, pa));
+
+ switch (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT) {
+ case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
+ req_idx = pa & 0x0000FFFF;
+ cb_idx = (pa & 0x00FF0000) >> 16;
+ mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
+ break;
+ case MPI_CONTEXT_REPLY_TYPE_LAN:
+ cb_idx = mpt_lan_index;
+ /*
+ * Blind set of mf to NULL here was fatal
+ * after lan_reply says "freeme"
+ * Fix sort of combined with an optimization here;
+ * added explicit check for case where lan_reply
+ * was just returning 1 and doing nothing else.
+ * For this case skip the callback, but set up
+ * proper mf value first here:-)
+ */
+ if ((pa & 0x58000000) == 0x58000000) {
+ req_idx = pa & 0x0000FFFF;
+ mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
+ mpt_free_msg_frame(ioc, mf);
+ mb();
+ return;
+ break;
+ }
+ mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
+ break;
+ case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
+ cb_idx = mpt_stm_index;
+ mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
+ break;
+ default:
+ cb_idx = 0;
+ BUG();
+ }
+
+ /* Check for (valid) IO callback! */
+ if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
+ MptCallbacks[cb_idx] == NULL) {
+ printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
+ __FUNCTION__, ioc->name, cb_idx);
+ goto out;
+ }
+
+ if (MptCallbacks[cb_idx](ioc, mf, mr))
+ mpt_free_msg_frame(ioc, mf);
+ out:
+ mb();
+}
+
+static void
+mpt_reply(MPT_ADAPTER *ioc, u32 pa)
+{
+ MPT_FRAME_HDR *mf;
+ MPT_FRAME_HDR *mr;
+ int req_idx;
+ int cb_idx;
+ int freeme;
+
+ u32 reply_dma_low;
+ u16 ioc_stat;
+
+ /* non-TURBO reply! Hmmm, something may be up...
+ * Newest turbo reply mechanism; get address
+ * via left shift 1 (get rid of MPI_ADDRESS_REPLY_A_BIT)!
+ */
+
+ /* Map DMA address of reply header to cpu address.
+ * pa is 32 bits - but the dma address may be 32 or 64 bits
+ * get offset based only on the low addresses
+ */
+
+ reply_dma_low = (pa <<= 1);
+ mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
+ (reply_dma_low - ioc->reply_frames_low_dma));
+
+ req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
+ cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
+ mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
+
+ dmfprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
+ ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
+ DBG_DUMP_REPLY_FRAME(mr)
+
+ /* Check/log IOC log info
+ */
+ ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
+ if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
+ u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
+ if (ioc->bus_type == FC)
+ mpt_fc_log_info(ioc, log_info);
+ else if (ioc->bus_type == SCSI)
+ mpt_sp_log_info(ioc, log_info);
+ else if (ioc->bus_type == SAS)
+ mpt_sas_log_info(ioc, log_info);
+ }
+ if (ioc_stat & MPI_IOCSTATUS_MASK) {
+ if (ioc->bus_type == SCSI &&
+ cb_idx != mpt_stm_index &&
+ cb_idx != mpt_lan_index)
+ mpt_sp_ioc_info(ioc, (u32)ioc_stat, mf);
+ }
+
+
+ /* Check for (valid) IO callback! */
+ if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
+ MptCallbacks[cb_idx] == NULL) {
+ printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
+ __FUNCTION__, ioc->name, cb_idx);
+ freeme = 0;
+ goto out;
+ }
+
+ freeme = MptCallbacks[cb_idx](ioc, mf, mr);
+
+ out:
+ /* Flush (non-TURBO) reply with a WRITE! */
+ CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);
+
+ if (freeme)
+ mpt_free_msg_frame(ioc, mf);
+ mb();
+}
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mpt_interrupt - MPT adapter (IOC) specific interrupt handler.
@@ -227,164 +368,21 @@ pci_enable_io_access(struct pci_dev *pdev)
static irqreturn_t
mpt_interrupt(int irq, void *bus_id, struct pt_regs *r)
{
- MPT_ADAPTER *ioc;
- MPT_FRAME_HDR *mf;
- MPT_FRAME_HDR *mr;
- u32 pa;
- int req_idx;
- int cb_idx;
- int type;
- int freeme;
-
- ioc = (MPT_ADAPTER *)bus_id;
+ MPT_ADAPTER *ioc = bus_id;
+ u32 pa;
/*
* Drain the reply FIFO!
- *
- * NOTES: I've seen up to 10 replies processed in this loop, so far...
- * Update: I've seen up to 9182 replies processed in this loop! ??
- * Update: Limit ourselves to processing max of N replies
- * (bottom of loop).
*/
while (1) {
-
- if ((pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo)) == 0xFFFFFFFF)
+ pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
+ if (pa == 0xFFFFFFFF)
return IRQ_HANDLED;
-
- cb_idx = 0;
- freeme = 0;
-
- /*
- * Check for non-TURBO reply!
- */
- if (pa & MPI_ADDRESS_REPLY_A_BIT) {
- u32 reply_dma_low;
- u16 ioc_stat;
-
- /* non-TURBO reply! Hmmm, something may be up...
- * Newest turbo reply mechanism; get address
- * via left shift 1 (get rid of MPI_ADDRESS_REPLY_A_BIT)!
- */
-
- /* Map DMA address of reply header to cpu address.
- * pa is 32 bits - but the dma address may be 32 or 64 bits
- * get offset based only only the low addresses
- */
- reply_dma_low = (pa = (pa << 1));
- mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
- (reply_dma_low - ioc->reply_frames_low_dma));
-
- req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
- cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
- mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
-
- dmfprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
- ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
- DBG_DUMP_REPLY_FRAME(mr)
-
- /* Check/log IOC log info
- */
- ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
- if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
- u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
- if (ioc->bus_type == FC)
- mpt_fc_log_info(ioc, log_info);
- else if (ioc->bus_type == SCSI)
- mpt_sp_log_info(ioc, log_info);
- }
- if (ioc_stat & MPI_IOCSTATUS_MASK) {
- if (ioc->bus_type == SCSI)
- mpt_sp_ioc_info(ioc, (u32)ioc_stat, mf);
- }
- } else {
- /*
- * Process turbo (context) reply...
- */
- dmfprintk((MYIOC_s_INFO_FMT "Got TURBO reply req_idx=%08x\n", ioc->name, pa));
- type = (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT);
- if (type == MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET) {
- cb_idx = mpt_stm_index;
- mf = NULL;
- mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
- } else if (type == MPI_CONTEXT_REPLY_TYPE_LAN) {
- cb_idx = mpt_lan_index;
- /* Blind set of mf to NULL here was fatal
- * after lan_reply says "freeme"
- * Fix sort of combined with an optimization here;
- * added explicit check for case where lan_reply
- * was just returning 1 and doing nothing else.
- * For this case skip the callback, but set up
- * proper mf value first here:-)
- */
- if ((pa & 0x58000000) == 0x58000000) {
- req_idx = pa & 0x0000FFFF;
- mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
- freeme = 1;
- /*
- * IMPORTANT! Invalidate the callback!
- */
- cb_idx = 0;
- } else {
- mf = NULL;
- }
- mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
- } else {
- req_idx = pa & 0x0000FFFF;
- cb_idx = (pa & 0x00FF0000) >> 16;
- mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
- mr = NULL;
- }
- pa = 0; /* No reply flush! */
- }
-
-#ifdef MPT_DEBUG_IRQ
- if (ioc->bus_type == SCSI) {
- /* Verify mf, mr are reasonable.
- */
- if ((mf) && ((mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))
- || (mf < ioc->req_frames)) ) {
- printk(MYIOC_s_WARN_FMT
- "mpt_interrupt: Invalid mf (%p)!\n", ioc->name, (void *)mf);
- cb_idx = 0;
- pa = 0;
- freeme = 0;
- }
- if ((pa) && (mr) && ((mr >= MPT_INDEX_2_RFPTR(ioc, ioc->req_depth))
- || (mr < ioc->reply_frames)) ) {
- printk(MYIOC_s_WARN_FMT
- "mpt_interrupt: Invalid rf (%p)!\n", ioc->name, (void *)mr);
- cb_idx = 0;
- pa = 0;
- freeme = 0;
- }
- if (cb_idx > (MPT_MAX_PROTOCOL_DRIVERS-1)) {
- printk(MYIOC_s_WARN_FMT
- "mpt_interrupt: Invalid cb_idx (%d)!\n", ioc->name, cb_idx);
- cb_idx = 0;
- pa = 0;
- freeme = 0;
- }
- }
-#endif
-
- /* Check for (valid) IO callback! */
- if (cb_idx) {
- /* Do the callback! */
- freeme = (*(MptCallbacks[cb_idx]))(ioc, mf, mr);
- }
-
- if (pa) {
- /* Flush (non-TURBO) reply with a WRITE! */
- CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);
- }
-
- if (freeme) {
- /* Put Request back on FreeQ! */
- mpt_free_msg_frame(ioc, mf);
- }
-
- mb();
- } /* drain reply FIFO */
+ else if (pa & MPI_ADDRESS_REPLY_A_BIT)
+ mpt_reply(ioc, pa);
+ else
+ mpt_turbo_reply(ioc, pa);
+ }
return IRQ_HANDLED;
}
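
The reworked handler above now just drains the reply FIFO and routes each word: 0xFFFFFFFF means the FIFO is empty, a set MPI_ADDRESS_REPLY_A_BIT means a full address reply handled by mpt_reply(), and anything else is a turbo context reply handled by mpt_turbo_reply(). A small userspace sketch of that dispatch follows; the two bit-position constants are assumed stand-ins for the MPI definitions, not values taken from this patch.

/*
 * Standalone sketch of the reply-FIFO dispatch above.  ADDRESS_REPLY_A_BIT
 * and CONTEXT_REPLY_TYPE_SHIFT are assumptions standing in for the real MPI
 * constants.
 */
#include <stdint.h>
#include <stdio.h>

#define ADDRESS_REPLY_A_BIT      0x80000000u   /* assumed flag bit            */
#define CONTEXT_REPLY_TYPE_SHIFT 29            /* assumed type field position */

static void dispatch(uint32_t pa)
{
	if (pa == 0xFFFFFFFFu) {
		puts("FIFO empty - stop draining");
	} else if (pa & ADDRESS_REPLY_A_BIT) {
		/* Full (non-TURBO) reply: pa << 1 is the low half of the reply
		 * frame's DMA address; mpt_reply() turns it back into a CPU
		 * pointer by subtracting reply_frames_low_dma. */
		printf("address reply, dma_low=0x%08x\n", (unsigned)(pa << 1));
	} else {
		/* TURBO reply: the context word itself carries the indices. */
		printf("turbo reply, type=%u req_idx=0x%04x cb_idx=%u\n",
		       (unsigned)(pa >> CONTEXT_REPLY_TYPE_SHIFT),
		       (unsigned)(pa & 0x0000FFFFu),
		       (unsigned)((pa & 0x00FF0000u) >> 16));
	}
}

int main(void)
{
	dispatch(0x20030005u);	/* sample turbo context word */
	dispatch(0xFFFFFFFFu);	/* "FIFO empty" marker       */
	return 0;
}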
@@ -509,6 +507,14 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
pCfg->wait_done = 1;
wake_up(&mpt_waitq);
}
+ } else if (func == MPI_FUNCTION_SAS_IO_UNIT_CONTROL) {
+ /* we should be always getting a reply frame */
+ memcpy(ioc->persist_reply_frame, reply,
+ min(MPT_DEFAULT_FRAME_SIZE,
+ 4*reply->u.reply.MsgLength));
+ del_timer(&ioc->persist_timer);
+ ioc->persist_wait_done = 1;
+ wake_up(&mpt_waitq);
} else {
printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n",
ioc->name, func);
@@ -750,6 +756,7 @@ mpt_get_msg_frame(int handle, MPT_ADAPTER *ioc)
mf = list_entry(ioc->FreeQ.next, MPT_FRAME_HDR,
u.frame.linkage.list);
list_del(&mf->u.frame.linkage.list);
+ mf->u.frame.linkage.arg1 = 0;
mf->u.frame.hwhdr.msgctxu.fld.cb_idx = handle; /* byte */
req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
/* u16! */
@@ -845,6 +852,7 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
/* Put Request back on FreeQ! */
spin_lock_irqsave(&ioc->FreeQlock, flags);
+ mf->u.frame.linkage.arg1 = 0xdeadbeaf; /* signature to know if this mf is freed */
list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
#ifdef MFCNT
ioc->mfcnt--;
@@ -971,12 +979,123 @@ mpt_send_handshake_request(int handle, MPT_ADAPTER *ioc, int reqBytes, u32 *req,
/* Make sure there are no doorbells */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
-
+
return r;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
+ * mpt_host_page_access_control - provides mechanism for the host
+ * driver to control the IOC's Host Page Buffer access.
+ * @ioc: Pointer to MPT adapter structure
+ * @access_control_value: define bits below
+ *
+ * Access Control Value - bits[15:12]
+ * 0h Reserved
+ * 1h Enable Access { MPI_DB_HPBAC_ENABLE_ACCESS }
+ * 2h Disable Access { MPI_DB_HPBAC_DISABLE_ACCESS }
+ * 3h Free Buffer { MPI_DB_HPBAC_FREE_BUFFER }
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+
+static int
+mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag)
+{
+ int r = 0;
+
+ /* return if in use */
+ if (CHIPREG_READ32(&ioc->chip->Doorbell)
+ & MPI_DOORBELL_ACTIVE)
+ return -1;
+
+ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+
+ CHIPREG_WRITE32(&ioc->chip->Doorbell,
+ ((MPI_FUNCTION_HOST_PAGEBUF_ACCESS_CONTROL
+ <<MPI_DOORBELL_FUNCTION_SHIFT) |
+ (access_control_value<<12)));
+
+ /* Wait for IOC to clear Doorbell Status bit */
+ if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
+ return -2;
+ }else
+ return 0;
+}
+
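
The doorbell write above encodes the whole request in one 32-bit word: the host-page-buffer access-control function code goes into the doorbell's function field and the access value (1h enable, 2h disable, 3h free, per the comment above) into bits [15:12]. A tiny sketch of that composition, using an assumed function code and shift:

/* Composition of the access-control doorbell word.  The function code and
 * the 24-bit function shift are assumptions; only the bits[15:12] access
 * value placement comes from the comment above. */
#include <stdint.h>
#include <stdio.h>

#define DOORBELL_FUNCTION_SHIFT 24	/* assumed */
#define HPBAC_FREE_BUFFER       0x3u	/* "Free Buffer" access value */

static uint32_t doorbell_word(uint32_t function, uint32_t access_value)
{
	return (function << DOORBELL_FUNCTION_SHIFT) | (access_value << 12);
}

int main(void)
{
	printf("doorbell=0x%08x\n",
	       (unsigned)doorbell_word(0x21u /* assumed function code */,
				       HPBAC_FREE_BUFFER));
	return 0;
}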
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_host_page_alloc - allocate system memory for the fw
+ * If we already allocated memory in past, then resend the same pointer.
+ * @ioc: Pointer to MPT adapter structure
+ * @ioc_init: Pointer to ioc init config page
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
+{
+ char *psge;
+ int flags_length;
+ u32 host_page_buffer_sz=0;
+
+ if(!ioc->HostPageBuffer) {
+
+ host_page_buffer_sz =
+ le32_to_cpu(ioc->facts.HostPageBufferSGE.FlagsLength) & 0xFFFFFF;
+
+ if(!host_page_buffer_sz)
+ return 0; /* fw doesn't need any host buffers */
+
+ /* spin till we get enough memory */
+ while(host_page_buffer_sz > 0) {
+
+ if((ioc->HostPageBuffer = pci_alloc_consistent(
+ ioc->pcidev,
+ host_page_buffer_sz,
+ &ioc->HostPageBuffer_dma)) != NULL) {
+
+ dinitprintk((MYIOC_s_INFO_FMT
+ "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
+ ioc->name,
+ ioc->HostPageBuffer,
+ ioc->HostPageBuffer_dma,
+ host_page_buffer_sz));
+ ioc->alloc_total += host_page_buffer_sz;
+ ioc->HostPageBuffer_sz = host_page_buffer_sz;
+ break;
+ }
+
+ host_page_buffer_sz -= (4*1024);
+ }
+ }
+
+ if(!ioc->HostPageBuffer) {
+ printk(MYIOC_s_ERR_FMT
+ "Failed to alloc memory for host_page_buffer!\n",
+ ioc->name);
+ return -999;
+ }
+
+ psge = (char *)&ioc_init->HostPageBufferSGE;
+ flags_length = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI_SGE_FLAGS_SYSTEM_ADDRESS |
+ MPI_SGE_FLAGS_32_BIT_ADDRESSING |
+ MPI_SGE_FLAGS_HOST_TO_IOC |
+ MPI_SGE_FLAGS_END_OF_BUFFER;
+ if (sizeof(dma_addr_t) == sizeof(u64)) {
+ flags_length |= MPI_SGE_FLAGS_64_BIT_ADDRESSING;
+ }
+ flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
+ flags_length |= ioc->HostPageBuffer_sz;
+ mpt_add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
+ ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
+
+return 0;
+}
+
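
mpt_host_page_alloc() above builds the host page buffer's simple SGE as a single FlagsLength word: the flag bits sit above MPI_SGE_FLAGS_SHIFT and the buffer length occupies the low bits, which is also why the routine first recovers the firmware-requested size with the 0xFFFFFF mask. The sketch below shows that packing; the shift and flag encodings are assumed values used only for illustration.

/*
 * Illustration of the FlagsLength packing used above.  The shift and flag
 * encodings are assumptions chosen for the example; only the idea of "flags
 * in the top byte, length in the low 24 bits" mirrors the driver.
 */
#include <stdint.h>
#include <stdio.h>

#define SGE_FLAGS_SHIFT          24
#define SGE_FLAGS_SIMPLE_ELEMENT 0x10u
#define SGE_FLAGS_END_OF_BUFFER  0x40u

static uint32_t pack_flags_length(uint32_t flags, uint32_t len_bytes)
{
	return (flags << SGE_FLAGS_SHIFT) | (len_bytes & 0x00FFFFFFu);
}

int main(void)
{
	uint32_t fl = pack_flags_length(SGE_FLAGS_SIMPLE_ELEMENT |
					SGE_FLAGS_END_OF_BUFFER,
					64 * 1024);	/* 64 KB host buffer */

	printf("FlagsLength=0x%08x -> flags=0x%02x length=%u bytes\n",
	       (unsigned)fl,
	       (unsigned)(fl >> SGE_FLAGS_SHIFT),
	       (unsigned)(fl & 0x00FFFFFFu));
	return 0;
}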
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
* mpt_verify_adapter - Given a unique IOC identifier, set pointer to
* the associated MPT adapter structure.
* @iocid: IOC unique identifier (integer)
@@ -1084,7 +1203,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
/* Initilize SCSI Config Data structure
*/
- memset(&ioc->spi_data, 0, sizeof(ScsiCfgData));
+ memset(&ioc->spi_data, 0, sizeof(SpiCfgData));
/* Initialize the running configQ head.
*/
@@ -1213,6 +1332,33 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->prod_name = "LSI53C1035";
ioc->bus_type = SCSI;
}
+ else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064) {
+ ioc->prod_name = "LSISAS1064";
+ ioc->bus_type = SAS;
+ ioc->errata_flag_1064 = 1;
+ }
+ else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1066) {
+ ioc->prod_name = "LSISAS1066";
+ ioc->bus_type = SAS;
+ ioc->errata_flag_1064 = 1;
+ }
+ else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068) {
+ ioc->prod_name = "LSISAS1068";
+ ioc->bus_type = SAS;
+ ioc->errata_flag_1064 = 1;
+ }
+ else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064E) {
+ ioc->prod_name = "LSISAS1064E";
+ ioc->bus_type = SAS;
+ }
+ else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1066E) {
+ ioc->prod_name = "LSISAS1066E";
+ ioc->bus_type = SAS;
+ }
+ else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068E) {
+ ioc->prod_name = "LSISAS1068E";
+ ioc->bus_type = SAS;
+ }
if (ioc->errata_flag_1064)
pci_disable_io_access(pdev);
@@ -1604,8 +1750,23 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
*/
if (ret == 0) {
rc = mpt_do_upload(ioc, sleepFlag);
- if (rc != 0)
+ if (rc == 0) {
+ if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
+ /*
+ * Maintain only one pointer to FW memory
+ * so there will not be two attempt to
+ * downloadboot onboard dual function
+ * chips (mpt_adapter_disable,
+ * mpt_diag_reset)
+ */
+ ioc->cached_fw = NULL;
+ ddlprintk((MYIOC_s_INFO_FMT ": mpt_upload: alt_%s has cached_fw=%p \n",
+ ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw));
+ }
+ } else {
printk(KERN_WARNING MYNAM ": firmware upload failure!\n");
+ ret = -5;
+ }
}
}
}
@@ -1640,7 +1801,22 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
* and we try GetLanConfigPages again...
*/
if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
- if (ioc->bus_type == FC) {
+ if (ioc->bus_type == SAS) {
+
+ /* clear persistency table */
+ if(ioc->facts.IOCExceptions &
+ MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) {
+ ret = mptbase_sas_persist_operation(ioc,
+ MPI_SAS_OP_CLEAR_NOT_PRESENT);
+ if(ret != 0)
+ return -1;
+ }
+
+ /* Find IM volumes
+ */
+ mpt_findImVolumes(ioc);
+
+ } else if (ioc->bus_type == FC) {
/*
* Pre-fetch FC port WWN and stuff...
* (FCPortPage0_t stuff)
@@ -1783,7 +1959,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
if (ioc->cached_fw != NULL) {
ddlprintk((KERN_INFO MYNAM ": mpt_adapter_disable: Pushing FW onto adapter\n"));
- if ((ret = mpt_downloadboot(ioc, NO_SLEEP)) < 0) {
+ if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)ioc->cached_fw, NO_SLEEP)) < 0) {
printk(KERN_WARNING MYNAM
": firmware downloadboot failure (%d)!\n", ret);
}
@@ -1831,9 +2007,9 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
}
kfree(ioc->spi_data.nvram);
- kfree(ioc->spi_data.pIocPg3);
+ kfree(ioc->raid_data.pIocPg3);
ioc->spi_data.nvram = NULL;
- ioc->spi_data.pIocPg3 = NULL;
+ ioc->raid_data.pIocPg3 = NULL;
if (ioc->spi_data.pIocPg4 != NULL) {
sz = ioc->spi_data.IocPg4Sz;
@@ -1852,6 +2028,23 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
kfree(ioc->ChainToChain);
ioc->ChainToChain = NULL;
+
+ if (ioc->HostPageBuffer != NULL) {
+ if((ret = mpt_host_page_access_control(ioc,
+ MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) {
+ printk(KERN_ERR MYNAM
+ ": %s: host page buffers free failed (%d)!\n",
+ __FUNCTION__, ret);
+ }
+ dexitprintk((KERN_INFO MYNAM ": %s HostPageBuffer free @ %p, sz=%d bytes\n",
+ ioc->name, ioc->HostPageBuffer, ioc->HostPageBuffer_sz));
+ pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
+ ioc->HostPageBuffer,
+ ioc->HostPageBuffer_dma);
+ ioc->HostPageBuffer = NULL;
+ ioc->HostPageBuffer_sz = 0;
+ ioc->alloc_total -= ioc->HostPageBuffer_sz;
+ }
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2034,7 +2227,7 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
* Loop here waiting for IOC to come READY.
*/
ii = 0;
- cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 15; /* 15 seconds */
+ cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 5; /* 5 seconds */
while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) {
if (ioc_state == MPI_IOC_STATE_OPERATIONAL) {
@@ -2212,6 +2405,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
le32_to_cpu(facts->CurrentSenseBufferHighAddr);
facts->CurReplyFrameSize =
le16_to_cpu(facts->CurReplyFrameSize);
+ facts->IOCCapabilities = le32_to_cpu(facts->IOCCapabilities);
/*
* Handle NEW (!) IOCFactsReply fields in MPI-1.01.xx
@@ -2383,13 +2577,25 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
ddlprintk((MYIOC_s_INFO_FMT "upload_fw %d facts.Flags=%x\n",
ioc->name, ioc->upload_fw, ioc->facts.Flags));
- if (ioc->bus_type == FC)
+ if(ioc->bus_type == SAS)
+ ioc_init.MaxDevices = ioc->facts.MaxDevices;
+ else if(ioc->bus_type == FC)
ioc_init.MaxDevices = MPT_MAX_FC_DEVICES;
else
ioc_init.MaxDevices = MPT_MAX_SCSI_DEVICES;
-
ioc_init.MaxBuses = MPT_MAX_BUS;
-
+ dinitprintk((MYIOC_s_INFO_FMT "facts.MsgVersion=%x\n",
+ ioc->name, ioc->facts.MsgVersion));
+ if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) {
+ // set MsgVersion and HeaderVersion host driver was built with
+ ioc_init.MsgVersion = cpu_to_le16(MPI_VERSION);
+ ioc_init.HeaderVersion = cpu_to_le16(MPI_HEADER_VERSION);
+
+ if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_HOST_PAGE_BUFFER_PERSISTENT) {
+ ioc_init.HostPageBufferSGE = ioc->facts.HostPageBufferSGE;
+ } else if(mpt_host_page_alloc(ioc, &ioc_init))
+ return -99;
+ }
ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */
if (sizeof(dma_addr_t) == sizeof(u64)) {
@@ -2403,17 +2609,21 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
ioc_init.HostMfaHighAddr = cpu_to_le32(0);
ioc_init.SenseBufferHighAddr = cpu_to_le32(0);
}
-
+
ioc->facts.CurrentHostMfaHighAddr = ioc_init.HostMfaHighAddr;
ioc->facts.CurrentSenseBufferHighAddr = ioc_init.SenseBufferHighAddr;
+ ioc->facts.MaxDevices = ioc_init.MaxDevices;
+ ioc->facts.MaxBuses = ioc_init.MaxBuses;
dhsprintk((MYIOC_s_INFO_FMT "Sending IOCInit (req @ %p)\n",
ioc->name, &ioc_init));
r = mpt_handshake_req_reply_wait(ioc, sizeof(IOCInit_t), (u32*)&ioc_init,
sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10 /*seconds*/, sleepFlag);
- if (r != 0)
+ if (r != 0) {
+ printk(MYIOC_s_ERR_FMT "Sending IOCInit failed(%d)!\n",ioc->name, r);
return r;
+ }
/* No need to byte swap the multibyte fields in the reply
* since we don't even look at it's contents.
@@ -2472,7 +2682,7 @@ SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
{
PortEnable_t port_enable;
MPIDefaultReply_t reply_buf;
- int ii;
+ int rc;
int req_sz;
int reply_sz;
@@ -2494,22 +2704,15 @@ SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
/* RAID FW may take a long time to enable
*/
- if (ioc->bus_type == FC) {
- ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
- reply_sz, (u16*)&reply_buf, 65 /*seconds*/, sleepFlag);
- } else {
- ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
+ if ( (ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
+ > MPI_FW_HEADER_PID_PROD_TARGET_SCSI ) {
+ rc = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
reply_sz, (u16*)&reply_buf, 300 /*seconds*/, sleepFlag);
+ } else {
+ rc = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
+ reply_sz, (u16*)&reply_buf, 30 /*seconds*/, sleepFlag);
}
-
- if (ii != 0)
- return ii;
-
- /* We do not even look at the reply, so we need not
- * swap the multi-byte fields.
- */
-
- return 0;
+ return rc;
}
/*
@@ -2666,9 +2869,8 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
* <0 for fw upload failure.
*/
static int
-mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
+mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag)
{
- MpiFwHeader_t *pFwHeader;
MpiExtImageHeader_t *pExtImage;
u32 fwSize;
u32 diag0val;
@@ -2679,18 +2881,8 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
u32 load_addr;
u32 ioc_state=0;
- ddlprintk((MYIOC_s_INFO_FMT "downloadboot: fw size 0x%x, ioc FW Ptr %p\n",
- ioc->name, ioc->facts.FWImageSize, ioc->cached_fw));
-
- if ( ioc->facts.FWImageSize == 0 )
- return -1;
-
- if (ioc->cached_fw == NULL)
- return -2;
-
- /* prevent a second downloadboot and memory free with alt_ioc */
- if (ioc->alt_ioc && ioc->alt_ioc->cached_fw)
- ioc->alt_ioc->cached_fw = NULL;
+ ddlprintk((MYIOC_s_INFO_FMT "downloadboot: fw size 0x%x (%d), FW Ptr %p\n",
+ ioc->name, pFwHeader->ImageSize, pFwHeader->ImageSize, pFwHeader));
CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
@@ -2718,16 +2910,17 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
ioc->name, count));
break;
}
- /* wait 1 sec */
+ /* wait .1 sec */
if (sleepFlag == CAN_SLEEP) {
- msleep_interruptible (1000);
+ msleep_interruptible (100);
} else {
- mdelay (1000);
+ mdelay (100);
}
}
if ( count == 30 ) {
- ddlprintk((MYIOC_s_INFO_FMT "downloadboot failed! Unable to RESET_ADAPTER diag0val=%x\n",
+ ddlprintk((MYIOC_s_INFO_FMT "downloadboot failed! "
+ "Unable to get MPI_DIAG_DRWE mode, diag0val=%x\n",
ioc->name, diag0val));
return -3;
}
@@ -2742,7 +2935,6 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
/* Set the DiagRwEn and Disable ARM bits */
CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM));
- pFwHeader = (MpiFwHeader_t *) ioc->cached_fw;
fwSize = (pFwHeader->ImageSize + 3)/4;
ptrFw = (u32 *) pFwHeader;
@@ -2792,19 +2984,38 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
/* Clear the internal flash bad bit - autoincrementing register,
* so must do two writes.
*/
- CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
- diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData);
- diagRwData |= 0x4000000;
- CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
- CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData);
+ if (ioc->bus_type == SCSI) {
+ /*
+ * 1030 and 1035 H/W errata, workaround to access
+ * the ClearFlashBadSignatureBit
+ */
+ CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
+ diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData);
+ diagRwData |= 0x40000000;
+ CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
+ CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData);
+
+ } else /* if((ioc->bus_type == SAS) || (ioc->bus_type == FC)) */ {
+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+ CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val |
+ MPI_DIAG_CLEAR_FLASH_BAD_SIG);
+
+ /* wait 1 msec */
+ if (sleepFlag == CAN_SLEEP) {
+ msleep_interruptible (1);
+ } else {
+ mdelay (1);
+ }
+ }
if (ioc->errata_flag_1064)
pci_disable_io_access(ioc->pcidev);
diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
- ddlprintk((MYIOC_s_INFO_FMT "downloadboot diag0val=%x, turning off PREVENT_IOC_BOOT, DISABLE_ARM\n",
+ ddlprintk((MYIOC_s_INFO_FMT "downloadboot diag0val=%x, "
+ "turning off PREVENT_IOC_BOOT, DISABLE_ARM, RW_ENABLE\n",
ioc->name, diag0val));
- diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM);
+ diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM | MPI_DIAG_RW_ENABLE);
ddlprintk((MYIOC_s_INFO_FMT "downloadboot now diag0val=%x\n",
ioc->name, diag0val));
CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val);
@@ -2812,10 +3023,23 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
/* Write 0xFF to reset the sequencer */
CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
+ if (ioc->bus_type == SAS) {
+ ioc_state = mpt_GetIocState(ioc, 0);
+ if ( (GetIocFacts(ioc, sleepFlag,
+ MPT_HOSTEVENT_IOC_BRINGUP)) != 0 ) {
+ ddlprintk((MYIOC_s_INFO_FMT "GetIocFacts failed: IocState=%x\n",
+ ioc->name, ioc_state));
+ return -EFAULT;
+ }
+ }
+
for (count=0; count<HZ*20; count++) {
if ((ioc_state = mpt_GetIocState(ioc, 0)) & MPI_IOC_STATE_READY) {
ddlprintk((MYIOC_s_INFO_FMT "downloadboot successful! (count=%d) IocState=%x\n",
ioc->name, count, ioc_state));
+ if (ioc->bus_type == SAS) {
+ return 0;
+ }
if ((SendIocInit(ioc, sleepFlag)) != 0) {
ddlprintk((MYIOC_s_INFO_FMT "downloadboot: SendIocInit failed\n",
ioc->name));
@@ -3049,12 +3273,13 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
/* wait 1 sec */
if (sleepFlag == CAN_SLEEP) {
- ssleep(1);
+ msleep_interruptible (1000);
} else {
mdelay (1000);
}
}
- if ((count = mpt_downloadboot(ioc, sleepFlag)) < 0) {
+ if ((count = mpt_downloadboot(ioc,
+ (MpiFwHeader_t *)ioc->cached_fw, sleepFlag)) < 0) {
printk(KERN_WARNING MYNAM
": firmware downloadboot failure (%d)!\n", count);
}
@@ -3637,7 +3862,7 @@ WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
int count = 0;
u32 intstat=0;
- cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * howlong;
+ cntdn = 1000 * howlong;
if (sleepFlag == CAN_SLEEP) {
while (--cntdn) {
@@ -3687,7 +3912,7 @@ WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
int count = 0;
u32 intstat=0;
- cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * howlong;
+ cntdn = 1000 * howlong;
if (sleepFlag == CAN_SLEEP) {
while (--cntdn) {
intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
@@ -4001,6 +4226,85 @@ GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
+ * mptbase_sas_persist_operation - Perform operation on SAS Persistent Table
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sas_address: 64bit SAS Address for operation.
+ * @target_id: specified target for operation
+ * @bus: specified bus for operation
+ * @persist_opcode: see below
+ *
+ * MPI_SAS_OP_CLEAR_NOT_PRESENT - Free all persist TargetID mappings for
+ * devices not currently present.
+ * MPI_SAS_OP_CLEAR_ALL_PERSISTENT - Clear all persist TargetID mappings
+ *
+ * NOTE: Do not use this function during interrupt time.
+ *
+ * Returns: 0 for success, non-zero error
+ */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+int
+mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
+{
+ SasIoUnitControlRequest_t *sasIoUnitCntrReq;
+ SasIoUnitControlReply_t *sasIoUnitCntrReply;
+ MPT_FRAME_HDR *mf = NULL;
+ MPIHeader_t *mpi_hdr;
+
+
+ /* ensure garbage is not sent to fw */
+ switch(persist_opcode) {
+
+ case MPI_SAS_OP_CLEAR_NOT_PRESENT:
+ case MPI_SAS_OP_CLEAR_ALL_PERSISTENT:
+ break;
+
+ default:
+ return -1;
+ break;
+ }
+
+ printk("%s: persist_opcode=%x\n",__FUNCTION__, persist_opcode);
+
+ /* Get a MF for this command.
+ */
+ if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
+ printk("%s: no msg frames!\n",__FUNCTION__);
+ return -1;
+ }
+
+ mpi_hdr = (MPIHeader_t *) mf;
+ sasIoUnitCntrReq = (SasIoUnitControlRequest_t *)mf;
+ memset(sasIoUnitCntrReq,0,sizeof(SasIoUnitControlRequest_t));
+ sasIoUnitCntrReq->Function = MPI_FUNCTION_SAS_IO_UNIT_CONTROL;
+ sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext;
+ sasIoUnitCntrReq->Operation = persist_opcode;
+
+ init_timer(&ioc->persist_timer);
+ ioc->persist_timer.data = (unsigned long) ioc;
+ ioc->persist_timer.function = mpt_timer_expired;
+ ioc->persist_timer.expires = jiffies + HZ*10 /* 10 sec */;
+ ioc->persist_wait_done=0;
+ add_timer(&ioc->persist_timer);
+ mpt_put_msg_frame(mpt_base_index, ioc, mf);
+ wait_event(mpt_waitq, ioc->persist_wait_done);
+
+ sasIoUnitCntrReply =
+ (SasIoUnitControlReply_t *)ioc->persist_reply_frame;
+ if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
+ printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
+ __FUNCTION__,
+ sasIoUnitCntrReply->IOCStatus,
+ sasIoUnitCntrReply->IOCLogInfo);
+ return -1;
+ }
+
+ printk("%s: success\n",__FUNCTION__);
+ return 0;
+}
+
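
mptbase_sas_persist_operation() above, together with the new MPI_FUNCTION_SAS_IO_UNIT_CONTROL branch added to mpt_base_reply(), forms a post-and-wait handshake: the request frame is sent, a 10 second timer is armed, and the caller sleeps until the reply path copies the reply frame and sets persist_wait_done. The userspace model below mirrors only that flag handshake; persist_ctx, fake_reply() and persist_operation() are invented names, and the real driver relies on mpt_put_msg_frame(), add_timer() and wait_event().

/* Simplified model of the post-request / wait-for-flag handshake.  All
 * names here are illustrative; nothing below is kernel code. */
#include <stdio.h>

struct persist_ctx {
	int wait_done;		/* set by the reply path                */
	int ioc_status;		/* would be copied from the reply frame */
};

/* Stands in for mpt_base_reply() handling the SAS IO unit control reply. */
static void fake_reply(struct persist_ctx *ctx, int status)
{
	ctx->ioc_status = status;
	ctx->wait_done = 1;	/* the real code also wakes mpt_waitq */
}

static int persist_operation(struct persist_ctx *ctx)
{
	ctx->wait_done = 0;
	/* The driver posts the message frame and arms a 10 s timer here. */
	fake_reply(ctx, 0);	/* simulate the IOC answering             */
	while (!ctx->wait_done)	/* wait_event(mpt_waitq, ...) equivalent  */
		;
	return ctx->ioc_status ? -1 : 0;
}

int main(void)
{
	struct persist_ctx ctx;

	printf("persist operation %s\n",
	       persist_operation(&ctx) == 0 ? "succeeded" : "failed");
	return 0;
}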
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
* GetIoUnitPage2 - Retrieve BIOS version and boot order information.
* @ioc: Pointer to MPT_ADAPTER structure
*
@@ -4340,10 +4644,10 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
if (mpt_config(ioc, &cfg) != 0)
goto done_and_free;
- if ( (mem = (u8 *)ioc->spi_data.pIocPg2) == NULL ) {
+ if ( (mem = (u8 *)ioc->raid_data.pIocPg2) == NULL ) {
mem = kmalloc(iocpage2sz, GFP_ATOMIC);
if (mem) {
- ioc->spi_data.pIocPg2 = (IOCPage2_t *) mem;
+ ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem;
} else {
goto done_and_free;
}
@@ -4360,7 +4664,7 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
/* At least 1 RAID Volume
*/
pIocRv = pIoc2->RaidVolume;
- ioc->spi_data.isRaid = 0;
+ ioc->raid_data.isRaid = 0;
for (jj = 0; jj < nVols; jj++, pIocRv++) {
vid = pIocRv->VolumeID;
vbus = pIocRv->VolumeBus;
@@ -4369,7 +4673,7 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
/* find the match
*/
if (vbus == 0) {
- ioc->spi_data.isRaid |= (1 << vid);
+ ioc->raid_data.isRaid |= (1 << vid);
} else {
/* Error! Always bus 0
*/
@@ -4404,8 +4708,8 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
/* Free the old page
*/
- kfree(ioc->spi_data.pIocPg3);
- ioc->spi_data.pIocPg3 = NULL;
+ kfree(ioc->raid_data.pIocPg3);
+ ioc->raid_data.pIocPg3 = NULL;
/* There is at least one physical disk.
* Read and save IOC Page 3
@@ -4442,7 +4746,7 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
mem = kmalloc(iocpage3sz, GFP_ATOMIC);
if (mem) {
memcpy(mem, (u8 *)pIoc3, iocpage3sz);
- ioc->spi_data.pIocPg3 = (IOCPage3_t *) mem;
+ ioc->raid_data.pIocPg3 = (IOCPage3_t *) mem;
}
}
@@ -5366,8 +5670,8 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-static char *
-EventDescriptionStr(u8 event, u32 evData0)
+static void
+EventDescriptionStr(u8 event, u32 evData0, char *evStr)
{
char *ds;
@@ -5420,8 +5724,95 @@ EventDescriptionStr(u8 event, u32 evData0)
ds = "Events(OFF) Change";
break;
case MPI_EVENT_INTEGRATED_RAID:
- ds = "Integrated Raid";
+ {
+ u8 ReasonCode = (u8)(evData0 >> 16);
+ switch (ReasonCode) {
+ case MPI_EVENT_RAID_RC_VOLUME_CREATED :
+ ds = "Integrated Raid: Volume Created";
+ break;
+ case MPI_EVENT_RAID_RC_VOLUME_DELETED :
+ ds = "Integrated Raid: Volume Deleted";
+ break;
+ case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED :
+ ds = "Integrated Raid: Volume Settings Changed";
+ break;
+ case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED :
+ ds = "Integrated Raid: Volume Status Changed";
+ break;
+ case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED :
+ ds = "Integrated Raid: Volume Physdisk Changed";
+ break;
+ case MPI_EVENT_RAID_RC_PHYSDISK_CREATED :
+ ds = "Integrated Raid: Physdisk Created";
+ break;
+ case MPI_EVENT_RAID_RC_PHYSDISK_DELETED :
+ ds = "Integrated Raid: Physdisk Deleted";
+ break;
+ case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED :
+ ds = "Integrated Raid: Physdisk Settings Changed";
+ break;
+ case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED :
+ ds = "Integrated Raid: Physdisk Status Changed";
+ break;
+ case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED :
+ ds = "Integrated Raid: Domain Validation Needed";
+ break;
+ case MPI_EVENT_RAID_RC_SMART_DATA :
+ ds = "Integrated Raid; Smart Data";
+ break;
+ case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED :
+ ds = "Integrated Raid: Replace Action Started";
+ break;
+ default:
+ ds = "Integrated Raid";
+ break;
+ }
+ break;
+ }
+ case MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE:
+ ds = "SCSI Device Status Change";
+ break;
+ case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ {
+ u8 ReasonCode = (u8)(evData0 >> 16);
+ switch (ReasonCode) {
+ case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
+ ds = "SAS Device Status Change: Added";
+ break;
+ case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
+ ds = "SAS Device Status Change: Deleted";
+ break;
+ case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
+ ds = "SAS Device Status Change: SMART Data";
+ break;
+ case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
+ ds = "SAS Device Status Change: No Persistancy Added";
+ break;
+ default:
+ ds = "SAS Device Status Change: Unknown";
+ break;
+ }
+ break;
+ }
+ case MPI_EVENT_ON_BUS_TIMER_EXPIRED:
+ ds = "Bus Timer Expired";
+ break;
+ case MPI_EVENT_QUEUE_FULL:
+ ds = "Queue Full";
+ break;
+ case MPI_EVENT_SAS_SES:
+ ds = "SAS SES Event";
+ break;
+ case MPI_EVENT_PERSISTENT_TABLE_FULL:
+ ds = "Persistent Table Full";
+ break;
+ case MPI_EVENT_SAS_PHY_LINK_STATUS:
+ ds = "SAS PHY Link Status";
+ break;
+ case MPI_EVENT_SAS_DISCOVERY_ERROR:
+ ds = "SAS Discovery Error";
break;
+
/*
* MPT base "custom" events may be added here...
*/
@@ -5429,7 +5820,7 @@ EventDescriptionStr(u8 event, u32 evData0)
ds = "Unknown";
break;
}
- return ds;
+ strcpy(evStr,ds);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5451,7 +5842,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
int ii;
int r = 0;
int handlers = 0;
- char *evStr;
+ char evStr[100];
u8 event;
/*
@@ -5464,7 +5855,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
evData0 = le32_to_cpu(pEventReply->Data[0]);
}
- evStr = EventDescriptionStr(event, evData0);
+ EventDescriptionStr(event, evData0, evStr);
devtprintk((MYIOC_s_INFO_FMT "MPT event (%s=%02Xh) detected!\n",
ioc->name,
evStr,
@@ -5481,20 +5872,6 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
* Do general / base driver event processing
*/
switch(event) {
- case MPI_EVENT_NONE: /* 00 */
- case MPI_EVENT_LOG_DATA: /* 01 */
- case MPI_EVENT_STATE_CHANGE: /* 02 */
- case MPI_EVENT_UNIT_ATTENTION: /* 03 */
- case MPI_EVENT_IOC_BUS_RESET: /* 04 */
- case MPI_EVENT_EXT_BUS_RESET: /* 05 */
- case MPI_EVENT_RESCAN: /* 06 */
- case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
- case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
- case MPI_EVENT_LOGOUT: /* 09 */
- case MPI_EVENT_INTEGRATED_RAID: /* 0B */
- case MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE: /* 0C */
- default:
- break;
case MPI_EVENT_EVENT_CHANGE: /* 0A */
if (evDataLen) {
u8 evState = evData0 & 0xFF;
@@ -5507,6 +5884,8 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
}
}
break;
+ default:
+ break;
}
/*
@@ -5653,6 +6032,111 @@ mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info)
printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc);
}
+/* strings for sas loginfo */
+ static char *originator_str[] = {
+ "IOP", /* 00h */
+ "PL", /* 01h */
+ "IR" /* 02h */
+ };
+ static char *iop_code_str[] = {
+ NULL, /* 00h */
+ "Invalid SAS Address", /* 01h */
+ NULL, /* 02h */
+ "Invalid Page", /* 03h */
+ NULL, /* 04h */
+ "Task Terminated" /* 05h */
+ };
+ static char *pl_code_str[] = {
+ NULL, /* 00h */
+ "Open Failure", /* 01h */
+ "Invalid Scatter Gather List", /* 02h */
+ "Wrong Relative Offset or Frame Length", /* 03h */
+ "Frame Transfer Error", /* 04h */
+ "Transmit Frame Connected Low", /* 05h */
+ "SATA Non-NCQ RW Error Bit Set", /* 06h */
+ "SATA Read Log Receive Data Error", /* 07h */
+ "SATA NCQ Fail All Commands After Error", /* 08h */
+ "SATA Error in Receive Set Device Bit FIS", /* 09h */
+ "Receive Frame Invalid Message", /* 0Ah */
+ "Receive Context Message Valid Error", /* 0Bh */
+ "Receive Frame Current Frame Error", /* 0Ch */
+ "SATA Link Down", /* 0Dh */
+ "Discovery SATA Init W IOS", /* 0Eh */
+ "Config Invalid Page", /* 0Fh */
+ "Discovery SATA Init Timeout", /* 10h */
+ "Reset", /* 11h */
+ "Abort", /* 12h */
+ "IO Not Yet Executed", /* 13h */
+ "IO Executed", /* 14h */
+ NULL, /* 15h */
+ NULL, /* 16h */
+ NULL, /* 17h */
+ NULL, /* 18h */
+ NULL, /* 19h */
+ NULL, /* 1Ah */
+ NULL, /* 1Bh */
+ NULL, /* 1Ch */
+ NULL, /* 1Dh */
+ NULL, /* 1Eh */
+ NULL, /* 1Fh */
+ "Enclosure Management" /* 20h */
+ };
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * mpt_sas_log_info - Log information returned from SAS IOC.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @log_info: U32 LogInfo reply word from the IOC
+ *
+ * Refer to lsi/mpi_log_sas.h.
+ */
+static void
+mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info)
+{
+union loginfo_type {
+ u32 loginfo;
+ struct {
+ u32 subcode:16;
+ u32 code:8;
+ u32 originator:4;
+ u32 bus_type:4;
+ }dw;
+};
+ union loginfo_type sas_loginfo;
+ char *code_desc = NULL;
+
+ sas_loginfo.loginfo = log_info;
+ if ((sas_loginfo.dw.bus_type != 3 /*SAS*/) &&
+ (sas_loginfo.dw.originator < sizeof(originator_str)/sizeof(char*)))
+ return;
+ if ((sas_loginfo.dw.originator == 0 /*IOP*/) &&
+ (sas_loginfo.dw.code < sizeof(iop_code_str)/sizeof(char*))) {
+ code_desc = iop_code_str[sas_loginfo.dw.code];
+ }else if ((sas_loginfo.dw.originator == 1 /*PL*/) &&
+ (sas_loginfo.dw.code < sizeof(pl_code_str)/sizeof(char*) )) {
+ code_desc = pl_code_str[sas_loginfo.dw.code];
+ }
+
+ if (code_desc != NULL)
+ printk(MYIOC_s_INFO_FMT
+ "LogInfo(0x%08x): Originator={%s}, Code={%s},"
+ " SubCode(0x%04x)\n",
+ ioc->name,
+ log_info,
+ originator_str[sas_loginfo.dw.originator],
+ code_desc,
+ sas_loginfo.dw.subcode);
+ else
+ printk(MYIOC_s_INFO_FMT
+ "LogInfo(0x%08x): Originator={%s}, Code=(0x%02x),"
+ " SubCode(0x%04x)\n",
+ ioc->name,
+ log_info,
+ originator_str[sas_loginfo.dw.originator],
+ sas_loginfo.dw.code,
+ sas_loginfo.dw.subcode);
+}
+
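
mpt_sas_log_info() above splits the 32-bit LogInfo word with a bitfield union, which depends on the compiler placing the first member in the least significant bits. The equivalent explicit shift-and-mask decode below makes the layout visible (subcode in bits 0-15, code in 16-23, originator in 24-27, bus type in 28-31); the sample value is made up for illustration.

/* Explicit-mask equivalent of the bitfield decode above.  The example
 * LogInfo value is invented; field positions follow the union layout. */
#include <stdint.h>
#include <stdio.h>

static void decode_loginfo(uint32_t log_info)
{
	unsigned subcode    =  log_info        & 0xFFFFu;
	unsigned code       = (log_info >> 16) & 0xFFu;
	unsigned originator = (log_info >> 24) & 0xFu;
	unsigned bus_type   = (log_info >> 28) & 0xFu;

	printf("LogInfo(0x%08x): bus_type=%u originator=%u code=0x%02x subcode=0x%04x\n",
	       (unsigned)log_info, bus_type, originator, code, subcode);
}

int main(void)
{
	decode_loginfo(0x31120000u);	/* bus_type=3 (SAS), PL, code 0x12 */
	return 0;
}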
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mpt_sp_ioc_info - IOC information returned from SCSI Parallel IOC.
@@ -5814,6 +6298,7 @@ EXPORT_SYMBOL(mpt_findImVolumes);
EXPORT_SYMBOL(mpt_read_ioc_pg_3);
EXPORT_SYMBOL(mpt_alloc_fw_memory);
EXPORT_SYMBOL(mpt_free_fw_memory);
+EXPORT_SYMBOL(mptbase_sas_persist_operation);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index f4827d92373..75105277e22 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -65,6 +65,7 @@
#include "lsi/mpi_fc.h" /* Fibre Channel (lowlevel) support */
#include "lsi/mpi_targ.h" /* SCSI/FCP Target protcol support */
#include "lsi/mpi_tool.h" /* Tools support */
+#include "lsi/mpi_sas.h" /* SAS support */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -76,8 +77,8 @@
#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR
#endif
-#define MPT_LINUX_VERSION_COMMON "3.03.02"
-#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.02"
+#define MPT_LINUX_VERSION_COMMON "3.03.03"
+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.03"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
@@ -423,7 +424,7 @@ typedef struct _MPT_IOCTL {
/*
* Event Structure and define
*/
-#define MPTCTL_EVENT_LOG_SIZE (0x0000000A)
+#define MPTCTL_EVENT_LOG_SIZE (0x000000032)
typedef struct _mpt_ioctl_events {
u32 event; /* Specified by define above */
u32 eventContext; /* Index or counter */
@@ -451,16 +452,13 @@ typedef struct _mpt_ioctl_events {
#define MPT_SCSICFG_ALL_IDS 0x02 /* WriteSDP1 to all IDS */
/* #define MPT_SCSICFG_BLK_NEGO 0x10 WriteSDP1 with WDTR and SDTR disabled */
-typedef struct _ScsiCfgData {
+typedef struct _SpiCfgData {
u32 PortFlags;
int *nvram; /* table of device NVRAM values */
- IOCPage2_t *pIocPg2; /* table of Raid Volumes */
- IOCPage3_t *pIocPg3; /* table of physical disks */
IOCPage4_t *pIocPg4; /* SEP devices addressing */
dma_addr_t IocPg4_dma; /* Phys Addr of IOCPage4 data */
int IocPg4Sz; /* IOCPage4 size */
u8 dvStatus[MPT_MAX_SCSI_DEVICES];
- int isRaid; /* bit field, 1 if RAID */
u8 minSyncFactor; /* 0xFF if async */
u8 maxSyncOffset; /* 0 if async */
u8 maxBusWidth; /* 0 if narrow, 1 if wide */
@@ -472,10 +470,28 @@ typedef struct _ScsiCfgData {
u8 dvScheduled; /* 1 if scheduled */
u8 forceDv; /* 1 to force DV scheduling */
u8 noQas; /* Disable QAS for this adapter */
- u8 Saf_Te; /* 1 to force all Processors as SAF-TE if Inquiry data length is too short to check for SAF-TE */
+ u8 Saf_Te; /* 1 to force all Processors as
+ * SAF-TE if Inquiry data length
+ * is too short to check for SAF-TE
+ */
u8 mpt_dv; /* command line option: enhanced=1, basic=0 */
+ u8 bus_reset; /* 1 to allow bus reset */
u8 rsvd[1];
-} ScsiCfgData;
+}SpiCfgData;
+
+typedef struct _SasCfgData {
+ u8 ptClear; /* 1 to automatically clear the
+ * persistent table.
+ * 0 to disable
+ * automatic clearing.
+ */
+}SasCfgData;
+
+typedef struct _RaidCfgData {
+ IOCPage2_t *pIocPg2; /* table of Raid Volumes */
+ IOCPage3_t *pIocPg3; /* table of physical disks */
+ int isRaid; /* bit field, 1 if RAID */
+}RaidCfgData;
/*
* Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
@@ -530,11 +546,16 @@ typedef struct _MPT_ADAPTER
u8 *sense_buf_pool;
dma_addr_t sense_buf_pool_dma;
u32 sense_buf_low_dma;
+ u8 *HostPageBuffer; /* SAS - host page buffer support */
+ u32 HostPageBuffer_sz;
+ dma_addr_t HostPageBuffer_dma;
int mtrr_reg;
struct pci_dev *pcidev; /* struct pci_dev pointer */
u8 __iomem *memmap; /* mmap address */
struct Scsi_Host *sh; /* Scsi Host pointer */
- ScsiCfgData spi_data; /* Scsi config. data */
+ SpiCfgData spi_data; /* Scsi config. data */
+ RaidCfgData raid_data; /* Raid config. data */
+ SasCfgData sas_data; /* Sas config. data */
MPT_IOCTL *ioctl; /* ioctl data pointer */
struct proc_dir_entry *ioc_dentry;
struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */
@@ -554,31 +575,35 @@ typedef struct _MPT_ADAPTER
#else
u32 mfcnt;
#endif
- u32 NB_for_64_byte_frame;
+ u32 NB_for_64_byte_frame;
u32 hs_req[MPT_MAX_FRAME_SIZE/sizeof(u32)];
u16 hs_reply[MPT_MAX_FRAME_SIZE/sizeof(u16)];
IOCFactsReply_t facts;
PortFactsReply_t pfacts[2];
FCPortPage0_t fc_port_page0[2];
+ struct timer_list persist_timer; /* persist table timer */
+ int persist_wait_done; /* persist completion flag */
+ u8 persist_reply_frame[MPT_DEFAULT_FRAME_SIZE]; /* persist reply */
LANPage0_t lan_cnfg_page0;
LANPage1_t lan_cnfg_page1;
- /*
+ /*
* Description: errata_flag_1064
* If a PCIX read occurs within 1 or 2 cycles after the chip receives
* a split completion for a read data, an internal address pointer incorrectly
* increments by 32 bytes
*/
- int errata_flag_1064;
+ int errata_flag_1064;
u8 FirstWhoInit;
u8 upload_fw; /* If set, do a fw upload */
u8 reload_fw; /* Force a FW Reload on next reset */
- u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */
+ u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */
u8 pad1[4];
int DoneCtx;
int TaskCtx;
int InternalCtx;
- struct list_head list;
+ struct list_head list;
struct net_device *netdev;
+ struct list_head sas_topology;
} MPT_ADAPTER;
/*
@@ -964,6 +989,7 @@ extern void mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
extern int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc);
+extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
/*
* Public data decl's...
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 7577c2417e2..cb2d59d5f5a 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1326,7 +1326,7 @@ mptctl_gettargetinfo (unsigned long arg)
*/
if (hd && hd->Targets) {
mpt_findImVolumes(ioc);
- pIoc2 = ioc->spi_data.pIocPg2;
+ pIoc2 = ioc->raid_data.pIocPg2;
for ( id = 0; id <= max_id; ) {
if ( pIoc2 && pIoc2->NumActiveVolumes ) {
if ( id == pIoc2->RaidVolume[0].VolumeID ) {
@@ -1348,7 +1348,7 @@ mptctl_gettargetinfo (unsigned long arg)
--maxWordsLeft;
goto next_id;
} else {
- pIoc3 = ioc->spi_data.pIocPg3;
+ pIoc3 = ioc->raid_data.pIocPg3;
for ( jj = 0; jj < pIoc3->NumPhysDisks; jj++ ) {
if ( pIoc3->PhysDisk[jj].PhysDiskID == id )
goto next_id;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 13771abea13..a628be9bbba 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -189,7 +189,7 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MYIOC_s_WARN_FMT
"Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n",
ioc->name, ioc);
- return -ENODEV;
+ return 0;
}
sh = scsi_host_alloc(&mptfc_driver_template, sizeof(MPT_SCSI_HOST));
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 52794be5a95..ed3c891e388 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -312,7 +312,12 @@ static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
struct net_device *dev = ioc->netdev;
- struct mpt_lan_priv *priv = netdev_priv(dev);
+ struct mpt_lan_priv *priv;
+
+ if (dev == NULL)
+ return(1);
+ else
+ priv = netdev_priv(dev);
dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
new file mode 100644
index 00000000000..429820e48c6
--- /dev/null
+++ b/drivers/message/fusion/mptsas.c
@@ -0,0 +1,1235 @@
+/*
+ * linux/drivers/message/fusion/mptsas.c
+ * For use with LSI Logic PCI chip/adapter(s)
+ * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
+ *
+ * Copyright (c) 1999-2005 LSI Logic Corporation
+ * (mailto:mpt_linux_developer@lsil.com)
+ * Copyright (c) 2005 Dell
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_sas.h>
+
+#include "mptbase.h"
+#include "mptscsih.h"
+
+
+#define my_NAME "Fusion MPT SAS Host driver"
+#define my_VERSION MPT_LINUX_VERSION_COMMON
+#define MYNAM "mptsas"
+
+MODULE_AUTHOR(MODULEAUTHOR);
+MODULE_DESCRIPTION(my_NAME);
+MODULE_LICENSE("GPL");
+
+static int mpt_pq_filter;
+module_param(mpt_pq_filter, int, 0);
+MODULE_PARM_DESC(mpt_pq_filter,
+ "Enable peripheral qualifier filter: enable=1 "
+ "(default=0)");
+
+static int mpt_pt_clear;
+module_param(mpt_pt_clear, int, 0);
+MODULE_PARM_DESC(mpt_pt_clear,
+ "Clear persistency table: enable=1 "
+ "(default=MPTSCSIH_PT_CLEAR=0)");
+
+static int mptsasDoneCtx = -1;
+static int mptsasTaskCtx = -1;
+static int mptsasInternalCtx = -1; /* Used only for internal commands */
+
+
+/*
+ * SAS topology structures
+ *
+ * The MPT Fusion firmware interface spreads information about the
+ * SAS topology over several configuration pages, thus we need some data
+ * structure to collect it and process it for the SAS transport class.
+ */
+
+struct mptsas_devinfo {
+ u16 handle; /* unique id to address this device */
+ u8 phy_id; /* phy number of parent device */
+ u8 port_id; /* sas physical port this device
+ is assoc'd with */
+ u8 target; /* logical target id of this device */
+ u8 bus; /* logical bus number of this device */
+ u64 sas_address; /* WWN of this device,
+ SATA is assigned by HBA,expander */
+ u32 device_info; /* bitfield detailed info about this device */
+};
+
+struct mptsas_phyinfo {
+ u8 phy_id; /* phy index */
+ u8 port_id; /* port number this phy is part of */
+ u8 negotiated_link_rate; /* nego'd link rate for this phy */
+ u8 hw_link_rate; /* hardware max/min phys link rate */
+ u8 programmed_link_rate; /* programmed max/min phy link rate */
+ struct mptsas_devinfo identify; /* point to phy device info */
+ struct mptsas_devinfo attached; /* point to attached device info */
+ struct sas_rphy *rphy;
+};
+
+struct mptsas_portinfo {
+ struct list_head list;
+ u16 handle; /* unique id to address this */
+ u8 num_phys; /* number of phys */
+ struct mptsas_phyinfo *phy_info;
+};
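The three structures above nest: each adapter keeps a list of mptsas_portinfo entries on ioc->sas_topology, every entry owns an array of mptsas_phyinfo, and each phy carries two mptsas_devinfo blocks, one for its own identify data and one for whatever is attached. A minimal lookup sketch follows, for orientation only; the helper name is hypothetical and not part of the patch, and mptsas_slave_alloc() further down performs the same walk inline.

static struct mptsas_phyinfo *
mptsas_find_phy_by_attached_address(MPT_ADAPTER *ioc, u64 sas_address)
{
        struct mptsas_portinfo *p;
        int i;

        /* walk every discovered port and each of its phys */
        list_for_each_entry(p, &ioc->sas_topology, list)
                for (i = 0; i < p->num_phys; i++)
                        if (p->phy_info[i].attached.sas_address == sas_address)
                                return &p->phy_info[i];
        return NULL;
}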
+
+/*
+ * This is pretty ugly. We will be able to seriously clean it up
+ * once the DV code in mptscsih goes away and we can properly
+ * implement ->target_alloc.
+ */
+static int
+mptsas_slave_alloc(struct scsi_device *device)
+{
+ struct Scsi_Host *host = device->host;
+ MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+ struct sas_rphy *rphy;
+ struct mptsas_portinfo *p;
+ VirtDevice *vdev;
+ uint target = device->id;
+ int i;
+
+ if ((vdev = hd->Targets[target]) != NULL)
+ goto out;
+
+ vdev = kmalloc(sizeof(VirtDevice), GFP_KERNEL);
+ if (!vdev) {
+ printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
+ hd->ioc->name, sizeof(VirtDevice));
+ return -ENOMEM;
+ }
+
+ memset(vdev, 0, sizeof(VirtDevice));
+ vdev->tflags = MPT_TARGET_FLAGS_Q_YES|MPT_TARGET_FLAGS_VALID_INQUIRY;
+ vdev->ioc_id = hd->ioc->id;
+
+ rphy = dev_to_rphy(device->sdev_target->dev.parent);
+ list_for_each_entry(p, &hd->ioc->sas_topology, list) {
+ for (i = 0; i < p->num_phys; i++) {
+ if (p->phy_info[i].attached.sas_address ==
+ rphy->identify.sas_address) {
+ vdev->target_id =
+ p->phy_info[i].attached.target;
+ vdev->bus_id = p->phy_info[i].attached.bus;
+ hd->Targets[device->id] = vdev;
+ goto out;
+ }
+ }
+ }
+
+ printk("No matching SAS device found!!\n");
+ kfree(vdev);
+ return -ENODEV;
+
+ out:
+ vdev->num_luns++;
+ device->hostdata = vdev;
+ return 0;
+}
+
+static struct scsi_host_template mptsas_driver_template = {
+ .proc_name = "mptsas",
+ .proc_info = mptscsih_proc_info,
+ .name = "MPT SAS Host",
+ .info = mptscsih_info,
+ .queuecommand = mptscsih_qcmd,
+ .slave_alloc = mptsas_slave_alloc,
+ .slave_configure = mptscsih_slave_configure,
+ .slave_destroy = mptscsih_slave_destroy,
+ .change_queue_depth = mptscsih_change_queue_depth,
+ .eh_abort_handler = mptscsih_abort,
+ .eh_device_reset_handler = mptscsih_dev_reset,
+ .eh_bus_reset_handler = mptscsih_bus_reset,
+ .eh_host_reset_handler = mptscsih_host_reset,
+ .bios_param = mptscsih_bios_param,
+ .can_queue = MPT_FC_CAN_QUEUE,
+ .this_id = -1,
+ .sg_tablesize = MPT_SCSI_SG_DEPTH,
+ .max_sectors = 8192,
+ .cmd_per_lun = 7,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+static struct sas_function_template mptsas_transport_functions = {
+};
+
+static struct scsi_transport_template *mptsas_transport_template;
+
+#ifdef SASDEBUG
+static void mptsas_print_phy_data(MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
+{
+ printk("---- IO UNIT PAGE 0 ------------\n");
+ printk("Handle=0x%X\n",
+ le16_to_cpu(phy_data->AttachedDeviceHandle));
+ printk("Controller Handle=0x%X\n",
+ le16_to_cpu(phy_data->ControllerDevHandle));
+ printk("Port=0x%X\n", phy_data->Port);
+ printk("Port Flags=0x%X\n", phy_data->PortFlags);
+ printk("PHY Flags=0x%X\n", phy_data->PhyFlags);
+ printk("Negotiated Link Rate=0x%X\n", phy_data->NegotiatedLinkRate);
+ printk("Controller PHY Device Info=0x%X\n",
+ le32_to_cpu(phy_data->ControllerPhyDeviceInfo));
+ printk("DiscoveryStatus=0x%X\n",
+ le32_to_cpu(phy_data->DiscoveryStatus));
+ printk("\n");
+}
+
+static void mptsas_print_phy_pg0(SasPhyPage0_t *pg0)
+{
+ __le64 sas_address;
+
+ memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
+
+ printk("---- SAS PHY PAGE 0 ------------\n");
+ printk("Attached Device Handle=0x%X\n",
+ le16_to_cpu(pg0->AttachedDevHandle));
+ printk("SAS Address=0x%llX\n",
+ (unsigned long long)le64_to_cpu(sas_address));
+ printk("Attached PHY Identifier=0x%X\n", pg0->AttachedPhyIdentifier);
+ printk("Attached Device Info=0x%X\n",
+ le32_to_cpu(pg0->AttachedDeviceInfo));
+ printk("Programmed Link Rate=0x%X\n", pg0->ProgrammedLinkRate);
+ printk("Change Count=0x%X\n", pg0->ChangeCount);
+ printk("PHY Info=0x%X\n", le32_to_cpu(pg0->PhyInfo));
+ printk("\n");
+}
+
+static void mptsas_print_device_pg0(SasDevicePage0_t *pg0)
+{
+ __le64 sas_address;
+
+ memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
+
+ printk("---- SAS DEVICE PAGE 0 ---------\n");
+ printk("Handle=0x%X\n" ,le16_to_cpu(pg0->DevHandle));
+ printk("Enclosure Handle=0x%X\n", le16_to_cpu(pg0->EnclosureHandle));
+ printk("Slot=0x%X\n", le16_to_cpu(pg0->Slot));
+ printk("SAS Address=0x%llX\n", le64_to_cpu(sas_address));
+ printk("Target ID=0x%X\n", pg0->TargetID);
+ printk("Bus=0x%X\n", pg0->Bus);
+ printk("PhyNum=0x%X\n", pg0->PhyNum);
+ printk("AccessStatus=0x%X\n", le16_to_cpu(pg0->AccessStatus));
+ printk("Device Info=0x%X\n", le32_to_cpu(pg0->DeviceInfo));
+ printk("Flags=0x%X\n", le16_to_cpu(pg0->Flags));
+ printk("Physical Port=0x%X\n", pg0->PhysicalPort);
+ printk("\n");
+}
+
+static void mptsas_print_expander_pg1(SasExpanderPage1_t *pg1)
+{
+ printk("---- SAS EXPANDER PAGE 1 ------------\n");
+
+ printk("Physical Port=0x%X\n", pg1->PhysicalPort);
+ printk("PHY Identifier=0x%X\n", pg1->Phy);
+ printk("Negotiated Link Rate=0x%X\n", pg1->NegotiatedLinkRate);
+ printk("Programmed Link Rate=0x%X\n", pg1->ProgrammedLinkRate);
+ printk("Hardware Link Rate=0x%X\n", pg1->HwLinkRate);
+ printk("Owner Device Handle=0x%X\n",
+ le16_to_cpu(pg1->OwnerDevHandle));
+ printk("Attached Device Handle=0x%X\n",
+ le16_to_cpu(pg1->AttachedDevHandle));
+}
+#else
+#define mptsas_print_phy_data(phy_data) do { } while (0)
+#define mptsas_print_phy_pg0(pg0) do { } while (0)
+#define mptsas_print_device_pg0(pg0) do { } while (0)
+#define mptsas_print_expander_pg1(pg1) do { } while (0)
+#endif
+
+static int
+mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasIOUnitPage0_t *buffer;
+ dma_addr_t dma_handle;
+ int error, i;
+
+ hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
+ hdr.ExtPageLength = 0;
+ hdr.PageNumber = 0;
+ hdr.Reserved1 = 0;
+ hdr.Reserved2 = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.pageAddr = 0;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0; /* read */
+ cfg.timeout = 10;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out;
+ if (!hdr.ExtPageLength) {
+ error = -ENXIO;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out_free_consistent;
+
+ port_info->num_phys = buffer->NumPhys;
+ port_info->phy_info = kcalloc(port_info->num_phys,
+ sizeof(struct mptsas_phyinfo),GFP_KERNEL);
+ if (!port_info->phy_info) {
+ error = -ENOMEM;
+ goto out_free_consistent;
+ }
+
+ for (i = 0; i < port_info->num_phys; i++) {
+ mptsas_print_phy_data(&buffer->PhyData[i]);
+ port_info->phy_info[i].phy_id = i;
+ port_info->phy_info[i].port_id =
+ buffer->PhyData[i].Port;
+ port_info->phy_info[i].negotiated_link_rate =
+ buffer->PhyData[i].NegotiatedLinkRate;
+ }
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return error;
+}
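This function establishes the two-step configuration page access pattern that every mptsas_sas_*_pg* reader in this file repeats: issue MPI_CONFIG_ACTION_PAGE_HEADER first to learn ExtPageLength, then allocate a DMA-consistent buffer of ExtPageLength * 4 bytes and issue MPI_CONFIG_ACTION_PAGE_READ_CURRENT into it. The condensed sketch below shows only that shared shape; the helper is hypothetical and not part of the patch, while mpt_config(), pci_alloc_consistent() and the MPI constants are the interfaces actually used above.

static int
mptsas_read_extended_page(MPT_ADAPTER *ioc, ConfigExtendedPageHeader_t *hdr,
                          u32 page_address, void **buffer, dma_addr_t *dma_handle)
{
        CONFIGPARMS cfg;
        int error;

        memset(&cfg, 0, sizeof(cfg));           /* cfg.dir = 0, i.e. read */
        cfg.cfghdr.ehdr = hdr;
        cfg.physAddr = -1;
        cfg.pageAddr = page_address;
        cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;     /* step 1: get size */
        cfg.timeout = 10;

        error = mpt_config(ioc, &cfg);
        if (error)
                return error;
        if (!hdr->ExtPageLength)
                return -ENXIO;

        *buffer = pci_alloc_consistent(ioc->pcidev, hdr->ExtPageLength * 4,
                                       dma_handle);
        if (!*buffer)
                return -ENOMEM;

        cfg.physAddr = *dma_handle;
        cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;       /* step 2: read */
        return mpt_config(ioc, &cfg);
}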
+
+static int
+mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
+ u32 form, u32 form_specific)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasPhyPage0_t *buffer;
+ dma_addr_t dma_handle;
+ int error;
+
+ hdr.PageVersion = MPI_SASPHY0_PAGEVERSION;
+ hdr.ExtPageLength = 0;
+ hdr.PageNumber = 0;
+ hdr.Reserved1 = 0;
+ hdr.Reserved2 = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_PHY;
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.dir = 0; /* read */
+ cfg.timeout = 10;
+
+ /* Get Phy Pg 0 for each Phy. */
+ cfg.physAddr = -1;
+ cfg.pageAddr = form + form_specific;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out;
+
+ if (!hdr.ExtPageLength) {
+ error = -ENXIO;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out_free_consistent;
+
+ mptsas_print_phy_pg0(buffer);
+
+ phy_info->hw_link_rate = buffer->HwLinkRate;
+ phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
+ phy_info->identify.handle = le16_to_cpu(buffer->OwnerDevHandle);
+ phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return error;
+}
+
+static int
+mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
+ u32 form, u32 form_specific)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasDevicePage0_t *buffer;
+ dma_addr_t dma_handle;
+ __le64 sas_address;
+ int error;
+
+ hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
+ hdr.ExtPageLength = 0;
+ hdr.PageNumber = 0;
+ hdr.Reserved1 = 0;
+ hdr.Reserved2 = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE;
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.pageAddr = form + form_specific;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0; /* read */
+ cfg.timeout = 10;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out;
+ if (!hdr.ExtPageLength) {
+ error = -ENXIO;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out_free_consistent;
+
+ mptsas_print_device_pg0(buffer);
+
+ device_info->handle = le16_to_cpu(buffer->DevHandle);
+ device_info->phy_id = buffer->PhyNum;
+ device_info->port_id = buffer->PhysicalPort;
+ device_info->target = buffer->TargetID;
+ device_info->bus = buffer->Bus;
+ memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
+ device_info->sas_address = le64_to_cpu(sas_address);
+ device_info->device_info =
+ le32_to_cpu(buffer->DeviceInfo);
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return error;
+}
+
+static int
+mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
+ u32 form, u32 form_specific)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasExpanderPage0_t *buffer;
+ dma_addr_t dma_handle;
+ int error;
+
+ hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
+ hdr.ExtPageLength = 0;
+ hdr.PageNumber = 0;
+ hdr.Reserved1 = 0;
+ hdr.Reserved2 = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.pageAddr = form + form_specific;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0; /* read */
+ cfg.timeout = 10;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out;
+
+ if (!hdr.ExtPageLength) {
+ error = -ENXIO;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out_free_consistent;
+
+ /* save config data */
+ port_info->num_phys = buffer->NumPhys;
+ port_info->handle = le16_to_cpu(buffer->DevHandle);
+ port_info->phy_info = kcalloc(port_info->num_phys,
+ sizeof(struct mptsas_phyinfo),GFP_KERNEL);
+ if (!port_info->phy_info) {
+ error = -ENOMEM;
+ goto out_free_consistent;
+ }
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return error;
+}
+
+static int
+mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
+ u32 form, u32 form_specific)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasExpanderPage1_t *buffer;
+ dma_addr_t dma_handle;
+ int error;
+
+ hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
+ hdr.ExtPageLength = 0;
+ hdr.PageNumber = 1;
+ hdr.Reserved1 = 0;
+ hdr.Reserved2 = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.pageAddr = form + form_specific;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0; /* read */
+ cfg.timeout = 10;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out;
+
+ if (!hdr.ExtPageLength) {
+ error = -ENXIO;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out_free_consistent;
+
+
+ mptsas_print_expander_pg1(buffer);
+
+ /* save config data */
+ phy_info->phy_id = buffer->Phy;
+ phy_info->port_id = buffer->PhysicalPort;
+ phy_info->negotiated_link_rate = buffer->NegotiatedLinkRate;
+ phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
+ phy_info->hw_link_rate = buffer->HwLinkRate;
+ phy_info->identify.handle = le16_to_cpu(buffer->OwnerDevHandle);
+ phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
+
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return error;
+}
+
+static void
+mptsas_parse_device_info(struct sas_identify *identify,
+ struct mptsas_devinfo *device_info)
+{
+ u16 protocols;
+
+ identify->sas_address = device_info->sas_address;
+ identify->phy_identifier = device_info->phy_id;
+
+ /*
+ * Fill in Phy Initiator Port Protocol.
+ * Bits 6:3, more than one bit can be set, fall through cases.
+ */
+ protocols = device_info->device_info & 0x78;
+ identify->initiator_port_protocols = 0;
+ if (protocols & MPI_SAS_DEVICE_INFO_SSP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
+ if (protocols & MPI_SAS_DEVICE_INFO_STP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_STP;
+ if (protocols & MPI_SAS_DEVICE_INFO_SMP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
+ if (protocols & MPI_SAS_DEVICE_INFO_SATA_HOST)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SATA;
+
+ /*
+ * Fill in Phy Target Port Protocol.
+ * Bits 10:7, more than one bit can be set, fall through cases.
+ */
+ protocols = device_info->device_info & 0x780;
+ identify->target_port_protocols = 0;
+ if (protocols & MPI_SAS_DEVICE_INFO_SSP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SSP;
+ if (protocols & MPI_SAS_DEVICE_INFO_STP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_STP;
+ if (protocols & MPI_SAS_DEVICE_INFO_SMP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SMP;
+ if (protocols & MPI_SAS_DEVICE_INFO_SATA_DEVICE)
+ identify->target_port_protocols |= SAS_PROTOCOL_SATA;
+
+ /*
+ * Fill in Attached device type.
+ */
+ switch (device_info->device_info &
+ MPI_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
+ case MPI_SAS_DEVICE_INFO_NO_DEVICE:
+ identify->device_type = SAS_PHY_UNUSED;
+ break;
+ case MPI_SAS_DEVICE_INFO_END_DEVICE:
+ identify->device_type = SAS_END_DEVICE;
+ break;
+ case MPI_SAS_DEVICE_INFO_EDGE_EXPANDER:
+ identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
+ break;
+ case MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER:
+ identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
+ break;
+ }
+}
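The hex masks above simply encode the bit ranges named in the comments: DeviceInfo bits 6:3 form 0x78 and bits 10:7 form 0x780. A two-line check of that arithmetic, illustrative only and not part of the patch:

        u32 initiator_mask = ((1u << 7) - 1) & ~((1u << 3) - 1);  /* bits 6:3  -> 0x78  */
        u32 target_mask    = ((1u << 11) - 1) & ~((1u << 7) - 1); /* bits 10:7 -> 0x780 */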
+
+static int mptsas_probe_one_phy(struct device *dev,
+ struct mptsas_phyinfo *phy_info, int index)
+{
+ struct sas_phy *port;
+ int error;
+
+ port = sas_phy_alloc(dev, index);
+ if (!port)
+ return -ENOMEM;
+
+ port->port_identifier = phy_info->port_id;
+ mptsas_parse_device_info(&port->identify, &phy_info->identify);
+
+ /*
+ * Set Negotiated link rate.
+ */
+ switch (phy_info->negotiated_link_rate) {
+ case MPI_SAS_IOUNIT0_RATE_PHY_DISABLED:
+ port->negotiated_linkrate = SAS_PHY_DISABLED;
+ break;
+ case MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION:
+ port->negotiated_linkrate = SAS_LINK_RATE_FAILED;
+ break;
+ case MPI_SAS_IOUNIT0_RATE_1_5:
+ port->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI_SAS_IOUNIT0_RATE_3_0:
+ port->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE:
+ case MPI_SAS_IOUNIT0_RATE_UNKNOWN:
+ default:
+ port->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
+ break;
+ }
+
+ /*
+ * Set Max hardware link rate.
+ */
+ switch (phy_info->hw_link_rate & MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) {
+ case MPI_SAS_PHY0_HWRATE_MAX_RATE_1_5:
+ port->maximum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0:
+ port->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Set Max programmed link rate.
+ */
+ switch (phy_info->programmed_link_rate &
+ MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) {
+ case MPI_SAS_PHY0_PRATE_MAX_RATE_1_5:
+ port->maximum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0:
+ port->maximum_linkrate = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Set Min hardware link rate.
+ */
+ switch (phy_info->hw_link_rate & MPI_SAS_PHY0_HWRATE_MIN_RATE_MASK) {
+ case MPI_SAS_PHY0_HWRATE_MIN_RATE_1_5:
+ port->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0:
+ port->minimum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Set Min programmed link rate.
+ */
+ switch (phy_info->programmed_link_rate &
+ MPI_SAS_PHY0_PRATE_MIN_RATE_MASK) {
+ case MPI_SAS_PHY0_PRATE_MIN_RATE_1_5:
+ port->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0:
+ port->minimum_linkrate = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ default:
+ break;
+ }
+
+ error = sas_phy_add(port);
+ if (error) {
+ sas_phy_free(port);
+ return error;
+ }
+
+ if (phy_info->attached.handle) {
+ struct sas_rphy *rphy;
+
+ rphy = sas_rphy_alloc(port);
+ if (!rphy)
+ return 0; /* non-fatal: an rphy can be added later */
+
+ mptsas_parse_device_info(&rphy->identify, &phy_info->attached);
+ error = sas_rphy_add(rphy);
+ if (error) {
+ sas_rphy_free(rphy);
+ return error;
+ }
+
+ phy_info->rphy = rphy;
+ }
+
+ return 0;
+}
+
+static int
+mptsas_probe_hba_phys(MPT_ADAPTER *ioc, int *index)
+{
+ struct mptsas_portinfo *port_info;
+ u32 handle = 0xFFFF;
+ int error = -ENOMEM, i;
+
+ port_info = kmalloc(sizeof(*port_info), GFP_KERNEL);
+ if (!port_info)
+ goto out;
+ memset(port_info, 0, sizeof(*port_info));
+
+ error = mptsas_sas_io_unit_pg0(ioc, port_info);
+ if (error)
+ goto out_free_port_info;
+
+ list_add_tail(&port_info->list, &ioc->sas_topology);
+
+ for (i = 0; i < port_info->num_phys; i++) {
+ mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
+ (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
+ MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
+
+ mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
+ (MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT), handle);
+ handle = port_info->phy_info[i].identify.handle;
+
+ if (port_info->phy_info[i].attached.handle) {
+ mptsas_sas_device_pg0(ioc,
+ &port_info->phy_info[i].attached,
+ (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ port_info->phy_info[i].attached.handle);
+ }
+
+ mptsas_probe_one_phy(&ioc->sh->shost_gendev,
+ &port_info->phy_info[i], *index);
+ (*index)++;
+ }
+
+ return 0;
+
+ out_free_port_info:
+ kfree(port_info);
+ out:
+ return error;
+}
+
+static int
+mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle, int *index)
+{
+ struct mptsas_portinfo *port_info, *p;
+ int error = -ENOMEM, i, j;
+
+ port_info = kmalloc(sizeof(*port_info), GFP_KERNEL);
+ if (!port_info)
+ goto out;
+ memset(port_info, 0, sizeof(*port_info));
+
+ error = mptsas_sas_expander_pg0(ioc, port_info,
+ (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
+ MPI_SAS_EXPAND_PGAD_FORM_SHIFT), *handle);
+ if (error)
+ goto out_free_port_info;
+
+ *handle = port_info->handle;
+
+ list_add_tail(&port_info->list, &ioc->sas_topology);
+ for (i = 0; i < port_info->num_phys; i++) {
+ struct device *parent;
+
+ mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i],
+ (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
+ MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + *handle);
+
+ if (port_info->phy_info[i].identify.handle) {
+ mptsas_sas_device_pg0(ioc,
+ &port_info->phy_info[i].identify,
+ (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ port_info->phy_info[i].identify.handle);
+ }
+
+ if (port_info->phy_info[i].attached.handle) {
+ mptsas_sas_device_pg0(ioc,
+ &port_info->phy_info[i].attached,
+ (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ port_info->phy_info[i].attached.handle);
+ }
+
+ /*
+ * If we find a parent port handle, this expander is
+ * attached to another expander; otherwise it hangs off
+ * the HBA phys.
+ */
+ parent = &ioc->sh->shost_gendev;
+ list_for_each_entry(p, &ioc->sas_topology, list) {
+ for (j = 0; j < p->num_phys; j++) {
+ if (port_info->phy_info[i].identify.handle ==
+ p->phy_info[j].attached.handle)
+ parent = &p->phy_info[j].rphy->dev;
+ }
+ }
+
+ mptsas_probe_one_phy(parent, &port_info->phy_info[i], *index);
+ (*index)++;
+ }
+
+ return 0;
+
+ out_free_port_info:
+ kfree(port_info);
+ out:
+ return error;
+}
+
+static void
+mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
+{
+ u32 handle = 0xFFFF;
+ int index = 0;
+
+ mptsas_probe_hba_phys(ioc, &index);
+ while (!mptsas_probe_expander_phys(ioc, &handle, &index))
+ ;
+}
+
+static int
+mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct Scsi_Host *sh;
+ MPT_SCSI_HOST *hd;
+ MPT_ADAPTER *ioc;
+ unsigned long flags;
+ int sz, ii;
+ int numSGE = 0;
+ int scale;
+ int ioc_cap;
+ u8 *mem;
+ int error=0;
+ int r;
+
+ r = mpt_attach(pdev,id);
+ if (r)
+ return r;
+
+ ioc = pci_get_drvdata(pdev);
+ ioc->DoneCtx = mptsasDoneCtx;
+ ioc->TaskCtx = mptsasTaskCtx;
+ ioc->InternalCtx = mptsasInternalCtx;
+
+ /* Added sanity check on readiness of the MPT adapter.
+ */
+ if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
+ printk(MYIOC_s_WARN_FMT
+ "Skipping because it's not operational!\n",
+ ioc->name);
+ return -ENODEV;
+ }
+
+ if (!ioc->active) {
+ printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
+ ioc->name);
+ return -ENODEV;
+ }
+
+ /* Sanity check - ensure at least 1 port is INITIATOR capable
+ */
+ ioc_cap = 0;
+ for (ii = 0; ii < ioc->facts.NumberOfPorts; ii++) {
+ if (ioc->pfacts[ii].ProtocolFlags &
+ MPI_PORTFACTS_PROTOCOL_INITIATOR)
+ ioc_cap++;
+ }
+
+ if (!ioc_cap) {
+ printk(MYIOC_s_WARN_FMT
+ "Skipping ioc=%p because SCSI Initiator mode "
+ "is NOT enabled!\n", ioc->name, ioc);
+ return 0;
+ }
+
+ sh = scsi_host_alloc(&mptsas_driver_template, sizeof(MPT_SCSI_HOST));
+ if (!sh) {
+ printk(MYIOC_s_WARN_FMT
+ "Unable to register controller with SCSI subsystem\n",
+ ioc->name);
+ return -1;
+ }
+
+ spin_lock_irqsave(&ioc->FreeQlock, flags);
+
+ /* Attach the SCSI Host to the IOC structure
+ */
+ ioc->sh = sh;
+
+ sh->io_port = 0;
+ sh->n_io_port = 0;
+ sh->irq = 0;
+
+ /* set 16 byte cdb's */
+ sh->max_cmd_len = 16;
+
+ sh->max_id = ioc->pfacts->MaxDevices + 1;
+
+ sh->transportt = mptsas_transport_template;
+
+ sh->max_lun = MPT_LAST_LUN + 1;
+ sh->max_channel = 0;
+ sh->this_id = ioc->pfacts[0].PortSCSIID;
+
+ /* Required entry.
+ */
+ sh->unique_id = ioc->id;
+
+ INIT_LIST_HEAD(&ioc->sas_topology);
+
+ /* Verify that we won't exceed the maximum
+ * number of chain buffers
+ * We can optimize: ZZ = req_sz/sizeof(SGE)
+ * For 32bit SGE's:
+ * numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ
+ * + (req_sz - 64)/sizeof(SGE)
+ * A slightly different algorithm is required for
+ * 64bit SGEs.
+ */
+ scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
+ if (sizeof(dma_addr_t) == sizeof(u64)) {
+ numSGE = (scale - 1) *
+ (ioc->facts.MaxChainDepth-1) + scale +
+ (ioc->req_sz - 60) / (sizeof(dma_addr_t) +
+ sizeof(u32));
+ } else {
+ numSGE = 1 + (scale - 1) *
+ (ioc->facts.MaxChainDepth-1) + scale +
+ (ioc->req_sz - 64) / (sizeof(dma_addr_t) +
+ sizeof(u32));
+ }
+
+ if (numSGE < sh->sg_tablesize) {
+ /* Reset this value */
+ dprintk((MYIOC_s_INFO_FMT
+ "Resetting sg_tablesize to %d from %d\n",
+ ioc->name, numSGE, sh->sg_tablesize));
+ sh->sg_tablesize = numSGE;
+ }
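/*
 * Worked example of the calculation above (values assumed, not taken
 * from the patch): with a 128-byte request frame and a 64-bit
 * dma_addr_t, one SGE occupies sizeof(dma_addr_t) + sizeof(u32) = 12
 * bytes, so
 *
 *      scale  = 128 / 12 = 10
 *      numSGE = (10 - 1) * (MaxChainDepth - 1) + 10 + (128 - 60) / 12
 *             = 9 * (MaxChainDepth - 1) + 15
 *
 * which works out to 1158 for an assumed MaxChainDepth of 128; the
 * check above then lowers sg_tablesize to numSGE only when the
 * template's MPT_SCSI_SG_DEPTH is larger.
 */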
+
+ spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
+ hd = (MPT_SCSI_HOST *) sh->hostdata;
+ hd->ioc = ioc;
+
+ /* SCSI needs scsi_cmnd lookup table!
+ * (with size equal to req_depth*PtrSz!)
+ */
+ sz = ioc->req_depth * sizeof(void *);
+ mem = kmalloc(sz, GFP_ATOMIC);
+ if (mem == NULL) {
+ error = -ENOMEM;
+ goto mptsas_probe_failed;
+ }
+
+ memset(mem, 0, sz);
+ hd->ScsiLookup = (struct scsi_cmnd **) mem;
+
+ dprintk((MYIOC_s_INFO_FMT "ScsiLookup @ %p, sz=%d\n",
+ ioc->name, hd->ScsiLookup, sz));
+
+ /* Allocate memory for the device structures.
+ * A non-Null pointer at an offset
+ * indicates a device exists.
+ * max_id = 1 + maximum id (hosts.h)
+ */
+ sz = sh->max_id * sizeof(void *);
+ mem = kmalloc(sz, GFP_ATOMIC);
+ if (mem == NULL) {
+ error = -ENOMEM;
+ goto mptsas_probe_failed;
+ }
+
+ memset(mem, 0, sz);
+ hd->Targets = (VirtDevice **) mem;
+
+ dprintk((KERN_INFO
+ " Targets @ %p, sz=%d\n", hd->Targets, sz));
+
+ /* Clear the TM flags
+ */
+ hd->tmPending = 0;
+ hd->tmState = TM_STATE_NONE;
+ hd->resetPending = 0;
+ hd->abortSCpnt = NULL;
+
+ /* Clear the pointer used to store
+ * single-threaded commands, i.e., those
+ * issued during a bus scan, dv and
+ * configuration pages.
+ */
+ hd->cmdPtr = NULL;
+
+ /* Initialize this SCSI Host's timers
+ * To use, set the timer expires field
+ * and add_timer
+ */
+ init_timer(&hd->timer);
+ hd->timer.data = (unsigned long) hd;
+ hd->timer.function = mptscsih_timer_expired;
+
+ hd->mpt_pq_filter = mpt_pq_filter;
+ ioc->sas_data.ptClear = mpt_pt_clear;
+
+ if (ioc->sas_data.ptClear==1) {
+ mptbase_sas_persist_operation(
+ ioc, MPI_SAS_OP_CLEAR_ALL_PERSISTENT);
+ }
+
+ ddvprintk((MYIOC_s_INFO_FMT
+ "mpt_pq_filter %x\n",
+ ioc->name,
+ mpt_pq_filter));
+
+ init_waitqueue_head(&hd->scandv_waitq);
+ hd->scandv_wait_done = 0;
+ hd->last_queue_full = 0;
+
+ error = scsi_add_host(sh, &ioc->pcidev->dev);
+ if (error) {
+ dprintk((KERN_ERR MYNAM
+ "scsi_add_host failed\n"));
+ goto mptsas_probe_failed;
+ }
+
+ mptsas_scan_sas_topology(ioc);
+
+ return 0;
+
+mptsas_probe_failed:
+
+ mptscsih_remove(pdev);
+ return error;
+}
+
+static void __devexit mptsas_remove(struct pci_dev *pdev)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+ struct mptsas_portinfo *p, *n;
+
+ sas_remove_host(ioc->sh);
+
+ list_for_each_entry_safe(p, n, &ioc->sas_topology, list) {
+ list_del(&p->list);
+ kfree(p);
+ }
+
+ mptscsih_remove(pdev);
+}
+
+static struct pci_device_id mptsas_pci_table[] = {
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1066,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1068,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064E,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1066E,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1068E,
+ PCI_ANY_ID, PCI_ANY_ID },
+ {0} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, mptsas_pci_table);
+
+
+static struct pci_driver mptsas_driver = {
+ .name = "mptsas",
+ .id_table = mptsas_pci_table,
+ .probe = mptsas_probe,
+ .remove = __devexit_p(mptsas_remove),
+ .shutdown = mptscsih_shutdown,
+#ifdef CONFIG_PM
+ .suspend = mptscsih_suspend,
+ .resume = mptscsih_resume,
+#endif
+};
+
+static int __init
+mptsas_init(void)
+{
+ show_mptmod_ver(my_NAME, my_VERSION);
+
+ mptsas_transport_template =
+ sas_attach_transport(&mptsas_transport_functions);
+ if (!mptsas_transport_template)
+ return -ENODEV;
+
+ mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER);
+ mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER);
+ mptsasInternalCtx =
+ mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER);
+
+ if (mpt_event_register(mptsasDoneCtx, mptscsih_event_process) == 0) {
+ devtprintk((KERN_INFO MYNAM
+ ": Registered for IOC event notifications\n"));
+ }
+
+ if (mpt_reset_register(mptsasDoneCtx, mptscsih_ioc_reset) == 0) {
+ dprintk((KERN_INFO MYNAM
+ ": Registered for IOC reset notifications\n"));
+ }
+
+ return pci_register_driver(&mptsas_driver);
+}
+
+static void __exit
+mptsas_exit(void)
+{
+ pci_unregister_driver(&mptsas_driver);
+ sas_release_transport(mptsas_transport_template);
+
+ mpt_reset_deregister(mptsasDoneCtx);
+ mpt_event_deregister(mptsasDoneCtx);
+
+ mpt_deregister(mptsasInternalCtx);
+ mpt_deregister(mptsasTaskCtx);
+ mpt_deregister(mptsasDoneCtx);
+}
+
+module_init(mptsas_init);
+module_exit(mptsas_exit);
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 4a003dc5fde..5cb07eb224d 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -62,6 +62,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
#include "mptbase.h"
#include "mptscsih.h"
@@ -93,8 +94,9 @@ typedef struct _BIG_SENSE_BUF {
#define MPT_ICFLAG_BUF_CAP 0x01 /* ReadBuffer Read Capacity format */
#define MPT_ICFLAG_ECHO 0x02 /* ReadBuffer Echo buffer format */
-#define MPT_ICFLAG_PHYS_DISK 0x04 /* Any SCSI IO but do Phys Disk Format */
-#define MPT_ICFLAG_TAGGED_CMD 0x08 /* Do tagged IO */
+#define MPT_ICFLAG_EBOS 0x04 /* ReadBuffer Echo buffer has EBOS */
+#define MPT_ICFLAG_PHYS_DISK 0x08 /* Any SCSI IO but do Phys Disk Format */
+#define MPT_ICFLAG_TAGGED_CMD 0x10 /* Do tagged IO */
#define MPT_ICFLAG_DID_RESET 0x20 /* Bus Reset occurred with this command */
#define MPT_ICFLAG_RESERVED 0x40 /* Reserved has been issued */
@@ -159,6 +161,8 @@ int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR
static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
static int mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum);
+static struct work_struct mptscsih_persistTask;
+
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
static int mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io);
static void mptscsih_domainValidation(void *hd);
@@ -167,6 +171,7 @@ static void mptscsih_qas_check(MPT_SCSI_HOST *hd, int id);
static int mptscsih_doDv(MPT_SCSI_HOST *hd, int channel, int target);
static void mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage);
static void mptscsih_fillbuf(char *buffer, int size, int index, int width);
+static void mptscsih_set_dvflags_raid(MPT_SCSI_HOST *hd, int id);
#endif
void mptscsih_remove(struct pci_dev *);
@@ -606,11 +611,24 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
xfer_cnt = le32_to_cpu(pScsiReply->TransferCount);
sc->resid = sc->request_bufflen - xfer_cnt;
+ /*
+ * if we get a data underrun indication, yet no data was
+ * transferred and the SCSI status indicates that the
+ * command was never started, change the data underrun
+ * to success
+ */
+ if (status == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
+ (scsi_status == MPI_SCSI_STATUS_BUSY ||
+ scsi_status == MPI_SCSI_STATUS_RESERVATION_CONFLICT ||
+ scsi_status == MPI_SCSI_STATUS_TASK_SET_FULL)) {
+ status = MPI_IOCSTATUS_SUCCESS;
+ }
+
dreplyprintk((KERN_NOTICE "Reply ha=%d id=%d lun=%d:\n"
"IOCStatus=%04xh SCSIState=%02xh SCSIStatus=%02xh\n"
"resid=%d bufflen=%d xfer_cnt=%d\n",
ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1],
- status, scsi_state, scsi_status, sc->resid,
+ status, scsi_state, scsi_status, sc->resid,
sc->request_bufflen, xfer_cnt));
if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID)
@@ -619,8 +637,11 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
/*
* Look for + dump FCP ResponseInfo[]!
*/
- if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID) {
- printk(KERN_NOTICE " FCP_ResponseInfo=%08xh\n",
+ if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID &&
+ pScsiReply->ResponseInfo) {
+ printk(KERN_NOTICE "ha=%d id=%d lun=%d: "
+ "FCP_ResponseInfo=%08xh\n",
+ ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1],
le32_to_cpu(pScsiReply->ResponseInfo));
}
@@ -661,23 +682,13 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
break;
case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
- if ( xfer_cnt >= sc->underflow ) {
- /* Sufficient data transfer occurred */
+ sc->resid = sc->request_bufflen - xfer_cnt;
+ if((xfer_cnt==0)||(sc->underflow > xfer_cnt))
+ sc->result=DID_SOFT_ERROR << 16;
+ else /* Sufficient data transfer occurred */
sc->result = (DID_OK << 16) | scsi_status;
- } else if ( xfer_cnt == 0 ) {
- /* A CRC Error causes this condition; retry */
- sc->result = (DRIVER_SENSE << 24) | (DID_OK << 16) |
- (CHECK_CONDITION << 1);
- sc->sense_buffer[0] = 0x70;
- sc->sense_buffer[2] = NO_SENSE;
- sc->sense_buffer[12] = 0;
- sc->sense_buffer[13] = 0;
- } else {
- sc->result = DID_SOFT_ERROR << 16;
- }
- dreplyprintk((KERN_NOTICE
- "RESIDUAL_MISMATCH: result=%x on id=%d\n",
- sc->result, sc->device->id));
+ dreplyprintk((KERN_NOTICE
+ "RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->device->id));
break;
case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
@@ -692,7 +703,10 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
;
} else {
if (xfer_cnt < sc->underflow) {
- sc->result = DID_SOFT_ERROR << 16;
+ if (scsi_status == SAM_STAT_BUSY)
+ sc->result = SAM_STAT_BUSY;
+ else
+ sc->result = DID_SOFT_ERROR << 16;
}
if (scsi_state & (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)) {
/* What to do?
@@ -717,8 +731,10 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
- scsi_status = pScsiReply->SCSIStatus;
- sc->result = (DID_OK << 16) | scsi_status;
+ if (scsi_status == MPI_SCSI_STATUS_BUSY)
+ sc->result = (DID_BUS_BUSY << 16) | scsi_status;
+ else
+ sc->result = (DID_OK << 16) | scsi_status;
if (scsi_state == 0) {
;
} else if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) {
@@ -890,12 +906,13 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, uint target, uint lun)
SCSIIORequest_t *mf = NULL;
int ii;
int max = hd->ioc->req_depth;
+ struct scsi_cmnd *sc;
dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n",
target, lun, max));
for (ii=0; ii < max; ii++) {
- if (hd->ScsiLookup[ii] != NULL) {
+ if ((sc = hd->ScsiLookup[ii]) != NULL) {
mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii);
@@ -910,9 +927,22 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, uint target, uint lun)
hd->ScsiLookup[ii] = NULL;
mptscsih_freeChainBuffers(hd->ioc, ii);
mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf);
+ if (sc->use_sg) {
+ pci_unmap_sg(hd->ioc->pcidev,
+ (struct scatterlist *) sc->request_buffer,
+ sc->use_sg,
+ sc->sc_data_direction);
+ } else if (sc->request_bufflen) {
+ pci_unmap_single(hd->ioc->pcidev,
+ sc->SCp.dma_handle,
+ sc->request_bufflen,
+ sc->sc_data_direction);
+ }
+ sc->host_scribble = NULL;
+ sc->result = DID_NO_CONNECT << 16;
+ sc->scsi_done(sc);
}
}
-
return;
}
@@ -967,8 +997,10 @@ mptscsih_remove(struct pci_dev *pdev)
unsigned long flags;
int sz1;
- if(!host)
+ if(!host) {
+ mpt_detach(pdev);
return;
+ }
scsi_remove_host(host);
@@ -1256,8 +1288,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
MPT_SCSI_HOST *hd;
MPT_FRAME_HDR *mf;
SCSIIORequest_t *pScsiReq;
- VirtDevice *pTarget;
- int target;
+ VirtDevice *pTarget = SCpnt->device->hostdata;
int lun;
u32 datalen;
u32 scsictl;
@@ -1267,12 +1298,9 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
int ii;
hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata;
- target = SCpnt->device->id;
lun = SCpnt->device->lun;
SCpnt->scsi_done = done;
- pTarget = hd->Targets[target];
-
dmfprintk((MYIOC_s_INFO_FMT "qcmd: SCpnt=%p, done()=%p\n",
(hd && hd->ioc) ? hd->ioc->name : "ioc?", SCpnt, done));
@@ -1315,7 +1343,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
/* Default to untagged. Once a target structure has been allocated,
* use the Inquiry data to determine if device supports tagged.
*/
- if ( pTarget
+ if (pTarget
&& (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)
&& (SCpnt->device->tagged_supported)) {
scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
@@ -1325,8 +1353,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
/* Use the above information to set up the message frame
*/
- pScsiReq->TargetID = (u8) target;
- pScsiReq->Bus = (u8) SCpnt->device->channel;
+ pScsiReq->TargetID = (u8) pTarget->target_id;
+ pScsiReq->Bus = pTarget->bus_id;
pScsiReq->ChainOffset = 0;
pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
pScsiReq->CDBLength = SCpnt->cmd_len;
@@ -1378,7 +1406,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
if (hd->ioc->bus_type == SCSI) {
- int dvStatus = hd->ioc->spi_data.dvStatus[target];
+ int dvStatus = hd->ioc->spi_data.dvStatus[pTarget->target_id];
int issueCmd = 1;
if (dvStatus || hd->ioc->spi_data.forceDv) {
@@ -1426,6 +1454,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
return 0;
fail:
+ hd->ScsiLookup[my_idx] = NULL;
mptscsih_freeChainBuffers(hd->ioc, my_idx);
mpt_free_msg_frame(hd->ioc, mf);
return SCSI_MLQUEUE_HOST_BUSY;
@@ -1713,24 +1742,23 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
MPT_FRAME_HDR *mf;
u32 ctx2abort;
int scpnt_idx;
+ int retval;
/* If we can't locate our host adapter structure, return FAILED status.
*/
if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL) {
SCpnt->result = DID_RESET << 16;
SCpnt->scsi_done(SCpnt);
- dfailprintk((KERN_WARNING MYNAM ": mptscsih_abort: "
+ dfailprintk((KERN_INFO MYNAM ": mptscsih_abort: "
"Can't locate host! (sc=%p)\n",
SCpnt));
return FAILED;
}
ioc = hd->ioc;
- if (hd->resetPending)
+ if (hd->resetPending) {
return FAILED;
-
- printk(KERN_WARNING MYNAM ": %s: >> Attempting task abort! (sc=%p)\n",
- hd->ioc->name, SCpnt);
+ }
if (hd->timeouts < -1)
hd->timeouts++;
@@ -1738,16 +1766,20 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
/* Find this command
*/
if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) {
- /* Cmd not found in ScsiLookup.
+ /* Cmd not found in ScsiLookup.
* Do OS callback.
*/
SCpnt->result = DID_RESET << 16;
- dtmprintk((KERN_WARNING MYNAM ": %s: mptscsih_abort: "
+ dtmprintk((KERN_INFO MYNAM ": %s: mptscsih_abort: "
"Command not in the active list! (sc=%p)\n",
hd->ioc->name, SCpnt));
return SUCCESS;
}
+ printk(KERN_WARNING MYNAM ": %s: attempting task abort! (sc=%p)\n",
+ hd->ioc->name, SCpnt);
+ scsi_print_command(SCpnt);
+
/* Most important! Set TaskMsgContext to SCpnt's MsgContext!
* (the IO to be ABORT'd)
*
@@ -1760,38 +1792,22 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
hd->abortSCpnt = SCpnt;
- if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
SCpnt->device->channel, SCpnt->device->id, SCpnt->device->lun,
- ctx2abort, 2 /* 2 second timeout */)
- < 0) {
+ ctx2abort, 2 /* 2 second timeout */);
- /* The TM request failed and the subsequent FW-reload failed!
- * Fatal error case.
- */
- printk(MYIOC_s_WARN_FMT "Error issuing abort task! (sc=%p)\n",
- hd->ioc->name, SCpnt);
+ printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n",
+ hd->ioc->name,
+ ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
- /* We must clear our pending flag before clearing our state.
- */
+ if (retval == 0)
+ return SUCCESS;
+
+ if(retval != FAILED ) {
hd->tmPending = 0;
hd->tmState = TM_STATE_NONE;
-
- /* Unmap the DMA buffers, if any. */
- if (SCpnt->use_sg) {
- pci_unmap_sg(ioc->pcidev, (struct scatterlist *) SCpnt->request_buffer,
- SCpnt->use_sg, SCpnt->sc_data_direction);
- } else if (SCpnt->request_bufflen) {
- pci_unmap_single(ioc->pcidev, SCpnt->SCp.dma_handle,
- SCpnt->request_bufflen, SCpnt->sc_data_direction);
- }
- hd->ScsiLookup[scpnt_idx] = NULL;
- SCpnt->result = DID_RESET << 16;
- SCpnt->scsi_done(SCpnt); /* Issue the command callback */
- mptscsih_freeChainBuffers(ioc, scpnt_idx);
- mpt_free_msg_frame(ioc, mf);
- return FAILED;
}
- return SUCCESS;
+ return FAILED;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1807,11 +1823,12 @@ int
mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
{
MPT_SCSI_HOST *hd;
+ int retval;
/* If we can't locate our host adapter structure, return FAILED status.
*/
if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
- dtmprintk((KERN_WARNING MYNAM ": mptscsih_dev_reset: "
+ dtmprintk((KERN_INFO MYNAM ": mptscsih_dev_reset: "
"Can't locate host! (sc=%p)\n",
SCpnt));
return FAILED;
@@ -1820,24 +1837,26 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
if (hd->resetPending)
return FAILED;
- printk(KERN_WARNING MYNAM ": %s: >> Attempting target reset! (sc=%p)\n",
+ printk(KERN_WARNING MYNAM ": %s: attempting target reset! (sc=%p)\n",
hd->ioc->name, SCpnt);
+ scsi_print_command(SCpnt);
- if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+ retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
SCpnt->device->channel, SCpnt->device->id,
- 0, 0, 5 /* 5 second timeout */)
- < 0){
- /* The TM request failed and the subsequent FW-reload failed!
- * Fatal error case.
- */
- printk(MYIOC_s_WARN_FMT "Error processing TaskMgmt request (sc=%p)\n",
- hd->ioc->name, SCpnt);
+ 0, 0, 5 /* 5 second timeout */);
+
+ printk (KERN_WARNING MYNAM ": %s: target reset: %s (sc=%p)\n",
+ hd->ioc->name,
+ ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
+
+ if (retval == 0)
+ return SUCCESS;
+
+ if(retval != FAILED ) {
hd->tmPending = 0;
hd->tmState = TM_STATE_NONE;
- return FAILED;
}
-
- return SUCCESS;
+ return FAILED;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1853,41 +1872,39 @@ int
mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
{
MPT_SCSI_HOST *hd;
- spinlock_t *host_lock = SCpnt->device->host->host_lock;
+ int retval;
/* If we can't locate our host adapter structure, return FAILED status.
*/
if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
- dtmprintk((KERN_WARNING MYNAM ": mptscsih_bus_reset: "
+ dtmprintk((KERN_INFO MYNAM ": mptscsih_bus_reset: "
"Can't locate host! (sc=%p)\n",
SCpnt ) );
return FAILED;
}
- printk(KERN_WARNING MYNAM ": %s: >> Attempting bus reset! (sc=%p)\n",
+ printk(KERN_WARNING MYNAM ": %s: attempting bus reset! (sc=%p)\n",
hd->ioc->name, SCpnt);
+ scsi_print_command(SCpnt);
if (hd->timeouts < -1)
hd->timeouts++;
- /* We are now ready to execute the task management request. */
- if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
- SCpnt->device->channel, 0, 0, 0, 5 /* 5 second timeout */)
- < 0){
+ retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ SCpnt->device->channel, 0, 0, 0, 5 /* 5 second timeout */);
- /* The TM request failed and the subsequent FW-reload failed!
- * Fatal error case.
- */
- printk(MYIOC_s_WARN_FMT
- "Error processing TaskMgmt request (sc=%p)\n",
- hd->ioc->name, SCpnt);
+ printk (KERN_WARNING MYNAM ": %s: bus reset: %s (sc=%p)\n",
+ hd->ioc->name,
+ ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
+
+ if (retval == 0)
+ return SUCCESS;
+
+ if(retval != FAILED ) {
hd->tmPending = 0;
hd->tmState = TM_STATE_NONE;
- spin_lock_irq(host_lock);
- return FAILED;
}
-
- return SUCCESS;
+ return FAILED;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2169,7 +2186,7 @@ mptscsih_slave_alloc(struct scsi_device *device)
vdev->raidVolume = 0;
hd->Targets[device->id] = vdev;
if (hd->ioc->bus_type == SCSI) {
- if (hd->ioc->spi_data.isRaid & (1 << device->id)) {
+ if (hd->ioc->raid_data.isRaid & (1 << device->id)) {
vdev->raidVolume = 1;
ddvtprintk((KERN_INFO
"RAID Volume @ id %d\n", device->id));
@@ -2180,22 +2197,7 @@ mptscsih_slave_alloc(struct scsi_device *device)
out:
vdev->num_luns++;
- return 0;
-}
-
-static int
-mptscsih_is_raid_volume(MPT_SCSI_HOST *hd, uint id)
-{
- int i;
-
- if (!hd->ioc->spi_data.isRaid || !hd->ioc->spi_data.pIocPg3)
- return 0;
-
- for (i = 0; i < hd->ioc->spi_data.pIocPg3->NumPhysDisks; i++) {
- if (id == hd->ioc->spi_data.pIocPg3->PhysDisk[i].PhysDiskID)
- return 1;
- }
-
+ device->hostdata = vdev;
return 0;
}
@@ -2226,7 +2228,7 @@ mptscsih_slave_destroy(struct scsi_device *device)
hd->Targets[target] = NULL;
if (hd->ioc->bus_type == SCSI) {
- if (mptscsih_is_raid_volume(hd, target)) {
+ if (mptscsih_is_phys_disk(hd->ioc, target)) {
hd->ioc->spi_data.forceDv |= MPT_SCSICFG_RELOAD_IOC_PG3;
} else {
hd->ioc->spi_data.dvStatus[target] =
@@ -2439,6 +2441,7 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
MPT_SCSI_HOST *hd;
unsigned long flags;
+ int ii;
dtmprintk((KERN_WARNING MYNAM
": IOC %s_reset routed to SCSI host driver!\n",
@@ -2496,11 +2499,8 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
/* ScsiLookup initialization
*/
- {
- int ii;
- for (ii=0; ii < hd->ioc->req_depth; ii++)
- hd->ScsiLookup[ii] = NULL;
- }
+ for (ii=0; ii < hd->ioc->req_depth; ii++)
+ hd->ScsiLookup[ii] = NULL;
/* 2. Chain Buffer initialization
*/
@@ -2549,6 +2549,16 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* work queue thread to clear the persistency table */
+static void
+mptscsih_sas_persist_clear_table(void * arg)
+{
+ MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+
+ mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
int
mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
@@ -2558,18 +2568,18 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
devtprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
ioc->name, event));
+ if (ioc->sh == NULL ||
+ ((hd = (MPT_SCSI_HOST *)ioc->sh->hostdata) == NULL))
+ return 1;
+
switch (event) {
case MPI_EVENT_UNIT_ATTENTION: /* 03 */
/* FIXME! */
break;
case MPI_EVENT_IOC_BUS_RESET: /* 04 */
case MPI_EVENT_EXT_BUS_RESET: /* 05 */
- hd = NULL;
- if (ioc->sh) {
- hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
- if (hd && (ioc->bus_type == SCSI) && (hd->soft_resets < -1))
- hd->soft_resets++;
- }
+ if (hd && (ioc->bus_type == SCSI) && (hd->soft_resets < -1))
+ hd->soft_resets++;
break;
case MPI_EVENT_LOGOUT: /* 09 */
/* FIXME! */
@@ -2588,69 +2598,24 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
break;
case MPI_EVENT_INTEGRATED_RAID: /* 0B */
+ {
+ pMpiEventDataRaid_t pRaidEventData =
+ (pMpiEventDataRaid_t) pEvReply->Data;
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
- /* negoNvram set to 0 if DV enabled and to USE_NVRAM if
- * if DV disabled. Need to check for target mode.
- */
- hd = NULL;
- if (ioc->sh)
- hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
-
- if (hd && (ioc->bus_type == SCSI) && (hd->negoNvram == 0)) {
- ScsiCfgData *pSpi;
- Ioc3PhysDisk_t *pPDisk;
- int numPDisk;
- u8 reason;
- u8 physDiskNum;
-
- reason = (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16;
- if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) {
- /* New or replaced disk.
- * Set DV flag and schedule DV.
- */
- pSpi = &ioc->spi_data;
- physDiskNum = (le32_to_cpu(pEvReply->Data[0]) & 0xFF000000) >> 24;
- ddvtprintk(("DV requested for phys disk id %d\n", physDiskNum));
- if (pSpi->pIocPg3) {
- pPDisk = pSpi->pIocPg3->PhysDisk;
- numPDisk =pSpi->pIocPg3->NumPhysDisks;
-
- while (numPDisk) {
- if (physDiskNum == pPDisk->PhysDiskNum) {
- pSpi->dvStatus[pPDisk->PhysDiskID] = (MPT_SCSICFG_NEED_DV | MPT_SCSICFG_DV_NOT_DONE);
- pSpi->forceDv = MPT_SCSICFG_NEED_DV;
- ddvtprintk(("NEED_DV set for phys disk id %d\n", pPDisk->PhysDiskID));
- break;
- }
- pPDisk++;
- numPDisk--;
- }
-
- if (numPDisk == 0) {
- /* The physical disk that needs DV was not found
- * in the stored IOC Page 3. The driver must reload
- * this page. DV routine will set the NEED_DV flag for
- * all phys disks that have DV_NOT_DONE set.
- */
- pSpi->forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
- ddvtprintk(("phys disk %d not found. Setting reload IOC Pg3 Flag\n", physDiskNum));
- }
- }
- }
- }
+ /* Domain Validation Needed */
+ if (ioc->bus_type == SCSI &&
+ pRaidEventData->ReasonCode ==
+ MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED)
+ mptscsih_set_dvflags_raid(hd, pRaidEventData->PhysDiskNum);
#endif
+ break;
+ }
-#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY)
- printk("Raid Event RF: ");
- {
- u32 *m = (u32 *)pEvReply;
- int ii;
- int n = (int)pEvReply->MsgLength;
- for (ii=6; ii < n; ii++)
- printk(" %08x", le32_to_cpu(m[ii]));
- printk("\n");
- }
-#endif
+ /* Persistent table is full. */
+ case MPI_EVENT_PERSISTENT_TABLE_FULL:
+ INIT_WORK(&mptscsih_persistTask,
+ mptscsih_sas_persist_clear_table,(void *)ioc);
+ schedule_work(&mptscsih_persistTask);
break;
case MPI_EVENT_NONE: /* 00 */
@@ -2687,7 +2652,7 @@ mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *
{
int indexed_lun, lun_index;
VirtDevice *vdev;
- ScsiCfgData *pSpi;
+ SpiCfgData *pSpi;
char data_56;
dinitprintk((MYIOC_s_INFO_FMT "initTarget bus=%d id=%d lun=%d hd=%p\n",
@@ -2794,7 +2759,7 @@ mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *
static void
mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56)
{
- ScsiCfgData *pspi_data = &hd->ioc->spi_data;
+ SpiCfgData *pspi_data = &hd->ioc->spi_data;
int id = (int) target->target_id;
int nvram;
VirtDevice *vdev;
@@ -2973,11 +2938,13 @@ mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56)
static void
mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
{
+ MPT_ADAPTER *ioc = hd->ioc;
u8 cmd;
- ScsiCfgData *pSpi;
+ SpiCfgData *pSpi;
- ddvtprintk((" set_dvflags: id=%d lun=%d negoNvram=%x cmd=%x\n",
- pReq->TargetID, pReq->LUN[1], hd->negoNvram, pReq->CDB[0]));
+ ddvtprintk((MYIOC_s_NOTE_FMT
+ " set_dvflags: id=%d lun=%d negoNvram=%x cmd=%x\n",
+ hd->ioc->name, pReq->TargetID, pReq->LUN[1], hd->negoNvram, pReq->CDB[0]));
if ((pReq->LUN[1] != 0) || (hd->negoNvram != 0))
return;
@@ -2985,12 +2952,12 @@ mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
cmd = pReq->CDB[0];
if ((cmd == READ_CAPACITY) || (cmd == MODE_SENSE)) {
- pSpi = &hd->ioc->spi_data;
- if ((pSpi->isRaid & (1 << pReq->TargetID)) && pSpi->pIocPg3) {
+ pSpi = &ioc->spi_data;
+ if ((ioc->raid_data.isRaid & (1 << pReq->TargetID)) && ioc->raid_data.pIocPg3) {
/* Set NEED_DV for all hidden disks
*/
- Ioc3PhysDisk_t *pPDisk = pSpi->pIocPg3->PhysDisk;
- int numPDisk = pSpi->pIocPg3->NumPhysDisks;
+ Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
+ int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
while (numPDisk) {
pSpi->dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV;
@@ -3004,6 +2971,50 @@ mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
}
}
+/* mptscsih_set_dvflags_raid()
+ *
+ * New or replaced disk. Set DV flag and schedule DV.
+ */
+static void
+mptscsih_set_dvflags_raid(MPT_SCSI_HOST *hd, int id)
+{
+ MPT_ADAPTER *ioc = hd->ioc;
+ SpiCfgData *pSpi = &ioc->spi_data;
+ Ioc3PhysDisk_t *pPDisk;
+ int numPDisk;
+
+ if (hd->negoNvram != 0)
+ return;
+
+ ddvtprintk(("DV requested for phys disk id %d\n", id));
+ if (ioc->raid_data.pIocPg3) {
+ pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
+ numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
+ while (numPDisk) {
+ if (id == pPDisk->PhysDiskNum) {
+ pSpi->dvStatus[pPDisk->PhysDiskID] =
+ (MPT_SCSICFG_NEED_DV | MPT_SCSICFG_DV_NOT_DONE);
+ pSpi->forceDv = MPT_SCSICFG_NEED_DV;
+ ddvtprintk(("NEED_DV set for phys disk id %d\n",
+ pPDisk->PhysDiskID));
+ break;
+ }
+ pPDisk++;
+ numPDisk--;
+ }
+
+ if (numPDisk == 0) {
+ /* The physical disk that needs DV was not found
+ * in the stored IOC Page 3. The driver must reload
+ * this page. DV routine will set the NEED_DV flag for
+ * all phys disks that have DV_NOT_DONE set.
+ */
+ pSpi->forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
+ ddvtprintk(("phys disk %d not found. Setting reload IOC Pg3 Flag\n",id));
+ }
+ }
+}
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* If no Target, bus reset on 1st I/O. Set the flag to
@@ -3091,7 +3102,7 @@ mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
MPT_ADAPTER *ioc = hd->ioc;
Config_t *pReq;
SCSIDevicePage1_t *pData;
- VirtDevice *pTarget;
+ VirtDevice *pTarget=NULL;
MPT_FRAME_HDR *mf;
dma_addr_t dataDma;
u16 req_idx;
@@ -3190,7 +3201,7 @@ mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
#endif
if (flags & MPT_SCSICFG_BLK_NEGO)
- negoFlags = MPT_TARGET_NO_NEGO_WIDE | MPT_TARGET_NO_NEGO_SYNC;
+ negoFlags |= MPT_TARGET_NO_NEGO_WIDE | MPT_TARGET_NO_NEGO_SYNC;
mptscsih_setDevicePage1Flags(width, factor, offset,
&requested, &configuration, negoFlags);
@@ -4011,7 +4022,7 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum)
/* If target Ptr NULL or if this target is NOT a disk, skip.
*/
- if ((pTarget) && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)){
+ if ((pTarget) && (pTarget->inq_data[0] == TYPE_DISK)){
for (lun=0; lun <= MPT_LAST_LUN; lun++) {
/* If LUN present, issue the command
*/
@@ -4106,9 +4117,9 @@ mptscsih_domainValidation(void *arg)
if ((ioc->spi_data.forceDv & MPT_SCSICFG_RELOAD_IOC_PG3) != 0) {
mpt_read_ioc_pg_3(ioc);
- if (ioc->spi_data.pIocPg3) {
- Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk;
- int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks;
+ if (ioc->raid_data.pIocPg3) {
+ Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
+ int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
while (numPDisk) {
if (ioc->spi_data.dvStatus[pPDisk->PhysDiskID] & MPT_SCSICFG_DV_NOT_DONE)
@@ -4147,7 +4158,7 @@ mptscsih_domainValidation(void *arg)
isPhysDisk = mptscsih_is_phys_disk(ioc, id);
if (isPhysDisk) {
for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
- if (hd->ioc->spi_data.isRaid & (1 << ii)) {
+ if (hd->ioc->raid_data.isRaid & (1 << ii)) {
hd->ioc->spi_data.dvStatus[ii] |= MPT_SCSICFG_DV_PENDING;
}
}
@@ -4166,7 +4177,7 @@ mptscsih_domainValidation(void *arg)
if (isPhysDisk) {
for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
- if (hd->ioc->spi_data.isRaid & (1 << ii)) {
+ if (hd->ioc->raid_data.isRaid & (1 << ii)) {
hd->ioc->spi_data.dvStatus[ii] &= ~MPT_SCSICFG_DV_PENDING;
}
}
@@ -4188,21 +4199,21 @@ mptscsih_domainValidation(void *arg)
/* Search IOC page 3 to determine if this is hidden physical disk
*/
-static int
+/* Search IOC page 3 to determine if this is hidden physical disk
+ */
+static int
mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id)
{
- if (ioc->spi_data.pIocPg3) {
- Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk;
- int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks;
+ int i;
- while (numPDisk) {
- if (pPDisk->PhysDiskID == id) {
- return 1;
- }
- pPDisk++;
- numPDisk--;
- }
+ if (!ioc->raid_data.isRaid || !ioc->raid_data.pIocPg3)
+ return 0;
+
+ for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
+ if (id == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID)
+ return 1;
}
+
return 0;
}
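
For reference (not part of the patch): the rewrite above replaces the hand-rolled pointer walk with an index loop over IOC Page 3 and bails out early when the adapter has no RAID volumes. A minimal user-space sketch of the same lookup, using made-up stand-in types instead of the Fusion headers:

    #include <stddef.h>

    /* Stand-ins for the IOC Page 3 layout; field names are illustrative. */
    struct phys_disk { int phys_disk_num; int phys_disk_id; };
    struct ioc_page3 { int num_phys_disks; struct phys_disk phys_disk[8]; };

    /* Return 1 if 'id' is a hidden physical disk behind a RAID volume. */
    static int is_phys_disk(unsigned int is_raid, const struct ioc_page3 *pg3, int id)
    {
            int i;

            if (!is_raid || pg3 == NULL)
                    return 0;

            for (i = 0; i < pg3->num_phys_disks; i++)
                    if (id == pg3->phys_disk[i].phys_disk_id)
                            return 1;

            return 0;
    }
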
@@ -4408,7 +4419,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
/* Skip this ID? Set cfg.cfghdr.hdr to force config page write
*/
{
- ScsiCfgData *pspi_data = &hd->ioc->spi_data;
+ SpiCfgData *pspi_data = &hd->ioc->spi_data;
if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) {
/* Set the factor from nvram */
nfactor = (pspi_data->nvram[id] & MPT_NVRAM_SYNC_MASK) >> 8;
@@ -4438,11 +4449,11 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
}
 /* Finish iocmd initialization - hidden or visible disk? */
- if (ioc->spi_data.pIocPg3) {
+ if (ioc->raid_data.pIocPg3) {
/* Search IOC page 3 for matching id
*/
- Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk;
- int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks;
+ Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
+ int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
while (numPDisk) {
if (pPDisk->PhysDiskID == id) {
@@ -4466,7 +4477,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
 /* RAID Volume IDs may double for a physical device. If RAID but
* not a physical ID as well, skip DV.
*/
- if ((hd->ioc->spi_data.isRaid & (1 << id)) && !(iocmd.flags & MPT_ICFLAG_PHYS_DISK))
+ if ((hd->ioc->raid_data.isRaid & (1 << id)) && !(iocmd.flags & MPT_ICFLAG_PHYS_DISK))
goto target_done;
@@ -4815,6 +4826,8 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
notDone = 0;
if (iocmd.flags & MPT_ICFLAG_ECHO) {
bufsize = ((pbuf1[2] & 0x1F) <<8) | pbuf1[3];
+ if (pbuf1[0] & 0x01)
+ iocmd.flags |= MPT_ICFLAG_EBOS;
} else {
bufsize = pbuf1[1]<<16 | pbuf1[2]<<8 | pbuf1[3];
}
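
An aside on the hunk above (not part of the patch): the first branch parses a READ BUFFER echo-buffer descriptor, taking a 13-bit buffer capacity from bytes 2-3 and what the driver calls the EBOS bit from bit 0 of byte 0. A small sketch of that parsing, assuming the same 4-byte descriptor layout:

    struct echo_desc {
            int ebos;                 /* bit 0 of descriptor byte 0 */
            unsigned int capacity;    /* 13-bit capacity from bytes 2 and 3 */
    };

    static struct echo_desc parse_echo_descriptor(const unsigned char *buf)
    {
            struct echo_desc d;

            d.ebos = buf[0] & 0x01;
            d.capacity = ((buf[2] & 0x1F) << 8) | buf[3];
            return d;
    }
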
@@ -4911,6 +4924,9 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
}
iocmd.flags &= ~MPT_ICFLAG_DID_RESET;
+ if (iocmd.flags & MPT_ICFLAG_EBOS)
+ goto skip_Reserve;
+
repeat = 5;
while (repeat && (!(iocmd.flags & MPT_ICFLAG_RESERVED))) {
iocmd.cmd = RESERVE;
@@ -4954,6 +4970,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
}
}
+skip_Reserve:
mptscsih_fillbuf(pbuf1, sz, patt, 1);
iocmd.cmd = WRITE_BUFFER;
iocmd.data_dma = buf1_dma;
@@ -5198,11 +5215,12 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
* If not an LVD bus, the adapter minSyncFactor has been
* already throttled back.
*/
+ negoFlags = hd->ioc->spi_data.noQas;
if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume) {
width = pTarget->maxWidth;
offset = pTarget->maxOffset;
factor = pTarget->minSyncFactor;
- negoFlags = pTarget->negoFlags;
+ negoFlags |= pTarget->negoFlags;
} else {
if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
data = hd->ioc->spi_data.nvram[id];
@@ -5223,7 +5241,6 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
}
/* Set the negotiation flags */
- negoFlags = hd->ioc->spi_data.noQas;
if (!width)
negoFlags |= MPT_TARGET_NO_NEGO_WIDE;
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 51c0255ac16..971fda4b8b5 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -1,5 +1,5 @@
/*
- * linux/drivers/message/fusion/mptscsi.h
+ * linux/drivers/message/fusion/mptscsih.h
* High performance SCSI / Fibre Channel SCSI Host device driver.
* For use with PCI chip/adapter(s):
* LSIFC9xx/LSI409xx Fibre Channel
@@ -53,8 +53,8 @@
* SCSI Public stuff...
*/
-#define MPT_SCSI_CMD_PER_DEV_HIGH 31
-#define MPT_SCSI_CMD_PER_DEV_LOW 7
+#define MPT_SCSI_CMD_PER_DEV_HIGH 64
+#define MPT_SCSI_CMD_PER_DEV_LOW 32
#define MPT_SCSI_CMD_PER_LUN 7
@@ -77,6 +77,7 @@
#define MPTSCSIH_MAX_WIDTH 1
#define MPTSCSIH_MIN_SYNC 0x08
#define MPTSCSIH_SAF_TE 0
+#define MPTSCSIH_PT_CLEAR 0
#endif
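
Editorial note (not part of the patch): the header change above raises the per-device command bounds from 31/7 to 64/32. A hypothetical sketch of how such bounds are commonly applied when picking a queue depth; this is not the driver's actual policy:

    #define CMD_PER_DEV_HIGH 64   /* mirrors MPT_SCSI_CMD_PER_DEV_HIGH */
    #define CMD_PER_DEV_LOW  32   /* mirrors MPT_SCSI_CMD_PER_DEV_LOW  */

    static int choose_queue_depth(int device_supports_tcq)
    {
            return device_supports_tcq ? CMD_PER_DEV_HIGH : CMD_PER_DEV_LOW;
    }
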
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 587d1274fd7..5c0e307d1d5 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -199,7 +199,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MYIOC_s_WARN_FMT
"Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n",
ioc->name, ioc);
- return -ENODEV;
+ return 0;
}
sh = scsi_host_alloc(&mptspi_driver_template, sizeof(MPT_SCSI_HOST));
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 48c03c11cd9..a01efa6d5c6 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -72,7 +72,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
}
skb_reserve(skb, 4);
cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
- data = (cisco_packet*)skb->data;
+ data = (cisco_packet*)(skb->data + 4);
data->type = htonl(type);
data->par1 = htonl(par1);
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
index fc145307a7d..d6a78f1a2f1 100644
--- a/drivers/s390/scsi/Makefile
+++ b/drivers/s390/scsi/Makefile
@@ -3,7 +3,7 @@
#
zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \
- zfcp_fsf.o zfcp_sysfs_adapter.o zfcp_sysfs_port.o \
+ zfcp_fsf.o zfcp_dbf.o zfcp_sysfs_adapter.o zfcp_sysfs_port.o \
zfcp_sysfs_unit.o zfcp_sysfs_driver.o
obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index bfe3ba73bc0..0b5087f7cab 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -122,95 +122,6 @@ _zfcp_hex_dump(char *addr, int count)
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
-static inline int
-zfcp_fsf_req_is_scsi_cmnd(struct zfcp_fsf_req *fsf_req)
-{
- return ((fsf_req->fsf_command == FSF_QTCB_FCP_CMND) &&
- !(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT));
-}
-
-void
-zfcp_cmd_dbf_event_fsf(const char *text, struct zfcp_fsf_req *fsf_req,
- void *add_data, int add_length)
-{
- struct zfcp_adapter *adapter = fsf_req->adapter;
- struct scsi_cmnd *scsi_cmnd;
- int level = 3;
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->dbf_lock, flags);
- if (zfcp_fsf_req_is_scsi_cmnd(fsf_req)) {
- scsi_cmnd = fsf_req->data.send_fcp_command_task.scsi_cmnd;
- debug_text_event(adapter->cmd_dbf, level, "fsferror");
- debug_text_event(adapter->cmd_dbf, level, text);
- debug_event(adapter->cmd_dbf, level, &fsf_req,
- sizeof (unsigned long));
- debug_event(adapter->cmd_dbf, level, &fsf_req->seq_no,
- sizeof (u32));
- debug_event(adapter->cmd_dbf, level, &scsi_cmnd,
- sizeof (unsigned long));
- debug_event(adapter->cmd_dbf, level, &scsi_cmnd->cmnd,
- min(ZFCP_CMD_DBF_LENGTH, (int)scsi_cmnd->cmd_len));
- for (i = 0; i < add_length; i += ZFCP_CMD_DBF_LENGTH)
- debug_event(adapter->cmd_dbf,
- level,
- (char *) add_data + i,
- min(ZFCP_CMD_DBF_LENGTH, add_length - i));
- }
- spin_unlock_irqrestore(&adapter->dbf_lock, flags);
-}
-
-/* XXX additionally log unit if available */
-/* ---> introduce new parameter for unit, see 2.4 code */
-void
-zfcp_cmd_dbf_event_scsi(const char *text, struct scsi_cmnd *scsi_cmnd)
-{
- struct zfcp_adapter *adapter;
- union zfcp_req_data *req_data;
- struct zfcp_fsf_req *fsf_req;
- int level = ((host_byte(scsi_cmnd->result) != 0) ? 1 : 5);
- unsigned long flags;
-
- adapter = (struct zfcp_adapter *) scsi_cmnd->device->host->hostdata[0];
- req_data = (union zfcp_req_data *) scsi_cmnd->host_scribble;
- fsf_req = (req_data ? req_data->send_fcp_command_task.fsf_req : NULL);
- spin_lock_irqsave(&adapter->dbf_lock, flags);
- debug_text_event(adapter->cmd_dbf, level, "hostbyte");
- debug_text_event(adapter->cmd_dbf, level, text);
- debug_event(adapter->cmd_dbf, level, &scsi_cmnd->result, sizeof (u32));
- debug_event(adapter->cmd_dbf, level, &scsi_cmnd,
- sizeof (unsigned long));
- debug_event(adapter->cmd_dbf, level, &scsi_cmnd->cmnd,
- min(ZFCP_CMD_DBF_LENGTH, (int)scsi_cmnd->cmd_len));
- if (likely(fsf_req)) {
- debug_event(adapter->cmd_dbf, level, &fsf_req,
- sizeof (unsigned long));
- debug_event(adapter->cmd_dbf, level, &fsf_req->seq_no,
- sizeof (u32));
- } else {
- debug_text_event(adapter->cmd_dbf, level, "");
- debug_text_event(adapter->cmd_dbf, level, "");
- }
- spin_unlock_irqrestore(&adapter->dbf_lock, flags);
-}
-
-void
-zfcp_in_els_dbf_event(struct zfcp_adapter *adapter, const char *text,
- struct fsf_status_read_buffer *status_buffer, int length)
-{
- int level = 1;
- int i;
-
- debug_text_event(adapter->in_els_dbf, level, text);
- debug_event(adapter->in_els_dbf, level, &status_buffer->d_id, 8);
- for (i = 0; i < length; i += ZFCP_IN_ELS_DBF_LENGTH)
- debug_event(adapter->in_els_dbf,
- level,
- (char *) status_buffer->payload + i,
- min(ZFCP_IN_ELS_DBF_LENGTH, length - i));
-}
-
/**
* zfcp_device_setup - setup function
* @str: pointer to parameter string
@@ -1017,81 +928,6 @@ zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
mempool_destroy(adapter->pool.data_gid_pn);
}
-/**
- * zfcp_adapter_debug_register - registers debug feature for an adapter
- * @adapter: pointer to adapter for which debug features should be registered
- * return: -ENOMEM on error, 0 otherwise
- */
-int
-zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
-{
- char dbf_name[20];
-
- /* debug feature area which records SCSI command failures (hostbyte) */
- spin_lock_init(&adapter->dbf_lock);
-
- sprintf(dbf_name, ZFCP_CMD_DBF_NAME "%s",
- zfcp_get_busid_by_adapter(adapter));
- adapter->cmd_dbf = debug_register(dbf_name, ZFCP_CMD_DBF_INDEX,
- ZFCP_CMD_DBF_AREAS,
- ZFCP_CMD_DBF_LENGTH);
- debug_register_view(adapter->cmd_dbf, &debug_hex_ascii_view);
- debug_set_level(adapter->cmd_dbf, ZFCP_CMD_DBF_LEVEL);
-
- /* debug feature area which records SCSI command aborts */
- sprintf(dbf_name, ZFCP_ABORT_DBF_NAME "%s",
- zfcp_get_busid_by_adapter(adapter));
- adapter->abort_dbf = debug_register(dbf_name, ZFCP_ABORT_DBF_INDEX,
- ZFCP_ABORT_DBF_AREAS,
- ZFCP_ABORT_DBF_LENGTH);
- debug_register_view(adapter->abort_dbf, &debug_hex_ascii_view);
- debug_set_level(adapter->abort_dbf, ZFCP_ABORT_DBF_LEVEL);
-
- /* debug feature area which records incoming ELS commands */
- sprintf(dbf_name, ZFCP_IN_ELS_DBF_NAME "%s",
- zfcp_get_busid_by_adapter(adapter));
- adapter->in_els_dbf = debug_register(dbf_name, ZFCP_IN_ELS_DBF_INDEX,
- ZFCP_IN_ELS_DBF_AREAS,
- ZFCP_IN_ELS_DBF_LENGTH);
- debug_register_view(adapter->in_els_dbf, &debug_hex_ascii_view);
- debug_set_level(adapter->in_els_dbf, ZFCP_IN_ELS_DBF_LEVEL);
-
- /* debug feature area which records erp events */
- sprintf(dbf_name, ZFCP_ERP_DBF_NAME "%s",
- zfcp_get_busid_by_adapter(adapter));
- adapter->erp_dbf = debug_register(dbf_name, ZFCP_ERP_DBF_INDEX,
- ZFCP_ERP_DBF_AREAS,
- ZFCP_ERP_DBF_LENGTH);
- debug_register_view(adapter->erp_dbf, &debug_hex_ascii_view);
- debug_set_level(adapter->erp_dbf, ZFCP_ERP_DBF_LEVEL);
-
- if (!(adapter->cmd_dbf && adapter->abort_dbf &&
- adapter->in_els_dbf && adapter->erp_dbf)) {
- zfcp_adapter_debug_unregister(adapter);
- return -ENOMEM;
- }
-
- return 0;
-
-}
-
-/**
- * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
- * @adapter: pointer to adapter for which debug features should be unregistered
- */
-void
-zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
-{
- debug_unregister(adapter->abort_dbf);
- debug_unregister(adapter->cmd_dbf);
- debug_unregister(adapter->erp_dbf);
- debug_unregister(adapter->in_els_dbf);
- adapter->abort_dbf = NULL;
- adapter->cmd_dbf = NULL;
- adapter->erp_dbf = NULL;
- adapter->in_els_dbf = NULL;
-}
-
void
zfcp_dummy_release(struct device *dev)
{
@@ -1462,10 +1298,6 @@ zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
/* see FC-FS */
no_entries = (fcp_rscn_head->payload_len / 4);
- zfcp_in_els_dbf_event(adapter, "##rscn", status_buffer,
- fcp_rscn_head->payload_len);
-
- debug_text_event(adapter->erp_dbf, 1, "unsol_els_rscn:");
for (i = 1; i < no_entries; i++) {
/* skip head and start with 1st element */
fcp_rscn_element++;
@@ -1497,8 +1329,6 @@ zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
(ZFCP_STATUS_PORT_DID_DID, &port->status)) {
ZFCP_LOG_INFO("incoming RSCN, trying to open "
"port 0x%016Lx\n", port->wwpn);
- debug_text_event(adapter->erp_dbf, 1,
- "unsol_els_rscnu:");
zfcp_erp_port_reopen(port,
ZFCP_STATUS_COMMON_ERP_FAILED);
continue;
@@ -1524,8 +1354,6 @@ zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
*/
ZFCP_LOG_INFO("incoming RSCN, trying to open "
"port 0x%016Lx\n", port->wwpn);
- debug_text_event(adapter->erp_dbf, 1,
- "unsol_els_rscnk:");
zfcp_test_link(port);
}
}
@@ -1541,8 +1369,6 @@ zfcp_fsf_incoming_els_plogi(struct zfcp_adapter *adapter,
struct zfcp_port *port;
unsigned long flags;
- zfcp_in_els_dbf_event(adapter, "##plogi", status_buffer, 28);
-
read_lock_irqsave(&zfcp_data.config_lock, flags);
list_for_each_entry(port, &adapter->port_list_head, list) {
if (port->wwpn == (*(wwn_t *) & els_logi->nport_wwn))
@@ -1556,8 +1382,6 @@ zfcp_fsf_incoming_els_plogi(struct zfcp_adapter *adapter,
status_buffer->d_id,
zfcp_get_busid_by_adapter(adapter));
} else {
- debug_text_event(adapter->erp_dbf, 1, "unsol_els_plogi:");
- debug_event(adapter->erp_dbf, 1, &els_logi->nport_wwn, 8);
zfcp_erp_port_forced_reopen(port, 0);
}
}
@@ -1570,8 +1394,6 @@ zfcp_fsf_incoming_els_logo(struct zfcp_adapter *adapter,
struct zfcp_port *port;
unsigned long flags;
- zfcp_in_els_dbf_event(adapter, "##logo", status_buffer, 16);
-
read_lock_irqsave(&zfcp_data.config_lock, flags);
list_for_each_entry(port, &adapter->port_list_head, list) {
if (port->wwpn == els_logo->nport_wwpn)
@@ -1585,8 +1407,6 @@ zfcp_fsf_incoming_els_logo(struct zfcp_adapter *adapter,
status_buffer->d_id,
zfcp_get_busid_by_adapter(adapter));
} else {
- debug_text_event(adapter->erp_dbf, 1, "unsol_els_logo:");
- debug_event(adapter->erp_dbf, 1, &els_logo->nport_wwpn, 8);
zfcp_erp_port_forced_reopen(port, 0);
}
}
@@ -1595,7 +1415,6 @@ static void
zfcp_fsf_incoming_els_unknown(struct zfcp_adapter *adapter,
struct fsf_status_read_buffer *status_buffer)
{
- zfcp_in_els_dbf_event(adapter, "##undef", status_buffer, 24);
ZFCP_LOG_NORMAL("warning: unknown incoming ELS 0x%08x "
"for adapter %s\n", *(u32 *) (status_buffer->payload),
zfcp_get_busid_by_adapter(adapter));
@@ -1609,10 +1428,11 @@ zfcp_fsf_incoming_els(struct zfcp_fsf_req *fsf_req)
u32 els_type;
struct zfcp_adapter *adapter;
- status_buffer = fsf_req->data.status_read.buffer;
+ status_buffer = (struct fsf_status_read_buffer *) fsf_req->data;
els_type = *(u32 *) (status_buffer->payload);
adapter = fsf_req->adapter;
+ zfcp_san_dbf_event_incoming_els(fsf_req);
if (els_type == LS_PLOGI)
zfcp_fsf_incoming_els_plogi(adapter, status_buffer);
else if (els_type == LS_LOGO)
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index b30abab77da..0fc46381fc2 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -202,19 +202,9 @@ static int
zfcp_ccw_set_offline(struct ccw_device *ccw_device)
{
struct zfcp_adapter *adapter;
- struct zfcp_port *port;
- struct fc_rport *rport;
down(&zfcp_data.config_sema);
adapter = dev_get_drvdata(&ccw_device->dev);
- /* might be racy, but we cannot take config_lock due to the fact that
- fc_remote_port_delete might sleep */
- list_for_each_entry(port, &adapter->port_list_head, list)
- if (port->rport) {
- rport = port->rport;
- port->rport = NULL;
- fc_remote_port_delete(rport);
- }
zfcp_erp_adapter_shutdown(adapter, 0);
zfcp_erp_wait(adapter);
zfcp_adapter_scsi_unregister(adapter);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
new file mode 100644
index 00000000000..826fb3b0060
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -0,0 +1,995 @@
+/*
+ *
+ * linux/drivers/s390/scsi/zfcp_dbf.c
+ *
+ * FCP adapter driver for IBM eServer zSeries
+ *
+ * Debugging facilities
+ *
+ * (C) Copyright IBM Corp. 2005
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define ZFCP_DBF_REVISION "$Revision$"
+
+#include <asm/debug.h>
+#include <linux/ctype.h>
+#include "zfcp_ext.h"
+
+static u32 dbfsize = 4;
+
+module_param(dbfsize, uint, 0400);
+MODULE_PARM_DESC(dbfsize,
+ "number of pages for each debug feature area (default 4)");
+
+#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
+
+static inline int
+zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck)
+{
+ unsigned long long sec;
+ struct timespec xtime;
+ int len = 0;
+
+ stck -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
+ sec = stck >> 12;
+ do_div(sec, 1000000);
+ xtime.tv_sec = sec;
+ stck -= (sec * 1000000) << 12;
+ xtime.tv_nsec = ((stck * 1000) >> 12);
+ len += sprintf(out_buf + len, "%-24s%011lu:%06lu\n",
+ label, xtime.tv_sec, xtime.tv_nsec);
+
+ return len;
+}
+
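
Editorial note (not part of the patch): zfcp_dbf_stck() above turns an s390 TOD (STCK) value into a printable timestamp. The subtracted constant, copied verbatim from the function, rebases the TOD origin onto the UNIX epoch, and a 12-bit right shift converts the 2^-12 microsecond TOD units into microseconds. A user-space sketch of the same arithmetic, using plain 64-bit division instead of do_div() and reporting the remainder in microseconds:

    #include <stdint.h>

    /* Sketch of the conversion above; the epoch constant is copied from the
     * driver, the unit assumption (2^-12 microseconds per TOD tick) is s390's. */
    static void tod_to_timestamp(uint64_t stck, uint64_t *sec, uint64_t *usec)
    {
            stck -= 0x8126d60e46000000ULL - (0x3c26700ULL * 1000000 * 4096);
            *sec  = (stck >> 12) / 1000000;   /* whole seconds since the epoch */
            *usec = (stck >> 12) % 1000000;   /* sub-second remainder in microseconds */
    }
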
+static int zfcp_dbf_tag(char *out_buf, const char *label, const char *tag)
+{
+ int len = 0, i;
+
+ len += sprintf(out_buf + len, "%-24s", label);
+ for (i = 0; i < ZFCP_DBF_TAG_SIZE; i++)
+ len += sprintf(out_buf + len, "%c", tag[i]);
+ len += sprintf(out_buf + len, "\n");
+
+ return len;
+}
+
+static int
+zfcp_dbf_view(char *out_buf, const char *label, const char *format, ...)
+{
+ va_list arg;
+ int len = 0;
+
+ len += sprintf(out_buf + len, "%-24s", label);
+ va_start(arg, format);
+ len += vsprintf(out_buf + len, format, arg);
+ va_end(arg);
+ len += sprintf(out_buf + len, "\n");
+
+ return len;
+}
+
+static int
+zfcp_dbf_view_dump(char *out_buf, const char *label,
+ char *buffer, int buflen, int offset, int total_size)
+{
+ int len = 0;
+
+ if (offset == 0)
+ len += sprintf(out_buf + len, "%-24s ", label);
+
+ while (buflen--) {
+ if (offset > 0) {
+ if ((offset % 32) == 0)
+ len += sprintf(out_buf + len, "\n%-24c ", ' ');
+ else if ((offset % 4) == 0)
+ len += sprintf(out_buf + len, " ");
+ }
+ len += sprintf(out_buf + len, "%02x", *buffer++);
+ if (++offset == total_size) {
+ len += sprintf(out_buf + len, "\n");
+ break;
+ }
+ }
+
+ if (total_size == 0)
+ len += sprintf(out_buf + len, "\n");
+
+ return len;
+}
+
+static inline int
+zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area,
+ debug_entry_t * entry, char *out_buf)
+{
+ struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)DEBUG_DATA(entry);
+ int len = 0;
+
+ if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) {
+ len += zfcp_dbf_stck(out_buf + len, "timestamp",
+ entry->id.stck);
+ len += zfcp_dbf_view(out_buf + len, "cpu", "%02i",
+ entry->id.fields.cpuid);
+ } else {
+ len += zfcp_dbf_view_dump(out_buf + len, NULL,
+ dump->data,
+ dump->size,
+ dump->offset, dump->total_size);
+ if ((dump->offset + dump->size) == dump->total_size)
+ len += sprintf(out_buf + len, "\n");
+ }
+
+ return len;
+}
+
+inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
+{
+ struct zfcp_adapter *adapter = fsf_req->adapter;
+ struct fsf_qtcb *qtcb = fsf_req->qtcb;
+ union fsf_prot_status_qual *prot_status_qual =
+ &qtcb->prefix.prot_status_qual;
+ union fsf_status_qual *fsf_status_qual = &qtcb->header.fsf_status_qual;
+ struct scsi_cmnd *scsi_cmnd;
+ struct zfcp_port *port;
+ struct zfcp_unit *unit;
+ struct zfcp_send_els *send_els;
+ struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
+ struct zfcp_hba_dbf_record_response *response = &rec->type.response;
+ int level;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
+ memset(rec, 0, sizeof(struct zfcp_hba_dbf_record));
+ strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE);
+
+ if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
+ (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
+ strncpy(rec->tag2, "perr", ZFCP_DBF_TAG_SIZE);
+ level = 1;
+ } else if (qtcb->header.fsf_status != FSF_GOOD) {
+ strncpy(rec->tag2, "ferr", ZFCP_DBF_TAG_SIZE);
+ level = 1;
+ } else if ((fsf_req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
+ (fsf_req->fsf_command == FSF_QTCB_OPEN_LUN)) {
+ strncpy(rec->tag2, "open", ZFCP_DBF_TAG_SIZE);
+ level = 4;
+ } else if ((prot_status_qual->doubleword[0] != 0) ||
+ (prot_status_qual->doubleword[1] != 0) ||
+ (fsf_status_qual->doubleword[0] != 0) ||
+ (fsf_status_qual->doubleword[1] != 0)) {
+ strncpy(rec->tag2, "qual", ZFCP_DBF_TAG_SIZE);
+ level = 3;
+ } else {
+ strncpy(rec->tag2, "norm", ZFCP_DBF_TAG_SIZE);
+ level = 6;
+ }
+
+ response->fsf_command = fsf_req->fsf_command;
+ response->fsf_reqid = (unsigned long)fsf_req;
+ response->fsf_seqno = fsf_req->seq_no;
+ response->fsf_issued = fsf_req->issued;
+ response->fsf_prot_status = qtcb->prefix.prot_status;
+ response->fsf_status = qtcb->header.fsf_status;
+ memcpy(response->fsf_prot_status_qual,
+ prot_status_qual, FSF_PROT_STATUS_QUAL_SIZE);
+ memcpy(response->fsf_status_qual,
+ fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
+ response->fsf_req_status = fsf_req->status;
+ response->sbal_first = fsf_req->sbal_first;
+ response->sbal_curr = fsf_req->sbal_curr;
+ response->sbal_last = fsf_req->sbal_last;
+ response->pool = fsf_req->pool != NULL;
+ response->erp_action = (unsigned long)fsf_req->erp_action;
+
+ switch (fsf_req->fsf_command) {
+ case FSF_QTCB_FCP_CMND:
+ if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
+ break;
+ scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
+ if (scsi_cmnd != NULL) {
+ response->data.send_fcp.scsi_cmnd
+ = (unsigned long)scsi_cmnd;
+ response->data.send_fcp.scsi_serial
+ = scsi_cmnd->serial_number;
+ }
+ break;
+
+ case FSF_QTCB_OPEN_PORT_WITH_DID:
+ case FSF_QTCB_CLOSE_PORT:
+ case FSF_QTCB_CLOSE_PHYSICAL_PORT:
+ port = (struct zfcp_port *)fsf_req->data;
+ response->data.port.wwpn = port->wwpn;
+ response->data.port.d_id = port->d_id;
+ response->data.port.port_handle = qtcb->header.port_handle;
+ break;
+
+ case FSF_QTCB_OPEN_LUN:
+ case FSF_QTCB_CLOSE_LUN:
+ unit = (struct zfcp_unit *)fsf_req->data;
+ port = unit->port;
+ response->data.unit.wwpn = port->wwpn;
+ response->data.unit.fcp_lun = unit->fcp_lun;
+ response->data.unit.port_handle = qtcb->header.port_handle;
+ response->data.unit.lun_handle = qtcb->header.lun_handle;
+ break;
+
+ case FSF_QTCB_SEND_ELS:
+ send_els = (struct zfcp_send_els *)fsf_req->data;
+ response->data.send_els.d_id = qtcb->bottom.support.d_id;
+ response->data.send_els.ls_code = send_els->ls_code >> 24;
+ break;
+
+ case FSF_QTCB_ABORT_FCP_CMND:
+ case FSF_QTCB_SEND_GENERIC:
+ case FSF_QTCB_EXCHANGE_CONFIG_DATA:
+ case FSF_QTCB_EXCHANGE_PORT_DATA:
+ case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
+ case FSF_QTCB_UPLOAD_CONTROL_FILE:
+ break;
+ }
+
+ debug_event(adapter->hba_dbf, level,
+ rec, sizeof(struct zfcp_hba_dbf_record));
+ spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
+}
+
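
Editorial note (not part of the patch): the response trace above is filed under a tag2/level pair: protocol and FSF errors at level 1, port/LUN opens at 4, non-zero status qualifiers at 3, and ordinary traffic at 6, so raising the debug level progressively exposes more benign events. The decision tree restated as a small helper (names are illustrative; the values mirror the function):

    struct hba_class { const char *tag2; int level; };

    static struct hba_class classify_response(int prot_err, int fsf_err,
                                              int is_open, int has_qual)
    {
            struct hba_class c;

            if (prot_err)      { c.tag2 = "perr"; c.level = 1; }
            else if (fsf_err)  { c.tag2 = "ferr"; c.level = 1; }
            else if (is_open)  { c.tag2 = "open"; c.level = 4; }
            else if (has_qual) { c.tag2 = "qual"; c.level = 3; }
            else               { c.tag2 = "norm"; c.level = 6; }
            return c;
    }
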
+inline void
+zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
+ struct fsf_status_read_buffer *status_buffer)
+{
+ struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
+ memset(rec, 0, sizeof(struct zfcp_hba_dbf_record));
+ strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE);
+ strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE);
+
+ rec->type.status.failed = adapter->status_read_failed;
+ if (status_buffer != NULL) {
+ rec->type.status.status_type = status_buffer->status_type;
+ rec->type.status.status_subtype = status_buffer->status_subtype;
+ memcpy(&rec->type.status.queue_designator,
+ &status_buffer->queue_designator,
+ sizeof(struct fsf_queue_designator));
+
+ switch (status_buffer->status_type) {
+ case FSF_STATUS_READ_SENSE_DATA_AVAIL:
+ rec->type.status.payload_size =
+ ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL;
+ break;
+
+ case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
+ rec->type.status.payload_size =
+ ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD;
+ break;
+
+ case FSF_STATUS_READ_LINK_DOWN:
+ switch (status_buffer->status_subtype) {
+ case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
+ case FSF_STATUS_READ_SUB_FDISC_FAILED:
+ rec->type.status.payload_size =
+ sizeof(struct fsf_link_down_info);
+ }
+ break;
+
+ case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
+ rec->type.status.payload_size =
+ ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT;
+ break;
+ }
+ memcpy(&rec->type.status.payload,
+ &status_buffer->payload, rec->type.status.payload_size);
+ }
+
+ debug_event(adapter->hba_dbf, 2,
+ rec, sizeof(struct zfcp_hba_dbf_record));
+ spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
+}
+
+inline void
+zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
+ unsigned int qdio_error, unsigned int siga_error,
+ int sbal_index, int sbal_count)
+{
+ struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
+ memset(rec, 0, sizeof(struct zfcp_hba_dbf_record));
+ strncpy(rec->tag, "qdio", ZFCP_DBF_TAG_SIZE);
+ rec->type.qdio.status = status;
+ rec->type.qdio.qdio_error = qdio_error;
+ rec->type.qdio.siga_error = siga_error;
+ rec->type.qdio.sbal_index = sbal_index;
+ rec->type.qdio.sbal_count = sbal_count;
+ debug_event(adapter->hba_dbf, 0,
+ rec, sizeof(struct zfcp_hba_dbf_record));
+ spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
+}
+
+static inline int
+zfcp_hba_dbf_view_response(char *out_buf,
+ struct zfcp_hba_dbf_record_response *rec)
+{
+ int len = 0;
+
+ len += zfcp_dbf_view(out_buf + len, "fsf_command", "0x%08x",
+ rec->fsf_command);
+ len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
+ rec->fsf_reqid);
+ len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
+ rec->fsf_seqno);
+ len += zfcp_dbf_stck(out_buf + len, "fsf_issued", rec->fsf_issued);
+ len += zfcp_dbf_view(out_buf + len, "fsf_prot_status", "0x%08x",
+ rec->fsf_prot_status);
+ len += zfcp_dbf_view(out_buf + len, "fsf_status", "0x%08x",
+ rec->fsf_status);
+ len += zfcp_dbf_view_dump(out_buf + len, "fsf_prot_status_qual",
+ rec->fsf_prot_status_qual,
+ FSF_PROT_STATUS_QUAL_SIZE,
+ 0, FSF_PROT_STATUS_QUAL_SIZE);
+ len += zfcp_dbf_view_dump(out_buf + len, "fsf_status_qual",
+ rec->fsf_status_qual,
+ FSF_STATUS_QUALIFIER_SIZE,
+ 0, FSF_STATUS_QUALIFIER_SIZE);
+ len += zfcp_dbf_view(out_buf + len, "fsf_req_status", "0x%08x",
+ rec->fsf_req_status);
+ len += zfcp_dbf_view(out_buf + len, "sbal_first", "0x%02x",
+ rec->sbal_first);
+ len += zfcp_dbf_view(out_buf + len, "sbal_curr", "0x%02x",
+ rec->sbal_curr);
+ len += zfcp_dbf_view(out_buf + len, "sbal_last", "0x%02x",
+ rec->sbal_last);
+ len += zfcp_dbf_view(out_buf + len, "pool", "0x%02x", rec->pool);
+
+ switch (rec->fsf_command) {
+ case FSF_QTCB_FCP_CMND:
+ if (rec->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
+ break;
+ len += zfcp_dbf_view(out_buf + len, "scsi_cmnd", "0x%0Lx",
+ rec->data.send_fcp.scsi_cmnd);
+ len += zfcp_dbf_view(out_buf + len, "scsi_serial", "0x%016Lx",
+ rec->data.send_fcp.scsi_serial);
+ break;
+
+ case FSF_QTCB_OPEN_PORT_WITH_DID:
+ case FSF_QTCB_CLOSE_PORT:
+ case FSF_QTCB_CLOSE_PHYSICAL_PORT:
+ len += zfcp_dbf_view(out_buf + len, "wwpn", "0x%016Lx",
+ rec->data.port.wwpn);
+ len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x",
+ rec->data.port.d_id);
+ len += zfcp_dbf_view(out_buf + len, "port_handle", "0x%08x",
+ rec->data.port.port_handle);
+ break;
+
+ case FSF_QTCB_OPEN_LUN:
+ case FSF_QTCB_CLOSE_LUN:
+ len += zfcp_dbf_view(out_buf + len, "wwpn", "0x%016Lx",
+ rec->data.unit.wwpn);
+ len += zfcp_dbf_view(out_buf + len, "fcp_lun", "0x%016Lx",
+ rec->data.unit.fcp_lun);
+ len += zfcp_dbf_view(out_buf + len, "port_handle", "0x%08x",
+ rec->data.unit.port_handle);
+ len += zfcp_dbf_view(out_buf + len, "lun_handle", "0x%08x",
+ rec->data.unit.lun_handle);
+ break;
+
+ case FSF_QTCB_SEND_ELS:
+ len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x",
+ rec->data.send_els.d_id);
+ len += zfcp_dbf_view(out_buf + len, "ls_code", "0x%02x",
+ rec->data.send_els.ls_code);
+ break;
+
+ case FSF_QTCB_ABORT_FCP_CMND:
+ case FSF_QTCB_SEND_GENERIC:
+ case FSF_QTCB_EXCHANGE_CONFIG_DATA:
+ case FSF_QTCB_EXCHANGE_PORT_DATA:
+ case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
+ case FSF_QTCB_UPLOAD_CONTROL_FILE:
+ break;
+ }
+
+ return len;
+}
+
+static inline int
+zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec)
+{
+ int len = 0;
+
+ len += zfcp_dbf_view(out_buf + len, "failed", "0x%02x", rec->failed);
+ len += zfcp_dbf_view(out_buf + len, "status_type", "0x%08x",
+ rec->status_type);
+ len += zfcp_dbf_view(out_buf + len, "status_subtype", "0x%08x",
+ rec->status_subtype);
+ len += zfcp_dbf_view_dump(out_buf + len, "queue_designator",
+ (char *)&rec->queue_designator,
+ sizeof(struct fsf_queue_designator),
+ 0, sizeof(struct fsf_queue_designator));
+ len += zfcp_dbf_view_dump(out_buf + len, "payload",
+ (char *)&rec->payload,
+ rec->payload_size, 0, rec->payload_size);
+
+ return len;
+}
+
+static inline int
+zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec)
+{
+ int len = 0;
+
+ len += zfcp_dbf_view(out_buf + len, "status", "0x%08x", rec->status);
+ len += zfcp_dbf_view(out_buf + len, "qdio_error", "0x%08x",
+ rec->qdio_error);
+ len += zfcp_dbf_view(out_buf + len, "siga_error", "0x%08x",
+ rec->siga_error);
+ len += zfcp_dbf_view(out_buf + len, "sbal_index", "0x%02x",
+ rec->sbal_index);
+ len += zfcp_dbf_view(out_buf + len, "sbal_count", "0x%02x",
+ rec->sbal_count);
+
+ return len;
+}
+
+static int
+zfcp_hba_dbf_view_format(debug_info_t * id, struct debug_view *view,
+ char *out_buf, const char *in_buf)
+{
+ struct zfcp_hba_dbf_record *rec = (struct zfcp_hba_dbf_record *)in_buf;
+ int len = 0;
+
+ if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
+ return 0;
+
+ len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag);
+ if (isalpha(rec->tag2[0]))
+ len += zfcp_dbf_tag(out_buf + len, "tag2", rec->tag2);
+ if (strncmp(rec->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0)
+ len += zfcp_hba_dbf_view_response(out_buf + len,
+ &rec->type.response);
+ else if (strncmp(rec->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0)
+ len += zfcp_hba_dbf_view_status(out_buf + len,
+ &rec->type.status);
+ else if (strncmp(rec->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0)
+ len += zfcp_hba_dbf_view_qdio(out_buf + len, &rec->type.qdio);
+
+ len += sprintf(out_buf + len, "\n");
+
+ return len;
+}
+
+struct debug_view zfcp_hba_dbf_view = {
+ "structured",
+ NULL,
+ &zfcp_dbf_view_header,
+ &zfcp_hba_dbf_view_format,
+ NULL,
+ NULL
+};
+
+inline void
+_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
+ u32 s_id, u32 d_id, void *buffer, int buflen)
+{
+ struct zfcp_send_ct *send_ct = (struct zfcp_send_ct *)fsf_req->data;
+ struct zfcp_port *port = send_ct->port;
+ struct zfcp_adapter *adapter = port->adapter;
+ struct ct_hdr *header = (struct ct_hdr *)buffer;
+ struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf;
+ struct zfcp_san_dbf_record_ct *ct = &rec->type.ct;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->san_dbf_lock, flags);
+ memset(rec, 0, sizeof(struct zfcp_san_dbf_record));
+ strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
+ rec->fsf_reqid = (unsigned long)fsf_req;
+ rec->fsf_seqno = fsf_req->seq_no;
+ rec->s_id = s_id;
+ rec->d_id = d_id;
+ if (strncmp(tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
+ ct->type.request.cmd_req_code = header->cmd_rsp_code;
+ ct->type.request.revision = header->revision;
+ ct->type.request.gs_type = header->gs_type;
+ ct->type.request.gs_subtype = header->gs_subtype;
+ ct->type.request.options = header->options;
+ ct->type.request.max_res_size = header->max_res_size;
+ } else if (strncmp(tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
+ ct->type.response.cmd_rsp_code = header->cmd_rsp_code;
+ ct->type.response.revision = header->revision;
+ ct->type.response.reason_code = header->reason_code;
+ ct->type.response.reason_code_expl = header->reason_code_expl;
+ ct->type.response.vendor_unique = header->vendor_unique;
+ }
+ ct->payload_size =
+ min(buflen - (int)sizeof(struct ct_hdr), ZFCP_DBF_CT_PAYLOAD);
+ memcpy(ct->payload, buffer + sizeof(struct ct_hdr), ct->payload_size);
+ debug_event(adapter->san_dbf, 3,
+ rec, sizeof(struct zfcp_san_dbf_record));
+ spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
+}
+
+inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
+{
+ struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
+ struct zfcp_port *port = ct->port;
+ struct zfcp_adapter *adapter = port->adapter;
+
+ _zfcp_san_dbf_event_common_ct("octc", fsf_req,
+ fc_host_port_id(adapter->scsi_host),
+ port->d_id, zfcp_sg_to_address(ct->req),
+ ct->req->length);
+}
+
+inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
+{
+ struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
+ struct zfcp_port *port = ct->port;
+ struct zfcp_adapter *adapter = port->adapter;
+
+ _zfcp_san_dbf_event_common_ct("rctc", fsf_req, port->d_id,
+ fc_host_port_id(adapter->scsi_host),
+ zfcp_sg_to_address(ct->resp),
+ ct->resp->length);
+}
+
+static inline void
+_zfcp_san_dbf_event_common_els(const char *tag, int level,
+ struct zfcp_fsf_req *fsf_req, u32 s_id,
+ u32 d_id, u8 ls_code, void *buffer, int buflen)
+{
+ struct zfcp_adapter *adapter = fsf_req->adapter;
+ struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf;
+ struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
+ unsigned long flags;
+ int offset = 0;
+
+ spin_lock_irqsave(&adapter->san_dbf_lock, flags);
+ do {
+ memset(rec, 0, sizeof(struct zfcp_san_dbf_record));
+ if (offset == 0) {
+ strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
+ rec->fsf_reqid = (unsigned long)fsf_req;
+ rec->fsf_seqno = fsf_req->seq_no;
+ rec->s_id = s_id;
+ rec->d_id = d_id;
+ rec->type.els.ls_code = ls_code;
+ buflen = min(buflen, ZFCP_DBF_ELS_MAX_PAYLOAD);
+ rec->type.els.payload_size = buflen;
+ memcpy(rec->type.els.payload,
+ buffer, min(buflen, ZFCP_DBF_ELS_PAYLOAD));
+ offset += min(buflen, ZFCP_DBF_ELS_PAYLOAD);
+ } else {
+ strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
+ dump->total_size = buflen;
+ dump->offset = offset;
+ dump->size = min(buflen - offset,
+ (int)sizeof(struct zfcp_san_dbf_record)
+ - (int)sizeof(struct zfcp_dbf_dump));
+ memcpy(dump->data, buffer + offset, dump->size);
+ offset += dump->size;
+ }
+ debug_event(adapter->san_dbf, level,
+ rec, sizeof(struct zfcp_san_dbf_record));
+ } while (offset < buflen);
+ spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
+}
+
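
Editorial note (not part of the patch): _zfcp_san_dbf_event_common_els() shows the chunking convention used throughout this file. The first record carries the header plus the leading payload bytes; anything beyond that is re-emitted as records tagged "dump" whose total_size/offset/size fields let a viewer reassemble the full payload. A generic sketch of that loop, where emit() stands in for debug_event() and the chunk sizes are illustrative:

    #include <string.h>

    #define FIRST_CHUNK  32    /* payload bytes carried by the initial record */
    #define CONT_CHUNK  200    /* payload bytes per "dump" continuation record */

    static void trace_payload(const char *buf, int buflen,
                              void (*emit)(const void *rec, int len))
    {
            char rec[256];
            int offset = 0, size;

            if (buflen <= 0)
                    return;

            do {
                    memset(rec, 0, sizeof(rec));
                    if (offset == 0)
                            size = buflen < FIRST_CHUNK ? buflen : FIRST_CHUNK;
                    else
                            size = (buflen - offset < CONT_CHUNK) ?
                                   buflen - offset : CONT_CHUNK;
                    /* a real record also carries tag/total_size/offset fields */
                    memcpy(rec, buf + offset, size);
                    offset += size;
                    emit(rec, sizeof(rec));
            } while (offset < buflen);
    }
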
+inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
+{
+ struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
+
+ _zfcp_san_dbf_event_common_els("oels", 2, fsf_req,
+ fc_host_port_id(els->adapter->scsi_host),
+ els->d_id,
+ *(u8 *) zfcp_sg_to_address(els->req),
+ zfcp_sg_to_address(els->req),
+ els->req->length);
+}
+
+inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
+{
+ struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
+
+ _zfcp_san_dbf_event_common_els("rels", 2, fsf_req, els->d_id,
+ fc_host_port_id(els->adapter->scsi_host),
+ *(u8 *) zfcp_sg_to_address(els->req),
+ zfcp_sg_to_address(els->resp),
+ els->resp->length);
+}
+
+inline void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
+{
+ struct zfcp_adapter *adapter = fsf_req->adapter;
+ struct fsf_status_read_buffer *status_buffer =
+ (struct fsf_status_read_buffer *)fsf_req->data;
+ int length = (int)status_buffer->length -
+ (int)((void *)&status_buffer->payload - (void *)status_buffer);
+
+ _zfcp_san_dbf_event_common_els("iels", 1, fsf_req, status_buffer->d_id,
+ fc_host_port_id(adapter->scsi_host),
+ *(u8 *) status_buffer->payload,
+ (void *)status_buffer->payload, length);
+}
+
+static int
+zfcp_san_dbf_view_format(debug_info_t * id, struct debug_view *view,
+ char *out_buf, const char *in_buf)
+{
+ struct zfcp_san_dbf_record *rec = (struct zfcp_san_dbf_record *)in_buf;
+ char *buffer = NULL;
+ int buflen = 0, total = 0;
+ int len = 0;
+
+ if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
+ return 0;
+
+ len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag);
+ len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
+ rec->fsf_reqid);
+ len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
+ rec->fsf_seqno);
+ len += zfcp_dbf_view(out_buf + len, "s_id", "0x%06x", rec->s_id);
+ len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x", rec->d_id);
+
+ if (strncmp(rec->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
+ len += zfcp_dbf_view(out_buf + len, "cmd_req_code", "0x%04x",
+ rec->type.ct.type.request.cmd_req_code);
+ len += zfcp_dbf_view(out_buf + len, "revision", "0x%02x",
+ rec->type.ct.type.request.revision);
+ len += zfcp_dbf_view(out_buf + len, "gs_type", "0x%02x",
+ rec->type.ct.type.request.gs_type);
+ len += zfcp_dbf_view(out_buf + len, "gs_subtype", "0x%02x",
+ rec->type.ct.type.request.gs_subtype);
+ len += zfcp_dbf_view(out_buf + len, "options", "0x%02x",
+ rec->type.ct.type.request.options);
+ len += zfcp_dbf_view(out_buf + len, "max_res_size", "0x%04x",
+ rec->type.ct.type.request.max_res_size);
+ total = rec->type.ct.payload_size;
+ buffer = rec->type.ct.payload;
+ buflen = min(total, ZFCP_DBF_CT_PAYLOAD);
+ } else if (strncmp(rec->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
+ len += zfcp_dbf_view(out_buf + len, "cmd_rsp_code", "0x%04x",
+ rec->type.ct.type.response.cmd_rsp_code);
+ len += zfcp_dbf_view(out_buf + len, "revision", "0x%02x",
+ rec->type.ct.type.response.revision);
+ len += zfcp_dbf_view(out_buf + len, "reason_code", "0x%02x",
+ rec->type.ct.type.response.reason_code);
+ len +=
+ zfcp_dbf_view(out_buf + len, "reason_code_expl", "0x%02x",
+ rec->type.ct.type.response.reason_code_expl);
+ len +=
+ zfcp_dbf_view(out_buf + len, "vendor_unique", "0x%02x",
+ rec->type.ct.type.response.vendor_unique);
+ total = rec->type.ct.payload_size;
+ buffer = rec->type.ct.payload;
+ buflen = min(total, ZFCP_DBF_CT_PAYLOAD);
+ } else if (strncmp(rec->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
+ strncmp(rec->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
+ strncmp(rec->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
+ len += zfcp_dbf_view(out_buf + len, "ls_code", "0x%02x",
+ rec->type.els.ls_code);
+ total = rec->type.els.payload_size;
+ buffer = rec->type.els.payload;
+ buflen = min(total, ZFCP_DBF_ELS_PAYLOAD);
+ }
+
+ len += zfcp_dbf_view_dump(out_buf + len, "payload",
+ buffer, buflen, 0, total);
+
+ if (buflen == total)
+ len += sprintf(out_buf + len, "\n");
+
+ return len;
+}
+
+struct debug_view zfcp_san_dbf_view = {
+ "structured",
+ NULL,
+ &zfcp_dbf_view_header,
+ &zfcp_san_dbf_view_format,
+ NULL,
+ NULL
+};
+
+static inline void
+_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
+ struct zfcp_adapter *adapter,
+ struct scsi_cmnd *scsi_cmnd,
+ struct zfcp_fsf_req *new_fsf_req)
+{
+ struct zfcp_fsf_req *fsf_req =
+ (struct zfcp_fsf_req *)scsi_cmnd->host_scribble;
+ struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf;
+ struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
+ unsigned long flags;
+ struct fcp_rsp_iu *fcp_rsp;
+ char *fcp_rsp_info = NULL, *fcp_sns_info = NULL;
+ int offset = 0, buflen = 0;
+
+ spin_lock_irqsave(&adapter->scsi_dbf_lock, flags);
+ do {
+ memset(rec, 0, sizeof(struct zfcp_scsi_dbf_record));
+ if (offset == 0) {
+ strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
+ strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
+ if (scsi_cmnd->device) {
+ rec->scsi_id = scsi_cmnd->device->id;
+ rec->scsi_lun = scsi_cmnd->device->lun;
+ }
+ rec->scsi_result = scsi_cmnd->result;
+ rec->scsi_cmnd = (unsigned long)scsi_cmnd;
+ rec->scsi_serial = scsi_cmnd->serial_number;
+ memcpy(rec->scsi_opcode,
+ &scsi_cmnd->cmnd,
+ min((int)scsi_cmnd->cmd_len,
+ ZFCP_DBF_SCSI_OPCODE));
+ rec->scsi_retries = scsi_cmnd->retries;
+ rec->scsi_allowed = scsi_cmnd->allowed;
+ if (fsf_req != NULL) {
+ fcp_rsp = (struct fcp_rsp_iu *)
+ &(fsf_req->qtcb->bottom.io.fcp_rsp);
+ fcp_rsp_info =
+ zfcp_get_fcp_rsp_info_ptr(fcp_rsp);
+ fcp_sns_info =
+ zfcp_get_fcp_sns_info_ptr(fcp_rsp);
+
+ rec->type.fcp.rsp_validity =
+ fcp_rsp->validity.value;
+ rec->type.fcp.rsp_scsi_status =
+ fcp_rsp->scsi_status;
+ rec->type.fcp.rsp_resid = fcp_rsp->fcp_resid;
+ if (fcp_rsp->validity.bits.fcp_rsp_len_valid)
+ rec->type.fcp.rsp_code =
+ *(fcp_rsp_info + 3);
+ if (fcp_rsp->validity.bits.fcp_sns_len_valid) {
+ buflen = min((int)fcp_rsp->fcp_sns_len,
+ ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
+ rec->type.fcp.sns_info_len = buflen;
+ memcpy(rec->type.fcp.sns_info,
+ fcp_sns_info,
+ min(buflen,
+ ZFCP_DBF_SCSI_FCP_SNS_INFO));
+ offset += min(buflen,
+ ZFCP_DBF_SCSI_FCP_SNS_INFO);
+ }
+
+ rec->fsf_reqid = (unsigned long)fsf_req;
+ rec->fsf_seqno = fsf_req->seq_no;
+ rec->fsf_issued = fsf_req->issued;
+ }
+ if (new_fsf_req != NULL) {
+ rec->type.new_fsf_req.fsf_reqid =
+ (unsigned long)
+ new_fsf_req;
+ rec->type.new_fsf_req.fsf_seqno =
+ new_fsf_req->seq_no;
+ rec->type.new_fsf_req.fsf_issued =
+ new_fsf_req->issued;
+ }
+ } else {
+ strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
+ dump->total_size = buflen;
+ dump->offset = offset;
+ dump->size = min(buflen - offset,
+ (int)sizeof(struct
+ zfcp_scsi_dbf_record) -
+ (int)sizeof(struct zfcp_dbf_dump));
+ memcpy(dump->data, fcp_sns_info + offset, dump->size);
+ offset += dump->size;
+ }
+ debug_event(adapter->scsi_dbf, level,
+ rec, sizeof(struct zfcp_scsi_dbf_record));
+ } while (offset < buflen);
+ spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags);
+}
+
+inline void
+zfcp_scsi_dbf_event_result(const char *tag, int level,
+ struct zfcp_adapter *adapter,
+ struct scsi_cmnd *scsi_cmnd)
+{
+ _zfcp_scsi_dbf_event_common("rslt",
+ tag, level, adapter, scsi_cmnd, NULL);
+}
+
+inline void
+zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
+ struct scsi_cmnd *scsi_cmnd,
+ struct zfcp_fsf_req *new_fsf_req)
+{
+ _zfcp_scsi_dbf_event_common("abrt",
+ tag, 1, adapter, scsi_cmnd, new_fsf_req);
+}
+
+inline void
+zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
+ struct scsi_cmnd *scsi_cmnd)
+{
+ struct zfcp_adapter *adapter = unit->port->adapter;
+
+ _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst",
+ tag, 1, adapter, scsi_cmnd, NULL);
+}
+
+static int
+zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view,
+ char *out_buf, const char *in_buf)
+{
+ struct zfcp_scsi_dbf_record *rec =
+ (struct zfcp_scsi_dbf_record *)in_buf;
+ int len = 0;
+
+ if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
+ return 0;
+
+ len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag);
+ len += zfcp_dbf_tag(out_buf + len, "tag2", rec->tag2);
+ len += zfcp_dbf_view(out_buf + len, "scsi_id", "0x%08x", rec->scsi_id);
+ len += zfcp_dbf_view(out_buf + len, "scsi_lun", "0x%08x",
+ rec->scsi_lun);
+ len += zfcp_dbf_view(out_buf + len, "scsi_result", "0x%08x",
+ rec->scsi_result);
+ len += zfcp_dbf_view(out_buf + len, "scsi_cmnd", "0x%0Lx",
+ rec->scsi_cmnd);
+ len += zfcp_dbf_view(out_buf + len, "scsi_serial", "0x%016Lx",
+ rec->scsi_serial);
+ len += zfcp_dbf_view_dump(out_buf + len, "scsi_opcode",
+ rec->scsi_opcode,
+ ZFCP_DBF_SCSI_OPCODE,
+ 0, ZFCP_DBF_SCSI_OPCODE);
+ len += zfcp_dbf_view(out_buf + len, "scsi_retries", "0x%02x",
+ rec->scsi_retries);
+ len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x",
+ rec->scsi_allowed);
+ len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
+ rec->fsf_reqid);
+ len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
+ rec->fsf_seqno);
+ len += zfcp_dbf_stck(out_buf + len, "fsf_issued", rec->fsf_issued);
+ if (strncmp(rec->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) {
+ len +=
+ zfcp_dbf_view(out_buf + len, "fcp_rsp_validity", "0x%02x",
+ rec->type.fcp.rsp_validity);
+ len +=
+ zfcp_dbf_view(out_buf + len, "fcp_rsp_scsi_status",
+ "0x%02x", rec->type.fcp.rsp_scsi_status);
+ len +=
+ zfcp_dbf_view(out_buf + len, "fcp_rsp_resid", "0x%08x",
+ rec->type.fcp.rsp_resid);
+ len +=
+ zfcp_dbf_view(out_buf + len, "fcp_rsp_code", "0x%08x",
+ rec->type.fcp.rsp_code);
+ len +=
+ zfcp_dbf_view(out_buf + len, "fcp_sns_info_len", "0x%08x",
+ rec->type.fcp.sns_info_len);
+ len +=
+ zfcp_dbf_view_dump(out_buf + len, "fcp_sns_info",
+ rec->type.fcp.sns_info,
+ min((int)rec->type.fcp.sns_info_len,
+ ZFCP_DBF_SCSI_FCP_SNS_INFO), 0,
+ rec->type.fcp.sns_info_len);
+ } else if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) {
+ len += zfcp_dbf_view(out_buf + len, "fsf_reqid_abort", "0x%0Lx",
+ rec->type.new_fsf_req.fsf_reqid);
+ len += zfcp_dbf_view(out_buf + len, "fsf_seqno_abort", "0x%08x",
+ rec->type.new_fsf_req.fsf_seqno);
+ len += zfcp_dbf_stck(out_buf + len, "fsf_issued",
+ rec->type.new_fsf_req.fsf_issued);
+ } else if ((strncmp(rec->tag, "trst", ZFCP_DBF_TAG_SIZE) == 0) ||
+ (strncmp(rec->tag, "lrst", ZFCP_DBF_TAG_SIZE) == 0)) {
+ len += zfcp_dbf_view(out_buf + len, "fsf_reqid_reset", "0x%0Lx",
+ rec->type.new_fsf_req.fsf_reqid);
+ len += zfcp_dbf_view(out_buf + len, "fsf_seqno_reset", "0x%08x",
+ rec->type.new_fsf_req.fsf_seqno);
+ len += zfcp_dbf_stck(out_buf + len, "fsf_issued",
+ rec->type.new_fsf_req.fsf_issued);
+ }
+
+ len += sprintf(out_buf + len, "\n");
+
+ return len;
+}
+
+struct debug_view zfcp_scsi_dbf_view = {
+ "structured",
+ NULL,
+ &zfcp_dbf_view_header,
+ &zfcp_scsi_dbf_view_format,
+ NULL,
+ NULL
+};
+
+/**
+ * zfcp_adapter_debug_register - registers debug feature for an adapter
+ * @adapter: pointer to adapter for which debug features should be registered
+ * return: -ENOMEM on error, 0 otherwise
+ */
+int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
+{
+ char dbf_name[DEBUG_MAX_NAME_LEN];
+
+ /* debug feature area which records recovery activity */
+ spin_lock_init(&adapter->erp_dbf_lock);
+ sprintf(dbf_name, "zfcp_%s_erp", zfcp_get_busid_by_adapter(adapter));
+ adapter->erp_dbf = debug_register(dbf_name, dbfsize, 2,
+ sizeof(struct zfcp_erp_dbf_record));
+ if (!adapter->erp_dbf)
+ goto failed;
+ debug_register_view(adapter->erp_dbf, &debug_hex_ascii_view);
+ debug_set_level(adapter->erp_dbf, 3);
+
+ /* debug feature area which records HBA (FSF and QDIO) conditions */
+ spin_lock_init(&adapter->hba_dbf_lock);
+ sprintf(dbf_name, "zfcp_%s_hba", zfcp_get_busid_by_adapter(adapter));
+ adapter->hba_dbf = debug_register(dbf_name, dbfsize, 1,
+ sizeof(struct zfcp_hba_dbf_record));
+ if (!adapter->hba_dbf)
+ goto failed;
+ debug_register_view(adapter->hba_dbf, &debug_hex_ascii_view);
+ debug_register_view(adapter->hba_dbf, &zfcp_hba_dbf_view);
+ debug_set_level(adapter->hba_dbf, 3);
+
+ /* debug feature area which records SAN command failures and recovery */
+ spin_lock_init(&adapter->san_dbf_lock);
+ sprintf(dbf_name, "zfcp_%s_san", zfcp_get_busid_by_adapter(adapter));
+ adapter->san_dbf = debug_register(dbf_name, dbfsize, 1,
+ sizeof(struct zfcp_san_dbf_record));
+ if (!adapter->san_dbf)
+ goto failed;
+ debug_register_view(adapter->san_dbf, &debug_hex_ascii_view);
+ debug_register_view(adapter->san_dbf, &zfcp_san_dbf_view);
+ debug_set_level(adapter->san_dbf, 6);
+
+ /* debug feature area which records SCSI command failures and recovery */
+ spin_lock_init(&adapter->scsi_dbf_lock);
+ sprintf(dbf_name, "zfcp_%s_scsi", zfcp_get_busid_by_adapter(adapter));
+ adapter->scsi_dbf = debug_register(dbf_name, dbfsize, 1,
+ sizeof(struct zfcp_scsi_dbf_record));
+ if (!adapter->scsi_dbf)
+ goto failed;
+ debug_register_view(adapter->scsi_dbf, &debug_hex_ascii_view);
+ debug_register_view(adapter->scsi_dbf, &zfcp_scsi_dbf_view);
+ debug_set_level(adapter->scsi_dbf, 3);
+
+ return 0;
+
+ failed:
+ zfcp_adapter_debug_unregister(adapter);
+
+ return -ENOMEM;
+}
+
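
Editorial note (not part of the patch): zfcp_adapter_debug_register() above uses the usual register-several-things idiom: every debug_register() result is checked, and any failure jumps to a single cleanup label that tears down whatever was already set up, relying on the unregister helper tolerating NULL handles. The same shape in a self-contained sketch with placeholder resources (malloc/free stand in for register/unregister):

    #include <stdlib.h>

    struct dbf_ctx { void *erp, *hba; };      /* placeholder handles */

    static void teardown(struct dbf_ctx *c)
    {
            free(c->hba);                     /* free(NULL) is a no-op, like the */
            free(c->erp);                     /* driver's unregister path        */
            c->hba = c->erp = NULL;
    }

    static int setup(struct dbf_ctx *c)
    {
            c->erp = c->hba = NULL;

            c->erp = malloc(64);              /* stands in for debug_register() */
            if (!c->erp)
                    goto failed;
            c->hba = malloc(64);
            if (!c->hba)
                    goto failed;
            return 0;

    failed:
            teardown(c);
            return -1;                        /* the driver returns -ENOMEM */
    }
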
+/**
+ * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
+ * @adapter: pointer to adapter for which debug features should be unregistered
+ */
+void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
+{
+ debug_unregister(adapter->scsi_dbf);
+ debug_unregister(adapter->san_dbf);
+ debug_unregister(adapter->hba_dbf);
+ debug_unregister(adapter->erp_dbf);
+ adapter->scsi_dbf = NULL;
+ adapter->san_dbf = NULL;
+ adapter->hba_dbf = NULL;
+ adapter->erp_dbf = NULL;
+}
+
+#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 455e902533a..d81b737d68c 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -66,7 +66,7 @@
/********************* GENERAL DEFINES *********************************/
/* zfcp version number, it consists of major, minor, and patch-level number */
-#define ZFCP_VERSION "4.3.0"
+#define ZFCP_VERSION "4.5.0"
/**
* zfcp_sg_to_address - determine kernel address from struct scatterlist
@@ -154,13 +154,17 @@ typedef u32 scsi_lun_t;
#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100
#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7
+/* Retry 5 times every 2 seconds, then every minute */
+#define ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES 5
+#define ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP 200
+#define ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP 6000
+
/* timeout value for "default timer" for fsf requests */
#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ);
/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
typedef unsigned long long wwn_t;
-typedef unsigned int fc_id_t;
typedef unsigned long long fcp_lun_t;
/* data length field may be at variable position in FCP-2 FCP_CMND IU */
typedef unsigned int fcp_dl_t;
@@ -281,6 +285,171 @@ struct fcp_logo {
} __attribute__((packed));
/*
+ * DBF stuff
+ */
+#define ZFCP_DBF_TAG_SIZE 4
+
+struct zfcp_dbf_dump {
+ u8 tag[ZFCP_DBF_TAG_SIZE];
+ u32 total_size; /* size of total dump data */
+ u32 offset; /* how much data has already been dumped */
+ u32 size; /* how much data comes with this record */
+ u8 data[]; /* dump data */
+} __attribute__ ((packed));
+
+/* FIXME: to be inflated when reworking the erp dbf */
+struct zfcp_erp_dbf_record {
+ u8 dummy[16];
+} __attribute__ ((packed));
+
+struct zfcp_hba_dbf_record_response {
+ u32 fsf_command;
+ u64 fsf_reqid;
+ u32 fsf_seqno;
+ u64 fsf_issued;
+ u32 fsf_prot_status;
+ u32 fsf_status;
+ u8 fsf_prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
+ u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
+ u32 fsf_req_status;
+ u8 sbal_first;
+ u8 sbal_curr;
+ u8 sbal_last;
+ u8 pool;
+ u64 erp_action;
+ union {
+ struct {
+ u64 scsi_cmnd;
+ u64 scsi_serial;
+ } send_fcp;
+ struct {
+ u64 wwpn;
+ u32 d_id;
+ u32 port_handle;
+ } port;
+ struct {
+ u64 wwpn;
+ u64 fcp_lun;
+ u32 port_handle;
+ u32 lun_handle;
+ } unit;
+ struct {
+ u32 d_id;
+ u8 ls_code;
+ } send_els;
+ } data;
+} __attribute__ ((packed));
+
+struct zfcp_hba_dbf_record_status {
+ u8 failed;
+ u32 status_type;
+ u32 status_subtype;
+ struct fsf_queue_designator
+ queue_designator;
+ u32 payload_size;
+#define ZFCP_DBF_UNSOL_PAYLOAD 80
+#define ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL 32
+#define ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD 56
+#define ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT 2 * sizeof(u32)
+ u8 payload[ZFCP_DBF_UNSOL_PAYLOAD];
+} __attribute__ ((packed));
+
+struct zfcp_hba_dbf_record_qdio {
+ u32 status;
+ u32 qdio_error;
+ u32 siga_error;
+ u8 sbal_index;
+ u8 sbal_count;
+} __attribute__ ((packed));
+
+struct zfcp_hba_dbf_record {
+ u8 tag[ZFCP_DBF_TAG_SIZE];
+ u8 tag2[ZFCP_DBF_TAG_SIZE];
+ union {
+ struct zfcp_hba_dbf_record_response response;
+ struct zfcp_hba_dbf_record_status status;
+ struct zfcp_hba_dbf_record_qdio qdio;
+ } type;
+} __attribute__ ((packed));
+
+struct zfcp_san_dbf_record_ct {
+ union {
+ struct {
+ u16 cmd_req_code;
+ u8 revision;
+ u8 gs_type;
+ u8 gs_subtype;
+ u8 options;
+ u16 max_res_size;
+ } request;
+ struct {
+ u16 cmd_rsp_code;
+ u8 revision;
+ u8 reason_code;
+ u8 reason_code_expl;
+ u8 vendor_unique;
+ } response;
+ } type;
+ u32 payload_size;
+#define ZFCP_DBF_CT_PAYLOAD 24
+ u8 payload[ZFCP_DBF_CT_PAYLOAD];
+} __attribute__ ((packed));
+
+struct zfcp_san_dbf_record_els {
+ u8 ls_code;
+ u32 payload_size;
+#define ZFCP_DBF_ELS_PAYLOAD 32
+#define ZFCP_DBF_ELS_MAX_PAYLOAD 1024
+ u8 payload[ZFCP_DBF_ELS_PAYLOAD];
+} __attribute__ ((packed));
+
+struct zfcp_san_dbf_record {
+ u8 tag[ZFCP_DBF_TAG_SIZE];
+ u64 fsf_reqid;
+ u32 fsf_seqno;
+ u32 s_id;
+ u32 d_id;
+ union {
+ struct zfcp_san_dbf_record_ct ct;
+ struct zfcp_san_dbf_record_els els;
+ } type;
+} __attribute__ ((packed));
+
+struct zfcp_scsi_dbf_record {
+ u8 tag[ZFCP_DBF_TAG_SIZE];
+ u8 tag2[ZFCP_DBF_TAG_SIZE];
+ u32 scsi_id;
+ u32 scsi_lun;
+ u32 scsi_result;
+ u64 scsi_cmnd;
+ u64 scsi_serial;
+#define ZFCP_DBF_SCSI_OPCODE 16
+ u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
+ u8 scsi_retries;
+ u8 scsi_allowed;
+ u64 fsf_reqid;
+ u32 fsf_seqno;
+ u64 fsf_issued;
+ union {
+ struct {
+ u64 fsf_reqid;
+ u32 fsf_seqno;
+ u64 fsf_issued;
+ } new_fsf_req;
+ struct {
+ u8 rsp_validity;
+ u8 rsp_scsi_status;
+ u32 rsp_resid;
+ u8 rsp_code;
+#define ZFCP_DBF_SCSI_FCP_SNS_INFO 16
+#define ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO 256
+ u32 sns_info_len;
+ u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO];
+ } fcp;
+ } type;
+} __attribute__ ((packed));
+
+/*
* FC-FS stuff
*/
#define R_A_TOV 10 /* seconds */
@@ -339,34 +508,6 @@ struct zfcp_rc_entry {
*/
#define ZFCP_CT_TIMEOUT (3 * R_A_TOV)
-
-/***************** S390 DEBUG FEATURE SPECIFIC DEFINES ***********************/
-
-/* debug feature entries per adapter */
-#define ZFCP_ERP_DBF_INDEX 1
-#define ZFCP_ERP_DBF_AREAS 2
-#define ZFCP_ERP_DBF_LENGTH 16
-#define ZFCP_ERP_DBF_LEVEL 3
-#define ZFCP_ERP_DBF_NAME "zfcperp"
-
-#define ZFCP_CMD_DBF_INDEX 2
-#define ZFCP_CMD_DBF_AREAS 1
-#define ZFCP_CMD_DBF_LENGTH 8
-#define ZFCP_CMD_DBF_LEVEL 3
-#define ZFCP_CMD_DBF_NAME "zfcpcmd"
-
-#define ZFCP_ABORT_DBF_INDEX 2
-#define ZFCP_ABORT_DBF_AREAS 1
-#define ZFCP_ABORT_DBF_LENGTH 8
-#define ZFCP_ABORT_DBF_LEVEL 6
-#define ZFCP_ABORT_DBF_NAME "zfcpabt"
-
-#define ZFCP_IN_ELS_DBF_INDEX 2
-#define ZFCP_IN_ELS_DBF_AREAS 1
-#define ZFCP_IN_ELS_DBF_LENGTH 8
-#define ZFCP_IN_ELS_DBF_LEVEL 6
-#define ZFCP_IN_ELS_DBF_NAME "zfcpels"
-
/******************** LOGGING MACROS AND DEFINES *****************************/
/*
@@ -501,6 +642,7 @@ do { \
#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080
#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
+#define ZFCP_STATUS_ADAPTER_XPORT_OK 0x00000800
#define ZFCP_STATUS_ADAPTER_SCSI_UP \
(ZFCP_STATUS_COMMON_UNBLOCKED | \
@@ -635,45 +777,6 @@ struct zfcp_adapter_mempool {
mempool_t *data_gid_pn;
};
-struct zfcp_exchange_config_data{
-};
-
-struct zfcp_open_port {
- struct zfcp_port *port;
-};
-
-struct zfcp_close_port {
- struct zfcp_port *port;
-};
-
-struct zfcp_open_unit {
- struct zfcp_unit *unit;
-};
-
-struct zfcp_close_unit {
- struct zfcp_unit *unit;
-};
-
-struct zfcp_close_physical_port {
- struct zfcp_port *port;
-};
-
-struct zfcp_send_fcp_command_task {
- struct zfcp_fsf_req *fsf_req;
- struct zfcp_unit *unit;
- struct scsi_cmnd *scsi_cmnd;
- unsigned long start_jiffies;
-};
-
-struct zfcp_send_fcp_command_task_management {
- struct zfcp_unit *unit;
-};
-
-struct zfcp_abort_fcp_command {
- struct zfcp_fsf_req *fsf_req;
- struct zfcp_unit *unit;
-};
-
/*
* header for CT_IU
*/
@@ -702,7 +805,7 @@ struct ct_iu_gid_pn_req {
/* FS_ACC IU and data unit for GID_PN nameserver request */
struct ct_iu_gid_pn_resp {
struct ct_hdr header;
- fc_id_t d_id;
+ u32 d_id;
} __attribute__ ((packed));
typedef void (*zfcp_send_ct_handler_t)(unsigned long);
@@ -768,7 +871,7 @@ typedef void (*zfcp_send_els_handler_t)(unsigned long);
struct zfcp_send_els {
struct zfcp_adapter *adapter;
struct zfcp_port *port;
- fc_id_t d_id;
+ u32 d_id;
struct scatterlist *req;
struct scatterlist *resp;
unsigned int req_count;
@@ -781,33 +884,6 @@ struct zfcp_send_els {
int status;
};
-struct zfcp_status_read {
- struct fsf_status_read_buffer *buffer;
-};
-
-struct zfcp_fsf_done {
- struct completion *complete;
- int status;
-};
-
-/* request specific data */
-union zfcp_req_data {
- struct zfcp_exchange_config_data exchange_config_data;
- struct zfcp_open_port open_port;
- struct zfcp_close_port close_port;
- struct zfcp_open_unit open_unit;
- struct zfcp_close_unit close_unit;
- struct zfcp_close_physical_port close_physical_port;
- struct zfcp_send_fcp_command_task send_fcp_command_task;
- struct zfcp_send_fcp_command_task_management
- send_fcp_command_task_management;
- struct zfcp_abort_fcp_command abort_fcp_command;
- struct zfcp_send_ct *send_ct;
- struct zfcp_send_els *send_els;
- struct zfcp_status_read status_read;
- struct fsf_qtcb_bottom_port *port_data;
-};
-
struct zfcp_qdio_queue {
struct qdio_buffer *buffer[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
u8 free_index; /* index of next free bfr
@@ -838,21 +914,19 @@ struct zfcp_adapter {
atomic_t refcount; /* reference count */
wait_queue_head_t remove_wq; /* can be used to wait for
refcount drop to zero */
- wwn_t wwnn; /* WWNN */
- wwn_t wwpn; /* WWPN */
- fc_id_t s_id; /* N_Port ID */
wwn_t peer_wwnn; /* P2P peer WWNN */
wwn_t peer_wwpn; /* P2P peer WWPN */
- fc_id_t peer_d_id; /* P2P peer D_ID */
+ u32 peer_d_id; /* P2P peer D_ID */
+ wwn_t physical_wwpn; /* WWPN of physical port */
+ u32 physical_s_id; /* local FC port ID */
struct ccw_device *ccw_device; /* S/390 ccw device */
u8 fc_service_class;
u32 fc_topology; /* FC topology */
- u32 fc_link_speed; /* FC interface speed */
u32 hydra_version; /* Hydra version */
u32 fsf_lic_version;
- u32 supported_features;/* of FCP channel */
+ u32 adapter_features; /* FCP channel features */
+ u32 connection_features; /* host connection features */
u32 hardware_version; /* of FCP channel */
- u8 serial_number[32]; /* of hardware */
struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
unsigned short scsi_host_no; /* Assigned host number */
unsigned char name[9];
@@ -889,11 +963,18 @@ struct zfcp_adapter {
u32 erp_low_mem_count; /* nr of erp actions waiting
for memory */
struct zfcp_port *nameserver_port; /* adapter's nameserver */
- debug_info_t *erp_dbf; /* S/390 debug features */
- debug_info_t *abort_dbf;
- debug_info_t *in_els_dbf;
- debug_info_t *cmd_dbf;
- spinlock_t dbf_lock;
+ debug_info_t *erp_dbf;
+ debug_info_t *hba_dbf;
+ debug_info_t *san_dbf; /* debug feature areas */
+ debug_info_t *scsi_dbf;
+ spinlock_t erp_dbf_lock;
+ spinlock_t hba_dbf_lock;
+ spinlock_t san_dbf_lock;
+ spinlock_t scsi_dbf_lock;
+ struct zfcp_erp_dbf_record erp_dbf_buf;
+ struct zfcp_hba_dbf_record hba_dbf_buf;
+ struct zfcp_san_dbf_record san_dbf_buf;
+ struct zfcp_scsi_dbf_record scsi_dbf_buf;
struct zfcp_adapter_mempool pool; /* Adapter memory pools */
struct qdio_initialize qdio_init_data; /* for qdio_establish */
struct device generic_services; /* directory for WKA ports */
@@ -919,7 +1000,7 @@ struct zfcp_port {
atomic_t status; /* status of this remote port */
wwn_t wwnn; /* WWNN if known */
wwn_t wwpn; /* WWPN */
- fc_id_t d_id; /* D_ID */
+ u32 d_id; /* D_ID */
u32 handle; /* handle assigned by FSF */
struct zfcp_erp_action erp_action; /* pending error recovery */
atomic_t erp_counter;
@@ -963,11 +1044,13 @@ struct zfcp_fsf_req {
u32 fsf_command; /* FSF Command copy */
struct fsf_qtcb *qtcb; /* address of associated QTCB */
u32 seq_no; /* Sequence number of request */
- union zfcp_req_data data; /* Info fields of request */
+ unsigned long data; /* private data of request */
struct zfcp_erp_action *erp_action; /* used if this request is
issued on behalf of erp */
 mempool_t *pool; /* used if request was allocated
from emergency pool */
+ unsigned long long issued; /* request sent time (STCK) */
+ struct zfcp_unit *unit;
};
typedef void zfcp_fsf_req_handler_t(struct zfcp_fsf_req*);
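
struct zfcp_fsf_req now carries its request-private data as an opaque unsigned long instead of the removed union zfcp_req_data; each completion handler in zfcp_fsf.c casts the value back to the pointer type it stored, as the handler hunks further down show. A small stand-alone sketch of the pattern, with invented names, assuming unsigned long is wide enough to hold a pointer (as on the s390 targets this driver builds for):

#include <stdio.h>

struct demo_port {
	unsigned int d_id;
};

struct demo_fsf_req {
	unsigned long data;                      /* opaque, like fsf_req->data */
	void (*handler)(struct demo_fsf_req *);
};

static void demo_open_port_handler(struct demo_fsf_req *req)
{
	/* the handler knows what it stored and casts back itself */
	struct demo_port *port = (struct demo_port *) req->data;

	printf("open port completed, d_id=0x%06x\n", port->d_id);
}

int main(void)
{
	struct demo_port port = { .d_id = 0x123456 };
	struct demo_fsf_req req = {
		.data = (unsigned long) &port,   /* store pointer as integer */
		.handler = demo_open_port_handler,
	};

	req.handler(&req);
	return 0;
}
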
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index cb4f612550b..023f4e558ae 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -82,6 +82,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
static int zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *);
+static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *);
static int zfcp_erp_adapter_strategy_open_fsf_statusread(
struct zfcp_erp_action *);
@@ -345,13 +346,13 @@ zfcp_erp_adisc(struct zfcp_port *port)
/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
without FC-AL-2 capability, so we don't set it */
- adisc->wwpn = adapter->wwpn;
- adisc->wwnn = adapter->wwnn;
- adisc->nport_id = adapter->s_id;
+ adisc->wwpn = fc_host_port_name(adapter->scsi_host);
+ adisc->wwnn = fc_host_node_name(adapter->scsi_host);
+ adisc->nport_id = fc_host_port_id(adapter->scsi_host);
ZFCP_LOG_INFO("ADISC request from s_id 0x%08x to d_id 0x%08x "
"(wwpn=0x%016Lx, wwnn=0x%016Lx, "
"hard_nport_id=0x%08x, nport_id=0x%08x)\n",
- adapter->s_id, send_els->d_id, (wwn_t) adisc->wwpn,
+ adisc->nport_id, send_els->d_id, (wwn_t) adisc->wwpn,
(wwn_t) adisc->wwnn, adisc->hard_nport_id,
adisc->nport_id);
@@ -404,7 +405,7 @@ zfcp_erp_adisc_handler(unsigned long data)
struct zfcp_send_els *send_els;
struct zfcp_port *port;
struct zfcp_adapter *adapter;
- fc_id_t d_id;
+ u32 d_id;
struct zfcp_ls_adisc_acc *adisc;
send_els = (struct zfcp_send_els *) data;
@@ -435,9 +436,9 @@ zfcp_erp_adisc_handler(unsigned long data)
ZFCP_LOG_INFO("ADISC response from d_id 0x%08x to s_id "
"0x%08x (wwpn=0x%016Lx, wwnn=0x%016Lx, "
"hard_nport_id=0x%08x, nport_id=0x%08x)\n",
- d_id, adapter->s_id, (wwn_t) adisc->wwpn,
- (wwn_t) adisc->wwnn, adisc->hard_nport_id,
- adisc->nport_id);
+ d_id, fc_host_port_id(adapter->scsi_host),
+ (wwn_t) adisc->wwpn, (wwn_t) adisc->wwnn,
+ adisc->hard_nport_id, adisc->nport_id);
/* set wwnn for port */
if (port->wwnn == 0)
@@ -886,7 +887,7 @@ static int
zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
{
int retval = 0;
- struct zfcp_fsf_req *fsf_req;
+ struct zfcp_fsf_req *fsf_req = NULL;
struct zfcp_adapter *adapter = erp_action->adapter;
if (erp_action->fsf_req) {
@@ -896,7 +897,7 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list)
if (fsf_req == erp_action->fsf_req)
break;
- if (fsf_req == erp_action->fsf_req) {
+ if (fsf_req && (fsf_req->erp_action == erp_action)) {
/* fsf_req still exists */
debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
debug_event(adapter->erp_dbf, 3, &fsf_req,
@@ -2258,16 +2259,21 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
static int
zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
{
- int retval;
+ int xconfig, xport;
+
+ if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
+ &erp_action->adapter->status)) {
+ zfcp_erp_adapter_strategy_open_fsf_xport(erp_action);
+ atomic_set(&erp_action->adapter->erp_counter, 0);
+ return ZFCP_ERP_FAILED;
+ }
- /* do 'exchange configuration data' */
- retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action);
- if (retval == ZFCP_ERP_FAILED)
- return retval;
+ xconfig = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action);
+ xport = zfcp_erp_adapter_strategy_open_fsf_xport(erp_action);
+ if ((xconfig == ZFCP_ERP_FAILED) || (xport == ZFCP_ERP_FAILED))
+ return ZFCP_ERP_FAILED;
- /* start the desired number of Status Reads */
- retval = zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
- return retval;
+ return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
}
/*
@@ -2291,7 +2297,9 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
&adapter->status);
ZFCP_LOG_DEBUG("Doing exchange config data\n");
+ write_lock(&adapter->erp_lock);
zfcp_erp_action_to_running(erp_action);
+ write_unlock(&adapter->erp_lock);
zfcp_erp_timeout_init(erp_action);
if (zfcp_fsf_exchange_config_data(erp_action)) {
retval = ZFCP_ERP_FAILED;
@@ -2348,6 +2356,76 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
return retval;
}
+static int
+zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
+{
+ int retval = ZFCP_ERP_SUCCEEDED;
+ int retries;
+ int sleep;
+ struct zfcp_adapter *adapter = erp_action->adapter;
+
+ atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
+
+ for (retries = 0; ; retries++) {
+ ZFCP_LOG_DEBUG("Doing exchange port data\n");
+ zfcp_erp_action_to_running(erp_action);
+ zfcp_erp_timeout_init(erp_action);
+ if (zfcp_fsf_exchange_port_data(erp_action, adapter, NULL)) {
+ retval = ZFCP_ERP_FAILED;
+ debug_text_event(adapter->erp_dbf, 5, "a_fstx_xf");
+ ZFCP_LOG_INFO("error: initiation of exchange of "
+ "port data failed for adapter %s\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ }
+ debug_text_event(adapter->erp_dbf, 6, "a_fstx_xok");
+ ZFCP_LOG_DEBUG("Xchange underway\n");
+
+ /*
+ * Why this works:
+	 * Both the normal completion handler and the timeout handler
+	 * do an 'up' when the 'exchange port data' request completes
+	 * or times out, so the signal to go on cannot get lost while
+	 * waiting on this semaphore.
+	 * Furthermore, this 'adapter_reopen' action is guaranteed to
+	 * be the only pending action (it is the highest-priority
+	 * action and prevents other actions from being created).
+	 * Consequently, the wake-up observed here must belong to the
+	 * 'exchange port data' request.
+ */
+ down(&adapter->erp_ready_sem);
+ if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
+ ZFCP_LOG_INFO("error: exchange of port data "
+ "for adapter %s timed out\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ }
+
+ if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
+ &adapter->status))
+ break;
+
+ ZFCP_LOG_DEBUG("host connection still initialising... "
+ "waiting and retrying...\n");
+ /* sleep a little bit before retry */
+ sleep = retries < ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES ?
+ ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP :
+ ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP;
+ msleep(jiffies_to_msecs(sleep));
+ }
+
+ if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
+ &adapter->status)) {
+ ZFCP_LOG_INFO("error: exchange of port data for "
+ "adapter %s failed\n",
+ zfcp_get_busid_by_adapter(adapter));
+ retval = ZFCP_ERP_FAILED;
+ }
+
+ return retval;
+}
+
/*
* function:
*
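
The retry loop added above depends on both the completion path and the timeout path posting the same semaphore, so the waiter can never miss its wake-up. A stand-alone sketch of that handshake using POSIX semaphores (illustrative only; erp_ready_sem in the driver is a kernel semaphore, and all names below are invented):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static sem_t ready_sem;                 /* stands in for adapter->erp_ready_sem */
static int timed_out = 1;               /* cleared by the completion path */

static void *completion_path(void *arg)
{
	(void) arg;
	usleep(100 * 1000);             /* pretend the exchange finished */
	timed_out = 0;
	sem_post(&ready_sem);           /* a timeout path would post the same sem */
	return NULL;
}

int main(void)
{
	pthread_t thread;

	sem_init(&ready_sem, 0, 0);
	pthread_create(&thread, NULL, completion_path, NULL);

	sem_wait(&ready_sem);           /* mirrors down(&adapter->erp_ready_sem) */
	printf("%s\n", timed_out ? "exchange port data timed out"
				 : "exchange port data completed");

	pthread_join(thread, NULL);
	sem_destroy(&ready_sem);
	return 0;
}
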
@@ -3194,11 +3272,19 @@ zfcp_erp_action_enqueue(int action,
/* fall through !!! */
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
- if (atomic_test_mask
- (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)
- && port->erp_action.action ==
- ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) {
- debug_text_event(adapter->erp_dbf, 4, "pf_actenq_drp");
+ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+ &port->status)) {
+ if (port->erp_action.action !=
+ ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) {
+ ZFCP_LOG_INFO("dropped erp action %i (port "
+ "0x%016Lx, action in use: %i)\n",
+ action, port->wwpn,
+ port->erp_action.action);
+ debug_text_event(adapter->erp_dbf, 4,
+ "pf_actenq_drp");
+ } else
+ debug_text_event(adapter->erp_dbf, 4,
+ "pf_actenq_drpcp");
debug_event(adapter->erp_dbf, 4, &port->wwpn,
sizeof (wwn_t));
goto out;
@@ -3589,6 +3675,9 @@ zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter)
struct zfcp_port *port;
unsigned long flags;
+ if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
+ return;
+
debug_text_event(adapter->erp_dbf, 3, "a_access_recover");
debug_event(adapter->erp_dbf, 3, &adapter->name, 8);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index cd98a2de9f8..c3782261cb5 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -96,7 +96,8 @@ extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
-extern int zfcp_fsf_exchange_port_data(struct zfcp_adapter *,
+extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *,
+ struct zfcp_adapter *,
struct fsf_qtcb_bottom_port *);
extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **,
u32, u32, struct zfcp_sg_list *);
@@ -109,7 +110,6 @@ extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *,
extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
struct zfcp_erp_action *);
extern int zfcp_fsf_send_els(struct zfcp_send_els *);
-extern int zfcp_fsf_req_wait_and_cleanup(struct zfcp_fsf_req *, int, u32 *);
extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
struct zfcp_unit *,
struct scsi_cmnd *,
@@ -182,9 +182,25 @@ extern void zfcp_erp_port_access_changed(struct zfcp_port *);
extern void zfcp_erp_unit_access_changed(struct zfcp_unit *);
/******************************** AUX ****************************************/
-extern void zfcp_cmd_dbf_event_fsf(const char *, struct zfcp_fsf_req *,
- void *, int);
-extern void zfcp_cmd_dbf_event_scsi(const char *, struct scsi_cmnd *);
-extern void zfcp_in_els_dbf_event(struct zfcp_adapter *, const char *,
- struct fsf_status_read_buffer *, int);
+extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
+extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
+ struct fsf_status_read_buffer *);
+extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *,
+ unsigned int, unsigned int, unsigned int,
+ int, int);
+
+extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
+extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
+extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
+extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *);
+extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *);
+
+extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
+ struct scsi_cmnd *);
+extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
+ struct scsi_cmnd *,
+ struct zfcp_fsf_req *);
+extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
+ struct scsi_cmnd *);
+
#endif /* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index c007b6424e7..3b0fc1163f5 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -59,6 +59,8 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *, struct timer_list *);
static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *);
static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *);
static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
+static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *,
+ struct fsf_link_down_info *);
static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *);
static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *);
@@ -285,51 +287,51 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
{
int retval = 0;
struct zfcp_adapter *adapter = fsf_req->adapter;
+ struct fsf_qtcb *qtcb = fsf_req->qtcb;
+ union fsf_prot_status_qual *prot_status_qual =
+ &qtcb->prefix.prot_status_qual;
- ZFCP_LOG_DEBUG("QTCB is at %p\n", fsf_req->qtcb);
+ zfcp_hba_dbf_event_fsf_response(fsf_req);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
ZFCP_LOG_DEBUG("fsf_req 0x%lx has been dismissed\n",
(unsigned long) fsf_req);
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
- zfcp_cmd_dbf_event_fsf("dismiss", fsf_req, NULL, 0);
goto skip_protstatus;
}
/* log additional information provided by FSF (if any) */
- if (unlikely(fsf_req->qtcb->header.log_length)) {
+ if (unlikely(qtcb->header.log_length)) {
/* do not trust them ;-) */
- if (fsf_req->qtcb->header.log_start > sizeof(struct fsf_qtcb)) {
+ if (qtcb->header.log_start > sizeof(struct fsf_qtcb)) {
ZFCP_LOG_NORMAL
("bug: ULP (FSF logging) log data starts "
"beyond end of packet header. Ignored. "
"(start=%i, size=%li)\n",
- fsf_req->qtcb->header.log_start,
+ qtcb->header.log_start,
sizeof(struct fsf_qtcb));
goto forget_log;
}
- if ((size_t) (fsf_req->qtcb->header.log_start +
- fsf_req->qtcb->header.log_length)
+ if ((size_t) (qtcb->header.log_start + qtcb->header.log_length)
> sizeof(struct fsf_qtcb)) {
ZFCP_LOG_NORMAL("bug: ULP (FSF logging) log data ends "
"beyond end of packet header. Ignored. "
"(start=%i, length=%i, size=%li)\n",
- fsf_req->qtcb->header.log_start,
- fsf_req->qtcb->header.log_length,
+ qtcb->header.log_start,
+ qtcb->header.log_length,
sizeof(struct fsf_qtcb));
goto forget_log;
}
ZFCP_LOG_TRACE("ULP log data: \n");
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
- (char *) fsf_req->qtcb +
- fsf_req->qtcb->header.log_start,
- fsf_req->qtcb->header.log_length);
+ (char *) qtcb + qtcb->header.log_start,
+ qtcb->header.log_length);
}
forget_log:
/* evaluate FSF Protocol Status */
- switch (fsf_req->qtcb->prefix.prot_status) {
+ switch (qtcb->prefix.prot_status) {
case FSF_PROT_GOOD:
case FSF_PROT_FSF_STATUS_PRESENTED:
@@ -340,14 +342,9 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
"microcode of version 0x%x, the device driver "
"only supports 0x%x. Aborting.\n",
zfcp_get_busid_by_adapter(adapter),
- fsf_req->qtcb->prefix.prot_status_qual.
- version_error.fsf_version, ZFCP_QTCB_VERSION);
- /* stop operation for this adapter */
- debug_text_exception(adapter->erp_dbf, 0, "prot_ver_err");
+ prot_status_qual->version_error.fsf_version,
+ ZFCP_QTCB_VERSION);
zfcp_erp_adapter_shutdown(adapter, 0);
- zfcp_cmd_dbf_event_fsf("qverserr", fsf_req,
- &fsf_req->qtcb->prefix.prot_status_qual,
- sizeof (union fsf_prot_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -355,16 +352,10 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
ZFCP_LOG_NORMAL("bug: Sequence number mismatch between "
"driver (0x%x) and adapter %s (0x%x). "
"Restarting all operations on this adapter.\n",
- fsf_req->qtcb->prefix.req_seq_no,
+ qtcb->prefix.req_seq_no,
zfcp_get_busid_by_adapter(adapter),
- fsf_req->qtcb->prefix.prot_status_qual.
- sequence_error.exp_req_seq_no);
- debug_text_exception(adapter->erp_dbf, 0, "prot_seq_err");
- /* restart operation on this adapter */
+ prot_status_qual->sequence_error.exp_req_seq_no);
zfcp_erp_adapter_reopen(adapter, 0);
- zfcp_cmd_dbf_event_fsf("seqnoerr", fsf_req,
- &fsf_req->qtcb->prefix.prot_status_qual,
- sizeof (union fsf_prot_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -375,116 +366,35 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
"that used on adapter %s. "
"Stopping all operations on this adapter.\n",
zfcp_get_busid_by_adapter(adapter));
- debug_text_exception(adapter->erp_dbf, 0, "prot_unsup_qtcb");
zfcp_erp_adapter_shutdown(adapter, 0);
- zfcp_cmd_dbf_event_fsf("unsqtcbt", fsf_req,
- &fsf_req->qtcb->prefix.prot_status_qual,
- sizeof (union fsf_prot_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PROT_HOST_CONNECTION_INITIALIZING:
- zfcp_cmd_dbf_event_fsf("hconinit", fsf_req,
- &fsf_req->qtcb->prefix.prot_status_qual,
- sizeof (union fsf_prot_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
&(adapter->status));
- debug_text_event(adapter->erp_dbf, 3, "prot_con_init");
break;
case FSF_PROT_DUPLICATE_REQUEST_ID:
- if (fsf_req->qtcb) {
ZFCP_LOG_NORMAL("bug: The request identifier 0x%Lx "
"to the adapter %s is ambiguous. "
- "Stopping all operations on this "
- "adapter.\n",
- *(unsigned long long *)
- (&fsf_req->qtcb->bottom.support.
- req_handle),
- zfcp_get_busid_by_adapter(adapter));
- } else {
- ZFCP_LOG_NORMAL("bug: The request identifier %p "
- "to the adapter %s is ambiguous. "
- "Stopping all operations on this "
- "adapter. "
- "(bug: got this for an unsolicited "
- "status read request)\n",
- fsf_req,
+ "Stopping all operations on this adapter.\n",
+ *(unsigned long long*)
+ (&qtcb->bottom.support.req_handle),
zfcp_get_busid_by_adapter(adapter));
- }
- debug_text_exception(adapter->erp_dbf, 0, "prot_dup_id");
zfcp_erp_adapter_shutdown(adapter, 0);
- zfcp_cmd_dbf_event_fsf("dupreqid", fsf_req,
- &fsf_req->qtcb->prefix.prot_status_qual,
- sizeof (union fsf_prot_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PROT_LINK_DOWN:
- /*
- * 'test and set' is not atomic here -
- * it's ok as long as calls to our response queue handler
- * (and thus execution of this code here) are serialized
- * by the qdio module
- */
- if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
- &adapter->status)) {
- switch (fsf_req->qtcb->prefix.prot_status_qual.
- locallink_error.code) {
- case FSF_PSQ_LINK_NOLIGHT:
- ZFCP_LOG_INFO("The local link to adapter %s "
- "is down (no light detected).\n",
- zfcp_get_busid_by_adapter(
- adapter));
- break;
- case FSF_PSQ_LINK_WRAPPLUG:
- ZFCP_LOG_INFO("The local link to adapter %s "
- "is down (wrap plug detected).\n",
- zfcp_get_busid_by_adapter(
- adapter));
- break;
- case FSF_PSQ_LINK_NOFCP:
- ZFCP_LOG_INFO("The local link to adapter %s "
- "is down (adjacent node on "
- "link does not support FCP).\n",
- zfcp_get_busid_by_adapter(
- adapter));
- break;
- default:
- ZFCP_LOG_INFO("The local link to adapter %s "
- "is down "
- "(warning: unknown reason "
- "code).\n",
- zfcp_get_busid_by_adapter(
- adapter));
- break;
-
- }
- /*
- * Due to the 'erp failed' flag the adapter won't
- * be recovered but will be just set to 'blocked'
- * state. All subordinary devices will have state
- * 'blocked' and 'erp failed', too.
- * Thus the adapter is still able to provide
- * 'link up' status without being flooded with
- * requests.
- * (note: even 'close port' is not permitted)
- */
- ZFCP_LOG_INFO("Stopping all operations for adapter "
- "%s.\n",
- zfcp_get_busid_by_adapter(adapter));
- atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
- ZFCP_STATUS_COMMON_ERP_FAILED,
- &adapter->status);
- zfcp_erp_adapter_reopen(adapter, 0);
- }
+ zfcp_fsf_link_down_info_eval(adapter,
+ &prot_status_qual->link_down_info);
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PROT_REEST_QUEUE:
- debug_text_event(adapter->erp_dbf, 1, "prot_reest_queue");
- ZFCP_LOG_INFO("The local link to adapter with "
+ ZFCP_LOG_NORMAL("The local link to adapter with "
"%s was re-plugged. "
"Re-starting operations on this adapter.\n",
zfcp_get_busid_by_adapter(adapter));
@@ -495,9 +405,6 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED
| ZFCP_STATUS_COMMON_ERP_FAILED);
- zfcp_cmd_dbf_event_fsf("reestque", fsf_req,
- &fsf_req->qtcb->prefix.prot_status_qual,
- sizeof (union fsf_prot_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -507,12 +414,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
"Restarting all operations on this "
"adapter.\n",
zfcp_get_busid_by_adapter(adapter));
- debug_text_event(adapter->erp_dbf, 0, "prot_err_sta");
- /* restart operation on this adapter */
zfcp_erp_adapter_reopen(adapter, 0);
- zfcp_cmd_dbf_event_fsf("proterrs", fsf_req,
- &fsf_req->qtcb->prefix.prot_status_qual,
- sizeof (union fsf_prot_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -524,11 +426,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
"Stopping all operations on this adapter. "
"(debug info 0x%x).\n",
zfcp_get_busid_by_adapter(adapter),
- fsf_req->qtcb->prefix.prot_status);
- debug_text_event(adapter->erp_dbf, 0, "prot_inval:");
- debug_exception(adapter->erp_dbf, 0,
- &fsf_req->qtcb->prefix.prot_status,
- sizeof (u32));
+ qtcb->prefix.prot_status);
zfcp_erp_adapter_shutdown(adapter, 0);
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
@@ -568,28 +466,18 @@ zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *fsf_req)
"(debug info 0x%x).\n",
zfcp_get_busid_by_adapter(fsf_req->adapter),
fsf_req->qtcb->header.fsf_command);
- debug_text_exception(fsf_req->adapter->erp_dbf, 0,
- "fsf_s_unknown");
zfcp_erp_adapter_shutdown(fsf_req->adapter, 0);
- zfcp_cmd_dbf_event_fsf("unknownc", fsf_req,
- &fsf_req->qtcb->header.fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_FCP_RSP_AVAILABLE:
ZFCP_LOG_DEBUG("FCP Sense data will be presented to the "
"SCSI stack.\n");
- debug_text_event(fsf_req->adapter->erp_dbf, 3, "fsf_s_rsp");
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
- debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_s_astatus");
zfcp_fsf_fsfstatus_qual_eval(fsf_req);
break;
-
- default:
- break;
}
skip_fsfstatus:
@@ -617,44 +505,28 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) {
case FSF_SQ_FCP_RSP_AVAILABLE:
- debug_text_event(fsf_req->adapter->erp_dbf, 4, "fsf_sq_rsp");
break;
case FSF_SQ_RETRY_IF_POSSIBLE:
/* The SCSI-stack may now issue retries or escalate */
- debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_sq_retry");
- zfcp_cmd_dbf_event_fsf("sqretry", fsf_req,
- &fsf_req->qtcb->header.fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_SQ_COMMAND_ABORTED:
/* Carry the aborted state on to upper layer */
- debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_sq_abort");
- zfcp_cmd_dbf_event_fsf("sqabort", fsf_req,
- &fsf_req->qtcb->header.fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_SQ_NO_RECOM:
- debug_text_exception(fsf_req->adapter->erp_dbf, 0,
- "fsf_sq_no_rec");
ZFCP_LOG_NORMAL("bug: No recommendation could be given for a"
"problem on the adapter %s "
"Stopping all operations on this adapter. ",
zfcp_get_busid_by_adapter(fsf_req->adapter));
zfcp_erp_adapter_shutdown(fsf_req->adapter, 0);
- zfcp_cmd_dbf_event_fsf("sqnrecom", fsf_req,
- &fsf_req->qtcb->header.fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_SQ_ULP_PROGRAMMING_ERROR:
ZFCP_LOG_NORMAL("error: not enough SBALs for data transfer "
"(adapter %s)\n",
zfcp_get_busid_by_adapter(fsf_req->adapter));
- debug_text_exception(fsf_req->adapter->erp_dbf, 0,
- "fsf_sq_ulp_err");
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
@@ -668,13 +540,6 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
(char *) &fsf_req->qtcb->header.fsf_status_qual,
sizeof (union fsf_status_qual));
- debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_sq_inval:");
- debug_exception(fsf_req->adapter->erp_dbf, 0,
- &fsf_req->qtcb->header.fsf_status_qual.word[0],
- sizeof (u32));
- zfcp_cmd_dbf_event_fsf("squndef", fsf_req,
- &fsf_req->qtcb->header.fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
@@ -682,6 +547,110 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
return retval;
}
+/**
+ * zfcp_fsf_link_down_info_eval - evaluate link down information block
+ */
+static void
+zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter,
+ struct fsf_link_down_info *link_down)
+{
+ switch (link_down->error_code) {
+ case FSF_PSQ_LINK_NO_LIGHT:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(no light detected)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_WRAP_PLUG:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(wrap plug detected)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_NO_FCP:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(adjacent node on link does not support FCP)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_FIRMWARE_UPDATE:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(firmware update in progress)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_INVALID_WWPN:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(duplicate or invalid WWPN detected)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(no support for NPIV by Fabric)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_NO_FCP_RESOURCES:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(out of resource in FCP daughtercard)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(out of resource in Fabric)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(unable to Fabric login)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
+ ZFCP_LOG_NORMAL("WWPN assignment file corrupted on adapter %s\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
+ ZFCP_LOG_NORMAL("Mode table corrupted on adapter %s\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
+ ZFCP_LOG_NORMAL("No WWPN for assignment table on adapter %s\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ default:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(warning: unknown reason code %d)\n",
+ zfcp_get_busid_by_adapter(adapter),
+ link_down->error_code);
+ }
+
+ if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
+ ZFCP_LOG_DEBUG("Debug information to link down: "
+ "primary_status=0x%02x "
+ "ioerr_code=0x%02x "
+ "action_code=0x%02x "
+ "reason_code=0x%02x "
+ "explanation_code=0x%02x "
+ "vendor_specific_code=0x%02x\n",
+ link_down->primary_status,
+ link_down->ioerr_code,
+ link_down->action_code,
+ link_down->reason_code,
+ link_down->explanation_code,
+ link_down->vendor_specific_code);
+
+ if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
+ &adapter->status)) {
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
+ &adapter->status);
+ switch (link_down->error_code) {
+ case FSF_PSQ_LINK_NO_LIGHT:
+ case FSF_PSQ_LINK_WRAP_PLUG:
+ case FSF_PSQ_LINK_NO_FCP:
+ case FSF_PSQ_LINK_FIRMWARE_UPDATE:
+ zfcp_erp_adapter_reopen(adapter, 0);
+ break;
+ default:
+ zfcp_erp_adapter_failed(adapter);
+ }
+ }
+}
+
/*
* function: zfcp_fsf_req_dispatch
*
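
zfcp_fsf_link_down_info_eval() above folds all link-down handling into one place: log the reason and, on the first transition to 'link unplugged', either reopen the adapter (for transient physical causes) or mark it failed (for configuration problems such as an invalid WWPN). A condensed stand-alone sketch of that decision; the enum values are invented stand-ins for the FSF_PSQ_LINK_* codes:

#include <stdbool.h>
#include <stdio.h>

enum demo_link_down_code {              /* stand-ins for FSF_PSQ_LINK_* */
	DEMO_LINK_NO_LIGHT,
	DEMO_LINK_WRAP_PLUG,
	DEMO_LINK_NO_FCP,
	DEMO_LINK_FIRMWARE_UPDATE,
	DEMO_LINK_INVALID_WWPN,
	DEMO_LINK_NO_NPIV_SUPPORT,
};

static bool demo_link_down_is_transient(enum demo_link_down_code code)
{
	switch (code) {
	case DEMO_LINK_NO_LIGHT:
	case DEMO_LINK_WRAP_PLUG:
	case DEMO_LINK_NO_FCP:
	case DEMO_LINK_FIRMWARE_UPDATE:
		return true;            /* worth retrying: reopen the adapter */
	default:
		return false;           /* configuration problem: mark failed */
	}
}

int main(void)
{
	printf("no light     -> %s\n",
	       demo_link_down_is_transient(DEMO_LINK_NO_LIGHT) ?
	       "adapter reopen" : "adapter failed");
	printf("invalid WWPN -> %s\n",
	       demo_link_down_is_transient(DEMO_LINK_INVALID_WWPN) ?
	       "adapter reopen" : "adapter failed");
	return 0;
}
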
@@ -696,11 +665,6 @@ zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
struct zfcp_adapter *adapter = fsf_req->adapter;
int retval = 0;
- if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
- ZFCP_LOG_TRACE("fsf_req=%p, QTCB=%p\n", fsf_req, fsf_req->qtcb);
- ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
- (char *) fsf_req->qtcb, sizeof(struct fsf_qtcb));
- }
switch (fsf_req->fsf_command) {
@@ -760,13 +724,13 @@ zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
ZFCP_LOG_NORMAL("bug: Command issued by the device driver is "
"not supported by the adapter %s\n",
- zfcp_get_busid_by_adapter(fsf_req->adapter));
+ zfcp_get_busid_by_adapter(adapter));
if (fsf_req->fsf_command != fsf_req->qtcb->header.fsf_command)
ZFCP_LOG_NORMAL
("bug: Command issued by the device driver differs "
"from the command returned by the adapter %s "
"(debug info 0x%x, 0x%x).\n",
- zfcp_get_busid_by_adapter(fsf_req->adapter),
+ zfcp_get_busid_by_adapter(adapter),
fsf_req->fsf_command,
fsf_req->qtcb->header.fsf_command);
}
@@ -774,8 +738,6 @@ zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
if (!erp_action)
return retval;
- debug_text_event(adapter->erp_dbf, 3, "a_frh");
- debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
zfcp_erp_async_handler(erp_action, 0);
return retval;
@@ -821,7 +783,7 @@ zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
goto failed_buf;
}
memset(status_buffer, 0, sizeof (struct fsf_status_read_buffer));
- fsf_req->data.status_read.buffer = status_buffer;
+ fsf_req->data = (unsigned long) status_buffer;
/* insert pointer to respective buffer */
sbale = zfcp_qdio_sbale_curr(fsf_req);
@@ -846,6 +808,7 @@ zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
failed_buf:
zfcp_fsf_req_free(fsf_req);
failed_req_create:
+ zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
out:
write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
return retval;
@@ -859,7 +822,7 @@ zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req)
struct zfcp_port *port;
unsigned long flags;
- status_buffer = fsf_req->data.status_read.buffer;
+ status_buffer = (struct fsf_status_read_buffer *) fsf_req->data;
adapter = fsf_req->adapter;
read_lock_irqsave(&zfcp_data.config_lock, flags);
@@ -918,38 +881,33 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
int retval = 0;
struct zfcp_adapter *adapter = fsf_req->adapter;
struct fsf_status_read_buffer *status_buffer =
- fsf_req->data.status_read.buffer;
+ (struct fsf_status_read_buffer *) fsf_req->data;
if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
+ zfcp_hba_dbf_event_fsf_unsol("dism", adapter, status_buffer);
mempool_free(status_buffer, adapter->pool.data_status_read);
zfcp_fsf_req_free(fsf_req);
goto out;
}
+ zfcp_hba_dbf_event_fsf_unsol("read", adapter, status_buffer);
+
switch (status_buffer->status_type) {
case FSF_STATUS_READ_PORT_CLOSED:
- debug_text_event(adapter->erp_dbf, 3, "unsol_pclosed:");
- debug_event(adapter->erp_dbf, 3,
- &status_buffer->d_id, sizeof (u32));
zfcp_fsf_status_read_port_closed(fsf_req);
break;
case FSF_STATUS_READ_INCOMING_ELS:
- debug_text_event(adapter->erp_dbf, 3, "unsol_els:");
zfcp_fsf_incoming_els(fsf_req);
break;
case FSF_STATUS_READ_SENSE_DATA_AVAIL:
- debug_text_event(adapter->erp_dbf, 3, "unsol_sense:");
ZFCP_LOG_INFO("unsolicited sense data received (adapter %s)\n",
zfcp_get_busid_by_adapter(adapter));
- ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, (char *) status_buffer,
- sizeof(struct fsf_status_read_buffer));
break;
case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
- debug_text_event(adapter->erp_dbf, 3, "unsol_bit_err:");
ZFCP_LOG_NORMAL("Bit error threshold data received:\n");
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
(char *) status_buffer,
@@ -957,17 +915,32 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
break;
case FSF_STATUS_READ_LINK_DOWN:
- debug_text_event(adapter->erp_dbf, 0, "unsol_link_down:");
- ZFCP_LOG_INFO("Local link to adapter %s is down\n",
+ switch (status_buffer->status_subtype) {
+ case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
+ ZFCP_LOG_INFO("Physical link to adapter %s is down\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_STATUS_READ_SUB_FDISC_FAILED:
+ ZFCP_LOG_INFO("Local link to adapter %s is down "
+ "due to failed FDISC login\n",
zfcp_get_busid_by_adapter(adapter));
- atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
- &adapter->status);
- zfcp_erp_adapter_failed(adapter);
+ break;
+ case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
+ ZFCP_LOG_INFO("Local link to adapter %s is down "
+ "due to firmware update on adapter\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ default:
+ ZFCP_LOG_INFO("Local link to adapter %s is down "
+ "due to unknown reason\n",
+ zfcp_get_busid_by_adapter(adapter));
+ };
+ zfcp_fsf_link_down_info_eval(adapter,
+ (struct fsf_link_down_info *) &status_buffer->payload);
break;
case FSF_STATUS_READ_LINK_UP:
- debug_text_event(adapter->erp_dbf, 2, "unsol_link_up:");
- ZFCP_LOG_INFO("Local link to adapter %s was replugged. "
+ ZFCP_LOG_NORMAL("Local link to adapter %s was replugged. "
"Restarting operations on this adapter\n",
zfcp_get_busid_by_adapter(adapter));
/* All ports should be marked as ready to run again */
@@ -980,35 +953,40 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
break;
case FSF_STATUS_READ_CFDC_UPDATED:
- debug_text_event(adapter->erp_dbf, 2, "unsol_cfdc_update:");
- ZFCP_LOG_INFO("CFDC has been updated on the adapter %s\n",
+ ZFCP_LOG_NORMAL("CFDC has been updated on the adapter %s\n",
zfcp_get_busid_by_adapter(adapter));
zfcp_erp_adapter_access_changed(adapter);
break;
case FSF_STATUS_READ_CFDC_HARDENED:
- debug_text_event(adapter->erp_dbf, 2, "unsol_cfdc_harden:");
switch (status_buffer->status_subtype) {
case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE:
- ZFCP_LOG_INFO("CFDC of adapter %s saved on SE\n",
+ ZFCP_LOG_NORMAL("CFDC of adapter %s saved on SE\n",
zfcp_get_busid_by_adapter(adapter));
break;
case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2:
- ZFCP_LOG_INFO("CFDC of adapter %s has been copied "
+ ZFCP_LOG_NORMAL("CFDC of adapter %s has been copied "
"to the secondary SE\n",
zfcp_get_busid_by_adapter(adapter));
break;
default:
- ZFCP_LOG_INFO("CFDC of adapter %s has been hardened\n",
+ ZFCP_LOG_NORMAL("CFDC of adapter %s has been hardened\n",
zfcp_get_busid_by_adapter(adapter));
}
break;
+ case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
+ debug_text_event(adapter->erp_dbf, 2, "unsol_features:");
+ ZFCP_LOG_INFO("List of supported features on adapter %s has "
+ "been changed from 0x%08X to 0x%08X\n",
+ zfcp_get_busid_by_adapter(adapter),
+ *(u32*) (status_buffer->payload + 4),
+ *(u32*) (status_buffer->payload));
+ adapter->adapter_features = *(u32*) status_buffer->payload;
+ break;
+
default:
- debug_text_event(adapter->erp_dbf, 0, "unsol_unknown:");
- debug_exception(adapter->erp_dbf, 0,
- &status_buffer->status_type, sizeof (u32));
- ZFCP_LOG_NORMAL("bug: An unsolicited status packet of unknown "
+ ZFCP_LOG_NORMAL("warning: An unsolicited status packet of unknown "
"type was received (debug info 0x%x)\n",
status_buffer->status_type);
ZFCP_LOG_DEBUG("Dump of status_read_buffer %p:\n",
@@ -1093,7 +1071,7 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
- fsf_req->data.abort_fcp_command.unit = unit;
+ fsf_req->data = (unsigned long) unit;
/* set handles of unit and its parent port in QTCB */
fsf_req->qtcb->header.lun_handle = unit->handle;
@@ -1139,7 +1117,7 @@ static int
zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
{
int retval = -EINVAL;
- struct zfcp_unit *unit = new_fsf_req->data.abort_fcp_command.unit;
+ struct zfcp_unit *unit;
unsigned char status_qual =
new_fsf_req->qtcb->header.fsf_status_qual.word[0];
@@ -1150,6 +1128,8 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
goto skip_fsfstatus;
}
+ unit = (struct zfcp_unit *) new_fsf_req->data;
+
/* evaluate FSF status in QTCB */
switch (new_fsf_req->qtcb->header.fsf_status) {
@@ -1364,7 +1344,7 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
sbale[3].addr = zfcp_sg_to_address(&ct->resp[0]);
sbale[3].length = ct->resp[0].length;
sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
- } else if (adapter->supported_features &
+ } else if (adapter->adapter_features &
FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
/* try to use chained SBALs */
bytes = zfcp_qdio_sbals_from_sg(fsf_req,
@@ -1414,7 +1394,9 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
fsf_req->qtcb->header.port_handle = port->handle;
fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class;
fsf_req->qtcb->bottom.support.timeout = ct->timeout;
- fsf_req->data.send_ct = ct;
+ fsf_req->data = (unsigned long) ct;
+
+ zfcp_san_dbf_event_ct_request(fsf_req);
/* start QDIO request for this FSF request */
ret = zfcp_fsf_req_send(fsf_req, ct->timer);
@@ -1445,10 +1427,10 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
* zfcp_fsf_send_ct_handler - handler for Generic Service requests
* @fsf_req: pointer to struct zfcp_fsf_req
*
- * Data specific for the Generic Service request is passed by
- * fsf_req->data.send_ct
- * Usually a specific handler for the request is called via
- * fsf_req->data.send_ct->handler at end of this function.
+ * Data specific for the Generic Service request is passed using
+ * fsf_req->data. There we find the pointer to struct zfcp_send_ct.
+ * Usually a specific handler for the CT request is called which is
+ * found in this structure.
*/
static int
zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
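
As the updated comment explains, the FSF handler now finds a pointer to struct zfcp_send_ct in fsf_req->data and invokes the request-specific completion handler stored in that structure. A stand-alone sketch of this callback-in-the-request pattern (all names below are invented, and the field layout of the real struct zfcp_send_ct may differ):

#include <stdio.h>

struct demo_send_ct {
	int status;
	void (*handler)(unsigned long);        /* request-specific completion */
	unsigned long handler_data;
};

struct demo_fsf_req {
	unsigned long data;                    /* points to struct demo_send_ct */
};

static void demo_ct_done(unsigned long data)
{
	struct demo_send_ct *ct = (struct demo_send_ct *) data;

	printf("CT request finished, status=%d\n", ct->status);
}

static void demo_send_ct_handler(struct demo_fsf_req *req)
{
	struct demo_send_ct *ct = (struct demo_send_ct *) req->data;

	ct->status = 0;                        /* pretend FSF_GOOD */
	if (ct->handler)
		ct->handler(ct->handler_data); /* handler found in the structure */
}

int main(void)
{
	struct demo_send_ct ct = { .handler = demo_ct_done };
	struct demo_fsf_req req;

	ct.handler_data = (unsigned long) &ct;
	req.data = (unsigned long) &ct;
	demo_send_ct_handler(&req);
	return 0;
}
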
@@ -1462,7 +1444,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
u16 subtable, rule, counter;
adapter = fsf_req->adapter;
- send_ct = fsf_req->data.send_ct;
+ send_ct = (struct zfcp_send_ct *) fsf_req->data;
port = send_ct->port;
header = &fsf_req->qtcb->header;
bottom = &fsf_req->qtcb->bottom.support;
@@ -1474,6 +1456,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
switch (header->fsf_status) {
case FSF_GOOD:
+ zfcp_san_dbf_event_ct_response(fsf_req);
retval = 0;
break;
@@ -1634,7 +1617,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
{
volatile struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *fsf_req;
- fc_id_t d_id;
+ u32 d_id;
struct zfcp_adapter *adapter;
unsigned long lock_flags;
int bytes;
@@ -1664,7 +1647,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
sbale[3].addr = zfcp_sg_to_address(&els->resp[0]);
sbale[3].length = els->resp[0].length;
sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
- } else if (adapter->supported_features &
+ } else if (adapter->adapter_features &
FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
/* try to use chained SBALs */
bytes = zfcp_qdio_sbals_from_sg(fsf_req,
@@ -1714,10 +1697,12 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
fsf_req->qtcb->bottom.support.d_id = d_id;
fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class;
fsf_req->qtcb->bottom.support.timeout = ZFCP_ELS_TIMEOUT;
- fsf_req->data.send_els = els;
+ fsf_req->data = (unsigned long) els;
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+ zfcp_san_dbf_event_els_request(fsf_req);
+
/* start QDIO request for this FSF request */
ret = zfcp_fsf_req_send(fsf_req, els->timer);
if (ret) {
@@ -1746,23 +1731,23 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
* zfcp_fsf_send_els_handler - handler for ELS commands
* @fsf_req: pointer to struct zfcp_fsf_req
*
- * Data specific for the ELS command is passed by
- * fsf_req->data.send_els
- * Usually a specific handler for the command is called via
- * fsf_req->data.send_els->handler at end of this function.
+ * Data specific for the ELS command is passed using
+ * fsf_req->data. There we find the pointer to struct zfcp_send_els.
+ * Usually a specific handler for the ELS command is called which is
+ * found in this structure.
*/
static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
{
struct zfcp_adapter *adapter;
struct zfcp_port *port;
- fc_id_t d_id;
+ u32 d_id;
struct fsf_qtcb_header *header;
struct fsf_qtcb_bottom_support *bottom;
struct zfcp_send_els *send_els;
int retval = -EINVAL;
u16 subtable, rule, counter;
- send_els = fsf_req->data.send_els;
+ send_els = (struct zfcp_send_els *) fsf_req->data;
adapter = send_els->adapter;
port = send_els->port;
d_id = send_els->d_id;
@@ -1775,6 +1760,7 @@ static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
switch (header->fsf_status) {
case FSF_GOOD:
+ zfcp_san_dbf_event_els_response(fsf_req);
retval = 0;
break;
@@ -1954,7 +1940,9 @@ zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
erp_action->fsf_req->erp_action = erp_action;
erp_action->fsf_req->qtcb->bottom.config.feature_selection =
- (FSF_FEATURE_CFDC | FSF_FEATURE_LUN_SHARING);
+ FSF_FEATURE_CFDC |
+ FSF_FEATURE_LUN_SHARING |
+ FSF_FEATURE_UPDATE_ALERT;
/* start QDIO request for this FSF request */
retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
@@ -1990,29 +1978,36 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
{
struct fsf_qtcb_bottom_config *bottom;
struct zfcp_adapter *adapter = fsf_req->adapter;
+ struct Scsi_Host *shost = adapter->scsi_host;
bottom = &fsf_req->qtcb->bottom.config;
ZFCP_LOG_DEBUG("low/high QTCB version 0x%x/0x%x of FSF\n",
bottom->low_qtcb_version, bottom->high_qtcb_version);
adapter->fsf_lic_version = bottom->lic_version;
- adapter->supported_features = bottom->supported_features;
+ adapter->adapter_features = bottom->adapter_features;
+ adapter->connection_features = bottom->connection_features;
adapter->peer_wwpn = 0;
adapter->peer_wwnn = 0;
adapter->peer_d_id = 0;
if (xchg_ok) {
- adapter->wwnn = bottom->nport_serv_param.wwnn;
- adapter->wwpn = bottom->nport_serv_param.wwpn;
- adapter->s_id = bottom->s_id & ZFCP_DID_MASK;
+ fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
+ fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
+ fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
+ fc_host_speed(shost) = bottom->fc_link_speed;
+ fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
adapter->fc_topology = bottom->fc_topology;
- adapter->fc_link_speed = bottom->fc_link_speed;
adapter->hydra_version = bottom->adapter_type;
+ if (adapter->physical_wwpn == 0)
+ adapter->physical_wwpn = fc_host_port_name(shost);
+ if (adapter->physical_s_id == 0)
+ adapter->physical_s_id = fc_host_port_id(shost);
} else {
- adapter->wwnn = 0;
- adapter->wwpn = 0;
- adapter->s_id = 0;
+ fc_host_node_name(shost) = 0;
+ fc_host_port_name(shost) = 0;
+ fc_host_port_id(shost) = 0;
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
adapter->fc_topology = 0;
- adapter->fc_link_speed = 0;
adapter->hydra_version = 0;
}
@@ -2022,26 +2017,28 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
adapter->peer_wwnn = bottom->plogi_payload.wwnn;
}
- if(adapter->supported_features & FSF_FEATURE_HBAAPI_MANAGEMENT){
+ if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
adapter->hardware_version = bottom->hardware_version;
- memcpy(adapter->serial_number, bottom->serial_number, 17);
- EBCASC(adapter->serial_number, sizeof(adapter->serial_number));
+ memcpy(fc_host_serial_number(shost), bottom->serial_number,
+ min(FC_SERIAL_NUMBER_SIZE, 17));
+ EBCASC(fc_host_serial_number(shost),
+ min(FC_SERIAL_NUMBER_SIZE, 17));
}
ZFCP_LOG_NORMAL("The adapter %s reported the following characteristics:\n"
- "WWNN 0x%016Lx, "
- "WWPN 0x%016Lx, "
- "S_ID 0x%08x,\n"
- "adapter version 0x%x, "
- "LIC version 0x%x, "
- "FC link speed %d Gb/s\n",
- zfcp_get_busid_by_adapter(adapter),
- adapter->wwnn,
- adapter->wwpn,
- (unsigned int) adapter->s_id,
- adapter->hydra_version,
- adapter->fsf_lic_version,
- adapter->fc_link_speed);
+ "WWNN 0x%016Lx, "
+ "WWPN 0x%016Lx, "
+ "S_ID 0x%08x,\n"
+ "adapter version 0x%x, "
+ "LIC version 0x%x, "
+ "FC link speed %d Gb/s\n",
+ zfcp_get_busid_by_adapter(adapter),
+ (wwn_t) fc_host_node_name(shost),
+ (wwn_t) fc_host_port_name(shost),
+ fc_host_port_id(shost),
+ adapter->hydra_version,
+ adapter->fsf_lic_version,
+ fc_host_speed(shost));
if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) {
ZFCP_LOG_NORMAL("error: the adapter %s "
"only supports newer control block "
@@ -2062,7 +2059,6 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
zfcp_erp_adapter_shutdown(adapter, 0);
return -EIO;
}
- zfcp_set_fc_host_attrs(adapter);
return 0;
}
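
The serial number above is copied with an explicit upper bound, min(FC_SERIAL_NUMBER_SIZE, 17), so the copy can exceed neither the transport-class field nor the 17 bytes taken from the adapter, and is then converted from EBCDIC in place via EBCASC(). A stand-alone sketch of the bounded copy (sizes are invented and the EBCDIC conversion is only stubbed out here):

#include <stdio.h>
#include <string.h>

#define DEMO_DST_SIZE 8                 /* pretend transport-class field size */
#define DEMO_SRC_SIZE 17                /* pretend adapter-provided field size */

static size_t demo_min(size_t a, size_t b)
{
	return a < b ? a : b;
}

/* placeholder for the in-place EBCDIC -> ASCII conversion done by EBCASC() */
static void demo_to_ascii(char *buf, size_t len)
{
	(void) buf;
	(void) len;
}

int main(void)
{
	char src[DEMO_SRC_SIZE] = "ABCDEF0123456789";
	char dst[DEMO_DST_SIZE + 1] = { 0 };
	size_t n = demo_min(DEMO_DST_SIZE, DEMO_SRC_SIZE);

	memcpy(dst, src, n);            /* never more than either field holds */
	demo_to_ascii(dst, n);
	printf("copied %zu bytes: %s\n", n, dst);
	return 0;
}
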
@@ -2078,11 +2074,12 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
{
struct fsf_qtcb_bottom_config *bottom;
struct zfcp_adapter *adapter = fsf_req->adapter;
+ struct fsf_qtcb *qtcb = fsf_req->qtcb;
if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
return -EIO;
- switch (fsf_req->qtcb->header.fsf_status) {
+ switch (qtcb->header.fsf_status) {
case FSF_GOOD:
if (zfcp_fsf_exchange_config_evaluate(fsf_req, 1))
@@ -2112,7 +2109,7 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
zfcp_erp_adapter_shutdown(adapter, 0);
return -EIO;
case FSF_TOPO_FABRIC:
- ZFCP_LOG_INFO("Switched fabric fibrechannel "
+ ZFCP_LOG_NORMAL("Switched fabric fibrechannel "
"network detected at adapter %s.\n",
zfcp_get_busid_by_adapter(adapter));
break;
@@ -2130,7 +2127,7 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
zfcp_erp_adapter_shutdown(adapter, 0);
return -EIO;
}
- bottom = &fsf_req->qtcb->bottom.config;
+ bottom = &qtcb->bottom.config;
if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
ZFCP_LOG_NORMAL("bug: Maximum QTCB size (%d bytes) "
"allowed by the adapter %s "
@@ -2155,12 +2152,10 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
if (zfcp_fsf_exchange_config_evaluate(fsf_req, 0))
return -EIO;
- ZFCP_LOG_INFO("Local link to adapter %s is down\n",
- zfcp_get_busid_by_adapter(adapter));
- atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
- ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
- &adapter->status);
- zfcp_erp_adapter_failed(adapter);
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
+
+ zfcp_fsf_link_down_info_eval(adapter,
+ &qtcb->header.fsf_status_qual.link_down_info);
break;
default:
debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf-stat-ng");
@@ -2174,11 +2169,13 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
/**
* zfcp_fsf_exchange_port_data - request information about local port
+ * @erp_action: ERP action for the adapter for which port data is requested
* @adapter: for which port data is requested
* @data: response to exchange port data request
*/
int
-zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
+zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
+ struct zfcp_adapter *adapter,
struct fsf_qtcb_bottom_port *data)
{
volatile struct qdio_buffer_element *sbale;
@@ -2187,7 +2184,7 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
struct zfcp_fsf_req *fsf_req;
struct timer_list *timer;
- if(!(adapter->supported_features & FSF_FEATURE_HBAAPI_MANAGEMENT)){
+ if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) {
ZFCP_LOG_INFO("error: exchange port data "
"command not supported by adapter %s\n",
zfcp_get_busid_by_adapter(adapter));
@@ -2211,12 +2208,18 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
goto out;
}
+ if (erp_action) {
+ erp_action->fsf_req = fsf_req;
+ fsf_req->erp_action = erp_action;
+ }
+
+ if (data)
+ fsf_req->data = (unsigned long) data;
+
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
- fsf_req->data.port_data = data;
-
init_timer(timer);
timer->function = zfcp_fsf_request_timeout_handler;
timer->data = (unsigned long) adapter;
@@ -2228,6 +2231,8 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
"command on the adapter %s\n",
zfcp_get_busid_by_adapter(adapter));
zfcp_fsf_req_free(fsf_req);
+ if (erp_action)
+ erp_action->fsf_req = NULL;
write_unlock_irqrestore(&adapter->request_queue.queue_lock,
lock_flags);
goto out;
@@ -2256,21 +2261,42 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
static void
zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req)
{
- struct fsf_qtcb_bottom_port *bottom;
- struct fsf_qtcb_bottom_port *data = fsf_req->data.port_data;
+ struct zfcp_adapter *adapter = fsf_req->adapter;
+ struct Scsi_Host *shost = adapter->scsi_host;
+ struct fsf_qtcb *qtcb = fsf_req->qtcb;
+ struct fsf_qtcb_bottom_port *bottom, *data;
if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
- switch (fsf_req->qtcb->header.fsf_status) {
+ switch (qtcb->header.fsf_status) {
case FSF_GOOD:
- bottom = &fsf_req->qtcb->bottom.port;
- memcpy(data, bottom, sizeof(*data));
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
+
+ bottom = &qtcb->bottom.port;
+ data = (struct fsf_qtcb_bottom_port*) fsf_req->data;
+ if (data)
+ memcpy(data, bottom, sizeof(struct fsf_qtcb_bottom_port));
+ if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
+ adapter->physical_wwpn = bottom->wwpn;
+ adapter->physical_s_id = bottom->fc_port_id;
+ } else {
+ adapter->physical_wwpn = fc_host_port_name(shost);
+ adapter->physical_s_id = fc_host_port_id(shost);
+ }
+ fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
+ break;
+
+ case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
+
+ zfcp_fsf_link_down_info_eval(adapter,
+ &qtcb->header.fsf_status_qual.link_down_info);
break;
default:
- debug_text_event(fsf_req->adapter->erp_dbf, 0, "xchg-port-ng");
- debug_event(fsf_req->adapter->erp_dbf, 0,
+ debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng");
+ debug_event(adapter->erp_dbf, 0,
&fsf_req->qtcb->header.fsf_status, sizeof(u32));
}
}
@@ -2312,7 +2338,7 @@ zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
erp_action->fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id;
atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
- erp_action->fsf_req->data.open_port.port = erp_action->port;
+ erp_action->fsf_req->data = (unsigned long) erp_action->port;
erp_action->fsf_req->erp_action = erp_action;
/* start QDIO request for this FSF request */
@@ -2353,7 +2379,7 @@ zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req)
struct fsf_qtcb_header *header;
u16 subtable, rule, counter;
- port = fsf_req->data.open_port.port;
+ port = (struct zfcp_port *) fsf_req->data;
header = &fsf_req->qtcb->header;
if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
@@ -2566,7 +2592,7 @@ zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
- erp_action->fsf_req->data.close_port.port = erp_action->port;
+ erp_action->fsf_req->data = (unsigned long) erp_action->port;
erp_action->fsf_req->erp_action = erp_action;
erp_action->fsf_req->qtcb->header.port_handle =
erp_action->port->handle;
@@ -2606,7 +2632,7 @@ zfcp_fsf_close_port_handler(struct zfcp_fsf_req *fsf_req)
int retval = -EINVAL;
struct zfcp_port *port;
- port = fsf_req->data.close_port.port;
+ port = (struct zfcp_port *) fsf_req->data;
if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
/* don't change port status in our bookkeeping */
@@ -2703,8 +2729,8 @@ zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
&erp_action->port->status);
/* save a pointer to this port */
- erp_action->fsf_req->data.close_physical_port.port = erp_action->port;
- /* port to be closeed */
+ erp_action->fsf_req->data = (unsigned long) erp_action->port;
+ /* port to be closed */
erp_action->fsf_req->qtcb->header.port_handle =
erp_action->port->handle;
erp_action->fsf_req->erp_action = erp_action;
@@ -2747,7 +2773,7 @@ zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req)
struct fsf_qtcb_header *header;
u16 subtable, rule, counter;
- port = fsf_req->data.close_physical_port.port;
+ port = (struct zfcp_port *) fsf_req->data;
header = &fsf_req->qtcb->header;
if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
@@ -2908,10 +2934,11 @@ zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
erp_action->port->handle;
erp_action->fsf_req->qtcb->bottom.support.fcp_lun =
erp_action->unit->fcp_lun;
+ if (!(erp_action->adapter->connection_features & FSF_FEATURE_NPIV_MODE))
erp_action->fsf_req->qtcb->bottom.support.option =
FSF_OPEN_LUN_SUPPRESS_BOXING;
atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
- erp_action->fsf_req->data.open_unit.unit = erp_action->unit;
+ erp_action->fsf_req->data = (unsigned long) erp_action->unit;
erp_action->fsf_req->erp_action = erp_action;
/* start QDIO request for this FSF request */
@@ -2955,9 +2982,9 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
struct fsf_qtcb_bottom_support *bottom;
struct fsf_queue_designator *queue_designator;
u16 subtable, rule, counter;
- u32 allowed, exclusive, readwrite;
+ int exclusive, readwrite;
- unit = fsf_req->data.open_unit.unit;
+ unit = (struct zfcp_unit *) fsf_req->data;
if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
/* don't change unit status in our bookkeeping */
@@ -2969,10 +2996,6 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
bottom = &fsf_req->qtcb->bottom.support;
queue_designator = &header->fsf_status_qual.fsf_queue_designator;
- allowed = bottom->lun_access_info & FSF_UNIT_ACCESS_OPEN_LUN_ALLOWED;
- exclusive = bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE;
- readwrite = bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER;
-
atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_UNIT_SHARED |
ZFCP_STATUS_UNIT_READONLY,
@@ -3146,10 +3169,15 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
unit->handle);
/* mark unit as open */
atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
- atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
- ZFCP_STATUS_COMMON_ACCESS_BOXED,
- &unit->status);
- if (adapter->supported_features & FSF_FEATURE_LUN_SHARING){
+
+ if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
+ (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
+ (adapter->ccw_device->id.dev_model != ZFCP_DEVICE_MODEL_PRIV)) {
+ exclusive = (bottom->lun_access_info &
+ FSF_UNIT_ACCESS_EXCLUSIVE);
+ readwrite = (bottom->lun_access_info &
+ FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
+
if (!exclusive)
atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
&unit->status);
@@ -3242,7 +3270,7 @@ zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
erp_action->port->handle;
erp_action->fsf_req->qtcb->header.lun_handle = erp_action->unit->handle;
atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
- erp_action->fsf_req->data.close_unit.unit = erp_action->unit;
+ erp_action->fsf_req->data = (unsigned long) erp_action->unit;
erp_action->fsf_req->erp_action = erp_action;
/* start QDIO request for this FSF request */
@@ -3281,7 +3309,7 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
int retval = -EINVAL;
struct zfcp_unit *unit;
- unit = fsf_req->data.close_unit.unit; /* restore unit */
+ unit = (struct zfcp_unit *) fsf_req->data;
if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
/* don't change unit status in our bookkeeping */
@@ -3305,9 +3333,6 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
debug_text_event(fsf_req->adapter->erp_dbf, 1,
"fsf_s_phand_nv");
zfcp_erp_adapter_reopen(unit->port->adapter, 0);
- zfcp_cmd_dbf_event_fsf("porthinv", fsf_req,
- &fsf_req->qtcb->header.fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -3326,9 +3351,6 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
debug_text_event(fsf_req->adapter->erp_dbf, 1,
"fsf_s_lhand_nv");
zfcp_erp_port_reopen(unit->port, 0);
- zfcp_cmd_dbf_event_fsf("lunhinv", fsf_req,
- &fsf_req->qtcb->header.fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -3436,21 +3458,14 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
goto failed_req_create;
}
- /*
- * associate FSF request with SCSI request
- * (need this for look up on abort)
- */
- fsf_req->data.send_fcp_command_task.fsf_req = fsf_req;
- scsi_cmnd->host_scribble = (char *) &(fsf_req->data);
+ zfcp_unit_get(unit);
+ fsf_req->unit = unit;
- /*
- * associate SCSI command with FSF request
- * (need this for look up on normal command completion)
- */
- fsf_req->data.send_fcp_command_task.scsi_cmnd = scsi_cmnd;
- fsf_req->data.send_fcp_command_task.start_jiffies = jiffies;
- fsf_req->data.send_fcp_command_task.unit = unit;
- ZFCP_LOG_DEBUG("unit=%p, fcp_lun=0x%016Lx\n", unit, unit->fcp_lun);
+ /* associate FSF request with SCSI request (for look up on abort) */
+ scsi_cmnd->host_scribble = (char *) fsf_req;
+
+ /* associate SCSI command with FSF request */
+ fsf_req->data = (unsigned long) scsi_cmnd;
/* set handles of unit and its parent port in QTCB */
fsf_req->qtcb->header.lun_handle = unit->handle;
@@ -3584,6 +3599,7 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
send_failed:
no_fit:
failed_scsi_cmnd:
+ zfcp_unit_put(unit);
zfcp_fsf_req_free(fsf_req);
fsf_req = NULL;
scsi_cmnd->host_scribble = NULL;
@@ -3640,7 +3656,7 @@ zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
* hold a pointer to the unit being target of this
* task management request
*/
- fsf_req->data.send_fcp_command_task_management.unit = unit;
+ fsf_req->data = (unsigned long) unit;
/* set FSF related fields in QTCB */
fsf_req->qtcb->header.lun_handle = unit->handle;
@@ -3706,9 +3722,9 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
header = &fsf_req->qtcb->header;
if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
- unit = fsf_req->data.send_fcp_command_task_management.unit;
+ unit = (struct zfcp_unit *) fsf_req->data;
else
- unit = fsf_req->data.send_fcp_command_task.unit;
+ unit = fsf_req->unit;
if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
/* go directly to calls of special handlers */
@@ -3765,10 +3781,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
debug_text_event(fsf_req->adapter->erp_dbf, 1,
"fsf_s_hand_mis");
zfcp_erp_adapter_reopen(unit->port->adapter, 0);
- zfcp_cmd_dbf_event_fsf("handmism",
- fsf_req,
- &header->fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -3789,10 +3801,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
debug_text_exception(fsf_req->adapter->erp_dbf, 0,
"fsf_s_class_nsup");
zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
- zfcp_cmd_dbf_event_fsf("unsclass",
- fsf_req,
- &header->fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -3811,10 +3819,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
debug_text_event(fsf_req->adapter->erp_dbf, 1,
"fsf_s_fcp_lun_nv");
zfcp_erp_port_reopen(unit->port, 0);
- zfcp_cmd_dbf_event_fsf("fluninv",
- fsf_req,
- &header->fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -3853,10 +3857,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
debug_text_event(fsf_req->adapter->erp_dbf, 0,
"fsf_s_dir_ind_nv");
zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
- zfcp_cmd_dbf_event_fsf("dirinv",
- fsf_req,
- &header->fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -3872,10 +3872,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
debug_text_event(fsf_req->adapter->erp_dbf, 0,
"fsf_s_cmd_len_nv");
zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
- zfcp_cmd_dbf_event_fsf("cleninv",
- fsf_req,
- &header->fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -3947,6 +3943,8 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
zfcp_fsf_send_fcp_command_task_management_handler(fsf_req);
} else {
retval = zfcp_fsf_send_fcp_command_task_handler(fsf_req);
+ fsf_req->unit = NULL;
+ zfcp_unit_put(unit);
}
return retval;
}
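
The two hunks above pair zfcp_unit_get() on the submission path with zfcp_unit_put() once the command handler has run, so the unit object cannot go away while a SCSI command is still in flight. A minimal stand-alone sketch of that pairing, using a plain counter and invented helper names rather than the real zfcp API:

#include <assert.h>
#include <stdio.h>

struct unit {
    int refcount;               /* stands in for the reference count of struct zfcp_unit */
};

struct fsf_req {
    struct unit *unit;          /* reference held for the lifetime of the request */
};

static void unit_get(struct unit *u) { u->refcount++; }
static void unit_put(struct unit *u) { assert(u->refcount > 0); u->refcount--; }

/* submission path: take the reference before the request goes asynchronous */
static void submit(struct fsf_req *req, struct unit *u)
{
    unit_get(u);
    req->unit = u;
}

/* completion path: drop the reference only after the handler has run */
static void complete_req(struct fsf_req *req)
{
    struct unit *u = req->unit;

    /* ...per-command completion work would happen here... */
    req->unit = NULL;
    unit_put(u);
}

int main(void)
{
    struct unit u = { .refcount = 1 };
    struct fsf_req req;

    submit(&req, &u);
    complete_req(&req);
    printf("refcount back to %d\n", u.refcount);
    return 0;
}

The only point being illustrated is ordering: the reference is dropped in the completion path, after the last access to the unit.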
@@ -3970,10 +3968,10 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
u32 sns_len;
char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
unsigned long flags;
- struct zfcp_unit *unit = fsf_req->data.send_fcp_command_task.unit;
+ struct zfcp_unit *unit = fsf_req->unit;
read_lock_irqsave(&fsf_req->adapter->abort_lock, flags);
- scpnt = fsf_req->data.send_fcp_command_task.scsi_cmnd;
+ scpnt = (struct scsi_cmnd *) fsf_req->data;
if (unlikely(!scpnt)) {
ZFCP_LOG_DEBUG
("Command with fsf_req %p is not associated to "
@@ -4043,7 +4041,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
(char *) &fsf_req->qtcb->
bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
- zfcp_cmd_dbf_event_fsf("clenmis", fsf_req, NULL, 0);
set_host_byte(&scpnt->result, DID_ERROR);
goto skip_fsfstatus;
case RSP_CODE_FIELD_INVALID:
@@ -4062,7 +4059,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
(char *) &fsf_req->qtcb->
bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
set_host_byte(&scpnt->result, DID_ERROR);
- zfcp_cmd_dbf_event_fsf("codeinv", fsf_req, NULL, 0);
goto skip_fsfstatus;
case RSP_CODE_RO_MISMATCH:
/* hardware bug */
@@ -4079,7 +4075,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
(char *) &fsf_req->qtcb->
bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
- zfcp_cmd_dbf_event_fsf("codemism", fsf_req, NULL, 0);
set_host_byte(&scpnt->result, DID_ERROR);
goto skip_fsfstatus;
default:
@@ -4096,7 +4091,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
(char *) &fsf_req->qtcb->
bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
- zfcp_cmd_dbf_event_fsf("undeffcp", fsf_req, NULL, 0);
set_host_byte(&scpnt->result, DID_ERROR);
goto skip_fsfstatus;
}
@@ -4158,19 +4152,17 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
skip_fsfstatus:
ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result);
- zfcp_cmd_dbf_event_scsi("response", scpnt);
+ if (scpnt->result != 0)
+ zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt);
+ else if (scpnt->retries > 0)
+ zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt);
+ else
+ zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt);
/* cleanup pointer (need this especially for abort) */
scpnt->host_scribble = NULL;
- /*
- * NOTE:
- * according to the outcome of a discussion on linux-scsi we
- * don't need to grab the io_request_lock here since we use
- * the new eh
- */
/* always call back */
-
(scpnt->scsi_done) (scpnt);
/*
@@ -4198,8 +4190,7 @@ zfcp_fsf_send_fcp_command_task_management_handler(struct zfcp_fsf_req *fsf_req)
struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
&(fsf_req->qtcb->bottom.io.fcp_rsp);
char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
- struct zfcp_unit *unit =
- fsf_req->data.send_fcp_command_task_management.unit;
+ struct zfcp_unit *unit = (struct zfcp_unit *) fsf_req->data;
del_timer(&fsf_req->adapter->scsi_er_timer);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
@@ -4276,7 +4267,7 @@ zfcp_fsf_control_file(struct zfcp_adapter *adapter,
int direction;
int retval = 0;
- if (!(adapter->supported_features & FSF_FEATURE_CFDC)) {
+ if (!(adapter->adapter_features & FSF_FEATURE_CFDC)) {
ZFCP_LOG_INFO("cfdc not supported (adapter %s)\n",
zfcp_get_busid_by_adapter(adapter));
retval = -EOPNOTSUPP;
@@ -4549,52 +4540,6 @@ skip_fsfstatus:
return retval;
}
-
-/*
- * function: zfcp_fsf_req_wait_and_cleanup
- *
- * purpose:
- *
- * FIXME(design): signal seems to be <0 !!!
- * returns: 0 - request completed (*status is valid), cleanup succ.
- * <0 - request completed (*status is valid), cleanup failed
- * >0 - signal which interrupted waiting (*status invalid),
- * request not completed, no cleanup
- *
- * *status is a copy of status of completed fsf_req
- */
-int
-zfcp_fsf_req_wait_and_cleanup(struct zfcp_fsf_req *fsf_req,
- int interruptible, u32 * status)
-{
- int retval = 0;
- int signal = 0;
-
- if (interruptible) {
- __wait_event_interruptible(fsf_req->completion_wq,
- fsf_req->status &
- ZFCP_STATUS_FSFREQ_COMPLETED,
- signal);
- if (signal) {
- ZFCP_LOG_DEBUG("Caught signal %i while waiting for the "
- "completion of the request at %p\n",
- signal, fsf_req);
- retval = signal;
- goto out;
- }
- } else {
- __wait_event(fsf_req->completion_wq,
- fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
- }
-
- *status = fsf_req->status;
-
- /* cleanup request */
- zfcp_fsf_req_free(fsf_req);
- out:
- return retval;
-}
-
static inline int
zfcp_fsf_req_sbal_check(unsigned long *flags,
struct zfcp_qdio_queue *queue, int needed)
@@ -4610,15 +4555,16 @@ zfcp_fsf_req_sbal_check(unsigned long *flags,
* set qtcb pointer in fsf_req and initialize QTCB
*/
static inline void
-zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req, u32 fsf_cmd)
+zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
{
if (likely(fsf_req->qtcb != NULL)) {
+ fsf_req->qtcb->prefix.req_seq_no = fsf_req->adapter->fsf_req_seq_no;
fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req;
fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION;
- fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
+ fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_req->fsf_command];
fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION;
fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req;
- fsf_req->qtcb->header.fsf_command = fsf_cmd;
+ fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command;
}
}
@@ -4686,7 +4632,10 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
goto failed_fsf_req;
}
- zfcp_fsf_req_qtcb_init(fsf_req, fsf_cmd);
+ fsf_req->adapter = adapter;
+ fsf_req->fsf_command = fsf_cmd;
+
+ zfcp_fsf_req_qtcb_init(fsf_req);
/* initialize waitqueue which may be used to wait on
this request completion */
@@ -4708,8 +4657,10 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
goto failed_sbals;
}
- fsf_req->adapter = adapter; /* pointer to "parent" adapter */
- fsf_req->fsf_command = fsf_cmd;
+ if (fsf_req->qtcb) {
+ fsf_req->seq_no = adapter->fsf_req_seq_no;
+ fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
+ }
fsf_req->sbal_number = 1;
fsf_req->sbal_first = req_queue->free_index;
fsf_req->sbal_curr = req_queue->free_index;
@@ -4760,9 +4711,9 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
struct zfcp_adapter *adapter;
struct zfcp_qdio_queue *req_queue;
volatile struct qdio_buffer_element *sbale;
+ int inc_seq_no;
int new_distance_from_int;
unsigned long flags;
- int inc_seq_no = 1;
int retval = 0;
adapter = fsf_req->adapter;
@@ -4776,23 +4727,13 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr,
sbale[1].length);
- /* set sequence counter in QTCB */
- if (likely(fsf_req->qtcb)) {
- fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
- fsf_req->seq_no = adapter->fsf_req_seq_no;
- ZFCP_LOG_TRACE("FSF request %p of adapter %s gets "
- "FSF sequence counter value of %i\n",
- fsf_req,
- zfcp_get_busid_by_adapter(adapter),
- fsf_req->qtcb->prefix.req_seq_no);
- } else
- inc_seq_no = 0;
-
/* put allocated FSF request at list tail */
spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head);
spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
+ inc_seq_no = (fsf_req->qtcb != NULL);
+
/* figure out expiration time of timeout and start timeout */
if (unlikely(timer)) {
timer->expires += jiffies;
@@ -4822,6 +4763,8 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap if needed */
new_distance_from_int = zfcp_qdio_determine_pci(req_queue, fsf_req);
+ fsf_req->issued = get_clock();
+
retval = do_QDIO(adapter->ccw_device,
QDIO_FLAG_SYNC_OUTPUT,
0, fsf_req->sbal_first, fsf_req->sbal_number, NULL);
@@ -4860,15 +4803,11 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
* routines resulting in missing sequence counter values
* otherwise,
*/
+
/* Don't increase for unsolicited status */
- if (likely(inc_seq_no)) {
+ if (inc_seq_no)
adapter->fsf_req_seq_no++;
- ZFCP_LOG_TRACE
- ("FSF sequence counter value of adapter %s "
- "increased to %i\n",
- zfcp_get_busid_by_adapter(adapter),
- adapter->fsf_req_seq_no);
- }
+
/* count FSF requests pending */
atomic_inc(&adapter->fsf_reqs_active);
}
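
With the hunks above, the FSF sequence number is copied into the request when it is created, and the adapter-wide counter is advanced in the send path only if the request carries a QTCB; unsolicited status reads have none and must not consume a number. A compressed illustration of that rule, with invented stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct adapter { unsigned int fsf_req_seq_no; };

struct req {
    struct adapter *adapter;
    bool has_qtcb;              /* unsolicited status reads have no QTCB */
    unsigned int seq_no;
};

/* create: copy the current counter into the request if it has a QTCB */
static void req_create(struct req *r, struct adapter *a, bool has_qtcb)
{
    r->adapter = a;
    r->has_qtcb = has_qtcb;
    if (has_qtcb)
        r->seq_no = a->fsf_req_seq_no;
}

/* send: advance the adapter counter only for QTCB-carrying requests */
static void req_send(struct req *r)
{
    if (r->has_qtcb)
        r->adapter->fsf_req_seq_no++;
}

int main(void)
{
    struct adapter a = { .fsf_req_seq_no = 7 };
    struct req cmd, status_read;

    req_create(&cmd, &a, true);
    req_send(&cmd);
    req_create(&status_read, &a, false);    /* counter stays untouched */
    req_send(&status_read);
    printf("seq_no now %u (command used %u)\n", a.fsf_req_seq_no, cmd.seq_no);
    return 0;
}

Unsolicited status reads therefore never consume a sequence number, which is exactly what the inc_seq_no test in zfcp_fsf_req_send() preserves.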
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 07140dfda2a..48719f05595 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -116,6 +116,7 @@
#define FSF_INVALID_COMMAND_OPTION 0x000000E5
/* #define FSF_ERROR 0x000000FF */
+#define FSF_PROT_STATUS_QUAL_SIZE 16
#define FSF_STATUS_QUALIFIER_SIZE 16
/* FSF status qualifier, recommendations */
@@ -139,9 +140,18 @@
#define FSF_SQ_CFDC_SUBTABLE_LUN 0x0004
/* FSF status qualifier (most significant 4 bytes), local link down */
-#define FSF_PSQ_LINK_NOLIGHT 0x00000004
-#define FSF_PSQ_LINK_WRAPPLUG 0x00000008
-#define FSF_PSQ_LINK_NOFCP 0x00000010
+#define FSF_PSQ_LINK_NO_LIGHT 0x00000004
+#define FSF_PSQ_LINK_WRAP_PLUG 0x00000008
+#define FSF_PSQ_LINK_NO_FCP 0x00000010
+#define FSF_PSQ_LINK_FIRMWARE_UPDATE 0x00000020
+#define FSF_PSQ_LINK_INVALID_WWPN 0x00000100
+#define FSF_PSQ_LINK_NO_NPIV_SUPPORT 0x00000200
+#define FSF_PSQ_LINK_NO_FCP_RESOURCES 0x00000400
+#define FSF_PSQ_LINK_NO_FABRIC_RESOURCES 0x00000800
+#define FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE 0x00001000
+#define FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED 0x00002000
+#define FSF_PSQ_LINK_MODE_TABLE_CURRUPTED 0x00004000
+#define FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT 0x00008000
/* payload size in status read buffer */
#define FSF_STATUS_READ_PAYLOAD_SIZE 4032
@@ -154,15 +164,21 @@
#define FSF_STATUS_READ_INCOMING_ELS 0x00000002
#define FSF_STATUS_READ_SENSE_DATA_AVAIL 0x00000003
#define FSF_STATUS_READ_BIT_ERROR_THRESHOLD 0x00000004
-#define FSF_STATUS_READ_LINK_DOWN 0x00000005 /* FIXME: really? */
+#define FSF_STATUS_READ_LINK_DOWN 0x00000005
#define FSF_STATUS_READ_LINK_UP 0x00000006
#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A
#define FSF_STATUS_READ_CFDC_HARDENED 0x0000000B
+#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C
/* status subtypes in status read buffer */
#define FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT 0x00000001
#define FSF_STATUS_READ_SUB_ERROR_PORT 0x00000002
+/* status subtypes for link down */
+#define FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK 0x00000000
+#define FSF_STATUS_READ_SUB_FDISC_FAILED 0x00000001
+#define FSF_STATUS_READ_SUB_FIRMWARE_UPDATE 0x00000002
+
/* status subtypes for CFDC */
#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE 0x00000002
#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2 0x0000000F
@@ -193,11 +209,15 @@
#define FSF_QTCB_LOG_SIZE 1024
/* channel features */
-#define FSF_FEATURE_QTCB_SUPPRESSION 0x00000001
#define FSF_FEATURE_CFDC 0x00000002
#define FSF_FEATURE_LUN_SHARING 0x00000004
#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
+#define FSF_FEATURE_UPDATE_ALERT 0x00000100
+
+/* host connection features */
+#define FSF_FEATURE_NPIV_MODE 0x00000001
+#define FSF_FEATURE_VM_ASSIGNED_WWPN 0x00000002
/* option */
#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001
@@ -305,15 +325,23 @@ struct fsf_qual_sequence_error {
u32 res1[3];
} __attribute__ ((packed));
-struct fsf_qual_locallink_error {
- u32 code;
- u32 res1[3];
+struct fsf_link_down_info {
+ u32 error_code;
+ u32 res1;
+ u8 res2[2];
+ u8 primary_status;
+ u8 ioerr_code;
+ u8 action_code;
+ u8 reason_code;
+ u8 explanation_code;
+ u8 vendor_specific_code;
} __attribute__ ((packed));
union fsf_prot_status_qual {
+ u64 doubleword[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u64)];
struct fsf_qual_version_error version_error;
struct fsf_qual_sequence_error sequence_error;
- struct fsf_qual_locallink_error locallink_error;
+ struct fsf_link_down_info link_down_info;
} __attribute__ ((packed));
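
The new struct fsf_link_down_info is overlaid on the 16-byte protocol status qualifier through the union above, so a handler can either read the structured link-down details or dump the raw doublewords for tracing. A simplified userspace model of that overlay (field layout copied from the declaration above, everything else invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct link_down_info {
    uint32_t error_code;
    uint32_t res1;
    uint8_t  res2[2];
    uint8_t  primary_status;
    uint8_t  ioerr_code;
    uint8_t  action_code;
    uint8_t  reason_code;
    uint8_t  explanation_code;
    uint8_t  vendor_specific_code;
} __attribute__((packed));

union prot_status_qual {
    uint64_t doubleword[2];             /* raw 16-byte view for tracing */
    struct link_down_info link_down_info;
} __attribute__((packed));

int main(void)
{
    union prot_status_qual qual;

    memset(&qual, 0, sizeof(qual));
    qual.link_down_info.error_code = 0x00000010;    /* e.g. "no FCP" */

    printf("sizeof(qual) = %zu, error_code = 0x%08x, raw[0] = 0x%016llx\n",
           sizeof(qual), qual.link_down_info.error_code,
           (unsigned long long)qual.doubleword[0]);
    return 0;
}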
struct fsf_qtcb_prefix {
@@ -331,7 +359,9 @@ union fsf_status_qual {
u8 byte[FSF_STATUS_QUALIFIER_SIZE];
u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)];
u32 word[FSF_STATUS_QUALIFIER_SIZE / sizeof (u32)];
+ u64 doubleword[FSF_STATUS_QUALIFIER_SIZE / sizeof(u64)];
struct fsf_queue_designator fsf_queue_designator;
+ struct fsf_link_down_info link_down_info;
} __attribute__ ((packed));
struct fsf_qtcb_header {
@@ -406,8 +436,8 @@ struct fsf_qtcb_bottom_config {
u32 low_qtcb_version;
u32 max_qtcb_size;
u32 max_data_transfer_size;
- u32 supported_features;
- u8 res1[4];
+ u32 adapter_features;
+ u32 connection_features;
u32 fc_topology;
u32 fc_link_speed;
u32 adapter_type;
@@ -425,7 +455,7 @@ struct fsf_qtcb_bottom_config {
} __attribute__ ((packed));
struct fsf_qtcb_bottom_port {
- u8 res1[8];
+ u64 wwpn;
u32 fc_port_id;
u32 port_type;
u32 port_state;
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 24e16ec331d..d719f66a29a 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -54,8 +54,7 @@ static inline int zfcp_qdio_sbals_from_buffer
static qdio_handler_t zfcp_qdio_request_handler;
static qdio_handler_t zfcp_qdio_response_handler;
static int zfcp_qdio_handler_error_check(struct zfcp_adapter *,
- unsigned int,
- unsigned int, unsigned int);
+ unsigned int, unsigned int, unsigned int, int, int);
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
@@ -214,22 +213,12 @@ zfcp_qdio_allocate(struct zfcp_adapter *adapter)
*
*/
static inline int
-zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter,
- unsigned int status,
- unsigned int qdio_error, unsigned int siga_error)
+zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status,
+ unsigned int qdio_error, unsigned int siga_error,
+ int first_element, int elements_processed)
{
int retval = 0;
- if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_TRACE)) {
- if (status & QDIO_STATUS_INBOUND_INT) {
- ZFCP_LOG_TRACE("status is"
- " QDIO_STATUS_INBOUND_INT \n");
- }
- if (status & QDIO_STATUS_OUTBOUND_INT) {
- ZFCP_LOG_TRACE("status is"
- " QDIO_STATUS_OUTBOUND_INT \n");
- }
- }
if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
retval = -EIO;
@@ -237,9 +226,10 @@ zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter,
"qdio_error=0x%x, siga_error=0x%x)\n",
status, qdio_error, siga_error);
- /* Restarting IO on the failed adapter from scratch */
- debug_text_event(adapter->erp_dbf, 1, "qdio_err");
+ zfcp_hba_dbf_event_qdio(adapter, status, qdio_error, siga_error,
+ first_element, elements_processed);
/*
+ * Restarting IO on the failed adapter from scratch.
* Since we have been using this adapter, it is safe to assume
* that it is not failed but recoverable. The card seems to
* report link-up events by self-initiated queue shutdown.
@@ -282,7 +272,8 @@ zfcp_qdio_request_handler(struct ccw_device *ccw_device,
first_element, elements_processed);
if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
- siga_error)))
+ siga_error, first_element,
+ elements_processed)))
goto out;
/*
* we stored address of struct zfcp_adapter data structure
@@ -334,7 +325,8 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device,
queue = &adapter->response_queue;
if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
- siga_error)))
+ siga_error, first_element,
+ elements_processed)))
goto out;
/*
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 31a76065cf2..3dcd1bfba3b 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -44,7 +44,8 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *);
static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *);
static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *);
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *);
-static int zfcp_task_management_function(struct zfcp_unit *, u8);
+static int zfcp_task_management_function(struct zfcp_unit *, u8,
+ struct scsi_cmnd *);
static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int, scsi_id_t,
scsi_lun_t);
@@ -242,7 +243,10 @@ static void
zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
{
set_host_byte(&scpnt->result, result);
- zfcp_cmd_dbf_event_scsi("failing", scpnt);
+ if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
+ zfcp_scsi_dbf_event_result("fail", 4,
+ (struct zfcp_adapter*) scpnt->device->host->hostdata[0],
+ scpnt);
/* return directly */
scpnt->scsi_done(scpnt);
}
@@ -414,67 +418,38 @@ zfcp_port_lookup(struct zfcp_adapter *adapter, int channel, scsi_id_t id)
return (struct zfcp_port *) NULL;
}
-/*
- * function: zfcp_scsi_eh_abort_handler
- *
- * purpose: tries to abort the specified (timed out) SCSI command
- *
- * note: We do not need to care for a SCSI command which completes
- * normally but late during this abort routine runs.
- * We are allowed to return late commands to the SCSI stack.
- * It tracks the state of commands and will handle late commands.
- * (Usually, the normal completion of late commands is ignored with
- * respect to the running abort operation. Grep for 'done_late'
- * in the SCSI stacks sources.)
+/**
+ * zfcp_scsi_eh_abort_handler - abort the specified SCSI command
+ * @scpnt: pointer to scsi_cmnd to be aborted
+ * Return: SUCCESS - command has been aborted and cleaned up in internal
+ * bookkeeping, SCSI stack won't be called for aborted command
+ * FAILED - otherwise
*
- * returns: SUCCESS - command has been aborted and cleaned up in internal
- * bookkeeping,
- * SCSI stack won't be called for aborted command
- * FAILED - otherwise
+ * We do not need to care for a SCSI command which completes normally
+ * but late during this abort routine runs. We are allowed to return
+ * late commands to the SCSI stack. It tracks the state of commands and
+ * will handle late commands. (Usually, the normal completion of late
+ * commands is ignored with respect to the running abort operation.)
*/
int
-__zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
+zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
{
+ struct Scsi_Host *scsi_host;
+ struct zfcp_adapter *adapter;
+ struct zfcp_unit *unit;
int retval = SUCCESS;
- struct zfcp_fsf_req *new_fsf_req, *old_fsf_req;
- struct zfcp_adapter *adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
- struct zfcp_unit *unit = (struct zfcp_unit *) scpnt->device->hostdata;
- struct zfcp_port *port = unit->port;
- struct Scsi_Host *scsi_host = scpnt->device->host;
- union zfcp_req_data *req_data = NULL;
+ struct zfcp_fsf_req *new_fsf_req = NULL;
+ struct zfcp_fsf_req *old_fsf_req;
unsigned long flags;
- u32 status = 0;
-
- /* the components of a abort_dbf record (fixed size record) */
- u64 dbf_scsi_cmnd = (unsigned long) scpnt;
- char dbf_opcode[ZFCP_ABORT_DBF_LENGTH];
- wwn_t dbf_wwn = port->wwpn;
- fcp_lun_t dbf_fcp_lun = unit->fcp_lun;
- u64 dbf_retries = scpnt->retries;
- u64 dbf_allowed = scpnt->allowed;
- u64 dbf_timeout = 0;
- u64 dbf_fsf_req = 0;
- u64 dbf_fsf_status = 0;
- u64 dbf_fsf_qual[2] = { 0, 0 };
- char dbf_result[ZFCP_ABORT_DBF_LENGTH] = "##undef";
-
- memset(dbf_opcode, 0, ZFCP_ABORT_DBF_LENGTH);
- memcpy(dbf_opcode,
- scpnt->cmnd,
- min(scpnt->cmd_len, (unsigned char) ZFCP_ABORT_DBF_LENGTH));
+
+ scsi_host = scpnt->device->host;
+ adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
+ unit = (struct zfcp_unit *) scpnt->device->hostdata;
ZFCP_LOG_INFO("aborting scsi_cmnd=%p on adapter %s\n",
scpnt, zfcp_get_busid_by_adapter(adapter));
- spin_unlock_irq(scsi_host->host_lock);
-
- /*
- * Race condition between normal (late) completion and abort has
- * to be avoided.
- * The entirity of all accesses to scsi_req have to be atomic.
- * scsi_req is usually part of the fsf_req and thus we block the
- * release of fsf_req as long as we need to access scsi_req.
- */
+ /* avoid race condition between late normal completion and abort */
write_lock_irqsave(&adapter->abort_lock, flags);
/*
@@ -484,144 +459,47 @@ __zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
* this routine returns. (scpnt is parameter passed to this routine
* and must not disappear during abort even on late completion.)
*/
- req_data = (union zfcp_req_data *) scpnt->host_scribble;
- /* DEBUG */
- ZFCP_LOG_DEBUG("req_data=%p\n", req_data);
- if (!req_data) {
- ZFCP_LOG_DEBUG("late command completion overtook abort\n");
- /*
- * That's it.
- * Do not initiate abort but return SUCCESS.
- */
- write_unlock_irqrestore(&adapter->abort_lock, flags);
- retval = SUCCESS;
- strncpy(dbf_result, "##late1", ZFCP_ABORT_DBF_LENGTH);
- goto out;
- }
-
- /* Figure out which fsf_req needs to be aborted. */
- old_fsf_req = req_data->send_fcp_command_task.fsf_req;
-
- dbf_fsf_req = (unsigned long) old_fsf_req;
- dbf_timeout =
- (jiffies - req_data->send_fcp_command_task.start_jiffies) / HZ;
-
- ZFCP_LOG_DEBUG("old_fsf_req=%p\n", old_fsf_req);
+ old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble;
if (!old_fsf_req) {
write_unlock_irqrestore(&adapter->abort_lock, flags);
- ZFCP_LOG_NORMAL("bug: no old fsf request found\n");
- ZFCP_LOG_NORMAL("req_data:\n");
- ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
- (char *) req_data, sizeof (union zfcp_req_data));
- ZFCP_LOG_NORMAL("scsi_cmnd:\n");
- ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
- (char *) scpnt, sizeof (struct scsi_cmnd));
- retval = FAILED;
- strncpy(dbf_result, "##bug:r", ZFCP_ABORT_DBF_LENGTH);
+ zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, new_fsf_req);
+ retval = SUCCESS;
goto out;
}
- old_fsf_req->data.send_fcp_command_task.scsi_cmnd = NULL;
- /* mark old request as being aborted */
+ old_fsf_req->data = 0;
old_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
- /*
- * We have to collect all information (e.g. unit) needed by
- * zfcp_fsf_abort_fcp_command before calling that routine
- * since that routine is not allowed to access
- * fsf_req which it is going to abort.
- * This is because of we need to release fsf_req_list_lock
- * before calling zfcp_fsf_abort_fcp_command.
- * Since this lock will not be held, fsf_req may complete
- * late and may be released meanwhile.
- */
- ZFCP_LOG_DEBUG("unit 0x%016Lx (%p)\n", unit->fcp_lun, unit);
- /*
- * We block (call schedule)
- * That's why we must release the lock and enable the
- * interrupts before.
- * On the other hand we do not need the lock anymore since
- * all critical accesses to scsi_req are done.
- */
+ /* don't access old_fsf_req after releasing the abort_lock */
write_unlock_irqrestore(&adapter->abort_lock, flags);
/* call FSF routine which does the abort */
new_fsf_req = zfcp_fsf_abort_fcp_command((unsigned long) old_fsf_req,
adapter, unit, 0);
- ZFCP_LOG_DEBUG("new_fsf_req=%p\n", new_fsf_req);
if (!new_fsf_req) {
+ ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n");
retval = FAILED;
- ZFCP_LOG_NORMAL("error: initiation of Abort FCP Cmnd "
- "failed\n");
- strncpy(dbf_result, "##nores", ZFCP_ABORT_DBF_LENGTH);
goto out;
}
/* wait for completion of abort */
- ZFCP_LOG_DEBUG("waiting for cleanup...\n");
-#if 1
- /*
- * FIXME:
- * copying zfcp_fsf_req_wait_and_cleanup code is not really nice
- */
__wait_event(new_fsf_req->completion_wq,
new_fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
- status = new_fsf_req->status;
- dbf_fsf_status = new_fsf_req->qtcb->header.fsf_status;
- /*
- * Ralphs special debug load provides timestamps in the FSF
- * status qualifier. This might be specified later if being
- * useful for debugging aborts.
- */
- dbf_fsf_qual[0] =
- *(u64 *) & new_fsf_req->qtcb->header.fsf_status_qual.word[0];
- dbf_fsf_qual[1] =
- *(u64 *) & new_fsf_req->qtcb->header.fsf_status_qual.word[2];
- zfcp_fsf_req_free(new_fsf_req);
-#else
- retval = zfcp_fsf_req_wait_and_cleanup(new_fsf_req,
- ZFCP_UNINTERRUPTIBLE, &status);
-#endif
- ZFCP_LOG_DEBUG("Waiting for cleanup complete, status=0x%x\n", status);
+
/* status should be valid since signals were not permitted */
- if (status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
+ if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
+ zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req);
retval = SUCCESS;
- strncpy(dbf_result, "##succ", ZFCP_ABORT_DBF_LENGTH);
- } else if (status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
+ } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
+ zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req);
retval = SUCCESS;
- strncpy(dbf_result, "##late2", ZFCP_ABORT_DBF_LENGTH);
} else {
+ zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req);
retval = FAILED;
- strncpy(dbf_result, "##fail", ZFCP_ABORT_DBF_LENGTH);
}
-
+ zfcp_fsf_req_free(new_fsf_req);
out:
- debug_event(adapter->abort_dbf, 1, &dbf_scsi_cmnd, sizeof (u64));
- debug_event(adapter->abort_dbf, 1, &dbf_opcode, ZFCP_ABORT_DBF_LENGTH);
- debug_event(adapter->abort_dbf, 1, &dbf_wwn, sizeof (wwn_t));
- debug_event(adapter->abort_dbf, 1, &dbf_fcp_lun, sizeof (fcp_lun_t));
- debug_event(adapter->abort_dbf, 1, &dbf_retries, sizeof (u64));
- debug_event(adapter->abort_dbf, 1, &dbf_allowed, sizeof (u64));
- debug_event(adapter->abort_dbf, 1, &dbf_timeout, sizeof (u64));
- debug_event(adapter->abort_dbf, 1, &dbf_fsf_req, sizeof (u64));
- debug_event(adapter->abort_dbf, 1, &dbf_fsf_status, sizeof (u64));
- debug_event(adapter->abort_dbf, 1, &dbf_fsf_qual[0], sizeof (u64));
- debug_event(adapter->abort_dbf, 1, &dbf_fsf_qual[1], sizeof (u64));
- debug_text_event(adapter->abort_dbf, 1, dbf_result);
-
- spin_lock_irq(scsi_host->host_lock);
return retval;
}
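
After this rewrite the abort handler reaches the FSF request through scsi_cmnd->host_scribble, clears the request's back-pointer and marks it aborting under abort_lock so that a late normal completion cannot race with the abort, and only then issues the abort and waits for it. A stripped-down model of that locking order, using a pthread mutex and stand-in structures in place of the real zfcp types:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct fsf_req {
    void *data;                 /* back-pointer to the SCSI command, or NULL */
    int   aborting;
};

struct scsi_cmnd {
    struct fsf_req *host_scribble;  /* request to abort, or NULL if completed */
};

static pthread_mutex_t abort_lock = PTHREAD_MUTEX_INITIALIZER;

/* returns 1 for "success" (nothing to do or abort issued), 0 for failure */
static int eh_abort(struct scsi_cmnd *cmd)
{
    struct fsf_req *old;

    pthread_mutex_lock(&abort_lock);
    old = cmd->host_scribble;
    if (!old) {                 /* late completion overtook the abort */
        pthread_mutex_unlock(&abort_lock);
        return 1;
    }
    old->data = NULL;           /* completion must no longer touch the command */
    old->aborting = 1;
    pthread_mutex_unlock(&abort_lock);

    /* ...issue the abort request and wait for it, outside the lock... */
    return 1;
}

int main(void)
{
    struct fsf_req req = { .data = NULL, .aborting = 0 };
    struct scsi_cmnd cmd = { .host_scribble = &req };

    printf("abort: %s\n", eh_abort(&cmd) ? "handled" : "failed");
    return 0;
}

The real handler additionally waits on the new request's completion_wq and translates the abort status into SUCCESS or FAILED, which is omitted here.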
-int
-zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
-{
- int rc;
- struct Scsi_Host *scsi_host = scpnt->device->host;
- spin_lock_irq(scsi_host->host_lock);
- rc = __zfcp_scsi_eh_abort_handler(scpnt);
- spin_unlock_irq(scsi_host->host_lock);
- return rc;
-}
-
/*
* function: zfcp_scsi_eh_device_reset_handler
*
@@ -651,8 +529,9 @@ zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
*/
if (!atomic_test_mask(ZFCP_STATUS_UNIT_NOTSUPPUNITRESET,
&unit->status)) {
- retval =
- zfcp_task_management_function(unit, FCP_LOGICAL_UNIT_RESET);
+ retval = zfcp_task_management_function(unit,
+ FCP_LOGICAL_UNIT_RESET,
+ scpnt);
if (retval) {
ZFCP_LOG_DEBUG("unit reset failed (unit=%p)\n", unit);
if (retval == -ENOTSUPP)
@@ -668,7 +547,7 @@ zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
goto out;
}
}
- retval = zfcp_task_management_function(unit, FCP_TARGET_RESET);
+ retval = zfcp_task_management_function(unit, FCP_TARGET_RESET, scpnt);
if (retval) {
ZFCP_LOG_DEBUG("target reset failed (unit=%p)\n", unit);
retval = FAILED;
@@ -681,12 +560,12 @@ zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
}
static int
-zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags)
+zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
+ struct scsi_cmnd *scpnt)
{
struct zfcp_adapter *adapter = unit->port->adapter;
- int retval;
- int status;
struct zfcp_fsf_req *fsf_req;
+ int retval = 0;
/* issue task management function */
fsf_req = zfcp_fsf_send_fcp_command_task_management
@@ -696,70 +575,63 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags)
"failed for unit 0x%016Lx on port 0x%016Lx on "
"adapter %s\n", unit->fcp_lun, unit->port->wwpn,
zfcp_get_busid_by_adapter(adapter));
+ zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit, scpnt);
retval = -ENOMEM;
goto out;
}
- retval = zfcp_fsf_req_wait_and_cleanup(fsf_req,
- ZFCP_UNINTERRUPTIBLE, &status);
+ __wait_event(fsf_req->completion_wq,
+ fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+
/*
* check completion status of task management function
- * (status should always be valid since no signals permitted)
*/
- if (status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED)
+ if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
+ zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt);
retval = -EIO;
- else if (status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP)
+ } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) {
+ zfcp_scsi_dbf_event_devreset("nsup", tm_flags, unit, scpnt);
retval = -ENOTSUPP;
- else
- retval = 0;
+ } else
+ zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt);
+
+ zfcp_fsf_req_free(fsf_req);
out:
return retval;
}
-/*
- * function: zfcp_scsi_eh_bus_reset_handler
- *
- * purpose:
- *
- * returns:
+/**
+ * zfcp_scsi_eh_bus_reset_handler - reset bus (reopen adapter)
*/
int
zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt)
{
- int retval = 0;
- struct zfcp_unit *unit;
+ struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata;
+ struct zfcp_adapter *adapter = unit->port->adapter;
- unit = (struct zfcp_unit *) scpnt->device->hostdata;
ZFCP_LOG_NORMAL("bus reset because of problems with "
"unit 0x%016Lx\n", unit->fcp_lun);
- zfcp_erp_adapter_reopen(unit->port->adapter, 0);
- zfcp_erp_wait(unit->port->adapter);
- retval = SUCCESS;
+ zfcp_erp_adapter_reopen(adapter, 0);
+ zfcp_erp_wait(adapter);
- return retval;
+ return SUCCESS;
}
-/*
- * function: zfcp_scsi_eh_host_reset_handler
- *
- * purpose:
- *
- * returns:
+/**
+ * zfcp_scsi_eh_host_reset_handler - reset host (reopen adapter)
*/
int
zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
- int retval = 0;
- struct zfcp_unit *unit;
+ struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata;
+ struct zfcp_adapter *adapter = unit->port->adapter;
- unit = (struct zfcp_unit *) scpnt->device->hostdata;
ZFCP_LOG_NORMAL("host reset because of problems with "
"unit 0x%016Lx\n", unit->fcp_lun);
- zfcp_erp_adapter_reopen(unit->port->adapter, 0);
- zfcp_erp_wait(unit->port->adapter);
- retval = SUCCESS;
+ zfcp_erp_adapter_reopen(adapter, 0);
+ zfcp_erp_wait(adapter);
- return retval;
+ return SUCCESS;
}
/*
@@ -826,10 +698,16 @@ void
zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
{
struct Scsi_Host *shost;
+ struct zfcp_port *port;
shost = adapter->scsi_host;
if (!shost)
return;
+ read_lock_irq(&zfcp_data.config_lock);
+ list_for_each_entry(port, &adapter->port_list_head, list)
+ if (port->rport)
+ port->rport = NULL;
+ read_unlock_irq(&zfcp_data.config_lock);
fc_remove_host(shost);
scsi_remove_host(shost);
scsi_host_put(shost);
@@ -904,18 +782,6 @@ zfcp_get_node_name(struct scsi_target *starget)
read_unlock_irqrestore(&zfcp_data.config_lock, flags);
}
-void
-zfcp_set_fc_host_attrs(struct zfcp_adapter *adapter)
-{
- struct Scsi_Host *shost = adapter->scsi_host;
-
- fc_host_node_name(shost) = adapter->wwnn;
- fc_host_port_name(shost) = adapter->wwpn;
- strncpy(fc_host_serial_number(shost), adapter->serial_number,
- min(FC_SERIAL_NUMBER_SIZE, 32));
- fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
-}
-
struct fc_function_template zfcp_transport_functions = {
.get_starget_port_id = zfcp_get_port_id,
.get_starget_port_name = zfcp_get_port_name,
@@ -927,7 +793,10 @@ struct fc_function_template zfcp_transport_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
+ .show_host_maxframe_size = 1,
.show_host_serial_number = 1,
+ .show_host_speed = 1,
+ .show_host_port_id = 1,
};
/**
diff --git a/drivers/s390/scsi/zfcp_sysfs_adapter.c b/drivers/s390/scsi/zfcp_sysfs_adapter.c
index e7345a74800..0cd435280e7 100644
--- a/drivers/s390/scsi/zfcp_sysfs_adapter.c
+++ b/drivers/s390/scsi/zfcp_sysfs_adapter.c
@@ -62,21 +62,18 @@ static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, struct devi
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
-ZFCP_DEFINE_ADAPTER_ATTR(wwnn, "0x%016llx\n", adapter->wwnn);
-ZFCP_DEFINE_ADAPTER_ATTR(wwpn, "0x%016llx\n", adapter->wwpn);
-ZFCP_DEFINE_ADAPTER_ATTR(s_id, "0x%06x\n", adapter->s_id);
ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn);
ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn);
ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
+ZFCP_DEFINE_ADAPTER_ATTR(physical_wwpn, "0x%016llx\n", adapter->physical_wwpn);
+ZFCP_DEFINE_ADAPTER_ATTR(physical_s_id, "0x%06x\n", adapter->physical_s_id);
ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
-ZFCP_DEFINE_ADAPTER_ATTR(fc_link_speed, "%d Gb/s\n", adapter->fc_link_speed);
ZFCP_DEFINE_ADAPTER_ATTR(fc_service_class, "%d\n", adapter->fc_service_class);
ZFCP_DEFINE_ADAPTER_ATTR(fc_topology, "%s\n",
fc_topologies[adapter->fc_topology]);
ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n",
adapter->hardware_version);
-ZFCP_DEFINE_ADAPTER_ATTR(serial_number, "%17s\n", adapter->serial_number);
ZFCP_DEFINE_ADAPTER_ATTR(scsi_host_no, "0x%x\n", adapter->scsi_host_no);
ZFCP_DEFINE_ADAPTER_ATTR(in_recovery, "%d\n", atomic_test_mask
(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status));
@@ -255,21 +252,18 @@ static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_in_recovery.attr,
&dev_attr_port_remove.attr,
&dev_attr_port_add.attr,
- &dev_attr_wwnn.attr,
- &dev_attr_wwpn.attr,
- &dev_attr_s_id.attr,
&dev_attr_peer_wwnn.attr,
&dev_attr_peer_wwpn.attr,
&dev_attr_peer_d_id.attr,
+ &dev_attr_physical_wwpn.attr,
+ &dev_attr_physical_s_id.attr,
&dev_attr_card_version.attr,
&dev_attr_lic_version.attr,
- &dev_attr_fc_link_speed.attr,
&dev_attr_fc_service_class.attr,
&dev_attr_fc_topology.attr,
&dev_attr_scsi_host_no.attr,
&dev_attr_status.attr,
&dev_attr_hardware_version.attr,
- &dev_attr_serial_number.attr,
NULL
};
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index c932b3b9449..876d1de8480 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1109,15 +1109,6 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa
return (0);
}
-uint64_t
-ahc_linux_get_memsize(void)
-{
- struct sysinfo si;
-
- si_meminfo(&si);
- return ((uint64_t)si.totalram << PAGE_SHIFT);
-}
-
/*
* Place the SCSI bus into a known state by either resetting it,
* or forcing transfer negotiations on the next command to any
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index c5299626924..be9edbe26db 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -494,8 +494,6 @@ ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
int ahc_linux_register_host(struct ahc_softc *,
struct scsi_host_template *);
-uint64_t ahc_linux_get_memsize(void);
-
/*************************** Pretty Printing **********************************/
struct info_str {
char *buffer;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 0d44a6907dd..3ce77ddc889 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -180,6 +180,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ahc_pci_identity *entry;
char *name;
int error;
+ struct device *dev = &pdev->dev;
pci = pdev;
entry = ahc_find_pci_device(pci);
@@ -209,11 +210,12 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
if (sizeof(dma_addr_t) > 4
- && ahc_linux_get_memsize() > 0x80000000
- && pci_set_dma_mask(pdev, mask_39bit) == 0) {
+ && ahc->features & AHC_LARGE_SCBS
+ && dma_set_mask(dev, mask_39bit) == 0
+ && dma_get_required_mask(dev) > DMA_32BIT_MASK) {
ahc->flags |= AHC_39BIT_ADDRESSING;
} else {
- if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
+ if (dma_set_mask(dev, DMA_32BIT_MASK)) {
printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
return (-ENODEV);
}
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index e6153fe5842..a8cfbef304b 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -996,6 +996,7 @@ oktosend:
#ifdef ED_DBGP
printk("send_s870: prdaddr_2 0x%8x tmpcip %x target_id %d\n", dev->id[c][target_id].prdaddr,tmpcip,target_id);
#endif
+ dev->id[c][target_id].prdaddr = dev->id[c][target_id].prd_bus;
outl(dev->id[c][target_id].prdaddr, tmpcip);
tmpcip = tmpcip - 2;
outb(0x06, tmpcip);
@@ -2572,7 +2573,7 @@ static void atp870u_free_tables(struct Scsi_Host *host)
for (k = 0; k < 16; k++) {
if (!atp_dev->id[j][k].prd_table)
continue;
- pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prdaddr);
+ pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
atp_dev->id[j][k].prd_table = NULL;
}
}
@@ -2584,12 +2585,13 @@ static int atp870u_init_tables(struct Scsi_Host *host)
int c,k;
for(c=0;c < 2;c++) {
for(k=0;k<16;k++) {
- atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prdaddr));
+ atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prd_bus));
if (!atp_dev->id[c][k].prd_table) {
printk("atp870u_init_tables fail\n");
atp870u_free_tables(host);
return -ENOMEM;
}
+ atp_dev->id[c][k].prdaddr = atp_dev->id[c][k].prd_bus;
atp_dev->id[c][k].devsp=0x20;
atp_dev->id[c][k].devtype = 0x7f;
atp_dev->id[c][k].curr_req = NULL;
diff --git a/drivers/scsi/atp870u.h b/drivers/scsi/atp870u.h
index 89f43af39cf..62bae64a01c 100644
--- a/drivers/scsi/atp870u.h
+++ b/drivers/scsi/atp870u.h
@@ -54,8 +54,9 @@ struct atp_unit
unsigned long tran_len;
unsigned long last_len;
unsigned char *prd_pos;
- unsigned char *prd_table;
- dma_addr_t prdaddr;
+ unsigned char *prd_table; /* Kernel address of PRD table */
+ dma_addr_t prd_bus; /* Bus address of PRD */
+ dma_addr_t prdaddr; /* Dynamically updated in driver */
struct scsi_cmnd *curr_req;
} id[2][16];
struct Scsi_Host *host;
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c
index fa652f8aa64..d59d449a9e4 100644
--- a/drivers/scsi/fd_mcs.c
+++ b/drivers/scsi/fd_mcs.c
@@ -1360,3 +1360,5 @@ static Scsi_Host_Template driver_template = {
.use_clustering = DISABLE_CLUSTERING,
};
#include "scsi_module.c"
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 85503fad789..f2a72d33132 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -98,6 +98,7 @@ int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
switch (oldstate) {
case SHOST_CREATED:
case SHOST_RUNNING:
+ case SHOST_CANCEL_RECOVERY:
break;
default:
goto illegal;
@@ -107,12 +108,31 @@ int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
case SHOST_DEL:
switch (oldstate) {
case SHOST_CANCEL:
+ case SHOST_DEL_RECOVERY:
break;
default:
goto illegal;
}
break;
+ case SHOST_CANCEL_RECOVERY:
+ switch (oldstate) {
+ case SHOST_CANCEL:
+ case SHOST_RECOVERY:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+
+ case SHOST_DEL_RECOVERY:
+ switch (oldstate) {
+ case SHOST_CANCEL_RECOVERY:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
}
shost->shost_state = state;
return 0;
@@ -134,13 +154,24 @@ EXPORT_SYMBOL(scsi_host_set_state);
**/
void scsi_remove_host(struct Scsi_Host *shost)
{
+ unsigned long flags;
down(&shost->scan_mutex);
- scsi_host_set_state(shost, SHOST_CANCEL);
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_set_state(shost, SHOST_CANCEL))
+ if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ up(&shost->scan_mutex);
+ return;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
up(&shost->scan_mutex);
scsi_forget_host(shost);
scsi_proc_host_rm(shost);
- scsi_host_set_state(shost, SHOST_DEL);
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_set_state(shost, SHOST_DEL))
+ BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
+ spin_unlock_irqrestore(shost->host_lock, flags);
transport_unregister_device(&shost->shost_gendev);
class_device_unregister(&shost->shost_classdev);
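
scsi_remove_host() can now record the removal even while the error handler owns the host: when the plain CANCEL or DEL transition is refused, it retries with the corresponding *_RECOVERY state under the host lock. A compact sketch of that "try the normal state, fall back to the recovery variant" pattern, with a deliberately simplified transition table rather than the full scsi_host_set_state() rules:

#include <stdio.h>

enum host_state { RUNNING, RECOVERY, CANCEL, CANCEL_RECOVERY, DEL, DEL_RECOVERY };

/* 0 on success, -1 if the transition is illegal (mirrors the kernel convention) */
static int set_state(enum host_state *cur, enum host_state next)
{
    switch (next) {
    case CANCEL:
        if (*cur != RUNNING)
            return -1;
        break;
    case CANCEL_RECOVERY:
        if (*cur != RECOVERY && *cur != CANCEL)
            return -1;
        break;
    case DEL:
        if (*cur != CANCEL && *cur != DEL_RECOVERY)
            return -1;
        break;
    case DEL_RECOVERY:
        if (*cur != CANCEL_RECOVERY)
            return -1;
        break;
    default:
        return -1;
    }
    *cur = next;
    return 0;
}

int main(void)
{
    enum host_state state = RECOVERY;   /* error handling currently owns the host */

    /* removal path: prefer CANCEL, fall back to CANCEL_RECOVERY */
    if (set_state(&state, CANCEL) && set_state(&state, CANCEL_RECOVERY))
        return 1;                       /* neither state reachable, give up */
    printf("state after cancel step: %d\n", state);

    if (set_state(&state, DEL) && set_state(&state, DEL_RECOVERY))
        return 1;
    printf("state after delete step: %d\n", state);
    return 0;
}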
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 6e54c7d9b33..19392f65127 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -460,6 +460,8 @@ MODULE_PARM(adisplay, "1i");
MODULE_PARM(normal, "1i");
MODULE_PARM(ansi, "1i");
#endif
+
+MODULE_LICENSE("GPL");
#endif
/*counter of concurrent disk read/writes, to turn on/off disk led */
static int disk_rw_in_progress = 0;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 5b14934ba86..ff25210b00b 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -727,6 +727,16 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
if (hostdata->madapter_info.port_max_txu[0])
hostdata->host->max_sectors =
hostdata->madapter_info.port_max_txu[0] >> 9;
+
+ if (hostdata->madapter_info.os_type == 3 &&
+ strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
+ printk("ibmvscsi: host (Ver. %s) doesn't support large"
+ "transfers\n",
+ hostdata->madapter_info.srp_version);
+ printk("ibmvscsi: limiting scatterlists to %d\n",
+ MAX_INDIRECT_BUFS);
+ hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
+ }
}
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index a780546eda9..1f0ebabf6d4 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1265,9 +1265,8 @@ int scsi_device_cancel(struct scsi_device *sdev, int recovery)
list_for_each_safe(lh, lh_sf, &active_list) {
scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
list_del_init(lh);
- if (recovery) {
- scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD);
- } else {
+ if (recovery &&
+ !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
scmd->result = (DID_ABORT << 16);
scsi_finish_command(scmd);
}
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 07b554affcf..64fc9e21f35 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -110,6 +110,7 @@ static struct {
{"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */
{"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */
{"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN},
+ {"transtec", "T5008", "0001", BLIST_NOREPORTLUN },
{"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */
{"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */
{"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN}, /* locks up */
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 895c9452be4..ad534216507 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -50,7 +50,7 @@
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
if (shost->host_busy == shost->host_failed) {
- up(shost->eh_wait);
+ wake_up_process(shost->ehandler);
SCSI_LOG_ERROR_RECOVERY(5,
printk("Waking error handler thread\n"));
}
@@ -68,19 +68,24 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
{
struct Scsi_Host *shost = scmd->device->host;
unsigned long flags;
+ int ret = 0;
- if (shost->eh_wait == NULL)
+ if (!shost->ehandler)
return 0;
spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_set_state(shost, SHOST_RECOVERY))
+ if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
+ goto out_unlock;
+ ret = 1;
scmd->eh_eflags |= eh_flag;
list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
- scsi_host_set_state(shost, SHOST_RECOVERY);
shost->host_failed++;
scsi_eh_wakeup(shost);
+ out_unlock:
spin_unlock_irqrestore(shost->host_lock, flags);
- return 1;
+ return ret;
}
/**
@@ -176,8 +181,8 @@ void scsi_times_out(struct scsi_cmnd *scmd)
}
if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
- panic("Error handler thread not present at %p %p %s %d",
- scmd, scmd->device->host, __FILE__, __LINE__);
+ scmd->result |= DID_TIME_OUT << 16;
+ __scsi_done(scmd);
}
}
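
scsi_eh_scmd_add() now reports whether the command was actually handed to the error handler, and scsi_times_out() uses that to complete the command with DID_TIME_OUT itself instead of panicking when no handler is available. Roughly, in stand-alone form with invented helper names:

#include <stdbool.h>
#include <stdio.h>

#define DID_TIME_OUT 0x03

struct cmnd { unsigned int result; };

/* stands in for scsi_eh_scmd_add(): 1 if recovery accepted the command */
static int eh_add(struct cmnd *cmd, bool eh_running)
{
    (void)cmd;                  /* queueing onto eh_cmd_q elided in this sketch */
    return eh_running;
}

static void finish_with_timeout(struct cmnd *cmd)
{
    cmd->result |= DID_TIME_OUT << 16;  /* same host-byte encoding as the kernel */
    /* ...__scsi_done()-style completion would run here... */
}

static void times_out(struct cmnd *cmd, bool eh_running)
{
    if (!eh_add(cmd, eh_running))
        finish_with_timeout(cmd);       /* no handler: complete it ourselves */
}

int main(void)
{
    struct cmnd cmd = { .result = 0 };

    times_out(&cmd, false);
    printf("result = 0x%08x\n", cmd.result);
    return 0;
}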
@@ -196,8 +201,7 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev)
{
int online;
- wait_event(sdev->host->host_wait, (sdev->host->shost_state !=
- SHOST_RECOVERY));
+ wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));
online = scsi_device_online(sdev);
@@ -1441,6 +1445,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
static void scsi_restart_operations(struct Scsi_Host *shost)
{
struct scsi_device *sdev;
+ unsigned long flags;
/*
* If the door was locked, we need to insert a door lock request
@@ -1460,7 +1465,11 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
__FUNCTION__));
- scsi_host_set_state(shost, SHOST_RUNNING);
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_set_state(shost, SHOST_RUNNING))
+ if (scsi_host_set_state(shost, SHOST_CANCEL))
+ BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
+ spin_unlock_irqrestore(shost->host_lock, flags);
wake_up(&shost->host_wait);
@@ -1582,40 +1591,31 @@ int scsi_error_handler(void *data)
{
struct Scsi_Host *shost = (struct Scsi_Host *) data;
int rtn;
- DECLARE_MUTEX_LOCKED(sem);
current->flags |= PF_NOFREEZE;
- shost->eh_wait = &sem;
+
/*
- * Wake up the thread that created us.
+ * Note - we always use TASK_INTERRUPTIBLE even if the module
+ * was loaded as part of the kernel. The reason is that
+ * UNINTERRUPTIBLE would cause this thread to be counted in
+ * the load average as a running process, and an interruptible
+ * wait doesn't.
*/
- SCSI_LOG_ERROR_RECOVERY(3, printk("Wake up parent of"
- " scsi_eh_%d\n",shost->host_no));
-
- while (1) {
- /*
- * If we get a signal, it means we are supposed to go
- * away and die. This typically happens if the user is
- * trying to unload a module.
- */
- SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
- " scsi_eh_%d"
- " sleeping\n",shost->host_no));
-
- /*
- * Note - we always use down_interruptible with the semaphore
- * even if the module was loaded as part of the kernel. The
- * reason is that down() will cause this thread to be counted
- * in the load average as a running process, and down
- * interruptible doesn't. Given that we need to allow this
- * thread to die if the driver was loaded as a module, using
- * semaphores isn't unreasonable.
- */
- down_interruptible(&sem);
- if (kthread_should_stop())
- break;
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ if (shost->host_failed == 0 ||
+ shost->host_failed != shost->host_busy) {
+ SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
+ " scsi_eh_%d"
+ " sleeping\n",
+ shost->host_no));
+ schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
+ continue;
+ }
+ __set_current_state(TASK_RUNNING);
SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
" scsi_eh_%d waking"
" up\n",shost->host_no));
@@ -1642,7 +1642,7 @@ int scsi_error_handler(void *data)
* which are still online.
*/
scsi_restart_operations(shost);
-
+ set_current_state(TASK_INTERRUPTIBLE);
}
SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler scsi_eh_%d"
@@ -1651,7 +1651,7 @@ int scsi_error_handler(void *data)
/*
* Make sure that nobody tries to wake us up again.
*/
- shost->eh_wait = NULL;
+ shost->ehandler = NULL;
return 0;
}
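
The error handler thread is converted from a semaphore handshake to a kthread that sits in TASK_INTERRUPTIBLE, re-checks host_failed against host_busy, and goes back to sleep when there is nothing to do. A userspace analogue of that sleep/re-check loop, using a condition variable where the kernel uses schedule() and scsi_eh_wakeup():

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static int host_busy, host_failed;
static bool stop;

static void *error_handler(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (!stop) {
        /* no work, or not every outstanding command has failed yet: sleep */
        if (host_failed == 0 || host_failed != host_busy) {
            pthread_cond_wait(&wake, &lock);
            continue;
        }
        printf("recovering %d failed command(s)\n", host_failed);
        host_failed = host_busy = 0;    /* stands in for scsi_unjam_host() */
        pthread_cond_broadcast(&wake);  /* let waiters see the progress */
    }
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t eh;

    pthread_create(&eh, NULL, error_handler, NULL);

    pthread_mutex_lock(&lock);
    host_busy = host_failed = 2;        /* every outstanding command failed */
    pthread_cond_broadcast(&wake);      /* analogue of scsi_eh_wakeup() */
    while (host_failed != 0)
        pthread_cond_wait(&wake, &lock);
    stop = true;                        /* analogue of kthread_should_stop() */
    pthread_cond_broadcast(&wake);
    pthread_mutex_unlock(&lock);

    pthread_join(eh, NULL);
    return 0;
}

The condition variable only plays the role of wake_up_process(); the kernel code additionally uses TASK_INTERRUPTIBLE so the idle handler is not counted toward the load average, as the comment in the hunk above explains.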
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index b7fddac8134..de7f98cc38f 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -458,7 +458,7 @@ int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
* error processing, as long as the device was opened
* non-blocking */
if (filp && filp->f_flags & O_NONBLOCK) {
- if (sdev->host->shost_state == SHOST_RECOVERY)
+ if (scsi_host_in_recovery(sdev->host))
return -ENODEV;
} else if (!scsi_block_when_processing_errors(sdev))
return -ENODEV;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 863bb6495da..dc9c772bc87 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -118,7 +118,6 @@ static void scsi_unprep_request(struct request *req)
req->flags &= ~REQ_DONTPREP;
req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;
- scsi_release_buffers(cmd);
scsi_put_command(cmd);
}
@@ -140,14 +139,12 @@ static void scsi_unprep_request(struct request *req)
* commands.
* Notes: This could be called either from an interrupt context or a
* normal process context.
- * Notes: Upon return, cmd is a stale pointer.
*/
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_device *device = cmd->device;
struct request_queue *q = device->request_queue;
- struct request *req = cmd->request;
unsigned long flags;
SCSI_LOG_MLQUEUE(1,
@@ -188,9 +185,8 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
* function. The SCSI request function detects the blocked condition
* and plugs the queue appropriately.
*/
- scsi_unprep_request(req);
spin_lock_irqsave(q->queue_lock, flags);
- blk_requeue_request(q, req);
+ blk_requeue_request(q, cmd->request);
spin_unlock_irqrestore(q->queue_lock, flags);
scsi_run_queue(q);
@@ -451,7 +447,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
spin_lock_irqsave(shost->host_lock, flags);
shost->host_busy--;
- if (unlikely((shost->shost_state == SHOST_RECOVERY) &&
+ if (unlikely(scsi_host_in_recovery(shost) &&
shost->host_failed))
scsi_eh_wakeup(shost);
spin_unlock(shost->host_lock);
@@ -1268,6 +1264,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
}
} else {
memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
+ cmd->cmd_len = req->cmd_len;
if (rq_data_dir(req) == WRITE)
cmd->sc_data_direction = DMA_TO_DEVICE;
else if (req->data_len)
@@ -1342,7 +1339,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
struct Scsi_Host *shost,
struct scsi_device *sdev)
{
- if (shost->shost_state == SHOST_RECOVERY)
+ if (scsi_host_in_recovery(shost))
return 0;
if (shost->host_busy == 0 && shost->host_blocked) {
/*
@@ -1514,7 +1511,6 @@ static void scsi_request_fn(struct request_queue *q)
* cases (host limits or settings) should run the queue at some
* later time.
*/
- scsi_unprep_request(req);
spin_lock_irq(q->queue_lock);
blk_requeue_request(q, req);
sdev->device_busy--;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index b86f170fa8e..fcf9f6cbb14 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1466,23 +1466,17 @@ EXPORT_SYMBOL(scsi_scan_single_target);
void scsi_forget_host(struct Scsi_Host *shost)
{
- struct scsi_target *starget, *tmp;
+ struct scsi_device *sdev;
unsigned long flags;
- /*
- * Ok, this look a bit strange. We always look for the first device
- * on the list as scsi_remove_device removes them from it - thus we
- * also have to release the lock.
- * We don't need to get another reference to the device before
- * releasing the lock as we already own the reference from
- * scsi_register_device that's release in scsi_remove_device. And
- * after that we don't look at sdev anymore.
- */
+ restart:
spin_lock_irqsave(shost->host_lock, flags);
- list_for_each_entry_safe(starget, tmp, &shost->__targets, siblings) {
+ list_for_each_entry(sdev, &shost->__devices, siblings) {
+ if (sdev->sdev_state == SDEV_DEL)
+ continue;
spin_unlock_irqrestore(shost->host_lock, flags);
- scsi_remove_target(&starget->dev);
- spin_lock_irqsave(shost->host_lock, flags);
+ __scsi_remove_device(sdev);
+ goto restart;
}
spin_unlock_irqrestore(shost->host_lock, flags);
}
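
The scsi_forget_host() rewrite above switches to a device walk that drops the host lock, removes one device, and restarts from the head of the list, skipping entries already in SDEV_DEL. A minimal standalone sketch of that restart idiom follows; the types and the lock/unlock/remove_one callbacks are hypothetical stand-ins, not the real SCSI structures.

/* Illustrative sketch only: the "drop the lock, remove one entry, restart
 * the walk" idiom used by scsi_forget_host() above. */
struct entry {
	struct entry *next;
	int being_deleted;
};

static void remove_all(struct entry **head, void (*lock)(void),
		       void (*unlock)(void), void (*remove_one)(struct entry *))
{
restart:
	lock();
	for (struct entry *e = *head; e; e = e->next) {
		if (e->being_deleted)	/* skip entries already on their way out */
			continue;
		unlock();		/* remove_one() may sleep; cannot hold the lock */
		remove_one(e);		/* may unlink e and others from the list */
		goto restart;		/* list may have changed: start from the top */
	}
	unlock();
}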
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index b8052d5206c..72a6550a056 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -57,6 +57,8 @@ static struct {
{ SHOST_CANCEL, "cancel" },
{ SHOST_DEL, "deleted" },
{ SHOST_RECOVERY, "recovery" },
+ { SHOST_CANCEL_RECOVERY, "cancel/recovery" },
+ { SHOST_DEL_RECOVERY, "deleted/recovery", },
};
const char *scsi_host_state_name(enum scsi_host_state state)
{
@@ -707,9 +709,11 @@ void __scsi_remove_device(struct scsi_device *sdev)
**/
void scsi_remove_device(struct scsi_device *sdev)
{
- down(&sdev->host->scan_mutex);
+ struct Scsi_Host *shost = sdev->host;
+
+ down(&shost->scan_mutex);
__scsi_remove_device(sdev);
- up(&sdev->host->scan_mutex);
+ up(&shost->scan_mutex);
}
EXPORT_SYMBOL(scsi_remove_device);
@@ -717,17 +721,20 @@ void __scsi_remove_target(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
unsigned long flags;
- struct scsi_device *sdev, *tmp;
+ struct scsi_device *sdev;
spin_lock_irqsave(shost->host_lock, flags);
starget->reap_ref++;
- list_for_each_entry_safe(sdev, tmp, &shost->__devices, siblings) {
+ restart:
+ list_for_each_entry(sdev, &shost->__devices, siblings) {
if (sdev->channel != starget->channel ||
- sdev->id != starget->id)
+ sdev->id != starget->id ||
+ sdev->sdev_state == SDEV_DEL)
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_remove_device(sdev);
spin_lock_irqsave(shost->host_lock, flags);
+ goto restart;
}
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_target_reap(starget);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index de564b38605..9a1dc0cea03 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -235,6 +235,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
return 0;
memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
+ SCpnt->cmd_len = rq->cmd_len;
if (rq_data_dir(rq) == WRITE)
SCpnt->sc_data_direction = DMA_TO_DEVICE;
else if (rq->data_len)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 9ea4765d1d1..4d09a6e4dd2 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1027,7 +1027,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
if (sdp->detached)
return -ENODEV;
if (filp->f_flags & O_NONBLOCK) {
- if (sdp->device->host->shost_state == SHOST_RECOVERY)
+ if (scsi_host_in_recovery(sdp->device->host))
return -EBUSY;
} else if (!scsi_block_when_processing_errors(sdp->device))
return -EBUSY;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index ce63fc8312d..561901b1cf1 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -326,6 +326,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
return 0;
memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
+ SCpnt->cmd_len = rq->cmd_len;
if (!rq->data_len)
SCpnt->sc_data_direction = DMA_NONE;
else if (rq_data_dir(rq) == WRITE)
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index a93308ae973..d001c046551 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4206,6 +4206,7 @@ static int st_init_command(struct scsi_cmnd *SCpnt)
return 0;
memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
+ SCpnt->cmd_len = rq->cmd_len;
if (rq_data_dir(rq) == WRITE)
SCpnt->sc_data_direction = DMA_TO_DEVICE;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 31ee13eef7a..773ae11b4a1 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -650,6 +650,7 @@ config FB_NVIDIA
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select FB_SOFT_CURSOR
help
This driver supports graphics boards with the nVidia chips, TNT
and newer. For very old chipsets, such as the RIVA128, then use
diff --git a/drivers/video/fbcvt.c b/drivers/video/fbcvt.c
index cfa61b512de..0b6af00d197 100644
--- a/drivers/video/fbcvt.c
+++ b/drivers/video/fbcvt.c
@@ -272,11 +272,11 @@ static void fb_cvt_convert_to_mode(struct fb_cvt_data *cvt,
{
mode->refresh = cvt->f_refresh;
mode->pixclock = KHZ2PICOS(cvt->pixclock/1000);
- mode->left_margin = cvt->h_front_porch;
- mode->right_margin = cvt->h_back_porch;
+ mode->left_margin = cvt->h_back_porch;
+ mode->right_margin = cvt->h_front_porch;
mode->hsync_len = cvt->hsync;
- mode->upper_margin = cvt->v_front_porch;
- mode->lower_margin = cvt->v_back_porch;
+ mode->upper_margin = cvt->v_back_porch;
+ mode->lower_margin = cvt->v_front_porch;
mode->vsync_len = cvt->vsync;
mode->sync &= ~(FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT);
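
The porch swap above follows the fbdev convention that left_margin/upper_margin hold the horizontal/vertical back porches and right_margin/lower_margin the front porches. A small illustrative check of that convention; the helper names are hypothetical, struct fb_videomode comes from <linux/fb.h>.

#include <linux/fb.h>

/* Illustrative only: with the corrected mapping, the totals reconstruct as
 * active + front porch + sync + back porch. */
static u32 fb_mode_htotal(const struct fb_videomode *m)
{
	return m->xres + m->right_margin + m->hsync_len + m->left_margin;
}

static u32 fb_mode_vtotal(const struct fb_videomode *m)
{
	return m->yres + m->lower_margin + m->vsync_len + m->upper_margin;
}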
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 3620de0f252..a7f020ada63 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -893,7 +893,7 @@ static int nvidiafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
int i, set = cursor->set;
u16 fg, bg;
- if (!hwcur || cursor->image.width > MAX_CURS || cursor->image.height > MAX_CURS)
+ if (cursor->image.width > MAX_CURS || cursor->image.height > MAX_CURS)
return -ENXIO;
NVShowHideCursor(par, 0);
@@ -1356,6 +1356,9 @@ static int __devinit nvidia_set_fbinfo(struct fb_info *info)
info->pixmap.size = 8 * 1024;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ if (!hwcur)
+ info->fbops->fb_cursor = soft_cursor;
+
info->var.accel_flags = (!noaccel);
switch (par->Architecture) {
diff --git a/fs/dcache.c b/fs/dcache.c
index 7376b61269f..fb10386c59b 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -102,7 +102,8 @@ static inline void dentry_iput(struct dentry * dentry)
list_del_init(&dentry->d_alias);
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_lock);
- fsnotify_inoderemove(inode);
+ if (!inode->i_nlink)
+ fsnotify_inoderemove(inode);
if (dentry->d_op && dentry->d_op->d_iput)
dentry->d_op->d_iput(dentry, inode);
else
diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog
index 49eafbdb15c..c7e9237379c 100644
--- a/fs/ntfs/ChangeLog
+++ b/fs/ntfs/ChangeLog
@@ -92,6 +92,8 @@ ToDo/Notes:
an octal number to conform to how chmod(1) works, too. Thanks to
Giuseppe Bilotta and Horst von Brand for pointing out the errors of
my ways.
+ - Fix various bugs in the runlist merging code. (Based on libntfs
+ changes by Richard Russon.)
2.1.23 - Implement extension of resident files and make writing safe as well as
many bug fixes, cleanups, and enhancements...
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index b6cc8cf2462..5e80c07c6a4 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -59,39 +59,49 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
unsigned long flags;
struct buffer_head *first, *tmp;
struct page *page;
+ struct inode *vi;
ntfs_inode *ni;
int page_uptodate = 1;
page = bh->b_page;
- ni = NTFS_I(page->mapping->host);
+ vi = page->mapping->host;
+ ni = NTFS_I(vi);
if (likely(uptodate)) {
- s64 file_ofs, initialized_size;
+ loff_t i_size;
+ s64 file_ofs, init_size;
set_buffer_uptodate(bh);
file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) +
bh_offset(bh);
read_lock_irqsave(&ni->size_lock, flags);
- initialized_size = ni->initialized_size;
+ init_size = ni->initialized_size;
+ i_size = i_size_read(vi);
read_unlock_irqrestore(&ni->size_lock, flags);
+ if (unlikely(init_size > i_size)) {
+ /* Race with shrinking truncate. */
+ init_size = i_size;
+ }
/* Check for the current buffer head overflowing. */
- if (file_ofs + bh->b_size > initialized_size) {
- char *addr;
- int ofs = 0;
-
- if (file_ofs < initialized_size)
- ofs = initialized_size - file_ofs;
- addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
- memset(addr + bh_offset(bh) + ofs, 0, bh->b_size - ofs);
+ if (unlikely(file_ofs + bh->b_size > init_size)) {
+ u8 *kaddr;
+ int ofs;
+
+ ofs = 0;
+ if (file_ofs < init_size)
+ ofs = init_size - file_ofs;
+ kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+ memset(kaddr + bh_offset(bh) + ofs, 0,
+ bh->b_size - ofs);
+ kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
flush_dcache_page(page);
- kunmap_atomic(addr, KM_BIO_SRC_IRQ);
}
} else {
clear_buffer_uptodate(bh);
SetPageError(page);
- ntfs_error(ni->vol->sb, "Buffer I/O error, logical block %llu.",
- (unsigned long long)bh->b_blocknr);
+ ntfs_error(ni->vol->sb, "Buffer I/O error, logical block "
+ "0x%llx.", (unsigned long long)bh->b_blocknr);
}
first = page_buffers(page);
local_irq_save(flags);
@@ -124,7 +134,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
if (likely(page_uptodate && !PageError(page)))
SetPageUptodate(page);
} else {
- char *addr;
+ u8 *kaddr;
unsigned int i, recs;
u32 rec_size;
@@ -132,12 +142,12 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
recs = PAGE_CACHE_SIZE / rec_size;
/* Should have been verified before we got here... */
BUG_ON(!recs);
- addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+ kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
for (i = 0; i < recs; i++)
- post_read_mst_fixup((NTFS_RECORD*)(addr +
+ post_read_mst_fixup((NTFS_RECORD*)(kaddr +
i * rec_size), rec_size);
+ kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
flush_dcache_page(page);
- kunmap_atomic(addr, KM_BIO_SRC_IRQ);
if (likely(page_uptodate && !PageError(page)))
SetPageUptodate(page);
}
@@ -168,8 +178,11 @@ still_busy:
*/
static int ntfs_read_block(struct page *page)
{
+ loff_t i_size;
VCN vcn;
LCN lcn;
+ s64 init_size;
+ struct inode *vi;
ntfs_inode *ni;
ntfs_volume *vol;
runlist_element *rl;
@@ -180,7 +193,8 @@ static int ntfs_read_block(struct page *page)
int i, nr;
unsigned char blocksize_bits;
- ni = NTFS_I(page->mapping->host);
+ vi = page->mapping->host;
+ ni = NTFS_I(vi);
vol = ni->vol;
/* $MFT/$DATA must have its complete runlist in memory at all times. */
@@ -199,11 +213,28 @@ static int ntfs_read_block(struct page *page)
bh = head = page_buffers(page);
BUG_ON(!bh);
+ /*
+ * We may be racing with truncate. To avoid some of the problems we
+ * now take a snapshot of the various sizes and use those for the whole
+ * of the function. In case of an extending truncate it just means we
+ * may leave some buffers unmapped which are now allocated. This is
+ * not a problem since these buffers will just get mapped when a write
+ * occurs. In case of a shrinking truncate, we will detect this later
+ * on due to the runlist being incomplete and if the page is being
+ * fully truncated, truncate will throw it away as soon as we unlock
+ * it so no need to worry what we do with it.
+ */
iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
read_lock_irqsave(&ni->size_lock, flags);
lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
- zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits;
+ init_size = ni->initialized_size;
+ i_size = i_size_read(vi);
read_unlock_irqrestore(&ni->size_lock, flags);
+ if (unlikely(init_size > i_size)) {
+ /* Race with shrinking truncate. */
+ init_size = i_size;
+ }
+ zblock = (init_size + blocksize - 1) >> blocksize_bits;
/* Loop through all the buffers in the page. */
rl = NULL;
@@ -366,6 +397,8 @@ handle_zblock:
*/
static int ntfs_readpage(struct file *file, struct page *page)
{
+ loff_t i_size;
+ struct inode *vi;
ntfs_inode *ni, *base_ni;
u8 *kaddr;
ntfs_attr_search_ctx *ctx;
@@ -384,14 +417,17 @@ retry_readpage:
unlock_page(page);
return 0;
}
- ni = NTFS_I(page->mapping->host);
+ vi = page->mapping->host;
+ ni = NTFS_I(vi);
/*
* Only $DATA attributes can be encrypted and only unnamed $DATA
* attributes can be compressed. Index root can have the flags set but
* this means to create compressed/encrypted files, not that the
- * attribute is compressed/encrypted.
+ * attribute is compressed/encrypted. Note we need to check for
+ * AT_INDEX_ALLOCATION since this is the type of both directory and
+ * index inodes.
*/
- if (ni->type != AT_INDEX_ROOT) {
+ if (ni->type != AT_INDEX_ALLOCATION) {
/* If attribute is encrypted, deny access, just like NT4. */
if (NInoEncrypted(ni)) {
BUG_ON(ni->type != AT_DATA);
@@ -456,7 +492,12 @@ retry_readpage:
read_lock_irqsave(&ni->size_lock, flags);
if (unlikely(attr_len > ni->initialized_size))
attr_len = ni->initialized_size;
+ i_size = i_size_read(vi);
read_unlock_irqrestore(&ni->size_lock, flags);
+ if (unlikely(attr_len > i_size)) {
+ /* Race with shrinking truncate. */
+ attr_len = i_size;
+ }
kaddr = kmap_atomic(page, KM_USER0);
/* Copy the data to the page. */
memcpy(kaddr, (u8*)ctx->attr +
@@ -1341,9 +1382,11 @@ retry_writepage:
* Only $DATA attributes can be encrypted and only unnamed $DATA
* attributes can be compressed. Index root can have the flags set but
* this means to create compressed/encrypted files, not that the
- * attribute is compressed/encrypted.
+ * attribute is compressed/encrypted. Note we need to check for
+ * AT_INDEX_ALLOCATION since this is the type of both directory and
+ * index inodes.
*/
- if (ni->type != AT_INDEX_ROOT) {
+ if (ni->type != AT_INDEX_ALLOCATION) {
/* If file is encrypted, deny access, just like NT4. */
if (NInoEncrypted(ni)) {
unlock_page(page);
@@ -1379,8 +1422,8 @@ retry_writepage:
unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs);
- flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
+ flush_dcache_page(page);
}
/* Handle mst protected attributes. */
if (NInoMstProtected(ni))
@@ -1443,34 +1486,33 @@ retry_writepage:
BUG_ON(PageWriteback(page));
set_page_writeback(page);
unlock_page(page);
- /*
- * Here, we do not need to zero the out of bounds area everytime
- * because the below memcpy() already takes care of the
- * mmap-at-end-of-file requirements. If the file is converted to a
- * non-resident one, then the code path use is switched to the
- * non-resident one where the zeroing happens on each ntfs_writepage()
- * invocation.
- */
attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
i_size = i_size_read(vi);
if (unlikely(attr_len > i_size)) {
+ /* Race with shrinking truncate or a failed truncate. */
attr_len = i_size;
- ctx->attr->data.resident.value_length = cpu_to_le32(attr_len);
+ /*
+ * If the truncate failed, fix it up now. If a concurrent
+ * truncate, we do its job, so it does not have to do anything.
+ */
+ err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr,
+ attr_len);
+ /* Shrinking cannot fail. */
+ BUG_ON(err);
}
kaddr = kmap_atomic(page, KM_USER0);
/* Copy the data from the page to the mft record. */
memcpy((u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset),
kaddr, attr_len);
- flush_dcache_mft_record_page(ctx->ntfs_ino);
/* Zero out of bounds area in the page cache page. */
memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
- flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
-
+ flush_dcache_mft_record_page(ctx->ntfs_ino);
+ flush_dcache_page(page);
+ /* We are done with the page. */
end_page_writeback(page);
-
- /* Mark the mft record dirty, so it gets written back. */
+ /* Finally, mark the mft record dirty, so it gets written back. */
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
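
Several hunks above guard against a racing shrinking truncate by snapshotting the sizes once under size_lock and clamping the initialized size to i_size. A minimal sketch of that pattern using the same fields as the patch; the helper name is hypothetical and not part of the NTFS driver.

/* Minimal sketch of the truncate-race guard used above: take one consistent
 * snapshot of the sizes and clamp, so a concurrently shrinking truncate
 * cannot make us operate past i_size.  Hypothetical helper, not a drop-in. */
static s64 ntfs_clamped_init_size(ntfs_inode *ni, struct inode *vi)
{
	unsigned long flags;
	loff_t i_size;
	s64 init_size;

	read_lock_irqsave(&ni->size_lock, flags);
	init_size = ni->initialized_size;
	i_size = i_size_read(vi);
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (unlikely(init_size > i_size))
		init_size = i_size;	/* race with shrinking truncate */
	return init_size;
}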
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index dc4bbe3acf5..7ec04513180 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -1166,6 +1166,8 @@ err_out:
*
* Return 0 on success and -errno on error. In the error case, the inode will
* have had make_bad_inode() executed on it.
+ *
+ * Note this cannot be called for AT_INDEX_ALLOCATION.
*/
static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
{
@@ -1242,8 +1244,8 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
}
}
/*
- * The encryption flag set in an index root just means to
- * compress all files.
+ * The compressed/sparse flag set in an index root just means
+ * to compress all files.
*/
if (NInoMstProtected(ni) && ni->type != AT_INDEX_ROOT) {
ntfs_error(vi->i_sb, "Found mst protected attribute "
@@ -1319,8 +1321,7 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
"the mapping pairs array.");
goto unm_err_out;
}
- if ((NInoCompressed(ni) || NInoSparse(ni)) &&
- ni->type != AT_INDEX_ROOT) {
+ if (NInoCompressed(ni) || NInoSparse(ni)) {
if (a->data.non_resident.compression_unit != 4) {
ntfs_error(vi->i_sb, "Found nonstandard "
"compression unit (%u instead "
diff --git a/fs/ntfs/malloc.h b/fs/ntfs/malloc.h
index 3288bcc2c4a..006946efca8 100644
--- a/fs/ntfs/malloc.h
+++ b/fs/ntfs/malloc.h
@@ -1,7 +1,7 @@
/*
* malloc.h - NTFS kernel memory handling. Part of the Linux-NTFS project.
*
- * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2001-2005 Anton Altaparmakov
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c
index f5b2ac92908..061b5ff6b73 100644
--- a/fs/ntfs/runlist.c
+++ b/fs/ntfs/runlist.c
@@ -2,7 +2,7 @@
* runlist.c - NTFS runlist handling code. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2005 Anton Altaparmakov
- * Copyright (c) 2002 Richard Russon
+ * Copyright (c) 2002-2005 Richard Russon
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
@@ -158,17 +158,21 @@ static inline BOOL ntfs_are_rl_mergeable(runlist_element *dst,
BUG_ON(!dst);
BUG_ON(!src);
- if ((dst->lcn < 0) || (src->lcn < 0)) { /* Are we merging holes? */
- if (dst->lcn == LCN_HOLE && src->lcn == LCN_HOLE)
- return TRUE;
+ /* We can merge unmapped regions even if they are misaligned. */
+ if ((dst->lcn == LCN_RL_NOT_MAPPED) && (src->lcn == LCN_RL_NOT_MAPPED))
+ return TRUE;
+ /* If the runs are misaligned, we cannot merge them. */
+ if ((dst->vcn + dst->length) != src->vcn)
return FALSE;
- }
- if ((dst->lcn + dst->length) != src->lcn) /* Are the runs contiguous? */
- return FALSE;
- if ((dst->vcn + dst->length) != src->vcn) /* Are the runs misaligned? */
- return FALSE;
-
- return TRUE;
+ /* If both runs are non-sparse and contiguous, we can merge them. */
+ if ((dst->lcn >= 0) && (src->lcn >= 0) &&
+ ((dst->lcn + dst->length) == src->lcn))
+ return TRUE;
+ /* If we are merging two holes, we can merge them. */
+ if ((dst->lcn == LCN_HOLE) && (src->lcn == LCN_HOLE))
+ return TRUE;
+ /* Cannot merge. */
+ return FALSE;
}
/**
@@ -214,14 +218,15 @@ static inline void __ntfs_rl_merge(runlist_element *dst, runlist_element *src)
static inline runlist_element *ntfs_rl_append(runlist_element *dst,
int dsize, runlist_element *src, int ssize, int loc)
{
- BOOL right;
- int magic;
+ BOOL right = FALSE; /* Right end of @src needs merging. */
+ int marker; /* End of the inserted runs. */
BUG_ON(!dst);
BUG_ON(!src);
/* First, check if the right hand end needs merging. */
- right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
+ if ((loc + 1) < dsize)
+ right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
/* Space required: @dst size + @src size, less one if we merged. */
dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right);
@@ -236,18 +241,19 @@ static inline runlist_element *ntfs_rl_append(runlist_element *dst,
if (right)
__ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
- magic = loc + ssize;
+ /* First run after the @src runs that have been inserted. */
+ marker = loc + ssize + 1;
/* Move the tail of @dst out of the way, then copy in @src. */
- ntfs_rl_mm(dst, magic + 1, loc + 1 + right, dsize - loc - 1 - right);
+ ntfs_rl_mm(dst, marker, loc + 1 + right, dsize - (loc + 1 + right));
ntfs_rl_mc(dst, loc + 1, src, 0, ssize);
/* Adjust the size of the preceding hole. */
dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
/* We may have changed the length of the file, so fix the end marker */
- if (dst[magic + 1].lcn == LCN_ENOENT)
- dst[magic + 1].vcn = dst[magic].vcn + dst[magic].length;
+ if (dst[marker].lcn == LCN_ENOENT)
+ dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
return dst;
}
@@ -279,18 +285,17 @@ static inline runlist_element *ntfs_rl_append(runlist_element *dst,
static inline runlist_element *ntfs_rl_insert(runlist_element *dst,
int dsize, runlist_element *src, int ssize, int loc)
{
- BOOL left = FALSE;
- BOOL disc = FALSE; /* Discontinuity */
- BOOL hole = FALSE; /* Following a hole */
- int magic;
+ BOOL left = FALSE; /* Left end of @src needs merging. */
+ BOOL disc = FALSE; /* Discontinuity between @dst and @src. */
+ int marker; /* End of the inserted runs. */
BUG_ON(!dst);
BUG_ON(!src);
- /* disc => Discontinuity between the end of @dst and the start of @src.
- * This means we might need to insert a hole.
- * hole => @dst ends with a hole or an unmapped region which we can
- * extend to match the discontinuity. */
+ /*
+ * disc => Discontinuity between the end of @dst and the start of @src.
+ * This means we might need to insert a "not mapped" run.
+ */
if (loc == 0)
disc = (src[0].vcn > 0);
else {
@@ -303,58 +308,49 @@ static inline runlist_element *ntfs_rl_insert(runlist_element *dst,
merged_length += src->length;
disc = (src[0].vcn > dst[loc - 1].vcn + merged_length);
- if (disc)
- hole = (dst[loc - 1].lcn == LCN_HOLE);
}
-
- /* Space required: @dst size + @src size, less one if we merged, plus
- * one if there was a discontinuity, less one for a trailing hole. */
- dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc - hole);
+ /*
+ * Space required: @dst size + @src size, less one if we merged, plus
+ * one if there was a discontinuity.
+ */
+ dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc);
if (IS_ERR(dst))
return dst;
/*
* We are guaranteed to succeed from here so can start modifying the
* original runlist.
*/
-
if (left)
__ntfs_rl_merge(dst + loc - 1, src);
-
- magic = loc + ssize - left + disc - hole;
+ /*
+ * First run after the @src runs that have been inserted.
+ * Nominally, @marker equals @loc + @ssize, i.e. location + number of
+ * runs in @src. However, if @left, then the first run in @src has
+ * been merged with one in @dst. And if @disc, then @dst and @src do
+ * not meet and we need an extra run to fill the gap.
+ */
+ marker = loc + ssize - left + disc;
/* Move the tail of @dst out of the way, then copy in @src. */
- ntfs_rl_mm(dst, magic, loc, dsize - loc);
- ntfs_rl_mc(dst, loc + disc - hole, src, left, ssize - left);
+ ntfs_rl_mm(dst, marker, loc, dsize - loc);
+ ntfs_rl_mc(dst, loc + disc, src, left, ssize - left);
- /* Adjust the VCN of the last run ... */
- if (dst[magic].lcn <= LCN_HOLE)
- dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length;
+ /* Adjust the VCN of the first run after the insertion... */
+ dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
/* ... and the length. */
- if (dst[magic].lcn == LCN_HOLE || dst[magic].lcn == LCN_RL_NOT_MAPPED)
- dst[magic].length = dst[magic + 1].vcn - dst[magic].vcn;
+ if (dst[marker].lcn == LCN_HOLE || dst[marker].lcn == LCN_RL_NOT_MAPPED)
+ dst[marker].length = dst[marker + 1].vcn - dst[marker].vcn;
- /* Writing beyond the end of the file and there's a discontinuity. */
+ /* Writing beyond the end of the file and there is a discontinuity. */
if (disc) {
- if (hole)
- dst[loc - 1].length = dst[loc].vcn - dst[loc - 1].vcn;
- else {
- if (loc > 0) {
- dst[loc].vcn = dst[loc - 1].vcn +
- dst[loc - 1].length;
- dst[loc].length = dst[loc + 1].vcn -
- dst[loc].vcn;
- } else {
- dst[loc].vcn = 0;
- dst[loc].length = dst[loc + 1].vcn;
- }
- dst[loc].lcn = LCN_RL_NOT_MAPPED;
+ if (loc > 0) {
+ dst[loc].vcn = dst[loc - 1].vcn + dst[loc - 1].length;
+ dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
+ } else {
+ dst[loc].vcn = 0;
+ dst[loc].length = dst[loc + 1].vcn;
}
-
- magic += hole;
-
- if (dst[magic].lcn == LCN_ENOENT)
- dst[magic].vcn = dst[magic - 1].vcn +
- dst[magic - 1].length;
+ dst[loc].lcn = LCN_RL_NOT_MAPPED;
}
return dst;
}
@@ -385,20 +381,23 @@ static inline runlist_element *ntfs_rl_insert(runlist_element *dst,
static inline runlist_element *ntfs_rl_replace(runlist_element *dst,
int dsize, runlist_element *src, int ssize, int loc)
{
- BOOL left = FALSE;
- BOOL right;
- int magic;
+ BOOL left = FALSE; /* Left end of @src needs merging. */
+ BOOL right = FALSE; /* Right end of @src needs merging. */
+ int tail; /* Start of tail of @dst. */
+ int marker; /* End of the inserted runs. */
BUG_ON(!dst);
BUG_ON(!src);
- /* First, merge the left and right ends, if necessary. */
- right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
+ /* First, see if the left and right ends need merging. */
+ if ((loc + 1) < dsize)
+ right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
if (loc > 0)
left = ntfs_are_rl_mergeable(dst + loc - 1, src);
-
- /* Allocate some space. We'll need less if the left, right, or both
- * ends were merged. */
+ /*
+ * Allocate some space. We will need less if the left, right, or both
+ * ends get merged.
+ */
dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left - right);
if (IS_ERR(dst))
return dst;
@@ -406,21 +405,37 @@ static inline runlist_element *ntfs_rl_replace(runlist_element *dst,
* We are guaranteed to succeed from here so can start modifying the
* original runlists.
*/
+
+ /* First, merge the left and right ends, if necessary. */
if (right)
__ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
if (left)
__ntfs_rl_merge(dst + loc - 1, src);
-
- /* FIXME: What does this mean? (AIA) */
- magic = loc + ssize - left;
+ /*
+ * Offset of the tail of @dst. This needs to be moved out of the way
+ * to make space for the runs to be copied from @src, i.e. the first
+ * run of the tail of @dst.
+ * Nominally, @tail equals @loc + 1, i.e. location, skipping the
+ * replaced run. However, if @right, then one of @dst's runs is
+ * already merged into @src.
+ */
+ tail = loc + right + 1;
+ /*
+ * First run after the @src runs that have been inserted, i.e. where
+ * the tail of @dst needs to be moved to.
+ * Nominally, @marker equals @loc + @ssize, i.e. location + number of
+ * runs in @src. However, if @left, then the first run in @src has
+ * been merged with one in @dst.
+ */
+ marker = loc + ssize - left;
/* Move the tail of @dst out of the way, then copy in @src. */
- ntfs_rl_mm(dst, magic, loc + right + 1, dsize - loc - right - 1);
+ ntfs_rl_mm(dst, marker, tail, dsize - tail);
ntfs_rl_mc(dst, loc, src, left, ssize - left);
- /* We may have changed the length of the file, so fix the end marker */
- if (dst[magic].lcn == LCN_ENOENT)
- dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length;
+ /* We may have changed the length of the file, so fix the end marker. */
+ if (dsize - tail > 0 && dst[marker].lcn == LCN_ENOENT)
+ dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
return dst;
}
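
The rewritten ntfs_are_rl_mergeable() above encodes four rules; restated here as one compact, illustrative helper (the name is hypothetical, the LCN_* sentinels and types are those of the NTFS runlist code).

/* Illustrative restatement of the merge rules implemented above. */
static BOOL rl_runs_mergeable(const runlist_element *dst,
			      const runlist_element *src)
{
	/* 1. Unmapped regions merge even when misaligned. */
	if (dst->lcn == LCN_RL_NOT_MAPPED && src->lcn == LCN_RL_NOT_MAPPED)
		return TRUE;
	/* 2. Otherwise the runs must be VCN-contiguous. */
	if (dst->vcn + dst->length != src->vcn)
		return FALSE;
	/* 3. Real runs must also be LCN-contiguous. */
	if (dst->lcn >= 0 && src->lcn >= 0 &&
	    dst->lcn + dst->length == src->lcn)
		return TRUE;
	/* 4. Two holes merge. */
	if (dst->lcn == LCN_HOLE && src->lcn == LCN_HOLE)
		return TRUE;
	return FALSE;
}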
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 17d0c0d40b0..eef0876d830 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -42,8 +42,8 @@ struct hlist_node;
struct vlan_ethhdr {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
unsigned char h_source[ETH_ALEN]; /* source ether addr */
- unsigned short h_vlan_proto; /* Should always be 0x8100 */
- unsigned short h_vlan_TCI; /* Encapsulates priority and VLAN ID */
+ __be16 h_vlan_proto; /* Should always be 0x8100 */
+ __be16 h_vlan_TCI; /* Encapsulates priority and VLAN ID */
unsigned short h_vlan_encapsulated_proto; /* packet type ID field (or len) */
};
@@ -55,8 +55,8 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
}
struct vlan_hdr {
- unsigned short h_vlan_TCI; /* Encapsulates priority and VLAN ID */
- unsigned short h_vlan_encapsulated_proto; /* packet type ID field (or len) */
+ __be16 h_vlan_TCI; /* Encapsulates priority and VLAN ID */
+ __be16 h_vlan_encapsulated_proto; /* packet type ID field (or len) */
};
#define VLAN_VID_MASK 0xfff
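
With h_vlan_TCI now typed __be16, callers convert with ntohs() before masking. An illustrative pair of helpers; the names are hypothetical, and the 13-bit priority shift is the 802.1Q TCI layout rather than anything defined in this hunk.

/* Illustrative only: decoding a TCI field that is __be16 on the wire. */
static inline unsigned short vlan_vid_from_tci(__be16 tci)
{
	return ntohs(tci) & VLAN_VID_MASK;	/* low 12 bits: VLAN ID */
}

static inline unsigned char vlan_prio_from_tci(__be16 tci)
{
	return (ntohs(tci) >> 13) & 0x7;	/* top 3 bits: priority */
}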
diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h
index 7e033e9271a..bace72a76cc 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack.h
@@ -133,11 +133,13 @@ enum ip_conntrack_expect_events {
#include <linux/netfilter_ipv4/ip_conntrack_tcp.h>
#include <linux/netfilter_ipv4/ip_conntrack_icmp.h>
+#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
#include <linux/netfilter_ipv4/ip_conntrack_sctp.h>
/* per conntrack: protocol private data */
union ip_conntrack_proto {
/* insert conntrack proto private data here */
+ struct ip_ct_gre gre;
struct ip_ct_sctp sctp;
struct ip_ct_tcp tcp;
struct ip_ct_icmp icmp;
@@ -148,6 +150,7 @@ union ip_conntrack_expect_proto {
};
/* Add protocol helper include file here */
+#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
#include <linux/netfilter_ipv4/ip_conntrack_amanda.h>
#include <linux/netfilter_ipv4/ip_conntrack_ftp.h>
#include <linux/netfilter_ipv4/ip_conntrack_irc.h>
@@ -155,12 +158,20 @@ union ip_conntrack_expect_proto {
/* per conntrack: application helper private data */
union ip_conntrack_help {
/* insert conntrack helper private data (master) here */
+ struct ip_ct_pptp_master ct_pptp_info;
struct ip_ct_ftp_master ct_ftp_info;
struct ip_ct_irc_master ct_irc_info;
};
#ifdef CONFIG_IP_NF_NAT_NEEDED
#include <linux/netfilter_ipv4/ip_nat.h>
+#include <linux/netfilter_ipv4/ip_nat_pptp.h>
+
+/* per conntrack: nat application helper private data */
+union ip_conntrack_nat_help {
+ /* insert nat helper private data here */
+ struct ip_nat_pptp nat_pptp_info;
+};
#endif
#include <linux/types.h>
@@ -223,6 +234,7 @@ struct ip_conntrack
#ifdef CONFIG_IP_NF_NAT_NEEDED
struct {
struct ip_nat_info info;
+ union ip_conntrack_nat_help help;
#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE)
int masq_index;
@@ -372,7 +384,7 @@ extern struct ip_conntrack_expect *
__ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple);
extern struct ip_conntrack_expect *
-ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple);
+ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple);
extern struct ip_conntrack_tuple_hash *
__ip_conntrack_find(const struct ip_conntrack_tuple *tuple,
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_pptp.h b/include/linux/netfilter_ipv4/ip_conntrack_pptp.h
new file mode 100644
index 00000000000..389e3851d52
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_conntrack_pptp.h
@@ -0,0 +1,332 @@
+/* PPTP constants and structs */
+#ifndef _CONNTRACK_PPTP_H
+#define _CONNTRACK_PPTP_H
+
+/* state of the control session */
+enum pptp_ctrlsess_state {
+ PPTP_SESSION_NONE, /* no session present */
+ PPTP_SESSION_ERROR, /* some session error */
+ PPTP_SESSION_STOPREQ, /* stop_sess request seen */
+ PPTP_SESSION_REQUESTED, /* start_sess request seen */
+ PPTP_SESSION_CONFIRMED, /* session established */
+};
+
+/* state of the call inside the control session */
+enum pptp_ctrlcall_state {
+ PPTP_CALL_NONE,
+ PPTP_CALL_ERROR,
+ PPTP_CALL_OUT_REQ,
+ PPTP_CALL_OUT_CONF,
+ PPTP_CALL_IN_REQ,
+ PPTP_CALL_IN_REP,
+ PPTP_CALL_IN_CONF,
+ PPTP_CALL_CLEAR_REQ,
+};
+
+
+/* conntrack private data */
+struct ip_ct_pptp_master {
+ enum pptp_ctrlsess_state sstate; /* session state */
+
+ /* everything below is going to be per-expectation in newnat,
+ * since there could be more than one call within one session */
+ enum pptp_ctrlcall_state cstate; /* call state */
+ u_int16_t pac_call_id; /* call id of PAC, host byte order */
+ u_int16_t pns_call_id; /* call id of PNS, host byte order */
+
+ /* in pre-2.6.11 this used to be per-expect. Now it is per-conntrack
+ * and therefore imposes a fixed limit on the number of maps */
+ struct ip_ct_gre_keymap *keymap_orig, *keymap_reply;
+};
+
+/* conntrack_expect private member */
+struct ip_ct_pptp_expect {
+ enum pptp_ctrlcall_state cstate; /* call state */
+ u_int16_t pac_call_id; /* call id of PAC */
+ u_int16_t pns_call_id; /* call id of PNS */
+};
+
+
+#ifdef __KERNEL__
+
+#define IP_CONNTR_PPTP PPTP_CONTROL_PORT
+
+#define PPTP_CONTROL_PORT 1723
+
+#define PPTP_PACKET_CONTROL 1
+#define PPTP_PACKET_MGMT 2
+
+#define PPTP_MAGIC_COOKIE 0x1a2b3c4d
+
+struct pptp_pkt_hdr {
+ __u16 packetLength;
+ __u16 packetType;
+ __u32 magicCookie;
+};
+
+/* PptpControlMessageType values */
+#define PPTP_START_SESSION_REQUEST 1
+#define PPTP_START_SESSION_REPLY 2
+#define PPTP_STOP_SESSION_REQUEST 3
+#define PPTP_STOP_SESSION_REPLY 4
+#define PPTP_ECHO_REQUEST 5
+#define PPTP_ECHO_REPLY 6
+#define PPTP_OUT_CALL_REQUEST 7
+#define PPTP_OUT_CALL_REPLY 8
+#define PPTP_IN_CALL_REQUEST 9
+#define PPTP_IN_CALL_REPLY 10
+#define PPTP_IN_CALL_CONNECT 11
+#define PPTP_CALL_CLEAR_REQUEST 12
+#define PPTP_CALL_DISCONNECT_NOTIFY 13
+#define PPTP_WAN_ERROR_NOTIFY 14
+#define PPTP_SET_LINK_INFO 15
+
+#define PPTP_MSG_MAX 15
+
+/* PptpGeneralError values */
+#define PPTP_ERROR_CODE_NONE 0
+#define PPTP_NOT_CONNECTED 1
+#define PPTP_BAD_FORMAT 2
+#define PPTP_BAD_VALUE 3
+#define PPTP_NO_RESOURCE 4
+#define PPTP_BAD_CALLID 5
+#define PPTP_REMOVE_DEVICE_ERROR 6
+
+struct PptpControlHeader {
+ __u16 messageType;
+ __u16 reserved;
+};
+
+/* FramingCapability Bitmap Values */
+#define PPTP_FRAME_CAP_ASYNC 0x1
+#define PPTP_FRAME_CAP_SYNC 0x2
+
+/* BearerCapability Bitmap Values */
+#define PPTP_BEARER_CAP_ANALOG 0x1
+#define PPTP_BEARER_CAP_DIGITAL 0x2
+
+struct PptpStartSessionRequest {
+ __u16 protocolVersion;
+ __u8 reserved1;
+ __u8 reserved2;
+ __u32 framingCapability;
+ __u32 bearerCapability;
+ __u16 maxChannels;
+ __u16 firmwareRevision;
+ __u8 hostName[64];
+ __u8 vendorString[64];
+};
+
+/* PptpStartSessionResultCode Values */
+#define PPTP_START_OK 1
+#define PPTP_START_GENERAL_ERROR 2
+#define PPTP_START_ALREADY_CONNECTED 3
+#define PPTP_START_NOT_AUTHORIZED 4
+#define PPTP_START_UNKNOWN_PROTOCOL 5
+
+struct PptpStartSessionReply {
+ __u16 protocolVersion;
+ __u8 resultCode;
+ __u8 generalErrorCode;
+ __u32 framingCapability;
+ __u32 bearerCapability;
+ __u16 maxChannels;
+ __u16 firmwareRevision;
+ __u8 hostName[64];
+ __u8 vendorString[64];
+};
+
+/* PptpStopReasons */
+#define PPTP_STOP_NONE 1
+#define PPTP_STOP_PROTOCOL 2
+#define PPTP_STOP_LOCAL_SHUTDOWN 3
+
+struct PptpStopSessionRequest {
+ __u8 reason;
+};
+
+/* PptpStopSessionResultCode */
+#define PPTP_STOP_OK 1
+#define PPTP_STOP_GENERAL_ERROR 2
+
+struct PptpStopSessionReply {
+ __u8 resultCode;
+ __u8 generalErrorCode;
+};
+
+struct PptpEchoRequest {
+ __u32 identNumber;
+};
+
+/* PptpEchoReplyResultCode */
+#define PPTP_ECHO_OK 1
+#define PPTP_ECHO_GENERAL_ERROR 2
+
+struct PptpEchoReply {
+ __u32 identNumber;
+ __u8 resultCode;
+ __u8 generalErrorCode;
+ __u16 reserved;
+};
+
+/* PptpFramingType */
+#define PPTP_ASYNC_FRAMING 1
+#define PPTP_SYNC_FRAMING 2
+#define PPTP_DONT_CARE_FRAMING 3
+
+/* PptpCallBearerType */
+#define PPTP_ANALOG_TYPE 1
+#define PPTP_DIGITAL_TYPE 2
+#define PPTP_DONT_CARE_BEARER_TYPE 3
+
+struct PptpOutCallRequest {
+ __u16 callID;
+ __u16 callSerialNumber;
+ __u32 minBPS;
+ __u32 maxBPS;
+ __u32 bearerType;
+ __u32 framingType;
+ __u16 packetWindow;
+ __u16 packetProcDelay;
+ __u16 reserved1;
+ __u16 phoneNumberLength;
+ __u16 reserved2;
+ __u8 phoneNumber[64];
+ __u8 subAddress[64];
+};
+
+/* PptpCallResultCode */
+#define PPTP_OUTCALL_CONNECT 1
+#define PPTP_OUTCALL_GENERAL_ERROR 2
+#define PPTP_OUTCALL_NO_CARRIER 3
+#define PPTP_OUTCALL_BUSY 4
+#define PPTP_OUTCALL_NO_DIAL_TONE 5
+#define PPTP_OUTCALL_TIMEOUT 6
+#define PPTP_OUTCALL_DONT_ACCEPT 7
+
+struct PptpOutCallReply {
+ __u16 callID;
+ __u16 peersCallID;
+ __u8 resultCode;
+ __u8 generalErrorCode;
+ __u16 causeCode;
+ __u32 connectSpeed;
+ __u16 packetWindow;
+ __u16 packetProcDelay;
+ __u32 physChannelID;
+};
+
+struct PptpInCallRequest {
+ __u16 callID;
+ __u16 callSerialNumber;
+ __u32 callBearerType;
+ __u32 physChannelID;
+ __u16 dialedNumberLength;
+ __u16 dialingNumberLength;
+ __u8 dialedNumber[64];
+ __u8 dialingNumber[64];
+ __u8 subAddress[64];
+};
+
+/* PptpInCallResultCode */
+#define PPTP_INCALL_ACCEPT 1
+#define PPTP_INCALL_GENERAL_ERROR 2
+#define PPTP_INCALL_DONT_ACCEPT 3
+
+struct PptpInCallReply {
+ __u16 callID;
+ __u16 peersCallID;
+ __u8 resultCode;
+ __u8 generalErrorCode;
+ __u16 packetWindow;
+ __u16 packetProcDelay;
+ __u16 reserved;
+};
+
+struct PptpInCallConnected {
+ __u16 peersCallID;
+ __u16 reserved;
+ __u32 connectSpeed;
+ __u16 packetWindow;
+ __u16 packetProcDelay;
+ __u32 callFramingType;
+};
+
+struct PptpClearCallRequest {
+ __u16 callID;
+ __u16 reserved;
+};
+
+struct PptpCallDisconnectNotify {
+ __u16 callID;
+ __u8 resultCode;
+ __u8 generalErrorCode;
+ __u16 causeCode;
+ __u16 reserved;
+ __u8 callStatistics[128];
+};
+
+struct PptpWanErrorNotify {
+ __u16 peersCallID;
+ __u16 reserved;
+ __u32 crcErrors;
+ __u32 framingErrors;
+ __u32 hardwareOverRuns;
+ __u32 bufferOverRuns;
+ __u32 timeoutErrors;
+ __u32 alignmentErrors;
+};
+
+struct PptpSetLinkInfo {
+ __u16 peersCallID;
+ __u16 reserved;
+ __u32 sendAccm;
+ __u32 recvAccm;
+};
+
+
+struct pptp_priv_data {
+ __u16 call_id;
+ __u16 mcall_id;
+ __u16 pcall_id;
+};
+
+union pptp_ctrl_union {
+ struct PptpStartSessionRequest sreq;
+ struct PptpStartSessionReply srep;
+ struct PptpStopSessionRequest streq;
+ struct PptpStopSessionReply strep;
+ struct PptpOutCallRequest ocreq;
+ struct PptpOutCallReply ocack;
+ struct PptpInCallRequest icreq;
+ struct PptpInCallReply icack;
+ struct PptpInCallConnected iccon;
+ struct PptpClearCallRequest clrreq;
+ struct PptpCallDisconnectNotify disc;
+ struct PptpWanErrorNotify wanerr;
+ struct PptpSetLinkInfo setlink;
+};
+
+extern int
+(*ip_nat_pptp_hook_outbound)(struct sk_buff **pskb,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq);
+
+extern int
+(*ip_nat_pptp_hook_inbound)(struct sk_buff **pskb,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq);
+
+extern int
+(*ip_nat_pptp_hook_exp_gre)(struct ip_conntrack_expect *exp_orig,
+ struct ip_conntrack_expect *exp_reply);
+
+extern void
+(*ip_nat_pptp_hook_expectfn)(struct ip_conntrack *ct,
+ struct ip_conntrack_expect *exp);
+#endif /* __KERNEL__ */
+#endif /* _CONNTRACK_PPTP_H */
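
The ip_nat_pptp_hook_* pointers declared above are how the conntrack helper hands control packets to the NAT helper when it is loaded. A hedged sketch of how a NAT module might install and remove one of them; my_nat_outbound(), my_nat_init() and my_nat_fini() are hypothetical names, and the real registration lives in ip_nat_helper_pptp.c.

/* Illustrative sketch only: installing the outbound NAT hook declared above. */
static int my_nat_outbound(struct sk_buff **pskb, struct ip_conntrack *ct,
			   enum ip_conntrack_info ctinfo,
			   struct PptpControlHeader *ctlh,
			   union pptp_ctrl_union *pptpReq)
{
	/* rewrite the call IDs carried in *pskb here */
	return NF_ACCEPT;
}

static int __init my_nat_init(void)
{
	BUG_ON(ip_nat_pptp_hook_outbound != NULL);
	ip_nat_pptp_hook_outbound = my_nat_outbound;
	return 0;
}

static void __exit my_nat_fini(void)
{
	ip_nat_pptp_hook_outbound = NULL;
	synchronize_net();	/* wait until no CPU can still be running the hook */
}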
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h b/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h
new file mode 100644
index 00000000000..8d090ef82f5
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h
@@ -0,0 +1,114 @@
+#ifndef _CONNTRACK_PROTO_GRE_H
+#define _CONNTRACK_PROTO_GRE_H
+#include <asm/byteorder.h>
+
+/* GRE PROTOCOL HEADER */
+
+/* GRE Version field */
+#define GRE_VERSION_1701 0x0
+#define GRE_VERSION_PPTP 0x1
+
+/* GRE Protocol field */
+#define GRE_PROTOCOL_PPTP 0x880B
+
+/* GRE Flags */
+#define GRE_FLAG_C 0x80
+#define GRE_FLAG_R 0x40
+#define GRE_FLAG_K 0x20
+#define GRE_FLAG_S 0x10
+#define GRE_FLAG_A 0x80
+
+#define GRE_IS_C(f) ((f)&GRE_FLAG_C)
+#define GRE_IS_R(f) ((f)&GRE_FLAG_R)
+#define GRE_IS_K(f) ((f)&GRE_FLAG_K)
+#define GRE_IS_S(f) ((f)&GRE_FLAG_S)
+#define GRE_IS_A(f) ((f)&GRE_FLAG_A)
+
+/* GRE is a mess: Four different standards */
+struct gre_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u16 rec:3,
+ srr:1,
+ seq:1,
+ key:1,
+ routing:1,
+ csum:1,
+ version:3,
+ reserved:4,
+ ack:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u16 csum:1,
+ routing:1,
+ key:1,
+ seq:1,
+ srr:1,
+ rec:3,
+ ack:1,
+ reserved:4,
+ version:3;
+#else
+#error "Adjust your <asm/byteorder.h> defines"
+#endif
+ __u16 protocol;
+};
+
+/* modified GRE header for PPTP */
+struct gre_hdr_pptp {
+ __u8 flags; /* bitfield */
+ __u8 version; /* should be GRE_VERSION_PPTP */
+ __u16 protocol; /* should be GRE_PROTOCOL_PPTP */
+ __u16 payload_len; /* size of ppp payload, not inc. gre header */
+ __u16 call_id; /* peer's call_id for this session */
+ __u32 seq; /* sequence number. Present if S==1 */
+ __u32 seq; /* sequence number. Present if S==1 */
+ __u32 ack; /* seq number of highest packet received by */
+ /* sender in this session */
+};
+
+
+/* this is part of ip_conntrack */
+struct ip_ct_gre {
+ unsigned int stream_timeout;
+ unsigned int timeout;
+};
+
+#ifdef __KERNEL__
+struct ip_conntrack_expect;
+struct ip_conntrack;
+
+/* structure for original <-> reply keymap */
+struct ip_ct_gre_keymap {
+ struct list_head list;
+
+ struct ip_conntrack_tuple tuple;
+};
+
+/* add new tuple->key_reply pair to keymap */
+int ip_ct_gre_keymap_add(struct ip_conntrack *ct,
+ struct ip_conntrack_tuple *t,
+ int reply);
+
+/* delete keymap entries */
+void ip_ct_gre_keymap_destroy(struct ip_conntrack *ct);
+
+
+/* get pointer to gre key, if present */
+static inline u_int32_t *gre_key(struct gre_hdr *greh)
+{
+ if (!greh->key)
+ return NULL;
+ if (greh->csum || greh->routing)
+ return (u_int32_t *) (greh+sizeof(*greh)+4);
+ return (u_int32_t *) (greh+sizeof(*greh));
+}
+
+/* get pointer to gre csum, if present */
+static inline u_int16_t *gre_csum(struct gre_hdr *greh)
+{
+ if (!greh->csum)
+ return NULL;
+ return (u_int16_t *) (greh+sizeof(*greh));
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _CONNTRACK_PROTO_GRE_H */
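
The PPTP-specific GRE header above carries the peer's call ID and, per the flag macros, optional sequence/acknowledgement words. Two illustrative helpers for an already linearised and length-checked header; the names are hypothetical.

/* Illustrative only: field access on a validated struct gre_hdr_pptp. */
static int pptpgre_version_ok(const struct gre_hdr_pptp *pgre)
{
	/* low 3 bits of the version octet; the top bit is the A flag */
	return (pgre->version & 0x7) == GRE_VERSION_PPTP &&
	       pgre->protocol == htons(GRE_PROTOCOL_PPTP);
}

static u_int16_t pptpgre_call_id(const struct gre_hdr_pptp *pgre)
{
	return ntohs(pgre->call_id);	/* call_id is network byte order on the wire */
}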
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_tuple.h b/include/linux/netfilter_ipv4/ip_conntrack_tuple.h
index c33f0b5e0d0..14dc0f7b655 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_tuple.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_tuple.h
@@ -28,6 +28,9 @@ union ip_conntrack_manip_proto
struct {
u_int16_t port;
} sctp;
+ struct {
+ u_int16_t key; /* key is 32bit, pptp only uses 16 */
+ } gre;
};
/* The manipulable part of the tuple. */
@@ -61,6 +64,10 @@ struct ip_conntrack_tuple
struct {
u_int16_t port;
} sctp;
+ struct {
+ u_int16_t key; /* key is 32bit,
+ * pptp only uses 16 */
+ } gre;
} u;
/* The protocol. */
diff --git a/include/linux/netfilter_ipv4/ip_nat_pptp.h b/include/linux/netfilter_ipv4/ip_nat_pptp.h
new file mode 100644
index 00000000000..eaf66c2e8f9
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_nat_pptp.h
@@ -0,0 +1,11 @@
+/* PPTP constants and structs */
+#ifndef _NAT_PPTP_H
+#define _NAT_PPTP_H
+
+/* conntrack private data */
+struct ip_nat_pptp {
+ u_int16_t pns_call_id; /* NAT'ed PNS call id */
+ u_int16_t pac_call_id; /* NAT'ed PAC call id */
+};
+
+#endif /* _NAT_PPTP_H */
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 58c72a52dc6..59f70b34e02 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -455,6 +455,9 @@ extern unsigned int ip6t_do_table(struct sk_buff **pskb,
/* Check for an extension */
extern int ip6t_ext_hdr(u8 nexthdr);
+/* find specified header and get offset to it */
+extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
+ u8 target);
#define IP6T_ALIGN(s) (((s) + (__alignof__(struct ip6t_entry)-1)) & ~(__alignof__(struct ip6t_entry)-1))
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 916144be208..69313ba7505 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -439,6 +439,8 @@ enum scsi_host_state {
SHOST_CANCEL,
SHOST_DEL,
SHOST_RECOVERY,
+ SHOST_CANCEL_RECOVERY,
+ SHOST_DEL_RECOVERY,
};
struct Scsi_Host {
@@ -465,8 +467,6 @@ struct Scsi_Host {
struct list_head eh_cmd_q;
struct task_struct * ehandler; /* Error recovery thread. */
- struct semaphore * eh_wait; /* The error recovery thread waits
- on this. */
struct semaphore * eh_action; /* Wait for specific actions on the
host. */
unsigned int eh_active:1; /* Indicates the eh thread is awake and active if
@@ -621,6 +621,13 @@ static inline struct Scsi_Host *dev_to_shost(struct device *dev)
return container_of(dev, struct Scsi_Host, shost_gendev);
}
+static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
+{
+ return shost->shost_state == SHOST_RECOVERY ||
+ shost->shost_state == SHOST_CANCEL_RECOVERY ||
+ shost->shost_state == SHOST_DEL_RECOVERY;
+}
+
extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);
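
The new scsi_host_in_recovery() helper is what the call-site conversions earlier in this diff (scsi_ioctl.c, scsi_lib.c, sg.c) switch to, so recovery checks keep working once the combined CANCEL/DEL recovery states exist. An illustrative caller, mirroring the non-blocking ioctl paths above; the function name is hypothetical.

/* Illustrative only: a non-blocking path checking the host state through the
 * helper rather than comparing shost_state against SHOST_RECOVERY directly. */
static int my_nonblock_check(struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(sdev->host))
		return -ENODEV;		/* host is busy with error recovery */
	return 0;
}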
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 115db056dc6..b0d44543737 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -103,8 +103,8 @@ enum fc_port_state {
incapable of reporting */
#define FC_PORTSPEED_1GBIT 1
#define FC_PORTSPEED_2GBIT 2
-#define FC_PORTSPEED_10GBIT 4
-#define FC_PORTSPEED_4GBIT 8
+#define FC_PORTSPEED_4GBIT 4
+#define FC_PORTSPEED_10GBIT 8
#define FC_PORTSPEED_NOT_NEGOTIATED (1 << 15) /* Speed not established */
/*
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 145f5cde96c..b7486488967 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -120,7 +120,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
unsigned short vid;
struct net_device_stats *stats;
unsigned short vlan_TCI;
- unsigned short proto;
+ __be16 proto;
/* vlan_TCI = ntohs(get_unaligned(&vhdr->h_vlan_TCI)); */
vlan_TCI = ntohs(vhdr->h_vlan_TCI);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 1b63b482416..90ae70870a1 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -43,7 +43,7 @@
* 2 of the License, or (at your option) any later version.
*/
-#define VERSION "0.403"
+#define VERSION "0.404"
#include <linux/config.h>
#include <asm/uaccess.h>
@@ -224,7 +224,7 @@ static inline int tkey_mismatch(t_key a, int offset, t_key b)
Consider a node 'n' and its parent 'tp'.
If n is a leaf, every bit in its key is significant. Its presence is
- necessitaded by path compression, since during a tree traversal (when
+ necessitated by path compression, since during a tree traversal (when
searching for a leaf - unless we are doing an insertion) we will completely
ignore all skipped bits we encounter. Thus we need to verify, at the end of
a potentially successful search, that we have indeed been walking the
@@ -836,11 +836,12 @@ static void trie_init(struct trie *t)
#endif
}
-/* readside most use rcu_read_lock currently dump routines
+/* readside must use rcu_read_lock currently dump routines
via get_fa_head and dump */
-static struct leaf_info *find_leaf_info(struct hlist_head *head, int plen)
+static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
{
+ struct hlist_head *head = &l->list;
struct hlist_node *node;
struct leaf_info *li;
@@ -853,7 +854,7 @@ static struct leaf_info *find_leaf_info(struct hlist_head *head, int plen)
static inline struct list_head * get_fa_head(struct leaf *l, int plen)
{
- struct leaf_info *li = find_leaf_info(&l->list, plen);
+ struct leaf_info *li = find_leaf_info(l, plen);
if (!li)
return NULL;
@@ -1248,7 +1249,7 @@ err:
}
-/* should be clalled with rcu_read_lock */
+/* should be called with rcu_read_lock */
static inline int check_leaf(struct trie *t, struct leaf *l,
t_key key, int *plen, const struct flowi *flp,
struct fib_result *res)
@@ -1590,7 +1591,7 @@ fn_trie_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id, nlhdr, req);
l = fib_find_node(t, key);
- li = find_leaf_info(&l->list, plen);
+ li = find_leaf_info(l, plen);
list_del_rcu(&fa->fa_list);
@@ -1714,7 +1715,6 @@ static int fn_trie_flush(struct fib_table *tb)
t->revision++;
- rcu_read_lock();
for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
found += trie_flush_leaf(t, l);
@@ -1722,7 +1722,6 @@ static int fn_trie_flush(struct fib_table *tb)
trie_leaf_remove(t, ll->key);
ll = l;
}
- rcu_read_unlock();
if (ll && hlist_empty(&ll->list))
trie_leaf_remove(t, ll->key);
@@ -2029,7 +2028,7 @@ static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
iter->tnode = (struct tnode *) n;
iter->trie = t;
iter->index = 0;
- iter->depth = 0;
+ iter->depth = 1;
return n;
}
return NULL;
@@ -2274,11 +2273,12 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, "<local>:\n");
else
seq_puts(seq, "<main>:\n");
- } else {
- seq_indent(seq, iter->depth-1);
- seq_printf(seq, " +-- %d.%d.%d.%d/%d\n",
- NIPQUAD(prf), tn->pos);
- }
+ }
+ seq_indent(seq, iter->depth-1);
+ seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n",
+ NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
+ tn->empty_children);
+
} else {
struct leaf *l = (struct leaf *) n;
int i;
@@ -2287,7 +2287,7 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
seq_indent(seq, iter->depth);
seq_printf(seq, " |-- %d.%d.%d.%d\n", NIPQUAD(val));
for (i = 32; i >= 0; i--) {
- struct leaf_info *li = find_leaf_info(&l->list, i);
+ struct leaf_info *li = find_leaf_info(l, i);
if (li) {
struct fib_alias *fa;
list_for_each_entry_rcu(fa, &li->falh, fa_list) {
@@ -2383,7 +2383,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
return 0;
for (i=32; i>=0; i--) {
- struct leaf_info *li = find_leaf_info(&l->list, i);
+ struct leaf_info *li = find_leaf_info(l, i);
struct fib_alias *fa;
u32 mask, prefix;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index e2162d27007..3cf9b451675 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -137,6 +137,22 @@ config IP_NF_AMANDA
To compile it as a module, choose M here. If unsure, say Y.
+config IP_NF_PPTP
+ tristate 'PPTP protocol support'
+ help
+ This module adds support for PPTP (Point to Point Tunnelling
+ Protocol, RFC2637) connection tracking and NAT.
+
+ If you are running PPTP sessions over a stateful firewall or NAT
+ box, you may want to enable this feature.
+
+ Please note that not all PPTP modes of operation are supported yet.
+ For more info, read the top of the file
+ net/ipv4/netfilter/ip_conntrack_pptp.c
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
config IP_NF_QUEUE
tristate "IP Userspace queueing via NETLINK (OBSOLETE)"
help
@@ -621,6 +637,12 @@ config IP_NF_NAT_AMANDA
default IP_NF_NAT if IP_NF_AMANDA=y
default m if IP_NF_AMANDA=m
+config IP_NF_NAT_PPTP
+ tristate
+ depends on IP_NF_NAT!=n && IP_NF_PPTP!=n
+ default IP_NF_NAT if IP_NF_PPTP=y
+ default m if IP_NF_PPTP=m
+
# mangle + specific targets
config IP_NF_MANGLE
tristate "Packet mangling"
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 1ba0db74681..3d45d3c0283 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -6,6 +6,9 @@
ip_conntrack-objs := ip_conntrack_standalone.o ip_conntrack_core.o ip_conntrack_proto_generic.o ip_conntrack_proto_tcp.o ip_conntrack_proto_udp.o ip_conntrack_proto_icmp.o
iptable_nat-objs := ip_nat_standalone.o ip_nat_rule.o ip_nat_core.o ip_nat_helper.o ip_nat_proto_unknown.o ip_nat_proto_tcp.o ip_nat_proto_udp.o ip_nat_proto_icmp.o
+ip_conntrack_pptp-objs := ip_conntrack_helper_pptp.o ip_conntrack_proto_gre.o
+ip_nat_pptp-objs := ip_nat_helper_pptp.o ip_nat_proto_gre.o
+
# connection tracking
obj-$(CONFIG_IP_NF_CONNTRACK) += ip_conntrack.o
@@ -17,6 +20,7 @@ obj-$(CONFIG_IP_NF_CONNTRACK_NETLINK) += ip_conntrack_netlink.o
obj-$(CONFIG_IP_NF_CT_PROTO_SCTP) += ip_conntrack_proto_sctp.o
# connection tracking helpers
+obj-$(CONFIG_IP_NF_PPTP) += ip_conntrack_pptp.o
obj-$(CONFIG_IP_NF_AMANDA) += ip_conntrack_amanda.o
obj-$(CONFIG_IP_NF_TFTP) += ip_conntrack_tftp.o
obj-$(CONFIG_IP_NF_FTP) += ip_conntrack_ftp.o
@@ -24,6 +28,7 @@ obj-$(CONFIG_IP_NF_IRC) += ip_conntrack_irc.o
obj-$(CONFIG_IP_NF_NETBIOS_NS) += ip_conntrack_netbios_ns.o
# NAT helpers
+obj-$(CONFIG_IP_NF_NAT_PPTP) += ip_nat_pptp.o
obj-$(CONFIG_IP_NF_NAT_AMANDA) += ip_nat_amanda.o
obj-$(CONFIG_IP_NF_NAT_TFTP) += ip_nat_tftp.o
obj-$(CONFIG_IP_NF_NAT_FTP) += ip_nat_ftp.o
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index f8cd8e42961..c1f82e0c81c 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -233,7 +233,7 @@ __ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple)
/* Just find a expectation corresponding to a tuple. */
struct ip_conntrack_expect *
-ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple)
+ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple)
{
struct ip_conntrack_expect *i;
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
new file mode 100644
index 00000000000..79db5b70d5f
--- /dev/null
+++ b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
@@ -0,0 +1,805 @@
+/*
+ * ip_conntrack_pptp.c - Version 3.0
+ *
+ * Connection tracking support for PPTP (Point to Point Tunneling Protocol).
+ * PPTP is a protocol for creating virtual private networks.
+ * It is a specification defined by Microsoft and some vendors
+ * working with Microsoft. PPTP is built on top of a modified
+ * version of the Internet Generic Routing Encapsulation Protocol.
+ * GRE is defined in RFC 1701 and RFC 1702. Documentation of
+ * PPTP can be found in RFC 2637
+ *
+ * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ *
+ * Limitations:
+ * - We blindly assume that control connections are always
+ * established in PNS->PAC direction. This is a violation
+ * of RFC 2637
+ * - We can only support one single call within each session
+ *
+ * TODO:
+ * - testing of incoming PPTP calls
+ *
+ * Changes:
+ * 2002-02-05 - Version 1.3
+ * - Call ip_conntrack_unexpect_related() from
+ * pptp_destroy_siblings() to destroy expectations in case
+ * CALL_DISCONNECT_NOTIFY or tcp fin packet was seen
+ * (Philip Craig <philipc@snapgear.com>)
+ * - Add Version information at module loadtime
+ * 2002-02-10 - Version 1.6
+ * - move to C99 style initializers
+ * - remove second expectation if first arrives
+ * 2004-10-22 - Version 2.0
+ * - merge Mandrake's 2.6.x port with recent 2.6.x API changes
+ * - fix lots of linear skb assumptions from Mandrake's port
+ * 2005-06-10 - Version 2.1
+ * - use ip_conntrack_expect_free() instead of kfree() on the
+ * expect's (which are from the slab for quite some time)
+ * 2005-06-10 - Version 3.0
+ * - port helper to post-2.6.11 API changes,
+ * funded by Oxcoda NetBox Blue (http://www.netboxblue.com/)
+ * 2005-07-30 - Version 3.1
+ * - port helper to 2.6.13 API changes
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/ip.h>
+#include <net/checksum.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <linux/netfilter_ipv4/ip_conntrack_core.h>
+#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
+#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
+#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
+
+#define IP_CT_PPTP_VERSION "3.1"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
+MODULE_DESCRIPTION("Netfilter connection tracking helper module for PPTP");
+
+static DEFINE_SPINLOCK(ip_pptp_lock);
+
+int
+(*ip_nat_pptp_hook_outbound)(struct sk_buff **pskb,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq);
+
+int
+(*ip_nat_pptp_hook_inbound)(struct sk_buff **pskb,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq);
+
+int
+(*ip_nat_pptp_hook_exp_gre)(struct ip_conntrack_expect *expect_orig,
+ struct ip_conntrack_expect *expect_reply);
+
+void
+(*ip_nat_pptp_hook_expectfn)(struct ip_conntrack *ct,
+ struct ip_conntrack_expect *exp);
+
+#if 0
+/* PptpControlMessageType names */
+const char *pptp_msg_name[] = {
+ "UNKNOWN_MESSAGE",
+ "START_SESSION_REQUEST",
+ "START_SESSION_REPLY",
+ "STOP_SESSION_REQUEST",
+ "STOP_SESSION_REPLY",
+ "ECHO_REQUEST",
+ "ECHO_REPLY",
+ "OUT_CALL_REQUEST",
+ "OUT_CALL_REPLY",
+ "IN_CALL_REQUEST",
+ "IN_CALL_REPLY",
+ "IN_CALL_CONNECT",
+ "CALL_CLEAR_REQUEST",
+ "CALL_DISCONNECT_NOTIFY",
+ "WAN_ERROR_NOTIFY",
+ "SET_LINK_INFO"
+};
+EXPORT_SYMBOL(pptp_msg_name);
+#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args)
+#else
+#define DEBUGP(format, args...)
+#endif
+
+#define SECS *HZ
+#define MINS * 60 SECS
+#define HOURS * 60 MINS
+
+#define PPTP_GRE_TIMEOUT (10 MINS)
+#define PPTP_GRE_STREAM_TIMEOUT (5 HOURS)
+
+static void pptp_expectfn(struct ip_conntrack *ct,
+ struct ip_conntrack_expect *exp)
+{
+ DEBUGP("increasing timeouts\n");
+
+ /* increase timeout of GRE data channel conntrack entry */
+ ct->proto.gre.timeout = PPTP_GRE_TIMEOUT;
+ ct->proto.gre.stream_timeout = PPTP_GRE_STREAM_TIMEOUT;
+
+ /* Can you see how rusty this code is, compared with the pre-2.6.11
+ * one? That's what happened to my shiny newnat of 2002 ;( -HW */
+
+ if (!ip_nat_pptp_hook_expectfn) {
+ struct ip_conntrack_tuple inv_t;
+ struct ip_conntrack_expect *exp_other;
+
+ /* obviously this tuple inversion only works until you do NAT */
+ invert_tuplepr(&inv_t, &exp->tuple);
+ DEBUGP("trying to unexpect other dir: ");
+ DUMP_TUPLE(&inv_t);
+
+ exp_other = ip_conntrack_expect_find(&inv_t);
+ if (exp_other) {
+ /* delete other expectation. */
+ DEBUGP("found\n");
+ ip_conntrack_unexpect_related(exp_other);
+ ip_conntrack_expect_put(exp_other);
+ } else {
+ DEBUGP("not found\n");
+ }
+ } else {
+ /* we need more than simple inversion */
+ ip_nat_pptp_hook_expectfn(ct, exp);
+ }
+}
+
+static int destroy_sibling_or_exp(const struct ip_conntrack_tuple *t)
+{
+ struct ip_conntrack_tuple_hash *h;
+ struct ip_conntrack_expect *exp;
+
+ DEBUGP("trying to timeout ct or exp for tuple ");
+ DUMP_TUPLE(t);
+
+ h = ip_conntrack_find_get(t, NULL);
+ if (h) {
+ struct ip_conntrack *sibling = tuplehash_to_ctrack(h);
+ DEBUGP("setting timeout of conntrack %p to 0\n", sibling);
+ sibling->proto.gre.timeout = 0;
+ sibling->proto.gre.stream_timeout = 0;
+ /* refresh_acct will not modify counters if skb == NULL */
+ if (del_timer(&sibling->timeout))
+ sibling->timeout.function((unsigned long)sibling);
+ ip_conntrack_put(sibling);
+ return 1;
+ } else {
+ exp = ip_conntrack_expect_find(t);
+ if (exp) {
+ DEBUGP("unexpect_related of expect %p\n", exp);
+ ip_conntrack_unexpect_related(exp);
+ ip_conntrack_expect_put(exp);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+
+/* timeout GRE data connections */
+static void pptp_destroy_siblings(struct ip_conntrack *ct)
+{
+ struct ip_conntrack_tuple t;
+
+ /* Since ct->sibling_list has literally rusted away in 2.6.11,
+ * we now need another way to find out about our sibling
+ * conntrack and expects... -HW */
+
+ /* try original (pns->pac) tuple */
+ memcpy(&t, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, sizeof(t));
+ t.dst.protonum = IPPROTO_GRE;
+ t.src.u.gre.key = htons(ct->help.ct_pptp_info.pns_call_id);
+ t.dst.u.gre.key = htons(ct->help.ct_pptp_info.pac_call_id);
+
+ if (!destroy_sibling_or_exp(&t))
+ DEBUGP("failed to timeout original pns->pac ct/exp\n");
+
+ /* try reply (pac->pns) tuple */
+ memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t));
+ t.dst.protonum = IPPROTO_GRE;
+ t.src.u.gre.key = htons(ct->help.ct_pptp_info.pac_call_id);
+ t.dst.u.gre.key = htons(ct->help.ct_pptp_info.pns_call_id);
+
+ if (!destroy_sibling_or_exp(&t))
+ DEBUGP("failed to timeout reply pac->pns ct/exp\n");
+}
+
+/* expect GRE connections (PNS->PAC and PAC->PNS direction) */
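+/* Two expectations are registered per accepted call, keyed on the GRE call
+ * IDs (one per direction). Unless the NAT helper takes over via
+ * ip_nat_pptp_hook_exp_gre, the matching GRE keymap entries are added here
+ * as well. */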
+static inline int
+exp_gre(struct ip_conntrack *master,
+ u_int32_t seq,
+ u_int16_t callid,
+ u_int16_t peer_callid)
+{
+ struct ip_conntrack_tuple inv_tuple;
+ struct ip_conntrack_tuple exp_tuples[] = {
+ /* tuple in original direction, PNS->PAC */
+ { .src = { .ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip,
+ .u = { .gre = { .key = peer_callid } }
+ },
+ .dst = { .ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip,
+ .u = { .gre = { .key = callid } },
+ .protonum = IPPROTO_GRE
+ },
+ },
+ /* tuple in reply direction, PAC->PNS */
+ { .src = { .ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip,
+ .u = { .gre = { .key = callid } }
+ },
+ .dst = { .ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip,
+ .u = { .gre = { .key = peer_callid } },
+ .protonum = IPPROTO_GRE
+ },
+ }
+ };
+ struct ip_conntrack_expect *exp_orig, *exp_reply;
+ int ret = 1;
+
+ exp_orig = ip_conntrack_expect_alloc(master);
+ if (exp_orig == NULL)
+ goto out;
+
+ exp_reply = ip_conntrack_expect_alloc(master);
+ if (exp_reply == NULL)
+ goto out_put_orig;
+
+ memcpy(&exp_orig->tuple, &exp_tuples[0], sizeof(exp_orig->tuple));
+
+ exp_orig->mask.src.ip = 0xffffffff;
+ exp_orig->mask.src.u.all = 0;
+ exp_orig->mask.dst.u.all = 0;
+ exp_orig->mask.dst.u.gre.key = 0xffff;
+ exp_orig->mask.dst.ip = 0xffffffff;
+ exp_orig->mask.dst.protonum = 0xff;
+
+ exp_orig->master = master;
+ exp_orig->expectfn = pptp_expectfn;
+ exp_orig->flags = 0;
+
+ exp_orig->dir = IP_CT_DIR_ORIGINAL;
+
+ /* both expectations are identical apart from tuple */
+ memcpy(exp_reply, exp_orig, sizeof(*exp_reply));
+ memcpy(&exp_reply->tuple, &exp_tuples[1], sizeof(exp_reply->tuple));
+
+ exp_reply->dir = !exp_orig->dir;
+
+ if (ip_nat_pptp_hook_exp_gre)
+ ret = ip_nat_pptp_hook_exp_gre(exp_orig, exp_reply);
+ else {
+
+ DEBUGP("calling expect_related PNS->PAC");
+ DUMP_TUPLE(&exp_orig->tuple);
+
+ if (ip_conntrack_expect_related(exp_orig) != 0) {
+ DEBUGP("cannot expect_related()\n");
+ goto out_put_both;
+ }
+
+ DEBUGP("calling expect_related PAC->PNS");
+ DUMP_TUPLE(&exp_reply->tuple);
+
+ if (ip_conntrack_expect_related(exp_reply) != 0) {
+ DEBUGP("cannot expect_related()\n");
+ goto out_unexpect_orig;
+ }
+
+ /* Add GRE keymap entries */
+ if (ip_ct_gre_keymap_add(master, &exp_reply->tuple, 0) != 0) {
+ DEBUGP("cannot keymap_add() exp\n");
+ goto out_unexpect_both;
+ }
+
+ invert_tuplepr(&inv_tuple, &exp_reply->tuple);
+ if (ip_ct_gre_keymap_add(master, &inv_tuple, 1) != 0) {
+ ip_ct_gre_keymap_destroy(master);
+ DEBUGP("cannot keymap_add() exp_inv\n");
+ goto out_unexpect_both;
+ }
+ ret = 0;
+ }
+
+out_put_both:
+ ip_conntrack_expect_put(exp_reply);
+out_put_orig:
+ ip_conntrack_expect_put(exp_orig);
+out:
+ return ret;
+
+out_unexpect_both:
+ ip_conntrack_unexpect_related(exp_reply);
+out_unexpect_orig:
+ ip_conntrack_unexpect_related(exp_orig);
+ goto out_put_both;
+}
+
+static inline int
+pptp_inbound_pkt(struct sk_buff **pskb,
+ struct tcphdr *tcph,
+ unsigned int nexthdr_off,
+ unsigned int datalen,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo)
+{
+ struct PptpControlHeader _ctlh, *ctlh;
+ unsigned int reqlen;
+ union pptp_ctrl_union _pptpReq, *pptpReq;
+ struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
+ u_int16_t msg, *cid, *pcid;
+ u_int32_t seq;
+
+ ctlh = skb_header_pointer(*pskb, nexthdr_off, sizeof(_ctlh), &_ctlh);
+ if (!ctlh) {
+ DEBUGP("error during skb_header_pointer\n");
+ return NF_ACCEPT;
+ }
+ nexthdr_off += sizeof(_ctlh);
+ datalen -= sizeof(_ctlh);
+
+ reqlen = datalen;
+ if (reqlen > sizeof(*pptpReq))
+ reqlen = sizeof(*pptpReq);
+ pptpReq = skb_header_pointer(*pskb, nexthdr_off, reqlen, &_pptpReq);
+ if (!pptpReq) {
+ DEBUGP("error during skb_header_pointer\n");
+ return NF_ACCEPT;
+ }
+
+ msg = ntohs(ctlh->messageType);
+ DEBUGP("inbound control message %s\n", pptp_msg_name[msg]);
+
+ switch (msg) {
+ case PPTP_START_SESSION_REPLY:
+ if (reqlen < sizeof(_pptpReq.srep)) {
+ DEBUGP("%s: short packet\n", pptp_msg_name[msg]);
+ break;
+ }
+
+ /* server confirms new control session */
+ if (info->sstate < PPTP_SESSION_REQUESTED) {
+ DEBUGP("%s without START_SESS_REQUEST\n",
+ pptp_msg_name[msg]);
+ break;
+ }
+ if (pptpReq->srep.resultCode == PPTP_START_OK)
+ info->sstate = PPTP_SESSION_CONFIRMED;
+ else
+ info->sstate = PPTP_SESSION_ERROR;
+ break;
+
+ case PPTP_STOP_SESSION_REPLY:
+ if (reqlen < sizeof(_pptpReq.strep)) {
+ DEBUGP("%s: short packet\n", pptp_msg_name[msg]);
+ break;
+ }
+
+ /* server confirms end of control session */
+ if (info->sstate > PPTP_SESSION_STOPREQ) {
+ DEBUGP("%s without STOP_SESS_REQUEST\n",
+ pptp_msg_name[msg]);
+ break;
+ }
+ if (pptpReq->strep.resultCode == PPTP_STOP_OK)
+ info->sstate = PPTP_SESSION_NONE;
+ else
+ info->sstate = PPTP_SESSION_ERROR;
+ break;
+
+ case PPTP_OUT_CALL_REPLY:
+ if (reqlen < sizeof(_pptpReq.ocack)) {
+ DEBUGP("%s: short packet\n", pptp_msg_name[msg]);
+ break;
+ }
+
+ /* server accepted call, we now expect GRE frames */
+ if (info->sstate != PPTP_SESSION_CONFIRMED) {
+ DEBUGP("%s but no session\n", pptp_msg_name[msg]);
+ break;
+ }
+ if (info->cstate != PPTP_CALL_OUT_REQ &&
+ info->cstate != PPTP_CALL_OUT_CONF) {
+ DEBUGP("%s without OUTCALL_REQ\n", pptp_msg_name[msg]);
+ break;
+ }
+ if (pptpReq->ocack.resultCode != PPTP_OUTCALL_CONNECT) {
+ info->cstate = PPTP_CALL_NONE;
+ break;
+ }
+
+ cid = &pptpReq->ocack.callID;
+ pcid = &pptpReq->ocack.peersCallID;
+
+ info->pac_call_id = ntohs(*cid);
+
+ if (htons(info->pns_call_id) != *pcid) {
+ DEBUGP("%s for unknown callid %u\n",
+ pptp_msg_name[msg], ntohs(*pcid));
+ break;
+ }
+
+ DEBUGP("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg],
+ ntohs(*cid), ntohs(*pcid));
+
+ info->cstate = PPTP_CALL_OUT_CONF;
+
+ seq = ntohl(tcph->seq) + sizeof(struct pptp_pkt_hdr)
+ + sizeof(struct PptpControlHeader)
+ + ((void *)pcid - (void *)pptpReq);
+
+ if (exp_gre(ct, seq, *cid, *pcid) != 0)
+ printk("ip_conntrack_pptp: error during exp_gre\n");
+ break;
+
+ case PPTP_IN_CALL_REQUEST:
+ if (reqlen < sizeof(_pptpReq.icack)) {
+ DEBUGP("%s: short packet\n", pptp_msg_name[msg]);
+ break;
+ }
+
+ /* server tells us about incoming call request */
+ if (info->sstate != PPTP_SESSION_CONFIRMED) {
+ DEBUGP("%s but no session\n", pptp_msg_name[msg]);
+ break;
+ }
+ pcid = &pptpReq->icack.peersCallID;
+ DEBUGP("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(*pcid));
+ info->cstate = PPTP_CALL_IN_REQ;
+ info->pac_call_id = ntohs(*pcid);
+ break;
+
+ case PPTP_IN_CALL_CONNECT:
+ if (reqlen < sizeof(_pptpReq.iccon)) {
+ DEBUGP("%s: short packet\n", pptp_msg_name[msg]);
+ break;
+ }
+
+ /* server tells us about incoming call established */
+ if (info->sstate != PPTP_SESSION_CONFIRMED) {
+ DEBUGP("%s but no session\n", pptp_msg_name[msg]);
+ break;
+ }
+ if (info->cstate != PPTP_CALL_IN_REP
+ && info->cstate != PPTP_CALL_IN_CONF) {
+ DEBUGP("%s but never sent IN_CALL_REPLY\n",
+ pptp_msg_name[msg]);
+ break;
+ }
+
+ pcid = &pptpReq->iccon.peersCallID;
+ cid = &info->pac_call_id;
+
+ if (info->pns_call_id != ntohs(*pcid)) {
+ DEBUGP("%s for unknown CallID %u\n",
+ pptp_msg_name[msg], ntohs(*cid));
+ break;
+ }
+
+ DEBUGP("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(*pcid));
+ info->cstate = PPTP_CALL_IN_CONF;
+
+ /* we expect a GRE connection from PAC to PNS */
+ seq = ntohl(tcph->seq) + sizeof(struct pptp_pkt_hdr)
+ + sizeof(struct PptpControlHeader)
+ + ((void *)pcid - (void *)pptpReq);
+
+ if (exp_gre(ct, seq, *cid, *pcid) != 0)
+ printk("ip_conntrack_pptp: error during exp_gre\n");
+
+ break;
+
+ case PPTP_CALL_DISCONNECT_NOTIFY:
+ if (reqlen < sizeof(_pptpReq.disc)) {
+ DEBUGP("%s: short packet\n", pptp_msg_name[msg]);
+ break;
+ }
+
+ /* server confirms disconnect */
+ cid = &pptpReq->disc.callID;
+ DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(*cid));
+ info->cstate = PPTP_CALL_NONE;
+
+ /* untrack this call id, unexpect GRE packets */
+ pptp_destroy_siblings(ct);
+ break;
+
+ case PPTP_WAN_ERROR_NOTIFY:
+ break;
+
+ case PPTP_ECHO_REQUEST:
+ case PPTP_ECHO_REPLY:
+ /* I don't have to explain these ;) */
+ break;
+ default:
+ DEBUGP("invalid %s (TY=%d)\n", (msg <= PPTP_MSG_MAX)
+ ? pptp_msg_name[msg]:pptp_msg_name[0], msg);
+ break;
+ }
+
+
+ if (ip_nat_pptp_hook_inbound)
+ return ip_nat_pptp_hook_inbound(pskb, ct, ctinfo, ctlh,
+ pptpReq);
+
+ return NF_ACCEPT;
+
+}
+
+static inline int
+pptp_outbound_pkt(struct sk_buff **pskb,
+ struct tcphdr *tcph,
+ unsigned int nexthdr_off,
+ unsigned int datalen,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo)
+{
+ struct PptpControlHeader _ctlh, *ctlh;
+ unsigned int reqlen;
+ union pptp_ctrl_union _pptpReq, *pptpReq;
+ struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
+ u_int16_t msg, *cid, *pcid;
+
+ ctlh = skb_header_pointer(*pskb, nexthdr_off, sizeof(_ctlh), &_ctlh);
+ if (!ctlh)
+ return NF_ACCEPT;
+ nexthdr_off += sizeof(_ctlh);
+ datalen -= sizeof(_ctlh);
+
+ reqlen = datalen;
+ if (reqlen > sizeof(*pptpReq))
+ reqlen = sizeof(*pptpReq);
+ pptpReq = skb_header_pointer(*pskb, nexthdr_off, reqlen, &_pptpReq);
+ if (!pptpReq)
+ return NF_ACCEPT;
+
+ msg = ntohs(ctlh->messageType);
+ DEBUGP("outbound control message %s\n", pptp_msg_name[msg]);
+
+ switch (msg) {
+ case PPTP_START_SESSION_REQUEST:
+ /* client requests for new control session */
+ if (info->sstate != PPTP_SESSION_NONE) {
+ DEBUGP("%s but we already have one",
+ pptp_msg_name[msg]);
+ }
+ info->sstate = PPTP_SESSION_REQUESTED;
+ break;
+ case PPTP_STOP_SESSION_REQUEST:
+ /* client requests end of control session */
+ info->sstate = PPTP_SESSION_STOPREQ;
+ break;
+
+ case PPTP_OUT_CALL_REQUEST:
+ if (reqlen < sizeof(_pptpReq.ocreq)) {
+ DEBUGP("%s: short packet\n", pptp_msg_name[msg]);
+ /* FIXME: break; */
+ }
+
+ /* client initiating connection to server */
+ if (info->sstate != PPTP_SESSION_CONFIRMED) {
+ DEBUGP("%s but no session\n",
+ pptp_msg_name[msg]);
+ break;
+ }
+ info->cstate = PPTP_CALL_OUT_REQ;
+ /* track PNS call id */
+ cid = &pptpReq->ocreq.callID;
+ DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(*cid));
+ info->pns_call_id = ntohs(*cid);
+ break;
+ case PPTP_IN_CALL_REPLY:
+ if (reqlen < sizeof(_pptpReq.icack)) {
+ DEBUGP("%s: short packet\n", pptp_msg_name[msg]);
+ break;
+ }
+
+ /* client answers incoming call */
+ if (info->cstate != PPTP_CALL_IN_REQ
+ && info->cstate != PPTP_CALL_IN_REP) {
+ DEBUGP("%s without incall_req\n",
+ pptp_msg_name[msg]);
+ break;
+ }
+ if (pptpReq->icack.resultCode != PPTP_INCALL_ACCEPT) {
+ info->cstate = PPTP_CALL_NONE;
+ break;
+ }
+ pcid = &pptpReq->icack.peersCallID;
+ if (info->pac_call_id != ntohs(*pcid)) {
+ DEBUGP("%s for unknown call %u\n",
+ pptp_msg_name[msg], ntohs(*pcid));
+ break;
+ }
+ DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(*pcid));
+ /* part two of the three-way handshake */
+ info->cstate = PPTP_CALL_IN_REP;
+ info->pns_call_id = ntohs(pptpReq->icack.callID);
+ break;
+
+ case PPTP_CALL_CLEAR_REQUEST:
+ /* client requests hangup of call */
+ if (info->sstate != PPTP_SESSION_CONFIRMED) {
+ DEBUGP("CLEAR_CALL but no session\n");
+ break;
+ }
+ /* FUTURE: iterate over all calls and check if
+ * call ID is valid. We don't do this without newnat,
+ * because we only know about last call */
+ info->cstate = PPTP_CALL_CLEAR_REQ;
+ break;
+ case PPTP_SET_LINK_INFO:
+ break;
+ case PPTP_ECHO_REQUEST:
+ case PPTP_ECHO_REPLY:
+ /* I don't have to explain these ;) */
+ break;
+ default:
+ DEBUGP("invalid %s (TY=%d)\n", (msg <= PPTP_MSG_MAX)?
+ pptp_msg_name[msg]:pptp_msg_name[0], msg);
+ /* unknown: no need to create GRE masq table entry */
+ break;
+ }
+
+ if (ip_nat_pptp_hook_outbound)
+ return ip_nat_pptp_hook_outbound(pskb, ct, ctinfo, ctlh,
+ pptpReq);
+
+ return NF_ACCEPT;
+}
+
+
+/* track caller id inside control connection, call expect_related */
+static int
+conntrack_pptp_help(struct sk_buff **pskb,
+ struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
+
+{
+ struct pptp_pkt_hdr _pptph, *pptph;
+ struct tcphdr _tcph, *tcph;
+ u_int32_t tcplen = (*pskb)->len - (*pskb)->nh.iph->ihl * 4;
+ u_int32_t datalen;
+ int dir = CTINFO2DIR(ctinfo);
+ struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
+ unsigned int nexthdr_off;
+
+ int oldsstate, oldcstate;
+ int ret;
+
+ /* don't do any tracking before tcp handshake complete */
+ if (ctinfo != IP_CT_ESTABLISHED
+ && ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) {
+ DEBUGP("ctinfo = %u, skipping\n", ctinfo);
+ return NF_ACCEPT;
+ }
+
+ nexthdr_off = (*pskb)->nh.iph->ihl*4;
+ tcph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_tcph), &_tcph);
+ BUG_ON(!tcph);
+ nexthdr_off += tcph->doff * 4;
+ datalen = tcplen - tcph->doff * 4;
+
+ if (tcph->fin || tcph->rst) {
+ DEBUGP("RST/FIN received, timeouting GRE\n");
+ /* can't do this after real newnat */
+ info->cstate = PPTP_CALL_NONE;
+
+ /* untrack this call id, unexpect GRE packets */
+ pptp_destroy_siblings(ct);
+ }
+
+ pptph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_pptph), &_pptph);
+ if (!pptph) {
+ DEBUGP("no full PPTP header, can't track\n");
+ return NF_ACCEPT;
+ }
+ nexthdr_off += sizeof(_pptph);
+ datalen -= sizeof(_pptph);
+
+ /* if it's not a control message we can't do anything with it */
+ if (ntohs(pptph->packetType) != PPTP_PACKET_CONTROL ||
+ ntohl(pptph->magicCookie) != PPTP_MAGIC_COOKIE) {
+ DEBUGP("not a control packet\n");
+ return NF_ACCEPT;
+ }
+
+ oldsstate = info->sstate;
+ oldcstate = info->cstate;
+
+ spin_lock_bh(&ip_pptp_lock);
+
+ /* FIXME: We just blindly assume that the control connection is always
+ * established from PNS->PAC. However, RFC makes no guarantee */
+ if (dir == IP_CT_DIR_ORIGINAL)
+ /* client -> server (PNS -> PAC) */
+ ret = pptp_outbound_pkt(pskb, tcph, nexthdr_off, datalen, ct,
+ ctinfo);
+ else
+ /* server -> client (PAC -> PNS) */
+ ret = pptp_inbound_pkt(pskb, tcph, nexthdr_off, datalen, ct,
+ ctinfo);
+ DEBUGP("sstate: %d->%d, cstate: %d->%d\n",
+ oldsstate, info->sstate, oldcstate, info->cstate);
+ spin_unlock_bh(&ip_pptp_lock);
+
+ return ret;
+}
+
+/* control protocol helper */
+static struct ip_conntrack_helper pptp = {
+ .list = { NULL, NULL },
+ .name = "pptp",
+ .me = THIS_MODULE,
+ .max_expected = 2,
+ .timeout = 5 * 60,
+ .tuple = { .src = { .ip = 0,
+ .u = { .tcp = { .port =
+ __constant_htons(PPTP_CONTROL_PORT) } }
+ },
+ .dst = { .ip = 0,
+ .u = { .all = 0 },
+ .protonum = IPPROTO_TCP
+ }
+ },
+ .mask = { .src = { .ip = 0,
+ .u = { .tcp = { .port = 0xffff } }
+ },
+ .dst = { .ip = 0,
+ .u = { .all = 0 },
+ .protonum = 0xff
+ }
+ },
+ .help = conntrack_pptp_help
+};
+
+extern void __exit ip_ct_proto_gre_fini(void);
+extern int __init ip_ct_proto_gre_init(void);
+
+/* ip_conntrack_pptp initialization */
+static int __init init(void)
+{
+ int retcode;
+
+ retcode = ip_ct_proto_gre_init();
+ if (retcode < 0)
+ return retcode;
+
+ DEBUGP(" registering helper\n");
+ if ((retcode = ip_conntrack_helper_register(&pptp))) {
+ printk(KERN_ERR "Unable to register conntrack application "
+ "helper for pptp: %d\n", retcode);
+ ip_ct_proto_gre_fini();
+ return retcode;
+ }
+
+ printk("ip_conntrack_pptp version %s loaded\n", IP_CT_PPTP_VERSION);
+ return 0;
+}
+
+static void __exit fini(void)
+{
+ ip_conntrack_helper_unregister(&pptp);
+ ip_ct_proto_gre_fini();
+ printk("ip_conntrack_pptp version %s unloaded\n", IP_CT_PPTP_VERSION);
+}
+
+module_init(init);
+module_exit(fini);
+
+EXPORT_SYMBOL(ip_nat_pptp_hook_outbound);
+EXPORT_SYMBOL(ip_nat_pptp_hook_inbound);
+EXPORT_SYMBOL(ip_nat_pptp_hook_exp_gre);
+EXPORT_SYMBOL(ip_nat_pptp_hook_expectfn);
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index 15aef356474..b08a432efcf 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -1270,7 +1270,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
if (err < 0)
return err;
- exp = ip_conntrack_expect_find_get(&tuple);
+ exp = ip_conntrack_expect_find(&tuple);
if (!exp)
return -ENOENT;
@@ -1318,7 +1318,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
return err;
/* bump usage count to 2 */
- exp = ip_conntrack_expect_find_get(&tuple);
+ exp = ip_conntrack_expect_find(&tuple);
if (!exp)
return -ENOENT;
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_gre.c b/net/ipv4/netfilter/ip_conntrack_proto_gre.c
new file mode 100644
index 00000000000..de3cb9db6f8
--- /dev/null
+++ b/net/ipv4/netfilter/ip_conntrack_proto_gre.c
@@ -0,0 +1,327 @@
+/*
+ * ip_conntrack_proto_gre.c - Version 3.0
+ *
+ * Connection tracking protocol helper module for GRE.
+ *
+ * GRE is a generic encapsulation protocol, which is generally not very
+ * well suited for NAT, as it has no protocol-specific part such as port numbers.
+ *
+ * It has an optional key field, which may help us distinguish two
+ * connections between the same two hosts.
+ *
+ * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784
+ *
+ * PPTP is built on top of a modified version of GRE, and has a mandatory
+ * field called "CallID", which serves us for the same purpose as the key
+ * field in plain GRE.
+ *
+ * Documentation about PPTP can be found in RFC 2637
+ *
+ * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ *
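+ *
+ * Tracking sketch (informal, see gre_pkt_to_tuple() below): the CallID
+ * found in a PPTP GRE packet becomes tuple->dst.u.gre.key, while
+ * tuple->src.u.gre.key has to be recovered from a keymap maintained by
+ * the PPTP control-connection helper, since the "source" CallID is never
+ * carried in the GRE packet itself.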
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/netfilter.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/list.h>
+
+static DEFINE_RWLOCK(ip_ct_gre_lock);
+#define ASSERT_READ_LOCK(x)
+#define ASSERT_WRITE_LOCK(x)
+
+#include <linux/netfilter_ipv4/listhelp.h>
+#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
+#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
+#include <linux/netfilter_ipv4/ip_conntrack_core.h>
+
+#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
+#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
+MODULE_DESCRIPTION("netfilter connection tracking protocol helper for GRE");
+
+/* shamelessly stolen from ip_conntrack_proto_udp.c */
+#define GRE_TIMEOUT (30*HZ)
+#define GRE_STREAM_TIMEOUT (180*HZ)
+
+#if 0
+#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args)
+#define DUMP_TUPLE_GRE(x) printk("%u.%u.%u.%u:0x%x -> %u.%u.%u.%u:0x%x\n", \
+ NIPQUAD((x)->src.ip), ntohs((x)->src.u.gre.key), \
+ NIPQUAD((x)->dst.ip), ntohs((x)->dst.u.gre.key))
+#else
+#define DEBUGP(x, args...)
+#define DUMP_TUPLE_GRE(x)
+#endif
+
+/* GRE KEYMAP HANDLING FUNCTIONS */
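+/* Each keymap entry remembers which source key (CallID) belongs to a given
+ * (src ip, dst ip, dst key) combination; gre_keymap_lookup() consults this
+ * list because that key cannot be read from the GRE packet itself. */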
+static LIST_HEAD(gre_keymap_list);
+
+static inline int gre_key_cmpfn(const struct ip_ct_gre_keymap *km,
+ const struct ip_conntrack_tuple *t)
+{
+ return ((km->tuple.src.ip == t->src.ip) &&
+ (km->tuple.dst.ip == t->dst.ip) &&
+ (km->tuple.dst.protonum == t->dst.protonum) &&
+ (km->tuple.dst.u.all == t->dst.u.all));
+}
+
+/* look up the source key for a given tuple */
+static u_int32_t gre_keymap_lookup(struct ip_conntrack_tuple *t)
+{
+ struct ip_ct_gre_keymap *km;
+ u_int32_t key = 0;
+
+ read_lock_bh(&ip_ct_gre_lock);
+ km = LIST_FIND(&gre_keymap_list, gre_key_cmpfn,
+ struct ip_ct_gre_keymap *, t);
+ if (km)
+ key = km->tuple.src.u.gre.key;
+ read_unlock_bh(&ip_ct_gre_lock);
+
+ DEBUGP("lookup src key 0x%x up key for ", key);
+ DUMP_TUPLE_GRE(t);
+
+ return key;
+}
+
+/* add a single keymap entry, associate with specified master ct */
+int
+ip_ct_gre_keymap_add(struct ip_conntrack *ct,
+ struct ip_conntrack_tuple *t, int reply)
+{
+ struct ip_ct_gre_keymap **exist_km, *km, *old;
+
+ if (!ct->helper || strcmp(ct->helper->name, "pptp")) {
+ DEBUGP("refusing to add GRE keymap to non-pptp session\n");
+ return -1;
+ }
+
+ if (!reply)
+ exist_km = &ct->help.ct_pptp_info.keymap_orig;
+ else
+ exist_km = &ct->help.ct_pptp_info.keymap_reply;
+
+ if (*exist_km) {
+ /* check whether it's a retransmission */
+ old = LIST_FIND(&gre_keymap_list, gre_key_cmpfn,
+ struct ip_ct_gre_keymap *, t);
+ if (old == *exist_km) {
+ DEBUGP("retransmission\n");
+ return 0;
+ }
+
+ DEBUGP("trying to override keymap_%s for ct %p\n",
+ reply? "reply":"orig", ct);
+ return -EEXIST;
+ }
+
+ km = kmalloc(sizeof(*km), GFP_ATOMIC);
+ if (!km)
+ return -ENOMEM;
+
+ memcpy(&km->tuple, t, sizeof(*t));
+ *exist_km = km;
+
+ DEBUGP("adding new entry %p: ", km);
+ DUMP_TUPLE_GRE(&km->tuple);
+
+ write_lock_bh(&ip_ct_gre_lock);
+ list_append(&gre_keymap_list, km);
+ write_unlock_bh(&ip_ct_gre_lock);
+
+ return 0;
+}
+
+/* destroy the keymap entries associated with specified master ct */
+void ip_ct_gre_keymap_destroy(struct ip_conntrack *ct)
+{
+ DEBUGP("entering for ct %p\n", ct);
+
+ if (!ct->helper || strcmp(ct->helper->name, "pptp")) {
+ DEBUGP("refusing to destroy GRE keymap to non-pptp session\n");
+ return;
+ }
+
+ write_lock_bh(&ip_ct_gre_lock);
+ if (ct->help.ct_pptp_info.keymap_orig) {
+ DEBUGP("removing %p from list\n",
+ ct->help.ct_pptp_info.keymap_orig);
+ list_del(&ct->help.ct_pptp_info.keymap_orig->list);
+ kfree(ct->help.ct_pptp_info.keymap_orig);
+ ct->help.ct_pptp_info.keymap_orig = NULL;
+ }
+ if (ct->help.ct_pptp_info.keymap_reply) {
+ DEBUGP("removing %p from list\n",
+ ct->help.ct_pptp_info.keymap_reply);
+ list_del(&ct->help.ct_pptp_info.keymap_reply->list);
+ kfree(ct->help.ct_pptp_info.keymap_reply);
+ ct->help.ct_pptp_info.keymap_reply = NULL;
+ }
+ write_unlock_bh(&ip_ct_gre_lock);
+}
+
+
+/* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */
+
+/* invert gre part of tuple */
+static int gre_invert_tuple(struct ip_conntrack_tuple *tuple,
+ const struct ip_conntrack_tuple *orig)
+{
+ tuple->dst.u.gre.key = orig->src.u.gre.key;
+ tuple->src.u.gre.key = orig->dst.u.gre.key;
+
+ return 1;
+}
+
+/* gre hdr info to tuple */
+static int gre_pkt_to_tuple(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct ip_conntrack_tuple *tuple)
+{
+ struct gre_hdr_pptp _pgrehdr, *pgrehdr;
+ u_int32_t srckey;
+ struct gre_hdr _grehdr, *grehdr;
+
+ /* first only delinearize old RFC1701 GRE header */
+ grehdr = skb_header_pointer(skb, dataoff, sizeof(_grehdr), &_grehdr);
+ if (!grehdr || grehdr->version != GRE_VERSION_PPTP) {
+ /* try to behave like "ip_conntrack_proto_generic" */
+ tuple->src.u.all = 0;
+ tuple->dst.u.all = 0;
+ return 1;
+ }
+
+ /* PPTP header is variable length, only need up to the call_id field */
+ pgrehdr = skb_header_pointer(skb, dataoff, 8, &_pgrehdr);
+ if (!pgrehdr)
+ return 1;
+
+ if (ntohs(grehdr->protocol) != GRE_PROTOCOL_PPTP) {
+ DEBUGP("GRE_VERSION_PPTP but unknown proto\n");
+ return 0;
+ }
+
+ tuple->dst.u.gre.key = pgrehdr->call_id;
+ srckey = gre_keymap_lookup(tuple);
+ tuple->src.u.gre.key = srckey;
+
+ return 1;
+}
+
+/* print gre part of tuple */
+static int gre_print_tuple(struct seq_file *s,
+ const struct ip_conntrack_tuple *tuple)
+{
+ return seq_printf(s, "srckey=0x%x dstkey=0x%x ",
+ ntohs(tuple->src.u.gre.key),
+ ntohs(tuple->dst.u.gre.key));
+}
+
+/* print private data for conntrack */
+static int gre_print_conntrack(struct seq_file *s,
+ const struct ip_conntrack *ct)
+{
+ return seq_printf(s, "timeout=%u, stream_timeout=%u ",
+ (ct->proto.gre.timeout / HZ),
+ (ct->proto.gre.stream_timeout / HZ));
+}
+
+/* Returns verdict for packet, and may modify conntrack */
+static int gre_packet(struct ip_conntrack *ct,
+ const struct sk_buff *skb,
+ enum ip_conntrack_info conntrackinfo)
+{
+ /* If we've seen traffic both ways, this is a GRE connection.
+ * Extend timeout. */
+ if (ct->status & IPS_SEEN_REPLY) {
+ ip_ct_refresh_acct(ct, conntrackinfo, skb,
+ ct->proto.gre.stream_timeout);
+ /* Also, more likely to be important, and not a probe. */
+ set_bit(IPS_ASSURED_BIT, &ct->status);
+ } else
+ ip_ct_refresh_acct(ct, conntrackinfo, skb,
+ ct->proto.gre.timeout);
+
+ return NF_ACCEPT;
+}
+
+/* Called when a new connection for this protocol found. */
+static int gre_new(struct ip_conntrack *ct,
+ const struct sk_buff *skb)
+{
+ DEBUGP(": ");
+ DUMP_TUPLE_GRE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+
+ /* initialize to sane value. Ideally a conntrack helper
+ * (e.g. in case of pptp) is increasing them */
+ ct->proto.gre.stream_timeout = GRE_STREAM_TIMEOUT;
+ ct->proto.gre.timeout = GRE_TIMEOUT;
+
+ return 1;
+}
+
+/* Called when a conntrack entry has already been removed from the hashes
+ * and is about to be deleted from memory */
+static void gre_destroy(struct ip_conntrack *ct)
+{
+ struct ip_conntrack *master = ct->master;
+ DEBUGP(" entering\n");
+
+ if (!master)
+ DEBUGP("no master !?!\n");
+ else
+ ip_ct_gre_keymap_destroy(master);
+}
+
+/* protocol helper struct */
+static struct ip_conntrack_protocol gre = {
+ .proto = IPPROTO_GRE,
+ .name = "gre",
+ .pkt_to_tuple = gre_pkt_to_tuple,
+ .invert_tuple = gre_invert_tuple,
+ .print_tuple = gre_print_tuple,
+ .print_conntrack = gre_print_conntrack,
+ .packet = gre_packet,
+ .new = gre_new,
+ .destroy = gre_destroy,
+ .me = THIS_MODULE,
+#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
+ defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+ .tuple_to_nfattr = ip_ct_port_tuple_to_nfattr,
+ .nfattr_to_tuple = ip_ct_port_nfattr_to_tuple,
+#endif
+};
+
+/* ip_conntrack_proto_gre initialization */
+int __init ip_ct_proto_gre_init(void)
+{
+ return ip_conntrack_protocol_register(&gre);
+}
+
+void __exit ip_ct_proto_gre_fini(void)
+{
+ struct list_head *pos, *n;
+
+ /* delete all keymap entries */
+ write_lock_bh(&ip_ct_gre_lock);
+ list_for_each_safe(pos, n, &gre_keymap_list) {
+ DEBUGP("deleting keymap %p at module unload time\n", pos);
+ list_del(pos);
+ kfree(pos);
+ }
+ write_unlock_bh(&ip_ct_gre_lock);
+
+ ip_conntrack_protocol_unregister(&gre);
+}
+
+EXPORT_SYMBOL(ip_ct_gre_keymap_add);
+EXPORT_SYMBOL(ip_ct_gre_keymap_destroy);
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
index ae3e3e655db..d3c7808010e 100644
--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -993,11 +993,11 @@ EXPORT_SYMBOL(ip_ct_refresh_acct);
EXPORT_SYMBOL(ip_conntrack_expect_alloc);
EXPORT_SYMBOL(ip_conntrack_expect_put);
-EXPORT_SYMBOL_GPL(ip_conntrack_expect_find_get);
+EXPORT_SYMBOL_GPL(__ip_conntrack_expect_find);
+EXPORT_SYMBOL_GPL(ip_conntrack_expect_find);
EXPORT_SYMBOL(ip_conntrack_expect_related);
EXPORT_SYMBOL(ip_conntrack_unexpect_related);
EXPORT_SYMBOL_GPL(ip_conntrack_expect_list);
-EXPORT_SYMBOL_GPL(__ip_conntrack_expect_find);
EXPORT_SYMBOL_GPL(ip_ct_unlink_expect);
EXPORT_SYMBOL(ip_conntrack_tuple_taken);
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c
index 1adedb743f6..c3ea891d38e 100644
--- a/net/ipv4/netfilter/ip_nat_core.c
+++ b/net/ipv4/netfilter/ip_nat_core.c
@@ -578,6 +578,8 @@ ip_nat_port_nfattr_to_range(struct nfattr *tb[], struct ip_nat_range *range)
return ret;
}
+EXPORT_SYMBOL_GPL(ip_nat_port_nfattr_to_range);
+EXPORT_SYMBOL_GPL(ip_nat_port_range_to_nfattr);
#endif
int __init ip_nat_init(void)
diff --git a/net/ipv4/netfilter/ip_nat_helper_pptp.c b/net/ipv4/netfilter/ip_nat_helper_pptp.c
new file mode 100644
index 00000000000..3cdd0684d30
--- /dev/null
+++ b/net/ipv4/netfilter/ip_nat_helper_pptp.c
@@ -0,0 +1,401 @@
+/*
+ * ip_nat_pptp.c - Version 3.0
+ *
+ * NAT support for PPTP (Point to Point Tunneling Protocol).
+ * PPTP is a protocol for creating virtual private networks.
+ * It is a specification defined by Microsoft and some vendors
+ * working with Microsoft. PPTP is built on top of a modified
+ * version of the Internet Generic Routing Encapsulation Protocol.
+ * GRE is defined in RFC 1701 and RFC 1702. Documentation of
+ * PPTP can be found in RFC 2637
+ *
+ * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ *
+ * TODO: - NAT to a unique tuple, not to TCP source port
+ * (needs netfilter tuple reservation)
+ *
+ * Changes:
+ * 2002-02-10 - Version 1.3
+ * - Use ip_nat_mangle_tcp_packet() because of cloned skb's
+ * in local connections (Philip Craig <philipc@snapgear.com>)
+ * - add checks for magicCookie and pptp version
+ * - make argument list of pptp_{out,in}bound_packet() shorter
+ * - move to C99 style initializers
+ * - print version number at module loadtime
+ * 2003-09-22 - Version 1.5
+ * - use SNATed tcp sourceport as callid, since we get called before
+ * TCP header is mangled (Philip Craig <philipc@snapgear.com>)
+ * 2004-10-22 - Version 2.0
+ * - kernel 2.6.x version
+ * 2005-06-10 - Version 3.0
+ * - kernel >= 2.6.11 version,
+ * funded by Oxcoda NetBox Blue (http://www.netboxblue.com/)
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter_ipv4/ip_nat.h>
+#include <linux/netfilter_ipv4/ip_nat_rule.h>
+#include <linux/netfilter_ipv4/ip_nat_helper.h>
+#include <linux/netfilter_ipv4/ip_nat_pptp.h>
+#include <linux/netfilter_ipv4/ip_conntrack_core.h>
+#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
+#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
+#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
+
+#define IP_NAT_PPTP_VERSION "3.0"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
+MODULE_DESCRIPTION("Netfilter NAT helper module for PPTP");
+
+
+#if 0
+extern const char *pptp_msg_name[];
+#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, \
+ __FUNCTION__, ## args)
+#else
+#define DEBUGP(format, args...)
+#endif
+
+static void pptp_nat_expected(struct ip_conntrack *ct,
+ struct ip_conntrack_expect *exp)
+{
+ struct ip_conntrack *master = ct->master;
+ struct ip_conntrack_expect *other_exp;
+ struct ip_conntrack_tuple t;
+ struct ip_ct_pptp_master *ct_pptp_info;
+ struct ip_nat_pptp *nat_pptp_info;
+
+ ct_pptp_info = &master->help.ct_pptp_info;
+ nat_pptp_info = &master->nat.help.nat_pptp_info;
+
+ /* And here goes the grand finale of corrosion... */
+
+ if (exp->dir == IP_CT_DIR_ORIGINAL) {
+ DEBUGP("we are PNS->PAC\n");
+ /* therefore, build tuple for PAC->PNS */
+ t.src.ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip;
+ t.src.u.gre.key = htons(master->help.ct_pptp_info.pac_call_id);
+ t.dst.ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip;
+ t.dst.u.gre.key = htons(master->help.ct_pptp_info.pns_call_id);
+ t.dst.protonum = IPPROTO_GRE;
+ } else {
+ DEBUGP("we are PAC->PNS\n");
+ /* build tuple for PNS->PAC */
+ t.src.ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
+ t.src.u.gre.key =
+ htons(master->nat.help.nat_pptp_info.pns_call_id);
+ t.dst.ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip;
+ t.dst.u.gre.key =
+ htons(master->nat.help.nat_pptp_info.pac_call_id);
+ t.dst.protonum = IPPROTO_GRE;
+ }
+
+ DEBUGP("trying to unexpect other dir: ");
+ DUMP_TUPLE(&t);
+ other_exp = ip_conntrack_expect_find(&t);
+ if (other_exp) {
+ ip_conntrack_unexpect_related(other_exp);
+ ip_conntrack_expect_put(other_exp);
+ DEBUGP("success\n");
+ } else {
+ DEBUGP("not found!\n");
+ }
+
+ ip_nat_follow_master(ct, exp);
+}
+
+/* outbound packets == from PNS to PAC */
+static int
+pptp_outbound_pkt(struct sk_buff **pskb,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq)
+
+{
+ struct ip_ct_pptp_master *ct_pptp_info = &ct->help.ct_pptp_info;
+ struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info;
+
+ u_int16_t msg, *cid = NULL, new_callid;
+
+ new_callid = htons(ct_pptp_info->pns_call_id);
+
+ switch (msg = ntohs(ctlh->messageType)) {
+ case PPTP_OUT_CALL_REQUEST:
+ cid = &pptpReq->ocreq.callID;
+ /* FIXME: ideally we would want to reserve a call ID
+ * here. current netfilter NAT core is not able to do
+ * this :( For now we use TCP source port. This breaks
+ * multiple calls within one control session */
+
+ /* save original call ID in nat_info */
+ nat_pptp_info->pns_call_id = ct_pptp_info->pns_call_id;
+
+ /* don't use tcph->source since we are at a DSTmanip
+ * hook (e.g. PREROUTING) and pkt is not mangled yet */
+ new_callid = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port;
+
+ /* save new call ID in ct info */
+ ct_pptp_info->pns_call_id = ntohs(new_callid);
+ break;
+ case PPTP_IN_CALL_REPLY:
+ cid = &pptpReq->icack.callID;
+ break;
+ case PPTP_CALL_CLEAR_REQUEST:
+ cid = &pptpReq->clrreq.callID;
+ break;
+ default:
+ DEBUGP("unknown outbound packet 0x%04x:%s\n", msg,
+ (msg <= PPTP_MSG_MAX)?
+ pptp_msg_name[msg]:pptp_msg_name[0]);
+ /* fall through */
+
+ case PPTP_SET_LINK_INFO:
+ /* only need to NAT in case PAC is behind NAT box */
+ case PPTP_START_SESSION_REQUEST:
+ case PPTP_START_SESSION_REPLY:
+ case PPTP_STOP_SESSION_REQUEST:
+ case PPTP_STOP_SESSION_REPLY:
+ case PPTP_ECHO_REQUEST:
+ case PPTP_ECHO_REPLY:
+ /* no need to alter packet */
+ return NF_ACCEPT;
+ }
+
+ /* only OUT_CALL_REQUEST, IN_CALL_REPLY, CALL_CLEAR_REQUEST pass
+ * down to here */
+
+ IP_NF_ASSERT(cid);
+
+ DEBUGP("altering call id from 0x%04x to 0x%04x\n",
+ ntohs(*cid), ntohs(new_callid));
+
+ /* mangle packet */
+ if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
+ (void *)cid - ((void *)ctlh - sizeof(struct pptp_pkt_hdr)),
+ sizeof(new_callid),
+ (char *)&new_callid,
+ sizeof(new_callid)) == 0)
+ return NF_DROP;
+
+ return NF_ACCEPT;
+}
+
+static int
+pptp_exp_gre(struct ip_conntrack_expect *expect_orig,
+ struct ip_conntrack_expect *expect_reply)
+{
+ struct ip_ct_pptp_master *ct_pptp_info =
+ &expect_orig->master->help.ct_pptp_info;
+ struct ip_nat_pptp *nat_pptp_info =
+ &expect_orig->master->nat.help.nat_pptp_info;
+
+ struct ip_conntrack *ct = expect_orig->master;
+
+ struct ip_conntrack_tuple inv_t;
+ struct ip_conntrack_tuple *orig_t, *reply_t;
+
+ /* save original PAC call ID in nat_info */
+ nat_pptp_info->pac_call_id = ct_pptp_info->pac_call_id;
+
+ /* alter expectation */
+ orig_t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+ reply_t = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+ /* alter expectation for PNS->PAC direction */
+ invert_tuplepr(&inv_t, &expect_orig->tuple);
+ expect_orig->saved_proto.gre.key = htons(nat_pptp_info->pac_call_id);
+ expect_orig->tuple.src.u.gre.key = htons(nat_pptp_info->pns_call_id);
+ expect_orig->tuple.dst.u.gre.key = htons(ct_pptp_info->pac_call_id);
+ inv_t.src.ip = reply_t->src.ip;
+ inv_t.dst.ip = reply_t->dst.ip;
+ inv_t.src.u.gre.key = htons(nat_pptp_info->pac_call_id);
+ inv_t.dst.u.gre.key = htons(ct_pptp_info->pns_call_id);
+
+ if (!ip_conntrack_expect_related(expect_orig)) {
+ DEBUGP("successfully registered expect\n");
+ } else {
+ DEBUGP("can't expect_related(expect_orig)\n");
+ return 1;
+ }
+
+ /* alter expectation for PAC->PNS direction */
+ invert_tuplepr(&inv_t, &expect_reply->tuple);
+ expect_reply->saved_proto.gre.key = htons(nat_pptp_info->pns_call_id);
+ expect_reply->tuple.src.u.gre.key = htons(nat_pptp_info->pac_call_id);
+ expect_reply->tuple.dst.u.gre.key = htons(ct_pptp_info->pns_call_id);
+ inv_t.src.ip = orig_t->src.ip;
+ inv_t.dst.ip = orig_t->dst.ip;
+ inv_t.src.u.gre.key = htons(nat_pptp_info->pns_call_id);
+ inv_t.dst.u.gre.key = htons(ct_pptp_info->pac_call_id);
+
+ if (!ip_conntrack_expect_related(expect_reply)) {
+ DEBUGP("successfully registered expect\n");
+ } else {
+ DEBUGP("can't expect_related(expect_reply)\n");
+ ip_conntrack_unexpect_related(expect_orig);
+ return 1;
+ }
+
+ if (ip_ct_gre_keymap_add(ct, &expect_reply->tuple, 0) < 0) {
+ DEBUGP("can't register original keymap\n");
+ ip_conntrack_unexpect_related(expect_orig);
+ ip_conntrack_unexpect_related(expect_reply);
+ return 1;
+ }
+
+ if (ip_ct_gre_keymap_add(ct, &inv_t, 1) < 0) {
+ DEBUGP("can't register reply keymap\n");
+ ip_conntrack_unexpect_related(expect_orig);
+ ip_conntrack_unexpect_related(expect_reply);
+ ip_ct_gre_keymap_destroy(ct);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* inbound packets == from PAC to PNS */
+static int
+pptp_inbound_pkt(struct sk_buff **pskb,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq)
+{
+ struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info;
+ u_int16_t msg, new_cid = 0, new_pcid, *pcid = NULL, *cid = NULL;
+
+ int ret = NF_ACCEPT, rv;
+
+ new_pcid = htons(nat_pptp_info->pns_call_id);
+
+ switch (msg = ntohs(ctlh->messageType)) {
+ case PPTP_OUT_CALL_REPLY:
+ pcid = &pptpReq->ocack.peersCallID;
+ cid = &pptpReq->ocack.callID;
+ break;
+ case PPTP_IN_CALL_CONNECT:
+ pcid = &pptpReq->iccon.peersCallID;
+ break;
+ case PPTP_IN_CALL_REQUEST:
+ /* only need to nat in case PAC is behind NAT box */
+ break;
+ case PPTP_WAN_ERROR_NOTIFY:
+ pcid = &pptpReq->wanerr.peersCallID;
+ break;
+ case PPTP_CALL_DISCONNECT_NOTIFY:
+ pcid = &pptpReq->disc.callID;
+ break;
+ case PPTP_SET_LINK_INFO:
+ pcid = &pptpReq->setlink.peersCallID;
+ break;
+
+ default:
+ DEBUGP("unknown inbound packet %s\n", (msg <= PPTP_MSG_MAX)?
+ pptp_msg_name[msg]:pptp_msg_name[0]);
+ /* fall through */
+
+ case PPTP_START_SESSION_REQUEST:
+ case PPTP_START_SESSION_REPLY:
+ case PPTP_STOP_SESSION_REQUEST:
+ case PPTP_STOP_SESSION_REPLY:
+ case PPTP_ECHO_REQUEST:
+ case PPTP_ECHO_REPLY:
+ /* no need to alter packet */
+ return NF_ACCEPT;
+ }
+
+ /* only OUT_CALL_REPLY, IN_CALL_CONNECT, IN_CALL_REQUEST,
+ * WAN_ERROR_NOTIFY, CALL_DISCONNECT_NOTIFY pass down here */
+
+ /* mangle packet */
+ IP_NF_ASSERT(pcid);
+ DEBUGP("altering peer call id from 0x%04x to 0x%04x\n",
+ ntohs(*pcid), ntohs(new_pcid));
+
+ rv = ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
+ (void *)pcid - ((void *)ctlh - sizeof(struct pptp_pkt_hdr)),
+ sizeof(new_pcid), (char *)&new_pcid,
+ sizeof(new_pcid));
+ if (rv != NF_ACCEPT)
+ return rv;
+
+ if (new_cid) {
+ IP_NF_ASSERT(cid);
+ DEBUGP("altering call id from 0x%04x to 0x%04x\n",
+ ntohs(*cid), ntohs(new_cid));
+ rv = ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
+ (void *)cid - ((void *)ctlh - sizeof(struct pptp_pkt_hdr)),
+ sizeof(new_cid),
+ (char *)&new_cid,
+ sizeof(new_cid));
+ if (rv != NF_ACCEPT)
+ return rv;
+ }
+
+ /* check for earlier return value of 'switch' above */
+ if (ret != NF_ACCEPT)
+ return ret;
+
+ /* great, at least we don't need to resize packets */
+ return NF_ACCEPT;
+}
+
+
+extern int __init ip_nat_proto_gre_init(void);
+extern void __exit ip_nat_proto_gre_fini(void);
+
+static int __init init(void)
+{
+ int ret;
+
+ DEBUGP("%s: registering NAT helper\n", __FILE__);
+
+ ret = ip_nat_proto_gre_init();
+ if (ret < 0)
+ return ret;
+
+ BUG_ON(ip_nat_pptp_hook_outbound);
+ ip_nat_pptp_hook_outbound = &pptp_outbound_pkt;
+
+ BUG_ON(ip_nat_pptp_hook_inbound);
+ ip_nat_pptp_hook_inbound = &pptp_inbound_pkt;
+
+ BUG_ON(ip_nat_pptp_hook_exp_gre);
+ ip_nat_pptp_hook_exp_gre = &pptp_exp_gre;
+
+ BUG_ON(ip_nat_pptp_hook_expectfn);
+ ip_nat_pptp_hook_expectfn = &pptp_nat_expected;
+
+ printk("ip_nat_pptp version %s loaded\n", IP_NAT_PPTP_VERSION);
+ return 0;
+}
+
+static void __exit fini(void)
+{
+ DEBUGP("cleanup_module\n" );
+
+ ip_nat_pptp_hook_expectfn = NULL;
+ ip_nat_pptp_hook_exp_gre = NULL;
+ ip_nat_pptp_hook_inbound = NULL;
+ ip_nat_pptp_hook_outbound = NULL;
+
+ ip_nat_proto_gre_fini();
+ /* Make sure no one calls it meanwhile */
+ synchronize_net();
+
+ printk("ip_nat_pptp version %s unloaded\n", IP_NAT_PPTP_VERSION);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/ipv4/netfilter/ip_nat_proto_gre.c b/net/ipv4/netfilter/ip_nat_proto_gre.c
new file mode 100644
index 00000000000..7c128540167
--- /dev/null
+++ b/net/ipv4/netfilter/ip_nat_proto_gre.c
@@ -0,0 +1,214 @@
+/*
+ * ip_nat_proto_gre.c - Version 2.0
+ *
+ * NAT protocol helper module for GRE.
+ *
+ * GRE is a generic encapsulation protocol, which is generally not very
+ * well suited for NAT, as it has no protocol-specific part such as port numbers.
+ *
+ * It has an optional key field, which may help us distinguish two
+ * connections between the same two hosts.
+ *
+ * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784
+ *
+ * PPTP is built on top of a modified version of GRE, and has a mandatory
+ * field called "CallID", which serves us for the same purpose as the key
+ * field in plain GRE.
+ *
+ * Documentation about PPTP can be found in RFC 2637
+ *
+ * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/netfilter_ipv4/ip_nat.h>
+#include <linux/netfilter_ipv4/ip_nat_rule.h>
+#include <linux/netfilter_ipv4/ip_nat_protocol.h>
+#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
+MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
+
+#if 0
+#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, \
+ __FUNCTION__, ## args)
+#else
+#define DEBUGP(x, args...)
+#endif
+
+/* is key in given range between min and max */
+static int
+gre_in_range(const struct ip_conntrack_tuple *tuple,
+ enum ip_nat_manip_type maniptype,
+ const union ip_conntrack_manip_proto *min,
+ const union ip_conntrack_manip_proto *max)
+{
+ u_int32_t key;
+
+ if (maniptype == IP_NAT_MANIP_SRC)
+ key = tuple->src.u.gre.key;
+ else
+ key = tuple->dst.u.gre.key;
+
+ return ntohl(key) >= ntohl(min->gre.key)
+ && ntohl(key) <= ntohl(max->gre.key);
+}
+
+/* generate unique tuple ... */
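+/* Only the GRE key (the PPTP CallID) is varied here; the IP addresses have
+ * already been chosen by the generic NAT core. */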
+static int
+gre_unique_tuple(struct ip_conntrack_tuple *tuple,
+ const struct ip_nat_range *range,
+ enum ip_nat_manip_type maniptype,
+ const struct ip_conntrack *conntrack)
+{
+ static u_int16_t key;
+ u_int16_t *keyptr;
+ unsigned int min, i, range_size;
+
+ if (maniptype == IP_NAT_MANIP_SRC)
+ keyptr = &tuple->src.u.gre.key;
+ else
+ keyptr = &tuple->dst.u.gre.key;
+
+ if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
+ DEBUGP("%p: NATing GRE PPTP\n", conntrack);
+ min = 1;
+ range_size = 0xffff;
+ } else {
+ min = ntohl(range->min.gre.key);
+ range_size = ntohl(range->max.gre.key) - min + 1;
+ }
+
+ DEBUGP("min = %u, range_size = %u\n", min, range_size);
+
+ for (i = 0; i < range_size; i++, key++) {
+ *keyptr = htonl(min + key % range_size);
+ if (!ip_nat_used_tuple(tuple, conntrack))
+ return 1;
+ }
+
+ DEBUGP("%p: no NAT mapping\n", conntrack);
+
+ return 0;
+}
+
+/* manipulate a GRE packet according to maniptype */
+static int
+gre_manip_pkt(struct sk_buff **pskb,
+ unsigned int iphdroff,
+ const struct ip_conntrack_tuple *tuple,
+ enum ip_nat_manip_type maniptype)
+{
+ struct gre_hdr *greh;
+ struct gre_hdr_pptp *pgreh;
+ struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff);
+ unsigned int hdroff = iphdroff + iph->ihl*4;
+
+ /* pgreh includes two optional 32bit fields which are not required
+ * to be there. That's where the magic '8' comes from */
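+ /* (the mandatory part, i.e. flags, version, protocol, payload_len and
+ * call_id, is exactly 8 bytes; seq and ack may be absent) */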
+ if (!skb_make_writable(pskb, hdroff + sizeof(*pgreh)-8))
+ return 0;
+
+ greh = (void *)(*pskb)->data + hdroff;
+ pgreh = (struct gre_hdr_pptp *) greh;
+
+ /* we only have destination manip of a packet, since 'source key'
+ * is not present in the packet itself */
+ if (maniptype == IP_NAT_MANIP_DST) {
+ /* key manipulation is always dest */
+ switch (greh->version) {
+ case 0:
+ if (!greh->key) {
+ DEBUGP("can't nat GRE w/o key\n");
+ break;
+ }
+ if (greh->csum) {
+ /* FIXME: Never tested this code... */
+ *(gre_csum(greh)) =
+ ip_nat_cheat_check(~*(gre_key(greh)),
+ tuple->dst.u.gre.key,
+ *(gre_csum(greh)));
+ }
+ *(gre_key(greh)) = tuple->dst.u.gre.key;
+ break;
+ case GRE_VERSION_PPTP:
+ DEBUGP("call_id -> 0x%04x\n",
+ ntohl(tuple->dst.u.gre.key));
+ pgreh->call_id = htons(ntohl(tuple->dst.u.gre.key));
+ break;
+ default:
+ DEBUGP("can't nat unknown GRE version\n");
+ return 0;
+ break;
+ }
+ }
+ return 1;
+}
+
+/* print out a nat tuple */
+static unsigned int
+gre_print(char *buffer,
+ const struct ip_conntrack_tuple *match,
+ const struct ip_conntrack_tuple *mask)
+{
+ unsigned int len = 0;
+
+ if (mask->src.u.gre.key)
+ len += sprintf(buffer + len, "srckey=0x%x ",
+ ntohl(match->src.u.gre.key));
+
+ if (mask->dst.u.gre.key)
+ len += sprintf(buffer + len, "dstkey=0x%x ",
+ ntohl(match->dst.u.gre.key));
+
+ return len;
+}
+
+/* print a range of keys */
+static unsigned int
+gre_print_range(char *buffer, const struct ip_nat_range *range)
+{
+ if (range->min.gre.key != 0
+ || range->max.gre.key != 0xFFFF) {
+ if (range->min.gre.key == range->max.gre.key)
+ return sprintf(buffer, "key 0x%x ",
+ ntohl(range->min.gre.key));
+ else
+ return sprintf(buffer, "keys 0x%u-0x%u ",
+ ntohl(range->min.gre.key),
+ ntohl(range->max.gre.key));
+ } else
+ return 0;
+}
+
+/* nat helper struct */
+static struct ip_nat_protocol gre = {
+ .name = "GRE",
+ .protonum = IPPROTO_GRE,
+ .manip_pkt = gre_manip_pkt,
+ .in_range = gre_in_range,
+ .unique_tuple = gre_unique_tuple,
+ .print = gre_print,
+ .print_range = gre_print_range,
+#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
+ defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+ .range_to_nfattr = ip_nat_port_range_to_nfattr,
+ .nfattr_to_range = ip_nat_port_nfattr_to_range,
+#endif
+};
+
+int __init ip_nat_proto_gre_init(void)
+{
+ return ip_nat_protocol_register(&gre);
+}
+
+void __exit ip_nat_proto_gre_fini(void)
+{
+ ip_nat_protocol_unregister(&gre);
+}
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 304bb0a1d4f..4b0d7e4d626 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -361,7 +361,7 @@ static void raw_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
if (type && code) {
get_user(fl->fl_icmp_type, type);
- __get_user(fl->fl_icmp_code, code);
+ get_user(fl->fl_icmp_code, code);
probed = 1;
}
break;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b018e31b653..5dd6dd7d091 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -461,9 +461,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
flags = TCP_SKB_CB(skb)->flags;
TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
TCP_SKB_CB(buff)->flags = flags;
- TCP_SKB_CB(buff)->sacked =
- (TCP_SKB_CB(skb)->sacked &
- (TCPCB_LOST | TCPCB_EVER_RETRANS | TCPCB_AT_TAIL));
+ TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;
if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_HW) {
@@ -501,6 +499,12 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
tcp_skb_pcount(buff);
tp->packets_out -= diff;
+
+ if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
+ tp->sacked_out -= diff;
+ if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
+ tp->retrans_out -= diff;
+
if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
tp->lost_out -= diff;
tp->left_out -= diff;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 1cb8adb2787..2da514b16d9 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1955,6 +1955,57 @@ static void __exit fini(void)
#endif
}
+/*
+ * Find the specified header, searching up to the transport protocol header.
+ * If the target header is found, its offset is stored in *offset
+ * and 0 is returned; otherwise -1 is returned.
+ *
+ * Notes: - non-1st Fragment Header isn't skipped.
+ * - ESP header isn't skipped.
+ * - The target header may be truncated.
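+ *
+ * Typical use (illustrative sketch, mirroring the ip6t_* match functions
+ * converted below):
+ *   unsigned int ptr;
+ *   if (ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH) < 0)
+ *           return 0;
+ *   ah = skb_header_pointer(skb, ptr, sizeof(_ah), &_ah);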
+ */
+int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, u8 target)
+{
+ unsigned int start = (u8*)(skb->nh.ipv6h + 1) - skb->data;
+ u8 nexthdr = skb->nh.ipv6h->nexthdr;
+ unsigned int len = skb->len - start;
+
+ while (nexthdr != target) {
+ struct ipv6_opt_hdr _hdr, *hp;
+ unsigned int hdrlen;
+
+ if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE)
+ return -1;
+ hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
+ if (hp == NULL)
+ return -1;
+ if (nexthdr == NEXTHDR_FRAGMENT) {
+ unsigned short _frag_off, *fp;
+ fp = skb_header_pointer(skb,
+ start+offsetof(struct frag_hdr,
+ frag_off),
+ sizeof(_frag_off),
+ &_frag_off);
+ if (fp == NULL)
+ return -1;
+
+ if (ntohs(*fp) & ~0x7)
+ return -1;
+ hdrlen = 8;
+ } else if (nexthdr == NEXTHDR_AUTH)
+ hdrlen = (hp->hdrlen + 2) << 2;
+ else
+ hdrlen = ipv6_optlen(hp);
+
+ nexthdr = hp->nexthdr;
+ len -= hdrlen;
+ start += hdrlen;
+ }
+
+ *offset = start;
+ return 0;
+}
+
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);
@@ -1963,6 +2014,7 @@ EXPORT_SYMBOL(ip6t_unregister_match);
EXPORT_SYMBOL(ip6t_register_target);
EXPORT_SYMBOL(ip6t_unregister_target);
EXPORT_SYMBOL(ip6t_ext_hdr);
+EXPORT_SYMBOL(ipv6_find_hdr);
module_init(init);
module_exit(fini);
diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c
index d5b94f142bb..dde37793d20 100644
--- a/net/ipv6/netfilter/ip6t_ah.c
+++ b/net/ipv6/netfilter/ip6t_ah.c
@@ -48,92 +48,21 @@ match(const struct sk_buff *skb,
unsigned int protoff,
int *hotdrop)
{
- struct ip_auth_hdr *ah = NULL, _ah;
+ struct ip_auth_hdr *ah, _ah;
const struct ip6t_ah *ahinfo = matchinfo;
- unsigned int temp;
- int len;
- u8 nexthdr;
unsigned int ptr;
unsigned int hdrlen = 0;
- /*DEBUGP("IPv6 AH entered\n");*/
- /* if (opt->auth == 0) return 0;
- * It does not filled on output */
-
- /* type of the 1st exthdr */
- nexthdr = skb->nh.ipv6h->nexthdr;
- /* pointer to the 1st exthdr */
- ptr = sizeof(struct ipv6hdr);
- /* available length */
- len = skb->len - ptr;
- temp = 0;
-
- while (ip6t_ext_hdr(nexthdr)) {
- struct ipv6_opt_hdr _hdr, *hp;
-
- DEBUGP("ipv6_ah header iteration \n");
-
- /* Is there enough space for the next ext header? */
- if (len < sizeof(struct ipv6_opt_hdr))
- return 0;
- /* No more exthdr -> evaluate */
- if (nexthdr == NEXTHDR_NONE)
- break;
- /* ESP -> evaluate */
- if (nexthdr == NEXTHDR_ESP)
- break;
-
- hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
- BUG_ON(hp == NULL);
-
- /* Calculate the header length */
- if (nexthdr == NEXTHDR_FRAGMENT)
- hdrlen = 8;
- else if (nexthdr == NEXTHDR_AUTH)
- hdrlen = (hp->hdrlen+2)<<2;
- else
- hdrlen = ipv6_optlen(hp);
-
- /* AH -> evaluate */
- if (nexthdr == NEXTHDR_AUTH) {
- temp |= MASK_AH;
- break;
- }
-
-
- /* set the flag */
- switch (nexthdr) {
- case NEXTHDR_HOP:
- case NEXTHDR_ROUTING:
- case NEXTHDR_FRAGMENT:
- case NEXTHDR_AUTH:
- case NEXTHDR_DEST:
- break;
- default:
- DEBUGP("ipv6_ah match: unknown nextheader %u\n",nexthdr);
- return 0;
- }
-
- nexthdr = hp->nexthdr;
- len -= hdrlen;
- ptr += hdrlen;
- if (ptr > skb->len) {
- DEBUGP("ipv6_ah: new pointer too large! \n");
- break;
- }
- }
-
- /* AH header not found */
- if (temp != MASK_AH)
+ if (ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH) < 0)
return 0;
- if (len < sizeof(struct ip_auth_hdr)){
+ ah = skb_header_pointer(skb, ptr, sizeof(_ah), &_ah);
+ if (ah == NULL) {
*hotdrop = 1;
return 0;
}
- ah = skb_header_pointer(skb, ptr, sizeof(_ah), &_ah);
- BUG_ON(ah == NULL);
+ hdrlen = (ah->hdrlen + 2) << 2;
DEBUGP("IPv6 AH LEN %u %u ", hdrlen, ah->hdrlen);
DEBUGP("RES %04X ", ah->reserved);
diff --git a/net/ipv6/netfilter/ip6t_dst.c b/net/ipv6/netfilter/ip6t_dst.c
index 540925e4a7a..c450a635e54 100644
--- a/net/ipv6/netfilter/ip6t_dst.c
+++ b/net/ipv6/netfilter/ip6t_dst.c
@@ -63,8 +63,6 @@ match(const struct sk_buff *skb,
struct ipv6_opt_hdr _optsh, *oh;
const struct ip6t_opts *optinfo = matchinfo;
unsigned int temp;
- unsigned int len;
- u8 nexthdr;
unsigned int ptr;
unsigned int hdrlen = 0;
unsigned int ret = 0;
@@ -72,97 +70,25 @@ match(const struct sk_buff *skb,
u8 _optlen, *lp = NULL;
unsigned int optlen;
- /* type of the 1st exthdr */
- nexthdr = skb->nh.ipv6h->nexthdr;
- /* pointer to the 1st exthdr */
- ptr = sizeof(struct ipv6hdr);
- /* available length */
- len = skb->len - ptr;
- temp = 0;
-
- while (ip6t_ext_hdr(nexthdr)) {
- struct ipv6_opt_hdr _hdr, *hp;
-
- DEBUGP("ipv6_opts header iteration \n");
-
- /* Is there enough space for the next ext header? */
- if (len < (int)sizeof(struct ipv6_opt_hdr))
- return 0;
- /* No more exthdr -> evaluate */
- if (nexthdr == NEXTHDR_NONE) {
- break;
- }
- /* ESP -> evaluate */
- if (nexthdr == NEXTHDR_ESP) {
- break;
- }
-
- hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
- BUG_ON(hp == NULL);
-
- /* Calculate the header length */
- if (nexthdr == NEXTHDR_FRAGMENT) {
- hdrlen = 8;
- } else if (nexthdr == NEXTHDR_AUTH)
- hdrlen = (hp->hdrlen+2)<<2;
- else
- hdrlen = ipv6_optlen(hp);
-
- /* OPTS -> evaluate */
#if HOPBYHOP
- if (nexthdr == NEXTHDR_HOP) {
- temp |= MASK_HOPOPTS;
+ if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP) < 0)
#else
- if (nexthdr == NEXTHDR_DEST) {
- temp |= MASK_DSTOPTS;
+ if (ipv6_find_hdr(skb, &ptr, NEXTHDR_DEST) < 0)
#endif
- break;
- }
-
+ return 0;
- /* set the flag */
- switch (nexthdr){
- case NEXTHDR_HOP:
- case NEXTHDR_ROUTING:
- case NEXTHDR_FRAGMENT:
- case NEXTHDR_AUTH:
- case NEXTHDR_DEST:
- break;
- default:
- DEBUGP("ipv6_opts match: unknown nextheader %u\n",nexthdr);
- return 0;
- break;
- }
-
- nexthdr = hp->nexthdr;
- len -= hdrlen;
- ptr += hdrlen;
- if ( ptr > skb->len ) {
- DEBUGP("ipv6_opts: new pointer is too large! \n");
- break;
- }
- }
-
- /* OPTIONS header not found */
-#if HOPBYHOP
- if ( temp != MASK_HOPOPTS ) return 0;
-#else
- if ( temp != MASK_DSTOPTS ) return 0;
-#endif
-
- if (len < (int)sizeof(struct ipv6_opt_hdr)){
+ oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
+ if (oh == NULL){
*hotdrop = 1;
return 0;
}
- if (len < hdrlen){
+ hdrlen = ipv6_optlen(oh);
+ if (skb->len - ptr < hdrlen){
/* Packet smaller than it's length field */
return 0;
}
- oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
- BUG_ON(oh == NULL);
-
DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen);
DEBUGP("len %02X %04X %02X ",
diff --git a/net/ipv6/netfilter/ip6t_esp.c b/net/ipv6/netfilter/ip6t_esp.c
index e39dd236fd8..24bc0cde43a 100644
--- a/net/ipv6/netfilter/ip6t_esp.c
+++ b/net/ipv6/netfilter/ip6t_esp.c
@@ -48,87 +48,22 @@ match(const struct sk_buff *skb,
unsigned int protoff,
int *hotdrop)
{
- struct ip_esp_hdr _esp, *eh = NULL;
+ struct ip_esp_hdr _esp, *eh;
const struct ip6t_esp *espinfo = matchinfo;
- unsigned int temp;
- int len;
- u8 nexthdr;
unsigned int ptr;
/* Make sure this isn't an evil packet */
/*DEBUGP("ipv6_esp entered \n");*/
- /* type of the 1st exthdr */
- nexthdr = skb->nh.ipv6h->nexthdr;
- /* pointer to the 1st exthdr */
- ptr = sizeof(struct ipv6hdr);
- /* available length */
- len = skb->len - ptr;
- temp = 0;
-
- while (ip6t_ext_hdr(nexthdr)) {
- struct ipv6_opt_hdr _hdr, *hp;
- int hdrlen;
-
- DEBUGP("ipv6_esp header iteration \n");
-
- /* Is there enough space for the next ext header? */
- if (len < sizeof(struct ipv6_opt_hdr))
- return 0;
- /* No more exthdr -> evaluate */
- if (nexthdr == NEXTHDR_NONE)
- break;
- /* ESP -> evaluate */
- if (nexthdr == NEXTHDR_ESP) {
- temp |= MASK_ESP;
- break;
- }
-
- hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
- BUG_ON(hp == NULL);
-
- /* Calculate the header length */
- if (nexthdr == NEXTHDR_FRAGMENT)
- hdrlen = 8;
- else if (nexthdr == NEXTHDR_AUTH)
- hdrlen = (hp->hdrlen+2)<<2;
- else
- hdrlen = ipv6_optlen(hp);
-
- /* set the flag */
- switch (nexthdr) {
- case NEXTHDR_HOP:
- case NEXTHDR_ROUTING:
- case NEXTHDR_FRAGMENT:
- case NEXTHDR_AUTH:
- case NEXTHDR_DEST:
- break;
- default:
- DEBUGP("ipv6_esp match: unknown nextheader %u\n",nexthdr);
- return 0;
- }
-
- nexthdr = hp->nexthdr;
- len -= hdrlen;
- ptr += hdrlen;
- if (ptr > skb->len) {
- DEBUGP("ipv6_esp: new pointer too large! \n");
- break;
- }
- }
-
- /* ESP header not found */
- if (temp != MASK_ESP)
+ if (ipv6_find_hdr(skb, &ptr, NEXTHDR_ESP) < 0)
return 0;
- if (len < sizeof(struct ip_esp_hdr)) {
+ eh = skb_header_pointer(skb, ptr, sizeof(_esp), &_esp);
+ if (eh == NULL) {
*hotdrop = 1;
return 0;
}
- eh = skb_header_pointer(skb, ptr, sizeof(_esp), &_esp);
- BUG_ON(eh == NULL);
-
DEBUGP("IPv6 ESP SPI %u %08X\n", ntohl(eh->spi), ntohl(eh->spi));
return (eh != NULL)
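
Each of these conversions follows the same two-step shape. A minimal sketch of the post-patch ESP path, assembled from the added lines above (a sketch, not a drop-in replacement, and assuming, as the calls imply, that ipv6_find_hdr() stores the header offset in *ptr and returns a negative value when the requested header is absent):

	unsigned int ptr;
	struct ip_esp_hdr _esp, *eh;

	/* Locate the ESP header; no match if it is not in the packet. */
	if (ipv6_find_hdr(skb, &ptr, NEXTHDR_ESP) < 0)
		return 0;

	/* Copy the fixed-size ESP header out of the skb if it is nonlinear. */
	eh = skb_header_pointer(skb, ptr, sizeof(_esp), &_esp);
	if (eh == NULL) {
		*hotdrop = 1;	/* header present but truncated: drop */
		return 0;
	}
	/* ... SPI comparison against espinfo continues as before ... */
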
diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c
index 4bfa30a9bc8..085d5f8eea2 100644
--- a/net/ipv6/netfilter/ip6t_frag.c
+++ b/net/ipv6/netfilter/ip6t_frag.c
@@ -48,90 +48,18 @@ match(const struct sk_buff *skb,
unsigned int protoff,
int *hotdrop)
{
- struct frag_hdr _frag, *fh = NULL;
+ struct frag_hdr _frag, *fh;
const struct ip6t_frag *fraginfo = matchinfo;
- unsigned int temp;
- int len;
- u8 nexthdr;
unsigned int ptr;
- unsigned int hdrlen = 0;
-
- /* type of the 1st exthdr */
- nexthdr = skb->nh.ipv6h->nexthdr;
- /* pointer to the 1st exthdr */
- ptr = sizeof(struct ipv6hdr);
- /* available length */
- len = skb->len - ptr;
- temp = 0;
-
- while (ip6t_ext_hdr(nexthdr)) {
- struct ipv6_opt_hdr _hdr, *hp;
-
- DEBUGP("ipv6_frag header iteration \n");
-
- /* Is there enough space for the next ext header? */
- if (len < (int)sizeof(struct ipv6_opt_hdr))
- return 0;
- /* No more exthdr -> evaluate */
- if (nexthdr == NEXTHDR_NONE) {
- break;
- }
- /* ESP -> evaluate */
- if (nexthdr == NEXTHDR_ESP) {
- break;
- }
-
- hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
- BUG_ON(hp == NULL);
-
- /* Calculate the header length */
- if (nexthdr == NEXTHDR_FRAGMENT) {
- hdrlen = 8;
- } else if (nexthdr == NEXTHDR_AUTH)
- hdrlen = (hp->hdrlen+2)<<2;
- else
- hdrlen = ipv6_optlen(hp);
-
- /* FRAG -> evaluate */
- if (nexthdr == NEXTHDR_FRAGMENT) {
- temp |= MASK_FRAGMENT;
- break;
- }
-
-
- /* set the flag */
- switch (nexthdr){
- case NEXTHDR_HOP:
- case NEXTHDR_ROUTING:
- case NEXTHDR_FRAGMENT:
- case NEXTHDR_AUTH:
- case NEXTHDR_DEST:
- break;
- default:
- DEBUGP("ipv6_frag match: unknown nextheader %u\n",nexthdr);
- return 0;
- break;
- }
-
- nexthdr = hp->nexthdr;
- len -= hdrlen;
- ptr += hdrlen;
- if ( ptr > skb->len ) {
- DEBUGP("ipv6_frag: new pointer too large! \n");
- break;
- }
- }
-
- /* FRAG header not found */
- if ( temp != MASK_FRAGMENT ) return 0;
-
- if (len < sizeof(struct frag_hdr)){
- *hotdrop = 1;
- return 0;
- }
- fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag);
- BUG_ON(fh == NULL);
+ if (ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT) < 0)
+ return 0;
+
+ fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag);
+ if (fh == NULL){
+ *hotdrop = 1;
+ return 0;
+ }
DEBUGP("INFO %04X ", fh->frag_off);
DEBUGP("OFFSET %04X ", ntohs(fh->frag_off) & ~0x7);
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c
index 27f3650d127..1d09485111d 100644
--- a/net/ipv6/netfilter/ip6t_hbh.c
+++ b/net/ipv6/netfilter/ip6t_hbh.c
@@ -63,8 +63,6 @@ match(const struct sk_buff *skb,
struct ipv6_opt_hdr _optsh, *oh;
const struct ip6t_opts *optinfo = matchinfo;
unsigned int temp;
- unsigned int len;
- u8 nexthdr;
unsigned int ptr;
unsigned int hdrlen = 0;
unsigned int ret = 0;
@@ -72,97 +70,25 @@ match(const struct sk_buff *skb,
u8 _optlen, *lp = NULL;
unsigned int optlen;
- /* type of the 1st exthdr */
- nexthdr = skb->nh.ipv6h->nexthdr;
- /* pointer to the 1st exthdr */
- ptr = sizeof(struct ipv6hdr);
- /* available length */
- len = skb->len - ptr;
- temp = 0;
-
- while (ip6t_ext_hdr(nexthdr)) {
- struct ipv6_opt_hdr _hdr, *hp;
-
- DEBUGP("ipv6_opts header iteration \n");
-
- /* Is there enough space for the next ext header? */
- if (len < (int)sizeof(struct ipv6_opt_hdr))
- return 0;
- /* No more exthdr -> evaluate */
- if (nexthdr == NEXTHDR_NONE) {
- break;
- }
- /* ESP -> evaluate */
- if (nexthdr == NEXTHDR_ESP) {
- break;
- }
-
- hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
- BUG_ON(hp == NULL);
-
- /* Calculate the header length */
- if (nexthdr == NEXTHDR_FRAGMENT) {
- hdrlen = 8;
- } else if (nexthdr == NEXTHDR_AUTH)
- hdrlen = (hp->hdrlen+2)<<2;
- else
- hdrlen = ipv6_optlen(hp);
-
- /* OPTS -> evaluate */
#if HOPBYHOP
- if (nexthdr == NEXTHDR_HOP) {
- temp |= MASK_HOPOPTS;
+ if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP) < 0)
#else
- if (nexthdr == NEXTHDR_DEST) {
- temp |= MASK_DSTOPTS;
+ if (ipv6_find_hdr(skb, &ptr, NEXTHDR_DEST) < 0)
#endif
- break;
- }
-
+ return 0;
- /* set the flag */
- switch (nexthdr){
- case NEXTHDR_HOP:
- case NEXTHDR_ROUTING:
- case NEXTHDR_FRAGMENT:
- case NEXTHDR_AUTH:
- case NEXTHDR_DEST:
- break;
- default:
- DEBUGP("ipv6_opts match: unknown nextheader %u\n",nexthdr);
- return 0;
- break;
- }
-
- nexthdr = hp->nexthdr;
- len -= hdrlen;
- ptr += hdrlen;
- if ( ptr > skb->len ) {
- DEBUGP("ipv6_opts: new pointer is too large! \n");
- break;
- }
- }
-
- /* OPTIONS header not found */
-#if HOPBYHOP
- if ( temp != MASK_HOPOPTS ) return 0;
-#else
- if ( temp != MASK_DSTOPTS ) return 0;
-#endif
-
- if (len < (int)sizeof(struct ipv6_opt_hdr)){
+ oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
+ if (oh == NULL){
*hotdrop = 1;
return 0;
}
- if (len < hdrlen){
+ hdrlen = ipv6_optlen(oh);
+ if (skb->len - ptr < hdrlen){
/* Packet smaller than its length field */
return 0;
}
- oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
- BUG_ON(oh == NULL);
-
DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen);
DEBUGP("len %02X %04X %02X ",
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c
index 2bb670037df..beb2fd5cebb 100644
--- a/net/ipv6/netfilter/ip6t_rt.c
+++ b/net/ipv6/netfilter/ip6t_rt.c
@@ -50,98 +50,29 @@ match(const struct sk_buff *skb,
unsigned int protoff,
int *hotdrop)
{
- struct ipv6_rt_hdr _route, *rh = NULL;
+ struct ipv6_rt_hdr _route, *rh;
const struct ip6t_rt *rtinfo = matchinfo;
unsigned int temp;
- unsigned int len;
- u8 nexthdr;
unsigned int ptr;
unsigned int hdrlen = 0;
unsigned int ret = 0;
struct in6_addr *ap, _addr;
- /* type of the 1st exthdr */
- nexthdr = skb->nh.ipv6h->nexthdr;
- /* pointer to the 1st exthdr */
- ptr = sizeof(struct ipv6hdr);
- /* available length */
- len = skb->len - ptr;
- temp = 0;
+ if (ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING) < 0)
+ return 0;
- while (ip6t_ext_hdr(nexthdr)) {
- struct ipv6_opt_hdr _hdr, *hp;
-
- DEBUGP("ipv6_rt header iteration \n");
-
- /* Is there enough space for the next ext header? */
- if (len < (int)sizeof(struct ipv6_opt_hdr))
- return 0;
- /* No more exthdr -> evaluate */
- if (nexthdr == NEXTHDR_NONE) {
- break;
- }
- /* ESP -> evaluate */
- if (nexthdr == NEXTHDR_ESP) {
- break;
- }
-
- hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
- BUG_ON(hp == NULL);
-
- /* Calculate the header length */
- if (nexthdr == NEXTHDR_FRAGMENT) {
- hdrlen = 8;
- } else if (nexthdr == NEXTHDR_AUTH)
- hdrlen = (hp->hdrlen+2)<<2;
- else
- hdrlen = ipv6_optlen(hp);
-
- /* ROUTING -> evaluate */
- if (nexthdr == NEXTHDR_ROUTING) {
- temp |= MASK_ROUTING;
- break;
- }
-
-
- /* set the flag */
- switch (nexthdr){
- case NEXTHDR_HOP:
- case NEXTHDR_ROUTING:
- case NEXTHDR_FRAGMENT:
- case NEXTHDR_AUTH:
- case NEXTHDR_DEST:
- break;
- default:
- DEBUGP("ipv6_rt match: unknown nextheader %u\n",nexthdr);
- return 0;
- break;
- }
-
- nexthdr = hp->nexthdr;
- len -= hdrlen;
- ptr += hdrlen;
- if ( ptr > skb->len ) {
- DEBUGP("ipv6_rt: new pointer is too large! \n");
- break;
- }
- }
-
- /* ROUTING header not found */
- if ( temp != MASK_ROUTING ) return 0;
-
- if (len < (int)sizeof(struct ipv6_rt_hdr)){
+ rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route);
+ if (rh == NULL){
*hotdrop = 1;
return 0;
}
- if (len < hdrlen){
+ hdrlen = ipv6_optlen(rh);
+ if (skb->len - ptr < hdrlen){
/* Packet smaller than its length field */
return 0;
}
- rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route);
- BUG_ON(rh == NULL);
-
DEBUGP("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen);
DEBUGP("TYPE %04X ", rh->type);
DEBUGP("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 5aa3691c578..a1265a320b1 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -627,7 +627,7 @@ static void rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
if (type && code) {
get_user(fl->fl_icmp_type, type);
- __get_user(fl->fl_icmp_code, code);
+ get_user(fl->fl_icmp_code, code);
probed = 1;
}
break;