Diffstat (limited to 'drivers')
-rw-r--r--drivers/acorn/char/pcf8583.c3
-rw-r--r--drivers/base/attribute_container.c5
-rw-r--r--drivers/base/class.c23
-rw-r--r--drivers/base/dd.c3
-rw-r--r--drivers/base/firmware_class.c9
-rw-r--r--drivers/base/map.c3
-rw-r--r--drivers/base/platform.c3
-rw-r--r--drivers/block/cciss.c636
-rw-r--r--drivers/block/cciss.h10
-rw-r--r--drivers/block/cciss_cmd.h8
-rw-r--r--drivers/block/cciss_scsi.c69
-rw-r--r--drivers/block/ll_rw_blk.c38
-rw-r--r--drivers/block/paride/pf.c22
-rw-r--r--drivers/block/pktcdvd.c85
-rw-r--r--drivers/block/scsi_ioctl.c1
-rw-r--r--drivers/block/ub.c55
-rw-r--r--drivers/bluetooth/hci_usb.c19
-rw-r--r--drivers/bluetooth/hci_usb.h5
-rw-r--r--drivers/char/agp/hp-agp.c2
-rw-r--r--drivers/char/amiserial.c4
-rw-r--r--drivers/char/drm/drm_drv.c2
-rw-r--r--drivers/char/drm/drm_proc.c2
-rw-r--r--drivers/char/epca.c84
-rw-r--r--drivers/char/epca.h12
-rw-r--r--drivers/char/hpet.c1
-rw-r--r--drivers/char/hvc_console.c6
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c6
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c2
-rw-r--r--drivers/char/n_r3964.c84
-rw-r--r--drivers/char/vt.c5
-rw-r--r--drivers/char/watchdog/Kconfig93
-rw-r--r--drivers/char/watchdog/Makefile7
-rw-r--r--drivers/char/watchdog/i6300esb.c527
-rw-r--r--drivers/char/watchdog/ibmasr.c405
-rw-r--r--drivers/char/watchdog/mpcore_wdt.c2
-rw-r--r--drivers/char/watchdog/mv64x60_wdt.c252
-rw-r--r--drivers/char/watchdog/pcwd_pci.c44
-rw-r--r--drivers/char/watchdog/s3c2410_wdt.c2
-rw-r--r--drivers/char/watchdog/sbc8360.c414
-rw-r--r--drivers/char/watchdog/w83977f_wdt.c543
-rw-r--r--drivers/connector/cn_queue.c32
-rw-r--r--drivers/connector/connector.c74
-rw-r--r--drivers/firmware/dell_rbu.c241
-rw-r--r--drivers/hwmon/Kconfig9
-rw-r--r--drivers/hwmon/hdaps.c369
-rw-r--r--drivers/i2c/busses/Kconfig12
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-keywest.c1
-rw-r--r--drivers/i2c/busses/i2c-pmac-smu.c316
-rw-r--r--drivers/i2c/busses/i2c-pxa.c12
-rw-r--r--drivers/ide/ide-iops.c41
-rw-r--r--drivers/ide/ide-taskfile.c1
-rw-r--r--drivers/ide/legacy/ide-cs.c2
-rw-r--r--drivers/ide/pci/cmd64x.c2
-rw-r--r--drivers/ide/pci/hpt34x.c2
-rw-r--r--drivers/ieee1394/amdtp.c1
-rw-r--r--drivers/ieee1394/csr1212.h1
-rw-r--r--drivers/ieee1394/dv1394.c1
-rw-r--r--drivers/ieee1394/eth1394.c12
-rw-r--r--drivers/ieee1394/eth1394.h6
-rw-r--r--drivers/ieee1394/hosts.c3
-rw-r--r--drivers/ieee1394/hosts.h8
-rw-r--r--drivers/ieee1394/ieee1394_core.c32
-rw-r--r--drivers/ieee1394/nodemgr.c23
-rw-r--r--drivers/ieee1394/ohci1394.c4
-rw-r--r--drivers/ieee1394/raw1394.c1
-rw-r--r--drivers/ieee1394/sbp2.c107
-rw-r--r--drivers/ieee1394/video1394.c1
-rw-r--r--drivers/infiniband/core/mad_rmpp.c19
-rw-r--r--drivers/infiniband/core/user_mad.c5
-rw-r--r--drivers/infiniband/core/uverbs.h1
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c120
-rw-r--r--drivers/infiniband/core/uverbs_main.c27
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c18
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c19
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c51
-rw-r--r--drivers/infiniband/hw/mthca/mthca_srq.c25
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c13
-rw-r--r--drivers/input/input.c1
-rw-r--r--drivers/input/keyboard/Kconfig11
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/spitzkbd.c478
-rw-r--r--drivers/input/touchscreen/Kconfig6
-rw-r--r--drivers/input/touchscreen/corgi_ts.c70
-rw-r--r--drivers/isdn/divert/divert_procfs.c6
-rw-r--r--drivers/isdn/hardware/eicon/diva_didd.c6
-rw-r--r--drivers/isdn/hardware/eicon/divasproc.c2
-rw-r--r--drivers/isdn/hisax/hfc_pci.c1
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c2
-rw-r--r--drivers/isdn/hisax/st5481.h4
-rw-r--r--drivers/isdn/hisax/st5481_b.c20
-rw-r--r--drivers/isdn/hisax/st5481_d.c26
-rw-r--r--drivers/isdn/hisax/st5481_init.c4
-rw-r--r--drivers/isdn/hisax/st5481_usb.c68
-rw-r--r--drivers/isdn/hysdn/hysdn_procconf.c2
-rw-r--r--drivers/isdn/sc/init.c4
-rw-r--r--drivers/macintosh/smu.c1032
-rw-r--r--drivers/macintosh/therm_adt746x.c2
-rw-r--r--drivers/macintosh/therm_pm72.c2
-rw-r--r--drivers/macintosh/therm_windtunnel.c2
-rw-r--r--drivers/md/dm-ioctl.c9
-rw-r--r--drivers/md/dm-mpath.c16
-rw-r--r--drivers/md/raid6.h4
-rw-r--r--drivers/md/raid6algos.c1
-rw-r--r--drivers/md/raid6altivec.uc18
-rw-r--r--drivers/md/raid6test/Makefile27
-rw-r--r--drivers/media/dvb/frontends/tda10021.c4
-rw-r--r--drivers/media/radio/radio-aimslab.c2
-rw-r--r--drivers/media/radio/radio-aztech.c2
-rw-r--r--drivers/media/radio/radio-cadet.c2
-rw-r--r--drivers/media/radio/radio-gemtek.c2
-rw-r--r--drivers/media/radio/radio-rtrack2.c2
-rw-r--r--drivers/media/radio/radio-sf16fmi.c2
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c2
-rw-r--r--drivers/media/radio/radio-terratec.c2
-rw-r--r--drivers/media/radio/radio-typhoon.c2
-rw-r--r--drivers/media/radio/radio-zoltrix.c2
-rw-r--r--drivers/media/video/bttv-cards.c2
-rw-r--r--drivers/media/video/bttv-driver.c14
-rw-r--r--drivers/media/video/bttvp.h2
-rw-r--r--drivers/media/video/cpia.c2
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c7
-rw-r--r--drivers/media/video/rds.h2
-rw-r--r--drivers/media/video/saa6588.c4
-rw-r--r--drivers/message/fusion/Kconfig17
-rw-r--r--drivers/message/fusion/Makefile1
-rw-r--r--drivers/message/fusion/mptbase.c963
-rw-r--r--drivers/message/fusion/mptbase.h56
-rw-r--r--drivers/message/fusion/mptctl.c4
-rw-r--r--drivers/message/fusion/mptfc.c2
-rw-r--r--drivers/message/fusion/mptlan.c7
-rw-r--r--drivers/message/fusion/mptsas.c1235
-rw-r--r--drivers/message/fusion/mptscsih.c463
-rw-r--r--drivers/message/fusion/mptscsih.h7
-rw-r--r--drivers/message/fusion/mptspi.c2
-rw-r--r--drivers/message/i2o/config-osm.c5
-rw-r--r--drivers/mfd/ucb1x00-ts.c4
-rw-r--r--drivers/mtd/devices/docecc.c8
-rw-r--r--drivers/mtd/maps/bast-flash.c1
-rw-r--r--drivers/mtd/maps/ixp2000.c1
-rw-r--r--drivers/mtd/maps/ixp4xx.c3
-rw-r--r--drivers/mtd/maps/omap_nor.c1
-rw-r--r--drivers/mtd/maps/sa1100-flash.c1
-rw-r--r--drivers/mtd/maps/sharpsl-flash.c2
-rw-r--r--drivers/mtd/nand/s3c2410.c1
-rw-r--r--drivers/mtd/nand/sharpsl.c10
-rw-r--r--drivers/net/8139cp.c46
-rw-r--r--drivers/net/8390.c2
-rw-r--r--drivers/net/Kconfig10
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/arcnet/com90io.c4
-rw-r--r--drivers/net/arm/am79c961a.c22
-rw-r--r--drivers/net/bmac.c2
-rw-r--r--drivers/net/bonding/bond_main.c16
-rw-r--r--drivers/net/cassini.c5237
-rw-r--r--drivers/net/cassini.h4425
-rw-r--r--drivers/net/cs89x0.c1
-rw-r--r--drivers/net/e100.c4
-rw-r--r--drivers/net/e1000/e1000_main.c1
-rw-r--r--drivers/net/ibmveth.c6
-rw-r--r--drivers/net/irda/vlsi_ir.c4
-rw-r--r--drivers/net/ixgb/ixgb_main.c2
-rw-r--r--drivers/net/pppoe.c4
-rw-r--r--drivers/net/r8169.c7
-rw-r--r--drivers/net/s2io.c9
-rw-r--r--drivers/net/sk98lin/skge.c20
-rw-r--r--drivers/net/skge.c312
-rw-r--r--drivers/net/skge.h4
-rw-r--r--drivers/net/spider_net.c4
-rw-r--r--drivers/net/tg3.c215
-rw-r--r--drivers/net/tg3.h1
-rw-r--r--drivers/net/tulip/xircom_cb.c2
-rw-r--r--drivers/net/wan/hdlc_cisco.c2
-rw-r--r--drivers/net/wan/sdlamain.c23
-rw-r--r--drivers/net/wan/syncppp.c2
-rw-r--r--drivers/net/wireless/airo.c5
-rw-r--r--drivers/net/wireless/orinoco.c2
-rw-r--r--drivers/net/wireless/orinoco_cs.c1
-rw-r--r--drivers/net/wireless/strip.c4
-rw-r--r--drivers/parisc/led.c5
-rw-r--r--drivers/pci/hotplug.c4
-rw-r--r--drivers/pci/hotplug/ibmphp_pci.c2
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c4
-rw-r--r--drivers/pci/hotplug/rpadlpar_sysfs.c4
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c6
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c3
-rw-r--r--drivers/pci/pci-sysfs.c2
-rw-r--r--drivers/pci/pci.c16
-rw-r--r--drivers/pci/probe.c22
-rw-r--r--drivers/pcmcia/Kconfig2
-rw-r--r--drivers/pcmcia/cardbus.c5
-rw-r--r--drivers/pcmcia/omap_cf.c1
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c12
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c10
-rw-r--r--drivers/pcmcia/ti113x.h115
-rw-r--r--drivers/pcmcia/yenta_socket.c75
-rw-r--r--drivers/s390/cio/blacklist.c4
-rw-r--r--drivers/s390/cio/ccwgroup.c2
-rw-r--r--drivers/s390/crypto/z90main.c3
-rw-r--r--drivers/s390/net/ctcmain.c41
-rw-r--r--drivers/s390/net/qeth.h4
-rw-r--r--drivers/s390/net/qeth_main.c137
-rw-r--r--drivers/s390/net/qeth_sys.c17
-rw-r--r--drivers/s390/scsi/Makefile2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c184
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c10
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c995
-rw-r--r--drivers/s390/scsi/zfcp_def.h307
-rw-r--r--drivers/s390/scsi/zfcp_erp.c135
-rw-r--r--drivers/s390/scsi/zfcp_ext.h30
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c769
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h54
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c30
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c297
-rw-r--r--drivers/s390/scsi/zfcp_sysfs_adapter.c14
-rw-r--r--drivers/sbus/char/display7seg.c2
-rw-r--r--drivers/scsi/3w-9xxx.c55
-rw-r--r--drivers/scsi/3w-9xxx.h17
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/aacraid/aachba.c283
-rw-r--r--drivers/scsi/aacraid/aacraid.h17
-rw-r--r--drivers/scsi/aacraid/comminit.c17
-rw-r--r--drivers/scsi/aacraid/commsup.c581
-rw-r--r--drivers/scsi/aacraid/linit.c12
-rw-r--r--drivers/scsi/aic7xxx/aic7770_osm.c3
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c8
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm_pci.c3
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c17
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.h2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm_pci.c11
-rw-r--r--drivers/scsi/ata_piix.c1
-rw-r--r--drivers/scsi/atp870u.c6
-rw-r--r--drivers/scsi/atp870u.h5
-rw-r--r--drivers/scsi/fd_mcs.c2
-rw-r--r--drivers/scsi/hosts.c37
-rw-r--r--drivers/scsi/ibmmca.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c10
-rw-r--r--drivers/scsi/libata-core.c118
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c6
-rw-r--r--drivers/scsi/megaraid.c70
-rw-r--r--drivers/scsi/megaraid/Kconfig.megaraid9
-rw-r--r--drivers/scsi/megaraid/Makefile1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c2805
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h1142
-rw-r--r--drivers/scsi/mesh.c29
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_rscn.c2
-rw-r--r--drivers/scsi/sata_nv.c2
-rw-r--r--drivers/scsi/sata_sis.c4
-rw-r--r--drivers/scsi/scsi.c5
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_error.c78
-rw-r--r--drivers/scsi/scsi_ioctl.c2
-rw-r--r--drivers/scsi/scsi_lib.c12
-rw-r--r--drivers/scsi/scsi_scan.c116
-rw-r--r--drivers/scsi/scsi_sysfs.c17
-rw-r--r--drivers/scsi/scsi_transport_sas.c9
-rw-r--r--drivers/scsi/sd.c1
-rw-r--r--drivers/scsi/sg.c5
-rw-r--r--drivers/scsi/sr.c1
-rw-r--r--drivers/scsi/st.c1
-rw-r--r--drivers/serial/21285.c2
-rw-r--r--drivers/serial/amba-pl010.c2
-rw-r--r--drivers/serial/amba-pl011.c2
-rw-r--r--drivers/serial/clps711x.c4
-rw-r--r--drivers/serial/imx.c2
-rw-r--r--drivers/serial/ioc4_serial.c12
-rw-r--r--drivers/serial/mpc52xx_uart.c2
-rw-r--r--drivers/serial/pxa.c4
-rw-r--r--drivers/serial/s3c2410.c6
-rw-r--r--drivers/serial/sa1100.c2
-rw-r--r--drivers/serial/serial_cs.c1
-rw-r--r--drivers/serial/serial_lh7a40x.c2
-rw-r--r--drivers/tc/zs.c2
-rw-r--r--drivers/usb/core/hcd-pci.c9
-rw-r--r--drivers/usb/core/message.c2
-rw-r--r--drivers/usb/core/usb.c6
-rw-r--r--drivers/usb/gadget/pxa2xx_udc.c4
-rw-r--r--drivers/usb/gadget/pxa2xx_udc.h8
-rw-r--r--drivers/usb/host/ohci-lh7a404.c2
-rw-r--r--drivers/usb/host/ohci-omap.c1
-rw-r--r--drivers/usb/host/ohci-s3c2410.c1
-rw-r--r--drivers/usb/host/sl811-hcd.c16
-rw-r--r--drivers/usb/media/vicam.c4
-rw-r--r--drivers/usb/net/pegasus.c29
-rw-r--r--drivers/usb/serial/airprime.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c8
-rw-r--r--drivers/usb/serial/option.c11
-rw-r--r--drivers/video/Kconfig5
-rw-r--r--drivers/video/aty/radeon_base.c2
-rw-r--r--drivers/video/aty/radeon_pm.c14
-rw-r--r--drivers/video/aty/radeonfb.h2
-rw-r--r--drivers/video/aty/xlinit.c11
-rw-r--r--drivers/video/backlight/corgi_bl.c33
-rw-r--r--drivers/video/console/fbcon.c18
-rw-r--r--drivers/video/console/fbcon.h2
-rw-r--r--drivers/video/console/font_10x18.c4
-rw-r--r--drivers/video/console/font_6x11.c4
-rw-r--r--drivers/video/console/font_7x14.c4
-rw-r--r--drivers/video/console/font_8x16.c4
-rw-r--r--drivers/video/console/font_8x8.c4
-rw-r--r--drivers/video/console/font_acorn_8x8.c4
-rw-r--r--drivers/video/console/font_mini_4x6.c4
-rw-r--r--drivers/video/console/font_pearl_8x8.c4
-rw-r--r--drivers/video/console/font_sun12x22.c4
-rw-r--r--drivers/video/console/font_sun8x16.c4
-rw-r--r--drivers/video/console/fonts.c9
-rw-r--r--drivers/video/console/vgacon.c4
-rw-r--r--drivers/video/cyblafb.c11
-rw-r--r--drivers/video/fbcvt.c8
-rw-r--r--drivers/video/i810/i810-i2c.c16
-rw-r--r--drivers/video/imxfb.c3
-rw-r--r--drivers/video/intelfb/intelfbdrv.c21
-rw-r--r--drivers/video/matrox/matroxfb_base.c13
-rw-r--r--drivers/video/nvidia/nv_i2c.c11
-rw-r--r--drivers/video/nvidia/nvidia.c5
-rw-r--r--drivers/video/pm3fb.c3
-rw-r--r--drivers/video/pxafb.c108
-rw-r--r--drivers/video/pxafb.h9
-rw-r--r--drivers/video/s3c2410fb.c4
-rw-r--r--drivers/video/savage/savagefb-i2c.c11
-rw-r--r--drivers/video/savage/savagefb.h4
-rw-r--r--drivers/video/savage/savagefb_driver.c11
331 files changed, 25418 insertions, 4194 deletions
diff --git a/drivers/acorn/char/pcf8583.c b/drivers/acorn/char/pcf8583.c
index 141b4c237a5..2b850e5860a 100644
--- a/drivers/acorn/char/pcf8583.c
+++ b/drivers/acorn/char/pcf8583.c
@@ -23,12 +23,13 @@ static struct i2c_driver pcf8583_driver;
static unsigned short ignore[] = { I2C_CLIENT_END };
static unsigned short normal_addr[] = { 0x50, I2C_CLIENT_END };
+static unsigned short *forces[] = { NULL };
static struct i2c_client_address_data addr_data = {
.normal_i2c = normal_addr,
.probe = ignore,
.ignore = ignore,
- .force = ignore,
+ .forces = forces,
};
#define DAT(x) ((unsigned int)(x->dev.driver_data))
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index 373e7b728fa..6b2eb6f39b4 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -152,12 +152,13 @@ attribute_container_add_device(struct device *dev,
if (!cont->match(cont, dev))
continue;
- ic = kmalloc(sizeof(struct internal_container), GFP_KERNEL);
+
+ ic = kzalloc(sizeof(*ic), GFP_KERNEL);
if (!ic) {
dev_printk(KERN_ERR, dev, "failed to allocate class container\n");
continue;
}
- memset(ic, 0, sizeof(struct internal_container));
+
ic->cont = cont;
class_device_initialize(&ic->classdev);
ic->classdev.dev = get_device(dev);
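
Editor's note: the conversion above recurs throughout this series (class.c, firmware_class.c, map.c, platform.c, pktcdvd.c): a kmalloc() followed by a memset() to zero becomes a single kzalloc(), which returns memory that is already zeroed. A minimal sketch of the before/after shape, not tied to any one call site and with error handling abbreviated:

	struct internal_container *ic;

	/* before: allocate, check, then clear by hand */
	ic = kmalloc(sizeof(*ic), GFP_KERNEL);
	if (!ic)
		return;
	memset(ic, 0, sizeof(*ic));

	/* after: a single call that hands back zeroed memory */
	ic = kzalloc(sizeof(*ic), GFP_KERNEL);
	if (!ic)
		return;
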
diff --git a/drivers/base/class.c b/drivers/base/class.c
index d164c32a97a..ce23dc8c18c 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -189,12 +189,11 @@ struct class *class_create(struct module *owner, char *name)
struct class *cls;
int retval;
- cls = kmalloc(sizeof(struct class), GFP_KERNEL);
+ cls = kzalloc(sizeof(*cls), GFP_KERNEL);
if (!cls) {
retval = -ENOMEM;
goto error;
}
- memset(cls, 0x00, sizeof(struct class));
cls->name = name;
cls->owner = owner;
@@ -500,13 +499,13 @@ int class_device_add(struct class_device *class_dev)
/* add the needed attributes to this device */
if (MAJOR(class_dev->devt)) {
struct class_device_attribute *attr;
- attr = kmalloc(sizeof(*attr), GFP_KERNEL);
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
if (!attr) {
error = -ENOMEM;
kobject_del(&class_dev->kobj);
goto register_done;
}
- memset(attr, sizeof(*attr), 0x00);
+
attr->attr.name = "dev";
attr->attr.mode = S_IRUGO;
attr->attr.owner = parent->owner;
@@ -577,12 +576,11 @@ struct class_device *class_device_create(struct class *cls, dev_t devt,
if (cls == NULL || IS_ERR(cls))
goto error;
- class_dev = kmalloc(sizeof(struct class_device), GFP_KERNEL);
+ class_dev = kzalloc(sizeof(*class_dev), GFP_KERNEL);
if (!class_dev) {
retval = -ENOMEM;
goto error;
}
- memset(class_dev, 0x00, sizeof(struct class_device));
class_dev->devt = devt;
class_dev->dev = device;
@@ -671,6 +669,7 @@ void class_device_destroy(struct class *cls, dev_t devt)
int class_device_rename(struct class_device *class_dev, char *new_name)
{
int error = 0;
+ char *old_class_name = NULL, *new_class_name = NULL;
class_dev = class_device_get(class_dev);
if (!class_dev)
@@ -679,12 +678,24 @@ int class_device_rename(struct class_device *class_dev, char *new_name)
pr_debug("CLASS: renaming '%s' to '%s'\n", class_dev->class_id,
new_name);
+ if (class_dev->dev)
+ old_class_name = make_class_name(class_dev);
+
strlcpy(class_dev->class_id, new_name, KOBJ_NAME_LEN);
error = kobject_rename(&class_dev->kobj, new_name);
+ if (class_dev->dev) {
+ new_class_name = make_class_name(class_dev);
+ sysfs_create_link(&class_dev->dev->kobj, &class_dev->kobj,
+ new_class_name);
+ sysfs_remove_link(&class_dev->dev->kobj, old_class_name);
+ }
class_device_put(class_dev);
+ kfree(old_class_name);
+ kfree(new_class_name);
+
return error;
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d5bbce38282..3565e979530 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -40,6 +40,9 @@
*/
void device_bind_driver(struct device * dev)
{
+ if (klist_node_attached(&dev->knode_driver))
+ return;
+
pr_debug("bound device '%s' to driver '%s'\n",
dev->bus_id, dev->driver->name);
klist_add_tail(&dev->knode_driver, &dev->driver->klist_devices);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 5bfa2e9a7c2..4acb2c5733c 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -301,9 +301,9 @@ fw_register_class_device(struct class_device **class_dev_p,
const char *fw_name, struct device *device)
{
int retval;
- struct firmware_priv *fw_priv = kmalloc(sizeof (struct firmware_priv),
+ struct firmware_priv *fw_priv = kzalloc(sizeof(*fw_priv),
GFP_KERNEL);
- struct class_device *class_dev = kmalloc(sizeof (struct class_device),
+ struct class_device *class_dev = kzalloc(sizeof(*class_dev),
GFP_KERNEL);
*class_dev_p = NULL;
@@ -313,8 +313,6 @@ fw_register_class_device(struct class_device **class_dev_p,
retval = -ENOMEM;
goto error_kfree;
}
- memset(fw_priv, 0, sizeof (*fw_priv));
- memset(class_dev, 0, sizeof (*class_dev));
init_completion(&fw_priv->completion);
fw_priv->attr_data = firmware_attr_data_tmpl;
@@ -402,14 +400,13 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
if (!firmware_p)
return -EINVAL;
- *firmware_p = firmware = kmalloc(sizeof (struct firmware), GFP_KERNEL);
+ *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
if (!firmware) {
printk(KERN_ERR "%s: kmalloc(struct firmware) failed\n",
__FUNCTION__);
retval = -ENOMEM;
goto out;
}
- memset(firmware, 0, sizeof (*firmware));
retval = fw_setup_class_device(firmware, &class_dev, name, device,
hotplug);
diff --git a/drivers/base/map.c b/drivers/base/map.c
index 2f455d86793..b449dae6f0d 100644
--- a/drivers/base/map.c
+++ b/drivers/base/map.c
@@ -135,7 +135,7 @@ retry:
struct kobj_map *kobj_map_init(kobj_probe_t *base_probe, struct semaphore *sem)
{
struct kobj_map *p = kmalloc(sizeof(struct kobj_map), GFP_KERNEL);
- struct probe *base = kmalloc(sizeof(struct probe), GFP_KERNEL);
+ struct probe *base = kzalloc(sizeof(*base), GFP_KERNEL);
int i;
if ((p == NULL) || (base == NULL)) {
@@ -144,7 +144,6 @@ struct kobj_map *kobj_map_init(kobj_probe_t *base_probe, struct semaphore *sem)
return NULL;
}
- memset(base, 0, sizeof(struct probe));
base->dev = 1;
base->range = ~0;
base->get = base_probe;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 3a5f4c99179..361e204209e 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -225,13 +225,12 @@ struct platform_device *platform_device_register_simple(char *name, unsigned int
struct platform_object *pobj;
int retval;
- pobj = kmalloc(sizeof(struct platform_object) + sizeof(struct resource) * num, GFP_KERNEL);
+ pobj = kzalloc(sizeof(*pobj) + sizeof(struct resource) * num, GFP_KERNEL);
if (!pobj) {
retval = -ENOMEM;
goto error;
}
- memset(pobj, 0, sizeof(*pobj));
pobj->pdev.name = name;
pobj->pdev.id = id;
pobj->pdev.dev.release = platform_device_release_simple;
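
Editor's note: the platform hunk keeps the existing trick of carving the object and its trailing array of resources out of one allocation, and merely folds the zeroing into it. A hedged sketch of that idiom with a made-up struct (struct example and its res[] member are illustrative; the real struct platform_object layout is not shown in this diff):

	struct example {			/* hypothetical, for illustration */
		struct platform_device pdev;
		struct resource res[0];		/* sized at allocation time */
	};

	struct example *ex = kzalloc(sizeof(*ex) + sizeof(struct resource) * num,
				     GFP_KERNEL);
	if (!ex)
		return ERR_PTR(-ENOMEM);
	/* ex->res[0] .. ex->res[num - 1] live directly behind the struct,
	 * so a single kfree(ex) releases everything. */
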
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 28f2c177a54..486b6e1c7df 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -47,14 +47,14 @@
#include <linux/completion.h>
#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
-#define DRIVER_NAME "HP CISS Driver (v 2.6.6)"
-#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,6)
+#define DRIVER_NAME "HP CISS Driver (v 2.6.8)"
+#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,8)
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
-MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.6");
+MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.8");
MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
- " SA6i P600 P800 E400 E300");
+ " SA6i P600 P800 P400 P400i E200 E200i");
MODULE_LICENSE("GPL");
#include "cciss_cmd.h"
@@ -83,12 +83,22 @@ static const struct pci_device_id cciss_pci_device_id[] = {
0x0E11, 0x4091, 0, 0, 0},
{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA,
0x103C, 0x3225, 0, 0, 0},
- { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSB,
+ { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
0x103c, 0x3223, 0, 0, 0},
{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
- 0x103c, 0x3231, 0, 0, 0},
+ 0x103c, 0x3234, 0, 0, 0},
{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
- 0x103c, 0x3233, 0, 0, 0},
+ 0x103c, 0x3235, 0, 0, 0},
+ { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
+ 0x103c, 0x3211, 0, 0, 0},
+ { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
+ 0x103c, 0x3212, 0, 0, 0},
+ { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
+ 0x103c, 0x3213, 0, 0, 0},
+ { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
+ 0x103c, 0x3214, 0, 0, 0},
+ { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
+ 0x103c, 0x3215, 0, 0, 0},
{0,}
};
MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
@@ -111,8 +121,13 @@ static struct board_type products[] = {
{ 0x40910E11, "Smart Array 6i", &SA5_access},
{ 0x3225103C, "Smart Array P600", &SA5_access},
{ 0x3223103C, "Smart Array P800", &SA5_access},
- { 0x3231103C, "Smart Array E400", &SA5_access},
- { 0x3233103C, "Smart Array E300", &SA5_access},
+ { 0x3234103C, "Smart Array P400", &SA5_access},
+ { 0x3235103C, "Smart Array P400i", &SA5_access},
+ { 0x3211103C, "Smart Array E200i", &SA5_access},
+ { 0x3212103C, "Smart Array E200", &SA5_access},
+ { 0x3213103C, "Smart Array E200i", &SA5_access},
+ { 0x3214103C, "Smart Array E200i", &SA5_access},
+ { 0x3215103C, "Smart Array E200i", &SA5_access},
};
/* How long to wait (in millesconds) for board to go into simple mode */
@@ -140,15 +155,26 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
static int revalidate_allvol(ctlr_info_t *host);
static int cciss_revalidate(struct gendisk *disk);
-static int deregister_disk(struct gendisk *disk);
-static int register_new_disk(ctlr_info_t *h);
+static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
+static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, int clear_all);
+static void cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
+ int withirq, unsigned int *total_size, unsigned int *block_size);
+static void cciss_geometry_inquiry(int ctlr, int logvol,
+ int withirq, unsigned int total_size,
+ unsigned int block_size, InquiryData_struct *inq_buff,
+ drive_info_struct *drv);
static void cciss_getgeometry(int cntl_num);
static void start_io( ctlr_info_t *h);
static int sendcmd( __u8 cmd, int ctlr, void *buff, size_t size,
unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
unsigned char *scsi3addr, int cmd_type);
+static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
+ unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
+ int cmd_type);
+
+static void fail_all_cmds(unsigned long ctlr);
#ifdef CONFIG_PROC_FS
static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
@@ -265,7 +291,7 @@ static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
for(i=0; i<=h->highest_lun; i++) {
drv = &h->drv[i];
- if (drv->block_size == 0)
+ if (drv->heads == 0)
continue;
vol_sz = drv->nr_blocks;
@@ -363,6 +389,8 @@ static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
return NULL;
memset(c, 0, sizeof(CommandList_struct));
+ c->cmdindex = -1;
+
c->err_info = (ErrorInfo_struct *)pci_alloc_consistent(
h->pdev, sizeof(ErrorInfo_struct),
&err_dma_handle);
@@ -393,6 +421,8 @@ static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
err_dma_handle = h->errinfo_pool_dhandle
+ i*sizeof(ErrorInfo_struct);
h->nr_allocs++;
+
+ c->cmdindex = i;
}
c->busaddr = (__u32) cmd_dma_handle;
@@ -453,6 +483,8 @@ static int cciss_open(struct inode *inode, struct file *filep)
printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
#endif /* CCISS_DEBUG */
+ if (host->busy_initializing || drv->busy_configuring)
+ return -EBUSY;
/*
* Root is allowed to open raw volume zero even if it's not configured
* so array config can still work. Root is also allowed to open any
@@ -796,10 +828,10 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
return(0);
}
case CCISS_DEREGDISK:
- return deregister_disk(disk);
+ return rebuild_lun_table(host, disk);
case CCISS_REGNEWD:
- return register_new_disk(host);
+ return rebuild_lun_table(host, NULL);
case CCISS_PASSTHRU:
{
@@ -1143,48 +1175,323 @@ static int revalidate_allvol(ctlr_info_t *host)
return 0;
}
-static int deregister_disk(struct gendisk *disk)
+/* This function will check the usage_count of the drive to be updated/added.
+ * If the usage_count is zero then the drive information will be updated and
+ * the disk will be re-registered with the kernel. If not then it will be
+ * left alone for the next reboot. The exception to this is disk 0 which
+ * will always be left registered with the kernel since it is also the
+ * controller node. Any changes to disk 0 will show up on the next
+ * reboot.
+*/
+static void cciss_update_drive_info(int ctlr, int drv_index)
+ {
+ ctlr_info_t *h = hba[ctlr];
+ struct gendisk *disk;
+ ReadCapdata_struct *size_buff = NULL;
+ InquiryData_struct *inq_buff = NULL;
+ unsigned int block_size;
+ unsigned int total_size;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ /* if the disk already exists then deregister it before proceeding*/
+ if (h->drv[drv_index].raid_level != -1){
+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ h->drv[drv_index].busy_configuring = 1;
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ ret = deregister_disk(h->gendisk[drv_index],
+ &h->drv[drv_index], 0);
+ h->drv[drv_index].busy_configuring = 0;
+ }
+
+ /* If the disk is in use return */
+ if (ret)
+ return;
+
+
+	/* Get information about the disk and modify the driver structure */
+ size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
+ if (size_buff == NULL)
+ goto mem_msg;
+ inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
+ if (inq_buff == NULL)
+ goto mem_msg;
+
+ cciss_read_capacity(ctlr, drv_index, size_buff, 1,
+ &total_size, &block_size);
+ cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
+ inq_buff, &h->drv[drv_index]);
+
+ ++h->num_luns;
+ disk = h->gendisk[drv_index];
+ set_capacity(disk, h->drv[drv_index].nr_blocks);
+
+
+ /* if it's the controller it's already added */
+ if (drv_index){
+ disk->queue = blk_init_queue(do_cciss_request, &h->lock);
+
+ /* Set up queue information */
+ disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
+ blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
+
+ /* This is a hardware imposed limit. */
+ blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
+
+ /* This is a limit in the driver and could be eliminated. */
+ blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
+
+ blk_queue_max_sectors(disk->queue, 512);
+
+ disk->queue->queuedata = hba[ctlr];
+
+ blk_queue_hardsect_size(disk->queue,
+ hba[ctlr]->drv[drv_index].block_size);
+
+ h->drv[drv_index].queue = disk->queue;
+ add_disk(disk);
+ }
+
+freeret:
+ kfree(size_buff);
+ kfree(inq_buff);
+ return;
+mem_msg:
+ printk(KERN_ERR "cciss: out of memory\n");
+ goto freeret;
+}
+
+/* This function will find the first index of the controllers drive array
+ * that has a -1 for the raid_level and will return that index. This is
+ * where new drives will be added. If the index to be returned is greater
+ * than the highest_lun index for the controller then highest_lun is set
+ * to this new index. If there are no available indexes then -1 is returned.
+*/
+static int cciss_find_free_drive_index(int ctlr)
{
+ int i;
+
+ for (i=0; i < CISS_MAX_LUN; i++){
+ if (hba[ctlr]->drv[i].raid_level == -1){
+ if (i > hba[ctlr]->highest_lun)
+ hba[ctlr]->highest_lun = i;
+ return i;
+ }
+ }
+ return -1;
+}
+
+/* This function will add and remove logical drives from the Logical
+ * drive array of the controller and maintain persistence of ordering
+ * so that mount points are preserved until the next reboot. This allows
+ * for the removal of logical drives in the middle of the drive array
+ * without a re-ordering of those drives.
+ * INPUT
+ * h = The controller to perform the operations on
+ * del_disk = The disk to remove if specified. If the value given
+ * is NULL then no disk is removed.
+*/
+static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
+{
+ int ctlr = h->ctlr;
+ int num_luns;
+ ReportLunData_struct *ld_buff = NULL;
+ drive_info_struct *drv = NULL;
+ int return_code;
+ int listlength = 0;
+ int i;
+ int drv_found;
+ int drv_index = 0;
+ __u32 lunid = 0;
unsigned long flags;
+
+ /* Set busy_configuring flag for this operation */
+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ if (h->num_luns >= CISS_MAX_LUN){
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ return -EINVAL;
+ }
+
+ if (h->busy_configuring){
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ return -EBUSY;
+ }
+ h->busy_configuring = 1;
+
+ /* if del_disk is NULL then we are being called to add a new disk
+ * and update the logical drive table. If it is not NULL then
+ * we will check if the disk is in use or not.
+ */
+ if (del_disk != NULL){
+ drv = get_drv(del_disk);
+ drv->busy_configuring = 1;
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ return_code = deregister_disk(del_disk, drv, 1);
+ drv->busy_configuring = 0;
+ h->busy_configuring = 0;
+ return return_code;
+ } else {
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
+ if (ld_buff == NULL)
+ goto mem_msg;
+
+ return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
+ sizeof(ReportLunData_struct), 0, 0, 0,
+ TYPE_CMD);
+
+ if (return_code == IO_OK){
+ listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
+ listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
+ listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
+ listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
+ } else{ /* reading number of logical volumes failed */
+ printk(KERN_WARNING "cciss: report logical volume"
+ " command failed\n");
+ listlength = 0;
+ goto freeret;
+ }
+
+ num_luns = listlength / 8; /* 8 bytes per entry */
+ if (num_luns > CISS_MAX_LUN){
+ num_luns = CISS_MAX_LUN;
+ printk(KERN_WARNING "cciss: more luns configured"
+ " on controller than can be handled by"
+ " this driver.\n");
+ }
+
+ /* Compare controller drive array to drivers drive array.
+ * Check for updates in the drive information and any new drives
+ * on the controller.
+ */
+ for (i=0; i < num_luns; i++){
+ int j;
+
+ drv_found = 0;
+
+ lunid = (0xff &
+ (unsigned int)(ld_buff->LUN[i][3])) << 24;
+ lunid |= (0xff &
+ (unsigned int)(ld_buff->LUN[i][2])) << 16;
+ lunid |= (0xff &
+ (unsigned int)(ld_buff->LUN[i][1])) << 8;
+ lunid |= 0xff &
+ (unsigned int)(ld_buff->LUN[i][0]);
+
+ /* Find if the LUN is already in the drive array
+ * of the controller. If so then update its info
+ * if it is not in use. If it does not exist then find
+ * the first free index and add it.
+ */
+ for (j=0; j <= h->highest_lun; j++){
+ if (h->drv[j].LunID == lunid){
+ drv_index = j;
+ drv_found = 1;
+ }
+ }
+
+ /* check if the drive was found already in the array */
+ if (!drv_found){
+ drv_index = cciss_find_free_drive_index(ctlr);
+ if (drv_index == -1)
+ goto freeret;
+
+ }
+ h->drv[drv_index].LunID = lunid;
+ cciss_update_drive_info(ctlr, drv_index);
+ } /* end for */
+ } /* end else */
+
+freeret:
+ kfree(ld_buff);
+ h->busy_configuring = 0;
+ /* We return -1 here to tell the ACU that we have registered/updated
+ * all of the drives that we can and to keep it from calling us
+ * additional times.
+ */
+ return -1;
+mem_msg:
+ printk(KERN_ERR "cciss: out of memory\n");
+ goto freeret;
+}
+
+/* This function will deregister the disk and its queue from the
+ * kernel. It must be called with the controller lock held and the
+ * drv structure's busy_configuring flag set. Its parameters are:
+ *
+ * disk = This is the disk to be deregistered
+ * drv = This is the drive_info_struct associated with the disk to be
+ * deregistered. It contains information about the disk used
+ * by the driver.
+ * clear_all = This flag determines whether or not the disk information
+ * is going to be completely cleared out and the highest_lun
+ * reset. Sometimes we want to clear out information about
+ * the disk in preparation for re-adding it. In this case
+ * the highest_lun should be left unchanged and the LunID
+ * should not be cleared.
+*/
+static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
+ int clear_all)
+{
ctlr_info_t *h = get_host(disk);
- drive_info_struct *drv = get_drv(disk);
- int ctlr = h->ctlr;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
/* make sure logical volume is NOT is use */
- if( drv->usage_count > 1) {
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ if(clear_all || (h->gendisk[0] == disk)) {
+ if (drv->usage_count > 1)
return -EBUSY;
}
- drv->usage_count++;
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ else
+ if( drv->usage_count > 0 )
+ return -EBUSY;
- /* invalidate the devices and deregister the disk */
- if (disk->flags & GENHD_FL_UP)
+ /* invalidate the devices and deregister the disk. If it is disk
+ * zero do not deregister it but just zero out its values. This
+ * allows us to delete disk zero but keep the controller registered.
+ */
+ if (h->gendisk[0] != disk){
+ if (disk->flags & GENHD_FL_UP){
+ blk_cleanup_queue(disk->queue);
del_gendisk(disk);
+ drv->queue = NULL;
+ }
+ }
+
+ --h->num_luns;
+ /* zero out the disk size info */
+ drv->nr_blocks = 0;
+ drv->block_size = 0;
+ drv->heads = 0;
+ drv->sectors = 0;
+ drv->cylinders = 0;
+ drv->raid_level = -1; /* This can be used as a flag variable to
+ * indicate that this element of the drive
+ * array is free.
+ */
+
+ if (clear_all){
/* check to see if it was the last disk */
if (drv == h->drv + h->highest_lun) {
/* if so, find the new hightest lun */
int i, newhighest =-1;
for(i=0; i<h->highest_lun; i++) {
/* if the disk has size > 0, it is available */
- if (h->drv[i].nr_blocks)
+ if (h->drv[i].heads)
newhighest = i;
}
h->highest_lun = newhighest;
-
}
- --h->num_luns;
- /* zero out the disk size info */
- drv->nr_blocks = 0;
- drv->block_size = 0;
- drv->cylinders = 0;
+
drv->LunID = 0;
+ }
return(0);
}
+
static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
size_t size,
unsigned int use_unit_num, /* 0: address the controller,
@@ -1420,8 +1727,10 @@ case CMD_HARDWARE_ERR:
}
}
/* unlock the buffers from DMA */
+ buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
+ buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val,
- size, PCI_DMA_BIDIRECTIONAL);
+ c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
cmd_free(h, c, 0);
return(return_status);
@@ -1495,164 +1804,6 @@ cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
return;
}
-static int register_new_disk(ctlr_info_t *h)
-{
- struct gendisk *disk;
- int ctlr = h->ctlr;
- int i;
- int num_luns;
- int logvol;
- int new_lun_found = 0;
- int new_lun_index = 0;
- int free_index_found = 0;
- int free_index = 0;
- ReportLunData_struct *ld_buff = NULL;
- ReadCapdata_struct *size_buff = NULL;
- InquiryData_struct *inq_buff = NULL;
- int return_code;
- int listlength = 0;
- __u32 lunid = 0;
- unsigned int block_size;
- unsigned int total_size;
-
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
- /* if we have no space in our disk array left to add anything */
- if( h->num_luns >= CISS_MAX_LUN)
- return -EINVAL;
-
- ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
- if (ld_buff == NULL)
- goto mem_msg;
- memset(ld_buff, 0, sizeof(ReportLunData_struct));
- size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
- if (size_buff == NULL)
- goto mem_msg;
- inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
- if (inq_buff == NULL)
- goto mem_msg;
-
- return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
- sizeof(ReportLunData_struct), 0, 0, 0, TYPE_CMD);
-
- if( return_code == IO_OK)
- {
-
- // printk("LUN Data\n--------------------------\n");
-
- listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
- listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
- listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
- listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
- } else /* reading number of logical volumes failed */
- {
- printk(KERN_WARNING "cciss: report logical volume"
- " command failed\n");
- listlength = 0;
- goto free_err;
- }
- num_luns = listlength / 8; // 8 bytes pre entry
- if (num_luns > CISS_MAX_LUN)
- {
- num_luns = CISS_MAX_LUN;
- }
-#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
- ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
- ld_buff->LUNListLength[3], num_luns);
-#endif
- for(i=0; i< num_luns; i++)
- {
- int j;
- int lunID_found = 0;
-
- lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24;
- lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16;
- lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8;
- lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
-
- /* check to see if this is a new lun */
- for(j=0; j <= h->highest_lun; j++)
- {
-#ifdef CCISS_DEBUG
- printk("Checking %d %x against %x\n", j,h->drv[j].LunID,
- lunid);
-#endif /* CCISS_DEBUG */
- if (h->drv[j].LunID == lunid)
- {
- lunID_found = 1;
- break;
- }
-
- }
- if( lunID_found == 1)
- continue;
- else
- { /* It is the new lun we have been looking for */
-#ifdef CCISS_DEBUG
- printk("new lun found at %d\n", i);
-#endif /* CCISS_DEBUG */
- new_lun_index = i;
- new_lun_found = 1;
- break;
- }
- }
- if (!new_lun_found)
- {
- printk(KERN_WARNING "cciss: New Logical Volume not found\n");
- goto free_err;
- }
- /* Now find the free index */
- for(i=0; i <CISS_MAX_LUN; i++)
- {
-#ifdef CCISS_DEBUG
- printk("Checking Index %d\n", i);
-#endif /* CCISS_DEBUG */
- if(h->drv[i].LunID == 0)
- {
-#ifdef CCISS_DEBUG
- printk("free index found at %d\n", i);
-#endif /* CCISS_DEBUG */
- free_index_found = 1;
- free_index = i;
- break;
- }
- }
- if (!free_index_found)
- {
- printk(KERN_WARNING "cciss: unable to find free slot for disk\n");
- goto free_err;
- }
-
- logvol = free_index;
- h->drv[logvol].LunID = lunid;
- /* there could be gaps in lun numbers, track hightest */
- if(h->highest_lun < lunid)
- h->highest_lun = logvol;
- cciss_read_capacity(ctlr, logvol, size_buff, 1,
- &total_size, &block_size);
- cciss_geometry_inquiry(ctlr, logvol, 1, total_size, block_size,
- inq_buff, &h->drv[logvol]);
- h->drv[logvol].usage_count = 0;
- ++h->num_luns;
- /* setup partitions per disk */
- disk = h->gendisk[logvol];
- set_capacity(disk, h->drv[logvol].nr_blocks);
- /* if it's the controller it's already added */
- if(logvol)
- add_disk(disk);
-freeret:
- kfree(ld_buff);
- kfree(size_buff);
- kfree(inq_buff);
- return (logvol);
-mem_msg:
- printk(KERN_ERR "cciss: out of memory\n");
-free_err:
- logvol = -1;
- goto freeret;
-}
-
static int cciss_revalidate(struct gendisk *disk)
{
ctlr_info_t *h = get_host(disk);
@@ -1859,8 +2010,10 @@ resend_cmd1:
cleanup1:
/* unlock the data buffer from DMA */
+ buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
+ buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
- size, PCI_DMA_BIDIRECTIONAL);
+ c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
cmd_free(info_p, c, 1);
return (status);
}
@@ -2111,7 +2264,11 @@ queue:
/* fill in the request */
drv = creq->rq_disk->private_data;
c->Header.ReplyQueue = 0; // unused in simple mode
- c->Header.Tag.lower = c->busaddr; // use the physical address the cmd block for tag
+ /* got command from pool, so use the command block index instead */
+ /* for direct lookups. */
+ /* The first 2 bits are reserved for controller error reporting. */
+ c->Header.Tag.lower = (c->cmdindex << 3);
+ c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
c->Header.LUN.LogDev.VolId= drv->LunID;
c->Header.LUN.LogDev.Mode = 1;
c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
@@ -2186,7 +2343,7 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
ctlr_info_t *h = dev_id;
CommandList_struct *c;
unsigned long flags;
- __u32 a, a1;
+ __u32 a, a1, a2;
int j;
int start_queue = h->next_to_run;
@@ -2204,10 +2361,21 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
while((a = h->access.command_completed(h)) != FIFO_EMPTY)
{
a1 = a;
+ if ((a & 0x04)) {
+ a2 = (a >> 3);
+ if (a2 >= NR_CMDS) {
+ printk(KERN_WARNING "cciss: controller cciss%d failed, stopping.\n", h->ctlr);
+ fail_all_cmds(h->ctlr);
+ return IRQ_HANDLED;
+ }
+
+ c = h->cmd_pool + a2;
+ a = c->busaddr;
+
+ } else {
a &= ~3;
- if ((c = h->cmpQ) == NULL)
- {
- printk(KERN_WARNING "cciss: Completion of %08lx ignored\n", (unsigned long)a1);
+ if ((c = h->cmpQ) == NULL) {
+ printk(KERN_WARNING "cciss: Completion of %08x ignored\n", a1);
continue;
}
while(c->busaddr != a) {
@@ -2215,6 +2383,7 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
if (c == h->cmpQ)
break;
}
+ }
/*
* If we've found the command, take it off the
* completion Q and free it
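
Editor's note: the two hunks above are two halves of one scheme. When a command comes from the pool, its pool index is packed into the low tag word as (cmdindex << 3) | 0x04, and the interrupt handler recovers the index with a shift instead of walking the completion queue. A small standalone illustration of that encoding (the helper names and the test program are mine, not part of the driver):

#include <stdio.h>

/* Bits 0-1 stay clear for controller error reporting, bit 2 (0x04) marks the
 * tag as a direct-lookup index, and the pool index itself starts at bit 3. */
static unsigned int encode_tag(unsigned int cmdindex)
{
	return (cmdindex << 3) | 0x04;
}

static int decode_tag(unsigned int tag, unsigned int *cmdindex)
{
	if (!(tag & 0x04))
		return 0;	/* legacy tag: a bus address, walk cmpQ instead */
	*cmdindex = tag >> 3;
	return 1;
}

int main(void)
{
	unsigned int idx, tag = encode_tag(37);

	if (decode_tag(tag, &idx))
		printf("tag 0x%08x -> command index %u\n", tag, idx);
	return 0;
}
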
@@ -2634,12 +2803,16 @@ static void cciss_getgeometry(int cntl_num)
#endif /* CCISS_DEBUG */
hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1;
- for(i=0; i< hba[cntl_num]->num_luns; i++)
+// for(i=0; i< hba[cntl_num]->num_luns; i++)
+ for(i=0; i < CISS_MAX_LUN; i++)
{
-
- lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24;
- lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16;
- lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8;
+ if (i < hba[cntl_num]->num_luns){
+ lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
+ << 24;
+ lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
+ << 16;
+ lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
+ << 8;
lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
hba[cntl_num]->drv[i].LunID = lunid;
@@ -2647,13 +2820,18 @@ static void cciss_getgeometry(int cntl_num)
#ifdef CCISS_DEBUG
printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
- ld_buff->LUN[i][0], ld_buff->LUN[i][1],ld_buff->LUN[i][2],
- ld_buff->LUN[i][3], hba[cntl_num]->drv[i].LunID);
+ ld_buff->LUN[i][0], ld_buff->LUN[i][1],
+ ld_buff->LUN[i][2], ld_buff->LUN[i][3],
+ hba[cntl_num]->drv[i].LunID);
#endif /* CCISS_DEBUG */
cciss_read_capacity(cntl_num, i, size_buff, 0,
&total_size, &block_size);
- cciss_geometry_inquiry(cntl_num, i, 0, total_size, block_size,
- inq_buff, &hba[cntl_num]->drv[i]);
+ cciss_geometry_inquiry(cntl_num, i, 0, total_size,
+ block_size, inq_buff, &hba[cntl_num]->drv[i]);
+ } else {
+ /* initialize raid_level to indicate a free space */
+ hba[cntl_num]->drv[i].raid_level = -1;
+ }
}
kfree(ld_buff);
kfree(size_buff);
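
Editor's note: both rebuild_lun_table() and cciss_getgeometry() reassemble each 32-bit LunID from the four bytes returned by the report-LUNs command, with byte 3 as the most significant byte. A standalone sketch of that byte assembly (the helper name and sample value are illustrative only):

#include <stdio.h>

static unsigned int lunid_from_bytes(const unsigned char lun[4])
{
	return ((unsigned int)lun[3] << 24) |
	       ((unsigned int)lun[2] << 16) |
	       ((unsigned int)lun[1] << 8)  |
	        (unsigned int)lun[0];
}

int main(void)
{
	const unsigned char lun[4] = { 0x01, 0x00, 0x00, 0x00 };

	printf("LunID = 0x%08x\n", lunid_from_bytes(lun));	/* 0x00000001 */
	return 0;
}
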
@@ -2727,6 +2905,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
i = alloc_cciss_hba();
if(i < 0)
return (-1);
+
+ hba[i]->busy_initializing = 1;
+
if (cciss_pci_init(hba[i], pdev) != 0)
goto clean1;
@@ -2807,6 +2988,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
cciss_procinit(i);
+ hba[i]->busy_initializing = 0;
for(j=0; j < NWD; j++) { /* mfm */
drive_info_struct *drv = &(hba[i]->drv[j]);
@@ -2869,6 +3051,7 @@ clean2:
clean1:
release_io_mem(hba[i]);
free_hba(i);
+ hba[i]->busy_initializing = 0;
return(-1);
}
@@ -2913,9 +3096,10 @@ static void __devexit cciss_remove_one (struct pci_dev *pdev)
/* remove it from the disk list */
for (j = 0; j < NWD; j++) {
struct gendisk *disk = hba[i]->gendisk[j];
- if (disk->flags & GENHD_FL_UP)
- blk_cleanup_queue(disk->queue);
+ if (disk->flags & GENHD_FL_UP) {
del_gendisk(disk);
+ blk_cleanup_queue(disk->queue);
+ }
}
pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
@@ -2964,5 +3148,43 @@ static void __exit cciss_cleanup(void)
remove_proc_entry("cciss", proc_root_driver);
}
+static void fail_all_cmds(unsigned long ctlr)
+{
+ /* If we get here, the board is apparently dead. */
+ ctlr_info_t *h = hba[ctlr];
+ CommandList_struct *c;
+ unsigned long flags;
+
+ printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
+ h->alive = 0; /* the controller apparently died... */
+
+ spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+
+ pci_disable_device(h->pdev); /* Make sure it is really dead. */
+
+ /* move everything off the request queue onto the completed queue */
+ while( (c = h->reqQ) != NULL ) {
+ removeQ(&(h->reqQ), c);
+ h->Qdepth--;
+ addQ (&(h->cmpQ), c);
+ }
+
+ /* Now, fail everything on the completed queue with a HW error */
+ while( (c = h->cmpQ) != NULL ) {
+ removeQ(&h->cmpQ, c);
+ c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+ if (c->cmd_type == CMD_RWREQ) {
+ complete_command(h, c, 0);
+ } else if (c->cmd_type == CMD_IOCTL_PEND)
+ complete(c->waiting);
+#ifdef CONFIG_CISS_SCSI_TAPE
+ else if (c->cmd_type == CMD_SCSI)
+ complete_scsi_command(c, 0, 0);
+#endif
+ }
+ spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ return;
+}
+
module_init(cciss_init);
module_exit(cciss_cleanup);
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 566587d0a50..ef277baee9f 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -35,7 +35,13 @@ typedef struct _drive_info_struct
int heads;
int sectors;
int cylinders;
- int raid_level;
+ int raid_level; /* set to -1 to indicate that
+ * the drive is not in use/configured
+ */
+ int busy_configuring; /*This is set when the drive is being removed
+ *to prevent it from being opened or its queue
+ *from being started.
+ */
} drive_info_struct;
struct ctlr_info
@@ -83,6 +89,7 @@ struct ctlr_info
int nr_allocs;
int nr_frees;
int busy_configuring;
+ int busy_initializing;
/* This element holds the zero based queue number of the last
* queue to be started. It is used for fairness.
@@ -94,6 +101,7 @@ struct ctlr_info
#ifdef CONFIG_CISS_SCSI_TAPE
void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
#endif
+ unsigned char alive;
};
/* Defining the diffent access_menthods */
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index a88a8881762..53fea549ba8 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -226,6 +226,10 @@ typedef struct _ErrorInfo_struct {
#define CMD_MSG_DONE 0x04
#define CMD_MSG_TIMEOUT 0x05
+/* This structure needs to be divisible by 8 for the new
+ * indexing method.
+ */
+#define PADSIZE (sizeof(long) - 4)
typedef struct _CommandList_struct {
CommandListHeader_struct Header;
RequestBlock_struct Request;
@@ -236,14 +240,14 @@ typedef struct _CommandList_struct {
ErrorInfo_struct * err_info; /* pointer to the allocated mem */
int ctlr;
int cmd_type;
+ long cmdindex;
struct _CommandList_struct *prev;
struct _CommandList_struct *next;
struct request * rq;
struct completion *waiting;
int retry_count;
-#ifdef CONFIG_CISS_SCSI_TAPE
void * scsi_cmd;
-#endif
+ char pad[PADSIZE];
} CommandList_struct;
//Configuration Table Structure
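
Editor's note: the patch documents the alignment requirement only in the comment and relies on the new pad[] member to keep sizeof(CommandList_struct) a multiple of 8, which the index-based tags depend on. A compile-time guard along these lines (not part of the patch, shown as a sketch that could sit after the struct definition) would turn a future violation into a build error:

/* Fails to compile if the command block ever stops being a multiple of 8 bytes. */
typedef char cciss_cmdlist_size_check[(sizeof(CommandList_struct) % 8 == 0) ? 1 : -1];
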
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index f16e3caed58..e183a3ef783 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -93,6 +93,7 @@ struct cciss_scsi_cmd_stack_elem_t {
CommandList_struct cmd;
ErrorInfo_struct Err;
__u32 busaddr;
+ __u32 pad;
};
#pragma pack()
@@ -877,7 +878,7 @@ cciss_scsi_interpret_error(CommandList_struct *cp)
static int
cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
- InquiryData_struct *buf)
+ unsigned char *buf, unsigned char bufsize)
{
int rc;
CommandList_struct *cp;
@@ -900,11 +901,10 @@ cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
cdb[1] = 0;
cdb[2] = 0;
cdb[3] = 0;
- cdb[4] = sizeof(*buf) & 0xff;
+ cdb[4] = bufsize;
cdb[5] = 0;
rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, cdb,
- 6, (unsigned char *) buf,
- sizeof(*buf), XFER_READ);
+ 6, buf, bufsize, XFER_READ);
if (rc != 0) return rc; /* something went wrong */
@@ -1000,9 +1000,10 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
that though.
*/
-
+#define OBDR_TAPE_INQ_SIZE 49
+#define OBDR_TAPE_SIG "$DR-10"
ReportLunData_struct *ld_buff;
- InquiryData_struct *inq_buff;
+ unsigned char *inq_buff;
unsigned char scsi3addr[8];
ctlr_info_t *c;
__u32 num_luns=0;
@@ -1020,7 +1021,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
return;
}
memset(ld_buff, 0, reportlunsize);
- inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
+ inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
if (inq_buff == NULL) {
printk(KERN_ERR "cciss: out of memory\n");
kfree(ld_buff);
@@ -1051,19 +1052,36 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
/* for each physical lun, do an inquiry */
if (ld_buff->LUN[i][3] & 0xC0) continue;
- memset(inq_buff, 0, sizeof(InquiryData_struct));
+ memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
- if (cciss_scsi_do_inquiry(hba[cntl_num],
- scsi3addr, inq_buff) != 0)
- {
+ if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, inq_buff,
+ (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
/* Inquiry failed (msg printed already) */
devtype = 0; /* so we will skip this device. */
} else /* what kind of device is this? */
- devtype = (inq_buff->data_byte[0] & 0x1f);
+ devtype = (inq_buff[0] & 0x1f);
switch (devtype)
{
+ case 0x05: /* CD-ROM */ {
+
+ /* We don't *really* support actual CD-ROM devices,
+ * just this "One Button Disaster Recovery" tape drive
+ * which temporarily pretends to be a CD-ROM drive.
+ * So we check that the device is really an OBDR tape
+ * device by checking for "$DR-10" in bytes 43-48 of
+ * the inquiry data.
+ */
+ char obdr_sig[7];
+
+ strncpy(obdr_sig, &inq_buff[43], 6);
+ obdr_sig[6] = '\0';
+ if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
+ /* Not OBDR device, ignore it. */
+ break;
+ }
+ /* fall through . . . */
case 0x01: /* sequential access, (tape) */
case 0x08: /* medium changer */
if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
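
Editor's note: as the comment above explains, a device reporting itself as a CD-ROM (type 0x05) is only accepted when the "$DR-10" OBDR signature sits at bytes 43-48 of its standard inquiry data, which is why the inquiry buffer grew to 49 bytes. An equivalent standalone check (illustrative only; the driver uses the strncpy/strncmp form shown above):

#include <stdio.h>
#include <string.h>

#define OBDR_TAPE_INQ_SIZE 49
#define OBDR_TAPE_SIG      "$DR-10"

static int is_obdr_device(const unsigned char *inq)
{
	return (inq[0] & 0x1f) == 0x05 &&			/* CD-ROM type */
	       memcmp(&inq[43], OBDR_TAPE_SIG, strlen(OBDR_TAPE_SIG)) == 0;
}

int main(void)
{
	unsigned char inq[OBDR_TAPE_INQ_SIZE] = { 0x05 };

	memcpy(&inq[43], OBDR_TAPE_SIG, strlen(OBDR_TAPE_SIG));
	printf("OBDR tape: %s\n", is_obdr_device(inq) ? "yes" : "no");
	return 0;
}
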
@@ -1126,6 +1144,7 @@ cciss_scsi_proc_info(struct Scsi_Host *sh,
int buflen, datalen;
ctlr_info_t *ci;
+ int i;
int cntl_num;
@@ -1136,8 +1155,28 @@ cciss_scsi_proc_info(struct Scsi_Host *sh,
cntl_num = ci->ctlr; /* Get our index into the hba[] array */
if (func == 0) { /* User is reading from /proc/scsi/ciss*?/?* */
- buflen = sprintf(buffer, "hostnum=%d\n", sh->host_no);
-
+ buflen = sprintf(buffer, "cciss%d: SCSI host: %d\n",
+ cntl_num, sh->host_no);
+
+ /* this information is needed by apps to know which cciss
+ device corresponds to which scsi host number without
+ having to open a scsi target device node. The device
+ information is not a duplicate of /proc/scsi/scsi because
+ the two may be out of sync due to scsi hotplug, rather
+ this info is for an app to be able to use to know how to
+ get them back in sync. */
+
+ for (i=0;i<ccissscsi[cntl_num].ndevices;i++) {
+ struct cciss_scsi_dev_t *sd = &ccissscsi[cntl_num].dev[i];
+ buflen += sprintf(&buffer[buflen], "c%db%dt%dl%d %02d "
+ "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ sh->host_no, sd->bus, sd->target, sd->lun,
+ sd->devtype,
+ sd->scsi3addr[0], sd->scsi3addr[1],
+ sd->scsi3addr[2], sd->scsi3addr[3],
+ sd->scsi3addr[4], sd->scsi3addr[5],
+ sd->scsi3addr[6], sd->scsi3addr[7]);
+ }
datalen = buflen - offset;
if (datalen < 0) { /* they're reading past EOF. */
datalen = 0;
@@ -1399,7 +1438,7 @@ cciss_proc_tape_report(int ctlr, unsigned char *buffer, off_t *pos, off_t *len)
CPQ_TAPE_LOCK(ctlr, flags);
size = sprintf(buffer + *len,
- " Sequential access devices: %d\n\n",
+ "Sequential access devices: %d\n\n",
ccissscsi[ctlr].ndevices);
CPQ_TAPE_UNLOCK(ctlr, flags);
*pos += size; *len += size;
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 483d71b10cf..baedac52294 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -2373,44 +2373,6 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
EXPORT_SYMBOL(blkdev_issue_flush);
-/**
- * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
- * @q: device queue
- * @disk: gendisk
- * @error_sector: error offset
- *
- * Description:
- * Devices understanding the SCSI command set, can use this function as
- * a helper for issuing a cache flush. Note: driver is required to store
- * the error offset (in case of error flushing) in ->sector of struct
- * request.
- */
-int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
- int ret;
-
- rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
- rq->sector = 0;
- memset(rq->cmd, 0, sizeof(rq->cmd));
- rq->cmd[0] = 0x35;
- rq->cmd_len = 12;
- rq->data = NULL;
- rq->data_len = 0;
- rq->timeout = 60 * HZ;
-
- ret = blk_execute_rq(q, disk, rq, 0);
-
- if (ret && error_sector)
- *error_sector = rq->sector;
-
- blk_put_request(rq);
- return ret;
-}
-
-EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
-
static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
{
int rw = rq_data_dir(rq);
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 711d2f314ac..94af920465b 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -750,6 +750,14 @@ static int pf_ready(void)
static struct request_queue *pf_queue;
+static void pf_end_request(int uptodate)
+{
+ if (pf_req) {
+ end_request(pf_req, uptodate);
+ pf_req = NULL;
+ }
+}
+
static void do_pf_request(request_queue_t * q)
{
if (pf_busy)
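
Editor's note: the new pf_end_request() helper makes completion idempotent: the request is ended at most once and pf_req is cleared, so later callers (pf_next_buf(), next_request()) must re-check the pointer before using it. A tiny user-space model of that guard-and-clear pattern (names and the demo are illustrative, not driver code):

#include <stdio.h>
#include <stdlib.h>

struct request { int sectors; };

static struct request *pf_req;

static void pf_end_request_demo(int uptodate)
{
	if (pf_req) {			/* only the first call completes it */
		printf("completing request (%s)\n", uptodate ? "ok" : "failed");
		free(pf_req);
		pf_req = NULL;
	}
}

int main(void)
{
	pf_req = malloc(sizeof(*pf_req));
	pf_end_request_demo(1);
	pf_end_request_demo(1);		/* no-op: nothing left to complete */
	return 0;
}
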
@@ -765,7 +773,7 @@ repeat:
pf_count = pf_req->current_nr_sectors;
if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
- end_request(pf_req, 0);
+ pf_end_request(0);
goto repeat;
}
@@ -780,7 +788,7 @@ repeat:
pi_do_claimed(pf_current->pi, do_pf_write);
else {
pf_busy = 0;
- end_request(pf_req, 0);
+ pf_end_request(0);
goto repeat;
}
}
@@ -798,9 +806,11 @@ static int pf_next_buf(void)
if (!pf_count)
return 1;
spin_lock_irqsave(&pf_spin_lock, saved_flags);
- end_request(pf_req, 1);
- pf_count = pf_req->current_nr_sectors;
- pf_buf = pf_req->buffer;
+ pf_end_request(1);
+ if (pf_req) {
+ pf_count = pf_req->current_nr_sectors;
+ pf_buf = pf_req->buffer;
+ }
spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
return 1;
}
@@ -810,7 +820,7 @@ static inline void next_request(int success)
unsigned long saved_flags;
spin_lock_irqsave(&pf_spin_lock, saved_flags);
- end_request(pf_req, success);
+ pf_end_request(success);
pf_busy = 0;
do_pf_request(pf_queue);
spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 7b838342f0a..7e22a58926b 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -5,29 +5,41 @@
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*
- * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
- * DVD-RW devices (aka an exercise in block layer masturbation)
+ * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
+ * DVD-RAM devices.
*
+ * Theory of operation:
*
- * TODO: (circa order of when I will fix it)
- * - Only able to write on CD-RW media right now.
- * - check host application code on media and set it in write page
- * - interface for UDF <-> packet to negotiate a new location when a write
- * fails.
- * - handle OPC, especially for -RW media
+ * At the lowest level, there is the standard driver for the CD/DVD device,
+ * typically ide-cd.c or sr.c. This driver can handle read and write requests,
+ * but it doesn't know anything about the special restrictions that apply to
+ * packet writing. One restriction is that write requests must be aligned to
+ * packet boundaries on the physical media, and the size of a write request
+ * must be equal to the packet size. Another restriction is that a
+ * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
+ * command, if the previous command was a write.
*
- * Theory of operation:
+ * The purpose of the packet writing driver is to hide these restrictions from
+ * higher layers, such as file systems, and present a block device that can be
+ * randomly read and written using 2kB-sized blocks.
+ *
+ * The lowest layer in the packet writing driver is the packet I/O scheduler.
+ * Its data is defined by the struct packet_iosched and includes two bio
+ * queues with pending read and write requests. These queues are processed
+ * by the pkt_iosched_process_queue() function. The write requests in this
+ * queue are already properly aligned and sized. This layer is responsible for
+ * issuing the flush cache commands and scheduling the I/O in a good order.
*
- * We use a custom make_request_fn function that forwards reads directly to
- * the underlying CD device. Write requests are either attached directly to
- * a live packet_data object, or simply stored sequentially in a list for
- * later processing by the kcdrwd kernel thread. This driver doesn't use
- * any elevator functionally as defined by the elevator_s struct, but the
- * underlying CD device uses a standard elevator.
+ * The next layer transforms unaligned write requests to aligned writes. This
+ * transformation requires reading missing pieces of data from the underlying
+ * block device, assembling the pieces into full packets and queuing them to the
+ * packet I/O scheduler.
*
- * This strategy makes it possible to do very late merging of IO requests.
- * A new bio sent to pkt_make_request can be merged with a live packet_data
- * object even if the object is in the data gathering state.
+ * At the top layer there is a custom make_request_fn function that forwards
+ * read requests directly to the iosched queue and puts write requests in the
+ * unaligned write queue. A kernel thread performs the necessary read
+ * gathering to convert the unaligned writes to aligned writes and then feeds
+ * them to the packet I/O scheduler.
*
*************************************************************************/
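A minimal sketch of the alignment rule described above, assuming a power-of-two packet size expressed in 512-byte sectors (the helper name and the standalone form are illustrative, not the driver's own code):

    #include <linux/types.h>

    /* Map a request sector to the start of its packet ("zone").  A write
     * is only issued for a complete zone, i.e. the range
     * [zone, zone + packet_size), which is what the read-gathering step
     * described above assembles before queuing the aligned write. */
    static inline sector_t zone_of(sector_t sector, unsigned int packet_size)
    {
            return sector & ~(sector_t)(packet_size - 1);
    }

For example, with a 128-sector (64kB) packet, a write touching only sector 300 falls in the zone starting at sector 256, so sectors 256-299 and 301-383 would have to be read before the full packet can be written.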
@@ -100,10 +112,9 @@ static struct bio *pkt_bio_alloc(int nr_iovecs)
goto no_bio;
bio_init(bio);
- bvl = kmalloc(nr_iovecs * sizeof(struct bio_vec), GFP_KERNEL);
+ bvl = kcalloc(nr_iovecs, sizeof(struct bio_vec), GFP_KERNEL);
if (!bvl)
goto no_bvl;
- memset(bvl, 0, nr_iovecs * sizeof(struct bio_vec));
bio->bi_max_vecs = nr_iovecs;
bio->bi_io_vec = bvl;
@@ -125,10 +136,9 @@ static struct packet_data *pkt_alloc_packet_data(void)
int i;
struct packet_data *pkt;
- pkt = kmalloc(sizeof(struct packet_data), GFP_KERNEL);
+ pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
if (!pkt)
goto no_pkt;
- memset(pkt, 0, sizeof(struct packet_data));
pkt->w_bio = pkt_bio_alloc(PACKET_MAX_SIZE);
if (!pkt->w_bio)
@@ -659,7 +669,6 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct page **pages, in
}
offs += CD_FRAMESIZE;
if (offs >= PAGE_SIZE) {
- BUG_ON(offs > PAGE_SIZE);
offs = 0;
p++;
}
@@ -724,12 +733,6 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
atomic_set(&pkt->io_wait, 0);
atomic_set(&pkt->io_errors, 0);
- if (pkt->cache_valid) {
- VPRINTK("pkt_gather_data: zone %llx cached\n",
- (unsigned long long)pkt->sector);
- goto out_account;
- }
-
/*
* Figure out which frames we need to read before we can write.
*/
@@ -738,6 +741,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
int num_frames = bio->bi_size / CD_FRAMESIZE;
+ pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
BUG_ON(first_frame < 0);
BUG_ON(first_frame + num_frames > pkt->frames);
for (f = first_frame; f < first_frame + num_frames; f++)
@@ -745,6 +749,12 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
}
spin_unlock(&pkt->lock);
+ if (pkt->cache_valid) {
+ VPRINTK("pkt_gather_data: zone %llx cached\n",
+ (unsigned long long)pkt->sector);
+ goto out_account;
+ }
+
/*
* Schedule reads for missing parts of the packet.
*/
@@ -778,7 +788,6 @@ out_account:
frames_read, (unsigned long long)pkt->sector);
pd->stats.pkt_started++;
pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
- pd->stats.secs_w += pd->settings.size;
}
/*
@@ -794,10 +803,11 @@ static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zo
list_del_init(&pkt->list);
if (pkt->sector != zone)
pkt->cache_valid = 0;
- break;
+ return pkt;
}
}
- return pkt;
+ BUG();
+ return NULL;
}
static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
@@ -941,12 +951,10 @@ try_next_bio:
}
pkt = pkt_get_packet_data(pd, zone);
- BUG_ON(!pkt);
pd->current_sector = zone + pd->settings.size;
pkt->sector = zone;
pkt->frames = pd->settings.size >> 2;
- BUG_ON(pkt->frames > PACKET_MAX_SIZE);
pkt->write_size = 0;
/*
@@ -1636,6 +1644,10 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
printk("pktcdvd: detected zero packet size!\n");
pd->settings.size = 128;
}
+ if (pd->settings.size > PACKET_MAX_SECTORS) {
+ printk("pktcdvd: packet size is too big\n");
+ return -ENXIO;
+ }
pd->settings.fp = ti.fp;
pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
@@ -2198,7 +2210,6 @@ static int pkt_make_request(request_queue_t *q, struct bio *bio)
* No matching packet found. Store the bio in the work queue.
*/
node = mempool_alloc(pd->rb_pool, GFP_NOIO);
- BUG_ON(!node);
node->bio = bio;
spin_lock(&pd->lock);
BUG_ON(pd->bio_queue_size < 0);
@@ -2406,7 +2417,6 @@ static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, u
struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode));
- BUG_ON(!pd);
switch (cmd) {
/*
@@ -2477,10 +2487,9 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
return -EBUSY;
}
- pd = kmalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
+ pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
if (!pd)
return ret;
- memset(pd, 0, sizeof(struct pktcdvd_device));
pd->rb_pool = mempool_create(PKT_RB_POOL_SIZE, pkt_rb_alloc, pkt_rb_free, NULL);
if (!pd->rb_pool)
diff --git a/drivers/block/scsi_ioctl.c b/drivers/block/scsi_ioctl.c
index 856c2278e9d..079ec344eb4 100644
--- a/drivers/block/scsi_ioctl.c
+++ b/drivers/block/scsi_ioctl.c
@@ -168,6 +168,7 @@ static int verify_command(struct file *file, unsigned char *cmd)
safe_for_write(WRITE_VERIFY_12),
safe_for_write(WRITE_16),
safe_for_write(WRITE_LONG),
+ safe_for_write(WRITE_LONG_2),
safe_for_write(ERASE),
safe_for_write(GPCMD_MODE_SELECT_10),
safe_for_write(MODE_SELECT),
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index aa0bf7ee008..ed4d5006fe6 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -172,7 +172,7 @@ struct bulk_cs_wrap {
*/
struct ub_dev;
-#define UB_MAX_REQ_SG 4
+#define UB_MAX_REQ_SG 9 /* cdrecord requires 32KB and maybe a header */
#define UB_MAX_SECTORS 64
/*
@@ -387,7 +387,7 @@ struct ub_dev {
struct bulk_cs_wrap work_bcs;
struct usb_ctrlrequest work_cr;
- int sg_stat[UB_MAX_REQ_SG+1];
+ int sg_stat[6];
struct ub_scsi_trace tr;
};
@@ -525,12 +525,13 @@ static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr,
"qlen %d qmax %d\n",
sc->cmd_queue.qlen, sc->cmd_queue.qmax);
cnt += sprintf(page + cnt,
- "sg %d %d %d %d %d\n",
+ "sg %d %d %d %d %d .. %d\n",
sc->sg_stat[0],
sc->sg_stat[1],
sc->sg_stat[2],
sc->sg_stat[3],
- sc->sg_stat[4]);
+ sc->sg_stat[4],
+ sc->sg_stat[5]);
list_for_each (p, &sc->luns) {
lun = list_entry(p, struct ub_lun, link);
@@ -835,7 +836,7 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
return -1;
}
cmd->nsg = n_elem;
- sc->sg_stat[n_elem]++;
+ sc->sg_stat[n_elem < 5 ? n_elem : 5]++;
/*
* build the command
@@ -891,7 +892,7 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
return -1;
}
cmd->nsg = n_elem;
- sc->sg_stat[n_elem]++;
+ sc->sg_stat[n_elem < 5 ? n_elem : 5]++;
memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
cmd->cdb_len = rq->cmd_len;
@@ -1010,7 +1011,6 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
sc->last_pipe = sc->send_bulk_pipe;
usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
- sc->work_urb.transfer_flags = 0;
/* Fill what we shouldn't be filling, because usb-storage did so. */
sc->work_urb.actual_length = 0;
@@ -1019,7 +1019,6 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
/* XXX Clear stalls */
- printk("ub: cmd #%d start failed (%d)\n", cmd->tag, rc); /* P3 */
ub_complete(&sc->work_done);
return rc;
}
@@ -1190,11 +1189,9 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
return;
}
if (urb->status != 0) {
- printk("ub: cmd #%d cmd status (%d)\n", cmd->tag, urb->status); /* P3 */
goto Bad_End;
}
if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
- printk("ub: cmd #%d xferred %d\n", cmd->tag, urb->actual_length); /* P3 */
/* XXX Must do reset here to unconfuse the device */
goto Bad_End;
}
@@ -1395,14 +1392,12 @@ static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
page_address(sg->page) + sg->offset, sg->length,
ub_urb_complete, sc);
- sc->work_urb.transfer_flags = 0;
sc->work_urb.actual_length = 0;
sc->work_urb.error_count = 0;
sc->work_urb.status = 0;
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
/* XXX Clear stalls */
- printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */
ub_complete(&sc->work_done);
ub_state_done(sc, cmd, rc);
return;
@@ -1442,7 +1437,6 @@ static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
sc->last_pipe = sc->recv_bulk_pipe;
usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
&sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
- sc->work_urb.transfer_flags = 0;
sc->work_urb.actual_length = 0;
sc->work_urb.error_count = 0;
sc->work_urb.status = 0;
@@ -1563,7 +1557,6 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
(unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
- sc->work_urb.transfer_flags = 0;
sc->work_urb.actual_length = 0;
sc->work_urb.error_count = 0;
sc->work_urb.status = 0;
@@ -2000,17 +1993,16 @@ static int ub_sync_getmaxlun(struct ub_dev *sc)
usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
(unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
- sc->work_urb.transfer_flags = 0;
sc->work_urb.actual_length = 0;
sc->work_urb.error_count = 0;
sc->work_urb.status = 0;
if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
if (rc == -EPIPE) {
- printk("%s: Stall at GetMaxLUN, using 1 LUN\n",
+ printk("%s: Stall submitting GetMaxLUN, using 1 LUN\n",
sc->name); /* P3 */
} else {
- printk(KERN_WARNING
+ printk(KERN_NOTICE
"%s: Unable to submit GetMaxLUN (%d)\n",
sc->name, rc);
}
@@ -2028,6 +2020,18 @@ static int ub_sync_getmaxlun(struct ub_dev *sc)
del_timer_sync(&timer);
usb_kill_urb(&sc->work_urb);
+ if ((rc = sc->work_urb.status) < 0) {
+ if (rc == -EPIPE) {
+ printk("%s: Stall at GetMaxLUN, using 1 LUN\n",
+ sc->name); /* P3 */
+ } else {
+ printk(KERN_NOTICE
+ "%s: Error at GetMaxLUN (%d)\n",
+ sc->name, rc);
+ }
+ goto err_io;
+ }
+
if (sc->work_urb.actual_length != 1) {
printk("%s: GetMaxLUN returned %d bytes\n", sc->name,
sc->work_urb.actual_length); /* P3 */
@@ -2048,6 +2052,7 @@ static int ub_sync_getmaxlun(struct ub_dev *sc)
kfree(p);
return nluns;
+err_io:
err_submit:
kfree(p);
err_alloc:
@@ -2080,7 +2085,6 @@ static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
(unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
- sc->work_urb.transfer_flags = 0;
sc->work_urb.actual_length = 0;
sc->work_urb.error_count = 0;
sc->work_urb.status = 0;
@@ -2213,8 +2217,10 @@ static int ub_probe(struct usb_interface *intf,
* This is needed to clear toggles. It is a problem only if we do
* `rmmod ub && modprobe ub` without disconnects, but we like that.
*/
+#if 0 /* iPod Mini fails if we do this (big white iPod works) */
ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
ub_probe_clear_stall(sc, sc->send_bulk_pipe);
+#endif
/*
* The way this is used by the startup code is a little specific.
@@ -2241,10 +2247,10 @@ static int ub_probe(struct usb_interface *intf,
for (i = 0; i < 3; i++) {
if ((rc = ub_sync_getmaxlun(sc)) < 0) {
/*
- * Some devices (i.e. Iomega Zip100) need this --
- * apparently the bulk pipes get STALLed when the
- * GetMaxLUN request is processed.
- * XXX I have a ZIP-100, verify it does this.
+ * This segment is taken from usb-storage. They say
+ * that ZIP-100 needs this, but my own ZIP-100 works
+ * fine without this.
+ * Still, it does not seem to hurt anything.
*/
if (rc == -EPIPE) {
ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
@@ -2313,7 +2319,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
disk->first_minor = lun->id * UB_MINORS_PER_MAJOR;
disk->fops = &ub_bd_fops;
disk->private_data = lun;
- disk->driverfs_dev = &sc->intf->dev; /* XXX Many to one ok? */
+ disk->driverfs_dev = &sc->intf->dev;
rc = -ENOMEM;
if ((q = blk_init_queue(ub_request_fn, &sc->lock)) == NULL)
@@ -2466,9 +2472,6 @@ static int __init ub_init(void)
{
int rc;
- /* P3 */ printk("ub: sizeof ub_scsi_cmd %zu ub_dev %zu ub_lun %zu\n",
- sizeof(struct ub_scsi_cmd), sizeof(struct ub_dev), sizeof(struct ub_lun));
-
if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
goto err_regblkdev;
devfs_mk_dir(DEVFS_NAME);
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index 67d96b5cbb9..57c48bbf6fe 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -65,13 +65,15 @@
#endif
static int ignore = 0;
+static int ignore_csr = 0;
+static int ignore_sniffer = 0;
static int reset = 0;
#ifdef CONFIG_BT_HCIUSB_SCO
static int isoc = 2;
#endif
-#define VERSION "2.8"
+#define VERSION "2.9"
static struct usb_driver hci_usb_driver;
@@ -98,6 +100,9 @@ static struct usb_device_id bluetooth_ids[] = {
MODULE_DEVICE_TABLE (usb, bluetooth_ids);
static struct usb_device_id blacklist_ids[] = {
+ /* CSR BlueCore devices */
+ { USB_DEVICE(0x0a12, 0x0001), .driver_info = HCI_CSR },
+
/* Broadcom BCM2033 without firmware */
{ USB_DEVICE(0x0a5c, 0x2033), .driver_info = HCI_IGNORE },
@@ -836,6 +841,12 @@ static int hci_usb_probe(struct usb_interface *intf, const struct usb_device_id
if (ignore || id->driver_info & HCI_IGNORE)
return -ENODEV;
+ if (ignore_csr && id->driver_info & HCI_CSR)
+ return -ENODEV;
+
+ if (ignore_sniffer && id->driver_info & HCI_SNIFFER)
+ return -ENODEV;
+
if (intf->cur_altsetting->desc.bInterfaceNumber > 0)
return -ENODEV;
@@ -1061,6 +1072,12 @@ module_exit(hci_usb_exit);
module_param(ignore, bool, 0644);
MODULE_PARM_DESC(ignore, "Ignore devices from the matching table");
+module_param(ignore_csr, bool, 0644);
+MODULE_PARM_DESC(ignore_csr, "Ignore devices with id 0a12:0001");
+
+module_param(ignore_sniffer, bool, 0644);
+MODULE_PARM_DESC(ignore_sniffer, "Ignore devices with id 0a12:0002");
+
module_param(reset, bool, 0644);
MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
diff --git a/drivers/bluetooth/hci_usb.h b/drivers/bluetooth/hci_usb.h
index 29936b43d4f..37100a6ea1a 100644
--- a/drivers/bluetooth/hci_usb.h
+++ b/drivers/bluetooth/hci_usb.h
@@ -31,9 +31,10 @@
#define HCI_IGNORE 0x01
#define HCI_RESET 0x02
#define HCI_DIGIANSWER 0x04
-#define HCI_SNIFFER 0x08
-#define HCI_BROKEN_ISOC 0x10
+#define HCI_CSR 0x08
+#define HCI_SNIFFER 0x10
#define HCI_BCM92035 0x20
+#define HCI_BROKEN_ISOC 0x40
#define HCI_MAX_IFACE_NUM 3
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 99762b6c19a..de5d6d21267 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -252,7 +252,7 @@ hp_zx1_configure (void)
readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
readl(hp->ioc_regs+HP_ZX1_TCNFG);
- writel(~(HP_ZX1_IOVA_SIZE-1), hp->ioc_regs+HP_ZX1_IMASK);
+ writel((unsigned int)(~(HP_ZX1_IOVA_SIZE-1)), hp->ioc_regs+HP_ZX1_IMASK);
readl(hp->ioc_regs+HP_ZX1_IMASK);
writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE);
readl(hp->ioc_regs+HP_ZX1_IBASE);
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 2a36561eec6..a124f8c5d06 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -2053,10 +2053,6 @@ static int __init rs_init(void)
state->icount.rx = state->icount.tx = 0;
state->icount.frame = state->icount.parity = 0;
state->icount.overrun = state->icount.brk = 0;
- /*
- if(state->port && check_region(state->port,REGION_LENGTH(state)))
- continue;
- */
printk(KERN_INFO "ttyS%d is the amiga builtin serial port\n",
state->line);
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 6ba48f346fc..041bb47b5c3 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -376,7 +376,7 @@ static int __init drm_core_init(void)
goto err_p2;
}
- drm_proc_root = create_proc_entry("dri", S_IFDIR, NULL);
+ drm_proc_root = proc_mkdir("dri", NULL);
if (!drm_proc_root) {
DRM_ERROR("Cannot create /proc/dri\n");
ret = -1;
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 32d2bb99462..97796100248 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -95,7 +95,7 @@ int drm_proc_init(drm_device_t *dev, int minor,
char name[64];
sprintf(name, "%d", minor);
- *dev_root = create_proc_entry(name, S_IFDIR, root);
+ *dev_root = proc_mkdir(name, root);
if (!*dev_root) {
DRM_ERROR("Cannot create /proc/dri/%s\n", name);
return -1;
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 58d3738a2b7..407708a001e 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -534,7 +534,7 @@ static void shutdown(struct channel *ch)
unsigned long flags;
struct tty_struct *tty;
- struct board_chan *bc;
+ struct board_chan __iomem *bc;
if (!(ch->asyncflags & ASYNC_INITIALIZED))
return;
@@ -618,7 +618,7 @@ static int pc_write(struct tty_struct * tty,
struct channel *ch;
unsigned long flags;
int remain;
- struct board_chan *bc;
+ struct board_chan __iomem *bc;
/* ----------------------------------------------------------------
pc_write is primarily called directly by the kernel routine
@@ -685,7 +685,7 @@ static int pc_write(struct tty_struct * tty,
------------------------------------------------------------------- */
dataLen = min(bytesAvailable, dataLen);
- memcpy(ch->txptr + head, buf, dataLen);
+ memcpy_toio(ch->txptr + head, buf, dataLen);
buf += dataLen;
head += dataLen;
amountCopied += dataLen;
@@ -726,7 +726,7 @@ static int pc_write_room(struct tty_struct *tty)
struct channel *ch;
unsigned long flags;
unsigned int head, tail;
- struct board_chan *bc;
+ struct board_chan __iomem *bc;
remain = 0;
@@ -773,7 +773,7 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
int remain;
unsigned long flags;
struct channel *ch;
- struct board_chan *bc;
+ struct board_chan __iomem *bc;
/* ---------------------------------------------------------
verifyChannel returns the channel from the tty struct
@@ -830,7 +830,7 @@ static void pc_flush_buffer(struct tty_struct *tty)
unsigned int tail;
unsigned long flags;
struct channel *ch;
- struct board_chan *bc;
+ struct board_chan __iomem *bc;
/* ---------------------------------------------------------
verifyChannel returns the channel from the tty struct
if it is valid. This serves as a sanity check.
@@ -976,7 +976,7 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
struct channel *ch;
unsigned long flags;
int line, retval, boardnum;
- struct board_chan *bc;
+ struct board_chan __iomem *bc;
unsigned int head;
line = tty->index;
@@ -1041,7 +1041,7 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
ch->statusflags = 0;
/* Save boards current modem status */
- ch->imodem = bc->mstat;
+ ch->imodem = readb(&bc->mstat);
/* ----------------------------------------------------------------
Set receive head and tail ptrs to each other. This indicates
@@ -1399,10 +1399,10 @@ static void post_fep_init(unsigned int crd)
{ /* Begin post_fep_init */
int i;
- unsigned char *memaddr;
- struct global_data *gd;
+ void __iomem *memaddr;
+ struct global_data __iomem *gd;
struct board_info *bd;
- struct board_chan *bc;
+ struct board_chan __iomem *bc;
struct channel *ch;
int shrinkmem = 0, lowwater ;
@@ -1461,7 +1461,7 @@ static void post_fep_init(unsigned int crd)
8 and 64 of these structures.
-------------------------------------------------------------------- */
- bc = (struct board_chan *)(memaddr + CHANSTRUCT);
+ bc = memaddr + CHANSTRUCT;
/* -------------------------------------------------------------------
The below assignment will set gd to point at the BEGINING of
@@ -1470,7 +1470,7 @@ static void post_fep_init(unsigned int crd)
pointer begins at 0xd10.
---------------------------------------------------------------------- */
- gd = (struct global_data *)(memaddr + GLOBAL);
+ gd = memaddr + GLOBAL;
/* --------------------------------------------------------------------
XEPORTS (address 0xc22) points at the number of channels the
@@ -1493,6 +1493,7 @@ static void post_fep_init(unsigned int crd)
for (i = 0; i < bd->numports; i++, ch++, bc++) { /* Begin for each port */
unsigned long flags;
+ u16 tseg, rseg;
ch->brdchan = bc;
ch->mailbox = gd;
@@ -1553,50 +1554,53 @@ static void post_fep_init(unsigned int crd)
shrinkmem = 0;
}
+ tseg = readw(&bc->tseg);
+ rseg = readw(&bc->rseg);
+
switch (bd->type) {
case PCIXEM:
case PCIXRJ:
case PCIXR:
/* Cover all the 2MEG cards */
- ch->txptr = memaddr + (((bc->tseg) << 4) & 0x1fffff);
- ch->rxptr = memaddr + (((bc->rseg) << 4) & 0x1fffff);
- ch->txwin = FEPWIN | ((bc->tseg) >> 11);
- ch->rxwin = FEPWIN | ((bc->rseg) >> 11);
+ ch->txptr = memaddr + ((tseg << 4) & 0x1fffff);
+ ch->rxptr = memaddr + ((rseg << 4) & 0x1fffff);
+ ch->txwin = FEPWIN | (tseg >> 11);
+ ch->rxwin = FEPWIN | (rseg >> 11);
break;
case PCXEM:
case EISAXEM:
/* Cover all the 32K windowed cards */
/* Mask equal to window size - 1 */
- ch->txptr = memaddr + (((bc->tseg) << 4) & 0x7fff);
- ch->rxptr = memaddr + (((bc->rseg) << 4) & 0x7fff);
- ch->txwin = FEPWIN | ((bc->tseg) >> 11);
- ch->rxwin = FEPWIN | ((bc->rseg) >> 11);
+ ch->txptr = memaddr + ((tseg << 4) & 0x7fff);
+ ch->rxptr = memaddr + ((rseg << 4) & 0x7fff);
+ ch->txwin = FEPWIN | (tseg >> 11);
+ ch->rxwin = FEPWIN | (rseg >> 11);
break;
case PCXEVE:
case PCXE:
- ch->txptr = memaddr + (((bc->tseg - bd->memory_seg) << 4) & 0x1fff);
- ch->txwin = FEPWIN | ((bc->tseg - bd->memory_seg) >> 9);
- ch->rxptr = memaddr + (((bc->rseg - bd->memory_seg) << 4) & 0x1fff);
- ch->rxwin = FEPWIN | ((bc->rseg - bd->memory_seg) >>9 );
+ ch->txptr = memaddr + (((tseg - bd->memory_seg) << 4) & 0x1fff);
+ ch->txwin = FEPWIN | ((tseg - bd->memory_seg) >> 9);
+ ch->rxptr = memaddr + (((rseg - bd->memory_seg) << 4) & 0x1fff);
+ ch->rxwin = FEPWIN | ((rseg - bd->memory_seg) >>9 );
break;
case PCXI:
case PC64XE:
- ch->txptr = memaddr + ((bc->tseg - bd->memory_seg) << 4);
- ch->rxptr = memaddr + ((bc->rseg - bd->memory_seg) << 4);
+ ch->txptr = memaddr + ((tseg - bd->memory_seg) << 4);
+ ch->rxptr = memaddr + ((rseg - bd->memory_seg) << 4);
ch->txwin = ch->rxwin = 0;
break;
} /* End switch bd->type */
ch->txbufhead = 0;
- ch->txbufsize = bc->tmax + 1;
+ ch->txbufsize = readw(&bc->tmax) + 1;
ch->rxbufhead = 0;
- ch->rxbufsize = bc->rmax + 1;
+ ch->rxbufsize = readw(&bc->rmax) + 1;
lowwater = ch->txbufsize >= 2000 ? 1024 : (ch->txbufsize / 2);
@@ -1718,11 +1722,11 @@ static void epcapoll(unsigned long ignored)
static void doevent(int crd)
{ /* Begin doevent */
- void *eventbuf;
+ void __iomem *eventbuf;
struct channel *ch, *chan0;
static struct tty_struct *tty;
struct board_info *bd;
- struct board_chan *bc;
+ struct board_chan __iomem *bc;
unsigned int tail, head;
int event, channel;
int mstat, lstat;
@@ -1817,7 +1821,7 @@ static void doevent(int crd)
static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
int byte2, int ncmds, int bytecmd)
{ /* Begin fepcmd */
- unchar *memaddr;
+ unchar __iomem *memaddr;
unsigned int head, cmdTail, cmdStart, cmdMax;
long count;
int n;
@@ -2000,7 +2004,7 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
unsigned int cmdHead;
struct termios *ts;
- struct board_chan *bc;
+ struct board_chan __iomem *bc;
unsigned mval, hflow, cflag, iflag;
bc = ch->brdchan;
@@ -2010,7 +2014,7 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
ts = tty->termios;
if ((ts->c_cflag & CBAUD) == 0) { /* Begin CBAUD detected */
cmdHead = readw(&bc->rin);
- bc->rout = cmdHead;
+ writew(cmdHead, &bc->rout);
cmdHead = readw(&bc->tin);
/* Changing baud in mid-stream transmission can be wonderful */
/* ---------------------------------------------------------------
@@ -2116,7 +2120,7 @@ static void receive_data(struct channel *ch)
unchar *rptr;
struct termios *ts = NULL;
struct tty_struct *tty;
- struct board_chan *bc;
+ struct board_chan __iomem *bc;
int dataToRead, wrapgap, bytesAvailable;
unsigned int tail, head;
unsigned int wrapmask;
@@ -2154,7 +2158,7 @@ static void receive_data(struct channel *ch)
--------------------------------------------------------------------- */
if (!tty || !ts || !(ts->c_cflag & CREAD)) {
- bc->rout = head;
+ writew(head, &bc->rout);
return;
}
@@ -2270,7 +2274,7 @@ static int info_ioctl(struct tty_struct *tty, struct file * file,
static int pc_tiocmget(struct tty_struct *tty, struct file *file)
{
struct channel *ch = (struct channel *) tty->driver_data;
- struct board_chan *bc;
+ struct board_chan __iomem *bc;
unsigned int mstat, mflag = 0;
unsigned long flags;
@@ -2351,7 +2355,7 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
unsigned long flags;
unsigned int mflag, mstat;
unsigned char startc, stopc;
- struct board_chan *bc;
+ struct board_chan __iomem *bc;
struct channel *ch = (struct channel *) tty->driver_data;
void __user *argp = (void __user *)arg;
@@ -2633,7 +2637,7 @@ static void pc_start(struct tty_struct *tty)
spin_lock_irqsave(&epca_lock, flags);
/* Just in case output was resumed because of a change in Digi-flow */
if (ch->statusflags & TXSTOPPED) { /* Begin transmit resume requested */
- struct board_chan *bc;
+ struct board_chan __iomem *bc;
globalwinon(ch);
bc = ch->brdchan;
if (ch->statusflags & LOWWAIT)
@@ -2727,7 +2731,7 @@ void digi_send_break(struct channel *ch, int msec)
static void setup_empty_event(struct tty_struct *tty, struct channel *ch)
{ /* Begin setup_empty_event */
- struct board_chan *bc = ch->brdchan;
+ struct board_chan __iomem *bc = ch->brdchan;
globalwinon(ch);
ch->statusflags |= EMPTYWAIT;
diff --git a/drivers/char/epca.h b/drivers/char/epca.h
index 20eeb5a70e1..456d6c8f94a 100644
--- a/drivers/char/epca.h
+++ b/drivers/char/epca.h
@@ -128,17 +128,17 @@ struct channel
unsigned long c_cflag;
unsigned long c_lflag;
unsigned long c_oflag;
- unsigned char *txptr;
- unsigned char *rxptr;
+ unsigned char __iomem *txptr;
+ unsigned char __iomem *rxptr;
unsigned char *tmp_buf;
struct board_info *board;
- struct board_chan *brdchan;
+ struct board_chan __iomem *brdchan;
struct digi_struct digiext;
struct tty_struct *tty;
wait_queue_head_t open_wait;
wait_queue_head_t close_wait;
struct work_struct tqueue;
- struct global_data *mailbox;
+ struct global_data __iomem *mailbox;
};
struct board_info
@@ -149,8 +149,8 @@ struct board_info
unsigned short numports;
unsigned long port;
unsigned long membase;
- unsigned char __iomem *re_map_port;
- unsigned char *re_map_membase;
+ void __iomem *re_map_port;
+ void __iomem *re_map_membase;
unsigned long memory_seg;
void ( * memwinon ) (struct board_info *, unsigned int) ;
void ( * memwinoff ) (struct board_info *, unsigned int) ;
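The accessor pattern applied throughout the epca conversion above, sketched in isolation; the helper is hypothetical, only struct board_chan, its tin index and the I/O accessors come from the driver, and ring wrap-around is ignored here:

    /* Adapter memory sits behind __iomem pointers and is never
     * dereferenced directly: 16-bit registers go through readw()/writew()
     * and bulk data through memcpy_toio(), so the accesses stay correct
     * on architectures where card memory is not plain RAM. */
    static u16 example_tx_copy(struct board_chan __iomem *bc,
                               unsigned char __iomem *txptr,
                               const unsigned char *buf, u16 len)
    {
            u16 head = readw(&bc->tin);             /* current transmit head  */
            memcpy_toio(txptr + head, buf, len);    /* copy data to the card  */
            writew(head + len, &bc->tin);           /* publish the new head   */
            return head;
    }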
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index de0379b6d50..c055bb630ff 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -273,7 +273,6 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_flags |= VM_IO;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- addr = __pa(addr);
if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
PAGE_SIZE, vma->vm_page_prot)) {
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index cddb789902d..f9217763467 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -839,9 +839,6 @@ int __init hvc_init(void)
hvc_driver->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(hvc_driver, &hvc_ops);
- if (tty_register_driver(hvc_driver))
- panic("Couldn't register hvc console driver\n");
-
/* Always start the kthread because there can be hotplug vty adapters
* added later. */
hvc_task = kthread_run(khvcd, NULL, "khvcd");
@@ -851,6 +848,9 @@ int __init hvc_init(void)
return -EIO;
}
+ if (tty_register_driver(hvc_driver))
+ panic("Couldn't register hvc console driver\n");
+
return 0;
}
module_init(hvc_init);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 463351d4f94..32fa82c78c7 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2620,7 +2620,7 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
if (!list_empty(&(intf->waiting_msgs))) {
list_add_tail(&(msg->link), &(intf->waiting_msgs));
- spin_unlock(&(intf->waiting_msgs_lock));
+ spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
goto out_unlock;
}
spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
@@ -2629,9 +2629,9 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
if (rv > 0) {
/* Could not handle the message now, just add it to a
list to handle later. */
- spin_lock(&(intf->waiting_msgs_lock));
+ spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
list_add_tail(&(msg->link), &(intf->waiting_msgs));
- spin_unlock(&(intf->waiting_msgs_lock));
+ spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
} else if (rv == 0) {
ipmi_free_smi_msg(msg);
}
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index e82a96ba396..f66947722e1 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -55,7 +55,7 @@ extern void (*pm_power_off)(void);
static int poweroff_powercycle;
/* parameter definition to allow user to flag power cycle */
-module_param(poweroff_powercycle, int, 0);
+module_param(poweroff_powercycle, int, 0644);
MODULE_PARM_DESC(poweroff_powercycles, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down.");
/* Stuff from the get device id command. */
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c
index 2291a87e8ad..97d6dc24b80 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/char/n_r3964.c
@@ -229,8 +229,8 @@ static int __init r3964_init(void)
TRACE_L("line discipline %d registered", N_R3964);
TRACE_L("flags=%x num=%x", tty_ldisc_N_R3964.flags,
tty_ldisc_N_R3964.num);
- TRACE_L("open=%x", (int)tty_ldisc_N_R3964.open);
- TRACE_L("tty_ldisc_N_R3964 = %x", (int)&tty_ldisc_N_R3964);
+ TRACE_L("open=%p", tty_ldisc_N_R3964.open);
+ TRACE_L("tty_ldisc_N_R3964 = %p", &tty_ldisc_N_R3964);
}
else
{
@@ -267,8 +267,8 @@ static void add_tx_queue(struct r3964_info *pInfo, struct r3964_block_header *pH
spin_unlock_irqrestore(&pInfo->lock, flags);
- TRACE_Q("add_tx_queue %x, length %d, tx_first = %x",
- (int)pHeader, pHeader->length, (int)pInfo->tx_first );
+ TRACE_Q("add_tx_queue %p, length %d, tx_first = %p",
+ pHeader, pHeader->length, pInfo->tx_first );
}
static void remove_from_tx_queue(struct r3964_info *pInfo, int error_code)
@@ -285,10 +285,10 @@ static void remove_from_tx_queue(struct r3964_info *pInfo, int error_code)
return;
#ifdef DEBUG_QUEUE
- printk("r3964: remove_from_tx_queue: %x, length %d - ",
- (int)pHeader, (int)pHeader->length );
+ printk("r3964: remove_from_tx_queue: %p, length %u - ",
+ pHeader, pHeader->length );
for(pDump=pHeader;pDump;pDump=pDump->next)
- printk("%x ", (int)pDump);
+ printk("%p ", pDump);
printk("\n");
#endif
@@ -319,10 +319,10 @@ static void remove_from_tx_queue(struct r3964_info *pInfo, int error_code)
spin_unlock_irqrestore(&pInfo->lock, flags);
kfree(pHeader);
- TRACE_M("remove_from_tx_queue - kfree %x",(int)pHeader);
+ TRACE_M("remove_from_tx_queue - kfree %p",pHeader);
- TRACE_Q("remove_from_tx_queue: tx_first = %x, tx_last = %x",
- (int)pInfo->tx_first, (int)pInfo->tx_last );
+ TRACE_Q("remove_from_tx_queue: tx_first = %p, tx_last = %p",
+ pInfo->tx_first, pInfo->tx_last );
}
static void add_rx_queue(struct r3964_info *pInfo, struct r3964_block_header *pHeader)
@@ -346,9 +346,9 @@ static void add_rx_queue(struct r3964_info *pInfo, struct r3964_block_header *pH
spin_unlock_irqrestore(&pInfo->lock, flags);
- TRACE_Q("add_rx_queue: %x, length = %d, rx_first = %x, count = %d",
- (int)pHeader, pHeader->length,
- (int)pInfo->rx_first, pInfo->blocks_in_rx_queue);
+ TRACE_Q("add_rx_queue: %p, length = %d, rx_first = %p, count = %d",
+ pHeader, pHeader->length,
+ pInfo->rx_first, pInfo->blocks_in_rx_queue);
}
static void remove_from_rx_queue(struct r3964_info *pInfo,
@@ -360,10 +360,10 @@ static void remove_from_rx_queue(struct r3964_info *pInfo,
if(pHeader==NULL)
return;
- TRACE_Q("remove_from_rx_queue: rx_first = %x, rx_last = %x, count = %d",
- (int)pInfo->rx_first, (int)pInfo->rx_last, pInfo->blocks_in_rx_queue );
- TRACE_Q("remove_from_rx_queue: %x, length %d",
- (int)pHeader, (int)pHeader->length );
+ TRACE_Q("remove_from_rx_queue: rx_first = %p, rx_last = %p, count = %d",
+ pInfo->rx_first, pInfo->rx_last, pInfo->blocks_in_rx_queue );
+ TRACE_Q("remove_from_rx_queue: %p, length %u",
+ pHeader, pHeader->length );
spin_lock_irqsave(&pInfo->lock, flags);
@@ -401,10 +401,10 @@ static void remove_from_rx_queue(struct r3964_info *pInfo,
spin_unlock_irqrestore(&pInfo->lock, flags);
kfree(pHeader);
- TRACE_M("remove_from_rx_queue - kfree %x",(int)pHeader);
+ TRACE_M("remove_from_rx_queue - kfree %p",pHeader);
- TRACE_Q("remove_from_rx_queue: rx_first = %x, rx_last = %x, count = %d",
- (int)pInfo->rx_first, (int)pInfo->rx_last, pInfo->blocks_in_rx_queue );
+ TRACE_Q("remove_from_rx_queue: rx_first = %p, rx_last = %p, count = %d",
+ pInfo->rx_first, pInfo->rx_last, pInfo->blocks_in_rx_queue );
}
static void put_char(struct r3964_info *pInfo, unsigned char ch)
@@ -506,8 +506,8 @@ static void transmit_block(struct r3964_info *pInfo)
if(tty->driver->write_room)
room=tty->driver->write_room(tty);
- TRACE_PS("transmit_block %x, room %d, length %d",
- (int)pBlock, room, pBlock->length);
+ TRACE_PS("transmit_block %p, room %d, length %d",
+ pBlock, room, pBlock->length);
while(pInfo->tx_position < pBlock->length)
{
@@ -588,7 +588,7 @@ static void on_receive_block(struct r3964_info *pInfo)
/* prepare struct r3964_block_header: */
pBlock = kmalloc(length+sizeof(struct r3964_block_header), GFP_KERNEL);
- TRACE_M("on_receive_block - kmalloc %x",(int)pBlock);
+ TRACE_M("on_receive_block - kmalloc %p",pBlock);
if(pBlock==NULL)
return;
@@ -868,11 +868,11 @@ static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg)
if(pMsg)
{
kfree(pMsg);
- TRACE_M("enable_signals - msg kfree %x",(int)pMsg);
+ TRACE_M("enable_signals - msg kfree %p",pMsg);
}
}
kfree(pClient);
- TRACE_M("enable_signals - kfree %x",(int)pClient);
+ TRACE_M("enable_signals - kfree %p",pClient);
return 0;
}
}
@@ -890,7 +890,7 @@ static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg)
{
/* add client to client list */
pClient=kmalloc(sizeof(struct r3964_client_info), GFP_KERNEL);
- TRACE_M("enable_signals - kmalloc %x",(int)pClient);
+ TRACE_M("enable_signals - kmalloc %p",pClient);
if(pClient==NULL)
return -ENOMEM;
@@ -954,7 +954,7 @@ static void add_msg(struct r3964_client_info *pClient, int msg_id, int arg,
queue_the_message:
pMsg = kmalloc(sizeof(struct r3964_message), GFP_KERNEL);
- TRACE_M("add_msg - kmalloc %x",(int)pMsg);
+ TRACE_M("add_msg - kmalloc %p",pMsg);
if(pMsg==NULL) {
return;
}
@@ -1067,11 +1067,11 @@ static int r3964_open(struct tty_struct *tty)
struct r3964_info *pInfo;
TRACE_L("open");
- TRACE_L("tty=%x, PID=%d, disc_data=%x",
- (int)tty, current->pid, (int)tty->disc_data);
+ TRACE_L("tty=%p, PID=%d, disc_data=%p",
+ tty, current->pid, tty->disc_data);
pInfo=kmalloc(sizeof(struct r3964_info), GFP_KERNEL);
- TRACE_M("r3964_open - info kmalloc %x",(int)pInfo);
+ TRACE_M("r3964_open - info kmalloc %p",pInfo);
if(!pInfo)
{
@@ -1080,26 +1080,26 @@ static int r3964_open(struct tty_struct *tty)
}
pInfo->rx_buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
- TRACE_M("r3964_open - rx_buf kmalloc %x",(int)pInfo->rx_buf);
+ TRACE_M("r3964_open - rx_buf kmalloc %p",pInfo->rx_buf);
if(!pInfo->rx_buf)
{
printk(KERN_ERR "r3964: failed to alloc receive buffer\n");
kfree(pInfo);
- TRACE_M("r3964_open - info kfree %x",(int)pInfo);
+ TRACE_M("r3964_open - info kfree %p",pInfo);
return -ENOMEM;
}
pInfo->tx_buf = kmalloc(TX_BUF_SIZE, GFP_KERNEL);
- TRACE_M("r3964_open - tx_buf kmalloc %x",(int)pInfo->tx_buf);
+ TRACE_M("r3964_open - tx_buf kmalloc %p",pInfo->tx_buf);
if(!pInfo->tx_buf)
{
printk(KERN_ERR "r3964: failed to alloc transmit buffer\n");
kfree(pInfo->rx_buf);
- TRACE_M("r3964_open - rx_buf kfree %x",(int)pInfo->rx_buf);
+ TRACE_M("r3964_open - rx_buf kfree %p",pInfo->rx_buf);
kfree(pInfo);
- TRACE_M("r3964_open - info kfree %x",(int)pInfo);
+ TRACE_M("r3964_open - info kfree %p",pInfo);
return -ENOMEM;
}
@@ -1154,11 +1154,11 @@ static void r3964_close(struct tty_struct *tty)
if(pMsg)
{
kfree(pMsg);
- TRACE_M("r3964_close - msg kfree %x",(int)pMsg);
+ TRACE_M("r3964_close - msg kfree %p",pMsg);
}
}
kfree(pClient);
- TRACE_M("r3964_close - client kfree %x",(int)pClient);
+ TRACE_M("r3964_close - client kfree %p",pClient);
pClient=pNext;
}
/* Remove jobs from tx_queue: */
@@ -1177,11 +1177,11 @@ static void r3964_close(struct tty_struct *tty)
/* Free buffers: */
wake_up_interruptible(&pInfo->read_wait);
kfree(pInfo->rx_buf);
- TRACE_M("r3964_close - rx_buf kfree %x",(int)pInfo->rx_buf);
+ TRACE_M("r3964_close - rx_buf kfree %p",pInfo->rx_buf);
kfree(pInfo->tx_buf);
- TRACE_M("r3964_close - tx_buf kfree %x",(int)pInfo->tx_buf);
+ TRACE_M("r3964_close - tx_buf kfree %p",pInfo->tx_buf);
kfree(pInfo);
- TRACE_M("r3964_close - info kfree %x",(int)pInfo);
+ TRACE_M("r3964_close - info kfree %p",pInfo);
}
static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
@@ -1234,7 +1234,7 @@ repeat:
count = sizeof(struct r3964_client_message);
kfree(pMsg);
- TRACE_M("r3964_read - msg kfree %x",(int)pMsg);
+ TRACE_M("r3964_read - msg kfree %p",pMsg);
if (copy_to_user(buf,&theMsg, count))
return -EFAULT;
@@ -1279,7 +1279,7 @@ static ssize_t r3964_write(struct tty_struct * tty, struct file * file,
* Allocate a buffer for the data and copy it from the buffer with header prepended
*/
new_data = kmalloc (count+sizeof(struct r3964_block_header), GFP_KERNEL);
- TRACE_M("r3964_write - kmalloc %x",(int)new_data);
+ TRACE_M("r3964_write - kmalloc %p",new_data);
if (new_data == NULL) {
if (pInfo->flags & R3964_DEBUG)
{
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 1e33cb032e0..e91268e8683 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -810,13 +810,14 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
* from the top and bottom of cursor position
*/
old_origin += (vc->vc_y - new_rows/2) * old_row_size;
- end = old_origin + new_screen_size;
+ end = old_origin + (old_row_size * new_rows);
}
} else
/*
* Cursor near the top, copy contents from the top of buffer
*/
- end = (old_rows > new_rows) ? old_origin + new_screen_size :
+ end = (old_rows > new_rows) ? old_origin +
+ (old_row_size * new_rows) :
vc->vc_scr_end;
update_attr(vc);
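Rough arithmetic behind the change above (hypothetical numbers): end appears to delimit the region copied out of the old screen buffer, so it must be measured in old rows rather than in the new screen size. Shrinking a 132-column, 50-row console (old_row_size = 264 bytes) to 80x25, the span to keep is new_rows * old_row_size = 25 * 264 = 6600 bytes, while the previous new_screen_size (25 * 160 = 4000 bytes) would have covered only about 15 of the 25 rows being preserved.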
diff --git a/drivers/char/watchdog/Kconfig b/drivers/char/watchdog/Kconfig
index fa789ea36bb..344001b45af 100644
--- a/drivers/char/watchdog/Kconfig
+++ b/drivers/char/watchdog/Kconfig
@@ -84,6 +84,17 @@ config 977_WATCHDOG
Not sure? It's safe to say N.
+config IXP2000_WATCHDOG
+ tristate "IXP2000 Watchdog"
+ depends on WATCHDOG && ARCH_IXP2000
+ help
+ Say Y here to include support for the watchdog timer
+ in the Intel IXP2000 (2400, 2800, 2850) network processors.
+ This driver can be built as a module by choosing M. The module
+ will be called ixp2000_wdt.
+
+ Say N if you are unsure.
+
config IXP4XX_WATCHDOG
tristate "IXP4xx Watchdog"
depends on WATCHDOG && ARCH_IXP4XX
@@ -100,17 +111,6 @@ config IXP4XX_WATCHDOG
Say N if you are unsure.
-config IXP2000_WATCHDOG
- tristate "IXP2000 Watchdog"
- depends on WATCHDOG && ARCH_IXP2000
- help
- Say Y here if to include support for the watchdog timer
- in the Intel IXP2000(2400, 2800, 2850) network processors.
- This driver can be built as a module by choosing M. The module
- will be called ixp2000_wdt.
-
- Say N if you are unsure.
-
config S3C2410_WATCHDOG
tristate "S3C2410 Watchdog"
depends on WATCHDOG && ARCH_S3C2410
@@ -233,6 +233,16 @@ config IB700_WDT
Most people will say N.
+config IBMASR
+ tristate "IBM Automatic Server Restart"
+ depends on WATCHDOG && X86
+ help
+ This is the driver for the IBM Automatic Server Restart watchdog
+ timer built into some eServer xSeries machines.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ibmasr.
+
config WAFER_WDT
tristate "ICP Wafer 5823 Single Board Computer Watchdog"
depends on WATCHDOG && X86
@@ -243,6 +253,16 @@ config WAFER_WDT
To compile this driver as a module, choose M here: the
module will be called wafer5823wdt.
+config I6300ESB_WDT
+ tristate "Intel 6300ESB Timer/Watchdog"
+ depends on WATCHDOG && X86 && PCI
+ ---help---
+ Hardware driver for the watchdog timer built into the Intel
+ 6300ESB controller hub.
+
+ To compile this driver as a module, choose M here: the
+ module will be called i6300esb.
+
config I8XX_TCO
tristate "Intel i8xx TCO Timer/Watchdog"
depends on WATCHDOG && (X86 || IA64) && PCI
@@ -298,6 +318,19 @@ config 60XX_WDT
You can compile this driver directly into the kernel, or use
it as a module. The module will be called sbc60xxwdt.
+config SBC8360_WDT
+ tristate "SBC8360 Watchdog Timer"
+ depends on WATCHDOG && X86
+ ---help---
+
+ This is the driver for the hardware watchdog on the SBC8360 Single
+ Board Computer produced by Axiomtek Co., Ltd. (www.axiomtek.com).
+
+ To compile this driver as a module, choose M here: the
+ module will be called sbc8360.ko.
+
+ Most people will say N.
+
config CPU5_WDT
tristate "SMA CPU5 Watchdog"
depends on WATCHDOG && X86
@@ -336,6 +369,19 @@ config W83877F_WDT
Most people will say N.
+config W83977F_WDT
+ tristate "W83977F (PCM-5335) Watchdog Timer"
+ depends on WATCHDOG && X86
+ ---help---
+ This is the driver for the hardware watchdog on the W83977F I/O chip
+ as used in AAEON's PCM-5335 SBC (and likely others). This
+ watchdog simply watches your kernel to make sure it doesn't freeze,
+ and if it does, it reboots your computer after a certain amount of
+ time.
+
+ To compile this driver as a module, choose M here: the
+ module will be called w83977f_wdt.
+
config MACHZ_WDT
tristate "ZF MachZ Watchdog"
depends on WATCHDOG && X86
@@ -355,6 +401,10 @@ config 8xx_WDT
tristate "MPC8xx Watchdog Timer"
depends on WATCHDOG && 8xx
+config MV64X60_WDT
+ tristate "MV64X60 (Marvell Discovery) Watchdog Timer"
+ depends on WATCHDOG && MV64X60
+
config BOOKE_WDT
tristate "PowerPC Book-E Watchdog Timer"
depends on WATCHDOG && (BOOKE || 4xx)
@@ -362,6 +412,17 @@ config BOOKE_WDT
Please see Documentation/watchdog/watchdog-api.txt for
more information.
+# PPC64 Architecture
+
+config WATCHDOG_RTAS
+ tristate "RTAS watchdog"
+ depends on WATCHDOG && PPC_RTAS
+ help
+ This driver adds watchdog support for the RTAS watchdog.
+
+ To compile this driver as a module, choose M here. The module
+ will be called wdrtas.
+
# MIPS Architecture
config INDYDOG
@@ -430,16 +491,6 @@ config WATCHDOG_RIO
machines. The watchdog timeout period is normally one minute but
can be changed with a boot-time parameter.
-# ppc64 RTAS watchdog
-config WATCHDOG_RTAS
- tristate "RTAS watchdog"
- depends on WATCHDOG && PPC_RTAS
- help
- This driver adds watchdog support for the RTAS watchdog.
-
- To compile this driver as a module, choose M here. The module
- will be called wdrtas.
-
#
# ISA-based Watchdog Cards
#
diff --git a/drivers/char/watchdog/Makefile b/drivers/char/watchdog/Makefile
index bc6f5fe88c8..cfd0a398771 100644
--- a/drivers/char/watchdog/Makefile
+++ b/drivers/char/watchdog/Makefile
@@ -39,22 +39,27 @@ obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o
obj-$(CONFIG_SC520_WDT) += sc520_wdt.o
obj-$(CONFIG_EUROTECH_WDT) += eurotechwdt.o
obj-$(CONFIG_IB700_WDT) += ib700wdt.o
+obj-$(CONFIG_IBMASR) += ibmasr.o
obj-$(CONFIG_WAFER_WDT) += wafer5823wdt.o
+obj-$(CONFIG_I6300ESB_WDT) += i6300esb.o
obj-$(CONFIG_I8XX_TCO) += i8xx_tco.o
obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o
obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o
obj-$(CONFIG_60XX_WDT) += sbc60xxwdt.o
+obj-$(CONFIG_SBC8360_WDT) += sbc8360.o
obj-$(CONFIG_CPU5_WDT) += cpu5wdt.o
obj-$(CONFIG_W83627HF_WDT) += w83627hf_wdt.o
obj-$(CONFIG_W83877F_WDT) += w83877f_wdt.o
+obj-$(CONFIG_W83977F_WDT) += w83977f_wdt.o
obj-$(CONFIG_MACHZ_WDT) += machzwd.o
# PowerPC Architecture
obj-$(CONFIG_8xx_WDT) += mpc8xx_wdt.o
+obj-$(CONFIG_MV64X60_WDT) += mv64x60_wdt.o
+obj-$(CONFIG_BOOKE_WDT) += booke_wdt.o
# PPC64 Architecture
obj-$(CONFIG_WATCHDOG_RTAS) += wdrtas.o
-obj-$(CONFIG_BOOKE_WDT) += booke_wdt.o
# MIPS Architecture
obj-$(CONFIG_INDYDOG) += indydog.o
diff --git a/drivers/char/watchdog/i6300esb.c b/drivers/char/watchdog/i6300esb.c
new file mode 100644
index 00000000000..93785f13242
--- /dev/null
+++ b/drivers/char/watchdog/i6300esb.c
@@ -0,0 +1,527 @@
+/*
+ * i6300esb: Watchdog timer driver for Intel 6300ESB chipset
+ *
+ * (c) Copyright 2004 Google Inc.
+ * (c) Copyright 2005 David Härdeman <david@2gen.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * based on i810-tco.c which is in turn based on softdog.c
+ *
+ * The timer is implemented in the following I/O controller hubs:
+ * (See the intel documentation on http://developer.intel.com.)
+ * 6300ESB chip : document number 300641-003
+ *
+ * 2004YYZZ Ross Biro
+ * Initial version 0.01
+ * 2004YYZZ Ross Biro
+ * Version 0.02
+ * 20050210 David Härdeman <david@2gen.com>
+ * Ported driver to kernel 2.6
+ */
+
+/*
+ * Includes, defines, variables, module parameters, ...
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+/* Module and version information */
+#define ESB_VERSION "0.03"
+#define ESB_MODULE_NAME "i6300ESB timer"
+#define ESB_DRIVER_NAME ESB_MODULE_NAME ", v" ESB_VERSION
+#define PFX ESB_MODULE_NAME ": "
+
+/* PCI configuration registers */
+#define ESB_CONFIG_REG 0x60 /* Config register */
+#define ESB_LOCK_REG 0x68 /* WDT lock register */
+
+/* Memory mapped registers */
+#define ESB_TIMER1_REG BASEADDR + 0x00 /* Timer1 value after each reset */
+#define ESB_TIMER2_REG BASEADDR + 0x04 /* Timer2 value after each reset */
+#define ESB_GINTSR_REG BASEADDR + 0x08 /* General Interrupt Status Register */
+#define ESB_RELOAD_REG BASEADDR + 0x0c /* Reload register */
+
+/* Lock register bits */
+#define ESB_WDT_FUNC ( 0x01 << 2 ) /* Watchdog functionality */
+#define ESB_WDT_ENABLE ( 0x01 << 1 ) /* Enable WDT */
+#define ESB_WDT_LOCK ( 0x01 << 0 ) /* Lock (nowayout) */
+
+/* Config register bits */
+#define ESB_WDT_REBOOT ( 0x01 << 5 ) /* Enable reboot on timeout */
+#define ESB_WDT_FREQ ( 0x01 << 2 ) /* Decrement frequency */
+#define ESB_WDT_INTTYPE ( 0x11 << 0 ) /* Interrupt type on timer1 timeout */
+
+/* Reload register bits */
+#define ESB_WDT_RELOAD ( 0x01 << 8 ) /* prevent timeout */
+
+/* Magic constants */
+#define ESB_UNLOCK1 0x80 /* Step 1 to unlock reset registers */
+#define ESB_UNLOCK2 0x86 /* Step 2 to unlock reset registers */
+
+/* internal variables */
+static void __iomem *BASEADDR;
+static spinlock_t esb_lock; /* Guards the hardware */
+static unsigned long timer_alive;
+static struct pci_dev *esb_pci;
+static unsigned short triggered; /* The status of the watchdog upon boot */
+static char esb_expect_close;
+
+/* module parameters */
+#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat (1<heartbeat<2*1023) */
+static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (1<heartbeat<2046, default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
+
+/*
+ * Some i6300ESB specific functions
+ */
+
+/*
+ * Prepare for reloading the timer by unlocking the proper registers.
+ * This is performed by first writing 0x80 followed by 0x86 to the
+ * reload register. After this the appropriate registers can be written
+ * to once before they need to be unlocked again.
+ */
+static inline void esb_unlock_registers(void) {
+ writeb(ESB_UNLOCK1, ESB_RELOAD_REG);
+ writeb(ESB_UNLOCK2, ESB_RELOAD_REG);
+}
+
+static void esb_timer_start(void)
+{
+ u8 val;
+
+ /* Enable or Enable + Lock? */
+ val = 0x02 | (nowayout ? 0x01 : 0x00);
+
+ pci_write_config_byte(esb_pci, ESB_LOCK_REG, val);
+}
+
+static int esb_timer_stop(void)
+{
+ u8 val;
+
+ spin_lock(&esb_lock);
+ /* First, reset timers as suggested by the docs */
+ esb_unlock_registers();
+ writew(ESB_WDT_RELOAD, ESB_RELOAD_REG);
+ /* Then disable the WDT */
+ pci_write_config_byte(esb_pci, ESB_LOCK_REG, 0x0);
+ pci_read_config_byte(esb_pci, ESB_LOCK_REG, &val);
+ spin_unlock(&esb_lock);
+
+ /* Returns 0 if the timer was disabled, non-zero otherwise */
+ return (val & 0x01);
+}
+
+static void esb_timer_keepalive(void)
+{
+ spin_lock(&esb_lock);
+ esb_unlock_registers();
+ writew(ESB_WDT_RELOAD, ESB_RELOAD_REG);
+ /* FIXME: Do we need to flush anything here? */
+ spin_unlock(&esb_lock);
+}
+
+static int esb_timer_set_heartbeat(int time)
+{
+ u32 val;
+
+ if (time < 0x1 || time > (2 * 0x03ff))
+ return -EINVAL;
+
+ spin_lock(&esb_lock);
+
+ /* We shift by 9, so if we are passed a value of 1 sec,
+ * val will be 1 << 9 = 512, then write that to two
+ * timers => 2 * 512 = 1024 (which is decremented at 1KHz)
+ */
+ val = time << 9;
+
+ /* Write timer 1 */
+ esb_unlock_registers();
+ writel(val, ESB_TIMER1_REG);
+
+ /* Write timer 2 */
+ esb_unlock_registers();
+ writel(val, ESB_TIMER2_REG);
+
+ /* Reload */
+ esb_unlock_registers();
+ writew(ESB_WDT_RELOAD, ESB_RELOAD_REG);
+
+ /* FIXME: Do we need to flush everything out? */
+
+ /* Done */
+ heartbeat = time;
+ spin_unlock(&esb_lock);
+ return 0;
+}
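A rough worked example of the shift described in the comment above, using the ~1 kHz decrement rate that esb_getdevice() later configures (illustrative arithmetic, not measured hardware behaviour):

    /* heartbeat = 30  ->  val = 30 << 9 = 15360 ticks per stage;         */
    /* at ~1 kHz that is ~15.4 s per stage, ~30.7 s across both stages,   */
    /* i.e. roughly the requested heartbeat before the reset is asserted. */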
+
+static int esb_timer_read (void)
+{
+ u32 count;
+
+ /* This isn't documented, and doesn't take into
+ * account which stage is running, but it looks
+ * like a 20 bit count down, so we might as well report it.
+ */
+ pci_read_config_dword(esb_pci, 0x64, &count);
+ return (int)count;
+}
+
+/*
+ * /dev/watchdog handling
+ */
+
+static int esb_open (struct inode *inode, struct file *file)
+{
+ /* /dev/watchdog can only be opened once */
+ if (test_and_set_bit(0, &timer_alive))
+ return -EBUSY;
+
+ /* Reload and activate timer */
+ esb_timer_keepalive ();
+ esb_timer_start ();
+
+ return nonseekable_open(inode, file);
+}
+
+static int esb_release (struct inode *inode, struct file *file)
+{
+ /* Shut off the timer. */
+ if (esb_expect_close == 42) {
+ esb_timer_stop ();
+ } else {
+ printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n");
+ esb_timer_keepalive ();
+ }
+ clear_bit(0, &timer_alive);
+ esb_expect_close = 0;
+ return 0;
+}
+
+static ssize_t esb_write (struct file *file, const char __user *data,
+ size_t len, loff_t * ppos)
+{
+ /* See if we got the magic character 'V' and reload the timer */
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ /* note: just in case someone wrote the magic character
+ * five months ago... */
+ esb_expect_close = 0;
+
+ /* scan to see whether or not we got the magic character */
+ for (i = 0; i != len; i++) {
+ char c;
+ if(get_user(c, data+i))
+ return -EFAULT;
+ if (c == 'V')
+ esb_expect_close = 42;
+ }
+ }
+
+ /* someone wrote to us, we should reload the timer */
+ esb_timer_keepalive ();
+ }
+ return len;
+}
+
+static int esb_ioctl (struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int new_options, retval = -EINVAL;
+ int new_heartbeat;
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ static struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+ .firmware_version = 0,
+ .identity = ESB_MODULE_NAME,
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, &ident,
+ sizeof (ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ return put_user (esb_timer_read(), p);
+
+ case WDIOC_GETBOOTSTATUS:
+ return put_user (triggered, p);
+
+ case WDIOC_KEEPALIVE:
+ esb_timer_keepalive ();
+ return 0;
+
+ case WDIOC_SETOPTIONS:
+ {
+ if (get_user (new_options, p))
+ return -EFAULT;
+
+ if (new_options & WDIOS_DISABLECARD) {
+ esb_timer_stop ();
+ retval = 0;
+ }
+
+ if (new_options & WDIOS_ENABLECARD) {
+ esb_timer_keepalive ();
+ esb_timer_start ();
+ retval = 0;
+ }
+
+ return retval;
+ }
+
+ case WDIOC_SETTIMEOUT:
+ {
+ if (get_user(new_heartbeat, p))
+ return -EFAULT;
+
+ if (esb_timer_set_heartbeat(new_heartbeat))
+ return -EINVAL;
+
+ esb_timer_keepalive ();
+ /* Fall */
+ }
+
+ case WDIOC_GETTIMEOUT:
+ return put_user(heartbeat, p);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+/*
+ * Notify system
+ */
+
+static int esb_notify_sys (struct notifier_block *this, unsigned long code, void *unused)
+{
+ if (code==SYS_DOWN || code==SYS_HALT) {
+ /* Turn the WDT off */
+ esb_timer_stop ();
+ }
+
+ return NOTIFY_DONE;
+}
+
+/*
+ * Kernel Interfaces
+ */
+
+static struct file_operations esb_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = esb_write,
+ .ioctl = esb_ioctl,
+ .open = esb_open,
+ .release = esb_release,
+};
+
+static struct miscdevice esb_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &esb_fops,
+};
+
+static struct notifier_block esb_notifier = {
+ .notifier_call = esb_notify_sys,
+};
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE. We do not actually
+ * register a pci_driver, because someone else might one day
+ * want to register another driver on the same PCI id.
+ */
+static struct pci_device_id esb_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_9), },
+ { 0, }, /* End of list */
+};
+MODULE_DEVICE_TABLE (pci, esb_pci_tbl);
+
+/*
+ * Init & exit routines
+ */
+
+static unsigned char __init esb_getdevice (void)
+{
+ u8 val1;
+ unsigned short val2;
+
+ struct pci_dev *dev = NULL;
+ /*
+ * Find the PCI device
+ */
+
+ for_each_pci_dev(dev) {
+ if (pci_match_id(esb_pci_tbl, dev)) {
+ esb_pci = dev;
+ break;
+ }
+ }
+
+ if (esb_pci) {
+ if (pci_enable_device(esb_pci)) {
+ printk (KERN_ERR PFX "failed to enable device\n");
+ goto err_devput;
+ }
+
+ if (pci_request_region(esb_pci, 0, ESB_MODULE_NAME)) {
+ printk (KERN_ERR PFX "failed to request region\n");
+ goto err_disable;
+ }
+
+ BASEADDR = ioremap(pci_resource_start(esb_pci, 0),
+ pci_resource_len(esb_pci, 0));
+ if (BASEADDR == NULL) {
+ /* Something's wrong here, BASEADDR has to be set */
+ printk (KERN_ERR PFX "failed to get BASEADDR\n");
+ goto err_release;
+ }
+
+ /*
+ * The watchdog has two timers, it can be setup so that the
+ * expiry of timer1 results in an interrupt and the expiry of
+ * timer2 results in a reboot. We set it to not generate
+ * any interrupts as there is not much we can do with it
+ * right now.
+ *
+ * We also enable reboots and set the timer frequency to
+ * the PCI clock divided by 2^15 (approx. 1 kHz).
+ */
+ pci_write_config_word(esb_pci, ESB_CONFIG_REG, 0x0003);
+
+ /* Check that the WDT isn't already locked */
+ pci_read_config_byte(esb_pci, ESB_LOCK_REG, &val1);
+ if (val1 & ESB_WDT_LOCK)
+ printk (KERN_WARNING PFX "nowayout already set\n");
+
+ /* Set the timer to watchdog mode and disable it for now */
+ pci_write_config_byte(esb_pci, ESB_LOCK_REG, 0x00);
+
+ /* Check if the watchdog was previously triggered */
+ esb_unlock_registers();
+ val2 = readw(ESB_RELOAD_REG);
+ triggered = ((val2 & (0x01 << 9)) >> 9);
+
+ /* Reset trigger flag and timers */
+ esb_unlock_registers();
+ writew((0x11 << 8), ESB_RELOAD_REG);
+
+ /* Done */
+ return 1;
+
+err_release:
+ pci_release_region(esb_pci, 0);
+err_disable:
+ pci_disable_device(esb_pci);
+err_devput:
+ pci_dev_put(esb_pci);
+ }
+ return 0;
+}
+
+static int __init watchdog_init (void)
+{
+ int ret;
+
+ spin_lock_init(&esb_lock);
+
+ /* Check whether or not the hardware watchdog is there */
+ if (!esb_getdevice () || esb_pci == NULL)
+ return -ENODEV;
+
+ /* Check that the heartbeat value is within its range; if not, reset to the default */
+ if (esb_timer_set_heartbeat (heartbeat)) {
+ esb_timer_set_heartbeat (WATCHDOG_HEARTBEAT);
+ printk(KERN_INFO PFX "heartbeat value must be 1<heartbeat<2046, using %d\n",
+ heartbeat);
+ }
+
+ ret = register_reboot_notifier(&esb_notifier);
+ if (ret != 0) {
+ printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n",
+ ret);
+ goto err_unmap;
+ }
+
+ ret = misc_register(&esb_miscdev);
+ if (ret != 0) {
+ printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n",
+ WATCHDOG_MINOR, ret);
+ goto err_notifier;
+ }
+
+ esb_timer_stop ();
+
+ printk (KERN_INFO PFX "initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n",
+ BASEADDR, heartbeat, nowayout);
+
+ return 0;
+
+err_notifier:
+ unregister_reboot_notifier(&esb_notifier);
+err_unmap:
+ iounmap(BASEADDR);
+/* err_release: */
+ pci_release_region(esb_pci, 0);
+/* err_disable: */
+ pci_disable_device(esb_pci);
+/* err_devput: */
+ pci_dev_put(esb_pci);
+ return ret;
+}
+
+static void __exit watchdog_cleanup (void)
+{
+ /* Stop the timer before we leave */
+ if (!nowayout)
+ esb_timer_stop ();
+
+ /* Deregister */
+ misc_deregister(&esb_miscdev);
+ unregister_reboot_notifier(&esb_notifier);
+ iounmap(BASEADDR);
+ pci_release_region(esb_pci, 0);
+ pci_disable_device(esb_pci);
+ pci_dev_put(esb_pci);
+}
+
+module_init(watchdog_init);
+module_exit(watchdog_cleanup);
+
+MODULE_AUTHOR("Ross Biro and David Härdeman");
+MODULE_DESCRIPTION("Watchdog driver for Intel 6300ESB chipsets");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
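For reference, the write and ioctl handlers above implement the generic Linux watchdog character-device API, so a userspace daemon drives the i6300ESB like any other watchdog. A minimal sketch (illustrative timeout value; error handling omitted):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 30;			/* illustrative value, in seconds */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;

	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* driver validates/clamps the value */
	ioctl(fd, WDIOC_GETTIMEOUT, &timeout);	/* read back the effective timeout */
	printf("timeout: %d s\n", timeout);

	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* ping */
	write(fd, "V", 1);			/* magic close: release then stops the timer */
	close(fd);
	return 0;
}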
diff --git a/drivers/char/watchdog/ibmasr.c b/drivers/char/watchdog/ibmasr.c
new file mode 100644
index 00000000000..294c474ae48
--- /dev/null
+++ b/drivers/char/watchdog/ibmasr.c
@@ -0,0 +1,405 @@
+/*
+ * IBM Automatic Server Restart driver.
+ *
+ * Copyright (c) 2005 Andrey Panin <pazke@donpac.ru>
+ *
+ * Based on driver written by Pete Reynolds.
+ * Copyright (c) IBM Corporation, 1998-2004.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ */
+
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/timer.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/dmi.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+
+enum {
+ ASMTYPE_UNKNOWN,
+ ASMTYPE_TOPAZ,
+ ASMTYPE_JASPER,
+ ASMTYPE_PEARL,
+ ASMTYPE_JUNIPER,
+ ASMTYPE_SPRUCE,
+};
+
+#define PFX "ibmasr: "
+
+#define TOPAZ_ASR_REG_OFFSET 4
+#define TOPAZ_ASR_TOGGLE 0x40
+#define TOPAZ_ASR_DISABLE 0x80
+
+/* PEARL ASR S/W REGISTER SUPERIO PORT ADDRESSES */
+#define PEARL_BASE 0xe04
+#define PEARL_WRITE 0xe06
+#define PEARL_READ 0xe07
+
+#define PEARL_ASR_DISABLE_MASK 0x80 /* bit 7: disable = 1, enable = 0 */
+#define PEARL_ASR_TOGGLE_MASK 0x40 /* bit 6: 0, then 1, then 0 */
+
+/* JASPER OFFSET FROM SIO BASE ADDR TO ASR S/W REGISTERS. */
+#define JASPER_ASR_REG_OFFSET 0x38
+
+#define JASPER_ASR_DISABLE_MASK 0x01 /* bit 0: disable = 1, enable = 0 */
+#define JASPER_ASR_TOGGLE_MASK 0x02 /* bit 1: 0, then 1, then 0 */
+
+#define JUNIPER_BASE_ADDRESS 0x54b /* Base address of Juniper ASR */
+#define JUNIPER_ASR_DISABLE_MASK 0x01 /* bit 0: disable = 1 enable = 0 */
+#define JUNIPER_ASR_TOGGLE_MASK 0x02 /* bit 1: 0, then 1, then 0 */
+
+#define SPRUCE_BASE_ADDRESS 0x118e /* Base address of Spruce ASR */
+#define SPRUCE_ASR_DISABLE_MASK 0x01 /* bit 1: disable = 1 enable = 0 */
+#define SPRUCE_ASR_TOGGLE_MASK 0x02 /* bit 0: 0, then 1, then 0 */
+
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+
+static unsigned long asr_is_open;
+static char asr_expect_close;
+
+static unsigned int asr_type, asr_base, asr_length;
+static unsigned int asr_read_addr, asr_write_addr;
+static unsigned char asr_toggle_mask, asr_disable_mask;
+
+static void asr_toggle(void)
+{
+ unsigned char reg = inb(asr_read_addr);
+
+ outb(reg & ~asr_toggle_mask, asr_write_addr);
+ reg = inb(asr_read_addr);
+
+ outb(reg | asr_toggle_mask, asr_write_addr);
+ reg = inb(asr_read_addr);
+
+ outb(reg & ~asr_toggle_mask, asr_write_addr);
+ reg = inb(asr_read_addr);
+}
+
+static void asr_enable(void)
+{
+ unsigned char reg;
+
+ if (asr_type == ASMTYPE_TOPAZ) {
+ /* asr_write_addr == asr_read_addr */
+ reg = inb(asr_read_addr);
+ outb(reg & ~(TOPAZ_ASR_TOGGLE | TOPAZ_ASR_DISABLE),
+ asr_read_addr);
+ } else {
+ /*
+ * First make sure the hardware timer is reset by toggling
+ * ASR hardware timer line.
+ */
+ asr_toggle();
+
+ reg = inb(asr_read_addr);
+ outb(reg & ~asr_disable_mask, asr_write_addr);
+ }
+ reg = inb(asr_read_addr);
+}
+
+static void asr_disable(void)
+{
+ unsigned char reg = inb(asr_read_addr);
+
+ if (asr_type == ASMTYPE_TOPAZ)
+ /* asr_write_addr == asr_read_addr */
+ outb(reg | TOPAZ_ASR_TOGGLE | TOPAZ_ASR_DISABLE,
+ asr_read_addr);
+ else {
+ outb(reg | asr_toggle_mask, asr_write_addr);
+ reg = inb(asr_read_addr);
+
+ outb(reg | asr_disable_mask, asr_write_addr);
+ }
+ reg = inb(asr_read_addr);
+}
+
+static int __init asr_get_base_address(void)
+{
+ unsigned char low, high;
+ const char *type = "";
+
+ asr_length = 1;
+
+ switch (asr_type) {
+ case ASMTYPE_TOPAZ:
+ /* SELECT SuperIO CHIP FOR QUERYING (WRITE 0x07 TO BOTH 0x2E and 0x2F) */
+ outb(0x07, 0x2e);
+ outb(0x07, 0x2f);
+
+ /* SELECT AND READ THE HIGH-NIBBLE OF THE GPIO BASE ADDRESS */
+ outb(0x60, 0x2e);
+ high = inb(0x2f);
+
+ /* SELECT AND READ THE LOW-NIBBLE OF THE GPIO BASE ADDRESS */
+ outb(0x61, 0x2e);
+ low = inb(0x2f);
+
+ asr_base = (high << 16) | low;
+ asr_read_addr = asr_write_addr =
+ asr_base + TOPAZ_ASR_REG_OFFSET;
+ asr_length = 5;
+
+ break;
+
+ case ASMTYPE_JASPER:
+ type = "Jaspers ";
+
+ /* FIXME: need to use pci_config_lock here, but it's not exported */
+
+/* spin_lock_irqsave(&pci_config_lock, flags);*/
+
+ /* Select the SuperIO chip in the PCI I/O port register */
+ outl(0x8000f858, 0xcf8);
+
+ /*
+ * Read the base address for the SuperIO chip.
+ * Only the lower 16 bits are valid, but the address is word
+ * aligned so the last bit must be masked off.
+ */
+ asr_base = inl(0xcfc) & 0xfffe;
+
+/* spin_unlock_irqrestore(&pci_config_lock, flags);*/
+
+ asr_read_addr = asr_write_addr =
+ asr_base + JASPER_ASR_REG_OFFSET;
+ asr_toggle_mask = JASPER_ASR_TOGGLE_MASK;
+ asr_disable_mask = JASPER_ASR_DISABLE_MASK;
+ asr_length = JASPER_ASR_REG_OFFSET + 1;
+
+ break;
+
+ case ASMTYPE_PEARL:
+ type = "Pearls ";
+ asr_base = PEARL_BASE;
+ asr_read_addr = PEARL_READ;
+ asr_write_addr = PEARL_WRITE;
+ asr_toggle_mask = PEARL_ASR_TOGGLE_MASK;
+ asr_disable_mask = PEARL_ASR_DISABLE_MASK;
+ asr_length = 4;
+ break;
+
+ case ASMTYPE_JUNIPER:
+ type = "Junipers ";
+ asr_base = JUNIPER_BASE_ADDRESS;
+ asr_read_addr = asr_write_addr = asr_base;
+ asr_toggle_mask = JUNIPER_ASR_TOGGLE_MASK;
+ asr_disable_mask = JUNIPER_ASR_DISABLE_MASK;
+ break;
+
+ case ASMTYPE_SPRUCE:
+ type = "Spruce's ";
+ asr_base = SPRUCE_BASE_ADDRESS;
+ asr_read_addr = asr_write_addr = asr_base;
+ asr_toggle_mask = SPRUCE_ASR_TOGGLE_MASK;
+ asr_disable_mask = SPRUCE_ASR_DISABLE_MASK;
+ break;
+ }
+
+ if (!request_region(asr_base, asr_length, "ibmasr")) {
+ printk(KERN_ERR PFX "address %#x already in use\n",
+ asr_base);
+ return -EBUSY;
+ }
+
+ printk(KERN_INFO PFX "found %sASR @ addr %#x\n", type, asr_base);
+
+ return 0;
+}
+
+
+static ssize_t asr_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ if (count) {
+ if (!nowayout) {
+ size_t i;
+
+ /* In case it was set long ago */
+ asr_expect_close = 0;
+
+ for (i = 0; i != count; i++) {
+ char c;
+ if (get_user(c, buf + i))
+ return -EFAULT;
+ if (c == 'V')
+ asr_expect_close = 42;
+ }
+ }
+ asr_toggle();
+ }
+ return count;
+}
+
+static int asr_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ static const struct watchdog_info ident = {
+ .options = WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+ .identity = "IBM ASR"
+ };
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ int heartbeat;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, &ident, sizeof(ident)) ?
+ -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, p);
+
+ case WDIOC_KEEPALIVE:
+ asr_toggle();
+ return 0;
+
+ /*
+ * The hardware has a fixed timeout value, so there is no WDIOC_SETTIMEOUT,
+ * and WDIOC_GETTIMEOUT always returns 256.
+ */
+ case WDIOC_GETTIMEOUT:
+ heartbeat = 256;
+ return put_user(heartbeat, p);
+
+ case WDIOC_SETOPTIONS: {
+ int new_options, retval = -EINVAL;
+
+ if (get_user(new_options, p))
+ return -EFAULT;
+
+ if (new_options & WDIOS_DISABLECARD) {
+ asr_disable();
+ retval = 0;
+ }
+
+ if (new_options & WDIOS_ENABLECARD) {
+ asr_enable();
+ asr_toggle();
+ retval = 0;
+ }
+
+ return retval;
+ }
+ }
+
+ return -ENOIOCTLCMD;
+}
+
+static int asr_open(struct inode *inode, struct file *file)
+{
+ if(test_and_set_bit(0, &asr_is_open))
+ return -EBUSY;
+
+ asr_toggle();
+ asr_enable();
+
+ return nonseekable_open(inode, file);
+}
+
+static int asr_release(struct inode *inode, struct file *file)
+{
+ if (asr_expect_close == 42)
+ asr_disable();
+ else {
+ printk(KERN_CRIT PFX "unexpected close, not stopping watchdog!\n");
+ asr_toggle();
+ }
+ clear_bit(0, &asr_is_open);
+ asr_expect_close = 0;
+ return 0;
+}
+
+static struct file_operations asr_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = asr_write,
+ .ioctl = asr_ioctl,
+ .open = asr_open,
+ .release = asr_release,
+};
+
+static struct miscdevice asr_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &asr_fops,
+};
+
+
+struct ibmasr_id {
+ const char *desc;
+ int type;
+};
+
+static struct ibmasr_id __initdata ibmasr_id_table[] = {
+ { "IBM Automatic Server Restart - eserver xSeries 220", ASMTYPE_TOPAZ },
+ { "IBM Automatic Server Restart - Machine Type 8673", ASMTYPE_PEARL },
+ { "IBM Automatic Server Restart - Machine Type 8480", ASMTYPE_JASPER },
+ { "IBM Automatic Server Restart - Machine Type 8482", ASMTYPE_JUNIPER },
+ { "IBM Automatic Server Restart - Machine Type 8648", ASMTYPE_SPRUCE },
+ { NULL }
+};
+
+static int __init ibmasr_init(void)
+{
+ struct ibmasr_id *id;
+ int rc;
+
+ for (id = ibmasr_id_table; id->desc; id++) {
+ if (dmi_find_device(DMI_DEV_TYPE_OTHER, id->desc, NULL)) {
+ asr_type = id->type;
+ break;
+ }
+ }
+
+ if (!asr_type)
+ return -ENODEV;
+
+ rc = misc_register(&asr_miscdev);
+ if (rc < 0) {
+ printk(KERN_ERR PFX "failed to register misc device\n");
+ return rc;
+ }
+
+ rc = asr_get_base_address();
+ if (rc) {
+ misc_deregister(&asr_miscdev);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void __exit ibmasr_exit(void)
+{
+ if (!nowayout)
+ asr_disable();
+
+ misc_deregister(&asr_miscdev);
+
+ release_region(asr_base, asr_length);
+}
+
+module_init(ibmasr_init);
+module_exit(ibmasr_exit);
+
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
+
+MODULE_DESCRIPTION("IBM Automatic Server Restart driver");
+MODULE_AUTHOR("Andrey Panin");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/char/watchdog/mpcore_wdt.c b/drivers/char/watchdog/mpcore_wdt.c
index c694eee1fb2..75ca84ed4ad 100644
--- a/drivers/char/watchdog/mpcore_wdt.c
+++ b/drivers/char/watchdog/mpcore_wdt.c
@@ -30,6 +30,8 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/device.h>
+
+#include <asm/hardware/arm_twd.h>
#include <asm/uaccess.h>
struct mpcore_wdt {
diff --git a/drivers/char/watchdog/mv64x60_wdt.c b/drivers/char/watchdog/mv64x60_wdt.c
new file mode 100644
index 00000000000..6d3ff0836c4
--- /dev/null
+++ b/drivers/char/watchdog/mv64x60_wdt.c
@@ -0,0 +1,252 @@
+/*
+ * mv64x60_wdt.c - MV64X60 (Marvell Discovery) watchdog userspace interface
+ *
+ * Author: James Chapman <jchapman@katalix.com>
+ *
+ * Platform-specific setup code should configure the dog to generate
+ * interrupt or reset as required. This code only enables/disables
+ * and services the watchdog.
+ *
+ * Derived from mpc8xx_wdt.c, with the following copyright.
+ *
+ * 2002 (c) Florian Schirmer <jolt@tuxbox.org> This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/watchdog.h>
+#include <asm/mv64x60.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+/* MV64x60 WDC (config) register access definitions */
+#define MV64x60_WDC_CTL1_MASK (3 << 24)
+#define MV64x60_WDC_CTL1(val) ((val & 3) << 24)
+#define MV64x60_WDC_CTL2_MASK (3 << 26)
+#define MV64x60_WDC_CTL2(val) ((val & 3) << 26)
+
+/* Flags bits */
+#define MV64x60_WDOG_FLAG_OPENED 0
+#define MV64x60_WDOG_FLAG_ENABLED 1
+
+static unsigned long wdt_flags;
+static int wdt_status;
+static void __iomem *mv64x60_regs;
+static int mv64x60_wdt_timeout;
+
+static void mv64x60_wdt_reg_write(u32 val)
+{
+ /* Allow write only to CTL1 / CTL2 fields, retaining values in
+ * other fields.
+ */
+ u32 data = readl(mv64x60_regs + MV64x60_WDT_WDC);
+ data &= ~(MV64x60_WDC_CTL1_MASK | MV64x60_WDC_CTL2_MASK);
+ data |= val;
+ writel(data, mv64x60_regs + MV64x60_WDT_WDC);
+}
+
+static void mv64x60_wdt_service(void)
+{
+ /* Write 01 followed by 10 to CTL2 */
+ mv64x60_wdt_reg_write(MV64x60_WDC_CTL2(0x01));
+ mv64x60_wdt_reg_write(MV64x60_WDC_CTL2(0x02));
+}
+
+static void mv64x60_wdt_handler_disable(void)
+{
+ if (test_and_clear_bit(MV64x60_WDOG_FLAG_ENABLED, &wdt_flags)) {
+ /* Write 01 followed by 10 to CTL1 */
+ mv64x60_wdt_reg_write(MV64x60_WDC_CTL1(0x01));
+ mv64x60_wdt_reg_write(MV64x60_WDC_CTL1(0x02));
+ printk(KERN_NOTICE "mv64x60_wdt: watchdog deactivated\n");
+ }
+}
+
+static void mv64x60_wdt_handler_enable(void)
+{
+ if (!test_and_set_bit(MV64x60_WDOG_FLAG_ENABLED, &wdt_flags)) {
+ /* Write 01 followed by 10 to CTL1 */
+ mv64x60_wdt_reg_write(MV64x60_WDC_CTL1(0x01));
+ mv64x60_wdt_reg_write(MV64x60_WDC_CTL1(0x02));
+ printk(KERN_NOTICE "mv64x60_wdt: watchdog activated\n");
+ }
+}
+
+static int mv64x60_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(MV64x60_WDOG_FLAG_OPENED, &wdt_flags))
+ return -EBUSY;
+
+ mv64x60_wdt_service();
+ mv64x60_wdt_handler_enable();
+
+ nonseekable_open(inode, file);
+
+ return 0;
+}
+
+static int mv64x60_wdt_release(struct inode *inode, struct file *file)
+{
+ mv64x60_wdt_service();
+
+#if !defined(CONFIG_WATCHDOG_NOWAYOUT)
+ mv64x60_wdt_handler_disable();
+#endif
+
+ clear_bit(MV64x60_WDOG_FLAG_OPENED, &wdt_flags);
+
+ return 0;
+}
+
+static ssize_t mv64x60_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t * ppos)
+{
+ if (len)
+ mv64x60_wdt_service();
+
+ return len;
+}
+
+static int mv64x60_wdt_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int timeout;
+ void __user *argp = (void __user *)arg;
+ static struct watchdog_info info = {
+ .options = WDIOF_KEEPALIVEPING,
+ .firmware_version = 0,
+ .identity = "MV64x60 watchdog",
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ if (copy_to_user(argp, &info, sizeof(info)))
+ return -EFAULT;
+ break;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ if (put_user(wdt_status, (int __user *)argp))
+ return -EFAULT;
+ wdt_status &= ~WDIOF_KEEPALIVEPING;
+ break;
+
+ case WDIOC_GETTEMP:
+ return -EOPNOTSUPP;
+
+ case WDIOC_SETOPTIONS:
+ return -EOPNOTSUPP;
+
+ case WDIOC_KEEPALIVE:
+ mv64x60_wdt_service();
+ wdt_status |= WDIOF_KEEPALIVEPING;
+ break;
+
+ case WDIOC_SETTIMEOUT:
+ return -EOPNOTSUPP;
+
+ case WDIOC_GETTIMEOUT:
+ timeout = mv64x60_wdt_timeout * HZ;
+ if (put_user(timeout, (int __user *)argp))
+ return -EFAULT;
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return 0;
+}
+
+static struct file_operations mv64x60_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = mv64x60_wdt_write,
+ .ioctl = mv64x60_wdt_ioctl,
+ .open = mv64x60_wdt_open,
+ .release = mv64x60_wdt_release,
+};
+
+static struct miscdevice mv64x60_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &mv64x60_wdt_fops,
+};
+
+static int __devinit mv64x60_wdt_probe(struct device *dev)
+{
+ struct platform_device *pd = to_platform_device(dev);
+ struct mv64x60_wdt_pdata *pdata = pd->dev.platform_data;
+ int bus_clk = 133;
+
+ mv64x60_wdt_timeout = 10;
+ if (pdata) {
+ mv64x60_wdt_timeout = pdata->timeout;
+ bus_clk = pdata->bus_clk;
+ }
+
+ mv64x60_regs = mv64x60_get_bridge_vbase();
+
+ writel((mv64x60_wdt_timeout * (bus_clk * 1000000)) >> 8,
+ mv64x60_regs + MV64x60_WDT_WDC);
+
+ return misc_register(&mv64x60_wdt_miscdev);
+}
+
+static int __devexit mv64x60_wdt_remove(struct device *dev)
+{
+ misc_deregister(&mv64x60_wdt_miscdev);
+
+ mv64x60_wdt_service();
+ mv64x60_wdt_handler_disable();
+
+ return 0;
+}
+
+static struct device_driver mv64x60_wdt_driver = {
+ .name = MV64x60_WDT_NAME,
+ .bus = &platform_bus_type,
+ .probe = mv64x60_wdt_probe,
+ .remove = __devexit_p(mv64x60_wdt_remove),
+};
+
+static struct platform_device *mv64x60_wdt_dev;
+
+static int __init mv64x60_wdt_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "MV64x60 watchdog driver\n");
+
+ mv64x60_wdt_dev = platform_device_register_simple(MV64x60_WDT_NAME,
+ -1, NULL, 0);
+ if (IS_ERR(mv64x60_wdt_dev)) {
+ ret = PTR_ERR(mv64x60_wdt_dev);
+ goto out;
+ }
+
+ ret = driver_register(&mv64x60_wdt_driver);
+ out:
+ return ret;
+}
+
+static void __exit mv64x60_wdt_exit(void)
+{
+ driver_unregister(&mv64x60_wdt_driver);
+ platform_device_unregister(mv64x60_wdt_dev);
+}
+
+module_init(mv64x60_wdt_init);
+module_exit(mv64x60_wdt_exit);
+
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("MV64x60 watchdog driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
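To make the register write in mv64x60_wdt_probe() concrete: with the default 10 second timeout and 133 MHz bus clock, the preload value written to MV64x60_WDT_WDC is (10 * 133 * 1000000) >> 8, roughly 5.2 million counts; a platform supplying mv64x60_wdt_pdata overrides both numbers.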
diff --git a/drivers/char/watchdog/pcwd_pci.c b/drivers/char/watchdog/pcwd_pci.c
index 2b13afb09c5..5a80adbf803 100644
--- a/drivers/char/watchdog/pcwd_pci.c
+++ b/drivers/char/watchdog/pcwd_pci.c
@@ -29,27 +29,29 @@
* Includes, defines, variables, module parameters, ...
*/
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/miscdevice.h>
-#include <linux/watchdog.h>
-#include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/pci.h>
-#include <linux/ioport.h>
-#include <linux/spinlock.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
+#include <linux/config.h> /* For CONFIG_WATCHDOG_NOWAYOUT/... */
+#include <linux/module.h> /* For module specific items */
+#include <linux/moduleparam.h> /* For new moduleparam's */
+#include <linux/types.h> /* For standard types (like size_t) */
+#include <linux/errno.h> /* For the -ENODEV/... values */
+#include <linux/kernel.h> /* For printk/panic/... */
+#include <linux/delay.h> /* For mdelay function */
+#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */
+#include <linux/watchdog.h> /* For the watchdog specific items */
+#include <linux/notifier.h> /* For notifier support */
+#include <linux/reboot.h> /* For reboot_notifier stuff */
+#include <linux/init.h> /* For __init/__exit/... */
+#include <linux/fs.h> /* For file operations */
+#include <linux/pci.h> /* For pci functions */
+#include <linux/ioport.h> /* For io-port access */
+#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
+
+#include <asm/uaccess.h> /* For copy_to_user/put_user/... */
+#include <asm/io.h> /* For inb/outb/... */
/* Module and version information */
#define WATCHDOG_VERSION "1.01"
-#define WATCHDOG_DATE "15 Mar 2005"
+#define WATCHDOG_DATE "02 Sep 2005"
#define WATCHDOG_DRIVER_NAME "PCI-PC Watchdog"
#define WATCHDOG_NAME "pcwd_pci"
#define PFX WATCHDOG_NAME ": "
@@ -335,12 +337,14 @@ static int pcipcwd_ioctl(struct inode *inode, struct file *file,
return -EFAULT;
if (new_options & WDIOS_DISABLECARD) {
- pcipcwd_stop();
+ if (pcipcwd_stop())
+ return -EIO;
retval = 0;
}
if (new_options & WDIOS_ENABLECARD) {
- pcipcwd_start();
+ if (pcipcwd_start())
+ return -EIO;
retval = 0;
}
diff --git a/drivers/char/watchdog/s3c2410_wdt.c b/drivers/char/watchdog/s3c2410_wdt.c
index 8b292bf343c..3625b2601b4 100644
--- a/drivers/char/watchdog/s3c2410_wdt.c
+++ b/drivers/char/watchdog/s3c2410_wdt.c
@@ -464,7 +464,7 @@ static void s3c2410wdt_shutdown(struct device *dev)
static unsigned long wtcon_save;
static unsigned long wtdat_save;
-static int s3c2410wdt_suspend(struct device *dev, u32 state, u32 level)
+static int s3c2410wdt_suspend(struct device *dev, pm_message_t state, u32 level)
{
if (level == SUSPEND_POWER_DOWN) {
/* Save watchdog state, and turn it off. */
diff --git a/drivers/char/watchdog/sbc8360.c b/drivers/char/watchdog/sbc8360.c
new file mode 100644
index 00000000000..c6cbf808d8c
--- /dev/null
+++ b/drivers/char/watchdog/sbc8360.c
@@ -0,0 +1,414 @@
+/*
+ * SBC8360 Watchdog driver
+ *
+ * (c) Copyright 2005 Webcon, Inc.
+ *
+ * Based on ib700wdt.c, which is based on advantechwdt.c which is based
+ * on acquirewdt.c which is based on wdt.c.
+ *
+ * (c) Copyright 2001 Charles Howes <chowes@vsol.net>
+ *
+ * Based on advantechwdt.c which is based on acquirewdt.c which
+ * is based on wdt.c.
+ *
+ * (c) Copyright 2000-2001 Marek Michalkiewicz <marekm@linux.org.pl>
+ *
+ * Based on acquirewdt.c which is based on wdt.c.
+ * Original copyright messages:
+ *
+ * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
+ * http://www.redhat.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
+ * warranty for any of this software. This material is provided
+ * "AS-IS" and at no charge.
+ *
+ * (c) Copyright 1995 Alan Cox <alan@redhat.com>
+ *
+ * 14-Dec-2001 Matt Domsch <Matt_Domsch@dell.com>
+ * Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT
+ * Added timeout module option to override default
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/notifier.h>
+#include <linux/fs.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/moduleparam.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+static unsigned long sbc8360_is_open;
+static spinlock_t sbc8360_lock;
+static char expect_close;
+
+#define PFX "sbc8360: "
+
+/*
+ *
+ * Watchdog Timer Configuration
+ *
+ * The function of the watchdog timer is to reset the system automatically;
+ * it is controlled through I/O ports 0120H and 0121H. To enable the watchdog
+ * timer and allow the system to reset, write appropriate values from the
+ * table below to I/O ports 0120H and 0121H. To disable the timer, write a
+ * zero value to I/O port 0120H to stop the watchdog function.
+ *
+ * The following describes how the timer should be programmed (according to
+ * the vendor documentation)
+ *
+ * Enabling Watchdog:
+ * MOV AX,000AH (enable, phase I)
+ * MOV DX,0120H
+ * OUT DX,AX
+ * MOV AX,000BH (enable, phase II)
+ * MOV DX,0120H
+ * OUT DX,AX
+ * MOV AX,000nH (set multiplier n, from 1-4)
+ * MOV DX,0120H
+ * OUT DX,AX
+ * MOV AX,000mH (set base timer m, from 0-F)
+ * MOV DX,0121H
+ * OUT DX,AX
+ *
+ * Reset timer:
+ * MOV AX,000mH (same as set base timer, above)
+ * MOV DX,0121H
+ * OUT DX,AX
+ *
+ * Disabling Watchdog:
+ * MOV AX,0000H (a zero value)
+ * MOV DX,0120H
+ * OUT DX,AX
+ *
+ * Watchdog timeout configuration values:
+ * N
+ * M | 1 2 3 4
+ * --|----------------------------------
+ * 0 | 0.5s 5s 50s 100s
+ * 1 | 1s 10s 100s 200s
+ * 2 | 1.5s 15s 150s 300s
+ * 3 | 2s 20s 200s 400s
+ * 4 | 2.5s 25s 250s 500s
+ * 5 | 3s 30s 300s 600s
+ * 6 | 3.5s 35s 350s 700s
+ * 7 | 4s 40s 400s 800s
+ * 8 | 4.5s 45s 450s 900s
+ * 9 | 5s 50s 500s 1000s
+ * A | 5.5s 55s 550s 1100s
+ * B | 6s 60s 600s 1200s
+ * C | 6.5s 65s 650s 1300s
+ * D | 7s 70s 700s 1400s
+ * E | 7.5s 75s 750s 1500s
+ * F | 8s 80s 800s 1600s
+ *
+ * Another way to say the same thing is:
+ * For N=1, Timeout = (M+1) * 0.5s
+ * For N=2, Timeout = (M+1) * 5s
+ * For N=3, Timeout = (M+1) * 50s
+ * For N=4, Timeout = (M+1) * 100s
+ *
+ */
+
+static int wd_times[64][2] = {
+ {0, 1}, /* 0 = 0.5s */
+ {1, 1}, /* 1 = 1s */
+ {2, 1}, /* 2 = 1.5s */
+ {3, 1}, /* 3 = 2s */
+ {4, 1}, /* 4 = 2.5s */
+ {5, 1}, /* 5 = 3s */
+ {6, 1}, /* 6 = 3.5s */
+ {7, 1}, /* 7 = 4s */
+ {8, 1}, /* 8 = 4.5s */
+ {9, 1}, /* 9 = 5s */
+ {0xA, 1}, /* 10 = 5.5s */
+ {0xB, 1}, /* 11 = 6s */
+ {0xC, 1}, /* 12 = 6.5s */
+ {0xD, 1}, /* 13 = 7s */
+ {0xE, 1}, /* 14 = 7.5s */
+ {0xF, 1}, /* 15 = 8s */
+ {0, 2}, /* 16 = 5s */
+ {1, 2}, /* 17 = 10s */
+ {2, 2}, /* 18 = 15s */
+ {3, 2}, /* 19 = 20s */
+ {4, 2}, /* 20 = 25s */
+ {5, 2}, /* 21 = 30s */
+ {6, 2}, /* 22 = 35s */
+ {7, 2}, /* 23 = 40s */
+ {8, 2}, /* 24 = 45s */
+ {9, 2}, /* 25 = 50s */
+ {0xA, 2}, /* 26 = 55s */
+ {0xB, 2}, /* 27 = 60s */
+ {0xC, 2}, /* 28 = 65s */
+ {0xD, 2}, /* 29 = 70s */
+ {0xE, 2}, /* 30 = 75s */
+ {0xF, 2}, /* 31 = 80s */
+ {0, 3}, /* 32 = 50s */
+ {1, 3}, /* 33 = 100s */
+ {2, 3}, /* 34 = 150s */
+ {3, 3}, /* 35 = 200s */
+ {4, 3}, /* 36 = 250s */
+ {5, 3}, /* 37 = 300s */
+ {6, 3}, /* 38 = 350s */
+ {7, 3}, /* 39 = 400s */
+ {8, 3}, /* 40 = 450s */
+ {9, 3}, /* 41 = 500s */
+ {0xA, 3}, /* 42 = 550s */
+ {0xB, 3}, /* 43 = 600s */
+ {0xC, 3}, /* 44 = 650s */
+ {0xD, 3}, /* 45 = 700s */
+ {0xE, 3}, /* 46 = 750s */
+ {0xF, 3}, /* 47 = 800s */
+ {0, 4}, /* 48 = 100s */
+ {1, 4}, /* 49 = 200s */
+ {2, 4}, /* 50 = 300s */
+ {3, 4}, /* 51 = 400s */
+ {4, 4}, /* 52 = 500s */
+ {5, 4}, /* 53 = 600s */
+ {6, 4}, /* 54 = 700s */
+ {7, 4}, /* 55 = 800s */
+ {8, 4}, /* 56 = 900s */
+ {9, 4}, /* 57 = 1000s */
+ {0xA, 4}, /* 58 = 1100s */
+ {0xB, 4}, /* 59 = 1200s */
+ {0xC, 4}, /* 60 = 1300s */
+ {0xD, 4}, /* 61 = 1400s */
+ {0xE, 4}, /* 62 = 1500s */
+ {0xF, 4} /* 63 = 1600s */
+};
+
+#define SBC8360_ENABLE 0x120
+#define SBC8360_BASETIME 0x121
+
+static int timeout = 27;
+static int wd_margin = 0xB;
+static int wd_multiplier = 2;
+static int nowayout = WATCHDOG_NOWAYOUT;
+
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Index into timeout table (0-63) (default=27 (60s))");
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
+
+/*
+ * Kernel methods.
+ */
+
+/* Activate and pre-configure watchdog */
+static void sbc8360_activate(void)
+{
+ /* Enable the watchdog */
+ outb(0x0A, SBC8360_ENABLE);
+ msleep_interruptible(100);
+ outb(0x0B, SBC8360_ENABLE);
+ msleep_interruptible(100);
+ /* Set timeout multiplier */
+ outb(wd_multiplier, SBC8360_ENABLE);
+ msleep_interruptible(100);
+ /* Nothing happens until first sbc8360_ping() */
+}
+
+/* Kernel pings watchdog */
+static void sbc8360_ping(void)
+{
+ /* Write the base timer register */
+ outb(wd_margin, SBC8360_BASETIME);
+}
+
+/* Userspace pings kernel driver, or requests clean close */
+static ssize_t sbc8360_write(struct file *file, const char __user * buf,
+ size_t count, loff_t * ppos)
+{
+ if (count) {
+ if (!nowayout) {
+ size_t i;
+
+ /* In case it was set long ago */
+ expect_close = 0;
+
+ for (i = 0; i != count; i++) {
+ char c;
+ if (get_user(c, buf + i))
+ return -EFAULT;
+ if (c == 'V')
+ expect_close = 42;
+ }
+ }
+ sbc8360_ping();
+ }
+ return count;
+}
+
+static int sbc8360_open(struct inode *inode, struct file *file)
+{
+ spin_lock(&sbc8360_lock);
+ if (test_and_set_bit(0, &sbc8360_is_open)) {
+ spin_unlock(&sbc8360_lock);
+ return -EBUSY;
+ }
+ if (nowayout)
+ __module_get(THIS_MODULE);
+
+ /* Activate and ping once to start the countdown */
+ spin_unlock(&sbc8360_lock);
+ sbc8360_activate();
+ sbc8360_ping();
+ return nonseekable_open(inode, file);
+}
+
+static int sbc8360_close(struct inode *inode, struct file *file)
+{
+ spin_lock(&sbc8360_lock);
+ if (expect_close == 42)
+ outb(0, SBC8360_ENABLE);
+ else
+ printk(KERN_CRIT PFX
+ "SBC8360 device closed unexpectedly. SBC8360 will not stop!\n");
+
+ clear_bit(0, &sbc8360_is_open);
+ expect_close = 0;
+ spin_unlock(&sbc8360_lock);
+ return 0;
+}
+
+/*
+ * Notifier for system down
+ */
+
+static int sbc8360_notify_sys(struct notifier_block *this, unsigned long code,
+ void *unused)
+{
+ if (code == SYS_DOWN || code == SYS_HALT) {
+ /* Disable the SBC8360 Watchdog */
+ outb(0, SBC8360_ENABLE);
+ }
+ return NOTIFY_DONE;
+}
+
+/*
+ * Kernel Interfaces
+ */
+
+static struct file_operations sbc8360_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = sbc8360_write,
+ .open = sbc8360_open,
+ .release = sbc8360_close,
+};
+
+static struct miscdevice sbc8360_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &sbc8360_fops,
+};
+
+/*
+ * The SBC8360 needs to learn about soft shutdowns in order to
+ * turn the timebomb registers off.
+ */
+
+static struct notifier_block sbc8360_notifier = {
+ .notifier_call = sbc8360_notify_sys,
+};
+
+static int __init sbc8360_init(void)
+{
+ int res;
+ unsigned long int mseconds = 60000;
+
+ spin_lock_init(&sbc8360_lock);
+ res = misc_register(&sbc8360_miscdev);
+ if (res) {
+ printk(KERN_ERR PFX "failed to register misc device\n");
+ goto out_nomisc;
+ }
+
+ if (!request_region(SBC8360_ENABLE, 1, "SBC8360")) {
+ printk(KERN_ERR PFX "ENABLE method I/O %X is not available.\n",
+ SBC8360_ENABLE);
+ res = -EIO;
+ goto out_noenablereg;
+ }
+ if (!request_region(SBC8360_BASETIME, 1, "SBC8360")) {
+ printk(KERN_ERR PFX
+ "BASETIME method I/O %X is not available.\n",
+ SBC8360_BASETIME);
+ res = -EIO;
+ goto out_nobasetimereg;
+ }
+
+ res = register_reboot_notifier(&sbc8360_notifier);
+ if (res) {
+ printk(KERN_ERR PFX "Failed to register reboot notifier.\n");
+ goto out_noreboot;
+ }
+
+ if (timeout < 0 || timeout > 63) {
+ printk(KERN_ERR PFX "Invalid timeout index (must be 0-63).\n");
+ res = -EINVAL;
+ goto out_noreboot;
+ }
+
+ wd_margin = wd_times[timeout][0];
+ wd_multiplier = wd_times[timeout][1];
+
+ if (wd_multiplier == 1)
+ mseconds = (wd_margin + 1) * 500;
+ else if (wd_multiplier == 2)
+ mseconds = (wd_margin + 1) * 5000;
+ else if (wd_multiplier == 3)
+ mseconds = (wd_margin + 1) * 50000;
+ else if (wd_multiplier == 4)
+ mseconds = (wd_margin + 1) * 100000;
+
+ /* My kingdom for the ability to print "0.5 seconds" in the kernel! */
+ printk(KERN_INFO PFX "Timeout set at %ld ms.\n", mseconds);
+
+ return 0;
+
+ out_noreboot:
+ release_region(SBC8360_ENABLE, 1);
+ release_region(SBC8360_BASETIME, 1);
+ out_noenablereg:
+ out_nobasetimereg:
+ misc_deregister(&sbc8360_miscdev);
+ out_nomisc:
+ return res;
+}
+
+static void __exit sbc8360_exit(void)
+{
+ misc_deregister(&sbc8360_miscdev);
+ unregister_reboot_notifier(&sbc8360_notifier);
+ release_region(SBC8360_ENABLE, 1);
+ release_region(SBC8360_BASETIME, 1);
+}
+
+module_init(sbc8360_init);
+module_exit(sbc8360_exit);
+
+MODULE_AUTHOR("Ian E. Morgan <imorgan@webcon.ca>");
+MODULE_DESCRIPTION("SBC8360 watchdog driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+
+/* end of sbc8360.c */
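The timeout module parameter is an index into wd_times[] above; the arithmetic in sbc8360_init() reduces to the formulas in the header comment. A small sketch of the mapping, equivalent to the code above and shown only for clarity:

/* milliseconds for a given timeout index, per the N/M table above */
static unsigned long sbc8360_index_to_ms(int index)
{
	static const unsigned long step_ms[] = { 500, 5000, 50000, 100000 };
	int m = wd_times[index][0];		/* base timer M, 0x0-0xF */
	int n = wd_times[index][1];		/* multiplier N, 1-4 */

	return (m + 1) * step_ms[n - 1];	/* default index 27: (0xB + 1) * 5000 = 60000 ms */
}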
diff --git a/drivers/char/watchdog/w83977f_wdt.c b/drivers/char/watchdog/w83977f_wdt.c
new file mode 100644
index 00000000000..a7ff64c8921
--- /dev/null
+++ b/drivers/char/watchdog/w83977f_wdt.c
@@ -0,0 +1,543 @@
+/*
+ * W83977F Watchdog Timer Driver for Winbond W83977F I/O Chip
+ *
+ * (c) Copyright 2005 Jose Goncalves <jose.goncalves@inov.pt>
+ *
+ * Based on w83877f_wdt.c by Scott Jennings,
+ * and wdt977.c by Woody Suwalski
+ *
+ * -----------------------
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/watchdog.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#define WATCHDOG_VERSION "1.00"
+#define WATCHDOG_NAME "W83977F WDT"
+#define PFX WATCHDOG_NAME ": "
+#define DRIVER_VERSION WATCHDOG_NAME " driver, v" WATCHDOG_VERSION "\n"
+
+#define IO_INDEX_PORT 0x3F0
+#define IO_DATA_PORT (IO_INDEX_PORT+1)
+
+#define UNLOCK_DATA 0x87
+#define LOCK_DATA 0xAA
+#define DEVICE_REGISTER 0x07
+
+#define DEFAULT_TIMEOUT 45 /* default timeout in seconds */
+
+static int timeout = DEFAULT_TIMEOUT;
+static int timeoutW; /* timeout in watchdog counter units */
+static unsigned long timer_alive;
+static int testmode;
+static char expect_close;
+static spinlock_t spinlock;
+
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout,"Watchdog timeout in seconds (15..7635), default=" __MODULE_STRING(DEFAULT_TIMEOUT));
+module_param(testmode, int, 0);
+MODULE_PARM_DESC(testmode,"Watchdog testmode (1 = no reboot), default=0");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
+
+/*
+ * Start the watchdog
+ */
+
+static int wdt_start(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&spinlock, flags);
+
+ /* Unlock the SuperIO chip */
+ outb_p(UNLOCK_DATA,IO_INDEX_PORT);
+ outb_p(UNLOCK_DATA,IO_INDEX_PORT);
+
+ /*
+ * Select device Aux2 (device=8) to set watchdog regs F2, F3 and F4.
+ * F2 has the timeout in watchdog counter units.
+ * F3 is set to enable watchdog LED blink at timeout.
+ * F4 is used to just clear the TIMEOUT'ed state (bit 0).
+ */
+ outb_p(DEVICE_REGISTER,IO_INDEX_PORT);
+ outb_p(0x08,IO_DATA_PORT);
+ outb_p(0xF2,IO_INDEX_PORT);
+ outb_p(timeoutW,IO_DATA_PORT);
+ outb_p(0xF3,IO_INDEX_PORT);
+ outb_p(0x08,IO_DATA_PORT);
+ outb_p(0xF4,IO_INDEX_PORT);
+ outb_p(0x00,IO_DATA_PORT);
+
+ /* Set device Aux2 active */
+ outb_p(0x30,IO_INDEX_PORT);
+ outb_p(0x01,IO_DATA_PORT);
+
+ /*
+ * Select device Aux1 (dev=7) to set GP16 as the watchdog output
+ * (in reg E6) and GP13 as the watchdog LED output (in reg E3).
+ * Map GP16 at pin 119.
+ * In test mode watch the bit 0 on F4 to indicate "triggered" or
+ * check watchdog LED on SBC.
+ */
+ outb_p(DEVICE_REGISTER,IO_INDEX_PORT);
+ outb_p(0x07,IO_DATA_PORT);
+ if (!testmode)
+ {
+ unsigned pin_map;
+
+ outb_p(0xE6,IO_INDEX_PORT);
+ outb_p(0x0A,IO_DATA_PORT);
+ outb_p(0x2C,IO_INDEX_PORT);
+ pin_map = inb_p(IO_DATA_PORT);
+ pin_map |= 0x10;
+ pin_map &= ~(0x20);
+ outb_p(0x2C,IO_INDEX_PORT);
+ outb_p(pin_map,IO_DATA_PORT);
+ }
+ outb_p(0xE3,IO_INDEX_PORT);
+ outb_p(0x08,IO_DATA_PORT);
+
+ /* Set device Aux1 active */
+ outb_p(0x30,IO_INDEX_PORT);
+ outb_p(0x01,IO_DATA_PORT);
+
+ /* Lock the SuperIO chip */
+ outb_p(LOCK_DATA,IO_INDEX_PORT);
+
+ spin_unlock_irqrestore(&spinlock, flags);
+
+ printk(KERN_INFO PFX "activated.\n");
+
+ return 0;
+}
+
+/*
+ * Stop the watchdog
+ */
+
+static int wdt_stop(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&spinlock, flags);
+
+ /* Unlock the SuperIO chip */
+ outb_p(UNLOCK_DATA,IO_INDEX_PORT);
+ outb_p(UNLOCK_DATA,IO_INDEX_PORT);
+
+ /*
+ * Select device Aux2 (device=8) to set watchdog regs F2, F3 and F4.
+ * F2 is reset to its default value (watchdog timer disabled).
+ * F3 is reset to its default state.
+ * F4 clears the TIMEOUT'ed state (bit 0) - back to default.
+ */
+ outb_p(DEVICE_REGISTER,IO_INDEX_PORT);
+ outb_p(0x08,IO_DATA_PORT);
+ outb_p(0xF2,IO_INDEX_PORT);
+ outb_p(0xFF,IO_DATA_PORT);
+ outb_p(0xF3,IO_INDEX_PORT);
+ outb_p(0x00,IO_DATA_PORT);
+ outb_p(0xF4,IO_INDEX_PORT);
+ outb_p(0x00,IO_DATA_PORT);
+ outb_p(0xF2,IO_INDEX_PORT);
+ outb_p(0x00,IO_DATA_PORT);
+
+ /*
+ * Select device Aux1 (dev=7) to set GP16 (in reg E6) and
+ * Gp13 (in reg E3) as inputs.
+ */
+ outb_p(DEVICE_REGISTER,IO_INDEX_PORT);
+ outb_p(0x07,IO_DATA_PORT);
+ if (!testmode)
+ {
+ outb_p(0xE6,IO_INDEX_PORT);
+ outb_p(0x01,IO_DATA_PORT);
+ }
+ outb_p(0xE3,IO_INDEX_PORT);
+ outb_p(0x01,IO_DATA_PORT);
+
+ /* Lock the SuperIO chip */
+ outb_p(LOCK_DATA,IO_INDEX_PORT);
+
+ spin_unlock_irqrestore(&spinlock, flags);
+
+ printk(KERN_INFO PFX "shutdown.\n");
+
+ return 0;
+}
+
+/*
+ * Send a keepalive ping to the watchdog
+ * This is done by simply re-writing the timeout to reg. 0xF2
+ */
+
+static int wdt_keepalive(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&spinlock, flags);
+
+ /* Unlock the SuperIO chip */
+ outb_p(UNLOCK_DATA,IO_INDEX_PORT);
+ outb_p(UNLOCK_DATA,IO_INDEX_PORT);
+
+ /* Select device Aux2 (device=8) to kick watchdog reg F2 */
+ outb_p(DEVICE_REGISTER,IO_INDEX_PORT);
+ outb_p(0x08,IO_DATA_PORT);
+ outb_p(0xF2,IO_INDEX_PORT);
+ outb_p(timeoutW,IO_DATA_PORT);
+
+ /* Lock the SuperIO chip */
+ outb_p(LOCK_DATA,IO_INDEX_PORT);
+
+ spin_unlock_irqrestore(&spinlock, flags);
+
+ return 0;
+}
+
+/*
+ * Set the watchdog timeout value
+ */
+
+static int wdt_set_timeout(int t)
+{
+ int tmrval;
+
+ /*
+ * Convert seconds to watchdog counter time units, rounding up.
+ * On PCM-5335 watchdog units are 30 seconds/step with 15 sec startup
+ * value. This information is supplied in the PCM-5335 manual and was
+ * checked by me on a real board. This is a bit strange because W83977f
+ * datasheet says counter unit is in minutes!
+ */
+ if (t < 15)
+ return -EINVAL;
+
+ tmrval = ((t + 15) + 29) / 30;
+
+ if (tmrval > 255)
+ return -EINVAL;
+
+ /*
+ * timeout is the timeout in seconds,
+ * timeoutW is the timeout in watchdog counter units.
+ */
+ timeoutW = tmrval;
+ timeout = (timeoutW * 30) - 15;
+ return 0;
+}
+
+/*
+ * Get the watchdog status
+ */
+
+static int wdt_get_status(int *status)
+{
+ int new_status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&spinlock, flags);
+
+ /* Unlock the SuperIO chip */
+ outb_p(UNLOCK_DATA,IO_INDEX_PORT);
+ outb_p(UNLOCK_DATA,IO_INDEX_PORT);
+
+ /* Select device Aux2 (device=8) to read watchdog reg F4 */
+ outb_p(DEVICE_REGISTER,IO_INDEX_PORT);
+ outb_p(0x08,IO_DATA_PORT);
+ outb_p(0xF4,IO_INDEX_PORT);
+ new_status = inb_p(IO_DATA_PORT);
+
+ /* Lock the SuperIO chip */
+ outb_p(LOCK_DATA,IO_INDEX_PORT);
+
+ spin_unlock_irqrestore(&spinlock, flags);
+
+ *status = 0;
+ if (new_status & 1)
+ *status |= WDIOF_CARDRESET;
+
+ return 0;
+}
+
+
+/*
+ * /dev/watchdog handling
+ */
+
+static int wdt_open(struct inode *inode, struct file *file)
+{
+ /* If the watchdog is alive we don't need to start it again */
+ if( test_and_set_bit(0, &timer_alive) )
+ return -EBUSY;
+
+ if (nowayout)
+ __module_get(THIS_MODULE);
+
+ wdt_start();
+ return nonseekable_open(inode, file);
+}
+
+static int wdt_release(struct inode *inode, struct file *file)
+{
+ /*
+ * Shut off the timer.
+ * Lock it in if it's a module and we set nowayout
+ */
+ if (expect_close == 42)
+ {
+ wdt_stop();
+ clear_bit(0, &timer_alive);
+ } else {
+ wdt_keepalive();
+ printk(KERN_CRIT PFX "unexpected close, not stopping watchdog!\n");
+ }
+ expect_close = 0;
+ return 0;
+}
+
+/*
+ * wdt_write:
+ * @file: file handle to the watchdog
+ * @buf: buffer to write (unused, as data does not matter here)
+ * @count: count of bytes
+ * @ppos: pointer to the position to write. No seeks allowed
+ *
+ * A write to a watchdog device is defined as a keepalive signal. Any
+ * write of data will do, as we don't define content meaning.
+ */
+
+static ssize_t wdt_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ /* See if we got the magic character 'V' and reload the timer */
+ if(count)
+ {
+ if (!nowayout)
+ {
+ size_t ofs;
+
+ /* note: just in case someone wrote the magic character long ago */
+ expect_close = 0;
+
+ /* scan to see whether or not we got the magic character */
+ for(ofs = 0; ofs != count; ofs++)
+ {
+ char c;
+ if (get_user(c, buf + ofs))
+ return -EFAULT;
+ if (c == 'V') {
+ expect_close = 42;
+ }
+ }
+ }
+
+ /* someone wrote to us, we should restart timer */
+ wdt_keepalive();
+ }
+ return count;
+}
+
+/*
+ * wdt_ioctl:
+ * @inode: inode of the device
+ * @file: file handle to the device
+ * @cmd: watchdog command
+ * @arg: argument pointer
+ *
+ * The watchdog API defines a common set of functions for all watchdogs
+ * according to their available features.
+ */
+
+static struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
+ .firmware_version = 1,
+ .identity = WATCHDOG_NAME,
+};
+
+static int wdt_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int status;
+ int new_options, retval = -EINVAL;
+ int new_timeout;
+ union {
+ struct watchdog_info __user *ident;
+ int __user *i;
+ } uarg;
+
+ uarg.i = (int __user *)arg;
+
+ switch(cmd)
+ {
+ default:
+ return -ENOIOCTLCMD;
+
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(uarg.ident, &ident, sizeof(ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ wdt_get_status(&status);
+ return put_user(status, uarg.i);
+
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, uarg.i);
+
+ case WDIOC_KEEPALIVE:
+ wdt_keepalive();
+ return 0;
+
+ case WDIOC_SETOPTIONS:
+ if (get_user (new_options, uarg.i))
+ return -EFAULT;
+
+ if (new_options & WDIOS_DISABLECARD) {
+ wdt_stop();
+ retval = 0;
+ }
+
+ if (new_options & WDIOS_ENABLECARD) {
+ wdt_start();
+ retval = 0;
+ }
+
+ return retval;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_timeout, uarg.i))
+ return -EFAULT;
+
+ if (wdt_set_timeout(new_timeout))
+ return -EINVAL;
+
+ wdt_keepalive();
+ /* Fall */
+
+ case WDIOC_GETTIMEOUT:
+ return put_user(timeout, uarg.i);
+
+ }
+}
+
+static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
+ void *unused)
+{
+ if (code==SYS_DOWN || code==SYS_HALT)
+ wdt_stop();
+ return NOTIFY_DONE;
+}
+
+static struct file_operations wdt_fops=
+{
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = wdt_write,
+ .ioctl = wdt_ioctl,
+ .open = wdt_open,
+ .release = wdt_release,
+};
+
+static struct miscdevice wdt_miscdev=
+{
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &wdt_fops,
+};
+
+static struct notifier_block wdt_notifier = {
+ .notifier_call = wdt_notify_sys,
+};
+
+static int __init w83977f_wdt_init(void)
+{
+ int rc;
+
+ printk(KERN_INFO PFX DRIVER_VERSION);
+
+ spin_lock_init(&spinlock);
+
+ /*
+ * Check that the timeout value is within its range;
+ * if not, reset to the default
+ */
+ if (wdt_set_timeout(timeout)) {
+ wdt_set_timeout(DEFAULT_TIMEOUT);
+ printk(KERN_INFO PFX "timeout value must be 15<=timeout<=7635, using %d\n",
+ DEFAULT_TIMEOUT);
+ }
+
+ if (!request_region(IO_INDEX_PORT, 2, WATCHDOG_NAME))
+ {
+ printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
+ IO_INDEX_PORT);
+ rc = -EIO;
+ goto err_out;
+ }
+
+ rc = misc_register(&wdt_miscdev);
+ if (rc)
+ {
+ printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n",
+ wdt_miscdev.minor, rc);
+ goto err_out_region;
+ }
+
+ rc = register_reboot_notifier(&wdt_notifier);
+ if (rc)
+ {
+ printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n",
+ rc);
+ goto err_out_miscdev;
+ }
+
+ printk(KERN_INFO PFX "initialized. timeout=%d sec (nowayout=%d testmode=%d)\n",
+ timeout, nowayout, testmode);
+
+ return 0;
+
+err_out_miscdev:
+ misc_deregister(&wdt_miscdev);
+err_out_region:
+ release_region(IO_INDEX_PORT,2);
+err_out:
+ return rc;
+}
+
+static void __exit w83977f_wdt_exit(void)
+{
+ wdt_stop();
+ misc_deregister(&wdt_miscdev);
+ unregister_reboot_notifier(&wdt_notifier);
+ release_region(IO_INDEX_PORT,2);
+}
+
+module_init(w83977f_wdt_init);
+module_exit(w83977f_wdt_exit);
+
+MODULE_AUTHOR("Jose Goncalves <jose.goncalves@inov.pt>");
+MODULE_DESCRIPTION("Driver for watchdog timer in W83977F I/O chip");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
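As a worked example of the conversion in wdt_set_timeout(): the driver rounds up to the 30 second counter step, offset by the 15 second start-up value. A request of 45 seconds gives tmrval = ((45 + 15) + 29) / 30 = 2 and an effective timeout of 2 * 30 - 15 = 45 seconds, while a request of 60 seconds rounds up to tmrval = 3, i.e. 75 seconds; WDIOC_GETTIMEOUT reports the rounded value.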
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 966632182e2..9f2f00d8291 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -31,16 +31,19 @@
#include <linux/connector.h>
#include <linux/delay.h>
-static void cn_queue_wrapper(void *data)
+void cn_queue_wrapper(void *data)
{
- struct cn_callback_entry *cbq = data;
+ struct cn_callback_data *d = data;
- cbq->cb->callback(cbq->cb->priv);
- cbq->destruct_data(cbq->ddata);
- cbq->ddata = NULL;
+ d->callback(d->callback_priv);
+
+ d->destruct_data(d->ddata);
+ d->ddata = NULL;
+
+ kfree(d->free);
}
-static struct cn_callback_entry *cn_queue_alloc_callback_entry(struct cn_callback *cb)
+static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struct cb_id *id, void (*callback)(void *))
{
struct cn_callback_entry *cbq;
@@ -50,8 +53,11 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(struct cn_callbac
return NULL;
}
- cbq->cb = cb;
- INIT_WORK(&cbq->work, &cn_queue_wrapper, cbq);
+ snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
+ memcpy(&cbq->id.id, id, sizeof(struct cb_id));
+ cbq->data.callback = callback;
+
+ INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data);
return cbq;
}
@@ -68,12 +74,12 @@ int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
return ((i1->idx == i2->idx) && (i1->val == i2->val));
}
-int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb)
+int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *))
{
struct cn_callback_entry *cbq, *__cbq;
int found = 0;
- cbq = cn_queue_alloc_callback_entry(cb);
+ cbq = cn_queue_alloc_callback_entry(name, id, callback);
if (!cbq)
return -ENOMEM;
@@ -82,7 +88,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb)
spin_lock_bh(&dev->queue_lock);
list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
- if (cn_cb_equal(&__cbq->cb->id, &cb->id)) {
+ if (cn_cb_equal(&__cbq->id.id, id)) {
found = 1;
break;
}
@@ -99,7 +105,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb)
cbq->nls = dev->nls;
cbq->seq = 0;
- cbq->group = cbq->cb->id.idx;
+ cbq->group = cbq->id.id.idx;
return 0;
}
@@ -111,7 +117,7 @@ void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
spin_lock_bh(&dev->queue_lock);
list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
- if (cn_cb_equal(&cbq->cb->id, id)) {
+ if (cn_cb_equal(&cbq->id.id, id)) {
list_del(&cbq->callback_entry);
found = 1;
break;
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index aaf6d468a8b..bb0b3a8de14 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -84,7 +84,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask)
spin_lock_bh(&dev->cbdev->queue_lock);
list_for_each_entry(__cbq, &dev->cbdev->queue_list,
callback_entry) {
- if (cn_cb_equal(&__cbq->cb->id, &msg->id)) {
+ if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
found = 1;
group = __cbq->group;
}
@@ -127,42 +127,56 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
{
struct cn_callback_entry *__cbq;
struct cn_dev *dev = &cdev;
- int found = 0;
+ int err = -ENODEV;
spin_lock_bh(&dev->cbdev->queue_lock);
list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
- if (cn_cb_equal(&__cbq->cb->id, &msg->id)) {
- /*
- * Let's scream if there is some magic and the
- * data will arrive asynchronously here.
- * [i.e. netlink messages will be queued].
- * After the first warning I will fix it
- * quickly, but now I think it is
- * impossible. --zbr (2004_04_27).
- */
+ if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
if (likely(!test_bit(0, &__cbq->work.pending) &&
- __cbq->ddata == NULL)) {
- __cbq->cb->priv = msg;
+ __cbq->data.ddata == NULL)) {
+ __cbq->data.callback_priv = msg;
- __cbq->ddata = data;
- __cbq->destruct_data = destruct_data;
+ __cbq->data.ddata = data;
+ __cbq->data.destruct_data = destruct_data;
if (queue_work(dev->cbdev->cn_queue,
&__cbq->work))
- found = 1;
+ err = 0;
} else {
- printk("%s: cbq->data=%p, "
- "work->pending=%08lx.\n",
- __func__, __cbq->ddata,
- __cbq->work.pending);
- WARN_ON(1);
+ struct work_struct *w;
+ struct cn_callback_data *d;
+
+ w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC);
+ if (w) {
+ d = (struct cn_callback_data *)(w+1);
+
+ d->callback_priv = msg;
+ d->callback = __cbq->data.callback;
+ d->ddata = data;
+ d->destruct_data = destruct_data;
+ d->free = w;
+
+ INIT_LIST_HEAD(&w->entry);
+ w->pending = 0;
+ w->func = &cn_queue_wrapper;
+ w->data = d;
+ init_timer(&w->timer);
+
+ if (queue_work(dev->cbdev->cn_queue, w))
+ err = 0;
+ else {
+ kfree(w);
+ err = -EINVAL;
+ }
+ } else
+ err = -ENOMEM;
}
break;
}
}
spin_unlock_bh(&dev->cbdev->queue_lock);
- return found ? 0 : -ENODEV;
+ return err;
}
/*
@@ -291,22 +305,10 @@ int cn_add_callback(struct cb_id *id, char *name, void (*callback)(void *))
{
int err;
struct cn_dev *dev = &cdev;
- struct cn_callback *cb;
-
- cb = kzalloc(sizeof(*cb), GFP_KERNEL);
- if (!cb)
- return -ENOMEM;
-
- scnprintf(cb->name, sizeof(cb->name), "%s", name);
- memcpy(&cb->id, id, sizeof(cb->id));
- cb->callback = callback;
-
- err = cn_queue_add_callback(dev->cbdev, cb);
- if (err) {
- kfree(cb);
+ err = cn_queue_add_callback(dev->cbdev, name, id, callback);
+ if (err)
return err;
- }
cn_notify(id, 0);
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
index 3b865f34a09..b6678239825 100644
--- a/drivers/firmware/dell_rbu.c
+++ b/drivers/firmware/dell_rbu.c
@@ -50,7 +50,7 @@
MODULE_AUTHOR("Abhay Salunke <abhay_salunke@dell.com>");
MODULE_DESCRIPTION("Driver for updating BIOS image on DELL systems");
MODULE_LICENSE("GPL");
-MODULE_VERSION("1.0");
+MODULE_VERSION("2.0");
#define BIOS_SCAN_LIMIT 0xffffffff
#define MAX_IMAGE_LENGTH 16
@@ -65,10 +65,11 @@ static struct _rbu_data {
unsigned long packet_write_count;
unsigned long num_packets;
unsigned long packetsize;
+ int entry_created;
} rbu_data;
-static char image_type[MAX_IMAGE_LENGTH] = "mono";
-module_param_string(image_type, image_type, sizeof(image_type), 0);
+static char image_type[MAX_IMAGE_LENGTH + 1] = "mono";
+module_param_string(image_type, image_type, sizeof (image_type), 0);
MODULE_PARM_DESC(image_type, "BIOS image type. choose- mono or packet");
struct packet_data {
@@ -114,7 +115,7 @@ static int fill_last_packet(void *data, size_t length)
if ((rbu_data.packet_write_count + length) > rbu_data.packetsize) {
pr_debug("dell_rbu:%s: packet size data "
- "overrun\n", __FUNCTION__);
+ "overrun\n", __FUNCTION__);
return -EINVAL;
}
@@ -146,12 +147,14 @@ static int create_packet(size_t length)
pr_debug("create_packet: packetsize not specified\n");
return -EINVAL;
}
+ spin_unlock(&rbu_data.lock);
+ newpacket = kmalloc(sizeof (struct packet_data), GFP_KERNEL);
+ spin_lock(&rbu_data.lock);
- newpacket = kmalloc(sizeof(struct packet_data), GFP_KERNEL);
if (!newpacket) {
printk(KERN_WARNING
- "dell_rbu:%s: failed to allocate new "
- "packet\n", __FUNCTION__);
+ "dell_rbu:%s: failed to allocate new "
+ "packet\n", __FUNCTION__);
return -ENOMEM;
}
@@ -160,15 +163,17 @@ static int create_packet(size_t length)
* there is no upper limit on memory
* address for packetized mechanism
*/
- newpacket->data = (unsigned char *)__get_free_pages(GFP_KERNEL,
- ordernum);
+ spin_unlock(&rbu_data.lock);
+ newpacket->data = (unsigned char *) __get_free_pages(GFP_KERNEL,
+ ordernum);
+ spin_lock(&rbu_data.lock);
pr_debug("create_packet: newpacket %p\n", newpacket->data);
if (!newpacket->data) {
printk(KERN_WARNING
- "dell_rbu:%s: failed to allocate new "
- "packet\n", __FUNCTION__);
+ "dell_rbu:%s: failed to allocate new "
+ "packet\n", __FUNCTION__);
kfree(newpacket);
return -ENOMEM;
}
@@ -204,9 +209,8 @@ static int packetize_data(void *data, size_t length)
return rc;
}
-static int
-do_packet_read(char *data, struct list_head *ptemp_list,
- int length, int bytes_read, int *list_read_count)
+static int do_packet_read(char *data, struct list_head *ptemp_list,
+ int length, int bytes_read, int *list_read_count)
{
void *ptemp_buf;
struct packet_data *newpacket = NULL;
@@ -239,7 +243,7 @@ do_packet_read(char *data, struct list_head *ptemp_list,
return bytes_copied;
}
-static int packet_read_list(char *data, size_t * pread_length)
+static int packet_read_list(char *data, size_t *pread_length)
{
struct list_head *ptemp_list;
int temp_count = 0;
@@ -258,8 +262,7 @@ static int packet_read_list(char *data, size_t * pread_length)
ptemp_list = (&packet_data_head.list)->next;
while (!list_empty(ptemp_list)) {
bytes_copied = do_packet_read(pdest, ptemp_list,
- remaining_bytes, bytes_read,
- &temp_count);
+ remaining_bytes, bytes_read, &temp_count);
remaining_bytes -= bytes_copied;
bytes_read += bytes_copied;
pdest += bytes_copied;
@@ -287,7 +290,7 @@ static void packet_empty_list(void)
ptemp_list = (&packet_data_head.list)->next;
while (!list_empty(ptemp_list)) {
newpacket =
- list_entry(ptemp_list, struct packet_data, list);
+ list_entry(ptemp_list, struct packet_data, list);
pnext_list = ptemp_list->next;
list_del(ptemp_list);
ptemp_list = pnext_list;
@@ -296,8 +299,8 @@ static void packet_empty_list(void)
* to make sure there are no stale RBU packets left in memory
*/
memset(newpacket->data, 0, rbu_data.packetsize);
- free_pages((unsigned long)newpacket->data,
- newpacket->ordernum);
+ free_pages((unsigned long) newpacket->data,
+ newpacket->ordernum);
kfree(newpacket);
}
rbu_data.packet_write_count = 0;
@@ -319,14 +322,13 @@ static void img_update_free(void)
* BIOS image copied in memory.
*/
memset(rbu_data.image_update_buffer, 0,
- rbu_data.image_update_buffer_size);
+ rbu_data.image_update_buffer_size);
if (rbu_data.dma_alloc == 1)
dma_free_coherent(NULL, rbu_data.bios_image_size,
- rbu_data.image_update_buffer,
- dell_rbu_dmaaddr);
+ rbu_data.image_update_buffer, dell_rbu_dmaaddr);
else
- free_pages((unsigned long)rbu_data.image_update_buffer,
- rbu_data.image_update_ordernum);
+ free_pages((unsigned long) rbu_data.image_update_buffer,
+ rbu_data.image_update_ordernum);
/*
* Re-initialize the rbu_data variables after a free
@@ -366,7 +368,7 @@ static int img_update_realloc(unsigned long size)
*/
if ((size != 0) && (rbu_data.image_update_buffer == NULL)) {
printk(KERN_ERR "dell_rbu:%s: corruption "
- "check failed\n", __FUNCTION__);
+ "check failed\n", __FUNCTION__);
return -EINVAL;
}
/*
@@ -385,17 +387,16 @@ static int img_update_realloc(unsigned long size)
ordernum = get_order(size);
image_update_buffer =
- (unsigned char *)__get_free_pages(GFP_KERNEL, ordernum);
+ (unsigned char *) __get_free_pages(GFP_KERNEL, ordernum);
img_buf_phys_addr =
- (unsigned long)virt_to_phys(image_update_buffer);
+ (unsigned long) virt_to_phys(image_update_buffer);
if (img_buf_phys_addr > BIOS_SCAN_LIMIT) {
- free_pages((unsigned long)image_update_buffer, ordernum);
+ free_pages((unsigned long) image_update_buffer, ordernum);
ordernum = -1;
image_update_buffer = dma_alloc_coherent(NULL, size,
- &dell_rbu_dmaaddr,
- GFP_KERNEL);
+ &dell_rbu_dmaaddr, GFP_KERNEL);
dma_alloc = 1;
}
@@ -405,13 +406,13 @@ static int img_update_realloc(unsigned long size)
rbu_data.image_update_buffer = image_update_buffer;
rbu_data.image_update_buffer_size = size;
rbu_data.bios_image_size =
- rbu_data.image_update_buffer_size;
+ rbu_data.image_update_buffer_size;
rbu_data.image_update_ordernum = ordernum;
rbu_data.dma_alloc = dma_alloc;
rc = 0;
} else {
pr_debug("Not enough memory for image update:"
- "size = %ld\n", size);
+ "size = %ld\n", size);
rc = -ENOMEM;
}
@@ -438,7 +439,7 @@ static ssize_t read_packet_data(char *buffer, loff_t pos, size_t count)
if (pos > imagesize) {
retval = 0;
printk(KERN_WARNING "dell_rbu:read_packet_data: "
- "data underrun\n");
+ "data underrun\n");
goto read_rbu_data_exit;
}
@@ -468,11 +469,11 @@ static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
/* check to see if we have something to return */
if ((rbu_data.image_update_buffer == NULL) ||
- (rbu_data.bios_image_size == 0)) {
+ (rbu_data.bios_image_size == 0)) {
pr_debug("read_rbu_data_mono: image_update_buffer %p ,"
- "bios_image_size %lu\n",
- rbu_data.image_update_buffer,
- rbu_data.bios_image_size);
+ "bios_image_size %lu\n",
+ rbu_data.image_update_buffer,
+ rbu_data.bios_image_size);
ret_count = -ENOMEM;
goto read_rbu_data_exit;
}
@@ -497,8 +498,8 @@ static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
return ret_count;
}
-static ssize_t
-read_rbu_data(struct kobject *kobj, char *buffer, loff_t pos, size_t count)
+static ssize_t read_rbu_data(struct kobject *kobj, char *buffer,
+ loff_t pos, size_t count)
{
ssize_t ret_count = 0;
@@ -515,62 +516,20 @@ read_rbu_data(struct kobject *kobj, char *buffer, loff_t pos, size_t count)
return ret_count;
}
-static ssize_t
-read_rbu_image_type(struct kobject *kobj, char *buffer, loff_t pos,
- size_t count)
-{
- int size = 0;
- if (!pos)
- size = sprintf(buffer, "%s\n", image_type);
- return size;
-}
-
-static ssize_t
-write_rbu_image_type(struct kobject *kobj, char *buffer, loff_t pos,
- size_t count)
-{
- int rc = count;
- spin_lock(&rbu_data.lock);
-
- if (strlen(buffer) < MAX_IMAGE_LENGTH)
- sscanf(buffer, "%s", image_type);
- else
- printk(KERN_WARNING "dell_rbu: image_type is invalid"
- "max chars = %d, \n incoming str--%s-- \n",
- MAX_IMAGE_LENGTH, buffer);
-
- /* we must free all previous allocations */
- packet_empty_list();
- img_update_free();
-
- spin_unlock(&rbu_data.lock);
- return rc;
-
-}
-
-static struct bin_attribute rbu_data_attr = {
- .attr = {.name = "data",.owner = THIS_MODULE,.mode = 0444},
- .read = read_rbu_data,
-};
-
-static struct bin_attribute rbu_image_type_attr = {
- .attr = {.name = "image_type",.owner = THIS_MODULE,.mode = 0644},
- .read = read_rbu_image_type,
- .write = write_rbu_image_type,
-};
-
static void callbackfn_rbu(const struct firmware *fw, void *context)
{
int rc = 0;
- if (!fw || !fw->size)
+ if (!fw || !fw->size) {
+ rbu_data.entry_created = 0;
return;
+ }
spin_lock(&rbu_data.lock);
if (!strcmp(image_type, "mono")) {
if (!img_update_realloc(fw->size))
memcpy(rbu_data.image_update_buffer,
- fw->data, fw->size);
+ fw->data, fw->size);
} else if (!strcmp(image_type, "packet")) {
if (!rbu_data.packetsize)
rbu_data.packetsize = fw->size;
@@ -584,14 +543,103 @@ static void callbackfn_rbu(const struct firmware *fw, void *context)
spin_unlock(&rbu_data.lock);
rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
- "dell_rbu", &rbu_device->dev,
- &context, callbackfn_rbu);
+ "dell_rbu", &rbu_device->dev, &context, callbackfn_rbu);
if (rc)
printk(KERN_ERR
- "dell_rbu:%s request_firmware_nowait failed"
- " %d\n", __FUNCTION__, rc);
+ "dell_rbu:%s request_firmware_nowait failed"
+ " %d\n", __FUNCTION__, rc);
+ else
+ rbu_data.entry_created = 1;
+}
+
+static ssize_t read_rbu_image_type(struct kobject *kobj, char *buffer,
+ loff_t pos, size_t count)
+{
+ int size = 0;
+ if (!pos)
+ size = sprintf(buffer, "%s\n", image_type);
+ return size;
+}
+
+static ssize_t write_rbu_image_type(struct kobject *kobj, char *buffer,
+ loff_t pos, size_t count)
+{
+ int rc = count;
+ int req_firm_rc = 0;
+ int i;
+ spin_lock(&rbu_data.lock);
+ /*
+ * Find the first newline or space
+ */
+ for (i = 0; i < count; ++i)
+ if (buffer[i] == '\n' || buffer[i] == ' ') {
+ buffer[i] = '\0';
+ break;
+ }
+ if (i == count)
+ buffer[count] = '\0';
+
+ if (strstr(buffer, "mono"))
+ strcpy(image_type, "mono");
+ else if (strstr(buffer, "packet"))
+ strcpy(image_type, "packet");
+ else if (strstr(buffer, "init")) {
+ /*
+ * If, due to user error, the driver gets into a bad
+ * state where it is loaded but the
+ * /sys/class/firmware/dell_rbu entries are missing,
+ * the user can recreate the entries by writing
+ * "init" to image_type.
+ */
+ if (!rbu_data.entry_created) {
+ spin_unlock(&rbu_data.lock);
+ req_firm_rc = request_firmware_nowait(THIS_MODULE,
+ FW_ACTION_NOHOTPLUG, "dell_rbu",
+ &rbu_device->dev, &context,
+ callbackfn_rbu);
+ if (req_firm_rc) {
+ printk(KERN_ERR
+ "dell_rbu:%s request_firmware_nowait"
+ " failed %d\n", __FUNCTION__, rc);
+ rc = -EIO;
+ } else
+ rbu_data.entry_created = 1;
+
+ spin_lock(&rbu_data.lock);
+ }
+ } else {
+ printk(KERN_WARNING "dell_rbu: image_type is invalid\n");
+ spin_unlock(&rbu_data.lock);
+ return -EINVAL;
+ }
+
+ /* we must free all previous allocations */
+ packet_empty_list();
+ img_update_free();
+ spin_unlock(&rbu_data.lock);
+
+ return rc;
}
+static struct bin_attribute rbu_data_attr = {
+ .attr = {
+ .name = "data",
+ .owner = THIS_MODULE,
+ .mode = 0444,
+ },
+ .read = read_rbu_data,
+};
+
+static struct bin_attribute rbu_image_type_attr = {
+ .attr = {
+ .name = "image_type",
+ .owner = THIS_MODULE,
+ .mode = 0644,
+ },
+ .read = read_rbu_image_type,
+ .write = write_rbu_image_type,
+};
+
static int __init dcdrbu_init(void)
{
int rc = 0;
@@ -599,11 +647,11 @@ static int __init dcdrbu_init(void)
init_packet_head();
rbu_device =
- platform_device_register_simple("dell_rbu", -1, NULL, 0);
+ platform_device_register_simple("dell_rbu", -1, NULL, 0);
if (!rbu_device) {
printk(KERN_ERR
- "dell_rbu:%s:platform_device_register_simple "
- "failed\n", __FUNCTION__);
+ "dell_rbu:%s:platform_device_register_simple "
+ "failed\n", __FUNCTION__);
return -EIO;
}
@@ -611,11 +659,12 @@ static int __init dcdrbu_init(void)
sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr);
rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
- "dell_rbu", &rbu_device->dev,
- &context, callbackfn_rbu);
+ "dell_rbu", &rbu_device->dev, &context, callbackfn_rbu);
if (rc)
printk(KERN_ERR "dell_rbu:%s:request_firmware_nowait"
- " failed %d\n", __FUNCTION__, rc);
+ " failed %d\n", __FUNCTION__, rc);
+ else
+ rbu_data.entry_created = 1;
return rc;
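The "init" recovery path added above is driven entirely from user space through the image_type attribute that dcdrbu_init() registers on the dell_rbu platform device. A minimal user-space sketch, assuming the attribute ends up at the usual /sys/devices/platform/dell_rbu/image_type path (the path itself is not spelled out in this patch):

#include <stdio.h>

int main(void)
{
	/* Recreate the /sys/class/firmware/dell_rbu entries by writing
	 * "init", as described in the write_rbu_image_type() comment. */
	FILE *f = fopen("/sys/devices/platform/dell_rbu/image_type", "w");

	if (!f) {
		perror("image_type");
		return 1;
	}
	fputs("init", f);
	fclose(f);
	return 0;
}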
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 7e72e922b41..db358cfa7cb 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -418,12 +418,11 @@ config SENSORS_HDAPS
help
This driver provides support for the IBM Hard Drive Active Protection
System (hdaps), which provides an accelerometer and other misc. data.
- Supported laptops include the IBM ThinkPad T41, T42, T43, and R51.
- The accelerometer data is readable via sysfs.
+ ThinkPads starting with the R50, T41, and X40 are supported. The
+ accelerometer data is readable via sysfs.
- This driver also provides an input class device, allowing the
- laptop to act as a pinball machine-esque mouse. This is off by
- default but enabled via sysfs or the module parameter "mousedev".
+ This driver also provides an absolute input class device, allowing
+ the laptop to act as a pinball machine-esque joystick.
Say Y here if you have an applicable laptop and want to experience
the awesome power of hdaps.
diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c
index eaebfc14c93..7f010761382 100644
--- a/drivers/hwmon/hdaps.c
+++ b/drivers/hwmon/hdaps.c
@@ -4,11 +4,11 @@
* Copyright (C) 2005 Robert Love <rml@novell.com>
* Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
*
- * The HardDisk Active Protection System (hdaps) is present in the IBM ThinkPad
- * T41, T42, T43, R51, and X40, at least. It provides a basic two-axis
+ * The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads
+ * starting with the R40, T41, and X40. It provides a basic two-axis
* accelerometer and other data, such as the device's temperature.
*
- * Based on the document by Mark A. Smith available at
+ * This driver is based on the document by Mark A. Smith available at
* http://www.almaden.ibm.com/cs/people/marksmith/tpaps.html and a lot of trial
* and error.
*
@@ -36,12 +36,7 @@
#include <asm/io.h>
#define HDAPS_LOW_PORT 0x1600 /* first port used by hdaps */
-#define HDAPS_NR_PORTS 0x30 /* 0x1600 - 0x162f */
-
-#define STATE_FRESH 0x50 /* accelerometer data is fresh */
-
-#define REFRESH_ASYNC 0x00 /* do asynchronous refresh */
-#define REFRESH_SYNC 0x01 /* do synchronous refresh */
+#define HDAPS_NR_PORTS 0x30 /* number of ports: 0x1600 - 0x162f */
#define HDAPS_PORT_STATE 0x1611 /* device state */
#define HDAPS_PORT_YPOS 0x1612 /* y-axis position */
@@ -53,7 +48,7 @@
#define HDAPS_PORT_UNKNOWN 0x161c /* what is this? */
#define HDAPS_PORT_KMACT 0x161d /* keyboard or mouse activity */
-#define HDAPS_READ_MASK 0xff /* some reads have the low 8 bits set */
+#define STATE_FRESH 0x50 /* accelerometer data is fresh */
#define KEYBD_MASK 0x20 /* set if keyboard activity */
#define MOUSE_MASK 0x40 /* set if mouse activity */
@@ -63,12 +58,11 @@
#define INIT_TIMEOUT_MSECS 4000 /* wait up to 4s for device init ... */
#define INIT_WAIT_MSECS 200 /* ... in 200ms increments */
-static struct platform_device *pdev;
-static struct input_dev hdaps_idev;
+#define HDAPS_POLL_PERIOD (HZ/20) /* poll for input every 1/20s */
+#define HDAPS_INPUT_FUZZ 4 /* input event threshold */
+
static struct timer_list hdaps_timer;
-static unsigned int hdaps_mousedev_threshold = 4;
-static unsigned long hdaps_poll_ms = 50;
-static unsigned int hdaps_mousedev;
+static struct platform_device *pdev;
static unsigned int hdaps_invert;
static u8 km_activity;
static int rest_x;
@@ -81,14 +75,14 @@ static DECLARE_MUTEX(hdaps_sem);
*/
static inline u8 __get_latch(u16 port)
{
- return inb(port) & HDAPS_READ_MASK;
+ return inb(port) & 0xff;
}
/*
- * __check_latch - Check a port latch for a given value. Callers must hold
- * hdaps_sem. Returns zero if the port contains the given value.
+ * __check_latch - Check a port latch for a given value. Returns zero if the
+ * port contains the given value. Callers must hold hdaps_sem.
*/
-static inline unsigned int __check_latch(u16 port, u8 val)
+static inline int __check_latch(u16 port, u8 val)
{
if (__get_latch(port) == val)
return 0;
@@ -99,7 +93,7 @@ static inline unsigned int __check_latch(u16 port, u8 val)
* __wait_latch - Wait up to 100us for a port latch to get a certain value,
* returning zero if the value is obtained. Callers must hold hdaps_sem.
*/
-static unsigned int __wait_latch(u16 port, u8 val)
+static int __wait_latch(u16 port, u8 val)
{
unsigned int i;
@@ -109,59 +103,42 @@ static unsigned int __wait_latch(u16 port, u8 val)
udelay(5);
}
- return -EINVAL;
+ return -EIO;
}
/*
- * __device_refresh - Request a refresh from the accelerometer.
- *
- * If sync is REFRESH_SYNC, we perform a synchronous refresh and will wait.
- * Returns zero if successful and nonzero on error.
- *
- * If sync is REFRESH_ASYNC, we merely kick off a new refresh if the device is
- * not up-to-date. Always returns zero.
- *
- * Callers must hold hdaps_sem.
+ * __device_refresh - request a refresh from the accelerometer. Does not wait
+ * for refresh to complete. Callers must hold hdaps_sem.
*/
-static int __device_refresh(unsigned int sync)
+static void __device_refresh(void)
{
- u8 state;
-
- udelay(100);
-
- state = inb(0x1604);
- if (state == STATE_FRESH)
- return 0;
-
- outb(0x11, 0x1610);
- outb(0x01, 0x161f);
- if (sync == REFRESH_ASYNC)
- return 0;
+ udelay(200);
+ if (inb(0x1604) != STATE_FRESH) {
+ outb(0x11, 0x1610);
+ outb(0x01, 0x161f);
+ }
+}
+/*
+ * __device_refresh_sync - request a synchronous refresh from the
+ * accelerometer. We wait for the refresh to complete. Returns zero if
+ * successful and nonzero on error. Callers must hold hdaps_sem.
+ */
+static int __device_refresh_sync(void)
+{
+ __device_refresh();
return __wait_latch(0x1604, STATE_FRESH);
}
/*
- * __device_complete - Indicate to the accelerometer that we are done reading
+ * __device_complete - indicate to the accelerometer that we are done reading
* data, and then initiate an async refresh. Callers must hold hdaps_sem.
*/
static inline void __device_complete(void)
{
inb(0x161f);
inb(0x1604);
- __device_refresh(REFRESH_ASYNC);
-}
-
-static int __hdaps_readb_one(unsigned int port, u8 *val)
-{
- /* do a sync refresh -- we need to be sure that we read fresh data */
- if (__device_refresh(REFRESH_SYNC))
- return -EIO;
-
- *val = inb(port);
- __device_complete();
-
- return 0;
+ __device_refresh();
}
/*
@@ -174,17 +151,26 @@ static int hdaps_readb_one(unsigned int port, u8 *val)
int ret;
down(&hdaps_sem);
- ret = __hdaps_readb_one(port, val);
- up(&hdaps_sem);
+ /* do a sync refresh -- we need to be sure that we read fresh data */
+ ret = __device_refresh_sync();
+ if (ret)
+ goto out;
+
+ *val = inb(port);
+ __device_complete();
+
+out:
+ up(&hdaps_sem);
return ret;
}
+/* __hdaps_read_pair - internal lockless helper for hdaps_read_pair(). */
static int __hdaps_read_pair(unsigned int port1, unsigned int port2,
int *x, int *y)
{
/* do a sync refresh -- we need to be sure that we read fresh data */
- if (__device_refresh(REFRESH_SYNC))
+ if (__device_refresh_sync())
return -EIO;
*y = inw(port2);
@@ -217,11 +203,13 @@ static int hdaps_read_pair(unsigned int port1, unsigned int port2,
return ret;
}
-/* initialize the accelerometer */
+/*
+ * hdaps_device_init - initialize the accelerometer. Returns zero on success
+ * and negative error code on failure. Can sleep.
+ */
static int hdaps_device_init(void)
{
- unsigned int total_msecs = INIT_TIMEOUT_MSECS;
- int ret = -ENXIO;
+ int total, ret = -ENXIO;
down(&hdaps_sem);
@@ -231,8 +219,10 @@ static int hdaps_device_init(void)
goto out;
/*
- * The 0x03 value appears to only work on some thinkpads, such as the
- * T42p. Others return 0x01.
+ * Most ThinkPads return 0x01.
+ *
+ * Others--namely the R50p, T41p, and T42p--return 0x03. These laptops
+ * have "inverted" axises.
*
* The 0x02 value occurs when the chip has been previously initialized.
*/
@@ -267,24 +257,23 @@ static int hdaps_device_init(void)
outb(0x01, 0x161f);
if (__wait_latch(0x161f, 0x00))
goto out;
- if (__device_refresh(REFRESH_SYNC))
+ if (__device_refresh_sync())
goto out;
if (__wait_latch(0x1611, 0x00))
goto out;
/* we have done our dance, now let's wait for the applause */
- while (total_msecs > 0) {
- u8 ignored;
+ for (total = INIT_TIMEOUT_MSECS; total > 0; total -= INIT_WAIT_MSECS) {
+ int x, y;
/* a read of the device helps push it into action */
- __hdaps_readb_one(HDAPS_PORT_UNKNOWN, &ignored);
+ __hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y);
if (!__wait_latch(0x1611, 0x02)) {
ret = 0;
break;
}
msleep(INIT_WAIT_MSECS);
- total_msecs -= INIT_WAIT_MSECS;
}
out:
@@ -293,96 +282,6 @@ out:
}
-/* Input class stuff */
-
-/*
- * hdaps_calibrate - Zero out our "resting" values. Callers must hold hdaps_sem.
- */
-static void hdaps_calibrate(void)
-{
- int x, y;
-
- if (__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y))
- return;
-
- rest_x = x;
- rest_y = y;
-}
-
-static void hdaps_mousedev_poll(unsigned long unused)
-{
- int x, y;
-
- /* Cannot sleep. Try nonblockingly. If we fail, try again later. */
- if (down_trylock(&hdaps_sem)) {
- mod_timer(&hdaps_timer,jiffies+msecs_to_jiffies(hdaps_poll_ms));
- return;
- }
-
- if (__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y))
- goto out;
-
- x -= rest_x;
- y -= rest_y;
- if (abs(x) > hdaps_mousedev_threshold)
- input_report_rel(&hdaps_idev, REL_X, x);
- if (abs(y) > hdaps_mousedev_threshold)
- input_report_rel(&hdaps_idev, REL_Y, y);
- input_sync(&hdaps_idev);
-
- mod_timer(&hdaps_timer, jiffies + msecs_to_jiffies(hdaps_poll_ms));
-
-out:
- up(&hdaps_sem);
-}
-
-/*
- * hdaps_mousedev_enable - enable the input class device. Can sleep.
- */
-static void hdaps_mousedev_enable(void)
-{
- down(&hdaps_sem);
-
- /* calibrate the device before enabling */
- hdaps_calibrate();
-
- /* initialize the input class */
- init_input_dev(&hdaps_idev);
- hdaps_idev.dev = &pdev->dev;
- hdaps_idev.evbit[0] = BIT(EV_KEY) | BIT(EV_REL);
- hdaps_idev.relbit[0] = BIT(REL_X) | BIT(REL_Y);
- hdaps_idev.keybit[LONG(BTN_LEFT)] = BIT(BTN_LEFT);
- input_register_device(&hdaps_idev);
-
- /* start up our timer */
- init_timer(&hdaps_timer);
- hdaps_timer.function = hdaps_mousedev_poll;
- hdaps_timer.expires = jiffies + msecs_to_jiffies(hdaps_poll_ms);
- add_timer(&hdaps_timer);
-
- hdaps_mousedev = 1;
-
- up(&hdaps_sem);
-
- printk(KERN_INFO "hdaps: input device enabled.\n");
-}
-
-/*
- * hdaps_mousedev_disable - disable the input class device. Caller must hold
- * hdaps_sem.
- */
-static void hdaps_mousedev_disable(void)
-{
- down(&hdaps_sem);
- if (hdaps_mousedev) {
- hdaps_mousedev = 0;
- del_timer_sync(&hdaps_timer);
- input_unregister_device(&hdaps_idev);
- }
- up(&hdaps_sem);
-}
-
-
/* Device model stuff */
static int hdaps_probe(struct device *dev)
@@ -412,6 +311,49 @@ static struct device_driver hdaps_driver = {
.resume = hdaps_resume
};
+/* Input class stuff */
+
+static struct input_dev hdaps_idev = {
+ .name = "hdaps",
+ .evbit = { BIT(EV_ABS) },
+ .absbit = { BIT(ABS_X) | BIT(ABS_Y) },
+ .absmin = { [ABS_X] = -256, [ABS_Y] = -256 },
+ .absmax = { [ABS_X] = 256, [ABS_Y] = 256 },
+ .absfuzz = { [ABS_X] = HDAPS_INPUT_FUZZ, [ABS_Y] = HDAPS_INPUT_FUZZ },
+ .absflat = { [ABS_X] = HDAPS_INPUT_FUZZ, [ABS_Y] = HDAPS_INPUT_FUZZ },
+};
+
+/*
+ * hdaps_calibrate - Set our "resting" values. Callers must hold hdaps_sem.
+ */
+static void hdaps_calibrate(void)
+{
+ __hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &rest_x, &rest_y);
+}
+
+static void hdaps_mousedev_poll(unsigned long unused)
+{
+ int x, y;
+
+ /* Cannot sleep. Try nonblockingly. If we fail, try again later. */
+ if (down_trylock(&hdaps_sem)) {
+ mod_timer(&hdaps_timer, jiffies + HDAPS_POLL_PERIOD);
+ return;
+ }
+
+ if (__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y))
+ goto out;
+
+ input_report_abs(&hdaps_idev, ABS_X, x - rest_x);
+ input_report_abs(&hdaps_idev, ABS_Y, y - rest_y);
+ input_sync(&hdaps_idev);
+
+ mod_timer(&hdaps_timer, jiffies + HDAPS_POLL_PERIOD);
+
+out:
+ up(&hdaps_sem);
+}
+
/* Sysfs Files */
@@ -517,69 +459,6 @@ static ssize_t hdaps_invert_store(struct device *dev,
return count;
}
-static ssize_t hdaps_mousedev_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", hdaps_mousedev);
-}
-
-static ssize_t hdaps_mousedev_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int enable;
-
- if (sscanf(buf, "%d", &enable) != 1)
- return -EINVAL;
-
- if (enable == 1)
- hdaps_mousedev_enable();
- else if (enable == 0)
- hdaps_mousedev_disable();
- else
- return -EINVAL;
-
- return count;
-}
-
-static ssize_t hdaps_poll_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%lu\n", hdaps_poll_ms);
-}
-
-static ssize_t hdaps_poll_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int poll;
-
- if (sscanf(buf, "%u", &poll) != 1 || poll == 0)
- return -EINVAL;
- hdaps_poll_ms = poll;
-
- return count;
-}
-
-static ssize_t hdaps_threshold_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%u\n", hdaps_mousedev_threshold);
-}
-
-static ssize_t hdaps_threshold_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int threshold;
-
- if (sscanf(buf, "%u", &threshold) != 1 || threshold == 0)
- return -EINVAL;
- hdaps_mousedev_threshold = threshold;
-
- return count;
-}
-
static DEVICE_ATTR(position, 0444, hdaps_position_show, NULL);
static DEVICE_ATTR(variance, 0444, hdaps_variance_show, NULL);
static DEVICE_ATTR(temp1, 0444, hdaps_temp1_show, NULL);
@@ -588,10 +467,6 @@ static DEVICE_ATTR(keyboard_activity, 0444, hdaps_keyboard_activity_show, NULL);
static DEVICE_ATTR(mouse_activity, 0444, hdaps_mouse_activity_show, NULL);
static DEVICE_ATTR(calibrate, 0644, hdaps_calibrate_show,hdaps_calibrate_store);
static DEVICE_ATTR(invert, 0644, hdaps_invert_show, hdaps_invert_store);
-static DEVICE_ATTR(mousedev, 0644, hdaps_mousedev_show, hdaps_mousedev_store);
-static DEVICE_ATTR(mousedev_poll_ms, 0644, hdaps_poll_show, hdaps_poll_store);
-static DEVICE_ATTR(mousedev_threshold, 0644, hdaps_threshold_show,
- hdaps_threshold_store);
static struct attribute *hdaps_attributes[] = {
&dev_attr_position.attr,
@@ -601,9 +476,6 @@ static struct attribute *hdaps_attributes[] = {
&dev_attr_keyboard_activity.attr,
&dev_attr_mouse_activity.attr,
&dev_attr_calibrate.attr,
- &dev_attr_mousedev.attr,
- &dev_attr_mousedev_threshold.attr,
- &dev_attr_mousedev_poll_ms.attr,
&dev_attr_invert.attr,
NULL,
};
@@ -615,24 +487,19 @@ static struct attribute_group hdaps_attribute_group = {
/* Module stuff */
-/*
- * XXX: We should be able to return nonzero and halt the detection process.
- * But there is a bug in dmi_check_system() where a nonzero return from the
- * first match will result in a return of failure from dmi_check_system().
- * I fixed this; the patch is in 2.6-mm. Once in Linus's tree we can make
- * hdaps_dmi_match_invert() return hdaps_dmi_match(), which in turn returns 1.
- */
+/* hdaps_dmi_match - found a match. return one, short-circuiting the hunt. */
static int hdaps_dmi_match(struct dmi_system_id *id)
{
printk(KERN_INFO "hdaps: %s detected.\n", id->ident);
- return 0;
+ return 1;
}
+/* hdaps_dmi_match_invert - found an inverted match. */
static int hdaps_dmi_match_invert(struct dmi_system_id *id)
{
hdaps_invert = 1;
printk(KERN_INFO "hdaps: inverting axis readings.\n");
- return 0;
+ return hdaps_dmi_match(id);
}
#define HDAPS_DMI_MATCH_NORMAL(model) { \
@@ -662,12 +529,15 @@ static int __init hdaps_init(void)
HDAPS_DMI_MATCH_INVERT("ThinkPad R50p"),
HDAPS_DMI_MATCH_NORMAL("ThinkPad R50"),
HDAPS_DMI_MATCH_NORMAL("ThinkPad R51"),
+ HDAPS_DMI_MATCH_NORMAL("ThinkPad R52"),
HDAPS_DMI_MATCH_INVERT("ThinkPad T41p"),
HDAPS_DMI_MATCH_NORMAL("ThinkPad T41"),
HDAPS_DMI_MATCH_INVERT("ThinkPad T42p"),
HDAPS_DMI_MATCH_NORMAL("ThinkPad T42"),
HDAPS_DMI_MATCH_NORMAL("ThinkPad T43"),
HDAPS_DMI_MATCH_NORMAL("ThinkPad X40"),
+ HDAPS_DMI_MATCH_NORMAL("ThinkPad X41 Tablet"),
+ HDAPS_DMI_MATCH_NORMAL("ThinkPad X41"),
{ .ident = NULL }
};
@@ -696,8 +566,18 @@ static int __init hdaps_init(void)
if (ret)
goto out_device;
- if (hdaps_mousedev)
- hdaps_mousedev_enable();
+ /* initial calibrate for the input device */
+ hdaps_calibrate();
+
+ /* initialize the input class */
+ hdaps_idev.dev = &pdev->dev;
+ input_register_device(&hdaps_idev);
+
+ /* start up our timer for the input device */
+ init_timer(&hdaps_timer);
+ hdaps_timer.function = hdaps_mousedev_poll;
+ hdaps_timer.expires = jiffies + HDAPS_POLL_PERIOD;
+ add_timer(&hdaps_timer);
printk(KERN_INFO "hdaps: driver successfully loaded.\n");
return 0;
@@ -715,8 +595,8 @@ out:
static void __exit hdaps_exit(void)
{
- hdaps_mousedev_disable();
-
+ del_timer_sync(&hdaps_timer);
+ input_unregister_device(&hdaps_idev);
sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
platform_device_unregister(pdev);
driver_unregister(&hdaps_driver);
@@ -728,9 +608,6 @@ static void __exit hdaps_exit(void)
module_init(hdaps_init);
module_exit(hdaps_exit);
-module_param_named(mousedev, hdaps_mousedev, bool, 0);
-MODULE_PARM_DESC(mousedev, "enable the input class device");
-
module_param_named(invert, hdaps_invert, bool, 0);
MODULE_PARM_DESC(invert, "invert data along each axis");
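With the mousedev knobs removed, the accelerometer readings are exposed only through the sysfs attributes registered above (position, calibrate, invert, ...) and the new absolute input device. A minimal user-space sketch for the sysfs side, assuming the attribute group lands at /sys/devices/platform/hdaps/ and that hdaps_position_show() formats the pair as "(x,y)" (both are assumptions, not shown in this hunk):

#include <stdio.h>

int main(void)
{
	int x, y;
	/* Read the calibrated position pair exported by the driver. */
	FILE *f = fopen("/sys/devices/platform/hdaps/position", "r");

	if (!f) {
		perror("position");
		return 1;
	}
	if (fscanf(f, "(%d,%d)", &x, &y) == 2)
		printf("x=%d y=%d\n", x, y);
	fclose(f);
	return 0;
}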
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 8334496a7e0..3badfec75b1 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -245,6 +245,18 @@ config I2C_KEYWEST
This support is also available as a module. If so, the module
will be called i2c-keywest.
+config I2C_PMAC_SMU
+ tristate "Powermac SMU I2C interface"
+ depends on I2C && PMAC_SMU
+ help
+ This supports the use of the I2C interface in the SMU
+ chip on recent Apple machines like the iMac G5. It is used,
+ among others, by the thermal control driver for those machines.
+ Say Y if you have such a machine.
+
+ This support is also available as a module. If so, the module
+ will be called i2c-pmac-smu.
+
config I2C_MPC
tristate "MPC107/824x/85xx/52xx"
depends on I2C && PPC32
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 980b3e98367..f1df00f66c6 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_I2C_ITE) += i2c-ite.o
obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
obj-$(CONFIG_I2C_IXP4XX) += i2c-ixp4xx.o
obj-$(CONFIG_I2C_KEYWEST) += i2c-keywest.o
+obj-$(CONFIG_I2C_PMAC_SMU) += i2c-pmac-smu.o
obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o
diff --git a/drivers/i2c/busses/i2c-keywest.c b/drivers/i2c/busses/i2c-keywest.c
index 37b49c2daf5..eff5896ce86 100644
--- a/drivers/i2c/busses/i2c-keywest.c
+++ b/drivers/i2c/busses/i2c-keywest.c
@@ -611,7 +611,6 @@ create_iface(struct device_node *np, struct device *dev)
for (i=0; i<nchan; i++) {
struct keywest_chan* chan = &iface->channels[i];
- u8 addr;
sprintf(chan->adapter.name, "%s %d", np->parent->name, i);
chan->iface = iface;
diff --git a/drivers/i2c/busses/i2c-pmac-smu.c b/drivers/i2c/busses/i2c-pmac-smu.c
new file mode 100644
index 00000000000..8a9f5648a23
--- /dev/null
+++ b/drivers/i2c/busses/i2c-pmac-smu.c
@@ -0,0 +1,316 @@
+/*
+ i2c Support for Apple SMU Controller
+
+ Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp.
+ <benh@kernel.crashing.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <asm/prom.h>
+#include <asm/of_device.h>
+#include <asm/smu.h>
+
+static int probe;
+
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("I2C driver for Apple's SMU");
+MODULE_LICENSE("GPL");
+module_param(probe, bool, 0);
+
+
+/* Physical interface */
+struct smu_iface
+{
+ struct i2c_adapter adapter;
+ struct completion complete;
+ u32 busid;
+};
+
+static void smu_i2c_done(struct smu_i2c_cmd *cmd, void *misc)
+{
+ struct smu_iface *iface = misc;
+ complete(&iface->complete);
+}
+
+/*
+ * SMBUS-type transfer entrypoint
+ */
+static s32 smu_smbus_xfer( struct i2c_adapter* adap,
+ u16 addr,
+ unsigned short flags,
+ char read_write,
+ u8 command,
+ int size,
+ union i2c_smbus_data* data)
+{
+ struct smu_iface *iface = i2c_get_adapdata(adap);
+ struct smu_i2c_cmd cmd;
+ int rc = 0;
+ int read = (read_write == I2C_SMBUS_READ);
+
+ cmd.info.bus = iface->busid;
+ cmd.info.devaddr = (addr << 1) | (read ? 0x01 : 0x00);
+
+ /* Prepare data & select mode */
+ switch (size) {
+ case I2C_SMBUS_QUICK:
+ cmd.info.type = SMU_I2C_TRANSFER_SIMPLE;
+ cmd.info.datalen = 0;
+ break;
+ case I2C_SMBUS_BYTE:
+ cmd.info.type = SMU_I2C_TRANSFER_SIMPLE;
+ cmd.info.datalen = 1;
+ if (!read)
+ cmd.info.data[0] = data->byte;
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
+ cmd.info.datalen = 1;
+ cmd.info.sublen = 1;
+ cmd.info.subaddr[0] = command;
+ cmd.info.subaddr[1] = 0;
+ cmd.info.subaddr[2] = 0;
+ if (!read)
+ cmd.info.data[0] = data->byte;
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
+ cmd.info.datalen = 2;
+ cmd.info.sublen = 1;
+ cmd.info.subaddr[0] = command;
+ cmd.info.subaddr[1] = 0;
+ cmd.info.subaddr[2] = 0;
+ if (!read) {
+ cmd.info.data[0] = data->byte & 0xff;
+ cmd.info.data[1] = (data->byte >> 8) & 0xff;
+ }
+ break;
+ /* Note that these are broken vs. the expected smbus API where
+ * on reads, the length is actually returned from the function,
+ * but I think the current API makes no sense and I don't want
+ * any driver that I haven't verified for correctness to go
+ * anywhere near a pmac i2c bus anyway ...
+ */
+ case I2C_SMBUS_BLOCK_DATA:
+ cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
+ cmd.info.datalen = data->block[0] + 1;
+ if (cmd.info.datalen > 6)
+ return -EINVAL;
+ if (!read)
+ memcpy(cmd.info.data, data->block, cmd.info.datalen);
+ cmd.info.sublen = 1;
+ cmd.info.subaddr[0] = command;
+ cmd.info.subaddr[1] = 0;
+ cmd.info.subaddr[2] = 0;
+ break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
+ cmd.info.datalen = data->block[0];
+ if (cmd.info.datalen > 7)
+ return -EINVAL;
+ if (!read)
+ memcpy(cmd.info.data, &data->block[1],
+ cmd.info.datalen);
+ cmd.info.sublen = 1;
+ cmd.info.subaddr[0] = command;
+ cmd.info.subaddr[1] = 0;
+ cmd.info.subaddr[2] = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Turn a standardsub read into a combined mode access */
+ if (read_write == I2C_SMBUS_READ &&
+ cmd.info.type == SMU_I2C_TRANSFER_STDSUB)
+ cmd.info.type = SMU_I2C_TRANSFER_COMBINED;
+
+ /* Finish filling command and submit it */
+ cmd.done = smu_i2c_done;
+ cmd.misc = iface;
+ rc = smu_queue_i2c(&cmd);
+ if (rc < 0)
+ return rc;
+ wait_for_completion(&iface->complete);
+ rc = cmd.status;
+
+ if (!read || rc < 0)
+ return rc;
+
+ switch (size) {
+ case I2C_SMBUS_BYTE:
+ case I2C_SMBUS_BYTE_DATA:
+ data->byte = cmd.info.data[0];
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ data->word = ((u16)cmd.info.data[1]) << 8;
+ data->word |= cmd.info.data[0];
+ break;
+ /* Note that these are broken vs. the expected smbus API where
+ * on reads, the length is actually returned from the function,
+ * but I think the current API makes no sense and I don't want
+ * any driver that I haven't verified for correctness to go
+ * anywhere near a pmac i2c bus anyway ...
+ */
+ case I2C_SMBUS_BLOCK_DATA:
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ memcpy(&data->block[0], cmd.info.data, cmd.info.datalen);
+ break;
+ }
+
+ return rc;
+}
+
+static u32
+smu_smbus_func(struct i2c_adapter * adapter)
+{
+ return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA;
+}
+
+/* For now, we only handle combined mode (smbus) */
+static struct i2c_algorithm smu_algorithm = {
+ .smbus_xfer = smu_smbus_xfer,
+ .functionality = smu_smbus_func,
+};
+
+static int create_iface(struct device_node *np, struct device *dev)
+{
+ struct smu_iface* iface;
+ u32 *reg, busid;
+ int rc;
+
+ reg = (u32 *)get_property(np, "reg", NULL);
+ if (reg == NULL) {
+ printk(KERN_ERR "i2c-pmac-smu: can't find bus number !\n");
+ return -ENXIO;
+ }
+ busid = *reg;
+
+ iface = kmalloc(sizeof(struct smu_iface), GFP_KERNEL);
+ if (iface == NULL) {
+ printk(KERN_ERR "i2c-pmac-smu: can't allocate inteface !\n");
+ return -ENOMEM;
+ }
+ memset(iface, 0, sizeof(struct smu_iface));
+ init_completion(&iface->complete);
+ iface->busid = busid;
+
+ dev_set_drvdata(dev, iface);
+
+ sprintf(iface->adapter.name, "smu-i2c-%02x", busid);
+ iface->adapter.algo = &smu_algorithm;
+ iface->adapter.algo_data = NULL;
+ iface->adapter.client_register = NULL;
+ iface->adapter.client_unregister = NULL;
+ i2c_set_adapdata(&iface->adapter, iface);
+ iface->adapter.dev.parent = dev;
+
+ rc = i2c_add_adapter(&iface->adapter);
+ if (rc) {
+ printk(KERN_ERR "i2c-pamc-smu.c: Adapter %s registration "
+ "failed\n", iface->adapter.name);
+ i2c_set_adapdata(&iface->adapter, NULL);
+ }
+
+ if (probe) {
+ unsigned char addr;
+ printk("Probe: ");
+ for (addr = 0x00; addr <= 0x7f; addr++) {
+ if (i2c_smbus_xfer(&iface->adapter,addr,
+ 0,0,0,I2C_SMBUS_QUICK,NULL) >= 0)
+ printk("%02x ", addr);
+ }
+ printk("\n");
+ }
+
+ printk(KERN_INFO "SMU i2c bus %x registered\n", busid);
+
+ return 0;
+}
+
+static int dispose_iface(struct device *dev)
+{
+ struct smu_iface *iface = dev_get_drvdata(dev);
+ int rc;
+
+ rc = i2c_del_adapter(&iface->adapter);
+ i2c_set_adapdata(&iface->adapter, NULL);
+ /* We aren't that prepared to deal with this... */
+ if (rc)
+ printk("i2c-pmac-smu.c: Failed to remove bus %s !\n",
+ iface->adapter.name);
+ dev_set_drvdata(dev, NULL);
+ kfree(iface);
+
+ return 0;
+}
+
+
+static int create_iface_of_platform(struct of_device* dev,
+ const struct of_device_id *match)
+{
+ return create_iface(dev->node, &dev->dev);
+}
+
+
+static int dispose_iface_of_platform(struct of_device* dev)
+{
+ return dispose_iface(&dev->dev);
+}
+
+
+static struct of_device_id i2c_smu_match[] =
+{
+ {
+ .compatible = "smu-i2c",
+ },
+ {},
+};
+static struct of_platform_driver i2c_smu_of_platform_driver =
+{
+ .name = "i2c-smu",
+ .match_table = i2c_smu_match,
+ .probe = create_iface_of_platform,
+ .remove = dispose_iface_of_platform
+};
+
+
+static int __init i2c_pmac_smu_init(void)
+{
+ of_register_driver(&i2c_smu_of_platform_driver);
+ return 0;
+}
+
+
+static void __exit i2c_pmac_smu_cleanup(void)
+{
+ of_unregister_driver(&i2c_smu_of_platform_driver);
+}
+
+module_init(i2c_pmac_smu_init);
+module_exit(i2c_pmac_smu_cleanup);
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index fdf53ce0424..44b595d90a4 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -914,19 +914,23 @@ static int i2c_pxa_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num
return ret;
}
+static u32 i2c_pxa_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
static struct i2c_algorithm i2c_pxa_algorithm = {
- .name = "PXA-I2C-Algorithm",
- .id = I2C_ALGO_PXA,
.master_xfer = i2c_pxa_xfer,
+ .functionality = i2c_pxa_functionality,
};
static struct pxa_i2c i2c_pxa = {
.lock = SPIN_LOCK_UNLOCKED,
.wait = __WAIT_QUEUE_HEAD_INITIALIZER(i2c_pxa.wait),
.adap = {
- .name = "pxa2xx-i2c",
- .id = I2C_ALGO_PXA,
+ .owner = THIS_MODULE,
.algo = &i2c_pxa_algorithm,
+ .name = "pxa2xx-i2c",
.retries = 5,
},
};
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index b443b04a4c5..0b0aa4f5162 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -601,44 +601,15 @@ EXPORT_SYMBOL(ide_wait_stat);
*/
u8 eighty_ninty_three (ide_drive_t *drive)
{
-#if 0
- if (!HWIF(drive)->udma_four)
+ if (HWIF(drive)->udma_four == 0)
+ return 0;
+ if (!(drive->id->hw_config & 0x6000))
return 0;
-
- if (drive->id->major_rev_num) {
- int hssbd = 0;
- int i;
- /*
- * Determine highest Supported SPEC
- */
- for (i=1; i<=15; i++)
- if (drive->id->major_rev_num & (1<<i))
- hssbd++;
-
- switch (hssbd) {
- case 7:
- case 6:
- case 5:
- /* ATA-4 and older do not support above Ultra 33 */
- default:
- return 0;
- }
- }
-
- return ((u8) (
-#ifndef CONFIG_IDEDMA_IVB
- (drive->id->hw_config & 0x4000) &&
-#endif /* CONFIG_IDEDMA_IVB */
- (drive->id->hw_config & 0x6000)) ? 1 : 0);
-
-#else
-
- return ((u8) ((HWIF(drive)->udma_four) &&
#ifndef CONFIG_IDEDMA_IVB
- (drive->id->hw_config & 0x4000) &&
+ if (!(drive->id->hw_config & 0x4000))
+ return 0;
#endif /* CONFIG_IDEDMA_IVB */
- (drive->id->hw_config & 0x6000)) ? 1 : 0);
-#endif
+ return 1;
}
EXPORT_SYMBOL(eighty_ninty_three);
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index d04f62ab5de..ace8edad6e9 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -500,6 +500,7 @@ static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long
}
rq.special = args;
+ args->rq = &rq;
return ide_do_drive_cmd(drive, &rq, ide_wait);
}
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 0ccf85fcee3..a35a58bef1a 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -477,7 +477,7 @@ static struct pcmcia_device_id ide_ids[] = {
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b),
- PCMCIA_DEVICE_PROD_ID12(" ", "NinjaATA-", 0x3b6e20c8, 0xebe0bd79),
+ PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728),
PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1),
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index 3de9ab897e4..3d9c7afc869 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -608,7 +608,7 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha
#ifdef __i386__
if (dev->resource[PCI_ROM_RESOURCE].start) {
- pci_write_config_byte(dev, PCI_ROM_ADDRESS, dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
+ pci_write_config_dword(dev, PCI_ROM_ADDRESS, dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
printk(KERN_INFO "%s: ROM enabled at 0x%08lx\n", name, dev->resource[PCI_ROM_RESOURCE].start);
}
#endif
diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c
index bbde4627998..be334da7a75 100644
--- a/drivers/ide/pci/hpt34x.c
+++ b/drivers/ide/pci/hpt34x.c
@@ -173,7 +173,7 @@ static unsigned int __devinit init_chipset_hpt34x(struct pci_dev *dev, const cha
if (cmd & PCI_COMMAND_MEMORY) {
if (pci_resource_start(dev, PCI_ROM_RESOURCE)) {
- pci_write_config_byte(dev, PCI_ROM_ADDRESS,
+ pci_write_config_dword(dev, PCI_ROM_ADDRESS,
dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
printk(KERN_INFO "HPT345: ROM enabled at 0x%08lx\n",
dev->resource[PCI_ROM_RESOURCE].start);
diff --git a/drivers/ieee1394/amdtp.c b/drivers/ieee1394/amdtp.c
index 84ae027b021..e8e28569a66 100644
--- a/drivers/ieee1394/amdtp.c
+++ b/drivers/ieee1394/amdtp.c
@@ -1297,4 +1297,3 @@ static void __exit amdtp_exit_module (void)
module_init(amdtp_init_module);
module_exit(amdtp_exit_module);
-MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_AMDTP * 16);
diff --git a/drivers/ieee1394/csr1212.h b/drivers/ieee1394/csr1212.h
index e6734263a1d..28c5f4b726e 100644
--- a/drivers/ieee1394/csr1212.h
+++ b/drivers/ieee1394/csr1212.h
@@ -37,7 +37,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <asm/pgalloc.h>
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 4538b0235ca..e34730c7a87 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -2660,4 +2660,3 @@ static int __init dv1394_init_module(void)
module_init(dv1394_init_module);
module_exit(dv1394_exit_module);
-MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16);
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index cd53c174ced..4802bbbb6dc 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -89,7 +89,7 @@
#define TRACE() printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__)
static char version[] __devinitdata =
- "$Rev: 1264 $ Ben Collins <bcollins@debian.org>";
+ "$Rev: 1312 $ Ben Collins <bcollins@debian.org>";
struct fragment_info {
struct list_head list;
@@ -221,9 +221,7 @@ static int ether1394_open (struct net_device *dev)
if (priv->bc_state == ETHER1394_BC_ERROR) {
/* we'll try again */
priv->iso = hpsb_iso_recv_init(priv->host,
- ETHER1394_GASP_BUFFERS * 2 *
- (1 << (priv->host->csr.max_rec +
- 1)),
+ ETHER1394_ISO_BUF_SIZE,
ETHER1394_GASP_BUFFERS,
priv->broadcast_channel,
HPSB_ISO_DMA_PACKET_PER_BUFFER,
@@ -635,8 +633,8 @@ static void ether1394_add_host (struct hpsb_host *host)
* be checked when the eth device is opened. */
priv->broadcast_channel = host->csr.broadcast_channel & 0x3f;
- priv->iso = hpsb_iso_recv_init(host, (ETHER1394_GASP_BUFFERS * 2 *
- (1 << (host->csr.max_rec + 1))),
+ priv->iso = hpsb_iso_recv_init(host,
+ ETHER1394_ISO_BUF_SIZE,
ETHER1394_GASP_BUFFERS,
priv->broadcast_channel,
HPSB_ISO_DMA_PACKET_PER_BUFFER,
@@ -1770,7 +1768,7 @@ fail:
static void ether1394_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strcpy (info->driver, driver_name);
- strcpy (info->version, "$Rev: 1264 $");
+ strcpy (info->version, "$Rev: 1312 $");
/* FIXME XXX provide sane businfo */
strcpy (info->bus_info, "ieee1394");
}
diff --git a/drivers/ieee1394/eth1394.h b/drivers/ieee1394/eth1394.h
index ed8f1c4b7fd..a77213cfc48 100644
--- a/drivers/ieee1394/eth1394.h
+++ b/drivers/ieee1394/eth1394.h
@@ -44,6 +44,12 @@
#define ETHER1394_GASP_BUFFERS 16
+/* rawiso buffer size - due to a limitation in rawiso, we must limit each
+ * GASP buffer to be less than PAGE_SIZE. */
+#define ETHER1394_ISO_BUF_SIZE ETHER1394_GASP_BUFFERS * \
+ min((unsigned int)PAGE_SIZE, \
+ 2 * (1U << (priv->host->csr.max_rec + 1)))
+
/* Node set == 64 */
#define NODE_SET (ALL_NODES + 1)
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index c502c6e9c44..aeeaeb670d0 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/timer.h>
+#include <linux/jiffies.h>
#include "csr1212.h"
#include "ieee1394.h"
@@ -217,7 +218,7 @@ int hpsb_update_config_rom_image(struct hpsb_host *host)
/* IEEE 1394a-2000 prohibits using the same generation number
* twice in a 60 second period. */
- if (jiffies - host->csr.gen_timestamp[next_gen] < 60 * HZ)
+ if (time_before(jiffies, host->csr.gen_timestamp[next_gen] + 60 * HZ))
/* Wait 60 seconds from the last time this generation number was
* used. */
reset_delay = (60 * HZ) + host->csr.gen_timestamp[next_gen] - jiffies;
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index 739e76840d5..38f42112dff 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -135,17 +135,17 @@ enum isoctl_cmd {
enum reset_types {
/* 166 microsecond reset -- only type of reset available on
- non-1394a capable IEEE 1394 controllers */
+ non-1394a capable controllers */
LONG_RESET,
/* Short (arbitrated) reset -- only available on 1394a capable
- IEEE 1394 capable controllers */
+ controllers */
SHORT_RESET,
- /* Variants, that set force_root before issueing the bus reset */
+ /* Variants that set force_root before issuing the bus reset */
LONG_RESET_FORCE_ROOT, SHORT_RESET_FORCE_ROOT,
- /* Variants, that clear force_root before issueing the bus reset */
+ /* Variants that clear force_root before issuing the bus reset */
LONG_RESET_NO_FORCE_ROOT, SHORT_RESET_NO_FORCE_ROOT
};
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index d633770fac8..32a1e016c85 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -70,7 +70,7 @@ const char *hpsb_speedto_str[] = { "S100", "S200", "S400", "S800", "S1600", "S32
struct class *hpsb_protocol_class;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
-static void dump_packet(const char *text, quadlet_t *data, int size)
+static void dump_packet(const char *text, quadlet_t *data, int size, int speed)
{
int i;
@@ -78,12 +78,15 @@ static void dump_packet(const char *text, quadlet_t *data, int size)
size = (size > 4 ? 4 : size);
printk(KERN_DEBUG "ieee1394: %s", text);
+ if (speed > -1 && speed < 6)
+ printk(" at %s", hpsb_speedto_str[speed]);
+ printk(":");
for (i = 0; i < size; i++)
printk(" %08x", data[i]);
printk("\n");
}
#else
-#define dump_packet(x,y,z)
+#define dump_packet(a,b,c,d)
#endif
static void abort_requests(struct hpsb_host *host);
@@ -544,8 +547,7 @@ int hpsb_send_packet(struct hpsb_packet *packet)
if (packet->data_size)
memcpy(((u8*)data) + packet->header_size, packet->data, packet->data_size);
- dump_packet("send packet local:", packet->header,
- packet->header_size);
+ dump_packet("send packet local", packet->header, packet->header_size, -1);
hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE);
hpsb_packet_received(host, data, size, 0);
@@ -561,21 +563,7 @@ int hpsb_send_packet(struct hpsb_packet *packet)
+ NODEID_TO_NODE(packet->node_id)];
}
-#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
- switch (packet->speed_code) {
- case 2:
- dump_packet("send packet 400:", packet->header,
- packet->header_size);
- break;
- case 1:
- dump_packet("send packet 200:", packet->header,
- packet->header_size);
- break;
- default:
- dump_packet("send packet 100:", packet->header,
- packet->header_size);
- }
-#endif
+ dump_packet("send packet", packet->header, packet->header_size, packet->speed_code);
return host->driver->transmit_packet(host, packet);
}
@@ -636,7 +624,7 @@ static void handle_packet_response(struct hpsb_host *host, int tcode,
if (packet == NULL) {
HPSB_DEBUG("unsolicited response packet received - no tlabel match");
- dump_packet("contents:", data, 16);
+ dump_packet("contents", data, 16, -1);
spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
return;
}
@@ -677,7 +665,7 @@ static void handle_packet_response(struct hpsb_host *host, int tcode,
if (!tcode_match) {
spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
HPSB_INFO("unsolicited response packet received - tcode mismatch");
- dump_packet("contents:", data, 16);
+ dump_packet("contents", data, 16, -1);
return;
}
@@ -914,7 +902,7 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
return;
}
- dump_packet("received packet:", data, size);
+ dump_packet("received packet", data, size, -1);
tcode = (data[0] >> 4) & 0xf;
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index b23322523ef..347ece6b583 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -64,10 +64,10 @@ static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
struct nodemgr_csr_info *ci = (struct nodemgr_csr_info*)__ci;
int i, ret = 0;
- for (i = 0; i < 3; i++) {
+ for (i = 1; ; i++) {
ret = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
buffer, length);
- if (!ret)
+ if (!ret || i == 3)
break;
if (msleep_interruptible(334))
@@ -1438,9 +1438,13 @@ static int nodemgr_do_irm_duties(struct hpsb_host *host, int cycles)
if (host->busmgr_id == 0xffff && host->node_count > 1)
{
u16 root_node = host->node_count - 1;
- struct node_entry *ne = find_entry_by_nodeid(host, root_node | LOCAL_BUS);
- if (ne && ne->busopt.cmc)
+ /* get cycle master capability flag from root node */
+ if (host->is_cycmst ||
+ (!hpsb_read(host, LOCAL_BUS | root_node, get_hpsb_generation(host),
+ (CSR_REGISTER_BASE + CSR_CONFIG_ROM + 2 * sizeof(quadlet_t)),
+ &bc, sizeof(quadlet_t)) &&
+ be32_to_cpu(bc) & 1 << CSR_CMC_SHIFT))
hpsb_send_phy_config(host, root_node, -1);
else {
HPSB_DEBUG("The root node is not cycle master capable; "
@@ -1557,24 +1561,19 @@ static int nodemgr_host_thread(void *__hi)
}
}
- if (!nodemgr_check_irm_capability(host, reset_cycles)) {
+ if (!nodemgr_check_irm_capability(host, reset_cycles) ||
+ !nodemgr_do_irm_duties(host, reset_cycles)) {
reset_cycles++;
up(&nodemgr_serialize);
continue;
}
+ reset_cycles = 0;
/* Scan our nodes to get the bus options and create node
* entries. This does not do the sysfs stuff, since that
* would trigger hotplug callbacks and such, which is a
* bad idea at this point. */
nodemgr_node_scan(hi, generation);
- if (!nodemgr_do_irm_duties(host, reset_cycles)) {
- reset_cycles++;
- up(&nodemgr_serialize);
- continue;
- }
-
- reset_cycles = 0;
/* This actually does the full probe, with sysfs
* registration. */
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 27018c8efc2..6a6acbd80af 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -162,7 +162,7 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
static char version[] __devinitdata =
- "$Rev: 1299 $ Ben Collins <bcollins@debian.org>";
+ "$Rev: 1313 $ Ben Collins <bcollins@debian.org>";
/* Module Parameters */
static int phys_dma = 1;
@@ -1084,7 +1084,7 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
if (printk_ratelimit())
- PRINT(KERN_ERR, "IR legacy activated");
+ DBGMSG("IR legacy activated");
}
spin_lock_irqsave(&ohci->IR_channel_lock, flags);
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index b4fa14793fe..5fe4f2ba097 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -2958,4 +2958,3 @@ static void __exit cleanup_raw1394(void)
module_init(init_raw1394);
module_exit(cleanup_raw1394);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16);
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index de88218ef7c..12cec7c4a34 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -97,16 +97,18 @@ static char version[] __devinitdata =
*/
static int max_speed = IEEE1394_SPEED_MAX;
module_param(max_speed, int, 0644);
-MODULE_PARM_DESC(max_speed, "Force max speed (3 = 800mb, 2 = 400mb default, 1 = 200mb, 0 = 100mb)");
+MODULE_PARM_DESC(max_speed, "Force max speed (3 = 800mb, 2 = 400mb, 1 = 200mb, 0 = 100mb)");
/*
* Set serialize_io to 1 if you'd like only one scsi command sent
* down to us at a time (debugging). This might be necessary for very
* badly behaved sbp2 devices.
+ *
+ * TODO: Make this configurable per device.
*/
-static int serialize_io;
+static int serialize_io = 1;
module_param(serialize_io, int, 0444);
-MODULE_PARM_DESC(serialize_io, "Serialize all I/O coming down from the scsi drivers (default = 0)");
+MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers (default = 1, faster = 0)");
/*
* Bump up max_sectors if you'd like to support very large sized
@@ -596,6 +598,14 @@ static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_i
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
}
+/*
+ * Is scsi_id valid? Is the 1394 node still present?
+ */
+static inline int sbp2util_node_is_available(struct scsi_id_instance_data *scsi_id)
+{
+ return scsi_id && scsi_id->ne && !scsi_id->ne->in_limbo;
+}
+
/*********************************************
@@ -631,11 +641,23 @@ static int sbp2_remove(struct device *dev)
{
struct unit_directory *ud;
struct scsi_id_instance_data *scsi_id;
+ struct scsi_device *sdev;
SBP2_DEBUG("sbp2_remove");
ud = container_of(dev, struct unit_directory, device);
scsi_id = ud->device.driver_data;
+ if (!scsi_id)
+ return 0;
+
+ /* Trigger shutdown functions in scsi's highlevel. */
+ if (scsi_id->scsi_host)
+ scsi_unblock_requests(scsi_id->scsi_host);
+ sdev = scsi_id->sdev;
+ if (sdev) {
+ scsi_id->sdev = NULL;
+ scsi_remove_device(sdev);
+ }
sbp2_logout_device(scsi_id);
sbp2_remove_device(scsi_id);
@@ -2473,37 +2495,26 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
struct scsi_id_instance_data *scsi_id =
(struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
struct sbp2scsi_host_info *hi;
+ int result = DID_NO_CONNECT << 16;
SBP2_DEBUG("sbp2scsi_queuecommand");
- /*
- * If scsi_id is null, it means there is no device in this slot,
- * so we should return selection timeout.
- */
- if (!scsi_id) {
- SCpnt->result = DID_NO_CONNECT << 16;
- done (SCpnt);
- return 0;
- }
+ if (!sbp2util_node_is_available(scsi_id))
+ goto done;
hi = scsi_id->hi;
if (!hi) {
SBP2_ERR("sbp2scsi_host_info is NULL - this is bad!");
- SCpnt->result = DID_NO_CONNECT << 16;
- done (SCpnt);
- return(0);
+ goto done;
}
/*
* Until we handle multiple luns, just return selection time-out
* to any IO directed at non-zero LUNs
*/
- if (SCpnt->device->lun) {
- SCpnt->result = DID_NO_CONNECT << 16;
- done (SCpnt);
- return(0);
- }
+ if (SCpnt->device->lun)
+ goto done;
/*
* Check for request sense command, and handle it here
@@ -2514,7 +2525,7 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
memcpy(SCpnt->request_buffer, SCpnt->sense_buffer, SCpnt->request_bufflen);
memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_GOOD, SCpnt, done);
- return(0);
+ return 0;
}
/*
@@ -2522,9 +2533,8 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
*/
if (!hpsb_node_entry_valid(scsi_id->ne)) {
SBP2_ERR("Bus reset in progress - rejecting command");
- SCpnt->result = DID_BUS_BUSY << 16;
- done (SCpnt);
- return(0);
+ result = DID_BUS_BUSY << 16;
+ goto done;
}
/*
@@ -2535,8 +2545,12 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_SELECTION_TIMEOUT,
SCpnt, done);
}
+ return 0;
- return(0);
+done:
+ SCpnt->result = result;
+ done(SCpnt);
+ return 0;
}
/*
@@ -2683,14 +2697,27 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
}
-static int sbp2scsi_slave_configure (struct scsi_device *sdev)
+static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
{
- blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
+ ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = sdev;
+ return 0;
+}
+
+static int sbp2scsi_slave_configure(struct scsi_device *sdev)
+{
+ blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
return 0;
}
+static void sbp2scsi_slave_destroy(struct scsi_device *sdev)
+{
+ ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = NULL;
+ return;
+}
+
+
/*
* Called by scsi stack when something has really gone wrong. Usually
* called when a command has timed-out for some reason.
@@ -2705,7 +2732,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
SBP2_ERR("aborting sbp2 command");
scsi_print_command(SCpnt);
- if (scsi_id) {
+ if (sbp2util_node_is_available(scsi_id)) {
/*
* Right now, just return any matching command structures
@@ -2742,31 +2769,24 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
/*
* Called by scsi stack when something has really gone wrong.
*/
-static int __sbp2scsi_reset(struct scsi_cmnd *SCpnt)
+static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
{
struct scsi_id_instance_data *scsi_id =
(struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
+ unsigned long flags;
SBP2_ERR("reset requested");
- if (scsi_id) {
+ spin_lock_irqsave(SCpnt->device->host->host_lock, flags);
+
+ if (sbp2util_node_is_available(scsi_id)) {
SBP2_ERR("Generating sbp2 fetch agent reset");
sbp2_agent_reset(scsi_id, 0);
}
- return(SUCCESS);
-}
-
-static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
-{
- unsigned long flags;
- int rc;
-
- spin_lock_irqsave(SCpnt->device->host->host_lock, flags);
- rc = __sbp2scsi_reset(SCpnt);
spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags);
- return rc;
+ return SUCCESS;
}
static const char *sbp2scsi_info (struct Scsi_Host *host)
@@ -2817,7 +2837,9 @@ static struct scsi_host_template scsi_driver_template = {
.eh_device_reset_handler = sbp2scsi_reset,
.eh_bus_reset_handler = sbp2scsi_reset,
.eh_host_reset_handler = sbp2scsi_reset,
+ .slave_alloc = sbp2scsi_slave_alloc,
.slave_configure = sbp2scsi_slave_configure,
+ .slave_destroy = sbp2scsi_slave_destroy,
.this_id = -1,
.sg_tablesize = SG_ALL,
.use_clustering = ENABLE_CLUSTERING,
@@ -2837,7 +2859,8 @@ static int sbp2_module_init(void)
/* Module load debug option to force one command at a time (serializing I/O) */
if (serialize_io) {
- SBP2_ERR("Driver forced to serialize I/O (serialize_io = 1)");
+ SBP2_INFO("Driver forced to serialize I/O (serialize_io=1)");
+ SBP2_INFO("Try serialize_io=0 for better performance");
scsi_driver_template.can_queue = 1;
scsi_driver_template.cmd_per_lun = 1;
}
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 9d6facf2f78..11be9c9c82a 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -1571,4 +1571,3 @@ static int __init video1394_init_module (void)
module_init(video1394_init_module);
module_exit(video1394_exit_module);
-MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_VIDEO1394 * 16);
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 2bd8b1cc57c..e23836d0e21 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -412,8 +412,8 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
- pad = data_size - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
- if (pad > data_size || pad < 0)
+ pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
+ if (pad > IB_MGMT_RMPP_DATA || pad < 0)
pad = 0;
return hdr_size + rmpp_recv->seg_num * data_size - pad;
@@ -583,6 +583,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
struct ib_rmpp_mad *rmpp_mad;
int timeout;
+ u32 paylen;
rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
@@ -590,11 +591,9 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
if (mad_send_wr->seg_num == 1) {
rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
- rmpp_mad->rmpp_hdr.paylen_newwin =
- cpu_to_be32(mad_send_wr->total_seg *
- (sizeof(struct ib_rmpp_mad) -
- offsetof(struct ib_rmpp_mad, data)) -
- mad_send_wr->pad);
+ paylen = mad_send_wr->total_seg * IB_MGMT_RMPP_DATA -
+ mad_send_wr->pad;
+ rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad);
} else {
mad_send_wr->send_wr.num_sge = 2;
@@ -608,10 +607,8 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
if (mad_send_wr->seg_num == mad_send_wr->total_seg) {
rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
- rmpp_mad->rmpp_hdr.paylen_newwin =
- cpu_to_be32(sizeof(struct ib_rmpp_mad) -
- offsetof(struct ib_rmpp_mad, data) -
- mad_send_wr->pad);
+ paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad;
+ rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
}
/* 2 seconds for an ACK until we can find the packet lifetime */
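
A worked example of the paylen_newwin arithmetic above, assuming IB_MGMT_RMPP_DATA is the per-segment data size (220 bytes for the standard 256-byte MAD): a 500-byte payload needs total_seg = 3 segments and pad = 3 * 220 - 500 = 160, so the first segment advertises the whole payload and the last segment advertises only the bytes it actually carries. Sketch:

	u32 seg_size  = 220;			/* IB_MGMT_RMPP_DATA for a 256-byte MAD */
	u32 total_seg = 3, pad = 160;		/* 3 segments cover a 500-byte payload */
	u32 first_paylen = total_seg * seg_size - pad;	/* == 500, the whole payload */
	u32 last_paylen  = seg_size - pad;		/* == 60, bytes in the final segment */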
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 7c2f03057dd..a64d6b4dcc1 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -334,10 +334,11 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
ret = -EINVAL;
goto err_ah;
}
- /* Validate that management class can support RMPP */
+
+ /* Validate that the management class can support RMPP */
if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
hdr_len = offsetof(struct ib_sa_mad, data);
- data_len = length;
+ data_len = length - hdr_len;
} else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
(rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) {
hdr_len = offsetof(struct ib_vendor_mad, data);
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index b1897bed14a..cc124344dd2 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -69,6 +69,7 @@ struct ib_uverbs_event_file {
struct ib_uverbs_file {
struct kref ref;
+ struct semaphore mutex;
struct ib_uverbs_device *device;
struct ib_ucontext *ucontext;
struct ib_event_handler event_handler;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index e91ebde4648..562445165d2 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -76,8 +76,9 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
struct ib_uverbs_get_context_resp resp;
struct ib_udata udata;
struct ib_device *ibdev = file->device->ib_dev;
+ struct ib_ucontext *ucontext;
int i;
- int ret = in_len;
+ int ret;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -85,45 +86,56 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ down(&file->mutex);
+
+ if (file->ucontext) {
+ ret = -EINVAL;
+ goto err;
+ }
+
INIT_UDATA(&udata, buf + sizeof cmd,
(unsigned long) cmd.response + sizeof resp,
in_len - sizeof cmd, out_len - sizeof resp);
- file->ucontext = ibdev->alloc_ucontext(ibdev, &udata);
- if (IS_ERR(file->ucontext)) {
- ret = PTR_ERR(file->ucontext);
- file->ucontext = NULL;
- return ret;
- }
+ ucontext = ibdev->alloc_ucontext(ibdev, &udata);
+ if (IS_ERR(ucontext)) {
+ ret = PTR_ERR(ucontext);
+ goto err;
+ }
- file->ucontext->device = ibdev;
- INIT_LIST_HEAD(&file->ucontext->pd_list);
- INIT_LIST_HEAD(&file->ucontext->mr_list);
- INIT_LIST_HEAD(&file->ucontext->mw_list);
- INIT_LIST_HEAD(&file->ucontext->cq_list);
- INIT_LIST_HEAD(&file->ucontext->qp_list);
- INIT_LIST_HEAD(&file->ucontext->srq_list);
- INIT_LIST_HEAD(&file->ucontext->ah_list);
- spin_lock_init(&file->ucontext->lock);
+ ucontext->device = ibdev;
+ INIT_LIST_HEAD(&ucontext->pd_list);
+ INIT_LIST_HEAD(&ucontext->mr_list);
+ INIT_LIST_HEAD(&ucontext->mw_list);
+ INIT_LIST_HEAD(&ucontext->cq_list);
+ INIT_LIST_HEAD(&ucontext->qp_list);
+ INIT_LIST_HEAD(&ucontext->srq_list);
+ INIT_LIST_HEAD(&ucontext->ah_list);
resp.async_fd = file->async_file.fd;
for (i = 0; i < file->device->num_comp; ++i)
if (copy_to_user((void __user *) (unsigned long) cmd.cq_fd_tab +
i * sizeof (__u32),
- &file->comp_file[i].fd, sizeof (__u32)))
- goto err;
+ &file->comp_file[i].fd, sizeof (__u32))) {
+ ret = -EFAULT;
+ goto err_free;
+ }
if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
- goto err;
+ &resp, sizeof resp)) {
+ ret = -EFAULT;
+ goto err_free;
+ }
+
+ file->ucontext = ucontext;
+ up(&file->mutex);
return in_len;
-err:
- ibdev->dealloc_ucontext(file->ucontext);
- file->ucontext = NULL;
+err_free:
+ ibdev->dealloc_ucontext(ucontext);
- return -EFAULT;
+err:
+ up(&file->mutex);
+ return ret;
}
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
@@ -352,9 +364,9 @@ retry:
if (ret)
goto err_pd;
- spin_lock_irq(&file->ucontext->lock);
+ down(&file->mutex);
list_add_tail(&uobj->list, &file->ucontext->pd_list);
- spin_unlock_irq(&file->ucontext->lock);
+ up(&file->mutex);
memset(&resp, 0, sizeof resp);
resp.pd_handle = uobj->id;
@@ -368,9 +380,9 @@ retry:
return in_len;
err_list:
- spin_lock_irq(&file->ucontext->lock);
+ down(&file->mutex);
list_del(&uobj->list);
- spin_unlock_irq(&file->ucontext->lock);
+ up(&file->mutex);
down(&ib_uverbs_idr_mutex);
idr_remove(&ib_uverbs_pd_idr, uobj->id);
@@ -410,9 +422,9 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle);
- spin_lock_irq(&file->ucontext->lock);
+ down(&file->mutex);
list_del(&uobj->list);
- spin_unlock_irq(&file->ucontext->lock);
+ up(&file->mutex);
kfree(uobj);
@@ -512,9 +524,9 @@ retry:
resp.mr_handle = obj->uobject.id;
- spin_lock_irq(&file->ucontext->lock);
+ down(&file->mutex);
list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
- spin_unlock_irq(&file->ucontext->lock);
+ up(&file->mutex);
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp)) {
@@ -527,9 +539,9 @@ retry:
return in_len;
err_list:
- spin_lock_irq(&file->ucontext->lock);
+ down(&file->mutex);
list_del(&obj->uobject.list);
- spin_unlock_irq(&file->ucontext->lock);
+ up(&file->mutex);
err_unreg:
ib_dereg_mr(mr);
@@ -570,9 +582,9 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle);
- spin_lock_irq(&file->ucontext->lock);
+ down(&file->mutex);
list_del(&memobj->uobject.list);
- spin_unlock_irq(&file->ucontext->lock);
+ up(&file->mutex);
ib_umem_release(file->device->ib_dev, &memobj->umem);
kfree(memobj);
@@ -647,9 +659,9 @@ retry:
if (ret)
goto err_cq;
- spin_lock_irq(&file->ucontext->lock);
+ down(&file->mutex);
list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
- spin_unlock_irq(&file->ucontext->lock);
+ up(&file->mutex);
memset(&resp, 0, sizeof resp);
resp.cq_handle = uobj->uobject.id;
@@ -664,9 +676,9 @@ retry:
return in_len;
err_list:
- spin_lock_irq(&file->ucontext->lock);
+ down(&file->mutex);
list_del(&uobj->uobject.list);
- spin_unlock_irq(&file->ucontext->lock);
+ up(&file->mutex);
down(&ib_uverbs_idr_mutex);
idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);
@@ -712,9 +724,9 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);
- spin_lock_irq(&file->ucontext->lock);
+ down(&file->mutex);
list_del(&uobj->uobject.list);
- spin_unlock_irq(&file->ucontext->lock);
+ up(&file->mutex);
spin_lock_irq(&file->comp_file[0].lock);
list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
@@ -847,9 +859,9 @@ retry:
resp.qp_handle = uobj->uobject.id;
- spin_lock_irq(&file->ucontext->lock);
+ down(&file->mutex);
list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);
- spin_unlock_irq(&file->ucontext->lock);
+ up(&file->mutex);
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp)) {
@@ -862,9 +874,9 @@ retry:
return in_len;
err_list:
- spin_lock_irq(&file->ucontext->lock);
+ down(&file->mutex);
list_del(&uobj->uobject.list);
- spin_unlock_irq(&file->ucontext->lock);
+ up(&file->mutex);
err_destroy:
ib_destroy_qp(qp);
@@ -989,9 +1001,9 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);
- spin_lock_irq(&file->ucontext->lock);
+ down(&file->mutex);
list_del(&uobj->uobject.list);
- spin_unlock_irq(&file->ucontext->lock);
+ up(&file->mutex);
spin_lock_irq(&file->async_file.lock);
list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
@@ -1136,9 +1148,9 @@ retry:
resp.srq_handle = uobj->uobject.id;
- spin_lock_irq(&file->ucontext->lock);
+ down(&file->mutex);
list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
- spin_unlock_irq(&file->ucontext->lock);
+ up(&file->mutex);
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp)) {
@@ -1151,9 +1163,9 @@ retry:
return in_len;
err_list:
- spin_lock_irq(&file->ucontext->lock);
+ down(&file->mutex);
list_del(&uobj->uobject.list);
- spin_unlock_irq(&file->ucontext->lock);
+ up(&file->mutex);
err_destroy:
ib_destroy_srq(srq);
@@ -1227,9 +1239,9 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);
- spin_lock_irq(&file->ucontext->lock);
+ down(&file->mutex);
list_del(&uobj->uobject.list);
- spin_unlock_irq(&file->ucontext->lock);
+ up(&file->mutex);
spin_lock_irq(&file->async_file.lock);
list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index ce5bdb7af30..12511808de2 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -448,7 +448,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
if (hdr.in_words * 4 != count)
return -EINVAL;
- if (hdr.command < 0 || hdr.command >= ARRAY_SIZE(uverbs_cmd_table))
+ if (hdr.command < 0 ||
+ hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
+ !uverbs_cmd_table[hdr.command])
return -EINVAL;
if (!file->ucontext &&
@@ -484,27 +486,29 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
file = kmalloc(sizeof *file +
(dev->num_comp - 1) * sizeof (struct ib_uverbs_event_file),
GFP_KERNEL);
- if (!file)
- return -ENOMEM;
+ if (!file) {
+ ret = -ENOMEM;
+ goto err;
+ }
file->device = dev;
kref_init(&file->ref);
+ init_MUTEX(&file->mutex);
file->ucontext = NULL;
+ kref_get(&file->ref);
ret = ib_uverbs_event_init(&file->async_file, file);
if (ret)
- goto err;
+ goto err_kref;
file->async_file.is_async = 1;
- kref_get(&file->ref);
-
for (i = 0; i < dev->num_comp; ++i) {
+ kref_get(&file->ref);
ret = ib_uverbs_event_init(&file->comp_file[i], file);
if (ret)
goto err_async;
- kref_get(&file->ref);
file->comp_file[i].is_async = 0;
}
@@ -524,9 +528,16 @@ err_async:
ib_uverbs_event_release(&file->async_file);
-err:
+err_kref:
+ /*
+ * One extra kref_put() because we took a reference before the
+ * event file creation that failed and got us here.
+ */
+ kref_put(&file->ref, ib_uverbs_release_file);
kref_put(&file->ref, ib_uverbs_release_file);
+err:
+ module_put(dev->ib_dev->owner);
return ret;
}
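
The open() path above follows a simple rule: take a kref before handing the file to each event-file init (whose release path will drop it later), and if that init fails, drop the extra reference yourself -- hence the second kref_put() under the new err_kref label. A minimal sketch of the pattern, names hypothetical:

	kref_init(&obj->ref);			/* reference held by open() itself */

	kref_get(&obj->ref);			/* reference the event file will own */
	ret = event_file_init(&obj->ev, obj);
	if (ret) {
		/* init failed, so nothing will ever release the reference we
		 * just took on its behalf -- drop it here, then drop our own */
		kref_put(&obj->ref, my_obj_release);
		kref_put(&obj->ref, my_obj_release);
		return ret;
	}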
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index cc758a2d2bc..f6a8ac02655 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -605,7 +605,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
err = -EINVAL;
goto out;
}
- for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i, ++nent) {
+ for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i) {
if (virt != -1) {
pages[nent * 2] = cpu_to_be64(virt);
virt += 1 << lg;
@@ -616,7 +616,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
ts += 1 << (lg - 10);
++tc;
- if (nent == MTHCA_MAILBOX_SIZE / 16) {
+ if (++nent == MTHCA_MAILBOX_SIZE / 16) {
err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
CMD_TIME_CLASS_B, status);
if (err || *status)
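
The mthca_map_cmd fix above counts the entry just written (++nent) before testing whether the mailbox is full, instead of relying on the loop header to bump the counter afterwards. The general batching idiom, sketched with hypothetical names:

	int n = 0;

	for (i = 0; i < total; ++i) {
		buf[n] = make_entry(i);
		if (++n == BATCH_SIZE) {	/* counts the entry just stored */
			flush(buf, n);
			n = 0;
		}
	}
	if (n)					/* flush the partial final batch */
		flush(buf, n);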
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 18f0981eb0c..c81fa8e975e 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -476,12 +476,8 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
int i;
u8 status;
- /* Make sure EQ size is aligned to a power of 2 size. */
- for (i = 1; i < nent; i <<= 1)
- ; /* nothing */
- nent = i;
-
- eq->dev = dev;
+ eq->dev = dev;
+ eq->nent = roundup_pow_of_two(max(nent, 2));
eq->page_list = kmalloc(npages * sizeof *eq->page_list,
GFP_KERNEL);
@@ -512,7 +508,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
memset(eq->page_list[i].buf, 0, PAGE_SIZE);
}
- for (i = 0; i < nent; ++i)
+ for (i = 0; i < eq->nent; ++i)
set_eqe_hw(get_eqe(eq, i));
eq->eqn = mthca_alloc(&dev->eq_table.alloc);
@@ -528,8 +524,6 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
if (err)
goto err_out_free_eq;
- eq->nent = nent;
-
memset(eq_context, 0, sizeof *eq_context);
eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK |
MTHCA_EQ_OWNER_HW |
@@ -538,7 +532,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
if (mthca_is_memfree(dev))
eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);
- eq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
+ eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
if (mthca_is_memfree(dev)) {
eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
} else {
@@ -569,7 +563,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
dev->eq_table.arm_mask |= eq->eqn_mask;
mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
- eq->eqn, nent);
+ eq->eqn, eq->nent);
return err;
@@ -842,7 +836,7 @@ int __devinit mthca_init_eq_table(struct mthca_dev *dev)
dev->eq_table.clr_mask =
swab32(1 << (dev->eq_table.inta_pin & 31));
dev->eq_table.clr_int = dev->clr_base +
- (dev->eq_table.inta_pin < 31 ? 4 : 0);
+ (dev->eq_table.inta_pin < 32 ? 4 : 0);
}
dev->eq_table.arm_mask = 0;
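
The EQ-size hunk above replaces the open-coded rounding loop with roundup_pow_of_two(max(nent, 2)), which returns its argument if it is already a power of two and otherwise the next power of two, with a floor of 2 entries: 1 -> 2, 100 -> 128, 128 -> 128. Equivalent arithmetic, for illustration only:

	static inline unsigned int round_up_pow2(unsigned int n)
	{
		unsigned int p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	/* eq->nent = round_up_pow2(max(nent, 2));  e.g. 1 -> 2, 100 -> 128, 128 -> 128 */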
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 1827400f189..7bd7a4bec7b 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -290,7 +290,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
int i;
u8 status;
- num_icm = obj_size * nobj / MTHCA_TABLE_CHUNK_SIZE;
+ num_icm = (obj_size * nobj + MTHCA_TABLE_CHUNK_SIZE - 1) / MTHCA_TABLE_CHUNK_SIZE;
table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
if (!table)
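
The num_icm change above is plain ceiling division: a chunk that is only partially used still has to be allocated. With a hypothetical chunk size of 256 bytes and 1000 bytes of objects, the old expression 1000 / 256 gives 3 chunks and silently drops the tail, while (1000 + 256 - 1) / 256 gives the required 4. Sketch of the idiom:

	/* round the chunk count up, not down (the DIV_ROUND_UP idiom) */
	static inline unsigned long chunks_needed(unsigned long total_bytes,
						  unsigned long chunk_size)
	{
		return (total_bytes + chunk_size - 1) / chunk_size;
	}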
@@ -529,12 +529,25 @@ int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db)
goto found;
}
+ for (i = start; i != end; i += dir)
+ if (!dev->db_tab->page[i].db_rec) {
+ page = dev->db_tab->page + i;
+ goto alloc;
+ }
+
if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
ret = -ENOMEM;
goto out;
}
+ if (group == 0)
+ ++dev->db_tab->max_group1;
+ else
+ --dev->db_tab->min_group2;
+
page = dev->db_tab->page + end;
+
+alloc:
page->db_rec = dma_alloc_coherent(&dev->pdev->dev, 4096,
&page->mapping, GFP_KERNEL);
if (!page->db_rec) {
@@ -554,10 +567,6 @@ int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db)
}
bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);
- if (group == 0)
- ++dev->db_tab->max_group1;
- else
- --dev->db_tab->min_group2;
found:
j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 1c1c2e23087..3f5319a4657 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -84,7 +84,7 @@ static int mthca_query_device(struct ib_device *ibdev,
props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
0xffffff;
props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
- props->hw_ver = be16_to_cpup((__be16 *) (out_mad->data + 32));
+ props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
memcpy(&props->node_guid, out_mad->data + 12, 8);
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index bcef06bf15e..5fa00669f9b 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -227,7 +227,6 @@ static void mthca_wq_init(struct mthca_wq *wq)
wq->last_comp = wq->max - 1;
wq->head = 0;
wq->tail = 0;
- wq->last = NULL;
}
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
@@ -687,7 +686,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
}
if (attr_mask & IB_QP_TIMEOUT) {
- qp_context->pri_path.ackto = attr->timeout;
+ qp_context->pri_path.ackto = attr->timeout << 3;
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
}
@@ -1103,6 +1102,9 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
}
}
+ qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
+ qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
+
return 0;
}
@@ -1583,15 +1585,13 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
goto out;
}
- if (prev_wqe) {
- ((struct mthca_next_seg *) prev_wqe)->nda_op =
- cpu_to_be32(((ind << qp->sq.wqe_shift) +
- qp->send_wqe_offset) |
- mthca_opcode[wr->opcode]);
- wmb();
- ((struct mthca_next_seg *) prev_wqe)->ee_nds =
- cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
- }
+ ((struct mthca_next_seg *) prev_wqe)->nda_op =
+ cpu_to_be32(((ind << qp->sq.wqe_shift) +
+ qp->send_wqe_offset) |
+ mthca_opcode[wr->opcode]);
+ wmb();
+ ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+ cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
if (!size0) {
size0 = size;
@@ -1688,13 +1688,11 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
qp->wrid[ind] = wr->wr_id;
- if (likely(prev_wqe)) {
- ((struct mthca_next_seg *) prev_wqe)->nda_op =
- cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
- wmb();
- ((struct mthca_next_seg *) prev_wqe)->ee_nds =
- cpu_to_be32(MTHCA_NEXT_DBD | size);
- }
+ ((struct mthca_next_seg *) prev_wqe)->nda_op =
+ cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
+ wmb();
+ ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+ cpu_to_be32(MTHCA_NEXT_DBD | size);
if (!size0)
size0 = size;
@@ -1905,15 +1903,13 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
goto out;
}
- if (likely(prev_wqe)) {
- ((struct mthca_next_seg *) prev_wqe)->nda_op =
- cpu_to_be32(((ind << qp->sq.wqe_shift) +
- qp->send_wqe_offset) |
- mthca_opcode[wr->opcode]);
- wmb();
- ((struct mthca_next_seg *) prev_wqe)->ee_nds =
- cpu_to_be32(MTHCA_NEXT_DBD | size);
- }
+ ((struct mthca_next_seg *) prev_wqe)->nda_op =
+ cpu_to_be32(((ind << qp->sq.wqe_shift) +
+ qp->send_wqe_offset) |
+ mthca_opcode[wr->opcode]);
+ wmb();
+ ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+ cpu_to_be32(MTHCA_NEXT_DBD | size);
if (!size0) {
size0 = size;
@@ -2127,5 +2123,6 @@ void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
for (i = 0; i < 2; ++i)
mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
+ mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
mthca_alloc_cleanup(&dev->qp_table.alloc);
}
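
The IB_QP_TIMEOUT hunk above stores attr->timeout << 3 because the QP-context byte appears to keep the 5-bit local-ACK-timeout exponent in its upper five bits; the timeout on the wire is 4.096 us * 2^exponent, so an exponent of 14 (roughly 67 ms) is written as 14 << 3 = 0x70:

	/* illustration only: pack the 5-bit ack-timeout exponent into bits 7:3 */
	u8 timeout = 14;			/* 4.096us * 2^14 is about 67ms */
	u8 ackto   = timeout << 3;		/* 0x70 as stored in the QP context */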
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 75cd2d84ef1..18998d48c53 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -172,6 +172,8 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
}
+ srq->last = get_wqe(srq, srq->max - 1);
+
return 0;
}
@@ -189,7 +191,6 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
srq->max = attr->max_wr;
srq->max_gs = attr->max_sge;
- srq->last = NULL;
srq->counter = 0;
if (mthca_is_memfree(dev))
@@ -409,7 +410,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
mthca_err(dev, "SRQ %06x full\n", srq->srqn);
err = -ENOMEM;
*bad_wr = wr;
- return nreq;
+ break;
}
wqe = get_wqe(srq, ind);
@@ -427,7 +428,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
err = -EINVAL;
*bad_wr = wr;
srq->last = prev_wqe;
- return nreq;
+ break;
}
for (i = 0; i < wr->num_sge; ++i) {
@@ -446,20 +447,16 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
((struct mthca_data_seg *) wqe)->addr = 0;
}
- if (likely(prev_wqe)) {
- ((struct mthca_next_seg *) prev_wqe)->nda_op =
- cpu_to_be32((ind << srq->wqe_shift) | 1);
- wmb();
- ((struct mthca_next_seg *) prev_wqe)->ee_nds =
- cpu_to_be32(MTHCA_NEXT_DBD);
- }
+ ((struct mthca_next_seg *) prev_wqe)->nda_op =
+ cpu_to_be32((ind << srq->wqe_shift) | 1);
+ wmb();
+ ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+ cpu_to_be32(MTHCA_NEXT_DBD);
srq->wrid[ind] = wr->wr_id;
srq->first_free = next_ind;
}
- return nreq;
-
if (likely(nreq)) {
__be32 doorbell[2];
@@ -503,7 +500,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
mthca_err(dev, "SRQ %06x full\n", srq->srqn);
err = -ENOMEM;
*bad_wr = wr;
- return nreq;
+ break;
}
wqe = get_wqe(srq, ind);
@@ -519,7 +516,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
if (unlikely(wr->num_sge > srq->max_gs)) {
err = -EINVAL;
*bad_wr = wr;
- return nreq;
+ break;
}
for (i = 0; i < wr->num_sge; ++i) {
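
In the SRQ post-receive paths above, the hunks replace early "return nreq" exits with "break" and drop the stray "return nreq" sitting in front of the doorbell write, so the function always falls through and rings the doorbell for whatever was actually posted before the error. The shape of that loop, sketched with hypothetical names:

	int nreq = 0, err = 0;

	for (; wr; wr = wr->next) {
		if (queue_full(srq) || wr->num_sge > srq->max_gs) {
			err = -ENOMEM;		/* or -EINVAL */
			*bad_wr = wr;
			break;			/* keep the WRs already posted */
		}
		post_one(srq, wr);
		++nreq;
	}

	if (nreq)
		ring_doorbell(srq, nreq);	/* tell the HCA about what was posted */
	return err;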
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index bea960b8191..4ea1c1ca85b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -257,7 +257,7 @@ void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
void ipoib_mcast_restart_task(void *dev_ptr);
int ipoib_mcast_start_thread(struct net_device *dev);
-int ipoib_mcast_stop_thread(struct net_device *dev);
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index ef0e3894863..f7440096b5e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -432,7 +432,7 @@ int ipoib_ib_dev_down(struct net_device *dev)
flush_workqueue(ipoib_workqueue);
}
- ipoib_mcast_stop_thread(dev);
+ ipoib_mcast_stop_thread(dev, 1);
/*
* Flush the multicast groups first so we stop any multicast joins. The
@@ -599,7 +599,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
ipoib_dbg(priv, "cleaning up ib_dev\n");
- ipoib_mcast_stop_thread(dev);
+ ipoib_mcast_stop_thread(dev, 1);
/* Delete the broadcast address and the local address */
ipoib_mcast_dev_down(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 49d120d2b92..704f48e0b6a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1005,6 +1005,7 @@ debug_failed:
register_failed:
ib_unregister_event_handler(&priv->event_handler);
+ flush_scheduled_work();
event_failed:
ipoib_dev_cleanup(priv->dev);
@@ -1057,6 +1058,7 @@ static void ipoib_remove_one(struct ib_device *device)
list_for_each_entry_safe(priv, tmp, dev_list, list) {
ib_unregister_event_handler(&priv->event_handler);
+ flush_scheduled_work();
unregister_netdev(priv->dev);
ipoib_dev_cleanup(priv->dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index aca7aea18a6..36ce29836bf 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -145,7 +145,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
mcast->dev = dev;
mcast->created = jiffies;
- mcast->backoff = HZ;
+ mcast->backoff = 1;
mcast->logcount = 0;
INIT_LIST_HEAD(&mcast->list);
@@ -396,7 +396,7 @@ static void ipoib_mcast_join_complete(int status,
IPOIB_GID_ARG(mcast->mcmember.mgid), status);
if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) {
- mcast->backoff = HZ;
+ mcast->backoff = 1;
down(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
queue_work(ipoib_workqueue, &priv->mcast_task);
@@ -496,7 +496,7 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
queue_delayed_work(ipoib_workqueue,
&priv->mcast_task,
- mcast->backoff);
+ mcast->backoff * HZ);
up(&mcast_mutex);
} else
mcast->query_id = ret;
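
The multicast hunks above change mcast->backoff from a jiffies value (HZ) to a count of seconds, converting to jiffies only at the queue_delayed_work() call. Keeping the delay in one unit and scaling it once at the API boundary avoids double-scaling; a minimal sketch, names hypothetical:

	unsigned int backoff_secs = 1;				/* retry delay in seconds */

	queue_delayed_work(wq, &retry_work, backoff_secs * HZ);	/* jiffies only here */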
@@ -598,7 +598,7 @@ int ipoib_mcast_start_thread(struct net_device *dev)
return 0;
}
-int ipoib_mcast_stop_thread(struct net_device *dev)
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_mcast *mcast;
@@ -610,7 +610,8 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
cancel_delayed_work(&priv->mcast_task);
up(&mcast_mutex);
- flush_workqueue(ipoib_workqueue);
+ if (flush)
+ flush_workqueue(ipoib_workqueue);
if (priv->broadcast && priv->broadcast->query) {
ib_sa_cancel_query(priv->broadcast->query_id, priv->broadcast->query);
@@ -832,7 +833,7 @@ void ipoib_mcast_restart_task(void *dev_ptr)
ipoib_dbg_mcast(priv, "restarting multicast task\n");
- ipoib_mcast_stop_thread(dev);
+ ipoib_mcast_stop_thread(dev, 0);
spin_lock_irqsave(&priv->lock, flags);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 88636a20452..14ae5583e19 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -308,6 +308,7 @@ static struct input_device_id *input_match_device(struct input_device_id *id, st
MATCH_BIT(ledbit, LED_MAX);
MATCH_BIT(sndbit, SND_MAX);
MATCH_BIT(ffbit, FF_MAX);
+ MATCH_BIT(swbit, SW_MAX);
return id;
}
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index e55dee39077..444f7756fee 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -132,6 +132,17 @@ config KEYBOARD_CORGI
To compile this driver as a module, choose M here: the
module will be called corgikbd.
+config KEYBOARD_SPITZ
+ tristate "Spitz keyboard"
+ depends on PXA_SHARPSL
+ default y
+ help
+ Say Y here to enable the keyboard on the Sharp Zaurus SL-C1000,
+ SL-C3000 and SL-C3100 series of PDAs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called spitzkbd.
+
config KEYBOARD_MAPLE
tristate "Maple bus keyboard"
depends on SH_DREAMCAST && MAPLE
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index b02eeceea3c..9ce0b87f2fa 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_KEYBOARD_LOCOMO) += locomokbd.o
obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
obj-$(CONFIG_KEYBOARD_98KBD) += 98kbd.o
obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o
+obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o
obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
diff --git a/drivers/input/keyboard/spitzkbd.c b/drivers/input/keyboard/spitzkbd.c
new file mode 100644
index 00000000000..1714045a182
--- /dev/null
+++ b/drivers/input/keyboard/spitzkbd.c
@@ -0,0 +1,478 @@
+/*
+ * Keyboard driver for Sharp Spitz, Borzoi and Akita (SL-Cxx00 series)
+ *
+ * Copyright (c) 2005 Richard Purdie
+ *
+ * Based on corgikbd.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/irq.h>
+
+#include <asm/arch/spitz.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/pxa-regs.h>
+
+#define KB_ROWS 7
+#define KB_COLS 11
+#define KB_ROWMASK(r) (1 << (r))
+#define SCANCODE(r,c) (((r)<<4) + (c) + 1)
+#define NR_SCANCODES ((KB_ROWS<<4) + 1)
+
+#define HINGE_SCAN_INTERVAL (150) /* ms */
+
+#define SPITZ_KEY_CALENDER KEY_F1
+#define SPITZ_KEY_ADDRESS KEY_F2
+#define SPITZ_KEY_FN KEY_F3
+#define SPITZ_KEY_CANCEL KEY_F4
+#define SPITZ_KEY_EXOK KEY_F5
+#define SPITZ_KEY_EXCANCEL KEY_F6
+#define SPITZ_KEY_EXJOGDOWN KEY_F7
+#define SPITZ_KEY_EXJOGUP KEY_F8
+#define SPITZ_KEY_JAP1 KEY_LEFTALT
+#define SPITZ_KEY_JAP2 KEY_RIGHTCTRL
+#define SPITZ_KEY_SYNC KEY_F9
+#define SPITZ_KEY_MAIL KEY_F10
+#define SPITZ_KEY_OK KEY_F11
+#define SPITZ_KEY_MENU KEY_F12
+
+static unsigned char spitzkbd_keycode[NR_SCANCODES] = {
+ 0, /* 0 */
+ KEY_LEFTCTRL, KEY_1, KEY_3, KEY_5, KEY_6, KEY_7, KEY_9, KEY_0, KEY_BACKSPACE, SPITZ_KEY_EXOK, SPITZ_KEY_EXCANCEL, 0, 0, 0, 0, 0, /* 1-16 */
+ 0, KEY_2, KEY_4, KEY_R, KEY_Y, KEY_8, KEY_I, KEY_O, KEY_P, SPITZ_KEY_EXJOGDOWN, SPITZ_KEY_EXJOGUP, 0, 0, 0, 0, 0, /* 17-32 */
+ KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */
+ SPITZ_KEY_CALENDER, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, /* 49-64 */
+ SPITZ_KEY_ADDRESS, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, /* 65-80 */
+ SPITZ_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, SPITZ_KEY_FN, 0, 0, 0, 0, 0, /* 81-96 */
+ KEY_SYSRQ, SPITZ_KEY_JAP1, SPITZ_KEY_JAP2, SPITZ_KEY_CANCEL, SPITZ_KEY_OK, SPITZ_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0 /* 97-112 */
+};
+
+static int spitz_strobes[] = {
+ SPITZ_GPIO_KEY_STROBE0,
+ SPITZ_GPIO_KEY_STROBE1,
+ SPITZ_GPIO_KEY_STROBE2,
+ SPITZ_GPIO_KEY_STROBE3,
+ SPITZ_GPIO_KEY_STROBE4,
+ SPITZ_GPIO_KEY_STROBE5,
+ SPITZ_GPIO_KEY_STROBE6,
+ SPITZ_GPIO_KEY_STROBE7,
+ SPITZ_GPIO_KEY_STROBE8,
+ SPITZ_GPIO_KEY_STROBE9,
+ SPITZ_GPIO_KEY_STROBE10,
+};
+
+static int spitz_senses[] = {
+ SPITZ_GPIO_KEY_SENSE0,
+ SPITZ_GPIO_KEY_SENSE1,
+ SPITZ_GPIO_KEY_SENSE2,
+ SPITZ_GPIO_KEY_SENSE3,
+ SPITZ_GPIO_KEY_SENSE4,
+ SPITZ_GPIO_KEY_SENSE5,
+ SPITZ_GPIO_KEY_SENSE6,
+};
+
+struct spitzkbd {
+ unsigned char keycode[ARRAY_SIZE(spitzkbd_keycode)];
+ struct input_dev input;
+ char phys[32];
+
+ spinlock_t lock;
+ struct timer_list timer;
+ struct timer_list htimer;
+
+ unsigned int suspended;
+ unsigned long suspend_jiffies;
+};
+
+#define KB_DISCHARGE_DELAY 10
+#define KB_ACTIVATE_DELAY 10
+
+/* Helper functions for reading the keyboard matrix
+ * Note: We should really be using pxa_gpio_mode to alter GPDR but it
+ * requires a function call per GPIO bit which is excessive
+ * when we need to access 11 bits at once, multiple times.
+ * These functions must be called within local_irq_save()/local_irq_restore()
+ * or similar.
+ */
+static inline void spitzkbd_discharge_all(void)
+{
+ /* STROBE All HiZ */
+ GPCR0 = SPITZ_GPIO_G0_STROBE_BIT;
+ GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
+ GPCR1 = SPITZ_GPIO_G1_STROBE_BIT;
+ GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
+ GPCR2 = SPITZ_GPIO_G2_STROBE_BIT;
+ GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
+ GPCR3 = SPITZ_GPIO_G3_STROBE_BIT;
+ GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
+}
+
+static inline void spitzkbd_activate_all(void)
+{
+ /* STROBE ALL -> High */
+ GPSR0 = SPITZ_GPIO_G0_STROBE_BIT;
+ GPDR0 |= SPITZ_GPIO_G0_STROBE_BIT;
+ GPSR1 = SPITZ_GPIO_G1_STROBE_BIT;
+ GPDR1 |= SPITZ_GPIO_G1_STROBE_BIT;
+ GPSR2 = SPITZ_GPIO_G2_STROBE_BIT;
+ GPDR2 |= SPITZ_GPIO_G2_STROBE_BIT;
+ GPSR3 = SPITZ_GPIO_G3_STROBE_BIT;
+ GPDR3 |= SPITZ_GPIO_G3_STROBE_BIT;
+
+ udelay(KB_DISCHARGE_DELAY);
+
+ /* Clear any interrupts we may have triggered when altering the GPIO lines */
+ GEDR0 = SPITZ_GPIO_G0_SENSE_BIT;
+ GEDR1 = SPITZ_GPIO_G1_SENSE_BIT;
+ GEDR2 = SPITZ_GPIO_G2_SENSE_BIT;
+ GEDR3 = SPITZ_GPIO_G3_SENSE_BIT;
+}
+
+static inline void spitzkbd_activate_col(int col)
+{
+ int gpio = spitz_strobes[col];
+ GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
+ GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
+ GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
+ GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
+ GPSR(gpio) = GPIO_bit(gpio);
+ GPDR(gpio) |= GPIO_bit(gpio);
+}
+
+static inline void spitzkbd_reset_col(int col)
+{
+ int gpio = spitz_strobes[col];
+ GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
+ GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
+ GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
+ GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
+ GPCR(gpio) = GPIO_bit(gpio);
+ GPDR(gpio) |= GPIO_bit(gpio);
+}
+
+static inline int spitzkbd_get_row_status(int col)
+{
+ return ((GPLR0 >> 12) & 0x01) | ((GPLR0 >> 16) & 0x02)
+ | ((GPLR2 >> 25) & 0x04) | ((GPLR1 << 1) & 0x08)
+ | ((GPLR1 >> 0) & 0x10) | ((GPLR1 >> 1) & 0x60);
+}
+
+/*
+ * The spitz keyboard only generates interrupts when a key is pressed.
+ * When a key is pressed, we enable a timer which then scans the
+ * keyboard to detect when the key is released.
+ */
+
+/* Scan the hardware keyboard and push any changes up through the input layer */
+static void spitzkbd_scankeyboard(struct spitzkbd *spitzkbd_data, struct pt_regs *regs)
+{
+ unsigned int row, col, rowd;
+ unsigned long flags;
+ unsigned int num_pressed, pwrkey = ((GPLR(SPITZ_GPIO_ON_KEY) & GPIO_bit(SPITZ_GPIO_ON_KEY)) != 0);
+
+ if (spitzkbd_data->suspended)
+ return;
+
+ spin_lock_irqsave(&spitzkbd_data->lock, flags);
+
+ if (regs)
+ input_regs(&spitzkbd_data->input, regs);
+
+ num_pressed = 0;
+ for (col = 0; col < KB_COLS; col++) {
+ /*
+ * Discharge the output driver capacitance
+ * in the keyboard matrix. (Yes it is significant..)
+ */
+
+ spitzkbd_discharge_all();
+ udelay(KB_DISCHARGE_DELAY);
+
+ spitzkbd_activate_col(col);
+ udelay(KB_ACTIVATE_DELAY);
+
+ rowd = spitzkbd_get_row_status(col);
+ for (row = 0; row < KB_ROWS; row++) {
+ unsigned int scancode, pressed;
+
+ scancode = SCANCODE(row, col);
+ pressed = rowd & KB_ROWMASK(row);
+
+ input_report_key(&spitzkbd_data->input, spitzkbd_data->keycode[scancode], pressed);
+
+ if (pressed)
+ num_pressed++;
+ }
+ spitzkbd_reset_col(col);
+ }
+
+ spitzkbd_activate_all();
+
+ input_report_key(&spitzkbd_data->input, SPITZ_KEY_SYNC, (GPLR(SPITZ_GPIO_SYNC) & GPIO_bit(SPITZ_GPIO_SYNC)) != 0 );
+ input_report_key(&spitzkbd_data->input, KEY_SUSPEND, pwrkey);
+
+ if (pwrkey && time_after(jiffies, spitzkbd_data->suspend_jiffies + msecs_to_jiffies(1000))) {
+ input_event(&spitzkbd_data->input, EV_PWR, KEY_SUSPEND, 1);
+ spitzkbd_data->suspend_jiffies = jiffies;
+ }
+
+ input_sync(&spitzkbd_data->input);
+
+ /* if any keys are pressed, enable the timer */
+ if (num_pressed)
+ mod_timer(&spitzkbd_data->timer, jiffies + msecs_to_jiffies(100));
+
+ spin_unlock_irqrestore(&spitzkbd_data->lock, flags);
+}
+
+/*
+ * spitz keyboard interrupt handler.
+ */
+static irqreturn_t spitzkbd_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct spitzkbd *spitzkbd_data = dev_id;
+
+ if (!timer_pending(&spitzkbd_data->timer)) {
+ /** wait chattering delay **/
+ udelay(20);
+ spitzkbd_scankeyboard(spitzkbd_data, regs);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * spitz timer checking for released keys
+ */
+static void spitzkbd_timer_callback(unsigned long data)
+{
+ struct spitzkbd *spitzkbd_data = (struct spitzkbd *) data;
+ spitzkbd_scankeyboard(spitzkbd_data, NULL);
+}
+
+/*
+ * The hinge switches generate an interrupt.
+ * We debounce the switches and pass them to the input system.
+ */
+
+static irqreturn_t spitzkbd_hinge_isr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct spitzkbd *spitzkbd_data = dev_id;
+
+ if (!timer_pending(&spitzkbd_data->htimer))
+ mod_timer(&spitzkbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
+
+ return IRQ_HANDLED;
+}
+
+#define HINGE_STABLE_COUNT 2
+static int sharpsl_hinge_state;
+static int hinge_count;
+
+static void spitzkbd_hinge_timer(unsigned long data)
+{
+ struct spitzkbd *spitzkbd_data = (struct spitzkbd *) data;
+ unsigned long state;
+ unsigned long flags;
+
+ state = GPLR(SPITZ_GPIO_SWA) & (GPIO_bit(SPITZ_GPIO_SWA)|GPIO_bit(SPITZ_GPIO_SWB));
+ if (state != sharpsl_hinge_state) {
+ hinge_count = 0;
+ sharpsl_hinge_state = state;
+ } else if (hinge_count < HINGE_STABLE_COUNT) {
+ hinge_count++;
+ }
+
+ if (hinge_count >= HINGE_STABLE_COUNT) {
+ spin_lock_irqsave(&spitzkbd_data->lock, flags);
+
+ input_report_switch(&spitzkbd_data->input, SW_0, ((GPLR(SPITZ_GPIO_SWA) & GPIO_bit(SPITZ_GPIO_SWA)) != 0));
+ input_report_switch(&spitzkbd_data->input, SW_1, ((GPLR(SPITZ_GPIO_SWB) & GPIO_bit(SPITZ_GPIO_SWB)) != 0));
+ input_sync(&spitzkbd_data->input);
+
+ spin_unlock_irqrestore(&spitzkbd_data->lock, flags);
+ } else {
+ mod_timer(&spitzkbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
+ }
+}
+
+#ifdef CONFIG_PM
+static int spitzkbd_suspend(struct device *dev, pm_message_t state, uint32_t level)
+{
+ if (level == SUSPEND_POWER_DOWN) {
+ int i;
+ struct spitzkbd *spitzkbd = dev_get_drvdata(dev);
+ spitzkbd->suspended = 1;
+
+ /* Set Strobe lines as inputs - *except* strobe line 0 leave this
+ enabled so we can detect a power button press for resume */
+ for (i = 1; i < SPITZ_KEY_STROBE_NUM; i++)
+ pxa_gpio_mode(spitz_strobes[i] | GPIO_IN);
+ }
+ return 0;
+}
+
+static int spitzkbd_resume(struct device *dev, uint32_t level)
+{
+ if (level == RESUME_POWER_ON) {
+ int i;
+ struct spitzkbd *spitzkbd = dev_get_drvdata(dev);
+
+ for (i = 0; i < SPITZ_KEY_STROBE_NUM; i++)
+ pxa_gpio_mode(spitz_strobes[i] | GPIO_OUT | GPIO_DFLT_HIGH);
+
+ /* Upon resume, ignore the suspend key for a short while */
+ spitzkbd->suspend_jiffies = jiffies;
+ spitzkbd->suspended = 0;
+ }
+ return 0;
+}
+#else
+#define spitzkbd_suspend NULL
+#define spitzkbd_resume NULL
+#endif
+
+static int __init spitzkbd_probe(struct device *dev)
+{
+ int i;
+ struct spitzkbd *spitzkbd;
+
+ spitzkbd = kzalloc(sizeof(struct spitzkbd), GFP_KERNEL);
+ if (!spitzkbd)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, spitzkbd);
+ strcpy(spitzkbd->phys, "spitzkbd/input0");
+
+ spin_lock_init(&spitzkbd->lock);
+
+ /* Init Keyboard rescan timer */
+ init_timer(&spitzkbd->timer);
+ spitzkbd->timer.function = spitzkbd_timer_callback;
+ spitzkbd->timer.data = (unsigned long) spitzkbd;
+
+ /* Init Hinge Timer */
+ init_timer(&spitzkbd->htimer);
+ spitzkbd->htimer.function = spitzkbd_hinge_timer;
+ spitzkbd->htimer.data = (unsigned long) spitzkbd;
+
+ spitzkbd->suspend_jiffies = jiffies;
+
+ init_input_dev(&spitzkbd->input);
+ spitzkbd->input.private = spitzkbd;
+ spitzkbd->input.name = "Spitz Keyboard";
+ spitzkbd->input.dev = dev;
+ spitzkbd->input.phys = spitzkbd->phys;
+ spitzkbd->input.id.bustype = BUS_HOST;
+ spitzkbd->input.id.vendor = 0x0001;
+ spitzkbd->input.id.product = 0x0001;
+ spitzkbd->input.id.version = 0x0100;
+ spitzkbd->input.evbit[0] = BIT(EV_KEY) | BIT(EV_REP) | BIT(EV_PWR) | BIT(EV_SW);
+ spitzkbd->input.keycode = spitzkbd->keycode;
+ spitzkbd->input.keycodesize = sizeof(unsigned char);
+ spitzkbd->input.keycodemax = ARRAY_SIZE(spitzkbd_keycode);
+
+ memcpy(spitzkbd->keycode, spitzkbd_keycode, sizeof(spitzkbd->keycode));
+ for (i = 0; i < ARRAY_SIZE(spitzkbd_keycode); i++)
+ set_bit(spitzkbd->keycode[i], spitzkbd->input.keybit);
+ clear_bit(0, spitzkbd->input.keybit);
+ set_bit(SW_0, spitzkbd->input.swbit);
+ set_bit(SW_1, spitzkbd->input.swbit);
+
+ input_register_device(&spitzkbd->input);
+ mod_timer(&spitzkbd->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
+
+ /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
+ for (i = 0; i < SPITZ_KEY_SENSE_NUM; i++) {
+ pxa_gpio_mode(spitz_senses[i] | GPIO_IN);
+ if (request_irq(IRQ_GPIO(spitz_senses[i]), spitzkbd_interrupt,
+ SA_INTERRUPT, "Spitzkbd Sense", spitzkbd))
+ printk(KERN_WARNING "spitzkbd: Can't get Sense IRQ: %d!\n", i);
+ else
+ set_irq_type(IRQ_GPIO(spitz_senses[i]),IRQT_RISING);
+ }
+
+ /* Set Strobe lines as outputs - set high */
+ for (i = 0; i < SPITZ_KEY_STROBE_NUM; i++)
+ pxa_gpio_mode(spitz_strobes[i] | GPIO_OUT | GPIO_DFLT_HIGH);
+
+ pxa_gpio_mode(SPITZ_GPIO_SYNC | GPIO_IN);
+ pxa_gpio_mode(SPITZ_GPIO_ON_KEY | GPIO_IN);
+ pxa_gpio_mode(SPITZ_GPIO_SWA | GPIO_IN);
+ pxa_gpio_mode(SPITZ_GPIO_SWB | GPIO_IN);
+
+ request_irq(SPITZ_IRQ_GPIO_SYNC, spitzkbd_interrupt, SA_INTERRUPT, "Spitzkbd Sync", spitzkbd);
+ request_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd_interrupt, SA_INTERRUPT, "Spitzkbd PwrOn", spitzkbd);
+ request_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd_hinge_isr, SA_INTERRUPT, "Spitzkbd SWA", spitzkbd);
+ request_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd_hinge_isr, SA_INTERRUPT, "Spitzkbd SWB", spitzkbd);
+
+ set_irq_type(SPITZ_IRQ_GPIO_SYNC, IRQT_BOTHEDGE);
+ set_irq_type(SPITZ_IRQ_GPIO_ON_KEY, IRQT_BOTHEDGE);
+ set_irq_type(SPITZ_IRQ_GPIO_SWA, IRQT_BOTHEDGE);
+ set_irq_type(SPITZ_IRQ_GPIO_SWB, IRQT_BOTHEDGE);
+
+ printk(KERN_INFO "input: Spitz Keyboard Registered\n");
+
+ return 0;
+}
+
+static int spitzkbd_remove(struct device *dev)
+{
+ int i;
+ struct spitzkbd *spitzkbd = dev_get_drvdata(dev);
+
+ for (i = 0; i < SPITZ_KEY_SENSE_NUM; i++)
+ free_irq(IRQ_GPIO(spitz_senses[i]), spitzkbd);
+
+ free_irq(SPITZ_IRQ_GPIO_SYNC, spitzkbd);
+ free_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd);
+ free_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd);
+ free_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd);
+
+ del_timer_sync(&spitzkbd->htimer);
+ del_timer_sync(&spitzkbd->timer);
+
+ input_unregister_device(&spitzkbd->input);
+
+ kfree(spitzkbd);
+
+ return 0;
+}
+
+static struct device_driver spitzkbd_driver = {
+ .name = "spitz-keyboard",
+ .bus = &platform_bus_type,
+ .probe = spitzkbd_probe,
+ .remove = spitzkbd_remove,
+ .suspend = spitzkbd_suspend,
+ .resume = spitzkbd_resume,
+};
+
+static int __devinit spitzkbd_init(void)
+{
+ return driver_register(&spitzkbd_driver);
+}
+
+static void __exit spitzkbd_exit(void)
+{
+ driver_unregister(&spitzkbd_driver);
+}
+
+module_init(spitzkbd_init);
+module_exit(spitzkbd_exit);
+
+MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
+MODULE_DESCRIPTION("Spitz Keyboard Driver");
+MODULE_LICENSE("GPL v2");
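
A note on the keycode table in the new driver: SCANCODE(r, c) = ((r) << 4) + (c) + 1 gives each sense row a 16-entry stripe of spitzkbd_keycode[] (hence the /* 1-16 */, /* 17-32 */ ... comments). For example, column 2 strobed with row 3 sensed yields (3 << 4) + 2 + 1 = 51, which the table maps to KEY_S. Illustration only:

	unsigned int scancode = SCANCODE(3, 2);		/* (3 << 4) + 2 + 1 == 51 */
	unsigned char key = spitzkbd_keycode[scancode];	/* KEY_S, from the 49-64 row */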
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 0489af5a80c..21d55ed4b88 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -24,17 +24,17 @@ config TOUCHSCREEN_BITSY
module will be called h3600_ts_input.
config TOUCHSCREEN_CORGI
- tristate "Corgi touchscreen (for Sharp SL-C7xx)"
+ tristate "SharpSL (Corgi and Spitz series) touchscreen driver"
depends on PXA_SHARPSL
default y
help
Say Y here to enable the driver for the touchscreen on the
- Sharp SL-C7xx series of PDAs.
+ Sharp SL-C7xx and SL-Cxx00 series of PDAs.
If unsure, say N.
To compile this driver as a module, choose M here: the
- module will be called ads7846_ts.
+ module will be called corgi_ts.
config TOUCHSCREEN_GUNZE
tristate "Gunze AHL-51S touchscreen"
diff --git a/drivers/input/touchscreen/corgi_ts.c b/drivers/input/touchscreen/corgi_ts.c
index 5d19261b884..4c7fbe55036 100644
--- a/drivers/input/touchscreen/corgi_ts.c
+++ b/drivers/input/touchscreen/corgi_ts.c
@@ -1,5 +1,5 @@
/*
- * Touchscreen driver for Sharp Corgi models (SL-C7xx)
+ * Touchscreen driver for Sharp SL-C7xx and SL-Cxx00 models
*
* Copyright (c) 2004-2005 Richard Purdie
*
@@ -19,7 +19,7 @@
#include <linux/slab.h>
#include <asm/irq.h>
-#include <asm/arch/corgi.h>
+#include <asm/arch/sharpsl.h>
#include <asm/arch/hardware.h>
#include <asm/arch/pxa-regs.h>
@@ -47,15 +47,20 @@ struct corgi_ts {
struct ts_event tc;
int pendown;
int power_mode;
+ int irq_gpio;
+ struct corgits_machinfo *machinfo;
};
-#define STATUS_HSYNC (GPLR(CORGI_GPIO_HSYNC) & GPIO_bit(CORGI_GPIO_HSYNC))
-
-#define SyncHS() while((STATUS_HSYNC) == 0); while((STATUS_HSYNC) != 0);
+#ifdef CONFIG_PXA25x
#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C0, 0" : "=r"(a))
#define PMNC_GET(x) asm volatile ("mrc p14, 0, %0, C0, C0, 0" : "=r"(x))
#define PMNC_SET(x) asm volatile ("mcr p14, 0, %0, C0, C0, 0" : : "r"(x))
-
+#endif
+#ifdef CONFIG_PXA27x
+#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C1, 0" : "=r"(a))
+#define PMNC_GET(x) asm volatile ("mrc p14, 0, %0, C0, C1, 0" : "=r"(x))
+#define PMNC_SET(x) asm volatile ("mcr p14, 0, %0, C0, C1, 0" : : "r"(x))
+#endif
/* ADS7846 Touch Screen Controller bit definitions */
#define ADSCTRL_PD0 (1u << 0) /* PD0 */
@@ -66,12 +71,11 @@ struct corgi_ts {
#define ADSCTRL_STS (1u << 7) /* Start Bit */
/* External Functions */
-extern unsigned long w100fb_get_hsynclen(struct device *dev);
extern unsigned int get_clk_frequency_khz(int info);
-static unsigned long calc_waittime(void)
+static unsigned long calc_waittime(struct corgi_ts *corgi_ts)
{
- unsigned long hsync_len = w100fb_get_hsynclen(&corgifb_device.dev);
+ unsigned long hsync_len = corgi_ts->machinfo->get_hsync_len();
if (hsync_len)
return get_clk_frequency_khz(0)*1000/hsync_len;
@@ -79,7 +83,8 @@ static unsigned long calc_waittime(void)
return 0;
}
-static int sync_receive_data_send_cmd(int doRecive, int doSend, unsigned int address, unsigned long wait_time)
+static int sync_receive_data_send_cmd(struct corgi_ts *corgi_ts, int doRecive, int doSend,
+ unsigned int address, unsigned long wait_time)
{
unsigned long timer1 = 0, timer2, pmnc = 0;
int pos = 0;
@@ -90,7 +95,7 @@ static int sync_receive_data_send_cmd(int doRecive, int doSend, unsigned int add
PMNC_SET(0x01);
/* polling HSync */
- SyncHS();
+ corgi_ts->machinfo->wait_hsync();
/* get CCNT */
CCNT(timer1);
}
@@ -109,7 +114,7 @@ static int sync_receive_data_send_cmd(int doRecive, int doSend, unsigned int add
CCNT(timer2);
if (timer2-timer1 > wait_time) {
/* too slow - timeout, try again */
- SyncHS();
+ corgi_ts->machinfo->wait_hsync();
/* get OSCR */
CCNT(timer1);
/* Wait after HSync */
@@ -133,23 +138,23 @@ static int read_xydata(struct corgi_ts *corgi_ts)
/* critical section */
local_irq_save(flags);
corgi_ssp_ads7846_lock();
- wait_time=calc_waittime();
+ wait_time = calc_waittime(corgi_ts);
/* Y-axis */
- sync_receive_data_send_cmd(0, 1, 1u, wait_time);
+ sync_receive_data_send_cmd(corgi_ts, 0, 1, 1u, wait_time);
/* Y-axis */
- sync_receive_data_send_cmd(1, 1, 1u, wait_time);
+ sync_receive_data_send_cmd(corgi_ts, 1, 1, 1u, wait_time);
/* X-axis */
- y = sync_receive_data_send_cmd(1, 1, 5u, wait_time);
+ y = sync_receive_data_send_cmd(corgi_ts, 1, 1, 5u, wait_time);
/* Z1 */
- x = sync_receive_data_send_cmd(1, 1, 3u, wait_time);
+ x = sync_receive_data_send_cmd(corgi_ts, 1, 1, 3u, wait_time);
/* Z2 */
- z1 = sync_receive_data_send_cmd(1, 1, 4u, wait_time);
- z2 = sync_receive_data_send_cmd(1, 0, 4u, wait_time);
+ z1 = sync_receive_data_send_cmd(corgi_ts, 1, 1, 4u, wait_time);
+ z2 = sync_receive_data_send_cmd(corgi_ts, 1, 0, 4u, wait_time);
/* Power-Down Enable */
corgi_ssp_ads7846_put((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
@@ -189,9 +194,9 @@ static void new_data(struct corgi_ts *corgi_ts, struct pt_regs *regs)
static void ts_interrupt_main(struct corgi_ts *corgi_ts, int isTimer, struct pt_regs *regs)
{
- if ((GPLR(CORGI_GPIO_TP_INT) & GPIO_bit(CORGI_GPIO_TP_INT)) == 0) {
+ if ((GPLR(IRQ_TO_GPIO(corgi_ts->irq_gpio)) & GPIO_bit(IRQ_TO_GPIO(corgi_ts->irq_gpio))) == 0) {
/* Disable Interrupt */
- set_irq_type(CORGI_IRQ_GPIO_TP_INT, IRQT_NOEDGE);
+ set_irq_type(corgi_ts->irq_gpio, IRQT_NOEDGE);
if (read_xydata(corgi_ts)) {
corgi_ts->pendown = 1;
new_data(corgi_ts, regs);
@@ -210,7 +215,7 @@ static void ts_interrupt_main(struct corgi_ts *corgi_ts, int isTimer, struct pt_
}
/* Enable Falling Edge */
- set_irq_type(CORGI_IRQ_GPIO_TP_INT, IRQT_FALLING);
+ set_irq_type(corgi_ts->irq_gpio, IRQT_FALLING);
corgi_ts->pendown = 0;
}
}
@@ -254,7 +259,7 @@ static int corgits_resume(struct device *dev, uint32_t level)
corgi_ssp_ads7846_putget((4u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
/* Enable Falling Edge */
- set_irq_type(CORGI_IRQ_GPIO_TP_INT, IRQT_FALLING);
+ set_irq_type(corgi_ts->irq_gpio, IRQT_FALLING);
corgi_ts->power_mode = PWR_MODE_ACTIVE;
}
return 0;
@@ -267,6 +272,7 @@ static int corgits_resume(struct device *dev, uint32_t level)
static int __init corgits_probe(struct device *dev)
{
struct corgi_ts *corgi_ts;
+ struct platform_device *pdev = to_platform_device(dev);
if (!(corgi_ts = kmalloc(sizeof(struct corgi_ts), GFP_KERNEL)))
return -ENOMEM;
@@ -275,6 +281,14 @@ static int __init corgits_probe(struct device *dev)
memset(corgi_ts, 0, sizeof(struct corgi_ts));
+ corgi_ts->machinfo = dev->platform_data;
+ corgi_ts->irq_gpio = platform_get_irq(pdev, 0);
+
+ if (corgi_ts->irq_gpio < 0) {
+ kfree(corgi_ts);
+ return -ENODEV;
+ }
+
init_input_dev(&corgi_ts->input);
corgi_ts->input.evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
corgi_ts->input.keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH);
@@ -293,8 +307,7 @@ static int __init corgits_probe(struct device *dev)
corgi_ts->input.id.product = 0x0002;
corgi_ts->input.id.version = 0x0100;
- pxa_gpio_mode(CORGI_GPIO_TP_INT | GPIO_IN);
- pxa_gpio_mode(CORGI_GPIO_HSYNC | GPIO_IN);
+ pxa_gpio_mode(IRQ_TO_GPIO(corgi_ts->irq_gpio) | GPIO_IN);
/* Initialize ADS7846 Difference Reference mode */
corgi_ssp_ads7846_putget((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
@@ -313,14 +326,14 @@ static int __init corgits_probe(struct device *dev)
input_register_device(&corgi_ts->input);
corgi_ts->power_mode = PWR_MODE_ACTIVE;
- if (request_irq(CORGI_IRQ_GPIO_TP_INT, ts_interrupt, SA_INTERRUPT, "ts", corgi_ts)) {
+ if (request_irq(corgi_ts->irq_gpio, ts_interrupt, SA_INTERRUPT, "ts", corgi_ts)) {
input_unregister_device(&corgi_ts->input);
kfree(corgi_ts);
return -EBUSY;
}
/* Enable Falling Edge */
- set_irq_type(CORGI_IRQ_GPIO_TP_INT, IRQT_FALLING);
+ set_irq_type(corgi_ts->irq_gpio, IRQT_FALLING);
printk(KERN_INFO "input: Corgi Touchscreen Registered\n");
@@ -331,8 +344,9 @@ static int corgits_remove(struct device *dev)
{
struct corgi_ts *corgi_ts = dev_get_drvdata(dev);
- free_irq(CORGI_IRQ_GPIO_TP_INT, NULL);
+ free_irq(corgi_ts->irq_gpio, NULL);
del_timer_sync(&corgi_ts->timer);
+ corgi_ts->machinfo->put_hsync();
input_unregister_device(&corgi_ts->input);
kfree(corgi_ts);
return 0;
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index e1f0d87de0e..0b0ea26023e 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -287,12 +287,12 @@ divert_dev_init(void)
init_waitqueue_head(&rd_queue);
#ifdef CONFIG_PROC_FS
- isdn_proc_entry = create_proc_entry("isdn", S_IFDIR | S_IRUGO | S_IXUGO, proc_net);
+ isdn_proc_entry = proc_mkdir("net/isdn", NULL);
if (!isdn_proc_entry)
return (-1);
isdn_divert_entry = create_proc_entry("divert", S_IFREG | S_IRUGO, isdn_proc_entry);
if (!isdn_divert_entry) {
- remove_proc_entry("isdn", proc_net);
+ remove_proc_entry("net/isdn", NULL);
return (-1);
}
isdn_divert_entry->proc_fops = &isdn_fops;
@@ -312,7 +312,7 @@ divert_dev_deinit(void)
#ifdef CONFIG_PROC_FS
remove_proc_entry("divert", isdn_proc_entry);
- remove_proc_entry("isdn", proc_net);
+ remove_proc_entry("net/isdn", NULL);
#endif /* CONFIG_PROC_FS */
return (0);
diff --git a/drivers/isdn/hardware/eicon/diva_didd.c b/drivers/isdn/hardware/eicon/diva_didd.c
index 7fdf8ae5be5..27204f4b111 100644
--- a/drivers/isdn/hardware/eicon/diva_didd.c
+++ b/drivers/isdn/hardware/eicon/diva_didd.c
@@ -30,8 +30,6 @@ static char *DRIVERNAME =
static char *DRIVERLNAME = "divadidd";
char *DRIVERRELEASE_DIDD = "2.0";
-static char *main_proc_dir = "eicon";
-
MODULE_DESCRIPTION("DIDD table driver for diva drivers");
MODULE_AUTHOR("Cytronics & Melware, Eicon Networks");
MODULE_SUPPORTED_DEVICE("Eicon diva drivers");
@@ -89,7 +87,7 @@ proc_read(char *page, char **start, off_t off, int count, int *eof,
static int DIVA_INIT_FUNCTION create_proc(void)
{
- proc_net_eicon = create_proc_entry(main_proc_dir, S_IFDIR, proc_net);
+ proc_net_eicon = proc_mkdir("net/eicon", NULL);
if (proc_net_eicon) {
if ((proc_didd =
@@ -105,7 +103,7 @@ static int DIVA_INIT_FUNCTION create_proc(void)
static void DIVA_EXIT_FUNCTION remove_proc(void)
{
remove_proc_entry(DRIVERLNAME, proc_net_eicon);
- remove_proc_entry(main_proc_dir, proc_net);
+ remove_proc_entry("net/eicon", NULL);
}
static int DIVA_INIT_FUNCTION divadidd_init(void)
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
index b6435589d45..c12efa6f842 100644
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ b/drivers/isdn/hardware/eicon/divasproc.c
@@ -381,7 +381,7 @@ int create_adapter_proc(diva_os_xdi_adapter_t * a)
char tmp[16];
sprintf(tmp, "%s%d", adapter_dir_name, a->controller);
- if (!(de = create_proc_entry(tmp, S_IFDIR, proc_net_eicon)))
+ if (!(de = proc_mkdir(tmp, proc_net_eicon)))
return (0);
a->proc_adapter_dir = (void *) de;
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 8337b0f26cc..4866fc32d8d 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -61,6 +61,7 @@ static const PCI_ENTRY id_list[] =
{PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E,"Digi International", "Digi DataFire Micro V (Europe)"},
{PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A,"Digi International", "Digi DataFire Micro V IOM2 (North America)"},
{PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A,"Digi International", "Digi DataFire Micro V (North America)"},
+ {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2, "Sitecom Europe", "DC-105 ISDN PCI"},
{0, 0, NULL, NULL},
};
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index c6b5bf7d2ac..dc334aab433 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -611,7 +611,7 @@ static int sedlbauer_event(event_t event, int priority,
} /* sedlbauer_event */
static struct pcmcia_device_id sedlbauer_ids[] = {
- PCMCIA_DEVICE_PROD_ID1234("SEDLBAUER", "speed star II", "V 3.1", "(c) 93 - 98 cb ", 0x81fb79f5, 0xf3612e1d, 0x6b95c78a, 0x50d4149c),
+ PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "speed star II", "V 3.1", 0x81fb79f5, 0xf3612e1d, 0x6b95c78a),
PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D67", 0x81fb79f5, 0xe4e9bc12, 0x397b7e90),
PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D98", 0x81fb79f5, 0xe4e9bc12, 0x2e5c7fce),
PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", " (C) 93-94 VK", 0x81fb79f5, 0xe4e9bc12, 0x8db143fe),
diff --git a/drivers/isdn/hisax/st5481.h b/drivers/isdn/hisax/st5481.h
index 0fda5c89429..9ffaae7c657 100644
--- a/drivers/isdn/hisax/st5481.h
+++ b/drivers/isdn/hisax/st5481.h
@@ -466,10 +466,10 @@ void st5481_stop(struct st5481_adapter *adapter);
#define __debug_variable st5481_debug
#include "hisax_debug.h"
-#ifdef CONFIG_HISAX_DEBUG
-
extern int st5481_debug;
+#ifdef CONFIG_HISAX_DEBUG
+
#define DBG_ISO_PACKET(level,urb) \
if (level & __debug_variable) dump_iso_packet(__FUNCTION__,urb)
diff --git a/drivers/isdn/hisax/st5481_b.c b/drivers/isdn/hisax/st5481_b.c
index 2fcd093921d..657817a591f 100644
--- a/drivers/isdn/hisax/st5481_b.c
+++ b/drivers/isdn/hisax/st5481_b.c
@@ -172,14 +172,18 @@ static void usb_b_out_complete(struct urb *urb, struct pt_regs *regs)
test_and_clear_bit(buf_nr, &b_out->busy);
if (unlikely(urb->status < 0)) {
- if (urb->status != -ENOENT && urb->status != -ESHUTDOWN) {
- WARN("urb status %d",urb->status);
- if (b_out->busy == 0) {
- st5481_usb_pipe_reset(adapter, (bcs->channel+1)*2 | USB_DIR_OUT, NULL, NULL);
- }
- } else {
- DBG(1,"urb killed");
- return; // Give up
+ switch (urb->status) {
+ case -ENOENT:
+ case -ESHUTDOWN:
+ case -ECONNRESET:
+ DBG(4,"urb killed status %d", urb->status);
+ return; // Give up
+ default:
+ WARN("urb status %d",urb->status);
+ if (b_out->busy == 0) {
+ st5481_usb_pipe_reset(adapter, (bcs->channel+1)*2 | USB_DIR_OUT, NULL, NULL);
+ }
+ break;
}
}
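
The completion-handler rewrites in the st5481 hunks (here and in st5481_d.c and st5481_usb.c below) all follow the same idiom: -ENOENT, -ESHUTDOWN and -ECONNRESET mean the URB was killed, unlinked or the device went away, so the handler simply gives up, while any other negative status is a real transfer error worth logging and recovering from. A minimal sketch of such a handler, using the old two-argument completion signature seen in this file:

	static void my_urb_complete(struct urb *urb, struct pt_regs *regs)
	{
		switch (urb->status) {
		case 0:
			break;			/* success: process urb->transfer_buffer */
		case -ENOENT:			/* usb_kill_urb() */
		case -ECONNRESET:		/* usb_unlink_urb() */
		case -ESHUTDOWN:		/* device or endpoint gone */
			return;			/* cancelled: do not resubmit */
		default:
			printk(KERN_WARNING "urb error %d\n", urb->status);
			break;			/* transient error: recover below */
		}
		/* ... hand data up and/or resubmit the URB ... */
	}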
diff --git a/drivers/isdn/hisax/st5481_d.c b/drivers/isdn/hisax/st5481_d.c
index 071b1d31999..941f7022ada 100644
--- a/drivers/isdn/hisax/st5481_d.c
+++ b/drivers/isdn/hisax/st5481_d.c
@@ -382,16 +382,20 @@ static void usb_d_out_complete(struct urb *urb, struct pt_regs *regs)
test_and_clear_bit(buf_nr, &d_out->busy);
if (unlikely(urb->status < 0)) {
- if (urb->status != -ENOENT && urb->status != -ESHUTDOWN) {
- WARN("urb status %d",urb->status);
- if (d_out->busy == 0) {
- st5481_usb_pipe_reset(adapter, EP_D_OUT | USB_DIR_OUT, fifo_reseted, adapter);
- }
- return;
- } else {
- DBG(1,"urb killed");
- return; // Give up
+ switch (urb->status) {
+ case -ENOENT:
+ case -ESHUTDOWN:
+ case -ECONNRESET:
+ DBG(1,"urb killed status %d", urb->status);
+ break;
+ default:
+ WARN("urb status %d",urb->status);
+ if (d_out->busy == 0) {
+ st5481_usb_pipe_reset(adapter, EP_D_OUT | USB_DIR_OUT, fifo_reseted, adapter);
+ }
+ break;
}
+ return; // Give up
}
FsmEvent(&adapter->d_out.fsm, EV_DOUT_COMPLETE, (void *) buf_nr);
@@ -709,14 +713,14 @@ int st5481_setup_d(struct st5481_adapter *adapter)
adapter->l1m.fsm = &l1fsm;
adapter->l1m.state = ST_L1_F3;
- adapter->l1m.debug = 1;
+ adapter->l1m.debug = st5481_debug & 0x100;
adapter->l1m.userdata = adapter;
adapter->l1m.printdebug = l1m_debug;
FsmInitTimer(&adapter->l1m, &adapter->timer);
adapter->d_out.fsm.fsm = &dout_fsm;
adapter->d_out.fsm.state = ST_DOUT_NONE;
- adapter->d_out.fsm.debug = 1;
+ adapter->d_out.fsm.debug = st5481_debug & 0x100;
adapter->d_out.fsm.userdata = adapter;
adapter->d_out.fsm.printdebug = dout_debug;
diff --git a/drivers/isdn/hisax/st5481_init.c b/drivers/isdn/hisax/st5481_init.c
index 7aa810d5d33..2cf5d1a6df6 100644
--- a/drivers/isdn/hisax/st5481_init.c
+++ b/drivers/isdn/hisax/st5481_init.c
@@ -43,10 +43,10 @@ static int number_of_leds = 2; /* 2 LEDs on the adpater default */
module_param(number_of_leds, int, 0);
#ifdef CONFIG_HISAX_DEBUG
-static int debug = 0x1;
+static int debug = 0;
module_param(debug, int, 0);
-int st5481_debug;
#endif
+int st5481_debug;
static LIST_HEAD(adapter_list);
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index ab62223297a..89fbeb58485 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -132,11 +132,15 @@ static void usb_ctrl_complete(struct urb *urb, struct pt_regs *regs)
struct ctrl_msg *ctrl_msg;
if (unlikely(urb->status < 0)) {
- if (urb->status != -ENOENT && urb->status != -ESHUTDOWN) {
- WARN("urb status %d",urb->status);
- } else {
- DBG(1,"urb killed");
- return; // Give up
+ switch (urb->status) {
+ case -ENOENT:
+ case -ESHUTDOWN:
+ case -ECONNRESET:
+ DBG(1,"urb killed status %d", urb->status);
+ return; // Give up
+ default:
+ WARN("urb status %d",urb->status);
+ break;
}
}
@@ -184,22 +188,22 @@ static void usb_int_complete(struct urb *urb, struct pt_regs *regs)
int status;
switch (urb->status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- DBG(1, "urb shutting down with status: %d", urb->status);
- return;
- default:
- WARN("nonzero urb status received: %d", urb->status);
- goto exit;
+ case 0:
+ /* success */
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* this urb is terminated, clean up */
+ DBG(2, "urb shutting down with status: %d", urb->status);
+ return;
+ default:
+ WARN("nonzero urb status received: %d", urb->status);
+ goto exit;
}
- DBG_PACKET(1, data, INT_PKT_SIZE);
+ DBG_PACKET(2, data, INT_PKT_SIZE);
if (urb->actual_length == 0) {
goto exit;
@@ -250,7 +254,7 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
struct urb *urb;
u8 *buf;
- DBG(1,"");
+ DBG(2,"");
if ((status = usb_reset_configuration (dev)) < 0) {
WARN("reset_configuration failed,status=%d",status);
@@ -330,15 +334,17 @@ void st5481_release_usb(struct st5481_adapter *adapter)
DBG(1,"");
// Stop and free Control and Interrupt URBs
- usb_unlink_urb(ctrl->urb);
+ usb_kill_urb(ctrl->urb);
if (ctrl->urb->transfer_buffer)
kfree(ctrl->urb->transfer_buffer);
usb_free_urb(ctrl->urb);
+ ctrl->urb = NULL;
- usb_unlink_urb(intr->urb);
+ usb_kill_urb(intr->urb);
if (intr->urb->transfer_buffer)
kfree(intr->urb->transfer_buffer);
usb_free_urb(intr->urb);
+ intr->urb = NULL;
}
/*
@@ -406,6 +412,7 @@ fill_isoc_urb(struct urb *urb, struct usb_device *dev,
spin_lock_init(&urb->lock);
urb->dev=dev;
urb->pipe=pipe;
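+ /* isochronous URBs need an explicit polling interval */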
+ urb->interval = 1;
urb->transfer_buffer=buf;
urb->number_of_packets = num_packets;
urb->transfer_buffer_length=num_packets*packet_size;
@@ -452,7 +459,9 @@ st5481_setup_isocpipes(struct urb* urb[2], struct usb_device *dev,
if (urb[j]) {
if (urb[j]->transfer_buffer)
kfree(urb[j]->transfer_buffer);
+ urb[j]->transfer_buffer = NULL;
usb_free_urb(urb[j]);
+ urb[j] = NULL;
}
}
return retval;
@@ -463,10 +472,11 @@ void st5481_release_isocpipes(struct urb* urb[2])
int j;
for (j = 0; j < 2; j++) {
- usb_unlink_urb(urb[j]);
+ usb_kill_urb(urb[j]);
if (urb[j]->transfer_buffer)
kfree(urb[j]->transfer_buffer);
usb_free_urb(urb[j]);
+ urb[j] = NULL;
}
}
@@ -485,11 +495,15 @@ static void usb_in_complete(struct urb *urb, struct pt_regs *regs)
int len, count, status;
if (unlikely(urb->status < 0)) {
- if (urb->status != -ENOENT && urb->status != -ESHUTDOWN) {
- WARN("urb status %d",urb->status);
- } else {
- DBG(1,"urb killed");
- return; // Give up
+ switch (urb->status) {
+ case -ENOENT:
+ case -ESHUTDOWN:
+ case -ECONNRESET:
+ DBG(1,"urb killed status %d", urb->status);
+ return; // Give up
+ default:
+ WARN("urb status %d",urb->status);
+ break;
}
}
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c
index 5da507e532f..639582f61f4 100644
--- a/drivers/isdn/hysdn/hysdn_procconf.c
+++ b/drivers/isdn/hysdn/hysdn_procconf.c
@@ -394,7 +394,7 @@ hysdn_procconf_init(void)
hysdn_card *card;
uchar conf_name[20];
- hysdn_proc_entry = create_proc_entry(PROC_SUBDIR_NAME, S_IFDIR | S_IRUGO | S_IXUGO, proc_net);
+ hysdn_proc_entry = proc_mkdir(PROC_SUBDIR_NAME, proc_net);
if (!hysdn_proc_entry) {
printk(KERN_ERR "HYSDN: unable to create hysdn subdir\n");
return (-1);
diff --git a/drivers/isdn/sc/init.c b/drivers/isdn/sc/init.c
index 40b0df04ed9..1ebed041672 100644
--- a/drivers/isdn/sc/init.c
+++ b/drivers/isdn/sc/init.c
@@ -87,7 +87,7 @@ static int __init sc_init(void)
*/
for (i = 0 ; i < MAX_IO_REGS - 1 ; i++) {
if(!request_region(io[b] + i * 0x400, 1, "sc test")) {
- pr_debug("check_region for 0x%x failed\n", io[b] + i * 0x400);
+ pr_debug("request_region for 0x%x failed\n", io[b] + i * 0x400);
io[b] = 0;
break;
} else
@@ -181,7 +181,7 @@ static int __init sc_init(void)
for (i = SRAM_MIN ; i < SRAM_MAX ; i += SRAM_PAGESIZE) {
pr_debug("Checking RAM address 0x%x...\n", i);
if(request_region(i, SRAM_PAGESIZE, "sc test")) {
- pr_debug(" check_region succeeded\n");
+ pr_debug(" request_region succeeded\n");
model = identify_board(i, io[b]);
release_region(i, SRAM_PAGESIZE);
if (model >= 0) {
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index fb535737d17..9b38674fbf7 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -8,21 +8,15 @@
*/
/*
- * For now, this driver includes:
- * - RTC get & set
- * - reboot & shutdown commands
- * all synchronous with IRQ disabled (ugh)
- *
* TODO:
- * rework in a way the PMU driver works, that is asynchronous
- * with a queue of commands. I'll do that as soon as I have an
- * SMU based machine at hand. Some more cleanup is needed too,
- * like maybe fitting it into a platform device, etc...
- * Also check what's up with cache coherency, and if we really
- * can't do better than flushing the cache, maybe build a table
- * of command len/reply len like the PMU driver to only flush
- * what is actually necessary.
- * --BenH.
+ * - maybe add timeout to commands ?
+ * - blocking version of time functions
+ * - polling version of i2c commands (including timer that works with
+ * interrupts off)
+ * - maybe avoid some data copies with i2c by directly using the smu cmd
+ * buffer and a lower level internal interface
+ * - understand SMU -> CPU events and implement reception of them via
+ * the userland interface
*/
#include <linux/config.h>
@@ -36,6 +30,11 @@
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/rtc.h>
+#include <linux/completion.h>
+#include <linux/miscdevice.h>
+#include <linux/delay.h>
+#include <linux/sysdev.h>
+#include <linux/poll.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -45,8 +44,13 @@
#include <asm/smu.h>
#include <asm/sections.h>
#include <asm/abs_addr.h>
+#include <asm/uaccess.h>
+#include <asm/of_device.h>
+
+#define VERSION "0.6"
+#define AUTHOR "(c) 2005 Benjamin Herrenschmidt, IBM Corp."
-#define DEBUG_SMU 1
+#undef DEBUG_SMU
#ifdef DEBUG_SMU
#define DPRINTK(fmt, args...) do { printk(KERN_DEBUG fmt , ##args); } while (0)
@@ -57,20 +61,30 @@
/*
* This is the command buffer passed to the SMU hardware
*/
+#define SMU_MAX_DATA 254
+
struct smu_cmd_buf {
u8 cmd;
u8 length;
- u8 data[0x0FFE];
+ u8 data[SMU_MAX_DATA];
};
struct smu_device {
spinlock_t lock;
struct device_node *of_node;
- int db_ack; /* doorbell ack GPIO */
- int db_req; /* doorbell req GPIO */
+ struct of_device *of_dev;
+ int doorbell; /* doorbell gpio */
u32 __iomem *db_buf; /* doorbell buffer */
+ int db_irq;
+ int msg;
+ int msg_irq;
struct smu_cmd_buf *cmd_buf; /* command buffer virtual */
u32 cmd_buf_abs; /* command buffer absolute */
+ struct list_head cmd_list;
+ struct smu_cmd *cmd_cur; /* pending command */
+ struct list_head cmd_i2c_list;
+ struct smu_i2c_cmd *cmd_i2c_cur; /* pending i2c command */
+ struct timer_list i2c_timer;
};
/*
@@ -79,113 +93,245 @@ struct smu_device {
*/
static struct smu_device *smu;
+
/*
- * SMU low level communication stuff
+ * SMU driver low level stuff
*/
-static inline int smu_cmd_stat(struct smu_cmd_buf *cmd_buf, u8 cmd_ack)
-{
- rmb();
- return cmd_buf->cmd == cmd_ack && cmd_buf->length != 0;
-}
-static inline u8 smu_save_ack_cmd(struct smu_cmd_buf *cmd_buf)
+static void smu_start_cmd(void)
{
- return (~cmd_buf->cmd) & 0xff;
-}
+ unsigned long faddr, fend;
+ struct smu_cmd *cmd;
-static void smu_send_cmd(struct smu_device *dev)
-{
- /* SMU command buf is currently cacheable, we need a physical
- * address. This isn't exactly a DMA mapping here, I suspect
+ if (list_empty(&smu->cmd_list))
+ return;
+
+ /* Fetch first command in queue */
+ cmd = list_entry(smu->cmd_list.next, struct smu_cmd, link);
+ smu->cmd_cur = cmd;
+ list_del(&cmd->link);
+
+ DPRINTK("SMU: starting cmd %x, %d bytes data\n", cmd->cmd,
+ cmd->data_len);
+ DPRINTK("SMU: data buffer: %02x %02x %02x %02x ...\n",
+ ((u8 *)cmd->data_buf)[0], ((u8 *)cmd->data_buf)[1],
+ ((u8 *)cmd->data_buf)[2], ((u8 *)cmd->data_buf)[3]);
+
+ /* Fill the SMU command buffer */
+ smu->cmd_buf->cmd = cmd->cmd;
+ smu->cmd_buf->length = cmd->data_len;
+ memcpy(smu->cmd_buf->data, cmd->data_buf, cmd->data_len);
+
+ /* Flush command and data to RAM */
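+ /* (the cmd and length header bytes precede the data, hence the +2 below) */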
+ faddr = (unsigned long)smu->cmd_buf;
+ fend = faddr + smu->cmd_buf->length + 2;
+ flush_inval_dcache_range(faddr, fend);
+
+ /* This isn't exactly a DMA mapping here, I suspect
* the SMU is actually communicating with us via i2c to the
* northbridge or the CPU to access RAM.
*/
- writel(dev->cmd_buf_abs, dev->db_buf);
+ writel(smu->cmd_buf_abs, smu->db_buf);
/* Ring the SMU doorbell */
- pmac_do_feature_call(PMAC_FTR_WRITE_GPIO, NULL, dev->db_req, 4);
- pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, dev->db_req, 4);
+ pmac_do_feature_call(PMAC_FTR_WRITE_GPIO, NULL, smu->doorbell, 4);
}
-static int smu_cmd_done(struct smu_device *dev)
+
+static irqreturn_t smu_db_intr(int irq, void *arg, struct pt_regs *regs)
{
- unsigned long wait = 0;
- int gpio;
+ unsigned long flags;
+ struct smu_cmd *cmd;
+ void (*done)(struct smu_cmd *cmd, void *misc) = NULL;
+ void *misc = NULL;
+ u8 gpio;
+ int rc = 0;
- /* Check the SMU doorbell */
- do {
- gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO,
- NULL, dev->db_ack);
- if ((gpio & 7) == 7)
- return 0;
- udelay(100);
- } while(++wait < 10000);
+ /* SMU completed the command, well, we hope, let's make sure
+ * of it
+ */
+ spin_lock_irqsave(&smu->lock, flags);
- printk(KERN_ERR "SMU timeout !\n");
- return -ENXIO;
+ gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell);
+ if ((gpio & 7) != 7) {
+ spin_unlock_irqrestore(&smu->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ cmd = smu->cmd_cur;
+ smu->cmd_cur = NULL;
+ if (cmd == NULL)
+ goto bail;
+
+ if (rc == 0) {
+ unsigned long faddr;
+ int reply_len;
+ u8 ack;
+
+ /* CPU might have brought back the cache line, so we need
+ * to flush again before peeking at the SMU response. We
+ * flush the entire buffer for now as we haven't read the
+ * reply length (it's only 2 cache lines anyway)
+ */
+ faddr = (unsigned long)smu->cmd_buf;
+ flush_inval_dcache_range(faddr, faddr + 256);
+
+ /* Now check ack */
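+ /* (the SMU acks by writing back the bitwise complement of the command byte) */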
+ ack = (~cmd->cmd) & 0xff;
+ if (ack != smu->cmd_buf->cmd) {
+ DPRINTK("SMU: incorrect ack, want %x got %x\n",
+ ack, smu->cmd_buf->cmd);
+ rc = -EIO;
+ }
+ reply_len = rc == 0 ? smu->cmd_buf->length : 0;
+ DPRINTK("SMU: reply len: %d\n", reply_len);
+ if (reply_len > cmd->reply_len) {
+ printk(KERN_WARNING "SMU: reply buffer too small,"
+ "got %d bytes for a %d bytes buffer\n",
+ reply_len, cmd->reply_len);
+ reply_len = cmd->reply_len;
+ }
+ cmd->reply_len = reply_len;
+ if (cmd->reply_buf && reply_len)
+ memcpy(cmd->reply_buf, smu->cmd_buf->data, reply_len);
+ }
+
+ /* Now complete the command. Write the status last, as we lose
+ * ownership of the command structure as soon as it is no longer 1
+ */
+ done = cmd->done;
+ misc = cmd->misc;
+ mb();
+ cmd->status = rc;
+ bail:
+ /* Start next command if any */
+ smu_start_cmd();
+ spin_unlock_irqrestore(&smu->lock, flags);
+
+ /* Call command completion handler if any */
+ if (done)
+ done(cmd, misc);
+
+ /* It's an edge interrupt, nothing to do */
+ return IRQ_HANDLED;
}
-static int smu_do_cmd(struct smu_device *dev)
+
+static irqreturn_t smu_msg_intr(int irq, void *arg, struct pt_regs *regs)
{
- int rc;
- u8 cmd_ack;
+ /* I don't quite know what to do with this one, we seem to never
+ * receive it, so I suspect we have to arm it someway in the SMU
+ * to start getting events that way.
+ */
- DPRINTK("SMU do_cmd %02x len=%d %02x\n",
- dev->cmd_buf->cmd, dev->cmd_buf->length,
- dev->cmd_buf->data[0]);
+ printk(KERN_INFO "SMU: message interrupt !\n");
- cmd_ack = smu_save_ack_cmd(dev->cmd_buf);
+ /* It's an edge interrupt, nothing to do */
+ return IRQ_HANDLED;
+}
- /* Clear cmd_buf cache lines */
- flush_inval_dcache_range((unsigned long)dev->cmd_buf,
- ((unsigned long)dev->cmd_buf) +
- sizeof(struct smu_cmd_buf));
- smu_send_cmd(dev);
- rc = smu_cmd_done(dev);
- if (rc == 0)
- rc = smu_cmd_stat(dev->cmd_buf, cmd_ack) ? 0 : -1;
- DPRINTK("SMU do_cmd %02x len=%d %02x => %d (%02x)\n",
- dev->cmd_buf->cmd, dev->cmd_buf->length,
- dev->cmd_buf->data[0], rc, cmd_ack);
+/*
+ * Queued command management.
+ *
+ */
+
+int smu_queue_cmd(struct smu_cmd *cmd)
+{
+ unsigned long flags;
- return rc;
+ if (smu == NULL)
+ return -ENODEV;
+ if (cmd->data_len > SMU_MAX_DATA ||
+ cmd->reply_len > SMU_MAX_DATA)
+ return -EINVAL;
+
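+ /* status 1 means "in flight"; the doorbell handler overwrites it
+ * with 0 or a negative errno on completion
+ */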
+ cmd->status = 1;
+ spin_lock_irqsave(&smu->lock, flags);
+ list_add_tail(&cmd->link, &smu->cmd_list);
+ if (smu->cmd_cur == NULL)
+ smu_start_cmd();
+ spin_unlock_irqrestore(&smu->lock, flags);
+
+ return 0;
}
+EXPORT_SYMBOL(smu_queue_cmd);
-/* RTC low level commands */
-static inline int bcd2hex (int n)
+
+int smu_queue_simple(struct smu_simple_cmd *scmd, u8 command,
+ unsigned int data_len,
+ void (*done)(struct smu_cmd *cmd, void *misc),
+ void *misc, ...)
{
- return (((n & 0xf0) >> 4) * 10) + (n & 0xf);
+ struct smu_cmd *cmd = &scmd->cmd;
+ va_list list;
+ int i;
+
+ if (data_len > sizeof(scmd->buffer))
+ return -EINVAL;
+
+ memset(scmd, 0, sizeof(*scmd));
+ cmd->cmd = command;
+ cmd->data_len = data_len;
+ cmd->data_buf = scmd->buffer;
+ cmd->reply_len = sizeof(scmd->buffer);
+ cmd->reply_buf = scmd->buffer;
+ cmd->done = done;
+ cmd->misc = misc;
+
+ va_start(list, misc);
+ for (i = 0; i < data_len; ++i)
+ scmd->buffer[i] = (u8)va_arg(list, int);
+ va_end(list);
+
+ return smu_queue_cmd(cmd);
}
+EXPORT_SYMBOL(smu_queue_simple);
-static inline int hex2bcd (int n)
+
+void smu_poll(void)
{
- return ((n / 10) << 4) + (n % 10);
+ u8 gpio;
+
+ if (smu == NULL)
+ return;
+
+ gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell);
+ if ((gpio & 7) == 7)
+ smu_db_intr(smu->db_irq, smu, NULL);
}
+EXPORT_SYMBOL(smu_poll);
-#if 0
-static inline void smu_fill_set_pwrup_timer_cmd(struct smu_cmd_buf *cmd_buf)
+
+void smu_done_complete(struct smu_cmd *cmd, void *misc)
{
- cmd_buf->cmd = 0x8e;
- cmd_buf->length = 8;
- cmd_buf->data[0] = 0x00;
- memset(cmd_buf->data + 1, 0, 7);
+ struct completion *comp = misc;
+
+ complete(comp);
}
+EXPORT_SYMBOL(smu_done_complete);
+
-static inline void smu_fill_get_pwrup_timer_cmd(struct smu_cmd_buf *cmd_buf)
+void smu_spinwait_cmd(struct smu_cmd *cmd)
{
- cmd_buf->cmd = 0x8e;
- cmd_buf->length = 1;
- cmd_buf->data[0] = 0x01;
+ while(cmd->status == 1)
+ smu_poll();
}
+EXPORT_SYMBOL(smu_spinwait_cmd);
+
-static inline void smu_fill_dis_pwrup_timer_cmd(struct smu_cmd_buf *cmd_buf)
+/* RTC low level commands */
+static inline int bcd2hex (int n)
{
- cmd_buf->cmd = 0x8e;
- cmd_buf->length = 1;
- cmd_buf->data[0] = 0x02;
+ return (((n & 0xf0) >> 4) * 10) + (n & 0xf);
}
-#endif
+
+
+static inline int hex2bcd (int n)
+{
+ return ((n / 10) << 4) + (n % 10);
+}
+
static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf,
struct rtc_time *time)
@@ -202,100 +348,96 @@ static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf,
cmd_buf->data[7] = hex2bcd(time->tm_year - 100);
}
-static inline void smu_fill_get_rtc_cmd(struct smu_cmd_buf *cmd_buf)
-{
- cmd_buf->cmd = 0x8e;
- cmd_buf->length = 1;
- cmd_buf->data[0] = 0x81;
-}
-static void smu_parse_get_rtc_reply(struct smu_cmd_buf *cmd_buf,
- struct rtc_time *time)
+int smu_get_rtc_time(struct rtc_time *time, int spinwait)
{
- time->tm_sec = bcd2hex(cmd_buf->data[0]);
- time->tm_min = bcd2hex(cmd_buf->data[1]);
- time->tm_hour = bcd2hex(cmd_buf->data[2]);
- time->tm_wday = bcd2hex(cmd_buf->data[3]);
- time->tm_mday = bcd2hex(cmd_buf->data[4]);
- time->tm_mon = bcd2hex(cmd_buf->data[5]) - 1;
- time->tm_year = bcd2hex(cmd_buf->data[6]) + 100;
-}
-
-int smu_get_rtc_time(struct rtc_time *time)
-{
- unsigned long flags;
+ struct smu_simple_cmd cmd;
int rc;
if (smu == NULL)
return -ENODEV;
memset(time, 0, sizeof(struct rtc_time));
- spin_lock_irqsave(&smu->lock, flags);
- smu_fill_get_rtc_cmd(smu->cmd_buf);
- rc = smu_do_cmd(smu);
- if (rc == 0)
- smu_parse_get_rtc_reply(smu->cmd_buf, time);
- spin_unlock_irqrestore(&smu->lock, flags);
+ rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 1, NULL, NULL,
+ SMU_CMD_RTC_GET_DATETIME);
+ if (rc)
+ return rc;
+ smu_spinwait_simple(&cmd);
- return rc;
+ time->tm_sec = bcd2hex(cmd.buffer[0]);
+ time->tm_min = bcd2hex(cmd.buffer[1]);
+ time->tm_hour = bcd2hex(cmd.buffer[2]);
+ time->tm_wday = bcd2hex(cmd.buffer[3]);
+ time->tm_mday = bcd2hex(cmd.buffer[4]);
+ time->tm_mon = bcd2hex(cmd.buffer[5]) - 1;
+ time->tm_year = bcd2hex(cmd.buffer[6]) + 100;
+
+ return 0;
}
-int smu_set_rtc_time(struct rtc_time *time)
+
+int smu_set_rtc_time(struct rtc_time *time, int spinwait)
{
- unsigned long flags;
+ struct smu_simple_cmd cmd;
int rc;
if (smu == NULL)
return -ENODEV;
- spin_lock_irqsave(&smu->lock, flags);
- smu_fill_set_rtc_cmd(smu->cmd_buf, time);
- rc = smu_do_cmd(smu);
- spin_unlock_irqrestore(&smu->lock, flags);
+ rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 8, NULL, NULL,
+ SMU_CMD_RTC_SET_DATETIME,
+ hex2bcd(time->tm_sec),
+ hex2bcd(time->tm_min),
+ hex2bcd(time->tm_hour),
+ time->tm_wday,
+ hex2bcd(time->tm_mday),
+ hex2bcd(time->tm_mon + 1),
+ hex2bcd(time->tm_year - 100));
+ if (rc)
+ return rc;
+ smu_spinwait_simple(&cmd);
- return rc;
+ return 0;
}
+
void smu_shutdown(void)
{
- const unsigned char *command = "SHUTDOWN";
- unsigned long flags;
+ struct smu_simple_cmd cmd;
if (smu == NULL)
return;
- spin_lock_irqsave(&smu->lock, flags);
- smu->cmd_buf->cmd = 0xaa;
- smu->cmd_buf->length = strlen(command);
- strcpy(smu->cmd_buf->data, command);
- smu_do_cmd(smu);
+ if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 9, NULL, NULL,
+ 'S', 'H', 'U', 'T', 'D', 'O', 'W', 'N', 0))
+ return;
+ smu_spinwait_simple(&cmd);
for (;;)
;
- spin_unlock_irqrestore(&smu->lock, flags);
}
+
void smu_restart(void)
{
- const unsigned char *command = "RESTART";
- unsigned long flags;
+ struct smu_simple_cmd cmd;
if (smu == NULL)
return;
- spin_lock_irqsave(&smu->lock, flags);
- smu->cmd_buf->cmd = 0xaa;
- smu->cmd_buf->length = strlen(command);
- strcpy(smu->cmd_buf->data, command);
- smu_do_cmd(smu);
+ if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, NULL, NULL,
+ 'R', 'E', 'S', 'T', 'A', 'R', 'T', 0))
+ return;
+ smu_spinwait_simple(&cmd);
for (;;)
;
- spin_unlock_irqrestore(&smu->lock, flags);
}
+
int smu_present(void)
{
return smu != NULL;
}
+EXPORT_SYMBOL(smu_present);
int smu_init (void)
@@ -307,6 +449,8 @@ int smu_init (void)
if (np == NULL)
return -ENODEV;
+ printk(KERN_INFO "SMU driver %s %s\n", VERSION, AUTHOR);
+
if (smu_cmdbuf_abs == 0) {
printk(KERN_ERR "SMU: Command buffer not allocated !\n");
return -EINVAL;
@@ -318,7 +462,13 @@ int smu_init (void)
memset(smu, 0, sizeof(*smu));
spin_lock_init(&smu->lock);
+ INIT_LIST_HEAD(&smu->cmd_list);
+ INIT_LIST_HEAD(&smu->cmd_i2c_list);
smu->of_node = np;
+ smu->db_irq = NO_IRQ;
+ smu->msg_irq = NO_IRQ;
+ init_timer(&smu->i2c_timer);
+
/* smu_cmdbuf_abs is in the low 2G of RAM, can be converted to a
* 32 bits value safely
*/
@@ -331,8 +481,8 @@ int smu_init (void)
goto fail;
}
data = (u32 *)get_property(np, "reg", NULL);
- of_node_put(np);
if (data == NULL) {
+ of_node_put(np);
printk(KERN_ERR "SMU: Can't find doorbell GPIO address !\n");
goto fail;
}
@@ -341,8 +491,31 @@ int smu_init (void)
* and ack. GPIOs are at 0x50, best would be to find that out
* in the device-tree though.
*/
- smu->db_req = 0x50 + *data;
- smu->db_ack = 0x50 + *data;
+ smu->doorbell = *data;
+ if (smu->doorbell < 0x50)
+ smu->doorbell += 0x50;
+ if (np->n_intrs > 0)
+ smu->db_irq = np->intrs[0].line;
+
+ of_node_put(np);
+
+ /* Now look for the smu-interrupt GPIO */
+ do {
+ np = of_find_node_by_name(NULL, "smu-interrupt");
+ if (np == NULL)
+ break;
+ data = (u32 *)get_property(np, "reg", NULL);
+ if (data == NULL) {
+ of_node_put(np);
+ break;
+ }
+ smu->msg = *data;
+ if (smu->msg < 0x50)
+ smu->msg += 0x50;
+ if (np->n_intrs > 0)
+ smu->msg_irq = np->intrs[0].line;
+ of_node_put(np);
+ } while(0);
/* Doorbell buffer is currently hard-coded, I didn't find a proper
* device-tree entry giving the address. Best would probably be to use
@@ -362,3 +535,584 @@ int smu_init (void)
return -ENXIO;
}
+
+
+static int smu_late_init(void)
+{
+ if (!smu)
+ return 0;
+
+ /*
+ * Try to request the interrupts
+ */
+
+ if (smu->db_irq != NO_IRQ) {
+ if (request_irq(smu->db_irq, smu_db_intr,
+ SA_SHIRQ, "SMU doorbell", smu) < 0) {
+ printk(KERN_WARNING "SMU: can't "
+ "request interrupt %d\n",
+ smu->db_irq);
+ smu->db_irq = NO_IRQ;
+ }
+ }
+
+ if (smu->msg_irq != NO_IRQ) {
+ if (request_irq(smu->msg_irq, smu_msg_intr,
+ SA_SHIRQ, "SMU message", smu) < 0) {
+ printk(KERN_WARNING "SMU: can't "
+ "request interrupt %d\n",
+ smu->msg_irq);
+ smu->msg_irq = NO_IRQ;
+ }
+ }
+
+ return 0;
+}
+arch_initcall(smu_late_init);
+
+/*
+ * sysfs visibility
+ */
+
+static void smu_expose_childs(void *unused)
+{
+ struct device_node *np;
+
+ for (np = NULL; (np = of_get_next_child(smu->of_node, np)) != NULL;) {
+ if (device_is_compatible(np, "smu-i2c")) {
+ char name[32];
+ u32 *reg = (u32 *)get_property(np, "reg", NULL);
+
+ if (reg == NULL)
+ continue;
+ sprintf(name, "smu-i2c-%02x", *reg);
+ of_platform_device_create(np, name, &smu->of_dev->dev);
+ }
+ }
+
+}
+
+static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL);
+
+static int smu_platform_probe(struct of_device* dev,
+ const struct of_device_id *match)
+{
+ if (!smu)
+ return -ENODEV;
+ smu->of_dev = dev;
+
+ /*
+ * Ok, we are matched, now expose all i2c busses. We have to defer
+ * that unfortunately or it would deadlock inside the device model
+ */
+ schedule_work(&smu_expose_childs_work);
+
+ return 0;
+}
+
+static struct of_device_id smu_platform_match[] =
+{
+ {
+ .type = "smu",
+ },
+ {},
+};
+
+static struct of_platform_driver smu_of_platform_driver =
+{
+ .name = "smu",
+ .match_table = smu_platform_match,
+ .probe = smu_platform_probe,
+};
+
+static int __init smu_init_sysfs(void)
+{
+ int rc;
+
+ /*
+ * Due to sysfs bogosity, a sysdev is not a real device, so
+ * we should in fact create both if we want sysdev semantics
+ * for power management.
+ * For now, we don't power manage machines with an SMU chip,
+ * I'm a bit too far from figuring out how that works with those
+ * new chipsets, but that will come back and bite us
+ */
+ rc = of_register_driver(&smu_of_platform_driver);
+ return 0;
+}
+
+device_initcall(smu_init_sysfs);
+
+struct of_device *smu_get_ofdev(void)
+{
+ if (!smu)
+ return NULL;
+ return smu->of_dev;
+}
+
+EXPORT_SYMBOL_GPL(smu_get_ofdev);
+
+/*
+ * i2c interface
+ */
+
+static void smu_i2c_complete_command(struct smu_i2c_cmd *cmd, int fail)
+{
+ void (*done)(struct smu_i2c_cmd *cmd, void *misc) = cmd->done;
+ void *misc = cmd->misc;
+ unsigned long flags;
+
+ /* Check for read case */
+ if (!fail && cmd->read) {
+ if (cmd->pdata[0] < 1)
+ fail = 1;
+ else
+ memcpy(cmd->info.data, &cmd->pdata[1],
+ cmd->info.datalen);
+ }
+
+ DPRINTK("SMU: completing, success: %d\n", !fail);
+
+ /* Update status and mark no pending i2c command with lock
+ * held so nobody comes in while we dequeue any pending
+ * next i2c command
+ */
+ spin_lock_irqsave(&smu->lock, flags);
+ smu->cmd_i2c_cur = NULL;
+ wmb();
+ cmd->status = fail ? -EIO : 0;
+
+ /* Is there another i2c command waiting ? */
+ if (!list_empty(&smu->cmd_i2c_list)) {
+ struct smu_i2c_cmd *newcmd;
+
+ /* Fetch it, new current, remove from list */
+ newcmd = list_entry(smu->cmd_i2c_list.next,
+ struct smu_i2c_cmd, link);
+ smu->cmd_i2c_cur = newcmd;
+ list_del(&newcmd->link);
+
+ /* Queue with low level smu */
+ list_add_tail(&newcmd->scmd.link, &smu->cmd_list);
+ if (smu->cmd_cur == NULL)
+ smu_start_cmd();
+ }
+ spin_unlock_irqrestore(&smu->lock, flags);
+
+ /* Call command completion handler if any */
+ if (done)
+ done(cmd, misc);
+
+}
+
+
+static void smu_i2c_retry(unsigned long data)
+{
+ struct smu_i2c_cmd *cmd = (struct smu_i2c_cmd *)data;
+
+ DPRINTK("SMU: i2c failure, requeuing...\n");
+
+ /* requeue command simply by resetting reply_len */
+ cmd->pdata[0] = 0xff;
+ cmd->scmd.reply_len = 0x10;
+ smu_queue_cmd(&cmd->scmd);
+}
+
+
+static void smu_i2c_low_completion(struct smu_cmd *scmd, void *misc)
+{
+ struct smu_i2c_cmd *cmd = misc;
+ int fail = 0;
+
+ DPRINTK("SMU: i2c compl. stage=%d status=%x pdata[0]=%x rlen: %x\n",
+ cmd->stage, scmd->status, cmd->pdata[0], scmd->reply_len);
+
+ /* Check for possible status */
+ if (scmd->status < 0)
+ fail = 1;
+ else if (cmd->read) {
+ if (cmd->stage == 0)
+ fail = cmd->pdata[0] != 0;
+ else
+ fail = cmd->pdata[0] >= 0x80;
+ } else {
+ fail = cmd->pdata[0] != 0;
+ }
+
+ /* Handle failures by requeuing command, after 5ms interval
+ */
+ if (fail && --cmd->retries > 0) {
+ DPRINTK("SMU: i2c failure, starting timer...\n");
+ smu->i2c_timer.function = smu_i2c_retry;
+ smu->i2c_timer.data = (unsigned long)cmd;
+ smu->i2c_timer.expires = jiffies + msecs_to_jiffies(5);
+ add_timer(&smu->i2c_timer);
+ return;
+ }
+
+ /* If failure or stage 1, command is complete */
+ if (fail || cmd->stage != 0) {
+ smu_i2c_complete_command(cmd, fail);
+ return;
+ }
+
+ DPRINTK("SMU: going to stage 1\n");
+
+ /* Ok, initial command complete, now poll status */
+ scmd->reply_buf = cmd->pdata;
+ scmd->reply_len = 0x10;
+ scmd->data_buf = cmd->pdata;
+ scmd->data_len = 1;
+ cmd->pdata[0] = 0;
+ cmd->stage = 1;
+ cmd->retries = 20;
+ smu_queue_cmd(scmd);
+}
+
+
+int smu_queue_i2c(struct smu_i2c_cmd *cmd)
+{
+ unsigned long flags;
+
+ if (smu == NULL)
+ return -ENODEV;
+
+ /* Fill most fields of scmd */
+ cmd->scmd.cmd = SMU_CMD_I2C_COMMAND;
+ cmd->scmd.done = smu_i2c_low_completion;
+ cmd->scmd.misc = cmd;
+ cmd->scmd.reply_buf = cmd->pdata;
+ cmd->scmd.reply_len = 0x10;
+ cmd->scmd.data_buf = (u8 *)(char *)&cmd->info;
+ cmd->scmd.status = 1;
+ cmd->stage = 0;
+ cmd->pdata[0] = 0xff;
+ cmd->retries = 20;
+ cmd->status = 1;
+
+ /* Check transfer type, sanitize some "info" fields
+ * based on transfer type and do more checking
+ */
+ cmd->info.caddr = cmd->info.devaddr;
+ cmd->read = cmd->info.devaddr & 0x01;
+ switch(cmd->info.type) {
+ case SMU_I2C_TRANSFER_SIMPLE:
+ memset(&cmd->info.sublen, 0, 4);
+ break;
+ case SMU_I2C_TRANSFER_COMBINED:
+ cmd->info.devaddr &= 0xfe;
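+ /* fall through: combined transfers also use the subaddress checks below */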
+ case SMU_I2C_TRANSFER_STDSUB:
+ if (cmd->info.sublen > 3)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Finish setting up command based on transfer direction
+ */
+ if (cmd->read) {
+ if (cmd->info.datalen > SMU_I2C_READ_MAX)
+ return -EINVAL;
+ memset(cmd->info.data, 0xff, cmd->info.datalen);
+ cmd->scmd.data_len = 9;
+ } else {
+ if (cmd->info.datalen > SMU_I2C_WRITE_MAX)
+ return -EINVAL;
+ cmd->scmd.data_len = 9 + cmd->info.datalen;
+ }
+
+ DPRINTK("SMU: i2c enqueuing command\n");
+ DPRINTK("SMU: %s, len=%d bus=%x addr=%x sub0=%x type=%x\n",
+ cmd->read ? "read" : "write", cmd->info.datalen,
+ cmd->info.bus, cmd->info.caddr,
+ cmd->info.subaddr[0], cmd->info.type);
+
+
+ /* Enqueue command in i2c list, and if empty, enqueue also in
+ * main command list
+ */
+ spin_lock_irqsave(&smu->lock, flags);
+ if (smu->cmd_i2c_cur == NULL) {
+ smu->cmd_i2c_cur = cmd;
+ list_add_tail(&cmd->scmd.link, &smu->cmd_list);
+ if (smu->cmd_cur == NULL)
+ smu_start_cmd();
+ } else
+ list_add_tail(&cmd->link, &smu->cmd_i2c_list);
+ spin_unlock_irqrestore(&smu->lock, flags);
+
+ return 0;
+}
+
+
+
+/*
+ * Userland driver interface
+ */
+
+
+static LIST_HEAD(smu_clist);
+static DEFINE_SPINLOCK(smu_clist_lock);
+
+enum smu_file_mode {
+ smu_file_commands,
+ smu_file_events,
+ smu_file_closing
+};
+
+struct smu_private
+{
+ struct list_head list;
+ enum smu_file_mode mode;
+ int busy;
+ struct smu_cmd cmd;
+ spinlock_t lock;
+ wait_queue_head_t wait;
+ u8 buffer[SMU_MAX_DATA];
+};
+
+
+static int smu_open(struct inode *inode, struct file *file)
+{
+ struct smu_private *pp;
+ unsigned long flags;
+
+ pp = kmalloc(sizeof(struct smu_private), GFP_KERNEL);
+ if (pp == 0)
+ return -ENOMEM;
+ memset(pp, 0, sizeof(struct smu_private));
+ spin_lock_init(&pp->lock);
+ pp->mode = smu_file_commands;
+ init_waitqueue_head(&pp->wait);
+
+ spin_lock_irqsave(&smu_clist_lock, flags);
+ list_add(&pp->list, &smu_clist);
+ spin_unlock_irqrestore(&smu_clist_lock, flags);
+ file->private_data = pp;
+
+ return 0;
+}
+
+
+static void smu_user_cmd_done(struct smu_cmd *cmd, void *misc)
+{
+ struct smu_private *pp = misc;
+
+ wake_up_all(&pp->wait);
+}
+
+
+static ssize_t smu_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct smu_private *pp = file->private_data;
+ unsigned long flags;
+ struct smu_user_cmd_hdr hdr;
+ int rc = 0;
+
+ if (pp->busy)
+ return -EBUSY;
+ else if (copy_from_user(&hdr, buf, sizeof(hdr)))
+ return -EFAULT;
+ else if (hdr.cmdtype == SMU_CMDTYPE_WANTS_EVENTS) {
+ pp->mode = smu_file_events;
+ return 0;
+ } else if (hdr.cmdtype != SMU_CMDTYPE_SMU)
+ return -EINVAL;
+ else if (pp->mode != smu_file_commands)
+ return -EBADFD;
+ else if (hdr.data_len > SMU_MAX_DATA)
+ return -EINVAL;
+
+ spin_lock_irqsave(&pp->lock, flags);
+ if (pp->busy) {
+ spin_unlock_irqrestore(&pp->lock, flags);
+ return -EBUSY;
+ }
+ pp->busy = 1;
+ pp->cmd.status = 1;
+ spin_unlock_irqrestore(&pp->lock, flags);
+
+ if (copy_from_user(pp->buffer, buf + sizeof(hdr), hdr.data_len)) {
+ pp->busy = 0;
+ return -EFAULT;
+ }
+
+ pp->cmd.cmd = hdr.cmd;
+ pp->cmd.data_len = hdr.data_len;
+ pp->cmd.reply_len = SMU_MAX_DATA;
+ pp->cmd.data_buf = pp->buffer;
+ pp->cmd.reply_buf = pp->buffer;
+ pp->cmd.done = smu_user_cmd_done;
+ pp->cmd.misc = pp;
+ rc = smu_queue_cmd(&pp->cmd);
+ if (rc < 0)
+ return rc;
+ return count;
+}
+
+
+static ssize_t smu_read_command(struct file *file, struct smu_private *pp,
+ char __user *buf, size_t count)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct smu_user_reply_hdr hdr;
+ unsigned long flags;
+ int size, rc = 0;
+
+ if (!pp->busy)
+ return 0;
+ if (count < sizeof(struct smu_user_reply_hdr))
+ return -EOVERFLOW;
+ spin_lock_irqsave(&pp->lock, flags);
+ if (pp->cmd.status == 1) {
+ if (file->f_flags & O_NONBLOCK) {
+ spin_unlock_irqrestore(&pp->lock, flags);
+ return -EAGAIN;
+ }
+ add_wait_queue(&pp->wait, &wait);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ rc = 0;
+ if (pp->cmd.status != 1)
+ break;
+ rc = -ERESTARTSYS;
+ if (signal_pending(current))
+ break;
+ spin_unlock_irqrestore(&pp->lock, flags);
+ schedule();
+ spin_lock_irqsave(&pp->lock, flags);
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&pp->wait, &wait);
+ }
+ spin_unlock_irqrestore(&pp->lock, flags);
+ if (rc)
+ return rc;
+ if (pp->cmd.status != 0)
+ pp->cmd.reply_len = 0;
+ size = sizeof(hdr) + pp->cmd.reply_len;
+ if (count < size)
+ size = count;
+ rc = size;
+ hdr.status = pp->cmd.status;
+ hdr.reply_len = pp->cmd.reply_len;
+ if (copy_to_user(buf, &hdr, sizeof(hdr)))
+ return -EFAULT;
+ size -= sizeof(hdr);
+ if (size && copy_to_user(buf + sizeof(hdr), pp->buffer, size))
+ return -EFAULT;
+ pp->busy = 0;
+
+ return rc;
+}
+
+
+static ssize_t smu_read_events(struct file *file, struct smu_private *pp,
+ char __user *buf, size_t count)
+{
+ /* Not implemented */
+ msleep_interruptible(1000);
+ return 0;
+}
+
+
+static ssize_t smu_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct smu_private *pp = file->private_data;
+
+ if (pp->mode == smu_file_commands)
+ return smu_read_command(file, pp, buf, count);
+ if (pp->mode == smu_file_events)
+ return smu_read_events(file, pp, buf, count);
+
+ return -EBADFD;
+}
+
+static unsigned int smu_fpoll(struct file *file, poll_table *wait)
+{
+ struct smu_private *pp = file->private_data;
+ unsigned int mask = 0;
+ unsigned long flags;
+
+ if (pp == 0)
+ return 0;
+
+ if (pp->mode == smu_file_commands) {
+ poll_wait(file, &pp->wait, wait);
+
+ spin_lock_irqsave(&pp->lock, flags);
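+ /* a reply is readable once the queued command has completed */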
+ if (pp->busy && pp->cmd.status != 1)
+ mask |= POLLIN;
+ spin_unlock_irqrestore(&pp->lock, flags);
+ } else if (pp->mode == smu_file_events) {
+ /* Not yet implemented */
+ }
+ return mask;
+}
+
+static int smu_release(struct inode *inode, struct file *file)
+{
+ struct smu_private *pp = file->private_data;
+ unsigned long flags;
+ unsigned int busy;
+
+ if (pp == 0)
+ return 0;
+
+ file->private_data = NULL;
+
+ /* Mark file as closing to avoid races with new request */
+ spin_lock_irqsave(&pp->lock, flags);
+ pp->mode = smu_file_closing;
+ busy = pp->busy;
+
+ /* Wait for any pending request to complete */
+ if (busy && pp->cmd.status == 1) {
+ DECLARE_WAITQUEUE(wait, current);
+
+ add_wait_queue(&pp->wait, &wait);
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (pp->cmd.status != 1)
+ break;
+ spin_unlock_irqrestore(&pp->lock, flags);
+ schedule();
+ spin_lock_irqsave(&pp->lock, flags);
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&pp->wait, &wait);
+ }
+ spin_unlock_irqrestore(&pp->lock, flags);
+
+ spin_lock_irqsave(&smu_clist_lock, flags);
+ list_del(&pp->list);
+ spin_unlock_irqrestore(&smu_clist_lock, flags);
+ kfree(pp);
+
+ return 0;
+}
+
+
+static struct file_operations smu_device_fops __pmacdata = {
+ .llseek = no_llseek,
+ .read = smu_read,
+ .write = smu_write,
+ .poll = smu_fpoll,
+ .open = smu_open,
+ .release = smu_release,
+};
+
+static struct miscdevice pmu_device __pmacdata = {
+ MISC_DYNAMIC_MINOR, "smu", &smu_device_fops
+};
+
+static int smu_device_init(void)
+{
+ if (!smu)
+ return -ENODEV;
+ if (misc_register(&pmu_device) < 0)
+ printk(KERN_ERR "via-pmu: cannot register misc device.\n");
+ return 0;
+}
+device_initcall(smu_device_init);
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index c9ca1118e44..f38696622eb 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -599,7 +599,7 @@ thermostat_init(void)
sensor_location[2] = "?";
}
- of_dev = of_platform_device_create(np, "temperatures");
+ of_dev = of_platform_device_create(np, "temperatures", NULL);
if (of_dev == NULL) {
printk(KERN_ERR "Can't register temperatures device !\n");
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 703e3197331..cc507ceef15 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -2051,7 +2051,7 @@ static int __init therm_pm72_init(void)
return -ENODEV;
}
}
- of_dev = of_platform_device_create(np, "temperature");
+ of_dev = of_platform_device_create(np, "temperature", NULL);
if (of_dev == NULL) {
printk(KERN_ERR "Can't register FCU platform device !\n");
return -ENODEV;
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index cbb72eb0426..6aaa1df1a64 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -504,7 +504,7 @@ g4fan_init( void )
}
if( !(np=of_find_node_by_name(NULL, "fan")) )
return -ENODEV;
- x.of_dev = of_platform_device_create( np, "temperature" );
+ x.of_dev = of_platform_device_create(np, "temperature", NULL);
of_node_put( np );
if( !x.of_dev ) {
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 200a0688f71..54ec737195e 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -230,11 +230,20 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi
static void __hash_remove(struct hash_cell *hc)
{
+ struct dm_table *table;
+
/* remove from the dev hash */
list_del(&hc->uuid_list);
list_del(&hc->name_list);
unregister_with_devfs(hc);
dm_set_mdptr(hc->md, NULL);
+
+ table = dm_get_table(hc->md);
+ if (table) {
+ dm_table_event(table);
+ dm_table_put(table);
+ }
+
dm_put(hc->md);
if (hc->new_map)
dm_table_put(hc->new_map);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 785806bdb24..f9b7b32d5d5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -329,13 +329,17 @@ static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
/*
* If we run out of usable paths, should we queue I/O or error it?
*/
-static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path)
+static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
+ unsigned save_old_value)
{
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
- m->saved_queue_if_no_path = m->queue_if_no_path;
+ if (save_old_value)
+ m->saved_queue_if_no_path = m->queue_if_no_path;
+ else
+ m->saved_queue_if_no_path = queue_if_no_path;
m->queue_if_no_path = queue_if_no_path;
if (!m->queue_if_no_path && m->queue_size)
queue_work(kmultipathd, &m->process_queued_ios);
@@ -677,7 +681,7 @@ static int parse_features(struct arg_set *as, struct multipath *m,
return 0;
if (!strnicmp(shift(as), MESG_STR("queue_if_no_path")))
- return queue_if_no_path(m, 1);
+ return queue_if_no_path(m, 1, 0);
else {
ti->error = "Unrecognised multipath feature request";
return -EINVAL;
@@ -1077,7 +1081,7 @@ static void multipath_presuspend(struct dm_target *ti)
{
struct multipath *m = (struct multipath *) ti->private;
- queue_if_no_path(m, 0);
+ queue_if_no_path(m, 0, 1);
}
/*
@@ -1222,9 +1226,9 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
if (argc == 1) {
if (!strnicmp(argv[0], MESG_STR("queue_if_no_path")))
- return queue_if_no_path(m, 1);
+ return queue_if_no_path(m, 1, 0);
else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path")))
- return queue_if_no_path(m, 0);
+ return queue_if_no_path(m, 0, 0);
}
if (argc != 2)
diff --git a/drivers/md/raid6.h b/drivers/md/raid6.h
index f80ee6350ed..31cbee71365 100644
--- a/drivers/md/raid6.h
+++ b/drivers/md/raid6.h
@@ -69,9 +69,13 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
#define __init
#define __exit
#define __attribute_const__ __attribute__((const))
+#define noinline __attribute__((noinline))
#define preempt_enable()
#define preempt_disable()
+#define cpu_has_feature(x) 1
+#define enable_kernel_altivec()
+#define disable_kernel_altivec()
#endif /* __KERNEL__ */
diff --git a/drivers/md/raid6algos.c b/drivers/md/raid6algos.c
index acf386fc4b4..51c63c0cf1c 100644
--- a/drivers/md/raid6algos.c
+++ b/drivers/md/raid6algos.c
@@ -19,6 +19,7 @@
#include "raid6.h"
#ifndef __KERNEL__
#include <sys/mman.h>
+#include <stdio.h>
#endif
struct raid6_calls raid6_call;
diff --git a/drivers/md/raid6altivec.uc b/drivers/md/raid6altivec.uc
index 1de8f030eee..b9afd35b881 100644
--- a/drivers/md/raid6altivec.uc
+++ b/drivers/md/raid6altivec.uc
@@ -27,16 +27,20 @@
#ifdef CONFIG_ALTIVEC
#include <altivec.h>
-#include <asm/system.h>
-#include <asm/cputable.h>
+#ifdef __KERNEL__
+# include <asm/system.h>
+# include <asm/cputable.h>
+#endif
/*
- * This is the C data type to use
+ * This is the C data type to use. We use a vector of
+ * signed char so vec_cmpgt() will generate the right
+ * instruction.
*/
-typedef vector unsigned char unative_t;
+typedef vector signed char unative_t;
-#define NBYTES(x) ((vector unsigned char) {x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x})
+#define NBYTES(x) ((vector signed char) {x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x})
#define NSIZE sizeof(unative_t)
/*
@@ -108,7 +112,11 @@ int raid6_have_altivec(void);
int raid6_have_altivec(void)
{
/* This assumes either all CPUs have Altivec or none does */
+# ifdef __KERNEL__
return cpu_has_feature(CPU_FTR_ALTIVEC);
+# else
+ return 1;
+# endif
}
#endif
diff --git a/drivers/md/raid6test/Makefile b/drivers/md/raid6test/Makefile
index 55780672860..78e0396adf2 100644
--- a/drivers/md/raid6test/Makefile
+++ b/drivers/md/raid6test/Makefile
@@ -8,6 +8,8 @@ OPTFLAGS = -O2 # Adjust as desired
CFLAGS = -I.. -g $(OPTFLAGS)
LD = ld
PERL = perl
+AR = ar
+RANLIB = ranlib
.c.o:
$(CC) $(CFLAGS) -c -o $@ $<
@@ -18,18 +20,33 @@ PERL = perl
%.uc: ../%.uc
cp -f $< $@
-all: raid6.o raid6test
+all: raid6.a raid6test
-raid6.o: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \
+raid6.a: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \
raid6int32.o \
raid6mmx.o raid6sse1.o raid6sse2.o \
+ raid6altivec1.o raid6altivec2.o raid6altivec4.o raid6altivec8.o \
raid6recov.o raid6algos.o \
raid6tables.o
- $(LD) -r -o $@ $^
+ rm -f $@
+ $(AR) cq $@ $^
+ $(RANLIB) $@
-raid6test: raid6.o test.c
+raid6test: test.c raid6.a
$(CC) $(CFLAGS) -o raid6test $^
+raid6altivec1.c: raid6altivec.uc ../unroll.pl
+ $(PERL) ../unroll.pl 1 < raid6altivec.uc > $@
+
+raid6altivec2.c: raid6altivec.uc ../unroll.pl
+ $(PERL) ../unroll.pl 2 < raid6altivec.uc > $@
+
+raid6altivec4.c: raid6altivec.uc ../unroll.pl
+ $(PERL) ../unroll.pl 4 < raid6altivec.uc > $@
+
+raid6altivec8.c: raid6altivec.uc ../unroll.pl
+ $(PERL) ../unroll.pl 8 < raid6altivec.uc > $@
+
raid6int1.c: raid6int.uc ../unroll.pl
$(PERL) ../unroll.pl 1 < raid6int.uc > $@
@@ -52,7 +69,7 @@ raid6tables.c: mktables
./mktables > raid6tables.c
clean:
- rm -f *.o mktables mktables.c raid6int.uc raid6*.c raid6test
+ rm -f *.o *.a mktables mktables.c raid6int.uc raid6*.c raid6test
spotless: clean
rm -f *~
diff --git a/drivers/media/dvb/frontends/tda10021.c b/drivers/media/dvb/frontends/tda10021.c
index 87d5f4d8790..eaf130e666d 100644
--- a/drivers/media/dvb/frontends/tda10021.c
+++ b/drivers/media/dvb/frontends/tda10021.c
@@ -100,8 +100,8 @@ static u8 tda10021_readreg (struct tda10021_state* state, u8 reg)
ret = i2c_transfer (state->i2c, msg, 2);
if (ret != 2)
- printk("DVB: TDA10021(%d): %s: readreg error (ret == %i)\n",
- state->frontend.dvb->num, __FUNCTION__, ret);
+ printk("DVB: TDA10021: %s: readreg error (ret == %i)\n",
+ __FUNCTION__, ret);
return b1[0];
}
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 8b4ad70dd1b..877c770558e 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -29,7 +29,7 @@
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
-#include <linux/ioport.h> /* check_region, request_region */
+#include <linux/ioport.h> /* request_region */
#include <linux/delay.h> /* udelay */
#include <asm/io.h> /* outb, outb_p */
#include <asm/uaccess.h> /* copy to/from user */
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index 013c835ed91..5319a9c9a97 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -26,7 +26,7 @@
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
-#include <linux/ioport.h> /* check_region, request_region */
+#include <linux/ioport.h> /* request_region */
#include <linux/delay.h> /* udelay */
#include <asm/io.h> /* outb, outb_p */
#include <asm/uaccess.h> /* copy to/from user */
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 53d399b6652..022913da8c5 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -29,7 +29,7 @@
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
-#include <linux/ioport.h> /* check_region, request_region */
+#include <linux/ioport.h> /* request_region */
#include <linux/delay.h> /* udelay */
#include <asm/io.h> /* outb, outb_p */
#include <asm/uaccess.h> /* copy to/from user */
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index 202bfe6819b..6418f03b9ce 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -17,7 +17,7 @@
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
-#include <linux/ioport.h> /* check_region, request_region */
+#include <linux/ioport.h> /* request_region */
#include <linux/delay.h> /* udelay */
#include <asm/io.h> /* outb, outb_p */
#include <asm/uaccess.h> /* copy to/from user */
diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c
index c00245d4d24..b2256d675b4 100644
--- a/drivers/media/radio/radio-rtrack2.c
+++ b/drivers/media/radio/radio-rtrack2.c
@@ -10,7 +10,7 @@
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
-#include <linux/ioport.h> /* check_region, request_region */
+#include <linux/ioport.h> /* request_region */
#include <linux/delay.h> /* udelay */
#include <asm/io.h> /* outb, outb_p */
#include <asm/uaccess.h> /* copy to/from user */
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index 3a464a09221..6f03ce4dd7b 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -18,7 +18,7 @@
#include <linux/kernel.h> /* __setup */
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
-#include <linux/ioport.h> /* check_region, request_region */
+#include <linux/ioport.h> /* request_region */
#include <linux/delay.h> /* udelay */
#include <linux/videodev.h> /* kernel radio structs */
#include <linux/isapnp.h>
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 0732efda6a9..71971e9bb34 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -14,7 +14,7 @@
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
-#include <linux/ioport.h> /* check_region, request_region */
+#include <linux/ioport.h> /* request_region */
#include <linux/delay.h> /* udelay */
#include <asm/io.h> /* outb, outb_p */
#include <asm/uaccess.h> /* copy to/from user */
diff --git a/drivers/media/radio/radio-terratec.c b/drivers/media/radio/radio-terratec.c
index 248d67fde03..b03573c6840 100644
--- a/drivers/media/radio/radio-terratec.c
+++ b/drivers/media/radio/radio-terratec.c
@@ -25,7 +25,7 @@
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
-#include <linux/ioport.h> /* check_region, request_region */
+#include <linux/ioport.h> /* request_region */
#include <linux/delay.h> /* udelay */
#include <asm/io.h> /* outb, outb_p */
#include <asm/uaccess.h> /* copy to/from user */
diff --git a/drivers/media/radio/radio-typhoon.c b/drivers/media/radio/radio-typhoon.c
index d7da901ebe9..f304f3c1476 100644
--- a/drivers/media/radio/radio-typhoon.c
+++ b/drivers/media/radio/radio-typhoon.c
@@ -31,7 +31,7 @@
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
-#include <linux/ioport.h> /* check_region, request_region */
+#include <linux/ioport.h> /* request_region */
#include <linux/proc_fs.h> /* radio card status report */
#include <asm/io.h> /* outb, outb_p */
#include <asm/uaccess.h> /* copy to/from user */
diff --git a/drivers/media/radio/radio-zoltrix.c b/drivers/media/radio/radio-zoltrix.c
index 342f92df4ab..4c6d6fb4903 100644
--- a/drivers/media/radio/radio-zoltrix.c
+++ b/drivers/media/radio/radio-zoltrix.c
@@ -28,7 +28,7 @@
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
-#include <linux/ioport.h> /* check_region, request_region */
+#include <linux/ioport.h> /* request_region */
#include <linux/delay.h> /* udelay, msleep */
#include <asm/io.h> /* outb, outb_p */
#include <asm/uaccess.h> /* copy to/from user */
diff --git a/drivers/media/video/bttv-cards.c b/drivers/media/video/bttv-cards.c
index 190977a1e54..6c332800d6a 100644
--- a/drivers/media/video/bttv-cards.c
+++ b/drivers/media/video/bttv-cards.c
@@ -2398,7 +2398,7 @@ struct tvcard bttv_tvcards[] = {
.svhs = 2,
.muxsel = { 2, 3 },
.gpiomask = 0x00e00007,
- .audiomux = { 0x00400005, 0, 0, 0, 0, 0 },
+ .audiomux = { 0x00400005, 0, 0x00000001, 0, 0x00c00007, 0 },
.no_msp34xx = 1,
.no_tda9875 = 1,
.no_tda7432 = 1,
diff --git a/drivers/media/video/bttv-driver.c b/drivers/media/video/bttv-driver.c
index a564321db2f..c062a017491 100644
--- a/drivers/media/video/bttv-driver.c
+++ b/drivers/media/video/bttv-driver.c
@@ -763,21 +763,21 @@ static void set_pll(struct bttv *btv)
/* no PLL needed */
if (btv->pll.pll_current == 0)
return;
- vprintk(KERN_INFO "bttv%d: PLL can sleep, using XTAL (%d).\n",
- btv->c.nr,btv->pll.pll_ifreq);
+ bttv_printk(KERN_INFO "bttv%d: PLL can sleep, using XTAL (%d).\n",
+ btv->c.nr,btv->pll.pll_ifreq);
btwrite(0x00,BT848_TGCTRL);
btwrite(0x00,BT848_PLL_XCI);
btv->pll.pll_current = 0;
return;
}
- vprintk(KERN_INFO "bttv%d: PLL: %d => %d ",btv->c.nr,
- btv->pll.pll_ifreq, btv->pll.pll_ofreq);
+ bttv_printk(KERN_INFO "bttv%d: PLL: %d => %d ",btv->c.nr,
+ btv->pll.pll_ifreq, btv->pll.pll_ofreq);
set_pll_freq(btv, btv->pll.pll_ifreq, btv->pll.pll_ofreq);
for (i=0; i<10; i++) {
/* Let other people run while the PLL stabilizes */
- vprintk(".");
+ bttv_printk(".");
msleep(10);
if (btread(BT848_DSTATUS) & BT848_DSTATUS_PLOCK) {
@@ -785,12 +785,12 @@ static void set_pll(struct bttv *btv)
} else {
btwrite(0x08,BT848_TGCTRL);
btv->pll.pll_current = btv->pll.pll_ofreq;
- vprintk(" ok\n");
+ bttv_printk(" ok\n");
return;
}
}
btv->pll.pll_current = -1;
- vprintk("failed\n");
+ bttv_printk("failed\n");
return;
}
diff --git a/drivers/media/video/bttvp.h b/drivers/media/video/bttvp.h
index 9b0b7ca035f..7a312f79340 100644
--- a/drivers/media/video/bttvp.h
+++ b/drivers/media/video/bttvp.h
@@ -221,7 +221,7 @@ extern void bttv_gpio_tracking(struct bttv *btv, char *comment);
extern int init_bttv_i2c(struct bttv *btv);
extern int fini_bttv_i2c(struct bttv *btv);
-#define vprintk if (bttv_verbose) printk
+#define bttv_printk if (bttv_verbose) printk
#define dprintk if (bttv_debug >= 1) printk
#define d2printk if (bttv_debug >= 2) printk
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index 8c08b7f1ad2..b7ec9bf4508 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -1397,7 +1397,7 @@ static void destroy_proc_cpia_cam(struct cam_data *cam)
static void proc_cpia_create(void)
{
- cpia_proc_root = create_proc_entry("cpia", S_IFDIR, NULL);
+ cpia_proc_root = proc_mkdir("cpia", NULL);
if (cpia_proc_root)
cpia_proc_root->owner = THIS_MODULE;
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index c9106b1d79d..4334744652d 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -221,9 +221,7 @@ static int lgdt330x_pll_set(struct dvb_frontend* fe,
int err;
/* Put the analog decoder in standby to keep it quiet */
- if (core->tda9887_conf) {
- cx88_call_i2c_clients (dev->core, TUNER_SET_STANDBY, NULL);
- }
+ cx88_call_i2c_clients (dev->core, TUNER_SET_STANDBY, NULL);
dvb_pll_configure(core->pll_desc, buf, params->frequency, 0);
dprintk(1, "%s: tuner at 0x%02x bytes: 0x%02x 0x%02x 0x%02x 0x%02x\n",
@@ -402,6 +400,9 @@ static int dvb_register(struct cx8802_dev *dev)
dev->dvb.frontend->ops->info.frequency_max = dev->core->pll_desc->max;
}
+ /* Put the analog decoder in standby to keep it quiet */
+ cx88_call_i2c_clients (dev->core, TUNER_SET_STANDBY, NULL);
+
/* register everything */
return videobuf_dvb_register(&dev->dvb, THIS_MODULE, dev);
}
diff --git a/drivers/media/video/rds.h b/drivers/media/video/rds.h
index 30337d0f1a8..0d30eb744e6 100644
--- a/drivers/media/video/rds.h
+++ b/drivers/media/video/rds.h
@@ -31,7 +31,7 @@
struct rds_command {
unsigned int block_count;
int result;
- unsigned char *buffer;
+ unsigned char __user *buffer;
struct file *instance;
poll_table *event_list;
};
diff --git a/drivers/media/video/saa6588.c b/drivers/media/video/saa6588.c
index 1a657a70ff4..72b70eb5da1 100644
--- a/drivers/media/video/saa6588.c
+++ b/drivers/media/video/saa6588.c
@@ -157,7 +157,7 @@ static struct i2c_client client_template;
/* ---------------------------------------------------------------------- */
-static int block_to_user_buf(struct saa6588 *s, unsigned char *user_buf)
+static int block_to_user_buf(struct saa6588 *s, unsigned char __user *user_buf)
{
int i;
@@ -191,7 +191,7 @@ static void read_from_buf(struct saa6588 *s, struct rds_command *a)
{
unsigned long flags;
- unsigned char *buf_ptr = a->buffer; /* This is a user space buffer! */
+ unsigned char __user *buf_ptr = a->buffer;
unsigned int i;
unsigned int rd_blocks;
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig
index 33f209a39cb..1883d22cffe 100644
--- a/drivers/message/fusion/Kconfig
+++ b/drivers/message/fusion/Kconfig
@@ -35,6 +35,23 @@ config FUSION_FC
LSIFC929X
LSIFC929XL
+config FUSION_SAS
+ tristate "Fusion MPT ScsiHost drivers for SAS"
+ depends on PCI && SCSI
+ select FUSION
+ select SCSI_SAS_ATTRS
+ ---help---
+ SCSI HOST support for SAS host adapters.
+
+ List of supported controllers:
+
+ LSISAS1064
+ LSISAS1066
+ LSISAS1068
+ LSISAS1064E
+ LSISAS1066E
+ LSISAS1068E
+
config FUSION_MAX_SGE
int "Maximum number of scatter gather entries (16 - 128)"
depends on FUSION
diff --git a/drivers/message/fusion/Makefile b/drivers/message/fusion/Makefile
index 1d2f9db813c..8a2e2657f4c 100644
--- a/drivers/message/fusion/Makefile
+++ b/drivers/message/fusion/Makefile
@@ -34,5 +34,6 @@
obj-$(CONFIG_FUSION_SPI) += mptbase.o mptscsih.o mptspi.o
obj-$(CONFIG_FUSION_FC) += mptbase.o mptscsih.o mptfc.o
+obj-$(CONFIG_FUSION_SAS) += mptbase.o mptscsih.o mptsas.o
obj-$(CONFIG_FUSION_CTL) += mptctl.o
obj-$(CONFIG_FUSION_LAN) += mptlan.o
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index f517d0692d5..790a2932ded 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -135,13 +135,12 @@ static void mpt_adapter_dispose(MPT_ADAPTER *ioc);
static void MptDisplayIocCapabilities(MPT_ADAPTER *ioc);
static int MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag);
-//static u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
static int GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason);
static int GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
static int SendIocInit(MPT_ADAPTER *ioc, int sleepFlag);
static int SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
static int mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag);
-static int mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag);
+static int mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag);
static int mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
static int KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag);
@@ -152,6 +151,7 @@ static int WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
static int GetLanConfigPages(MPT_ADAPTER *ioc);
static int GetFcPortPage0(MPT_ADAPTER *ioc, int portnum);
static int GetIoUnitPage2(MPT_ADAPTER *ioc);
+int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
@@ -159,6 +159,8 @@ static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
static void mpt_timer_expired(unsigned long data);
static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch);
static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
+static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
+static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);
#ifdef CONFIG_PROC_FS
static int procmpt_summary_read(char *buf, char **start, off_t offset,
@@ -175,6 +177,7 @@ static int ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *
static void mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
static void mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info);
+static void mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info);
/* module entry point */
static int __init fusion_init (void);
@@ -206,6 +209,144 @@ pci_enable_io_access(struct pci_dev *pdev)
pci_write_config_word(pdev, PCI_COMMAND, command_reg);
}
+/*
+ * Process turbo (context) reply...
+ */
+static void
+mpt_turbo_reply(MPT_ADAPTER *ioc, u32 pa)
+{
+ MPT_FRAME_HDR *mf = NULL;
+ MPT_FRAME_HDR *mr = NULL;
+ int req_idx = 0;
+ int cb_idx;
+
+ dmfprintk((MYIOC_s_INFO_FMT "Got TURBO reply req_idx=%08x\n",
+ ioc->name, pa));
+
+ switch (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT) {
+ case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
+ req_idx = pa & 0x0000FFFF;
+ cb_idx = (pa & 0x00FF0000) >> 16;
+ mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
+ break;
+ case MPI_CONTEXT_REPLY_TYPE_LAN:
+ cb_idx = mpt_lan_index;
+ /*
+ * Blind set of mf to NULL here was fatal
+ * after lan_reply says "freeme"
+ * Fix sort of combined with an optimization here;
+ * added explicit check for case where lan_reply
+ * was just returning 1 and doing nothing else.
+ * For this case skip the callback, but set up
+ * proper mf value first here:-)
+ */
+ if ((pa & 0x58000000) == 0x58000000) {
+ req_idx = pa & 0x0000FFFF;
+ mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
+ mpt_free_msg_frame(ioc, mf);
+ mb();
+ return;
+ }
+ mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
+ break;
+ case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
+ cb_idx = mpt_stm_index;
+ mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
+ break;
+ default:
+ cb_idx = 0;
+ BUG();
+ }
+
+ /* Check for (valid) IO callback! */
+ if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
+ MptCallbacks[cb_idx] == NULL) {
+ printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
+ ioc->name, __FUNCTION__, cb_idx);
+ goto out;
+ }
+
+ if (MptCallbacks[cb_idx](ioc, mf, mr))
+ mpt_free_msg_frame(ioc, mf);
+ out:
+ mb();
+}
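+
+/*
+ * Illustrative decode (a sketch, not driver logic; the value and the
+ * callback slot are hypothetical): for a SCSI initiator completion with
+ * pa == 0x00030005, the switch above yields
+ *
+ *   pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT -> MPI_CONTEXT_REPLY_TYPE_SCSI_INIT
+ *   pa & 0x0000FFFF                    -> req_idx = 0x0005
+ *   (pa & 0x00FF0000) >> 16            -> cb_idx  = 3
+ *
+ * so, assuming a protocol driver is registered in MptCallbacks[3], the
+ * reply is routed there with message frame 5, and the frame is returned
+ * to the FreeQ if the callback asks for it.
+ */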
+
+static void
+mpt_reply(MPT_ADAPTER *ioc, u32 pa)
+{
+ MPT_FRAME_HDR *mf;
+ MPT_FRAME_HDR *mr;
+ int req_idx;
+ int cb_idx;
+ int freeme;
+
+ u32 reply_dma_low;
+ u16 ioc_stat;
+
+ /* non-TURBO reply! Hmmm, something may be up...
+ * Newest turbo reply mechanism; get address
+ * via left shift 1 (get rid of MPI_ADDRESS_REPLY_A_BIT)!
+ */
+
+ /* Map DMA address of reply header to cpu address.
+ * pa is 32 bits - but the dma address may be 32 or 64 bits
+ * get offset based only on the low addresses
+ */
+
+ reply_dma_low = (pa <<= 1);
+ mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
+ (reply_dma_low - ioc->reply_frames_low_dma));
+
+ req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
+ cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
+ mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
+
+ dmfprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
+ ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
+ DBG_DUMP_REPLY_FRAME(mr)
+
+ /* Check/log IOC log info
+ */
+ ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
+ if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
+ u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
+ if (ioc->bus_type == FC)
+ mpt_fc_log_info(ioc, log_info);
+ else if (ioc->bus_type == SCSI)
+ mpt_sp_log_info(ioc, log_info);
+ else if (ioc->bus_type == SAS)
+ mpt_sas_log_info(ioc, log_info);
+ }
+ if (ioc_stat & MPI_IOCSTATUS_MASK) {
+ if (ioc->bus_type == SCSI &&
+ cb_idx != mpt_stm_index &&
+ cb_idx != mpt_lan_index)
+ mpt_sp_ioc_info(ioc, (u32)ioc_stat, mf);
+ }
+
+
+ /* Check for (valid) IO callback! */
+ if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
+ MptCallbacks[cb_idx] == NULL) {
+ printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
+ ioc->name, __FUNCTION__, cb_idx);
+ freeme = 0;
+ goto out;
+ }
+
+ freeme = MptCallbacks[cb_idx](ioc, mf, mr);
+
+ out:
+ /* Flush (non-TURBO) reply with a WRITE! */
+ CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);
+
+ if (freeme)
+ mpt_free_msg_frame(ioc, mf);
+ mb();
+}
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mpt_interrupt - MPT adapter (IOC) specific interrupt handler.
@@ -227,164 +368,21 @@ pci_enable_io_access(struct pci_dev *pdev)
static irqreturn_t
mpt_interrupt(int irq, void *bus_id, struct pt_regs *r)
{
- MPT_ADAPTER *ioc;
- MPT_FRAME_HDR *mf;
- MPT_FRAME_HDR *mr;
- u32 pa;
- int req_idx;
- int cb_idx;
- int type;
- int freeme;
-
- ioc = (MPT_ADAPTER *)bus_id;
+ MPT_ADAPTER *ioc = bus_id;
+ u32 pa;
/*
* Drain the reply FIFO!
- *
- * NOTES: I've seen up to 10 replies processed in this loop, so far...
- * Update: I've seen up to 9182 replies processed in this loop! ??
- * Update: Limit ourselves to processing max of N replies
- * (bottom of loop).
*/
while (1) {
-
- if ((pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo)) == 0xFFFFFFFF)
+ pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
+ if (pa == 0xFFFFFFFF)
return IRQ_HANDLED;
-
- cb_idx = 0;
- freeme = 0;
-
- /*
- * Check for non-TURBO reply!
- */
- if (pa & MPI_ADDRESS_REPLY_A_BIT) {
- u32 reply_dma_low;
- u16 ioc_stat;
-
- /* non-TURBO reply! Hmmm, something may be up...
- * Newest turbo reply mechanism; get address
- * via left shift 1 (get rid of MPI_ADDRESS_REPLY_A_BIT)!
- */
-
- /* Map DMA address of reply header to cpu address.
- * pa is 32 bits - but the dma address may be 32 or 64 bits
- * get offset based only only the low addresses
- */
- reply_dma_low = (pa = (pa << 1));
- mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
- (reply_dma_low - ioc->reply_frames_low_dma));
-
- req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
- cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
- mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
-
- dmfprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
- ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
- DBG_DUMP_REPLY_FRAME(mr)
-
- /* Check/log IOC log info
- */
- ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
- if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
- u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
- if (ioc->bus_type == FC)
- mpt_fc_log_info(ioc, log_info);
- else if (ioc->bus_type == SCSI)
- mpt_sp_log_info(ioc, log_info);
- }
- if (ioc_stat & MPI_IOCSTATUS_MASK) {
- if (ioc->bus_type == SCSI)
- mpt_sp_ioc_info(ioc, (u32)ioc_stat, mf);
- }
- } else {
- /*
- * Process turbo (context) reply...
- */
- dmfprintk((MYIOC_s_INFO_FMT "Got TURBO reply req_idx=%08x\n", ioc->name, pa));
- type = (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT);
- if (type == MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET) {
- cb_idx = mpt_stm_index;
- mf = NULL;
- mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
- } else if (type == MPI_CONTEXT_REPLY_TYPE_LAN) {
- cb_idx = mpt_lan_index;
- /* Blind set of mf to NULL here was fatal
- * after lan_reply says "freeme"
- * Fix sort of combined with an optimization here;
- * added explicit check for case where lan_reply
- * was just returning 1 and doing nothing else.
- * For this case skip the callback, but set up
- * proper mf value first here:-)
- */
- if ((pa & 0x58000000) == 0x58000000) {
- req_idx = pa & 0x0000FFFF;
- mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
- freeme = 1;
- /*
- * IMPORTANT! Invalidate the callback!
- */
- cb_idx = 0;
- } else {
- mf = NULL;
- }
- mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
- } else {
- req_idx = pa & 0x0000FFFF;
- cb_idx = (pa & 0x00FF0000) >> 16;
- mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
- mr = NULL;
- }
- pa = 0; /* No reply flush! */
- }
-
-#ifdef MPT_DEBUG_IRQ
- if (ioc->bus_type == SCSI) {
- /* Verify mf, mr are reasonable.
- */
- if ((mf) && ((mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))
- || (mf < ioc->req_frames)) ) {
- printk(MYIOC_s_WARN_FMT
- "mpt_interrupt: Invalid mf (%p)!\n", ioc->name, (void *)mf);
- cb_idx = 0;
- pa = 0;
- freeme = 0;
- }
- if ((pa) && (mr) && ((mr >= MPT_INDEX_2_RFPTR(ioc, ioc->req_depth))
- || (mr < ioc->reply_frames)) ) {
- printk(MYIOC_s_WARN_FMT
- "mpt_interrupt: Invalid rf (%p)!\n", ioc->name, (void *)mr);
- cb_idx = 0;
- pa = 0;
- freeme = 0;
- }
- if (cb_idx > (MPT_MAX_PROTOCOL_DRIVERS-1)) {
- printk(MYIOC_s_WARN_FMT
- "mpt_interrupt: Invalid cb_idx (%d)!\n", ioc->name, cb_idx);
- cb_idx = 0;
- pa = 0;
- freeme = 0;
- }
- }
-#endif
-
- /* Check for (valid) IO callback! */
- if (cb_idx) {
- /* Do the callback! */
- freeme = (*(MptCallbacks[cb_idx]))(ioc, mf, mr);
- }
-
- if (pa) {
- /* Flush (non-TURBO) reply with a WRITE! */
- CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);
- }
-
- if (freeme) {
- /* Put Request back on FreeQ! */
- mpt_free_msg_frame(ioc, mf);
- }
-
- mb();
- } /* drain reply FIFO */
+ else if (pa & MPI_ADDRESS_REPLY_A_BIT)
+ mpt_reply(ioc, pa);
+ else
+ mpt_turbo_reply(ioc, pa);
+ }
return IRQ_HANDLED;
}
@@ -509,6 +507,14 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
pCfg->wait_done = 1;
wake_up(&mpt_waitq);
}
+ } else if (func == MPI_FUNCTION_SAS_IO_UNIT_CONTROL) {
+ /* we should be always getting a reply frame */
+ memcpy(ioc->persist_reply_frame, reply,
+ min(MPT_DEFAULT_FRAME_SIZE,
+ 4*reply->u.reply.MsgLength));
+ del_timer(&ioc->persist_timer);
+ ioc->persist_wait_done = 1;
+ wake_up(&mpt_waitq);
} else {
printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n",
ioc->name, func);
@@ -750,6 +756,7 @@ mpt_get_msg_frame(int handle, MPT_ADAPTER *ioc)
mf = list_entry(ioc->FreeQ.next, MPT_FRAME_HDR,
u.frame.linkage.list);
list_del(&mf->u.frame.linkage.list);
+ mf->u.frame.linkage.arg1 = 0;
mf->u.frame.hwhdr.msgctxu.fld.cb_idx = handle; /* byte */
req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
/* u16! */
@@ -845,6 +852,7 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
/* Put Request back on FreeQ! */
spin_lock_irqsave(&ioc->FreeQlock, flags);
+ mf->u.frame.linkage.arg1 = 0xdeadbeaf; /* signature to know if this mf is freed */
list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
#ifdef MFCNT
ioc->mfcnt--;
@@ -971,12 +979,123 @@ mpt_send_handshake_request(int handle, MPT_ADAPTER *ioc, int reqBytes, u32 *req,
/* Make sure there are no doorbells */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
-
+
return r;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
+ * mpt_host_page_access_control - provides mechanism for the host
+ * driver to control the IOC's Host Page Buffer access.
+ * @ioc: Pointer to MPT adapter structure
+ * @access_control_value: define bits below
+ *
+ * Access Control Value - bits[15:12]
+ * 0h Reserved
+ * 1h Enable Access { MPI_DB_HPBAC_ENABLE_ACCESS }
+ * 2h Disable Access { MPI_DB_HPBAC_DISABLE_ACCESS }
+ * 3h Free Buffer { MPI_DB_HPBAC_FREE_BUFFER }
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+
+static int
+mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag)
+{
+ int r = 0;
+
+ /* return if in use */
+ if (CHIPREG_READ32(&ioc->chip->Doorbell)
+ & MPI_DOORBELL_ACTIVE)
+ return -1;
+
+ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+
+ CHIPREG_WRITE32(&ioc->chip->Doorbell,
+ ((MPI_FUNCTION_HOST_PAGEBUF_ACCESS_CONTROL
+ <<MPI_DOORBELL_FUNCTION_SHIFT) |
+ (access_control_value<<12)));
+
+ /* Wait for IOC to clear Doorbell Status bit */
+ if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
+ return -2;
+ } else
+ return 0;
+}
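+
+/*
+ * Usage sketch: the caller added in mpt_adapter_disable() later in this
+ * patch releases the buffer with
+ *
+ *   mpt_host_page_access_control(ioc, MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP);
+ *
+ * The doorbell write above packs the function code at
+ * MPI_DOORBELL_FUNCTION_SHIFT and the access-control value (1h..3h per
+ * the table in the comment) at bit 12, then waits for the IOC to ack.
+ */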
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_host_page_alloc - allocate system memory for the fw
+ * If we already allocated memory in the past, resend the same pointer.
+ * @ioc: Pointer to MPT adapter structure
+ * @ioc_init: Pointer to ioc init config page
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
+{
+ char *psge;
+ int flags_length;
+ u32 host_page_buffer_sz=0;
+
+ if(!ioc->HostPageBuffer) {
+
+ host_page_buffer_sz =
+ le32_to_cpu(ioc->facts.HostPageBufferSGE.FlagsLength) & 0xFFFFFF;
+
+ if(!host_page_buffer_sz)
+ return 0; /* fw doesn't need any host buffers */
+
+ /* spin till we get enough memory */
+ while(host_page_buffer_sz > 0) {
+
+ if((ioc->HostPageBuffer = pci_alloc_consistent(
+ ioc->pcidev,
+ host_page_buffer_sz,
+ &ioc->HostPageBuffer_dma)) != NULL) {
+
+ dinitprintk((MYIOC_s_INFO_FMT
+ "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
+ ioc->name,
+ ioc->HostPageBuffer,
+ ioc->HostPageBuffer_dma,
+ host_page_buffer_sz));
+ ioc->alloc_total += host_page_buffer_sz;
+ ioc->HostPageBuffer_sz = host_page_buffer_sz;
+ break;
+ }
+
+ host_page_buffer_sz -= (4*1024);
+ }
+ }
+
+ if(!ioc->HostPageBuffer) {
+ printk(MYIOC_s_ERR_FMT
+ "Failed to alloc memory for host_page_buffer!\n",
+ ioc->name);
+ return -999;
+ }
+
+ psge = (char *)&ioc_init->HostPageBufferSGE;
+ flags_length = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI_SGE_FLAGS_SYSTEM_ADDRESS |
+ MPI_SGE_FLAGS_32_BIT_ADDRESSING |
+ MPI_SGE_FLAGS_HOST_TO_IOC |
+ MPI_SGE_FLAGS_END_OF_BUFFER;
+ if (sizeof(dma_addr_t) == sizeof(u64)) {
+ flags_length |= MPI_SGE_FLAGS_64_BIT_ADDRESSING;
+ }
+ flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
+ flags_length |= ioc->HostPageBuffer_sz;
+ mpt_add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
+ ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
+
+ return 0;
+}
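+
+/*
+ * Sketch of the descriptor built above, assuming the usual MPI simple
+ * SGE layout (flag bits above MPI_SGE_FLAGS_SHIFT, length in the low
+ * bits): for a hypothetical 16 KB host page buffer,
+ *
+ *   flags_length = (MPI_SGE_FLAGS_SIMPLE_ELEMENT | ... |
+ *                   MPI_SGE_FLAGS_END_OF_BUFFER) << MPI_SGE_FLAGS_SHIFT;
+ *   flags_length |= 0x4000;    16 KB == 0x4000 bytes
+ *
+ * mpt_add_sge() then writes this word plus HostPageBuffer_dma into the
+ * IOCInit request, so the firmware can address the buffer directly.
+ */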
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
* mpt_verify_adapter - Given a unique IOC identifier, set pointer to
* the associated MPT adapter structure.
* @iocid: IOC unique identifier (integer)
@@ -1084,7 +1203,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
/* Initialize SCSI Config Data structure
*/
- memset(&ioc->spi_data, 0, sizeof(ScsiCfgData));
+ memset(&ioc->spi_data, 0, sizeof(SpiCfgData));
/* Initialize the running configQ head.
*/
@@ -1213,6 +1332,33 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->prod_name = "LSI53C1035";
ioc->bus_type = SCSI;
}
+ else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064) {
+ ioc->prod_name = "LSISAS1064";
+ ioc->bus_type = SAS;
+ ioc->errata_flag_1064 = 1;
+ }
+ else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1066) {
+ ioc->prod_name = "LSISAS1066";
+ ioc->bus_type = SAS;
+ ioc->errata_flag_1064 = 1;
+ }
+ else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068) {
+ ioc->prod_name = "LSISAS1068";
+ ioc->bus_type = SAS;
+ ioc->errata_flag_1064 = 1;
+ }
+ else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064E) {
+ ioc->prod_name = "LSISAS1064E";
+ ioc->bus_type = SAS;
+ }
+ else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1066E) {
+ ioc->prod_name = "LSISAS1066E";
+ ioc->bus_type = SAS;
+ }
+ else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068E) {
+ ioc->prod_name = "LSISAS1068E";
+ ioc->bus_type = SAS;
+ }
if (ioc->errata_flag_1064)
pci_disable_io_access(pdev);
@@ -1604,8 +1750,23 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
*/
if (ret == 0) {
rc = mpt_do_upload(ioc, sleepFlag);
- if (rc != 0)
+ if (rc == 0) {
+ if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
+ /*
+ * Maintain only one pointer to FW memory
+ * so there will not be two attempt to
+ * downloadboot onboard dual function
+ * chips (mpt_adapter_disable,
+ * mpt_diag_reset)
+ */
+ ioc->cached_fw = NULL;
+ ddlprintk((MYIOC_s_INFO_FMT ": mpt_upload: alt_%s has cached_fw=%p \n",
+ ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw));
+ }
+ } else {
printk(KERN_WARNING MYNAM ": firmware upload failure!\n");
+ ret = -5;
+ }
}
}
}
@@ -1640,7 +1801,22 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
* and we try GetLanConfigPages again...
*/
if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
- if (ioc->bus_type == FC) {
+ if (ioc->bus_type == SAS) {
+
+ /* clear persistency table */
+ if(ioc->facts.IOCExceptions &
+ MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) {
+ ret = mptbase_sas_persist_operation(ioc,
+ MPI_SAS_OP_CLEAR_NOT_PRESENT);
+ if(ret != 0)
+ return -1;
+ }
+
+ /* Find IM volumes
+ */
+ mpt_findImVolumes(ioc);
+
+ } else if (ioc->bus_type == FC) {
/*
* Pre-fetch FC port WWN and stuff...
* (FCPortPage0_t stuff)
@@ -1783,7 +1959,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
if (ioc->cached_fw != NULL) {
ddlprintk((KERN_INFO MYNAM ": mpt_adapter_disable: Pushing FW onto adapter\n"));
- if ((ret = mpt_downloadboot(ioc, NO_SLEEP)) < 0) {
+ if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)ioc->cached_fw, NO_SLEEP)) < 0) {
printk(KERN_WARNING MYNAM
": firmware downloadboot failure (%d)!\n", ret);
}
@@ -1831,9 +2007,9 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
}
kfree(ioc->spi_data.nvram);
- kfree(ioc->spi_data.pIocPg3);
+ kfree(ioc->raid_data.pIocPg3);
ioc->spi_data.nvram = NULL;
- ioc->spi_data.pIocPg3 = NULL;
+ ioc->raid_data.pIocPg3 = NULL;
if (ioc->spi_data.pIocPg4 != NULL) {
sz = ioc->spi_data.IocPg4Sz;
@@ -1852,6 +2028,23 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
kfree(ioc->ChainToChain);
ioc->ChainToChain = NULL;
+
+ if (ioc->HostPageBuffer != NULL) {
+ if((ret = mpt_host_page_access_control(ioc,
+ MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) {
+ printk(KERN_ERR MYNAM
+ ": %s: host page buffers free failed (%d)!\n",
+ __FUNCTION__, ret);
+ }
+ dexitprintk((KERN_INFO MYNAM ": %s HostPageBuffer free @ %p, sz=%d bytes\n",
+ ioc->name, ioc->HostPageBuffer, ioc->HostPageBuffer_sz));
+ pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
+ ioc->HostPageBuffer,
+ ioc->HostPageBuffer_dma);
+ ioc->HostPageBuffer = NULL;
+ ioc->alloc_total -= ioc->HostPageBuffer_sz;
+ ioc->HostPageBuffer_sz = 0;
+ }
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2034,7 +2227,7 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
* Loop here waiting for IOC to come READY.
*/
ii = 0;
- cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 15; /* 15 seconds */
+ cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 5; /* 5 seconds */
while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) {
if (ioc_state == MPI_IOC_STATE_OPERATIONAL) {
@@ -2212,6 +2405,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
le32_to_cpu(facts->CurrentSenseBufferHighAddr);
facts->CurReplyFrameSize =
le16_to_cpu(facts->CurReplyFrameSize);
+ facts->IOCCapabilities = le32_to_cpu(facts->IOCCapabilities);
/*
* Handle NEW (!) IOCFactsReply fields in MPI-1.01.xx
@@ -2383,13 +2577,25 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
ddlprintk((MYIOC_s_INFO_FMT "upload_fw %d facts.Flags=%x\n",
ioc->name, ioc->upload_fw, ioc->facts.Flags));
- if (ioc->bus_type == FC)
+ if(ioc->bus_type == SAS)
+ ioc_init.MaxDevices = ioc->facts.MaxDevices;
+ else if(ioc->bus_type == FC)
ioc_init.MaxDevices = MPT_MAX_FC_DEVICES;
else
ioc_init.MaxDevices = MPT_MAX_SCSI_DEVICES;
-
ioc_init.MaxBuses = MPT_MAX_BUS;
-
+ dinitprintk((MYIOC_s_INFO_FMT "facts.MsgVersion=%x\n",
+ ioc->name, ioc->facts.MsgVersion));
+ if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) {
+ /* set MsgVersion and HeaderVersion the host driver was built with */
+ ioc_init.MsgVersion = cpu_to_le16(MPI_VERSION);
+ ioc_init.HeaderVersion = cpu_to_le16(MPI_HEADER_VERSION);
+
+ if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_HOST_PAGE_BUFFER_PERSISTENT) {
+ ioc_init.HostPageBufferSGE = ioc->facts.HostPageBufferSGE;
+ } else if(mpt_host_page_alloc(ioc, &ioc_init))
+ return -99;
+ }
ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */
if (sizeof(dma_addr_t) == sizeof(u64)) {
@@ -2403,17 +2609,21 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
ioc_init.HostMfaHighAddr = cpu_to_le32(0);
ioc_init.SenseBufferHighAddr = cpu_to_le32(0);
}
-
+
ioc->facts.CurrentHostMfaHighAddr = ioc_init.HostMfaHighAddr;
ioc->facts.CurrentSenseBufferHighAddr = ioc_init.SenseBufferHighAddr;
+ ioc->facts.MaxDevices = ioc_init.MaxDevices;
+ ioc->facts.MaxBuses = ioc_init.MaxBuses;
dhsprintk((MYIOC_s_INFO_FMT "Sending IOCInit (req @ %p)\n",
ioc->name, &ioc_init));
r = mpt_handshake_req_reply_wait(ioc, sizeof(IOCInit_t), (u32*)&ioc_init,
sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10 /*seconds*/, sleepFlag);
- if (r != 0)
+ if (r != 0) {
+ printk(MYIOC_s_ERR_FMT "Sending IOCInit failed (%d)!\n", ioc->name, r);
return r;
+ }
/* No need to byte swap the multibyte fields in the reply
* since we don't even look at it's contents.
@@ -2472,7 +2682,7 @@ SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
{
PortEnable_t port_enable;
MPIDefaultReply_t reply_buf;
- int ii;
+ int rc;
int req_sz;
int reply_sz;
@@ -2494,22 +2704,15 @@ SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
/* RAID FW may take a long time to enable
*/
- if (ioc->bus_type == FC) {
- ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
- reply_sz, (u16*)&reply_buf, 65 /*seconds*/, sleepFlag);
- } else {
- ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
+ if ( (ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
+ > MPI_FW_HEADER_PID_PROD_TARGET_SCSI ) {
+ rc = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
reply_sz, (u16*)&reply_buf, 300 /*seconds*/, sleepFlag);
+ } else {
+ rc = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
+ reply_sz, (u16*)&reply_buf, 30 /*seconds*/, sleepFlag);
}
-
- if (ii != 0)
- return ii;
-
- /* We do not even look at the reply, so we need not
- * swap the multi-byte fields.
- */
-
- return 0;
+ return rc;
}
/*
@@ -2666,9 +2869,8 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
* <0 for fw upload failure.
*/
static int
-mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
+mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag)
{
- MpiFwHeader_t *pFwHeader;
MpiExtImageHeader_t *pExtImage;
u32 fwSize;
u32 diag0val;
@@ -2679,18 +2881,8 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
u32 load_addr;
u32 ioc_state=0;
- ddlprintk((MYIOC_s_INFO_FMT "downloadboot: fw size 0x%x, ioc FW Ptr %p\n",
- ioc->name, ioc->facts.FWImageSize, ioc->cached_fw));
-
- if ( ioc->facts.FWImageSize == 0 )
- return -1;
-
- if (ioc->cached_fw == NULL)
- return -2;
-
- /* prevent a second downloadboot and memory free with alt_ioc */
- if (ioc->alt_ioc && ioc->alt_ioc->cached_fw)
- ioc->alt_ioc->cached_fw = NULL;
+ ddlprintk((MYIOC_s_INFO_FMT "downloadboot: fw size 0x%x (%d), FW Ptr %p\n",
+ ioc->name, pFwHeader->ImageSize, pFwHeader->ImageSize, pFwHeader));
CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
@@ -2718,16 +2910,17 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
ioc->name, count));
break;
}
- /* wait 1 sec */
+ /* wait .1 sec */
if (sleepFlag == CAN_SLEEP) {
- msleep_interruptible (1000);
+ msleep_interruptible (100);
} else {
- mdelay (1000);
+ mdelay (100);
}
}
if ( count == 30 ) {
- ddlprintk((MYIOC_s_INFO_FMT "downloadboot failed! Unable to RESET_ADAPTER diag0val=%x\n",
+ ddlprintk((MYIOC_s_INFO_FMT "downloadboot failed! "
+ "Unable to get MPI_DIAG_DRWE mode, diag0val=%x\n",
ioc->name, diag0val));
return -3;
}
@@ -2742,7 +2935,6 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
/* Set the DiagRwEn and Disable ARM bits */
CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM));
- pFwHeader = (MpiFwHeader_t *) ioc->cached_fw;
fwSize = (pFwHeader->ImageSize + 3)/4;
ptrFw = (u32 *) pFwHeader;
@@ -2792,19 +2984,38 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
/* Clear the internal flash bad bit - autoincrementing register,
* so must do two writes.
*/
- CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
- diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData);
- diagRwData |= 0x4000000;
- CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
- CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData);
+ if (ioc->bus_type == SCSI) {
+ /*
+ * 1030 and 1035 H/W errata, workaround to access
+ * the ClearFlashBadSignatureBit
+ */
+ CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
+ diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData);
+ diagRwData |= 0x40000000;
+ CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
+ CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData);
+
+ } else /* if((ioc->bus_type == SAS) || (ioc->bus_type == FC)) */ {
+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+ CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val |
+ MPI_DIAG_CLEAR_FLASH_BAD_SIG);
+
+ /* wait 1 msec */
+ if (sleepFlag == CAN_SLEEP) {
+ msleep_interruptible (1);
+ } else {
+ mdelay (1);
+ }
+ }
if (ioc->errata_flag_1064)
pci_disable_io_access(ioc->pcidev);
diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
- ddlprintk((MYIOC_s_INFO_FMT "downloadboot diag0val=%x, turning off PREVENT_IOC_BOOT, DISABLE_ARM\n",
+ ddlprintk((MYIOC_s_INFO_FMT "downloadboot diag0val=%x, "
+ "turning off PREVENT_IOC_BOOT, DISABLE_ARM, RW_ENABLE\n",
ioc->name, diag0val));
- diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM);
+ diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM | MPI_DIAG_RW_ENABLE);
ddlprintk((MYIOC_s_INFO_FMT "downloadboot now diag0val=%x\n",
ioc->name, diag0val));
CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val);
@@ -2812,10 +3023,23 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
/* Write 0xFF to reset the sequencer */
CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
+ if (ioc->bus_type == SAS) {
+ ioc_state = mpt_GetIocState(ioc, 0);
+ if ( (GetIocFacts(ioc, sleepFlag,
+ MPT_HOSTEVENT_IOC_BRINGUP)) != 0 ) {
+ ddlprintk((MYIOC_s_INFO_FMT "GetIocFacts failed: IocState=%x\n",
+ ioc->name, ioc_state));
+ return -EFAULT;
+ }
+ }
+
for (count=0; count<HZ*20; count++) {
if ((ioc_state = mpt_GetIocState(ioc, 0)) & MPI_IOC_STATE_READY) {
ddlprintk((MYIOC_s_INFO_FMT "downloadboot successful! (count=%d) IocState=%x\n",
ioc->name, count, ioc_state));
+ if (ioc->bus_type == SAS) {
+ return 0;
+ }
if ((SendIocInit(ioc, sleepFlag)) != 0) {
ddlprintk((MYIOC_s_INFO_FMT "downloadboot: SendIocInit failed\n",
ioc->name));
@@ -3049,12 +3273,13 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
/* wait 1 sec */
if (sleepFlag == CAN_SLEEP) {
- ssleep(1);
+ msleep_interruptible (1000);
} else {
mdelay (1000);
}
}
- if ((count = mpt_downloadboot(ioc, sleepFlag)) < 0) {
+ if ((count = mpt_downloadboot(ioc,
+ (MpiFwHeader_t *)ioc->cached_fw, sleepFlag)) < 0) {
printk(KERN_WARNING MYNAM
": firmware downloadboot failure (%d)!\n", count);
}
@@ -3637,7 +3862,7 @@ WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
int count = 0;
u32 intstat=0;
- cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * howlong;
+ cntdn = 1000 * howlong;
if (sleepFlag == CAN_SLEEP) {
while (--cntdn) {
@@ -3687,7 +3912,7 @@ WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
int count = 0;
u32 intstat=0;
- cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * howlong;
+ cntdn = 1000 * howlong;
if (sleepFlag == CAN_SLEEP) {
while (--cntdn) {
intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
@@ -4001,6 +4226,85 @@ GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
+ * mptbase_sas_persist_operation - Perform operation on SAS Persistent Table
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @persist_opcode: see below
+ *
+ * MPI_SAS_OP_CLEAR_NOT_PRESENT - Free all persistent TargetID mappings for
+ * devices not currently present.
+ * MPI_SAS_OP_CLEAR_ALL_PERSISTENT - Clear all persistent TargetID mappings
+ *
+ * NOTE: Don't call this function from interrupt context.
+ *
+ * Returns: 0 for success, non-zero error
+ */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+int
+mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
+{
+ SasIoUnitControlRequest_t *sasIoUnitCntrReq;
+ SasIoUnitControlReply_t *sasIoUnitCntrReply;
+ MPT_FRAME_HDR *mf = NULL;
+ MPIHeader_t *mpi_hdr;
+
+
+ /* ensure garbage is not sent to fw */
+ switch(persist_opcode) {
+
+ case MPI_SAS_OP_CLEAR_NOT_PRESENT:
+ case MPI_SAS_OP_CLEAR_ALL_PERSISTENT:
+ break;
+
+ default:
+ return -1;
+ }
+
+ printk("%s: persist_opcode=%x\n",__FUNCTION__, persist_opcode);
+
+ /* Get a MF for this command.
+ */
+ if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
+ printk("%s: no msg frames!\n",__FUNCTION__);
+ return -1;
+ }
+
+ mpi_hdr = (MPIHeader_t *) mf;
+ sasIoUnitCntrReq = (SasIoUnitControlRequest_t *)mf;
+ memset(sasIoUnitCntrReq,0,sizeof(SasIoUnitControlRequest_t));
+ sasIoUnitCntrReq->Function = MPI_FUNCTION_SAS_IO_UNIT_CONTROL;
+ sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext;
+ sasIoUnitCntrReq->Operation = persist_opcode;
+
+ init_timer(&ioc->persist_timer);
+ ioc->persist_timer.data = (unsigned long) ioc;
+ ioc->persist_timer.function = mpt_timer_expired;
+ ioc->persist_timer.expires = jiffies + HZ*10 /* 10 sec */;
+ ioc->persist_wait_done=0;
+ add_timer(&ioc->persist_timer);
+ mpt_put_msg_frame(mpt_base_index, ioc, mf);
+ wait_event(mpt_waitq, ioc->persist_wait_done);
+
+ sasIoUnitCntrReply =
+ (SasIoUnitControlReply_t *)ioc->persist_reply_frame;
+ if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
+ printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
+ __FUNCTION__,
+ sasIoUnitCntrReply->IOCStatus,
+ sasIoUnitCntrReply->IOCLogInfo);
+ return -1;
+ }
+
+ printk("%s: success\n",__FUNCTION__);
+ return 0;
+}
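+
+/*
+ * Usage sketch, mirroring the bring-up code added to
+ * mpt_do_ioc_recovery() earlier in this patch: when GetIocFacts reports
+ * MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL, stale mappings are dropped
+ * before discovery:
+ *
+ *   if (ioc->facts.IOCExceptions &
+ *       MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL)
+ *           rc = mptbase_sas_persist_operation(ioc,
+ *                           MPI_SAS_OP_CLEAR_NOT_PRESENT);
+ *
+ * The routine sleeps on mpt_waitq until the SAS_IO_UNIT_CONTROL reply
+ * arrives (see mpt_base_reply), so it must not be called from interrupt
+ * context.
+ */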
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
* GetIoUnitPage2 - Retrieve BIOS version and boot order information.
* @ioc: Pointer to MPT_ADAPTER structure
*
@@ -4340,10 +4644,10 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
if (mpt_config(ioc, &cfg) != 0)
goto done_and_free;
- if ( (mem = (u8 *)ioc->spi_data.pIocPg2) == NULL ) {
+ if ( (mem = (u8 *)ioc->raid_data.pIocPg2) == NULL ) {
mem = kmalloc(iocpage2sz, GFP_ATOMIC);
if (mem) {
- ioc->spi_data.pIocPg2 = (IOCPage2_t *) mem;
+ ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem;
} else {
goto done_and_free;
}
@@ -4360,7 +4664,7 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
/* At least 1 RAID Volume
*/
pIocRv = pIoc2->RaidVolume;
- ioc->spi_data.isRaid = 0;
+ ioc->raid_data.isRaid = 0;
for (jj = 0; jj < nVols; jj++, pIocRv++) {
vid = pIocRv->VolumeID;
vbus = pIocRv->VolumeBus;
@@ -4369,7 +4673,7 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
/* find the match
*/
if (vbus == 0) {
- ioc->spi_data.isRaid |= (1 << vid);
+ ioc->raid_data.isRaid |= (1 << vid);
} else {
/* Error! Always bus 0
*/
@@ -4404,8 +4708,8 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
/* Free the old page
*/
- kfree(ioc->spi_data.pIocPg3);
- ioc->spi_data.pIocPg3 = NULL;
+ kfree(ioc->raid_data.pIocPg3);
+ ioc->raid_data.pIocPg3 = NULL;
/* There is at least one physical disk.
* Read and save IOC Page 3
@@ -4442,7 +4746,7 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
mem = kmalloc(iocpage3sz, GFP_ATOMIC);
if (mem) {
memcpy(mem, (u8 *)pIoc3, iocpage3sz);
- ioc->spi_data.pIocPg3 = (IOCPage3_t *) mem;
+ ioc->raid_data.pIocPg3 = (IOCPage3_t *) mem;
}
}
@@ -5366,8 +5670,8 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-static char *
-EventDescriptionStr(u8 event, u32 evData0)
+static void
+EventDescriptionStr(u8 event, u32 evData0, char *evStr)
{
char *ds;
@@ -5420,8 +5724,95 @@ EventDescriptionStr(u8 event, u32 evData0)
ds = "Events(OFF) Change";
break;
case MPI_EVENT_INTEGRATED_RAID:
- ds = "Integrated Raid";
+ {
+ u8 ReasonCode = (u8)(evData0 >> 16);
+ switch (ReasonCode) {
+ case MPI_EVENT_RAID_RC_VOLUME_CREATED :
+ ds = "Integrated Raid: Volume Created";
+ break;
+ case MPI_EVENT_RAID_RC_VOLUME_DELETED :
+ ds = "Integrated Raid: Volume Deleted";
+ break;
+ case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED :
+ ds = "Integrated Raid: Volume Settings Changed";
+ break;
+ case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED :
+ ds = "Integrated Raid: Volume Status Changed";
+ break;
+ case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED :
+ ds = "Integrated Raid: Volume Physdisk Changed";
+ break;
+ case MPI_EVENT_RAID_RC_PHYSDISK_CREATED :
+ ds = "Integrated Raid: Physdisk Created";
+ break;
+ case MPI_EVENT_RAID_RC_PHYSDISK_DELETED :
+ ds = "Integrated Raid: Physdisk Deleted";
+ break;
+ case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED :
+ ds = "Integrated Raid: Physdisk Settings Changed";
+ break;
+ case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED :
+ ds = "Integrated Raid: Physdisk Status Changed";
+ break;
+ case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED :
+ ds = "Integrated Raid: Domain Validation Needed";
+ break;
+ case MPI_EVENT_RAID_RC_SMART_DATA :
+ ds = "Integrated Raid; Smart Data";
+ break;
+ case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED :
+ ds = "Integrated Raid: Replace Action Started";
+ break;
+ default:
+ ds = "Integrated Raid";
+ break;
+ }
+ break;
+ }
+ case MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE:
+ ds = "SCSI Device Status Change";
+ break;
+ case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ {
+ u8 ReasonCode = (u8)(evData0 >> 16);
+ switch (ReasonCode) {
+ case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
+ ds = "SAS Device Status Change: Added";
+ break;
+ case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
+ ds = "SAS Device Status Change: Deleted";
+ break;
+ case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
+ ds = "SAS Device Status Change: SMART Data";
+ break;
+ case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
+ ds = "SAS Device Status Change: No Persistancy Added";
+ break;
+ default:
+ ds = "SAS Device Status Change: Unknown";
+ break;
+ }
+ break;
+ }
+ case MPI_EVENT_ON_BUS_TIMER_EXPIRED:
+ ds = "Bus Timer Expired";
+ break;
+ case MPI_EVENT_QUEUE_FULL:
+ ds = "Queue Full";
+ break;
+ case MPI_EVENT_SAS_SES:
+ ds = "SAS SES Event";
+ break;
+ case MPI_EVENT_PERSISTENT_TABLE_FULL:
+ ds = "Persistent Table Full";
+ break;
+ case MPI_EVENT_SAS_PHY_LINK_STATUS:
+ ds = "SAS PHY Link Status";
+ break;
+ case MPI_EVENT_SAS_DISCOVERY_ERROR:
+ ds = "SAS Discovery Error";
break;
+
/*
* MPT base "custom" events may be added here...
*/
@@ -5429,7 +5820,7 @@ EventDescriptionStr(u8 event, u32 evData0)
ds = "Unknown";
break;
}
- return ds;
+ strcpy(evStr,ds);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5451,7 +5842,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
int ii;
int r = 0;
int handlers = 0;
- char *evStr;
+ char evStr[100];
u8 event;
/*
@@ -5464,7 +5855,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
evData0 = le32_to_cpu(pEventReply->Data[0]);
}
- evStr = EventDescriptionStr(event, evData0);
+ EventDescriptionStr(event, evData0, evStr);
devtprintk((MYIOC_s_INFO_FMT "MPT event (%s=%02Xh) detected!\n",
ioc->name,
evStr,
@@ -5481,20 +5872,6 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
* Do general / base driver event processing
*/
switch(event) {
- case MPI_EVENT_NONE: /* 00 */
- case MPI_EVENT_LOG_DATA: /* 01 */
- case MPI_EVENT_STATE_CHANGE: /* 02 */
- case MPI_EVENT_UNIT_ATTENTION: /* 03 */
- case MPI_EVENT_IOC_BUS_RESET: /* 04 */
- case MPI_EVENT_EXT_BUS_RESET: /* 05 */
- case MPI_EVENT_RESCAN: /* 06 */
- case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
- case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
- case MPI_EVENT_LOGOUT: /* 09 */
- case MPI_EVENT_INTEGRATED_RAID: /* 0B */
- case MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE: /* 0C */
- default:
- break;
case MPI_EVENT_EVENT_CHANGE: /* 0A */
if (evDataLen) {
u8 evState = evData0 & 0xFF;
@@ -5507,6 +5884,8 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
}
}
break;
+ default:
+ break;
}
/*
@@ -5653,6 +6032,111 @@ mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info)
printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc);
}
+/* strings for sas loginfo */
+ static char *originator_str[] = {
+ "IOP", /* 00h */
+ "PL", /* 01h */
+ "IR" /* 02h */
+ };
+ static char *iop_code_str[] = {
+ NULL, /* 00h */
+ "Invalid SAS Address", /* 01h */
+ NULL, /* 02h */
+ "Invalid Page", /* 03h */
+ NULL, /* 04h */
+ "Task Terminated" /* 05h */
+ };
+ static char *pl_code_str[] = {
+ NULL, /* 00h */
+ "Open Failure", /* 01h */
+ "Invalid Scatter Gather List", /* 02h */
+ "Wrong Relative Offset or Frame Length", /* 03h */
+ "Frame Transfer Error", /* 04h */
+ "Transmit Frame Connected Low", /* 05h */
+ "SATA Non-NCQ RW Error Bit Set", /* 06h */
+ "SATA Read Log Receive Data Error", /* 07h */
+ "SATA NCQ Fail All Commands After Error", /* 08h */
+ "SATA Error in Receive Set Device Bit FIS", /* 09h */
+ "Receive Frame Invalid Message", /* 0Ah */
+ "Receive Context Message Valid Error", /* 0Bh */
+ "Receive Frame Current Frame Error", /* 0Ch */
+ "SATA Link Down", /* 0Dh */
+ "Discovery SATA Init W IOS", /* 0Eh */
+ "Config Invalid Page", /* 0Fh */
+ "Discovery SATA Init Timeout", /* 10h */
+ "Reset", /* 11h */
+ "Abort", /* 12h */
+ "IO Not Yet Executed", /* 13h */
+ "IO Executed", /* 14h */
+ NULL, /* 15h */
+ NULL, /* 16h */
+ NULL, /* 17h */
+ NULL, /* 18h */
+ NULL, /* 19h */
+ NULL, /* 1Ah */
+ NULL, /* 1Bh */
+ NULL, /* 1Ch */
+ NULL, /* 1Dh */
+ NULL, /* 1Eh */
+ NULL, /* 1Fh */
+ "Enclosure Management" /* 20h */
+ };
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * mpt_sas_log_info - Log information returned from SAS IOC.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @log_info: U32 LogInfo reply word from the IOC
+ *
+ * Refer to lsi/mpi_log_sas.h.
+ */
+static void
+mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info)
+{
+union loginfo_type {
+ u32 loginfo;
+ struct {
+ u32 subcode:16;
+ u32 code:8;
+ u32 originator:4;
+ u32 bus_type:4;
+ }dw;
+};
+ union loginfo_type sas_loginfo;
+ char *code_desc = NULL;
+
+ sas_loginfo.loginfo = log_info;
+ if ((sas_loginfo.dw.bus_type != 3 /*SAS*/) ||
+ (sas_loginfo.dw.originator >= sizeof(originator_str)/sizeof(char*)))
+ return;
+ if ((sas_loginfo.dw.originator == 0 /*IOP*/) &&
+ (sas_loginfo.dw.code < sizeof(iop_code_str)/sizeof(char*))) {
+ code_desc = iop_code_str[sas_loginfo.dw.code];
+ }else if ((sas_loginfo.dw.originator == 1 /*PL*/) &&
+ (sas_loginfo.dw.code < sizeof(pl_code_str)/sizeof(char*) )) {
+ code_desc = pl_code_str[sas_loginfo.dw.code];
+ }
+
+ if (code_desc != NULL)
+ printk(MYIOC_s_INFO_FMT
+ "LogInfo(0x%08x): Originator={%s}, Code={%s},"
+ " SubCode(0x%04x)\n",
+ ioc->name,
+ log_info,
+ originator_str[sas_loginfo.dw.originator],
+ code_desc,
+ sas_loginfo.dw.subcode);
+ else
+ printk(MYIOC_s_INFO_FMT
+ "LogInfo(0x%08x): Originator={%s}, Code=(0x%02x),"
+ " SubCode(0x%04x)\n",
+ ioc->name,
+ log_info,
+ originator_str[sas_loginfo.dw.originator],
+ sas_loginfo.dw.code,
+ sas_loginfo.dw.subcode);
+}
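+
+/*
+ * Worked example, assuming the little-endian bitfield layout declared
+ * above: log_info == 0x31120000 splits into
+ *
+ *   bus_type   = 0x3     SAS
+ *   originator = 0x1     "PL"
+ *   code       = 0x12    "Abort" in pl_code_str[]
+ *   subcode    = 0x0000
+ *
+ * and is printed as:
+ *   LogInfo(0x31120000): Originator={PL}, Code={Abort}, SubCode(0x0000)
+ */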
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mpt_sp_ioc_info - IOC information returned from SCSI Parallel IOC.
@@ -5814,6 +6298,7 @@ EXPORT_SYMBOL(mpt_findImVolumes);
EXPORT_SYMBOL(mpt_read_ioc_pg_3);
EXPORT_SYMBOL(mpt_alloc_fw_memory);
EXPORT_SYMBOL(mpt_free_fw_memory);
+EXPORT_SYMBOL(mptbase_sas_persist_operation);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index f4827d92373..75105277e22 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -65,6 +65,7 @@
#include "lsi/mpi_fc.h" /* Fibre Channel (lowlevel) support */
#include "lsi/mpi_targ.h" /* SCSI/FCP Target protcol support */
#include "lsi/mpi_tool.h" /* Tools support */
+#include "lsi/mpi_sas.h" /* SAS support */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -76,8 +77,8 @@
#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR
#endif
-#define MPT_LINUX_VERSION_COMMON "3.03.02"
-#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.02"
+#define MPT_LINUX_VERSION_COMMON "3.03.03"
+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.03"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
@@ -423,7 +424,7 @@ typedef struct _MPT_IOCTL {
/*
* Event Structure and define
*/
-#define MPTCTL_EVENT_LOG_SIZE (0x0000000A)
+#define MPTCTL_EVENT_LOG_SIZE (0x00000032)
typedef struct _mpt_ioctl_events {
u32 event; /* Specified by define above */
u32 eventContext; /* Index or counter */
@@ -451,16 +452,13 @@ typedef struct _mpt_ioctl_events {
#define MPT_SCSICFG_ALL_IDS 0x02 /* WriteSDP1 to all IDS */
/* #define MPT_SCSICFG_BLK_NEGO 0x10 WriteSDP1 with WDTR and SDTR disabled */
-typedef struct _ScsiCfgData {
+typedef struct _SpiCfgData {
u32 PortFlags;
int *nvram; /* table of device NVRAM values */
- IOCPage2_t *pIocPg2; /* table of Raid Volumes */
- IOCPage3_t *pIocPg3; /* table of physical disks */
IOCPage4_t *pIocPg4; /* SEP devices addressing */
dma_addr_t IocPg4_dma; /* Phys Addr of IOCPage4 data */
int IocPg4Sz; /* IOCPage4 size */
u8 dvStatus[MPT_MAX_SCSI_DEVICES];
- int isRaid; /* bit field, 1 if RAID */
u8 minSyncFactor; /* 0xFF if async */
u8 maxSyncOffset; /* 0 if async */
u8 maxBusWidth; /* 0 if narrow, 1 if wide */
@@ -472,10 +470,28 @@ typedef struct _ScsiCfgData {
u8 dvScheduled; /* 1 if scheduled */
u8 forceDv; /* 1 to force DV scheduling */
u8 noQas; /* Disable QAS for this adapter */
- u8 Saf_Te; /* 1 to force all Processors as SAF-TE if Inquiry data length is too short to check for SAF-TE */
+ u8 Saf_Te; /* 1 to force all Processors as
+ * SAF-TE if Inquiry data length
+ * is too short to check for SAF-TE
+ */
u8 mpt_dv; /* command line option: enhanced=1, basic=0 */
+ u8 bus_reset; /* 1 to allow bus reset */
u8 rsvd[1];
-} ScsiCfgData;
+}SpiCfgData;
+
+typedef struct _SasCfgData {
+ u8 ptClear; /* 1 to automatically clear the
+ * persistent table.
+ * 0 to disable
+ * automatic clearing.
+ */
+}SasCfgData;
+
+typedef struct _RaidCfgData {
+ IOCPage2_t *pIocPg2; /* table of Raid Volumes */
+ IOCPage3_t *pIocPg3; /* table of physical disks */
+ int isRaid; /* bit field, 1 if RAID */
+}RaidCfgData;
/*
* Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
@@ -530,11 +546,16 @@ typedef struct _MPT_ADAPTER
u8 *sense_buf_pool;
dma_addr_t sense_buf_pool_dma;
u32 sense_buf_low_dma;
+ u8 *HostPageBuffer; /* SAS - host page buffer support */
+ u32 HostPageBuffer_sz;
+ dma_addr_t HostPageBuffer_dma;
int mtrr_reg;
struct pci_dev *pcidev; /* struct pci_dev pointer */
u8 __iomem *memmap; /* mmap address */
struct Scsi_Host *sh; /* Scsi Host pointer */
- ScsiCfgData spi_data; /* Scsi config. data */
+ SpiCfgData spi_data; /* Scsi config. data */
+ RaidCfgData raid_data; /* Raid config. data */
+ SasCfgData sas_data; /* Sas config. data */
MPT_IOCTL *ioctl; /* ioctl data pointer */
struct proc_dir_entry *ioc_dentry;
struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */
@@ -554,31 +575,35 @@ typedef struct _MPT_ADAPTER
#else
u32 mfcnt;
#endif
- u32 NB_for_64_byte_frame;
+ u32 NB_for_64_byte_frame;
u32 hs_req[MPT_MAX_FRAME_SIZE/sizeof(u32)];
u16 hs_reply[MPT_MAX_FRAME_SIZE/sizeof(u16)];
IOCFactsReply_t facts;
PortFactsReply_t pfacts[2];
FCPortPage0_t fc_port_page0[2];
+ struct timer_list persist_timer; /* persist table timer */
+ int persist_wait_done; /* persist completion flag */
+ u8 persist_reply_frame[MPT_DEFAULT_FRAME_SIZE]; /* persist reply */
LANPage0_t lan_cnfg_page0;
LANPage1_t lan_cnfg_page1;
- /*
+ /*
* Description: errata_flag_1064
* If a PCIX read occurs within 1 or 2 cycles after the chip receives
* a split completion for a read data, an internal address pointer incorrectly
* increments by 32 bytes
*/
- int errata_flag_1064;
+ int errata_flag_1064;
u8 FirstWhoInit;
u8 upload_fw; /* If set, do a fw upload */
u8 reload_fw; /* Force a FW Reload on next reset */
- u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */
+ u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */
u8 pad1[4];
int DoneCtx;
int TaskCtx;
int InternalCtx;
- struct list_head list;
+ struct list_head list;
struct net_device *netdev;
+ struct list_head sas_topology;
} MPT_ADAPTER;
/*
@@ -964,6 +989,7 @@ extern void mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
extern int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc);
+extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
/*
* Public data decl's...
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 7577c2417e2..cb2d59d5f5a 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1326,7 +1326,7 @@ mptctl_gettargetinfo (unsigned long arg)
*/
if (hd && hd->Targets) {
mpt_findImVolumes(ioc);
- pIoc2 = ioc->spi_data.pIocPg2;
+ pIoc2 = ioc->raid_data.pIocPg2;
for ( id = 0; id <= max_id; ) {
if ( pIoc2 && pIoc2->NumActiveVolumes ) {
if ( id == pIoc2->RaidVolume[0].VolumeID ) {
@@ -1348,7 +1348,7 @@ mptctl_gettargetinfo (unsigned long arg)
--maxWordsLeft;
goto next_id;
} else {
- pIoc3 = ioc->spi_data.pIocPg3;
+ pIoc3 = ioc->raid_data.pIocPg3;
for ( jj = 0; jj < pIoc3->NumPhysDisks; jj++ ) {
if ( pIoc3->PhysDisk[jj].PhysDiskID == id )
goto next_id;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 13771abea13..a628be9bbba 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -189,7 +189,7 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MYIOC_s_WARN_FMT
"Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n",
ioc->name, ioc);
- return -ENODEV;
+ return 0;
}
sh = scsi_host_alloc(&mptfc_driver_template, sizeof(MPT_SCSI_HOST));
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 52794be5a95..ed3c891e388 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -312,7 +312,12 @@ static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
struct net_device *dev = ioc->netdev;
- struct mpt_lan_priv *priv = netdev_priv(dev);
+ struct mpt_lan_priv *priv;
+
+ if (dev == NULL)
+ return(1);
+ else
+ priv = netdev_priv(dev);
dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
new file mode 100644
index 00000000000..429820e48c6
--- /dev/null
+++ b/drivers/message/fusion/mptsas.c
@@ -0,0 +1,1235 @@
+/*
+ * linux/drivers/message/fusion/mptsas.c
+ * For use with LSI Logic PCI chip/adapter(s)
+ * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
+ *
+ * Copyright (c) 1999-2005 LSI Logic Corporation
+ * (mailto:mpt_linux_developer@lsil.com)
+ * Copyright (c) 2005 Dell
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_sas.h>
+
+#include "mptbase.h"
+#include "mptscsih.h"
+
+
+#define my_NAME "Fusion MPT SAS Host driver"
+#define my_VERSION MPT_LINUX_VERSION_COMMON
+#define MYNAM "mptsas"
+
+MODULE_AUTHOR(MODULEAUTHOR);
+MODULE_DESCRIPTION(my_NAME);
+MODULE_LICENSE("GPL");
+
+static int mpt_pq_filter;
+module_param(mpt_pq_filter, int, 0);
+MODULE_PARM_DESC(mpt_pq_filter,
+ "Enable peripheral qualifier filter: enable=1 "
+ "(default=0)");
+
+static int mpt_pt_clear;
+module_param(mpt_pt_clear, int, 0);
+MODULE_PARM_DESC(mpt_pt_clear,
+ "Clear persistency table: enable=1 "
+ "(default=MPTSCSIH_PT_CLEAR=0)");
+
+static int mptsasDoneCtx = -1;
+static int mptsasTaskCtx = -1;
+static int mptsasInternalCtx = -1; /* Used only for internal commands */
+
+
+/*
+ * SAS topology structures
+ *
+ * The MPT Fusion firmware interface spreads information about the
+ * SAS topology over many manufacture pages, thus we need some data
+ * structure to collect it and process it for the SAS transport class.
+ */
+
+struct mptsas_devinfo {
+ u16 handle; /* unique id to address this device */
+ u8 phy_id; /* phy number of parent device */
+ u8 port_id; /* sas physical port this device
+ is assoc'd with */
+ u8 target; /* logical target id of this device */
+ u8 bus; /* logical bus number of this device */
+ u64 sas_address; /* WWN of this device,
+ SATA is assigned by HBA,expander */
+ u32 device_info; /* bitfield detailed info about this device */
+};
+
+struct mptsas_phyinfo {
+ u8 phy_id; /* phy index */
+ u8 port_id; /* port number this phy is part of */
+ u8 negotiated_link_rate; /* nego'd link rate for this phy */
+ u8 hw_link_rate; /* hardware max/min phys link rate */
+ u8 programmed_link_rate; /* programmed max/min phy link rate */
+ struct mptsas_devinfo identify; /* point to phy device info */
+ struct mptsas_devinfo attached; /* point to attached device info */
+ struct sas_rphy *rphy;
+};
+
+struct mptsas_portinfo {
+ struct list_head list;
+ u16 handle; /* unique id to address this */
+ u8 num_phys; /* number of phys */
+ struct mptsas_phyinfo *phy_info;
+};
+
+/*
+ * This is pretty ugly. We will be able to seriously clean it up
+ * once the DV code in mptscsih goes away and we can properly
+ * implement ->target_alloc.
+ */
+static int
+mptsas_slave_alloc(struct scsi_device *device)
+{
+ struct Scsi_Host *host = device->host;
+ MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+ struct sas_rphy *rphy;
+ struct mptsas_portinfo *p;
+ VirtDevice *vdev;
+ uint target = device->id;
+ int i;
+
+ if ((vdev = hd->Targets[target]) != NULL)
+ goto out;
+
+ vdev = kmalloc(sizeof(VirtDevice), GFP_KERNEL);
+ if (!vdev) {
+ printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
+ hd->ioc->name, sizeof(VirtDevice));
+ return -ENOMEM;
+ }
+
+ memset(vdev, 0, sizeof(VirtDevice));
+ vdev->tflags = MPT_TARGET_FLAGS_Q_YES|MPT_TARGET_FLAGS_VALID_INQUIRY;
+ vdev->ioc_id = hd->ioc->id;
+
+ rphy = dev_to_rphy(device->sdev_target->dev.parent);
+ list_for_each_entry(p, &hd->ioc->sas_topology, list) {
+ for (i = 0; i < p->num_phys; i++) {
+ if (p->phy_info[i].attached.sas_address ==
+ rphy->identify.sas_address) {
+ vdev->target_id =
+ p->phy_info[i].attached.target;
+ vdev->bus_id = p->phy_info[i].attached.bus;
+ hd->Targets[device->id] = vdev;
+ goto out;
+ }
+ }
+ }
+
+ printk("No matching SAS device found!!\n");
+ kfree(vdev);
+ return -ENODEV;
+
+ out:
+ vdev->num_luns++;
+ device->hostdata = vdev;
+ return 0;
+}
+
+static struct scsi_host_template mptsas_driver_template = {
+ .proc_name = "mptsas",
+ .proc_info = mptscsih_proc_info,
+ .name = "MPT SAS Host",
+ .info = mptscsih_info,
+ .queuecommand = mptscsih_qcmd,
+ .slave_alloc = mptsas_slave_alloc,
+ .slave_configure = mptscsih_slave_configure,
+ .slave_destroy = mptscsih_slave_destroy,
+ .change_queue_depth = mptscsih_change_queue_depth,
+ .eh_abort_handler = mptscsih_abort,
+ .eh_device_reset_handler = mptscsih_dev_reset,
+ .eh_bus_reset_handler = mptscsih_bus_reset,
+ .eh_host_reset_handler = mptscsih_host_reset,
+ .bios_param = mptscsih_bios_param,
+ .can_queue = MPT_FC_CAN_QUEUE,
+ .this_id = -1,
+ .sg_tablesize = MPT_SCSI_SG_DEPTH,
+ .max_sectors = 8192,
+ .cmd_per_lun = 7,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+static struct sas_function_template mptsas_transport_functions = {
+};
+
+static struct scsi_transport_template *mptsas_transport_template;
+
+#ifdef SASDEBUG
+static void mptsas_print_phy_data(MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
+{
+ printk("---- IO UNIT PAGE 0 ------------\n");
+ printk("Handle=0x%X\n",
+ le16_to_cpu(phy_data->AttachedDeviceHandle));
+ printk("Controller Handle=0x%X\n",
+ le16_to_cpu(phy_data->ControllerDevHandle));
+ printk("Port=0x%X\n", phy_data->Port);
+ printk("Port Flags=0x%X\n", phy_data->PortFlags);
+ printk("PHY Flags=0x%X\n", phy_data->PhyFlags);
+ printk("Negotiated Link Rate=0x%X\n", phy_data->NegotiatedLinkRate);
+ printk("Controller PHY Device Info=0x%X\n",
+ le32_to_cpu(phy_data->ControllerPhyDeviceInfo));
+ printk("DiscoveryStatus=0x%X\n",
+ le32_to_cpu(phy_data->DiscoveryStatus));
+ printk("\n");
+}
+
+static void mptsas_print_phy_pg0(SasPhyPage0_t *pg0)
+{
+ __le64 sas_address;
+
+ memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
+
+ printk("---- SAS PHY PAGE 0 ------------\n");
+ printk("Attached Device Handle=0x%X\n",
+ le16_to_cpu(pg0->AttachedDevHandle));
+ printk("SAS Address=0x%llX\n",
+ (unsigned long long)le64_to_cpu(sas_address));
+ printk("Attached PHY Identifier=0x%X\n", pg0->AttachedPhyIdentifier);
+ printk("Attached Device Info=0x%X\n",
+ le32_to_cpu(pg0->AttachedDeviceInfo));
+ printk("Programmed Link Rate=0x%X\n", pg0->ProgrammedLinkRate);
+ printk("Change Count=0x%X\n", pg0->ChangeCount);
+ printk("PHY Info=0x%X\n", le32_to_cpu(pg0->PhyInfo));
+ printk("\n");
+}
+
+static void mptsas_print_device_pg0(SasDevicePage0_t *pg0)
+{
+ __le64 sas_address;
+
+ memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
+
+ printk("---- SAS DEVICE PAGE 0 ---------\n");
+ printk("Handle=0x%X\n" ,le16_to_cpu(pg0->DevHandle));
+ printk("Enclosure Handle=0x%X\n", le16_to_cpu(pg0->EnclosureHandle));
+ printk("Slot=0x%X\n", le16_to_cpu(pg0->Slot));
+ printk("SAS Address=0x%llX\n", le64_to_cpu(sas_address));
+ printk("Target ID=0x%X\n", pg0->TargetID);
+ printk("Bus=0x%X\n", pg0->Bus);
+ printk("PhyNum=0x%X\n", pg0->PhyNum);
+ printk("AccessStatus=0x%X\n", le16_to_cpu(pg0->AccessStatus));
+ printk("Device Info=0x%X\n", le32_to_cpu(pg0->DeviceInfo));
+ printk("Flags=0x%X\n", le16_to_cpu(pg0->Flags));
+ printk("Physical Port=0x%X\n", pg0->PhysicalPort);
+ printk("\n");
+}
+
+static void mptsas_print_expander_pg1(SasExpanderPage1_t *pg1)
+{
+ printk("---- SAS EXPANDER PAGE 1 ------------\n");
+
+ printk("Physical Port=0x%X\n", pg1->PhysicalPort);
+ printk("PHY Identifier=0x%X\n", pg1->Phy);
+ printk("Negotiated Link Rate=0x%X\n", pg1->NegotiatedLinkRate);
+ printk("Programmed Link Rate=0x%X\n", pg1->ProgrammedLinkRate);
+ printk("Hardware Link Rate=0x%X\n", pg1->HwLinkRate);
+ printk("Owner Device Handle=0x%X\n",
+ le16_to_cpu(pg1->OwnerDevHandle));
+ printk("Attached Device Handle=0x%X\n",
+ le16_to_cpu(pg1->AttachedDevHandle));
+}
+#else
+#define mptsas_print_phy_data(phy_data) do { } while (0)
+#define mptsas_print_phy_pg0(pg0) do { } while (0)
+#define mptsas_print_device_pg0(pg0) do { } while (0)
+#define mptsas_print_expander_pg1(pg1) do { } while (0)
+#endif
+
+static int
+mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasIOUnitPage0_t *buffer;
+ dma_addr_t dma_handle;
+ int error, i;
+
+ hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
+ hdr.ExtPageLength = 0;
+ hdr.PageNumber = 0;
+ hdr.Reserved1 = 0;
+ hdr.Reserved2 = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.pageAddr = 0;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0; /* read */
+ cfg.timeout = 10;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out;
+ if (!hdr.ExtPageLength) {
+ error = -ENXIO;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out_free_consistent;
+
+ port_info->num_phys = buffer->NumPhys;
+ port_info->phy_info = kcalloc(port_info->num_phys,
+ sizeof(struct mptsas_phyinfo), GFP_KERNEL);
+ if (!port_info->phy_info) {
+ error = -ENOMEM;
+ goto out_free_consistent;
+ }
+
+ for (i = 0; i < port_info->num_phys; i++) {
+ mptsas_print_phy_data(&buffer->PhyData[i]);
+ port_info->phy_info[i].phy_id = i;
+ port_info->phy_info[i].port_id =
+ buffer->PhyData[i].Port;
+ port_info->phy_info[i].negotiated_link_rate =
+ buffer->PhyData[i].NegotiatedLinkRate;
+ }
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return error;
+}
+
+static int
+mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
+ u32 form, u32 form_specific)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasPhyPage0_t *buffer;
+ dma_addr_t dma_handle;
+ int error;
+
+ hdr.PageVersion = MPI_SASPHY0_PAGEVERSION;
+ hdr.ExtPageLength = 0;
+ hdr.PageNumber = 0;
+ hdr.Reserved1 = 0;
+ hdr.Reserved2 = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_PHY;
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.dir = 0; /* read */
+ cfg.timeout = 10;
+
+ /* Get Phy Pg 0 for each Phy. */
+ cfg.physAddr = -1;
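+ /* pageAddr carries the PGAD FORM code in its upper bits plus the phy number passed in by the caller. */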
+ cfg.pageAddr = form + form_specific;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out;
+
+ if (!hdr.ExtPageLength) {
+ error = -ENXIO;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out_free_consistent;
+
+ mptsas_print_phy_pg0(buffer);
+
+ phy_info->hw_link_rate = buffer->HwLinkRate;
+ phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
+ phy_info->identify.handle = le16_to_cpu(buffer->OwnerDevHandle);
+ phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return error;
+}
+
+static int
+mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
+ u32 form, u32 form_specific)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasDevicePage0_t *buffer;
+ dma_addr_t dma_handle;
+ __le64 sas_address;
+ int error;
+
+ hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
+ hdr.ExtPageLength = 0;
+ hdr.PageNumber = 0;
+ hdr.Reserved1 = 0;
+ hdr.Reserved2 = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE;
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.pageAddr = form + form_specific;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0; /* read */
+ cfg.timeout = 10;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out;
+ if (!hdr.ExtPageLength) {
+ error = -ENXIO;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out_free_consistent;
+
+ mptsas_print_device_pg0(buffer);
+
+ device_info->handle = le16_to_cpu(buffer->DevHandle);
+ device_info->phy_id = buffer->PhyNum;
+ device_info->port_id = buffer->PhysicalPort;
+ device_info->target = buffer->TargetID;
+ device_info->bus = buffer->Bus;
+ memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
+ device_info->sas_address = le64_to_cpu(sas_address);
+ device_info->device_info =
+ le32_to_cpu(buffer->DeviceInfo);
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return error;
+}
+
+static int
+mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
+ u32 form, u32 form_specific)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasExpanderPage0_t *buffer;
+ dma_addr_t dma_handle;
+ int error;
+
+ hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
+ hdr.ExtPageLength = 0;
+ hdr.PageNumber = 0;
+ hdr.Reserved1 = 0;
+ hdr.Reserved2 = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.pageAddr = form + form_specific;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0; /* read */
+ cfg.timeout = 10;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out;
+
+ if (!hdr.ExtPageLength) {
+ error = -ENXIO;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out_free_consistent;
+
+ /* save config data */
+ port_info->num_phys = buffer->NumPhys;
+ port_info->handle = le16_to_cpu(buffer->DevHandle);
+ port_info->phy_info = kcalloc(port_info->num_phys,
+ sizeof(struct mptsas_phyinfo), GFP_KERNEL);
+ if (!port_info->phy_info) {
+ error = -ENOMEM;
+ goto out_free_consistent;
+ }
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return error;
+}
+
+static int
+mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
+ u32 form, u32 form_specific)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasExpanderPage1_t *buffer;
+ dma_addr_t dma_handle;
+ int error;
+
+ hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
+ hdr.ExtPageLength = 0;
+ hdr.PageNumber = 1;
+ hdr.Reserved1 = 0;
+ hdr.Reserved2 = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.pageAddr = form + form_specific;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0; /* read */
+ cfg.timeout = 10;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out;
+
+ if (!hdr.ExtPageLength) {
+ error = -ENXIO;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out_free_consistent;
+
+
+ mptsas_print_expander_pg1(buffer);
+
+ /* save config data */
+ phy_info->phy_id = buffer->Phy;
+ phy_info->port_id = buffer->PhysicalPort;
+ phy_info->negotiated_link_rate = buffer->NegotiatedLinkRate;
+ phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
+ phy_info->hw_link_rate = buffer->HwLinkRate;
+ phy_info->identify.handle = le16_to_cpu(buffer->OwnerDevHandle);
+ phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
+
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return error;
+}
+
+static void
+mptsas_parse_device_info(struct sas_identify *identify,
+ struct mptsas_devinfo *device_info)
+{
+ u16 protocols;
+
+ identify->sas_address = device_info->sas_address;
+ identify->phy_identifier = device_info->phy_id;
+
+ /*
+ * Fill in Phy Initiator Port Protocol.
+ * Bits 6:3, more than one bit can be set, fall through cases.
+ */
+ protocols = device_info->device_info & 0x78;
+ identify->initiator_port_protocols = 0;
+ if (protocols & MPI_SAS_DEVICE_INFO_SSP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
+ if (protocols & MPI_SAS_DEVICE_INFO_STP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_STP;
+ if (protocols & MPI_SAS_DEVICE_INFO_SMP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
+ if (protocols & MPI_SAS_DEVICE_INFO_SATA_HOST)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SATA;
+
+ /*
+ * Fill in Phy Target Port Protocol.
+ * Bits 10:7, more than one bit can be set, fall through cases.
+ */
+ protocols = device_info->device_info & 0x780;
+ identify->target_port_protocols = 0;
+ if (protocols & MPI_SAS_DEVICE_INFO_SSP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SSP;
+ if (protocols & MPI_SAS_DEVICE_INFO_STP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_STP;
+ if (protocols & MPI_SAS_DEVICE_INFO_SMP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SMP;
+ if (protocols & MPI_SAS_DEVICE_INFO_SATA_DEVICE)
+ identify->target_port_protocols |= SAS_PROTOCOL_SATA;
+
+ /*
+ * Fill in Attached device type.
+ */
+ switch (device_info->device_info &
+ MPI_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
+ case MPI_SAS_DEVICE_INFO_NO_DEVICE:
+ identify->device_type = SAS_PHY_UNUSED;
+ break;
+ case MPI_SAS_DEVICE_INFO_END_DEVICE:
+ identify->device_type = SAS_END_DEVICE;
+ break;
+ case MPI_SAS_DEVICE_INFO_EDGE_EXPANDER:
+ identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
+ break;
+ case MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER:
+ identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
+ break;
+ }
+}
+
+static int mptsas_probe_one_phy(struct device *dev,
+ struct mptsas_phyinfo *phy_info, int index)
+{
+ struct sas_phy *port;
+ int error;
+
+ port = sas_phy_alloc(dev, index);
+ if (!port)
+ return -ENOMEM;
+
+ port->port_identifier = phy_info->port_id;
+ mptsas_parse_device_info(&port->identify, &phy_info->identify);
+
+ /*
+ * Set Negotiated link rate.
+ */
+ switch (phy_info->negotiated_link_rate) {
+ case MPI_SAS_IOUNIT0_RATE_PHY_DISABLED:
+ port->negotiated_linkrate = SAS_PHY_DISABLED;
+ break;
+ case MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION:
+ port->negotiated_linkrate = SAS_LINK_RATE_FAILED;
+ break;
+ case MPI_SAS_IOUNIT0_RATE_1_5:
+ port->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI_SAS_IOUNIT0_RATE_3_0:
+ port->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE:
+ case MPI_SAS_IOUNIT0_RATE_UNKNOWN:
+ default:
+ port->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
+ break;
+ }
+
+ /*
+ * Set Max hardware link rate.
+ */
+ switch (phy_info->hw_link_rate & MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) {
+ case MPI_SAS_PHY0_HWRATE_MAX_RATE_1_5:
+ port->maximum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0:
+ port->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Set Max programmed link rate.
+ */
+ switch (phy_info->programmed_link_rate &
+ MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) {
+ case MPI_SAS_PHY0_PRATE_MAX_RATE_1_5:
+ port->maximum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0:
+ port->maximum_linkrate = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Set Min hardware link rate.
+ */
+ switch (phy_info->hw_link_rate & MPI_SAS_PHY0_HWRATE_MIN_RATE_MASK) {
+ case MPI_SAS_PHY0_HWRATE_MIN_RATE_1_5:
+ port->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0:
+ port->minimum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Set Min programmed link rate.
+ */
+ switch (phy_info->programmed_link_rate &
+ MPI_SAS_PHY0_PRATE_MIN_RATE_MASK) {
+ case MPI_SAS_PHY0_PRATE_MIN_RATE_1_5:
+ port->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0:
+ port->minimum_linkrate = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ default:
+ break;
+ }
+
+ error = sas_phy_add(port);
+ if (error) {
+ sas_phy_free(port);
+ return error;
+ }
+
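+ /* If a device is attached to this phy, allocate and register a remote phy (rphy) so the SAS transport layer exposes it. */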
+ if (phy_info->attached.handle) {
+ struct sas_rphy *rphy;
+
+ rphy = sas_rphy_alloc(port);
+ if (!rphy)
+ return 0; /* non-fatal: an rphy can be added later */
+
+ mptsas_parse_device_info(&rphy->identify, &phy_info->attached);
+ error = sas_rphy_add(rphy);
+ if (error) {
+ sas_rphy_free(rphy);
+ return error;
+ }
+
+ phy_info->rphy = rphy;
+ }
+
+ return 0;
+}
+
+static int
+mptsas_probe_hba_phys(MPT_ADAPTER *ioc, int *index)
+{
+ struct mptsas_portinfo *port_info;
+ u32 handle = 0xFFFF;
+ int error = -ENOMEM, i;
+
+ port_info = kmalloc(sizeof(*port_info), GFP_KERNEL);
+ if (!port_info)
+ goto out;
+ memset(port_info, 0, sizeof(*port_info));
+
+ error = mptsas_sas_io_unit_pg0(ioc, port_info);
+ if (error)
+ goto out_free_port_info;
+
+ list_add_tail(&port_info->list, &ioc->sas_topology);
+
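+ /*
+ * For each HBA phy: read SAS PHY page 0, then walk SAS device
+ * page 0 with GET_NEXT_HANDLE (seeded with 0xFFFF) to identify
+ * this phy and any directly attached device.
+ */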
+ for (i = 0; i < port_info->num_phys; i++) {
+ mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
+ (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
+ MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
+
+ mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
+ (MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT), handle);
+ handle = port_info->phy_info[i].identify.handle;
+
+ if (port_info->phy_info[i].attached.handle) {
+ mptsas_sas_device_pg0(ioc,
+ &port_info->phy_info[i].attached,
+ (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ port_info->phy_info[i].attached.handle);
+ }
+
+ mptsas_probe_one_phy(&ioc->sh->shost_gendev,
+ &port_info->phy_info[i], *index);
+ (*index)++;
+ }
+
+ return 0;
+
+ out_free_port_info:
+ kfree(port_info);
+ out:
+ return error;
+}
+
+static int
+mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle, int *index)
+{
+ struct mptsas_portinfo *port_info, *p;
+ int error = -ENOMEM, i, j;
+
+ port_info = kmalloc(sizeof(*port_info), GFP_KERNEL);
+ if (!port_info)
+ goto out;
+ memset(port_info, 0, sizeof(*port_info));
+
+ error = mptsas_sas_expander_pg0(ioc, port_info,
+ (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
+ MPI_SAS_EXPAND_PGAD_FORM_SHIFT), *handle);
+ if (error)
+ goto out_free_port_info;
+
+ *handle = port_info->handle;
+
+ list_add_tail(&port_info->list, &ioc->sas_topology);
+ for (i = 0; i < port_info->num_phys; i++) {
+ struct device *parent;
+
+ mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i],
+ (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
+ MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + *handle);
+
+ if (port_info->phy_info[i].identify.handle) {
+ mptsas_sas_device_pg0(ioc,
+ &port_info->phy_info[i].identify,
+ (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ port_info->phy_info[i].identify.handle);
+ }
+
+ if (port_info->phy_info[i].attached.handle) {
+ mptsas_sas_device_pg0(ioc,
+ &port_info->phy_info[i].attached,
+ (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ port_info->phy_info[i].attached.handle);
+ }
+
+ /*
+ * If we find a parent port handle, this expander is
+ * attached to another expander; otherwise it hangs off
+ * the HBA phys.
+ */
+ parent = &ioc->sh->shost_gendev;
+ list_for_each_entry(p, &ioc->sas_topology, list) {
+ for (j = 0; j < p->num_phys; j++) {
+ if (port_info->phy_info[i].identify.handle ==
+ p->phy_info[j].attached.handle)
+ parent = &p->phy_info[j].rphy->dev;
+ }
+ }
+
+ mptsas_probe_one_phy(parent, &port_info->phy_info[i], *index);
+ (*index)++;
+ }
+
+ return 0;
+
+ out_free_port_info:
+ kfree(port_info);
+ out:
+ return error;
+}
+
+static void
+mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
+{
+ u32 handle = 0xFFFF;
+ int index = 0;
+
+ mptsas_probe_hba_phys(ioc, &index);
+ while (!mptsas_probe_expander_phys(ioc, &handle, &index))
+ ;
+}
+
+static int
+mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct Scsi_Host *sh;
+ MPT_SCSI_HOST *hd;
+ MPT_ADAPTER *ioc;
+ unsigned long flags;
+ int sz, ii;
+ int numSGE = 0;
+ int scale;
+ int ioc_cap;
+ u8 *mem;
+ int error = 0;
+ int r;
+
+ r = mpt_attach(pdev, id);
+ if (r)
+ return r;
+
+ ioc = pci_get_drvdata(pdev);
+ ioc->DoneCtx = mptsasDoneCtx;
+ ioc->TaskCtx = mptsasTaskCtx;
+ ioc->InternalCtx = mptsasInternalCtx;
+
+ /* Sanity check on readiness of the MPT adapter.
+ */
+ if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
+ printk(MYIOC_s_WARN_FMT
+ "Skipping because it's not operational!\n",
+ ioc->name);
+ return -ENODEV;
+ }
+
+ if (!ioc->active) {
+ printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
+ ioc->name);
+ return -ENODEV;
+ }
+
+ /* Sanity check - ensure at least 1 port is INITIATOR capable
+ */
+ ioc_cap = 0;
+ for (ii = 0; ii < ioc->facts.NumberOfPorts; ii++) {
+ if (ioc->pfacts[ii].ProtocolFlags &
+ MPI_PORTFACTS_PROTOCOL_INITIATOR)
+ ioc_cap++;
+ }
+
+ if (!ioc_cap) {
+ printk(MYIOC_s_WARN_FMT
+ "Skipping ioc=%p because SCSI Initiator mode "
+ "is NOT enabled!\n", ioc->name, ioc);
+ return 0;
+ }
+
+ sh = scsi_host_alloc(&mptsas_driver_template, sizeof(MPT_SCSI_HOST));
+ if (!sh) {
+ printk(MYIOC_s_WARN_FMT
+ "Unable to register controller with SCSI subsystem\n",
+ ioc->name);
+ return -1;
+ }
+
+ spin_lock_irqsave(&ioc->FreeQlock, flags);
+
+ /* Attach the SCSI Host to the IOC structure
+ */
+ ioc->sh = sh;
+
+ sh->io_port = 0;
+ sh->n_io_port = 0;
+ sh->irq = 0;
+
+ /* set 16 byte cdb's */
+ sh->max_cmd_len = 16;
+
+ sh->max_id = ioc->pfacts->MaxDevices + 1;
+
+ sh->transportt = mptsas_transport_template;
+
+ sh->max_lun = MPT_LAST_LUN + 1;
+ sh->max_channel = 0;
+ sh->this_id = ioc->pfacts[0].PortSCSIID;
+
+ /* Required entry.
+ */
+ sh->unique_id = ioc->id;
+
+ INIT_LIST_HEAD(&ioc->sas_topology);
+
+ /* Verify that we won't exceed the maximum
+ * number of chain buffers
+ * We can optimize: ZZ = req_sz/sizeof(SGE)
+ * For 32bit SGE's:
+ * numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ
+ * + (req_sz - 64)/sizeof(SGE)
+ * A slightly different algorithm is required for
+ * 64bit SGEs.
+ */
+ scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
+ if (sizeof(dma_addr_t) == sizeof(u64)) {
+ numSGE = (scale - 1) *
+ (ioc->facts.MaxChainDepth-1) + scale +
+ (ioc->req_sz - 60) / (sizeof(dma_addr_t) +
+ sizeof(u32));
+ } else {
+ numSGE = 1 + (scale - 1) *
+ (ioc->facts.MaxChainDepth-1) + scale +
+ (ioc->req_sz - 64) / (sizeof(dma_addr_t) +
+ sizeof(u32));
+ }
+
+ if (numSGE < sh->sg_tablesize) {
+ /* Reset this value */
+ dprintk((MYIOC_s_INFO_FMT
+ "Resetting sg_tablesize to %d from %d\n",
+ ioc->name, numSGE, sh->sg_tablesize));
+ sh->sg_tablesize = numSGE;
+ }
+
+ spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
+ hd = (MPT_SCSI_HOST *) sh->hostdata;
+ hd->ioc = ioc;
+
+ /* SCSI needs scsi_cmnd lookup table!
+ * (with size equal to req_depth*PtrSz!)
+ */
+ sz = ioc->req_depth * sizeof(void *);
+ mem = kmalloc(sz, GFP_ATOMIC);
+ if (mem == NULL) {
+ error = -ENOMEM;
+ goto mptsas_probe_failed;
+ }
+
+ memset(mem, 0, sz);
+ hd->ScsiLookup = (struct scsi_cmnd **) mem;
+
+ dprintk((MYIOC_s_INFO_FMT "ScsiLookup @ %p, sz=%d\n",
+ ioc->name, hd->ScsiLookup, sz));
+
+ /* Allocate memory for the device structures.
+ * A non-Null pointer at an offset
+ * indicates a device exists.
+ * max_id = 1 + maximum id (hosts.h)
+ */
+ sz = sh->max_id * sizeof(void *);
+ mem = kmalloc(sz, GFP_ATOMIC);
+ if (mem == NULL) {
+ error = -ENOMEM;
+ goto mptsas_probe_failed;
+ }
+
+ memset(mem, 0, sz);
+ hd->Targets = (VirtDevice **) mem;
+
+ dprintk((KERN_INFO
+ " Targets @ %p, sz=%d\n", hd->Targets, sz));
+
+ /* Clear the TM flags
+ */
+ hd->tmPending = 0;
+ hd->tmState = TM_STATE_NONE;
+ hd->resetPending = 0;
+ hd->abortSCpnt = NULL;
+
+ /* Clear the pointer used to store
+ * single-threaded commands, i.e., those
+ * issued during a bus scan, dv and
+ * configuration pages.
+ */
+ hd->cmdPtr = NULL;
+
+ /* Initialize this SCSI Host's timers.
+ * To use, set the timer's expires field
+ * and call add_timer().
+ */
+ init_timer(&hd->timer);
+ hd->timer.data = (unsigned long) hd;
+ hd->timer.function = mptscsih_timer_expired;
+
+ hd->mpt_pq_filter = mpt_pq_filter;
+ ioc->sas_data.ptClear = mpt_pt_clear;
+
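+ /* Optionally clear the controller's SAS persistent mappings at start-up when ptClear is set. */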
+ if (ioc->sas_data.ptClear == 1) {
+ mptbase_sas_persist_operation(
+ ioc, MPI_SAS_OP_CLEAR_ALL_PERSISTENT);
+ }
+
+ ddvprintk((MYIOC_s_INFO_FMT
+ "mpt_pq_filter %x mpt_pt_clear %x\n",
+ ioc->name,
+ mpt_pq_filter,
+ mpt_pt_clear));
+
+ init_waitqueue_head(&hd->scandv_waitq);
+ hd->scandv_wait_done = 0;
+ hd->last_queue_full = 0;
+
+ error = scsi_add_host(sh, &ioc->pcidev->dev);
+ if (error) {
+ dprintk((KERN_ERR MYNAM
+ "scsi_add_host failed\n"));
+ goto mptsas_probe_failed;
+ }
+
+ mptsas_scan_sas_topology(ioc);
+
+ return 0;
+
+mptsas_probe_failed:
+
+ mptscsih_remove(pdev);
+ return error;
+}
+
+static void __devexit mptsas_remove(struct pci_dev *pdev)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+ struct mptsas_portinfo *p, *n;
+
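+ /* Remove the SAS transport objects first, free the cached topology, then detach the SCSI host. */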
+ sas_remove_host(ioc->sh);
+
+ list_for_each_entry_safe(p, n, &ioc->sas_topology, list) {
+ list_del(&p->list);
+ kfree(p);
+ }
+
+ mptscsih_remove(pdev);
+}
+
+static struct pci_device_id mptsas_pci_table[] = {
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1066,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1068,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064E,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1066E,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1068E,
+ PCI_ANY_ID, PCI_ANY_ID },
+ {0} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, mptsas_pci_table);
+
+
+static struct pci_driver mptsas_driver = {
+ .name = "mptsas",
+ .id_table = mptsas_pci_table,
+ .probe = mptsas_probe,
+ .remove = __devexit_p(mptsas_remove),
+ .shutdown = mptscsih_shutdown,
+#ifdef CONFIG_PM
+ .suspend = mptscsih_suspend,
+ .resume = mptscsih_resume,
+#endif
+};
+
+static int __init
+mptsas_init(void)
+{
+ show_mptmod_ver(my_NAME, my_VERSION);
+
+ mptsas_transport_template =
+ sas_attach_transport(&mptsas_transport_functions);
+ if (!mptsas_transport_template)
+ return -ENODEV;
+
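+ /* Register callback contexts with the MPT base driver: normal I/O completion, task management completion, and internal (scan/DV) completion. */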
+ mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER);
+ mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER);
+ mptsasInternalCtx =
+ mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER);
+
+ if (mpt_event_register(mptsasDoneCtx, mptscsih_event_process) == 0) {
+ devtprintk((KERN_INFO MYNAM
+ ": Registered for IOC event notifications\n"));
+ }
+
+ if (mpt_reset_register(mptsasDoneCtx, mptscsih_ioc_reset) == 0) {
+ dprintk((KERN_INFO MYNAM
+ ": Registered for IOC reset notifications\n"));
+ }
+
+ return pci_register_driver(&mptsas_driver);
+}
+
+static void __exit
+mptsas_exit(void)
+{
+ pci_unregister_driver(&mptsas_driver);
+ sas_release_transport(mptsas_transport_template);
+
+ mpt_reset_deregister(mptsasDoneCtx);
+ mpt_event_deregister(mptsasDoneCtx);
+
+ mpt_deregister(mptsasInternalCtx);
+ mpt_deregister(mptsasTaskCtx);
+ mpt_deregister(mptsasDoneCtx);
+}
+
+module_init(mptsas_init);
+module_exit(mptsas_exit);
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 4a003dc5fde..5cb07eb224d 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -62,6 +62,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
#include "mptbase.h"
#include "mptscsih.h"
@@ -93,8 +94,9 @@ typedef struct _BIG_SENSE_BUF {
#define MPT_ICFLAG_BUF_CAP 0x01 /* ReadBuffer Read Capacity format */
#define MPT_ICFLAG_ECHO 0x02 /* ReadBuffer Echo buffer format */
-#define MPT_ICFLAG_PHYS_DISK 0x04 /* Any SCSI IO but do Phys Disk Format */
-#define MPT_ICFLAG_TAGGED_CMD 0x08 /* Do tagged IO */
+#define MPT_ICFLAG_EBOS 0x04 /* ReadBuffer Echo buffer has EBOS */
+#define MPT_ICFLAG_PHYS_DISK 0x08 /* Any SCSI IO but do Phys Disk Format */
+#define MPT_ICFLAG_TAGGED_CMD 0x10 /* Do tagged IO */
#define MPT_ICFLAG_DID_RESET 0x20 /* Bus Reset occurred with this command */
#define MPT_ICFLAG_RESERVED 0x40 /* Reserved has been issued */
@@ -159,6 +161,8 @@ int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR
static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
static int mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum);
+static struct work_struct mptscsih_persistTask;
+
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
static int mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io);
static void mptscsih_domainValidation(void *hd);
@@ -167,6 +171,7 @@ static void mptscsih_qas_check(MPT_SCSI_HOST *hd, int id);
static int mptscsih_doDv(MPT_SCSI_HOST *hd, int channel, int target);
static void mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage);
static void mptscsih_fillbuf(char *buffer, int size, int index, int width);
+static void mptscsih_set_dvflags_raid(MPT_SCSI_HOST *hd, int id);
#endif
void mptscsih_remove(struct pci_dev *);
@@ -606,11 +611,24 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
xfer_cnt = le32_to_cpu(pScsiReply->TransferCount);
sc->resid = sc->request_bufflen - xfer_cnt;
+ /*
+ * if we get a data underrun indication, yet no data was
+ * transferred and the SCSI status indicates that the
+ * command was never started, change the data underrun
+ * to success
+ */
+ if (status == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
+ (scsi_status == MPI_SCSI_STATUS_BUSY ||
+ scsi_status == MPI_SCSI_STATUS_RESERVATION_CONFLICT ||
+ scsi_status == MPI_SCSI_STATUS_TASK_SET_FULL)) {
+ status = MPI_IOCSTATUS_SUCCESS;
+ }
+
dreplyprintk((KERN_NOTICE "Reply ha=%d id=%d lun=%d:\n"
"IOCStatus=%04xh SCSIState=%02xh SCSIStatus=%02xh\n"
"resid=%d bufflen=%d xfer_cnt=%d\n",
ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1],
- status, scsi_state, scsi_status, sc->resid,
+ status, scsi_state, scsi_status, sc->resid,
sc->request_bufflen, xfer_cnt));
if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID)
@@ -619,8 +637,11 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
/*
* Look for + dump FCP ResponseInfo[]!
*/
- if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID) {
- printk(KERN_NOTICE " FCP_ResponseInfo=%08xh\n",
+ if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID &&
+ pScsiReply->ResponseInfo) {
+ printk(KERN_NOTICE "ha=%d id=%d lun=%d: "
+ "FCP_ResponseInfo=%08xh\n",
+ ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1],
le32_to_cpu(pScsiReply->ResponseInfo));
}
@@ -661,23 +682,13 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
break;
case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
- if ( xfer_cnt >= sc->underflow ) {
- /* Sufficient data transfer occurred */
+ sc->resid = sc->request_bufflen - xfer_cnt;
+ if ((xfer_cnt == 0) || (sc->underflow > xfer_cnt))
+ sc->result = DID_SOFT_ERROR << 16;
+ else /* Sufficient data transfer occurred */
sc->result = (DID_OK << 16) | scsi_status;
- } else if ( xfer_cnt == 0 ) {
- /* A CRC Error causes this condition; retry */
- sc->result = (DRIVER_SENSE << 24) | (DID_OK << 16) |
- (CHECK_CONDITION << 1);
- sc->sense_buffer[0] = 0x70;
- sc->sense_buffer[2] = NO_SENSE;
- sc->sense_buffer[12] = 0;
- sc->sense_buffer[13] = 0;
- } else {
- sc->result = DID_SOFT_ERROR << 16;
- }
- dreplyprintk((KERN_NOTICE
- "RESIDUAL_MISMATCH: result=%x on id=%d\n",
- sc->result, sc->device->id));
+ dreplyprintk((KERN_NOTICE
+ "RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->device->id));
break;
case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
@@ -692,7 +703,10 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
;
} else {
if (xfer_cnt < sc->underflow) {
- sc->result = DID_SOFT_ERROR << 16;
+ if (scsi_status == SAM_STAT_BUSY)
+ sc->result = SAM_STAT_BUSY;
+ else
+ sc->result = DID_SOFT_ERROR << 16;
}
if (scsi_state & (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)) {
/* What to do?
@@ -717,8 +731,10 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
- scsi_status = pScsiReply->SCSIStatus;
- sc->result = (DID_OK << 16) | scsi_status;
+ if (scsi_status == MPI_SCSI_STATUS_BUSY)
+ sc->result = (DID_BUS_BUSY << 16) | scsi_status;
+ else
+ sc->result = (DID_OK << 16) | scsi_status;
if (scsi_state == 0) {
;
} else if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) {
@@ -890,12 +906,13 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, uint target, uint lun)
SCSIIORequest_t *mf = NULL;
int ii;
int max = hd->ioc->req_depth;
+ struct scsi_cmnd *sc;
dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n",
target, lun, max));
for (ii=0; ii < max; ii++) {
- if (hd->ScsiLookup[ii] != NULL) {
+ if ((sc = hd->ScsiLookup[ii]) != NULL) {
mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii);
@@ -910,9 +927,22 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, uint target, uint lun)
hd->ScsiLookup[ii] = NULL;
mptscsih_freeChainBuffers(hd->ioc, ii);
mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf);
+ if (sc->use_sg) {
+ pci_unmap_sg(hd->ioc->pcidev,
+ (struct scatterlist *) sc->request_buffer,
+ sc->use_sg,
+ sc->sc_data_direction);
+ } else if (sc->request_bufflen) {
+ pci_unmap_single(hd->ioc->pcidev,
+ sc->SCp.dma_handle,
+ sc->request_bufflen,
+ sc->sc_data_direction);
+ }
+ sc->host_scribble = NULL;
+ sc->result = DID_NO_CONNECT << 16;
+ sc->scsi_done(sc);
}
}
-
return;
}
@@ -967,8 +997,10 @@ mptscsih_remove(struct pci_dev *pdev)
unsigned long flags;
int sz1;
- if(!host)
+ if(!host) {
+ mpt_detach(pdev);
return;
+ }
scsi_remove_host(host);
@@ -1256,8 +1288,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
MPT_SCSI_HOST *hd;
MPT_FRAME_HDR *mf;
SCSIIORequest_t *pScsiReq;
- VirtDevice *pTarget;
- int target;
+ VirtDevice *pTarget = SCpnt->device->hostdata;
int lun;
u32 datalen;
u32 scsictl;
@@ -1267,12 +1298,9 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
int ii;
hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata;
- target = SCpnt->device->id;
lun = SCpnt->device->lun;
SCpnt->scsi_done = done;
- pTarget = hd->Targets[target];
-
dmfprintk((MYIOC_s_INFO_FMT "qcmd: SCpnt=%p, done()=%p\n",
(hd && hd->ioc) ? hd->ioc->name : "ioc?", SCpnt, done));
@@ -1315,7 +1343,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
/* Default to untagged. Once a target structure has been allocated,
* use the Inquiry data to determine if device supports tagged.
*/
- if ( pTarget
+ if (pTarget
&& (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)
&& (SCpnt->device->tagged_supported)) {
scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
@@ -1325,8 +1353,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
/* Use the above information to set up the message frame
*/
- pScsiReq->TargetID = (u8) target;
- pScsiReq->Bus = (u8) SCpnt->device->channel;
+ pScsiReq->TargetID = (u8) pTarget->target_id;
+ pScsiReq->Bus = pTarget->bus_id;
pScsiReq->ChainOffset = 0;
pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
pScsiReq->CDBLength = SCpnt->cmd_len;
@@ -1378,7 +1406,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
if (hd->ioc->bus_type == SCSI) {
- int dvStatus = hd->ioc->spi_data.dvStatus[target];
+ int dvStatus = hd->ioc->spi_data.dvStatus[pTarget->target_id];
int issueCmd = 1;
if (dvStatus || hd->ioc->spi_data.forceDv) {
@@ -1426,6 +1454,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
return 0;
fail:
+ hd->ScsiLookup[my_idx] = NULL;
mptscsih_freeChainBuffers(hd->ioc, my_idx);
mpt_free_msg_frame(hd->ioc, mf);
return SCSI_MLQUEUE_HOST_BUSY;
@@ -1713,24 +1742,23 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
MPT_FRAME_HDR *mf;
u32 ctx2abort;
int scpnt_idx;
+ int retval;
/* If we can't locate our host adapter structure, return FAILED status.
*/
if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL) {
SCpnt->result = DID_RESET << 16;
SCpnt->scsi_done(SCpnt);
- dfailprintk((KERN_WARNING MYNAM ": mptscsih_abort: "
+ dfailprintk((KERN_INFO MYNAM ": mptscsih_abort: "
"Can't locate host! (sc=%p)\n",
SCpnt));
return FAILED;
}
ioc = hd->ioc;
- if (hd->resetPending)
+ if (hd->resetPending) {
return FAILED;
-
- printk(KERN_WARNING MYNAM ": %s: >> Attempting task abort! (sc=%p)\n",
- hd->ioc->name, SCpnt);
+ }
if (hd->timeouts < -1)
hd->timeouts++;
@@ -1738,16 +1766,20 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
/* Find this command
*/
if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) {
- /* Cmd not found in ScsiLookup.
+ /* Cmd not found in ScsiLookup.
* Do OS callback.
*/
SCpnt->result = DID_RESET << 16;
- dtmprintk((KERN_WARNING MYNAM ": %s: mptscsih_abort: "
+ dtmprintk((KERN_INFO MYNAM ": %s: mptscsih_abort: "
"Command not in the active list! (sc=%p)\n",
hd->ioc->name, SCpnt));
return SUCCESS;
}
+ printk(KERN_WARNING MYNAM ": %s: attempting task abort! (sc=%p)\n",
+ hd->ioc->name, SCpnt);
+ scsi_print_command(SCpnt);
+
/* Most important! Set TaskMsgContext to SCpnt's MsgContext!
* (the IO to be ABORT'd)
*
@@ -1760,38 +1792,22 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
hd->abortSCpnt = SCpnt;
- if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
SCpnt->device->channel, SCpnt->device->id, SCpnt->device->lun,
- ctx2abort, 2 /* 2 second timeout */)
- < 0) {
+ ctx2abort, 2 /* 2 second timeout */);
- /* The TM request failed and the subsequent FW-reload failed!
- * Fatal error case.
- */
- printk(MYIOC_s_WARN_FMT "Error issuing abort task! (sc=%p)\n",
- hd->ioc->name, SCpnt);
+ printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n",
+ hd->ioc->name,
+ ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
- /* We must clear our pending flag before clearing our state.
- */
+ if (retval == 0)
+ return SUCCESS;
+
+ if (retval != FAILED) {
hd->tmPending = 0;
hd->tmState = TM_STATE_NONE;
-
- /* Unmap the DMA buffers, if any. */
- if (SCpnt->use_sg) {
- pci_unmap_sg(ioc->pcidev, (struct scatterlist *) SCpnt->request_buffer,
- SCpnt->use_sg, SCpnt->sc_data_direction);
- } else if (SCpnt->request_bufflen) {
- pci_unmap_single(ioc->pcidev, SCpnt->SCp.dma_handle,
- SCpnt->request_bufflen, SCpnt->sc_data_direction);
- }
- hd->ScsiLookup[scpnt_idx] = NULL;
- SCpnt->result = DID_RESET << 16;
- SCpnt->scsi_done(SCpnt); /* Issue the command callback */
- mptscsih_freeChainBuffers(ioc, scpnt_idx);
- mpt_free_msg_frame(ioc, mf);
- return FAILED;
}
- return SUCCESS;
+ return FAILED;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1807,11 +1823,12 @@ int
mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
{
MPT_SCSI_HOST *hd;
+ int retval;
/* If we can't locate our host adapter structure, return FAILED status.
*/
if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
- dtmprintk((KERN_WARNING MYNAM ": mptscsih_dev_reset: "
+ dtmprintk((KERN_INFO MYNAM ": mptscsih_dev_reset: "
"Can't locate host! (sc=%p)\n",
SCpnt));
return FAILED;
@@ -1820,24 +1837,26 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
if (hd->resetPending)
return FAILED;
- printk(KERN_WARNING MYNAM ": %s: >> Attempting target reset! (sc=%p)\n",
+ printk(KERN_WARNING MYNAM ": %s: attempting target reset! (sc=%p)\n",
hd->ioc->name, SCpnt);
+ scsi_print_command(SCpnt);
- if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+ retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
SCpnt->device->channel, SCpnt->device->id,
- 0, 0, 5 /* 5 second timeout */)
- < 0){
- /* The TM request failed and the subsequent FW-reload failed!
- * Fatal error case.
- */
- printk(MYIOC_s_WARN_FMT "Error processing TaskMgmt request (sc=%p)\n",
- hd->ioc->name, SCpnt);
+ 0, 0, 5 /* 5 second timeout */);
+
+ printk (KERN_WARNING MYNAM ": %s: target reset: %s (sc=%p)\n",
+ hd->ioc->name,
+ ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
+
+ if (retval == 0)
+ return SUCCESS;
+
+ if (retval != FAILED) {
hd->tmPending = 0;
hd->tmState = TM_STATE_NONE;
- return FAILED;
}
-
- return SUCCESS;
+ return FAILED;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1853,41 +1872,39 @@ int
mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
{
MPT_SCSI_HOST *hd;
- spinlock_t *host_lock = SCpnt->device->host->host_lock;
+ int retval;
/* If we can't locate our host adapter structure, return FAILED status.
*/
if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
- dtmprintk((KERN_WARNING MYNAM ": mptscsih_bus_reset: "
+ dtmprintk((KERN_INFO MYNAM ": mptscsih_bus_reset: "
"Can't locate host! (sc=%p)\n",
SCpnt ) );
return FAILED;
}
- printk(KERN_WARNING MYNAM ": %s: >> Attempting bus reset! (sc=%p)\n",
+ printk(KERN_WARNING MYNAM ": %s: attempting bus reset! (sc=%p)\n",
hd->ioc->name, SCpnt);
+ scsi_print_command(SCpnt);
if (hd->timeouts < -1)
hd->timeouts++;
- /* We are now ready to execute the task management request. */
- if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
- SCpnt->device->channel, 0, 0, 0, 5 /* 5 second timeout */)
- < 0){
+ retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ SCpnt->device->channel, 0, 0, 0, 5 /* 5 second timeout */);
- /* The TM request failed and the subsequent FW-reload failed!
- * Fatal error case.
- */
- printk(MYIOC_s_WARN_FMT
- "Error processing TaskMgmt request (sc=%p)\n",
- hd->ioc->name, SCpnt);
+ printk (KERN_WARNING MYNAM ": %s: bus reset: %s (sc=%p)\n",
+ hd->ioc->name,
+ ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
+
+ if (retval == 0)
+ return SUCCESS;
+
+ if (retval != FAILED) {
hd->tmPending = 0;
hd->tmState = TM_STATE_NONE;
- spin_lock_irq(host_lock);
- return FAILED;
}
-
- return SUCCESS;
+ return FAILED;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2169,7 +2186,7 @@ mptscsih_slave_alloc(struct scsi_device *device)
vdev->raidVolume = 0;
hd->Targets[device->id] = vdev;
if (hd->ioc->bus_type == SCSI) {
- if (hd->ioc->spi_data.isRaid & (1 << device->id)) {
+ if (hd->ioc->raid_data.isRaid & (1 << device->id)) {
vdev->raidVolume = 1;
ddvtprintk((KERN_INFO
"RAID Volume @ id %d\n", device->id));
@@ -2180,22 +2197,7 @@ mptscsih_slave_alloc(struct scsi_device *device)
out:
vdev->num_luns++;
- return 0;
-}
-
-static int
-mptscsih_is_raid_volume(MPT_SCSI_HOST *hd, uint id)
-{
- int i;
-
- if (!hd->ioc->spi_data.isRaid || !hd->ioc->spi_data.pIocPg3)
- return 0;
-
- for (i = 0; i < hd->ioc->spi_data.pIocPg3->NumPhysDisks; i++) {
- if (id == hd->ioc->spi_data.pIocPg3->PhysDisk[i].PhysDiskID)
- return 1;
- }
-
+ device->hostdata = vdev;
return 0;
}
@@ -2226,7 +2228,7 @@ mptscsih_slave_destroy(struct scsi_device *device)
hd->Targets[target] = NULL;
if (hd->ioc->bus_type == SCSI) {
- if (mptscsih_is_raid_volume(hd, target)) {
+ if (mptscsih_is_phys_disk(hd->ioc, target)) {
hd->ioc->spi_data.forceDv |= MPT_SCSICFG_RELOAD_IOC_PG3;
} else {
hd->ioc->spi_data.dvStatus[target] =
@@ -2439,6 +2441,7 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
MPT_SCSI_HOST *hd;
unsigned long flags;
+ int ii;
dtmprintk((KERN_WARNING MYNAM
": IOC %s_reset routed to SCSI host driver!\n",
@@ -2496,11 +2499,8 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
/* ScsiLookup initialization
*/
- {
- int ii;
- for (ii=0; ii < hd->ioc->req_depth; ii++)
- hd->ScsiLookup[ii] = NULL;
- }
+ for (ii=0; ii < hd->ioc->req_depth; ii++)
+ hd->ScsiLookup[ii] = NULL;
/* 2. Chain Buffer initialization
*/
@@ -2549,6 +2549,16 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* work queue task to clear the SAS persistence table */
+static void
+mptscsih_sas_persist_clear_table(void * arg)
+{
+ MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+
+ mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
int
mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
@@ -2558,18 +2568,18 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
devtprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
ioc->name, event));
+ if (ioc->sh == NULL ||
+ ((hd = (MPT_SCSI_HOST *)ioc->sh->hostdata) == NULL))
+ return 1;
+
switch (event) {
case MPI_EVENT_UNIT_ATTENTION: /* 03 */
/* FIXME! */
break;
case MPI_EVENT_IOC_BUS_RESET: /* 04 */
case MPI_EVENT_EXT_BUS_RESET: /* 05 */
- hd = NULL;
- if (ioc->sh) {
- hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
- if (hd && (ioc->bus_type == SCSI) && (hd->soft_resets < -1))
- hd->soft_resets++;
- }
+ if (hd && (ioc->bus_type == SCSI) && (hd->soft_resets < -1))
+ hd->soft_resets++;
break;
case MPI_EVENT_LOGOUT: /* 09 */
/* FIXME! */
@@ -2588,69 +2598,24 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
break;
case MPI_EVENT_INTEGRATED_RAID: /* 0B */
+ {
+ pMpiEventDataRaid_t pRaidEventData =
+ (pMpiEventDataRaid_t) pEvReply->Data;
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
- /* negoNvram set to 0 if DV enabled and to USE_NVRAM if
- * if DV disabled. Need to check for target mode.
- */
- hd = NULL;
- if (ioc->sh)
- hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
-
- if (hd && (ioc->bus_type == SCSI) && (hd->negoNvram == 0)) {
- ScsiCfgData *pSpi;
- Ioc3PhysDisk_t *pPDisk;
- int numPDisk;
- u8 reason;
- u8 physDiskNum;
-
- reason = (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16;
- if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) {
- /* New or replaced disk.
- * Set DV flag and schedule DV.
- */
- pSpi = &ioc->spi_data;
- physDiskNum = (le32_to_cpu(pEvReply->Data[0]) & 0xFF000000) >> 24;
- ddvtprintk(("DV requested for phys disk id %d\n", physDiskNum));
- if (pSpi->pIocPg3) {
- pPDisk = pSpi->pIocPg3->PhysDisk;
- numPDisk =pSpi->pIocPg3->NumPhysDisks;
-
- while (numPDisk) {
- if (physDiskNum == pPDisk->PhysDiskNum) {
- pSpi->dvStatus[pPDisk->PhysDiskID] = (MPT_SCSICFG_NEED_DV | MPT_SCSICFG_DV_NOT_DONE);
- pSpi->forceDv = MPT_SCSICFG_NEED_DV;
- ddvtprintk(("NEED_DV set for phys disk id %d\n", pPDisk->PhysDiskID));
- break;
- }
- pPDisk++;
- numPDisk--;
- }
-
- if (numPDisk == 0) {
- /* The physical disk that needs DV was not found
- * in the stored IOC Page 3. The driver must reload
- * this page. DV routine will set the NEED_DV flag for
- * all phys disks that have DV_NOT_DONE set.
- */
- pSpi->forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
- ddvtprintk(("phys disk %d not found. Setting reload IOC Pg3 Flag\n", physDiskNum));
- }
- }
- }
- }
+ /* Domain Validation Needed */
+ if (ioc->bus_type == SCSI &&
+ pRaidEventData->ReasonCode ==
+ MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED)
+ mptscsih_set_dvflags_raid(hd, pRaidEventData->PhysDiskNum);
#endif
+ break;
+ }
-#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY)
- printk("Raid Event RF: ");
- {
- u32 *m = (u32 *)pEvReply;
- int ii;
- int n = (int)pEvReply->MsgLength;
- for (ii=6; ii < n; ii++)
- printk(" %08x", le32_to_cpu(m[ii]));
- printk("\n");
- }
-#endif
+ /* Persistent table is full. */
+ case MPI_EVENT_PERSISTENT_TABLE_FULL:
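+ /* Clearing the table requires a firmware request that can sleep, so defer it to a work item rather than doing it in event context. */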
+ INIT_WORK(&mptscsih_persistTask,
+ mptscsih_sas_persist_clear_table, (void *)ioc);
+ schedule_work(&mptscsih_persistTask);
break;
case MPI_EVENT_NONE: /* 00 */
@@ -2687,7 +2652,7 @@ mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *
{
int indexed_lun, lun_index;
VirtDevice *vdev;
- ScsiCfgData *pSpi;
+ SpiCfgData *pSpi;
char data_56;
dinitprintk((MYIOC_s_INFO_FMT "initTarget bus=%d id=%d lun=%d hd=%p\n",
@@ -2794,7 +2759,7 @@ mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *
static void
mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56)
{
- ScsiCfgData *pspi_data = &hd->ioc->spi_data;
+ SpiCfgData *pspi_data = &hd->ioc->spi_data;
int id = (int) target->target_id;
int nvram;
VirtDevice *vdev;
@@ -2973,11 +2938,13 @@ mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56)
static void
mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
{
+ MPT_ADAPTER *ioc = hd->ioc;
u8 cmd;
- ScsiCfgData *pSpi;
+ SpiCfgData *pSpi;
- ddvtprintk((" set_dvflags: id=%d lun=%d negoNvram=%x cmd=%x\n",
- pReq->TargetID, pReq->LUN[1], hd->negoNvram, pReq->CDB[0]));
+ ddvtprintk((MYIOC_s_NOTE_FMT
+ " set_dvflags: id=%d lun=%d negoNvram=%x cmd=%x\n",
+ hd->ioc->name, pReq->TargetID, pReq->LUN[1], hd->negoNvram, pReq->CDB[0]));
if ((pReq->LUN[1] != 0) || (hd->negoNvram != 0))
return;
@@ -2985,12 +2952,12 @@ mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
cmd = pReq->CDB[0];
if ((cmd == READ_CAPACITY) || (cmd == MODE_SENSE)) {
- pSpi = &hd->ioc->spi_data;
- if ((pSpi->isRaid & (1 << pReq->TargetID)) && pSpi->pIocPg3) {
+ pSpi = &ioc->spi_data;
+ if ((ioc->raid_data.isRaid & (1 << pReq->TargetID)) && ioc->raid_data.pIocPg3) {
/* Set NEED_DV for all hidden disks
*/
- Ioc3PhysDisk_t *pPDisk = pSpi->pIocPg3->PhysDisk;
- int numPDisk = pSpi->pIocPg3->NumPhysDisks;
+ Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
+ int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
while (numPDisk) {
pSpi->dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV;
@@ -3004,6 +2971,50 @@ mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
}
}
+/* mptscsih_set_dvflags_raid()
+ *
+ * New or replaced disk. Set DV flag and schedule DV.
+ */
+static void
+mptscsih_set_dvflags_raid(MPT_SCSI_HOST *hd, int id)
+{
+ MPT_ADAPTER *ioc = hd->ioc;
+ SpiCfgData *pSpi = &ioc->spi_data;
+ Ioc3PhysDisk_t *pPDisk;
+ int numPDisk;
+
+ if (hd->negoNvram != 0)
+ return;
+
+ ddvtprintk(("DV requested for phys disk id %d\n", id));
+ if (ioc->raid_data.pIocPg3) {
+ pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
+ numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
+ while (numPDisk) {
+ if (id == pPDisk->PhysDiskNum) {
+ pSpi->dvStatus[pPDisk->PhysDiskID] =
+ (MPT_SCSICFG_NEED_DV | MPT_SCSICFG_DV_NOT_DONE);
+ pSpi->forceDv = MPT_SCSICFG_NEED_DV;
+ ddvtprintk(("NEED_DV set for phys disk id %d\n",
+ pPDisk->PhysDiskID));
+ break;
+ }
+ pPDisk++;
+ numPDisk--;
+ }
+
+ if (numPDisk == 0) {
+ /* The physical disk that needs DV was not found
+ * in the stored IOC Page 3. The driver must reload
+ * this page. DV routine will set the NEED_DV flag for
+ * all phys disks that have DV_NOT_DONE set.
+ */
+ pSpi->forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
+ ddvtprintk(("phys disk %d not found. Setting reload IOC Pg3 Flag\n",id));
+ }
+ }
+}
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* If no Target, bus reset on 1st I/O. Set the flag to
@@ -3091,7 +3102,7 @@ mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
MPT_ADAPTER *ioc = hd->ioc;
Config_t *pReq;
SCSIDevicePage1_t *pData;
- VirtDevice *pTarget;
+ VirtDevice *pTarget=NULL;
MPT_FRAME_HDR *mf;
dma_addr_t dataDma;
u16 req_idx;
@@ -3190,7 +3201,7 @@ mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
#endif
if (flags & MPT_SCSICFG_BLK_NEGO)
- negoFlags = MPT_TARGET_NO_NEGO_WIDE | MPT_TARGET_NO_NEGO_SYNC;
+ negoFlags |= MPT_TARGET_NO_NEGO_WIDE | MPT_TARGET_NO_NEGO_SYNC;
mptscsih_setDevicePage1Flags(width, factor, offset,
&requested, &configuration, negoFlags);
@@ -4011,7 +4022,7 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum)
/* If target Ptr NULL or if this target is NOT a disk, skip.
*/
- if ((pTarget) && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)){
+ if ((pTarget) && (pTarget->inq_data[0] == TYPE_DISK)){
for (lun=0; lun <= MPT_LAST_LUN; lun++) {
/* If LUN present, issue the command
*/
@@ -4106,9 +4117,9 @@ mptscsih_domainValidation(void *arg)
if ((ioc->spi_data.forceDv & MPT_SCSICFG_RELOAD_IOC_PG3) != 0) {
mpt_read_ioc_pg_3(ioc);
- if (ioc->spi_data.pIocPg3) {
- Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk;
- int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks;
+ if (ioc->raid_data.pIocPg3) {
+ Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
+ int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
while (numPDisk) {
if (ioc->spi_data.dvStatus[pPDisk->PhysDiskID] & MPT_SCSICFG_DV_NOT_DONE)
@@ -4147,7 +4158,7 @@ mptscsih_domainValidation(void *arg)
isPhysDisk = mptscsih_is_phys_disk(ioc, id);
if (isPhysDisk) {
for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
- if (hd->ioc->spi_data.isRaid & (1 << ii)) {
+ if (hd->ioc->raid_data.isRaid & (1 << ii)) {
hd->ioc->spi_data.dvStatus[ii] |= MPT_SCSICFG_DV_PENDING;
}
}
@@ -4166,7 +4177,7 @@ mptscsih_domainValidation(void *arg)
if (isPhysDisk) {
for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
- if (hd->ioc->spi_data.isRaid & (1 << ii)) {
+ if (hd->ioc->raid_data.isRaid & (1 << ii)) {
hd->ioc->spi_data.dvStatus[ii] &= ~MPT_SCSICFG_DV_PENDING;
}
}
@@ -4188,21 +4199,21 @@ mptscsih_domainValidation(void *arg)
/* Search IOC page 3 to determine if this is hidden physical disk
*/
-static int
+static int
mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id)
{
- if (ioc->spi_data.pIocPg3) {
- Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk;
- int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks;
+ int i;
- while (numPDisk) {
- if (pPDisk->PhysDiskID == id) {
- return 1;
- }
- pPDisk++;
- numPDisk--;
- }
+ if (!ioc->raid_data.isRaid || !ioc->raid_data.pIocPg3)
+ return 0;
+
+ for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
+ if (id == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID)
+ return 1;
}
+
return 0;
}
@@ -4408,7 +4419,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
/* Skip this ID? Set cfg.cfghdr.hdr to force config page write
*/
{
- ScsiCfgData *pspi_data = &hd->ioc->spi_data;
+ SpiCfgData *pspi_data = &hd->ioc->spi_data;
if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) {
/* Set the factor from nvram */
nfactor = (pspi_data->nvram[id] & MPT_NVRAM_SYNC_MASK) >> 8;
@@ -4438,11 +4449,11 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
}
/* Finish iocmd inititialization - hidden or visible disk? */
- if (ioc->spi_data.pIocPg3) {
+ if (ioc->raid_data.pIocPg3) {
/* Search IOC page 3 for matching id
*/
- Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk;
- int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks;
+ Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
+ int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
while (numPDisk) {
if (pPDisk->PhysDiskID == id) {
@@ -4466,7 +4477,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
/* RAID Volume ID's may double for a physical device. If RAID but
* not a physical ID as well, skip DV.
*/
- if ((hd->ioc->spi_data.isRaid & (1 << id)) && !(iocmd.flags & MPT_ICFLAG_PHYS_DISK))
+ if ((hd->ioc->raid_data.isRaid & (1 << id)) && !(iocmd.flags & MPT_ICFLAG_PHYS_DISK))
goto target_done;
@@ -4815,6 +4826,8 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
notDone = 0;
if (iocmd.flags & MPT_ICFLAG_ECHO) {
bufsize = ((pbuf1[2] & 0x1F) <<8) | pbuf1[3];
+ if (pbuf1[0] & 0x01)
+ iocmd.flags |= MPT_ICFLAG_EBOS;
} else {
bufsize = pbuf1[1]<<16 | pbuf1[2]<<8 | pbuf1[3];
}
@@ -4911,6 +4924,9 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
}
iocmd.flags &= ~MPT_ICFLAG_DID_RESET;
+ if (iocmd.flags & MPT_ICFLAG_EBOS)
+ goto skip_Reserve;
+
repeat = 5;
while (repeat && (!(iocmd.flags & MPT_ICFLAG_RESERVED))) {
iocmd.cmd = RESERVE;
@@ -4954,6 +4970,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
}
}
+skip_Reserve:
mptscsih_fillbuf(pbuf1, sz, patt, 1);
iocmd.cmd = WRITE_BUFFER;
iocmd.data_dma = buf1_dma;
@@ -5198,11 +5215,12 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
* If not an LVD bus, the adapter minSyncFactor has been
* already throttled back.
*/
+ negoFlags = hd->ioc->spi_data.noQas;
if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume) {
width = pTarget->maxWidth;
offset = pTarget->maxOffset;
factor = pTarget->minSyncFactor;
- negoFlags = pTarget->negoFlags;
+ negoFlags |= pTarget->negoFlags;
} else {
if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
data = hd->ioc->spi_data.nvram[id];
@@ -5223,7 +5241,6 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
}
/* Set the negotiation flags */
- negoFlags = hd->ioc->spi_data.noQas;
if (!width)
negoFlags |= MPT_TARGET_NO_NEGO_WIDE;
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 51c0255ac16..971fda4b8b5 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -1,5 +1,5 @@
/*
- * linux/drivers/message/fusion/mptscsi.h
+ * linux/drivers/message/fusion/mptscsih.h
* High performance SCSI / Fibre Channel SCSI Host device driver.
* For use with PCI chip/adapter(s):
* LSIFC9xx/LSI409xx Fibre Channel
@@ -53,8 +53,8 @@
* SCSI Public stuff...
*/
-#define MPT_SCSI_CMD_PER_DEV_HIGH 31
-#define MPT_SCSI_CMD_PER_DEV_LOW 7
+#define MPT_SCSI_CMD_PER_DEV_HIGH 64
+#define MPT_SCSI_CMD_PER_DEV_LOW 32
#define MPT_SCSI_CMD_PER_LUN 7
@@ -77,6 +77,7 @@
#define MPTSCSIH_MAX_WIDTH 1
#define MPTSCSIH_MIN_SYNC 0x08
#define MPTSCSIH_SAF_TE 0
+#define MPTSCSIH_PT_CLEAR 0
#endif
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 587d1274fd7..5c0e307d1d5 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -199,7 +199,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MYIOC_s_WARN_FMT
"Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n",
ioc->name, ioc);
- return -ENODEV;
+ return 0;
}
sh = scsi_host_alloc(&mptspi_driver_template, sizeof(MPT_SCSI_HOST));
diff --git a/drivers/message/i2o/config-osm.c b/drivers/message/i2o/config-osm.c
index af32ab4e90c..10432f66520 100644
--- a/drivers/message/i2o/config-osm.c
+++ b/drivers/message/i2o/config-osm.c
@@ -56,8 +56,11 @@ static int __init i2o_config_init(void)
return -EBUSY;
}
#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL
- if (i2o_config_old_init())
+ if (i2o_config_old_init()) {
+ osm_err("old config handler initialization failed\n");
i2o_driver_unregister(&i2o_config_driver);
+ return -EBUSY;
+ }
#endif
return 0;
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index a851d65c7cf..a260f83bcb0 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -48,8 +48,8 @@ struct ucb1x00_ts {
u16 x_res;
u16 y_res;
- int restart:1;
- int adcsync:1;
+ unsigned int restart:1;
+ unsigned int adcsync:1;
};
static int adcsync;
diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c
index 9a087c1fb0b..24f670b5a4f 100644
--- a/drivers/mtd/devices/docecc.c
+++ b/drivers/mtd/devices/docecc.c
@@ -40,7 +40,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/doc2000.h>
-#define DEBUG 0
+#define DEBUG_ECC 0
/* need to undef it (from asm/termbits.h) */
#undef B0
@@ -249,7 +249,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
lambda[j] ^= Alpha_to[modnn(u + tmp)];
}
}
-#if DEBUG >= 1
+#if DEBUG_ECC >= 1
/* Test code that verifies the erasure locator polynomial just constructed
Needed only for decoder debugging. */
@@ -276,7 +276,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
count = -1;
goto finish;
}
-#if DEBUG >= 2
+#if DEBUG_ECC >= 2
printf("\n Erasure positions as determined by roots of Eras Loc Poly:\n");
for (i = 0; i < count; i++)
printf("%d ", loc[i]);
@@ -409,7 +409,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])];
}
if (den == 0) {
-#if DEBUG >= 1
+#if DEBUG_ECC >= 1
printf("\n ERROR: denominator = 0\n");
#endif
/* Convert to dual- basis */
diff --git a/drivers/mtd/maps/bast-flash.c b/drivers/mtd/maps/bast-flash.c
index 0c45464e3f7..0ba0ff7d43b 100644
--- a/drivers/mtd/maps/bast-flash.c
+++ b/drivers/mtd/maps/bast-flash.c
@@ -39,7 +39,6 @@
#include <linux/mtd/partitions.h>
#include <asm/io.h>
-#include <asm/mach-types.h>
#include <asm/mach/flash.h>
#include <asm/arch/map.h>
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 3e94b616743..a9f86c7fbd5 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -30,7 +30,6 @@
#include <asm/io.h>
#include <asm/hardware.h>
-#include <asm/mach-types.h>
#include <asm/mach/flash.h>
#include <linux/reboot.h>
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 5afe660aa2c..3fcc3288407 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -26,7 +26,6 @@
#include <linux/ioport.h>
#include <linux/device.h>
#include <asm/io.h>
-#include <asm/mach-types.h>
#include <asm/mach/flash.h>
#include <linux/reboot.h>
@@ -254,6 +253,6 @@ module_init(ixp4xx_flash_init);
module_exit(ixp4xx_flash_exit);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems")
+MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems");
MODULE_AUTHOR("Deepak Saxena");
diff --git a/drivers/mtd/maps/omap_nor.c b/drivers/mtd/maps/omap_nor.c
index 8cc71409a32..b17bca657da 100644
--- a/drivers/mtd/maps/omap_nor.c
+++ b/drivers/mtd/maps/omap_nor.c
@@ -42,7 +42,6 @@
#include <asm/io.h>
#include <asm/hardware.h>
-#include <asm/mach-types.h>
#include <asm/mach/flash.h>
#include <asm/arch/tc.h>
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 52385705da0..8dcaa357b4b 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -21,7 +21,6 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/concat.h>
-#include <asm/mach-types.h>
#include <asm/io.h>
#include <asm/sizes.h>
#include <asm/mach/flash.h>
diff --git a/drivers/mtd/maps/sharpsl-flash.c b/drivers/mtd/maps/sharpsl-flash.c
index d15da6fd84c..b7f093fbf9b 100644
--- a/drivers/mtd/maps/sharpsl-flash.c
+++ b/drivers/mtd/maps/sharpsl-flash.c
@@ -82,7 +82,7 @@ int __init init_sharpsl(void)
} else if (machine_is_tosa()) {
sharpsl_partitions[0].size=0x006a0000;
sharpsl_partitions[0].offset=0x00160000;
- } else if (machine_is_spitz()) {
+ } else if (machine_is_spitz() || machine_is_akita() || machine_is_borzoi()) {
sharpsl_partitions[0].size=0x006b0000;
sharpsl_partitions[0].offset=0x00140000;
} else {
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 891e3a1b911..b47ebcb31e0 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -58,7 +58,6 @@
#include <linux/mtd/partitions.h>
#include <asm/io.h>
-#include <asm/mach-types.h>
#include <asm/hardware/clock.h>
#include <asm/arch/regs-nand.h>
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 9853b87bb75..88b5b5b40b4 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -221,10 +221,16 @@ sharpsl_nand_init(void)
sharpsl_partition_info[1].size=25 * 1024 * 1024;
} else if (machine_is_husky()) {
sharpsl_partition_info[1].size=53 * 1024 * 1024;
- }
+ } else if (machine_is_spitz()) {
+ sharpsl_partition_info[1].size=5 * 1024 * 1024;
+ } else if (machine_is_akita()) {
+ sharpsl_partition_info[1].size=58 * 1024 * 1024;
+ } else if (machine_is_borzoi()) {
+ sharpsl_partition_info[1].size=32 * 1024 * 1024;
+ }
}
- if (machine_is_husky()) {
+ if (machine_is_husky() || machine_is_borzoi()) {
/* Need to use small eraseblock size for backward compatibility */
sharpsl_mtd->flags |= MTD_NO_VIRTBLOCKS;
}
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 34b80de34fa..bc537440ca0 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -353,8 +353,6 @@ struct cp_private {
struct net_device_stats net_stats;
struct cp_extra_stats cp_stats;
- struct cp_dma_stats *nic_stats;
- dma_addr_t nic_stats_dma;
unsigned rx_tail ____cacheline_aligned;
struct cp_desc *rx_ring;
@@ -1143,10 +1141,6 @@ static int cp_alloc_rings (struct cp_private *cp)
cp->rx_ring = mem;
cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
- mem += (CP_RING_BYTES - CP_STATS_SIZE);
- cp->nic_stats = mem;
- cp->nic_stats_dma = cp->ring_dma + (CP_RING_BYTES - CP_STATS_SIZE);
-
return cp_init_rings(cp);
}
@@ -1187,7 +1181,6 @@ static void cp_free_rings (struct cp_private *cp)
pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
cp->rx_ring = NULL;
cp->tx_ring = NULL;
- cp->nic_stats = NULL;
}
static int cp_open (struct net_device *dev)
@@ -1516,13 +1509,17 @@ static void cp_get_ethtool_stats (struct net_device *dev,
struct ethtool_stats *estats, u64 *tmp_stats)
{
struct cp_private *cp = netdev_priv(dev);
+ struct cp_dma_stats *nic_stats;
+ dma_addr_t dma;
int i;
- memset(cp->nic_stats, 0, sizeof(struct cp_dma_stats));
+ nic_stats = pci_alloc_consistent(cp->pdev, sizeof(*nic_stats), &dma);
+ if (!nic_stats)
+ return;
/* begin NIC statistics dump */
- cpw32(StatsAddr + 4, (cp->nic_stats_dma >> 16) >> 16);
- cpw32(StatsAddr, (cp->nic_stats_dma & 0xffffffff) | DumpStats);
+ cpw32(StatsAddr + 4, (u64)dma >> 32);
+ cpw32(StatsAddr, ((u64)dma & DMA_32BIT_MASK) | DumpStats);
cpr32(StatsAddr);
for (i = 0; i < 1000; i++) {
@@ -1532,24 +1529,27 @@ static void cp_get_ethtool_stats (struct net_device *dev,
}
cpw32(StatsAddr, 0);
cpw32(StatsAddr + 4, 0);
+ cpr32(StatsAddr);
i = 0;
- tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_ok);
- tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok);
- tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_err);
- tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_err);
- tmp_stats[i++] = le16_to_cpu(cp->nic_stats->rx_fifo);
- tmp_stats[i++] = le16_to_cpu(cp->nic_stats->frame_align);
- tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_1col);
- tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_mcol);
- tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_phys);
- tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_bcast);
- tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_ok_mcast);
- tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_abort);
- tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_underrun);
+ tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
+ tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
+ tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
+ tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
+ tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
+ tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
+ tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
+ tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
+ tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
+ tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
+ tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
+ tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
+ tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
tmp_stats[i++] = cp->cp_stats.rx_frags;
if (i != CP_NUM_STATS)
BUG();
+
+ pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
}
static struct ethtool_ops cp_ethtool_ops = {
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index 6d76f3a99b1..f8702742008 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -1094,7 +1094,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
- if (inb_p(e8390_base) & E8390_TRANS)
+ if (inb_p(e8390_base + E8390_CMD) & E8390_TRANS)
{
printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
dev->name);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 54fff9c2e80..2a908c4690a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -548,6 +548,14 @@ config SUNGEM
Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also
<http://www.sun.com/products-n-solutions/hardware/docs/pdf/806-3985-10.pdf>.
+config CASSINI
+ tristate "Sun Cassini support"
+ depends on NET_ETHERNET && PCI
+ select CRC32
+ help
+ Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also
+ <http://www.sun.com/products-n-solutions/hardware/docs/pdf/817-4341-10.pdf>
+
config NET_VENDOR_3COM
bool "3COM cards"
depends on NET_ETHERNET && (ISA || EISA || MCA || PCI)
@@ -1951,7 +1959,7 @@ config SKGE
---help---
This driver supports the Marvell Yukon or SysKonnect SK-98xx/SK-95xx
and related Gigabit Ethernet adapters. It is a new smaller driver
- driver with better performance and more complete ethtool support.
+ with better performance and more complete ethtool support.
It does not support the link failover and network management
features that "portable" vendor supplied sk98lin driver does.
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 8645c843cf4..8aeec9f2495 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_SUNQE) += sunqe.o
obj-$(CONFIG_SUNBMAC) += sunbmac.o
obj-$(CONFIG_MYRI_SBUS) += myri_sbus.o
obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o
+obj-$(CONFIG_CASSINI) += cassini.o
obj-$(CONFIG_MACE) += mace.o
obj-$(CONFIG_BMAC) += bmac.o
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 52c77cbe8c6..1f030273541 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -160,7 +160,7 @@ static int __init com90io_probe(struct net_device *dev)
return -ENODEV;
}
if (!request_region(ioaddr, ARCNET_TOTAL_SIZE, "com90io probe")) {
- BUGMSG(D_INIT_REASONS, "IO check_region %x-%x failed.\n",
+ BUGMSG(D_INIT_REASONS, "IO request_region %x-%x failed.\n",
ioaddr, ioaddr + ARCNET_TOTAL_SIZE - 1);
return -ENXIO;
}
@@ -242,7 +242,7 @@ static int __init com90io_found(struct net_device *dev)
BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
return -ENODEV;
}
- /* Reserve the I/O region - guaranteed to work by check_region */
+ /* Reserve the I/O region */
if (!request_region(dev->base_addr, ARCNET_TOTAL_SIZE, "arcnet (COM90xx-IO)")) {
free_irq(dev->irq, dev);
return -EBUSY;
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 9b659e3c8d6..c56d86d371a 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -15,16 +15,13 @@
*/
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
-#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
@@ -33,7 +30,6 @@
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/dma.h>
#define TX_BUFFERS 15
#define RX_BUFFERS 25
@@ -85,7 +81,7 @@ static inline unsigned short read_ireg(u_long base_addr, u_int reg)
u_short v;
__asm__(
"str%?h %1, [%2] @ NAT_RAP\n\t"
- "str%?h %0, [%2, #8] @ NET_IDP\n\t"
+ "ldr%?h %0, [%2, #8] @ NET_IDP\n\t"
: "=r" (v)
: "r" (reg), "r" (ISAIO_BASE + 0x0464));
return v;
@@ -288,7 +284,7 @@ static void am79c961_timer(unsigned long data)
else if (!lnkstat && carrier)
netif_carrier_off(dev);
- mod_timer(&priv->timer, jiffies + 5*HZ);
+ mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500));
}
/*
@@ -709,13 +705,9 @@ static int __init am79c961_init(void)
goto release;
am79c961_banner();
- printk(KERN_INFO "%s: ether address ", dev->name);
- /* Retrive and print the ethernet address. */
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < 6; i++)
dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff;
- printk (i == 5 ? "%02x\n" : "%02x:", dev->dev_addr[i]);
- }
spin_lock_init(&priv->chip_lock);
init_timer(&priv->timer);
@@ -736,8 +728,14 @@ static int __init am79c961_init(void)
#endif
ret = register_netdev(dev);
- if (ret == 0)
+ if (ret == 0) {
+ printk(KERN_INFO "%s: ether address ", dev->name);
+
+ for (i = 0; i < 6; i++)
+ printk (i == 5 ? "%02x\n" : "%02x:", dev->dev_addr[i]);
+
return 0;
+ }
release:
release_region(dev->base_addr, 0x18);
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 8dc657fc8af..60dba4a1ca5 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -218,7 +218,7 @@ void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
static inline
-volatile unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
+unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
{
return in_le16((void __iomem *)dev->base_addr + reg_offset);
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 94c9f68dd16..bf81cd45e4d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1653,7 +1653,8 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
int old_features = bond_dev->features;
int res = 0;
- if (slave_dev->do_ioctl == NULL) {
+ if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
+ slave_dev->do_ioctl == NULL) {
printk(KERN_WARNING DRV_NAME
": Warning : no link monitoring support for %s\n",
slave_dev->name);
@@ -2775,7 +2776,7 @@ static u32 bond_glean_dev_ip(struct net_device *dev)
return 0;
rcu_read_lock();
- idev = __in_dev_get(dev);
+ idev = __in_dev_get_rcu(dev);
if (!idev)
goto out;
@@ -2879,6 +2880,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
* This target is not on a VLAN
*/
if (rt->u.dst.dev == bond->dev) {
+ ip_rt_put(rt);
dprintk("basa: rtdev == bond->dev: arp_send\n");
bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
bond->master_ip, 0);
@@ -2898,6 +2900,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
}
if (vlan_id) {
+ ip_rt_put(rt);
bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
vlan->vlan_ip, vlan_id);
continue;
@@ -2909,6 +2912,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
bond->dev->name, NIPQUAD(fl.fl4_dst),
rt->u.dst.dev ? rt->u.dst.dev->name : "NULL");
}
+ ip_rt_put(rt);
}
}
@@ -5036,6 +5040,14 @@ static int __init bonding_init(void)
return 0;
out_err:
+ /*
+ * rtnl_unlock() will run netdev_run_todo(), putting the
+ * thus-far-registered bonding devices into a state which
+ * unregister_netdevice() will accept
+ */
+ rtnl_unlock();
+ rtnl_lock();
+
/* free and unregister all bonds that were successfully added */
bond_free_all();
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
new file mode 100644
index 00000000000..2e617424d3f
--- /dev/null
+++ b/drivers/net/cassini.c
@@ -0,0 +1,5237 @@
+/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
+ *
+ * Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ *
+ * This driver uses the sungem driver (c) David Miller
+ * (davem@redhat.com) as its basis.
+ *
+ * The cassini chip has a number of features that distinguish it from
+ * the gem chip:
+ * 4 transmit descriptor rings that are used for either QoS (VLAN) or
+ * load balancing (non-VLAN mode)
+ * batching of multiple packets
+ * multiple CPU dispatching
+ * page-based RX descriptor engine with separate completion rings
+ * Gigabit support (GMII and PCS interface)
+ * MIF link up/down detection works
+ *
+ * RX is handled by page sized buffers that are attached as fragments to
+ * the skb. here's what's done:
+ * -- driver allocates pages at a time and keeps reference counts
+ * on them.
+ * -- the upper protocol layers assume that the header is in the skb
+ * itself. as a result, cassini will copy a small amount (64 bytes)
+ * to make them happy.
+ * -- driver appends the rest of the data pages as frags to skbuffs
+ * and increments the reference count
+ * -- on page reclamation, the driver swaps the page with a spare page.
+ * if that page is still in use, it frees its reference to that page,
+ * and allocates a new page for use. otherwise, it just recycles the
+ * page.
+ *
+ * NOTE: cassini can parse the header. however, it's not worth it
+ * as long as the network stack requires a header copy.
+ *
+ * TX has 4 queues. currently these queues are used in a round-robin
+ * fashion for load balancing. They can also be used for QoS. for that
+ * to work, however, QoS information needs to be exposed down to the driver
+ * level so that subqueues get targeted to particular transmit rings.
+ * alternatively, the queues can be configured via use of the all-purpose
+ * ioctl.
+ *
+ * RX DATA: the rx completion ring has all the info, but the rx desc
+ * ring has all of the data. RX can conceivably come in under multiple
+ * interrupts, but the INT# assignment needs to be set up properly by
+ * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
+ * that. also, the two descriptor rings are designed to distinguish between
+ * encrypted and non-encrypted packets, but we use them for buffering
+ * instead.
+ *
+ * by default, the selective clear mask is set up to process rx packets.
+ */
+
+#include <linux/config.h>
+#include <linux/version.h>
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/random.h>
+#include <linux/mii.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+#include <net/checksum.h>
+
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#define cas_page_map(x) kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
+#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
+#define CAS_NCPUS num_online_cpus()
+
+#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL)
+#define USE_NAPI
+#define cas_skb_release(x) netif_receive_skb(x)
+#else
+#define cas_skb_release(x) netif_rx(x)
+#endif
+
+/* select which firmware to use */
+#define USE_HP_WORKAROUND
+#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
+#define CAS_HP_ALT_FIRMWARE cas_prog_null /* alternate firmware */
+
+#include "cassini.h"
+
+#define USE_TX_COMPWB /* use completion writeback registers */
+#define USE_CSMA_CD_PROTO /* standard CSMA/CD */
+#define USE_RX_BLANK /* hw interrupt mitigation */
+#undef USE_ENTROPY_DEV /* don't test for entropy device */
+
+/* NOTE: these aren't usable unless PCI interrupts can be assigned.
+ * also, we need to make cp->lock finer-grained.
+ */
+#undef USE_PCI_INTB
+#undef USE_PCI_INTC
+#undef USE_PCI_INTD
+#undef USE_QOS
+
+#undef USE_VPD_DEBUG /* debug vpd information if defined */
+
+/* rx processing options */
+#define USE_PAGE_ORDER /* specify to allocate large rx pages */
+#define RX_DONT_BATCH 0 /* if 1, don't batch flows */
+#define RX_COPY_ALWAYS 0 /* if 0, use frags */
+#define RX_COPY_MIN 64 /* copy a little to make upper layers happy */
+#undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */
+
+#define DRV_MODULE_NAME "cassini"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_MODULE_VERSION "1.4"
+#define DRV_MODULE_RELDATE "1 July 2004"
+
+#define CAS_DEF_MSG_ENABLE \
+ (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+/* length of time before we decide the hardware is borked,
+ * and dev->tx_timeout() should be called to fix the problem
+ */
+#define CAS_TX_TIMEOUT (HZ)
+#define CAS_LINK_TIMEOUT (22*HZ/10)
+#define CAS_LINK_FAST_TIMEOUT (1)
+
+/* timeout values for state changing. these specify the number
+ * of 10us delays to be used before giving up.
+ */
+#define STOP_TRIES_PHY 1000
+#define STOP_TRIES 5000
+
+/* specify a minimum frame size to deal with some fifo issues
+ * max mtu == 2 * page size - ethernet header - 64 - swivel =
+ * 2 * page_size - 0x50
+ */
+#define CAS_MIN_FRAME 97
+#define CAS_1000MB_MIN_FRAME 255
+#define CAS_MIN_MTU 60
+#define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
+
+#if 1
+/*
+ * Eliminate these and use separate atomic counters for each, to
+ * avoid a race condition.
+ */
+#else
+#define CAS_RESET_MTU 1
+#define CAS_RESET_ALL 2
+#define CAS_RESET_SPARE 3
+#endif
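+
+/*
+ * Editor's illustration (not part of the original patch): with the #if 1
+ * branch above, a pending reset is requested by bumping the shared counter
+ * plus a reason-specific counter and then scheduling the reset work, e.g.
+ *
+ *	atomic_inc(&cp->reset_task_pending);
+ *	atomic_inc(&cp->reset_task_pending_spare);
+ *	schedule_work(&cp->reset_task);
+ *
+ * instead of overwriting a single CAS_RESET_* value, which could lose a
+ * concurrently requested reset reason.
+ */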
+
+static char version[] __devinitdata =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
+MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(cassini_debug, "i");
+MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
+MODULE_PARM(link_mode, "i");
+MODULE_PARM_DESC(link_mode, "default link mode");
+
+/*
+ * Work around for a PCS bug in which the link goes down due to the chip
+ * being confused and never showing a link status of "up."
+ */
+#define DEFAULT_LINKDOWN_TIMEOUT 5
+/*
+ * Value in seconds, for user input.
+ */
+static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
+MODULE_PARM(linkdown_timeout, "i");
+MODULE_PARM_DESC(linkdown_timeout,
+"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
+
+/*
+ * value in 'ticks' (units used by jiffies). Set when we init the
+ * module because 'HZ' is actually a function call on some flavors of
+ * Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
+ */
+static int link_transition_timeout;
+
+
+static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
+static int link_mode;
+
+static u16 link_modes[] __devinitdata = {
+ BMCR_ANENABLE, /* 0 : autoneg */
+ 0, /* 1 : 10bt half duplex */
+ BMCR_SPEED100, /* 2 : 100bt half duplex */
+ BMCR_FULLDPLX, /* 3 : 10bt full duplex */
+ BMCR_SPEED100|BMCR_FULLDPLX, /* 4 : 100bt full duplex */
+ CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
+};
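+
+/*
+ * Editor's note (illustrative, not from the original driver): the
+ * "link_mode" module parameter selects an entry from the table above, so
+ * e.g. loading the module with link_mode=4 requests forced 100baseT full
+ * duplex, while the default of 0 leaves autonegotiation enabled.
+ */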
+
+static struct pci_device_id cas_pci_tbl[] __devinitdata = {
+ { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
+
+static void cas_set_link_modes(struct cas *cp);
+
+static inline void cas_lock_tx(struct cas *cp)
+{
+ int i;
+
+ for (i = 0; i < N_TX_RINGS; i++)
+ spin_lock(&cp->tx_lock[i]);
+}
+
+static inline void cas_lock_all(struct cas *cp)
+{
+ spin_lock_irq(&cp->lock);
+ cas_lock_tx(cp);
+}
+
+/* WTZ: QA was finding deadlock problems with the previous
+ * versions after long test runs with multiple cards per machine.
+ * See if replacing cas_lock_all with safer versions helps. The
+ * symptoms QA is reporting match those we'd expect if interrupts
+ * aren't being properly restored, and we fixed a previous deadlock
+ * with similar symptoms by using save/restore versions in other
+ * places.
+ */
+#define cas_lock_all_save(cp, flags) \
+do { \
+ struct cas *xxxcp = (cp); \
+ spin_lock_irqsave(&xxxcp->lock, flags); \
+ cas_lock_tx(xxxcp); \
+} while (0)
+
+static inline void cas_unlock_tx(struct cas *cp)
+{
+ int i;
+
+ for (i = N_TX_RINGS; i > 0; i--)
+ spin_unlock(&cp->tx_lock[i - 1]);
+}
+
+static inline void cas_unlock_all(struct cas *cp)
+{
+ cas_unlock_tx(cp);
+ spin_unlock_irq(&cp->lock);
+}
+
+#define cas_unlock_all_restore(cp, flags) \
+do { \
+ struct cas *xxxcp = (cp); \
+ cas_unlock_tx(xxxcp); \
+ spin_unlock_irqrestore(&xxxcp->lock, flags); \
+} while (0)
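+
+/*
+ * Illustrative usage sketch (editor's annotation, not part of the original
+ * patch): the save/restore pair is meant to bracket code that needs
+ * cp->lock plus every TX ring lock with interrupts disabled, e.g.
+ *
+ *	unsigned long flags;
+ *
+ *	cas_lock_all_save(cp, flags);
+ *	... touch state protected by cp->lock and the TX rings ...
+ *	cas_unlock_all_restore(cp, flags);
+ *
+ * so the saved IRQ state is restored exactly as it was on entry.
+ */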
+
+static void cas_disable_irq(struct cas *cp, const int ring)
+{
+ /* Make sure we won't get any more interrupts */
+ if (ring == 0) {
+ writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
+ return;
+ }
+
+ /* disable completion interrupts and selectively mask */
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ switch (ring) {
+#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
+#ifdef USE_PCI_INTB
+ case 1:
+#endif
+#ifdef USE_PCI_INTC
+ case 2:
+#endif
+#ifdef USE_PCI_INTD
+ case 3:
+#endif
+ writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
+ cp->regs + REG_PLUS_INTRN_MASK(ring));
+ break;
+#endif
+ default:
+ writel(INTRN_MASK_CLEAR_ALL, cp->regs +
+ REG_PLUS_INTRN_MASK(ring));
+ break;
+ }
+ }
+}
+
+static inline void cas_mask_intr(struct cas *cp)
+{
+ int i;
+
+ for (i = 0; i < N_RX_COMP_RINGS; i++)
+ cas_disable_irq(cp, i);
+}
+
+static void cas_enable_irq(struct cas *cp, const int ring)
+{
+ if (ring == 0) { /* all but TX_DONE */
+ writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
+ return;
+ }
+
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ switch (ring) {
+#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
+#ifdef USE_PCI_INTB
+ case 1:
+#endif
+#ifdef USE_PCI_INTC
+ case 2:
+#endif
+#ifdef USE_PCI_INTD
+ case 3:
+#endif
+ writel(INTRN_MASK_RX_EN, cp->regs +
+ REG_PLUS_INTRN_MASK(ring));
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+}
+
+static inline void cas_unmask_intr(struct cas *cp)
+{
+ int i;
+
+ for (i = 0; i < N_RX_COMP_RINGS; i++)
+ cas_enable_irq(cp, i);
+}
+
+static inline void cas_entropy_gather(struct cas *cp)
+{
+#ifdef USE_ENTROPY_DEV
+ if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
+ return;
+
+ batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
+ readl(cp->regs + REG_ENTROPY_IV),
+ sizeof(uint64_t)*8);
+#endif
+}
+
+static inline void cas_entropy_reset(struct cas *cp)
+{
+#ifdef USE_ENTROPY_DEV
+ if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
+ return;
+
+ writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
+ cp->regs + REG_BIM_LOCAL_DEV_EN);
+ writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
+ writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
+
+ /* if we read back 0x0, we don't have an entropy device */
+ if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
+ cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
+#endif
+}
+
+/* access to the phy. the following assumes that we've initialized the MIF to
+ * be in frame rather than bit-bang mode
+ */
+static u16 cas_phy_read(struct cas *cp, int reg)
+{
+ u32 cmd;
+ int limit = STOP_TRIES_PHY;
+
+ cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
+ cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
+ cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
+ cmd |= MIF_FRAME_TURN_AROUND_MSB;
+ writel(cmd, cp->regs + REG_MIF_FRAME);
+
+ /* poll for completion */
+ while (limit-- > 0) {
+ udelay(10);
+ cmd = readl(cp->regs + REG_MIF_FRAME);
+ if (cmd & MIF_FRAME_TURN_AROUND_LSB)
+ return (cmd & MIF_FRAME_DATA_MASK);
+ }
+ return 0xFFFF; /* -1 */
+}
+
+static int cas_phy_write(struct cas *cp, int reg, u16 val)
+{
+ int limit = STOP_TRIES_PHY;
+ u32 cmd;
+
+ cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
+ cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
+ cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
+ cmd |= MIF_FRAME_TURN_AROUND_MSB;
+ cmd |= val & MIF_FRAME_DATA_MASK;
+ writel(cmd, cp->regs + REG_MIF_FRAME);
+
+ /* poll for completion */
+ while (limit-- > 0) {
+ udelay(10);
+ cmd = readl(cp->regs + REG_MIF_FRAME);
+ if (cmd & MIF_FRAME_TURN_AROUND_LSB)
+ return 0;
+ }
+ return -1;
+}
+
+static void cas_phy_powerup(struct cas *cp)
+{
+ u16 ctl = cas_phy_read(cp, MII_BMCR);
+
+ if ((ctl & BMCR_PDOWN) == 0)
+ return;
+ ctl &= ~BMCR_PDOWN;
+ cas_phy_write(cp, MII_BMCR, ctl);
+}
+
+static void cas_phy_powerdown(struct cas *cp)
+{
+ u16 ctl = cas_phy_read(cp, MII_BMCR);
+
+ if (ctl & BMCR_PDOWN)
+ return;
+ ctl |= BMCR_PDOWN;
+ cas_phy_write(cp, MII_BMCR, ctl);
+}
+
+/* cp->lock held. note: the last put_page will free the buffer */
+static int cas_page_free(struct cas *cp, cas_page_t *page)
+{
+ pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
+ PCI_DMA_FROMDEVICE);
+ __free_pages(page->buffer, cp->page_order);
+ kfree(page);
+ return 0;
+}
+
+#ifdef RX_COUNT_BUFFERS
+#define RX_USED_ADD(x, y) ((x)->used += (y))
+#define RX_USED_SET(x, y) ((x)->used = (y))
+#else
+#define RX_USED_ADD(x, y)
+#define RX_USED_SET(x, y)
+#endif
+
+/* local page allocation routines for the receive buffers. jumbo pages
+ * require at least 8K contiguous and 8K aligned buffers.
+ */
+static cas_page_t *cas_page_alloc(struct cas *cp, const int flags)
+{
+ cas_page_t *page;
+
+ page = kmalloc(sizeof(cas_page_t), flags);
+ if (!page)
+ return NULL;
+
+ INIT_LIST_HEAD(&page->list);
+ RX_USED_SET(page, 0);
+ page->buffer = alloc_pages(flags, cp->page_order);
+ if (!page->buffer)
+ goto page_err;
+ page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
+ cp->page_size, PCI_DMA_FROMDEVICE);
+ return page;
+
+page_err:
+ kfree(page);
+ return NULL;
+}
+
+/* initialize spare pool of rx buffers, but allocate during the open */
+static void cas_spare_init(struct cas *cp)
+{
+ spin_lock(&cp->rx_inuse_lock);
+ INIT_LIST_HEAD(&cp->rx_inuse_list);
+ spin_unlock(&cp->rx_inuse_lock);
+
+ spin_lock(&cp->rx_spare_lock);
+ INIT_LIST_HEAD(&cp->rx_spare_list);
+ cp->rx_spares_needed = RX_SPARE_COUNT;
+ spin_unlock(&cp->rx_spare_lock);
+}
+
+/* used on close. free all the spare buffers. */
+static void cas_spare_free(struct cas *cp)
+{
+ struct list_head list, *elem, *tmp;
+
+ /* free spare buffers */
+ INIT_LIST_HEAD(&list);
+ spin_lock(&cp->rx_spare_lock);
+ list_splice(&cp->rx_spare_list, &list);
+ INIT_LIST_HEAD(&cp->rx_spare_list);
+ spin_unlock(&cp->rx_spare_lock);
+ list_for_each_safe(elem, tmp, &list) {
+ cas_page_free(cp, list_entry(elem, cas_page_t, list));
+ }
+
+ INIT_LIST_HEAD(&list);
+#if 1
+ /*
+ * Looks like Adrian had protected this with a different
+ * lock than used everywhere else to manipulate this list.
+ */
+ spin_lock(&cp->rx_inuse_lock);
+ list_splice(&cp->rx_inuse_list, &list);
+ INIT_LIST_HEAD(&cp->rx_inuse_list);
+ spin_unlock(&cp->rx_inuse_lock);
+#else
+ spin_lock(&cp->rx_spare_lock);
+ list_splice(&cp->rx_inuse_list, &list);
+ INIT_LIST_HEAD(&cp->rx_inuse_list);
+ spin_unlock(&cp->rx_spare_lock);
+#endif
+ list_for_each_safe(elem, tmp, &list) {
+ cas_page_free(cp, list_entry(elem, cas_page_t, list));
+ }
+}
+
+/* replenish spares if needed */
+static void cas_spare_recover(struct cas *cp, const int flags)
+{
+ struct list_head list, *elem, *tmp;
+ int needed, i;
+
+ /* check inuse list. if we don't need any more free buffers,
+ * just free it
+ */
+
+ /* make a local copy of the list */
+ INIT_LIST_HEAD(&list);
+ spin_lock(&cp->rx_inuse_lock);
+ list_splice(&cp->rx_inuse_list, &list);
+ INIT_LIST_HEAD(&cp->rx_inuse_list);
+ spin_unlock(&cp->rx_inuse_lock);
+
+ list_for_each_safe(elem, tmp, &list) {
+ cas_page_t *page = list_entry(elem, cas_page_t, list);
+
+ if (page_count(page->buffer) > 1)
+ continue;
+
+ list_del(elem);
+ spin_lock(&cp->rx_spare_lock);
+ if (cp->rx_spares_needed > 0) {
+ list_add(elem, &cp->rx_spare_list);
+ cp->rx_spares_needed--;
+ spin_unlock(&cp->rx_spare_lock);
+ } else {
+ spin_unlock(&cp->rx_spare_lock);
+ cas_page_free(cp, page);
+ }
+ }
+
+ /* put any inuse buffers back on the list */
+ if (!list_empty(&list)) {
+ spin_lock(&cp->rx_inuse_lock);
+ list_splice(&list, &cp->rx_inuse_list);
+ spin_unlock(&cp->rx_inuse_lock);
+ }
+
+ spin_lock(&cp->rx_spare_lock);
+ needed = cp->rx_spares_needed;
+ spin_unlock(&cp->rx_spare_lock);
+ if (!needed)
+ return;
+
+ /* we still need spares, so try to allocate some */
+ INIT_LIST_HEAD(&list);
+ i = 0;
+ while (i < needed) {
+ cas_page_t *spare = cas_page_alloc(cp, flags);
+ if (!spare)
+ break;
+ list_add(&spare->list, &list);
+ i++;
+ }
+
+ spin_lock(&cp->rx_spare_lock);
+ list_splice(&list, &cp->rx_spare_list);
+ cp->rx_spares_needed -= i;
+ spin_unlock(&cp->rx_spare_lock);
+}
+
+/* pull a page from the list. */
+static cas_page_t *cas_page_dequeue(struct cas *cp)
+{
+ struct list_head *entry;
+ int recover;
+
+ spin_lock(&cp->rx_spare_lock);
+ if (list_empty(&cp->rx_spare_list)) {
+ /* try to do a quick recovery */
+ spin_unlock(&cp->rx_spare_lock);
+ cas_spare_recover(cp, GFP_ATOMIC);
+ spin_lock(&cp->rx_spare_lock);
+ if (list_empty(&cp->rx_spare_list)) {
+ if (netif_msg_rx_err(cp))
+ printk(KERN_ERR "%s: no spare buffers "
+ "available.\n", cp->dev->name);
+ spin_unlock(&cp->rx_spare_lock);
+ return NULL;
+ }
+ }
+
+ entry = cp->rx_spare_list.next;
+ list_del(entry);
+ recover = ++cp->rx_spares_needed;
+ spin_unlock(&cp->rx_spare_lock);
+
+ /* trigger the timer to do the recovery */
+ if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
+#if 1
+ atomic_inc(&cp->reset_task_pending);
+ atomic_inc(&cp->reset_task_pending_spare);
+ schedule_work(&cp->reset_task);
+#else
+ atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
+ schedule_work(&cp->reset_task);
+#endif
+ }
+ return list_entry(entry, cas_page_t, list);
+}
+
+
+static void cas_mif_poll(struct cas *cp, const int enable)
+{
+ u32 cfg;
+
+ cfg = readl(cp->regs + REG_MIF_CFG);
+ cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
+
+ if (cp->phy_type & CAS_PHY_MII_MDIO1)
+ cfg |= MIF_CFG_PHY_SELECT;
+
+ /* poll and interrupt on link status change. */
+ if (enable) {
+ cfg |= MIF_CFG_POLL_EN;
+ cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
+ cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
+ }
+ writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
+ cp->regs + REG_MIF_MASK);
+ writel(cfg, cp->regs + REG_MIF_CFG);
+}
+
+/* Must be invoked under cp->lock */
+static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
+{
+ u16 ctl;
+#if 1
+ int lcntl;
+ int changed = 0;
+ int oldstate = cp->lstate;
+ int link_was_not_down = !(oldstate == link_down);
+#endif
+ /* Setup link parameters */
+ if (!ep)
+ goto start_aneg;
+ lcntl = cp->link_cntl;
+ if (ep->autoneg == AUTONEG_ENABLE)
+ cp->link_cntl = BMCR_ANENABLE;
+ else {
+ cp->link_cntl = 0;
+ if (ep->speed == SPEED_100)
+ cp->link_cntl |= BMCR_SPEED100;
+ else if (ep->speed == SPEED_1000)
+ cp->link_cntl |= CAS_BMCR_SPEED1000;
+ if (ep->duplex == DUPLEX_FULL)
+ cp->link_cntl |= BMCR_FULLDPLX;
+ }
+#if 1
+ changed = (lcntl != cp->link_cntl);
+#endif
+start_aneg:
+ if (cp->lstate == link_up) {
+ printk(KERN_INFO "%s: PCS link down.\n",
+ cp->dev->name);
+ } else {
+ if (changed) {
+ printk(KERN_INFO "%s: link configuration changed\n",
+ cp->dev->name);
+ }
+ }
+ cp->lstate = link_down;
+ cp->link_transition = LINK_TRANSITION_LINK_DOWN;
+ if (!cp->hw_running)
+ return;
+#if 1
+ /*
+ * WTZ: If the old state was link_up, we turn off the carrier
+ * to replicate everything we do elsewhere on a link-down
+ * event when we were already in a link-up state.
+ */
+ if (oldstate == link_up)
+ netif_carrier_off(cp->dev);
+ if (changed && link_was_not_down) {
+ /*
+ * WTZ: This branch will simply schedule a full reset after
+ * we explicitly changed link modes in an ioctl. See if this
+ * fixes the link-problems we were having for forced mode.
+ */
+ atomic_inc(&cp->reset_task_pending);
+ atomic_inc(&cp->reset_task_pending_all);
+ schedule_work(&cp->reset_task);
+ cp->timer_ticks = 0;
+ mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
+ return;
+ }
+#endif
+ if (cp->phy_type & CAS_PHY_SERDES) {
+ u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
+
+ if (cp->link_cntl & BMCR_ANENABLE) {
+ val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
+ cp->lstate = link_aneg;
+ } else {
+ if (cp->link_cntl & BMCR_FULLDPLX)
+ val |= PCS_MII_CTRL_DUPLEX;
+ val &= ~PCS_MII_AUTONEG_EN;
+ cp->lstate = link_force_ok;
+ }
+ cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+ writel(val, cp->regs + REG_PCS_MII_CTRL);
+
+ } else {
+ cas_mif_poll(cp, 0);
+ ctl = cas_phy_read(cp, MII_BMCR);
+ ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
+ CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
+ ctl |= cp->link_cntl;
+ if (ctl & BMCR_ANENABLE) {
+ ctl |= BMCR_ANRESTART;
+ cp->lstate = link_aneg;
+ } else {
+ cp->lstate = link_force_ok;
+ }
+ cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+ cas_phy_write(cp, MII_BMCR, ctl);
+ cas_mif_poll(cp, 1);
+ }
+
+ cp->timer_ticks = 0;
+ mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
+}
+
+/* Must be invoked under cp->lock. */
+static int cas_reset_mii_phy(struct cas *cp)
+{
+ int limit = STOP_TRIES_PHY;
+ u16 val;
+
+ cas_phy_write(cp, MII_BMCR, BMCR_RESET);
+ udelay(100);
+ while (limit--) {
+ val = cas_phy_read(cp, MII_BMCR);
+ if ((val & BMCR_RESET) == 0)
+ break;
+ udelay(10);
+ }
+ return (limit <= 0);
+}
+
+static void cas_saturn_firmware_load(struct cas *cp)
+{
+ cas_saturn_patch_t *patch = cas_saturn_patch;
+
+ cas_phy_powerdown(cp);
+
+ /* expanded memory access mode */
+ cas_phy_write(cp, DP83065_MII_MEM, 0x0);
+
+ /* pointer configuration for new firmware */
+ cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
+ cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
+ cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
+ cas_phy_write(cp, DP83065_MII_REGD, 0x82);
+ cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
+ cas_phy_write(cp, DP83065_MII_REGD, 0x0);
+ cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
+ cas_phy_write(cp, DP83065_MII_REGD, 0x39);
+
+ /* download new firmware */
+ cas_phy_write(cp, DP83065_MII_MEM, 0x1);
+ cas_phy_write(cp, DP83065_MII_REGE, patch->addr);
+ while (patch->addr) {
+ cas_phy_write(cp, DP83065_MII_REGD, patch->val);
+ patch++;
+ }
+
+ /* enable firmware */
+ cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
+ cas_phy_write(cp, DP83065_MII_REGD, 0x1);
+}
+
+
+/* phy initialization */
+static void cas_phy_init(struct cas *cp)
+{
+ u16 val;
+
+ /* if we're in MII/GMII mode, set up phy */
+ if (CAS_PHY_MII(cp->phy_type)) {
+ writel(PCS_DATAPATH_MODE_MII,
+ cp->regs + REG_PCS_DATAPATH_MODE);
+
+ cas_mif_poll(cp, 0);
+ cas_reset_mii_phy(cp); /* take out of isolate mode */
+
+ if (PHY_LUCENT_B0 == cp->phy_id) {
+ /* workaround link up/down issue with lucent */
+ cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
+ cas_phy_write(cp, MII_BMCR, 0x00f1);
+ cas_phy_write(cp, LUCENT_MII_REG, 0x0);
+
+ } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
+ /* workarounds for broadcom phy */
+ cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
+ cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
+ cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
+ cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
+ cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
+ cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
+ cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
+ cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
+ cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
+ cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
+ cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
+
+ } else if (PHY_BROADCOM_5411 == cp->phy_id) {
+ val = cas_phy_read(cp, BROADCOM_MII_REG4);
+ val = cas_phy_read(cp, BROADCOM_MII_REG4);
+ if (val & 0x0080) {
+ /* link workaround */
+ cas_phy_write(cp, BROADCOM_MII_REG4,
+ val & ~0x0080);
+ }
+
+ } else if (cp->cas_flags & CAS_FLAG_SATURN) {
+ writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
+ SATURN_PCFG_FSI : 0x0,
+ cp->regs + REG_SATURN_PCFG);
+
+ /* load firmware to address 10Mbps auto-negotiation
+ * issue. NOTE: this will need to be changed if the
+ * default firmware gets fixed.
+ */
+ if (PHY_NS_DP83065 == cp->phy_id) {
+ cas_saturn_firmware_load(cp);
+ }
+ cas_phy_powerup(cp);
+ }
+
+ /* advertise capabilities */
+ val = cas_phy_read(cp, MII_BMCR);
+ val &= ~BMCR_ANENABLE;
+ cas_phy_write(cp, MII_BMCR, val);
+ udelay(10);
+
+ cas_phy_write(cp, MII_ADVERTISE,
+ cas_phy_read(cp, MII_ADVERTISE) |
+ (ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL |
+ CAS_ADVERTISE_PAUSE |
+ CAS_ADVERTISE_ASYM_PAUSE));
+
+ if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
+ /* make sure that we don't advertise half
+ * duplex to avoid a chip issue
+ */
+ val = cas_phy_read(cp, CAS_MII_1000_CTRL);
+ val &= ~CAS_ADVERTISE_1000HALF;
+ val |= CAS_ADVERTISE_1000FULL;
+ cas_phy_write(cp, CAS_MII_1000_CTRL, val);
+ }
+
+ } else {
+ /* reset pcs for serdes */
+ u32 val;
+ int limit;
+
+ writel(PCS_DATAPATH_MODE_SERDES,
+ cp->regs + REG_PCS_DATAPATH_MODE);
+
+ /* enable serdes pins on saturn */
+ if (cp->cas_flags & CAS_FLAG_SATURN)
+ writel(0, cp->regs + REG_SATURN_PCFG);
+
+ /* Reset PCS unit. */
+ val = readl(cp->regs + REG_PCS_MII_CTRL);
+ val |= PCS_MII_RESET;
+ writel(val, cp->regs + REG_PCS_MII_CTRL);
+
+ limit = STOP_TRIES;
+ while (limit-- > 0) {
+ udelay(10);
+ if ((readl(cp->regs + REG_PCS_MII_CTRL) &
+ PCS_MII_RESET) == 0)
+ break;
+ }
+ if (limit <= 0)
+ printk(KERN_WARNING "%s: PCS reset bit would not "
+ "clear [%08x].\n", cp->dev->name,
+ readl(cp->regs + REG_PCS_STATE_MACHINE));
+
+ /* Make sure PCS is disabled while changing advertisement
+ * configuration.
+ */
+ writel(0x0, cp->regs + REG_PCS_CFG);
+
+ /* Advertise all capabilities except half-duplex. */
+ val = readl(cp->regs + REG_PCS_MII_ADVERT);
+ val &= ~PCS_MII_ADVERT_HD;
+ val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
+ PCS_MII_ADVERT_ASYM_PAUSE);
+ writel(val, cp->regs + REG_PCS_MII_ADVERT);
+
+ /* enable PCS */
+ writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
+
+ /* pcs workaround: enable sync detect */
+ writel(PCS_SERDES_CTRL_SYNCD_EN,
+ cp->regs + REG_PCS_SERDES_CTRL);
+ }
+}
+
+
+static int cas_pcs_link_check(struct cas *cp)
+{
+ u32 stat, state_machine;
+ int retval = 0;
+
+ /* The link status bit latches on zero, so you must
+ * read it twice in such a case to see a transition
+ * to the link being up.
+ */
+ stat = readl(cp->regs + REG_PCS_MII_STATUS);
+ if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
+ stat = readl(cp->regs + REG_PCS_MII_STATUS);
+
+ /* The remote-fault indication is only valid
+ * when autoneg has completed.
+ */
+ if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
+ PCS_MII_STATUS_REMOTE_FAULT)) ==
+ (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
+ if (netif_msg_link(cp))
+ printk(KERN_INFO "%s: PCS RemoteFault\n",
+ cp->dev->name);
+ }
+
+ /* work around link detection issue by querying the PCS state
+ * machine directly.
+ */
+ state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
+ if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
+ stat &= ~PCS_MII_STATUS_LINK_STATUS;
+ } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
+ stat |= PCS_MII_STATUS_LINK_STATUS;
+ }
+
+ if (stat & PCS_MII_STATUS_LINK_STATUS) {
+ if (cp->lstate != link_up) {
+ if (cp->opened) {
+ cp->lstate = link_up;
+ cp->link_transition = LINK_TRANSITION_LINK_UP;
+
+ cas_set_link_modes(cp);
+ netif_carrier_on(cp->dev);
+ }
+ }
+ } else if (cp->lstate == link_up) {
+ cp->lstate = link_down;
+ if (link_transition_timeout != 0 &&
+ cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
+ !cp->link_transition_jiffies_valid) {
+ /*
+ * force a reset, as a workaround for the
+ * link-failure problem. May want to move this to a
+ * point a bit earlier in the sequence. If we had
+ * generated a reset a short time ago, we'll wait for
+ * the link timer to check the status until a
+ * timer expires (link_transition_jiffies_valid is
+ * true when the timer is running.) Instead of using
+ * a system timer, we just do a check whenever the
+ * link timer is running - this clears the flag after
+ * a suitable delay.
+ */
+ retval = 1;
+ cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
+ cp->link_transition_jiffies = jiffies;
+ cp->link_transition_jiffies_valid = 1;
+ } else {
+ cp->link_transition = LINK_TRANSITION_ON_FAILURE;
+ }
+ netif_carrier_off(cp->dev);
+ if (cp->opened && netif_msg_link(cp)) {
+ printk(KERN_INFO "%s: PCS link down.\n",
+ cp->dev->name);
+ }
+
+ /* Cassini only: if you force a mode, there can be
+ * sync problems on link down. to fix that, the following
+ * things need to be checked:
+ * 1) read serialink state register
+ * 2) read pcs status register to verify link down.
+ * 3) if link down and serial link == 0x03, then you need
+ * to global reset the chip.
+ */
+ if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
+ /* should check to see if we're in a forced mode */
+ stat = readl(cp->regs + REG_PCS_SERDES_STATE);
+ if (stat == 0x03)
+ return 1;
+ }
+ } else if (cp->lstate == link_down) {
+ if (link_transition_timeout != 0 &&
+ cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
+ !cp->link_transition_jiffies_valid) {
+ /* force a reset, as a workaround for the
+ * link-failure problem. May want to move
+ * this to a point a bit earlier in the
+ * sequence.
+ */
+ retval = 1;
+ cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
+ cp->link_transition_jiffies = jiffies;
+ cp->link_transition_jiffies_valid = 1;
+ } else {
+ cp->link_transition = LINK_TRANSITION_STILL_FAILED;
+ }
+ }
+
+ return retval;
+}
+
+static int cas_pcs_interrupt(struct net_device *dev,
+ struct cas *cp, u32 status)
+{
+ u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
+
+ if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
+ return 0;
+ return cas_pcs_link_check(cp);
+}
+
+static int cas_txmac_interrupt(struct net_device *dev,
+ struct cas *cp, u32 status)
+{
+ u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
+
+ if (!txmac_stat)
+ return 0;
+
+ if (netif_msg_intr(cp))
+ printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
+ cp->dev->name, txmac_stat);
+
+ /* Defer timer expiration is quite normal,
+ * don't even log the event.
+ */
+ if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
+ !(txmac_stat & ~MAC_TX_DEFER_TIMER))
+ return 0;
+
+ spin_lock(&cp->stat_lock[0]);
+ if (txmac_stat & MAC_TX_UNDERRUN) {
+ printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
+ dev->name);
+ cp->net_stats[0].tx_fifo_errors++;
+ }
+
+ if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
+ printk(KERN_ERR "%s: TX MAC max packet size error.\n",
+ dev->name);
+ cp->net_stats[0].tx_errors++;
+ }
+
+ /* The rest are all cases of one of the 16-bit TX
+ * counters expiring.
+ */
+ if (txmac_stat & MAC_TX_COLL_NORMAL)
+ cp->net_stats[0].collisions += 0x10000;
+
+ if (txmac_stat & MAC_TX_COLL_EXCESS) {
+ cp->net_stats[0].tx_aborted_errors += 0x10000;
+ cp->net_stats[0].collisions += 0x10000;
+ }
+
+ if (txmac_stat & MAC_TX_COLL_LATE) {
+ cp->net_stats[0].tx_aborted_errors += 0x10000;
+ cp->net_stats[0].collisions += 0x10000;
+ }
+ spin_unlock(&cp->stat_lock[0]);
+
+ /* We do not keep track of MAC_TX_COLL_FIRST and
+ * MAC_TX_PEAK_ATTEMPTS events.
+ */
+ return 0;
+}
+
+static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
+{
+ cas_hp_inst_t *inst;
+ u32 val;
+ int i;
+
+ i = 0;
+ while ((inst = firmware) && inst->note) {
+ writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
+
+ val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
+ val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
+ writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
+
+ val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
+ val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
+ val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
+ val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
+ val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
+ val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
+ val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
+ writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
+
+ val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
+ val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
+ val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
+ val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
+ writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
+ ++firmware;
+ ++i;
+ }
+}
+
+static void cas_init_rx_dma(struct cas *cp)
+{
+ u64 desc_dma = cp->block_dvma;
+ u32 val;
+ int i, size;
+
+ /* rx free descriptors */
+ val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
+ val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
+ val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
+ if ((N_RX_DESC_RINGS > 1) &&
+ (cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */
+ val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
+ writel(val, cp->regs + REG_RX_CFG);
+
+ val = (unsigned long) cp->init_rxds[0] -
+ (unsigned long) cp->init_block;
+ writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
+ writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
+ writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
+
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ /* rx desc 2 is for IPSEC packets. however,
+ * we don't use it for that purpose.
+ */
+ val = (unsigned long) cp->init_rxds[1] -
+ (unsigned long) cp->init_block;
+ writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
+ writel((desc_dma + val) & 0xffffffff, cp->regs +
+ REG_PLUS_RX_DB1_LOW);
+ writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
+ REG_PLUS_RX_KICK1);
+ }
+
+ /* rx completion registers */
+ val = (unsigned long) cp->init_rxcs[0] -
+ (unsigned long) cp->init_block;
+ writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
+ writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
+
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ /* rx comp 2-4 */
+ for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
+ val = (unsigned long) cp->init_rxcs[i] -
+ (unsigned long) cp->init_block;
+ writel((desc_dma + val) >> 32, cp->regs +
+ REG_PLUS_RX_CBN_HI(i));
+ writel((desc_dma + val) & 0xffffffff, cp->regs +
+ REG_PLUS_RX_CBN_LOW(i));
+ }
+ }
+
+ /* read selective clear regs to prevent spurious interrupts
+ * on reset because complete == kick.
+ * selective clear set up to prevent interrupts on resets
+ */
+ readl(cp->regs + REG_INTR_STATUS_ALIAS);
+ writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ for (i = 1; i < N_RX_COMP_RINGS; i++)
+ readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
+
+ /* 2 is different from 3 and 4 */
+ if (N_RX_COMP_RINGS > 1)
+ writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
+ cp->regs + REG_PLUS_ALIASN_CLEAR(1));
+
+ for (i = 2; i < N_RX_COMP_RINGS; i++)
+ writel(INTR_RX_DONE_ALT,
+ cp->regs + REG_PLUS_ALIASN_CLEAR(i));
+ }
+
+ /* set up pause thresholds */
+ val = CAS_BASE(RX_PAUSE_THRESH_OFF,
+ cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
+ val |= CAS_BASE(RX_PAUSE_THRESH_ON,
+ cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
+ writel(val, cp->regs + REG_RX_PAUSE_THRESH);
+
+ /* zero out dma reassembly buffers */
+ for (i = 0; i < 64; i++) {
+ writel(i, cp->regs + REG_RX_TABLE_ADDR);
+ writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
+ writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
+ writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
+ }
+
+ /* make sure address register is 0 for normal operation */
+ writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
+ writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
+
+ /* interrupt mitigation */
+#ifdef USE_RX_BLANK
+ val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
+ val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
+ writel(val, cp->regs + REG_RX_BLANK);
+#else
+ writel(0x0, cp->regs + REG_RX_BLANK);
+#endif
+
+ /* interrupt generation as a function of low water marks for
+ * free desc and completion entries. these are used to trigger
+ * housekeeping for rx descs. we don't use the free interrupt
+ * as it's not very useful
+ */
+ /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
+ val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
+ writel(val, cp->regs + REG_RX_AE_THRESH);
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
+ writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
+ }
+
+ /* Random early detect registers. useful for congestion avoidance.
+ * this should be tunable.
+ */
+ writel(0x0, cp->regs + REG_RX_RED);
+
+ /* receive page sizes. default == 2K (0x800) */
+ val = 0;
+ if (cp->page_size == 0x1000)
+ val = 0x1;
+ else if (cp->page_size == 0x2000)
+ val = 0x2;
+ else if (cp->page_size == 0x4000)
+ val = 0x3;
+
+ /* round mtu + offset. constrain to page size. */
+ size = cp->dev->mtu + 64;
+ if (size > cp->page_size)
+ size = cp->page_size;
+
+ if (size <= 0x400)
+ i = 0x0;
+ else if (size <= 0x800)
+ i = 0x1;
+ else if (size <= 0x1000)
+ i = 0x2;
+ else
+ i = 0x3;
+
+ cp->mtu_stride = 1 << (i + 10);
+ val = CAS_BASE(RX_PAGE_SIZE, val);
+ val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
+ val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
+ val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
+ writel(val, cp->regs + REG_RX_PAGE_SIZE);
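+ /*
+ * Worked example (editor's annotation): with the default 2K receive
+ * pages and a 1500 byte MTU, size = 1564, so i = 0x1, mtu_stride =
+ * 2048 and the MTU_COUNT field becomes page_size >> 11 = 1, i.e.
+ * one MTU-sized buffer per receive page.
+ */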
+
+ /* enable the header parser if desired */
+ if (CAS_HP_FIRMWARE == cas_prog_null)
+ return;
+
+ val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
+ val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
+ val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
+ writel(val, cp->regs + REG_HP_CFG);
+}
+
+static inline void cas_rxc_init(struct cas_rx_comp *rxc)
+{
+ memset(rxc, 0, sizeof(*rxc));
+ rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
+}
+
+/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
+ * flipping is protected by the fact that the chip will not
+ * hand back the same page index while it's being processed.
+ */
+static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
+{
+ cas_page_t *page = cp->rx_pages[1][index];
+ cas_page_t *new;
+
+ if (page_count(page->buffer) == 1)
+ return page;
+
+ new = cas_page_dequeue(cp);
+ if (new) {
+ spin_lock(&cp->rx_inuse_lock);
+ list_add(&page->list, &cp->rx_inuse_list);
+ spin_unlock(&cp->rx_inuse_lock);
+ }
+ return new;
+}
+
+/* this needs to be changed if we actually use the ENC RX DESC ring */
+static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
+ const int index)
+{
+ cas_page_t **page0 = cp->rx_pages[0];
+ cas_page_t **page1 = cp->rx_pages[1];
+
+ /* swap if buffer is in use */
+ if (page_count(page0[index]->buffer) > 1) {
+ cas_page_t *new = cas_page_spare(cp, index);
+ if (new) {
+ page1[index] = page0[index];
+ page0[index] = new;
+ }
+ }
+ RX_USED_SET(page0[index], 0);
+ return page0[index];
+}
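+
+/*
+ * Editor's note (illustrative, not from the original driver): page_count()
+ * is the "still in use" test for the swap above.  A count of 1 means only
+ * the driver holds the receive page, so it can be handed straight back to
+ * the chip; a count > 1 means the stack still holds a fragment reference,
+ * so the page is moved to the spare slot (rx_pages[1]) and replaced with a
+ * fresh page from rx_spare_list, while an old spare that is still
+ * referenced is parked on rx_inuse_list until cas_spare_recover() sees its
+ * count drop back to 1.
+ */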
+
+static void cas_clean_rxds(struct cas *cp)
+{
+ /* only clean ring 0 as ring 1 is used for spare buffers */
+ struct cas_rx_desc *rxd = cp->init_rxds[0];
+ int i, size;
+
+ /* release all rx flows */
+ for (i = 0; i < N_RX_FLOWS; i++) {
+ struct sk_buff *skb;
+ while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
+ cas_skb_release(skb);
+ }
+ }
+
+ /* initialize descriptors */
+ size = RX_DESC_RINGN_SIZE(0);
+ for (i = 0; i < size; i++) {
+ cas_page_t *page = cas_page_swap(cp, 0, i);
+ rxd[i].buffer = cpu_to_le64(page->dma_addr);
+ rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
+ CAS_BASE(RX_INDEX_RING, 0));
+ }
+
+ cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
+ cp->rx_last[0] = 0;
+ cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
+}
+
+static void cas_clean_rxcs(struct cas *cp)
+{
+ int i, j;
+
+ /* take ownership of rx comp descriptors */
+ memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
+ memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
+ for (i = 0; i < N_RX_COMP_RINGS; i++) {
+ struct cas_rx_comp *rxc = cp->init_rxcs[i];
+ for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
+ cas_rxc_init(rxc + j);
+ }
+ }
+}
+
+#if 0
+/* When we get a RX fifo overflow, the RX unit is probably hung
+ * so we do the following.
+ *
+ * If any part of the reset goes wrong, we return 1 and that causes the
+ * whole chip to be reset.
+ */
+static int cas_rxmac_reset(struct cas *cp)
+{
+ struct net_device *dev = cp->dev;
+ int limit;
+ u32 val;
+
+ /* First, reset MAC RX. */
+ writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
+ for (limit = 0; limit < STOP_TRIES; limit++) {
+ if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
+ break;
+ udelay(10);
+ }
+ if (limit == STOP_TRIES) {
+ printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
+ "chip.\n", dev->name);
+ return 1;
+ }
+
+ /* Second, disable RX DMA. */
+ writel(0, cp->regs + REG_RX_CFG);
+ for (limit = 0; limit < STOP_TRIES; limit++) {
+ if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
+ break;
+ udelay(10);
+ }
+ if (limit == STOP_TRIES) {
+ printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
+ "chip.\n", dev->name);
+ return 1;
+ }
+
+ mdelay(5);
+
+ /* Execute RX reset command. */
+ writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
+ for (limit = 0; limit < STOP_TRIES; limit++) {
+ if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
+ break;
+ udelay(10);
+ }
+ if (limit == STOP_TRIES) {
+ printk(KERN_ERR "%s: RX reset command will not execute, "
+ "resetting whole chip.\n", dev->name);
+ return 1;
+ }
+
+ /* reset driver rx state */
+ cas_clean_rxds(cp);
+ cas_clean_rxcs(cp);
+
+ /* Now, reprogram the rest of RX unit. */
+ cas_init_rx_dma(cp);
+
+ /* re-enable */
+ val = readl(cp->regs + REG_RX_CFG);
+ writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
+ writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
+ val = readl(cp->regs + REG_MAC_RX_CFG);
+ writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
+ return 0;
+}
+#endif
+
+static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
+ u32 status)
+{
+ u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
+
+ if (!stat)
+ return 0;
+
+ if (netif_msg_intr(cp))
+ printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n",
+ cp->dev->name, stat);
+
+ /* these are all rollovers */
+ spin_lock(&cp->stat_lock[0]);
+ if (stat & MAC_RX_ALIGN_ERR)
+ cp->net_stats[0].rx_frame_errors += 0x10000;
+
+ if (stat & MAC_RX_CRC_ERR)
+ cp->net_stats[0].rx_crc_errors += 0x10000;
+
+ if (stat & MAC_RX_LEN_ERR)
+ cp->net_stats[0].rx_length_errors += 0x10000;
+
+ if (stat & MAC_RX_OVERFLOW) {
+ cp->net_stats[0].rx_over_errors++;
+ cp->net_stats[0].rx_fifo_errors++;
+ }
+
+ /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
+ * events.
+ */
+ spin_unlock(&cp->stat_lock[0]);
+ return 0;
+}
+
+static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
+ u32 status)
+{
+ u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
+
+ if (!stat)
+ return 0;
+
+ if (netif_msg_intr(cp))
+ printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n",
+ cp->dev->name, stat);
+
+ /* This interrupt is just for pause frame and pause
+ * tracking. It is useful for diagnostics and debug
+ * but probably by default we will mask these events.
+ */
+ if (stat & MAC_CTRL_PAUSE_STATE)
+ cp->pause_entered++;
+
+ if (stat & MAC_CTRL_PAUSE_RECEIVED)
+ cp->pause_last_time_recvd = (stat >> 16);
+
+ return 0;
+}
+
+
+/* Must be invoked under cp->lock. */
+static inline int cas_mdio_link_not_up(struct cas *cp)
+{
+ u16 val;
+
+ switch (cp->lstate) {
+ case link_force_ret:
+ if (netif_msg_link(cp))
+ printk(KERN_INFO "%s: Autoneg failed again, keeping"
+ " forced mode\n", cp->dev->name);
+ cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
+ cp->timer_ticks = 5;
+ cp->lstate = link_force_ok;
+ cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+ break;
+
+ case link_aneg:
+ val = cas_phy_read(cp, MII_BMCR);
+
+ /* Try forced modes. we try things in the following order:
+ * 1000 full -> 100 full/half -> 10 half
+ */
+ val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
+ val |= BMCR_FULLDPLX;
+ val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
+ CAS_BMCR_SPEED1000 : BMCR_SPEED100;
+ cas_phy_write(cp, MII_BMCR, val);
+ cp->timer_ticks = 5;
+ cp->lstate = link_force_try;
+ cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+ break;
+
+ case link_force_try:
+ /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
+ val = cas_phy_read(cp, MII_BMCR);
+ cp->timer_ticks = 5;
+ if (val & CAS_BMCR_SPEED1000) { /* gigabit */
+ val &= ~CAS_BMCR_SPEED1000;
+ val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
+ cas_phy_write(cp, MII_BMCR, val);
+ break;
+ }
+
+ if (val & BMCR_SPEED100) {
+ if (val & BMCR_FULLDPLX) /* fd failed */
+ val &= ~BMCR_FULLDPLX;
+ else { /* 100Mbps failed */
+ val &= ~BMCR_SPEED100;
+ }
+ cas_phy_write(cp, MII_BMCR, val);
+ break;
+ }
+ default:
+ break;
+ }
+ return 0;
+}
+
+
+/* must be invoked with cp->lock held */
+static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
+{
+ int restart;
+
+ if (bmsr & BMSR_LSTATUS) {
+ /* Ok, here we got a link. If we had it due to a forced
+ * fallback, and we were configured for autoneg, we
+ * retry a short autoneg pass. If you know your hub is
+ * broken, use ethtool ;)
+ */
+ if ((cp->lstate == link_force_try) &&
+ (cp->link_cntl & BMCR_ANENABLE)) {
+ cp->lstate = link_force_ret;
+ cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+ cas_mif_poll(cp, 0);
+ cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
+ cp->timer_ticks = 5;
+ if (cp->opened && netif_msg_link(cp))
+ printk(KERN_INFO "%s: Got link after fallback, retrying"
+ " autoneg once...\n", cp->dev->name);
+ cas_phy_write(cp, MII_BMCR,
+ cp->link_fcntl | BMCR_ANENABLE |
+ BMCR_ANRESTART);
+ cas_mif_poll(cp, 1);
+
+ } else if (cp->lstate != link_up) {
+ cp->lstate = link_up;
+ cp->link_transition = LINK_TRANSITION_LINK_UP;
+
+ if (cp->opened) {
+ cas_set_link_modes(cp);
+ netif_carrier_on(cp->dev);
+ }
+ }
+ return 0;
+ }
+
+ /* link not up. if the link was previously up, we restart the
+ * whole process
+ */
+ restart = 0;
+ if (cp->lstate == link_up) {
+ cp->lstate = link_down;
+ cp->link_transition = LINK_TRANSITION_LINK_DOWN;
+
+ netif_carrier_off(cp->dev);
+ if (cp->opened && netif_msg_link(cp))
+ printk(KERN_INFO "%s: Link down\n",
+ cp->dev->name);
+ restart = 1;
+
+ } else if (++cp->timer_ticks > 10)
+ cas_mdio_link_not_up(cp);
+
+ return restart;
+}
+
+static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
+ u32 status)
+{
+ u32 stat = readl(cp->regs + REG_MIF_STATUS);
+ u16 bmsr;
+
+ /* check for a link change */
+ if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
+ return 0;
+
+ bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
+ return cas_mii_link_check(cp, bmsr);
+}
+
+static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
+ u32 status)
+{
+ u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
+
+ if (!stat)
+ return 0;
+
+ printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat,
+ readl(cp->regs + REG_BIM_DIAG));
+
+ /* cassini+ has this reserved */
+ if ((stat & PCI_ERR_BADACK) &&
+ ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
+ printk("<No ACK64# during ABS64 cycle> ");
+
+ if (stat & PCI_ERR_DTRTO)
+ printk("<Delayed transaction timeout> ");
+ if (stat & PCI_ERR_OTHER)
+ printk("<other> ");
+ if (stat & PCI_ERR_BIM_DMA_WRITE)
+ printk("<BIM DMA 0 write req> ");
+ if (stat & PCI_ERR_BIM_DMA_READ)
+ printk("<BIM DMA 0 read req> ");
+ printk("\n");
+
+ if (stat & PCI_ERR_OTHER) {
+ u16 cfg;
+
+ /* Interrogate PCI config space for the
+ * true cause.
+ */
+ pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
+ printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
+ dev->name, cfg);
+ if (cfg & PCI_STATUS_PARITY)
+ printk(KERN_ERR "%s: PCI parity error detected.\n",
+ dev->name);
+ if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
+ printk(KERN_ERR "%s: PCI target abort.\n",
+ dev->name);
+ if (cfg & PCI_STATUS_REC_TARGET_ABORT)
+ printk(KERN_ERR "%s: PCI master acks target abort.\n",
+ dev->name);
+ if (cfg & PCI_STATUS_REC_MASTER_ABORT)
+ printk(KERN_ERR "%s: PCI master abort.\n", dev->name);
+ if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
+ printk(KERN_ERR "%s: PCI system error SERR#.\n",
+ dev->name);
+ if (cfg & PCI_STATUS_DETECTED_PARITY)
+ printk(KERN_ERR "%s: PCI parity error.\n",
+ dev->name);
+
+ /* Write the error bits back to clear them. */
+ cfg &= (PCI_STATUS_PARITY |
+ PCI_STATUS_SIG_TARGET_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT |
+ PCI_STATUS_REC_MASTER_ABORT |
+ PCI_STATUS_SIG_SYSTEM_ERROR |
+ PCI_STATUS_DETECTED_PARITY);
+ pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
+ }
+
+ /* For all PCI errors, we should reset the chip. */
+ return 1;
+}
+
+/* All non-normal interrupt conditions get serviced here.
+ * Returns non-zero if we should just exit the interrupt
+ * handler right now (ie. if we reset the card which invalidates
+ * all of the other original irq status bits).
+ */
+static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
+ u32 status)
+{
+ if (status & INTR_RX_TAG_ERROR) {
+ /* corrupt RX tag framing */
+ if (netif_msg_rx_err(cp))
+ printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
+ cp->dev->name);
+ spin_lock(&cp->stat_lock[0]);
+ cp->net_stats[0].rx_errors++;
+ spin_unlock(&cp->stat_lock[0]);
+ goto do_reset;
+ }
+
+ if (status & INTR_RX_LEN_MISMATCH) {
+ /* length mismatch. */
+ if (netif_msg_rx_err(cp))
+ printk(KERN_DEBUG "%s: length mismatch for rx frame\n",
+ cp->dev->name);
+ spin_lock(&cp->stat_lock[0]);
+ cp->net_stats[0].rx_errors++;
+ spin_unlock(&cp->stat_lock[0]);
+ goto do_reset;
+ }
+
+ if (status & INTR_PCS_STATUS) {
+ if (cas_pcs_interrupt(dev, cp, status))
+ goto do_reset;
+ }
+
+ if (status & INTR_TX_MAC_STATUS) {
+ if (cas_txmac_interrupt(dev, cp, status))
+ goto do_reset;
+ }
+
+ if (status & INTR_RX_MAC_STATUS) {
+ if (cas_rxmac_interrupt(dev, cp, status))
+ goto do_reset;
+ }
+
+ if (status & INTR_MAC_CTRL_STATUS) {
+ if (cas_mac_interrupt(dev, cp, status))
+ goto do_reset;
+ }
+
+ if (status & INTR_MIF_STATUS) {
+ if (cas_mif_interrupt(dev, cp, status))
+ goto do_reset;
+ }
+
+ if (status & INTR_PCI_ERROR_STATUS) {
+ if (cas_pci_interrupt(dev, cp, status))
+ goto do_reset;
+ }
+ return 0;
+
+do_reset:
+#if 1
+ atomic_inc(&cp->reset_task_pending);
+ atomic_inc(&cp->reset_task_pending_all);
+ printk(KERN_ERR "%s:reset called in cas_abnormal_irq [0x%x]\n",
+ dev->name, status);
+ schedule_work(&cp->reset_task);
+#else
+ atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
+ printk(KERN_ERR "reset called in cas_abnormal_irq\n");
+ schedule_work(&cp->reset_task);
+#endif
+ return 1;
+}
+
+/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
+ * determining whether to do a netif_stop/wakeup
+ */
+#define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
+#define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
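+/* cas_calc_tabort(): on chip revisions flagged with CAS_FLAG_TARGET_ABORT
+ * (CAS_TABORT() == 2), a TX buffer whose end falls within
+ * TX_TARGET_ABORT_LEN bytes of a page boundary has its tail bounced
+ * through a tx_tiny buffer (see cas_xmit_tx_ringN()). This helper returns
+ * the number of trailing bytes to bounce, or 0 if no bounce is needed.
+ */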
+static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
+ const int len)
+{
+ unsigned long off = addr + len;
+
+ if (CAS_TABORT(cp) == 1)
+ return 0;
+ if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
+ return 0;
+ return TX_TARGET_ABORT_LEN;
+}
+
+static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
+{
+ struct cas_tx_desc *txds;
+ struct sk_buff **skbs;
+ struct net_device *dev = cp->dev;
+ int entry, count;
+
+ spin_lock(&cp->tx_lock[ring]);
+ txds = cp->init_txds[ring];
+ skbs = cp->tx_skbs[ring];
+ entry = cp->tx_old[ring];
+
+ count = TX_BUFF_COUNT(ring, entry, limit);
+ while (entry != limit) {
+ struct sk_buff *skb = skbs[entry];
+ dma_addr_t daddr;
+ u32 dlen;
+ int frag;
+
+ if (!skb) {
+ /* this should never occur */
+ entry = TX_DESC_NEXT(ring, entry);
+ continue;
+ }
+
+ /* however, we might get only a partial skb release. */
+ count -= skb_shinfo(skb)->nr_frags +
+ cp->tx_tiny_use[ring][entry].nbufs + 1;
+ if (count < 0)
+ break;
+
+ if (netif_msg_tx_done(cp))
+ printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n",
+ cp->dev->name, ring, entry);
+
+ skbs[entry] = NULL;
+ cp->tx_tiny_use[ring][entry].nbufs = 0;
+
+ for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+ struct cas_tx_desc *txd = txds + entry;
+
+ daddr = le64_to_cpu(txd->buffer);
+ dlen = CAS_VAL(TX_DESC_BUFLEN,
+ le64_to_cpu(txd->control));
+ pci_unmap_page(cp->pdev, daddr, dlen,
+ PCI_DMA_TODEVICE);
+ entry = TX_DESC_NEXT(ring, entry);
+
+ /* tiny buffer may follow */
+ if (cp->tx_tiny_use[ring][entry].used) {
+ cp->tx_tiny_use[ring][entry].used = 0;
+ entry = TX_DESC_NEXT(ring, entry);
+ }
+ }
+
+ spin_lock(&cp->stat_lock[ring]);
+ cp->net_stats[ring].tx_packets++;
+ cp->net_stats[ring].tx_bytes += skb->len;
+ spin_unlock(&cp->stat_lock[ring]);
+ dev_kfree_skb_irq(skb);
+ }
+ cp->tx_old[ring] = entry;
+
+ /* this is wrong for multiple tx rings. the net device needs
+ * multiple queues for this to do the right thing. we wait
+ * for 2*packets to be available when using tiny buffers
+ */
+ if (netif_queue_stopped(dev) &&
+ (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
+ netif_wake_queue(dev);
+ spin_unlock(&cp->tx_lock[ring]);
+}
+
+static void cas_tx(struct net_device *dev, struct cas *cp,
+ u32 status)
+{
+ int limit, ring;
+#ifdef USE_TX_COMPWB
+ u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
+
+ if (netif_msg_intr(cp))
+ printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
+ cp->dev->name, status, (unsigned long long) compwb);
+#else
+ if (netif_msg_intr(cp))
+ printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x\n",
+ cp->dev->name, status);
+#endif
+ /* process all the rings */
+ for (ring = 0; ring < N_TX_RINGS; ring++) {
+#ifdef USE_TX_COMPWB
+ /* use the completion writeback registers */
+ limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
+ CAS_VAL(TX_COMPWB_LSB, compwb);
+ compwb = TX_COMPWB_NEXT(compwb);
+#else
+ limit = readl(cp->regs + REG_TX_COMPN(ring));
+#endif
+ if (cp->tx_old[ring] != limit)
+ cas_tx_ringN(cp, ring, limit);
+ }
+}
+
+
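+/* Build an sk_buff for one RX completion entry. Small packets (and always
+ * the header portion) are copied into the skb; for larger packets the
+ * data is attached as page fragments. "swivel" (RX_SWIVEL_OFF_VAL) is
+ * headroom reserved at the front of the skb ahead of the copied header.
+ */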
+static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
+ int entry, const u64 *words,
+ struct sk_buff **skbref)
+{
+ int dlen, hlen, len, i, alloclen;
+ int off, swivel = RX_SWIVEL_OFF_VAL;
+ struct cas_page *page;
+ struct sk_buff *skb;
+ void *addr, *crcaddr;
+ char *p;
+
+ hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
+ dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
+ len = hlen + dlen;
+
+ if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
+ alloclen = len;
+ else
+ alloclen = max(hlen, RX_COPY_MIN);
+
+ skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
+ if (skb == NULL)
+ return -1;
+
+ *skbref = skb;
+ skb->dev = cp->dev;
+ skb_reserve(skb, swivel);
+
+ p = skb->data;
+ addr = crcaddr = NULL;
+ if (hlen) { /* always copy header pages */
+ i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
+ page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+ off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
+ swivel;
+
+ i = hlen;
+ if (!dlen) /* attach FCS */
+ i += cp->crc_size;
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
+ PCI_DMA_FROMDEVICE);
+ addr = cas_page_map(page->buffer);
+ memcpy(p, addr + off, i);
+ pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
+ PCI_DMA_FROMDEVICE);
+ cas_page_unmap(addr);
+ RX_USED_ADD(page, 0x100);
+ p += hlen;
+ swivel = 0;
+ }
+
+
+ if (alloclen < (hlen + dlen)) {
+ skb_frag_t *frag = skb_shinfo(skb)->frags;
+
+ /* normal or jumbo packets. we use frags */
+ i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
+ page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+ off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
+
+ hlen = min(cp->page_size - off, dlen);
+ if (hlen < 0) {
+ if (netif_msg_rx_err(cp)) {
+ printk(KERN_DEBUG "%s: rx page overflow: "
+ "%d\n", cp->dev->name, hlen);
+ }
+ dev_kfree_skb_irq(skb);
+ return -1;
+ }
+ i = hlen;
+ if (i == dlen) /* attach FCS */
+ i += cp->crc_size;
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
+ PCI_DMA_FROMDEVICE);
+
+ /* make sure we always copy a header */
+ swivel = 0;
+ if (p == (char *) skb->data) { /* not split */
+ addr = cas_page_map(page->buffer);
+ memcpy(p, addr + off, RX_COPY_MIN);
+ pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
+ PCI_DMA_FROMDEVICE);
+ cas_page_unmap(addr);
+ off += RX_COPY_MIN;
+ swivel = RX_COPY_MIN;
+ RX_USED_ADD(page, cp->mtu_stride);
+ } else {
+ RX_USED_ADD(page, hlen);
+ }
+ skb_put(skb, alloclen);
+
+ skb_shinfo(skb)->nr_frags++;
+ skb->data_len += hlen - swivel;
+ skb->len += hlen - swivel;
+
+ get_page(page->buffer);
+ frag->page = page->buffer;
+ frag->page_offset = off;
+ frag->size = hlen - swivel;
+
+ /* any more data? */
+ if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
+ hlen = dlen;
+ off = 0;
+
+ i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
+ page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
+ hlen + cp->crc_size,
+ PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
+ hlen + cp->crc_size,
+ PCI_DMA_FROMDEVICE);
+
+ skb_shinfo(skb)->nr_frags++;
+ skb->data_len += hlen;
+ skb->len += hlen;
+ frag++;
+
+ get_page(page->buffer);
+ frag->page = page->buffer;
+ frag->page_offset = 0;
+ frag->size = hlen;
+ RX_USED_ADD(page, hlen + cp->crc_size);
+ }
+
+ if (cp->crc_size) {
+ addr = cas_page_map(page->buffer);
+ crcaddr = addr + off + hlen;
+ }
+
+ } else {
+ /* copying packet */
+ if (!dlen)
+ goto end_copy_pkt;
+
+ i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
+ page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+ off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
+ hlen = min(cp->page_size - off, dlen);
+ if (hlen < 0) {
+ if (netif_msg_rx_err(cp)) {
+ printk(KERN_DEBUG "%s: rx page overflow: "
+ "%d\n", cp->dev->name, hlen);
+ }
+ dev_kfree_skb_irq(skb);
+ return -1;
+ }
+ i = hlen;
+ if (i == dlen) /* attach FCS */
+ i += cp->crc_size;
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
+ PCI_DMA_FROMDEVICE);
+ addr = cas_page_map(page->buffer);
+ memcpy(p, addr + off, i);
+ pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
+ PCI_DMA_FROMDEVICE);
+ cas_page_unmap(addr);
+ if (p == (char *) skb->data) /* not split */
+ RX_USED_ADD(page, cp->mtu_stride);
+ else
+ RX_USED_ADD(page, i);
+
+ /* any more data? */
+ if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
+ p += hlen;
+ i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
+ page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
+ dlen + cp->crc_size,
+ PCI_DMA_FROMDEVICE);
+ addr = cas_page_map(page->buffer);
+ memcpy(p, addr, dlen + cp->crc_size);
+ pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
+ dlen + cp->crc_size,
+ PCI_DMA_FROMDEVICE);
+ cas_page_unmap(addr);
+ RX_USED_ADD(page, dlen + cp->crc_size);
+ }
+end_copy_pkt:
+ if (cp->crc_size) {
+ addr = NULL;
+ crcaddr = skb->data + alloclen;
+ }
+ skb_put(skb, alloclen);
+ }
+
+ i = CAS_VAL(RX_COMP4_TCP_CSUM, words[3]);
+ if (cp->crc_size) {
+ /* checksum includes FCS. strip it out. */
+ i = csum_fold(csum_partial(crcaddr, cp->crc_size, i));
+ if (addr)
+ cas_page_unmap(addr);
+ }
+ skb->csum = ntohs(i ^ 0xffff);
+ skb->ip_summed = CHECKSUM_HW;
+ skb->protocol = eth_type_trans(skb, cp->dev);
+ return len;
+}
+
+
+/* we can handle up to 64 rx flows at a time. we do the same thing
+ * as nonreassm except that we batch up the buffers.
+ * NOTE: we currently just treat each flow as a bunch of packets that
+ * we pass up. a better way would be to coalesce the packets
+ * into a jumbo packet. to do that, we need to do the following:
+ * 1) the first packet will have a clean split between header and
+ * data. save both.
+ * 2) each time the next flow packet comes in, extend the
+ * data length and merge the checksums.
+ * 3) on flow release, fix up the header.
+ * 4) make sure the higher layer doesn't care.
+ * because packets get coalesced, we shouldn't run into fragment count
+ * issues.
+ */
+static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
+ struct sk_buff *skb)
+{
+ int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
+ struct sk_buff_head *flow = &cp->rx_flows[flowid];
+
+ /* this is protected at a higher layer, so no need to
+ * do any additional locking here. stick the buffer
+ * at the end.
+ */
+ __skb_insert(skb, flow->prev, (struct sk_buff *) flow, flow);
+ if (words[0] & RX_COMP1_RELEASE_FLOW) {
+ while ((skb = __skb_dequeue(flow))) {
+ cas_skb_release(skb);
+ }
+ }
+}
+
+/* put rx descriptor back on ring. if a buffer is in use by a higher
+ * layer, this will need to put in a replacement.
+ */
+static void cas_post_page(struct cas *cp, const int ring, const int index)
+{
+ cas_page_t *new;
+ int entry;
+
+ entry = cp->rx_old[ring];
+
+ new = cas_page_swap(cp, ring, index);
+ cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
+ cp->init_rxds[ring][entry].index =
+ cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
+ CAS_BASE(RX_INDEX_RING, ring));
+
+ entry = RX_DESC_ENTRY(ring, entry + 1);
+ cp->rx_old[ring] = entry;
+
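+ /* The RX kick register is only written on 4-entry boundaries: the
+ * chip takes descriptors back in groups of four (cas_post_rxds_ringN()
+ * tracks the same alignment with its "cluster" variable).
+ */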
+ if (entry % 4)
+ return;
+
+ if (ring == 0)
+ writel(entry, cp->regs + REG_RX_KICK);
+ else if ((N_RX_DESC_RINGS > 1) &&
+ (cp->cas_flags & CAS_FLAG_REG_PLUS))
+ writel(entry, cp->regs + REG_PLUS_RX_KICK1);
+}
+
+
+/* Bulk-refill an RX descriptor ring; used only when things are bad
+ * (e.g. RX buffer unavailable / almost-empty interrupts), since the
+ * normal path reposts pages one at a time via cas_post_page().
+ */
+static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
+{
+ unsigned int entry, last, count, released;
+ int cluster;
+ cas_page_t **page = cp->rx_pages[ring];
+
+ entry = cp->rx_old[ring];
+
+ if (netif_msg_intr(cp))
+ printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n",
+ cp->dev->name, ring, entry);
+
+ cluster = -1;
+ count = entry & 0x3;
+ last = RX_DESC_ENTRY(ring, num ? entry + num - 4 : entry - 4);
+ released = 0;
+ while (entry != last) {
+ /* make a new buffer if it's still in use */
+ if (page_count(page[entry]->buffer) > 1) {
+ cas_page_t *new = cas_page_dequeue(cp);
+ if (!new) {
+ /* let the timer know that we need to
+ * do this again
+ */
+ cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
+ if (!timer_pending(&cp->link_timer))
+ mod_timer(&cp->link_timer, jiffies +
+ CAS_LINK_FAST_TIMEOUT);
+ cp->rx_old[ring] = entry;
+ cp->rx_last[ring] = num ? num - released : 0;
+ return -ENOMEM;
+ }
+ spin_lock(&cp->rx_inuse_lock);
+ list_add(&page[entry]->list, &cp->rx_inuse_list);
+ spin_unlock(&cp->rx_inuse_lock);
+ cp->init_rxds[ring][entry].buffer =
+ cpu_to_le64(new->dma_addr);
+ page[entry] = new;
+
+ }
+
+ if (++count == 4) {
+ cluster = entry;
+ count = 0;
+ }
+ released++;
+ entry = RX_DESC_ENTRY(ring, entry + 1);
+ }
+ cp->rx_old[ring] = entry;
+
+ if (cluster < 0)
+ return 0;
+
+ if (ring == 0)
+ writel(cluster, cp->regs + REG_RX_KICK);
+ else if ((N_RX_DESC_RINGS > 1) &&
+ (cp->cas_flags & CAS_FLAG_REG_PLUS))
+ writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
+ return 0;
+}
+
+
+/* process a completion ring. packets are set up in three basic ways:
+ * small packets: should be copied header + data in single buffer.
+ * large packets: header and data in a single buffer.
+ * split packets: header in a separate buffer from data.
+ * data may be in multiple pages. data may be > 256
+ * bytes but in a single page.
+ *
+ * NOTE: RX page posting is done in this routine as well. while there's
+ * the capability of using multiple RX completion rings, it isn't
+ * really worthwhile due to the fact that the page posting will
+ * force serialization on the single descriptor ring.
+ */
+static int cas_rx_ringN(struct cas *cp, int ring, int budget)
+{
+ struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
+ int entry, drops;
+ int npackets = 0;
+
+ if (netif_msg_intr(cp))
+ printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n",
+ cp->dev->name, ring,
+ readl(cp->regs + REG_RX_COMP_HEAD),
+ cp->rx_new[ring]);
+
+ entry = cp->rx_new[ring];
+ drops = 0;
+ while (1) {
+ struct cas_rx_comp *rxc = rxcs + entry;
+ struct sk_buff *skb;
+ int type, len;
+ u64 words[4];
+ int i, dring;
+
+ words[0] = le64_to_cpu(rxc->word1);
+ words[1] = le64_to_cpu(rxc->word2);
+ words[2] = le64_to_cpu(rxc->word3);
+ words[3] = le64_to_cpu(rxc->word4);
+
+ /* don't touch if still owned by hw */
+ type = CAS_VAL(RX_COMP1_TYPE, words[0]);
+ if (type == 0)
+ break;
+
+ /* hw hasn't cleared the zero bit yet */
+ if (words[3] & RX_COMP4_ZERO) {
+ break;
+ }
+
+ /* get info on the packet */
+ if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
+ spin_lock(&cp->stat_lock[ring]);
+ cp->net_stats[ring].rx_errors++;
+ if (words[3] & RX_COMP4_LEN_MISMATCH)
+ cp->net_stats[ring].rx_length_errors++;
+ if (words[3] & RX_COMP4_BAD)
+ cp->net_stats[ring].rx_crc_errors++;
+ spin_unlock(&cp->stat_lock[ring]);
+
+ /* We'll just return it to Cassini. */
+ drop_it:
+ spin_lock(&cp->stat_lock[ring]);
+ ++cp->net_stats[ring].rx_dropped;
+ spin_unlock(&cp->stat_lock[ring]);
+ goto next;
+ }
+
+ len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
+ if (len < 0) {
+ ++drops;
+ goto drop_it;
+ }
+
+ /* see if it's a flow re-assembly or not. the driver
+ * itself handles release back up.
+ */
+ if (RX_DONT_BATCH || (type == 0x2)) {
+ /* non-reassm: these always get released */
+ cas_skb_release(skb);
+ } else {
+ cas_rx_flow_pkt(cp, words, skb);
+ }
+
+ spin_lock(&cp->stat_lock[ring]);
+ cp->net_stats[ring].rx_packets++;
+ cp->net_stats[ring].rx_bytes += len;
+ spin_unlock(&cp->stat_lock[ring]);
+ cp->dev->last_rx = jiffies;
+
+ next:
+ npackets++;
+
+ /* should it be released? */
+ if (words[0] & RX_COMP1_RELEASE_HDR) {
+ i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
+ dring = CAS_VAL(RX_INDEX_RING, i);
+ i = CAS_VAL(RX_INDEX_NUM, i);
+ cas_post_page(cp, dring, i);
+ }
+
+ if (words[0] & RX_COMP1_RELEASE_DATA) {
+ i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
+ dring = CAS_VAL(RX_INDEX_RING, i);
+ i = CAS_VAL(RX_INDEX_NUM, i);
+ cas_post_page(cp, dring, i);
+ }
+
+ if (words[0] & RX_COMP1_RELEASE_NEXT) {
+ i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
+ dring = CAS_VAL(RX_INDEX_RING, i);
+ i = CAS_VAL(RX_INDEX_NUM, i);
+ cas_post_page(cp, dring, i);
+ }
+
+ /* skip to the next entry */
+ entry = RX_COMP_ENTRY(ring, entry + 1 +
+ CAS_VAL(RX_COMP1_SKIP, words[0]));
+#ifdef USE_NAPI
+ if (budget && (npackets >= budget))
+ break;
+#endif
+ }
+ cp->rx_new[ring] = entry;
+
+ if (drops)
+ printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
+ cp->dev->name);
+ return npackets;
+}
+
+
+/* put completion entries back on the ring */
+static void cas_post_rxcs_ringN(struct net_device *dev,
+ struct cas *cp, int ring)
+{
+ struct cas_rx_comp *rxc = cp->init_rxcs[ring];
+ int last, entry;
+
+ last = cp->rx_cur[ring];
+ entry = cp->rx_new[ring];
+ if (netif_msg_intr(cp))
+ printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n",
+ dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD),
+ entry);
+
+ /* zero and re-mark descriptors */
+ while (last != entry) {
+ cas_rxc_init(rxc + last);
+ last = RX_COMP_ENTRY(ring, last + 1);
+ }
+ cp->rx_cur[ring] = last;
+
+ if (ring == 0)
+ writel(last, cp->regs + REG_RX_COMP_TAIL);
+ else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
+ writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
+}
+
+
+
+/* cassini can use all four PCI interrupts for the completion ring.
+ * rings 3 and 4 are identical
+ */
+#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
+static inline void cas_handle_irqN(struct net_device *dev,
+ struct cas *cp, const u32 status,
+ const int ring)
+{
+ if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
+ cas_post_rxcs_ringN(dev, cp, ring);
+}
+
+static irqreturn_t cas_interruptN(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
+ int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
+ u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
+
+ /* check for shared irq */
+ if (status == 0)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
+#ifdef USE_NAPI
+ cas_mask_intr(cp);
+ netif_rx_schedule(dev);
+#else
+ cas_rx_ringN(cp, ring, 0);
+#endif
+ status &= ~INTR_RX_DONE_ALT;
+ }
+
+ if (status)
+ cas_handle_irqN(dev, cp, status, ring);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return IRQ_HANDLED;
+}
+#endif
+
+#ifdef USE_PCI_INTB
+/* everything but rx packets */
+static inline void cas_handle_irq1(struct cas *cp, const u32 status)
+{
+ if (status & INTR_RX_BUF_UNAVAIL_1) {
+ /* Frame arrived, no free RX buffers available.
+ * NOTE: we can get this on a link transition. */
+ cas_post_rxds_ringN(cp, 1, 0);
+ spin_lock(&cp->stat_lock[1]);
+ cp->net_stats[1].rx_dropped++;
+ spin_unlock(&cp->stat_lock[1]);
+ }
+
+ if (status & INTR_RX_BUF_AE_1)
+ cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
+ RX_AE_FREEN_VAL(1));
+
+ if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
+ cas_post_rxcs_ringN(cp->dev, cp, 1);
+}
+
+/* ring 2 handles a few more events than 3 and 4 */
+static irqreturn_t cas_interrupt1(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
+ u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
+
+ /* check for shared interrupt */
+ if (status == 0)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
+#ifdef USE_NAPI
+ cas_mask_intr(cp);
+ netif_rx_schedule(dev);
+#else
+ cas_rx_ringN(cp, 1, 0);
+#endif
+ status &= ~INTR_RX_DONE_ALT;
+ }
+ if (status)
+ cas_handle_irq1(cp, status);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return IRQ_HANDLED;
+}
+#endif
+
+static inline void cas_handle_irq(struct net_device *dev,
+ struct cas *cp, const u32 status)
+{
+ /* housekeeping interrupts */
+ if (status & INTR_ERROR_MASK)
+ cas_abnormal_irq(dev, cp, status);
+
+ if (status & INTR_RX_BUF_UNAVAIL) {
+ /* Frame arrived, no free RX buffers available.
+ * NOTE: we can get this on a link transition.
+ */
+ cas_post_rxds_ringN(cp, 0, 0);
+ spin_lock(&cp->stat_lock[0]);
+ cp->net_stats[0].rx_dropped++;
+ spin_unlock(&cp->stat_lock[0]);
+ } else if (status & INTR_RX_BUF_AE) {
+ cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
+ RX_AE_FREEN_VAL(0));
+ }
+
+ if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
+ cas_post_rxcs_ringN(dev, cp, 0);
+}
+
+static irqreturn_t cas_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
+ u32 status = readl(cp->regs + REG_INTR_STATUS);
+
+ if (status == 0)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
+ cas_tx(dev, cp, status);
+ status &= ~(INTR_TX_ALL | INTR_TX_INTME);
+ }
+
+ if (status & INTR_RX_DONE) {
+#ifdef USE_NAPI
+ cas_mask_intr(cp);
+ netif_rx_schedule(dev);
+#else
+ cas_rx_ringN(cp, 0, 0);
+#endif
+ status &= ~INTR_RX_DONE;
+ }
+
+ if (status)
+ cas_handle_irq(dev, cp, status);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return IRQ_HANDLED;
+}
+
+
+#ifdef USE_NAPI
+static int cas_poll(struct net_device *dev, int *budget)
+{
+ struct cas *cp = netdev_priv(dev);
+ int i, enable_intr, todo, credits;
+ u32 status = readl(cp->regs + REG_INTR_STATUS);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_tx(dev, cp, status);
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ /* NAPI rx packets. we spread the credits across all of the
+ * rxc rings
+ */
+ todo = min(*budget, dev->quota);
+
+ /* to make sure we're fair with the work, we loop through each
+ * ring N_RX_COMP_RINGS times with a request of
+ * todo / N_RX_COMP_RINGS
+ */
+ enable_intr = 1;
+ credits = 0;
+ for (i = 0; i < N_RX_COMP_RINGS; i++) {
+ int j;
+ for (j = 0; j < N_RX_COMP_RINGS; j++) {
+ credits += cas_rx_ringN(cp, j, todo / N_RX_COMP_RINGS);
+ if (credits >= todo) {
+ enable_intr = 0;
+ goto rx_comp;
+ }
+ }
+ }
+
+rx_comp:
+ *budget -= credits;
+ dev->quota -= credits;
+
+ /* final rx completion */
+ spin_lock_irqsave(&cp->lock, flags);
+ if (status)
+ cas_handle_irq(dev, cp, status);
+
+#ifdef USE_PCI_INTB
+ if (N_RX_COMP_RINGS > 1) {
+ status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
+ if (status)
+ cas_handle_irq1(cp, status);
+ }
+#endif
+
+#ifdef USE_PCI_INTC
+ if (N_RX_COMP_RINGS > 2) {
+ status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
+ if (status)
+ cas_handle_irqN(dev, cp, status, 2);
+ }
+#endif
+
+#ifdef USE_PCI_INTD
+ if (N_RX_COMP_RINGS > 3) {
+ status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
+ if (status)
+ cas_handle_irqN(dev, cp, status, 3);
+ }
+#endif
+ spin_unlock_irqrestore(&cp->lock, flags);
+ if (enable_intr) {
+ netif_rx_complete(dev);
+ cas_unmask_intr(cp);
+ return 0;
+ }
+ return 1;
+}
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cas_netpoll(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+
+ cas_disable_irq(cp, 0);
+ cas_interrupt(cp->pdev->irq, dev, NULL);
+ cas_enable_irq(cp, 0);
+
+#ifdef USE_PCI_INTB
+ if (N_RX_COMP_RINGS > 1) {
+ /* cas_interrupt1(); */
+ }
+#endif
+#ifdef USE_PCI_INTC
+ if (N_RX_COMP_RINGS > 2) {
+ /* cas_interruptN(); */
+ }
+#endif
+#ifdef USE_PCI_INTD
+ if (N_RX_COMP_RINGS > 3) {
+ /* cas_interruptN(); */
+ }
+#endif
+}
+#endif
+
+static void cas_tx_timeout(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+
+ printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+ if (!cp->hw_running) {
+ printk(KERN_ERR "%s: hrm.. hw not running!\n", dev->name);
+ return;
+ }
+
+ printk(KERN_ERR "%s: MIF_STATE[%08x]\n",
+ dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE));
+
+ printk(KERN_ERR "%s: MAC_STATE[%08x]\n",
+ dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE));
+
+ printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] "
+ "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
+ dev->name,
+ readl(cp->regs + REG_TX_CFG),
+ readl(cp->regs + REG_MAC_TX_STATUS),
+ readl(cp->regs + REG_MAC_TX_CFG),
+ readl(cp->regs + REG_TX_FIFO_PKT_CNT),
+ readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
+ readl(cp->regs + REG_TX_FIFO_READ_PTR),
+ readl(cp->regs + REG_TX_SM_1),
+ readl(cp->regs + REG_TX_SM_2));
+
+ printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
+ dev->name,
+ readl(cp->regs + REG_RX_CFG),
+ readl(cp->regs + REG_MAC_RX_STATUS),
+ readl(cp->regs + REG_MAC_RX_CFG));
+
+ printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n",
+ dev->name,
+ readl(cp->regs + REG_HP_STATE_MACHINE),
+ readl(cp->regs + REG_HP_STATUS0),
+ readl(cp->regs + REG_HP_STATUS1),
+ readl(cp->regs + REG_HP_STATUS2));
+
+#if 1
+ atomic_inc(&cp->reset_task_pending);
+ atomic_inc(&cp->reset_task_pending_all);
+ schedule_work(&cp->reset_task);
+#else
+ atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
+ schedule_work(&cp->reset_task);
+#endif
+}
+
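+/* Decide whether a descriptor should request a TX-done interrupt. The mask
+ * below makes this true for every (ring_size / 2)th entry; e.g. for an
+ * illustrative 128-entry ring, entries 0 and 64. Completions are thus
+ * coalesced rather than interrupting per packet.
+ */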
+static inline int cas_intme(int ring, int entry)
+{
+ /* Algorithm: IRQ every 1/2 of descriptors. */
+ if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
+ return 1;
+ return 0;
+}
+
+
+static void cas_write_txd(struct cas *cp, int ring, int entry,
+ dma_addr_t mapping, int len, u64 ctrl, int last)
+{
+ struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
+
+ ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
+ if (cas_intme(ring, entry))
+ ctrl |= TX_DESC_INTME;
+ if (last)
+ ctrl |= TX_DESC_EOF;
+ txd->control = cpu_to_le64(ctrl);
+ txd->buffer = cpu_to_le64(mapping);
+}
+
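+/* Each TX ring has a small per-entry bounce area (tx_tiny_bufs). When a
+ * buffer is split for the target-abort workaround, its tail is copied in
+ * here and described by an extra descriptor; tx_tiny_use[] records these
+ * extra slots so cas_tx_ringN() can account for and skip them on reclaim.
+ */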
+static inline void *tx_tiny_buf(struct cas *cp, const int ring,
+ const int entry)
+{
+ return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
+}
+
+static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
+ const int entry, const int tentry)
+{
+ cp->tx_tiny_use[ring][tentry].nbufs++;
+ cp->tx_tiny_use[ring][entry].used = 1;
+ return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
+}
+
+static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
+ struct sk_buff *skb)
+{
+ struct net_device *dev = cp->dev;
+ int entry, nr_frags, frag, tabort, tentry;
+ dma_addr_t mapping;
+ unsigned long flags;
+ u64 ctrl;
+ u32 len;
+
+ spin_lock_irqsave(&cp->tx_lock[ring], flags);
+
+ /* This is a hard error, log it. */
+ if (TX_BUFFS_AVAIL(cp, ring) <=
+ CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
+ printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
+ "queue awake!\n", dev->name);
+ return 1;
+ }
+
+ ctrl = 0;
+ if (skb->ip_summed == CHECKSUM_HW) {
+ u64 csum_start_off, csum_stuff_off;
+
+ csum_start_off = (u64) (skb->h.raw - skb->data);
+ csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
+
+ ctrl = TX_DESC_CSUM_EN |
+ CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
+ CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
+ }
+
+ entry = cp->tx_new[ring];
+ cp->tx_skbs[ring][entry] = skb;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ len = skb_headlen(skb);
+ mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
+ offset_in_page(skb->data), len,
+ PCI_DMA_TODEVICE);
+
+ tentry = entry;
+ tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
+ if (unlikely(tabort)) {
+ /* NOTE: len is always > tabort */
+ cas_write_txd(cp, ring, entry, mapping, len - tabort,
+ ctrl | TX_DESC_SOF, 0);
+ entry = TX_DESC_NEXT(ring, entry);
+
+ memcpy(tx_tiny_buf(cp, ring, entry), skb->data +
+ len - tabort, tabort);
+ mapping = tx_tiny_map(cp, ring, entry, tentry);
+ cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
+ (nr_frags == 0));
+ } else {
+ cas_write_txd(cp, ring, entry, mapping, len, ctrl |
+ TX_DESC_SOF, (nr_frags == 0));
+ }
+ entry = TX_DESC_NEXT(ring, entry);
+
+ for (frag = 0; frag < nr_frags; frag++) {
+ skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
+
+ len = fragp->size;
+ mapping = pci_map_page(cp->pdev, fragp->page,
+ fragp->page_offset, len,
+ PCI_DMA_TODEVICE);
+
+ tabort = cas_calc_tabort(cp, fragp->page_offset, len);
+ if (unlikely(tabort)) {
+ void *addr;
+
+ /* NOTE: len is always > tabort */
+ cas_write_txd(cp, ring, entry, mapping, len - tabort,
+ ctrl, 0);
+ entry = TX_DESC_NEXT(ring, entry);
+
+ addr = cas_page_map(fragp->page);
+ memcpy(tx_tiny_buf(cp, ring, entry),
+ addr + fragp->page_offset + len - tabort,
+ tabort);
+ cas_page_unmap(addr);
+ mapping = tx_tiny_map(cp, ring, entry, tentry);
+ len = tabort;
+ }
+
+ cas_write_txd(cp, ring, entry, mapping, len, ctrl,
+ (frag + 1 == nr_frags));
+ entry = TX_DESC_NEXT(ring, entry);
+ }
+
+ cp->tx_new[ring] = entry;
+ if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
+ netif_stop_queue(dev);
+
+ if (netif_msg_tx_queued(cp))
+ printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, "
+ "avail %d\n",
+ dev->name, ring, entry, skb->len,
+ TX_BUFFS_AVAIL(cp, ring));
+ writel(entry, cp->regs + REG_TX_KICKN(ring));
+ spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
+ return 0;
+}
+
+static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+
+ /* this is only used as a load-balancing hint, so it doesn't
+ * need to be SMP safe
+ */
+ static int ring;
+
+ skb = skb_padto(skb, cp->min_frame_size);
+ if (!skb)
+ return 0;
+
+ /* XXX: we need some higher-level QoS hooks to steer packets to
+ * individual queues.
+ */
+ if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
+ return 1;
+ dev->trans_start = jiffies;
+ return 0;
+}
+
+static void cas_init_tx_dma(struct cas *cp)
+{
+ u64 desc_dma = cp->block_dvma;
+ unsigned long off;
+ u32 val;
+ int i;
+
+ /* set up tx completion writeback registers. must be 8-byte aligned */
+#ifdef USE_TX_COMPWB
+ off = offsetof(struct cas_init_block, tx_compwb);
+ writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
+ writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
+#endif
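+ /* With USE_TX_COMPWB the chip writes the TX completion indices back
+ * into init_block->tx_compwb by DMA, letting cas_tx() read TX progress
+ * from memory instead of the REG_TX_COMPN registers.
+ */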
+
+ /* enable completion writebacks, enable paced mode,
+ * disable read pipe, and disable pre-interrupt compwbs
+ */
+ val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
+ TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
+ TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
+ TX_CFG_INTR_COMPWB_DIS;
+
+ /* write out tx ring info and tx desc bases */
+ for (i = 0; i < MAX_TX_RINGS; i++) {
+ off = (unsigned long) cp->init_txds[i] -
+ (unsigned long) cp->init_block;
+
+ val |= CAS_TX_RINGN_BASE(i);
+ writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
+ writel((desc_dma + off) & 0xffffffff, cp->regs +
+ REG_TX_DBN_LOW(i));
+ /* don't zero out the kick register here as the system
+ * will wedge
+ */
+ }
+ writel(val, cp->regs + REG_TX_CFG);
+
+ /* program max burst sizes. these numbers should be different
+ * if doing QoS.
+ */
+#ifdef USE_QOS
+ writel(0x800, cp->regs + REG_TX_MAXBURST_0);
+ writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
+ writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
+ writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
+#else
+ writel(0x800, cp->regs + REG_TX_MAXBURST_0);
+ writel(0x800, cp->regs + REG_TX_MAXBURST_1);
+ writel(0x800, cp->regs + REG_TX_MAXBURST_2);
+ writel(0x800, cp->regs + REG_TX_MAXBURST_3);
+#endif
+}
+
+/* Must be invoked under cp->lock. */
+static inline void cas_init_dma(struct cas *cp)
+{
+ cas_init_tx_dma(cp);
+ cas_init_rx_dma(cp);
+}
+
+/* Must be invoked under cp->lock. */
+static u32 cas_setup_multicast(struct cas *cp)
+{
+ u32 rxcfg = 0;
+ int i;
+
+ if (cp->dev->flags & IFF_PROMISC) {
+ rxcfg |= MAC_RX_CFG_PROMISC_EN;
+
+ } else if (cp->dev->flags & IFF_ALLMULTI) {
+ for (i=0; i < 16; i++)
+ writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
+ rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
+
+ } else {
+ u16 hash_table[16];
+ u32 crc;
+ struct dev_mc_list *dmi = cp->dev->mc_list;
+ int i;
+
+ /* use the alternate mac address registers for the
+ * first 15 multicast addresses
+ */
+ for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) {
+ if (!dmi) {
+ writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0));
+ writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1));
+ writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2));
+ continue;
+ }
+ writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
+ cp->regs + REG_MAC_ADDRN(i*3 + 0));
+ writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
+ cp->regs + REG_MAC_ADDRN(i*3 + 1));
+ writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
+ cp->regs + REG_MAC_ADDRN(i*3 + 2));
+ dmi = dmi->next;
+ }
+
+ /* use hw hash table for the next series of
+ * multicast addresses
+ */
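+ /* The hash index is the top byte of the little-endian CRC of the
+ * address: bits 7:4 select one of the 16 hash registers and bits 3:0
+ * select a bit within it, counted down from bit 15.
+ */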
+ memset(hash_table, 0, sizeof(hash_table));
+ while (dmi) {
+ crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
+ crc >>= 24;
+ hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+ dmi = dmi->next;
+ }
+ for (i=0; i < 16; i++)
+ writel(hash_table[i], cp->regs +
+ REG_MAC_HASH_TABLEN(i));
+ rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
+ }
+
+ return rxcfg;
+}
+
+/* must be invoked under cp->stat_lock[N_TX_RINGS] */
+static void cas_clear_mac_err(struct cas *cp)
+{
+ writel(0, cp->regs + REG_MAC_COLL_NORMAL);
+ writel(0, cp->regs + REG_MAC_COLL_FIRST);
+ writel(0, cp->regs + REG_MAC_COLL_EXCESS);
+ writel(0, cp->regs + REG_MAC_COLL_LATE);
+ writel(0, cp->regs + REG_MAC_TIMER_DEFER);
+ writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
+ writel(0, cp->regs + REG_MAC_RECV_FRAME);
+ writel(0, cp->regs + REG_MAC_LEN_ERR);
+ writel(0, cp->regs + REG_MAC_ALIGN_ERR);
+ writel(0, cp->regs + REG_MAC_FCS_ERR);
+ writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
+}
+
+
+static void cas_mac_reset(struct cas *cp)
+{
+ int i;
+
+ /* do both TX and RX reset */
+ writel(0x1, cp->regs + REG_MAC_TX_RESET);
+ writel(0x1, cp->regs + REG_MAC_RX_RESET);
+
+ /* wait for TX */
+ i = STOP_TRIES;
+ while (i-- > 0) {
+ if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
+ break;
+ udelay(10);
+ }
+
+ /* wait for RX */
+ i = STOP_TRIES;
+ while (i-- > 0) {
+ if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
+ break;
+ udelay(10);
+ }
+
+ if (readl(cp->regs + REG_MAC_TX_RESET) |
+ readl(cp->regs + REG_MAC_RX_RESET))
+ printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n",
+ cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET),
+ readl(cp->regs + REG_MAC_RX_RESET),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
+}
+
+
+/* Must be invoked under cp->lock. */
+static void cas_init_mac(struct cas *cp)
+{
+ unsigned char *e = &cp->dev->dev_addr[0];
+ int i;
+#ifdef CONFIG_CASSINI_MULTICAST_REG_WRITE
+ u32 rxcfg;
+#endif
+ cas_mac_reset(cp);
+
+ /* setup core arbitration weight register */
+ writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
+
+ /* XXX Use pci_dma_burst_advice() */
+#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
+ /* set the infinite burst register for chips that don't have
+ * pci issues.
+ */
+ if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
+ writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
+#endif
+
+ writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
+
+ writel(0x00, cp->regs + REG_MAC_IPG0);
+ writel(0x08, cp->regs + REG_MAC_IPG1);
+ writel(0x04, cp->regs + REG_MAC_IPG2);
+
+ /* change later for 802.3z */
+ writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
+
+ /* min frame + FCS */
+ writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
+
+ /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
+ * specify the maximum frame size to prevent RX tag errors on
+ * oversized frames.
+ */
+ writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
+ CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
+ (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
+ cp->regs + REG_MAC_FRAMESIZE_MAX);
+
+ /* NOTE: crc_size is used as a surrogate for half-duplex.
+ * Work around the Saturn half-duplex issue by increasing the
+ * preamble size to 65 bytes.
+ */
+ if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
+ writel(0x41, cp->regs + REG_MAC_PA_SIZE);
+ else
+ writel(0x07, cp->regs + REG_MAC_PA_SIZE);
+ writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
+ writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
+ writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
+
+ writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
+
+ writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
+ writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
+ writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
+ writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
+ writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
+
+ /* setup mac address in perfect filter array */
+ for (i = 0; i < 45; i++)
+ writel(0x0, cp->regs + REG_MAC_ADDRN(i));
+
+ writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
+ writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
+ writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
+
+ writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
+ writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
+ writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
+
+#ifndef CONFIG_CASSINI_MULTICAST_REG_WRITE
+ cp->mac_rx_cfg = cas_setup_multicast(cp);
+#else
+ /* WTZ: Do what Adrian did in cas_set_multicast. Doing
+ * a writel does not seem to be necessary because Cassini
+ * seems to preserve the configuration when we do the reset.
+ * If the chip is in trouble, though, it is not clear if we
+ * can really count on this behavior. cas_set_multicast uses
+ * spin_lock_irqsave, but we are called only in cas_init_hw and
+ * cas_init_hw is protected by cas_lock_all, which calls
+ * spin_lock_irq (so it doesn't need to save the flags, and
+ * we should be OK for the writel, as that is the only
+ * difference).
+ */
+ cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp);
+ writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
+#endif
+ spin_lock(&cp->stat_lock[N_TX_RINGS]);
+ cas_clear_mac_err(cp);
+ spin_unlock(&cp->stat_lock[N_TX_RINGS]);
+
+ /* Setup MAC interrupts. We want to get all of the interesting
+ * counter expiration events, but we do not want to hear about
+ * normal rx/tx as the DMA engine tells us that.
+ */
+ writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
+ writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
+
+ /* Don't enable even the PAUSE interrupts for now, we
+ * make no use of those events other than to record them.
+ */
+ writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
+}
+
+/* Must be invoked under cp->lock. */
+static void cas_init_pause_thresholds(struct cas *cp)
+{
+ /* Calculate pause thresholds. Setting the OFF threshold to the
+ * full RX fifo size effectively disables PAUSE generation
+ */
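+ /* Worked example (sizes purely illustrative): with a 16 KB RX FIFO
+ * and a 1500-byte MTU, max_frame = (1500 + 14 + 4 + 4 + 64) & ~63
+ * = 1536; 3 * 1536 fits in the FIFO, so off = 16384 - 2 * 1536 =
+ * 13312 and on = 13312 - 1536 = 11776.
+ */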
+ if (cp->rx_fifo_size <= (2 * 1024)) {
+ cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
+ } else {
+ int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
+ if (max_frame * 3 > cp->rx_fifo_size) {
+ cp->rx_pause_off = 7104;
+ cp->rx_pause_on = 960;
+ } else {
+ int off = (cp->rx_fifo_size - (max_frame * 2));
+ int on = off - max_frame;
+ cp->rx_pause_off = off;
+ cp->rx_pause_on = on;
+ }
+ }
+}
+
+static int cas_vpd_match(const void __iomem *p, const char *str)
+{
+ int len = strlen(str) + 1;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (readb(p + i) != str[i])
+ return 0;
+ }
+ return 1;
+}
+
+
+/* get the mac address by reading the vpd information in the rom.
+ * also get the phy type and determine if there's an entropy generator.
+ * NOTE: this is a bit convoluted for the following reasons:
+ * 1) vpd info has order-dependent mac addresses for multinic cards
+ * 2) the only way to determine the nic order is to use the slot
+ * number.
+ * 3) fiber cards don't have bridges, so their slot numbers don't
+ * mean anything.
+ * 4) we don't actually know we have a fiber card until after
+ * the mac addresses are parsed.
+ */
+static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
+ const int offset)
+{
+ void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
+ void __iomem *base, *kstart;
+ int i, len;
+ int found = 0;
+#define VPD_FOUND_MAC 0x01
+#define VPD_FOUND_PHY 0x02
+
+ int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
+ int mac_off = 0;
+
+ /* give us access to the PROM */
+ writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
+ cp->regs + REG_BIM_LOCAL_DEV_EN);
+
+ /* check for an expansion rom */
+ if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
+ goto use_random_mac_addr;
+
+ /* search for beginning of vpd */
+ base = NULL;
+ for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
+ /* check for PCIR */
+ if ((readb(p + i + 0) == 0x50) &&
+ (readb(p + i + 1) == 0x43) &&
+ (readb(p + i + 2) == 0x49) &&
+ (readb(p + i + 3) == 0x52)) {
+ base = p + (readb(p + i + 8) |
+ (readb(p + i + 9) << 8));
+ break;
+ }
+ }
+
+ if (!base || (readb(base) != 0x82))
+ goto use_random_mac_addr;
+
+ i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
+ while (i < EXPANSION_ROM_SIZE) {
+ if (readb(base + i) != 0x90) /* no vpd found */
+ goto use_random_mac_addr;
+
+ /* found a vpd field */
+ len = readb(base + i + 1) | (readb(base + i + 2) << 8);
+
+ /* extract keywords */
+ kstart = base + i + 3;
+ p = kstart;
+ while ((p - kstart) < len) {
+ int klen = readb(p + 2);
+ int j;
+ char type;
+
+ p += 3;
+
+ /* look for the following things:
+ * -- correct length == 29
+ * 3 (type) + 2 (size) +
+ * 18 (strlen("local-mac-address") + 1) +
+ * 6 (mac addr)
+ * -- VPD Instance 'I'
+ * -- VPD Type Bytes 'B'
+ * -- VPD data length == 6
+ * -- property string == local-mac-address
+ *
+ * -- correct length == 24
+ * 3 (type) + 2 (size) +
+ * 12 (strlen("entropy-dev") + 1) +
+ * 7 (strlen("vms110") + 1)
+ * -- VPD Instance 'I'
+ * -- VPD Type String 'B'
+ * -- VPD data length == 7
+ * -- property string == entropy-dev
+ *
+ * -- correct length == 18
+ * 3 (type) + 2 (size) +
+ * 9 (strlen("phy-type") + 1) +
+ * 4 (strlen("pcs") + 1)
+ * -- VPD Instance 'I'
+ * -- VPD Type String 'S'
+ * -- VPD data length == 4
+ * -- property string == phy-type
+ *
+ * -- correct length == 23
+ * 3 (type) + 2 (size) +
+ * 14 (strlen("phy-interface") + 1) +
+ * 4 (strlen("pcs") + 1)
+ * -- VPD Instance 'I'
+ * -- VPD Type String 'S'
+ * -- VPD data length == 4
+ * -- property string == phy-interface
+ */
+ if (readb(p) != 'I')
+ goto next;
+
+ /* finally, check string and length */
+ type = readb(p + 3);
+ if (type == 'B') {
+ if ((klen == 29) && readb(p + 4) == 6 &&
+ cas_vpd_match(p + 5,
+ "local-mac-address")) {
+ if (mac_off++ > offset)
+ goto next;
+
+ /* set mac address */
+ for (j = 0; j < 6; j++)
+ dev_addr[j] =
+ readb(p + 23 + j);
+ goto found_mac;
+ }
+ }
+
+ if (type != 'S')
+ goto next;
+
+#ifdef USE_ENTROPY_DEV
+ if ((klen == 24) &&
+ cas_vpd_match(p + 5, "entropy-dev") &&
+ cas_vpd_match(p + 17, "vms110")) {
+ cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
+ goto next;
+ }
+#endif
+
+ if (found & VPD_FOUND_PHY)
+ goto next;
+
+ if ((klen == 18) && readb(p + 4) == 4 &&
+ cas_vpd_match(p + 5, "phy-type")) {
+ if (cas_vpd_match(p + 14, "pcs")) {
+ phy_type = CAS_PHY_SERDES;
+ goto found_phy;
+ }
+ }
+
+ if ((klen == 23) && readb(p + 4) == 4 &&
+ cas_vpd_match(p + 5, "phy-interface")) {
+ if (cas_vpd_match(p + 19, "pcs")) {
+ phy_type = CAS_PHY_SERDES;
+ goto found_phy;
+ }
+ }
+found_mac:
+ found |= VPD_FOUND_MAC;
+ goto next;
+
+found_phy:
+ found |= VPD_FOUND_PHY;
+
+next:
+ p += klen;
+ }
+ i += len + 3;
+ }
+
+use_random_mac_addr:
+ if (found & VPD_FOUND_MAC)
+ goto done;
+
+ /* Sun MAC prefix then 3 random bytes. */
+ printk(KERN_INFO PFX "MAC address not found in ROM VPD\n");
+ dev_addr[0] = 0x08;
+ dev_addr[1] = 0x00;
+ dev_addr[2] = 0x20;
+ get_random_bytes(dev_addr + 3, 3);
+
+done:
+ writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
+ return phy_type;
+}
+
+/* check pci invariants */
+static void cas_check_pci_invariants(struct cas *cp)
+{
+ struct pci_dev *pdev = cp->pdev;
+ u8 rev;
+
+ cp->cas_flags = 0;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
+ if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
+ (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
+ if (rev >= CAS_ID_REVPLUS)
+ cp->cas_flags |= CAS_FLAG_REG_PLUS;
+ if (rev < CAS_ID_REVPLUS02u)
+ cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
+
+ /* Original Cassini supports HW CSUM, but it's not
+ * enabled by default as it can trigger TX hangs.
+ */
+ if (rev < CAS_ID_REV2)
+ cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
+ } else {
+ /* Only sun has original cassini chips. */
+ cp->cas_flags |= CAS_FLAG_REG_PLUS;
+
+ /* We use a flag because the same phy might be externally
+ * connected.
+ */
+ if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
+ (pdev->device == PCI_DEVICE_ID_NS_SATURN))
+ cp->cas_flags |= CAS_FLAG_SATURN;
+ }
+}
+
+
+static int cas_check_invariants(struct cas *cp)
+{
+ struct pci_dev *pdev = cp->pdev;
+ u32 cfg;
+ int i;
+
+ /* get page size for rx buffers. */
+ cp->page_order = 0;
+#ifdef USE_PAGE_ORDER
+ if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
+ /* see if we can allocate larger pages */
+ struct page *page = alloc_pages(GFP_ATOMIC,
+ CAS_JUMBO_PAGE_SHIFT -
+ PAGE_SHIFT);
+ if (page) {
+ __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
+ cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
+ } else {
+ printk(KERN_INFO PFX "MTU limited to %d bytes\n", CAS_MAX_MTU);
+ }
+ }
+#endif
+ cp->page_size = (PAGE_SIZE << cp->page_order);
+
+ /* Fetch the FIFO configurations. */
+ cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
+ cp->rx_fifo_size = RX_FIFO_SIZE;
+
+ /* finish phy determination. MDIO1 takes precedence over MDIO0 if
+ * they're both connected.
+ */
+ cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
+ PCI_SLOT(pdev->devfn));
+ if (cp->phy_type & CAS_PHY_SERDES) {
+ cp->cas_flags |= CAS_FLAG_1000MB_CAP;
+ return 0; /* no more checking needed */
+ }
+
+ /* MII */
+ cfg = readl(cp->regs + REG_MIF_CFG);
+ if (cfg & MIF_CFG_MDIO_1) {
+ cp->phy_type = CAS_PHY_MII_MDIO1;
+ } else if (cfg & MIF_CFG_MDIO_0) {
+ cp->phy_type = CAS_PHY_MII_MDIO0;
+ }
+
+ cas_mif_poll(cp, 0);
+ writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
+
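+ /* Probe each of the 32 possible MII addresses, retrying each a few
+ * times, and keep the first PHY that answers with a valid ID
+ * (neither 0 nor 0xffffffff).
+ */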
+ for (i = 0; i < 32; i++) {
+ u32 phy_id;
+ int j;
+
+ for (j = 0; j < 3; j++) {
+ cp->phy_addr = i;
+ phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
+ phy_id |= cas_phy_read(cp, MII_PHYSID2);
+ if (phy_id && (phy_id != 0xFFFFFFFF)) {
+ cp->phy_id = phy_id;
+ goto done;
+ }
+ }
+ }
+ printk(KERN_ERR PFX "MII phy did not respond [%08x]\n",
+ readl(cp->regs + REG_MIF_STATE_MACHINE));
+ return -1;
+
+done:
+ /* see if we can do gigabit */
+ cfg = cas_phy_read(cp, MII_BMSR);
+ if ((cfg & CAS_BMSR_1000_EXTEND) &&
+ cas_phy_read(cp, CAS_MII_1000_EXTEND))
+ cp->cas_flags |= CAS_FLAG_1000MB_CAP;
+ return 0;
+}
+
+/* Must be invoked under cp->lock. */
+static inline void cas_start_dma(struct cas *cp)
+{
+ int i;
+ u32 val;
+ int txfailed = 0;
+
+ /* enable dma */
+ val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
+ writel(val, cp->regs + REG_TX_CFG);
+ val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
+ writel(val, cp->regs + REG_RX_CFG);
+
+ /* enable the mac */
+ val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
+ writel(val, cp->regs + REG_MAC_TX_CFG);
+ val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
+ writel(val, cp->regs + REG_MAC_RX_CFG);
+
+ i = STOP_TRIES;
+ while (i-- > 0) {
+ val = readl(cp->regs + REG_MAC_TX_CFG);
+ if ((val & MAC_TX_CFG_EN))
+ break;
+ udelay(10);
+ }
+ if (i < 0)
+ txfailed = 1;
+ i = STOP_TRIES;
+ while (i-- > 0) {
+ val = readl(cp->regs + REG_MAC_RX_CFG);
+ if ((val & MAC_RX_CFG_EN)) {
+ if (txfailed) {
+ printk(KERN_ERR
+ "%s: enabling mac failed [tx:%08x:%08x].\n",
+ cp->dev->name,
+ readl(cp->regs + REG_MIF_STATE_MACHINE),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
+ }
+ goto enable_rx_done;
+ }
+ udelay(10);
+ }
+ printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n",
+ cp->dev->name,
+ (txfailed? "tx,rx":"rx"),
+ readl(cp->regs + REG_MIF_STATE_MACHINE),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
+
+enable_rx_done:
+ cas_unmask_intr(cp); /* enable interrupts */
+ writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
+ writel(0, cp->regs + REG_RX_COMP_TAIL);
+
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ if (N_RX_DESC_RINGS > 1)
+ writel(RX_DESC_RINGN_SIZE(1) - 4,
+ cp->regs + REG_PLUS_RX_KICK1);
+
+ for (i = 1; i < N_RX_COMP_RINGS; i++)
+ writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
+ }
+}
+
+/* Must be invoked under cp->lock. */
+static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
+ int *pause)
+{
+ u32 val = readl(cp->regs + REG_PCS_MII_LPA);
+ *fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
+ *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
+ if (val & PCS_MII_LPA_ASYM_PAUSE)
+ *pause |= 0x10;
+ *spd = 1000;
+}
+
+/* Must be invoked under cp->lock. */
+static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
+ int *pause)
+{
+ u32 val;
+
+ *fd = 0;
+ *spd = 10;
+ *pause = 0;
+
+ /* use GMII registers */
+ val = cas_phy_read(cp, MII_LPA);
+ if (val & CAS_LPA_PAUSE)
+ *pause = 0x01;
+
+ if (val & CAS_LPA_ASYM_PAUSE)
+ *pause |= 0x10;
+
+ if (val & LPA_DUPLEX)
+ *fd = 1;
+ if (val & LPA_100)
+ *spd = 100;
+
+ if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
+ val = cas_phy_read(cp, CAS_MII_1000_STATUS);
+ if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
+ *spd = 1000;
+ if (val & CAS_LPA_1000FULL)
+ *fd = 1;
+ }
+}
+
+/* A link-up condition has occurred, initialize and enable the
+ * rest of the chip.
+ *
+ * Must be invoked under cp->lock.
+ */
+static void cas_set_link_modes(struct cas *cp)
+{
+ u32 val;
+ int full_duplex, speed, pause;
+
+ full_duplex = 0;
+ speed = 10;
+ pause = 0;
+
+ if (CAS_PHY_MII(cp->phy_type)) {
+ cas_mif_poll(cp, 0);
+ val = cas_phy_read(cp, MII_BMCR);
+ if (val & BMCR_ANENABLE) {
+ cas_read_mii_link_mode(cp, &full_duplex, &speed,
+ &pause);
+ } else {
+ if (val & BMCR_FULLDPLX)
+ full_duplex = 1;
+
+ if (val & BMCR_SPEED100)
+ speed = 100;
+ else if (val & CAS_BMCR_SPEED1000)
+ speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
+ 1000 : 100;
+ }
+ cas_mif_poll(cp, 1);
+
+ } else {
+ val = readl(cp->regs + REG_PCS_MII_CTRL);
+ cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
+ if ((val & PCS_MII_AUTONEG_EN) == 0) {
+ if (val & PCS_MII_CTRL_DUPLEX)
+ full_duplex = 1;
+ }
+ }
+
+ if (netif_msg_link(cp))
+ printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n",
+ cp->dev->name, speed, (full_duplex ? "full" : "half"));
+
+ val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
+ if (CAS_PHY_MII(cp->phy_type)) {
+ val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
+ if (!full_duplex)
+ val |= MAC_XIF_DISABLE_ECHO;
+ }
+ if (full_duplex)
+ val |= MAC_XIF_FDPLX_LED;
+ if (speed == 1000)
+ val |= MAC_XIF_GMII_MODE;
+ writel(val, cp->regs + REG_MAC_XIF_CFG);
+
+ /* deal with carrier and collision detect. */
+ val = MAC_TX_CFG_IPG_EN;
+ if (full_duplex) {
+ val |= MAC_TX_CFG_IGNORE_CARRIER;
+ val |= MAC_TX_CFG_IGNORE_COLL;
+ } else {
+#ifndef USE_CSMA_CD_PROTO
+ val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
+ val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
+#endif
+ }
+ /* val now set up for REG_MAC_TX_CFG */
+
+ /* If gigabit and half-duplex, enable carrier extension
+	 * mode and increase the slot time to 512 bytes. otherwise,
+	 * disable carrier extension and make sure the slot time is
+	 * 64 bytes. also activate the checksum bug workaround.
+ */
+ if ((speed == 1000) && !full_duplex) {
+ writel(val | MAC_TX_CFG_CARRIER_EXTEND,
+ cp->regs + REG_MAC_TX_CFG);
+
+ val = readl(cp->regs + REG_MAC_RX_CFG);
+ val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
+ writel(val | MAC_RX_CFG_CARRIER_EXTEND,
+ cp->regs + REG_MAC_RX_CFG);
+
+ writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
+
+ cp->crc_size = 4;
+ /* minimum size gigabit frame at half duplex */
+ cp->min_frame_size = CAS_1000MB_MIN_FRAME;
+
+ } else {
+ writel(val, cp->regs + REG_MAC_TX_CFG);
+
+ /* checksum bug workaround. don't strip FCS when in
+ * half-duplex mode
+ */
+ val = readl(cp->regs + REG_MAC_RX_CFG);
+ if (full_duplex) {
+ val |= MAC_RX_CFG_STRIP_FCS;
+ cp->crc_size = 0;
+ cp->min_frame_size = CAS_MIN_MTU;
+ } else {
+ val &= ~MAC_RX_CFG_STRIP_FCS;
+ cp->crc_size = 4;
+ cp->min_frame_size = CAS_MIN_FRAME;
+ }
+ writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
+ cp->regs + REG_MAC_RX_CFG);
+ writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
+ }
+
+ if (netif_msg_link(cp)) {
+ if (pause & 0x01) {
+ printk(KERN_INFO "%s: Pause is enabled "
+ "(rxfifo: %d off: %d on: %d)\n",
+ cp->dev->name,
+ cp->rx_fifo_size,
+ cp->rx_pause_off,
+ cp->rx_pause_on);
+ } else if (pause & 0x10) {
+ printk(KERN_INFO "%s: TX pause enabled\n",
+ cp->dev->name);
+ } else {
+ printk(KERN_INFO "%s: Pause is disabled\n",
+ cp->dev->name);
+ }
+ }
+
+ val = readl(cp->regs + REG_MAC_CTRL_CFG);
+ val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
+ if (pause) { /* symmetric or asymmetric pause */
+ val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
+ if (pause & 0x01) { /* symmetric pause */
+ val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
+ }
+ }
+ writel(val, cp->regs + REG_MAC_CTRL_CFG);
+ cas_start_dma(cp);
+}
+
+/* Must be invoked under cp->lock. */
+static void cas_init_hw(struct cas *cp, int restart_link)
+{
+ if (restart_link)
+ cas_phy_init(cp);
+
+ cas_init_pause_thresholds(cp);
+ cas_init_mac(cp);
+ cas_init_dma(cp);
+
+ if (restart_link) {
+ /* Default aneg parameters */
+ cp->timer_ticks = 0;
+ cas_begin_auto_negotiation(cp, NULL);
+ } else if (cp->lstate == link_up) {
+ cas_set_link_modes(cp);
+ netif_carrier_on(cp->dev);
+ }
+}
+
+/* Must be invoked under cp->lock. on earlier cassini boards,
+ * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
+ * let it settle out, and then restore pci state.
+ */
+static void cas_hard_reset(struct cas *cp)
+{
+ writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
+ udelay(20);
+ pci_restore_state(cp->pdev);
+}
+
+
+static void cas_global_reset(struct cas *cp, int blkflag)
+{
+ int limit;
+
+ /* issue a global reset. don't use RSTOUT. */
+ if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
+ /* For PCS, when the blkflag is set, we should set the
+		 * SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of
+ * the last autonegotiation from being cleared. We'll
+ * need some special handling if the chip is set into a
+ * loopback mode.
+ */
+ writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
+ cp->regs + REG_SW_RESET);
+ } else {
+ writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
+ }
+
+ /* need to wait at least 3ms before polling register */
+ mdelay(3);
+
+ limit = STOP_TRIES;
+ while (limit-- > 0) {
+ u32 val = readl(cp->regs + REG_SW_RESET);
+ if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
+ goto done;
+ udelay(10);
+ }
+ printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name);
+
+done:
+ /* enable various BIM interrupts */
+ writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
+ BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
+
+ /* clear out pci error status mask for handled errors.
+ * we don't deal with DMA counter overflows as they happen
+ * all the time.
+ */
+ writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
+ PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
+ PCI_ERR_BIM_DMA_READ), cp->regs +
+ REG_PCI_ERR_STATUS_MASK);
+
+ /* set up for MII by default to address mac rx reset timeout
+ * issue
+ */
+ writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
+}
+
+static void cas_reset(struct cas *cp, int blkflag)
+{
+ u32 val;
+
+ cas_mask_intr(cp);
+ cas_global_reset(cp, blkflag);
+ cas_mac_reset(cp);
+ cas_entropy_reset(cp);
+
+ /* disable dma engines. */
+ val = readl(cp->regs + REG_TX_CFG);
+ val &= ~TX_CFG_DMA_EN;
+ writel(val, cp->regs + REG_TX_CFG);
+
+ val = readl(cp->regs + REG_RX_CFG);
+ val &= ~RX_CFG_DMA_EN;
+ writel(val, cp->regs + REG_RX_CFG);
+
+ /* program header parser */
+ if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
+ (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
+ cas_load_firmware(cp, CAS_HP_FIRMWARE);
+ } else {
+ cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
+ }
+
+ /* clear out error registers */
+ spin_lock(&cp->stat_lock[N_TX_RINGS]);
+ cas_clear_mac_err(cp);
+ spin_unlock(&cp->stat_lock[N_TX_RINGS]);
+}
+
+/* Shut down the chip, must be called with pm_sem held. */
+static void cas_shutdown(struct cas *cp)
+{
+ unsigned long flags;
+
+ /* Make us not-running to avoid timers respawning */
+ cp->hw_running = 0;
+
+ del_timer_sync(&cp->link_timer);
+
+ /* Stop the reset task */
+#if 0
+ while (atomic_read(&cp->reset_task_pending_mtu) ||
+ atomic_read(&cp->reset_task_pending_spare) ||
+ atomic_read(&cp->reset_task_pending_all))
+ schedule();
+
+#else
+ while (atomic_read(&cp->reset_task_pending))
+ schedule();
+#endif
+ /* Actually stop the chip */
+ cas_lock_all_save(cp, flags);
+ cas_reset(cp, 0);
+ if (cp->cas_flags & CAS_FLAG_SATURN)
+ cas_phy_powerdown(cp);
+ cas_unlock_all_restore(cp, flags);
+}
+
+static int cas_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct cas *cp = netdev_priv(dev);
+
+ if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ if (!netif_running(dev) || !netif_device_present(dev))
+ return 0;
+
+ /* let the reset task handle it */
+#if 1
+ atomic_inc(&cp->reset_task_pending);
+ if ((cp->phy_type & CAS_PHY_SERDES)) {
+ atomic_inc(&cp->reset_task_pending_all);
+ } else {
+ atomic_inc(&cp->reset_task_pending_mtu);
+ }
+ schedule_work(&cp->reset_task);
+#else
+ atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
+ CAS_RESET_ALL : CAS_RESET_MTU);
+ printk(KERN_ERR "reset called in cas_change_mtu\n");
+ schedule_work(&cp->reset_task);
+#endif
+
+ flush_scheduled_work();
+ return 0;
+}
+
+static void cas_clean_txd(struct cas *cp, int ring)
+{
+ struct cas_tx_desc *txd = cp->init_txds[ring];
+ struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
+ u64 daddr, dlen;
+ int i, size;
+
+ size = TX_DESC_RINGN_SIZE(ring);
+ for (i = 0; i < size; i++) {
+ int frag;
+
+ if (skbs[i] == NULL)
+ continue;
+
+ skb = skbs[i];
+ skbs[i] = NULL;
+
+ for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+ int ent = i & (size - 1);
+
+ /* first buffer is never a tiny buffer and so
+ * needs to be unmapped.
+ */
+ daddr = le64_to_cpu(txd[ent].buffer);
+ dlen = CAS_VAL(TX_DESC_BUFLEN,
+ le64_to_cpu(txd[ent].control));
+ pci_unmap_page(cp->pdev, daddr, dlen,
+ PCI_DMA_TODEVICE);
+
+ if (frag != skb_shinfo(skb)->nr_frags) {
+ i++;
+
+				/* next buffer might be a tiny buffer.
+ * skip past it.
+ */
+ ent = i & (size - 1);
+ if (cp->tx_tiny_use[ring][ent].used)
+ i++;
+ }
+ }
+ dev_kfree_skb_any(skb);
+ }
+
+ /* zero out tiny buf usage */
+ memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
+}
+
+/* freed on close */
+static inline void cas_free_rx_desc(struct cas *cp, int ring)
+{
+ cas_page_t **page = cp->rx_pages[ring];
+ int i, size;
+
+ size = RX_DESC_RINGN_SIZE(ring);
+ for (i = 0; i < size; i++) {
+ if (page[i]) {
+ cas_page_free(cp, page[i]);
+ page[i] = NULL;
+ }
+ }
+}
+
+static void cas_free_rxds(struct cas *cp)
+{
+ int i;
+
+ for (i = 0; i < N_RX_DESC_RINGS; i++)
+ cas_free_rx_desc(cp, i);
+}
+
+/* Must be invoked under cp->lock. */
+static void cas_clean_rings(struct cas *cp)
+{
+ int i;
+
+ /* need to clean all tx rings */
+ memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
+ memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
+ for (i = 0; i < N_TX_RINGS; i++)
+ cas_clean_txd(cp, i);
+
+ /* zero out init block */
+ memset(cp->init_block, 0, sizeof(struct cas_init_block));
+ cas_clean_rxds(cp);
+ cas_clean_rxcs(cp);
+}
+
+/* allocated on open */
+static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
+{
+ cas_page_t **page = cp->rx_pages[ring];
+ int size, i = 0;
+
+ size = RX_DESC_RINGN_SIZE(ring);
+ for (i = 0; i < size; i++) {
+ if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
+ return -1;
+ }
+ return 0;
+}
+
+static int cas_alloc_rxds(struct cas *cp)
+{
+ int i;
+
+ for (i = 0; i < N_RX_DESC_RINGS; i++) {
+ if (cas_alloc_rx_desc(cp, i) < 0) {
+ cas_free_rxds(cp);
+ return -1;
+ }
+ }
+ return 0;
+}
+
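+/* cas_reset_task() can be scheduled for several reasons. The
+ * reset_task_pending_all/_spare/_mtu counters record which kind of reset
+ * was requested, while reset_task_pending counts the scheduled work items
+ * so that callers such as cas_shutdown() can wait for the task to drain.
+ */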
+static void cas_reset_task(void *data)
+{
+ struct cas *cp = (struct cas *) data;
+#if 0
+ int pending = atomic_read(&cp->reset_task_pending);
+#else
+ int pending_all = atomic_read(&cp->reset_task_pending_all);
+ int pending_spare = atomic_read(&cp->reset_task_pending_spare);
+ int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
+
+ if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
+ /* We can have more tasks scheduled than actually
+ * needed.
+ */
+ atomic_dec(&cp->reset_task_pending);
+ return;
+ }
+#endif
+ /* The link went down, we reset the ring, but keep
+ * DMA stopped. Use this function for reset
+ * on error as well.
+ */
+ if (cp->hw_running) {
+ unsigned long flags;
+
+ /* Make sure we don't get interrupts or tx packets */
+ netif_device_detach(cp->dev);
+ cas_lock_all_save(cp, flags);
+
+ if (cp->opened) {
+			/* We call cas_spare_recover when we call cas_open,
+ * but we do not initialize the lists cas_spare_recover
+ * uses until cas_open is called.
+ */
+ cas_spare_recover(cp, GFP_ATOMIC);
+ }
+#if 1
+ /* test => only pending_spare set */
+ if (!pending_all && !pending_mtu)
+ goto done;
+#else
+ if (pending == CAS_RESET_SPARE)
+ goto done;
+#endif
+ /* when pending == CAS_RESET_ALL, the following
+ * call to cas_init_hw will restart auto negotiation.
+ * Setting the second argument of cas_reset to
+ * !(pending == CAS_RESET_ALL) will set this argument
+ * to 1 (avoiding reinitializing the PHY for the normal
+ * PCS case) when auto negotiation is not restarted.
+ */
+#if 1
+ cas_reset(cp, !(pending_all > 0));
+ if (cp->opened)
+ cas_clean_rings(cp);
+ cas_init_hw(cp, (pending_all > 0));
+#else
+ cas_reset(cp, !(pending == CAS_RESET_ALL));
+ if (cp->opened)
+ cas_clean_rings(cp);
+ cas_init_hw(cp, pending == CAS_RESET_ALL);
+#endif
+
+done:
+ cas_unlock_all_restore(cp, flags);
+ netif_device_attach(cp->dev);
+ }
+#if 1
+ atomic_sub(pending_all, &cp->reset_task_pending_all);
+ atomic_sub(pending_spare, &cp->reset_task_pending_spare);
+ atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
+ atomic_dec(&cp->reset_task_pending);
+#else
+ atomic_set(&cp->reset_task_pending, 0);
+#endif
+}
+
+static void cas_link_timer(unsigned long data)
+{
+ struct cas *cp = (struct cas *) data;
+ int mask, pending = 0, reset = 0;
+ unsigned long flags;
+
+ if (link_transition_timeout != 0 &&
+ cp->link_transition_jiffies_valid &&
+ ((jiffies - cp->link_transition_jiffies) >
+ (link_transition_timeout))) {
+ /* One-second counter so link-down workaround doesn't
+ * cause resets to occur so fast as to fool the switch
+ * into thinking the link is down.
+ */
+ cp->link_transition_jiffies_valid = 0;
+ }
+
+ if (!cp->hw_running)
+ return;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_lock_tx(cp);
+ cas_entropy_gather(cp);
+
+ /* If the link task is still pending, we just
+ * reschedule the link timer
+ */
+#if 1
+ if (atomic_read(&cp->reset_task_pending_all) ||
+ atomic_read(&cp->reset_task_pending_spare) ||
+ atomic_read(&cp->reset_task_pending_mtu))
+ goto done;
+#else
+ if (atomic_read(&cp->reset_task_pending))
+ goto done;
+#endif
+
+ /* check for rx cleaning */
+ if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
+ int i, rmask;
+
+ for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
+ rmask = CAS_FLAG_RXD_POST(i);
+ if ((mask & rmask) == 0)
+ continue;
+
+ /* post_rxds will do a mod_timer */
+ if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
+ pending = 1;
+ continue;
+ }
+ cp->cas_flags &= ~rmask;
+ }
+ }
+
+ if (CAS_PHY_MII(cp->phy_type)) {
+ u16 bmsr;
+ cas_mif_poll(cp, 0);
+ bmsr = cas_phy_read(cp, MII_BMSR);
+ /* WTZ: Solaris driver reads this twice, but that
+ * may be due to the PCS case and the use of a
+ * common implementation. Read it twice here to be
+ * safe.
+ */
+ bmsr = cas_phy_read(cp, MII_BMSR);
+ cas_mif_poll(cp, 1);
+ readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
+ reset = cas_mii_link_check(cp, bmsr);
+ } else {
+ reset = cas_pcs_link_check(cp);
+ }
+
+ if (reset)
+ goto done;
+
+ /* check for tx state machine confusion */
+ if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
+ u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
+ u32 wptr, rptr;
+ int tlm = CAS_VAL(MAC_SM_TLM, val);
+
+ if (((tlm == 0x5) || (tlm == 0x3)) &&
+ (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
+ if (netif_msg_tx_err(cp))
+ printk(KERN_DEBUG "%s: tx err: "
+ "MAC_STATE[%08x]\n",
+ cp->dev->name, val);
+ reset = 1;
+ goto done;
+ }
+
+ val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
+ wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
+ rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
+ if ((val == 0) && (wptr != rptr)) {
+ if (netif_msg_tx_err(cp))
+ printk(KERN_DEBUG "%s: tx err: "
+ "TX_FIFO[%08x:%08x:%08x]\n",
+ cp->dev->name, val, wptr, rptr);
+ reset = 1;
+ }
+
+ if (reset)
+ cas_hard_reset(cp);
+ }
+
+done:
+ if (reset) {
+#if 1
+ atomic_inc(&cp->reset_task_pending);
+ atomic_inc(&cp->reset_task_pending_all);
+ schedule_work(&cp->reset_task);
+#else
+ atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
+ printk(KERN_ERR "reset called in cas_link_timer\n");
+ schedule_work(&cp->reset_task);
+#endif
+ }
+
+ if (!pending)
+ mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
+ cas_unlock_tx(cp);
+ spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+/* tiny buffers are used to avoid target abort issues with
+ * older cassini chips.
+ */
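+/* Each TX ring gets one block of TX_TINY_BUF_BLOCK bytes of consistent
+ * memory. Descriptors that point into this block are flagged in
+ * tx_tiny_use[], and cas_clean_txd() skips over them instead of
+ * unmapping them.
+ */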
+static void cas_tx_tiny_free(struct cas *cp)
+{
+ struct pci_dev *pdev = cp->pdev;
+ int i;
+
+ for (i = 0; i < N_TX_RINGS; i++) {
+ if (!cp->tx_tiny_bufs[i])
+ continue;
+
+ pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
+ cp->tx_tiny_bufs[i],
+ cp->tx_tiny_dvma[i]);
+ cp->tx_tiny_bufs[i] = NULL;
+ }
+}
+
+static int cas_tx_tiny_alloc(struct cas *cp)
+{
+ struct pci_dev *pdev = cp->pdev;
+ int i;
+
+ for (i = 0; i < N_TX_RINGS; i++) {
+ cp->tx_tiny_bufs[i] =
+ pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
+ &cp->tx_tiny_dvma[i]);
+ if (!cp->tx_tiny_bufs[i]) {
+ cas_tx_tiny_free(cp);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+
+static int cas_open(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ int hw_was_up, err;
+ unsigned long flags;
+
+ down(&cp->pm_sem);
+
+ hw_was_up = cp->hw_running;
+
+ /* The power-management semaphore protects the hw_running
+	 * and related state, so it is safe to do this bit without cp->lock
+ */
+ if (!cp->hw_running) {
+ /* Reset the chip */
+ cas_lock_all_save(cp, flags);
+ /* We set the second arg to cas_reset to zero
+ * because cas_init_hw below will have its second
+ * argument set to non-zero, which will force
+ * autonegotiation to start.
+ */
+ cas_reset(cp, 0);
+ cp->hw_running = 1;
+ cas_unlock_all_restore(cp, flags);
+ }
+
+ if (cas_tx_tiny_alloc(cp) < 0)
+ return -ENOMEM;
+
+ /* alloc rx descriptors */
+ err = -ENOMEM;
+ if (cas_alloc_rxds(cp) < 0)
+ goto err_tx_tiny;
+
+ /* allocate spares */
+ cas_spare_init(cp);
+ cas_spare_recover(cp, GFP_KERNEL);
+
+ /* We can now request the interrupt as we know it's masked
+ * on the controller. cassini+ has up to 4 interrupts
+ * that can be used, but you need to do explicit pci interrupt
+ * mapping to expose them
+ */
+ if (request_irq(cp->pdev->irq, cas_interrupt,
+ SA_SHIRQ, dev->name, (void *) dev)) {
+ printk(KERN_ERR "%s: failed to request irq !\n",
+ cp->dev->name);
+ err = -EAGAIN;
+ goto err_spare;
+ }
+
+ /* init hw */
+ cas_lock_all_save(cp, flags);
+ cas_clean_rings(cp);
+ cas_init_hw(cp, !hw_was_up);
+ cp->opened = 1;
+ cas_unlock_all_restore(cp, flags);
+
+ netif_start_queue(dev);
+ up(&cp->pm_sem);
+ return 0;
+
+err_spare:
+ cas_spare_free(cp);
+ cas_free_rxds(cp);
+err_tx_tiny:
+ cas_tx_tiny_free(cp);
+ up(&cp->pm_sem);
+ return err;
+}
+
+static int cas_close(struct net_device *dev)
+{
+ unsigned long flags;
+ struct cas *cp = netdev_priv(dev);
+
+ /* Make sure we don't get distracted by suspend/resume */
+ down(&cp->pm_sem);
+
+ netif_stop_queue(dev);
+
+ /* Stop traffic, mark us closed */
+ cas_lock_all_save(cp, flags);
+ cp->opened = 0;
+ cas_reset(cp, 0);
+ cas_phy_init(cp);
+ cas_begin_auto_negotiation(cp, NULL);
+ cas_clean_rings(cp);
+ cas_unlock_all_restore(cp, flags);
+
+ free_irq(cp->pdev->irq, (void *) dev);
+ cas_spare_free(cp);
+ cas_free_rxds(cp);
+ cas_tx_tiny_free(cp);
+ up(&cp->pm_sem);
+ return 0;
+}
+
+static struct {
+ const char name[ETH_GSTRING_LEN];
+} ethtool_cassini_statnames[] = {
+ {"collisions"},
+ {"rx_bytes"},
+ {"rx_crc_errors"},
+ {"rx_dropped"},
+ {"rx_errors"},
+ {"rx_fifo_errors"},
+ {"rx_frame_errors"},
+ {"rx_length_errors"},
+ {"rx_over_errors"},
+ {"rx_packets"},
+ {"tx_aborted_errors"},
+ {"tx_bytes"},
+ {"tx_dropped"},
+ {"tx_errors"},
+ {"tx_fifo_errors"},
+ {"tx_packets"}
+};
+#define CAS_NUM_STAT_KEYS (sizeof(ethtool_cassini_statnames)/ETH_GSTRING_LEN)
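+/* The order of the names above must match the order in which
+ * cas_get_ethtool_stats() fills in its data array below.
+ */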
+
+static struct {
+	const int offsets; /* neg. values for 2nd arg to cas_phy_read */
+} ethtool_register_table[] = {
+ {-MII_BMSR},
+ {-MII_BMCR},
+ {REG_CAWR},
+ {REG_INF_BURST},
+ {REG_BIM_CFG},
+ {REG_RX_CFG},
+ {REG_HP_CFG},
+ {REG_MAC_TX_CFG},
+ {REG_MAC_RX_CFG},
+ {REG_MAC_CTRL_CFG},
+ {REG_MAC_XIF_CFG},
+ {REG_MIF_CFG},
+ {REG_PCS_CFG},
+ {REG_SATURN_PCFG},
+ {REG_PCS_MII_STATUS},
+ {REG_PCS_STATE_MACHINE},
+ {REG_MAC_COLL_EXCESS},
+ {REG_MAC_COLL_LATE}
+};
+#define CAS_REG_LEN (sizeof(ethtool_register_table)/sizeof(int))
+#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
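+/* cas_read_regs() walks this table in order: entries with a negative
+ * offset are MII PHY registers fetched with cas_phy_read(), the rest are
+ * chip registers fetched with readl(). Each entry contributes one 32-bit
+ * word to the ethtool register dump.
+ */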
+
+static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
+{
+ u8 *p;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
+ u16 hval;
+ u32 val;
+ if (ethtool_register_table[i].offsets < 0) {
+ hval = cas_phy_read(cp,
+ -ethtool_register_table[i].offsets);
+ val = hval;
+ } else {
+			val = readl(cp->regs + ethtool_register_table[i].offsets);
+ }
+ memcpy(p, (u8 *)&val, sizeof(u32));
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+static struct net_device_stats *cas_get_stats(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ struct net_device_stats *stats = cp->net_stats;
+ unsigned long flags;
+ int i;
+ unsigned long tmp;
+
+ /* we collate all of the stats into net_stats[N_TX_RING] */
+ if (!cp->hw_running)
+ return stats + N_TX_RINGS;
+
+ /* collect outstanding stats */
+ /* WTZ: the Cassini spec gives these as 16 bit counters but
+ * stored in 32-bit words. Added a mask of 0xffff to be safe,
+ * in case the chip somehow puts any garbage in the other bits.
+	 * Also, counter usage didn't seem to match what Adrian did
+ * in the parts of the code that set these quantities. Made
+ * that consistent.
+ */
+ spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
+ stats[N_TX_RINGS].rx_crc_errors +=
+ readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
+ stats[N_TX_RINGS].rx_frame_errors +=
+		readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff;
+ stats[N_TX_RINGS].rx_length_errors +=
+ readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
+#if 1
+ tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
+ (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
+ stats[N_TX_RINGS].tx_aborted_errors += tmp;
+ stats[N_TX_RINGS].collisions +=
+ tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
+#else
+ stats[N_TX_RINGS].tx_aborted_errors +=
+ readl(cp->regs + REG_MAC_COLL_EXCESS);
+ stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
+ readl(cp->regs + REG_MAC_COLL_LATE);
+#endif
+ cas_clear_mac_err(cp);
+
+ /* saved bits that are unique to ring 0 */
+ spin_lock(&cp->stat_lock[0]);
+ stats[N_TX_RINGS].collisions += stats[0].collisions;
+ stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors;
+ stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors;
+ stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors;
+ stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
+ stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors;
+ spin_unlock(&cp->stat_lock[0]);
+
+ for (i = 0; i < N_TX_RINGS; i++) {
+ spin_lock(&cp->stat_lock[i]);
+ stats[N_TX_RINGS].rx_length_errors +=
+ stats[i].rx_length_errors;
+ stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
+ stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
+ stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
+ stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
+ stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
+ stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
+ stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
+ stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
+ stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
+ memset(stats + i, 0, sizeof(struct net_device_stats));
+ spin_unlock(&cp->stat_lock[i]);
+ }
+ spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
+ return stats + N_TX_RINGS;
+}
+
+
+static void cas_set_multicast(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ u32 rxcfg, rxcfg_new;
+ unsigned long flags;
+ int limit = STOP_TRIES;
+
+ if (!cp->hw_running)
+ return;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
+
+ /* disable RX MAC and wait for completion */
+ writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
+ while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
+ if (!limit--)
+ break;
+ udelay(10);
+ }
+
+ /* disable hash filter and wait for completion */
+ limit = STOP_TRIES;
+ rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
+ writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
+ while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
+ if (!limit--)
+ break;
+ udelay(10);
+ }
+
+ /* program hash filters */
+ cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
+ rxcfg |= rxcfg_new;
+ writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
+ spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct cas *cp = netdev_priv(dev);
+ strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
+ strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
+ info->fw_version[0] = '\0';
+ strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
+ info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
+ cp->casreg_len : CAS_MAX_REGS;
+ info->n_stats = CAS_NUM_STAT_KEYS;
+}
+
+static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cas *cp = netdev_priv(dev);
+ u16 bmcr;
+ int full_duplex, speed, pause;
+ unsigned long flags;
+ enum link_state linkstate = link_up;
+
+ cmd->advertising = 0;
+ cmd->supported = SUPPORTED_Autoneg;
+ if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
+ cmd->supported |= SUPPORTED_1000baseT_Full;
+ cmd->advertising |= ADVERTISED_1000baseT_Full;
+ }
+
+ /* Record PHY settings if HW is on. */
+ spin_lock_irqsave(&cp->lock, flags);
+ bmcr = 0;
+ linkstate = cp->lstate;
+ if (CAS_PHY_MII(cp->phy_type)) {
+ cmd->port = PORT_MII;
+ cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
+ XCVR_INTERNAL : XCVR_EXTERNAL;
+ cmd->phy_address = cp->phy_addr;
+ cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full;
+
+ cmd->supported |=
+ (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_TP | SUPPORTED_MII);
+
+ if (cp->hw_running) {
+ cas_mif_poll(cp, 0);
+ bmcr = cas_phy_read(cp, MII_BMCR);
+ cas_read_mii_link_mode(cp, &full_duplex,
+ &speed, &pause);
+ cas_mif_poll(cp, 1);
+ }
+
+ } else {
+ cmd->port = PORT_FIBRE;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->phy_address = 0;
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+
+ if (cp->hw_running) {
+ /* pcs uses the same bits as mii */
+ bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
+ cas_read_pcs_link_mode(cp, &full_duplex,
+ &speed, &pause);
+ }
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ if (bmcr & BMCR_ANENABLE) {
+ cmd->advertising |= ADVERTISED_Autoneg;
+ cmd->autoneg = AUTONEG_ENABLE;
+ cmd->speed = ((speed == 10) ?
+ SPEED_10 :
+ ((speed == 1000) ?
+ SPEED_1000 : SPEED_100));
+ cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ } else {
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->speed =
+ (bmcr & CAS_BMCR_SPEED1000) ?
+ SPEED_1000 :
+ ((bmcr & BMCR_SPEED100) ? SPEED_100:
+ SPEED_10);
+ cmd->duplex =
+ (bmcr & BMCR_FULLDPLX) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
+ if (linkstate != link_up) {
+ /* Force these to "unknown" if the link is not up and
+		 * autonegotiation is enabled. We can set the link
+ * speed to 0, but not cmd->duplex,
+ * because its legal values are 0 and 1. Ethtool will
+ * print the value reported in parentheses after the
+ * word "Unknown" for unrecognized values.
+ *
+ * If in forced mode, we report the speed and duplex
+ * settings that we configured.
+ */
+ if (cp->link_cntl & BMCR_ANENABLE) {
+ cmd->speed = 0;
+ cmd->duplex = 0xff;
+ } else {
+ cmd->speed = SPEED_10;
+ if (cp->link_cntl & BMCR_SPEED100) {
+ cmd->speed = SPEED_100;
+ } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
+ cmd->speed = SPEED_1000;
+ }
+ cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
+ }
+ return 0;
+}
+
+static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ /* Verify the settings we care about. */
+ if (cmd->autoneg != AUTONEG_ENABLE &&
+ cmd->autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_DISABLE &&
+ ((cmd->speed != SPEED_1000 &&
+ cmd->speed != SPEED_100 &&
+ cmd->speed != SPEED_10) ||
+ (cmd->duplex != DUPLEX_HALF &&
+ cmd->duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
+ /* Apply settings and restart link process. */
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_begin_auto_negotiation(cp, cmd);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return 0;
+}
+
+static int cas_nway_reset(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ if ((cp->link_cntl & BMCR_ANENABLE) == 0)
+ return -EINVAL;
+
+ /* Restart link process. */
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_begin_auto_negotiation(cp, NULL);
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ return 0;
+}
+
+static u32 cas_get_link(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ return cp->lstate == link_up;
+}
+
+static u32 cas_get_msglevel(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ return cp->msg_enable;
+}
+
+static void cas_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct cas *cp = netdev_priv(dev);
+ cp->msg_enable = value;
+}
+
+static int cas_get_regs_len(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
+}
+
+static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct cas *cp = netdev_priv(dev);
+ regs->version = 0;
+ /* cas_read_regs handles locks (cp->lock). */
+ cas_read_regs(cp, p, regs->len / sizeof(u32));
+}
+
+static int cas_get_stats_count(struct net_device *dev)
+{
+ return CAS_NUM_STAT_KEYS;
+}
+
+static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ memcpy(data, &ethtool_cassini_statnames,
+ CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
+}
+
+static void cas_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *estats, u64 *data)
+{
+ struct cas *cp = netdev_priv(dev);
+ struct net_device_stats *stats = cas_get_stats(cp->dev);
+ int i = 0;
+ data[i++] = stats->collisions;
+ data[i++] = stats->rx_bytes;
+ data[i++] = stats->rx_crc_errors;
+ data[i++] = stats->rx_dropped;
+ data[i++] = stats->rx_errors;
+ data[i++] = stats->rx_fifo_errors;
+ data[i++] = stats->rx_frame_errors;
+ data[i++] = stats->rx_length_errors;
+ data[i++] = stats->rx_over_errors;
+ data[i++] = stats->rx_packets;
+ data[i++] = stats->tx_aborted_errors;
+ data[i++] = stats->tx_bytes;
+ data[i++] = stats->tx_dropped;
+ data[i++] = stats->tx_errors;
+ data[i++] = stats->tx_fifo_errors;
+ data[i++] = stats->tx_packets;
+ BUG_ON(i != CAS_NUM_STAT_KEYS);
+}
+
+static struct ethtool_ops cas_ethtool_ops = {
+ .get_drvinfo = cas_get_drvinfo,
+ .get_settings = cas_get_settings,
+ .set_settings = cas_set_settings,
+ .nway_reset = cas_nway_reset,
+ .get_link = cas_get_link,
+ .get_msglevel = cas_get_msglevel,
+ .set_msglevel = cas_set_msglevel,
+ .get_regs_len = cas_get_regs_len,
+ .get_regs = cas_get_regs,
+ .get_stats_count = cas_get_stats_count,
+ .get_strings = cas_get_strings,
+ .get_ethtool_stats = cas_get_ethtool_stats,
+};
+
+static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct cas *cp = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+ unsigned long flags;
+ int rc = -EOPNOTSUPP;
+
+	/* Hold the PM semaphore while doing ioctls or we may collide
+ * with open/close and power management and oops.
+ */
+ down(&cp->pm_sem);
+ switch (cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ data->phy_id = cp->phy_addr;
+ /* Fallthrough... */
+
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_mif_poll(cp, 0);
+ data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
+ cas_mif_poll(cp, 1);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ rc = 0;
+ break;
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ if (!capable(CAP_NET_ADMIN)) {
+ rc = -EPERM;
+ break;
+ }
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_mif_poll(cp, 0);
+ rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
+ cas_mif_poll(cp, 1);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ break;
+ default:
+ break;
+ };
+
+ up(&cp->pm_sem);
+ return rc;
+}
+
+static int __devinit cas_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int cas_version_printed = 0;
+ unsigned long casreg_base, casreg_len;
+ struct net_device *dev;
+ struct cas *cp;
+ int i, err, pci_using_dac;
+ u16 pci_cmd;
+ u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
+
+ if (cas_version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot enable PCI device, "
+ "aborting.\n");
+ return err;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ printk(KERN_ERR PFX "Cannot find proper PCI device "
+ "base address, aborting.\n");
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ dev = alloc_etherdev(sizeof(*cp));
+ if (!dev) {
+ printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ err = -ENOMEM;
+ goto err_out_disable_pdev;
+ }
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = pci_request_regions(pdev, dev->name);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot obtain PCI resources, "
+ "aborting.\n");
+ goto err_out_free_netdev;
+ }
+ pci_set_master(pdev);
+
+ /* we must always turn on parity response or else parity
+ * doesn't get generated properly. disable SERR/PERR as well.
+ * in addition, we want to turn MWI on.
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd &= ~PCI_COMMAND_SERR;
+ pci_cmd |= PCI_COMMAND_PARITY;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+ pci_set_mwi(pdev);
+ /*
+ * On some architectures, the default cache line size set
+	 * by pci_set_mwi reduces performance.  We have to increase
+ * it for this case. To start, we'll print some configuration
+ * data.
+ */
+#if 1
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+ &orig_cacheline_size);
+ if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
+ cas_cacheline_size =
+ (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
+ CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
+ if (pci_write_config_byte(pdev,
+ PCI_CACHE_LINE_SIZE,
+ cas_cacheline_size)) {
+ printk(KERN_ERR PFX "Could not set PCI cache "
+ "line size\n");
+ goto err_write_cacheline;
+ }
+ }
+#endif
+
+
+ /* Configure DMA attributes. */
+ if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+ pci_using_dac = 1;
+ err = pci_set_consistent_dma_mask(pdev,
+ DMA_64BIT_MASK);
+ if (err < 0) {
+ printk(KERN_ERR PFX "Unable to obtain 64-bit DMA "
+ "for consistent allocations\n");
+ goto err_out_free_res;
+ }
+
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ printk(KERN_ERR PFX "No usable DMA configuration, "
+ "aborting.\n");
+ goto err_out_free_res;
+ }
+ pci_using_dac = 0;
+ }
+
+ casreg_base = pci_resource_start(pdev, 0);
+ casreg_len = pci_resource_len(pdev, 0);
+
+ cp = netdev_priv(dev);
+ cp->pdev = pdev;
+#if 1
+ /* A value of 0 indicates we never explicitly set it */
+ cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0;
+#endif
+ cp->dev = dev;
+ cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
+ cassini_debug;
+
+ cp->link_transition = LINK_TRANSITION_UNKNOWN;
+ cp->link_transition_jiffies_valid = 0;
+
+ spin_lock_init(&cp->lock);
+ spin_lock_init(&cp->rx_inuse_lock);
+ spin_lock_init(&cp->rx_spare_lock);
+ for (i = 0; i < N_TX_RINGS; i++) {
+ spin_lock_init(&cp->stat_lock[i]);
+ spin_lock_init(&cp->tx_lock[i]);
+ }
+ spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
+ init_MUTEX(&cp->pm_sem);
+
+ init_timer(&cp->link_timer);
+ cp->link_timer.function = cas_link_timer;
+ cp->link_timer.data = (unsigned long) cp;
+
+#if 1
+ /* Just in case the implementation of atomic operations
+	 * changes so that an explicit initialization is necessary.
+ */
+ atomic_set(&cp->reset_task_pending, 0);
+ atomic_set(&cp->reset_task_pending_all, 0);
+ atomic_set(&cp->reset_task_pending_spare, 0);
+ atomic_set(&cp->reset_task_pending_mtu, 0);
+#endif
+ INIT_WORK(&cp->reset_task, cas_reset_task, cp);
+
+ /* Default link parameters */
+ if (link_mode >= 0 && link_mode <= 6)
+ cp->link_cntl = link_modes[link_mode];
+ else
+ cp->link_cntl = BMCR_ANENABLE;
+ cp->lstate = link_down;
+ cp->link_transition = LINK_TRANSITION_LINK_DOWN;
+ netif_carrier_off(cp->dev);
+ cp->timer_ticks = 0;
+
+ /* give us access to cassini registers */
+ cp->regs = ioremap(casreg_base, casreg_len);
+ if (cp->regs == 0UL) {
+ printk(KERN_ERR PFX "Cannot map device registers, "
+ "aborting.\n");
+ goto err_out_free_res;
+ }
+ cp->casreg_len = casreg_len;
+
+ pci_save_state(pdev);
+ cas_check_pci_invariants(cp);
+ cas_hard_reset(cp);
+ cas_reset(cp, 0);
+ if (cas_check_invariants(cp))
+ goto err_out_iounmap;
+
+ cp->init_block = (struct cas_init_block *)
+ pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
+ &cp->block_dvma);
+ if (!cp->init_block) {
+ printk(KERN_ERR PFX "Cannot allocate init block, "
+ "aborting.\n");
+ goto err_out_iounmap;
+ }
+
+ for (i = 0; i < N_TX_RINGS; i++)
+ cp->init_txds[i] = cp->init_block->txds[i];
+
+ for (i = 0; i < N_RX_DESC_RINGS; i++)
+ cp->init_rxds[i] = cp->init_block->rxds[i];
+
+ for (i = 0; i < N_RX_COMP_RINGS; i++)
+ cp->init_rxcs[i] = cp->init_block->rxcs[i];
+
+ for (i = 0; i < N_RX_FLOWS; i++)
+ skb_queue_head_init(&cp->rx_flows[i]);
+
+ dev->open = cas_open;
+ dev->stop = cas_close;
+ dev->hard_start_xmit = cas_start_xmit;
+ dev->get_stats = cas_get_stats;
+ dev->set_multicast_list = cas_set_multicast;
+ dev->do_ioctl = cas_ioctl;
+ dev->ethtool_ops = &cas_ethtool_ops;
+ dev->tx_timeout = cas_tx_timeout;
+ dev->watchdog_timeo = CAS_TX_TIMEOUT;
+ dev->change_mtu = cas_change_mtu;
+#ifdef USE_NAPI
+ dev->poll = cas_poll;
+ dev->weight = 64;
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = cas_netpoll;
+#endif
+ dev->irq = pdev->irq;
+ dev->dma = 0;
+
+ /* Cassini features. */
+ if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
+ dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
+
+ if (pci_using_dac)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ if (register_netdev(dev)) {
+ printk(KERN_ERR PFX "Cannot register net device, "
+ "aborting.\n");
+ goto err_out_free_consistent;
+ }
+
+ i = readl(cp->regs + REG_BIM_CFG);
+ printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
+ "Ethernet[%d] ", dev->name,
+ (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
+ (i & BIM_CFG_32BIT) ? "32" : "64",
+ (i & BIM_CFG_66MHZ) ? "66" : "33",
+ (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq);
+
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", dev->dev_addr[i],
+ i == 5 ? ' ' : ':');
+ printk("\n");
+
+ pci_set_drvdata(pdev, dev);
+ cp->hw_running = 1;
+ cas_entropy_reset(cp);
+ cas_phy_init(cp);
+ cas_begin_auto_negotiation(cp, NULL);
+ return 0;
+
+err_out_free_consistent:
+ pci_free_consistent(pdev, sizeof(struct cas_init_block),
+ cp->init_block, cp->block_dvma);
+
+err_out_iounmap:
+ down(&cp->pm_sem);
+ if (cp->hw_running)
+ cas_shutdown(cp);
+ up(&cp->pm_sem);
+
+ iounmap(cp->regs);
+
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_write_cacheline:
+	/* Try to restore it in case the error occurred after we
+ * set it.
+ */
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
+
+err_out_free_netdev:
+ free_netdev(dev);
+
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return -ENODEV;
+}
+
+static void __devexit cas_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct cas *cp;
+ if (!dev)
+ return;
+
+ cp = netdev_priv(dev);
+ unregister_netdev(dev);
+
+ down(&cp->pm_sem);
+ flush_scheduled_work();
+ if (cp->hw_running)
+ cas_shutdown(cp);
+ up(&cp->pm_sem);
+
+#if 1
+ if (cp->orig_cacheline_size) {
+ /* Restore the cache line size if we had modified
+ * it.
+ */
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+ cp->orig_cacheline_size);
+ }
+#endif
+ pci_free_consistent(pdev, sizeof(struct cas_init_block),
+ cp->init_block, cp->block_dvma);
+ iounmap(cp->regs);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
+
+	/* We hold the PM semaphore for the entire driver
+ * sleep time
+ */
+ down(&cp->pm_sem);
+
+ /* If the driver is opened, we stop the DMA */
+ if (cp->opened) {
+ netif_device_detach(dev);
+
+ cas_lock_all_save(cp, flags);
+
+ /* We can set the second arg of cas_reset to 0
+ * because on resume, we'll call cas_init_hw with
+ * its second arg set so that autonegotiation is
+ * restarted.
+ */
+ cas_reset(cp, 0);
+ cas_clean_rings(cp);
+ cas_unlock_all_restore(cp, flags);
+ }
+
+ if (cp->hw_running)
+ cas_shutdown(cp);
+
+ return 0;
+}
+
+static int cas_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct cas *cp = netdev_priv(dev);
+
+ printk(KERN_INFO "%s: resuming\n", dev->name);
+
+ cas_hard_reset(cp);
+ if (cp->opened) {
+ unsigned long flags;
+ cas_lock_all_save(cp, flags);
+ cas_reset(cp, 0);
+ cp->hw_running = 1;
+ cas_clean_rings(cp);
+ cas_init_hw(cp, 1);
+ cas_unlock_all_restore(cp, flags);
+
+ netif_device_attach(dev);
+ }
+ up(&cp->pm_sem);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct pci_driver cas_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = cas_pci_tbl,
+ .probe = cas_init_one,
+ .remove = __devexit_p(cas_remove_one),
+#ifdef CONFIG_PM
+ .suspend = cas_suspend,
+ .resume = cas_resume
+#endif
+};
+
+static int __init cas_init(void)
+{
+ if (linkdown_timeout > 0)
+ link_transition_timeout = linkdown_timeout * HZ;
+ else
+ link_transition_timeout = 0;
+
+ return pci_module_init(&cas_driver);
+}
+
+static void __exit cas_cleanup(void)
+{
+ pci_unregister_driver(&cas_driver);
+}
+
+module_init(cas_init);
+module_exit(cas_cleanup);
diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h
new file mode 100644
index 00000000000..88063ef16cf
--- /dev/null
+++ b/drivers/net/cassini.h
@@ -0,0 +1,4425 @@
+/* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
+ * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
+ *
+ * Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ *
+ * vendor id: 0x108E (Sun Microsystems, Inc.)
+ * device id: 0xabba (Cassini)
+ * revision ids: 0x01 = Cassini
+ * 0x02 = Cassini rev 2
+ * 0x10 = Cassini+
+ * 0x11 = Cassini+ 0.2u
+ *
+ * vendor id: 0x100b (National Semiconductor)
+ * device id: 0x0035 (DP83065/Saturn)
+ * revision ids: 0x30 = Saturn B2
+ *
+ * rings are all offset from 0.
+ *
+ * there are two clock domains:
+ * PCI: 33/66MHz clock
+ * chip: 125MHz clock
+ */
+
+#ifndef _CASSINI_H
+#define _CASSINI_H
+
+/* cassini register map: 2M memory mapped in 32-bit memory space accessible as
+ * 32-bit words. there is no i/o port access. REG_ addresses are
+ * shared between cassini and cassini+. REG_PLUS_ addresses only
+ * appear in cassini+. REG_MINUS_ addresses only appear in cassini.
+ */
+#define CAS_ID_REV2 0x02
+#define CAS_ID_REVPLUS 0x10
+#define CAS_ID_REVPLUS02u 0x11
+#define CAS_ID_REVSATURNB2 0x30
+
+/** global resources **/
+
+/* this register sets the weights for the weighted round robin arbiter. e.g.,
+ * if rx weight == 1 and tx weight == 0, rx == 2x tx transfer credit
+ * for its next turn to access the pci bus.
+ * map: 0x0 = x1, 0x1 = x2, 0x2 = x4, 0x3 = x8
+ * DEFAULT: 0x0, SIZE: 5 bits
+ */
+#define REG_CAWR 0x0004 /* core arbitration weight */
+#define CAWR_RX_DMA_WEIGHT_SHIFT 0
+#define CAWR_RX_DMA_WEIGHT_MASK 0x03 /* [0:1] */
+#define CAWR_TX_DMA_WEIGHT_SHIFT 2
+#define CAWR_TX_DMA_WEIGHT_MASK 0x0C /* [3:2] */
+#define CAWR_RR_DIS 0x10 /* [4] */
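+/* example: (0x1 << CAWR_RX_DMA_WEIGHT_SHIFT) | (0x0 << CAWR_TX_DMA_WEIGHT_SHIFT)
+ * programs an x2 rx weight and an x1 tx weight, i.e. the 2:1 rx:tx transfer
+ * credit split described above.
+ */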
+
+/* if enabled, BIM can send bursts across PCI bus > cacheline size. burst
+ * sizes determined by length of packet or descriptor transfer and the
+ * max length allowed by the target.
+ * DEFAULT: 0x0, SIZE: 1 bit
+ */
+#define REG_INF_BURST 0x0008 /* infinite burst enable reg */
+#define INF_BURST_EN 0x1 /* enable */
+
+/* top level interrupts [0-9] are auto-cleared to 0 when the status
+ * register is read. second level interrupts [13 - 18] are cleared at
+ * the source. tx completion register 3 is replicated in [19 - 31]
+ * DEFAULT: 0x00000000, SIZE: 29 bits
+ */
+#define REG_INTR_STATUS 0x000C /* interrupt status register */
+#define INTR_TX_INTME 0x00000001 /* frame w/ INT ME desc bit set
+ xferred from host queue to
+ TX FIFO */
+#define INTR_TX_ALL 0x00000002 /* all xmit frames xferred into
+ TX FIFO. i.e.,
+ TX Kick == TX complete. if
+ PACED_MODE set, then TX FIFO
+ also empty */
+#define INTR_TX_DONE 0x00000004 /* any frame xferred into tx
+ FIFO */
+#define INTR_TX_TAG_ERROR 0x00000008 /* TX FIFO tag framing
+ corrupted. FATAL ERROR */
+#define INTR_RX_DONE 0x00000010 /* at least 1 frame xferred
+ from RX FIFO to host mem.
+ RX completion reg updated.
+ may be delayed by recv
+ intr blanking. */
+#define INTR_RX_BUF_UNAVAIL 0x00000020 /* no more receive buffers.
+ RX Kick == RX complete */
+#define INTR_RX_TAG_ERROR 0x00000040 /* RX FIFO tag framing
+ corrupted. FATAL ERROR */
+#define INTR_RX_COMP_FULL 0x00000080 /* no more room in completion
+ ring to post descriptors.
+ RX complete head incr to
+ almost reach RX complete
+ tail */
+#define INTR_RX_BUF_AE 0x00000100 /* less than the
+ programmable threshold #
+ of free descr avail for
+ hw use */
+#define INTR_RX_COMP_AF 0x00000200 /* less than the
+ programmable threshold #
+ of descr spaces for hw
+ use in completion descr
+ ring */
+#define INTR_RX_LEN_MISMATCH 0x00000400 /* len field from MAC !=
+ len of non-reassembly pkt
+ from fifo during DMA or
+ header parser provides TCP
+ header and payload size >
+ MAC packet size.
+ FATAL ERROR */
+#define INTR_SUMMARY 0x00001000 /* summary interrupt bit. this
+ bit will be set if an interrupt
+                                                 is generated on the pci bus. useful
+ when driver is polling for
+ interrupts */
+#define INTR_PCS_STATUS 0x00002000 /* PCS interrupt status register */
+#define INTR_TX_MAC_STATUS 0x00004000 /* TX MAC status register has at
+ least 1 unmasked interrupt set */
+#define INTR_RX_MAC_STATUS 0x00008000 /* RX MAC status register has at
+ least 1 unmasked interrupt set */
+#define INTR_MAC_CTRL_STATUS 0x00010000 /* MAC control status register has
+ at least 1 unmasked interrupt
+ set */
+#define INTR_MIF_STATUS 0x00020000 /* MIF status register has at least
+ 1 unmasked interrupt set */
+#define INTR_PCI_ERROR_STATUS 0x00040000 /* PCI error status register in the
+ BIF has at least 1 unmasked
+ interrupt set */
+#define INTR_TX_COMP_3_MASK 0xFFF80000 /* mask for TX completion
+ 3 reg data */
+#define INTR_TX_COMP_3_SHIFT 19
+#define INTR_ERROR_MASK (INTR_MIF_STATUS | INTR_PCI_ERROR_STATUS | \
+ INTR_PCS_STATUS | INTR_RX_LEN_MISMATCH | \
+ INTR_TX_MAC_STATUS | INTR_RX_MAC_STATUS | \
+ INTR_TX_TAG_ERROR | INTR_RX_TAG_ERROR | \
+ INTR_MAC_CTRL_STATUS)
+
+/* determines which status events will cause an interrupt. layout same
+ * as REG_INTR_STATUS.
+ * DEFAULT: 0xFFFFFFFF, SIZE: 16 bits
+ */
+#define REG_INTR_MASK 0x0010 /* Interrupt mask */
+
+/* top level interrupt bits that are cleared during read of REG_INTR_STATUS_ALIAS.
+ * useful when driver is polling for interrupts. layout same as REG_INTR_MASK.
+ * DEFAULT: 0x00000000, SIZE: 12 bits
+ */
+#define REG_ALIAS_CLEAR 0x0014 /* alias clear mask
+ (used w/ status alias) */
+/* same as REG_INTR_STATUS except that only bits cleared are those selected by
+ * REG_ALIAS_CLEAR
+ * DEFAULT: 0x00000000, SIZE: 29 bits
+ */
+#define REG_INTR_STATUS_ALIAS 0x001C /* interrupt status alias
+ (selective clear) */
+
+/* DEFAULT: 0x0, SIZE: 3 bits */
+#define REG_PCI_ERR_STATUS 0x1000 /* PCI error status */
+#define PCI_ERR_BADACK 0x01 /* reserved in Cassini+.
+ set if no ACK64# during ABS64 cycle
+ in Cassini. */
+#define PCI_ERR_DTRTO 0x02 /* delayed xaction timeout. set if
+ no read retry after 2^15 clocks */
+#define PCI_ERR_OTHER 0x04 /* other PCI errors */
+#define PCI_ERR_BIM_DMA_WRITE 0x08 /* BIM received 0 count DMA write req.
+ unused in Cassini. */
+#define PCI_ERR_BIM_DMA_READ 0x10 /* BIM received 0 count DMA read req.
+ unused in Cassini. */
+#define PCI_ERR_BIM_DMA_TIMEOUT 0x20 /* BIM received 255 retries during
+ DMA. unused in cassini. */
+
+/* mask for PCI status events that will set PCI_ERR_STATUS. if cleared, event
+ * causes an interrupt to be generated.
+ * DEFAULT: 0x7, SIZE: 3 bits
+ */
+#define REG_PCI_ERR_STATUS_MASK 0x1004 /* PCI Error status mask */
+
+/* used to configure PCI related parameters that are not in PCI config space.
+ * DEFAULT: 0bxx000, SIZE: 5 bits
+ */
+#define REG_BIM_CFG 0x1008 /* BIM Configuration */
+#define BIM_CFG_RESERVED0 0x001 /* reserved */
+#define BIM_CFG_RESERVED1 0x002 /* reserved */
+#define BIM_CFG_64BIT_DISABLE 0x004 /* disable 64-bit mode */
+#define BIM_CFG_66MHZ 0x008 /* (ro) 1 = 66MHz, 0 = < 66MHz */
+#define BIM_CFG_32BIT 0x010 /* (ro) 1 = 32-bit slot, 0 = 64-bit */
+#define BIM_CFG_DPAR_INTR_ENABLE 0x020 /* detected parity err enable */
+#define BIM_CFG_RMA_INTR_ENABLE 0x040 /* master abort intr enable */
+#define BIM_CFG_RTA_INTR_ENABLE 0x080 /* target abort intr enable */
+#define BIM_CFG_RESERVED2 0x100 /* reserved */
+#define BIM_CFG_BIM_DISABLE 0x200 /* stop BIM DMA. use before global
+ reset. reserved in Cassini. */
+#define BIM_CFG_BIM_STATUS 0x400 /* (ro) 1 = BIM DMA suspended.
+ reserved in Cassini. */
+#define BIM_CFG_PERROR_BLOCK 0x800 /* block PERR# to pci bus. def: 0.
+ reserved in Cassini. */
+
+/* DEFAULT: 0x00000000, SIZE: 32 bits */
+#define REG_BIM_DIAG 0x100C /* BIM Diagnostic */
+#define BIM_DIAG_MSTR_SM_MASK 0x3FFFFF00 /* PCI master controller state
+ machine bits [21:0] */
+#define BIM_DIAG_BRST_SM_MASK 0x7F /* PCI burst controller state
+ machine bits [6:0] */
+
+/* writing to SW_RESET_TX and SW_RESET_RX will issue a global
+ * reset. poll until TX and RX read back as 0's for completion.
+ */
+#define REG_SW_RESET 0x1010 /* Software reset */
+#define SW_RESET_TX 0x00000001 /* reset TX DMA engine. poll until
+ cleared to 0. */
+#define SW_RESET_RX 0x00000002 /* reset RX DMA engine. poll until
+ cleared to 0. */
+#define SW_RESET_RSTOUT 0x00000004 /* force RSTOUT# pin active (low).
+ resets PHY and anything else
+ connected to RSTOUT#. RSTOUT#
+ is also activated by local PCI
+ reset when hot-swap is being
+ done. */
+#define SW_RESET_BLOCK_PCS_SLINK 0x00000008 /* if a global reset is done with
+ this bit set, PCS and SLINK
+ modules won't be reset.
+ i.e., link won't drop. */
+#define SW_RESET_BREQ_SM_MASK 0x00007F00 /* breq state machine [6:0] */
+#define SW_RESET_PCIARB_SM_MASK 0x00070000 /* pci arbitration state bits:
+ 0b000: ARB_IDLE1
+ 0b001: ARB_IDLE2
+ 0b010: ARB_WB_ACK
+ 0b011: ARB_WB_WAT
+ 0b100: ARB_RB_ACK
+ 0b101: ARB_RB_WAT
+ 0b110: ARB_RB_END
+ 0b111: ARB_WB_END */
+#define SW_RESET_RDPCI_SM_MASK 0x00300000 /* read pci state bits:
+ 0b00: RD_PCI_WAT
+ 0b01: RD_PCI_RDY
+ 0b11: RD_PCI_ACK */
+#define SW_RESET_RDARB_SM_MASK 0x00C00000 /* read arbitration state bits:
+ 0b00: AD_IDL_RX
+ 0b01: AD_ACK_RX
+ 0b10: AD_ACK_TX
+ 0b11: AD_IDL_TX */
+#define SW_RESET_WRPCI_SM_MASK 0x06000000 /* write pci state bits
+ 0b00: WR_PCI_WAT
+ 0b01: WR_PCI_RDY
+ 0b11: WR_PCI_ACK */
+#define SW_RESET_WRARB_SM_MASK 0x38000000 /* write arbitration state bits:
+ 0b000: ARB_IDLE1
+ 0b001: ARB_IDLE2
+ 0b010: ARB_TX_ACK
+ 0b011: ARB_TX_WAT
+ 0b100: ARB_RX_ACK
+ 0b110: ARB_RX_WAT */
+
+/* Cassini only. 64-bit register used to check PCI datapath. when read,
+ * value written has both lower and upper 32-bit halves rotated to the right
+ * one bit position. e.g., FFFFFFFF FFFFFFFF -> 7FFFFFFF 7FFFFFFF
+ */
+#define REG_MINUS_BIM_DATAPATH_TEST 0x1018 /* Cassini: BIM datapath test
+ Cassini+: reserved */
+
+/* output enables are provided for each device's chip select and for the rest
+ * of the outputs from cassini to its local bus devices. two sw programmable
+ * bits are connected to general purpose control/status bits.
+ * DEFAULT: 0x7
+ */
+#define REG_BIM_LOCAL_DEV_EN 0x1020 /* BIM local device
+ output EN. default: 0x7 */
+#define BIM_LOCAL_DEV_PAD 0x01 /* address bus, RW signal, and
+ OE signal output enable on the
+ local bus interface. these
+ are shared between both local
+ bus devices. tristate when 0. */
+#define BIM_LOCAL_DEV_PROM 0x02 /* PROM chip select */
+#define BIM_LOCAL_DEV_EXT 0x04 /* secondary local bus device chip
+ select output enable */
+#define BIM_LOCAL_DEV_SOFT_0 0x08 /* sw programmable ctrl bit 0 */
+#define BIM_LOCAL_DEV_SOFT_1 0x10 /* sw programmable ctrl bit 1 */
+#define BIM_LOCAL_DEV_HW_RESET 0x20 /* internal hw reset. Cassini+ only. */
+
+/* access 24 entry BIM read and write buffers. put address in REG_BIM_BUFFER_ADDR
+ * and read/write from/to it via REG_BIM_BUFFER_DATA_LOW and _DATA_HI.
+ * _DATA_HI should be the last access of the sequence.
+ * DEFAULT: undefined
+ */
+#define  REG_BIM_BUFFER_ADDR          0x1024  /* BIM buffer address. for
+                                                 diagnostic purposes. */
+#define BIM_BUFFER_ADDR_MASK 0x3F /* index (0 - 23) of buffer */
+#define BIM_BUFFER_WR_SELECT 0x40 /* write buffer access = 1
+ read buffer access = 0 */
+/* DEFAULT: undefined */
+#define REG_BIM_BUFFER_DATA_LOW 0x1028 /* BIM buffer data low */
+#define REG_BIM_BUFFER_DATA_HI 0x102C /* BIM buffer data high */
+
+/* set BIM_RAM_BIST_START to start built-in self test for BIM read buffer.
+ * bit auto-clears when done with status read from _SUMMARY and _PASS bits.
+ */
+#define REG_BIM_RAM_BIST 0x102C /* BIM RAM (read buffer) BIST
+ control/status */
+#define BIM_RAM_BIST_RD_START 0x01 /* start BIST for BIM read buffer */
+#define BIM_RAM_BIST_WR_START 0x02 /* start BIST for BIM write buffer.
+ Cassini only. reserved in
+ Cassini+. */
+#define BIM_RAM_BIST_RD_PASS 0x04 /* summary BIST pass status for read
+ buffer. */
+#define BIM_RAM_BIST_WR_PASS 0x08 /* summary BIST pass status for write
+ buffer. Cassini only. reserved
+ in Cassini+. */
+#define BIM_RAM_BIST_RD_LOW_PASS 0x10 /* read low bank passes BIST */
+#define BIM_RAM_BIST_RD_HI_PASS 0x20 /* read high bank passes BIST */
+#define BIM_RAM_BIST_WR_LOW_PASS 0x40 /* write low bank passes BIST.
+ Cassini only. reserved in
+ Cassini+. */
+#define BIM_RAM_BIST_WR_HI_PASS 0x80 /* write high bank passes BIST.
+ Cassini only. reserved in
+ Cassini+. */
+
+/* ASUN: i'm not sure what this does as it's not in the spec.
+ * DEFAULT: 0xFC
+ */
+#define REG_BIM_DIAG_MUX 0x1030 /* BIM diagnostic probe mux
+ select register */
+
+/* enable probe monitoring mode and select data appearing on the P_A* bus. bit
+ * values for _SEL_HI_MASK and _SEL_LOW_MASK:
+ * 0x0: internal probe[7:0] (pci arb state, wtc empty w, wtc full w, wtc empty w,
+ * wtc empty r, post pci)
+ * 0x1: internal probe[15:8] (pci wbuf comp, pci wpkt comp, pci rbuf comp,
+ * pci rpkt comp, txdma wr req, txdma wr ack,
+ * txdma wr rdy, txdma wr xfr done)
+ * 0x2: internal probe[23:16] (txdma rd req, txdma rd ack, txdma rd rdy, rxdma rd,
+ * rd arb state, rd pci state)
+ * 0x3: internal probe[31:24] (rxdma req, rxdma ack, rxdma rdy, wrarb state,
+ * wrpci state)
+ * 0x4: pci io probe[7:0] 0x5: pci io probe[15:8]
+ * 0x6: pci io probe[23:16] 0x7: pci io probe[31:24]
+ * 0x8: pci io probe[39:32] 0x9: pci io probe[47:40]
+ * 0xa: pci io probe[55:48] 0xb: pci io probe[63:56]
+ * the following are not available in Cassini:
+ * 0xc: rx probe[7:0] 0xd: tx probe[7:0]
+ * 0xe: hp probe[7:0] 0xf: mac probe[7:0]
+ */
+#define REG_PLUS_PROBE_MUX_SELECT 0x1034 /* Cassini+: PROBE MUX SELECT */
+#define PROBE_MUX_EN 0x80000000 /* allow probe signals to be
+ driven on local bus P_A[15:0]
+ for debugging */
+#define PROBE_MUX_SUB_MUX_MASK 0x0000FF00 /* select sub module probe signals:
+ 0x03 = mac[1:0]
+ 0x0C = rx[1:0]
+ 0x30 = tx[1:0]
+ 0xC0 = hp[1:0] */
+#define PROBE_MUX_SEL_HI_MASK 0x000000F0 /* select which module to appear
+ on P_A[15:8]. see above for
+ values. */
+#define PROBE_MUX_SEL_LOW_MASK 0x0000000F /* select which module to appear
+ on P_A[7:0]. see above for
+ values. */
+
+/* values mean the same thing as REG_INTR_MASK except that it's for INTB.
+ DEFAULT: 0x1F */
+#define REG_PLUS_INTR_MASK_1 0x1038 /* Cassini+: interrupt mask
+ register 2 for INTB */
+#define REG_PLUS_INTRN_MASK(x) (REG_PLUS_INTR_MASK_1 + ((x) - 1)*16)
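+/* the per-interrupt register banks are spaced 16 bytes apart, so e.g.
+ * REG_PLUS_INTRN_MASK(2) = 0x1038 + (2 - 1)*16 = 0x1048.
+ */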
+/* bits correspond to both _MASK and _STATUS registers. _ALT corresponds to
+ * all of the alternate (2-4) INTR registers while _1 corresponds to only
+ * _MASK_1 and _STATUS_1 registers.
+ * DEFAULT: 0x7 for MASK registers, 0x0 for ALIAS_CLEAR registers
+ */
+#define INTR_RX_DONE_ALT 0x01
+#define INTR_RX_COMP_FULL_ALT 0x02
+#define INTR_RX_COMP_AF_ALT 0x04
+#define INTR_RX_BUF_UNAVAIL_1 0x08
+#define INTR_RX_BUF_AE_1 0x10 /* almost empty */
+#define INTRN_MASK_RX_EN 0x80
+#define INTRN_MASK_CLEAR_ALL (INTR_RX_DONE_ALT | \
+ INTR_RX_COMP_FULL_ALT | \
+ INTR_RX_COMP_AF_ALT | \
+ INTR_RX_BUF_UNAVAIL_1 | \
+ INTR_RX_BUF_AE_1)
+#define REG_PLUS_INTR_STATUS_1 0x103C /* Cassini+: interrupt status
+ register 2 for INTB. default: 0x1F */
+#define REG_PLUS_INTRN_STATUS(x) (REG_PLUS_INTR_STATUS_1 + ((x) - 1)*16)
+#define INTR_STATUS_ALT_INTX_EN 0x80 /* generate INTX when one of the
+					 flags is set. enables desc ring. */
+
+#define REG_PLUS_ALIAS_CLEAR_1 0x1040 /* Cassini+: alias clear mask
+ register 2 for INTB */
+#define REG_PLUS_ALIASN_CLEAR(x) (REG_PLUS_ALIAS_CLEAR_1 + ((x) - 1)*16)
+
+#define REG_PLUS_INTR_STATUS_ALIAS_1 0x1044 /* Cassini+: interrupt status
+ register alias 2 for INTB */
+#define REG_PLUS_INTRN_STATUS_ALIAS(x) (REG_PLUS_INTR_STATUS_ALIAS_1 + ((x) - 1)*16)
+
+#define REG_SATURN_PCFG 0x106c /* pin configuration register for
+ integrated macphy */
+
+#define SATURN_PCFG_TLA 0x00000001 /* 1 = phy actled */
+#define SATURN_PCFG_FLA 0x00000002 /* 1 = phy link10led */
+#define SATURN_PCFG_CLA 0x00000004 /* 1 = phy link100led */
+#define SATURN_PCFG_LLA 0x00000008 /* 1 = phy link1000led */
+#define SATURN_PCFG_RLA 0x00000010 /* 1 = phy duplexled */
+#define SATURN_PCFG_PDS 0x00000020 /* phy debug mode.
+ 0 = normal */
+#define SATURN_PCFG_MTP 0x00000080 /* test point select */
+#define SATURN_PCFG_GMO 0x00000100 /* GMII observe. 1 =
+ GMII on SERDES pins for
+ monitoring. */
+#define SATURN_PCFG_FSI 0x00000200 /* 1 = freeze serdes/gmii. all
+ pins configured as outputs.
+ for power saving when using
+ internal phy. */
+#define SATURN_PCFG_LAD 0x00000800 /* 0 = mac core led ctrl
+ polarity from strapping
+ value.
+ 1 = mac core led ctrl
+ polarity active low. */
+
+
+/** transmit dma registers **/
+#define MAX_TX_RINGS_SHIFT 2
+#define MAX_TX_RINGS (1 << MAX_TX_RINGS_SHIFT)
+#define MAX_TX_RINGS_MASK (MAX_TX_RINGS - 1)
+
+/* TX configuration.
+ * descr ring size = 32 * (1 << n), n < 9. e.g., 0x8 = 8k. default: 0x8
+ * DEFAULT: 0x3F000001
+ */
+#define REG_TX_CFG 0x2004 /* TX config */
+#define TX_CFG_DMA_EN 0x00000001 /* enable TX DMA. if cleared, DMA
+ will stop after xfer of current
+ buffer has been completed. */
+#define TX_CFG_FIFO_PIO_SEL 0x00000002 /* TX DMA FIFO can be
+ accessed w/ FIFO addr
+ and data registers.
+ TX DMA should be
+ disabled. */
+#define TX_CFG_DESC_RING0_MASK 0x0000003C /* # desc entries in
+ ring 1. */
+#define TX_CFG_DESC_RING0_SHIFT 2
+#define TX_CFG_DESC_RINGN_MASK(a) (TX_CFG_DESC_RING0_MASK << (a)*4)
+#define TX_CFG_DESC_RINGN_SHIFT(a) (TX_CFG_DESC_RING0_SHIFT + (a)*4)
+#define TX_CFG_PACED_MODE 0x00100000 /* TX_ALL only set after
+ TX FIFO becomes empty.
+ if 0, TX_ALL set
+ if descr queue empty. */
+#define TX_CFG_DMA_RDPIPE_DIS 0x01000000 /* always set to 1 */
+#define TX_CFG_COMPWB_Q1 0x02000000 /* completion writeback happens at
+ the end of every packet kicked
+ through Q1. */
+#define TX_CFG_COMPWB_Q2 0x04000000 /* completion writeback happens at
+ the end of every packet kicked
+ through Q2. */
+#define TX_CFG_COMPWB_Q3 0x08000000 /* completion writeback happens at
+ the end of every packet kicked
+ through Q3 */
+#define TX_CFG_COMPWB_Q4 0x10000000 /* completion writeback happens at
+ the end of every packet kicked
+ through Q4 */
+#define TX_CFG_INTR_COMPWB_DIS 0x20000000 /* disable pre-interrupt completion
+ writeback */
+#define TX_CFG_CTX_SEL_MASK 0xC0000000 /* selects tx test port
+ connection
+ 0b00: tx mac req,
+ tx mac retry req,
+ tx ack and tx tag.
+ 0b01: txdma rd req,
+ txdma rd ack,
+ txdma rd rdy,
+ txdma rd type0
+ 0b11: txdma wr req,
+ txdma wr ack,
+ txdma wr rdy,
+ txdma wr xfr done. */
+#define TX_CFG_CTX_SEL_SHIFT 30
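+
+/* illustrative sketch, not part of the original header: programming the
+ * descriptor ring size field for one TX ring. a field value n encodes a ring
+ * of 32 * (1 << n) entries, so n = 0x8 gives the 8k default noted above.
+ * assumes readl()/writel() from <linux/io.h> and u32 from <linux/types.h>;
+ * example_ names are hypothetical.
+ */
+#if 0	/* example only */
+static inline void example_set_tx_ring_size(void __iomem *regs, int ring, u32 n)
+{
+	u32 val = readl(regs + REG_TX_CFG);
+
+	val &= ~TX_CFG_DESC_RINGN_MASK(ring);
+	val |= (n << TX_CFG_DESC_RINGN_SHIFT(ring)) & TX_CFG_DESC_RINGN_MASK(ring);
+	writel(val, regs + REG_TX_CFG);
+}
+#endif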
+
+/* 11-bit counters that point to next location in FIFO to be loaded/retrieved.
+ * used for diagnostics only.
+ */
+#define REG_TX_FIFO_WRITE_PTR 0x2014 /* TX FIFO write pointer */
+#define REG_TX_FIFO_SHADOW_WRITE_PTR 0x2018 /* TX FIFO shadow write
+ pointer. temp hold reg.
+ diagnostics only. */
+#define REG_TX_FIFO_READ_PTR 0x201C /* TX FIFO read pointer */
+#define REG_TX_FIFO_SHADOW_READ_PTR 0x2020 /* TX FIFO shadow read
+ pointer */
+
+/* (ro) 11-bit up/down counter w/ # of frames currently in TX FIFO */
+#define REG_TX_FIFO_PKT_CNT 0x2024 /* TX FIFO packet counter */
+
+/* current state of all state machines in TX */
+#define REG_TX_SM_1 0x2028 /* TX state machine reg #1 */
+#define TX_SM_1_CHAIN_MASK 0x000003FF /* chaining state machine */
+#define TX_SM_1_CSUM_MASK 0x00000C00 /* checksum state machine */
+#define TX_SM_1_FIFO_LOAD_MASK 0x0003F000 /* FIFO load state machine.
+ = 0x01 when TX disabled. */
+#define TX_SM_1_FIFO_UNLOAD_MASK 0x003C0000 /* FIFO unload state machine */
+#define TX_SM_1_CACHE_MASK 0x03C00000 /* desc. prefetch cache controller
+ state machine */
+#define TX_SM_1_CBQ_ARB_MASK 0xF8000000 /* CBQ arbiter state machine */
+
+#define REG_TX_SM_2 0x202C /* TX state machine reg #2 */
+#define TX_SM_2_COMP_WB_MASK 0x07 /* completion writeback sm */
+#define TX_SM_2_SUB_LOAD_MASK 0x38 /* sub load state machine */
+#define TX_SM_2_KICK_MASK 0xC0 /* kick state machine */
+
+/* 64-bit pointer to the transmit data buffer. only the 50 LSB are incremented
+ * while the upper 23 bits are taken from the TX descriptor
+ */
+#define REG_TX_DATA_PTR_LOW 0x2030 /* TX data pointer low */
+#define REG_TX_DATA_PTR_HI 0x2034 /* TX data pointer high */
+
+/* 13 bit registers written by driver w/ descriptor value that follows
+ * last valid xmit descriptor. kick # and complete # values are used by
+ * the xmit dma engine to control tx descr fetching. if > 1 valid
+ * tx descr is available within the cache line being read, cassini will
+ * internally cache up to 4 of them. 0 on reset. _KICK = rw, _COMP = ro.
+ */
+#define REG_TX_KICK0 0x2038 /* TX kick reg #1 */
+#define REG_TX_KICKN(x) (REG_TX_KICK0 + (x)*4)
+#define REG_TX_COMP0 0x2048 /* TX completion reg #1 */
+#define REG_TX_COMPN(x) (REG_TX_COMP0 + (x)*4)
+
+/* values of TX_COMPLETE_1-4 are written. each completion register
+ * is 2 bytes in size and contiguous. 8B allocation w/ 8B alignment.
+ * NOTE: completion reg values are only written back prior to TX_INTME and
+ * TX_ALL interrupts. at all other times, the most up-to-date index values
+ * should be obtained from the REG_TX_COMPLETE_# registers.
+ * here's the layout:
+ * offset from base addr completion # byte
+ * 0 TX_COMPLETE_1_MSB
+ * 1 TX_COMPLETE_1_LSB
+ * 2 TX_COMPLETE_2_MSB
+ * 3 TX_COMPLETE_2_LSB
+ * 4 TX_COMPLETE_3_MSB
+ * 5 TX_COMPLETE_3_LSB
+ * 6 TX_COMPLETE_4_MSB
+ * 7 TX_COMPLETE_4_LSB
+ */
+#define TX_COMPWB_SIZE 8
+#define REG_TX_COMPWB_DB_LOW 0x2058 /* TX completion write back
+ base low */
+#define REG_TX_COMPWB_DB_HI 0x205C /* TX completion write back
+ base high */
+#define TX_COMPWB_MSB_MASK 0x00000000000000FFULL
+#define TX_COMPWB_MSB_SHIFT 0
+#define TX_COMPWB_LSB_MASK 0x000000000000FF00ULL
+#define TX_COMPWB_LSB_SHIFT 8
+#define TX_COMPWB_NEXT(x) ((x) >> 16)
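+
+/* illustrative sketch, not part of the original header: pulling the
+ * completion index for the lowest-numbered ring out of the 8-byte writeback
+ * block (MSB is stored in the first byte, LSB in the second), then advancing
+ * to the next ring's pair of bytes with TX_COMPWB_NEXT(). assumes the 64-bit
+ * block has already been read out of host memory (e.g. via le64_to_cpu());
+ * example_ names are hypothetical.
+ */
+#if 0	/* example only */
+static inline unsigned int example_tx_compwb_entry(u64 compwb)
+{
+	return ((compwb & TX_COMPWB_MSB_MASK) << 8) |
+	       ((compwb & TX_COMPWB_LSB_MASK) >> 8);
+}
+/* usage: entry = example_tx_compwb_entry(compwb);
+ *        compwb = TX_COMPWB_NEXT(compwb);	(next ring's bytes now first)
+ */
+#endif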
+
+/* 53 MSB used as base address. 11 LSB assumed to be 0. TX desc pointer must
+ * be 2KB-aligned. */
+#define REG_TX_DB0_LOW 0x2060 /* TX descriptor base low #1 */
+#define REG_TX_DB0_HI 0x2064 /* TX descriptor base hi #1 */
+#define REG_TX_DBN_LOW(x) (REG_TX_DB0_LOW + (x)*8)
+#define REG_TX_DBN_HI(x) (REG_TX_DB0_HI + (x)*8)
+
+/* 16-bit registers hold weights for the weighted round-robin of the
+ * four CBQ TX descr rings. weights correspond to # bytes xferred from
+ * host to TXFIFO in a round of WRR arbitration. can be set
+ * dynamically with new weights set upon completion of the current
+ * packet transfer from host memory to TXFIFO. a dummy write to any of
+ * these registers causes a queue1 pre-emption with all historical bw
+ * deficit data reset to 0 (useful when congestion requires a
+ * pre-emption/re-allocation of network bandwidth).
+ */
+#define REG_TX_MAXBURST_0 0x2080 /* TX MaxBurst #1 */
+#define REG_TX_MAXBURST_1 0x2084 /* TX MaxBurst #2 */
+#define REG_TX_MAXBURST_2 0x2088 /* TX MaxBurst #3 */
+#define REG_TX_MAXBURST_3 0x208C /* TX MaxBurst #4 */
+
+/* diagnostics access to any TX FIFO location. every access is 65
+ * bits. _DATA_LOW = 32 LSB, _DATA_HI_T1/T0 = 32 MSB. _TAG = tag bit.
+ * writing _DATA_HI_T0 sets tag bit low, writing _DATA_HI_T1 sets tag
+ * bit high. TX_FIFO_PIO_SEL must be set for TX FIFO PIO access. if
+ * TX FIFO data integrity is desired, TX DMA should be
+ * disabled. _DATA_HI_Tx should be the last access of the sequence.
+ */
+#define REG_TX_FIFO_ADDR 0x2104 /* TX FIFO address */
+#define REG_TX_FIFO_TAG 0x2108 /* TX FIFO tag */
+#define REG_TX_FIFO_DATA_LOW 0x210C /* TX FIFO data low */
+#define REG_TX_FIFO_DATA_HI_T1 0x2110 /* TX FIFO data high t1 */
+#define REG_TX_FIFO_DATA_HI_T0 0x2114 /* TX FIFO data high t0 */
+#define REG_TX_FIFO_SIZE 0x2118 /* (ro) TX FIFO size = 0x090 = 9KB */
+
+/* 9-bit register controls BIST of TX FIFO. bit set indicates that the BIST
+ * passed for the specified memory
+ */
+#define REG_TX_RAMBIST 0x211C /* TX RAMBIST control/status */
+#define TX_RAMBIST_STATE 0x01C0 /* progress state of RAMBIST
+ controller state machine */
+#define TX_RAMBIST_RAM33A_PASS 0x0020 /* RAM33A passed */
+#define TX_RAMBIST_RAM32A_PASS 0x0010 /* RAM32A passed */
+#define TX_RAMBIST_RAM33B_PASS 0x0008 /* RAM33B passed */
+#define TX_RAMBIST_RAM32B_PASS 0x0004 /* RAM32B passed */
+#define TX_RAMBIST_SUMMARY 0x0002 /* all RAM passed */
+#define TX_RAMBIST_START 0x0001 /* write 1 to start BIST. self
+ clears on completion. */
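+
+/* illustrative sketch, not part of the original header: running the TX FIFO
+ * BIST and checking the summary bit. assumes readl()/writel() from
+ * <linux/io.h> and udelay() from <linux/delay.h>; example_ names and the
+ * timeout are hypothetical.
+ */
+#if 0	/* example only */
+static inline int example_tx_rambist(void __iomem *regs)
+{
+	int limit = 1000;
+
+	writel(TX_RAMBIST_START, regs + REG_TX_RAMBIST);
+	while ((readl(regs + REG_TX_RAMBIST) & TX_RAMBIST_START) && --limit)
+		udelay(10);	/* START self-clears when the BIST is done */
+	return (readl(regs + REG_TX_RAMBIST) & TX_RAMBIST_SUMMARY) ? 0 : -1;
+}
+#endif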
+
+/** receive dma registers **/
+#define MAX_RX_DESC_RINGS 2
+#define MAX_RX_COMP_RINGS 4
+
+/* receive DMA channel configuration.
+ * free ring size = (1 << n)*32 -> [32 - 8k]
+ * completion ring size = (1 << n)*128 -> [128 - 32k], n < 9
+ * DEFAULT: 0x80910
+ */
+#define REG_RX_CFG 0x4000 /* RX config */
+#define RX_CFG_DMA_EN 0x00000001 /* enable RX DMA. 0 stops
+ channel as soon as current
+ frame xfer has completed.
+ driver should disable MAC
+ for 200ms before disabling
+ RX */
+#define RX_CFG_DESC_RING_MASK 0x0000001E /* # desc entries in RX
+ free desc ring.
+ def: 0x8 = 8k */
+#define RX_CFG_DESC_RING_SHIFT 1
+#define RX_CFG_COMP_RING_MASK 0x000001E0 /* # desc entries in RX complete
+ ring. def: 0x8 = 32k */
+#define RX_CFG_COMP_RING_SHIFT 5
+#define RX_CFG_BATCH_DIS 0x00000200 /* disable receive desc
+ batching. def: 0x0 =
+ enabled */
+#define RX_CFG_SWIVEL_MASK 0x00001C00 /* byte offset of the 1st
+ data byte of the packet
+ w/in 8 byte boundaries.
+ this swivels the data
+ DMA'ed to header
+ buffers, jumbo buffers
+ when header split is not
+ requested and MTU sized
+ buffers. def: 0x2 */
+#define RX_CFG_SWIVEL_SHIFT 10
+
+/* cassini+ only */
+#define RX_CFG_DESC_RING1_MASK 0x000F0000 /* # of desc entries in
+ RX free desc ring 2.
+ def: 0x8 = 8k */
+#define RX_CFG_DESC_RING1_SHIFT 16
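+
+/* illustrative sketch, not part of the original header: composing REG_RX_CFG
+ * from the fields above. with 0x8 in both ring size fields, 0x2 for swivel
+ * and 0x8 for the cassini+ second free ring, the result is 0x80910, matching
+ * the documented default. assumes writel() from <linux/io.h> and u32 from
+ * <linux/types.h>; example_ names are hypothetical.
+ */
+#if 0	/* example only */
+static inline void example_rx_cfg_default(void __iomem *regs)
+{
+	u32 val = 0;
+
+	val |= (0x8 << RX_CFG_DESC_RING_SHIFT) & RX_CFG_DESC_RING_MASK;   /* 8k free descs */
+	val |= (0x8 << RX_CFG_COMP_RING_SHIFT) & RX_CFG_COMP_RING_MASK;   /* 32k completions */
+	val |= (0x2 << RX_CFG_SWIVEL_SHIFT) & RX_CFG_SWIVEL_MASK;         /* default swivel */
+	val |= (0x8 << RX_CFG_DESC_RING1_SHIFT) & RX_CFG_DESC_RING1_MASK; /* cassini+ ring 2 */
+	writel(val, regs + REG_RX_CFG);	/* OR in RX_CFG_DMA_EN once rings are posted */
+}
+#endif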
+
+
+/* the page size register allows cassini chips to do the following with
+ * received data:
+ * [--------------------------------------------------------------] page
+ * [off][buf1][pad][off][buf2][pad][off][buf3][pad][off][buf4][pad]
+ * |--------------| = PAGE_SIZE_BUFFER_STRIDE
+ * page = PAGE_SIZE
+ * offset = PAGE_SIZE_MTU_OFF
+ * for the above example, MTU_BUFFER_COUNT = 4.
+ * NOTE: as is apparent, you need to ensure that the following holds:
+ * MTU_BUFFER_COUNT <= PAGE_SIZE/PAGE_SIZE_BUFFER_STRIDE
+ * DEFAULT: 0x48002002 (8k pages)
+ */
+#define REG_RX_PAGE_SIZE 0x4004 /* RX page size */
+#define RX_PAGE_SIZE_MASK 0x00000003 /* size of pages pointed to
+ by receive descriptors.
+ if jumbo buffers are
+ supported the page size
+ should not be < 8k.
+ 0b00 = 2k, 0b01 = 4k
+ 0b10 = 8k, 0b11 = 16k
+ DEFAULT: 8k */
+#define RX_PAGE_SIZE_SHIFT 0
+#define RX_PAGE_SIZE_MTU_COUNT_MASK 0x00007800 /* # of MTU buffers the hw
+ packs into a page.
+ DEFAULT: 4 */
+#define RX_PAGE_SIZE_MTU_COUNT_SHIFT 11
+#define RX_PAGE_SIZE_MTU_STRIDE_MASK 0x18000000 /* # of bytes that separate
+ each MTU buffer +
+ offset from each
+ other.
+ 0b00 = 1k, 0b01 = 2k
+ 0b10 = 4k, 0b11 = 8k
+ DEFAULT: 0x1 */
+#define RX_PAGE_SIZE_MTU_STRIDE_SHIFT 27
+#define RX_PAGE_SIZE_MTU_OFF_MASK 0xC0000000 /* offset in each page that
+ hw writes the MTU buffer
+ into.
+ 0b00 = 0,
+ 0b01 = 64 bytes
+ 0b10 = 96, 0b11 = 128
+ DEFAULT: 0x1 */
+#define RX_PAGE_SIZE_MTU_OFF_SHIFT 30
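+
+/* illustrative sketch, not part of the original header: composing the
+ * documented reset default 0x48002002 (8k pages, 4 MTU buffers, 2k stride,
+ * 64 byte offset) from the fields above. note 4 <= 8192/2048, so the
+ * constraint stated earlier holds. assumes writel() from <linux/io.h> and
+ * u32 from <linux/types.h>; example_ names are hypothetical.
+ */
+#if 0	/* example only */
+static inline void example_rx_page_size_default(void __iomem *regs)
+{
+	u32 val = 0;
+
+	val |= (0x2 << RX_PAGE_SIZE_SHIFT) & RX_PAGE_SIZE_MASK;			/* 8k page */
+	val |= (0x4 << RX_PAGE_SIZE_MTU_COUNT_SHIFT) & RX_PAGE_SIZE_MTU_COUNT_MASK;	/* 4 buffers */
+	val |= (0x1 << RX_PAGE_SIZE_MTU_STRIDE_SHIFT) & RX_PAGE_SIZE_MTU_STRIDE_MASK;	/* 2k stride */
+	val |= (0x1 << RX_PAGE_SIZE_MTU_OFF_SHIFT) & RX_PAGE_SIZE_MTU_OFF_MASK;	/* 64B offset */
+	writel(val, regs + REG_RX_PAGE_SIZE);	/* val == 0x48002002 */
+}
+#endif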
+
+/* 11-bit counter points to next location in RX FIFO to be loaded/read.
+ * shadow write pointers enable retries in case of early receive aborts.
+ * DEFAULT: 0x0. generated on 64-bit boundaries.
+ */
+#define REG_RX_FIFO_WRITE_PTR 0x4008 /* RX FIFO write pointer */
+#define REG_RX_FIFO_READ_PTR 0x400C /* RX FIFO read pointer */
+#define REG_RX_IPP_FIFO_SHADOW_WRITE_PTR 0x4010 /* RX IPP FIFO shadow write
+ pointer */
+#define REG_RX_IPP_FIFO_SHADOW_READ_PTR 0x4014 /* RX IPP FIFO shadow read
+ pointer */
+#define REG_RX_IPP_FIFO_READ_PTR 0x400C /* RX IPP FIFO read
+ pointer. (8-bit counter) */
+
+/* current state of RX DMA state engines + other info
+ * DEFAULT: 0x0
+ */
+#define REG_RX_DEBUG 0x401C /* RX debug */
+#define RX_DEBUG_LOAD_STATE_MASK 0x0000000F /* load state machine w/ MAC:
+ 0x0 = idle, 0x1 = load_bop
+ 0x2 = load 1, 0x3 = load 2
+ 0x4 = load 3, 0x5 = load 4
+ 0x6 = last detect
+ 0x7 = wait req
+ 0x8 = wait req status 1st
+ 0x9 = load st
+ 0xa = bubble mac
+ 0xb = error */
+#define RX_DEBUG_LM_STATE_MASK 0x00000070 /* load state machine w/ HP and
+ RX FIFO:
+ 0x0 = idle, 0x1 = hp xfr
+ 0x2 = wait hp ready
+ 0x3 = wait flow code
+ 0x4 = fifo xfer
+ 0x5 = make status
+ 0x6 = csum ready
+ 0x7 = error */
+#define RX_DEBUG_FC_STATE_MASK 0x00000180 /* flow control state machine
+ w/ MAC:
+ 0x0 = idle
+ 0x1 = wait xoff ack
+ 0x2 = wait xon
+ 0x3 = wait xon ack */
+#define RX_DEBUG_DATA_STATE_MASK 0x00001E00 /* unload data state machine
+ states:
+ 0x0 = idle data
+ 0x1 = header begin
+ 0x2 = xfer header
+ 0x3 = xfer header ld
+ 0x4 = mtu begin
+ 0x5 = xfer mtu
+ 0x6 = xfer mtu ld
+ 0x7 = jumbo begin
+ 0x8 = xfer jumbo
+ 0x9 = xfer jumbo ld
+ 0xa = reas begin
+ 0xb = xfer reas
+ 0xc = flush tag
+ 0xd = xfer reas ld
+ 0xe = error
+ 0xf = bubble idle */
+#define RX_DEBUG_DESC_STATE_MASK 0x0001E000 /* unload desc state machine
+ states:
+ 0x0 = idle desc
+ 0x1 = wait ack
+ 0x9 = wait ack 2
+ 0x2 = fetch desc 1
+ 0xa = fetch desc 2
+ 0x3 = load ptrs
+ 0x4 = wait dma
+ 0x5 = wait ack batch
+ 0x6 = post batch
+ 0x7 = xfr done */
+#define RX_DEBUG_INTR_READ_PTR_MASK 0x30000000 /* interrupt read ptr of the
+ interrupt queue */
+#define RX_DEBUG_INTR_WRITE_PTR_MASK 0xC0000000 /* interrupt write pointer
+ of the interrupt queue */
+
+/* flow control frames are emitted using two PAUSE thresholds:
+ * XOFF PAUSE uses pause time value pre-programmed in the Send PAUSE MAC reg
+ * XON PAUSE uses a pause time of 0. granularity of threshold is 64bytes.
+ * PAUSE thresholds defined in terms of FIFO occupancy and may be translated
+ * into FIFO vacancy using RX_FIFO_SIZE. setting ON will trigger XON frames
+ * when FIFO reaches 0. OFF threshold should not be > size of RX FIFO. max
+ * value is 0x6F.
+ * DEFAULT: 0x00078
+ */
+#define REG_RX_PAUSE_THRESH 0x4020 /* RX pause thresholds */
+#define RX_PAUSE_THRESH_QUANTUM 64
+#define RX_PAUSE_THRESH_OFF_MASK 0x000001FF /* XOFF PAUSE emitted when
+ RX FIFO occupancy >
+ value*64B */
+#define RX_PAUSE_THRESH_OFF_SHIFT 0
+#define RX_PAUSE_THRESH_ON_MASK 0x001FF000 /* XON PAUSE emitted after
+ emitting XOFF PAUSE when RX
+ FIFO occupancy falls below
+ this value*64B. must be
+ < XOFF threshold. if =
+ RX_FIFO_SIZE, XON frames are
+ never emitted. */
+#define RX_PAUSE_THRESH_ON_SHIFT 12
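+
+/* illustrative sketch, not part of the original header: converting byte-based
+ * XOFF/XON thresholds into the 64-byte units the register expects. the caller
+ * must keep on_bytes below off_bytes and off_bytes within the RX FIFO size.
+ * assumes writel() from <linux/io.h> and u32 from <linux/types.h>; example_
+ * names are hypothetical.
+ */
+#if 0	/* example only */
+static inline void example_set_pause_thresh(void __iomem *regs,
+					    u32 off_bytes, u32 on_bytes)
+{
+	u32 val;
+
+	val  = ((off_bytes / RX_PAUSE_THRESH_QUANTUM) << RX_PAUSE_THRESH_OFF_SHIFT)
+		& RX_PAUSE_THRESH_OFF_MASK;
+	val |= ((on_bytes / RX_PAUSE_THRESH_QUANTUM) << RX_PAUSE_THRESH_ON_SHIFT)
+		& RX_PAUSE_THRESH_ON_MASK;
+	writel(val, regs + REG_RX_PAUSE_THRESH);
+}
+#endif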
+
+/* 13-bit register used to control RX desc fetching and intr generation. if 4+
+ * valid RX descriptors are available, Cassini will read 4 at a time.
+ * writing N means that all desc up to *but* excluding N are available. N must
+ * be a multiple of 4 (N % 4 = 0). first desc should be cache-line aligned.
+ * DEFAULT: 0 on reset
+ */
+#define REG_RX_KICK 0x4024 /* RX kick reg */
+
+/* 8KB aligned 64-bit pointer to the base of the RX free/completion rings.
+ * lower 13 bits of the low register are hard-wired to 0.
+ */
+#define REG_RX_DB_LOW 0x4028 /* RX descriptor ring
+ base low */
+#define REG_RX_DB_HI 0x402C /* RX descriptor ring
+ base hi */
+#define REG_RX_CB_LOW 0x4030 /* RX completion ring
+ base low */
+#define REG_RX_CB_HI 0x4034 /* RX completion ring
+ base hi */
+/* 13-bit register indicates the desc used by cassini for receive frames. used
+ * for diagnostic purposes.
+ * DEFAULT: 0 on reset
+ */
+#define REG_RX_COMP 0x4038 /* (ro) RX completion */
+
+/* HEAD and TAIL are used to control RX desc posting and interrupt
+ * generation. hw moves the head register to pass ownership to sw. sw
+ * moves the tail register to pass ownership back to hw. to give all
+ * entries to hw, set TAIL = HEAD. if HEAD and TAIL indicate that no
+ * more entries are available, DMA will pause and an interrupt will be
+ * generated to indicate no more entries are available. sw can use
+ * this interrupt to reduce the # of times it must update the
+ * completion tail register.
+ * DEFAULT: 0 on reset
+ */
+#define REG_RX_COMP_HEAD 0x403C /* RX completion head */
+#define REG_RX_COMP_TAIL 0x4040 /* RX completion tail */
+
+/* values used for receive interrupt blanking. loaded each time the ISR is read
+ * DEFAULT: 0x00000000
+ */
+#define REG_RX_BLANK 0x4044 /* RX blanking register
+ for ISR read */
+#define RX_BLANK_INTR_PKT_MASK 0x000001FF /* RX_DONE intr asserted if
+ this many sets of completion
+ writebacks (up to 2 packets)
+ occur since the last time
+ the ISR was read. 0 = no
+ packet blanking */
+#define RX_BLANK_INTR_PKT_SHIFT 0
+#define RX_BLANK_INTR_TIME_MASK 0x3FFFF000 /* RX_DONE interrupt asserted
+ if that many clocks were
+ counted since last time the
+ ISR was read.
+ each count is 512 core
+ clocks (125MHz). 0 = no
+ time blanking */
+#define RX_BLANK_INTR_TIME_SHIFT 12
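+
+/* illustrative sketch, not part of the original header: a moderate blanking
+ * setting that asserts RX_DONE after 8 completion writebacks or roughly 49us
+ * (12 ticks of 512 core clocks at 125MHz, i.e. about 4.1us per tick). the
+ * particular values are arbitrary examples. assumes writel() from
+ * <linux/io.h>; example_ names are hypothetical.
+ */
+#if 0	/* example only */
+static inline void example_rx_blanking(void __iomem *regs)
+{
+	writel(((8 << RX_BLANK_INTR_PKT_SHIFT) & RX_BLANK_INTR_PKT_MASK) |
+	       ((12 << RX_BLANK_INTR_TIME_SHIFT) & RX_BLANK_INTR_TIME_MASK),
+	       regs + REG_RX_BLANK);
+}
+#endif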
+
+/* values used for interrupt generation based on threshold values of how
+ * many free desc and completion entries are available for hw use.
+ * DEFAULT: 0x00000000
+ */
+#define REG_RX_AE_THRESH 0x4048 /* RX almost empty
+ thresholds */
+#define RX_AE_THRESH_FREE_MASK 0x00001FFF /* RX_BUF_AE will be
+ generated if # desc
+ avail for hw use <=
+ # */
+#define RX_AE_THRESH_FREE_SHIFT 0
+#define RX_AE_THRESH_COMP_MASK 0x0FFFE000 /* RX_COMP_AE will be
+ generated if # of
+ completion entries
+ avail for hw use <=
+ # */
+#define RX_AE_THRESH_COMP_SHIFT 13
+
+/* probabilities for random early drop (RED) thresholds on a FIFO threshold
+ * basis. probability should increase when the FIFO level increases. control
+ * packets are never dropped and not counted in stats. probability programmed
+ * on a 12.5% granularity. e.g., 0x1 = 1/8 packets dropped.
+ * DEFAULT: 0x00000000
+ */
+#define REG_RX_RED 0x404C /* RX random early detect enable */
+#define RX_RED_4K_6K_FIFO_MASK 0x000000FF /* 4KB < FIFO thresh < 6KB */
+#define RX_RED_6K_8K_FIFO_MASK 0x0000FF00 /* 6KB < FIFO thresh < 8KB */
+#define RX_RED_8K_10K_FIFO_MASK 0x00FF0000 /* 8KB < FIFO thresh < 10KB */
+#define RX_RED_10K_12K_FIFO_MASK 0xFF000000 /* 10KB < FIFO thresh < 12KB */
+
+/* FIFO fullness levels for RX FIFO, RX control FIFO, and RX IPP FIFO.
+ * RX control FIFO = # of packets in RX FIFO.
+ * DEFAULT: 0x0
+ */
+#define REG_RX_FIFO_FULLNESS 0x4050 /* (ro) RX FIFO fullness */
+#define RX_FIFO_FULLNESS_RX_FIFO_MASK 0x3FF80000 /* level w/ 8B granularity */
+#define RX_FIFO_FULLNESS_IPP_FIFO_MASK 0x0007FF00 /* level w/ 8B granularity */
+#define RX_FIFO_FULLNESS_RX_PKT_MASK 0x000000FF /* # packets in RX FIFO */
+#define REG_RX_IPP_PACKET_COUNT 0x4054 /* RX IPP packet counter */
+#define REG_RX_WORK_DMA_PTR_LOW 0x4058 /* RX working DMA ptr low */
+#define REG_RX_WORK_DMA_PTR_HI 0x405C /* RX working DMA ptr
+ high */
+
+/* BIST testing of RX FIFO, RX control FIFO, and RX IPP FIFO. only RX BIST
+ * START/COMPLETE is writeable. START will clear when the BIST has completed
+ * checking all 17 RAMS.
+ * DEFAULT: 0bxxxx xxxxx xxxx xxxx xxxx x000 0000 0000 00x0
+ */
+#define REG_RX_BIST 0x4060 /* (ro) RX BIST */
+#define RX_BIST_32A_PASS 0x80000000 /* RX FIFO 32A passed */
+#define RX_BIST_33A_PASS 0x40000000 /* RX FIFO 33A passed */
+#define RX_BIST_32B_PASS 0x20000000 /* RX FIFO 32B passed */
+#define RX_BIST_33B_PASS 0x10000000 /* RX FIFO 33B passed */
+#define RX_BIST_32C_PASS 0x08000000 /* RX FIFO 32C passed */
+#define RX_BIST_33C_PASS 0x04000000 /* RX FIFO 33C passed */
+#define RX_BIST_IPP_32A_PASS 0x02000000 /* RX IPP FIFO 32A passed */
+#define RX_BIST_IPP_33A_PASS 0x01000000 /* RX IPP FIFO 33A passed */
+#define RX_BIST_IPP_32B_PASS 0x00800000 /* RX IPP FIFO 32B passed */
+#define RX_BIST_IPP_33B_PASS 0x00400000 /* RX IPP FIFO 33B passed */
+#define RX_BIST_IPP_32C_PASS 0x00200000 /* RX IPP FIFO 32C passed */
+#define RX_BIST_IPP_33C_PASS 0x00100000 /* RX IPP FIFO 33C passed */
+#define RX_BIST_CTRL_32_PASS 0x00800000 /* RX CTRL FIFO 32 passed */
+#define RX_BIST_CTRL_33_PASS 0x00400000 /* RX CTRL FIFO 33 passed */
+#define RX_BIST_REAS_26A_PASS 0x00200000 /* RX Reas 26A passed */
+#define RX_BIST_REAS_26B_PASS 0x00100000 /* RX Reas 26B passed */
+#define RX_BIST_REAS_27_PASS 0x00080000 /* RX Reas 27 passed */
+#define RX_BIST_STATE_MASK 0x00078000 /* BIST state machine */
+#define RX_BIST_SUMMARY 0x00000002 /* when BIST complete,
+ summary pass bit
+ contains AND of BIST
+ results of all 17
+ RAMS */
+#define RX_BIST_START 0x00000001 /* write 1 to start
+ BIST. self clears
+ on completion. */
+
+/* next location in RX CTRL FIFO that will be loaded w/ data from the RX IPP,
+ * or read to retrieve packet control info.
+ * DEFAULT: 0
+ */
+#define REG_RX_CTRL_FIFO_WRITE_PTR 0x4064 /* (ro) RX control FIFO
+ write ptr */
+#define REG_RX_CTRL_FIFO_READ_PTR 0x4068 /* (ro) RX control FIFO read
+ ptr */
+
+/* receive interrupt blanking. loaded each time interrupt alias register is
+ * read.
+ * DEFAULT: 0x0
+ */
+#define REG_RX_BLANK_ALIAS_READ 0x406C /* RX blanking register for
+ alias read */
+#define RX_BAR_INTR_PACKET_MASK 0x000001FF /* assert RX_DONE if #
+ completion writebacks
+ > # since last ISR
+ read. 0 = no
+ blanking. up to 2
+ packets per
+ completion wb. */
+#define RX_BAR_INTR_TIME_MASK 0x3FFFF000 /* assert RX_DONE if #
+ clocks > # since last
+ ISR read. each count
+ is 512 core clocks
+ (125MHz). 0 = no
+ blanking. */
+
+/* diagnostic access to RX FIFO. 32 LSB accessed via DATA_LOW. 32 MSB accessed
+ * via DATA_HI_T0 or DATA_HI_T1. TAG reads the tag bit. writing HI_T0
+ * will unset the tag bit while writing HI_T1 will set the tag bit. to reset
+ * to normal operation after diagnostics, write to address location 0x0.
+ * RX_DMA_EN bit must be set to 0x0 for RX FIFO PIO access. DATA_HI should
+ * be the last write access of a write sequence.
+ * DEFAULT: undefined
+ */
+#define REG_RX_FIFO_ADDR 0x4080 /* RX FIFO address */
+#define REG_RX_FIFO_TAG 0x4084 /* RX FIFO tag */
+#define REG_RX_FIFO_DATA_LOW 0x4088 /* RX FIFO data low */
+#define REG_RX_FIFO_DATA_HI_T0 0x408C /* RX FIFO data high T0 */
+#define REG_RX_FIFO_DATA_HI_T1 0x4090 /* RX FIFO data high T1 */
+
+/* diagnostic access to RX CTRL FIFO. 8-bit FIFO_ADDR holds address of
+ * 81 bit control entry and 6 bit flow id. LOW and MID are both 32-bit
+ * accesses. HI is 7-bits with 6-bit flow id and 1 bit control
+ * word. RX_DMA_EN must be 0 for RX CTRL FIFO PIO access. DATA_HI
+ * should be last write access of the write sequence.
+ * DEFAULT: undefined
+ */
+#define REG_RX_CTRL_FIFO_ADDR 0x4094 /* RX Control FIFO and
+ Batching FIFO addr */
+#define REG_RX_CTRL_FIFO_DATA_LOW 0x4098 /* RX Control FIFO data
+ low */
+#define REG_RX_CTRL_FIFO_DATA_MID 0x409C /* RX Control FIFO data
+ mid */
+#define REG_RX_CTRL_FIFO_DATA_HI 0x4100 /* RX Control FIFO data
+ hi and flow id */
+#define RX_CTRL_FIFO_DATA_HI_CTRL 0x0001 /* upper bit of ctrl word */
+#define RX_CTRL_FIFO_DATA_HI_FLOW_MASK 0x007E /* flow id */
+
+/* diagnostic access to RX IPP FIFO. same semantics as RX_FIFO.
+ * DEFAULT: undefined
+ */
+#define REG_RX_IPP_FIFO_ADDR 0x4104 /* RX IPP FIFO address */
+#define REG_RX_IPP_FIFO_TAG 0x4108 /* RX IPP FIFO tag */
+#define REG_RX_IPP_FIFO_DATA_LOW 0x410C /* RX IPP FIFO data low */
+#define REG_RX_IPP_FIFO_DATA_HI_T0 0x4110 /* RX IPP FIFO data high
+ T0 */
+#define REG_RX_IPP_FIFO_DATA_HI_T1 0x4114 /* RX IPP FIFO data high
+ T1 */
+
+/* 64-bit pointer to receive data buffer in host memory used for headers and
+ * small packets. MSB in high register. loaded by DMA state machine and
+ * increments as DMA writes receive data. only 50 LSB are incremented. top
+ * 13 bits taken from RX descriptor.
+ * DEFAULT: undefined
+ */
+#define REG_RX_HEADER_PAGE_PTR_LOW 0x4118 /* (ro) RX header page ptr
+ low */
+#define REG_RX_HEADER_PAGE_PTR_HI 0x411C /* (ro) RX header page ptr
+ high */
+#define REG_RX_MTU_PAGE_PTR_LOW 0x4120 /* (ro) RX MTU page pointer
+ low */
+#define REG_RX_MTU_PAGE_PTR_HI 0x4124 /* (ro) RX MTU page pointer
+ high */
+
+/* PIO diagnostic access to RX reassembly DMA Table RAM. 6-bit register holds
+ * one of 64 79-bit locations in the RX Reassembly DMA table and the addr of
+ * one of the 64 byte locations in the Batching table. LOW holds 32 LSB.
+ * MID holds the next 32 bits. HIGH holds the 15 MSB. RX_DMA_EN must be set
+ * to 0 for PIO access. DATA_HIGH should be last write of write sequence.
+ * layout:
+ * reassembly ptr [78:15] | reassembly index [14:1] | reassembly entry valid [0]
+ * DEFAULT: undefined
+ */
+#define REG_RX_TABLE_ADDR 0x4128 /* RX reassembly DMA table
+ address */
+#define RX_TABLE_ADDR_MASK 0x0000003F /* address mask */
+
+#define REG_RX_TABLE_DATA_LOW 0x412C /* RX reassembly DMA table
+ data low */
+#define REG_RX_TABLE_DATA_MID 0x4130 /* RX reassembly DMA table
+ data mid */
+#define REG_RX_TABLE_DATA_HI 0x4134 /* RX reassembly DMA table
+ data high */
+
+/* cassini+ only */
+/* 8KB aligned 64-bit pointer to base of RX rings. lower 13 bits hardwired to
+ * 0. same semantics as primary desc/complete rings.
+ */
+#define REG_PLUS_RX_DB1_LOW 0x4200 /* RX descriptor ring
+ 2 base low */
+#define REG_PLUS_RX_DB1_HI 0x4204 /* RX descriptor ring
+ 2 base high */
+#define REG_PLUS_RX_CB1_LOW 0x4208 /* RX completion ring
+ 2 base low. 4 total */
+#define REG_PLUS_RX_CB1_HI 0x420C /* RX completion ring
+ 2 base high. 4 total */
+#define REG_PLUS_RX_CBN_LOW(x) (REG_PLUS_RX_CB1_LOW + 8*((x) - 1))
+#define REG_PLUS_RX_CBN_HI(x) (REG_PLUS_RX_CB1_HI + 8*((x) - 1))
+#define REG_PLUS_RX_KICK1 0x4220 /* RX Kick 2 register */
+#define REG_PLUS_RX_COMP1 0x4224 /* (ro) RX completion 2
+ reg */
+#define REG_PLUS_RX_COMP1_HEAD 0x4228 /* (ro) RX completion 2
+ head reg. 4 total. */
+#define REG_PLUS_RX_COMP1_TAIL 0x422C /* RX completion 2
+ tail reg. 4 total. */
+#define REG_PLUS_RX_COMPN_HEAD(x) (REG_PLUS_RX_COMP1_HEAD + 8*((x) - 1))
+#define REG_PLUS_RX_COMPN_TAIL(x) (REG_PLUS_RX_COMP1_TAIL + 8*((x) - 1))
+#define REG_PLUS_RX_AE1_THRESH 0x4240 /* RX almost empty 2
+ thresholds */
+#define RX_AE1_THRESH_FREE_MASK RX_AE_THRESH_FREE_MASK
+#define RX_AE1_THRESH_FREE_SHIFT RX_AE_THRESH_FREE_SHIFT
+
+/** header parser registers **/
+
+/* RX parser configuration register.
+ * DEFAULT: 0x1651004
+ */
+#define REG_HP_CFG 0x4140 /* header parser
+ configuration reg */
+#define HP_CFG_PARSE_EN 0x00000001 /* enable header parsing */
+#define HP_CFG_NUM_CPU_MASK 0x000000FC /* # processors
+ 0 = 64. 0x3f = 63 */
+#define HP_CFG_NUM_CPU_SHIFT 2
+#define HP_CFG_SYN_INC_MASK 0x00000100 /* SYN bit won't increment
+ TCP seq # by one when
+ stored in FDBM */
+#define HP_CFG_TCP_THRESH_MASK 0x000FFE00 /* # bytes of TCP data
+ needed to be considered
+ for reassembly */
+#define HP_CFG_TCP_THRESH_SHIFT 9
+
+/* access to RX Instruction RAM. 5-bit register/counter holds addr
+ * of 39 bit entry to be read/written. 32 LSB in _DATA_LOW. 7 MSB in _DATA_HI.
+ * RX_DMA_EN must be 0 for RX instr PIO access. DATA_HI should be last access
+ * of sequence.
+ * DEFAULT: undefined
+ */
+#define REG_HP_INSTR_RAM_ADDR 0x4144 /* HP instruction RAM
+ address */
+#define HP_INSTR_RAM_ADDR_MASK 0x01F /* 5-bit mask */
+#define REG_HP_INSTR_RAM_DATA_LOW 0x4148 /* HP instruction RAM
+ data low */
+#define HP_INSTR_RAM_LOW_OUTMASK_MASK 0x0000FFFF
+#define HP_INSTR_RAM_LOW_OUTMASK_SHIFT 0
+#define HP_INSTR_RAM_LOW_OUTSHIFT_MASK 0x000F0000
+#define HP_INSTR_RAM_LOW_OUTSHIFT_SHIFT 16
+#define HP_INSTR_RAM_LOW_OUTEN_MASK 0x00300000
+#define HP_INSTR_RAM_LOW_OUTEN_SHIFT 20
+#define HP_INSTR_RAM_LOW_OUTARG_MASK 0xFFC00000
+#define HP_INSTR_RAM_LOW_OUTARG_SHIFT 22
+#define REG_HP_INSTR_RAM_DATA_MID 0x414C /* HP instruction RAM
+ data mid */
+#define HP_INSTR_RAM_MID_OUTARG_MASK 0x00000003
+#define HP_INSTR_RAM_MID_OUTARG_SHIFT 0
+#define HP_INSTR_RAM_MID_OUTOP_MASK 0x0000003C
+#define HP_INSTR_RAM_MID_OUTOP_SHIFT 2
+#define HP_INSTR_RAM_MID_FNEXT_MASK 0x000007C0
+#define HP_INSTR_RAM_MID_FNEXT_SHIFT 6
+#define HP_INSTR_RAM_MID_FOFF_MASK 0x0003F800
+#define HP_INSTR_RAM_MID_FOFF_SHIFT 11
+#define HP_INSTR_RAM_MID_SNEXT_MASK 0x007C0000
+#define HP_INSTR_RAM_MID_SNEXT_SHIFT 18
+#define HP_INSTR_RAM_MID_SOFF_MASK 0x3F800000
+#define HP_INSTR_RAM_MID_SOFF_SHIFT 23
+#define HP_INSTR_RAM_MID_OP_MASK 0xC0000000
+#define HP_INSTR_RAM_MID_OP_SHIFT 30
+#define REG_HP_INSTR_RAM_DATA_HI 0x4150 /* HP instruction RAM
+ data high */
+#define HP_INSTR_RAM_HI_VAL_MASK 0x0000FFFF
+#define HP_INSTR_RAM_HI_VAL_SHIFT 0
+#define HP_INSTR_RAM_HI_MASK_MASK 0xFFFF0000
+#define HP_INSTR_RAM_HI_MASK_SHIFT 16
+
+/* PIO access into RX Header parser data RAM and flow database.
+ * 11-bit register. Data fills the LSB portion of bus if less than 32 bits.
+ * DATA_RAM: write RAM_FDB_DATA with index to access DATA_RAM.
+ * RAM bytes = 4*(x - 1) + [3:0]. e.g., 0 -> [3:0], 31 -> [123:120]
+ * FLOWDB: write DATA_RAM_FDB register and then read/write FDB1-12 to access
+ * flow database.
+ * RX_DMA_EN must be 0 for RX parser RAM PIO access. RX Parser RAM data reg
+ * should be the last write access of the write sequence.
+ * DEFAULT: undefined
+ */
+#define REG_HP_DATA_RAM_FDB_ADDR 0x4154 /* HP data and FDB
+ RAM address */
+#define HP_DATA_RAM_FDB_DATA_MASK 0x001F /* select 1 of 86 byte
+ locations in header
+ parser data ram to
+ read/write */
+#define HP_DATA_RAM_FDB_FDB_MASK 0x3F00 /* 1 of 64 353-bit locations
+ in the flow database */
+#define REG_HP_DATA_RAM_DATA 0x4158 /* HP data RAM data */
+
+/* HP flow database registers: 1 - 12, 0x415C - 0x4188, 4 8-bit bytes
+ * FLOW_DB(1) = IP_SA[127:96], FLOW_DB(2) = IP_SA[95:64]
+ * FLOW_DB(3) = IP_SA[63:32], FLOW_DB(4) = IP_SA[31:0]
+ * FLOW_DB(5) = IP_DA[127:96], FLOW_DB(6) = IP_DA[95:64]
+ * FLOW_DB(7) = IP_DA[63:32], FLOW_DB(8) = IP_DA[31:0]
+ * FLOW_DB(9) = {TCP_SP[15:0],TCP_DP[15:0]}
+ * FLOW_DB(10) = bit 0 has value for flow valid
+ * FLOW_DB(11) = TCP_SEQ[63:32], FLOW_DB(12) = TCP_SEQ[31:0]
+ */
+#define REG_HP_FLOW_DB0 0x415C /* HP flow database 1 reg */
+#define REG_HP_FLOW_DBN(x) (REG_HP_FLOW_DB0 + (x)*4)
+
+/* diagnostics for RX Header Parser block.
+ * ASUN: the header parser state machine register is used for diagnostic
+ * purposes. however, the spec doesn't have any details on it.
+ */
+#define REG_HP_STATE_MACHINE 0x418C /* (ro) HP state machine */
+#define REG_HP_STATUS0 0x4190 /* (ro) HP status 1 */
+#define HP_STATUS0_SAP_MASK 0xFFFF0000 /* SAP */
+#define HP_STATUS0_L3_OFF_MASK 0x0000FE00 /* L3 offset */
+#define HP_STATUS0_LB_CPUNUM_MASK 0x000001F8 /* load balancing CPU
+ number */
+#define HP_STATUS0_HRP_OPCODE_MASK 0x00000007 /* HRP opcode */
+
+#define REG_HP_STATUS1 0x4194 /* (ro) HP status 2 */
+#define HP_STATUS1_ACCUR2_MASK 0xE0000000 /* accu R2[6:4] */
+#define HP_STATUS1_FLOWID_MASK 0x1F800000 /* flow id */
+#define HP_STATUS1_TCP_OFF_MASK 0x007F0000 /* tcp payload offset */
+#define HP_STATUS1_TCP_SIZE_MASK 0x0000FFFF /* tcp payload size */
+
+#define REG_HP_STATUS2 0x4198 /* (ro) HP status 3 */
+#define HP_STATUS2_ACCUR2_MASK 0xF0000000 /* accu R2[3:0] */
+#define HP_STATUS2_CSUM_OFF_MASK 0x07F00000 /* checksum start
+ offset */
+#define HP_STATUS2_ACCUR1_MASK 0x000FE000 /* accu R1 */
+#define HP_STATUS2_FORCE_DROP 0x00001000 /* force drop */
+#define HP_STATUS2_BWO_REASSM 0x00000800 /* batching w/o
+ reassembly */
+#define HP_STATUS2_JH_SPLIT_EN 0x00000400 /* jumbo header split
+ enable */
+#define HP_STATUS2_FORCE_TCP_NOCHECK 0x00000200 /* force tcp no payload
+ check */
+#define HP_STATUS2_DATA_MASK_ZERO 0x00000100 /* mask of data length
+ equal to zero */
+#define HP_STATUS2_FORCE_TCP_CHECK 0x00000080 /* force tcp payload
+ chk */
+#define HP_STATUS2_MASK_TCP_THRESH 0x00000040 /* mask of payload
+ threshold */
+#define HP_STATUS2_NO_ASSIST 0x00000020 /* no assist */
+#define HP_STATUS2_CTRL_PACKET_FLAG 0x00000010 /* control packet flag */
+#define HP_STATUS2_TCP_FLAG_CHECK 0x00000008 /* tcp flag check */
+#define HP_STATUS2_SYN_FLAG 0x00000004 /* syn flag */
+#define HP_STATUS2_TCP_CHECK 0x00000002 /* tcp payload chk */
+#define HP_STATUS2_TCP_NOCHECK 0x00000001 /* tcp no payload chk */
+
+/* BIST for header parser(HP) and flow database memories (FDBM). set _START
+ * to start BIST. controller clears _START on completion. _START can also
+ * be cleared to force termination of BIST. a bit set indicates that that
+ * memory passed its BIST.
+ */
+#define REG_HP_RAM_BIST 0x419C /* HP RAM BIST reg */
+#define HP_RAM_BIST_HP_DATA_PASS 0x80000000 /* HP data ram */
+#define HP_RAM_BIST_HP_INSTR0_PASS 0x40000000 /* HP instr ram 0 */
+#define HP_RAM_BIST_HP_INSTR1_PASS 0x20000000 /* HP instr ram 1 */
+#define HP_RAM_BIST_HP_INSTR2_PASS 0x10000000 /* HP instr ram 2 */
+#define HP_RAM_BIST_FDBM_AGE0_PASS 0x08000000 /* FDBM aging RAM0 */
+#define HP_RAM_BIST_FDBM_AGE1_PASS 0x04000000 /* FDBM aging RAM1 */
+#define HP_RAM_BIST_FDBM_FLOWID00_PASS 0x02000000 /* FDBM flowid RAM0
+ bank 0 */
+#define HP_RAM_BIST_FDBM_FLOWID10_PASS 0x01000000 /* FDBM flowid RAM1
+ bank 0 */
+#define HP_RAM_BIST_FDBM_FLOWID20_PASS 0x00800000 /* FDBM flowid RAM2
+ bank 0 */
+#define HP_RAM_BIST_FDBM_FLOWID30_PASS 0x00400000 /* FDBM flowid RAM3
+ bank 0 */
+#define HP_RAM_BIST_FDBM_FLOWID01_PASS 0x00200000 /* FDBM flowid RAM0
+ bank 1 */
+#define HP_RAM_BIST_FDBM_FLOWID11_PASS 0x00100000 /* FDBM flowid RAM1
+ bank 1 */
+#define HP_RAM_BIST_FDBM_FLOWID21_PASS 0x00080000 /* FDBM flowid RAM2
+ bank 1 */
+#define HP_RAM_BIST_FDBM_FLOWID31_PASS 0x00040000 /* FDBM flowid RAM3
+ bank 1 */
+#define HP_RAM_BIST_FDBM_TCPSEQ_PASS 0x00020000 /* FDBM tcp sequence
+ RAM */
+#define HP_RAM_BIST_SUMMARY 0x00000002 /* all BIST tests */
+#define HP_RAM_BIST_START 0x00000001 /* start/stop BIST */
+
+
+/** MAC registers. **/
+/* reset bits are set using a PIO write and self-cleared after the command
+ * execution has completed.
+ */
+#define REG_MAC_TX_RESET 0x6000 /* TX MAC software reset
+ command (default: 0x0) */
+#define REG_MAC_RX_RESET 0x6004 /* RX MAC software reset
+ command (default: 0x0) */
+/* execute a pause flow control frame transmission
+ DEFAULT: 0x0XXXX */
+#define REG_MAC_SEND_PAUSE 0x6008 /* send pause command reg */
+#define MAC_SEND_PAUSE_TIME_MASK 0x0000FFFF /* value of pause time
+ to be sent on network
+ in units of slot
+ times */
+#define MAC_SEND_PAUSE_SEND 0x00010000 /* send pause flow ctrl
+ frame on network */
+
+/* bit set indicates that event occurred. auto-cleared when status register
+ * is read and have corresponding mask bits in mask register. events will
+ * trigger an interrupt if the corresponding mask bit is 0.
+ * status register default: 0x00000000
+ * mask register default = 0xFFFFFFFF on reset
+ */
+#define REG_MAC_TX_STATUS 0x6010 /* TX MAC status reg */
+#define MAC_TX_FRAME_XMIT 0x0001 /* successful frame
+ transmission */
+#define MAC_TX_UNDERRUN 0x0002 /* terminated frame
+ transmission due to
+ data starvation in the
+ xmit data path */
+#define MAC_TX_MAX_PACKET_ERR 0x0004 /* frame exceeds max allowed
+ length passed to TX MAC
+ by the DMA engine */
+#define MAC_TX_COLL_NORMAL 0x0008 /* rollover of the normal
+ collision counter */
+#define MAC_TX_COLL_EXCESS 0x0010 /* rollover of the excessive
+ collision counter */
+#define MAC_TX_COLL_LATE 0x0020 /* rollover of the late
+ collision counter */
+#define MAC_TX_COLL_FIRST 0x0040 /* rollover of the first
+ collision counter */
+#define MAC_TX_DEFER_TIMER 0x0080 /* rollover of the defer
+ timer */
+#define MAC_TX_PEAK_ATTEMPTS 0x0100 /* rollover of the peak
+ attempts counter */
+
+#define REG_MAC_RX_STATUS 0x6014 /* RX MAC status reg */
+#define MAC_RX_FRAME_RECV 0x0001 /* successful receipt of
+ a frame */
+#define MAC_RX_OVERFLOW 0x0002 /* dropped frame due to
+ RX FIFO overflow */
+#define MAC_RX_FRAME_COUNT 0x0004 /* rollover of receive frame
+ counter */
+#define MAC_RX_ALIGN_ERR 0x0008 /* rollover of alignment
+ error counter */
+#define MAC_RX_CRC_ERR 0x0010 /* rollover of crc error
+ counter */
+#define MAC_RX_LEN_ERR 0x0020 /* rollover of length
+ error counter */
+#define MAC_RX_VIOL_ERR 0x0040 /* rollover of code
+ violation error */
+
+/* DEFAULT: 0xXXXX0000 on reset */
+#define REG_MAC_CTRL_STATUS 0x6018 /* MAC control status reg */
+#define MAC_CTRL_PAUSE_RECEIVED 0x00000001 /* successful
+ reception of a
+ pause control
+ frame */
+#define MAC_CTRL_PAUSE_STATE 0x00000002 /* MAC has made a
+ transition from
+ "not paused" to
+ "paused" */
+#define MAC_CTRL_NOPAUSE_STATE 0x00000004 /* MAC has made a
+ transition from
+ "paused" to "not
+ paused" */
+#define MAC_CTRL_PAUSE_TIME_MASK 0xFFFF0000 /* value of pause time
+ operand that was
+ received in the last
+ pause flow control
+ frame */
+
+/* layout identical to TX MAC[8:0] */
+#define REG_MAC_TX_MASK 0x6020 /* TX MAC mask reg */
+/* layout identical to RX MAC[6:0] */
+#define REG_MAC_RX_MASK 0x6024 /* RX MAC mask reg */
+/* layout identical to CTRL MAC[2:0] */
+#define REG_MAC_CTRL_MASK 0x6028 /* MAC control mask reg */
+
+/* to ensure proper operation, CFG_EN must be cleared to 0 and a delay
+ * imposed before writes to other bits in the TX_MAC_CFG register or any of
+ * the MAC parameters is performed. delay dependent upon time required to
+ * transmit a maximum size frame (= MAC_FRAMESIZE_MAX*8/Mbps). e.g.,
+ * the delay for a 1518-byte frame on a 100Mbps network is 125us.
+ * alternatively, just poll TX_CFG_EN until it reads back as 0.
+ * NOTE: on half-duplex 1Gbps, TX_CFG_CARRIER_EXTEND and
+ * RX_CFG_CARRIER_EXTEND should be set and the SLOT_TIME register should
+ * be 0x200 (slot time of 512 bytes)
+ */
+#define REG_MAC_TX_CFG 0x6030 /* TX MAC config reg */
+#define MAC_TX_CFG_EN 0x0001 /* enable TX MAC. 0 will
+ force TXMAC state
+ machine to remain in
+ idle state or to
+ transition to idle state
+ on completion of an
+ ongoing packet. */
+#define MAC_TX_CFG_IGNORE_CARRIER 0x0002 /* disable CSMA/CD deferral
+ process. set to 1 when
+ full duplex and 0 when
+ half duplex */
+#define MAC_TX_CFG_IGNORE_COLL 0x0004 /* disable CSMA/CD backoff
+ algorithm. set to 1 when
+ full duplex and 0 when
+ half duplex */
+#define MAC_TX_CFG_IPG_EN 0x0008 /* enable extension of the
+ Rx-to-TX IPG. after
+ receiving a frame, TX
+ MAC will reset its
+ deferral process to
+ carrier sense for the
+ amount of time = IPG0 +
+ IPG1 and commit to
+ transmission for time
+ specified in IPG2. when
+ 0 or when xmitting frames
+ back-to-back (Tx-to-Tx
+ IPG), TX MAC ignores
+ IPG0 and will only use
+ IPG1 for deferral time.
+ IPG2 still used. */
+#define MAC_TX_CFG_NEVER_GIVE_UP_EN 0x0010 /* TX MAC will not easily
+ give up on frame
+ xmission. if backoff
+ algorithm reaches the
+ ATTEMPT_LIMIT, it will
+ clear attempts counter
+ and continue trying to
+ send the frame as
+ specified by
+ GIVE_UP_LIM. when 0,
+ TX MAC will execute
+ standard CSMA/CD prot. */
+#define MAC_TX_CFG_NEVER_GIVE_UP_LIM 0x0020 /* when set, TX MAC will
+ continue to try to xmit
+ until successful. when
+ 0, TX MAC will continue
+ to try xmitting until
+ successful or backoff
+ algorithm reaches
+ ATTEMPT_LIMIT*16 */
+#define MAC_TX_CFG_NO_BACKOFF 0x0040 /* modify CSMA/CD to disable
+ backoff algorithm. TX
+ MAC will not back off
+ after a xmission attempt
+ that resulted in a
+ collision. */
+#define MAC_TX_CFG_SLOW_DOWN 0x0080 /* modify CSMA/CD so that
+ deferral process is reset
+ in response to carrier
+ sense during the entire
+ duration of IPG. TX MAC
+ will only commit to frame
+ xmission after frame
+ xmission has actually
+ begun. */
+#define MAC_TX_CFG_NO_FCS 0x0100 /* TX MAC will not generate
+ CRC for all xmitted
+ packets. when clear, CRC
+ generation is dependent
+ upon NO_CRC bit in the
+ xmit control word from
+ TX DMA */
+#define MAC_TX_CFG_CARRIER_EXTEND 0x0200 /* enables xmit part of the
+ carrier extension
+ feature. this allows for
+ longer collision domains
+ by extending the carrier
+ and collision window
+ from the end of FCS until
+ the end of the slot time
+ if necessary. Required
+ for half-duplex at 1Gbps,
+ clear otherwise. */
+
+/* when CRC is not stripped, reassembly packets will not contain the CRC.
+ * the CRC is stripped by the HRP because it reassembles layer 4 data, and the
+ * CRC is layer 2. however, non-reassembly packets will still contain the CRC
+ * when passed to the host. to ensure proper operation, need to wait 3.2ms
+ * after clearing RX_CFG_EN before writing to any other RX MAC registers
+ * or other MAC parameters. alternatively, poll RX_CFG_EN until it clears
+ * to 0. similarly, HASH_FILTER_EN and ADDR_FILTER_EN have the same
+ * restrictions as CFG_EN.
+ */
+#define REG_MAC_RX_CFG 0x6034 /* RX MAC config reg */
+#define MAC_RX_CFG_EN 0x0001 /* enable RX MAC */
+#define MAC_RX_CFG_STRIP_PAD 0x0002 /* always program to 0.
+ feature not supported */
+#define MAC_RX_CFG_STRIP_FCS 0x0004 /* RX MAC will strip the
+ last 4 bytes of a
+ received frame. */
+#define MAC_RX_CFG_PROMISC_EN 0x0008 /* promiscuous mode */
+#define MAC_RX_CFG_PROMISC_GROUP_EN 0x0010 /* accept all valid
+ multicast frames (group
+ bit in DA field set) */
+#define MAC_RX_CFG_HASH_FILTER_EN 0x0020 /* use hash table to filter
+ multicast addresses */
+#define MAC_RX_CFG_ADDR_FILTER_EN 0x0040 /* cause RX MAC to use
+ address filtering regs
+ to filter both unicast
+ and multicast
+ addresses */
+#define MAC_RX_CFG_DISABLE_DISCARD 0x0080 /* pass errored frames to
+ RX DMA by setting BAD
+ bit but not Abort bit
+ in the status. CRC,
+ framing, and length errs
+ will not increment
+ error counters. frames
+ which don't match dest
+ addr will be passed up
+ w/ BAD bit set. */
+#define MAC_RX_CFG_CARRIER_EXTEND 0x0100 /* enable reception of
+ packet bursts generated
+ by carrier extension
+ with packet bursting
+ senders. only applies
+ to half-duplex 1Gbps */
+
+/* DEFAULT: 0x0 */
+#define REG_MAC_CTRL_CFG 0x6038 /* MAC control config reg */
+#define MAC_CTRL_CFG_SEND_PAUSE_EN 0x0001 /* respond to requests for
+ sending pause flow ctrl
+ frames */
+#define MAC_CTRL_CFG_RECV_PAUSE_EN 0x0002 /* respond to received
+ pause flow ctrl frames */
+#define MAC_CTRL_CFG_PASS_CTRL 0x0004 /* pass valid MAC ctrl
+ packets to RX DMA */
+
+/* to ensure proper operation, a global initialization sequence should be
+ * performed when a loopback config is entered or exited. if programmed after
+ * a hw or global sw reset, RX/TX MAC software reset and initialization
+ * should be done to ensure stable clocking.
+ * DEFAULT: 0x0
+ */
+#define REG_MAC_XIF_CFG 0x603C /* XIF config reg */
+#define MAC_XIF_TX_MII_OUTPUT_EN 0x0001 /* enable output drivers
+ on MII xmit bus */
+#define MAC_XIF_MII_INT_LOOPBACK 0x0002 /* loopback GMII xmit data
+ path to GMII recv data
+ path. phy mode register
+ clock selection must be
+ set to GMII mode and
+ GMII_MODE should be set
+ to 1. in loopback mode,
+ REFCLK will drive the
+ entire mac core. 0 for
+ normal operation. */
+#define MAC_XIF_DISABLE_ECHO 0x0004 /* disables receive data
+ path during packet
+ xmission. clear to 0
+ in any full duplex mode,
+ in any loopback mode,
+ or in half-duplex SERDES
+ or SLINK modes. set when
+ in half-duplex when
+ using external phy. */
+#define MAC_XIF_GMII_MODE 0x0008 /* MAC operates with GMII
+ clocks and datapath */
+#define MAC_XIF_MII_BUFFER_OUTPUT_EN 0x0010 /* MII_BUF_EN pin. enable
+ external tristate buffer
+ on the MII receive
+ bus. */
+#define MAC_XIF_LINK_LED 0x0020 /* LINKLED# active (low) */
+#define MAC_XIF_FDPLX_LED 0x0040 /* FDPLXLED# active (low) */
+
+#define REG_MAC_IPG0 0x6040 /* inter-packet gap0 reg.
+ recommended: 0x00 */
+#define REG_MAC_IPG1 0x6044 /* inter-packet gap1 reg
+ recommended: 0x08 */
+#define REG_MAC_IPG2 0x6048 /* inter-packet gap2 reg
+ recommended: 0x04 */
+#define REG_MAC_SLOT_TIME 0x604C /* slot time reg
+ recommended: 0x40 */
+#define REG_MAC_FRAMESIZE_MIN 0x6050 /* min frame size reg
+ recommended: 0x40 */
+
+/* FRAMESIZE_MAX holds both the max frame size as well as the max burst size.
+ * recommended value: 0x2000.05EE (max burst = 0x2000, max frame = 0x05EE = 1518 bytes)
+ */
+#define REG_MAC_FRAMESIZE_MAX 0x6054 /* max frame size reg */
+#define MAC_FRAMESIZE_MAX_BURST_MASK 0x3FFF0000 /* max burst size */
+#define MAC_FRAMESIZE_MAX_BURST_SHIFT 16
+#define MAC_FRAMESIZE_MAX_FRAME_MASK 0x00007FFF /* max frame size */
+#define MAC_FRAMESIZE_MAX_FRAME_SHIFT 0
+#define REG_MAC_PA_SIZE 0x6058 /* PA size reg. number of
+ preamble bytes that the
+ TX MAC will xmit at the
+ beginning of each frame
+ value should be 2 or
+ greater. recommended
+ value: 0x07 */
+#define REG_MAC_JAM_SIZE 0x605C /* jam size reg. duration
+ of jam in units of media
+ byte time. recommended
+ value: 0x04 */
+#define REG_MAC_ATTEMPT_LIMIT 0x6060 /* attempt limit reg. #
+ of attempts TX MAC will
+ make to xmit a frame
+ before it resets its
+ attempts counter. after
+ the limit has been
+ reached, TX MAC may or
+ may not drop the frame
+ dependent upon value
+ in TX_MAC_CFG.
+ recommended
+ value: 0x10 */
+#define REG_MAC_CTRL_TYPE 0x6064 /* MAC control type reg.
+ type field of a MAC
+ ctrl frame. recommended
+ value: 0x8808 */
+
+/* mac address registers: 0 - 44, 0x6080 - 0x6130, 4 8-bit bytes.
+ * register contains comparison
+ * 0 16 MSB of primary MAC addr [47:32] of DA field
+ * 1 16 middle bits "" [31:16] of DA field
+ * 2 16 LSB "" [15:0] of DA field
+ * 3*x 16MSB of alt MAC addr 1-15 [47:32] of DA field
+ * 4*x 16 middle bits "" [31:16]
+ * 5*x 16 LSB "" [15:0]
+ * 42 16 MSB of MAC CTRL addr [47:32] of DA.
+ * 43 16 middle bits "" [31:16]
+ * 44 16 LSB "" [15:0]
+ * MAC CTRL addr must be the reserved multicast addr for MAC CTRL frames.
+ * if there is a match, MAC will set the bit for alternative address
+ * filter pass [15]
+ *
+ * here is the map of registers given MAC address notation: a:b:c:d:e:f
+ * ab cd ef
+ * primary addr reg 2 reg 1 reg 0
+ * alt addr 1 reg 5 reg 4 reg 3
+ * alt addr x reg 5*x reg 4*x reg 3*x
+ * ctrl addr reg 44 reg 43 reg 42
+ */
+#define REG_MAC_ADDR0 0x6080 /* MAC address 0 reg */
+#define REG_MAC_ADDRN(x) (REG_MAC_ADDR0 + (x)*4)
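+
+/* illustrative sketch, not part of the original header: loading the primary
+ * MAC address a:b:c:d:e:f per the map above (reg 0 gets "ef", reg 1 "cd",
+ * reg 2 "ab"). assumes writel() from <linux/io.h> and u8 from
+ * <linux/types.h>; example_ names are hypothetical.
+ */
+#if 0	/* example only */
+static inline void example_set_primary_mac(void __iomem *regs, const u8 *addr)
+{
+	writel((addr[4] << 8) | addr[5], regs + REG_MAC_ADDRN(0));	/* e:f */
+	writel((addr[2] << 8) | addr[3], regs + REG_MAC_ADDRN(1));	/* c:d */
+	writel((addr[0] << 8) | addr[1], regs + REG_MAC_ADDRN(2));	/* a:b */
+}
+#endif
+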
+#define REG_MAC_ADDR_FILTER0 0x614C /* address filter 0 reg
+ [47:32] */
+#define REG_MAC_ADDR_FILTER1 0x6150 /* address filter 1 reg
+ [31:16] */
+#define REG_MAC_ADDR_FILTER2 0x6154 /* address filter 2 reg
+ [15:0] */
+#define REG_MAC_ADDR_FILTER2_1_MASK 0x6158 /* address filter 2 and 1
+ mask reg. 8-bit reg
+ contains nibble mask for
+ reg 2 and 1. */
+#define REG_MAC_ADDR_FILTER0_MASK 0x615C /* address filter 0 mask
+ reg */
+
+/* hash table registers: 0 - 15, 0x6160 - 0x619C, 4 8-bit bytes
+ * 16-bit registers contain bits of the hash table.
+ * reg x -> [16*(15 - x) + 15 : 16*(15 - x)].
+ * e.g., 15 -> [15:0], 0 -> [255:240]
+ */
+#define REG_MAC_HASH_TABLE0 0x6160 /* hash table 0 reg */
+#define REG_MAC_HASH_TABLEN(x) (REG_MAC_HASH_TABLE0 + (x)*4)
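+
+/* illustrative sketch, not part of the original header: setting every hash
+ * table bit, which makes the hash filter accept all multicast addresses when
+ * HASH_FILTER_EN is set in REG_MAC_RX_CFG. assumes writel() from
+ * <linux/io.h>; example_ names are hypothetical.
+ */
+#if 0	/* example only */
+static inline void example_hash_accept_all(void __iomem *regs)
+{
+	int i;
+
+	for (i = 0; i < 16; i++)
+		writel(0xFFFF, regs + REG_MAC_HASH_TABLEN(i));
+}
+#endif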
+
+/* statistics registers. these registers generate an interrupt on
+ * overflow. recommended initialization: 0x0000. most are 16-bits except
+ * for PEAK_ATTEMPTS register which is 8 bits.
+ */
+#define REG_MAC_COLL_NORMAL 0x61A0 /* normal collision
+ counter. */
+#define REG_MAC_COLL_FIRST 0x61A4 /* first attempt
+ successful collision
+ counter */
+#define REG_MAC_COLL_EXCESS 0x61A8 /* excessive collision
+ counter */
+#define REG_MAC_COLL_LATE 0x61AC /* late collision counter */
+#define REG_MAC_TIMER_DEFER 0x61B0 /* defer timer. time base
+ is the media byte
+ clock/256 */
+#define REG_MAC_ATTEMPTS_PEAK 0x61B4 /* peak attempts reg */
+#define REG_MAC_RECV_FRAME 0x61B8 /* receive frame counter */
+#define REG_MAC_LEN_ERR 0x61BC /* length error counter */
+#define REG_MAC_ALIGN_ERR 0x61C0 /* alignment error counter */
+#define REG_MAC_FCS_ERR 0x61C4 /* FCS error counter */
+#define REG_MAC_RX_CODE_ERR 0x61C8 /* RX code violation
+ error counter */
+
+/* misc registers */
+#define REG_MAC_RANDOM_SEED 0x61CC /* random number seed reg.
+ 10-bit register used as a
+ seed for the random number
+ generator for the CSMA/CD
+ backoff algorithm. only
+ programmed after power-on
+ reset and should be a
+ random value which has a
+ high likelihood of being
+ unique for each MAC
+ attached to a network
+ segment (e.g., 10 LSB of
+ MAC address) */
+
+/* ASUN: there's a PAUSE_TIMER (ro) described, but it's not in the address
+ * map
+ */
+
+/* 27-bit register has the current state for key state machines in the MAC */
+#define REG_MAC_STATE_MACHINE 0x61D0 /* (ro) state machine reg */
+#define MAC_SM_RLM_MASK 0x07800000
+#define MAC_SM_RLM_SHIFT 23
+#define MAC_SM_RX_FC_MASK 0x00700000
+#define MAC_SM_RX_FC_SHIFT 20
+#define MAC_SM_TLM_MASK 0x000F0000
+#define MAC_SM_TLM_SHIFT 16
+#define MAC_SM_ENCAP_SM_MASK 0x0000F000
+#define MAC_SM_ENCAP_SM_SHIFT 12
+#define MAC_SM_TX_REQ_MASK 0x00000C00
+#define MAC_SM_TX_REQ_SHIFT 10
+#define MAC_SM_TX_FC_MASK 0x000003C0
+#define MAC_SM_TX_FC_SHIFT 6
+#define MAC_SM_FIFO_WRITE_SEL_MASK 0x00000038
+#define MAC_SM_FIFO_WRITE_SEL_SHIFT 3
+#define MAC_SM_TX_FIFO_EMPTY_MASK 0x00000007
+#define MAC_SM_TX_FIFO_EMPTY_SHIFT 0
+
+/** MIF registers. the MIF can be programmed in either bit-bang or
+ * frame mode.
+ **/
+#define REG_MIF_BIT_BANG_CLOCK 0x6200 /* MIF bit-bang clock.
+ 1 -> 0 will generate a
+ rising edge. 0 -> 1 will
+ generate a falling edge. */
+#define REG_MIF_BIT_BANG_DATA 0x6204 /* MIF bit-bang data. 1-bit
+ register generates data */
+#define REG_MIF_BIT_BANG_OUTPUT_EN 0x6208 /* MIF bit-bang output
+ enable. enable when
+ xmitting data from MIF to
+ transceiver. */
+
+/* 32-bit register serves as an instruction register when the MIF is
+ * programmed in frame mode. load this register w/ a valid instruction
+ * (as per IEEE 802.3u MII spec). poll this register to check for instruction
+ * execution completion. during a read operation, this register will also
+ * contain the 16-bit data returned by the transceiver. unless specified
+ * otherwise, fields are considered "don't care" when polling for
+ * completion.
+ */
+#define REG_MIF_FRAME 0x620C /* MIF frame/output reg */
+#define MIF_FRAME_START_MASK 0xC0000000 /* start of frame.
+ load w/ 01 when
+ issuing an instr */
+#define MIF_FRAME_ST 0x40000000 /* STart of frame */
+#define MIF_FRAME_OPCODE_MASK 0x30000000 /* opcode. 01 for a
+ write. 10 for a
+ read */
+#define MIF_FRAME_OP_READ 0x20000000 /* read OPcode */
+#define MIF_FRAME_OP_WRITE 0x10000000 /* write OPcode */
+#define MIF_FRAME_PHY_ADDR_MASK 0x0F800000 /* phy address. when
+ issuing an instr,
+ this field should be
+ loaded w/ the XCVR
+ addr */
+#define MIF_FRAME_PHY_ADDR_SHIFT 23
+#define MIF_FRAME_REG_ADDR_MASK 0x007C0000 /* register address.
+ when issuing an instr,
+ addr of register
+ to be read/written */
+#define MIF_FRAME_REG_ADDR_SHIFT 18
+#define MIF_FRAME_TURN_AROUND_MSB 0x00020000 /* turn around, MSB.
+ when issuing an instr,
+ set this bit to 1 */
+#define MIF_FRAME_TURN_AROUND_LSB 0x00010000 /* turn around, LSB.
+ when issuing an instr,
+ set this bit to 0.
+ when polling for
+ completion, 1 means
+ that instr execution
+ has been completed */
+#define MIF_FRAME_DATA_MASK 0x0000FFFF /* instruction payload.
+ load with 16-bit data
+ to be written in
+ transceiver reg for a
+ write. doesn't matter
+ in a read. when
+ polling for
+ completion, field is
+ "don't care" for write
+ and 16-bit data
+ returned by the
+ transceiver for a
+ read (if valid bit
+ is set) */
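+
+/* illustrative sketch, not part of the original header: a frame-mode PHY
+ * register read. the instruction is built as described above (ST = 01,
+ * opcode = 10 for a read, turn-around MSB = 1, LSB = 0) and the LSB
+ * turn-around bit is polled for completion. assumes readl()/writel() from
+ * <linux/io.h>, udelay() from <linux/delay.h> and u32 from <linux/types.h>;
+ * example_ names and the timeout are hypothetical.
+ */
+#if 0	/* example only */
+static inline int example_mif_phy_read(void __iomem *regs, int phy, int reg)
+{
+	u32 cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ | MIF_FRAME_TURN_AROUND_MSB;
+	int limit = 1000;
+
+	cmd |= (phy << MIF_FRAME_PHY_ADDR_SHIFT) & MIF_FRAME_PHY_ADDR_MASK;
+	cmd |= (reg << MIF_FRAME_REG_ADDR_SHIFT) & MIF_FRAME_REG_ADDR_MASK;
+	writel(cmd, regs + REG_MIF_FRAME);
+
+	while (limit--) {
+		udelay(10);
+		cmd = readl(regs + REG_MIF_FRAME);
+		if (cmd & MIF_FRAME_TURN_AROUND_LSB)	/* execution completed */
+			return cmd & MIF_FRAME_DATA_MASK;
+	}
+	return -1;	/* timed out */
+}
+#endif
+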
+#define REG_MIF_CFG 0x6210 /* MIF config reg */
+#define MIF_CFG_PHY_SELECT 0x0001 /* 1 -> select MDIO_1
+ 0 -> select MDIO_0 */
+#define MIF_CFG_POLL_EN 0x0002 /* enable polling
+ mechanism. if set,
+ BB_MODE should be 0 */
+#define MIF_CFG_BB_MODE 0x0004 /* 1 -> bit-bang mode
+ 0 -> frame mode */
+#define MIF_CFG_POLL_REG_MASK 0x00F8 /* register address to be
+ used by polling mode.
+ only meaningful if POLL_EN
+ is set to 1 */
+#define MIF_CFG_POLL_REG_SHIFT 3
+#define MIF_CFG_MDIO_0 0x0100 /* (ro) dual purpose.
+ when MDIO_0 is idle,
+ 1 -> transceiver is
+ connected to MDIO_0.
+ when MIF is communicating
+ w/ MDIO_0 in bit-bang
+ mode, this bit indicates
+ the incoming bit stream
+ during a read op */
+#define MIF_CFG_MDIO_1 0x0200 /* (ro) dual purpose.
+ when MDIO_1 is idle,
+ 1 -> transceiver is
+ connected to MDIO_1.
+ when MIF is communicating
+ w/ MDIO_1 in bit-bang
+ mode, this bit indicates
+ the incoming bit stream
+ during a read op */
+#define MIF_CFG_POLL_PHY_MASK 0x7C00 /* transceiver address to
+ be polled */
+#define MIF_CFG_POLL_PHY_SHIFT 10
+
+/* 16-bit register used to determine which bits in the POLL_STATUS portion of
+ * the MIF_STATUS register will cause an interrupt. if a mask bit is 0,
+ * corresponding bit of the POLL_STATUS will generate a MIF interrupt when
+ * set. DEFAULT: 0xFFFF
+ */
+#define REG_MIF_MASK 0x6214 /* MIF mask reg */
+
+/* 32-bit register used when in poll mode. auto-cleared after being read */
+#define REG_MIF_STATUS 0x6218 /* MIF status reg */
+#define MIF_STATUS_POLL_DATA_MASK 0xFFFF0000 /* poll data contains
+ the "latest image"
+ update of the XCVR
+ reg being read */
+#define MIF_STATUS_POLL_DATA_SHIFT 16
+#define MIF_STATUS_POLL_STATUS_MASK 0x0000FFFF /* poll status indicates
+ which bits in the
+ POLL_DATA field have
+ changed since the
+ MIF_STATUS reg was
+ last read */
+#define MIF_STATUS_POLL_STATUS_SHIFT 0
+
+/* 7-bit register has current state for all state machines in the MIF */
+#define REG_MIF_STATE_MACHINE 0x621C /* MIF state machine reg */
+#define MIF_SM_CONTROL_MASK 0x07 /* control state machine
+ state */
+#define MIF_SM_EXECUTION_MASK 0x60 /* execution state machine
+ state */
+
+/** PCS/Serialink. the following registers are equivalent to the standard
+ * MII management registers except that they're directly mapped in
+ * Cassini's register space.
+ **/
+
+/* the auto-negotiation enable bit should be programmed the same at
+ * the link partner as in the local device to enable auto-negotiation to
+ * complete. when that bit is reprogrammed, auto-neg/manual config is
+ * restarted automatically.
+ * DEFAULT: 0x1040
+ */
+#define REG_PCS_MII_CTRL 0x9000 /* PCS MII control reg */
+#define PCS_MII_CTRL_1000_SEL 0x0040 /* reads 1. ignored on
+ writes */
+#define PCS_MII_CTRL_COLLISION_TEST 0x0080 /* COL signal at the PCS
+ to MAC interface is
+ activated regardless
+ of activity */
+#define PCS_MII_CTRL_DUPLEX 0x0100 /* forced 0x0. PCS
+ behaviour same for
+ half and full dplx */
+#define PCS_MII_RESTART_AUTONEG 0x0200 /* self clearing.
+ restart auto-
+ negotiation */
+#define PCS_MII_ISOLATE 0x0400 /* read as 0. ignored
+ on writes */
+#define PCS_MII_POWER_DOWN 0x0800 /* read as 0. ignored
+ on writes */
+#define PCS_MII_AUTONEG_EN 0x1000 /* default 1. PCS goes
+ through automatic
+ link config before it
+ can be used. when 0,
+ link can be used
+ w/out any link config
+ phase */
+#define PCS_MII_10_100_SEL 0x2000 /* read as 0. ignored on
+ writes */
+#define PCS_MII_RESET 0x8000 /* reset PCS. self-clears
+ when done */
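+
+/* illustrative sketch, not part of the original header: restarting
+ * auto-negotiation via the self-clearing RESTART bit while keeping
+ * auto-negotiation enabled. assumes readl()/writel() from <linux/io.h> and
+ * u32 from <linux/types.h>; example_ names are hypothetical.
+ */
+#if 0	/* example only */
+static inline void example_pcs_restart_autoneg(void __iomem *regs)
+{
+	u32 ctl = readl(regs + REG_PCS_MII_CTRL);
+
+	ctl |= PCS_MII_AUTONEG_EN | PCS_MII_RESTART_AUTONEG;
+	writel(ctl, regs + REG_PCS_MII_CTRL);
+}
+#endif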
+
+/* DEFAULT: 0x0108 */
+#define REG_PCS_MII_STATUS 0x9004 /* PCS MII status reg */
+#define PCS_MII_STATUS_EXTEND_CAP 0x0001 /* reads 0 */
+#define PCS_MII_STATUS_JABBER_DETECT 0x0002 /* reads 0 */
+#define PCS_MII_STATUS_LINK_STATUS 0x0004 /* 1 -> link up.
+ 0 -> link down. 0 is
+ latched so that 0 is
+ kept until read. read
+ 2x to determine if the
+ link has gone up again */
+#define PCS_MII_STATUS_AUTONEG_ABLE 0x0008 /* reads 1 (able to perform
+ auto-neg) */
+#define PCS_MII_STATUS_REMOTE_FAULT 0x0010 /* 1 -> remote fault detected
+ from received link code
+ word. only valid after
+ auto-neg completed */
+#define PCS_MII_STATUS_AUTONEG_COMP 0x0020 /* 1 -> auto-negotiation
+ completed
+ 0 -> auto-negotiation not
+ completed */
+#define PCS_MII_STATUS_EXTEND_STATUS 0x0100 /* reads as 1. used as an
+ indication that this is
+ a 1000 Base-X PHY. writes
+ to it are ignored */
+
+/* used during auto-negotiation.
+ * DEFAULT: 0x00E0
+ */
+#define REG_PCS_MII_ADVERT 0x9008 /* PCS MII advertisement
+ reg */
+#define PCS_MII_ADVERT_FD 0x0020 /* advertise full duplex
+ 1000 Base-X */
+#define PCS_MII_ADVERT_HD 0x0040 /* advertise half-duplex
+ 1000 Base-X */
+#define PCS_MII_ADVERT_SYM_PAUSE 0x0080 /* advertise PAUSE
+ symmetric capability */
+#define PCS_MII_ADVERT_ASYM_PAUSE 0x0100 /* advertises PAUSE
+ asymmetric capability */
+#define PCS_MII_ADVERT_RF_MASK 0x3000 /* remote fault. write bit13
+ to optionally indicate to
+ link partner that chip is
+ going off-line. bit12 will
+ get set when signal
+ detect == FAIL and will
+ remain set until
+ successful negotiation */
+#define PCS_MII_ADVERT_ACK 0x4000 /* (ro) */
+#define PCS_MII_ADVERT_NEXT_PAGE 0x8000 /* (ro) forced 0x0 */
+
+/* contents updated as a result of autonegotiation. layout and definitions
+ * identical to PCS_MII_ADVERT
+ */
+#define REG_PCS_MII_LPA 0x900C /* PCS MII link partner
+ ability reg */
+#define PCS_MII_LPA_FD PCS_MII_ADVERT_FD
+#define PCS_MII_LPA_HD PCS_MII_ADVERT_HD
+#define PCS_MII_LPA_SYM_PAUSE PCS_MII_ADVERT_SYM_PAUSE
+#define PCS_MII_LPA_ASYM_PAUSE PCS_MII_ADVERT_ASYM_PAUSE
+#define PCS_MII_LPA_RF_MASK PCS_MII_ADVERT_RF_MASK
+#define PCS_MII_LPA_ACK PCS_MII_ADVERT_ACK
+#define PCS_MII_LPA_NEXT_PAGE PCS_MII_ADVERT_NEXT_PAGE
+
+/* DEFAULT: 0x0 */
+#define REG_PCS_CFG 0x9010 /* PCS config reg */
+#define PCS_CFG_EN 0x01 /* enable PCS. must be
+ 0 when modifying
+ PCS_MII_ADVERT */
+#define PCS_CFG_SD_OVERRIDE 0x02 /* sets signal detect to
+ OK. bit is
+ non-resettable */
+#define PCS_CFG_SD_ACTIVE_LOW 0x04 /* changes interpretation
+ of optical signal to make
+ signal detect okay when
+ signal is low */
+#define PCS_CFG_JITTER_STUDY_MASK 0x18 /* used to make jitter
+ measurements. a single
+ code group is xmitted
+ regularly.
+ 0x0 = normal operation
+ 0x1 = high freq test
+ pattern, D21.5
+ 0x2 = low freq test
+ pattern, K28.7
+ 0x3 = reserved */
+#define PCS_CFG_10MS_TIMER_OVERRIDE 0x20 /* shortens 10-20ms auto-
+ negotiation timer to
+ a few cycles for test
+ purposes */
+
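+/* Illustrative sketch, not part of the original header: per the note on
+ * PCS_CFG_EN above, the PCS has to be disabled while REG_PCS_MII_ADVERT
+ * is rewritten; a plausible sequence disables the PCS, updates the
+ * advertisement, re-enables the PCS and restarts auto-negotiation.  The
+ * helper name and the readl()/writel() accessors are assumptions for
+ * illustration only.
+ */
+static inline void cas_pcs_set_advert(void __iomem *regs, u32 advert)
+{
+	u32 cfg = readl(regs + REG_PCS_CFG);
+
+	writel(cfg & ~PCS_CFG_EN, regs + REG_PCS_CFG);	/* PCS off */
+	writel(advert, regs + REG_PCS_MII_ADVERT);	/* new ability word */
+	writel(cfg | PCS_CFG_EN, regs + REG_PCS_CFG);	/* PCS back on */
+
+	/* self-clearing bit: kick off a fresh auto-negotiation */
+	writel(readl(regs + REG_PCS_MII_CTRL) | PCS_MII_RESTART_AUTONEG,
+	       regs + REG_PCS_MII_CTRL);
+}
+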
+/* used for diagnostic purposes. bits 20-22 autoclear on read */
+#define REG_PCS_STATE_MACHINE 0x9014 /* (ro) PCS state machine
+ and diagnostic reg */
+#define PCS_SM_TX_STATE_MASK 0x0000000F /* 0 and 1 indicate
+ xmission of idle.
+ otherwise, xmission of
+ a packet */
+#define PCS_SM_RX_STATE_MASK 0x000000F0 /* 0 indicates reception
+ of idle. otherwise,
+ reception of packet */
+#define PCS_SM_WORD_SYNC_STATE_MASK 0x00000700 /* 0 indicates loss of
+ sync */
+#define PCS_SM_SEQ_DETECT_STATE_MASK 0x00001800 /* cycling through 0-3
+ indicates reception of
+ Config codes. cycling
+ through 0-1 indicates
+ reception of idles */
+#define PCS_SM_LINK_STATE_MASK 0x0001E000
+#define SM_LINK_STATE_UP 0x00016000 /* link state is up */
+
+#define PCS_SM_LOSS_LINK_C 0x00100000 /* loss of link due to
+ receipt of Config
+ codes */
+#define PCS_SM_LOSS_LINK_SYNC 0x00200000 /* loss of link due to
+ loss of sync */
+#define PCS_SM_LOSS_SIGNAL_DETECT 0x00400000 /* signal detect goes
+ from OK to FAIL. bit29
+ will also be set if
+ this is set */
+#define PCS_SM_NO_LINK_BREAKLINK 0x01000000 /* link not up due to
+ receipt of breaklink
+ C codes from partner.
+ C codes w/ 0 content
+ received triggering
+ start/restart of
+ autonegotiation.
+ should be sent for
+ no longer than 20ms */
+#define PCS_SM_NO_LINK_SERDES 0x02000000 /* serdes being
+ initialized. see serdes
+ state reg */
+#define PCS_SM_NO_LINK_C 0x04000000 /* C codes not stable or
+ not received */
+#define PCS_SM_NO_LINK_SYNC 0x08000000 /* word sync not
+ achieved */
+#define PCS_SM_NO_LINK_WAIT_C 0x10000000 /* waiting for C codes
+ w/ ack bit set */
+#define PCS_SM_NO_LINK_NO_IDLE 0x20000000 /* link partner continues
+ to send C codes
+ instead of idle
+ symbols or pkt data */
+
+/* this register indicates interrupt changes in specific PCS MII status bits.
+ * PCS_INT may be masked at the ISR level. only a single bit is implemented
+ * for link status change.
+ */
+#define REG_PCS_INTR_STATUS 0x9018 /* PCS interrupt status */
+#define PCS_INTR_STATUS_LINK_CHANGE 0x04 /* link status has changed
+ since last read */
+
+/* control which network interface is used. no more than one bit should
+ * be set.
+ * DEFAULT: none
+ */
+#define REG_PCS_DATAPATH_MODE 0x9050 /* datapath mode reg */
+#define PCS_DATAPATH_MODE_MII 0x00 /* PCS is not used and
+ MII/GMII is selected.
+ selection between MII and
+ GMII is controlled by
+ XIF_CFG */
+#define PCS_DATAPATH_MODE_SERDES 0x02 /* PCS is used via the
+ 10-bit interface */
+
+/* input to serdes chip or serialink block */
+#define REG_PCS_SERDES_CTRL 0x9054 /* serdes control reg */
+#define PCS_SERDES_CTRL_LOOPBACK 0x01 /* enable loopback on
+ serdes interface */
+#define PCS_SERDES_CTRL_SYNCD_EN 0x02 /* enable sync carrier
+ detection. should be
+ 0x0 for normal
+ operation */
+#define PCS_SERDES_CTRL_LOCKREF 0x04 /* frequency-lock RBC[0:1]
+ to REFCLK when set.
+ when clear, receiver
+ clock locks to incoming
+ serial data */
+
+/* multiplex test outputs into the PROM address (PA_3 through PA_0) pins.
+ * should be 0x0 for normal operations.
+ * 0b000 normal operation, PROM address[3:0] selected
+ * 0b001 rxdma req, rxdma ack, rxdma ready, rxdma read
+ * 0b010 rxmac req, rx ack, rx tag, rx clk shared
+ * 0b011 txmac req, tx ack, tx tag, tx retry req
+ * 0b100 tx tp3, tx tp2, tx tp1, tx tp0
+ * 0b101 R period RX, R period TX, R period HP, R period BIM
+ * DEFAULT: 0x0
+ */
+#define REG_PCS_SHARED_OUTPUT_SEL 0x9058 /* shared output select */
+#define PCS_SOS_PROM_ADDR_MASK 0x0007
+
+/* used for diagnostics. this register indicates progress of the SERDES
+ * boot up.
+ * 0b00 undergoing reset
+ * 0b01 waiting 500us while lockrefn is asserted
+ * 0b10 waiting for comma detect
+ * 0b11 receive data is synchronized
+ * DEFAULT: 0x0
+ */
+#define REG_PCS_SERDES_STATE 0x905C /* (ro) serdes state */
+#define PCS_SERDES_STATE_MASK 0x03
+
+/* used for diagnostics. indicates number of packets transmitted or received.
+ * counters rollover w/out generating an interrupt.
+ * DEFAULT: 0x0
+ */
+#define REG_PCS_PACKET_COUNT 0x9060 /* (ro) PCS packet counter */
+#define PCS_PACKET_COUNT_TX 0x000007FF /* pkts xmitted by PCS */
+#define PCS_PACKET_COUNT_RX 0x07FF0000 /* pkts recvd by PCS
+ whether they
+ encountered an error
+ or not */
+
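+/* Illustrative sketch, not part of the original header: the TX and RX
+ * packet counts share one register and are only 11 bits wide, so a
+ * caller keeping statistics extracts each field and handles rollover
+ * itself.  The helper name is an assumption for illustration only.
+ */
+static inline void cas_pcs_pkt_counts(void __iomem *regs, u32 *tx, u32 *rx)
+{
+	u32 cnt = readl(regs + REG_PCS_PACKET_COUNT);
+
+	/* TX count lives in bits [10:0], RX count in bits [26:16] */
+	*tx = cnt & PCS_PACKET_COUNT_TX;
+	*rx = (cnt & PCS_PACKET_COUNT_RX) >> 16;
+}
+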
+/** LocalBus Devices. the following provides run-time access to the
+ * Cassini's PROM
+ ***/
+#define REG_EXPANSION_ROM_RUN_START 0x100000 /* expansion rom run time
+ access */
+#define REG_EXPANSION_ROM_RUN_END 0x17FFFF
+
+#define REG_SECOND_LOCALBUS_START 0x180000 /* secondary local bus
+ device */
+#define REG_SECOND_LOCALBUS_END 0x1FFFFF
+
+/* entropy device */
+#define REG_ENTROPY_START REG_SECOND_LOCALBUS_START
+#define REG_ENTROPY_DATA (REG_ENTROPY_START + 0x00)
+#define REG_ENTROPY_STATUS (REG_ENTROPY_START + 0x04)
+#define ENTROPY_STATUS_DRDY 0x01
+#define ENTROPY_STATUS_BUSY 0x02
+#define ENTROPY_STATUS_CIPHER 0x04
+#define ENTROPY_STATUS_BYPASS_MASK 0x18
+#define REG_ENTROPY_MODE (REG_ENTROPY_START + 0x05)
+#define ENTROPY_MODE_KEY_MASK 0x07
+#define ENTROPY_MODE_ENCRYPT 0x40
+#define REG_ENTROPY_RAND_REG (REG_ENTROPY_START + 0x06)
+#define REG_ENTROPY_RESET (REG_ENTROPY_START + 0x07)
+#define ENTROPY_RESET_DES_IO 0x01
+#define ENTROPY_RESET_STC_MODE 0x02
+#define ENTROPY_RESET_KEY_CACHE 0x04
+#define ENTROPY_RESET_IV 0x08
+#define REG_ENTROPY_IV (REG_ENTROPY_START + 0x08)
+#define REG_ENTROPY_KEY0 (REG_ENTROPY_START + 0x10)
+#define REG_ENTROPY_KEYN(x) (REG_ENTROPY_KEY0 + 4*(x))
+
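+/* Illustrative sketch, not part of the original header: a plausible way
+ * to pull one byte out of the entropy device is to poll the DRDY status
+ * bit and then read the data register.  The byte-wide readb() accesses
+ * and the helper itself are assumptions for illustration only.
+ */
+static inline int cas_entropy_read_byte(void __iomem *regs, u8 *out)
+{
+	int tries = 1000;
+
+	while (!(readb(regs + REG_ENTROPY_STATUS) & ENTROPY_STATUS_DRDY)) {
+		if (--tries == 0)
+			return -1;	/* no data became ready */
+	}
+	*out = readb(regs + REG_ENTROPY_DATA);
+	return 0;
+}
+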
+/* phys of interest w/ their special mii registers */
+#define PHY_LUCENT_B0 0x00437421
+#define LUCENT_MII_REG 0x1F
+
+#define PHY_NS_DP83065 0x20005c78
+#define DP83065_MII_MEM 0x16
+#define DP83065_MII_REGD 0x1D
+#define DP83065_MII_REGE 0x1E
+
+#define PHY_BROADCOM_5411 0x00206071
+#define PHY_BROADCOM_B0 0x00206050
+#define BROADCOM_MII_REG4 0x14
+#define BROADCOM_MII_REG5 0x15
+#define BROADCOM_MII_REG7 0x17
+#define BROADCOM_MII_REG8 0x18
+
+#define CAS_MII_ANNPTR 0x07
+#define CAS_MII_ANNPRR 0x08
+#define CAS_MII_1000_CTRL 0x09
+#define CAS_MII_1000_STATUS 0x0A
+#define CAS_MII_1000_EXTEND 0x0F
+
+#define CAS_BMSR_1000_EXTEND 0x0100 /* supports 1000Base-T extended status */
+/*
+ * if autoneg is disabled, here's the table:
+ * BMCR_SPEED100 = 100Mbps
+ * BMCR_SPEED1000 = 1000Mbps
+ * ~(BMCR_SPEED100 | BMCR_SPEED1000) = 10Mbps
+ */
+#define CAS_BMCR_SPEED1000 0x0040 /* Select 1000Mbps */
+
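+/* Illustrative sketch, not part of the original header: decoding a
+ * forced speed from a BMCR value according to the table above.
+ * BMCR_SPEED100 comes from <linux/mii.h>; the helper name is an
+ * assumption for illustration only.
+ */
+static inline int cas_bmcr_speed(u16 bmcr)
+{
+	if (bmcr & CAS_BMCR_SPEED1000)
+		return 1000;
+	if (bmcr & BMCR_SPEED100)
+		return 100;
+	return 10;	/* neither speed bit set */
+}
+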
+#define CAS_ADVERTISE_1000HALF 0x0100
+#define CAS_ADVERTISE_1000FULL 0x0200
+#define CAS_ADVERTISE_PAUSE 0x0400
+#define CAS_ADVERTISE_ASYM_PAUSE 0x0800
+
+/* regular lpa register */
+#define CAS_LPA_PAUSE CAS_ADVERTISE_PAUSE
+#define CAS_LPA_ASYM_PAUSE CAS_ADVERTISE_ASYM_PAUSE
+
+/* 1000_STATUS register */
+#define CAS_LPA_1000HALF 0x0400
+#define CAS_LPA_1000FULL 0x0800
+
+#define CAS_EXTEND_1000XFULL 0x8000
+#define CAS_EXTEND_1000XHALF 0x4000
+#define CAS_EXTEND_1000TFULL 0x2000
+#define CAS_EXTEND_1000THALF 0x1000
+
+/* cassini header parser firmware */
+typedef struct cas_hp_inst {
+ const char *note;
+
+ u16 mask, val;
+
+ u8 op;
+ u8 soff, snext; /* if match succeeds, new offset and match */
+ u8 foff, fnext; /* if match fails, new offset and match */
+ /* output info */
+ u8 outop; /* output opcode */
+
+ u16 outarg; /* output argument */
+ u8 outenab; /* output enable: 0 = not, 1 = if match,
+ 2 = if !match, 3 = always */
+ u8 outshift; /* barrel shift right, 4 bits */
+ u16 outmask;
+} cas_hp_inst_t;
+
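+/* Worked example (added for illustration, not part of the original
+ * header), using the "VLAN?" entry of CAS_PROG_IP46TCP4_PREAMBLE below:
+ * mask 0xffff and val 0x8100 compare the current 16-bit word against the
+ * 802.1Q ethertype with OP_EQ; on a match the offset advances by 1
+ * (soff) and parsing continues at S1_CFI (snext), on a mismatch the
+ * offset stays 0 (foff) and parsing continues at S1_8023 (fnext); the
+ * output op IM_CTL with outarg 0x00a and outenab 3 is applied
+ * unconditionally.
+ */
+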
+/* comparison */
+#define OP_EQ 0 /* packet == value */
+#define OP_LT 1 /* packet < value */
+#define OP_GT 2 /* packet > value */
+#define OP_NP 3 /* new packet */
+
+/* output opcodes */
+#define CL_REG 0
+#define LD_FID 1
+#define LD_SEQ 2
+#define LD_CTL 3
+#define LD_SAP 4
+#define LD_R1 5
+#define LD_L3 6
+#define LD_SUM 7
+#define LD_HDR 8
+#define IM_FID 9
+#define IM_SEQ 10
+#define IM_SAP 11
+#define IM_R1 12
+#define IM_CTL 13
+#define LD_LEN 14
+#define ST_FLG 15
+
+/* match step #s for IP4TCP4 */
+#define S1_PCKT 0
+#define S1_VLAN 1
+#define S1_CFI 2
+#define S1_8023 3
+#define S1_LLC 4
+#define S1_LLCc 5
+#define S1_IPV4 6
+#define S1_IPV4c 7
+#define S1_IPV4F 8
+#define S1_TCP44 9
+#define S1_IPV6 10
+#define S1_IPV6L 11
+#define S1_IPV6c 12
+#define S1_TCP64 13
+#define S1_TCPSQ 14
+#define S1_TCPFG 15
+#define S1_TCPHL 16
+#define S1_TCPHc 17
+#define S1_CLNP 18
+#define S1_CLNP2 19
+#define S1_DROP 20
+#define S2_HTTP 21
+#define S1_ESP4 22
+#define S1_AH4 23
+#define S1_ESP6 24
+#define S1_AH6 25
+
+#define CAS_PROG_IP46TCP4_PREAMBLE \
+{ "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, S1_PCKT, \
+ CL_REG, 0x3ff, 1, 0x0, 0x0000}, \
+{ "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, \
+ IM_CTL, 0x00a, 3, 0x0, 0xffff}, \
+{ "CFI?", 0x1000, 0x1000, OP_EQ, 0, S1_DROP, 1, S1_8023, \
+ CL_REG, 0x000, 0, 0x0, 0x0000}, \
+{ "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, \
+ CL_REG, 0x000, 0, 0x0, 0x0000}, \
+{ "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP, \
+ CL_REG, 0x000, 0, 0x0, 0x0000}, \
+{ "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP, \
+ CL_REG, 0x000, 0, 0x0, 0x0000}, \
+{ "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, \
+ LD_SAP, 0x100, 3, 0x0, 0xffff}, \
+{ "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP, \
+ LD_SUM, 0x00a, 1, 0x0, 0x0000}, \
+{ "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP, \
+ LD_LEN, 0x03e, 1, 0x0, 0xffff}, \
+{ "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_CLNP, \
+ LD_FID, 0x182, 1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ \
+{ "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP, \
+ LD_SUM, 0x015, 1, 0x0, 0x0000}, \
+{ "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP, \
+ IM_R1, 0x128, 1, 0x0, 0xffff}, \
+{ "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP, \
+ LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ \
+{ "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP, \
+ LD_LEN, 0x03f, 1, 0x0, 0xffff}
+
+#ifdef USE_HP_IP46TCP4
+static cas_hp_inst_t cas_prog_ip46tcp4tab[] = {
+ CAS_PROG_IP46TCP4_PREAMBLE,
+ { "TCP seq", /* DADDR should point to dest port */
+ 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ,
+ 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
+ { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0,
+ S1_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */
+ { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0,
+ S1_TCPHc, LD_R1, 0x205, 3, 0xB, 0xf000},
+ { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
+ S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff},
+ { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2,
+ IM_CTL, 0x001, 3, 0x0, 0x0001},
+ { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ IM_CTL, 0x000, 0, 0x0, 0x0000},
+ { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ IM_CTL, 0x080, 3, 0x0, 0xffff},
+ { NULL },
+};
+#ifdef HP_IP46TCP4_DEFAULT
+#define CAS_HP_FIRMWARE cas_prog_ip46tcp4tab
+#endif
+#endif
+
+/*
+ * Alternate table load which excludes HTTP server traffic from reassembly.
+ * It is substantially similar to the basic table, with one extra state
+ * and a few extra compares. */
+#ifdef USE_HP_IP46TCP4NOHTTP
+static cas_hp_inst_t cas_prog_ip46tcp4nohttptab[] = {
+ CAS_PROG_IP46TCP4_PREAMBLE,
+ { "TCP seq", /* DADDR should point to dest port */
+ 0xFFFF, 0x0080, OP_EQ, 0, S2_HTTP, 0, S1_TCPFG, LD_SEQ,
+ 0x081, 3, 0x0, 0xffff} , /* Load TCP seq # */
+ { "TCP control flags", 0xFFFF, 0x8080, OP_EQ, 0, S2_HTTP, 0,
+ S1_TCPHL, ST_FLG, 0x145, 2, 0x0, 0x002f, }, /* Load TCP flags */
+ { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc,
+ LD_R1, 0x205, 3, 0xB, 0xf000},
+ { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ LD_HDR, 0x0ff, 3, 0x0, 0xffff},
+ { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2,
+ IM_CTL, 0x001, 3, 0x0, 0x0001},
+ { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ CL_REG, 0x002, 3, 0x0, 0x0000},
+ { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ IM_CTL, 0x080, 3, 0x0, 0xffff},
+ { "No HTTP", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ IM_CTL, 0x044, 3, 0x0, 0xffff},
+ { NULL },
+};
+#ifdef HP_IP46TCP4NOHTTP_DEFAULT
+#define CAS_HP_FIRMWARE cas_prog_ip46tcp4nohttptab
+#endif
+#endif
+
+/* match step #s for IP4FRAG */
+#define S3_IPV6c 11
+#define S3_TCP64 12
+#define S3_TCPSQ 13
+#define S3_TCPFG 14
+#define S3_TCPHL 15
+#define S3_TCPHc 16
+#define S3_FRAG 17
+#define S3_FOFF 18
+#define S3_CLNP 19
+
+#ifdef USE_HP_IP4FRAG
+static cas_hp_inst_t cas_prog_ip4fragtab[] = {
+ { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, S1_PCKT,
+ CL_REG, 0x3ff, 1, 0x0, 0x0000},
+ { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023,
+ IM_CTL, 0x00a, 3, 0x0, 0xffff},
+ { "CFI?", 0x1000, 0x1000, OP_EQ, 0, S3_CLNP, 1, S1_8023,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S3_CLNP,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "LLCc?",0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S3_CLNP,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6,
+ LD_SAP, 0x100, 3, 0x0, 0xffff},
+ { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S3_CLNP,
+ LD_SUM, 0x00a, 1, 0x0, 0x0000},
+ { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S3_FRAG,
+ LD_LEN, 0x03e, 3, 0x0, 0xffff},
+ { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S3_TCPSQ, 0, S3_CLNP,
+ LD_FID, 0x182, 3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
+ { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S3_IPV6c, 0, S3_CLNP,
+ LD_SUM, 0x015, 1, 0x0, 0x0000},
+ { "IPV6 cont?", 0xf000, 0x6000, OP_EQ, 3, S3_TCP64, 0, S3_CLNP,
+ LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */
+ { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S3_TCPSQ, 0, S3_CLNP,
+ LD_LEN, 0x03f, 1, 0x0, 0xffff},
+ { "TCP seq", /* DADDR should point to dest port */
+ 0x0000, 0x0000, OP_EQ, 0, S3_TCPFG, 4, S3_TCPFG, LD_SEQ,
+ 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
+ { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S3_TCPHL, 0,
+ S3_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */
+ { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S3_TCPHc, 0, S3_TCPHc,
+ LD_R1, 0x205, 3, 0xB, 0xf000},
+ { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ LD_HDR, 0x0ff, 3, 0x0, 0xffff},
+ { "IP4 Fragment", 0x0000, 0x0000, OP_EQ, 0, S3_FOFF, 0, S3_FOFF,
+ LD_FID, 0x103, 3, 0x0, 0xffff}, /* FID IP4 src+dst */
+ { "IP4 frag offset", 0x0000, 0x0000, OP_EQ, 0, S3_FOFF, 0, S3_FOFF,
+ LD_SEQ, 0x040, 1, 0xD, 0xfff8},
+ { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ IM_CTL, 0x001, 3, 0x0, 0x0001},
+ { NULL },
+};
+#ifdef HP_IP4FRAG_DEFAULT
+#define CAS_HP_FIRMWARE cas_prog_ip4fragtab
+#endif
+#endif
+
+/*
+ * Alternate table which does batching without reassembly
+ */
+#ifdef USE_HP_IP46TCP4BATCH
+static cas_hp_inst_t cas_prog_ip46tcp4batchtab[] = {
+ CAS_PROG_IP46TCP4_PREAMBLE,
+ { "TCP seq", /* DADDR should point to dest port */
+ 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 0, S1_TCPFG, LD_SEQ,
+ 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
+ { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0,
+ S1_TCPHL, ST_FLG, 0x000, 3, 0x0, 0x0000}, /* Load TCP flags */
+ { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0,
+ S1_TCPHc, LD_R1, 0x205, 3, 0xB, 0xf000},
+ { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
+ S1_PCKT, IM_CTL, 0x040, 3, 0x0, 0xffff}, /* set batch bit */
+ { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ IM_CTL, 0x001, 3, 0x0, 0x0001},
+ { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
+ S1_PCKT, IM_CTL, 0x080, 3, 0x0, 0xffff},
+ { NULL },
+};
+#ifdef HP_IP46TCP4BATCH_DEFAULT
+#define CAS_HP_FIRMWARE cas_prog_ip46tcp4batchtab
+#endif
+#endif
+
+/* Workaround for Cassini rev2 descriptor corruption problem.
+ * Does batching without reassembly, and sets the SAP to a known
+ * data pattern for all packets.
+ */
+#ifdef USE_HP_WORKAROUND
+static cas_hp_inst_t cas_prog_workaroundtab[] = {
+ { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0,
+ S1_PCKT, CL_REG, 0x3ff, 1, 0x0, 0x0000} ,
+ { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023,
+ IM_CTL, 0x04a, 3, 0x0, 0xffff},
+ { "CFI?", 0x1000, 0x1000, OP_EQ, 0, S1_CLNP, 1, S1_8023,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6,
+ IM_SAP, 0x6AE, 3, 0x0, 0xffff},
+ { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP,
+ LD_SUM, 0x00a, 1, 0x0, 0x0000},
+ { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP,
+ LD_LEN, 0x03e, 1, 0x0, 0xffff},
+ { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_CLNP,
+ LD_FID, 0x182, 3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
+ { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP,
+ LD_SUM, 0x015, 1, 0x0, 0x0000},
+ { "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP,
+ IM_R1, 0x128, 1, 0x0, 0xffff},
+ { "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP,
+ LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */
+ { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP,
+ LD_LEN, 0x03f, 1, 0x0, 0xffff},
+ { "TCP seq", /* DADDR should point to dest port */
+ 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ,
+ 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
+ { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0,
+ S1_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */
+ { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc,
+ LD_R1, 0x205, 3, 0xB, 0xf000},
+ { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
+ S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff},
+ { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2,
+ IM_SAP, 0x6AE, 3, 0x0, 0xffff} ,
+ { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ IM_CTL, 0x001, 3, 0x0, 0x0001},
+ { NULL },
+};
+#ifdef HP_WORKAROUND_DEFAULT
+#define CAS_HP_FIRMWARE cas_prog_workaroundtab
+#endif
+#endif
+
+#ifdef USE_HP_ENCRYPT
+static cas_hp_inst_t cas_prog_encryptiontab[] = {
+ { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0,
+ S1_PCKT, CL_REG, 0x3ff, 1, 0x0, 0x0000},
+ { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023,
+ IM_CTL, 0x00a, 3, 0x0, 0xffff},
+#if 0
+//"CFI?", /* 02 FIND CFI and If FIND go to S1_DROP */
+//0x1000, 0x1000, OP_EQ, 0, S1_DROP, 1, S1_8023, CL_REG, 0x000, 0, 0x0, 0x0000,
+#endif
+ { "CFI?", /* FIND CFI and If FIND go to CleanUP1 (ignore and send to host) */
+ 0x1000, 0x1000, OP_EQ, 0, S1_CLNP, 1, S1_8023,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6,
+ LD_SAP, 0x100, 3, 0x0, 0xffff},
+ { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP,
+ LD_SUM, 0x00a, 1, 0x0, 0x0000},
+ { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP,
+ LD_LEN, 0x03e, 1, 0x0, 0xffff},
+ { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_ESP4,
+ LD_FID, 0x182, 1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
+ { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP,
+ LD_SUM, 0x015, 1, 0x0, 0x0000},
+ { "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP,
+ IM_R1, 0x128, 1, 0x0, 0xffff},
+ { "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP,
+ LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */
+ { "TCP64?",
+#if 0
+//@@@0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_ESP6, LD_LEN, 0x03f, 1, 0x0, 0xffff,
+#endif
+ 0xff00, 0x0600, OP_EQ, 12, S1_TCPSQ, 0, S1_ESP6, LD_LEN,
+ 0x03f, 1, 0x0, 0xffff},
+ { "TCP seq", /* 14:DADDR should point to dest port */
+ 0xFFFF, 0x0080, OP_EQ, 0, S2_HTTP, 0, S1_TCPFG, LD_SEQ,
+ 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
+ { "TCP control flags", 0xFFFF, 0x8080, OP_EQ, 0, S2_HTTP, 0,
+ S1_TCPHL, ST_FLG, 0x145, 2, 0x0, 0x002f}, /* Load TCP flags */
+ { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc,
+ LD_R1, 0x205, 3, 0xB, 0xf000} ,
+ { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
+ S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff},
+ { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2,
+ IM_CTL, 0x001, 3, 0x0, 0x0001},
+ { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ CL_REG, 0x002, 3, 0x0, 0x0000},
+ { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ IM_CTL, 0x080, 3, 0x0, 0xffff},
+ { "No HTTP", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ IM_CTL, 0x044, 3, 0x0, 0xffff},
+ { "IPV4 ESP encrypted?", /* S1_ESP4 */
+ 0x00ff, 0x0032, OP_EQ, 0, S1_CLNP2, 0, S1_AH4, IM_CTL,
+ 0x021, 1, 0x0, 0xffff},
+ { "IPV4 AH encrypted?", /* S1_AH4 */
+ 0x00ff, 0x0033, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL,
+ 0x021, 1, 0x0, 0xffff},
+ { "IPV6 ESP encrypted?", /* S1_ESP6 */
+#if 0
+//@@@0x00ff, 0x0032, OP_EQ, 0, S1_CLNP2, 0, S1_AH6, IM_CTL, 0x021, 1, 0x0, 0xffff,
+#endif
+ 0xff00, 0x3200, OP_EQ, 0, S1_CLNP2, 0, S1_AH6, IM_CTL,
+ 0x021, 1, 0x0, 0xffff},
+ { "IPV6 AH encrypted?", /* S1_AH6 */
+#if 0
+//@@@0x00ff, 0x0033, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL, 0x021, 1, 0x0, 0xffff,
+#endif
+ 0xff00, 0x3300, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL,
+ 0x021, 1, 0x0, 0xffff},
+ { NULL },
+};
+#ifdef HP_ENCRYPT_DEFAULT
+#define CAS_HP_FIRMWARE cas_prog_encryptiontab
+#endif
+#endif
+
+static cas_hp_inst_t cas_prog_null[] = { {NULL} };
+#ifdef HP_NULL_DEFAULT
+#define CAS_HP_FIRMWARE cas_prog_null
+#endif
+
+/* firmware patch for NS_DP83065 */
+typedef struct cas_saturn_patch {
+ u16 addr;
+ u16 val;
+} cas_saturn_patch_t;
+
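+/* Illustrative sketch, not part of the original header: the patch is a
+ * flat table of (addr, val) pairs, so applying it amounts to walking the
+ * entries and handing each pair to the PHY.  The phy_write callback and
+ * the choice of DP83065_MII_REGE/DP83065_MII_REGD as the address and
+ * data registers are assumptions for illustration only.
+ */
+static inline void
+cas_saturn_apply_patch(const cas_saturn_patch_t *patch, int n,
+		       void (*phy_write)(int reg, u16 val))
+{
+	int i;
+
+	for (i = 0; i < n; i++) {
+		phy_write(DP83065_MII_REGE, patch[i].addr);
+		phy_write(DP83065_MII_REGD, patch[i].val);
+	}
+}
+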
+#if 1
+cas_saturn_patch_t cas_saturn_patch[] = {
+{0x8200, 0x007e}, {0x8201, 0x0082}, {0x8202, 0x0009},
+{0x8203, 0x0000}, {0x8204, 0x0000}, {0x8205, 0x0000},
+{0x8206, 0x0000}, {0x8207, 0x0000}, {0x8208, 0x0000},
+{0x8209, 0x008e}, {0x820a, 0x008e}, {0x820b, 0x00ff},
+{0x820c, 0x00ce}, {0x820d, 0x0082}, {0x820e, 0x0025},
+{0x820f, 0x00ff}, {0x8210, 0x0001}, {0x8211, 0x000f},
+{0x8212, 0x00ce}, {0x8213, 0x0084}, {0x8214, 0x0026},
+{0x8215, 0x00ff}, {0x8216, 0x0001}, {0x8217, 0x0011},
+{0x8218, 0x00ce}, {0x8219, 0x0085}, {0x821a, 0x003d},
+{0x821b, 0x00df}, {0x821c, 0x00e5}, {0x821d, 0x0086},
+{0x821e, 0x0039}, {0x821f, 0x00b7}, {0x8220, 0x008f},
+{0x8221, 0x00f8}, {0x8222, 0x007e}, {0x8223, 0x00c3},
+{0x8224, 0x00c2}, {0x8225, 0x0096}, {0x8226, 0x0047},
+{0x8227, 0x0084}, {0x8228, 0x00f3}, {0x8229, 0x008a},
+{0x822a, 0x0000}, {0x822b, 0x0097}, {0x822c, 0x0047},
+{0x822d, 0x00ce}, {0x822e, 0x0082}, {0x822f, 0x0033},
+{0x8230, 0x00ff}, {0x8231, 0x0001}, {0x8232, 0x000f},
+{0x8233, 0x0096}, {0x8234, 0x0046}, {0x8235, 0x0084},
+{0x8236, 0x000c}, {0x8237, 0x0081}, {0x8238, 0x0004},
+{0x8239, 0x0027}, {0x823a, 0x000b}, {0x823b, 0x0096},
+{0x823c, 0x0046}, {0x823d, 0x0084}, {0x823e, 0x000c},
+{0x823f, 0x0081}, {0x8240, 0x0008}, {0x8241, 0x0027},
+{0x8242, 0x0057}, {0x8243, 0x007e}, {0x8244, 0x0084},
+{0x8245, 0x0025}, {0x8246, 0x0096}, {0x8247, 0x0047},
+{0x8248, 0x0084}, {0x8249, 0x00f3}, {0x824a, 0x008a},
+{0x824b, 0x0004}, {0x824c, 0x0097}, {0x824d, 0x0047},
+{0x824e, 0x00ce}, {0x824f, 0x0082}, {0x8250, 0x0054},
+{0x8251, 0x00ff}, {0x8252, 0x0001}, {0x8253, 0x000f},
+{0x8254, 0x0096}, {0x8255, 0x0046}, {0x8256, 0x0084},
+{0x8257, 0x000c}, {0x8258, 0x0081}, {0x8259, 0x0004},
+{0x825a, 0x0026}, {0x825b, 0x0038}, {0x825c, 0x00b6},
+{0x825d, 0x0012}, {0x825e, 0x0020}, {0x825f, 0x0084},
+{0x8260, 0x0020}, {0x8261, 0x0026}, {0x8262, 0x0003},
+{0x8263, 0x007e}, {0x8264, 0x0084}, {0x8265, 0x0025},
+{0x8266, 0x0096}, {0x8267, 0x007b}, {0x8268, 0x00d6},
+{0x8269, 0x007c}, {0x826a, 0x00fe}, {0x826b, 0x008f},
+{0x826c, 0x0056}, {0x826d, 0x00bd}, {0x826e, 0x00f7},
+{0x826f, 0x00b6}, {0x8270, 0x00fe}, {0x8271, 0x008f},
+{0x8272, 0x004e}, {0x8273, 0x00bd}, {0x8274, 0x00ec},
+{0x8275, 0x008e}, {0x8276, 0x00bd}, {0x8277, 0x00fa},
+{0x8278, 0x00f7}, {0x8279, 0x00bd}, {0x827a, 0x00f7},
+{0x827b, 0x0028}, {0x827c, 0x00ce}, {0x827d, 0x0082},
+{0x827e, 0x0082}, {0x827f, 0x00ff}, {0x8280, 0x0001},
+{0x8281, 0x000f}, {0x8282, 0x0096}, {0x8283, 0x0046},
+{0x8284, 0x0084}, {0x8285, 0x000c}, {0x8286, 0x0081},
+{0x8287, 0x0004}, {0x8288, 0x0026}, {0x8289, 0x000a},
+{0x828a, 0x00b6}, {0x828b, 0x0012}, {0x828c, 0x0020},
+{0x828d, 0x0084}, {0x828e, 0x0020}, {0x828f, 0x0027},
+{0x8290, 0x00b5}, {0x8291, 0x007e}, {0x8292, 0x0084},
+{0x8293, 0x0025}, {0x8294, 0x00bd}, {0x8295, 0x00f7},
+{0x8296, 0x001f}, {0x8297, 0x007e}, {0x8298, 0x0084},
+{0x8299, 0x001f}, {0x829a, 0x0096}, {0x829b, 0x0047},
+{0x829c, 0x0084}, {0x829d, 0x00f3}, {0x829e, 0x008a},
+{0x829f, 0x0008}, {0x82a0, 0x0097}, {0x82a1, 0x0047},
+{0x82a2, 0x00de}, {0x82a3, 0x00e1}, {0x82a4, 0x00ad},
+{0x82a5, 0x0000}, {0x82a6, 0x00ce}, {0x82a7, 0x0082},
+{0x82a8, 0x00af}, {0x82a9, 0x00ff}, {0x82aa, 0x0001},
+{0x82ab, 0x000f}, {0x82ac, 0x007e}, {0x82ad, 0x0084},
+{0x82ae, 0x0025}, {0x82af, 0x0096}, {0x82b0, 0x0041},
+{0x82b1, 0x0085}, {0x82b2, 0x0010}, {0x82b3, 0x0026},
+{0x82b4, 0x0006}, {0x82b5, 0x0096}, {0x82b6, 0x0023},
+{0x82b7, 0x0085}, {0x82b8, 0x0040}, {0x82b9, 0x0027},
+{0x82ba, 0x0006}, {0x82bb, 0x00bd}, {0x82bc, 0x00ed},
+{0x82bd, 0x0000}, {0x82be, 0x007e}, {0x82bf, 0x0083},
+{0x82c0, 0x00a2}, {0x82c1, 0x00de}, {0x82c2, 0x0042},
+{0x82c3, 0x00bd}, {0x82c4, 0x00eb}, {0x82c5, 0x008e},
+{0x82c6, 0x0096}, {0x82c7, 0x0024}, {0x82c8, 0x0084},
+{0x82c9, 0x0008}, {0x82ca, 0x0027}, {0x82cb, 0x0003},
+{0x82cc, 0x007e}, {0x82cd, 0x0083}, {0x82ce, 0x00df},
+{0x82cf, 0x0096}, {0x82d0, 0x007b}, {0x82d1, 0x00d6},
+{0x82d2, 0x007c}, {0x82d3, 0x00fe}, {0x82d4, 0x008f},
+{0x82d5, 0x0056}, {0x82d6, 0x00bd}, {0x82d7, 0x00f7},
+{0x82d8, 0x00b6}, {0x82d9, 0x00fe}, {0x82da, 0x008f},
+{0x82db, 0x0050}, {0x82dc, 0x00bd}, {0x82dd, 0x00ec},
+{0x82de, 0x008e}, {0x82df, 0x00bd}, {0x82e0, 0x00fa},
+{0x82e1, 0x00f7}, {0x82e2, 0x0086}, {0x82e3, 0x0011},
+{0x82e4, 0x00c6}, {0x82e5, 0x0049}, {0x82e6, 0x00bd},
+{0x82e7, 0x00e4}, {0x82e8, 0x0012}, {0x82e9, 0x00ce},
+{0x82ea, 0x0082}, {0x82eb, 0x00ef}, {0x82ec, 0x00ff},
+{0x82ed, 0x0001}, {0x82ee, 0x000f}, {0x82ef, 0x0096},
+{0x82f0, 0x0046}, {0x82f1, 0x0084}, {0x82f2, 0x000c},
+{0x82f3, 0x0081}, {0x82f4, 0x0000}, {0x82f5, 0x0027},
+{0x82f6, 0x0017}, {0x82f7, 0x00c6}, {0x82f8, 0x0049},
+{0x82f9, 0x00bd}, {0x82fa, 0x00e4}, {0x82fb, 0x0091},
+{0x82fc, 0x0024}, {0x82fd, 0x000d}, {0x82fe, 0x00b6},
+{0x82ff, 0x0012}, {0x8300, 0x0020}, {0x8301, 0x0085},
+{0x8302, 0x0020}, {0x8303, 0x0026}, {0x8304, 0x000c},
+{0x8305, 0x00ce}, {0x8306, 0x0082}, {0x8307, 0x00c1},
+{0x8308, 0x00ff}, {0x8309, 0x0001}, {0x830a, 0x000f},
+{0x830b, 0x007e}, {0x830c, 0x0084}, {0x830d, 0x0025},
+{0x830e, 0x007e}, {0x830f, 0x0084}, {0x8310, 0x0016},
+{0x8311, 0x00fe}, {0x8312, 0x008f}, {0x8313, 0x0052},
+{0x8314, 0x00bd}, {0x8315, 0x00ec}, {0x8316, 0x008e},
+{0x8317, 0x00bd}, {0x8318, 0x00fa}, {0x8319, 0x00f7},
+{0x831a, 0x0086}, {0x831b, 0x006a}, {0x831c, 0x00c6},
+{0x831d, 0x0049}, {0x831e, 0x00bd}, {0x831f, 0x00e4},
+{0x8320, 0x0012}, {0x8321, 0x00ce}, {0x8322, 0x0083},
+{0x8323, 0x0027}, {0x8324, 0x00ff}, {0x8325, 0x0001},
+{0x8326, 0x000f}, {0x8327, 0x0096}, {0x8328, 0x0046},
+{0x8329, 0x0084}, {0x832a, 0x000c}, {0x832b, 0x0081},
+{0x832c, 0x0000}, {0x832d, 0x0027}, {0x832e, 0x000a},
+{0x832f, 0x00c6}, {0x8330, 0x0049}, {0x8331, 0x00bd},
+{0x8332, 0x00e4}, {0x8333, 0x0091}, {0x8334, 0x0025},
+{0x8335, 0x0006}, {0x8336, 0x007e}, {0x8337, 0x0084},
+{0x8338, 0x0025}, {0x8339, 0x007e}, {0x833a, 0x0084},
+{0x833b, 0x0016}, {0x833c, 0x00b6}, {0x833d, 0x0018},
+{0x833e, 0x0070}, {0x833f, 0x00bb}, {0x8340, 0x0019},
+{0x8341, 0x0070}, {0x8342, 0x002a}, {0x8343, 0x0004},
+{0x8344, 0x0081}, {0x8345, 0x00af}, {0x8346, 0x002e},
+{0x8347, 0x0019}, {0x8348, 0x0096}, {0x8349, 0x007b},
+{0x834a, 0x00f6}, {0x834b, 0x0020}, {0x834c, 0x0007},
+{0x834d, 0x00fa}, {0x834e, 0x0020}, {0x834f, 0x0027},
+{0x8350, 0x00c4}, {0x8351, 0x0038}, {0x8352, 0x0081},
+{0x8353, 0x0038}, {0x8354, 0x0027}, {0x8355, 0x000b},
+{0x8356, 0x00f6}, {0x8357, 0x0020}, {0x8358, 0x0007},
+{0x8359, 0x00fa}, {0x835a, 0x0020}, {0x835b, 0x0027},
+{0x835c, 0x00cb}, {0x835d, 0x0008}, {0x835e, 0x007e},
+{0x835f, 0x0082}, {0x8360, 0x00d3}, {0x8361, 0x00bd},
+{0x8362, 0x00f7}, {0x8363, 0x0066}, {0x8364, 0x0086},
+{0x8365, 0x0074}, {0x8366, 0x00c6}, {0x8367, 0x0049},
+{0x8368, 0x00bd}, {0x8369, 0x00e4}, {0x836a, 0x0012},
+{0x836b, 0x00ce}, {0x836c, 0x0083}, {0x836d, 0x0071},
+{0x836e, 0x00ff}, {0x836f, 0x0001}, {0x8370, 0x000f},
+{0x8371, 0x0096}, {0x8372, 0x0046}, {0x8373, 0x0084},
+{0x8374, 0x000c}, {0x8375, 0x0081}, {0x8376, 0x0008},
+{0x8377, 0x0026}, {0x8378, 0x000a}, {0x8379, 0x00c6},
+{0x837a, 0x0049}, {0x837b, 0x00bd}, {0x837c, 0x00e4},
+{0x837d, 0x0091}, {0x837e, 0x0025}, {0x837f, 0x0006},
+{0x8380, 0x007e}, {0x8381, 0x0084}, {0x8382, 0x0025},
+{0x8383, 0x007e}, {0x8384, 0x0084}, {0x8385, 0x0016},
+{0x8386, 0x00bd}, {0x8387, 0x00f7}, {0x8388, 0x003e},
+{0x8389, 0x0026}, {0x838a, 0x000e}, {0x838b, 0x00bd},
+{0x838c, 0x00e5}, {0x838d, 0x0009}, {0x838e, 0x0026},
+{0x838f, 0x0006}, {0x8390, 0x00ce}, {0x8391, 0x0082},
+{0x8392, 0x00c1}, {0x8393, 0x00ff}, {0x8394, 0x0001},
+{0x8395, 0x000f}, {0x8396, 0x007e}, {0x8397, 0x0084},
+{0x8398, 0x0025}, {0x8399, 0x00fe}, {0x839a, 0x008f},
+{0x839b, 0x0054}, {0x839c, 0x00bd}, {0x839d, 0x00ec},
+{0x839e, 0x008e}, {0x839f, 0x00bd}, {0x83a0, 0x00fa},
+{0x83a1, 0x00f7}, {0x83a2, 0x00bd}, {0x83a3, 0x00f7},
+{0x83a4, 0x0033}, {0x83a5, 0x0086}, {0x83a6, 0x000f},
+{0x83a7, 0x00c6}, {0x83a8, 0x0051}, {0x83a9, 0x00bd},
+{0x83aa, 0x00e4}, {0x83ab, 0x0012}, {0x83ac, 0x00ce},
+{0x83ad, 0x0083}, {0x83ae, 0x00b2}, {0x83af, 0x00ff},
+{0x83b0, 0x0001}, {0x83b1, 0x000f}, {0x83b2, 0x0096},
+{0x83b3, 0x0046}, {0x83b4, 0x0084}, {0x83b5, 0x000c},
+{0x83b6, 0x0081}, {0x83b7, 0x0008}, {0x83b8, 0x0026},
+{0x83b9, 0x005c}, {0x83ba, 0x00b6}, {0x83bb, 0x0012},
+{0x83bc, 0x0020}, {0x83bd, 0x0084}, {0x83be, 0x003f},
+{0x83bf, 0x0081}, {0x83c0, 0x003a}, {0x83c1, 0x0027},
+{0x83c2, 0x001c}, {0x83c3, 0x0096}, {0x83c4, 0x0023},
+{0x83c5, 0x0085}, {0x83c6, 0x0040}, {0x83c7, 0x0027},
+{0x83c8, 0x0003}, {0x83c9, 0x007e}, {0x83ca, 0x0084},
+{0x83cb, 0x0025}, {0x83cc, 0x00c6}, {0x83cd, 0x0051},
+{0x83ce, 0x00bd}, {0x83cf, 0x00e4}, {0x83d0, 0x0091},
+{0x83d1, 0x0025}, {0x83d2, 0x0003}, {0x83d3, 0x007e},
+{0x83d4, 0x0084}, {0x83d5, 0x0025}, {0x83d6, 0x00ce},
+{0x83d7, 0x0082}, {0x83d8, 0x00c1}, {0x83d9, 0x00ff},
+{0x83da, 0x0001}, {0x83db, 0x000f}, {0x83dc, 0x007e},
+{0x83dd, 0x0084}, {0x83de, 0x0025}, {0x83df, 0x00bd},
+{0x83e0, 0x00f8}, {0x83e1, 0x0037}, {0x83e2, 0x007c},
+{0x83e3, 0x0000}, {0x83e4, 0x007a}, {0x83e5, 0x00ce},
+{0x83e6, 0x0083}, {0x83e7, 0x00ee}, {0x83e8, 0x00ff},
+{0x83e9, 0x0001}, {0x83ea, 0x000f}, {0x83eb, 0x007e},
+{0x83ec, 0x0084}, {0x83ed, 0x0025}, {0x83ee, 0x0096},
+{0x83ef, 0x0046}, {0x83f0, 0x0084}, {0x83f1, 0x000c},
+{0x83f2, 0x0081}, {0x83f3, 0x0008}, {0x83f4, 0x0026},
+{0x83f5, 0x0020}, {0x83f6, 0x0096}, {0x83f7, 0x0024},
+{0x83f8, 0x0084}, {0x83f9, 0x0008}, {0x83fa, 0x0026},
+{0x83fb, 0x0029}, {0x83fc, 0x00b6}, {0x83fd, 0x0018},
+{0x83fe, 0x0082}, {0x83ff, 0x00bb}, {0x8400, 0x0019},
+{0x8401, 0x0082}, {0x8402, 0x00b1}, {0x8403, 0x0001},
+{0x8404, 0x003b}, {0x8405, 0x0022}, {0x8406, 0x0009},
+{0x8407, 0x00b6}, {0x8408, 0x0012}, {0x8409, 0x0020},
+{0x840a, 0x0084}, {0x840b, 0x0037}, {0x840c, 0x0081},
+{0x840d, 0x0032}, {0x840e, 0x0027}, {0x840f, 0x0015},
+{0x8410, 0x00bd}, {0x8411, 0x00f8}, {0x8412, 0x0044},
+{0x8413, 0x007e}, {0x8414, 0x0082}, {0x8415, 0x00c1},
+{0x8416, 0x00bd}, {0x8417, 0x00f7}, {0x8418, 0x001f},
+{0x8419, 0x00bd}, {0x841a, 0x00f8}, {0x841b, 0x0044},
+{0x841c, 0x00bd}, {0x841d, 0x00fc}, {0x841e, 0x0029},
+{0x841f, 0x00ce}, {0x8420, 0x0082}, {0x8421, 0x0025},
+{0x8422, 0x00ff}, {0x8423, 0x0001}, {0x8424, 0x000f},
+{0x8425, 0x0039}, {0x8426, 0x0096}, {0x8427, 0x0047},
+{0x8428, 0x0084}, {0x8429, 0x00fc}, {0x842a, 0x008a},
+{0x842b, 0x0000}, {0x842c, 0x0097}, {0x842d, 0x0047},
+{0x842e, 0x00ce}, {0x842f, 0x0084}, {0x8430, 0x0034},
+{0x8431, 0x00ff}, {0x8432, 0x0001}, {0x8433, 0x0011},
+{0x8434, 0x0096}, {0x8435, 0x0046}, {0x8436, 0x0084},
+{0x8437, 0x0003}, {0x8438, 0x0081}, {0x8439, 0x0002},
+{0x843a, 0x0027}, {0x843b, 0x0003}, {0x843c, 0x007e},
+{0x843d, 0x0085}, {0x843e, 0x001e}, {0x843f, 0x0096},
+{0x8440, 0x0047}, {0x8441, 0x0084}, {0x8442, 0x00fc},
+{0x8443, 0x008a}, {0x8444, 0x0002}, {0x8445, 0x0097},
+{0x8446, 0x0047}, {0x8447, 0x00de}, {0x8448, 0x00e1},
+{0x8449, 0x00ad}, {0x844a, 0x0000}, {0x844b, 0x0086},
+{0x844c, 0x0001}, {0x844d, 0x00b7}, {0x844e, 0x0012},
+{0x844f, 0x0051}, {0x8450, 0x00bd}, {0x8451, 0x00f7},
+{0x8452, 0x0014}, {0x8453, 0x00b6}, {0x8454, 0x0010},
+{0x8455, 0x0031}, {0x8456, 0x0084}, {0x8457, 0x00fd},
+{0x8458, 0x00b7}, {0x8459, 0x0010}, {0x845a, 0x0031},
+{0x845b, 0x00bd}, {0x845c, 0x00f8}, {0x845d, 0x001e},
+{0x845e, 0x0096}, {0x845f, 0x0081}, {0x8460, 0x00d6},
+{0x8461, 0x0082}, {0x8462, 0x00fe}, {0x8463, 0x008f},
+{0x8464, 0x005a}, {0x8465, 0x00bd}, {0x8466, 0x00f7},
+{0x8467, 0x00b6}, {0x8468, 0x00fe}, {0x8469, 0x008f},
+{0x846a, 0x005c}, {0x846b, 0x00bd}, {0x846c, 0x00ec},
+{0x846d, 0x008e}, {0x846e, 0x00bd}, {0x846f, 0x00fa},
+{0x8470, 0x00f7}, {0x8471, 0x0086}, {0x8472, 0x0008},
+{0x8473, 0x00d6}, {0x8474, 0x0000}, {0x8475, 0x00c5},
+{0x8476, 0x0010}, {0x8477, 0x0026}, {0x8478, 0x0002},
+{0x8479, 0x008b}, {0x847a, 0x0020}, {0x847b, 0x00c6},
+{0x847c, 0x0051}, {0x847d, 0x00bd}, {0x847e, 0x00e4},
+{0x847f, 0x0012}, {0x8480, 0x00ce}, {0x8481, 0x0084},
+{0x8482, 0x0086}, {0x8483, 0x00ff}, {0x8484, 0x0001},
+{0x8485, 0x0011}, {0x8486, 0x0096}, {0x8487, 0x0046},
+{0x8488, 0x0084}, {0x8489, 0x0003}, {0x848a, 0x0081},
+{0x848b, 0x0002}, {0x848c, 0x0027}, {0x848d, 0x0003},
+{0x848e, 0x007e}, {0x848f, 0x0085}, {0x8490, 0x000f},
+{0x8491, 0x00c6}, {0x8492, 0x0051}, {0x8493, 0x00bd},
+{0x8494, 0x00e4}, {0x8495, 0x0091}, {0x8496, 0x0025},
+{0x8497, 0x0003}, {0x8498, 0x007e}, {0x8499, 0x0085},
+{0x849a, 0x001e}, {0x849b, 0x0096}, {0x849c, 0x0044},
+{0x849d, 0x0085}, {0x849e, 0x0010}, {0x849f, 0x0026},
+{0x84a0, 0x000a}, {0x84a1, 0x00b6}, {0x84a2, 0x0012},
+{0x84a3, 0x0050}, {0x84a4, 0x00ba}, {0x84a5, 0x0001},
+{0x84a6, 0x003c}, {0x84a7, 0x0085}, {0x84a8, 0x0010},
+{0x84a9, 0x0027}, {0x84aa, 0x00a8}, {0x84ab, 0x00bd},
+{0x84ac, 0x00f7}, {0x84ad, 0x0066}, {0x84ae, 0x00ce},
+{0x84af, 0x0084}, {0x84b0, 0x00b7}, {0x84b1, 0x00ff},
+{0x84b2, 0x0001}, {0x84b3, 0x0011}, {0x84b4, 0x007e},
+{0x84b5, 0x0085}, {0x84b6, 0x001e}, {0x84b7, 0x0096},
+{0x84b8, 0x0046}, {0x84b9, 0x0084}, {0x84ba, 0x0003},
+{0x84bb, 0x0081}, {0x84bc, 0x0002}, {0x84bd, 0x0026},
+{0x84be, 0x0050}, {0x84bf, 0x00b6}, {0x84c0, 0x0012},
+{0x84c1, 0x0030}, {0x84c2, 0x0084}, {0x84c3, 0x0003},
+{0x84c4, 0x0081}, {0x84c5, 0x0001}, {0x84c6, 0x0027},
+{0x84c7, 0x0003}, {0x84c8, 0x007e}, {0x84c9, 0x0085},
+{0x84ca, 0x001e}, {0x84cb, 0x0096}, {0x84cc, 0x0044},
+{0x84cd, 0x0085}, {0x84ce, 0x0010}, {0x84cf, 0x0026},
+{0x84d0, 0x0013}, {0x84d1, 0x00b6}, {0x84d2, 0x0012},
+{0x84d3, 0x0050}, {0x84d4, 0x00ba}, {0x84d5, 0x0001},
+{0x84d6, 0x003c}, {0x84d7, 0x0085}, {0x84d8, 0x0010},
+{0x84d9, 0x0026}, {0x84da, 0x0009}, {0x84db, 0x00ce},
+{0x84dc, 0x0084}, {0x84dd, 0x0053}, {0x84de, 0x00ff},
+{0x84df, 0x0001}, {0x84e0, 0x0011}, {0x84e1, 0x007e},
+{0x84e2, 0x0085}, {0x84e3, 0x001e}, {0x84e4, 0x00b6},
+{0x84e5, 0x0010}, {0x84e6, 0x0031}, {0x84e7, 0x008a},
+{0x84e8, 0x0002}, {0x84e9, 0x00b7}, {0x84ea, 0x0010},
+{0x84eb, 0x0031}, {0x84ec, 0x00bd}, {0x84ed, 0x0085},
+{0x84ee, 0x001f}, {0x84ef, 0x00bd}, {0x84f0, 0x00f8},
+{0x84f1, 0x0037}, {0x84f2, 0x007c}, {0x84f3, 0x0000},
+{0x84f4, 0x0080}, {0x84f5, 0x00ce}, {0x84f6, 0x0084},
+{0x84f7, 0x00fe}, {0x84f8, 0x00ff}, {0x84f9, 0x0001},
+{0x84fa, 0x0011}, {0x84fb, 0x007e}, {0x84fc, 0x0085},
+{0x84fd, 0x001e}, {0x84fe, 0x0096}, {0x84ff, 0x0046},
+{0x8500, 0x0084}, {0x8501, 0x0003}, {0x8502, 0x0081},
+{0x8503, 0x0002}, {0x8504, 0x0026}, {0x8505, 0x0009},
+{0x8506, 0x00b6}, {0x8507, 0x0012}, {0x8508, 0x0030},
+{0x8509, 0x0084}, {0x850a, 0x0003}, {0x850b, 0x0081},
+{0x850c, 0x0001}, {0x850d, 0x0027}, {0x850e, 0x000f},
+{0x850f, 0x00bd}, {0x8510, 0x00f8}, {0x8511, 0x0044},
+{0x8512, 0x00bd}, {0x8513, 0x00f7}, {0x8514, 0x000b},
+{0x8515, 0x00bd}, {0x8516, 0x00fc}, {0x8517, 0x0029},
+{0x8518, 0x00ce}, {0x8519, 0x0084}, {0x851a, 0x0026},
+{0x851b, 0x00ff}, {0x851c, 0x0001}, {0x851d, 0x0011},
+{0x851e, 0x0039}, {0x851f, 0x00d6}, {0x8520, 0x0022},
+{0x8521, 0x00c4}, {0x8522, 0x000f}, {0x8523, 0x00b6},
+{0x8524, 0x0012}, {0x8525, 0x0030}, {0x8526, 0x00ba},
+{0x8527, 0x0012}, {0x8528, 0x0032}, {0x8529, 0x0084},
+{0x852a, 0x0004}, {0x852b, 0x0027}, {0x852c, 0x000d},
+{0x852d, 0x0096}, {0x852e, 0x0022}, {0x852f, 0x0085},
+{0x8530, 0x0004}, {0x8531, 0x0027}, {0x8532, 0x0005},
+{0x8533, 0x00ca}, {0x8534, 0x0010}, {0x8535, 0x007e},
+{0x8536, 0x0085}, {0x8537, 0x003a}, {0x8538, 0x00ca},
+{0x8539, 0x0020}, {0x853a, 0x00d7}, {0x853b, 0x0022},
+{0x853c, 0x0039}, {0x853d, 0x0086}, {0x853e, 0x0000},
+{0x853f, 0x0097}, {0x8540, 0x0083}, {0x8541, 0x0018},
+{0x8542, 0x00ce}, {0x8543, 0x001c}, {0x8544, 0x0000},
+{0x8545, 0x00bd}, {0x8546, 0x00eb}, {0x8547, 0x0046},
+{0x8548, 0x0096}, {0x8549, 0x0057}, {0x854a, 0x0085},
+{0x854b, 0x0001}, {0x854c, 0x0027}, {0x854d, 0x0002},
+{0x854e, 0x004f}, {0x854f, 0x0039}, {0x8550, 0x0085},
+{0x8551, 0x0002}, {0x8552, 0x0027}, {0x8553, 0x0001},
+{0x8554, 0x0039}, {0x8555, 0x007f}, {0x8556, 0x008f},
+{0x8557, 0x007d}, {0x8558, 0x0086}, {0x8559, 0x0004},
+{0x855a, 0x00b7}, {0x855b, 0x0012}, {0x855c, 0x0004},
+{0x855d, 0x0086}, {0x855e, 0x0008}, {0x855f, 0x00b7},
+{0x8560, 0x0012}, {0x8561, 0x0007}, {0x8562, 0x0086},
+{0x8563, 0x0010}, {0x8564, 0x00b7}, {0x8565, 0x0012},
+{0x8566, 0x000c}, {0x8567, 0x0086}, {0x8568, 0x0007},
+{0x8569, 0x00b7}, {0x856a, 0x0012}, {0x856b, 0x0006},
+{0x856c, 0x00b6}, {0x856d, 0x008f}, {0x856e, 0x007d},
+{0x856f, 0x00b7}, {0x8570, 0x0012}, {0x8571, 0x0070},
+{0x8572, 0x0086}, {0x8573, 0x0001}, {0x8574, 0x00ba},
+{0x8575, 0x0012}, {0x8576, 0x0004}, {0x8577, 0x00b7},
+{0x8578, 0x0012}, {0x8579, 0x0004}, {0x857a, 0x0001},
+{0x857b, 0x0001}, {0x857c, 0x0001}, {0x857d, 0x0001},
+{0x857e, 0x0001}, {0x857f, 0x0001}, {0x8580, 0x00b6},
+{0x8581, 0x0012}, {0x8582, 0x0004}, {0x8583, 0x0084},
+{0x8584, 0x00fe}, {0x8585, 0x008a}, {0x8586, 0x0002},
+{0x8587, 0x00b7}, {0x8588, 0x0012}, {0x8589, 0x0004},
+{0x858a, 0x0001}, {0x858b, 0x0001}, {0x858c, 0x0001},
+{0x858d, 0x0001}, {0x858e, 0x0001}, {0x858f, 0x0001},
+{0x8590, 0x0086}, {0x8591, 0x00fd}, {0x8592, 0x00b4},
+{0x8593, 0x0012}, {0x8594, 0x0004}, {0x8595, 0x00b7},
+{0x8596, 0x0012}, {0x8597, 0x0004}, {0x8598, 0x00b6},
+{0x8599, 0x0012}, {0x859a, 0x0000}, {0x859b, 0x0084},
+{0x859c, 0x0008}, {0x859d, 0x0081}, {0x859e, 0x0008},
+{0x859f, 0x0027}, {0x85a0, 0x0016}, {0x85a1, 0x00b6},
+{0x85a2, 0x008f}, {0x85a3, 0x007d}, {0x85a4, 0x0081},
+{0x85a5, 0x000c}, {0x85a6, 0x0027}, {0x85a7, 0x0008},
+{0x85a8, 0x008b}, {0x85a9, 0x0004}, {0x85aa, 0x00b7},
+{0x85ab, 0x008f}, {0x85ac, 0x007d}, {0x85ad, 0x007e},
+{0x85ae, 0x0085}, {0x85af, 0x006c}, {0x85b0, 0x0086},
+{0x85b1, 0x0003}, {0x85b2, 0x0097}, {0x85b3, 0x0040},
+{0x85b4, 0x007e}, {0x85b5, 0x0089}, {0x85b6, 0x006e},
+{0x85b7, 0x0086}, {0x85b8, 0x0007}, {0x85b9, 0x00b7},
+{0x85ba, 0x0012}, {0x85bb, 0x0006}, {0x85bc, 0x005f},
+{0x85bd, 0x00f7}, {0x85be, 0x008f}, {0x85bf, 0x0082},
+{0x85c0, 0x005f}, {0x85c1, 0x00f7}, {0x85c2, 0x008f},
+{0x85c3, 0x007f}, {0x85c4, 0x00f7}, {0x85c5, 0x008f},
+{0x85c6, 0x0070}, {0x85c7, 0x00f7}, {0x85c8, 0x008f},
+{0x85c9, 0x0071}, {0x85ca, 0x00f7}, {0x85cb, 0x008f},
+{0x85cc, 0x0072}, {0x85cd, 0x00f7}, {0x85ce, 0x008f},
+{0x85cf, 0x0073}, {0x85d0, 0x00f7}, {0x85d1, 0x008f},
+{0x85d2, 0x0074}, {0x85d3, 0x00f7}, {0x85d4, 0x008f},
+{0x85d5, 0x0075}, {0x85d6, 0x00f7}, {0x85d7, 0x008f},
+{0x85d8, 0x0076}, {0x85d9, 0x00f7}, {0x85da, 0x008f},
+{0x85db, 0x0077}, {0x85dc, 0x00f7}, {0x85dd, 0x008f},
+{0x85de, 0x0078}, {0x85df, 0x00f7}, {0x85e0, 0x008f},
+{0x85e1, 0x0079}, {0x85e2, 0x00f7}, {0x85e3, 0x008f},
+{0x85e4, 0x007a}, {0x85e5, 0x00f7}, {0x85e6, 0x008f},
+{0x85e7, 0x007b}, {0x85e8, 0x00b6}, {0x85e9, 0x0012},
+{0x85ea, 0x0004}, {0x85eb, 0x008a}, {0x85ec, 0x0010},
+{0x85ed, 0x00b7}, {0x85ee, 0x0012}, {0x85ef, 0x0004},
+{0x85f0, 0x0086}, {0x85f1, 0x00e4}, {0x85f2, 0x00b7},
+{0x85f3, 0x0012}, {0x85f4, 0x0070}, {0x85f5, 0x00b7},
+{0x85f6, 0x0012}, {0x85f7, 0x0007}, {0x85f8, 0x00f7},
+{0x85f9, 0x0012}, {0x85fa, 0x0005}, {0x85fb, 0x00f7},
+{0x85fc, 0x0012}, {0x85fd, 0x0009}, {0x85fe, 0x0086},
+{0x85ff, 0x0008}, {0x8600, 0x00ba}, {0x8601, 0x0012},
+{0x8602, 0x0004}, {0x8603, 0x00b7}, {0x8604, 0x0012},
+{0x8605, 0x0004}, {0x8606, 0x0086}, {0x8607, 0x00f7},
+{0x8608, 0x00b4}, {0x8609, 0x0012}, {0x860a, 0x0004},
+{0x860b, 0x00b7}, {0x860c, 0x0012}, {0x860d, 0x0004},
+{0x860e, 0x0001}, {0x860f, 0x0001}, {0x8610, 0x0001},
+{0x8611, 0x0001}, {0x8612, 0x0001}, {0x8613, 0x0001},
+{0x8614, 0x00b6}, {0x8615, 0x0012}, {0x8616, 0x0008},
+{0x8617, 0x0027}, {0x8618, 0x007f}, {0x8619, 0x0081},
+{0x861a, 0x0080}, {0x861b, 0x0026}, {0x861c, 0x000b},
+{0x861d, 0x0086}, {0x861e, 0x0008}, {0x861f, 0x00ce},
+{0x8620, 0x008f}, {0x8621, 0x0079}, {0x8622, 0x00bd},
+{0x8623, 0x0089}, {0x8624, 0x007b}, {0x8625, 0x007e},
+{0x8626, 0x0086}, {0x8627, 0x008e}, {0x8628, 0x0081},
+{0x8629, 0x0040}, {0x862a, 0x0026}, {0x862b, 0x000b},
+{0x862c, 0x0086}, {0x862d, 0x0004}, {0x862e, 0x00ce},
+{0x862f, 0x008f}, {0x8630, 0x0076}, {0x8631, 0x00bd},
+{0x8632, 0x0089}, {0x8633, 0x007b}, {0x8634, 0x007e},
+{0x8635, 0x0086}, {0x8636, 0x008e}, {0x8637, 0x0081},
+{0x8638, 0x0020}, {0x8639, 0x0026}, {0x863a, 0x000b},
+{0x863b, 0x0086}, {0x863c, 0x0002}, {0x863d, 0x00ce},
+{0x863e, 0x008f}, {0x863f, 0x0073}, {0x8640, 0x00bd},
+{0x8641, 0x0089}, {0x8642, 0x007b}, {0x8643, 0x007e},
+{0x8644, 0x0086}, {0x8645, 0x008e}, {0x8646, 0x0081},
+{0x8647, 0x0010}, {0x8648, 0x0026}, {0x8649, 0x000b},
+{0x864a, 0x0086}, {0x864b, 0x0001}, {0x864c, 0x00ce},
+{0x864d, 0x008f}, {0x864e, 0x0070}, {0x864f, 0x00bd},
+{0x8650, 0x0089}, {0x8651, 0x007b}, {0x8652, 0x007e},
+{0x8653, 0x0086}, {0x8654, 0x008e}, {0x8655, 0x0081},
+{0x8656, 0x0008}, {0x8657, 0x0026}, {0x8658, 0x000b},
+{0x8659, 0x0086}, {0x865a, 0x0008}, {0x865b, 0x00ce},
+{0x865c, 0x008f}, {0x865d, 0x0079}, {0x865e, 0x00bd},
+{0x865f, 0x0089}, {0x8660, 0x007f}, {0x8661, 0x007e},
+{0x8662, 0x0086}, {0x8663, 0x008e}, {0x8664, 0x0081},
+{0x8665, 0x0004}, {0x8666, 0x0026}, {0x8667, 0x000b},
+{0x8668, 0x0086}, {0x8669, 0x0004}, {0x866a, 0x00ce},
+{0x866b, 0x008f}, {0x866c, 0x0076}, {0x866d, 0x00bd},
+{0x866e, 0x0089}, {0x866f, 0x007f}, {0x8670, 0x007e},
+{0x8671, 0x0086}, {0x8672, 0x008e}, {0x8673, 0x0081},
+{0x8674, 0x0002}, {0x8675, 0x0026}, {0x8676, 0x000b},
+{0x8677, 0x008a}, {0x8678, 0x0002}, {0x8679, 0x00ce},
+{0x867a, 0x008f}, {0x867b, 0x0073}, {0x867c, 0x00bd},
+{0x867d, 0x0089}, {0x867e, 0x007f}, {0x867f, 0x007e},
+{0x8680, 0x0086}, {0x8681, 0x008e}, {0x8682, 0x0081},
+{0x8683, 0x0001}, {0x8684, 0x0026}, {0x8685, 0x0008},
+{0x8686, 0x0086}, {0x8687, 0x0001}, {0x8688, 0x00ce},
+{0x8689, 0x008f}, {0x868a, 0x0070}, {0x868b, 0x00bd},
+{0x868c, 0x0089}, {0x868d, 0x007f}, {0x868e, 0x00b6},
+{0x868f, 0x008f}, {0x8690, 0x007f}, {0x8691, 0x0081},
+{0x8692, 0x000f}, {0x8693, 0x0026}, {0x8694, 0x0003},
+{0x8695, 0x007e}, {0x8696, 0x0087}, {0x8697, 0x0047},
+{0x8698, 0x00b6}, {0x8699, 0x0012}, {0x869a, 0x0009},
+{0x869b, 0x0084}, {0x869c, 0x0003}, {0x869d, 0x0081},
+{0x869e, 0x0003}, {0x869f, 0x0027}, {0x86a0, 0x0006},
+{0x86a1, 0x007c}, {0x86a2, 0x0012}, {0x86a3, 0x0009},
+{0x86a4, 0x007e}, {0x86a5, 0x0085}, {0x86a6, 0x00fe},
+{0x86a7, 0x00b6}, {0x86a8, 0x0012}, {0x86a9, 0x0006},
+{0x86aa, 0x0084}, {0x86ab, 0x0007}, {0x86ac, 0x0081},
+{0x86ad, 0x0007}, {0x86ae, 0x0027}, {0x86af, 0x0008},
+{0x86b0, 0x008b}, {0x86b1, 0x0001}, {0x86b2, 0x00b7},
+{0x86b3, 0x0012}, {0x86b4, 0x0006}, {0x86b5, 0x007e},
+{0x86b6, 0x0086}, {0x86b7, 0x00d5}, {0x86b8, 0x00b6},
+{0x86b9, 0x008f}, {0x86ba, 0x0082}, {0x86bb, 0x0026},
+{0x86bc, 0x000a}, {0x86bd, 0x007c}, {0x86be, 0x008f},
+{0x86bf, 0x0082}, {0x86c0, 0x004f}, {0x86c1, 0x00b7},
+{0x86c2, 0x0012}, {0x86c3, 0x0006}, {0x86c4, 0x007e},
+{0x86c5, 0x0085}, {0x86c6, 0x00c0}, {0x86c7, 0x00b6},
+{0x86c8, 0x0012}, {0x86c9, 0x0006}, {0x86ca, 0x0084},
+{0x86cb, 0x003f}, {0x86cc, 0x0081}, {0x86cd, 0x003f},
+{0x86ce, 0x0027}, {0x86cf, 0x0010}, {0x86d0, 0x008b},
+{0x86d1, 0x0008}, {0x86d2, 0x00b7}, {0x86d3, 0x0012},
+{0x86d4, 0x0006}, {0x86d5, 0x00b6}, {0x86d6, 0x0012},
+{0x86d7, 0x0009}, {0x86d8, 0x0084}, {0x86d9, 0x00fc},
+{0x86da, 0x00b7}, {0x86db, 0x0012}, {0x86dc, 0x0009},
+{0x86dd, 0x007e}, {0x86de, 0x0085}, {0x86df, 0x00fe},
+{0x86e0, 0x00ce}, {0x86e1, 0x008f}, {0x86e2, 0x0070},
+{0x86e3, 0x0018}, {0x86e4, 0x00ce}, {0x86e5, 0x008f},
+{0x86e6, 0x0084}, {0x86e7, 0x00c6}, {0x86e8, 0x000c},
+{0x86e9, 0x00bd}, {0x86ea, 0x0089}, {0x86eb, 0x006f},
+{0x86ec, 0x00ce}, {0x86ed, 0x008f}, {0x86ee, 0x0084},
+{0x86ef, 0x0018}, {0x86f0, 0x00ce}, {0x86f1, 0x008f},
+{0x86f2, 0x0070}, {0x86f3, 0x00c6}, {0x86f4, 0x000c},
+{0x86f5, 0x00bd}, {0x86f6, 0x0089}, {0x86f7, 0x006f},
+{0x86f8, 0x00d6}, {0x86f9, 0x0083}, {0x86fa, 0x00c1},
+{0x86fb, 0x004f}, {0x86fc, 0x002d}, {0x86fd, 0x0003},
+{0x86fe, 0x007e}, {0x86ff, 0x0087}, {0x8700, 0x0040},
+{0x8701, 0x00b6}, {0x8702, 0x008f}, {0x8703, 0x007f},
+{0x8704, 0x0081}, {0x8705, 0x0007}, {0x8706, 0x0027},
+{0x8707, 0x000f}, {0x8708, 0x0081}, {0x8709, 0x000b},
+{0x870a, 0x0027}, {0x870b, 0x0015}, {0x870c, 0x0081},
+{0x870d, 0x000d}, {0x870e, 0x0027}, {0x870f, 0x001b},
+{0x8710, 0x0081}, {0x8711, 0x000e}, {0x8712, 0x0027},
+{0x8713, 0x0021}, {0x8714, 0x007e}, {0x8715, 0x0087},
+{0x8716, 0x0040}, {0x8717, 0x00f7}, {0x8718, 0x008f},
+{0x8719, 0x007b}, {0x871a, 0x0086}, {0x871b, 0x0002},
+{0x871c, 0x00b7}, {0x871d, 0x008f}, {0x871e, 0x007a},
+{0x871f, 0x0020}, {0x8720, 0x001c}, {0x8721, 0x00f7},
+{0x8722, 0x008f}, {0x8723, 0x0078}, {0x8724, 0x0086},
+{0x8725, 0x0002}, {0x8726, 0x00b7}, {0x8727, 0x008f},
+{0x8728, 0x0077}, {0x8729, 0x0020}, {0x872a, 0x0012},
+{0x872b, 0x00f7}, {0x872c, 0x008f}, {0x872d, 0x0075},
+{0x872e, 0x0086}, {0x872f, 0x0002}, {0x8730, 0x00b7},
+{0x8731, 0x008f}, {0x8732, 0x0074}, {0x8733, 0x0020},
+{0x8734, 0x0008}, {0x8735, 0x00f7}, {0x8736, 0x008f},
+{0x8737, 0x0072}, {0x8738, 0x0086}, {0x8739, 0x0002},
+{0x873a, 0x00b7}, {0x873b, 0x008f}, {0x873c, 0x0071},
+{0x873d, 0x007e}, {0x873e, 0x0087}, {0x873f, 0x0047},
+{0x8740, 0x0086}, {0x8741, 0x0004}, {0x8742, 0x0097},
+{0x8743, 0x0040}, {0x8744, 0x007e}, {0x8745, 0x0089},
+{0x8746, 0x006e}, {0x8747, 0x00ce}, {0x8748, 0x008f},
+{0x8749, 0x0072}, {0x874a, 0x00bd}, {0x874b, 0x0089},
+{0x874c, 0x00f7}, {0x874d, 0x00ce}, {0x874e, 0x008f},
+{0x874f, 0x0075}, {0x8750, 0x00bd}, {0x8751, 0x0089},
+{0x8752, 0x00f7}, {0x8753, 0x00ce}, {0x8754, 0x008f},
+{0x8755, 0x0078}, {0x8756, 0x00bd}, {0x8757, 0x0089},
+{0x8758, 0x00f7}, {0x8759, 0x00ce}, {0x875a, 0x008f},
+{0x875b, 0x007b}, {0x875c, 0x00bd}, {0x875d, 0x0089},
+{0x875e, 0x00f7}, {0x875f, 0x004f}, {0x8760, 0x00b7},
+{0x8761, 0x008f}, {0x8762, 0x007d}, {0x8763, 0x00b7},
+{0x8764, 0x008f}, {0x8765, 0x0081}, {0x8766, 0x00b6},
+{0x8767, 0x008f}, {0x8768, 0x0072}, {0x8769, 0x0027},
+{0x876a, 0x0047}, {0x876b, 0x007c}, {0x876c, 0x008f},
+{0x876d, 0x007d}, {0x876e, 0x00b6}, {0x876f, 0x008f},
+{0x8770, 0x0075}, {0x8771, 0x0027}, {0x8772, 0x003f},
+{0x8773, 0x007c}, {0x8774, 0x008f}, {0x8775, 0x007d},
+{0x8776, 0x00b6}, {0x8777, 0x008f}, {0x8778, 0x0078},
+{0x8779, 0x0027}, {0x877a, 0x0037}, {0x877b, 0x007c},
+{0x877c, 0x008f}, {0x877d, 0x007d}, {0x877e, 0x00b6},
+{0x877f, 0x008f}, {0x8780, 0x007b}, {0x8781, 0x0027},
+{0x8782, 0x002f}, {0x8783, 0x007f}, {0x8784, 0x008f},
+{0x8785, 0x007d}, {0x8786, 0x007c}, {0x8787, 0x008f},
+{0x8788, 0x0081}, {0x8789, 0x007a}, {0x878a, 0x008f},
+{0x878b, 0x0072}, {0x878c, 0x0027}, {0x878d, 0x001b},
+{0x878e, 0x007c}, {0x878f, 0x008f}, {0x8790, 0x007d},
+{0x8791, 0x007a}, {0x8792, 0x008f}, {0x8793, 0x0075},
+{0x8794, 0x0027}, {0x8795, 0x0016}, {0x8796, 0x007c},
+{0x8797, 0x008f}, {0x8798, 0x007d}, {0x8799, 0x007a},
+{0x879a, 0x008f}, {0x879b, 0x0078}, {0x879c, 0x0027},
+{0x879d, 0x0011}, {0x879e, 0x007c}, {0x879f, 0x008f},
+{0x87a0, 0x007d}, {0x87a1, 0x007a}, {0x87a2, 0x008f},
+{0x87a3, 0x007b}, {0x87a4, 0x0027}, {0x87a5, 0x000c},
+{0x87a6, 0x007e}, {0x87a7, 0x0087}, {0x87a8, 0x0083},
+{0x87a9, 0x007a}, {0x87aa, 0x008f}, {0x87ab, 0x0075},
+{0x87ac, 0x007a}, {0x87ad, 0x008f}, {0x87ae, 0x0078},
+{0x87af, 0x007a}, {0x87b0, 0x008f}, {0x87b1, 0x007b},
+{0x87b2, 0x00ce}, {0x87b3, 0x00c1}, {0x87b4, 0x00fc},
+{0x87b5, 0x00f6}, {0x87b6, 0x008f}, {0x87b7, 0x007d},
+{0x87b8, 0x003a}, {0x87b9, 0x00a6}, {0x87ba, 0x0000},
+{0x87bb, 0x00b7}, {0x87bc, 0x0012}, {0x87bd, 0x0070},
+{0x87be, 0x00b6}, {0x87bf, 0x008f}, {0x87c0, 0x0072},
+{0x87c1, 0x0026}, {0x87c2, 0x0003}, {0x87c3, 0x007e},
+{0x87c4, 0x0087}, {0x87c5, 0x00fa}, {0x87c6, 0x00b6},
+{0x87c7, 0x008f}, {0x87c8, 0x0075}, {0x87c9, 0x0026},
+{0x87ca, 0x000a}, {0x87cb, 0x0018}, {0x87cc, 0x00ce},
+{0x87cd, 0x008f}, {0x87ce, 0x0073}, {0x87cf, 0x00bd},
+{0x87d0, 0x0089}, {0x87d1, 0x00d5}, {0x87d2, 0x007e},
+{0x87d3, 0x0087}, {0x87d4, 0x00fa}, {0x87d5, 0x00b6},
+{0x87d6, 0x008f}, {0x87d7, 0x0078}, {0x87d8, 0x0026},
+{0x87d9, 0x000a}, {0x87da, 0x0018}, {0x87db, 0x00ce},
+{0x87dc, 0x008f}, {0x87dd, 0x0076}, {0x87de, 0x00bd},
+{0x87df, 0x0089}, {0x87e0, 0x00d5}, {0x87e1, 0x007e},
+{0x87e2, 0x0087}, {0x87e3, 0x00fa}, {0x87e4, 0x00b6},
+{0x87e5, 0x008f}, {0x87e6, 0x007b}, {0x87e7, 0x0026},
+{0x87e8, 0x000a}, {0x87e9, 0x0018}, {0x87ea, 0x00ce},
+{0x87eb, 0x008f}, {0x87ec, 0x0079}, {0x87ed, 0x00bd},
+{0x87ee, 0x0089}, {0x87ef, 0x00d5}, {0x87f0, 0x007e},
+{0x87f1, 0x0087}, {0x87f2, 0x00fa}, {0x87f3, 0x0086},
+{0x87f4, 0x0005}, {0x87f5, 0x0097}, {0x87f6, 0x0040},
+{0x87f7, 0x007e}, {0x87f8, 0x0089}, {0x87f9, 0x0000},
+{0x87fa, 0x00b6}, {0x87fb, 0x008f}, {0x87fc, 0x0075},
+{0x87fd, 0x0081}, {0x87fe, 0x0007}, {0x87ff, 0x002e},
+{0x8800, 0x00f2}, {0x8801, 0x00f6}, {0x8802, 0x0012},
+{0x8803, 0x0006}, {0x8804, 0x00c4}, {0x8805, 0x00f8},
+{0x8806, 0x001b}, {0x8807, 0x00b7}, {0x8808, 0x0012},
+{0x8809, 0x0006}, {0x880a, 0x00b6}, {0x880b, 0x008f},
+{0x880c, 0x0078}, {0x880d, 0x0081}, {0x880e, 0x0007},
+{0x880f, 0x002e}, {0x8810, 0x00e2}, {0x8811, 0x0048},
+{0x8812, 0x0048}, {0x8813, 0x0048}, {0x8814, 0x00f6},
+{0x8815, 0x0012}, {0x8816, 0x0006}, {0x8817, 0x00c4},
+{0x8818, 0x00c7}, {0x8819, 0x001b}, {0x881a, 0x00b7},
+{0x881b, 0x0012}, {0x881c, 0x0006}, {0x881d, 0x00b6},
+{0x881e, 0x008f}, {0x881f, 0x007b}, {0x8820, 0x0081},
+{0x8821, 0x0007}, {0x8822, 0x002e}, {0x8823, 0x00cf},
+{0x8824, 0x00f6}, {0x8825, 0x0012}, {0x8826, 0x0005},
+{0x8827, 0x00c4}, {0x8828, 0x00f8}, {0x8829, 0x001b},
+{0x882a, 0x00b7}, {0x882b, 0x0012}, {0x882c, 0x0005},
+{0x882d, 0x0086}, {0x882e, 0x0000}, {0x882f, 0x00f6},
+{0x8830, 0x008f}, {0x8831, 0x0071}, {0x8832, 0x00bd},
+{0x8833, 0x0089}, {0x8834, 0x0094}, {0x8835, 0x0086},
+{0x8836, 0x0001}, {0x8837, 0x00f6}, {0x8838, 0x008f},
+{0x8839, 0x0074}, {0x883a, 0x00bd}, {0x883b, 0x0089},
+{0x883c, 0x0094}, {0x883d, 0x0086}, {0x883e, 0x0002},
+{0x883f, 0x00f6}, {0x8840, 0x008f}, {0x8841, 0x0077},
+{0x8842, 0x00bd}, {0x8843, 0x0089}, {0x8844, 0x0094},
+{0x8845, 0x0086}, {0x8846, 0x0003}, {0x8847, 0x00f6},
+{0x8848, 0x008f}, {0x8849, 0x007a}, {0x884a, 0x00bd},
+{0x884b, 0x0089}, {0x884c, 0x0094}, {0x884d, 0x00ce},
+{0x884e, 0x008f}, {0x884f, 0x0070}, {0x8850, 0x00a6},
+{0x8851, 0x0001}, {0x8852, 0x0081}, {0x8853, 0x0001},
+{0x8854, 0x0027}, {0x8855, 0x0007}, {0x8856, 0x0081},
+{0x8857, 0x0003}, {0x8858, 0x0027}, {0x8859, 0x0003},
+{0x885a, 0x007e}, {0x885b, 0x0088}, {0x885c, 0x0066},
+{0x885d, 0x00a6}, {0x885e, 0x0000}, {0x885f, 0x00b8},
+{0x8860, 0x008f}, {0x8861, 0x0081}, {0x8862, 0x0084},
+{0x8863, 0x0001}, {0x8864, 0x0026}, {0x8865, 0x000b},
+{0x8866, 0x008c}, {0x8867, 0x008f}, {0x8868, 0x0079},
+{0x8869, 0x002c}, {0x886a, 0x000e}, {0x886b, 0x0008},
+{0x886c, 0x0008}, {0x886d, 0x0008}, {0x886e, 0x007e},
+{0x886f, 0x0088}, {0x8870, 0x0050}, {0x8871, 0x00b6},
+{0x8872, 0x0012}, {0x8873, 0x0004}, {0x8874, 0x008a},
+{0x8875, 0x0040}, {0x8876, 0x00b7}, {0x8877, 0x0012},
+{0x8878, 0x0004}, {0x8879, 0x00b6}, {0x887a, 0x0012},
+{0x887b, 0x0004}, {0x887c, 0x0084}, {0x887d, 0x00fb},
+{0x887e, 0x0084}, {0x887f, 0x00ef}, {0x8880, 0x00b7},
+{0x8881, 0x0012}, {0x8882, 0x0004}, {0x8883, 0x00b6},
+{0x8884, 0x0012}, {0x8885, 0x0007}, {0x8886, 0x0036},
+{0x8887, 0x00b6}, {0x8888, 0x008f}, {0x8889, 0x007c},
+{0x888a, 0x0048}, {0x888b, 0x0048}, {0x888c, 0x00b7},
+{0x888d, 0x0012}, {0x888e, 0x0007}, {0x888f, 0x0086},
+{0x8890, 0x0001}, {0x8891, 0x00ba}, {0x8892, 0x0012},
+{0x8893, 0x0004}, {0x8894, 0x00b7}, {0x8895, 0x0012},
+{0x8896, 0x0004}, {0x8897, 0x0001}, {0x8898, 0x0001},
+{0x8899, 0x0001}, {0x889a, 0x0001}, {0x889b, 0x0001},
+{0x889c, 0x0001}, {0x889d, 0x0086}, {0x889e, 0x00fe},
+{0x889f, 0x00b4}, {0x88a0, 0x0012}, {0x88a1, 0x0004},
+{0x88a2, 0x00b7}, {0x88a3, 0x0012}, {0x88a4, 0x0004},
+{0x88a5, 0x0086}, {0x88a6, 0x0002}, {0x88a7, 0x00ba},
+{0x88a8, 0x0012}, {0x88a9, 0x0004}, {0x88aa, 0x00b7},
+{0x88ab, 0x0012}, {0x88ac, 0x0004}, {0x88ad, 0x0086},
+{0x88ae, 0x00fd}, {0x88af, 0x00b4}, {0x88b0, 0x0012},
+{0x88b1, 0x0004}, {0x88b2, 0x00b7}, {0x88b3, 0x0012},
+{0x88b4, 0x0004}, {0x88b5, 0x0032}, {0x88b6, 0x00b7},
+{0x88b7, 0x0012}, {0x88b8, 0x0007}, {0x88b9, 0x00b6},
+{0x88ba, 0x0012}, {0x88bb, 0x0000}, {0x88bc, 0x0084},
+{0x88bd, 0x0008}, {0x88be, 0x0081}, {0x88bf, 0x0008},
+{0x88c0, 0x0027}, {0x88c1, 0x000f}, {0x88c2, 0x007c},
+{0x88c3, 0x0082}, {0x88c4, 0x0008}, {0x88c5, 0x0026},
+{0x88c6, 0x0007}, {0x88c7, 0x0086}, {0x88c8, 0x0076},
+{0x88c9, 0x0097}, {0x88ca, 0x0040}, {0x88cb, 0x007e},
+{0x88cc, 0x0089}, {0x88cd, 0x006e}, {0x88ce, 0x007e},
+{0x88cf, 0x0086}, {0x88d0, 0x00ec}, {0x88d1, 0x00b6},
+{0x88d2, 0x008f}, {0x88d3, 0x007f}, {0x88d4, 0x0081},
+{0x88d5, 0x000f}, {0x88d6, 0x0027}, {0x88d7, 0x003c},
+{0x88d8, 0x00bd}, {0x88d9, 0x00e6}, {0x88da, 0x00c7},
+{0x88db, 0x00b7}, {0x88dc, 0x0012}, {0x88dd, 0x000d},
+{0x88de, 0x00bd}, {0x88df, 0x00e6}, {0x88e0, 0x00cb},
+{0x88e1, 0x00b6}, {0x88e2, 0x0012}, {0x88e3, 0x0004},
+{0x88e4, 0x008a}, {0x88e5, 0x0020}, {0x88e6, 0x00b7},
+{0x88e7, 0x0012}, {0x88e8, 0x0004}, {0x88e9, 0x00ce},
+{0x88ea, 0x00ff}, {0x88eb, 0x00ff}, {0x88ec, 0x00b6},
+{0x88ed, 0x0012}, {0x88ee, 0x0000}, {0x88ef, 0x0081},
+{0x88f0, 0x000c}, {0x88f1, 0x0026}, {0x88f2, 0x0005},
+{0x88f3, 0x0009}, {0x88f4, 0x0026}, {0x88f5, 0x00f6},
+{0x88f6, 0x0027}, {0x88f7, 0x001c}, {0x88f8, 0x00b6},
+{0x88f9, 0x0012}, {0x88fa, 0x0004}, {0x88fb, 0x0084},
+{0x88fc, 0x00df}, {0x88fd, 0x00b7}, {0x88fe, 0x0012},
+{0x88ff, 0x0004}, {0x8900, 0x0096}, {0x8901, 0x0083},
+{0x8902, 0x0081}, {0x8903, 0x0007}, {0x8904, 0x002c},
+{0x8905, 0x0005}, {0x8906, 0x007c}, {0x8907, 0x0000},
+{0x8908, 0x0083}, {0x8909, 0x0020}, {0x890a, 0x0006},
+{0x890b, 0x0096}, {0x890c, 0x0083}, {0x890d, 0x008b},
+{0x890e, 0x0008}, {0x890f, 0x0097}, {0x8910, 0x0083},
+{0x8911, 0x007e}, {0x8912, 0x0085}, {0x8913, 0x0041},
+{0x8914, 0x007f}, {0x8915, 0x008f}, {0x8916, 0x007e},
+{0x8917, 0x0086}, {0x8918, 0x0080}, {0x8919, 0x00b7},
+{0x891a, 0x0012}, {0x891b, 0x000c}, {0x891c, 0x0086},
+{0x891d, 0x0001}, {0x891e, 0x00b7}, {0x891f, 0x008f},
+{0x8920, 0x007d}, {0x8921, 0x00b6}, {0x8922, 0x0012},
+{0x8923, 0x000c}, {0x8924, 0x0084}, {0x8925, 0x007f},
+{0x8926, 0x00b7}, {0x8927, 0x0012}, {0x8928, 0x000c},
+{0x8929, 0x008a}, {0x892a, 0x0080}, {0x892b, 0x00b7},
+{0x892c, 0x0012}, {0x892d, 0x000c}, {0x892e, 0x0086},
+{0x892f, 0x000a}, {0x8930, 0x00bd}, {0x8931, 0x008a},
+{0x8932, 0x0006}, {0x8933, 0x00b6}, {0x8934, 0x0012},
+{0x8935, 0x000a}, {0x8936, 0x002a}, {0x8937, 0x0009},
+{0x8938, 0x00b6}, {0x8939, 0x0012}, {0x893a, 0x000c},
+{0x893b, 0x00ba}, {0x893c, 0x008f}, {0x893d, 0x007d},
+{0x893e, 0x00b7}, {0x893f, 0x0012}, {0x8940, 0x000c},
+{0x8941, 0x00b6}, {0x8942, 0x008f}, {0x8943, 0x007e},
+{0x8944, 0x0081}, {0x8945, 0x0060}, {0x8946, 0x0027},
+{0x8947, 0x001a}, {0x8948, 0x008b}, {0x8949, 0x0020},
+{0x894a, 0x00b7}, {0x894b, 0x008f}, {0x894c, 0x007e},
+{0x894d, 0x00b6}, {0x894e, 0x0012}, {0x894f, 0x000c},
+{0x8950, 0x0084}, {0x8951, 0x009f}, {0x8952, 0x00ba},
+{0x8953, 0x008f}, {0x8954, 0x007e}, {0x8955, 0x00b7},
+{0x8956, 0x0012}, {0x8957, 0x000c}, {0x8958, 0x00b6},
+{0x8959, 0x008f}, {0x895a, 0x007d}, {0x895b, 0x0048},
+{0x895c, 0x00b7}, {0x895d, 0x008f}, {0x895e, 0x007d},
+{0x895f, 0x007e}, {0x8960, 0x0089}, {0x8961, 0x0021},
+{0x8962, 0x00b6}, {0x8963, 0x0012}, {0x8964, 0x0004},
+{0x8965, 0x008a}, {0x8966, 0x0020}, {0x8967, 0x00b7},
+{0x8968, 0x0012}, {0x8969, 0x0004}, {0x896a, 0x00bd},
+{0x896b, 0x008a}, {0x896c, 0x000a}, {0x896d, 0x004f},
+{0x896e, 0x0039}, {0x896f, 0x00a6}, {0x8970, 0x0000},
+{0x8971, 0x0018}, {0x8972, 0x00a7}, {0x8973, 0x0000},
+{0x8974, 0x0008}, {0x8975, 0x0018}, {0x8976, 0x0008},
+{0x8977, 0x005a}, {0x8978, 0x0026}, {0x8979, 0x00f5},
+{0x897a, 0x0039}, {0x897b, 0x0036}, {0x897c, 0x006c},
+{0x897d, 0x0000}, {0x897e, 0x0032}, {0x897f, 0x00ba},
+{0x8980, 0x008f}, {0x8981, 0x007f}, {0x8982, 0x00b7},
+{0x8983, 0x008f}, {0x8984, 0x007f}, {0x8985, 0x00b6},
+{0x8986, 0x0012}, {0x8987, 0x0009}, {0x8988, 0x0084},
+{0x8989, 0x0003}, {0x898a, 0x00a7}, {0x898b, 0x0001},
+{0x898c, 0x00b6}, {0x898d, 0x0012}, {0x898e, 0x0006},
+{0x898f, 0x0084}, {0x8990, 0x003f}, {0x8991, 0x00a7},
+{0x8992, 0x0002}, {0x8993, 0x0039}, {0x8994, 0x0036},
+{0x8995, 0x0086}, {0x8996, 0x0003}, {0x8997, 0x00b7},
+{0x8998, 0x008f}, {0x8999, 0x0080}, {0x899a, 0x0032},
+{0x899b, 0x00c1}, {0x899c, 0x0000}, {0x899d, 0x0026},
+{0x899e, 0x0006}, {0x899f, 0x00b7}, {0x89a0, 0x008f},
+{0x89a1, 0x007c}, {0x89a2, 0x007e}, {0x89a3, 0x0089},
+{0x89a4, 0x00c9}, {0x89a5, 0x00c1}, {0x89a6, 0x0001},
+{0x89a7, 0x0027}, {0x89a8, 0x0018}, {0x89a9, 0x00c1},
+{0x89aa, 0x0002}, {0x89ab, 0x0027}, {0x89ac, 0x000c},
+{0x89ad, 0x00c1}, {0x89ae, 0x0003}, {0x89af, 0x0027},
+{0x89b0, 0x0000}, {0x89b1, 0x00f6}, {0x89b2, 0x008f},
+{0x89b3, 0x0080}, {0x89b4, 0x0005}, {0x89b5, 0x0005},
+{0x89b6, 0x00f7}, {0x89b7, 0x008f}, {0x89b8, 0x0080},
+{0x89b9, 0x00f6}, {0x89ba, 0x008f}, {0x89bb, 0x0080},
+{0x89bc, 0x0005}, {0x89bd, 0x0005}, {0x89be, 0x00f7},
+{0x89bf, 0x008f}, {0x89c0, 0x0080}, {0x89c1, 0x00f6},
+{0x89c2, 0x008f}, {0x89c3, 0x0080}, {0x89c4, 0x0005},
+{0x89c5, 0x0005}, {0x89c6, 0x00f7}, {0x89c7, 0x008f},
+{0x89c8, 0x0080}, {0x89c9, 0x00f6}, {0x89ca, 0x008f},
+{0x89cb, 0x0080}, {0x89cc, 0x0053}, {0x89cd, 0x00f4},
+{0x89ce, 0x0012}, {0x89cf, 0x0007}, {0x89d0, 0x001b},
+{0x89d1, 0x00b7}, {0x89d2, 0x0012}, {0x89d3, 0x0007},
+{0x89d4, 0x0039}, {0x89d5, 0x00ce}, {0x89d6, 0x008f},
+{0x89d7, 0x0070}, {0x89d8, 0x00a6}, {0x89d9, 0x0000},
+{0x89da, 0x0018}, {0x89db, 0x00e6}, {0x89dc, 0x0000},
+{0x89dd, 0x0018}, {0x89de, 0x00a7}, {0x89df, 0x0000},
+{0x89e0, 0x00e7}, {0x89e1, 0x0000}, {0x89e2, 0x00a6},
+{0x89e3, 0x0001}, {0x89e4, 0x0018}, {0x89e5, 0x00e6},
+{0x89e6, 0x0001}, {0x89e7, 0x0018}, {0x89e8, 0x00a7},
+{0x89e9, 0x0001}, {0x89ea, 0x00e7}, {0x89eb, 0x0001},
+{0x89ec, 0x00a6}, {0x89ed, 0x0002}, {0x89ee, 0x0018},
+{0x89ef, 0x00e6}, {0x89f0, 0x0002}, {0x89f1, 0x0018},
+{0x89f2, 0x00a7}, {0x89f3, 0x0002}, {0x89f4, 0x00e7},
+{0x89f5, 0x0002}, {0x89f6, 0x0039}, {0x89f7, 0x00a6},
+{0x89f8, 0x0000}, {0x89f9, 0x0084}, {0x89fa, 0x0007},
+{0x89fb, 0x00e6}, {0x89fc, 0x0000}, {0x89fd, 0x00c4},
+{0x89fe, 0x0038}, {0x89ff, 0x0054}, {0x8a00, 0x0054},
+{0x8a01, 0x0054}, {0x8a02, 0x001b}, {0x8a03, 0x00a7},
+{0x8a04, 0x0000}, {0x8a05, 0x0039}, {0x8a06, 0x004a},
+{0x8a07, 0x0026}, {0x8a08, 0x00fd}, {0x8a09, 0x0039},
+{0x8a0a, 0x0096}, {0x8a0b, 0x0022}, {0x8a0c, 0x0084},
+{0x8a0d, 0x000f}, {0x8a0e, 0x0097}, {0x8a0f, 0x0022},
+{0x8a10, 0x0086}, {0x8a11, 0x0001}, {0x8a12, 0x00b7},
+{0x8a13, 0x008f}, {0x8a14, 0x0070}, {0x8a15, 0x00b6},
+{0x8a16, 0x0012}, {0x8a17, 0x0007}, {0x8a18, 0x00b7},
+{0x8a19, 0x008f}, {0x8a1a, 0x0071}, {0x8a1b, 0x00f6},
+{0x8a1c, 0x0012}, {0x8a1d, 0x000c}, {0x8a1e, 0x00c4},
+{0x8a1f, 0x000f}, {0x8a20, 0x00c8}, {0x8a21, 0x000f},
+{0x8a22, 0x00f7}, {0x8a23, 0x008f}, {0x8a24, 0x0072},
+{0x8a25, 0x00f6}, {0x8a26, 0x008f}, {0x8a27, 0x0072},
+{0x8a28, 0x00b6}, {0x8a29, 0x008f}, {0x8a2a, 0x0071},
+{0x8a2b, 0x0084}, {0x8a2c, 0x0003}, {0x8a2d, 0x0027},
+{0x8a2e, 0x0014}, {0x8a2f, 0x0081}, {0x8a30, 0x0001},
+{0x8a31, 0x0027}, {0x8a32, 0x001c}, {0x8a33, 0x0081},
+{0x8a34, 0x0002}, {0x8a35, 0x0027}, {0x8a36, 0x0024},
+{0x8a37, 0x00f4}, {0x8a38, 0x008f}, {0x8a39, 0x0070},
+{0x8a3a, 0x0027}, {0x8a3b, 0x002a}, {0x8a3c, 0x0096},
+{0x8a3d, 0x0022}, {0x8a3e, 0x008a}, {0x8a3f, 0x0080},
+{0x8a40, 0x007e}, {0x8a41, 0x008a}, {0x8a42, 0x0064},
+{0x8a43, 0x00f4}, {0x8a44, 0x008f}, {0x8a45, 0x0070},
+{0x8a46, 0x0027}, {0x8a47, 0x001e}, {0x8a48, 0x0096},
+{0x8a49, 0x0022}, {0x8a4a, 0x008a}, {0x8a4b, 0x0010},
+{0x8a4c, 0x007e}, {0x8a4d, 0x008a}, {0x8a4e, 0x0064},
+{0x8a4f, 0x00f4}, {0x8a50, 0x008f}, {0x8a51, 0x0070},
+{0x8a52, 0x0027}, {0x8a53, 0x0012}, {0x8a54, 0x0096},
+{0x8a55, 0x0022}, {0x8a56, 0x008a}, {0x8a57, 0x0020},
+{0x8a58, 0x007e}, {0x8a59, 0x008a}, {0x8a5a, 0x0064},
+{0x8a5b, 0x00f4}, {0x8a5c, 0x008f}, {0x8a5d, 0x0070},
+{0x8a5e, 0x0027}, {0x8a5f, 0x0006}, {0x8a60, 0x0096},
+{0x8a61, 0x0022}, {0x8a62, 0x008a}, {0x8a63, 0x0040},
+{0x8a64, 0x0097}, {0x8a65, 0x0022}, {0x8a66, 0x0074},
+{0x8a67, 0x008f}, {0x8a68, 0x0071}, {0x8a69, 0x0074},
+{0x8a6a, 0x008f}, {0x8a6b, 0x0071}, {0x8a6c, 0x0078},
+{0x8a6d, 0x008f}, {0x8a6e, 0x0070}, {0x8a6f, 0x00b6},
+{0x8a70, 0x008f}, {0x8a71, 0x0070}, {0x8a72, 0x0085},
+{0x8a73, 0x0010}, {0x8a74, 0x0027}, {0x8a75, 0x00af},
+{0x8a76, 0x00d6}, {0x8a77, 0x0022}, {0x8a78, 0x00c4},
+{0x8a79, 0x0010}, {0x8a7a, 0x0058}, {0x8a7b, 0x00b6},
+{0x8a7c, 0x0012}, {0x8a7d, 0x0070}, {0x8a7e, 0x0081},
+{0x8a7f, 0x00e4}, {0x8a80, 0x0027}, {0x8a81, 0x0036},
+{0x8a82, 0x0081}, {0x8a83, 0x00e1}, {0x8a84, 0x0026},
+{0x8a85, 0x000c}, {0x8a86, 0x0096}, {0x8a87, 0x0022},
+{0x8a88, 0x0084}, {0x8a89, 0x0020}, {0x8a8a, 0x0044},
+{0x8a8b, 0x001b}, {0x8a8c, 0x00d6}, {0x8a8d, 0x0022},
+{0x8a8e, 0x00c4}, {0x8a8f, 0x00cf}, {0x8a90, 0x0020},
+{0x8a91, 0x0023}, {0x8a92, 0x0058}, {0x8a93, 0x0081},
+{0x8a94, 0x00c6}, {0x8a95, 0x0026}, {0x8a96, 0x000d},
+{0x8a97, 0x0096}, {0x8a98, 0x0022}, {0x8a99, 0x0084},
+{0x8a9a, 0x0040}, {0x8a9b, 0x0044}, {0x8a9c, 0x0044},
+{0x8a9d, 0x001b}, {0x8a9e, 0x00d6}, {0x8a9f, 0x0022},
+{0x8aa0, 0x00c4}, {0x8aa1, 0x00af}, {0x8aa2, 0x0020},
+{0x8aa3, 0x0011}, {0x8aa4, 0x0058}, {0x8aa5, 0x0081},
+{0x8aa6, 0x0027}, {0x8aa7, 0x0026}, {0x8aa8, 0x000f},
+{0x8aa9, 0x0096}, {0x8aaa, 0x0022}, {0x8aab, 0x0084},
+{0x8aac, 0x0080}, {0x8aad, 0x0044}, {0x8aae, 0x0044},
+{0x8aaf, 0x0044}, {0x8ab0, 0x001b}, {0x8ab1, 0x00d6},
+{0x8ab2, 0x0022}, {0x8ab3, 0x00c4}, {0x8ab4, 0x006f},
+{0x8ab5, 0x001b}, {0x8ab6, 0x0097}, {0x8ab7, 0x0022},
+{0x8ab8, 0x0039}, {0x8ab9, 0x0027}, {0x8aba, 0x000c},
+{0x8abb, 0x007c}, {0x8abc, 0x0082}, {0x8abd, 0x0006},
+{0x8abe, 0x00bd}, {0x8abf, 0x00d9}, {0x8ac0, 0x00ed},
+{0x8ac1, 0x00b6}, {0x8ac2, 0x0082}, {0x8ac3, 0x0007},
+{0x8ac4, 0x007e}, {0x8ac5, 0x008a}, {0x8ac6, 0x00b9},
+{0x8ac7, 0x007f}, {0x8ac8, 0x0082}, {0x8ac9, 0x0006},
+{0x8aca, 0x0039}, { 0x0, 0x0 }
+};
+#else
+cas_saturn_patch_t cas_saturn_patch[] = {
+{0x8200, 0x007e}, {0x8201, 0x0082}, {0x8202, 0x0009},
+{0x8203, 0x0000}, {0x8204, 0x0000}, {0x8205, 0x0000},
+{0x8206, 0x0000}, {0x8207, 0x0000}, {0x8208, 0x0000},
+{0x8209, 0x008e}, {0x820a, 0x008e}, {0x820b, 0x00ff},
+{0x820c, 0x00ce}, {0x820d, 0x0082}, {0x820e, 0x0025},
+{0x820f, 0x00ff}, {0x8210, 0x0001}, {0x8211, 0x000f},
+{0x8212, 0x00ce}, {0x8213, 0x0084}, {0x8214, 0x0026},
+{0x8215, 0x00ff}, {0x8216, 0x0001}, {0x8217, 0x0011},
+{0x8218, 0x00ce}, {0x8219, 0x0085}, {0x821a, 0x003d},
+{0x821b, 0x00df}, {0x821c, 0x00e5}, {0x821d, 0x0086},
+{0x821e, 0x0039}, {0x821f, 0x00b7}, {0x8220, 0x008f},
+{0x8221, 0x00f8}, {0x8222, 0x007e}, {0x8223, 0x00c3},
+{0x8224, 0x00c2}, {0x8225, 0x0096}, {0x8226, 0x0047},
+{0x8227, 0x0084}, {0x8228, 0x00f3}, {0x8229, 0x008a},
+{0x822a, 0x0000}, {0x822b, 0x0097}, {0x822c, 0x0047},
+{0x822d, 0x00ce}, {0x822e, 0x0082}, {0x822f, 0x0033},
+{0x8230, 0x00ff}, {0x8231, 0x0001}, {0x8232, 0x000f},
+{0x8233, 0x0096}, {0x8234, 0x0046}, {0x8235, 0x0084},
+{0x8236, 0x000c}, {0x8237, 0x0081}, {0x8238, 0x0004},
+{0x8239, 0x0027}, {0x823a, 0x000b}, {0x823b, 0x0096},
+{0x823c, 0x0046}, {0x823d, 0x0084}, {0x823e, 0x000c},
+{0x823f, 0x0081}, {0x8240, 0x0008}, {0x8241, 0x0027},
+{0x8242, 0x0057}, {0x8243, 0x007e}, {0x8244, 0x0084},
+{0x8245, 0x0025}, {0x8246, 0x0096}, {0x8247, 0x0047},
+{0x8248, 0x0084}, {0x8249, 0x00f3}, {0x824a, 0x008a},
+{0x824b, 0x0004}, {0x824c, 0x0097}, {0x824d, 0x0047},
+{0x824e, 0x00ce}, {0x824f, 0x0082}, {0x8250, 0x0054},
+{0x8251, 0x00ff}, {0x8252, 0x0001}, {0x8253, 0x000f},
+{0x8254, 0x0096}, {0x8255, 0x0046}, {0x8256, 0x0084},
+{0x8257, 0x000c}, {0x8258, 0x0081}, {0x8259, 0x0004},
+{0x825a, 0x0026}, {0x825b, 0x0038}, {0x825c, 0x00b6},
+{0x825d, 0x0012}, {0x825e, 0x0020}, {0x825f, 0x0084},
+{0x8260, 0x0020}, {0x8261, 0x0026}, {0x8262, 0x0003},
+{0x8263, 0x007e}, {0x8264, 0x0084}, {0x8265, 0x0025},
+{0x8266, 0x0096}, {0x8267, 0x007b}, {0x8268, 0x00d6},
+{0x8269, 0x007c}, {0x826a, 0x00fe}, {0x826b, 0x008f},
+{0x826c, 0x0056}, {0x826d, 0x00bd}, {0x826e, 0x00f7},
+{0x826f, 0x00b6}, {0x8270, 0x00fe}, {0x8271, 0x008f},
+{0x8272, 0x004e}, {0x8273, 0x00bd}, {0x8274, 0x00ec},
+{0x8275, 0x008e}, {0x8276, 0x00bd}, {0x8277, 0x00fa},
+{0x8278, 0x00f7}, {0x8279, 0x00bd}, {0x827a, 0x00f7},
+{0x827b, 0x0028}, {0x827c, 0x00ce}, {0x827d, 0x0082},
+{0x827e, 0x0082}, {0x827f, 0x00ff}, {0x8280, 0x0001},
+{0x8281, 0x000f}, {0x8282, 0x0096}, {0x8283, 0x0046},
+{0x8284, 0x0084}, {0x8285, 0x000c}, {0x8286, 0x0081},
+{0x8287, 0x0004}, {0x8288, 0x0026}, {0x8289, 0x000a},
+{0x828a, 0x00b6}, {0x828b, 0x0012}, {0x828c, 0x0020},
+{0x828d, 0x0084}, {0x828e, 0x0020}, {0x828f, 0x0027},
+{0x8290, 0x00b5}, {0x8291, 0x007e}, {0x8292, 0x0084},
+{0x8293, 0x0025}, {0x8294, 0x00bd}, {0x8295, 0x00f7},
+{0x8296, 0x001f}, {0x8297, 0x007e}, {0x8298, 0x0084},
+{0x8299, 0x001f}, {0x829a, 0x0096}, {0x829b, 0x0047},
+{0x829c, 0x0084}, {0x829d, 0x00f3}, {0x829e, 0x008a},
+{0x829f, 0x0008}, {0x82a0, 0x0097}, {0x82a1, 0x0047},
+{0x82a2, 0x00de}, {0x82a3, 0x00e1}, {0x82a4, 0x00ad},
+{0x82a5, 0x0000}, {0x82a6, 0x00ce}, {0x82a7, 0x0082},
+{0x82a8, 0x00af}, {0x82a9, 0x00ff}, {0x82aa, 0x0001},
+{0x82ab, 0x000f}, {0x82ac, 0x007e}, {0x82ad, 0x0084},
+{0x82ae, 0x0025}, {0x82af, 0x0096}, {0x82b0, 0x0041},
+{0x82b1, 0x0085}, {0x82b2, 0x0010}, {0x82b3, 0x0026},
+{0x82b4, 0x0006}, {0x82b5, 0x0096}, {0x82b6, 0x0023},
+{0x82b7, 0x0085}, {0x82b8, 0x0040}, {0x82b9, 0x0027},
+{0x82ba, 0x0006}, {0x82bb, 0x00bd}, {0x82bc, 0x00ed},
+{0x82bd, 0x0000}, {0x82be, 0x007e}, {0x82bf, 0x0083},
+{0x82c0, 0x00a2}, {0x82c1, 0x00de}, {0x82c2, 0x0042},
+{0x82c3, 0x00bd}, {0x82c4, 0x00eb}, {0x82c5, 0x008e},
+{0x82c6, 0x0096}, {0x82c7, 0x0024}, {0x82c8, 0x0084},
+{0x82c9, 0x0008}, {0x82ca, 0x0027}, {0x82cb, 0x0003},
+{0x82cc, 0x007e}, {0x82cd, 0x0083}, {0x82ce, 0x00df},
+{0x82cf, 0x0096}, {0x82d0, 0x007b}, {0x82d1, 0x00d6},
+{0x82d2, 0x007c}, {0x82d3, 0x00fe}, {0x82d4, 0x008f},
+{0x82d5, 0x0056}, {0x82d6, 0x00bd}, {0x82d7, 0x00f7},
+{0x82d8, 0x00b6}, {0x82d9, 0x00fe}, {0x82da, 0x008f},
+{0x82db, 0x0050}, {0x82dc, 0x00bd}, {0x82dd, 0x00ec},
+{0x82de, 0x008e}, {0x82df, 0x00bd}, {0x82e0, 0x00fa},
+{0x82e1, 0x00f7}, {0x82e2, 0x0086}, {0x82e3, 0x0011},
+{0x82e4, 0x00c6}, {0x82e5, 0x0049}, {0x82e6, 0x00bd},
+{0x82e7, 0x00e4}, {0x82e8, 0x0012}, {0x82e9, 0x00ce},
+{0x82ea, 0x0082}, {0x82eb, 0x00ef}, {0x82ec, 0x00ff},
+{0x82ed, 0x0001}, {0x82ee, 0x000f}, {0x82ef, 0x0096},
+{0x82f0, 0x0046}, {0x82f1, 0x0084}, {0x82f2, 0x000c},
+{0x82f3, 0x0081}, {0x82f4, 0x0000}, {0x82f5, 0x0027},
+{0x82f6, 0x0017}, {0x82f7, 0x00c6}, {0x82f8, 0x0049},
+{0x82f9, 0x00bd}, {0x82fa, 0x00e4}, {0x82fb, 0x0091},
+{0x82fc, 0x0024}, {0x82fd, 0x000d}, {0x82fe, 0x00b6},
+{0x82ff, 0x0012}, {0x8300, 0x0020}, {0x8301, 0x0085},
+{0x8302, 0x0020}, {0x8303, 0x0026}, {0x8304, 0x000c},
+{0x8305, 0x00ce}, {0x8306, 0x0082}, {0x8307, 0x00c1},
+{0x8308, 0x00ff}, {0x8309, 0x0001}, {0x830a, 0x000f},
+{0x830b, 0x007e}, {0x830c, 0x0084}, {0x830d, 0x0025},
+{0x830e, 0x007e}, {0x830f, 0x0084}, {0x8310, 0x0016},
+{0x8311, 0x00fe}, {0x8312, 0x008f}, {0x8313, 0x0052},
+{0x8314, 0x00bd}, {0x8315, 0x00ec}, {0x8316, 0x008e},
+{0x8317, 0x00bd}, {0x8318, 0x00fa}, {0x8319, 0x00f7},
+{0x831a, 0x0086}, {0x831b, 0x006a}, {0x831c, 0x00c6},
+{0x831d, 0x0049}, {0x831e, 0x00bd}, {0x831f, 0x00e4},
+{0x8320, 0x0012}, {0x8321, 0x00ce}, {0x8322, 0x0083},
+{0x8323, 0x0027}, {0x8324, 0x00ff}, {0x8325, 0x0001},
+{0x8326, 0x000f}, {0x8327, 0x0096}, {0x8328, 0x0046},
+{0x8329, 0x0084}, {0x832a, 0x000c}, {0x832b, 0x0081},
+{0x832c, 0x0000}, {0x832d, 0x0027}, {0x832e, 0x000a},
+{0x832f, 0x00c6}, {0x8330, 0x0049}, {0x8331, 0x00bd},
+{0x8332, 0x00e4}, {0x8333, 0x0091}, {0x8334, 0x0025},
+{0x8335, 0x0006}, {0x8336, 0x007e}, {0x8337, 0x0084},
+{0x8338, 0x0025}, {0x8339, 0x007e}, {0x833a, 0x0084},
+{0x833b, 0x0016}, {0x833c, 0x00b6}, {0x833d, 0x0018},
+{0x833e, 0x0070}, {0x833f, 0x00bb}, {0x8340, 0x0019},
+{0x8341, 0x0070}, {0x8342, 0x002a}, {0x8343, 0x0004},
+{0x8344, 0x0081}, {0x8345, 0x00af}, {0x8346, 0x002e},
+{0x8347, 0x0019}, {0x8348, 0x0096}, {0x8349, 0x007b},
+{0x834a, 0x00f6}, {0x834b, 0x0020}, {0x834c, 0x0007},
+{0x834d, 0x00fa}, {0x834e, 0x0020}, {0x834f, 0x0027},
+{0x8350, 0x00c4}, {0x8351, 0x0038}, {0x8352, 0x0081},
+{0x8353, 0x0038}, {0x8354, 0x0027}, {0x8355, 0x000b},
+{0x8356, 0x00f6}, {0x8357, 0x0020}, {0x8358, 0x0007},
+{0x8359, 0x00fa}, {0x835a, 0x0020}, {0x835b, 0x0027},
+{0x835c, 0x00cb}, {0x835d, 0x0008}, {0x835e, 0x007e},
+{0x835f, 0x0082}, {0x8360, 0x00d3}, {0x8361, 0x00bd},
+{0x8362, 0x00f7}, {0x8363, 0x0066}, {0x8364, 0x0086},
+{0x8365, 0x0074}, {0x8366, 0x00c6}, {0x8367, 0x0049},
+{0x8368, 0x00bd}, {0x8369, 0x00e4}, {0x836a, 0x0012},
+{0x836b, 0x00ce}, {0x836c, 0x0083}, {0x836d, 0x0071},
+{0x836e, 0x00ff}, {0x836f, 0x0001}, {0x8370, 0x000f},
+{0x8371, 0x0096}, {0x8372, 0x0046}, {0x8373, 0x0084},
+{0x8374, 0x000c}, {0x8375, 0x0081}, {0x8376, 0x0008},
+{0x8377, 0x0026}, {0x8378, 0x000a}, {0x8379, 0x00c6},
+{0x837a, 0x0049}, {0x837b, 0x00bd}, {0x837c, 0x00e4},
+{0x837d, 0x0091}, {0x837e, 0x0025}, {0x837f, 0x0006},
+{0x8380, 0x007e}, {0x8381, 0x0084}, {0x8382, 0x0025},
+{0x8383, 0x007e}, {0x8384, 0x0084}, {0x8385, 0x0016},
+{0x8386, 0x00bd}, {0x8387, 0x00f7}, {0x8388, 0x003e},
+{0x8389, 0x0026}, {0x838a, 0x000e}, {0x838b, 0x00bd},
+{0x838c, 0x00e5}, {0x838d, 0x0009}, {0x838e, 0x0026},
+{0x838f, 0x0006}, {0x8390, 0x00ce}, {0x8391, 0x0082},
+{0x8392, 0x00c1}, {0x8393, 0x00ff}, {0x8394, 0x0001},
+{0x8395, 0x000f}, {0x8396, 0x007e}, {0x8397, 0x0084},
+{0x8398, 0x0025}, {0x8399, 0x00fe}, {0x839a, 0x008f},
+{0x839b, 0x0054}, {0x839c, 0x00bd}, {0x839d, 0x00ec},
+{0x839e, 0x008e}, {0x839f, 0x00bd}, {0x83a0, 0x00fa},
+{0x83a1, 0x00f7}, {0x83a2, 0x00bd}, {0x83a3, 0x00f7},
+{0x83a4, 0x0033}, {0x83a5, 0x0086}, {0x83a6, 0x000f},
+{0x83a7, 0x00c6}, {0x83a8, 0x0051}, {0x83a9, 0x00bd},
+{0x83aa, 0x00e4}, {0x83ab, 0x0012}, {0x83ac, 0x00ce},
+{0x83ad, 0x0083}, {0x83ae, 0x00b2}, {0x83af, 0x00ff},
+{0x83b0, 0x0001}, {0x83b1, 0x000f}, {0x83b2, 0x0096},
+{0x83b3, 0x0046}, {0x83b4, 0x0084}, {0x83b5, 0x000c},
+{0x83b6, 0x0081}, {0x83b7, 0x0008}, {0x83b8, 0x0026},
+{0x83b9, 0x005c}, {0x83ba, 0x00b6}, {0x83bb, 0x0012},
+{0x83bc, 0x0020}, {0x83bd, 0x0084}, {0x83be, 0x003f},
+{0x83bf, 0x0081}, {0x83c0, 0x003a}, {0x83c1, 0x0027},
+{0x83c2, 0x001c}, {0x83c3, 0x0096}, {0x83c4, 0x0023},
+{0x83c5, 0x0085}, {0x83c6, 0x0040}, {0x83c7, 0x0027},
+{0x83c8, 0x0003}, {0x83c9, 0x007e}, {0x83ca, 0x0084},
+{0x83cb, 0x0025}, {0x83cc, 0x00c6}, {0x83cd, 0x0051},
+{0x83ce, 0x00bd}, {0x83cf, 0x00e4}, {0x83d0, 0x0091},
+{0x83d1, 0x0025}, {0x83d2, 0x0003}, {0x83d3, 0x007e},
+{0x83d4, 0x0084}, {0x83d5, 0x0025}, {0x83d6, 0x00ce},
+{0x83d7, 0x0082}, {0x83d8, 0x00c1}, {0x83d9, 0x00ff},
+{0x83da, 0x0001}, {0x83db, 0x000f}, {0x83dc, 0x007e},
+{0x83dd, 0x0084}, {0x83de, 0x0025}, {0x83df, 0x00bd},
+{0x83e0, 0x00f8}, {0x83e1, 0x0037}, {0x83e2, 0x007c},
+{0x83e3, 0x0000}, {0x83e4, 0x007a}, {0x83e5, 0x00ce},
+{0x83e6, 0x0083}, {0x83e7, 0x00ee}, {0x83e8, 0x00ff},
+{0x83e9, 0x0001}, {0x83ea, 0x000f}, {0x83eb, 0x007e},
+{0x83ec, 0x0084}, {0x83ed, 0x0025}, {0x83ee, 0x0096},
+{0x83ef, 0x0046}, {0x83f0, 0x0084}, {0x83f1, 0x000c},
+{0x83f2, 0x0081}, {0x83f3, 0x0008}, {0x83f4, 0x0026},
+{0x83f5, 0x0020}, {0x83f6, 0x0096}, {0x83f7, 0x0024},
+{0x83f8, 0x0084}, {0x83f9, 0x0008}, {0x83fa, 0x0026},
+{0x83fb, 0x0029}, {0x83fc, 0x00b6}, {0x83fd, 0x0018},
+{0x83fe, 0x0082}, {0x83ff, 0x00bb}, {0x8400, 0x0019},
+{0x8401, 0x0082}, {0x8402, 0x00b1}, {0x8403, 0x0001},
+{0x8404, 0x003b}, {0x8405, 0x0022}, {0x8406, 0x0009},
+{0x8407, 0x00b6}, {0x8408, 0x0012}, {0x8409, 0x0020},
+{0x840a, 0x0084}, {0x840b, 0x0037}, {0x840c, 0x0081},
+{0x840d, 0x0032}, {0x840e, 0x0027}, {0x840f, 0x0015},
+{0x8410, 0x00bd}, {0x8411, 0x00f8}, {0x8412, 0x0044},
+{0x8413, 0x007e}, {0x8414, 0x0082}, {0x8415, 0x00c1},
+{0x8416, 0x00bd}, {0x8417, 0x00f7}, {0x8418, 0x001f},
+{0x8419, 0x00bd}, {0x841a, 0x00f8}, {0x841b, 0x0044},
+{0x841c, 0x00bd}, {0x841d, 0x00fc}, {0x841e, 0x0029},
+{0x841f, 0x00ce}, {0x8420, 0x0082}, {0x8421, 0x0025},
+{0x8422, 0x00ff}, {0x8423, 0x0001}, {0x8424, 0x000f},
+{0x8425, 0x0039}, {0x8426, 0x0096}, {0x8427, 0x0047},
+{0x8428, 0x0084}, {0x8429, 0x00fc}, {0x842a, 0x008a},
+{0x842b, 0x0000}, {0x842c, 0x0097}, {0x842d, 0x0047},
+{0x842e, 0x00ce}, {0x842f, 0x0084}, {0x8430, 0x0034},
+{0x8431, 0x00ff}, {0x8432, 0x0001}, {0x8433, 0x0011},
+{0x8434, 0x0096}, {0x8435, 0x0046}, {0x8436, 0x0084},
+{0x8437, 0x0003}, {0x8438, 0x0081}, {0x8439, 0x0002},
+{0x843a, 0x0027}, {0x843b, 0x0003}, {0x843c, 0x007e},
+{0x843d, 0x0085}, {0x843e, 0x001e}, {0x843f, 0x0096},
+{0x8440, 0x0047}, {0x8441, 0x0084}, {0x8442, 0x00fc},
+{0x8443, 0x008a}, {0x8444, 0x0002}, {0x8445, 0x0097},
+{0x8446, 0x0047}, {0x8447, 0x00de}, {0x8448, 0x00e1},
+{0x8449, 0x00ad}, {0x844a, 0x0000}, {0x844b, 0x0086},
+{0x844c, 0x0001}, {0x844d, 0x00b7}, {0x844e, 0x0012},
+{0x844f, 0x0051}, {0x8450, 0x00bd}, {0x8451, 0x00f7},
+{0x8452, 0x0014}, {0x8453, 0x00b6}, {0x8454, 0x0010},
+{0x8455, 0x0031}, {0x8456, 0x0084}, {0x8457, 0x00fd},
+{0x8458, 0x00b7}, {0x8459, 0x0010}, {0x845a, 0x0031},
+{0x845b, 0x00bd}, {0x845c, 0x00f8}, {0x845d, 0x001e},
+{0x845e, 0x0096}, {0x845f, 0x0081}, {0x8460, 0x00d6},
+{0x8461, 0x0082}, {0x8462, 0x00fe}, {0x8463, 0x008f},
+{0x8464, 0x005a}, {0x8465, 0x00bd}, {0x8466, 0x00f7},
+{0x8467, 0x00b6}, {0x8468, 0x00fe}, {0x8469, 0x008f},
+{0x846a, 0x005c}, {0x846b, 0x00bd}, {0x846c, 0x00ec},
+{0x846d, 0x008e}, {0x846e, 0x00bd}, {0x846f, 0x00fa},
+{0x8470, 0x00f7}, {0x8471, 0x0086}, {0x8472, 0x0008},
+{0x8473, 0x00d6}, {0x8474, 0x0000}, {0x8475, 0x00c5},
+{0x8476, 0x0010}, {0x8477, 0x0026}, {0x8478, 0x0002},
+{0x8479, 0x008b}, {0x847a, 0x0020}, {0x847b, 0x00c6},
+{0x847c, 0x0051}, {0x847d, 0x00bd}, {0x847e, 0x00e4},
+{0x847f, 0x0012}, {0x8480, 0x00ce}, {0x8481, 0x0084},
+{0x8482, 0x0086}, {0x8483, 0x00ff}, {0x8484, 0x0001},
+{0x8485, 0x0011}, {0x8486, 0x0096}, {0x8487, 0x0046},
+{0x8488, 0x0084}, {0x8489, 0x0003}, {0x848a, 0x0081},
+{0x848b, 0x0002}, {0x848c, 0x0027}, {0x848d, 0x0003},
+{0x848e, 0x007e}, {0x848f, 0x0085}, {0x8490, 0x000f},
+{0x8491, 0x00c6}, {0x8492, 0x0051}, {0x8493, 0x00bd},
+{0x8494, 0x00e4}, {0x8495, 0x0091}, {0x8496, 0x0025},
+{0x8497, 0x0003}, {0x8498, 0x007e}, {0x8499, 0x0085},
+{0x849a, 0x001e}, {0x849b, 0x0096}, {0x849c, 0x0044},
+{0x849d, 0x0085}, {0x849e, 0x0010}, {0x849f, 0x0026},
+{0x84a0, 0x000a}, {0x84a1, 0x00b6}, {0x84a2, 0x0012},
+{0x84a3, 0x0050}, {0x84a4, 0x00ba}, {0x84a5, 0x0001},
+{0x84a6, 0x003c}, {0x84a7, 0x0085}, {0x84a8, 0x0010},
+{0x84a9, 0x0027}, {0x84aa, 0x00a8}, {0x84ab, 0x00bd},
+{0x84ac, 0x00f7}, {0x84ad, 0x0066}, {0x84ae, 0x00ce},
+{0x84af, 0x0084}, {0x84b0, 0x00b7}, {0x84b1, 0x00ff},
+{0x84b2, 0x0001}, {0x84b3, 0x0011}, {0x84b4, 0x007e},
+{0x84b5, 0x0085}, {0x84b6, 0x001e}, {0x84b7, 0x0096},
+{0x84b8, 0x0046}, {0x84b9, 0x0084}, {0x84ba, 0x0003},
+{0x84bb, 0x0081}, {0x84bc, 0x0002}, {0x84bd, 0x0026},
+{0x84be, 0x0050}, {0x84bf, 0x00b6}, {0x84c0, 0x0012},
+{0x84c1, 0x0030}, {0x84c2, 0x0084}, {0x84c3, 0x0003},
+{0x84c4, 0x0081}, {0x84c5, 0x0001}, {0x84c6, 0x0027},
+{0x84c7, 0x0003}, {0x84c8, 0x007e}, {0x84c9, 0x0085},
+{0x84ca, 0x001e}, {0x84cb, 0x0096}, {0x84cc, 0x0044},
+{0x84cd, 0x0085}, {0x84ce, 0x0010}, {0x84cf, 0x0026},
+{0x84d0, 0x0013}, {0x84d1, 0x00b6}, {0x84d2, 0x0012},
+{0x84d3, 0x0050}, {0x84d4, 0x00ba}, {0x84d5, 0x0001},
+{0x84d6, 0x003c}, {0x84d7, 0x0085}, {0x84d8, 0x0010},
+{0x84d9, 0x0026}, {0x84da, 0x0009}, {0x84db, 0x00ce},
+{0x84dc, 0x0084}, {0x84dd, 0x0053}, {0x84de, 0x00ff},
+{0x84df, 0x0001}, {0x84e0, 0x0011}, {0x84e1, 0x007e},
+{0x84e2, 0x0085}, {0x84e3, 0x001e}, {0x84e4, 0x00b6},
+{0x84e5, 0x0010}, {0x84e6, 0x0031}, {0x84e7, 0x008a},
+{0x84e8, 0x0002}, {0x84e9, 0x00b7}, {0x84ea, 0x0010},
+{0x84eb, 0x0031}, {0x84ec, 0x00bd}, {0x84ed, 0x0085},
+{0x84ee, 0x001f}, {0x84ef, 0x00bd}, {0x84f0, 0x00f8},
+{0x84f1, 0x0037}, {0x84f2, 0x007c}, {0x84f3, 0x0000},
+{0x84f4, 0x0080}, {0x84f5, 0x00ce}, {0x84f6, 0x0084},
+{0x84f7, 0x00fe}, {0x84f8, 0x00ff}, {0x84f9, 0x0001},
+{0x84fa, 0x0011}, {0x84fb, 0x007e}, {0x84fc, 0x0085},
+{0x84fd, 0x001e}, {0x84fe, 0x0096}, {0x84ff, 0x0046},
+{0x8500, 0x0084}, {0x8501, 0x0003}, {0x8502, 0x0081},
+{0x8503, 0x0002}, {0x8504, 0x0026}, {0x8505, 0x0009},
+{0x8506, 0x00b6}, {0x8507, 0x0012}, {0x8508, 0x0030},
+{0x8509, 0x0084}, {0x850a, 0x0003}, {0x850b, 0x0081},
+{0x850c, 0x0001}, {0x850d, 0x0027}, {0x850e, 0x000f},
+{0x850f, 0x00bd}, {0x8510, 0x00f8}, {0x8511, 0x0044},
+{0x8512, 0x00bd}, {0x8513, 0x00f7}, {0x8514, 0x000b},
+{0x8515, 0x00bd}, {0x8516, 0x00fc}, {0x8517, 0x0029},
+{0x8518, 0x00ce}, {0x8519, 0x0084}, {0x851a, 0x0026},
+{0x851b, 0x00ff}, {0x851c, 0x0001}, {0x851d, 0x0011},
+{0x851e, 0x0039}, {0x851f, 0x00d6}, {0x8520, 0x0022},
+{0x8521, 0x00c4}, {0x8522, 0x000f}, {0x8523, 0x00b6},
+{0x8524, 0x0012}, {0x8525, 0x0030}, {0x8526, 0x00ba},
+{0x8527, 0x0012}, {0x8528, 0x0032}, {0x8529, 0x0084},
+{0x852a, 0x0004}, {0x852b, 0x0027}, {0x852c, 0x000d},
+{0x852d, 0x0096}, {0x852e, 0x0022}, {0x852f, 0x0085},
+{0x8530, 0x0004}, {0x8531, 0x0027}, {0x8532, 0x0005},
+{0x8533, 0x00ca}, {0x8534, 0x0010}, {0x8535, 0x007e},
+{0x8536, 0x0085}, {0x8537, 0x003a}, {0x8538, 0x00ca},
+{0x8539, 0x0020}, {0x853a, 0x00d7}, {0x853b, 0x0022},
+{0x853c, 0x0039}, {0x853d, 0x0086}, {0x853e, 0x0000},
+{0x853f, 0x0097}, {0x8540, 0x0083}, {0x8541, 0x0018},
+{0x8542, 0x00ce}, {0x8543, 0x001c}, {0x8544, 0x0000},
+{0x8545, 0x00bd}, {0x8546, 0x00eb}, {0x8547, 0x0046},
+{0x8548, 0x0096}, {0x8549, 0x0057}, {0x854a, 0x0085},
+{0x854b, 0x0001}, {0x854c, 0x0027}, {0x854d, 0x0002},
+{0x854e, 0x004f}, {0x854f, 0x0039}, {0x8550, 0x0085},
+{0x8551, 0x0002}, {0x8552, 0x0027}, {0x8553, 0x0001},
+{0x8554, 0x0039}, {0x8555, 0x007f}, {0x8556, 0x008f},
+{0x8557, 0x007d}, {0x8558, 0x0086}, {0x8559, 0x0004},
+{0x855a, 0x00b7}, {0x855b, 0x0012}, {0x855c, 0x0004},
+{0x855d, 0x0086}, {0x855e, 0x0008}, {0x855f, 0x00b7},
+{0x8560, 0x0012}, {0x8561, 0x0007}, {0x8562, 0x0086},
+{0x8563, 0x0010}, {0x8564, 0x00b7}, {0x8565, 0x0012},
+{0x8566, 0x000c}, {0x8567, 0x0086}, {0x8568, 0x0007},
+{0x8569, 0x00b7}, {0x856a, 0x0012}, {0x856b, 0x0006},
+{0x856c, 0x00b6}, {0x856d, 0x008f}, {0x856e, 0x007d},
+{0x856f, 0x00b7}, {0x8570, 0x0012}, {0x8571, 0x0070},
+{0x8572, 0x0086}, {0x8573, 0x0001}, {0x8574, 0x00ba},
+{0x8575, 0x0012}, {0x8576, 0x0004}, {0x8577, 0x00b7},
+{0x8578, 0x0012}, {0x8579, 0x0004}, {0x857a, 0x0001},
+{0x857b, 0x0001}, {0x857c, 0x0001}, {0x857d, 0x0001},
+{0x857e, 0x0001}, {0x857f, 0x0001}, {0x8580, 0x00b6},
+{0x8581, 0x0012}, {0x8582, 0x0004}, {0x8583, 0x0084},
+{0x8584, 0x00fe}, {0x8585, 0x008a}, {0x8586, 0x0002},
+{0x8587, 0x00b7}, {0x8588, 0x0012}, {0x8589, 0x0004},
+{0x858a, 0x0001}, {0x858b, 0x0001}, {0x858c, 0x0001},
+{0x858d, 0x0001}, {0x858e, 0x0001}, {0x858f, 0x0001},
+{0x8590, 0x0086}, {0x8591, 0x00fd}, {0x8592, 0x00b4},
+{0x8593, 0x0012}, {0x8594, 0x0004}, {0x8595, 0x00b7},
+{0x8596, 0x0012}, {0x8597, 0x0004}, {0x8598, 0x00b6},
+{0x8599, 0x0012}, {0x859a, 0x0000}, {0x859b, 0x0084},
+{0x859c, 0x0008}, {0x859d, 0x0081}, {0x859e, 0x0008},
+{0x859f, 0x0027}, {0x85a0, 0x0016}, {0x85a1, 0x00b6},
+{0x85a2, 0x008f}, {0x85a3, 0x007d}, {0x85a4, 0x0081},
+{0x85a5, 0x000c}, {0x85a6, 0x0027}, {0x85a7, 0x0008},
+{0x85a8, 0x008b}, {0x85a9, 0x0004}, {0x85aa, 0x00b7},
+{0x85ab, 0x008f}, {0x85ac, 0x007d}, {0x85ad, 0x007e},
+{0x85ae, 0x0085}, {0x85af, 0x006c}, {0x85b0, 0x0086},
+{0x85b1, 0x0003}, {0x85b2, 0x0097}, {0x85b3, 0x0040},
+{0x85b4, 0x007e}, {0x85b5, 0x0089}, {0x85b6, 0x006e},
+{0x85b7, 0x0086}, {0x85b8, 0x0007}, {0x85b9, 0x00b7},
+{0x85ba, 0x0012}, {0x85bb, 0x0006}, {0x85bc, 0x005f},
+{0x85bd, 0x00f7}, {0x85be, 0x008f}, {0x85bf, 0x0082},
+{0x85c0, 0x005f}, {0x85c1, 0x00f7}, {0x85c2, 0x008f},
+{0x85c3, 0x007f}, {0x85c4, 0x00f7}, {0x85c5, 0x008f},
+{0x85c6, 0x0070}, {0x85c7, 0x00f7}, {0x85c8, 0x008f},
+{0x85c9, 0x0071}, {0x85ca, 0x00f7}, {0x85cb, 0x008f},
+{0x85cc, 0x0072}, {0x85cd, 0x00f7}, {0x85ce, 0x008f},
+{0x85cf, 0x0073}, {0x85d0, 0x00f7}, {0x85d1, 0x008f},
+{0x85d2, 0x0074}, {0x85d3, 0x00f7}, {0x85d4, 0x008f},
+{0x85d5, 0x0075}, {0x85d6, 0x00f7}, {0x85d7, 0x008f},
+{0x85d8, 0x0076}, {0x85d9, 0x00f7}, {0x85da, 0x008f},
+{0x85db, 0x0077}, {0x85dc, 0x00f7}, {0x85dd, 0x008f},
+{0x85de, 0x0078}, {0x85df, 0x00f7}, {0x85e0, 0x008f},
+{0x85e1, 0x0079}, {0x85e2, 0x00f7}, {0x85e3, 0x008f},
+{0x85e4, 0x007a}, {0x85e5, 0x00f7}, {0x85e6, 0x008f},
+{0x85e7, 0x007b}, {0x85e8, 0x00b6}, {0x85e9, 0x0012},
+{0x85ea, 0x0004}, {0x85eb, 0x008a}, {0x85ec, 0x0010},
+{0x85ed, 0x00b7}, {0x85ee, 0x0012}, {0x85ef, 0x0004},
+{0x85f0, 0x0086}, {0x85f1, 0x00e4}, {0x85f2, 0x00b7},
+{0x85f3, 0x0012}, {0x85f4, 0x0070}, {0x85f5, 0x00b7},
+{0x85f6, 0x0012}, {0x85f7, 0x0007}, {0x85f8, 0x00f7},
+{0x85f9, 0x0012}, {0x85fa, 0x0005}, {0x85fb, 0x00f7},
+{0x85fc, 0x0012}, {0x85fd, 0x0009}, {0x85fe, 0x0086},
+{0x85ff, 0x0008}, {0x8600, 0x00ba}, {0x8601, 0x0012},
+{0x8602, 0x0004}, {0x8603, 0x00b7}, {0x8604, 0x0012},
+{0x8605, 0x0004}, {0x8606, 0x0086}, {0x8607, 0x00f7},
+{0x8608, 0x00b4}, {0x8609, 0x0012}, {0x860a, 0x0004},
+{0x860b, 0x00b7}, {0x860c, 0x0012}, {0x860d, 0x0004},
+{0x860e, 0x0001}, {0x860f, 0x0001}, {0x8610, 0x0001},
+{0x8611, 0x0001}, {0x8612, 0x0001}, {0x8613, 0x0001},
+{0x8614, 0x00b6}, {0x8615, 0x0012}, {0x8616, 0x0008},
+{0x8617, 0x0027}, {0x8618, 0x007f}, {0x8619, 0x0081},
+{0x861a, 0x0080}, {0x861b, 0x0026}, {0x861c, 0x000b},
+{0x861d, 0x0086}, {0x861e, 0x0008}, {0x861f, 0x00ce},
+{0x8620, 0x008f}, {0x8621, 0x0079}, {0x8622, 0x00bd},
+{0x8623, 0x0089}, {0x8624, 0x007b}, {0x8625, 0x007e},
+{0x8626, 0x0086}, {0x8627, 0x008e}, {0x8628, 0x0081},
+{0x8629, 0x0040}, {0x862a, 0x0026}, {0x862b, 0x000b},
+{0x862c, 0x0086}, {0x862d, 0x0004}, {0x862e, 0x00ce},
+{0x862f, 0x008f}, {0x8630, 0x0076}, {0x8631, 0x00bd},
+{0x8632, 0x0089}, {0x8633, 0x007b}, {0x8634, 0x007e},
+{0x8635, 0x0086}, {0x8636, 0x008e}, {0x8637, 0x0081},
+{0x8638, 0x0020}, {0x8639, 0x0026}, {0x863a, 0x000b},
+{0x863b, 0x0086}, {0x863c, 0x0002}, {0x863d, 0x00ce},
+{0x863e, 0x008f}, {0x863f, 0x0073}, {0x8640, 0x00bd},
+{0x8641, 0x0089}, {0x8642, 0x007b}, {0x8643, 0x007e},
+{0x8644, 0x0086}, {0x8645, 0x008e}, {0x8646, 0x0081},
+{0x8647, 0x0010}, {0x8648, 0x0026}, {0x8649, 0x000b},
+{0x864a, 0x0086}, {0x864b, 0x0001}, {0x864c, 0x00ce},
+{0x864d, 0x008f}, {0x864e, 0x0070}, {0x864f, 0x00bd},
+{0x8650, 0x0089}, {0x8651, 0x007b}, {0x8652, 0x007e},
+{0x8653, 0x0086}, {0x8654, 0x008e}, {0x8655, 0x0081},
+{0x8656, 0x0008}, {0x8657, 0x0026}, {0x8658, 0x000b},
+{0x8659, 0x0086}, {0x865a, 0x0008}, {0x865b, 0x00ce},
+{0x865c, 0x008f}, {0x865d, 0x0079}, {0x865e, 0x00bd},
+{0x865f, 0x0089}, {0x8660, 0x007f}, {0x8661, 0x007e},
+{0x8662, 0x0086}, {0x8663, 0x008e}, {0x8664, 0x0081},
+{0x8665, 0x0004}, {0x8666, 0x0026}, {0x8667, 0x000b},
+{0x8668, 0x0086}, {0x8669, 0x0004}, {0x866a, 0x00ce},
+{0x866b, 0x008f}, {0x866c, 0x0076}, {0x866d, 0x00bd},
+{0x866e, 0x0089}, {0x866f, 0x007f}, {0x8670, 0x007e},
+{0x8671, 0x0086}, {0x8672, 0x008e}, {0x8673, 0x0081},
+{0x8674, 0x0002}, {0x8675, 0x0026}, {0x8676, 0x000b},
+{0x8677, 0x008a}, {0x8678, 0x0002}, {0x8679, 0x00ce},
+{0x867a, 0x008f}, {0x867b, 0x0073}, {0x867c, 0x00bd},
+{0x867d, 0x0089}, {0x867e, 0x007f}, {0x867f, 0x007e},
+{0x8680, 0x0086}, {0x8681, 0x008e}, {0x8682, 0x0081},
+{0x8683, 0x0001}, {0x8684, 0x0026}, {0x8685, 0x0008},
+{0x8686, 0x0086}, {0x8687, 0x0001}, {0x8688, 0x00ce},
+{0x8689, 0x008f}, {0x868a, 0x0070}, {0x868b, 0x00bd},
+{0x868c, 0x0089}, {0x868d, 0x007f}, {0x868e, 0x00b6},
+{0x868f, 0x008f}, {0x8690, 0x007f}, {0x8691, 0x0081},
+{0x8692, 0x000f}, {0x8693, 0x0026}, {0x8694, 0x0003},
+{0x8695, 0x007e}, {0x8696, 0x0087}, {0x8697, 0x0047},
+{0x8698, 0x00b6}, {0x8699, 0x0012}, {0x869a, 0x0009},
+{0x869b, 0x0084}, {0x869c, 0x0003}, {0x869d, 0x0081},
+{0x869e, 0x0003}, {0x869f, 0x0027}, {0x86a0, 0x0006},
+{0x86a1, 0x007c}, {0x86a2, 0x0012}, {0x86a3, 0x0009},
+{0x86a4, 0x007e}, {0x86a5, 0x0085}, {0x86a6, 0x00fe},
+{0x86a7, 0x00b6}, {0x86a8, 0x0012}, {0x86a9, 0x0006},
+{0x86aa, 0x0084}, {0x86ab, 0x0007}, {0x86ac, 0x0081},
+{0x86ad, 0x0007}, {0x86ae, 0x0027}, {0x86af, 0x0008},
+{0x86b0, 0x008b}, {0x86b1, 0x0001}, {0x86b2, 0x00b7},
+{0x86b3, 0x0012}, {0x86b4, 0x0006}, {0x86b5, 0x007e},
+{0x86b6, 0x0086}, {0x86b7, 0x00d5}, {0x86b8, 0x00b6},
+{0x86b9, 0x008f}, {0x86ba, 0x0082}, {0x86bb, 0x0026},
+{0x86bc, 0x000a}, {0x86bd, 0x007c}, {0x86be, 0x008f},
+{0x86bf, 0x0082}, {0x86c0, 0x004f}, {0x86c1, 0x00b7},
+{0x86c2, 0x0012}, {0x86c3, 0x0006}, {0x86c4, 0x007e},
+{0x86c5, 0x0085}, {0x86c6, 0x00c0}, {0x86c7, 0x00b6},
+{0x86c8, 0x0012}, {0x86c9, 0x0006}, {0x86ca, 0x0084},
+{0x86cb, 0x003f}, {0x86cc, 0x0081}, {0x86cd, 0x003f},
+{0x86ce, 0x0027}, {0x86cf, 0x0010}, {0x86d0, 0x008b},
+{0x86d1, 0x0008}, {0x86d2, 0x00b7}, {0x86d3, 0x0012},
+{0x86d4, 0x0006}, {0x86d5, 0x00b6}, {0x86d6, 0x0012},
+{0x86d7, 0x0009}, {0x86d8, 0x0084}, {0x86d9, 0x00fc},
+{0x86da, 0x00b7}, {0x86db, 0x0012}, {0x86dc, 0x0009},
+{0x86dd, 0x007e}, {0x86de, 0x0085}, {0x86df, 0x00fe},
+{0x86e0, 0x00ce}, {0x86e1, 0x008f}, {0x86e2, 0x0070},
+{0x86e3, 0x0018}, {0x86e4, 0x00ce}, {0x86e5, 0x008f},
+{0x86e6, 0x0084}, {0x86e7, 0x00c6}, {0x86e8, 0x000c},
+{0x86e9, 0x00bd}, {0x86ea, 0x0089}, {0x86eb, 0x006f},
+{0x86ec, 0x00ce}, {0x86ed, 0x008f}, {0x86ee, 0x0084},
+{0x86ef, 0x0018}, {0x86f0, 0x00ce}, {0x86f1, 0x008f},
+{0x86f2, 0x0070}, {0x86f3, 0x00c6}, {0x86f4, 0x000c},
+{0x86f5, 0x00bd}, {0x86f6, 0x0089}, {0x86f7, 0x006f},
+{0x86f8, 0x00d6}, {0x86f9, 0x0083}, {0x86fa, 0x00c1},
+{0x86fb, 0x004f}, {0x86fc, 0x002d}, {0x86fd, 0x0003},
+{0x86fe, 0x007e}, {0x86ff, 0x0087}, {0x8700, 0x0040},
+{0x8701, 0x00b6}, {0x8702, 0x008f}, {0x8703, 0x007f},
+{0x8704, 0x0081}, {0x8705, 0x0007}, {0x8706, 0x0027},
+{0x8707, 0x000f}, {0x8708, 0x0081}, {0x8709, 0x000b},
+{0x870a, 0x0027}, {0x870b, 0x0015}, {0x870c, 0x0081},
+{0x870d, 0x000d}, {0x870e, 0x0027}, {0x870f, 0x001b},
+{0x8710, 0x0081}, {0x8711, 0x000e}, {0x8712, 0x0027},
+{0x8713, 0x0021}, {0x8714, 0x007e}, {0x8715, 0x0087},
+{0x8716, 0x0040}, {0x8717, 0x00f7}, {0x8718, 0x008f},
+{0x8719, 0x007b}, {0x871a, 0x0086}, {0x871b, 0x0002},
+{0x871c, 0x00b7}, {0x871d, 0x008f}, {0x871e, 0x007a},
+{0x871f, 0x0020}, {0x8720, 0x001c}, {0x8721, 0x00f7},
+{0x8722, 0x008f}, {0x8723, 0x0078}, {0x8724, 0x0086},
+{0x8725, 0x0002}, {0x8726, 0x00b7}, {0x8727, 0x008f},
+{0x8728, 0x0077}, {0x8729, 0x0020}, {0x872a, 0x0012},
+{0x872b, 0x00f7}, {0x872c, 0x008f}, {0x872d, 0x0075},
+{0x872e, 0x0086}, {0x872f, 0x0002}, {0x8730, 0x00b7},
+{0x8731, 0x008f}, {0x8732, 0x0074}, {0x8733, 0x0020},
+{0x8734, 0x0008}, {0x8735, 0x00f7}, {0x8736, 0x008f},
+{0x8737, 0x0072}, {0x8738, 0x0086}, {0x8739, 0x0002},
+{0x873a, 0x00b7}, {0x873b, 0x008f}, {0x873c, 0x0071},
+{0x873d, 0x007e}, {0x873e, 0x0087}, {0x873f, 0x0047},
+{0x8740, 0x0086}, {0x8741, 0x0004}, {0x8742, 0x0097},
+{0x8743, 0x0040}, {0x8744, 0x007e}, {0x8745, 0x0089},
+{0x8746, 0x006e}, {0x8747, 0x00ce}, {0x8748, 0x008f},
+{0x8749, 0x0072}, {0x874a, 0x00bd}, {0x874b, 0x0089},
+{0x874c, 0x00f7}, {0x874d, 0x00ce}, {0x874e, 0x008f},
+{0x874f, 0x0075}, {0x8750, 0x00bd}, {0x8751, 0x0089},
+{0x8752, 0x00f7}, {0x8753, 0x00ce}, {0x8754, 0x008f},
+{0x8755, 0x0078}, {0x8756, 0x00bd}, {0x8757, 0x0089},
+{0x8758, 0x00f7}, {0x8759, 0x00ce}, {0x875a, 0x008f},
+{0x875b, 0x007b}, {0x875c, 0x00bd}, {0x875d, 0x0089},
+{0x875e, 0x00f7}, {0x875f, 0x004f}, {0x8760, 0x00b7},
+{0x8761, 0x008f}, {0x8762, 0x007d}, {0x8763, 0x00b7},
+{0x8764, 0x008f}, {0x8765, 0x0081}, {0x8766, 0x00b6},
+{0x8767, 0x008f}, {0x8768, 0x0072}, {0x8769, 0x0027},
+{0x876a, 0x0047}, {0x876b, 0x007c}, {0x876c, 0x008f},
+{0x876d, 0x007d}, {0x876e, 0x00b6}, {0x876f, 0x008f},
+{0x8770, 0x0075}, {0x8771, 0x0027}, {0x8772, 0x003f},
+{0x8773, 0x007c}, {0x8774, 0x008f}, {0x8775, 0x007d},
+{0x8776, 0x00b6}, {0x8777, 0x008f}, {0x8778, 0x0078},
+{0x8779, 0x0027}, {0x877a, 0x0037}, {0x877b, 0x007c},
+{0x877c, 0x008f}, {0x877d, 0x007d}, {0x877e, 0x00b6},
+{0x877f, 0x008f}, {0x8780, 0x007b}, {0x8781, 0x0027},
+{0x8782, 0x002f}, {0x8783, 0x007f}, {0x8784, 0x008f},
+{0x8785, 0x007d}, {0x8786, 0x007c}, {0x8787, 0x008f},
+{0x8788, 0x0081}, {0x8789, 0x007a}, {0x878a, 0x008f},
+{0x878b, 0x0072}, {0x878c, 0x0027}, {0x878d, 0x001b},
+{0x878e, 0x007c}, {0x878f, 0x008f}, {0x8790, 0x007d},
+{0x8791, 0x007a}, {0x8792, 0x008f}, {0x8793, 0x0075},
+{0x8794, 0x0027}, {0x8795, 0x0016}, {0x8796, 0x007c},
+{0x8797, 0x008f}, {0x8798, 0x007d}, {0x8799, 0x007a},
+{0x879a, 0x008f}, {0x879b, 0x0078}, {0x879c, 0x0027},
+{0x879d, 0x0011}, {0x879e, 0x007c}, {0x879f, 0x008f},
+{0x87a0, 0x007d}, {0x87a1, 0x007a}, {0x87a2, 0x008f},
+{0x87a3, 0x007b}, {0x87a4, 0x0027}, {0x87a5, 0x000c},
+{0x87a6, 0x007e}, {0x87a7, 0x0087}, {0x87a8, 0x0083},
+{0x87a9, 0x007a}, {0x87aa, 0x008f}, {0x87ab, 0x0075},
+{0x87ac, 0x007a}, {0x87ad, 0x008f}, {0x87ae, 0x0078},
+{0x87af, 0x007a}, {0x87b0, 0x008f}, {0x87b1, 0x007b},
+{0x87b2, 0x00ce}, {0x87b3, 0x00c1}, {0x87b4, 0x00fc},
+{0x87b5, 0x00f6}, {0x87b6, 0x008f}, {0x87b7, 0x007d},
+{0x87b8, 0x003a}, {0x87b9, 0x00a6}, {0x87ba, 0x0000},
+{0x87bb, 0x00b7}, {0x87bc, 0x0012}, {0x87bd, 0x0070},
+{0x87be, 0x00b6}, {0x87bf, 0x008f}, {0x87c0, 0x0072},
+{0x87c1, 0x0026}, {0x87c2, 0x0003}, {0x87c3, 0x007e},
+{0x87c4, 0x0087}, {0x87c5, 0x00fa}, {0x87c6, 0x00b6},
+{0x87c7, 0x008f}, {0x87c8, 0x0075}, {0x87c9, 0x0026},
+{0x87ca, 0x000a}, {0x87cb, 0x0018}, {0x87cc, 0x00ce},
+{0x87cd, 0x008f}, {0x87ce, 0x0073}, {0x87cf, 0x00bd},
+{0x87d0, 0x0089}, {0x87d1, 0x00d5}, {0x87d2, 0x007e},
+{0x87d3, 0x0087}, {0x87d4, 0x00fa}, {0x87d5, 0x00b6},
+{0x87d6, 0x008f}, {0x87d7, 0x0078}, {0x87d8, 0x0026},
+{0x87d9, 0x000a}, {0x87da, 0x0018}, {0x87db, 0x00ce},
+{0x87dc, 0x008f}, {0x87dd, 0x0076}, {0x87de, 0x00bd},
+{0x87df, 0x0089}, {0x87e0, 0x00d5}, {0x87e1, 0x007e},
+{0x87e2, 0x0087}, {0x87e3, 0x00fa}, {0x87e4, 0x00b6},
+{0x87e5, 0x008f}, {0x87e6, 0x007b}, {0x87e7, 0x0026},
+{0x87e8, 0x000a}, {0x87e9, 0x0018}, {0x87ea, 0x00ce},
+{0x87eb, 0x008f}, {0x87ec, 0x0079}, {0x87ed, 0x00bd},
+{0x87ee, 0x0089}, {0x87ef, 0x00d5}, {0x87f0, 0x007e},
+{0x87f1, 0x0087}, {0x87f2, 0x00fa}, {0x87f3, 0x0086},
+{0x87f4, 0x0005}, {0x87f5, 0x0097}, {0x87f6, 0x0040},
+{0x87f7, 0x007e}, {0x87f8, 0x0089}, {0x87f9, 0x006e},
+{0x87fa, 0x00b6}, {0x87fb, 0x008f}, {0x87fc, 0x0075},
+{0x87fd, 0x0081}, {0x87fe, 0x0007}, {0x87ff, 0x002e},
+{0x8800, 0x00f2}, {0x8801, 0x00f6}, {0x8802, 0x0012},
+{0x8803, 0x0006}, {0x8804, 0x00c4}, {0x8805, 0x00f8},
+{0x8806, 0x001b}, {0x8807, 0x00b7}, {0x8808, 0x0012},
+{0x8809, 0x0006}, {0x880a, 0x00b6}, {0x880b, 0x008f},
+{0x880c, 0x0078}, {0x880d, 0x0081}, {0x880e, 0x0007},
+{0x880f, 0x002e}, {0x8810, 0x00e2}, {0x8811, 0x0048},
+{0x8812, 0x0048}, {0x8813, 0x0048}, {0x8814, 0x00f6},
+{0x8815, 0x0012}, {0x8816, 0x0006}, {0x8817, 0x00c4},
+{0x8818, 0x00c7}, {0x8819, 0x001b}, {0x881a, 0x00b7},
+{0x881b, 0x0012}, {0x881c, 0x0006}, {0x881d, 0x00b6},
+{0x881e, 0x008f}, {0x881f, 0x007b}, {0x8820, 0x0081},
+{0x8821, 0x0007}, {0x8822, 0x002e}, {0x8823, 0x00cf},
+{0x8824, 0x00f6}, {0x8825, 0x0012}, {0x8826, 0x0005},
+{0x8827, 0x00c4}, {0x8828, 0x00f8}, {0x8829, 0x001b},
+{0x882a, 0x00b7}, {0x882b, 0x0012}, {0x882c, 0x0005},
+{0x882d, 0x0086}, {0x882e, 0x0000}, {0x882f, 0x00f6},
+{0x8830, 0x008f}, {0x8831, 0x0071}, {0x8832, 0x00bd},
+{0x8833, 0x0089}, {0x8834, 0x0094}, {0x8835, 0x0086},
+{0x8836, 0x0001}, {0x8837, 0x00f6}, {0x8838, 0x008f},
+{0x8839, 0x0074}, {0x883a, 0x00bd}, {0x883b, 0x0089},
+{0x883c, 0x0094}, {0x883d, 0x0086}, {0x883e, 0x0002},
+{0x883f, 0x00f6}, {0x8840, 0x008f}, {0x8841, 0x0077},
+{0x8842, 0x00bd}, {0x8843, 0x0089}, {0x8844, 0x0094},
+{0x8845, 0x0086}, {0x8846, 0x0003}, {0x8847, 0x00f6},
+{0x8848, 0x008f}, {0x8849, 0x007a}, {0x884a, 0x00bd},
+{0x884b, 0x0089}, {0x884c, 0x0094}, {0x884d, 0x00ce},
+{0x884e, 0x008f}, {0x884f, 0x0070}, {0x8850, 0x00a6},
+{0x8851, 0x0001}, {0x8852, 0x0081}, {0x8853, 0x0001},
+{0x8854, 0x0027}, {0x8855, 0x0007}, {0x8856, 0x0081},
+{0x8857, 0x0003}, {0x8858, 0x0027}, {0x8859, 0x0003},
+{0x885a, 0x007e}, {0x885b, 0x0088}, {0x885c, 0x0066},
+{0x885d, 0x00a6}, {0x885e, 0x0000}, {0x885f, 0x00b8},
+{0x8860, 0x008f}, {0x8861, 0x0081}, {0x8862, 0x0084},
+{0x8863, 0x0001}, {0x8864, 0x0026}, {0x8865, 0x000b},
+{0x8866, 0x008c}, {0x8867, 0x008f}, {0x8868, 0x0079},
+{0x8869, 0x002c}, {0x886a, 0x000e}, {0x886b, 0x0008},
+{0x886c, 0x0008}, {0x886d, 0x0008}, {0x886e, 0x007e},
+{0x886f, 0x0088}, {0x8870, 0x0050}, {0x8871, 0x00b6},
+{0x8872, 0x0012}, {0x8873, 0x0004}, {0x8874, 0x008a},
+{0x8875, 0x0040}, {0x8876, 0x00b7}, {0x8877, 0x0012},
+{0x8878, 0x0004}, {0x8879, 0x00b6}, {0x887a, 0x0012},
+{0x887b, 0x0004}, {0x887c, 0x0084}, {0x887d, 0x00fb},
+{0x887e, 0x0084}, {0x887f, 0x00ef}, {0x8880, 0x00b7},
+{0x8881, 0x0012}, {0x8882, 0x0004}, {0x8883, 0x00b6},
+{0x8884, 0x0012}, {0x8885, 0x0007}, {0x8886, 0x0036},
+{0x8887, 0x00b6}, {0x8888, 0x008f}, {0x8889, 0x007c},
+{0x888a, 0x0048}, {0x888b, 0x0048}, {0x888c, 0x00b7},
+{0x888d, 0x0012}, {0x888e, 0x0007}, {0x888f, 0x0086},
+{0x8890, 0x0001}, {0x8891, 0x00ba}, {0x8892, 0x0012},
+{0x8893, 0x0004}, {0x8894, 0x00b7}, {0x8895, 0x0012},
+{0x8896, 0x0004}, {0x8897, 0x0001}, {0x8898, 0x0001},
+{0x8899, 0x0001}, {0x889a, 0x0001}, {0x889b, 0x0001},
+{0x889c, 0x0001}, {0x889d, 0x0086}, {0x889e, 0x00fe},
+{0x889f, 0x00b4}, {0x88a0, 0x0012}, {0x88a1, 0x0004},
+{0x88a2, 0x00b7}, {0x88a3, 0x0012}, {0x88a4, 0x0004},
+{0x88a5, 0x0086}, {0x88a6, 0x0002}, {0x88a7, 0x00ba},
+{0x88a8, 0x0012}, {0x88a9, 0x0004}, {0x88aa, 0x00b7},
+{0x88ab, 0x0012}, {0x88ac, 0x0004}, {0x88ad, 0x0086},
+{0x88ae, 0x00fd}, {0x88af, 0x00b4}, {0x88b0, 0x0012},
+{0x88b1, 0x0004}, {0x88b2, 0x00b7}, {0x88b3, 0x0012},
+{0x88b4, 0x0004}, {0x88b5, 0x0032}, {0x88b6, 0x00b7},
+{0x88b7, 0x0012}, {0x88b8, 0x0007}, {0x88b9, 0x00b6},
+{0x88ba, 0x0012}, {0x88bb, 0x0000}, {0x88bc, 0x0084},
+{0x88bd, 0x0008}, {0x88be, 0x0081}, {0x88bf, 0x0008},
+{0x88c0, 0x0027}, {0x88c1, 0x000f}, {0x88c2, 0x007c},
+{0x88c3, 0x0082}, {0x88c4, 0x0008}, {0x88c5, 0x0026},
+{0x88c6, 0x0007}, {0x88c7, 0x0086}, {0x88c8, 0x0076},
+{0x88c9, 0x0097}, {0x88ca, 0x0040}, {0x88cb, 0x007e},
+{0x88cc, 0x0089}, {0x88cd, 0x006e}, {0x88ce, 0x007e},
+{0x88cf, 0x0086}, {0x88d0, 0x00ec}, {0x88d1, 0x00b6},
+{0x88d2, 0x008f}, {0x88d3, 0x007f}, {0x88d4, 0x0081},
+{0x88d5, 0x000f}, {0x88d6, 0x0027}, {0x88d7, 0x003c},
+{0x88d8, 0x00bd}, {0x88d9, 0x00e6}, {0x88da, 0x00c7},
+{0x88db, 0x00b7}, {0x88dc, 0x0012}, {0x88dd, 0x000d},
+{0x88de, 0x00bd}, {0x88df, 0x00e6}, {0x88e0, 0x00cb},
+{0x88e1, 0x00b6}, {0x88e2, 0x0012}, {0x88e3, 0x0004},
+{0x88e4, 0x008a}, {0x88e5, 0x0020}, {0x88e6, 0x00b7},
+{0x88e7, 0x0012}, {0x88e8, 0x0004}, {0x88e9, 0x00ce},
+{0x88ea, 0x00ff}, {0x88eb, 0x00ff}, {0x88ec, 0x00b6},
+{0x88ed, 0x0012}, {0x88ee, 0x0000}, {0x88ef, 0x0081},
+{0x88f0, 0x000c}, {0x88f1, 0x0026}, {0x88f2, 0x0005},
+{0x88f3, 0x0009}, {0x88f4, 0x0026}, {0x88f5, 0x00f6},
+{0x88f6, 0x0027}, {0x88f7, 0x001c}, {0x88f8, 0x00b6},
+{0x88f9, 0x0012}, {0x88fa, 0x0004}, {0x88fb, 0x0084},
+{0x88fc, 0x00df}, {0x88fd, 0x00b7}, {0x88fe, 0x0012},
+{0x88ff, 0x0004}, {0x8900, 0x0096}, {0x8901, 0x0083},
+{0x8902, 0x0081}, {0x8903, 0x0007}, {0x8904, 0x002c},
+{0x8905, 0x0005}, {0x8906, 0x007c}, {0x8907, 0x0000},
+{0x8908, 0x0083}, {0x8909, 0x0020}, {0x890a, 0x0006},
+{0x890b, 0x0096}, {0x890c, 0x0083}, {0x890d, 0x008b},
+{0x890e, 0x0008}, {0x890f, 0x0097}, {0x8910, 0x0083},
+{0x8911, 0x007e}, {0x8912, 0x0085}, {0x8913, 0x0041},
+{0x8914, 0x007f}, {0x8915, 0x008f}, {0x8916, 0x007e},
+{0x8917, 0x0086}, {0x8918, 0x0080}, {0x8919, 0x00b7},
+{0x891a, 0x0012}, {0x891b, 0x000c}, {0x891c, 0x0086},
+{0x891d, 0x0001}, {0x891e, 0x00b7}, {0x891f, 0x008f},
+{0x8920, 0x007d}, {0x8921, 0x00b6}, {0x8922, 0x0012},
+{0x8923, 0x000c}, {0x8924, 0x0084}, {0x8925, 0x007f},
+{0x8926, 0x00b7}, {0x8927, 0x0012}, {0x8928, 0x000c},
+{0x8929, 0x008a}, {0x892a, 0x0080}, {0x892b, 0x00b7},
+{0x892c, 0x0012}, {0x892d, 0x000c}, {0x892e, 0x0086},
+{0x892f, 0x000a}, {0x8930, 0x00bd}, {0x8931, 0x008a},
+{0x8932, 0x0006}, {0x8933, 0x00b6}, {0x8934, 0x0012},
+{0x8935, 0x000a}, {0x8936, 0x002a}, {0x8937, 0x0009},
+{0x8938, 0x00b6}, {0x8939, 0x0012}, {0x893a, 0x000c},
+{0x893b, 0x00ba}, {0x893c, 0x008f}, {0x893d, 0x007d},
+{0x893e, 0x00b7}, {0x893f, 0x0012}, {0x8940, 0x000c},
+{0x8941, 0x00b6}, {0x8942, 0x008f}, {0x8943, 0x007e},
+{0x8944, 0x0081}, {0x8945, 0x0060}, {0x8946, 0x0027},
+{0x8947, 0x001a}, {0x8948, 0x008b}, {0x8949, 0x0020},
+{0x894a, 0x00b7}, {0x894b, 0x008f}, {0x894c, 0x007e},
+{0x894d, 0x00b6}, {0x894e, 0x0012}, {0x894f, 0x000c},
+{0x8950, 0x0084}, {0x8951, 0x009f}, {0x8952, 0x00ba},
+{0x8953, 0x008f}, {0x8954, 0x007e}, {0x8955, 0x00b7},
+{0x8956, 0x0012}, {0x8957, 0x000c}, {0x8958, 0x00b6},
+{0x8959, 0x008f}, {0x895a, 0x007d}, {0x895b, 0x0048},
+{0x895c, 0x00b7}, {0x895d, 0x008f}, {0x895e, 0x007d},
+{0x895f, 0x007e}, {0x8960, 0x0089}, {0x8961, 0x0021},
+{0x8962, 0x00b6}, {0x8963, 0x0012}, {0x8964, 0x0004},
+{0x8965, 0x008a}, {0x8966, 0x0020}, {0x8967, 0x00b7},
+{0x8968, 0x0012}, {0x8969, 0x0004}, {0x896a, 0x00bd},
+{0x896b, 0x008a}, {0x896c, 0x000a}, {0x896d, 0x004f},
+{0x896e, 0x0039}, {0x896f, 0x00a6}, {0x8970, 0x0000},
+{0x8971, 0x0018}, {0x8972, 0x00a7}, {0x8973, 0x0000},
+{0x8974, 0x0008}, {0x8975, 0x0018}, {0x8976, 0x0008},
+{0x8977, 0x005a}, {0x8978, 0x0026}, {0x8979, 0x00f5},
+{0x897a, 0x0039}, {0x897b, 0x0036}, {0x897c, 0x006c},
+{0x897d, 0x0000}, {0x897e, 0x0032}, {0x897f, 0x00ba},
+{0x8980, 0x008f}, {0x8981, 0x007f}, {0x8982, 0x00b7},
+{0x8983, 0x008f}, {0x8984, 0x007f}, {0x8985, 0x00b6},
+{0x8986, 0x0012}, {0x8987, 0x0009}, {0x8988, 0x0084},
+{0x8989, 0x0003}, {0x898a, 0x00a7}, {0x898b, 0x0001},
+{0x898c, 0x00b6}, {0x898d, 0x0012}, {0x898e, 0x0006},
+{0x898f, 0x0084}, {0x8990, 0x003f}, {0x8991, 0x00a7},
+{0x8992, 0x0002}, {0x8993, 0x0039}, {0x8994, 0x0036},
+{0x8995, 0x0086}, {0x8996, 0x0003}, {0x8997, 0x00b7},
+{0x8998, 0x008f}, {0x8999, 0x0080}, {0x899a, 0x0032},
+{0x899b, 0x00c1}, {0x899c, 0x0000}, {0x899d, 0x0026},
+{0x899e, 0x0006}, {0x899f, 0x00b7}, {0x89a0, 0x008f},
+{0x89a1, 0x007c}, {0x89a2, 0x007e}, {0x89a3, 0x0089},
+{0x89a4, 0x00c9}, {0x89a5, 0x00c1}, {0x89a6, 0x0001},
+{0x89a7, 0x0027}, {0x89a8, 0x0018}, {0x89a9, 0x00c1},
+{0x89aa, 0x0002}, {0x89ab, 0x0027}, {0x89ac, 0x000c},
+{0x89ad, 0x00c1}, {0x89ae, 0x0003}, {0x89af, 0x0027},
+{0x89b0, 0x0000}, {0x89b1, 0x00f6}, {0x89b2, 0x008f},
+{0x89b3, 0x0080}, {0x89b4, 0x0005}, {0x89b5, 0x0005},
+{0x89b6, 0x00f7}, {0x89b7, 0x008f}, {0x89b8, 0x0080},
+{0x89b9, 0x00f6}, {0x89ba, 0x008f}, {0x89bb, 0x0080},
+{0x89bc, 0x0005}, {0x89bd, 0x0005}, {0x89be, 0x00f7},
+{0x89bf, 0x008f}, {0x89c0, 0x0080}, {0x89c1, 0x00f6},
+{0x89c2, 0x008f}, {0x89c3, 0x0080}, {0x89c4, 0x0005},
+{0x89c5, 0x0005}, {0x89c6, 0x00f7}, {0x89c7, 0x008f},
+{0x89c8, 0x0080}, {0x89c9, 0x00f6}, {0x89ca, 0x008f},
+{0x89cb, 0x0080}, {0x89cc, 0x0053}, {0x89cd, 0x00f4},
+{0x89ce, 0x0012}, {0x89cf, 0x0007}, {0x89d0, 0x001b},
+{0x89d1, 0x00b7}, {0x89d2, 0x0012}, {0x89d3, 0x0007},
+{0x89d4, 0x0039}, {0x89d5, 0x00ce}, {0x89d6, 0x008f},
+{0x89d7, 0x0070}, {0x89d8, 0x00a6}, {0x89d9, 0x0000},
+{0x89da, 0x0018}, {0x89db, 0x00e6}, {0x89dc, 0x0000},
+{0x89dd, 0x0018}, {0x89de, 0x00a7}, {0x89df, 0x0000},
+{0x89e0, 0x00e7}, {0x89e1, 0x0000}, {0x89e2, 0x00a6},
+{0x89e3, 0x0001}, {0x89e4, 0x0018}, {0x89e5, 0x00e6},
+{0x89e6, 0x0001}, {0x89e7, 0x0018}, {0x89e8, 0x00a7},
+{0x89e9, 0x0001}, {0x89ea, 0x00e7}, {0x89eb, 0x0001},
+{0x89ec, 0x00a6}, {0x89ed, 0x0002}, {0x89ee, 0x0018},
+{0x89ef, 0x00e6}, {0x89f0, 0x0002}, {0x89f1, 0x0018},
+{0x89f2, 0x00a7}, {0x89f3, 0x0002}, {0x89f4, 0x00e7},
+{0x89f5, 0x0002}, {0x89f6, 0x0039}, {0x89f7, 0x00a6},
+{0x89f8, 0x0000}, {0x89f9, 0x0084}, {0x89fa, 0x0007},
+{0x89fb, 0x00e6}, {0x89fc, 0x0000}, {0x89fd, 0x00c4},
+{0x89fe, 0x0038}, {0x89ff, 0x0054}, {0x8a00, 0x0054},
+{0x8a01, 0x0054}, {0x8a02, 0x001b}, {0x8a03, 0x00a7},
+{0x8a04, 0x0000}, {0x8a05, 0x0039}, {0x8a06, 0x004a},
+{0x8a07, 0x0026}, {0x8a08, 0x00fd}, {0x8a09, 0x0039},
+{0x8a0a, 0x0096}, {0x8a0b, 0x0022}, {0x8a0c, 0x0084},
+{0x8a0d, 0x000f}, {0x8a0e, 0x0097}, {0x8a0f, 0x0022},
+{0x8a10, 0x0086}, {0x8a11, 0x0001}, {0x8a12, 0x00b7},
+{0x8a13, 0x008f}, {0x8a14, 0x0070}, {0x8a15, 0x00b6},
+{0x8a16, 0x0012}, {0x8a17, 0x0007}, {0x8a18, 0x00b7},
+{0x8a19, 0x008f}, {0x8a1a, 0x0071}, {0x8a1b, 0x00f6},
+{0x8a1c, 0x0012}, {0x8a1d, 0x000c}, {0x8a1e, 0x00c4},
+{0x8a1f, 0x000f}, {0x8a20, 0x00c8}, {0x8a21, 0x000f},
+{0x8a22, 0x00f7}, {0x8a23, 0x008f}, {0x8a24, 0x0072},
+{0x8a25, 0x00f6}, {0x8a26, 0x008f}, {0x8a27, 0x0072},
+{0x8a28, 0x00b6}, {0x8a29, 0x008f}, {0x8a2a, 0x0071},
+{0x8a2b, 0x0084}, {0x8a2c, 0x0003}, {0x8a2d, 0x0027},
+{0x8a2e, 0x0014}, {0x8a2f, 0x0081}, {0x8a30, 0x0001},
+{0x8a31, 0x0027}, {0x8a32, 0x001c}, {0x8a33, 0x0081},
+{0x8a34, 0x0002}, {0x8a35, 0x0027}, {0x8a36, 0x0024},
+{0x8a37, 0x00f4}, {0x8a38, 0x008f}, {0x8a39, 0x0070},
+{0x8a3a, 0x0027}, {0x8a3b, 0x002a}, {0x8a3c, 0x0096},
+{0x8a3d, 0x0022}, {0x8a3e, 0x008a}, {0x8a3f, 0x0080},
+{0x8a40, 0x007e}, {0x8a41, 0x008a}, {0x8a42, 0x0064},
+{0x8a43, 0x00f4}, {0x8a44, 0x008f}, {0x8a45, 0x0070},
+{0x8a46, 0x0027}, {0x8a47, 0x001e}, {0x8a48, 0x0096},
+{0x8a49, 0x0022}, {0x8a4a, 0x008a}, {0x8a4b, 0x0010},
+{0x8a4c, 0x007e}, {0x8a4d, 0x008a}, {0x8a4e, 0x0064},
+{0x8a4f, 0x00f4}, {0x8a50, 0x008f}, {0x8a51, 0x0070},
+{0x8a52, 0x0027}, {0x8a53, 0x0012}, {0x8a54, 0x0096},
+{0x8a55, 0x0022}, {0x8a56, 0x008a}, {0x8a57, 0x0020},
+{0x8a58, 0x007e}, {0x8a59, 0x008a}, {0x8a5a, 0x0064},
+{0x8a5b, 0x00f4}, {0x8a5c, 0x008f}, {0x8a5d, 0x0070},
+{0x8a5e, 0x0027}, {0x8a5f, 0x0006}, {0x8a60, 0x0096},
+{0x8a61, 0x0022}, {0x8a62, 0x008a}, {0x8a63, 0x0040},
+{0x8a64, 0x0097}, {0x8a65, 0x0022}, {0x8a66, 0x0074},
+{0x8a67, 0x008f}, {0x8a68, 0x0071}, {0x8a69, 0x0074},
+{0x8a6a, 0x008f}, {0x8a6b, 0x0071}, {0x8a6c, 0x0078},
+{0x8a6d, 0x008f}, {0x8a6e, 0x0070}, {0x8a6f, 0x00b6},
+{0x8a70, 0x008f}, {0x8a71, 0x0070}, {0x8a72, 0x0085},
+{0x8a73, 0x0010}, {0x8a74, 0x0027}, {0x8a75, 0x00af},
+{0x8a76, 0x00d6}, {0x8a77, 0x0022}, {0x8a78, 0x00c4},
+{0x8a79, 0x0010}, {0x8a7a, 0x0058}, {0x8a7b, 0x00b6},
+{0x8a7c, 0x0012}, {0x8a7d, 0x0070}, {0x8a7e, 0x0081},
+{0x8a7f, 0x00e4}, {0x8a80, 0x0027}, {0x8a81, 0x0036},
+{0x8a82, 0x0081}, {0x8a83, 0x00e1}, {0x8a84, 0x0026},
+{0x8a85, 0x000c}, {0x8a86, 0x0096}, {0x8a87, 0x0022},
+{0x8a88, 0x0084}, {0x8a89, 0x0020}, {0x8a8a, 0x0044},
+{0x8a8b, 0x001b}, {0x8a8c, 0x00d6}, {0x8a8d, 0x0022},
+{0x8a8e, 0x00c4}, {0x8a8f, 0x00cf}, {0x8a90, 0x0020},
+{0x8a91, 0x0023}, {0x8a92, 0x0058}, {0x8a93, 0x0081},
+{0x8a94, 0x00c6}, {0x8a95, 0x0026}, {0x8a96, 0x000d},
+{0x8a97, 0x0096}, {0x8a98, 0x0022}, {0x8a99, 0x0084},
+{0x8a9a, 0x0040}, {0x8a9b, 0x0044}, {0x8a9c, 0x0044},
+{0x8a9d, 0x001b}, {0x8a9e, 0x00d6}, {0x8a9f, 0x0022},
+{0x8aa0, 0x00c4}, {0x8aa1, 0x00af}, {0x8aa2, 0x0020},
+{0x8aa3, 0x0011}, {0x8aa4, 0x0058}, {0x8aa5, 0x0081},
+{0x8aa6, 0x0027}, {0x8aa7, 0x0026}, {0x8aa8, 0x000f},
+{0x8aa9, 0x0096}, {0x8aaa, 0x0022}, {0x8aab, 0x0084},
+{0x8aac, 0x0080}, {0x8aad, 0x0044}, {0x8aae, 0x0044},
+{0x8aaf, 0x0044}, {0x8ab0, 0x001b}, {0x8ab1, 0x00d6},
+{0x8ab2, 0x0022}, {0x8ab3, 0x00c4}, {0x8ab4, 0x006f},
+{0x8ab5, 0x001b}, {0x8ab6, 0x0097}, {0x8ab7, 0x0022},
+{0x8ab8, 0x0039}, {0x8ab9, 0x0027}, {0x8aba, 0x000c},
+{0x8abb, 0x007c}, {0x8abc, 0x0082}, {0x8abd, 0x0006},
+{0x8abe, 0x00bd}, {0x8abf, 0x00d9}, {0x8ac0, 0x00ed},
+{0x8ac1, 0x00b6}, {0x8ac2, 0x0082}, {0x8ac3, 0x0007},
+{0x8ac4, 0x007e}, {0x8ac5, 0x008a}, {0x8ac6, 0x00b9},
+{0x8ac7, 0x007f}, {0x8ac8, 0x0082}, {0x8ac9, 0x0006},
+{0x8aca, 0x0039}, { 0x0, 0x0 }
+};
+#endif
+
+
+/* phy types */
+#define CAS_PHY_UNKNOWN 0x00
+#define CAS_PHY_SERDES 0x01
+#define CAS_PHY_MII_MDIO0 0x02
+#define CAS_PHY_MII_MDIO1 0x04
+#define CAS_PHY_MII(x) ((x) & (CAS_PHY_MII_MDIO0 | CAS_PHY_MII_MDIO1))
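+
+/* For illustration only (not used by the driver itself): CAS_PHY_MII()
+ * is non-zero exactly for the MDIO-based PHY types, e.g.
+ *   CAS_PHY_MII(CAS_PHY_MII_MDIO0) -> 0x02 (true)
+ *   CAS_PHY_MII(CAS_PHY_SERDES)    -> 0x00 (false)
+ */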
+
+/* _RING_INDEX is the index for the ring sizes to be used. _RING_SIZE
+ * is the actual size. The default index for the various rings is
+ * 8. NOTE: there are a bunch of alignment constraints on the rings.
+ * To deal with that, we just allocate the rings so as to create the
+ * desired alignment. Here are the constraints:
+ * RX DESC and COMP rings must be 8KB aligned
+ * TX DESC must be 2KB aligned.
+ * If you change the numbers, be cognizant of how the alignment will change
+ * in INIT_BLOCK as well.
+ */
+
+#define DESC_RING_I_TO_S(x) (32*(1 << (x)))
+#define COMP_RING_I_TO_S(x) (128*(1 << (x)))
+#define TX_DESC_RING_INDEX 4 /* 512 = 8k */
+#define RX_DESC_RING_INDEX 4 /* 512 = 8k */
+#define RX_COMP_RING_INDEX 4 /* 2048 = 64k: should be 4x rx ring size */
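+
+/* Worked example for the default indices above (sizes follow from the
+ * 16-byte descriptor and 32-byte completion structures defined below):
+ *   DESC_RING_I_TO_S(4) = 32 << 4  = 512 entries  * 16 bytes = 8k
+ *   COMP_RING_I_TO_S(4) = 128 << 4 = 2048 entries * 32 bytes = 64k
+ * which is where the "512 = 8k" and "2048 = 64k" figures come from.
+ */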
+
+#if (TX_DESC_RING_INDEX > 8) || (TX_DESC_RING_INDEX < 0)
+#error TX_DESC_RING_INDEX must be between 0 and 8
+#endif
+
+#if (RX_DESC_RING_INDEX > 8) || (RX_DESC_RING_INDEX < 0)
+#error RX_DESC_RING_INDEX must be between 0 and 8
+#endif
+
+#if (RX_COMP_RING_INDEX > 8) || (RX_COMP_RING_INDEX < 0)
+#error RX_COMP_RING_INDEX must be between 0 and 8
+#endif
+
+#define N_TX_RINGS MAX_TX_RINGS /* for QoS */
+#define N_TX_RINGS_MASK MAX_TX_RINGS_MASK
+#define N_RX_DESC_RINGS MAX_RX_DESC_RINGS /* 1 for ipsec */
+#define N_RX_COMP_RINGS 0x1 /* for mult. PCI interrupts */
+
+/* number of flows that can go through re-assembly */
+#define N_RX_FLOWS 64
+
+#define TX_DESC_RING_SIZE DESC_RING_I_TO_S(TX_DESC_RING_INDEX)
+#define RX_DESC_RING_SIZE DESC_RING_I_TO_S(RX_DESC_RING_INDEX)
+#define RX_COMP_RING_SIZE COMP_RING_I_TO_S(RX_COMP_RING_INDEX)
+#define TX_DESC_RINGN_INDEX(x) TX_DESC_RING_INDEX
+#define RX_DESC_RINGN_INDEX(x) RX_DESC_RING_INDEX
+#define RX_COMP_RINGN_INDEX(x) RX_COMP_RING_INDEX
+#define TX_DESC_RINGN_SIZE(x) TX_DESC_RING_SIZE
+#define RX_DESC_RINGN_SIZE(x) RX_DESC_RING_SIZE
+#define RX_COMP_RINGN_SIZE(x) RX_COMP_RING_SIZE
+
+/* convert values */
+#define CAS_BASE(x, y) (((y) << (x ## _SHIFT)) & (x ## _MASK))
+#define CAS_VAL(x, y) (((y) & (x ## _MASK)) >> (x ## _SHIFT))
+#define CAS_TX_RINGN_BASE(y) ((TX_DESC_RINGN_INDEX(y) << \
+ TX_CFG_DESC_RINGN_SHIFT(y)) & \
+ TX_CFG_DESC_RINGN_MASK(y))
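+
+/* For illustration (word1 is just a placeholder name): the token pasting
+ * in the convert macros means that extracting, say, the data size field
+ * of an rx completion word (the RX_COMP1_* definitions appear below)
+ *   CAS_VAL(RX_COMP1_DATA_SIZE, word1)
+ * expands to
+ *   ((word1 & RX_COMP1_DATA_SIZE_MASK) >> RX_COMP1_DATA_SIZE_SHIFT)
+ * while CAS_BASE() does the inverse shift-and-mask to build a field.
+ */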
+
+/* min is 2k, but we can't do jumbo frames unless it's at least 8k */
+#define CAS_MIN_PAGE_SHIFT 11 /* 2048 */
+#define CAS_JUMBO_PAGE_SHIFT 13 /* 8192 */
+#define CAS_MAX_PAGE_SHIFT 14 /* 16384 */
+
+#define TX_DESC_BUFLEN_MASK 0x0000000000003FFFULL /* buffer length in
+ bytes. 0 - 9256 */
+#define TX_DESC_BUFLEN_SHIFT 0
+#define TX_DESC_CSUM_START_MASK 0x00000000001F8000ULL /* checksum start. #
+ of bytes to be
+ skipped before
+ csum calc begins.
+ value must be
+ even */
+#define TX_DESC_CSUM_START_SHIFT 15
+#define TX_DESC_CSUM_STUFF_MASK 0x000000001FE00000ULL /* checksum stuff.
+ byte offset w/in
+ the pkt for the
+ 1st csum byte.
+ must be > 8 */
+#define TX_DESC_CSUM_STUFF_SHIFT 21
+#define TX_DESC_CSUM_EN 0x0000000020000000ULL /* enable checksum */
+#define TX_DESC_EOF 0x0000000040000000ULL /* end of frame */
+#define TX_DESC_SOF 0x0000000080000000ULL /* start of frame */
+#define TX_DESC_INTME 0x0000000100000000ULL /* interrupt me */
+#define TX_DESC_NO_CRC 0x0000000200000000ULL /* debugging only.
+ CRC will not be
+ inserted into
+ outgoing frame. */
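+
+/* A minimal sketch of how the fields above combine (placeholder names,
+ * not the driver's actual transmit path): a control word for a
+ * single-fragment frame could be assembled as
+ *   ctrl = CAS_BASE(TX_DESC_BUFLEN, len) | TX_DESC_SOF | TX_DESC_EOF;
+ * with TX_DESC_CSUM_START/STUFF and TX_DESC_CSUM_EN added when hardware
+ * checksumming is wanted.
+ */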
+struct cas_tx_desc {
+ u64 control;
+ u64 buffer;
+};
+
+/* The descriptor ring for free buffers contains page-sized buffers. The
+ * index value is not used by the hw in any way; it is just stored and
+ * returned in the completion ring.
+ */
+struct cas_rx_desc {
+ u64 index;
+ u64 buffer;
+};
+
+/* received packets are put on the completion ring. */
+/* word 1 */
+#define RX_COMP1_DATA_SIZE_MASK 0x0000000007FFE000ULL
+#define RX_COMP1_DATA_SIZE_SHIFT 13
+#define RX_COMP1_DATA_OFF_MASK 0x000001FFF8000000ULL
+#define RX_COMP1_DATA_OFF_SHIFT 27
+#define RX_COMP1_DATA_INDEX_MASK 0x007FFE0000000000ULL
+#define RX_COMP1_DATA_INDEX_SHIFT 41
+#define RX_COMP1_SKIP_MASK 0x0180000000000000ULL
+#define RX_COMP1_SKIP_SHIFT 55
+#define RX_COMP1_RELEASE_NEXT 0x0200000000000000ULL
+#define RX_COMP1_SPLIT_PKT 0x0400000000000000ULL
+#define RX_COMP1_RELEASE_FLOW 0x0800000000000000ULL
+#define RX_COMP1_RELEASE_DATA 0x1000000000000000ULL
+#define RX_COMP1_RELEASE_HDR 0x2000000000000000ULL
+#define RX_COMP1_TYPE_MASK 0xC000000000000000ULL
+#define RX_COMP1_TYPE_SHIFT 62
+
+/* word 2 */
+#define RX_COMP2_NEXT_INDEX_MASK 0x00000007FFE00000ULL
+#define RX_COMP2_NEXT_INDEX_SHIFT 21
+#define RX_COMP2_HDR_SIZE_MASK 0x00000FF800000000ULL
+#define RX_COMP2_HDR_SIZE_SHIFT 35
+#define RX_COMP2_HDR_OFF_MASK 0x0003F00000000000ULL
+#define RX_COMP2_HDR_OFF_SHIFT 44
+#define RX_COMP2_HDR_INDEX_MASK 0xFFFC000000000000ULL
+#define RX_COMP2_HDR_INDEX_SHIFT 50
+
+/* word 3 */
+#define RX_COMP3_SMALL_PKT 0x0000000000000001ULL
+#define RX_COMP3_JUMBO_PKT 0x0000000000000002ULL
+#define RX_COMP3_JUMBO_HDR_SPLIT_EN 0x0000000000000004ULL
+#define RX_COMP3_CSUM_START_MASK 0x000000000007F000ULL
+#define RX_COMP3_CSUM_START_SHIFT 12
+#define RX_COMP3_FLOWID_MASK 0x0000000001F80000ULL
+#define RX_COMP3_FLOWID_SHIFT 19
+#define RX_COMP3_OPCODE_MASK 0x000000000E000000ULL
+#define RX_COMP3_OPCODE_SHIFT 25
+#define RX_COMP3_FORCE_FLAG 0x0000000010000000ULL
+#define RX_COMP3_NO_ASSIST 0x0000000020000000ULL
+#define RX_COMP3_LOAD_BAL_MASK 0x000001F800000000ULL
+#define RX_COMP3_LOAD_BAL_SHIFT 35
+#define RX_PLUS_COMP3_ENC_PKT 0x0000020000000000ULL /* cas+ */
+#define RX_COMP3_L3_HEAD_OFF_MASK 0x0000FE0000000000ULL /* cas */
+#define RX_COMP3_L3_HEAD_OFF_SHIFT 41
+#define RX_PLUS_COMP_L3_HEAD_OFF_MASK 0x0000FC0000000000ULL /* cas+ */
+#define RX_PLUS_COMP_L3_HEAD_OFF_SHIFT 42
+#define RX_COMP3_SAP_MASK 0xFFFF000000000000ULL
+#define RX_COMP3_SAP_SHIFT 48
+
+/* word 4 */
+#define RX_COMP4_TCP_CSUM_MASK 0x000000000000FFFFULL
+#define RX_COMP4_TCP_CSUM_SHIFT 0
+#define RX_COMP4_PKT_LEN_MASK 0x000000003FFF0000ULL
+#define RX_COMP4_PKT_LEN_SHIFT 16
+#define RX_COMP4_PERFECT_MATCH_MASK 0x00000003C0000000ULL
+#define RX_COMP4_PERFECT_MATCH_SHIFT 30
+#define RX_COMP4_ZERO 0x0000080000000000ULL
+#define RX_COMP4_HASH_VAL_MASK 0x0FFFF00000000000ULL
+#define RX_COMP4_HASH_VAL_SHIFT 44
+#define RX_COMP4_HASH_PASS 0x1000000000000000ULL
+#define RX_COMP4_BAD 0x4000000000000000ULL
+#define RX_COMP4_LEN_MISMATCH 0x8000000000000000ULL
+
+/* we encode the following: ring/index/release. only 14 bits
+ * are usable.
+ * NOTE: the encoding is dependent upon RX_DESC_RING_SIZE and
+ * MAX_RX_DESC_RINGS. */
+#define RX_INDEX_NUM_MASK 0x0000000000000FFFULL
+#define RX_INDEX_NUM_SHIFT 0
+#define RX_INDEX_RING_MASK 0x0000000000001000ULL
+#define RX_INDEX_RING_SHIFT 12
+#define RX_INDEX_RELEASE 0x0000000000002000ULL
+
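+/* A sketch of the encoding (values chosen purely for illustration): entry 5
+ * of desc ring 1, marked for release, would be packed as
+ *   index = CAS_BASE(RX_INDEX_NUM, 5) |
+ *           CAS_BASE(RX_INDEX_RING, 1) |
+ *           RX_INDEX_RELEASE;
+ */
+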
+struct cas_rx_comp {
+ u64 word1;
+ u64 word2;
+ u64 word3;
+ u64 word4;
+};
+
+enum link_state {
+ link_down = 0, /* No link, will retry */
+ link_aneg, /* Autoneg in progress */
+ link_force_try, /* Try Forced link speed */
+ link_force_ret, /* Forced mode worked, retrying autoneg */
+ link_force_ok, /* Stay in forced mode */
+ link_up /* Link is up */
+};
+
+typedef struct cas_page {
+ struct list_head list;
+ struct page *buffer;
+ dma_addr_t dma_addr;
+ int used;
+} cas_page_t;
+
+
+/* some alignment constraints:
+ * TX DESC, RX DESC, and RX COMP must each be 8K aligned.
+ * TX COMPWB must be 8-byte aligned.
+ * to accomplish this, here's what we do:
+ *
+ * INIT_BLOCK_RX_COMP = 64k (already aligned)
+ * INIT_BLOCK_RX_DESC = 8k
+ * INIT_BLOCK_TX = 8k
+ * INIT_BLOCK_RX1_DESC = 8k
+ * TX COMPWB
+ */
+#define INIT_BLOCK_TX (TX_DESC_RING_SIZE)
+#define INIT_BLOCK_RX_DESC (RX_DESC_RING_SIZE)
+#define INIT_BLOCK_RX_COMP (RX_COMP_RING_SIZE)
+
+struct cas_init_block {
+ struct cas_rx_comp rxcs[N_RX_COMP_RINGS][INIT_BLOCK_RX_COMP];
+ struct cas_rx_desc rxds[N_RX_DESC_RINGS][INIT_BLOCK_RX_DESC];
+ struct cas_tx_desc txds[N_TX_RINGS][INIT_BLOCK_TX];
+ u64 tx_compwb;
+};
+
+/* Tiny buffers to deal with the target abort issue. We allocate a bit
+ * extra so that we don't have target abort issues with these buffers
+ * as well.
+ */
+#define TX_TINY_BUF_LEN 0x100
+#define TX_TINY_BUF_BLOCK ((INIT_BLOCK_TX + 1)*TX_TINY_BUF_LEN)
+
+struct cas_tiny_count {
+ int nbufs;
+ int used;
+};
+
+struct cas {
+ spinlock_t lock; /* for most bits */
+ spinlock_t tx_lock[N_TX_RINGS]; /* tx bits */
+ spinlock_t stat_lock[N_TX_RINGS + 1]; /* for stat gathering */
+ spinlock_t rx_inuse_lock; /* rx inuse list */
+ spinlock_t rx_spare_lock; /* rx spare list */
+
+ void __iomem *regs;
+ int tx_new[N_TX_RINGS], tx_old[N_TX_RINGS];
+ int rx_old[N_RX_DESC_RINGS];
+ int rx_cur[N_RX_COMP_RINGS], rx_new[N_RX_COMP_RINGS];
+ int rx_last[N_RX_DESC_RINGS];
+
+ /* Set when chip is actually in operational state
+ * (ie. not power managed) */
+ int hw_running;
+ int opened;
+ struct semaphore pm_sem; /* open/close/suspend/resume */
+
+ struct cas_init_block *init_block;
+ struct cas_tx_desc *init_txds[MAX_TX_RINGS];
+ struct cas_rx_desc *init_rxds[MAX_RX_DESC_RINGS];
+ struct cas_rx_comp *init_rxcs[MAX_RX_COMP_RINGS];
+
+ /* we use sk_buffs for tx and pages for rx. the rx skbuffs
+ * are there for flow re-assembly. */
+ struct sk_buff *tx_skbs[N_TX_RINGS][TX_DESC_RING_SIZE];
+ struct sk_buff_head rx_flows[N_RX_FLOWS];
+ cas_page_t *rx_pages[N_RX_DESC_RINGS][RX_DESC_RING_SIZE];
+ struct list_head rx_spare_list, rx_inuse_list;
+ int rx_spares_needed;
+
+ /* for small packets when copying would be quicker than
+ mapping */
+ struct cas_tiny_count tx_tiny_use[N_TX_RINGS][TX_DESC_RING_SIZE];
+ u8 *tx_tiny_bufs[N_TX_RINGS];
+
+ u32 msg_enable;
+
+ /* N_TX_RINGS must be >= N_RX_DESC_RINGS */
+ struct net_device_stats net_stats[N_TX_RINGS + 1];
+
+ u32 pci_cfg[64 >> 2];
+ u8 pci_revision;
+
+ int phy_type;
+ int phy_addr;
+ u32 phy_id;
+#define CAS_FLAG_1000MB_CAP 0x00000001
+#define CAS_FLAG_REG_PLUS 0x00000002
+#define CAS_FLAG_TARGET_ABORT 0x00000004
+#define CAS_FLAG_SATURN 0x00000008
+#define CAS_FLAG_RXD_POST_MASK 0x000000F0
+#define CAS_FLAG_RXD_POST_SHIFT 4
+#define CAS_FLAG_RXD_POST(x) ((1 << (CAS_FLAG_RXD_POST_SHIFT + (x))) & \
+ CAS_FLAG_RXD_POST_MASK)
+#define CAS_FLAG_ENTROPY_DEV 0x00000100
+#define CAS_FLAG_NO_HW_CSUM 0x00000200
+ u32 cas_flags;
+ int packet_min; /* minimum packet size */
+ int tx_fifo_size;
+ int rx_fifo_size;
+ int rx_pause_off;
+ int rx_pause_on;
+ int crc_size; /* 4 if half-duplex */
+
+ int pci_irq_INTC;
+ int min_frame_size; /* for tx fifo workaround */
+
+ /* page size allocation */
+ int page_size;
+ int page_order;
+ int mtu_stride;
+
+ u32 mac_rx_cfg;
+
+ /* Autoneg & PHY control */
+ int link_cntl;
+ int link_fcntl;
+ enum link_state lstate;
+ struct timer_list link_timer;
+ int timer_ticks;
+ struct work_struct reset_task;
+#if 0
+ atomic_t reset_task_pending;
+#else
+ atomic_t reset_task_pending;
+ atomic_t reset_task_pending_mtu;
+ atomic_t reset_task_pending_spare;
+ atomic_t reset_task_pending_all;
+#endif
+
+#ifdef CONFIG_CASSINI_QGE_DEBUG
+ atomic_t interrupt_seen; /* 1 if any interrupts are getting through */
+#endif
+
+ /* Link-down problem workaround */
+#define LINK_TRANSITION_UNKNOWN 0
+#define LINK_TRANSITION_ON_FAILURE 1
+#define LINK_TRANSITION_STILL_FAILED 2
+#define LINK_TRANSITION_LINK_UP 3
+#define LINK_TRANSITION_LINK_CONFIG 4
+#define LINK_TRANSITION_LINK_DOWN 5
+#define LINK_TRANSITION_REQUESTED_RESET 6
+ int link_transition;
+ int link_transition_jiffies_valid;
+ unsigned long link_transition_jiffies;
+
+ /* Tuning */
+ u8 orig_cacheline_size; /* value when loaded */
+#define CAS_PREF_CACHELINE_SIZE 0x20 /* Minimum desired */
+
+ /* Diagnostic counters and state. */
+ int casreg_len; /* reg-space size for dumping */
+ u64 pause_entered;
+ u16 pause_last_time_recvd;
+
+ dma_addr_t block_dvma, tx_tiny_dvma[N_TX_RINGS];
+ struct pci_dev *pdev;
+ struct net_device *dev;
+};
+
+#define TX_DESC_NEXT(r, x) (((x) + 1) & (TX_DESC_RINGN_SIZE(r) - 1))
+#define RX_DESC_ENTRY(r, x) ((x) & (RX_DESC_RINGN_SIZE(r) - 1))
+#define RX_COMP_ENTRY(r, x) ((x) & (RX_COMP_RINGN_SIZE(r) - 1))
+
+#define TX_BUFF_COUNT(r, x, y) ((x) <= (y) ? ((y) - (x)) : \
+ (TX_DESC_RINGN_SIZE(r) - (x) + (y)))
+
+#define TX_BUFFS_AVAIL(cp, i) ((cp)->tx_old[(i)] <= (cp)->tx_new[(i)] ? \
+ (cp)->tx_old[(i)] + (TX_DESC_RINGN_SIZE(i) - 1) - (cp)->tx_new[(i)] : \
+ (cp)->tx_old[(i)] - (cp)->tx_new[(i)] - 1)
+
+#define CAS_ALIGN(addr, align) \
+ (((unsigned long) (addr) + ((align) - 1UL)) & ~((align) - 1))
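/*
 * Worked examples (values assumed for illustration): CAS_ALIGN(0x1003, 8)
 * rounds up to 0x1008, and with a 128-entry TX ring TX_BUFFS_AVAIL()
 * reports 127 free slots when tx_old == tx_new (one slot is always kept
 * unused) and 4 free slots for tx_old = 10, tx_new = 5 after a wrap.
 */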
+
+#define RX_FIFO_SIZE 16384
+#define EXPANSION_ROM_SIZE 65536
+
+#define CAS_MC_EXACT_MATCH_SIZE 15
+#define CAS_MC_HASH_SIZE 256
+#define CAS_MC_HASH_MAX (CAS_MC_EXACT_MATCH_SIZE + \
+ CAS_MC_HASH_SIZE)
+
+#define TX_TARGET_ABORT_LEN 0x20
+#define RX_SWIVEL_OFF_VAL 0x2
+#define RX_AE_FREEN_VAL(x) (RX_DESC_RINGN_SIZE(x) >> 1)
+#define RX_AE_COMP_VAL (RX_COMP_RING_SIZE >> 1)
+#define RX_BLANK_INTR_PKT_VAL 0x05
+#define RX_BLANK_INTR_TIME_VAL 0x0F
+#define HP_TCP_THRESH_VAL 1530 /* reduce to enable reassembly */
+
+#define RX_SPARE_COUNT (RX_DESC_RING_SIZE >> 1)
+#define RX_SPARE_RECOVER_VAL (RX_SPARE_COUNT >> 2)
+
+#endif /* _CASSINI_H */
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index cdc07ccd733..a6078ad9b65 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -140,6 +140,7 @@
#include <asm/system.h>
#include <asm/io.h>
+#include <asm/irq.h>
#if ALLOW_DMA
#include <asm/dma.h>
#endif
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 25cc20e415d..fbf1c06ec5c 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1387,13 +1387,13 @@ static void e100_update_stats(struct nic *nic)
ns->collisions += nic->tx_collisions;
ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
le32_to_cpu(s->tx_lost_crs);
- ns->rx_dropped += le32_to_cpu(s->rx_resource_errors);
ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
nic->rx_over_length_errors;
ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
+ ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
le32_to_cpu(s->rx_alignment_errors) +
le32_to_cpu(s->rx_short_frame_errors) +
@@ -1727,12 +1727,10 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
if(unlikely(!(rfd_status & cb_ok))) {
/* Don't indicate if hardware indicates errors */
- nic->net_stats.rx_dropped++;
dev_kfree_skb_any(skb);
} else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
/* Don't indicate oversized frames */
nic->rx_over_length_errors++;
- nic->net_stats.rx_dropped++;
dev_kfree_skb_any(skb);
} else {
nic->net_stats.rx_packets++;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 7c8a0a22dcd..ee687c902a2 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2544,7 +2544,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.rlec + adapter->stats.mpc +
adapter->stats.cexterr;
- adapter->net_stats.rx_dropped = adapter->stats.mpc;
adapter->net_stats.rx_length_errors = adapter->stats.rlec;
adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 32d5fabd4b1..a2c4dd4fb22 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -99,7 +99,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs
static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*);
#ifdef CONFIG_PROC_FS
-#define IBMVETH_PROC_DIR "ibmveth"
+#define IBMVETH_PROC_DIR "net/ibmveth"
static struct proc_dir_entry *ibmveth_proc_dir;
#endif
@@ -1010,7 +1010,7 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
#ifdef CONFIG_PROC_FS
static void ibmveth_proc_register_driver(void)
{
- ibmveth_proc_dir = create_proc_entry(IBMVETH_PROC_DIR, S_IFDIR, proc_net);
+ ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, NULL);
if (ibmveth_proc_dir) {
SET_MODULE_OWNER(ibmveth_proc_dir);
}
@@ -1018,7 +1018,7 @@ static void ibmveth_proc_register_driver(void)
static void ibmveth_proc_unregister_driver(void)
{
- remove_proc_entry(IBMVETH_PROC_DIR, proc_net);
+ remove_proc_entry(IBMVETH_PROC_DIR, NULL);
}
static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 6d9de626c96..651c5a6578f 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -1875,11 +1875,11 @@ static int __init vlsi_mod_init(void)
sirpulse = !!sirpulse;
- /* create_proc_entry returns NULL if !CONFIG_PROC_FS.
+ /* proc_mkdir returns NULL if !CONFIG_PROC_FS.
* Failure to create the procfs entry is handled like running
* without procfs - it's not required for the driver to work.
*/
- vlsi_proc_root = create_proc_entry(PROC_DIR, S_IFDIR, NULL);
+ vlsi_proc_root = proc_mkdir(PROC_DIR, NULL);
if (vlsi_proc_root) {
/* protect registered procdir against module removal.
* Because we are in the module init path there's no race
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 5c555373adb..89d6d69be38 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1616,8 +1616,6 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
adapter->stats.icbc +
adapter->stats.ecbc + adapter->stats.mpc;
- adapter->net_stats.rx_dropped = adapter->stats.mpc;
-
/* see above
* adapter->net_stats.rx_length_errors = adapter->stats.rlec;
*/
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 82f236cc3b9..a842ecc60a3 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -1070,7 +1070,7 @@ static int __init pppoe_proc_init(void)
{
struct proc_dir_entry *p;
- p = create_proc_entry("pppoe", S_IRUGO, proc_net);
+ p = create_proc_entry("net/pppoe", S_IRUGO, NULL);
if (!p)
return -ENOMEM;
@@ -1142,7 +1142,7 @@ static void __exit pppoe_exit(void)
dev_remove_pack(&pppoes_ptype);
dev_remove_pack(&pppoed_ptype);
unregister_netdevice_notifier(&pppoe_notifier);
- remove_proc_entry("pppoe", proc_net);
+ remove_proc_entry("net/pppoe", NULL);
proto_unregister(&pppoe_sk_proto);
}
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index f0471d102e3..afb3f186b88 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -92,19 +92,18 @@ VERSION 2.2LK <2005/01/25>
#endif /* RTL8169_DEBUG */
#define R8169_MSG_DEFAULT \
- (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | \
- NETIF_MSG_IFDOWN)
+ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
#define TX_BUFFS_AVAIL(tp) \
(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
#ifdef CONFIG_R8169_NAPI
#define rtl8169_rx_skb netif_receive_skb
-#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx
+#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb
#define rtl8169_rx_quota(count, quota) min(count, quota)
#else
#define rtl8169_rx_skb netif_rx
-#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb
+#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx
#define rtl8169_rx_quota(count, quota) count
#endif
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index c829e6a2e8a..dd451e099a4 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -428,7 +428,7 @@ static int init_shared_mem(struct s2io_nic *nic)
DBG_PRINT(INIT_DBG,
"%s: Zero DMA address for TxDL. ", dev->name);
DBG_PRINT(INIT_DBG,
- "Virtual address %llx\n", (u64)tmp_v);
+ "Virtual address %p\n", tmp_v);
tmp_v = pci_alloc_consistent(nic->pdev,
PAGE_SIZE, &tmp_p);
if (!tmp_v) {
@@ -657,9 +657,10 @@ static void free_shared_mem(struct s2io_nic *nic)
mac_control->zerodma_virt_addr,
(dma_addr_t)0);
DBG_PRINT(INIT_DBG,
- "%s: Freeing TxDL with zero DMA addr. ", dev->name);
- DBG_PRINT(INIT_DBG, "Virtual address %llx\n",
- (u64)(mac_control->zerodma_virt_addr));
+ "%s: Freeing TxDL with zero DMA addr. ",
+ dev->name);
+ DBG_PRINT(INIT_DBG, "Virtual address %p\n",
+ mac_control->zerodma_virt_addr);
}
kfree(mac_control->fifos[i].list_info);
}
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index 6ee4771addf..b18c92cb629 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -235,7 +235,7 @@ static int SkDrvDeInitAdapter(SK_AC *pAC, int devNbr);
* Extern Function Prototypes
*
******************************************************************************/
-static const char SKRootName[] = "sk98lin";
+static const char SKRootName[] = "net/sk98lin";
static struct proc_dir_entry *pSkRootDir;
extern struct file_operations sk_proc_fops;
@@ -5216,17 +5216,15 @@ static struct pci_device_id skge_pci_tbl[] = {
{ PCI_VENDOR_ID_3COM, 0x80eb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_SYSKONNECT, 0x4300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_SYSKONNECT, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { PCI_VENDOR_ID_DLINK, 0x4c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+/* DLink card does not have a valid VPD, so this driver gags on it
+ * { PCI_VENDOR_ID_DLINK, 0x4c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ */
{ PCI_VENDOR_ID_MARVELL, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-#if 0 /* don't handle Yukon2 cards at the moment -- mlindner@syskonnect.de */
- { PCI_VENDOR_ID_MARVELL, 0x4360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { PCI_VENDOR_ID_MARVELL, 0x4361, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-#endif
{ PCI_VENDOR_ID_MARVELL, 0x5005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_CNET, 0x434e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015, },
{ PCI_VENDOR_ID_LINKSYS, 0x1064, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { 0, }
+ { 0 }
};
MODULE_DEVICE_TABLE(pci, skge_pci_tbl);
@@ -5244,20 +5242,20 @@ static int __init skge_init(void)
{
int error;
- pSkRootDir = proc_mkdir(SKRootName, proc_net);
+ pSkRootDir = proc_mkdir(SKRootName, NULL);
if (pSkRootDir)
pSkRootDir->owner = THIS_MODULE;
error = pci_register_driver(&skge_driver);
if (error)
- proc_net_remove(SKRootName);
+ remove_proc_entry(SKRootName, NULL);
return error;
}
static void __exit skge_exit(void)
{
pci_unregister_driver(&skge_driver);
- proc_net_remove(SKRootName);
+ remove_proc_entry(SKRootName, NULL);
}
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index d7c98515fdf..fd398da4993 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -42,7 +42,7 @@
#include "skge.h"
#define DRV_NAME "skge"
-#define DRV_VERSION "0.9"
+#define DRV_VERSION "1.1"
#define PFX DRV_NAME " "
#define DEFAULT_TX_RING_SIZE 128
@@ -105,41 +105,28 @@ static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 };
-/* Don't need to look at whole 16K.
- * last interesting register is descriptor poll timer.
- */
-#define SKGE_REGS_LEN (29*128)
-
static int skge_get_regs_len(struct net_device *dev)
{
- return SKGE_REGS_LEN;
+ return 0x4000;
}
/*
- * Returns copy of control register region
- * I/O region is divided into banks and certain regions are unreadable
+ * Returns copy of whole control register region
+ * Note: skip RAM address register because accessing it will
+ * cause bus hangs!
*/
static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
const struct skge_port *skge = netdev_priv(dev);
- unsigned long offs;
const void __iomem *io = skge->hw->regs;
- static const unsigned long bankmap
- = (1<<0) | (1<<2) | (1<<8) | (1<<9)
- | (1<<12) | (1<<13) | (1<<14) | (1<<15) | (1<<16)
- | (1<<17) | (1<<20) | (1<<21) | (1<<22) | (1<<23)
- | (1<<24) | (1<<25) | (1<<26) | (1<<27) | (1<<28);
regs->version = 1;
- for (offs = 0; offs < regs->len; offs += 128) {
- u32 len = min_t(u32, 128, regs->len - offs);
+ memset(p, 0, regs->len);
+ memcpy_fromio(p, io, B3_RAM_ADDR);
- if (bankmap & (1<<(offs/128)))
- memcpy_fromio(p + offs, io + offs, len);
- else
- memset(p + offs, 0, len);
- }
+ memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
+ regs->len - B3_RI_WTO_R1);
}
/* Wake on LAN only supported on Yukon chips with rev 1 or above */
@@ -669,7 +656,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
PHY_M_LED_BLINK_RT(BLINK_84MS) |
PHY_M_LEDC_TX_CTRL |
PHY_M_LEDC_DP_CTRL);
-
+
gm_phy_write(hw, port, PHY_MARV_LED_OVER,
PHY_M_LED_MO_RX(MO_LED_OFF) |
(skge->speed == SPEED_100 ?
@@ -775,17 +762,6 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
return 0;
}
-static struct sk_buff *skge_rx_alloc(struct net_device *dev, unsigned int size)
-{
- struct sk_buff *skb = dev_alloc_skb(size);
-
- if (likely(skb)) {
- skb->dev = dev;
- skb_reserve(skb, NET_IP_ALIGN);
- }
- return skb;
-}
-
/* Allocate and setup a new buffer for receiving */
static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
struct sk_buff *skb, unsigned int bufsize)
@@ -858,16 +834,17 @@ static int skge_rx_fill(struct skge_port *skge)
{
struct skge_ring *ring = &skge->rx_ring;
struct skge_element *e;
- unsigned int bufsize = skge->rx_buf_size;
e = ring->start;
do {
- struct sk_buff *skb = skge_rx_alloc(skge->netdev, bufsize);
+ struct sk_buff *skb;
+ skb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
if (!skb)
return -ENOMEM;
- skge_rx_setup(skge, e, skb, bufsize);
+ skb_reserve(skb, NET_IP_ALIGN);
+ skge_rx_setup(skge, e, skb, skge->rx_buf_size);
} while ( (e = e->next) != ring->start);
ring->to_clean = ring->start;
@@ -876,7 +853,7 @@ static int skge_rx_fill(struct skge_port *skge)
static void skge_link_up(struct skge_port *skge)
{
- skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
+ skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
netif_carrier_on(skge->netdev);
@@ -987,6 +964,8 @@ static void genesis_reset(struct skge_hw *hw, int port)
{
const u8 zero[8] = { 0 };
+ skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
+
/* reset the statistics module */
xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */
@@ -1021,8 +1000,6 @@ static void bcom_check_link(struct skge_hw *hw, int port)
(void) xm_phy_read(hw, port, PHY_BCOM_STAT);
status = xm_phy_read(hw, port, PHY_BCOM_STAT);
- pr_debug("bcom_check_link status=0x%x\n", status);
-
if ((status & PHY_ST_LSYNC) == 0) {
u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
@@ -1106,8 +1083,6 @@ static void bcom_phy_init(struct skge_port *skge, int jumbo)
{ 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
};
- pr_debug("bcom_phy_init\n");
-
/* read Id from external PHY (all have the same address) */
id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);
@@ -1340,6 +1315,8 @@ static void genesis_stop(struct skge_port *skge)
int port = skge->port;
u32 reg;
+ genesis_reset(hw, port);
+
/* Clear Tx packet arbiter timeout IRQ */
skge_write16(hw, B3_PA_CTRL,
port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);
@@ -1465,7 +1442,6 @@ static void genesis_link_up(struct skge_port *skge)
u16 cmd;
u32 mode, msk;
- pr_debug("genesis_link_up\n");
cmd = xm_read16(hw, port, XM_MMU_CMD);
/*
@@ -1578,7 +1554,6 @@ static void yukon_init(struct skge_hw *hw, int port)
struct skge_port *skge = netdev_priv(hw->dev[port]);
u16 ctrl, ct1000, adv;
- pr_debug("yukon_init\n");
if (skge->autoneg == AUTONEG_ENABLE) {
u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
@@ -1668,6 +1643,22 @@ static void yukon_reset(struct skge_hw *hw, int port)
| GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
}
+/* Apparently, early versions of Yukon-Lite had wrong chip_id? */
+static int is_yukon_lite_a0(struct skge_hw *hw)
+{
+ u32 reg;
+ int ret;
+
+ if (hw->chip_id != CHIP_ID_YUKON)
+ return 0;
+
+ reg = skge_read32(hw, B2_FAR);
+ skge_write8(hw, B2_FAR + 3, 0xff);
+ ret = (skge_read8(hw, B2_FAR + 3) != 0);
+ skge_write32(hw, B2_FAR, reg);
+ return ret;
+}
+
static void yukon_mac_init(struct skge_hw *hw, int port)
{
struct skge_port *skge = netdev_priv(hw->dev[port]);
@@ -1677,9 +1668,11 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
/* WA code for COMA mode -- set PHY reset */
if (hw->chip_id == CHIP_ID_YUKON_LITE &&
- hw->chip_rev >= CHIP_REV_YU_LITE_A3)
- skge_write32(hw, B2_GP_IO,
- (skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9));
+ hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
+ reg = skge_read32(hw, B2_GP_IO);
+ reg |= GP_DIR_9 | GP_IO_9;
+ skge_write32(hw, B2_GP_IO, reg);
+ }
/* hard reset */
skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
@@ -1687,10 +1680,12 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
/* WA code for COMA mode -- clear PHY reset */
if (hw->chip_id == CHIP_ID_YUKON_LITE &&
- hw->chip_rev >= CHIP_REV_YU_LITE_A3)
- skge_write32(hw, B2_GP_IO,
- (skge_read32(hw, B2_GP_IO) | GP_DIR_9)
- & ~GP_IO_9);
+ hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
+ reg = skge_read32(hw, B2_GP_IO);
+ reg |= GP_DIR_9;
+ reg &= ~GP_IO_9;
+ skge_write32(hw, B2_GP_IO, reg);
+ }
/* Set hardware config mode */
reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
@@ -1729,7 +1724,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
}
gma_write16(hw, port, GM_GP_CTRL, reg);
- skge_read16(hw, GMAC_IRQ_SRC);
+ skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
yukon_init(hw, port);
@@ -1779,9 +1774,11 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
/* Configure Rx MAC FIFO */
skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
- if (hw->chip_id == CHIP_ID_YUKON_LITE &&
- hw->chip_rev >= CHIP_REV_YU_LITE_A3)
+
+ /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
+ if (is_yukon_lite_a0(hw))
reg &= ~GMF_RX_F_FL_ON;
+
skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
/*
@@ -1801,20 +1798,26 @@ static void yukon_stop(struct skge_port *skge)
struct skge_hw *hw = skge->hw;
int port = skge->port;
- if (hw->chip_id == CHIP_ID_YUKON_LITE &&
- hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
- skge_write32(hw, B2_GP_IO,
- skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9);
- }
+ skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
+ yukon_reset(hw, port);
gma_write16(hw, port, GM_GP_CTRL,
gma_read16(hw, port, GM_GP_CTRL)
& ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
gma_read16(hw, port, GM_GP_CTRL);
+ if (hw->chip_id == CHIP_ID_YUKON_LITE &&
+ hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
+ u32 io = skge_read32(hw, B2_GP_IO);
+
+ io |= GP_DIR_9 | GP_IO_9;
+ skge_write32(hw, B2_GP_IO, io);
+ skge_read32(hw, B2_GP_IO);
+ }
+
/* set GPHY Control reset */
- skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
- skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
+ skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
+ skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
}
static void yukon_get_stats(struct skge_port *skge, u64 *data)
@@ -1873,10 +1876,8 @@ static void yukon_link_up(struct skge_port *skge)
int port = skge->port;
u16 reg;
- pr_debug("yukon_link_up\n");
-
/* Enable Transmit FIFO Underrun */
- skge_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK);
+ skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
reg = gma_read16(hw, port, GM_GP_CTRL);
if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
@@ -1896,7 +1897,6 @@ static void yukon_link_down(struct skge_port *skge)
int port = skge->port;
u16 ctrl;
- pr_debug("yukon_link_down\n");
gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
ctrl = gma_read16(hw, port, GM_GP_CTRL);
@@ -2112,7 +2112,6 @@ static int skge_up(struct net_device *dev)
skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
skge_led(skge, LED_MODE_ON);
- pr_debug("skge_up completed\n");
return 0;
free_rx_ring:
@@ -2135,15 +2134,20 @@ static int skge_down(struct net_device *dev)
netif_stop_queue(dev);
+ skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
+ if (hw->chip_id == CHIP_ID_GENESIS)
+ genesis_stop(skge);
+ else
+ yukon_stop(skge);
+
+ hw->intr_mask &= ~portirqmask[skge->port];
+ skge_write32(hw, B0_IMSK, hw->intr_mask);
+
/* Stop transmitter */
skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
RB_RST_SET|RB_DIS_OP_MD);
- if (hw->chip_id == CHIP_ID_GENESIS)
- genesis_stop(skge);
- else
- yukon_stop(skge);
/* Disable Force Sync bit and Enable Alloc bit */
skge_write8(hw, SK_REG(port, TXA_CTRL),
@@ -2367,8 +2371,6 @@ static void genesis_set_multicast(struct net_device *dev)
u32 mode;
u8 filter[8];
- pr_debug("genesis_set_multicast flags=%x count=%d\n", dev->flags, dev->mc_count);
-
mode = xm_read32(hw, port, XM_MODE);
mode |= XM_MD_ENA_HASH;
if (dev->flags & IFF_PROMISC)
@@ -2435,6 +2437,14 @@ static void yukon_set_multicast(struct net_device *dev)
gma_write16(hw, port, GM_RX_CTRL, reg);
}
+static inline u16 phy_length(const struct skge_hw *hw, u32 status)
+{
+ if (hw->chip_id == CHIP_ID_GENESIS)
+ return status >> XMR_FS_LEN_SHIFT;
+ else
+ return status >> GMR_FS_LEN_SHIFT;
+}
+
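/* Worked example (status value assumed for illustration): on Yukon the
 * frame length occupies GMR_FS_LEN (bits 31..16), so a status word of
 * 0x05ea2000 gives phy_length() == 0x05ea == 1514 bytes; on Genesis the
 * field is XMR_FS_LEN (bits 31..18), i.e. the same length encodes as
 * 0x5ea << 18.
 */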
static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
{
if (hw->chip_id == CHIP_ID_GENESIS)
@@ -2444,80 +2454,99 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
(status & GMR_FS_RX_OK) == 0;
}
-static void skge_rx_error(struct skge_port *skge, int slot,
- u32 control, u32 status)
-{
- if (netif_msg_rx_err(skge))
- printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n",
- skge->netdev->name, slot, control, status);
-
- if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
- skge->net_stats.rx_length_errors++;
- else if (skge->hw->chip_id == CHIP_ID_GENESIS) {
- if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
- skge->net_stats.rx_length_errors++;
- if (status & XMR_FS_FRA_ERR)
- skge->net_stats.rx_frame_errors++;
- if (status & XMR_FS_FCS_ERR)
- skge->net_stats.rx_crc_errors++;
- } else {
- if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
- skge->net_stats.rx_length_errors++;
- if (status & GMR_FS_FRAGMENT)
- skge->net_stats.rx_frame_errors++;
- if (status & GMR_FS_CRC_ERR)
- skge->net_stats.rx_crc_errors++;
- }
-}
/* Get receive buffer from descriptor.
* Handles copy of small buffers and reallocation failures
*/
static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
struct skge_element *e,
- unsigned int len)
+ u32 control, u32 status, u16 csum)
{
- struct sk_buff *nskb, *skb;
+ struct sk_buff *skb;
+ u16 len = control & BMU_BBC;
+
+ if (unlikely(netif_msg_rx_status(skge)))
+ printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
+ skge->netdev->name, e - skge->rx_ring.start,
+ status, len);
+
+ if (len > skge->rx_buf_size)
+ goto error;
+
+ if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
+ goto error;
+
+ if (bad_phy_status(skge->hw, status))
+ goto error;
+
+ if (phy_length(skge->hw, status) != len)
+ goto error;
if (len < RX_COPY_THRESHOLD) {
- nskb = skge_rx_alloc(skge->netdev, len + NET_IP_ALIGN);
- if (unlikely(!nskb))
- return NULL;
+ skb = dev_alloc_skb(len + 2);
+ if (!skb)
+ goto resubmit;
+ skb_reserve(skb, 2);
pci_dma_sync_single_for_cpu(skge->hw->pdev,
pci_unmap_addr(e, mapaddr),
len, PCI_DMA_FROMDEVICE);
- memcpy(nskb->data, e->skb->data, len);
+ memcpy(skb->data, e->skb->data, len);
pci_dma_sync_single_for_device(skge->hw->pdev,
pci_unmap_addr(e, mapaddr),
len, PCI_DMA_FROMDEVICE);
-
- if (skge->rx_csum) {
- struct skge_rx_desc *rd = e->desc;
- nskb->csum = le16_to_cpu(rd->csum2);
- nskb->ip_summed = CHECKSUM_HW;
- }
skge_rx_reuse(e, skge->rx_buf_size);
- return nskb;
} else {
- nskb = skge_rx_alloc(skge->netdev, skge->rx_buf_size);
- if (unlikely(!nskb))
- return NULL;
+ struct sk_buff *nskb;
+ nskb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
+ if (!nskb)
+ goto resubmit;
pci_unmap_single(skge->hw->pdev,
pci_unmap_addr(e, mapaddr),
pci_unmap_len(e, maplen),
PCI_DMA_FROMDEVICE);
skb = e->skb;
- if (skge->rx_csum) {
- struct skge_rx_desc *rd = e->desc;
- skb->csum = le16_to_cpu(rd->csum2);
- skb->ip_summed = CHECKSUM_HW;
- }
-
+ prefetch(skb->data);
skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
- return skb;
}
+
+ skb_put(skb, len);
+ skb->dev = skge->netdev;
+ if (skge->rx_csum) {
+ skb->csum = csum;
+ skb->ip_summed = CHECKSUM_HW;
+ }
+
+ skb->protocol = eth_type_trans(skb, skge->netdev);
+
+ return skb;
+error:
+
+ if (netif_msg_rx_err(skge))
+ printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
+ skge->netdev->name, e - skge->rx_ring.start,
+ control, status);
+
+ if (skge->hw->chip_id == CHIP_ID_GENESIS) {
+ if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
+ skge->net_stats.rx_length_errors++;
+ if (status & XMR_FS_FRA_ERR)
+ skge->net_stats.rx_frame_errors++;
+ if (status & XMR_FS_FCS_ERR)
+ skge->net_stats.rx_crc_errors++;
+ } else {
+ if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
+ skge->net_stats.rx_length_errors++;
+ if (status & GMR_FS_FRAGMENT)
+ skge->net_stats.rx_frame_errors++;
+ if (status & GMR_FS_CRC_ERR)
+ skge->net_stats.rx_crc_errors++;
+ }
+
+resubmit:
+ skge_rx_reuse(e, skge->rx_buf_size);
+ return NULL;
}
@@ -2530,37 +2559,19 @@ static int skge_poll(struct net_device *dev, int *budget)
unsigned int to_do = min(dev->quota, *budget);
unsigned int work_done = 0;
- pr_debug("skge_poll\n");
-
for (e = ring->to_clean; work_done < to_do; e = e->next) {
struct skge_rx_desc *rd = e->desc;
struct sk_buff *skb;
- u32 control, len, status;
+ u32 control;
rmb();
control = rd->control;
if (control & BMU_OWN)
break;
- len = control & BMU_BBC;
- status = rd->status;
-
- if (unlikely((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
- || bad_phy_status(hw, status))) {
- skge_rx_error(skge, e - ring->start, control, status);
- skge_rx_reuse(e, skge->rx_buf_size);
- continue;
- }
-
- if (netif_msg_rx_status(skge))
- printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
- dev->name, e - ring->start, rd->status, len);
-
- skb = skge_rx_get(skge, e, len);
+ skb = skge_rx_get(skge, e, control, rd->status,
+ le16_to_cpu(rd->csum2));
if (likely(skb)) {
- skb_put(skb, len);
- skb->protocol = eth_type_trans(skb, dev);
-
dev->last_rx = jiffies;
netif_receive_skb(skb);
@@ -2672,9 +2683,9 @@ static void skge_error_irq(struct skge_hw *hw)
if (hw->chip_id == CHIP_ID_GENESIS) {
/* clear xmac errors */
if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
- skge_write16(hw, SK_REG(0, RX_MFF_CTRL1), MFF_CLR_INSTAT);
+ skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT);
if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
- skge_write16(hw, SK_REG(0, RX_MFF_CTRL2), MFF_CLR_INSTAT);
+ skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT);
} else {
/* Timestamp (unused) overflow */
if (hwstatus & IS_IRQ_TIST_OV)
@@ -3000,9 +3011,6 @@ static int skge_reset(struct skge_hw *hw)
skge_write32(hw, B0_IMSK, hw->intr_mask);
- if (hw->chip_id != CHIP_ID_GENESIS)
- skge_write8(hw, GMAC_IRQ_MSK, 0);
-
spin_lock_bh(&hw->phy_lock);
for (i = 0; i < hw->ports; i++) {
if (hw->chip_id == CHIP_ID_GENESIS)
@@ -3230,6 +3238,11 @@ static void __devexit skge_remove(struct pci_dev *pdev)
dev0 = hw->dev[0];
unregister_netdev(dev0);
+ skge_write32(hw, B0_IMSK, 0);
+ skge_write16(hw, B0_LED, LED_STAT_OFF);
+ skge_pci_clear(hw);
+ skge_write8(hw, B0_CTST, CS_RST_SET);
+
tasklet_kill(&hw->ext_tasklet);
free_irq(pdev->irq, hw);
@@ -3238,7 +3251,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
if (dev1)
free_netdev(dev1);
free_netdev(dev0);
- skge_write16(hw, B0_LED, LED_STAT_OFF);
+
iounmap(hw->regs);
kfree(hw);
pci_set_drvdata(pdev, NULL);
@@ -3257,7 +3270,10 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
struct skge_port *skge = netdev_priv(dev);
if (netif_running(dev)) {
netif_carrier_off(dev);
- skge_down(dev);
+ if (skge->wol)
+ netif_stop_queue(dev);
+ else
+ skge_down(dev);
}
netif_device_detach(dev);
wol |= skge->wol;
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index f1680beb8e6..72c175b87a5 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -953,6 +953,7 @@ enum {
*/
enum {
XMR_FS_LEN = 0x3fff<<18, /* Bit 31..18: Rx Frame Length */
+ XMR_FS_LEN_SHIFT = 18,
XMR_FS_2L_VLAN = 1<<17, /* Bit 17: tagged wh 2Lev VLAN ID*/
XMR_FS_1_VLAN = 1<<16, /* Bit 16: tagged wh 1ev VLAN ID*/
XMR_FS_BC = 1<<15, /* Bit 15: Broadcast Frame */
@@ -1868,6 +1869,7 @@ enum {
/* Receive Frame Status Encoding */
enum {
GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */
+ GMR_FS_LEN_SHIFT = 16,
GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */
GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */
GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */
@@ -2008,7 +2010,7 @@ enum {
GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
-#define GMAC_DEF_MSK (GM_IS_TX_CO_OV | GM_IS_RX_CO_OV | GM_IS_TX_FF_UR)
+#define GMAC_DEF_MSK (GM_IS_RX_FF_OR | GM_IS_TX_FF_UR)
/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
/* Bits 15.. 2: reserved */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 4e19220473d..c796f41b4a5 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1817,6 +1817,10 @@ spider_net_setup_phy(struct spider_net_card *card)
/* LEDs active in both modes, autosense prio = fiber */
spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x945f);
+ /* switch off fibre autoneg */
+ spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0xfc01);
+ spider_net_write_phy(card->netdev, 1, 0x0b, 0x0004);
+
phy->def->ops->read_link(phy);
pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name,
phy->speed, phy->duplex==1 ? "Full" : "Half");
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7599f52e15b..1802c3b4879 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,8 +67,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.39"
-#define DRV_MODULE_RELDATE "September 5, 2005"
+#define DRV_MODULE_VERSION "3.42"
+#define DRV_MODULE_RELDATE "Oct 3, 2005"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -3389,7 +3389,8 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
struct tg3 *tp = netdev_priv(dev);
struct tg3_hw_status *sblk = tp->hw_status;
- if (sblk->status & SD_STATUS_UPDATED) {
+ if ((sblk->status & SD_STATUS_UPDATED) ||
+ !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
0x00000001);
return IRQ_RETVAL(1);
@@ -3442,31 +3443,47 @@ static void tg3_tx_timeout(struct net_device *dev)
schedule_work(&tp->reset_task);
}
+/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
+static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
+{
+ u32 base = (u32) mapping & 0xffffffff;
+
+ return ((base > 0xffffdcc0) &&
+ (base + len + 8 < base));
+}
+
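/* Worked example (mapping value assumed for illustration): for a DMA
 * address whose low 32 bits are 0xfffff000 and len = 0x1000, base is
 * above 0xffffdcc0 and base + len + 8 wraps around to 8 < base, so
 * tg3_4g_overflow_test() returns true and the workaround path copies
 * the skb rather than letting it straddle the 4GB boundary.
 */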
static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
- u32 guilty_entry, int guilty_len,
- u32 last_plus_one, u32 *start, u32 mss)
+ u32 last_plus_one, u32 *start,
+ u32 base_flags, u32 mss)
{
struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
- dma_addr_t new_addr;
+ dma_addr_t new_addr = 0;
u32 entry = *start;
- int i;
+ int i, ret = 0;
if (!new_skb) {
- dev_kfree_skb(skb);
- return -1;
+ ret = -1;
+ } else {
+ /* New SKB is guaranteed to be linear. */
+ entry = *start;
+ new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
+ PCI_DMA_TODEVICE);
+ /* Make sure new skb does not cross any 4G boundaries.
+ * Drop the packet if it does.
+ */
+ if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
+ ret = -1;
+ dev_kfree_skb(new_skb);
+ new_skb = NULL;
+ } else {
+ tg3_set_txd(tp, entry, new_addr, new_skb->len,
+ base_flags, 1 | (mss << 1));
+ *start = NEXT_TX(entry);
+ }
}
- /* New SKB is guaranteed to be linear. */
- entry = *start;
- new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
- PCI_DMA_TODEVICE);
- tg3_set_txd(tp, entry, new_addr, new_skb->len,
- (skb->ip_summed == CHECKSUM_HW) ?
- TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
- *start = NEXT_TX(entry);
-
/* Now clean up the sw ring entries. */
i = 0;
while (entry != last_plus_one) {
@@ -3491,7 +3508,7 @@ static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
dev_kfree_skb(skb);
- return 0;
+ return ret;
}
static void tg3_set_txd(struct tg3 *tp, int entry,
@@ -3517,19 +3534,10 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
}
-static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
-{
- u32 base = (u32) mapping & 0xffffffff;
-
- return ((base > 0xffffdcc0) &&
- (base + len + 8 < base));
-}
-
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
dma_addr_t mapping;
- unsigned int i;
u32 len, entry, base_flags, mss;
int would_hit_hwbug;
@@ -3624,7 +3632,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
would_hit_hwbug = 0;
if (tg3_4g_overflow_test(mapping, len))
- would_hit_hwbug = entry + 1;
+ would_hit_hwbug = 1;
tg3_set_txd(tp, entry, mapping, len, base_flags,
(skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
@@ -3648,12 +3656,8 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
tp->tx_buffers[entry].skb = NULL;
pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
- if (tg3_4g_overflow_test(mapping, len)) {
- /* Only one should match. */
- if (would_hit_hwbug)
- BUG();
- would_hit_hwbug = entry + 1;
- }
+ if (tg3_4g_overflow_test(mapping, len))
+ would_hit_hwbug = 1;
if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
tg3_set_txd(tp, entry, mapping, len,
@@ -3669,34 +3673,15 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (would_hit_hwbug) {
u32 last_plus_one = entry;
u32 start;
- unsigned int len = 0;
-
- would_hit_hwbug -= 1;
- entry = entry - 1 - skb_shinfo(skb)->nr_frags;
- entry &= (TG3_TX_RING_SIZE - 1);
- start = entry;
- i = 0;
- while (entry != last_plus_one) {
- if (i == 0)
- len = skb_headlen(skb);
- else
- len = skb_shinfo(skb)->frags[i-1].size;
- if (entry == would_hit_hwbug)
- break;
-
- i++;
- entry = NEXT_TX(entry);
-
- }
+ start = entry - 1 - skb_shinfo(skb)->nr_frags;
+ start &= (TG3_TX_RING_SIZE - 1);
/* If the workaround fails due to memory/mapping
* failure, silently drop this packet.
*/
- if (tigon3_4gb_hwbug_workaround(tp, skb,
- entry, len,
- last_plus_one,
- &start, mss))
+ if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
+ &start, base_flags, mss))
goto out_unlock;
entry = start;
@@ -5411,6 +5396,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
struct tg3 *tp = netdev_priv(dev);
struct sockaddr *addr = p;
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
spin_lock_bh(&tp->lock);
@@ -5822,6 +5810,13 @@ static int tg3_reset_hw(struct tg3 *tp)
}
memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
+ if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
+ tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+ /* reset to prevent losing 1st rx packet intermittently */
+ tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+ udelay(10);
+ }
+
tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
@@ -5953,7 +5948,7 @@ static int tg3_reset_hw(struct tg3 *tp)
tw32(MAC_LED_CTRL, tp->led_ctrl);
tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
- if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
tw32_f(MAC_RX_MODE, RX_MODE_RESET);
udelay(10);
}
@@ -7376,12 +7371,17 @@ static int tg3_nway_reset(struct net_device *dev)
if (!netif_running(dev))
return -EAGAIN;
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+ return -EINVAL;
+
spin_lock_bh(&tp->lock);
r = -EINVAL;
tg3_readphy(tp, MII_BMCR, &bmcr);
if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
- (bmcr & BMCR_ANENABLE)) {
- tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
+ ((bmcr & BMCR_ANENABLE) ||
+ (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
+ tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
+ BMCR_ANENABLE);
r = 0;
}
spin_unlock_bh(&tp->lock);
@@ -7943,19 +7943,32 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
struct tg3_rx_buffer_desc *desc;
if (loopback_mode == TG3_MAC_LOOPBACK) {
+ /* HW errata - mac loopback fails in some cases on 5780.
+ * Normal traffic and PHY loopback are not affected by
+ * errata.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
+ return 0;
+
mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
MAC_MODE_PORT_MODE_GMII;
tw32(MAC_MODE, mac_mode);
} else if (loopback_mode == TG3_PHY_LOOPBACK) {
+ tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
+ BMCR_SPEED1000);
+ udelay(40);
+ /* reset to prevent losing 1st rx packet intermittently */
+ if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
+ tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+ udelay(10);
+ tw32_f(MAC_RX_MODE, tp->rx_mode);
+ }
mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
mac_mode &= ~MAC_MODE_LINK_POLARITY;
tw32(MAC_MODE, mac_mode);
-
- tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
- BMCR_SPEED1000);
}
else
return -EINVAL;
@@ -9271,6 +9284,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
static struct pci_device_id write_reorder_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_8385_0) },
{ },
};
u32 misc_ctrl_reg;
@@ -9285,15 +9300,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
#endif
- /* If we have an AMD 762 chipset, write
- * reordering to the mailbox registers done by the host
- * controller can cause major troubles. We read back from
- * every mailbox register write to force the writes to be
- * posted to the chip in order.
- */
- if (pci_dev_present(write_reorder_chipsets))
- tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
-
/* Force memory write invalidate off. If we leave it on,
* then on 5700_BX chips we have to enable a workaround.
* The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
@@ -9424,6 +9430,16 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
+ /* If we have an AMD 762 or VIA K8T800 chipset, write
+ * reordering to the mailbox registers done by the host
+ * controller can cause major troubles. We read back from
+ * every mailbox register write to force the writes to be
+ * posted to the chip in order.
+ */
+ if (pci_dev_present(write_reorder_chipsets) &&
+ !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
+ tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
tp->pci_lat_timer < 64) {
tp->pci_lat_timer = 64;
@@ -9532,7 +9548,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->write32_rx_mbox = tg3_write_indirect_mbox;
iounmap(tp->regs);
- tp->regs = 0;
+ tp->regs = NULL;
pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
pci_cmd &= ~PCI_COMMAND_MEMORY;
@@ -10338,6 +10354,44 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
};
}
+static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
+{
+ if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+ strcpy(str, "PCI Express");
+ return str;
+ } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
+ u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
+
+ strcpy(str, "PCIX:");
+
+ if ((clock_ctrl == 7) ||
+ ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
+ GRC_MISC_CFG_BOARD_ID_5704CIOBE))
+ strcat(str, "133MHz");
+ else if (clock_ctrl == 0)
+ strcat(str, "33MHz");
+ else if (clock_ctrl == 2)
+ strcat(str, "50MHz");
+ else if (clock_ctrl == 4)
+ strcat(str, "66MHz");
+ else if (clock_ctrl == 6)
+ strcat(str, "100MHz");
+ else if (clock_ctrl == 7)
+ strcat(str, "133MHz");
+ } else {
+ strcpy(str, "PCI:");
+ if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
+ strcat(str, "66MHz");
+ else
+ strcat(str, "33MHz");
+ }
+ if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
+ strcat(str, ":32-bit");
+ else
+ strcat(str, ":64-bit");
+ return str;
+}
+
static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
{
struct pci_dev *peer;
@@ -10400,6 +10454,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
struct net_device *dev;
struct tg3 *tp;
int i, err, pci_using_dac, pm_cap;
+ char str[40];
if (tg3_version_printed++ == 0)
printk(KERN_INFO "%s", version);
@@ -10645,16 +10700,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
- printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
+ printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
dev->name,
tp->board_part_number,
tp->pci_chip_rev_id,
tg3_phy_string(tp),
- ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
- ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
- ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
- ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
- ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
+ tg3_bus_string(tp, str),
(tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
for (i = 0; i < 6; i++)
@@ -10680,7 +10731,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err_out_iounmap:
if (tp->regs) {
iounmap(tp->regs);
- tp->regs = 0;
+ tp->regs = NULL;
}
err_out_free_dev:
@@ -10705,7 +10756,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
if (tp->regs) {
iounmap(tp->regs);
- tp->regs = 0;
+ tp->regs = NULL;
}
free_netdev(dev);
pci_release_regions(pdev);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index c184b773e58..2e733c60bfa 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2246,6 +2246,7 @@ struct tg3 {
(X) == PHY_ID_BCM5411 || (X) == PHY_ID_BCM5701 || \
(X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \
(X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \
+ (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5780 || \
(X) == PHY_ID_BCM8002)
struct tg3_hw_stats *hw_stats;
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 26cc4f6378c..60d1e05ab73 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -117,7 +117,7 @@ static int xircom_open(struct net_device *dev);
static int xircom_close(struct net_device *dev);
static void xircom_up(struct xircom_private *card);
static struct net_device_stats *xircom_get_stats(struct net_device *dev);
-#if CONFIG_NET_POLL_CONTROLLER
+#ifdef CONFIG_NET_POLL_CONTROLLER
static void xircom_poll_controller(struct net_device *dev);
#endif
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 48c03c11cd9..a01efa6d5c6 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -72,7 +72,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
}
skb_reserve(skb, 4);
cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
- data = (cisco_packet*)skb->data;
+ data = (cisco_packet*)(skb->data + 4);
data->type = htonl(type);
data->par1 = htonl(par1);
diff --git a/drivers/net/wan/sdlamain.c b/drivers/net/wan/sdlamain.c
index 74e151acef3..7a8b22a7ea3 100644
--- a/drivers/net/wan/sdlamain.c
+++ b/drivers/net/wan/sdlamain.c
@@ -57,6 +57,7 @@
#include <linux/ioport.h> /* request_region(), release_region() */
#include <linux/wanrouter.h> /* WAN router definitions */
#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
+#include <linux/rcupdate.h>
#include <linux/in.h>
#include <asm/io.h> /* phys_to_virt() */
@@ -1268,37 +1269,41 @@ unsigned long get_ip_address(struct net_device *dev, int option)
struct in_ifaddr *ifaddr;
struct in_device *in_dev;
+ unsigned long addr = 0;
- if ((in_dev = __in_dev_get(dev)) == NULL){
- return 0;
+ rcu_read_lock();
+ if ((in_dev = __in_dev_get_rcu(dev)) == NULL){
+ goto out;
}
if ((ifaddr = in_dev->ifa_list)== NULL ){
- return 0;
+ goto out;
}
switch (option){
case WAN_LOCAL_IP:
- return ifaddr->ifa_local;
+ addr = ifaddr->ifa_local;
break;
case WAN_POINTOPOINT_IP:
- return ifaddr->ifa_address;
+ addr = ifaddr->ifa_address;
break;
case WAN_NETMASK_IP:
- return ifaddr->ifa_mask;
+ addr = ifaddr->ifa_mask;
break;
case WAN_BROADCAST_IP:
- return ifaddr->ifa_broadcast;
+ addr = ifaddr->ifa_broadcast;
break;
default:
- return 0;
+ break;
}
- return 0;
+out:
+ rcu_read_unlock();
+ return addr;
}
void add_gateway(sdla_t *card, struct net_device *dev)
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
index b56a7b516d2..a6d3b55013a 100644
--- a/drivers/net/wan/syncppp.c
+++ b/drivers/net/wan/syncppp.c
@@ -769,7 +769,7 @@ static void sppp_cisco_input (struct sppp *sp, struct sk_buff *skb)
u32 addr = 0, mask = ~0; /* FIXME: is the mask correct? */
#ifdef CONFIG_INET
rcu_read_lock();
- if ((in_dev = __in_dev_get(dev)) != NULL)
+ if ((in_dev = __in_dev_get_rcu(dev)) != NULL)
{
for (ifa=in_dev->ifa_list; ifa != NULL;
ifa=ifa->ifa_next) {
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 2be65d308fb..06998c2240d 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -6852,7 +6852,10 @@ static inline char *airo_translate_scan(struct net_device *dev,
/* Add frequency */
iwe.cmd = SIOCGIWFREQ;
iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
- iwe.u.freq.m = frequency_list[iwe.u.freq.m] * 100000;
+ /* iwe.u.freq.m contains the channel (starting at 1); our
+ * frequency_list array starts at index 0...
+ */
+ iwe.u.freq.m = frequency_list[iwe.u.freq.m - 1] * 100000;
iwe.u.freq.e = 1;
current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_FREQ_LEN);
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 8de49fe5723..6deb7cc810c 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -2458,7 +2458,6 @@ struct net_device *alloc_orinocodev(int sizeof_card,
dev->watchdog_timeo = HZ; /* 1 second timeout */
dev->get_stats = orinoco_get_stats;
dev->ethtool_ops = &orinoco_ethtool_ops;
- dev->get_wireless_stats = orinoco_get_wireless_stats;
dev->wireless_handlers = (struct iw_handler_def *)&orinoco_handler_def;
dev->change_mtu = orinoco_change_mtu;
dev->set_multicast_list = orinoco_set_multicast_list;
@@ -4399,6 +4398,7 @@ static const struct iw_handler_def orinoco_handler_def = {
.standard = orinoco_handler,
.private = orinoco_private_handler,
.private_args = orinoco_privtab,
+ .get_wireless_stats = orinoco_get_wireless_stats,
};
static void orinoco_get_drvinfo(struct net_device *dev,
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index d1fb1bab8aa..bedd7f9f23e 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -629,6 +629,7 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9),
+ PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae),
PCMCIA_DEVICE_PROD_ID12("D", "Link DRC-650 11Mbps WLAN Card", 0x71b18589, 0xf144e3ac),
PCMCIA_DEVICE_PROD_ID12("D", "Link DWL-650 11Mbps WLAN Card", 0x71b18589, 0xb6f1b0ab),
PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3),
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index 4b0acae22b0..7bc7fc82312 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -1352,7 +1352,7 @@ static unsigned char *strip_make_packet(unsigned char *buffer,
struct in_device *in_dev;
rcu_read_lock();
- in_dev = __in_dev_get(strip_info->dev);
+ in_dev = __in_dev_get_rcu(strip_info->dev);
if (in_dev == NULL) {
rcu_read_unlock();
return NULL;
@@ -1508,7 +1508,7 @@ static void strip_send(struct strip *strip_info, struct sk_buff *skb)
brd = addr = 0;
rcu_read_lock();
- in_dev = __in_dev_get(strip_info->dev);
+ in_dev = __in_dev_get_rcu(strip_info->dev);
if (in_dev) {
if (in_dev->ifa_list) {
brd = in_dev->ifa_list->ifa_broadcast;
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index e90fb72a696..286902298e3 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -37,6 +37,7 @@
#include <linux/proc_fs.h>
#include <linux/ctype.h>
#include <linux/blkdev.h>
+#include <linux/rcupdate.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/hardware.h>
@@ -358,9 +359,10 @@ static __inline__ int led_get_net_activity(void)
/* we are running as tasklet, so locking dev_base
* for reading should be OK */
read_lock(&dev_base_lock);
+ rcu_read_lock();
for (dev = dev_base; dev; dev = dev->next) {
struct net_device_stats *stats;
- struct in_device *in_dev = __in_dev_get(dev);
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
if (!in_dev || !in_dev->ifa_list)
continue;
if (LOOPBACK(in_dev->ifa_list->ifa_local))
@@ -371,6 +373,7 @@ static __inline__ int led_get_net_activity(void)
rx_total += stats->rx_packets;
tx_total += stats->tx_packets;
}
+ rcu_read_unlock();
read_unlock(&dev_base_lock);
retval = 0;
diff --git a/drivers/pci/hotplug.c b/drivers/pci/hotplug.c
index 10444988a10..e1743be3190 100644
--- a/drivers/pci/hotplug.c
+++ b/drivers/pci/hotplug.c
@@ -7,7 +7,6 @@ int pci_hotplug (struct device *dev, char **envp, int num_envp,
char *buffer, int buffer_size)
{
struct pci_dev *pdev;
- char *scratch;
int i = 0;
int length = 0;
@@ -18,9 +17,6 @@ int pci_hotplug (struct device *dev, char **envp, int num_envp,
if (!pdev)
return -ENODEV;
- scratch = buffer;
-
-
if (add_hotplug_env_var(envp, num_envp, &i,
buffer, buffer_size, &length,
"PCI_CLASS=%04X", pdev->class))
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index 8122fe734aa..b1ba429e0a2 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -558,7 +558,7 @@ static int configure_device (struct pci_func *func)
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_CACHE_LINE_SIZE, CACHE);
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_LATENCY_TIMER, LATENCY);
- pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_ROM_ADDRESS, 0x00L);
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, PCI_ROM_ADDRESS, 0x00L);
pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_COMMAND, DEVICEENABLE);
return 0;
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 0e094760152..898f6da6f0d 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -2526,7 +2526,6 @@ configure_new_function(struct controller *ctrl, struct pci_func *func,
int cloop;
u8 temp_byte;
u8 class_code;
- u16 temp_word;
u32 rc;
u32 temp_register;
u32 base;
@@ -2682,8 +2681,7 @@ configure_new_function(struct controller *ctrl, struct pci_func *func,
} /* End of base register loop */
/* disable ROM base Address */
- temp_word = 0x00L;
- rc = pci_bus_write_config_word (pci_bus, devfn, PCI_ROM_ADDRESS, temp_word);
+ rc = pci_bus_write_config_dword (pci_bus, devfn, PCI_ROM_ADDRESS, 0x00);
/* Set HP parameters (Cache Line Size, Latency Timer) */
rc = pciehprm_set_hpp(ctrl, func, PCI_HEADER_TYPE_NORMAL);
diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c
index 752e6513c44..db69be85b45 100644
--- a/drivers/pci/hotplug/rpadlpar_sysfs.c
+++ b/drivers/pci/hotplug/rpadlpar_sysfs.c
@@ -62,7 +62,7 @@ static ssize_t add_slot_store(struct dlpar_io_attr *dlpar_attr,
char drc_name[MAX_DRC_NAME_LEN];
char *end;
- if (nbytes > MAX_DRC_NAME_LEN)
+ if (nbytes >= MAX_DRC_NAME_LEN)
return 0;
memcpy(drc_name, buf, nbytes);
@@ -83,7 +83,7 @@ static ssize_t remove_slot_store(struct dlpar_io_attr *dlpar_attr,
char drc_name[MAX_DRC_NAME_LEN];
char *end;
- if (nbytes > MAX_DRC_NAME_LEN)
+ if (nbytes >= MAX_DRC_NAME_LEN)
return 0;
memcpy(drc_name, buf, nbytes);
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index b1409441c1c..a32ae82e592 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -159,7 +159,7 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus);
- slot = kcalloc(1, sizeof(*slot), GFP_KERNEL);
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
return -ENOMEM;
bss_hotplug_slot->private = slot;
@@ -491,7 +491,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
if (sn_pci_slot_valid(pci_bus, device) != 1)
continue;
- bss_hotplug_slot = kcalloc(1, sizeof(*bss_hotplug_slot),
+ bss_hotplug_slot = kzalloc(sizeof(*bss_hotplug_slot),
GFP_KERNEL);
if (!bss_hotplug_slot) {
rc = -ENOMEM;
@@ -499,7 +499,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
}
bss_hotplug_slot->info =
- kcalloc(1, sizeof(struct hotplug_slot_info),
+ kzalloc(sizeof(struct hotplug_slot_info),
GFP_KERNEL);
if (!bss_hotplug_slot->info) {
rc = -ENOMEM;
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 783b5abb071..91c9903e621 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -2824,8 +2824,7 @@ static int configure_new_function (struct controller * ctrl, struct pci_func * f
}
#endif
/* Disable ROM base Address */
- temp_word = 0x00L;
- rc = pci_bus_write_config_word (pci_bus, devfn, PCI_ROM_ADDRESS, temp_word);
+ rc = pci_bus_write_config_dword (pci_bus, devfn, PCI_ROM_ADDRESS, 0x00);
/* Set HP parameters (Cache Line Size, Latency Timer) */
rc = shpchprm_set_hpp(ctrl, func, PCI_HEADER_TYPE_NORMAL);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 56a3b397efe..2898830c496 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -360,7 +360,7 @@ pci_create_resource_files(struct pci_dev *pdev)
continue;
/* allocate attribute structure, piggyback attribute name */
- res_attr = kcalloc(1, sizeof(*res_attr) + 10, GFP_ATOMIC);
+ res_attr = kzalloc(sizeof(*res_attr) + 10, GFP_ATOMIC);
if (res_attr) {
char *res_attr_name = (char *)(res_attr + 1);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 992db89adce..259d247b755 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -309,17 +309,25 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
- /* If we're in D3, force entire word to 0.
+ /* If we're (effectively) in D3, force entire word to 0.
* This doesn't affect PME_Status, disables PME_En, and
* sets PowerState to 0.
*/
- if (dev->current_state >= PCI_D3hot) {
- if (!(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
+ switch (dev->current_state) {
+ case PCI_UNKNOWN: /* Boot-up */
+ if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
+ && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
need_restore = 1;
+ /* Fall-through: force to D0 */
+ case PCI_D3hot:
+ case PCI_D3cold:
+ case PCI_POWER_ERROR:
pmcsr = 0;
- } else {
+ break;
+ default:
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= state;
+ break;
}
/* enter specified state */
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 26a55d08b50..005786416bb 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -165,7 +165,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
if (l == 0xffffffff)
l = 0;
if ((l & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY) {
- sz = pci_size(l, sz, PCI_BASE_ADDRESS_MEM_MASK);
+ sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK);
if (!sz)
continue;
res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
@@ -215,7 +215,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
if (l == 0xffffffff)
l = 0;
if (sz && sz != 0xffffffff) {
- sz = pci_size(l, sz, PCI_ROM_ADDRESS_MASK);
+ sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK);
if (sz) {
res->flags = (l & IORESOURCE_ROM_ENABLE) |
IORESOURCE_MEM | IORESOURCE_PREFETCH |
@@ -402,6 +402,12 @@ static void pci_enable_crs(struct pci_dev *dev)
static void __devinit pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
{
struct pci_bus *parent = child->parent;
+
+ /* Attempts to fix that up are really dangerous unless
+ we're going to re-assign all bus numbers. */
+ if (!pcibios_assign_all_busses())
+ return;
+
while (parent->parent && parent->subordinate < max) {
parent->subordinate = max;
pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
@@ -478,8 +484,18 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max
* We need to assign a number to this bus which we always
* do in the second pass.
*/
- if (!pass)
+ if (!pass) {
+ if (pcibios_assign_all_busses())
+ /* Temporarily disable forwarding of the
+ configuration cycles on all bridges in
+ this bus segment to avoid possible
+ conflicts in the second pass between two
+ bridges programmed with overlapping
+ bus ranges. */
+ pci_write_config_dword(dev, PCI_PRIMARY_BUS,
+ buses & ~0xffffff);
return max;
+ }
/* Clear errors */
pci_write_config_word(dev, PCI_STATUS, 0xffff);
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index ddc741e6ecb..36cc9a96a33 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -146,7 +146,7 @@ config I82365
config TCIC
tristate "Databook TCIC host bridge support"
- depends on PCMCIA
+ depends on PCMCIA && ISA
select PCCARD_NONSTATIC
help
Say Y here to include support for the Databook TCIC family of PCMCIA
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index 1d755e20880..3f6d51d1137 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -228,6 +228,11 @@ int cb_alloc(struct pcmcia_socket * s)
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
cardbus_assign_irqs(bus, s->pci_irq);
+
+ /* socket specific tune function */
+ if (s->tune_bridge)
+ s->tune_bridge(s, bus);
+
pci_enable_bridges(bus);
pci_bus_add_devices(bus);
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 08d1c928826..94be9e51654 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -22,7 +22,6 @@
#include <asm/hardware.h>
#include <asm/io.h>
-#include <asm/mach-types.h>
#include <asm/sizes.h>
#include <asm/arch/mux.h>
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 39ba6406fd5..80969f7e7a0 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -376,6 +376,7 @@ static int ds_open(struct inode *inode, struct file *file)
socket_t i = iminor(inode);
struct pcmcia_socket *s;
user_info_t *user;
+ static int warning_printed = 0;
ds_dbg(0, "ds_open(socket %d)\n", i);
@@ -407,6 +408,17 @@ static int ds_open(struct inode *inode, struct file *file)
s->user = user;
file->private_data = user;
+ if (!warning_printed) {
+ printk(KERN_INFO "pcmcia: Detected deprecated PCMCIA ioctl "
+ "usage.\n");
+ printk(KERN_INFO "pcmcia: This interface will soon be removed from "
+ "the kernel; please expect breakage unless you upgrade "
+ "to new tools.\n");
+ printk(KERN_INFO "pcmcia: see http://www.kernel.org/pub/linux/"
+ "utils/kernel/pcmcia/pcmcia.html for details.\n");
+ warning_printed = 1;
+ }
+
if (s->pcmcia_state.present)
queue_event(user, CS_EVENT_CARD_INSERTION);
return 0;
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index c42455d20eb..f9a5c70284b 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -691,7 +691,7 @@ static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned
unsigned long size = end - start + 1;
int ret = 0;
- if (end <= start)
+ if (end < start)
return -EINVAL;
down(&rsrc_sem);
@@ -724,7 +724,7 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long
unsigned long size = end - start + 1;
int ret = 0;
- if (end <= start)
+ if (end < start)
return -EINVAL;
if (end > IO_SPACE_LIMIT)
@@ -817,7 +817,7 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s)
/* if we got at least one of IO, and one of MEM, we can be glad and
* activate the PCMCIA subsystem */
- if (done & (IORESOURCE_MEM | IORESOURCE_IO))
+ if (done == (IORESOURCE_MEM | IORESOURCE_IO))
s->resource_setup_done = 1;
return 0;
@@ -925,7 +925,7 @@ static ssize_t store_io_db(struct class_device *class_dev, const char *buf, size
return -EINVAL;
}
}
- if (end_addr <= start_addr)
+ if (end_addr < start_addr)
return -EINVAL;
ret = adjust_io(s, add, start_addr, end_addr);
@@ -977,7 +977,7 @@ static ssize_t store_mem_db(struct class_device *class_dev, const char *buf, siz
return -EINVAL;
}
}
- if (end_addr <= start_addr)
+ if (end_addr < start_addr)
return -EINVAL;
ret = adjust_memory(s, add, start_addr, end_addr);
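
For illustration only (not part of the patch): the nonstatic_autoadd_resources() change above replaces a bitwise test with an equality test. A small standalone sketch, using the IORESOURCE_* values of include/linux/ioport.h from this era, shows the difference when only one of the two resource types was found:

#include <stdio.h>

#define IORESOURCE_IO	0x00000100
#define IORESOURCE_MEM	0x00000200

int main(void)
{
	unsigned int done = IORESOURCE_MEM;	/* only a memory window was found */

	/* old test: true as soon as either bit is set */
	printf("old: %d\n", !!(done & (IORESOURCE_MEM | IORESOURCE_IO)));	/* prints 1 */
	/* new test: true only when both IO and MEM were found */
	printf("new: %d\n", done == (IORESOURCE_MEM | IORESOURCE_IO));		/* prints 0 */
	return 0;
}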
diff --git a/drivers/pcmcia/ti113x.h b/drivers/pcmcia/ti113x.h
index fbe233e19ce..da0b404561c 100644
--- a/drivers/pcmcia/ti113x.h
+++ b/drivers/pcmcia/ti113x.h
@@ -59,6 +59,7 @@
#define TI122X_SCR_SER_STEP 0xc0000000
#define TI122X_SCR_INTRTIE 0x20000000
+#define TIXX21_SCR_TIEALL 0x10000000
#define TI122X_SCR_CBRSVD 0x00400000
#define TI122X_SCR_MRBURSTDN 0x00008000
#define TI122X_SCR_MRBURSTUP 0x00004000
@@ -153,6 +154,12 @@
/* EnE test register */
#define ENE_TEST_C9 0xc9 /* 8bit */
#define ENE_TEST_C9_TLTENABLE 0x02
+#define ENE_TEST_C9_PFENABLE_F0 0x04
+#define ENE_TEST_C9_PFENABLE_F1 0x08
+#define ENE_TEST_C9_PFENABLE (ENE_TEST_C9_PFENABLE_F0 | ENE_TEST_C9_PFENABLE_F1)
+#define ENE_TEST_C9_WPDISALBLE_F0 0x40
+#define ENE_TEST_C9_WPDISALBLE_F1 0x80
+#define ENE_TEST_C9_WPDISALBLE (ENE_TEST_C9_WPDISALBLE_F0 | ENE_TEST_C9_WPDISALBLE_F1)
/*
* Texas Instruments CardBus controller overrides.
@@ -618,6 +625,7 @@ static int ti12xx_2nd_slot_empty(struct yenta_socket *socket)
int devfn;
unsigned int state;
int ret = 1;
+ u32 sysctl;
/* catch the two-slot controllers */
switch (socket->dev->device) {
@@ -640,6 +648,24 @@ static int ti12xx_2nd_slot_empty(struct yenta_socket *socket)
*/
break;
+ case PCI_DEVICE_ID_TI_X515:
+ case PCI_DEVICE_ID_TI_X420:
+ case PCI_DEVICE_ID_TI_X620:
+ case PCI_DEVICE_ID_TI_XX21_XX11:
+ case PCI_DEVICE_ID_TI_7410:
+ case PCI_DEVICE_ID_TI_7610:
+ /*
+ * those are either single or dual slot CB with additional functions
+ * like 1394, smartcard reader, etc. check the TIEALL flag for them
+ * the TIEALL flag binds the IRQs of all functions together.
+ * we catch the single slot variants later.
+ */
+ sysctl = config_readl(socket, TI113X_SYSTEM_CONTROL);
+ if (sysctl & TIXX21_SCR_TIEALL)
+ return 0;
+
+ break;
+
/* single-slot controllers have the 2nd slot empty always :) */
default:
return 1;
@@ -652,6 +678,15 @@ static int ti12xx_2nd_slot_empty(struct yenta_socket *socket)
if (!func)
return 1;
+ /*
+ * check that the device ids of both slots match. this is needed for the
+ * XX21 and the XX11 controller that share the same device id for single
+ * and dual slot controllers. return '2nd slot empty'. we already checked
+ * if the interrupt is tied to another function.
+ */
+ if (socket->dev->device != func->device)
+ goto out;
+
slot2 = pci_get_drvdata(func);
if (!slot2)
goto out;
@@ -791,16 +826,6 @@ static int ti12xx_override(struct yenta_socket *socket)
config_writel(socket, TI113X_SYSTEM_CONTROL, val);
/*
- * for EnE bridges only: clear testbit TLTEnable. this makes the
- * RME Hammerfall DSP sound card working.
- */
- if (socket->dev->vendor == PCI_VENDOR_ID_ENE) {
- u8 test_c9 = config_readb(socket, ENE_TEST_C9);
- test_c9 &= ~ENE_TEST_C9_TLTENABLE;
- config_writeb(socket, ENE_TEST_C9, test_c9);
- }
-
- /*
* Yenta expects controllers to use CSCINT to route
* CSC interrupts to PCI rather than INTVAL.
*/
@@ -841,5 +866,75 @@ static int ti1250_override(struct yenta_socket *socket)
return ti12xx_override(socket);
}
+
+/**
+ * EnE specific part. EnE bridges are register compatible with TI bridges but
+ * have their own test registers and, more importantly, their own little problems.
+ * Some fixup code to make everybody happy (TM).
+ */
+
+/**
+ * set/clear various test bits:
+ * Defaults to clear the bit.
+ * - mask (u8) defines what bits to change
+ * - bits (u8) is the values to change them to
+ * -> it's
+ * current = (current & ~mask) | bits
+ */
+/* pci ids of devices that want to have the bit set */
+#define DEVID(_vend,_dev,_subvend,_subdev,mask,bits) { \
+ .vendor = _vend, \
+ .device = _dev, \
+ .subvendor = _subvend, \
+ .subdevice = _subdev, \
+ .driver_data = ((mask) << 8 | (bits)), \
+ }
+static struct pci_device_id ene_tune_tbl[] = {
+ /* Echo Audio products based on motorola DSP56301 and DSP56361 */
+ DEVID(PCI_VENDOR_ID_MOTOROLA, 0x1801, 0xECC0, PCI_ANY_ID,
+ ENE_TEST_C9_TLTENABLE | ENE_TEST_C9_PFENABLE, ENE_TEST_C9_TLTENABLE),
+ DEVID(PCI_VENDOR_ID_MOTOROLA, 0x3410, 0xECC0, PCI_ANY_ID,
+ ENE_TEST_C9_TLTENABLE | ENE_TEST_C9_PFENABLE, ENE_TEST_C9_TLTENABLE),
+
+ {}
+};
+
+static void ene_tune_bridge(struct pcmcia_socket *sock, struct pci_bus *bus)
+{
+ struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket);
+ struct pci_dev *dev;
+ struct pci_device_id *id = NULL;
+ u8 test_c9, old_c9, mask, bits;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ id = (struct pci_device_id *) pci_match_id(ene_tune_tbl, dev);
+ if (id)
+ break;
+ }
+
+ test_c9 = old_c9 = config_readb(socket, ENE_TEST_C9);
+ if (id) {
+ mask = (id->driver_data >> 8) & 0xFF;
+ bits = id->driver_data & 0xFF;
+
+ test_c9 = (test_c9 & ~mask) | bits;
+ }
+ else
+ /* default to clear TLTEnable bit, old behaviour */
+ test_c9 &= ~ENE_TEST_C9_TLTENABLE;
+
+ printk(KERN_INFO "yenta EnE: chaning testregister 0xC9, %02x -> %02x\n", old_c9, test_c9);
+ config_writeb(socket, ENE_TEST_C9, test_c9);
+}
+
+
+static int ene_override(struct yenta_socket *socket)
+{
+ /* install tune_bridge() function */
+ socket->socket.tune_bridge = ene_tune_bridge;
+
+ return ti1250_override(socket);
+}
+
#endif /* _LINUX_TI113X_H */
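
For illustration only (not part of the patch): the DEVID() table above packs an 8-bit mask and an 8-bit replacement value into driver_data, and ene_tune_bridge() applies them as current = (current & ~mask) | bits. A minimal standalone sketch of that round trip, using a simplified mask/bits pair:

#include <stdio.h>

#define ENE_TEST_C9_TLTENABLE	0x02

int main(void)
{
	unsigned long driver_data;
	unsigned char mask, bits, test_c9 = 0x7d;	/* arbitrary current register value */

	/* pack, as DEVID() does: mask in bits 15..8, replacement bits in bits 7..0 */
	driver_data = (ENE_TEST_C9_TLTENABLE << 8) | ENE_TEST_C9_TLTENABLE;

	/* unpack and apply, as ene_tune_bridge() does */
	mask = (driver_data >> 8) & 0xFF;
	bits = driver_data & 0xFF;
	test_c9 = (test_c9 & ~mask) | bits;

	printf("0x%02x\n", test_c9);	/* 0x7f: TLTEnable set, all other bits untouched */
	return 0;
}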
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index f0997c36c9b..db9f952f9e3 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -559,12 +559,6 @@ static void yenta_interrogate(struct yenta_socket *socket)
static int yenta_sock_init(struct pcmcia_socket *sock)
{
struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket);
- u16 bridge;
-
- bridge = config_readw(socket, CB_BRIDGE_CONTROL) & ~CB_BRIDGE_INTR;
- if (!socket->cb_irq)
- bridge |= CB_BRIDGE_INTR;
- config_writew(socket, CB_BRIDGE_CONTROL, bridge);
exca_writeb(socket, I365_GBLCTL, 0x00);
exca_writeb(socket, I365_GENCTL, 0x00);
@@ -819,6 +813,7 @@ enum {
CARDBUS_TYPE_TOPIC95,
CARDBUS_TYPE_TOPIC97,
CARDBUS_TYPE_O2MICRO,
+ CARDBUS_TYPE_ENE,
};
/*
@@ -865,6 +860,12 @@ static struct cardbus_type cardbus_type[] = {
.override = o2micro_override,
.restore_state = o2micro_restore_state,
},
+ [CARDBUS_TYPE_ENE] = {
+ .override = ene_override,
+ .save_state = ti_save_state,
+ .restore_state = ti_restore_state,
+ .sock_init = ti_init,
+ },
};
@@ -883,16 +884,8 @@ static unsigned int yenta_probe_irq(struct yenta_socket *socket, u32 isa_irq_mas
{
int i;
unsigned long val;
- u16 bridge_ctrl;
u32 mask;
- /* Set up ISA irq routing to probe the ISA irqs.. */
- bridge_ctrl = config_readw(socket, CB_BRIDGE_CONTROL);
- if (!(bridge_ctrl & CB_BRIDGE_INTR)) {
- bridge_ctrl |= CB_BRIDGE_INTR;
- config_writew(socket, CB_BRIDGE_CONTROL, bridge_ctrl);
- }
-
/*
* Probe for usable interrupts using the force
* register to generate bogus card status events.
@@ -914,9 +907,6 @@ static unsigned int yenta_probe_irq(struct yenta_socket *socket, u32 isa_irq_mas
mask = probe_irq_mask(val) & 0xffff;
- bridge_ctrl &= ~CB_BRIDGE_INTR;
- config_writew(socket, CB_BRIDGE_CONTROL, bridge_ctrl);
-
return mask;
}
@@ -944,18 +934,11 @@ static irqreturn_t yenta_probe_handler(int irq, void *dev_id, struct pt_regs *re
/* probes the PCI interrupt, use only on override functions */
static int yenta_probe_cb_irq(struct yenta_socket *socket)
{
- u16 bridge_ctrl;
-
if (!socket->cb_irq)
return -1;
socket->probe_status = 0;
- /* disable ISA interrupts */
- bridge_ctrl = config_readw(socket, CB_BRIDGE_CONTROL);
- bridge_ctrl &= ~CB_BRIDGE_INTR;
- config_writew(socket, CB_BRIDGE_CONTROL, bridge_ctrl);
-
if (request_irq(socket->cb_irq, yenta_probe_handler, SA_SHIRQ, "yenta", socket)) {
printk(KERN_WARNING "Yenta: request_irq() in yenta_probe_cb_irq() failed!\n");
return -1;
@@ -966,7 +949,7 @@ static int yenta_probe_cb_irq(struct yenta_socket *socket)
cb_writel(socket, CB_SOCKET_EVENT, -1);
cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK);
cb_writel(socket, CB_SOCKET_FORCE, CB_FCARDSTS);
-
+
msleep(100);
/* disable interrupts */
@@ -1004,11 +987,12 @@ static void yenta_config_init(struct yenta_socket *socket)
{
u16 bridge;
struct pci_dev *dev = socket->dev;
+ struct pci_bus_region region;
- pci_set_power_state(socket->dev, 0);
+ pcibios_resource_to_bus(socket->dev, &region, &dev->resource[0]);
config_writel(socket, CB_LEGACY_MODE_BASE, 0);
- config_writel(socket, PCI_BASE_ADDRESS_0, dev->resource[0].start);
+ config_writel(socket, PCI_BASE_ADDRESS_0, region.start);
config_writew(socket, PCI_COMMAND,
PCI_COMMAND_IO |
PCI_COMMAND_MEMORY |
@@ -1031,8 +1015,8 @@ static void yenta_config_init(struct yenta_socket *socket)
* - PCI interrupts enabled if a PCI interrupt exists..
*/
bridge = config_readw(socket, CB_BRIDGE_CONTROL);
- bridge &= ~(CB_BRIDGE_CRST | CB_BRIDGE_PREFETCH1 | CB_BRIDGE_INTR | CB_BRIDGE_ISAEN | CB_BRIDGE_VGAEN);
- bridge |= CB_BRIDGE_PREFETCH0 | CB_BRIDGE_POSTEN | CB_BRIDGE_INTR;
+ bridge &= ~(CB_BRIDGE_CRST | CB_BRIDGE_PREFETCH1 | CB_BRIDGE_ISAEN | CB_BRIDGE_VGAEN);
+ bridge |= CB_BRIDGE_PREFETCH0 | CB_BRIDGE_POSTEN;
config_writew(socket, CB_BRIDGE_CONTROL, bridge);
}
@@ -1045,7 +1029,18 @@ static int __devinit yenta_probe (struct pci_dev *dev, const struct pci_device_i
{
struct yenta_socket *socket;
int ret;
-
+
+ /*
+ * If we failed to assign proper bus numbers for this cardbus
+ * controller during PCI probe, its subordinate pci_bus is NULL.
+ * Bail out if so.
+ */
+ if (!dev->subordinate) {
+ printk(KERN_ERR "Yenta: no bus associated with %s! "
+ "(try 'pci=assign-busses')\n", pci_name(dev));
+ return -ENODEV;
+ }
+
socket = kmalloc(sizeof(struct yenta_socket), GFP_KERNEL);
if (!socket)
return -ENOMEM;
@@ -1254,10 +1249,22 @@ static struct pci_device_id yenta_table [] = {
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1250, TI1250),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1410, TI1250),
- CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211, TI12XX),
- CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1225, TI12XX),
- CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1410, TI1250),
- CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1420, TI12XX),
+ CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX21_XX11, TI12XX),
+ CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_X515, TI12XX),
+ CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_X420, TI12XX),
+ CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_X620, TI12XX),
+ CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7410, TI12XX),
+ CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7510, TI12XX),
+ CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7610, TI12XX),
+
+ CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_710, TI12XX),
+ CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_712, TI12XX),
+ CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_720, TI12XX),
+ CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_722, TI12XX),
+ CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211, ENE),
+ CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1225, ENE),
+ CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1410, ENE),
+ CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1420, ENE),
CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C465, RICOH),
CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C466, RICOH),
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index aac83ce6469..a1c52a68219 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/cio/blacklist.c
* S/390 common I/O routines -- blacklisting of specific devices
- * $Revision: 1.34 $
+ * $Revision: 1.35 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
@@ -35,7 +35,7 @@
*/
/* 65536 bits to indicate if a devno is blacklisted or not */
-#define __BL_DEV_WORDS (__MAX_SUBCHANNELS + (8*sizeof(long) - 1) / \
+#define __BL_DEV_WORDS ((__MAX_SUBCHANNELS + (8*sizeof(long) - 1)) / \
(8*sizeof(long)))
static unsigned long bl_dev[__BL_DEV_WORDS];
typedef enum {add, free} range_action;
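
For illustration only (not part of the patch): the __BL_DEV_WORDS fix adds parentheses because '/' binds tighter than '+', so the old macro divided only the rounding term. With the 65536 subchannels stated in the comment above, the difference is easy to demonstrate:

#include <stdio.h>

#define MAX_SUBCHANNELS 65536UL

int main(void)
{
	unsigned long bits = 8 * sizeof(long);

	/* old: only (bits - 1) / bits == 0 gets divided, so the count is not divided at all */
	unsigned long words_old = MAX_SUBCHANNELS + (bits - 1) / bits;
	/* new: round the bit count up to whole longs */
	unsigned long words_new = (MAX_SUBCHANNELS + (bits - 1)) / bits;

	printf("old=%lu new=%lu\n", words_old, words_new);	/* 65536 vs. 1024 on 64-bit */
	return 0;
}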
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 91ea8e4777f..dbb3eb0e330 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -437,7 +437,7 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
if (cdev->dev.driver_data) {
gdev = (struct ccwgroup_device *)cdev->dev.driver_data;
if (get_device(&gdev->dev)) {
- if (klist_node_attached(&gdev->dev.knode_bus))
+ if (device_is_registered(&gdev->dev))
return gdev;
put_device(&gdev->dev);
}
diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c
index 6aeef3bacc3..0cb47eca91f 100644
--- a/drivers/s390/crypto/z90main.c
+++ b/drivers/s390/crypto/z90main.c
@@ -682,9 +682,6 @@ z90crypt_cleanup_module(void)
del_timer(&config_timer);
del_timer(&cleanup_timer);
- if (z90_device_work)
- destroy_workqueue(z90_device_work);
-
destroy_z90crypt();
PRINTKN("Unloaded.\n");
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 96ca863eaff..0db4f57a6a9 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -1,5 +1,5 @@
/*
- * $Id: ctcmain.c,v 1.74 2005/03/24 09:04:17 mschwide Exp $
+ * $Id: ctcmain.c,v 1.78 2005/09/07 12:18:02 pavlic Exp $
*
* CTC / ESCON network driver
*
@@ -37,10 +37,9 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.74 $
+ * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.78 $
*
*/
-
#undef DEBUG
#include <linux/module.h>
#include <linux/init.h>
@@ -135,7 +134,7 @@ static const char *dev_event_names[] = {
"TX down",
"Restart",
};
-
+
/**
* Events of the channel statemachine
*/
@@ -249,7 +248,7 @@ static void
print_banner(void)
{
static int printed = 0;
- char vbuf[] = "$Revision: 1.74 $";
+ char vbuf[] = "$Revision: 1.78 $";
char *version = vbuf;
if (printed)
@@ -334,7 +333,7 @@ static const char *ch_state_names[] = {
"Restarting",
"Not operational",
};
-
+
#ifdef DEBUG
/**
* Dump header and first 16 bytes of an sk_buff for debugging purposes.
@@ -671,7 +670,7 @@ static void
fsm_action_nop(fsm_instance * fi, int event, void *arg)
{
}
-
+
/**
* Actions for channel - statemachines.
*****************************************************************************/
@@ -1514,7 +1513,6 @@ ch_action_reinit(fsm_instance *fi, int event, void *arg)
fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
}
-
/**
* The statemachine for a channel.
*/
@@ -1625,7 +1623,7 @@ static const fsm_node ch_fsm[] = {
};
static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
-
+
/**
* Functions related to setup and device detection.
*****************************************************************************/
@@ -1976,7 +1974,7 @@ ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
}
-
+
/**
* Actions for interface - statemachine.
*****************************************************************************/
@@ -2209,13 +2207,18 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
int rc = 0;
DBF_TEXT(trace, 5, __FUNCTION__);
+ /* we need to acquire the lock for testing the state
+ * otherwise we can have an IRQ changing the state to
+ * TXIDLE after the test but before acquiring the lock.
+ */
+ spin_lock_irqsave(&ch->collect_lock, saveflags);
if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
int l = skb->len + LL_HEADER_LENGTH;
- spin_lock_irqsave(&ch->collect_lock, saveflags);
- if (ch->collect_len + l > ch->max_bufsize - 2)
- rc = -EBUSY;
- else {
+ if (ch->collect_len + l > ch->max_bufsize - 2) {
+ spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+ return -EBUSY;
+ } else {
atomic_inc(&skb->users);
header.length = l;
header.type = skb->protocol;
@@ -2231,7 +2234,7 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
int ccw_idx;
struct sk_buff *nskb;
unsigned long hi;
-
+ spin_unlock_irqrestore(&ch->collect_lock, saveflags);
/**
* Protect skb against beeing free'd by upper
* layers.
@@ -2256,6 +2259,7 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
if (!nskb) {
atomic_dec(&skb->users);
skb_pull(skb, LL_HEADER_LENGTH + 2);
+ ctc_clear_busy(ch->netdev);
return -ENOMEM;
} else {
memcpy(skb_put(nskb, skb->len),
@@ -2281,6 +2285,7 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
*/
atomic_dec(&skb->users);
skb_pull(skb, LL_HEADER_LENGTH + 2);
+ ctc_clear_busy(ch->netdev);
return -EBUSY;
}
@@ -2327,9 +2332,10 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
}
}
+ ctc_clear_busy(ch->netdev);
return rc;
}
-
+
/**
* Interface API for upper network layers
*****************************************************************************/
@@ -2421,7 +2427,6 @@ ctc_tx(struct sk_buff *skb, struct net_device * dev)
dev->trans_start = jiffies;
if (transmit_skb(privptr->channel[WRITE], skb) != 0)
rc = 1;
- ctc_clear_busy(dev);
return rc;
}
@@ -2610,7 +2615,6 @@ stats_write(struct device *dev, struct device_attribute *attr, const char *buf,
return count;
}
-
static void
ctc_netdev_unregister(struct net_device * dev)
{
@@ -2685,7 +2689,6 @@ ctc_proto_store(struct device *dev, struct device_attribute *attr, const char *b
return count;
}
-
static ssize_t
ctc_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index 3a0285669ad..2ad4797ce02 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -24,7 +24,7 @@
#include "qeth_mpc.h"
-#define VERSION_QETH_H "$Revision: 1.139 $"
+#define VERSION_QETH_H "$Revision: 1.142 $"
#ifdef CONFIG_QETH_IPV6
#define QETH_VERSION_IPV6 ":IPv6"
@@ -1172,7 +1172,7 @@ extern int
qeth_realloc_buffer_pool(struct qeth_card *, int);
extern int
-qeth_set_large_send(struct qeth_card *);
+qeth_set_large_send(struct qeth_card *, enum qeth_large_send_types);
extern void
qeth_fill_header(struct qeth_card *, struct qeth_hdr *,
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 79c74f3a11f..71de834ece1 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -1,6 +1,6 @@
/*
*
- * linux/drivers/s390/net/qeth_main.c ($Revision: 1.214 $)
+ * linux/drivers/s390/net/qeth_main.c ($Revision: 1.224 $)
*
* Linux on zSeries OSA Express and HiperSockets support
*
@@ -12,7 +12,7 @@
* Frank Pavlic (pavlic@de.ibm.com) and
* Thomas Spatzier <tspat@de.ibm.com>
*
- * $Revision: 1.214 $ $Date: 2005/05/04 20:19:18 $
+ * $Revision: 1.224 $ $Date: 2005/05/04 20:19:18 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -29,14 +29,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/***
- * eye catcher; just for debugging purposes
- */
-void volatile
-qeth_eyecatcher(void)
-{
- return;
-}
#include <linux/config.h>
#include <linux/module.h>
@@ -80,7 +72,7 @@ qeth_eyecatcher(void)
#include "qeth_eddp.h"
#include "qeth_tso.h"
-#define VERSION_QETH_C "$Revision: 1.214 $"
+#define VERSION_QETH_C "$Revision: 1.224 $"
static const char *version = "qeth S/390 OSA-Express driver";
/**
@@ -2759,11 +2751,9 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
queue->card->perf_stats.outbound_do_qdio_start_time;
#endif
if (rc){
- QETH_DBF_SPRINTF(trace, 0, "qeth_flush_buffers: do_QDIO "
- "returned error (%i) on device %s.",
- rc, CARD_DDEV_ID(queue->card));
QETH_DBF_TEXT(trace, 2, "flushbuf");
QETH_DBF_TEXT_(trace, 2, " err%d", rc);
+ QETH_DBF_TEXT_(trace, 2, "%s", CARD_DDEV_ID(queue->card));
queue->card->stats.tx_errors += count;
/* this must not happen under normal circumstances. if it
* happens something is really wrong -> recover */
@@ -2909,11 +2899,8 @@ qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status,
QETH_DBF_TEXT(trace, 6, "qdouhdl");
if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
- QETH_DBF_SPRINTF(trace, 2, "On device %s: "
- "received active check "
- "condition (0x%08x).",
- CARD_BUS_ID(card), status);
- QETH_DBF_TEXT(trace, 2, "chkcond");
+ QETH_DBF_TEXT(trace, 2, "achkcond");
+ QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card));
QETH_DBF_TEXT_(trace, 2, "%08x", status);
netif_stop_queue(card->dev);
qeth_schedule_recovery(card);
@@ -3356,26 +3343,32 @@ qeth_halt_channel(struct qeth_channel *channel)
static int
qeth_halt_channels(struct qeth_card *card)
{
- int rc = 0;
+ int rc1 = 0, rc2=0, rc3 = 0;
QETH_DBF_TEXT(trace,3,"haltchs");
- if ((rc = qeth_halt_channel(&card->read)))
- return rc;
- if ((rc = qeth_halt_channel(&card->write)))
- return rc;
- return qeth_halt_channel(&card->data);
+ rc1 = qeth_halt_channel(&card->read);
+ rc2 = qeth_halt_channel(&card->write);
+ rc3 = qeth_halt_channel(&card->data);
+ if (rc1)
+ return rc1;
+ if (rc2)
+ return rc2;
+ return rc3;
}
static int
qeth_clear_channels(struct qeth_card *card)
{
- int rc = 0;
+ int rc1 = 0, rc2=0, rc3 = 0;
QETH_DBF_TEXT(trace,3,"clearchs");
- if ((rc = qeth_clear_channel(&card->read)))
- return rc;
- if ((rc = qeth_clear_channel(&card->write)))
- return rc;
- return qeth_clear_channel(&card->data);
+ rc1 = qeth_clear_channel(&card->read);
+ rc2 = qeth_clear_channel(&card->write);
+ rc3 = qeth_clear_channel(&card->data);
+ if (rc1)
+ return rc1;
+ if (rc2)
+ return rc2;
+ return rc3;
}
static int
@@ -3445,23 +3438,23 @@ qeth_mpc_initialize(struct qeth_card *card)
}
if ((rc = qeth_cm_enable(card))){
QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
- return rc;
+ goto out_qdio;
}
if ((rc = qeth_cm_setup(card))){
QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
- return rc;
+ goto out_qdio;
}
if ((rc = qeth_ulp_enable(card))){
QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
- return rc;
+ goto out_qdio;
}
if ((rc = qeth_ulp_setup(card))){
QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
- return rc;
+ goto out_qdio;
}
if ((rc = qeth_alloc_qdio_buffers(card))){
QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
- return rc;
+ goto out_qdio;
}
if ((rc = qeth_qdio_establish(card))){
QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
@@ -3795,12 +3788,16 @@ static inline int
qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
struct qeth_hdr **hdr, int ipv)
{
+ int rc;
#ifdef CONFIG_QETH_VLAN
u16 *tag;
#endif
QETH_DBF_TEXT(trace, 6, "prepskb");
+ rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
+ if (rc)
+ return rc;
#ifdef CONFIG_QETH_VLAN
if (card->vlangrp && vlan_tx_tag_present(*skb) &&
((ipv == 6) || card->options.layer2) ) {
@@ -4251,7 +4248,8 @@ out:
}
static inline int
-qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb)
+qeth_get_elements_no(struct qeth_card *card, void *hdr,
+ struct sk_buff *skb, int elems)
{
int elements_needed = 0;
@@ -4261,9 +4259,10 @@ qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb)
if (elements_needed == 0 )
elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
+ skb->len) >> PAGE_SHIFT);
- if (elements_needed > QETH_MAX_BUFFER_ELEMENTS(card)){
+ if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)){
PRINT_ERR("qeth_do_send_packet: invalid size of "
- "IP packet. Discarded.");
+ "IP packet (Number=%d / Length=%d). Discarded.\n",
+ (elements_needed+elems), skb->len);
return 0;
}
return elements_needed;
@@ -4275,7 +4274,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
int ipv = 0;
int cast_type;
struct qeth_qdio_out_q *queue;
- struct qeth_hdr *hdr;
+ struct qeth_hdr *hdr = NULL;
int elements_needed = 0;
enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
struct qeth_eddp_context *ctx = NULL;
@@ -4337,9 +4336,11 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
return -EINVAL;
}
} else {
- elements_needed += qeth_get_elements_no(card,(void*) hdr, skb);
- if (!elements_needed)
+ int elems = qeth_get_elements_no(card,(void*) hdr, skb,
+ elements_needed);
+ if (!elems)
return -EINVAL;
+ elements_needed += elems;
}
if (card->info.type != QETH_CARD_TYPE_IQD)
@@ -4504,7 +4505,11 @@ qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
QETH_DBF_TEXT(trace,3,"arpstnoe");
- /* TODO: really not supported by GuestLAN? */
+ /*
+ * currently GuestLAN only supports the ARP assist function
+ * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
+ * thus we say EOPNOTSUPP for this ARP function
+ */
if (card->info.guestlan)
return -EOPNOTSUPP;
if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
@@ -4681,14 +4686,6 @@ qeth_arp_query(struct qeth_card *card, char *udata)
QETH_DBF_TEXT(trace,3,"arpquery");
- /*
- * currently GuestLAN does only deliver all zeros on query arp,
- * even though arp processing is supported (according to IPA supp.
- * funcs flags); since all zeros is no valueable information,
- * we say EOPNOTSUPP for all ARP functions
- */
- /*if (card->info.guestlan)
- return -EOPNOTSUPP; */
if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
IPA_ARP_PROCESSING)) {
PRINT_WARN("ARP processing not supported "
@@ -4894,10 +4891,9 @@ qeth_arp_add_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
QETH_DBF_TEXT(trace,3,"arpadent");
/*
- * currently GuestLAN does only deliver all zeros on query arp,
- * even though arp processing is supported (according to IPA supp.
- * funcs flags); since all zeros is no valueable information,
- * we say EOPNOTSUPP for all ARP functions
+ * currently GuestLAN only supports the ARP assist function
+ * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
+ * thus we say EOPNOTSUPP for this ARP function
*/
if (card->info.guestlan)
return -EOPNOTSUPP;
@@ -4937,10 +4933,9 @@ qeth_arp_remove_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry
QETH_DBF_TEXT(trace,3,"arprment");
/*
- * currently GuestLAN does only deliver all zeros on query arp,
- * even though arp processing is supported (according to IPA supp.
- * funcs flags); since all zeros is no valueable information,
- * we say EOPNOTSUPP for all ARP functions
+ * currently GuestLAN only supports the ARP assist function
+ * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
+ * thus we say EOPNOTSUPP for this ARP function
*/
if (card->info.guestlan)
return -EOPNOTSUPP;
@@ -4978,11 +4973,10 @@ qeth_arp_flush_cache(struct qeth_card *card)
QETH_DBF_TEXT(trace,3,"arpflush");
/*
- * currently GuestLAN does only deliver all zeros on query arp,
- * even though arp processing is supported (according to IPA supp.
- * funcs flags); since all zeros is no valueable information,
- * we say EOPNOTSUPP for all ARP functions
- */
+ * currently GuestLAN only supports the ARP assist function
+ * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
+ * thus we say EOPNOTSUPP for this ARP function
+ */
if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
return -EOPNOTSUPP;
if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
@@ -5206,7 +5200,7 @@ qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid)
if (!card->vlangrp)
return;
rcu_read_lock();
- in_dev = __in_dev_get(card->vlangrp->vlan_devices[vid]);
+ in_dev = __in_dev_get_rcu(card->vlangrp->vlan_devices[vid]);
if (!in_dev)
goto out;
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
@@ -7038,14 +7032,16 @@ qeth_setrouting_v6(struct qeth_card *card)
}
int
-qeth_set_large_send(struct qeth_card *card)
+qeth_set_large_send(struct qeth_card *card, enum qeth_large_send_types type)
{
int rc = 0;
- if (card->dev == NULL)
+ if (card->dev == NULL) {
+ card->options.large_send = type;
return 0;
-
+ }
netif_stop_queue(card->dev);
+ card->options.large_send = type;
switch (card->options.large_send) {
case QETH_LARGE_SEND_EDDP:
card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
@@ -7066,7 +7062,6 @@ qeth_set_large_send(struct qeth_card *card)
card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
break;
}
-
netif_wake_queue(card->dev);
return rc;
}
@@ -7730,7 +7725,7 @@ qeth_arp_constructor(struct neighbour *neigh)
goto out;
rcu_read_lock();
- in_dev = rcu_dereference(__in_dev_get(dev));
+ in_dev = __in_dev_get_rcu(dev);
if (in_dev == NULL) {
rcu_read_unlock();
return -EINVAL;
@@ -8257,7 +8252,6 @@ qeth_init(void)
{
int rc=0;
- qeth_eyecatcher();
PRINT_INFO("loading %s (%s/%s/%s/%s/%s/%s/%s %s %s)\n",
version, VERSION_QETH_C, VERSION_QETH_H,
VERSION_QETH_MPC_H, VERSION_QETH_MPC_C,
@@ -8338,7 +8332,6 @@ again:
printk("qeth: removed\n");
}
-EXPORT_SYMBOL(qeth_eyecatcher);
module_init(qeth_init);
module_exit(qeth_exit);
MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>");
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c
index 98bedb0cb38..dda105b7306 100644
--- a/drivers/s390/net/qeth_sys.c
+++ b/drivers/s390/net/qeth_sys.c
@@ -1,6 +1,6 @@
/*
*
- * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.51 $)
+ * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.54 $)
*
* Linux on zSeries OSA Express and HiperSockets support
* This file contains code related to sysfs.
@@ -20,7 +20,7 @@
#include "qeth_mpc.h"
#include "qeth_fs.h"
-const char *VERSION_QETH_SYS_C = "$Revision: 1.51 $";
+const char *VERSION_QETH_SYS_C = "$Revision: 1.54 $";
/*****************************************************************************/
/* */
@@ -722,10 +722,13 @@ qeth_dev_layer2_store(struct device *dev, struct device_attribute *attr, const c
if (!card)
return -EINVAL;
+ if (card->info.type == QETH_CARD_TYPE_IQD) {
+ PRINT_WARN("Layer2 on Hipersockets is not supported! \n");
+ return -EPERM;
+ }
if (((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER)) ||
- (card->info.type != QETH_CARD_TYPE_OSAE))
+ (card->state != CARD_STATE_RECOVER)))
return -EPERM;
i = simple_strtoul(buf, &tmp, 16);
@@ -771,9 +774,7 @@ qeth_dev_large_send_store(struct device *dev, struct device_attribute *attr, con
if (!card)
return -EINVAL;
-
tmp = strsep((char **) &buf, "\n");
-
if (!strcmp(tmp, "no")){
type = QETH_LARGE_SEND_NO;
} else if (!strcmp(tmp, "EDDP")) {
@@ -786,10 +787,8 @@ qeth_dev_large_send_store(struct device *dev, struct device_attribute *attr, con
}
if (card->options.large_send == type)
return count;
- card->options.large_send = type;
- if ((rc = qeth_set_large_send(card)))
+ if ((rc = qeth_set_large_send(card, type)))
return rc;
-
return count;
}
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
index fc145307a7d..d6a78f1a2f1 100644
--- a/drivers/s390/scsi/Makefile
+++ b/drivers/s390/scsi/Makefile
@@ -3,7 +3,7 @@
#
zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \
- zfcp_fsf.o zfcp_sysfs_adapter.o zfcp_sysfs_port.o \
+ zfcp_fsf.o zfcp_dbf.o zfcp_sysfs_adapter.o zfcp_sysfs_port.o \
zfcp_sysfs_unit.o zfcp_sysfs_driver.o
obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index bfe3ba73bc0..0b5087f7cab 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -122,95 +122,6 @@ _zfcp_hex_dump(char *addr, int count)
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
-static inline int
-zfcp_fsf_req_is_scsi_cmnd(struct zfcp_fsf_req *fsf_req)
-{
- return ((fsf_req->fsf_command == FSF_QTCB_FCP_CMND) &&
- !(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT));
-}
-
-void
-zfcp_cmd_dbf_event_fsf(const char *text, struct zfcp_fsf_req *fsf_req,
- void *add_data, int add_length)
-{
- struct zfcp_adapter *adapter = fsf_req->adapter;
- struct scsi_cmnd *scsi_cmnd;
- int level = 3;
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->dbf_lock, flags);
- if (zfcp_fsf_req_is_scsi_cmnd(fsf_req)) {
- scsi_cmnd = fsf_req->data.send_fcp_command_task.scsi_cmnd;
- debug_text_event(adapter->cmd_dbf, level, "fsferror");
- debug_text_event(adapter->cmd_dbf, level, text);
- debug_event(adapter->cmd_dbf, level, &fsf_req,
- sizeof (unsigned long));
- debug_event(adapter->cmd_dbf, level, &fsf_req->seq_no,
- sizeof (u32));
- debug_event(adapter->cmd_dbf, level, &scsi_cmnd,
- sizeof (unsigned long));
- debug_event(adapter->cmd_dbf, level, &scsi_cmnd->cmnd,
- min(ZFCP_CMD_DBF_LENGTH, (int)scsi_cmnd->cmd_len));
- for (i = 0; i < add_length; i += ZFCP_CMD_DBF_LENGTH)
- debug_event(adapter->cmd_dbf,
- level,
- (char *) add_data + i,
- min(ZFCP_CMD_DBF_LENGTH, add_length - i));
- }
- spin_unlock_irqrestore(&adapter->dbf_lock, flags);
-}
-
-/* XXX additionally log unit if available */
-/* ---> introduce new parameter for unit, see 2.4 code */
-void
-zfcp_cmd_dbf_event_scsi(const char *text, struct scsi_cmnd *scsi_cmnd)
-{
- struct zfcp_adapter *adapter;
- union zfcp_req_data *req_data;
- struct zfcp_fsf_req *fsf_req;
- int level = ((host_byte(scsi_cmnd->result) != 0) ? 1 : 5);
- unsigned long flags;
-
- adapter = (struct zfcp_adapter *) scsi_cmnd->device->host->hostdata[0];
- req_data = (union zfcp_req_data *) scsi_cmnd->host_scribble;
- fsf_req = (req_data ? req_data->send_fcp_command_task.fsf_req : NULL);
- spin_lock_irqsave(&adapter->dbf_lock, flags);
- debug_text_event(adapter->cmd_dbf, level, "hostbyte");
- debug_text_event(adapter->cmd_dbf, level, text);
- debug_event(adapter->cmd_dbf, level, &scsi_cmnd->result, sizeof (u32));
- debug_event(adapter->cmd_dbf, level, &scsi_cmnd,
- sizeof (unsigned long));
- debug_event(adapter->cmd_dbf, level, &scsi_cmnd->cmnd,
- min(ZFCP_CMD_DBF_LENGTH, (int)scsi_cmnd->cmd_len));
- if (likely(fsf_req)) {
- debug_event(adapter->cmd_dbf, level, &fsf_req,
- sizeof (unsigned long));
- debug_event(adapter->cmd_dbf, level, &fsf_req->seq_no,
- sizeof (u32));
- } else {
- debug_text_event(adapter->cmd_dbf, level, "");
- debug_text_event(adapter->cmd_dbf, level, "");
- }
- spin_unlock_irqrestore(&adapter->dbf_lock, flags);
-}
-
-void
-zfcp_in_els_dbf_event(struct zfcp_adapter *adapter, const char *text,
- struct fsf_status_read_buffer *status_buffer, int length)
-{
- int level = 1;
- int i;
-
- debug_text_event(adapter->in_els_dbf, level, text);
- debug_event(adapter->in_els_dbf, level, &status_buffer->d_id, 8);
- for (i = 0; i < length; i += ZFCP_IN_ELS_DBF_LENGTH)
- debug_event(adapter->in_els_dbf,
- level,
- (char *) status_buffer->payload + i,
- min(ZFCP_IN_ELS_DBF_LENGTH, length - i));
-}
-
/**
* zfcp_device_setup - setup function
* @str: pointer to parameter string
@@ -1017,81 +928,6 @@ zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
mempool_destroy(adapter->pool.data_gid_pn);
}
-/**
- * zfcp_adapter_debug_register - registers debug feature for an adapter
- * @adapter: pointer to adapter for which debug features should be registered
- * return: -ENOMEM on error, 0 otherwise
- */
-int
-zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
-{
- char dbf_name[20];
-
- /* debug feature area which records SCSI command failures (hostbyte) */
- spin_lock_init(&adapter->dbf_lock);
-
- sprintf(dbf_name, ZFCP_CMD_DBF_NAME "%s",
- zfcp_get_busid_by_adapter(adapter));
- adapter->cmd_dbf = debug_register(dbf_name, ZFCP_CMD_DBF_INDEX,
- ZFCP_CMD_DBF_AREAS,
- ZFCP_CMD_DBF_LENGTH);
- debug_register_view(adapter->cmd_dbf, &debug_hex_ascii_view);
- debug_set_level(adapter->cmd_dbf, ZFCP_CMD_DBF_LEVEL);
-
- /* debug feature area which records SCSI command aborts */
- sprintf(dbf_name, ZFCP_ABORT_DBF_NAME "%s",
- zfcp_get_busid_by_adapter(adapter));
- adapter->abort_dbf = debug_register(dbf_name, ZFCP_ABORT_DBF_INDEX,
- ZFCP_ABORT_DBF_AREAS,
- ZFCP_ABORT_DBF_LENGTH);
- debug_register_view(adapter->abort_dbf, &debug_hex_ascii_view);
- debug_set_level(adapter->abort_dbf, ZFCP_ABORT_DBF_LEVEL);
-
- /* debug feature area which records incoming ELS commands */
- sprintf(dbf_name, ZFCP_IN_ELS_DBF_NAME "%s",
- zfcp_get_busid_by_adapter(adapter));
- adapter->in_els_dbf = debug_register(dbf_name, ZFCP_IN_ELS_DBF_INDEX,
- ZFCP_IN_ELS_DBF_AREAS,
- ZFCP_IN_ELS_DBF_LENGTH);
- debug_register_view(adapter->in_els_dbf, &debug_hex_ascii_view);
- debug_set_level(adapter->in_els_dbf, ZFCP_IN_ELS_DBF_LEVEL);
-
- /* debug feature area which records erp events */
- sprintf(dbf_name, ZFCP_ERP_DBF_NAME "%s",
- zfcp_get_busid_by_adapter(adapter));
- adapter->erp_dbf = debug_register(dbf_name, ZFCP_ERP_DBF_INDEX,
- ZFCP_ERP_DBF_AREAS,
- ZFCP_ERP_DBF_LENGTH);
- debug_register_view(adapter->erp_dbf, &debug_hex_ascii_view);
- debug_set_level(adapter->erp_dbf, ZFCP_ERP_DBF_LEVEL);
-
- if (!(adapter->cmd_dbf && adapter->abort_dbf &&
- adapter->in_els_dbf && adapter->erp_dbf)) {
- zfcp_adapter_debug_unregister(adapter);
- return -ENOMEM;
- }
-
- return 0;
-
-}
-
-/**
- * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
- * @adapter: pointer to adapter for which debug features should be unregistered
- */
-void
-zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
-{
- debug_unregister(adapter->abort_dbf);
- debug_unregister(adapter->cmd_dbf);
- debug_unregister(adapter->erp_dbf);
- debug_unregister(adapter->in_els_dbf);
- adapter->abort_dbf = NULL;
- adapter->cmd_dbf = NULL;
- adapter->erp_dbf = NULL;
- adapter->in_els_dbf = NULL;
-}
-
void
zfcp_dummy_release(struct device *dev)
{
@@ -1462,10 +1298,6 @@ zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
/* see FC-FS */
no_entries = (fcp_rscn_head->payload_len / 4);
- zfcp_in_els_dbf_event(adapter, "##rscn", status_buffer,
- fcp_rscn_head->payload_len);
-
- debug_text_event(adapter->erp_dbf, 1, "unsol_els_rscn:");
for (i = 1; i < no_entries; i++) {
/* skip head and start with 1st element */
fcp_rscn_element++;
@@ -1497,8 +1329,6 @@ zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
(ZFCP_STATUS_PORT_DID_DID, &port->status)) {
ZFCP_LOG_INFO("incoming RSCN, trying to open "
"port 0x%016Lx\n", port->wwpn);
- debug_text_event(adapter->erp_dbf, 1,
- "unsol_els_rscnu:");
zfcp_erp_port_reopen(port,
ZFCP_STATUS_COMMON_ERP_FAILED);
continue;
@@ -1524,8 +1354,6 @@ zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
*/
ZFCP_LOG_INFO("incoming RSCN, trying to open "
"port 0x%016Lx\n", port->wwpn);
- debug_text_event(adapter->erp_dbf, 1,
- "unsol_els_rscnk:");
zfcp_test_link(port);
}
}
@@ -1541,8 +1369,6 @@ zfcp_fsf_incoming_els_plogi(struct zfcp_adapter *adapter,
struct zfcp_port *port;
unsigned long flags;
- zfcp_in_els_dbf_event(adapter, "##plogi", status_buffer, 28);
-
read_lock_irqsave(&zfcp_data.config_lock, flags);
list_for_each_entry(port, &adapter->port_list_head, list) {
if (port->wwpn == (*(wwn_t *) & els_logi->nport_wwn))
@@ -1556,8 +1382,6 @@ zfcp_fsf_incoming_els_plogi(struct zfcp_adapter *adapter,
status_buffer->d_id,
zfcp_get_busid_by_adapter(adapter));
} else {
- debug_text_event(adapter->erp_dbf, 1, "unsol_els_plogi:");
- debug_event(adapter->erp_dbf, 1, &els_logi->nport_wwn, 8);
zfcp_erp_port_forced_reopen(port, 0);
}
}
@@ -1570,8 +1394,6 @@ zfcp_fsf_incoming_els_logo(struct zfcp_adapter *adapter,
struct zfcp_port *port;
unsigned long flags;
- zfcp_in_els_dbf_event(adapter, "##logo", status_buffer, 16);
-
read_lock_irqsave(&zfcp_data.config_lock, flags);
list_for_each_entry(port, &adapter->port_list_head, list) {
if (port->wwpn == els_logo->nport_wwpn)
@@ -1585,8 +1407,6 @@ zfcp_fsf_incoming_els_logo(struct zfcp_adapter *adapter,
status_buffer->d_id,
zfcp_get_busid_by_adapter(adapter));
} else {
- debug_text_event(adapter->erp_dbf, 1, "unsol_els_logo:");
- debug_event(adapter->erp_dbf, 1, &els_logo->nport_wwpn, 8);
zfcp_erp_port_forced_reopen(port, 0);
}
}
@@ -1595,7 +1415,6 @@ static void
zfcp_fsf_incoming_els_unknown(struct zfcp_adapter *adapter,
struct fsf_status_read_buffer *status_buffer)
{
- zfcp_in_els_dbf_event(adapter, "##undef", status_buffer, 24);
ZFCP_LOG_NORMAL("warning: unknown incoming ELS 0x%08x "
"for adapter %s\n", *(u32 *) (status_buffer->payload),
zfcp_get_busid_by_adapter(adapter));
@@ -1609,10 +1428,11 @@ zfcp_fsf_incoming_els(struct zfcp_fsf_req *fsf_req)
u32 els_type;
struct zfcp_adapter *adapter;
- status_buffer = fsf_req->data.status_read.buffer;
+ status_buffer = (struct fsf_status_read_buffer *) fsf_req->data;
els_type = *(u32 *) (status_buffer->payload);
adapter = fsf_req->adapter;
+ zfcp_san_dbf_event_incoming_els(fsf_req);
if (els_type == LS_PLOGI)
zfcp_fsf_incoming_els_plogi(adapter, status_buffer);
else if (els_type == LS_LOGO)
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index b30abab77da..0fc46381fc2 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -202,19 +202,9 @@ static int
zfcp_ccw_set_offline(struct ccw_device *ccw_device)
{
struct zfcp_adapter *adapter;
- struct zfcp_port *port;
- struct fc_rport *rport;
down(&zfcp_data.config_sema);
adapter = dev_get_drvdata(&ccw_device->dev);
- /* might be racy, but we cannot take config_lock due to the fact that
- fc_remote_port_delete might sleep */
- list_for_each_entry(port, &adapter->port_list_head, list)
- if (port->rport) {
- rport = port->rport;
- port->rport = NULL;
- fc_remote_port_delete(rport);
- }
zfcp_erp_adapter_shutdown(adapter, 0);
zfcp_erp_wait(adapter);
zfcp_adapter_scsi_unregister(adapter);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
new file mode 100644
index 00000000000..826fb3b0060
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -0,0 +1,995 @@
+/*
+ *
+ * linux/drivers/s390/scsi/zfcp_dbf.c
+ *
+ * FCP adapter driver for IBM eServer zSeries
+ *
+ * Debugging facilities
+ *
+ * (C) Copyright IBM Corp. 2005
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define ZFCP_DBF_REVISION "$Revision$"
+
+#include <asm/debug.h>
+#include <linux/ctype.h>
+#include "zfcp_ext.h"
+
+static u32 dbfsize = 4;
+
+module_param(dbfsize, uint, 0400);
+MODULE_PARM_DESC(dbfsize,
+ "number of pages for each debug feature area (default 4)");
+
+#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
+
+static inline int
+zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck)
+{
+ unsigned long long sec;
+ struct timespec xtime;
+ int len = 0;
+
+ stck -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
+ sec = stck >> 12;
+ do_div(sec, 1000000);
+ xtime.tv_sec = sec;
+ stck -= (sec * 1000000) << 12;
+ xtime.tv_nsec = ((stck * 1000) >> 12);
+ len += sprintf(out_buf + len, "%-24s%011lu:%06lu\n",
+ label, xtime.tv_sec, xtime.tv_nsec);
+
+ return len;
+}
+
+static int zfcp_dbf_tag(char *out_buf, const char *label, const char *tag)
+{
+ int len = 0, i;
+
+ len += sprintf(out_buf + len, "%-24s", label);
+ for (i = 0; i < ZFCP_DBF_TAG_SIZE; i++)
+ len += sprintf(out_buf + len, "%c", tag[i]);
+ len += sprintf(out_buf + len, "\n");
+
+ return len;
+}
+
+static int
+zfcp_dbf_view(char *out_buf, const char *label, const char *format, ...)
+{
+ va_list arg;
+ int len = 0;
+
+ len += sprintf(out_buf + len, "%-24s", label);
+ va_start(arg, format);
+ len += vsprintf(out_buf + len, format, arg);
+ va_end(arg);
+ len += sprintf(out_buf + len, "\n");
+
+ return len;
+}
+
+static int
+zfcp_dbf_view_dump(char *out_buf, const char *label,
+ char *buffer, int buflen, int offset, int total_size)
+{
+ int len = 0;
+
+ if (offset == 0)
+ len += sprintf(out_buf + len, "%-24s ", label);
+
+ while (buflen--) {
+ if (offset > 0) {
+ if ((offset % 32) == 0)
+ len += sprintf(out_buf + len, "\n%-24c ", ' ');
+ else if ((offset % 4) == 0)
+ len += sprintf(out_buf + len, " ");
+ }
+ len += sprintf(out_buf + len, "%02x", *buffer++);
+ if (++offset == total_size) {
+ len += sprintf(out_buf + len, "\n");
+ break;
+ }
+ }
+
+ if (total_size == 0)
+ len += sprintf(out_buf + len, "\n");
+
+ return len;
+}
+
+static inline int
+zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area,
+ debug_entry_t * entry, char *out_buf)
+{
+ struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)DEBUG_DATA(entry);
+ int len = 0;
+
+ if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) {
+ len += zfcp_dbf_stck(out_buf + len, "timestamp",
+ entry->id.stck);
+ len += zfcp_dbf_view(out_buf + len, "cpu", "%02i",
+ entry->id.fields.cpuid);
+ } else {
+ len += zfcp_dbf_view_dump(out_buf + len, NULL,
+ dump->data,
+ dump->size,
+ dump->offset, dump->total_size);
+ if ((dump->offset + dump->size) == dump->total_size)
+ len += sprintf(out_buf + len, "\n");
+ }
+
+ return len;
+}
+
+inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
+{
+ struct zfcp_adapter *adapter = fsf_req->adapter;
+ struct fsf_qtcb *qtcb = fsf_req->qtcb;
+ union fsf_prot_status_qual *prot_status_qual =
+ &qtcb->prefix.prot_status_qual;
+ union fsf_status_qual *fsf_status_qual = &qtcb->header.fsf_status_qual;
+ struct scsi_cmnd *scsi_cmnd;
+ struct zfcp_port *port;
+ struct zfcp_unit *unit;
+ struct zfcp_send_els *send_els;
+ struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
+ struct zfcp_hba_dbf_record_response *response = &rec->type.response;
+ int level;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
+ memset(rec, 0, sizeof(struct zfcp_hba_dbf_record));
+ strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE);
+
+ if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
+ (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
+ strncpy(rec->tag2, "perr", ZFCP_DBF_TAG_SIZE);
+ level = 1;
+ } else if (qtcb->header.fsf_status != FSF_GOOD) {
+ strncpy(rec->tag2, "ferr", ZFCP_DBF_TAG_SIZE);
+ level = 1;
+ } else if ((fsf_req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
+ (fsf_req->fsf_command == FSF_QTCB_OPEN_LUN)) {
+ strncpy(rec->tag2, "open", ZFCP_DBF_TAG_SIZE);
+ level = 4;
+ } else if ((prot_status_qual->doubleword[0] != 0) ||
+ (prot_status_qual->doubleword[1] != 0) ||
+ (fsf_status_qual->doubleword[0] != 0) ||
+ (fsf_status_qual->doubleword[1] != 0)) {
+ strncpy(rec->tag2, "qual", ZFCP_DBF_TAG_SIZE);
+ level = 3;
+ } else {
+ strncpy(rec->tag2, "norm", ZFCP_DBF_TAG_SIZE);
+ level = 6;
+ }
+
+ response->fsf_command = fsf_req->fsf_command;
+ response->fsf_reqid = (unsigned long)fsf_req;
+ response->fsf_seqno = fsf_req->seq_no;
+ response->fsf_issued = fsf_req->issued;
+ response->fsf_prot_status = qtcb->prefix.prot_status;
+ response->fsf_status = qtcb->header.fsf_status;
+ memcpy(response->fsf_prot_status_qual,
+ prot_status_qual, FSF_PROT_STATUS_QUAL_SIZE);
+ memcpy(response->fsf_status_qual,
+ fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
+ response->fsf_req_status = fsf_req->status;
+ response->sbal_first = fsf_req->sbal_first;
+ response->sbal_curr = fsf_req->sbal_curr;
+ response->sbal_last = fsf_req->sbal_last;
+ response->pool = fsf_req->pool != NULL;
+ response->erp_action = (unsigned long)fsf_req->erp_action;
+
+ switch (fsf_req->fsf_command) {
+ case FSF_QTCB_FCP_CMND:
+ if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
+ break;
+ scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
+ if (scsi_cmnd != NULL) {
+ response->data.send_fcp.scsi_cmnd
+ = (unsigned long)scsi_cmnd;
+ response->data.send_fcp.scsi_serial
+ = scsi_cmnd->serial_number;
+ }
+ break;
+
+ case FSF_QTCB_OPEN_PORT_WITH_DID:
+ case FSF_QTCB_CLOSE_PORT:
+ case FSF_QTCB_CLOSE_PHYSICAL_PORT:
+ port = (struct zfcp_port *)fsf_req->data;
+ response->data.port.wwpn = port->wwpn;
+ response->data.port.d_id = port->d_id;
+ response->data.port.port_handle = qtcb->header.port_handle;
+ break;
+
+ case FSF_QTCB_OPEN_LUN:
+ case FSF_QTCB_CLOSE_LUN:
+ unit = (struct zfcp_unit *)fsf_req->data;
+ port = unit->port;
+ response->data.unit.wwpn = port->wwpn;
+ response->data.unit.fcp_lun = unit->fcp_lun;
+ response->data.unit.port_handle = qtcb->header.port_handle;
+ response->data.unit.lun_handle = qtcb->header.lun_handle;
+ break;
+
+ case FSF_QTCB_SEND_ELS:
+ send_els = (struct zfcp_send_els *)fsf_req->data;
+ response->data.send_els.d_id = qtcb->bottom.support.d_id;
+ response->data.send_els.ls_code = send_els->ls_code >> 24;
+ break;
+
+ case FSF_QTCB_ABORT_FCP_CMND:
+ case FSF_QTCB_SEND_GENERIC:
+ case FSF_QTCB_EXCHANGE_CONFIG_DATA:
+ case FSF_QTCB_EXCHANGE_PORT_DATA:
+ case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
+ case FSF_QTCB_UPLOAD_CONTROL_FILE:
+ break;
+ }
+
+ debug_event(adapter->hba_dbf, level,
+ rec, sizeof(struct zfcp_hba_dbf_record));
+ spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
+}
+
+inline void
+zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
+ struct fsf_status_read_buffer *status_buffer)
+{
+ struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
+ memset(rec, 0, sizeof(struct zfcp_hba_dbf_record));
+ strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE);
+ strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE);
+
+ rec->type.status.failed = adapter->status_read_failed;
+ if (status_buffer != NULL) {
+ rec->type.status.status_type = status_buffer->status_type;
+ rec->type.status.status_subtype = status_buffer->status_subtype;
+ memcpy(&rec->type.status.queue_designator,
+ &status_buffer->queue_designator,
+ sizeof(struct fsf_queue_designator));
+
+ switch (status_buffer->status_type) {
+ case FSF_STATUS_READ_SENSE_DATA_AVAIL:
+ rec->type.status.payload_size =
+ ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL;
+ break;
+
+ case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
+ rec->type.status.payload_size =
+ ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD;
+ break;
+
+ case FSF_STATUS_READ_LINK_DOWN:
+ switch (status_buffer->status_subtype) {
+ case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
+ case FSF_STATUS_READ_SUB_FDISC_FAILED:
+ rec->type.status.payload_size =
+ sizeof(struct fsf_link_down_info);
+ }
+ break;
+
+ case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
+ rec->type.status.payload_size =
+ ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT;
+ break;
+ }
+ memcpy(&rec->type.status.payload,
+ &status_buffer->payload, rec->type.status.payload_size);
+ }
+
+ debug_event(adapter->hba_dbf, 2,
+ rec, sizeof(struct zfcp_hba_dbf_record));
+ spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
+}
+
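+/*
+ * Trace a QDIO error condition (status, qdio_error, siga_error and the
+ * affected SBAL range) in the hba debug area.
+ */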
+inline void
+zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
+ unsigned int qdio_error, unsigned int siga_error,
+ int sbal_index, int sbal_count)
+{
+ struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
+ memset(rec, 0, sizeof(struct zfcp_hba_dbf_record));
+ strncpy(rec->tag, "qdio", ZFCP_DBF_TAG_SIZE);
+ rec->type.qdio.status = status;
+ rec->type.qdio.qdio_error = qdio_error;
+ rec->type.qdio.siga_error = siga_error;
+ rec->type.qdio.sbal_index = sbal_index;
+ rec->type.qdio.sbal_count = sbal_count;
+ debug_event(adapter->hba_dbf, 0,
+ rec, sizeof(struct zfcp_hba_dbf_record));
+ spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
+}
+
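+/*
+ * Format one "resp" trace record for the structured debug view,
+ * appending the command-specific fields selected when the record
+ * was written.
+ */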
+static inline int
+zfcp_hba_dbf_view_response(char *out_buf,
+ struct zfcp_hba_dbf_record_response *rec)
+{
+ int len = 0;
+
+ len += zfcp_dbf_view(out_buf + len, "fsf_command", "0x%08x",
+ rec->fsf_command);
+ len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
+ rec->fsf_reqid);
+ len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
+ rec->fsf_seqno);
+ len += zfcp_dbf_stck(out_buf + len, "fsf_issued", rec->fsf_issued);
+ len += zfcp_dbf_view(out_buf + len, "fsf_prot_status", "0x%08x",
+ rec->fsf_prot_status);
+ len += zfcp_dbf_view(out_buf + len, "fsf_status", "0x%08x",
+ rec->fsf_status);
+ len += zfcp_dbf_view_dump(out_buf + len, "fsf_prot_status_qual",
+ rec->fsf_prot_status_qual,
+ FSF_PROT_STATUS_QUAL_SIZE,
+ 0, FSF_PROT_STATUS_QUAL_SIZE);
+ len += zfcp_dbf_view_dump(out_buf + len, "fsf_status_qual",
+ rec->fsf_status_qual,
+ FSF_STATUS_QUALIFIER_SIZE,
+ 0, FSF_STATUS_QUALIFIER_SIZE);
+ len += zfcp_dbf_view(out_buf + len, "fsf_req_status", "0x%08x",
+ rec->fsf_req_status);
+ len += zfcp_dbf_view(out_buf + len, "sbal_first", "0x%02x",
+ rec->sbal_first);
+ len += zfcp_dbf_view(out_buf + len, "sbal_curr", "0x%02x",
+ rec->sbal_curr);
+ len += zfcp_dbf_view(out_buf + len, "sbal_last", "0x%02x",
+ rec->sbal_last);
+ len += zfcp_dbf_view(out_buf + len, "pool", "0x%02x", rec->pool);
+
+ switch (rec->fsf_command) {
+ case FSF_QTCB_FCP_CMND:
+ if (rec->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
+ break;
+ len += zfcp_dbf_view(out_buf + len, "scsi_cmnd", "0x%0Lx",
+ rec->data.send_fcp.scsi_cmnd);
+ len += zfcp_dbf_view(out_buf + len, "scsi_serial", "0x%016Lx",
+ rec->data.send_fcp.scsi_serial);
+ break;
+
+ case FSF_QTCB_OPEN_PORT_WITH_DID:
+ case FSF_QTCB_CLOSE_PORT:
+ case FSF_QTCB_CLOSE_PHYSICAL_PORT:
+ len += zfcp_dbf_view(out_buf + len, "wwpn", "0x%016Lx",
+ rec->data.port.wwpn);
+ len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x",
+ rec->data.port.d_id);
+ len += zfcp_dbf_view(out_buf + len, "port_handle", "0x%08x",
+ rec->data.port.port_handle);
+ break;
+
+ case FSF_QTCB_OPEN_LUN:
+ case FSF_QTCB_CLOSE_LUN:
+ len += zfcp_dbf_view(out_buf + len, "wwpn", "0x%016Lx",
+ rec->data.unit.wwpn);
+ len += zfcp_dbf_view(out_buf + len, "fcp_lun", "0x%016Lx",
+ rec->data.unit.fcp_lun);
+ len += zfcp_dbf_view(out_buf + len, "port_handle", "0x%08x",
+ rec->data.unit.port_handle);
+ len += zfcp_dbf_view(out_buf + len, "lun_handle", "0x%08x",
+ rec->data.unit.lun_handle);
+ break;
+
+ case FSF_QTCB_SEND_ELS:
+ len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x",
+ rec->data.send_els.d_id);
+ len += zfcp_dbf_view(out_buf + len, "ls_code", "0x%02x",
+ rec->data.send_els.ls_code);
+ break;
+
+ case FSF_QTCB_ABORT_FCP_CMND:
+ case FSF_QTCB_SEND_GENERIC:
+ case FSF_QTCB_EXCHANGE_CONFIG_DATA:
+ case FSF_QTCB_EXCHANGE_PORT_DATA:
+ case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
+ case FSF_QTCB_UPLOAD_CONTROL_FILE:
+ break;
+ }
+
+ return len;
+}
+
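+/* Format one "stat" (unsolicited status) record for the structured view. */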
+static inline int
+zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec)
+{
+ int len = 0;
+
+ len += zfcp_dbf_view(out_buf + len, "failed", "0x%02x", rec->failed);
+ len += zfcp_dbf_view(out_buf + len, "status_type", "0x%08x",
+ rec->status_type);
+ len += zfcp_dbf_view(out_buf + len, "status_subtype", "0x%08x",
+ rec->status_subtype);
+ len += zfcp_dbf_view_dump(out_buf + len, "queue_designator",
+ (char *)&rec->queue_designator,
+ sizeof(struct fsf_queue_designator),
+ 0, sizeof(struct fsf_queue_designator));
+ len += zfcp_dbf_view_dump(out_buf + len, "payload",
+ (char *)&rec->payload,
+ rec->payload_size, 0, rec->payload_size);
+
+ return len;
+}
+
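+/* Format one "qdio" record for the structured view. */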
+static inline int
+zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec)
+{
+ int len = 0;
+
+ len += zfcp_dbf_view(out_buf + len, "status", "0x%08x", rec->status);
+ len += zfcp_dbf_view(out_buf + len, "qdio_error", "0x%08x",
+ rec->qdio_error);
+ len += zfcp_dbf_view(out_buf + len, "siga_error", "0x%08x",
+ rec->siga_error);
+ len += zfcp_dbf_view(out_buf + len, "sbal_index", "0x%02x",
+ rec->sbal_index);
+ len += zfcp_dbf_view(out_buf + len, "sbal_count", "0x%02x",
+ rec->sbal_count);
+
+ return len;
+}
+
+static int
+zfcp_hba_dbf_view_format(debug_info_t * id, struct debug_view *view,
+ char *out_buf, const char *in_buf)
+{
+ struct zfcp_hba_dbf_record *rec = (struct zfcp_hba_dbf_record *)in_buf;
+ int len = 0;
+
+ if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
+ return 0;
+
+ len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag);
+ if (isalpha(rec->tag2[0]))
+ len += zfcp_dbf_tag(out_buf + len, "tag2", rec->tag2);
+ if (strncmp(rec->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0)
+ len += zfcp_hba_dbf_view_response(out_buf + len,
+ &rec->type.response);
+ else if (strncmp(rec->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0)
+ len += zfcp_hba_dbf_view_status(out_buf + len,
+ &rec->type.status);
+ else if (strncmp(rec->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0)
+ len += zfcp_hba_dbf_view_qdio(out_buf + len, &rec->type.qdio);
+
+ len += sprintf(out_buf + len, "\n");
+
+ return len;
+}
+
+struct debug_view zfcp_hba_dbf_view = {
+ "structured",
+ NULL,
+ &zfcp_dbf_view_header,
+ &zfcp_hba_dbf_view_format,
+ NULL,
+ NULL
+};
+
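+/*
+ * Trace a CT request or response in the san debug area: "octc" records
+ * the CT_IU request header fields, "rctc" the response header fields,
+ * followed by up to ZFCP_DBF_CT_PAYLOAD bytes of payload.
+ */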
+inline void
+_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
+ u32 s_id, u32 d_id, void *buffer, int buflen)
+{
+ struct zfcp_send_ct *send_ct = (struct zfcp_send_ct *)fsf_req->data;
+ struct zfcp_port *port = send_ct->port;
+ struct zfcp_adapter *adapter = port->adapter;
+ struct ct_hdr *header = (struct ct_hdr *)buffer;
+ struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf;
+ struct zfcp_san_dbf_record_ct *ct = &rec->type.ct;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->san_dbf_lock, flags);
+ memset(rec, 0, sizeof(struct zfcp_san_dbf_record));
+ strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
+ rec->fsf_reqid = (unsigned long)fsf_req;
+ rec->fsf_seqno = fsf_req->seq_no;
+ rec->s_id = s_id;
+ rec->d_id = d_id;
+ if (strncmp(tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
+ ct->type.request.cmd_req_code = header->cmd_rsp_code;
+ ct->type.request.revision = header->revision;
+ ct->type.request.gs_type = header->gs_type;
+ ct->type.request.gs_subtype = header->gs_subtype;
+ ct->type.request.options = header->options;
+ ct->type.request.max_res_size = header->max_res_size;
+ } else if (strncmp(tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
+ ct->type.response.cmd_rsp_code = header->cmd_rsp_code;
+ ct->type.response.revision = header->revision;
+ ct->type.response.reason_code = header->reason_code;
+ ct->type.response.reason_code_expl = header->reason_code_expl;
+ ct->type.response.vendor_unique = header->vendor_unique;
+ }
+ ct->payload_size =
+ min(buflen - (int)sizeof(struct ct_hdr), ZFCP_DBF_CT_PAYLOAD);
+ memcpy(ct->payload, buffer + sizeof(struct ct_hdr), ct->payload_size);
+ debug_event(adapter->san_dbf, 3,
+ rec, sizeof(struct zfcp_san_dbf_record));
+ spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
+}
+
+inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
+{
+ struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
+ struct zfcp_port *port = ct->port;
+ struct zfcp_adapter *adapter = port->adapter;
+
+ _zfcp_san_dbf_event_common_ct("octc", fsf_req,
+ fc_host_port_id(adapter->scsi_host),
+ port->d_id, zfcp_sg_to_address(ct->req),
+ ct->req->length);
+}
+
+inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
+{
+ struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
+ struct zfcp_port *port = ct->port;
+ struct zfcp_adapter *adapter = port->adapter;
+
+ _zfcp_san_dbf_event_common_ct("rctc", fsf_req, port->d_id,
+ fc_host_port_id(adapter->scsi_host),
+ zfcp_sg_to_address(ct->resp),
+ ct->resp->length);
+}
+
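+/*
+ * Trace an ELS frame in the san debug area. Payloads larger than one
+ * trace record are continued in follow-on records tagged "dump".
+ */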
+static inline void
+_zfcp_san_dbf_event_common_els(const char *tag, int level,
+ struct zfcp_fsf_req *fsf_req, u32 s_id,
+ u32 d_id, u8 ls_code, void *buffer, int buflen)
+{
+ struct zfcp_adapter *adapter = fsf_req->adapter;
+ struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf;
+ struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
+ unsigned long flags;
+ int offset = 0;
+
+ spin_lock_irqsave(&adapter->san_dbf_lock, flags);
+ do {
+ memset(rec, 0, sizeof(struct zfcp_san_dbf_record));
+ if (offset == 0) {
+ strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
+ rec->fsf_reqid = (unsigned long)fsf_req;
+ rec->fsf_seqno = fsf_req->seq_no;
+ rec->s_id = s_id;
+ rec->d_id = d_id;
+ rec->type.els.ls_code = ls_code;
+ buflen = min(buflen, ZFCP_DBF_ELS_MAX_PAYLOAD);
+ rec->type.els.payload_size = buflen;
+ memcpy(rec->type.els.payload,
+ buffer, min(buflen, ZFCP_DBF_ELS_PAYLOAD));
+ offset += min(buflen, ZFCP_DBF_ELS_PAYLOAD);
+ } else {
+ strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
+ dump->total_size = buflen;
+ dump->offset = offset;
+ dump->size = min(buflen - offset,
+ (int)sizeof(struct zfcp_san_dbf_record)
+ - (int)sizeof(struct zfcp_dbf_dump));
+ memcpy(dump->data, buffer + offset, dump->size);
+ offset += dump->size;
+ }
+ debug_event(adapter->san_dbf, level,
+ rec, sizeof(struct zfcp_san_dbf_record));
+ } while (offset < buflen);
+ spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
+}
+
+inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
+{
+ struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
+
+ _zfcp_san_dbf_event_common_els("oels", 2, fsf_req,
+ fc_host_port_id(els->adapter->scsi_host),
+ els->d_id,
+ *(u8 *) zfcp_sg_to_address(els->req),
+ zfcp_sg_to_address(els->req),
+ els->req->length);
+}
+
+inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
+{
+ struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
+
+ _zfcp_san_dbf_event_common_els("rels", 2, fsf_req, els->d_id,
+ fc_host_port_id(els->adapter->scsi_host),
+ *(u8 *) zfcp_sg_to_address(els->req),
+ zfcp_sg_to_address(els->resp),
+ els->resp->length);
+}
+
+inline void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
+{
+ struct zfcp_adapter *adapter = fsf_req->adapter;
+ struct fsf_status_read_buffer *status_buffer =
+ (struct fsf_status_read_buffer *)fsf_req->data;
+ int length = (int)status_buffer->length -
+ (int)((void *)&status_buffer->payload - (void *)status_buffer);
+
+ _zfcp_san_dbf_event_common_els("iels", 1, fsf_req, status_buffer->d_id,
+ fc_host_port_id(adapter->scsi_host),
+ *(u8 *) status_buffer->payload,
+ (void *)status_buffer->payload, length);
+}
+
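+/*
+ * Format one san trace record ("octc", "rctc", "oels", "rels" or "iels")
+ * for the structured view; continuation records tagged "dump" are
+ * skipped here.
+ */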
+static int
+zfcp_san_dbf_view_format(debug_info_t * id, struct debug_view *view,
+ char *out_buf, const char *in_buf)
+{
+ struct zfcp_san_dbf_record *rec = (struct zfcp_san_dbf_record *)in_buf;
+ char *buffer = NULL;
+ int buflen = 0, total = 0;
+ int len = 0;
+
+ if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
+ return 0;
+
+ len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag);
+ len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
+ rec->fsf_reqid);
+ len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
+ rec->fsf_seqno);
+ len += zfcp_dbf_view(out_buf + len, "s_id", "0x%06x", rec->s_id);
+ len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x", rec->d_id);
+
+ if (strncmp(rec->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
+ len += zfcp_dbf_view(out_buf + len, "cmd_req_code", "0x%04x",
+ rec->type.ct.type.request.cmd_req_code);
+ len += zfcp_dbf_view(out_buf + len, "revision", "0x%02x",
+ rec->type.ct.type.request.revision);
+ len += zfcp_dbf_view(out_buf + len, "gs_type", "0x%02x",
+ rec->type.ct.type.request.gs_type);
+ len += zfcp_dbf_view(out_buf + len, "gs_subtype", "0x%02x",
+ rec->type.ct.type.request.gs_subtype);
+ len += zfcp_dbf_view(out_buf + len, "options", "0x%02x",
+ rec->type.ct.type.request.options);
+ len += zfcp_dbf_view(out_buf + len, "max_res_size", "0x%04x",
+ rec->type.ct.type.request.max_res_size);
+ total = rec->type.ct.payload_size;
+ buffer = rec->type.ct.payload;
+ buflen = min(total, ZFCP_DBF_CT_PAYLOAD);
+ } else if (strncmp(rec->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
+ len += zfcp_dbf_view(out_buf + len, "cmd_rsp_code", "0x%04x",
+ rec->type.ct.type.response.cmd_rsp_code);
+ len += zfcp_dbf_view(out_buf + len, "revision", "0x%02x",
+ rec->type.ct.type.response.revision);
+ len += zfcp_dbf_view(out_buf + len, "reason_code", "0x%02x",
+ rec->type.ct.type.response.reason_code);
+ len +=
+ zfcp_dbf_view(out_buf + len, "reason_code_expl", "0x%02x",
+ rec->type.ct.type.response.reason_code_expl);
+ len +=
+ zfcp_dbf_view(out_buf + len, "vendor_unique", "0x%02x",
+ rec->type.ct.type.response.vendor_unique);
+ total = rec->type.ct.payload_size;
+ buffer = rec->type.ct.payload;
+ buflen = min(total, ZFCP_DBF_CT_PAYLOAD);
+ } else if (strncmp(rec->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
+ strncmp(rec->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
+ strncmp(rec->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
+ len += zfcp_dbf_view(out_buf + len, "ls_code", "0x%02x",
+ rec->type.els.ls_code);
+ total = rec->type.els.payload_size;
+ buffer = rec->type.els.payload;
+ buflen = min(total, ZFCP_DBF_ELS_PAYLOAD);
+ }
+
+ len += zfcp_dbf_view_dump(out_buf + len, "payload",
+ buffer, buflen, 0, total);
+
+ if (buflen == total)
+ len += sprintf(out_buf + len, "\n");
+
+ return len;
+}
+
+struct debug_view zfcp_san_dbf_view = {
+ "structured",
+ NULL,
+ &zfcp_dbf_view_header,
+ &zfcp_san_dbf_view_format,
+ NULL,
+ NULL
+};
+
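+/*
+ * Common helper for the scsi debug area: record the SCSI command and
+ * its result and, if an FSF request is associated, the FCP response
+ * and sense data; oversized sense data is continued in "dump" records.
+ */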
+static inline void
+_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
+ struct zfcp_adapter *adapter,
+ struct scsi_cmnd *scsi_cmnd,
+ struct zfcp_fsf_req *new_fsf_req)
+{
+ struct zfcp_fsf_req *fsf_req =
+ (struct zfcp_fsf_req *)scsi_cmnd->host_scribble;
+ struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf;
+ struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
+ unsigned long flags;
+ struct fcp_rsp_iu *fcp_rsp;
+ char *fcp_rsp_info = NULL, *fcp_sns_info = NULL;
+ int offset = 0, buflen = 0;
+
+ spin_lock_irqsave(&adapter->scsi_dbf_lock, flags);
+ do {
+ memset(rec, 0, sizeof(struct zfcp_scsi_dbf_record));
+ if (offset == 0) {
+ strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
+ strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
+ if (scsi_cmnd->device) {
+ rec->scsi_id = scsi_cmnd->device->id;
+ rec->scsi_lun = scsi_cmnd->device->lun;
+ }
+ rec->scsi_result = scsi_cmnd->result;
+ rec->scsi_cmnd = (unsigned long)scsi_cmnd;
+ rec->scsi_serial = scsi_cmnd->serial_number;
+ memcpy(rec->scsi_opcode,
+ &scsi_cmnd->cmnd,
+ min((int)scsi_cmnd->cmd_len,
+ ZFCP_DBF_SCSI_OPCODE));
+ rec->scsi_retries = scsi_cmnd->retries;
+ rec->scsi_allowed = scsi_cmnd->allowed;
+ if (fsf_req != NULL) {
+ fcp_rsp = (struct fcp_rsp_iu *)
+ &(fsf_req->qtcb->bottom.io.fcp_rsp);
+ fcp_rsp_info =
+ zfcp_get_fcp_rsp_info_ptr(fcp_rsp);
+ fcp_sns_info =
+ zfcp_get_fcp_sns_info_ptr(fcp_rsp);
+
+ rec->type.fcp.rsp_validity =
+ fcp_rsp->validity.value;
+ rec->type.fcp.rsp_scsi_status =
+ fcp_rsp->scsi_status;
+ rec->type.fcp.rsp_resid = fcp_rsp->fcp_resid;
+ if (fcp_rsp->validity.bits.fcp_rsp_len_valid)
+ rec->type.fcp.rsp_code =
+ *(fcp_rsp_info + 3);
+ if (fcp_rsp->validity.bits.fcp_sns_len_valid) {
+ buflen = min((int)fcp_rsp->fcp_sns_len,
+ ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
+ rec->type.fcp.sns_info_len = buflen;
+ memcpy(rec->type.fcp.sns_info,
+ fcp_sns_info,
+ min(buflen,
+ ZFCP_DBF_SCSI_FCP_SNS_INFO));
+ offset += min(buflen,
+ ZFCP_DBF_SCSI_FCP_SNS_INFO);
+ }
+
+ rec->fsf_reqid = (unsigned long)fsf_req;
+ rec->fsf_seqno = fsf_req->seq_no;
+ rec->fsf_issued = fsf_req->issued;
+ }
+ if (new_fsf_req != NULL) {
+ rec->type.new_fsf_req.fsf_reqid =
+ (unsigned long)
+ new_fsf_req;
+ rec->type.new_fsf_req.fsf_seqno =
+ new_fsf_req->seq_no;
+ rec->type.new_fsf_req.fsf_issued =
+ new_fsf_req->issued;
+ }
+ } else {
+ strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
+ dump->total_size = buflen;
+ dump->offset = offset;
+ dump->size = min(buflen - offset,
+ (int)sizeof(struct
+ zfcp_scsi_dbf_record) -
+ (int)sizeof(struct zfcp_dbf_dump));
+ memcpy(dump->data, fcp_sns_info + offset, dump->size);
+ offset += dump->size;
+ }
+ debug_event(adapter->scsi_dbf, level,
+ rec, sizeof(struct zfcp_scsi_dbf_record));
+ } while (offset < buflen);
+ spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags);
+}
+
+inline void
+zfcp_scsi_dbf_event_result(const char *tag, int level,
+ struct zfcp_adapter *adapter,
+ struct scsi_cmnd *scsi_cmnd)
+{
+ _zfcp_scsi_dbf_event_common("rslt",
+ tag, level, adapter, scsi_cmnd, NULL);
+}
+
+inline void
+zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
+ struct scsi_cmnd *scsi_cmnd,
+ struct zfcp_fsf_req *new_fsf_req)
+{
+ _zfcp_scsi_dbf_event_common("abrt",
+ tag, 1, adapter, scsi_cmnd, new_fsf_req);
+}
+
+inline void
+zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
+ struct scsi_cmnd *scsi_cmnd)
+{
+ struct zfcp_adapter *adapter = unit->port->adapter;
+
+ _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst",
+ tag, 1, adapter, scsi_cmnd, NULL);
+}
+
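+/*
+ * Format one scsi trace record for the structured view: common SCSI
+ * command fields, plus the FCP response/sense fields for "rslt" records
+ * or the follow-up FSF request for abort ("abrt") and reset
+ * ("trst"/"lrst") records.
+ */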
+static int
+zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view,
+ char *out_buf, const char *in_buf)
+{
+ struct zfcp_scsi_dbf_record *rec =
+ (struct zfcp_scsi_dbf_record *)in_buf;
+ int len = 0;
+
+ if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
+ return 0;
+
+ len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag);
+ len += zfcp_dbf_tag(out_buf + len, "tag2", rec->tag2);
+ len += zfcp_dbf_view(out_buf + len, "scsi_id", "0x%08x", rec->scsi_id);
+ len += zfcp_dbf_view(out_buf + len, "scsi_lun", "0x%08x",
+ rec->scsi_lun);
+ len += zfcp_dbf_view(out_buf + len, "scsi_result", "0x%08x",
+ rec->scsi_result);
+ len += zfcp_dbf_view(out_buf + len, "scsi_cmnd", "0x%0Lx",
+ rec->scsi_cmnd);
+ len += zfcp_dbf_view(out_buf + len, "scsi_serial", "0x%016Lx",
+ rec->scsi_serial);
+ len += zfcp_dbf_view_dump(out_buf + len, "scsi_opcode",
+ rec->scsi_opcode,
+ ZFCP_DBF_SCSI_OPCODE,
+ 0, ZFCP_DBF_SCSI_OPCODE);
+ len += zfcp_dbf_view(out_buf + len, "scsi_retries", "0x%02x",
+ rec->scsi_retries);
+ len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x",
+ rec->scsi_allowed);
+ len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
+ rec->fsf_reqid);
+ len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
+ rec->fsf_seqno);
+ len += zfcp_dbf_stck(out_buf + len, "fsf_issued", rec->fsf_issued);
+ if (strncmp(rec->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) {
+ len +=
+ zfcp_dbf_view(out_buf + len, "fcp_rsp_validity", "0x%02x",
+ rec->type.fcp.rsp_validity);
+ len +=
+ zfcp_dbf_view(out_buf + len, "fcp_rsp_scsi_status",
+ "0x%02x", rec->type.fcp.rsp_scsi_status);
+ len +=
+ zfcp_dbf_view(out_buf + len, "fcp_rsp_resid", "0x%08x",
+ rec->type.fcp.rsp_resid);
+ len +=
+ zfcp_dbf_view(out_buf + len, "fcp_rsp_code", "0x%08x",
+ rec->type.fcp.rsp_code);
+ len +=
+ zfcp_dbf_view(out_buf + len, "fcp_sns_info_len", "0x%08x",
+ rec->type.fcp.sns_info_len);
+ len +=
+ zfcp_dbf_view_dump(out_buf + len, "fcp_sns_info",
+ rec->type.fcp.sns_info,
+ min((int)rec->type.fcp.sns_info_len,
+ ZFCP_DBF_SCSI_FCP_SNS_INFO), 0,
+ rec->type.fcp.sns_info_len);
+ } else if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) {
+ len += zfcp_dbf_view(out_buf + len, "fsf_reqid_abort", "0x%0Lx",
+ rec->type.new_fsf_req.fsf_reqid);
+ len += zfcp_dbf_view(out_buf + len, "fsf_seqno_abort", "0x%08x",
+ rec->type.new_fsf_req.fsf_seqno);
+ len += zfcp_dbf_stck(out_buf + len, "fsf_issued",
+ rec->type.new_fsf_req.fsf_issued);
+ } else if ((strncmp(rec->tag, "trst", ZFCP_DBF_TAG_SIZE) == 0) ||
+ (strncmp(rec->tag, "lrst", ZFCP_DBF_TAG_SIZE) == 0)) {
+ len += zfcp_dbf_view(out_buf + len, "fsf_reqid_reset", "0x%0Lx",
+ rec->type.new_fsf_req.fsf_reqid);
+ len += zfcp_dbf_view(out_buf + len, "fsf_seqno_reset", "0x%08x",
+ rec->type.new_fsf_req.fsf_seqno);
+ len += zfcp_dbf_stck(out_buf + len, "fsf_issued",
+ rec->type.new_fsf_req.fsf_issued);
+ }
+
+ len += sprintf(out_buf + len, "\n");
+
+ return len;
+}
+
+struct debug_view zfcp_scsi_dbf_view = {
+ "structured",
+ NULL,
+ &zfcp_dbf_view_header,
+ &zfcp_scsi_dbf_view_format,
+ NULL,
+ NULL
+};
+
+/**
+ * zfcp_adapter_debug_register - registers debug feature for an adapter
+ * @adapter: pointer to adapter for which debug features should be registered
+ * return: -ENOMEM on error, 0 otherwise
+ */
+int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
+{
+ char dbf_name[DEBUG_MAX_NAME_LEN];
+
+ /* debug feature area which records recovery activity */
+ spin_lock_init(&adapter->erp_dbf_lock);
+ sprintf(dbf_name, "zfcp_%s_erp", zfcp_get_busid_by_adapter(adapter));
+ adapter->erp_dbf = debug_register(dbf_name, dbfsize, 2,
+ sizeof(struct zfcp_erp_dbf_record));
+ if (!adapter->erp_dbf)
+ goto failed;
+ debug_register_view(adapter->erp_dbf, &debug_hex_ascii_view);
+ debug_set_level(adapter->erp_dbf, 3);
+
+ /* debug feature area which records HBA (FSF and QDIO) conditions */
+ spin_lock_init(&adapter->hba_dbf_lock);
+ sprintf(dbf_name, "zfcp_%s_hba", zfcp_get_busid_by_adapter(adapter));
+ adapter->hba_dbf = debug_register(dbf_name, dbfsize, 1,
+ sizeof(struct zfcp_hba_dbf_record));
+ if (!adapter->hba_dbf)
+ goto failed;
+ debug_register_view(adapter->hba_dbf, &debug_hex_ascii_view);
+ debug_register_view(adapter->hba_dbf, &zfcp_hba_dbf_view);
+ debug_set_level(adapter->hba_dbf, 3);
+
+ /* debug feature area which records SAN command failures and recovery */
+ spin_lock_init(&adapter->san_dbf_lock);
+ sprintf(dbf_name, "zfcp_%s_san", zfcp_get_busid_by_adapter(adapter));
+ adapter->san_dbf = debug_register(dbf_name, dbfsize, 1,
+ sizeof(struct zfcp_san_dbf_record));
+ if (!adapter->san_dbf)
+ goto failed;
+ debug_register_view(adapter->san_dbf, &debug_hex_ascii_view);
+ debug_register_view(adapter->san_dbf, &zfcp_san_dbf_view);
+ debug_set_level(adapter->san_dbf, 6);
+
+ /* debug feature area which records SCSI command failures and recovery */
+ spin_lock_init(&adapter->scsi_dbf_lock);
+ sprintf(dbf_name, "zfcp_%s_scsi", zfcp_get_busid_by_adapter(adapter));
+ adapter->scsi_dbf = debug_register(dbf_name, dbfsize, 1,
+ sizeof(struct zfcp_scsi_dbf_record));
+ if (!adapter->scsi_dbf)
+ goto failed;
+ debug_register_view(adapter->scsi_dbf, &debug_hex_ascii_view);
+ debug_register_view(adapter->scsi_dbf, &zfcp_scsi_dbf_view);
+ debug_set_level(adapter->scsi_dbf, 3);
+
+ return 0;
+
+ failed:
+ zfcp_adapter_debug_unregister(adapter);
+
+ return -ENOMEM;
+}
+
+/**
+ * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
+ * @adapter: pointer to adapter for which debug features should be unregistered
+ */
+void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
+{
+ debug_unregister(adapter->scsi_dbf);
+ debug_unregister(adapter->san_dbf);
+ debug_unregister(adapter->hba_dbf);
+ debug_unregister(adapter->erp_dbf);
+ adapter->scsi_dbf = NULL;
+ adapter->san_dbf = NULL;
+ adapter->hba_dbf = NULL;
+ adapter->erp_dbf = NULL;
+}
+
+#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 455e902533a..d81b737d68c 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -66,7 +66,7 @@
/********************* GENERAL DEFINES *********************************/
/* zfcp version number, it consists of major, minor, and patch-level number */
-#define ZFCP_VERSION "4.3.0"
+#define ZFCP_VERSION "4.5.0"
/**
* zfcp_sg_to_address - determine kernel address from struct scatterlist
@@ -154,13 +154,17 @@ typedef u32 scsi_lun_t;
#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100
#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7
+/* Retry 5 times every 2 seconds, then every minute */
+#define ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES 5
+#define ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP 200
+#define ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP 6000
+
/* timeout value for "default timer" for fsf requests */
#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ);
/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
typedef unsigned long long wwn_t;
-typedef unsigned int fc_id_t;
typedef unsigned long long fcp_lun_t;
/* data length field may be at variable position in FCP-2 FCP_CMND IU */
typedef unsigned int fcp_dl_t;
@@ -281,6 +285,171 @@ struct fcp_logo {
} __attribute__((packed));
/*
+ * DBF stuff
+ */
+#define ZFCP_DBF_TAG_SIZE 4
+
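+/*
+ * Continuation record: trace entries whose payload does not fit into a
+ * single record are split across follow-on records tagged "dump".
+ */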
+struct zfcp_dbf_dump {
+ u8 tag[ZFCP_DBF_TAG_SIZE];
+ u32 total_size; /* size of total dump data */
+ u32 offset; /* how much data has already been dumped */
+ u32 size; /* how much data comes with this record */
+ u8 data[]; /* dump data */
+} __attribute__ ((packed));
+
+/* FIXME: to be inflated when reworking the erp dbf */
+struct zfcp_erp_dbf_record {
+ u8 dummy[16];
+} __attribute__ ((packed));
+
+struct zfcp_hba_dbf_record_response {
+ u32 fsf_command;
+ u64 fsf_reqid;
+ u32 fsf_seqno;
+ u64 fsf_issued;
+ u32 fsf_prot_status;
+ u32 fsf_status;
+ u8 fsf_prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
+ u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
+ u32 fsf_req_status;
+ u8 sbal_first;
+ u8 sbal_curr;
+ u8 sbal_last;
+ u8 pool;
+ u64 erp_action;
+ union {
+ struct {
+ u64 scsi_cmnd;
+ u64 scsi_serial;
+ } send_fcp;
+ struct {
+ u64 wwpn;
+ u32 d_id;
+ u32 port_handle;
+ } port;
+ struct {
+ u64 wwpn;
+ u64 fcp_lun;
+ u32 port_handle;
+ u32 lun_handle;
+ } unit;
+ struct {
+ u32 d_id;
+ u8 ls_code;
+ } send_els;
+ } data;
+} __attribute__ ((packed));
+
+struct zfcp_hba_dbf_record_status {
+ u8 failed;
+ u32 status_type;
+ u32 status_subtype;
+ struct fsf_queue_designator
+ queue_designator;
+ u32 payload_size;
+#define ZFCP_DBF_UNSOL_PAYLOAD 80
+#define ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL 32
+#define ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD 56
+#define ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT (2 * sizeof(u32))
+ u8 payload[ZFCP_DBF_UNSOL_PAYLOAD];
+} __attribute__ ((packed));
+
+struct zfcp_hba_dbf_record_qdio {
+ u32 status;
+ u32 qdio_error;
+ u32 siga_error;
+ u8 sbal_index;
+ u8 sbal_count;
+} __attribute__ ((packed));
+
+struct zfcp_hba_dbf_record {
+ u8 tag[ZFCP_DBF_TAG_SIZE];
+ u8 tag2[ZFCP_DBF_TAG_SIZE];
+ union {
+ struct zfcp_hba_dbf_record_response response;
+ struct zfcp_hba_dbf_record_status status;
+ struct zfcp_hba_dbf_record_qdio qdio;
+ } type;
+} __attribute__ ((packed));
+
+struct zfcp_san_dbf_record_ct {
+ union {
+ struct {
+ u16 cmd_req_code;
+ u8 revision;
+ u8 gs_type;
+ u8 gs_subtype;
+ u8 options;
+ u16 max_res_size;
+ } request;
+ struct {
+ u16 cmd_rsp_code;
+ u8 revision;
+ u8 reason_code;
+ u8 reason_code_expl;
+ u8 vendor_unique;
+ } response;
+ } type;
+ u32 payload_size;
+#define ZFCP_DBF_CT_PAYLOAD 24
+ u8 payload[ZFCP_DBF_CT_PAYLOAD];
+} __attribute__ ((packed));
+
+struct zfcp_san_dbf_record_els {
+ u8 ls_code;
+ u32 payload_size;
+#define ZFCP_DBF_ELS_PAYLOAD 32
+#define ZFCP_DBF_ELS_MAX_PAYLOAD 1024
+ u8 payload[ZFCP_DBF_ELS_PAYLOAD];
+} __attribute__ ((packed));
+
+struct zfcp_san_dbf_record {
+ u8 tag[ZFCP_DBF_TAG_SIZE];
+ u64 fsf_reqid;
+ u32 fsf_seqno;
+ u32 s_id;
+ u32 d_id;
+ union {
+ struct zfcp_san_dbf_record_ct ct;
+ struct zfcp_san_dbf_record_els els;
+ } type;
+} __attribute__ ((packed));
+
+struct zfcp_scsi_dbf_record {
+ u8 tag[ZFCP_DBF_TAG_SIZE];
+ u8 tag2[ZFCP_DBF_TAG_SIZE];
+ u32 scsi_id;
+ u32 scsi_lun;
+ u32 scsi_result;
+ u64 scsi_cmnd;
+ u64 scsi_serial;
+#define ZFCP_DBF_SCSI_OPCODE 16
+ u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
+ u8 scsi_retries;
+ u8 scsi_allowed;
+ u64 fsf_reqid;
+ u32 fsf_seqno;
+ u64 fsf_issued;
+ union {
+ struct {
+ u64 fsf_reqid;
+ u32 fsf_seqno;
+ u64 fsf_issued;
+ } new_fsf_req;
+ struct {
+ u8 rsp_validity;
+ u8 rsp_scsi_status;
+ u32 rsp_resid;
+ u8 rsp_code;
+#define ZFCP_DBF_SCSI_FCP_SNS_INFO 16
+#define ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO 256
+ u32 sns_info_len;
+ u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO];
+ } fcp;
+ } type;
+} __attribute__ ((packed));
+
+/*
* FC-FS stuff
*/
#define R_A_TOV 10 /* seconds */
@@ -339,34 +508,6 @@ struct zfcp_rc_entry {
*/
#define ZFCP_CT_TIMEOUT (3 * R_A_TOV)
-
-/***************** S390 DEBUG FEATURE SPECIFIC DEFINES ***********************/
-
-/* debug feature entries per adapter */
-#define ZFCP_ERP_DBF_INDEX 1
-#define ZFCP_ERP_DBF_AREAS 2
-#define ZFCP_ERP_DBF_LENGTH 16
-#define ZFCP_ERP_DBF_LEVEL 3
-#define ZFCP_ERP_DBF_NAME "zfcperp"
-
-#define ZFCP_CMD_DBF_INDEX 2
-#define ZFCP_CMD_DBF_AREAS 1
-#define ZFCP_CMD_DBF_LENGTH 8
-#define ZFCP_CMD_DBF_LEVEL 3
-#define ZFCP_CMD_DBF_NAME "zfcpcmd"
-
-#define ZFCP_ABORT_DBF_INDEX 2
-#define ZFCP_ABORT_DBF_AREAS 1
-#define ZFCP_ABORT_DBF_LENGTH 8
-#define ZFCP_ABORT_DBF_LEVEL 6
-#define ZFCP_ABORT_DBF_NAME "zfcpabt"
-
-#define ZFCP_IN_ELS_DBF_INDEX 2
-#define ZFCP_IN_ELS_DBF_AREAS 1
-#define ZFCP_IN_ELS_DBF_LENGTH 8
-#define ZFCP_IN_ELS_DBF_LEVEL 6
-#define ZFCP_IN_ELS_DBF_NAME "zfcpels"
-
/******************** LOGGING MACROS AND DEFINES *****************************/
/*
@@ -501,6 +642,7 @@ do { \
#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080
#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
+#define ZFCP_STATUS_ADAPTER_XPORT_OK 0x00000800
#define ZFCP_STATUS_ADAPTER_SCSI_UP \
(ZFCP_STATUS_COMMON_UNBLOCKED | \
@@ -635,45 +777,6 @@ struct zfcp_adapter_mempool {
mempool_t *data_gid_pn;
};
-struct zfcp_exchange_config_data{
-};
-
-struct zfcp_open_port {
- struct zfcp_port *port;
-};
-
-struct zfcp_close_port {
- struct zfcp_port *port;
-};
-
-struct zfcp_open_unit {
- struct zfcp_unit *unit;
-};
-
-struct zfcp_close_unit {
- struct zfcp_unit *unit;
-};
-
-struct zfcp_close_physical_port {
- struct zfcp_port *port;
-};
-
-struct zfcp_send_fcp_command_task {
- struct zfcp_fsf_req *fsf_req;
- struct zfcp_unit *unit;
- struct scsi_cmnd *scsi_cmnd;
- unsigned long start_jiffies;
-};
-
-struct zfcp_send_fcp_command_task_management {
- struct zfcp_unit *unit;
-};
-
-struct zfcp_abort_fcp_command {
- struct zfcp_fsf_req *fsf_req;
- struct zfcp_unit *unit;
-};
-
/*
* header for CT_IU
*/
@@ -702,7 +805,7 @@ struct ct_iu_gid_pn_req {
/* FS_ACC IU and data unit for GID_PN nameserver request */
struct ct_iu_gid_pn_resp {
struct ct_hdr header;
- fc_id_t d_id;
+ u32 d_id;
} __attribute__ ((packed));
typedef void (*zfcp_send_ct_handler_t)(unsigned long);
@@ -768,7 +871,7 @@ typedef void (*zfcp_send_els_handler_t)(unsigned long);
struct zfcp_send_els {
struct zfcp_adapter *adapter;
struct zfcp_port *port;
- fc_id_t d_id;
+ u32 d_id;
struct scatterlist *req;
struct scatterlist *resp;
unsigned int req_count;
@@ -781,33 +884,6 @@ struct zfcp_send_els {
int status;
};
-struct zfcp_status_read {
- struct fsf_status_read_buffer *buffer;
-};
-
-struct zfcp_fsf_done {
- struct completion *complete;
- int status;
-};
-
-/* request specific data */
-union zfcp_req_data {
- struct zfcp_exchange_config_data exchange_config_data;
- struct zfcp_open_port open_port;
- struct zfcp_close_port close_port;
- struct zfcp_open_unit open_unit;
- struct zfcp_close_unit close_unit;
- struct zfcp_close_physical_port close_physical_port;
- struct zfcp_send_fcp_command_task send_fcp_command_task;
- struct zfcp_send_fcp_command_task_management
- send_fcp_command_task_management;
- struct zfcp_abort_fcp_command abort_fcp_command;
- struct zfcp_send_ct *send_ct;
- struct zfcp_send_els *send_els;
- struct zfcp_status_read status_read;
- struct fsf_qtcb_bottom_port *port_data;
-};
-
struct zfcp_qdio_queue {
struct qdio_buffer *buffer[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
u8 free_index; /* index of next free bfr
@@ -838,21 +914,19 @@ struct zfcp_adapter {
atomic_t refcount; /* reference count */
wait_queue_head_t remove_wq; /* can be used to wait for
refcount drop to zero */
- wwn_t wwnn; /* WWNN */
- wwn_t wwpn; /* WWPN */
- fc_id_t s_id; /* N_Port ID */
wwn_t peer_wwnn; /* P2P peer WWNN */
wwn_t peer_wwpn; /* P2P peer WWPN */
- fc_id_t peer_d_id; /* P2P peer D_ID */
+ u32 peer_d_id; /* P2P peer D_ID */
+ wwn_t physical_wwpn; /* WWPN of physical port */
+ u32 physical_s_id; /* local FC port ID */
struct ccw_device *ccw_device; /* S/390 ccw device */
u8 fc_service_class;
u32 fc_topology; /* FC topology */
- u32 fc_link_speed; /* FC interface speed */
u32 hydra_version; /* Hydra version */
u32 fsf_lic_version;
- u32 supported_features;/* of FCP channel */
+ u32 adapter_features; /* FCP channel features */
+ u32 connection_features; /* host connection features */
u32 hardware_version; /* of FCP channel */
- u8 serial_number[32]; /* of hardware */
struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
unsigned short scsi_host_no; /* Assigned host number */
unsigned char name[9];
@@ -889,11 +963,18 @@ struct zfcp_adapter {
u32 erp_low_mem_count; /* nr of erp actions waiting
for memory */
struct zfcp_port *nameserver_port; /* adapter's nameserver */
- debug_info_t *erp_dbf; /* S/390 debug features */
- debug_info_t *abort_dbf;
- debug_info_t *in_els_dbf;
- debug_info_t *cmd_dbf;
- spinlock_t dbf_lock;
+ debug_info_t *erp_dbf;
+ debug_info_t *hba_dbf;
+ debug_info_t *san_dbf; /* debug feature areas */
+ debug_info_t *scsi_dbf;
+ spinlock_t erp_dbf_lock;
+ spinlock_t hba_dbf_lock;
+ spinlock_t san_dbf_lock;
+ spinlock_t scsi_dbf_lock;
+ struct zfcp_erp_dbf_record erp_dbf_buf;
+ struct zfcp_hba_dbf_record hba_dbf_buf;
+ struct zfcp_san_dbf_record san_dbf_buf;
+ struct zfcp_scsi_dbf_record scsi_dbf_buf;
struct zfcp_adapter_mempool pool; /* Adapter memory pools */
struct qdio_initialize qdio_init_data; /* for qdio_establish */
struct device generic_services; /* directory for WKA ports */
@@ -919,7 +1000,7 @@ struct zfcp_port {
atomic_t status; /* status of this remote port */
wwn_t wwnn; /* WWNN if known */
wwn_t wwpn; /* WWPN */
- fc_id_t d_id; /* D_ID */
+ u32 d_id; /* D_ID */
u32 handle; /* handle assigned by FSF */
struct zfcp_erp_action erp_action; /* pending error recovery */
atomic_t erp_counter;
@@ -963,11 +1044,13 @@ struct zfcp_fsf_req {
u32 fsf_command; /* FSF Command copy */
struct fsf_qtcb *qtcb; /* address of associated QTCB */
u32 seq_no; /* Sequence number of request */
- union zfcp_req_data data; /* Info fields of request */
+ unsigned long data; /* private data of request */
struct zfcp_erp_action *erp_action; /* used if this request is
issued on behalf of erp */
mempool_t *pool; /* used if request was alloacted
from emergency pool */
+ unsigned long long issued; /* request sent time (STCK) */
+ struct zfcp_unit *unit;
};
typedef void zfcp_fsf_req_handler_t(struct zfcp_fsf_req*);
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index cb4f612550b..023f4e558ae 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -82,6 +82,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
static int zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *);
+static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *);
static int zfcp_erp_adapter_strategy_open_fsf_statusread(
struct zfcp_erp_action *);
@@ -345,13 +346,13 @@ zfcp_erp_adisc(struct zfcp_port *port)
/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
without FC-AL-2 capability, so we don't set it */
- adisc->wwpn = adapter->wwpn;
- adisc->wwnn = adapter->wwnn;
- adisc->nport_id = adapter->s_id;
+ adisc->wwpn = fc_host_port_name(adapter->scsi_host);
+ adisc->wwnn = fc_host_node_name(adapter->scsi_host);
+ adisc->nport_id = fc_host_port_id(adapter->scsi_host);
ZFCP_LOG_INFO("ADISC request from s_id 0x%08x to d_id 0x%08x "
"(wwpn=0x%016Lx, wwnn=0x%016Lx, "
"hard_nport_id=0x%08x, nport_id=0x%08x)\n",
- adapter->s_id, send_els->d_id, (wwn_t) adisc->wwpn,
+ adisc->nport_id, send_els->d_id, (wwn_t) adisc->wwpn,
(wwn_t) adisc->wwnn, adisc->hard_nport_id,
adisc->nport_id);
@@ -404,7 +405,7 @@ zfcp_erp_adisc_handler(unsigned long data)
struct zfcp_send_els *send_els;
struct zfcp_port *port;
struct zfcp_adapter *adapter;
- fc_id_t d_id;
+ u32 d_id;
struct zfcp_ls_adisc_acc *adisc;
send_els = (struct zfcp_send_els *) data;
@@ -435,9 +436,9 @@ zfcp_erp_adisc_handler(unsigned long data)
ZFCP_LOG_INFO("ADISC response from d_id 0x%08x to s_id "
"0x%08x (wwpn=0x%016Lx, wwnn=0x%016Lx, "
"hard_nport_id=0x%08x, nport_id=0x%08x)\n",
- d_id, adapter->s_id, (wwn_t) adisc->wwpn,
- (wwn_t) adisc->wwnn, adisc->hard_nport_id,
- adisc->nport_id);
+ d_id, fc_host_port_id(adapter->scsi_host),
+ (wwn_t) adisc->wwpn, (wwn_t) adisc->wwnn,
+ adisc->hard_nport_id, adisc->nport_id);
/* set wwnn for port */
if (port->wwnn == 0)
@@ -886,7 +887,7 @@ static int
zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
{
int retval = 0;
- struct zfcp_fsf_req *fsf_req;
+ struct zfcp_fsf_req *fsf_req = NULL;
struct zfcp_adapter *adapter = erp_action->adapter;
if (erp_action->fsf_req) {
@@ -896,7 +897,7 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list)
if (fsf_req == erp_action->fsf_req)
break;
- if (fsf_req == erp_action->fsf_req) {
+ if (fsf_req && (fsf_req->erp_action == erp_action)) {
/* fsf_req still exists */
debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
debug_event(adapter->erp_dbf, 3, &fsf_req,
@@ -2258,16 +2259,21 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
static int
zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
{
- int retval;
+ int xconfig, xport;
+
+ if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
+ &erp_action->adapter->status)) {
+ zfcp_erp_adapter_strategy_open_fsf_xport(erp_action);
+ atomic_set(&erp_action->adapter->erp_counter, 0);
+ return ZFCP_ERP_FAILED;
+ }
- /* do 'exchange configuration data' */
- retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action);
- if (retval == ZFCP_ERP_FAILED)
- return retval;
+ xconfig = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action);
+ xport = zfcp_erp_adapter_strategy_open_fsf_xport(erp_action);
+ if ((xconfig == ZFCP_ERP_FAILED) || (xport == ZFCP_ERP_FAILED))
+ return ZFCP_ERP_FAILED;
- /* start the desired number of Status Reads */
- retval = zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
- return retval;
+ return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
}
/*
@@ -2291,7 +2297,9 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
&adapter->status);
ZFCP_LOG_DEBUG("Doing exchange config data\n");
+ write_lock(&adapter->erp_lock);
zfcp_erp_action_to_running(erp_action);
+ write_unlock(&adapter->erp_lock);
zfcp_erp_timeout_init(erp_action);
if (zfcp_fsf_exchange_config_data(erp_action)) {
retval = ZFCP_ERP_FAILED;
@@ -2348,6 +2356,76 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
return retval;
}
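+
+/*
+ * Exchange port data with the adapter and retry while the link is
+ * still reported as unplugged: first at the short interval, then at
+ * the long interval defined by the ZFCP_EXCHANGE_PORT_DATA_* values.
+ */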
+static int
+zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
+{
+ int retval = ZFCP_ERP_SUCCEEDED;
+ int retries;
+ int sleep;
+ struct zfcp_adapter *adapter = erp_action->adapter;
+
+ atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
+
+ for (retries = 0; ; retries++) {
+ ZFCP_LOG_DEBUG("Doing exchange port data\n");
+ zfcp_erp_action_to_running(erp_action);
+ zfcp_erp_timeout_init(erp_action);
+ if (zfcp_fsf_exchange_port_data(erp_action, adapter, NULL)) {
+ retval = ZFCP_ERP_FAILED;
+ debug_text_event(adapter->erp_dbf, 5, "a_fstx_xf");
+ ZFCP_LOG_INFO("error: initiation of exchange of "
+ "port data failed for adapter %s\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ }
+ debug_text_event(adapter->erp_dbf, 6, "a_fstx_xok");
+ ZFCP_LOG_DEBUG("Xchange underway\n");
+
+ /*
+ * Why this works:
+ * Both the normal completion handler and the timeout handler
+ * do an 'up' when the 'exchange port data' request completes
+ * or times out, so the signal to go on cannot be lost on this
+ * semaphore.
+ * Furthermore, this 'adapter_reopen' action is guaranteed to
+ * be the only pending action (the highest-level action, which
+ * prevents other actions from being created).
+ * Consequently, the wake signal recognized here _must_ belong
+ * to the 'exchange port data' request.
+ */
+ down(&adapter->erp_ready_sem);
+ if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
+ ZFCP_LOG_INFO("error: exchange of port data "
+ "for adapter %s timed out\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ }
+
+ if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
+ &adapter->status))
+ break;
+
+ ZFCP_LOG_DEBUG("host connection still initialising... "
+ "waiting and retrying...\n");
+ /* sleep a little bit before retry */
+ sleep = retries < ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES ?
+ ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP :
+ ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP;
+ msleep(jiffies_to_msecs(sleep));
+ }
+
+ if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
+ &adapter->status)) {
+ ZFCP_LOG_INFO("error: exchange of port data for "
+ "adapter %s failed\n",
+ zfcp_get_busid_by_adapter(adapter));
+ retval = ZFCP_ERP_FAILED;
+ }
+
+ return retval;
+}
+
/*
* function:
*
@@ -3194,11 +3272,19 @@ zfcp_erp_action_enqueue(int action,
/* fall through !!! */
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
- if (atomic_test_mask
- (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)
- && port->erp_action.action ==
- ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) {
- debug_text_event(adapter->erp_dbf, 4, "pf_actenq_drp");
+ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+ &port->status)) {
+ if (port->erp_action.action !=
+ ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) {
+ ZFCP_LOG_INFO("dropped erp action %i (port "
+ "0x%016Lx, action in use: %i)\n",
+ action, port->wwpn,
+ port->erp_action.action);
+ debug_text_event(adapter->erp_dbf, 4,
+ "pf_actenq_drp");
+ } else
+ debug_text_event(adapter->erp_dbf, 4,
+ "pf_actenq_drpcp");
debug_event(adapter->erp_dbf, 4, &port->wwpn,
sizeof (wwn_t));
goto out;
@@ -3589,6 +3675,9 @@ zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter)
struct zfcp_port *port;
unsigned long flags;
+ if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
+ return;
+
debug_text_event(adapter->erp_dbf, 3, "a_access_recover");
debug_event(adapter->erp_dbf, 3, &adapter->name, 8);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index cd98a2de9f8..c3782261cb5 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -96,7 +96,8 @@ extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
-extern int zfcp_fsf_exchange_port_data(struct zfcp_adapter *,
+extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *,
+ struct zfcp_adapter *,
struct fsf_qtcb_bottom_port *);
extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **,
u32, u32, struct zfcp_sg_list *);
@@ -109,7 +110,6 @@ extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *,
extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
struct zfcp_erp_action *);
extern int zfcp_fsf_send_els(struct zfcp_send_els *);
-extern int zfcp_fsf_req_wait_and_cleanup(struct zfcp_fsf_req *, int, u32 *);
extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
struct zfcp_unit *,
struct scsi_cmnd *,
@@ -182,9 +182,25 @@ extern void zfcp_erp_port_access_changed(struct zfcp_port *);
extern void zfcp_erp_unit_access_changed(struct zfcp_unit *);
/******************************** AUX ****************************************/
-extern void zfcp_cmd_dbf_event_fsf(const char *, struct zfcp_fsf_req *,
- void *, int);
-extern void zfcp_cmd_dbf_event_scsi(const char *, struct scsi_cmnd *);
-extern void zfcp_in_els_dbf_event(struct zfcp_adapter *, const char *,
- struct fsf_status_read_buffer *, int);
+extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
+extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
+ struct fsf_status_read_buffer *);
+extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *,
+ unsigned int, unsigned int, unsigned int,
+ int, int);
+
+extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
+extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
+extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
+extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *);
+extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *);
+
+extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
+ struct scsi_cmnd *);
+extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
+ struct scsi_cmnd *,
+ struct zfcp_fsf_req *);
+extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
+ struct scsi_cmnd *);
+
#endif /* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index c007b6424e7..3b0fc1163f5 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -59,6 +59,8 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *, struct timer_list *);
static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *);
static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *);
static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
+static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *,
+ struct fsf_link_down_info *);
static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *);
static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *);
@@ -285,51 +287,51 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
{
int retval = 0;
struct zfcp_adapter *adapter = fsf_req->adapter;
+ struct fsf_qtcb *qtcb = fsf_req->qtcb;
+ union fsf_prot_status_qual *prot_status_qual =
+ &qtcb->prefix.prot_status_qual;
- ZFCP_LOG_DEBUG("QTCB is at %p\n", fsf_req->qtcb);
+ zfcp_hba_dbf_event_fsf_response(fsf_req);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
ZFCP_LOG_DEBUG("fsf_req 0x%lx has been dismissed\n",
(unsigned long) fsf_req);
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
- zfcp_cmd_dbf_event_fsf("dismiss", fsf_req, NULL, 0);
goto skip_protstatus;
}
/* log additional information provided by FSF (if any) */
- if (unlikely(fsf_req->qtcb->header.log_length)) {
+ if (unlikely(qtcb->header.log_length)) {
/* do not trust them ;-) */
- if (fsf_req->qtcb->header.log_start > sizeof(struct fsf_qtcb)) {
+ if (qtcb->header.log_start > sizeof(struct fsf_qtcb)) {
ZFCP_LOG_NORMAL
("bug: ULP (FSF logging) log data starts "
"beyond end of packet header. Ignored. "
"(start=%i, size=%li)\n",
- fsf_req->qtcb->header.log_start,
+ qtcb->header.log_start,
sizeof(struct fsf_qtcb));
goto forget_log;
}
- if ((size_t) (fsf_req->qtcb->header.log_start +
- fsf_req->qtcb->header.log_length)
+ if ((size_t) (qtcb->header.log_start + qtcb->header.log_length)
> sizeof(struct fsf_qtcb)) {
ZFCP_LOG_NORMAL("bug: ULP (FSF logging) log data ends "
"beyond end of packet header. Ignored. "
"(start=%i, length=%i, size=%li)\n",
- fsf_req->qtcb->header.log_start,
- fsf_req->qtcb->header.log_length,
+ qtcb->header.log_start,
+ qtcb->header.log_length,
sizeof(struct fsf_qtcb));
goto forget_log;
}
ZFCP_LOG_TRACE("ULP log data: \n");
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
- (char *) fsf_req->qtcb +
- fsf_req->qtcb->header.log_start,
- fsf_req->qtcb->header.log_length);
+ (char *) qtcb + qtcb->header.log_start,
+ qtcb->header.log_length);
}
forget_log:
/* evaluate FSF Protocol Status */
- switch (fsf_req->qtcb->prefix.prot_status) {
+ switch (qtcb->prefix.prot_status) {
case FSF_PROT_GOOD:
case FSF_PROT_FSF_STATUS_PRESENTED:
@@ -340,14 +342,9 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
"microcode of version 0x%x, the device driver "
"only supports 0x%x. Aborting.\n",
zfcp_get_busid_by_adapter(adapter),
- fsf_req->qtcb->prefix.prot_status_qual.
- version_error.fsf_version, ZFCP_QTCB_VERSION);
- /* stop operation for this adapter */
- debug_text_exception(adapter->erp_dbf, 0, "prot_ver_err");
+ prot_status_qual->version_error.fsf_version,
+ ZFCP_QTCB_VERSION);
zfcp_erp_adapter_shutdown(adapter, 0);
- zfcp_cmd_dbf_event_fsf("qverserr", fsf_req,
- &fsf_req->qtcb->prefix.prot_status_qual,
- sizeof (union fsf_prot_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -355,16 +352,10 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
ZFCP_LOG_NORMAL("bug: Sequence number mismatch between "
"driver (0x%x) and adapter %s (0x%x). "
"Restarting all operations on this adapter.\n",
- fsf_req->qtcb->prefix.req_seq_no,
+ qtcb->prefix.req_seq_no,
zfcp_get_busid_by_adapter(adapter),
- fsf_req->qtcb->prefix.prot_status_qual.
- sequence_error.exp_req_seq_no);
- debug_text_exception(adapter->erp_dbf, 0, "prot_seq_err");
- /* restart operation on this adapter */
+ prot_status_qual->sequence_error.exp_req_seq_no);
zfcp_erp_adapter_reopen(adapter, 0);
- zfcp_cmd_dbf_event_fsf("seqnoerr", fsf_req,
- &fsf_req->qtcb->prefix.prot_status_qual,
- sizeof (union fsf_prot_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -375,116 +366,35 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
"that used on adapter %s. "
"Stopping all operations on this adapter.\n",
zfcp_get_busid_by_adapter(adapter));
- debug_text_exception(adapter->erp_dbf, 0, "prot_unsup_qtcb");
zfcp_erp_adapter_shutdown(adapter, 0);
- zfcp_cmd_dbf_event_fsf("unsqtcbt", fsf_req,
- &fsf_req->qtcb->prefix.prot_status_qual,
- sizeof (union fsf_prot_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PROT_HOST_CONNECTION_INITIALIZING:
- zfcp_cmd_dbf_event_fsf("hconinit", fsf_req,
- &fsf_req->qtcb->prefix.prot_status_qual,
- sizeof (union fsf_prot_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
&(adapter->status));
- debug_text_event(adapter->erp_dbf, 3, "prot_con_init");
break;
case FSF_PROT_DUPLICATE_REQUEST_ID:
- if (fsf_req->qtcb) {
ZFCP_LOG_NORMAL("bug: The request identifier 0x%Lx "
"to the adapter %s is ambiguous. "
- "Stopping all operations on this "
- "adapter.\n",
- *(unsigned long long *)
- (&fsf_req->qtcb->bottom.support.
- req_handle),
- zfcp_get_busid_by_adapter(adapter));
- } else {
- ZFCP_LOG_NORMAL("bug: The request identifier %p "
- "to the adapter %s is ambiguous. "
- "Stopping all operations on this "
- "adapter. "
- "(bug: got this for an unsolicited "
- "status read request)\n",
- fsf_req,
+ "Stopping all operations on this adapter.\n",
+ *(unsigned long long*)
+ (&qtcb->bottom.support.req_handle),
zfcp_get_busid_by_adapter(adapter));
- }
- debug_text_exception(adapter->erp_dbf, 0, "prot_dup_id");
zfcp_erp_adapter_shutdown(adapter, 0);
- zfcp_cmd_dbf_event_fsf("dupreqid", fsf_req,
- &fsf_req->qtcb->prefix.prot_status_qual,
- sizeof (union fsf_prot_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PROT_LINK_DOWN:
- /*
- * 'test and set' is not atomic here -
- * it's ok as long as calls to our response queue handler
- * (and thus execution of this code here) are serialized
- * by the qdio module
- */
- if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
- &adapter->status)) {
- switch (fsf_req->qtcb->prefix.prot_status_qual.
- locallink_error.code) {
- case FSF_PSQ_LINK_NOLIGHT:
- ZFCP_LOG_INFO("The local link to adapter %s "
- "is down (no light detected).\n",
- zfcp_get_busid_by_adapter(
- adapter));
- break;
- case FSF_PSQ_LINK_WRAPPLUG:
- ZFCP_LOG_INFO("The local link to adapter %s "
- "is down (wrap plug detected).\n",
- zfcp_get_busid_by_adapter(
- adapter));
- break;
- case FSF_PSQ_LINK_NOFCP:
- ZFCP_LOG_INFO("The local link to adapter %s "
- "is down (adjacent node on "
- "link does not support FCP).\n",
- zfcp_get_busid_by_adapter(
- adapter));
- break;
- default:
- ZFCP_LOG_INFO("The local link to adapter %s "
- "is down "
- "(warning: unknown reason "
- "code).\n",
- zfcp_get_busid_by_adapter(
- adapter));
- break;
-
- }
- /*
- * Due to the 'erp failed' flag the adapter won't
- * be recovered but will be just set to 'blocked'
- * state. All subordinary devices will have state
- * 'blocked' and 'erp failed', too.
- * Thus the adapter is still able to provide
- * 'link up' status without being flooded with
- * requests.
- * (note: even 'close port' is not permitted)
- */
- ZFCP_LOG_INFO("Stopping all operations for adapter "
- "%s.\n",
- zfcp_get_busid_by_adapter(adapter));
- atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
- ZFCP_STATUS_COMMON_ERP_FAILED,
- &adapter->status);
- zfcp_erp_adapter_reopen(adapter, 0);
- }
+ zfcp_fsf_link_down_info_eval(adapter,
+ &prot_status_qual->link_down_info);
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PROT_REEST_QUEUE:
- debug_text_event(adapter->erp_dbf, 1, "prot_reest_queue");
- ZFCP_LOG_INFO("The local link to adapter with "
+ ZFCP_LOG_NORMAL("The local link to adapter with "
"%s was re-plugged. "
"Re-starting operations on this adapter.\n",
zfcp_get_busid_by_adapter(adapter));
@@ -495,9 +405,6 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED
| ZFCP_STATUS_COMMON_ERP_FAILED);
- zfcp_cmd_dbf_event_fsf("reestque", fsf_req,
- &fsf_req->qtcb->prefix.prot_status_qual,
- sizeof (union fsf_prot_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -507,12 +414,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
"Restarting all operations on this "
"adapter.\n",
zfcp_get_busid_by_adapter(adapter));
- debug_text_event(adapter->erp_dbf, 0, "prot_err_sta");
- /* restart operation on this adapter */
zfcp_erp_adapter_reopen(adapter, 0);
- zfcp_cmd_dbf_event_fsf("proterrs", fsf_req,
- &fsf_req->qtcb->prefix.prot_status_qual,
- sizeof (union fsf_prot_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -524,11 +426,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
"Stopping all operations on this adapter. "
"(debug info 0x%x).\n",
zfcp_get_busid_by_adapter(adapter),
- fsf_req->qtcb->prefix.prot_status);
- debug_text_event(adapter->erp_dbf, 0, "prot_inval:");
- debug_exception(adapter->erp_dbf, 0,
- &fsf_req->qtcb->prefix.prot_status,
- sizeof (u32));
+ qtcb->prefix.prot_status);
zfcp_erp_adapter_shutdown(adapter, 0);
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
@@ -568,28 +466,18 @@ zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *fsf_req)
"(debug info 0x%x).\n",
zfcp_get_busid_by_adapter(fsf_req->adapter),
fsf_req->qtcb->header.fsf_command);
- debug_text_exception(fsf_req->adapter->erp_dbf, 0,
- "fsf_s_unknown");
zfcp_erp_adapter_shutdown(fsf_req->adapter, 0);
- zfcp_cmd_dbf_event_fsf("unknownc", fsf_req,
- &fsf_req->qtcb->header.fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_FCP_RSP_AVAILABLE:
ZFCP_LOG_DEBUG("FCP Sense data will be presented to the "
"SCSI stack.\n");
- debug_text_event(fsf_req->adapter->erp_dbf, 3, "fsf_s_rsp");
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
- debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_s_astatus");
zfcp_fsf_fsfstatus_qual_eval(fsf_req);
break;
-
- default:
- break;
}
skip_fsfstatus:
@@ -617,44 +505,28 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) {
case FSF_SQ_FCP_RSP_AVAILABLE:
- debug_text_event(fsf_req->adapter->erp_dbf, 4, "fsf_sq_rsp");
break;
case FSF_SQ_RETRY_IF_POSSIBLE:
/* The SCSI-stack may now issue retries or escalate */
- debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_sq_retry");
- zfcp_cmd_dbf_event_fsf("sqretry", fsf_req,
- &fsf_req->qtcb->header.fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_SQ_COMMAND_ABORTED:
/* Carry the aborted state on to upper layer */
- debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_sq_abort");
- zfcp_cmd_dbf_event_fsf("sqabort", fsf_req,
- &fsf_req->qtcb->header.fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_SQ_NO_RECOM:
- debug_text_exception(fsf_req->adapter->erp_dbf, 0,
- "fsf_sq_no_rec");
ZFCP_LOG_NORMAL("bug: No recommendation could be given for a"
"problem on the adapter %s "
"Stopping all operations on this adapter. ",
zfcp_get_busid_by_adapter(fsf_req->adapter));
zfcp_erp_adapter_shutdown(fsf_req->adapter, 0);
- zfcp_cmd_dbf_event_fsf("sqnrecom", fsf_req,
- &fsf_req->qtcb->header.fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_SQ_ULP_PROGRAMMING_ERROR:
ZFCP_LOG_NORMAL("error: not enough SBALs for data transfer "
"(adapter %s)\n",
zfcp_get_busid_by_adapter(fsf_req->adapter));
- debug_text_exception(fsf_req->adapter->erp_dbf, 0,
- "fsf_sq_ulp_err");
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
@@ -668,13 +540,6 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
(char *) &fsf_req->qtcb->header.fsf_status_qual,
sizeof (union fsf_status_qual));
- debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_sq_inval:");
- debug_exception(fsf_req->adapter->erp_dbf, 0,
- &fsf_req->qtcb->header.fsf_status_qual.word[0],
- sizeof (u32));
- zfcp_cmd_dbf_event_fsf("squndef", fsf_req,
- &fsf_req->qtcb->header.fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
@@ -682,6 +547,110 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
return retval;
}
+/**
+ * zfcp_fsf_link_down_info_eval - evaluate link down information block
+ */
+static void
+zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter,
+ struct fsf_link_down_info *link_down)
+{
+ switch (link_down->error_code) {
+ case FSF_PSQ_LINK_NO_LIGHT:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(no light detected)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_WRAP_PLUG:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(wrap plug detected)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_NO_FCP:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(adjacent node on link does not support FCP)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_FIRMWARE_UPDATE:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(firmware update in progress)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_INVALID_WWPN:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(duplicate or invalid WWPN detected)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(no support for NPIV by Fabric)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_NO_FCP_RESOURCES:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(out of resource in FCP daughtercard)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(out of resource in Fabric)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(unable to Fabric login)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
+ ZFCP_LOG_NORMAL("WWPN assignment file corrupted on adapter %s\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
+ ZFCP_LOG_NORMAL("Mode table corrupted on adapter %s\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
+ ZFCP_LOG_NORMAL("No WWPN for assignment table on adapter %s\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ default:
+ ZFCP_LOG_NORMAL("The local link to adapter %s is down "
+ "(warning: unknown reason code %d)\n",
+ zfcp_get_busid_by_adapter(adapter),
+ link_down->error_code);
+ }
+
+ if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
+ ZFCP_LOG_DEBUG("Debug information to link down: "
+ "primary_status=0x%02x "
+ "ioerr_code=0x%02x "
+ "action_code=0x%02x "
+ "reason_code=0x%02x "
+ "explanation_code=0x%02x "
+ "vendor_specific_code=0x%02x\n",
+ link_down->primary_status,
+ link_down->ioerr_code,
+ link_down->action_code,
+ link_down->reason_code,
+ link_down->explanation_code,
+ link_down->vendor_specific_code);
+
+ if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
+ &adapter->status)) {
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
+ &adapter->status);
+ switch (link_down->error_code) {
+ case FSF_PSQ_LINK_NO_LIGHT:
+ case FSF_PSQ_LINK_WRAP_PLUG:
+ case FSF_PSQ_LINK_NO_FCP:
+ case FSF_PSQ_LINK_FIRMWARE_UPDATE:
+ zfcp_erp_adapter_reopen(adapter, 0);
+ break;
+ default:
+ zfcp_erp_adapter_failed(adapter);
+ }
+ }
+}
+
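The tail of zfcp_fsf_link_down_info_eval() above acts only on the first link-down event and then distinguishes transient causes (plain adapter reopen) from persistent ones (recovery marked as failed). The following standalone C sketch only models that decision; the enum and function names are hypothetical and not part of the driver.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical userspace model of the reopen-vs-fail decision made at the
 * end of zfcp_fsf_link_down_info_eval(); not the kernel code itself. */
enum model_link_down_cause {
	MODEL_NO_LIGHT, MODEL_WRAP_PLUG, MODEL_NO_FCP,
	MODEL_FIRMWARE_UPDATE, MODEL_OTHER
};

static bool model_link_down_is_transient(enum model_link_down_cause cause)
{
	switch (cause) {
	case MODEL_NO_LIGHT:
	case MODEL_WRAP_PLUG:
	case MODEL_NO_FCP:
	case MODEL_FIRMWARE_UPDATE:
		return true;	/* a plain adapter reopen is enough */
	default:
		return false;	/* mark recovery failed until the link returns */
	}
}

int main(void)
{
	printf("%d %d\n",
	       model_link_down_is_transient(MODEL_FIRMWARE_UPDATE),
	       model_link_down_is_transient(MODEL_OTHER));	/* prints "1 0" */
	return 0;
}
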
/*
* function: zfcp_fsf_req_dispatch
*
@@ -696,11 +665,6 @@ zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
struct zfcp_adapter *adapter = fsf_req->adapter;
int retval = 0;
- if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
- ZFCP_LOG_TRACE("fsf_req=%p, QTCB=%p\n", fsf_req, fsf_req->qtcb);
- ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
- (char *) fsf_req->qtcb, sizeof(struct fsf_qtcb));
- }
switch (fsf_req->fsf_command) {
@@ -760,13 +724,13 @@ zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
ZFCP_LOG_NORMAL("bug: Command issued by the device driver is "
"not supported by the adapter %s\n",
- zfcp_get_busid_by_adapter(fsf_req->adapter));
+ zfcp_get_busid_by_adapter(adapter));
if (fsf_req->fsf_command != fsf_req->qtcb->header.fsf_command)
ZFCP_LOG_NORMAL
("bug: Command issued by the device driver differs "
"from the command returned by the adapter %s "
"(debug info 0x%x, 0x%x).\n",
- zfcp_get_busid_by_adapter(fsf_req->adapter),
+ zfcp_get_busid_by_adapter(adapter),
fsf_req->fsf_command,
fsf_req->qtcb->header.fsf_command);
}
@@ -774,8 +738,6 @@ zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
if (!erp_action)
return retval;
- debug_text_event(adapter->erp_dbf, 3, "a_frh");
- debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
zfcp_erp_async_handler(erp_action, 0);
return retval;
@@ -821,7 +783,7 @@ zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
goto failed_buf;
}
memset(status_buffer, 0, sizeof (struct fsf_status_read_buffer));
- fsf_req->data.status_read.buffer = status_buffer;
+ fsf_req->data = (unsigned long) status_buffer;
/* insert pointer to respective buffer */
sbale = zfcp_qdio_sbale_curr(fsf_req);
@@ -846,6 +808,7 @@ zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
failed_buf:
zfcp_fsf_req_free(fsf_req);
failed_req_create:
+ zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
out:
write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
return retval;
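
The hunks in this area replace the old per-command union in struct zfcp_fsf_req with a plain unsigned long data member: each request stores its context pointer there and the handler casts it back. A standalone sketch of that pattern follows; the types and names are hypothetical and only model the idiom, they are not the driver's structures.

#include <assert.h>
#include <stdio.h>

/* Hypothetical request and payload types modelling fsf_req->data usage. */
struct model_status_buffer { unsigned int status_type; };
struct model_req { unsigned long data; };

static void model_submit(struct model_req *req, struct model_status_buffer *buf)
{
	req->data = (unsigned long) buf;	/* store context as an integer */
}

static void model_handler(struct model_req *req)
{
	struct model_status_buffer *buf =
		(struct model_status_buffer *) req->data;	/* cast back */
	printf("status_type=%u\n", buf->status_type);
}

int main(void)
{
	struct model_status_buffer buf = { .status_type = 5 };
	struct model_req req;

	assert(sizeof(unsigned long) >= sizeof(void *));	/* the pattern's premise */
	model_submit(&req, &buf);
	model_handler(&req);
	return 0;
}
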
@@ -859,7 +822,7 @@ zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req)
struct zfcp_port *port;
unsigned long flags;
- status_buffer = fsf_req->data.status_read.buffer;
+ status_buffer = (struct fsf_status_read_buffer *) fsf_req->data;
adapter = fsf_req->adapter;
read_lock_irqsave(&zfcp_data.config_lock, flags);
@@ -918,38 +881,33 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
int retval = 0;
struct zfcp_adapter *adapter = fsf_req->adapter;
struct fsf_status_read_buffer *status_buffer =
- fsf_req->data.status_read.buffer;
+ (struct fsf_status_read_buffer *) fsf_req->data;
if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
+ zfcp_hba_dbf_event_fsf_unsol("dism", adapter, status_buffer);
mempool_free(status_buffer, adapter->pool.data_status_read);
zfcp_fsf_req_free(fsf_req);
goto out;
}
+ zfcp_hba_dbf_event_fsf_unsol("read", adapter, status_buffer);
+
switch (status_buffer->status_type) {
case FSF_STATUS_READ_PORT_CLOSED:
- debug_text_event(adapter->erp_dbf, 3, "unsol_pclosed:");
- debug_event(adapter->erp_dbf, 3,
- &status_buffer->d_id, sizeof (u32));
zfcp_fsf_status_read_port_closed(fsf_req);
break;
case FSF_STATUS_READ_INCOMING_ELS:
- debug_text_event(adapter->erp_dbf, 3, "unsol_els:");
zfcp_fsf_incoming_els(fsf_req);
break;
case FSF_STATUS_READ_SENSE_DATA_AVAIL:
- debug_text_event(adapter->erp_dbf, 3, "unsol_sense:");
ZFCP_LOG_INFO("unsolicited sense data received (adapter %s)\n",
zfcp_get_busid_by_adapter(adapter));
- ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, (char *) status_buffer,
- sizeof(struct fsf_status_read_buffer));
break;
case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
- debug_text_event(adapter->erp_dbf, 3, "unsol_bit_err:");
ZFCP_LOG_NORMAL("Bit error threshold data received:\n");
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
(char *) status_buffer,
@@ -957,17 +915,32 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
break;
case FSF_STATUS_READ_LINK_DOWN:
- debug_text_event(adapter->erp_dbf, 0, "unsol_link_down:");
- ZFCP_LOG_INFO("Local link to adapter %s is down\n",
+ switch (status_buffer->status_subtype) {
+ case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
+ ZFCP_LOG_INFO("Physical link to adapter %s is down\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_STATUS_READ_SUB_FDISC_FAILED:
+ ZFCP_LOG_INFO("Local link to adapter %s is down "
+ "due to failed FDISC login\n",
zfcp_get_busid_by_adapter(adapter));
- atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
- &adapter->status);
- zfcp_erp_adapter_failed(adapter);
+ break;
+ case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
+ ZFCP_LOG_INFO("Local link to adapter %s is down "
+ "due to firmware update on adapter\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ default:
+ ZFCP_LOG_INFO("Local link to adapter %s is down "
+ "due to unknown reason\n",
+ zfcp_get_busid_by_adapter(adapter));
+ }
+ zfcp_fsf_link_down_info_eval(adapter,
+ (struct fsf_link_down_info *) &status_buffer->payload);
break;
case FSF_STATUS_READ_LINK_UP:
- debug_text_event(adapter->erp_dbf, 2, "unsol_link_up:");
- ZFCP_LOG_INFO("Local link to adapter %s was replugged. "
+ ZFCP_LOG_NORMAL("Local link to adapter %s was replugged. "
"Restarting operations on this adapter\n",
zfcp_get_busid_by_adapter(adapter));
/* All ports should be marked as ready to run again */
@@ -980,35 +953,40 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
break;
case FSF_STATUS_READ_CFDC_UPDATED:
- debug_text_event(adapter->erp_dbf, 2, "unsol_cfdc_update:");
- ZFCP_LOG_INFO("CFDC has been updated on the adapter %s\n",
+ ZFCP_LOG_NORMAL("CFDC has been updated on the adapter %s\n",
zfcp_get_busid_by_adapter(adapter));
zfcp_erp_adapter_access_changed(adapter);
break;
case FSF_STATUS_READ_CFDC_HARDENED:
- debug_text_event(adapter->erp_dbf, 2, "unsol_cfdc_harden:");
switch (status_buffer->status_subtype) {
case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE:
- ZFCP_LOG_INFO("CFDC of adapter %s saved on SE\n",
+ ZFCP_LOG_NORMAL("CFDC of adapter %s saved on SE\n",
zfcp_get_busid_by_adapter(adapter));
break;
case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2:
- ZFCP_LOG_INFO("CFDC of adapter %s has been copied "
+ ZFCP_LOG_NORMAL("CFDC of adapter %s has been copied "
"to the secondary SE\n",
zfcp_get_busid_by_adapter(adapter));
break;
default:
- ZFCP_LOG_INFO("CFDC of adapter %s has been hardened\n",
+ ZFCP_LOG_NORMAL("CFDC of adapter %s has been hardened\n",
zfcp_get_busid_by_adapter(adapter));
}
break;
+ case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
+ debug_text_event(adapter->erp_dbf, 2, "unsol_features:");
+ ZFCP_LOG_INFO("List of supported features on adapter %s has "
+ "been changed from 0x%08X to 0x%08X\n",
+ zfcp_get_busid_by_adapter(adapter),
+ *(u32*) (status_buffer->payload + 4),
+ *(u32*) (status_buffer->payload));
+ adapter->adapter_features = *(u32*) status_buffer->payload;
+ break;
+
default:
- debug_text_event(adapter->erp_dbf, 0, "unsol_unknown:");
- debug_exception(adapter->erp_dbf, 0,
- &status_buffer->status_type, sizeof (u32));
- ZFCP_LOG_NORMAL("bug: An unsolicited status packet of unknown "
+ ZFCP_LOG_NORMAL("warning: An unsolicited status packet of unknown "
"type was received (debug info 0x%x)\n",
status_buffer->status_type);
ZFCP_LOG_DEBUG("Dump of status_read_buffer %p:\n",
@@ -1093,7 +1071,7 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
- fsf_req->data.abort_fcp_command.unit = unit;
+ fsf_req->data = (unsigned long) unit;
/* set handles of unit and its parent port in QTCB */
fsf_req->qtcb->header.lun_handle = unit->handle;
@@ -1139,7 +1117,7 @@ static int
zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
{
int retval = -EINVAL;
- struct zfcp_unit *unit = new_fsf_req->data.abort_fcp_command.unit;
+ struct zfcp_unit *unit;
unsigned char status_qual =
new_fsf_req->qtcb->header.fsf_status_qual.word[0];
@@ -1150,6 +1128,8 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
goto skip_fsfstatus;
}
+ unit = (struct zfcp_unit *) new_fsf_req->data;
+
/* evaluate FSF status in QTCB */
switch (new_fsf_req->qtcb->header.fsf_status) {
@@ -1364,7 +1344,7 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
sbale[3].addr = zfcp_sg_to_address(&ct->resp[0]);
sbale[3].length = ct->resp[0].length;
sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
- } else if (adapter->supported_features &
+ } else if (adapter->adapter_features &
FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
/* try to use chained SBALs */
bytes = zfcp_qdio_sbals_from_sg(fsf_req,
@@ -1414,7 +1394,9 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
fsf_req->qtcb->header.port_handle = port->handle;
fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class;
fsf_req->qtcb->bottom.support.timeout = ct->timeout;
- fsf_req->data.send_ct = ct;
+ fsf_req->data = (unsigned long) ct;
+
+ zfcp_san_dbf_event_ct_request(fsf_req);
/* start QDIO request for this FSF request */
ret = zfcp_fsf_req_send(fsf_req, ct->timer);
@@ -1445,10 +1427,10 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
* zfcp_fsf_send_ct_handler - handler for Generic Service requests
* @fsf_req: pointer to struct zfcp_fsf_req
*
- * Data specific for the Generic Service request is passed by
- * fsf_req->data.send_ct
- * Usually a specific handler for the request is called via
- * fsf_req->data.send_ct->handler at end of this function.
+ * Data specific for the Generic Service request is passed using
+ * fsf_req->data. There we find the pointer to struct zfcp_send_ct.
+ * Usually a specific handler for the CT request is called which is
+ * found in this structure.
*/
static int
zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
@@ -1462,7 +1444,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
u16 subtable, rule, counter;
adapter = fsf_req->adapter;
- send_ct = fsf_req->data.send_ct;
+ send_ct = (struct zfcp_send_ct *) fsf_req->data;
port = send_ct->port;
header = &fsf_req->qtcb->header;
bottom = &fsf_req->qtcb->bottom.support;
@@ -1474,6 +1456,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
switch (header->fsf_status) {
case FSF_GOOD:
+ zfcp_san_dbf_event_ct_response(fsf_req);
retval = 0;
break;
@@ -1634,7 +1617,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
{
volatile struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *fsf_req;
- fc_id_t d_id;
+ u32 d_id;
struct zfcp_adapter *adapter;
unsigned long lock_flags;
int bytes;
@@ -1664,7 +1647,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
sbale[3].addr = zfcp_sg_to_address(&els->resp[0]);
sbale[3].length = els->resp[0].length;
sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
- } else if (adapter->supported_features &
+ } else if (adapter->adapter_features &
FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
/* try to use chained SBALs */
bytes = zfcp_qdio_sbals_from_sg(fsf_req,
@@ -1714,10 +1697,12 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
fsf_req->qtcb->bottom.support.d_id = d_id;
fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class;
fsf_req->qtcb->bottom.support.timeout = ZFCP_ELS_TIMEOUT;
- fsf_req->data.send_els = els;
+ fsf_req->data = (unsigned long) els;
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+ zfcp_san_dbf_event_els_request(fsf_req);
+
/* start QDIO request for this FSF request */
ret = zfcp_fsf_req_send(fsf_req, els->timer);
if (ret) {
@@ -1746,23 +1731,23 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
* zfcp_fsf_send_els_handler - handler for ELS commands
* @fsf_req: pointer to struct zfcp_fsf_req
*
- * Data specific for the ELS command is passed by
- * fsf_req->data.send_els
- * Usually a specific handler for the command is called via
- * fsf_req->data.send_els->handler at end of this function.
+ * Data specific for the ELS command is passed using
+ * fsf_req->data. There we find the pointer to struct zfcp_send_els.
+ * Usually a specific handler for the ELS command is called which is
+ * found in this structure.
*/
static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
{
struct zfcp_adapter *adapter;
struct zfcp_port *port;
- fc_id_t d_id;
+ u32 d_id;
struct fsf_qtcb_header *header;
struct fsf_qtcb_bottom_support *bottom;
struct zfcp_send_els *send_els;
int retval = -EINVAL;
u16 subtable, rule, counter;
- send_els = fsf_req->data.send_els;
+ send_els = (struct zfcp_send_els *) fsf_req->data;
adapter = send_els->adapter;
port = send_els->port;
d_id = send_els->d_id;
@@ -1775,6 +1760,7 @@ static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
switch (header->fsf_status) {
case FSF_GOOD:
+ zfcp_san_dbf_event_els_response(fsf_req);
retval = 0;
break;
@@ -1954,7 +1940,9 @@ zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
erp_action->fsf_req->erp_action = erp_action;
erp_action->fsf_req->qtcb->bottom.config.feature_selection =
- (FSF_FEATURE_CFDC | FSF_FEATURE_LUN_SHARING);
+ FSF_FEATURE_CFDC |
+ FSF_FEATURE_LUN_SHARING |
+ FSF_FEATURE_UPDATE_ALERT;
/* start QDIO request for this FSF request */
retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
@@ -1990,29 +1978,36 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
{
struct fsf_qtcb_bottom_config *bottom;
struct zfcp_adapter *adapter = fsf_req->adapter;
+ struct Scsi_Host *shost = adapter->scsi_host;
bottom = &fsf_req->qtcb->bottom.config;
ZFCP_LOG_DEBUG("low/high QTCB version 0x%x/0x%x of FSF\n",
bottom->low_qtcb_version, bottom->high_qtcb_version);
adapter->fsf_lic_version = bottom->lic_version;
- adapter->supported_features = bottom->supported_features;
+ adapter->adapter_features = bottom->adapter_features;
+ adapter->connection_features = bottom->connection_features;
adapter->peer_wwpn = 0;
adapter->peer_wwnn = 0;
adapter->peer_d_id = 0;
if (xchg_ok) {
- adapter->wwnn = bottom->nport_serv_param.wwnn;
- adapter->wwpn = bottom->nport_serv_param.wwpn;
- adapter->s_id = bottom->s_id & ZFCP_DID_MASK;
+ fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
+ fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
+ fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
+ fc_host_speed(shost) = bottom->fc_link_speed;
+ fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
adapter->fc_topology = bottom->fc_topology;
- adapter->fc_link_speed = bottom->fc_link_speed;
adapter->hydra_version = bottom->adapter_type;
+ if (adapter->physical_wwpn == 0)
+ adapter->physical_wwpn = fc_host_port_name(shost);
+ if (adapter->physical_s_id == 0)
+ adapter->physical_s_id = fc_host_port_id(shost);
} else {
- adapter->wwnn = 0;
- adapter->wwpn = 0;
- adapter->s_id = 0;
+ fc_host_node_name(shost) = 0;
+ fc_host_port_name(shost) = 0;
+ fc_host_port_id(shost) = 0;
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
adapter->fc_topology = 0;
- adapter->fc_link_speed = 0;
adapter->hydra_version = 0;
}
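
The new code above assigns through fc_host_node_name(shost), fc_host_port_id(shost) and related macros from the SCSI FC transport class, which expand to lvalues attached to the Scsi_Host. The sketch below only models that lvalue-macro style in userspace; the real macros and struct Scsi_Host layout differ, and all model_* names are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct Scsi_Host and its FC attributes. */
struct model_fc_attrs { uint64_t node_name; uint64_t port_name; uint32_t port_id; };
struct model_host { struct model_fc_attrs fc; };

/* Lvalue macros in the style of fc_host_node_name() etc. */
#define model_fc_host_node_name(h)	((h)->fc.node_name)
#define model_fc_host_port_name(h)	((h)->fc.port_name)
#define model_fc_host_port_id(h)	((h)->fc.port_id)

int main(void)
{
	struct model_host shost = { { 0, 0, 0 } };

	/* usable on the left-hand side, as in the exchange-config handler */
	model_fc_host_node_name(&shost) = 0x5005076300c00000ULL;
	model_fc_host_port_id(&shost) = 0x654321;

	printf("wwnn=0x%016llx d_id=0x%06x\n",
	       (unsigned long long) model_fc_host_node_name(&shost),
	       (unsigned) model_fc_host_port_id(&shost));
	return 0;
}
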
@@ -2022,26 +2017,28 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
adapter->peer_wwnn = bottom->plogi_payload.wwnn;
}
- if(adapter->supported_features & FSF_FEATURE_HBAAPI_MANAGEMENT){
+ if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
adapter->hardware_version = bottom->hardware_version;
- memcpy(adapter->serial_number, bottom->serial_number, 17);
- EBCASC(adapter->serial_number, sizeof(adapter->serial_number));
+ memcpy(fc_host_serial_number(shost), bottom->serial_number,
+ min(FC_SERIAL_NUMBER_SIZE, 17));
+ EBCASC(fc_host_serial_number(shost),
+ min(FC_SERIAL_NUMBER_SIZE, 17));
}
ZFCP_LOG_NORMAL("The adapter %s reported the following characteristics:\n"
- "WWNN 0x%016Lx, "
- "WWPN 0x%016Lx, "
- "S_ID 0x%08x,\n"
- "adapter version 0x%x, "
- "LIC version 0x%x, "
- "FC link speed %d Gb/s\n",
- zfcp_get_busid_by_adapter(adapter),
- adapter->wwnn,
- adapter->wwpn,
- (unsigned int) adapter->s_id,
- adapter->hydra_version,
- adapter->fsf_lic_version,
- adapter->fc_link_speed);
+ "WWNN 0x%016Lx, "
+ "WWPN 0x%016Lx, "
+ "S_ID 0x%08x,\n"
+ "adapter version 0x%x, "
+ "LIC version 0x%x, "
+ "FC link speed %d Gb/s\n",
+ zfcp_get_busid_by_adapter(adapter),
+ (wwn_t) fc_host_node_name(shost),
+ (wwn_t) fc_host_port_name(shost),
+ fc_host_port_id(shost),
+ adapter->hydra_version,
+ adapter->fsf_lic_version,
+ fc_host_speed(shost));
if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) {
ZFCP_LOG_NORMAL("error: the adapter %s "
"only supports newer control block "
@@ -2062,7 +2059,6 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
zfcp_erp_adapter_shutdown(adapter, 0);
return -EIO;
}
- zfcp_set_fc_host_attrs(adapter);
return 0;
}
@@ -2078,11 +2074,12 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
{
struct fsf_qtcb_bottom_config *bottom;
struct zfcp_adapter *adapter = fsf_req->adapter;
+ struct fsf_qtcb *qtcb = fsf_req->qtcb;
if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
return -EIO;
- switch (fsf_req->qtcb->header.fsf_status) {
+ switch (qtcb->header.fsf_status) {
case FSF_GOOD:
if (zfcp_fsf_exchange_config_evaluate(fsf_req, 1))
@@ -2112,7 +2109,7 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
zfcp_erp_adapter_shutdown(adapter, 0);
return -EIO;
case FSF_TOPO_FABRIC:
- ZFCP_LOG_INFO("Switched fabric fibrechannel "
+ ZFCP_LOG_NORMAL("Switched fabric fibrechannel "
"network detected at adapter %s.\n",
zfcp_get_busid_by_adapter(adapter));
break;
@@ -2130,7 +2127,7 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
zfcp_erp_adapter_shutdown(adapter, 0);
return -EIO;
}
- bottom = &fsf_req->qtcb->bottom.config;
+ bottom = &qtcb->bottom.config;
if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
ZFCP_LOG_NORMAL("bug: Maximum QTCB size (%d bytes) "
"allowed by the adapter %s "
@@ -2155,12 +2152,10 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
if (zfcp_fsf_exchange_config_evaluate(fsf_req, 0))
return -EIO;
- ZFCP_LOG_INFO("Local link to adapter %s is down\n",
- zfcp_get_busid_by_adapter(adapter));
- atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
- ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
- &adapter->status);
- zfcp_erp_adapter_failed(adapter);
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
+
+ zfcp_fsf_link_down_info_eval(adapter,
+ &qtcb->header.fsf_status_qual.link_down_info);
break;
default:
debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf-stat-ng");
@@ -2174,11 +2169,13 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
/**
* zfcp_fsf_exchange_port_data - request information about local port
+ * @erp_action: ERP action for the adapter for which port data is requested
* @adapter: for which port data is requested
* @data: response to exchange port data request
*/
int
-zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
+zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
+ struct zfcp_adapter *adapter,
struct fsf_qtcb_bottom_port *data)
{
volatile struct qdio_buffer_element *sbale;
@@ -2187,7 +2184,7 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
struct zfcp_fsf_req *fsf_req;
struct timer_list *timer;
- if(!(adapter->supported_features & FSF_FEATURE_HBAAPI_MANAGEMENT)){
+ if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) {
ZFCP_LOG_INFO("error: exchange port data "
"command not supported by adapter %s\n",
zfcp_get_busid_by_adapter(adapter));
@@ -2211,12 +2208,18 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
goto out;
}
+ if (erp_action) {
+ erp_action->fsf_req = fsf_req;
+ fsf_req->erp_action = erp_action;
+ }
+
+ if (data)
+ fsf_req->data = (unsigned long) data;
+
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
- fsf_req->data.port_data = data;
-
init_timer(timer);
timer->function = zfcp_fsf_request_timeout_handler;
timer->data = (unsigned long) adapter;
@@ -2228,6 +2231,8 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
"command on the adapter %s\n",
zfcp_get_busid_by_adapter(adapter));
zfcp_fsf_req_free(fsf_req);
+ if (erp_action)
+ erp_action->fsf_req = NULL;
write_unlock_irqrestore(&adapter->request_queue.queue_lock,
lock_flags);
goto out;
@@ -2256,21 +2261,42 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
static void
zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req)
{
- struct fsf_qtcb_bottom_port *bottom;
- struct fsf_qtcb_bottom_port *data = fsf_req->data.port_data;
+ struct zfcp_adapter *adapter = fsf_req->adapter;
+ struct Scsi_Host *shost = adapter->scsi_host;
+ struct fsf_qtcb *qtcb = fsf_req->qtcb;
+ struct fsf_qtcb_bottom_port *bottom, *data;
if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
- switch (fsf_req->qtcb->header.fsf_status) {
+ switch (qtcb->header.fsf_status) {
case FSF_GOOD:
- bottom = &fsf_req->qtcb->bottom.port;
- memcpy(data, bottom, sizeof(*data));
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
+
+ bottom = &qtcb->bottom.port;
+ data = (struct fsf_qtcb_bottom_port*) fsf_req->data;
+ if (data)
+ memcpy(data, bottom, sizeof(struct fsf_qtcb_bottom_port));
+ if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
+ adapter->physical_wwpn = bottom->wwpn;
+ adapter->physical_s_id = bottom->fc_port_id;
+ } else {
+ adapter->physical_wwpn = fc_host_port_name(shost);
+ adapter->physical_s_id = fc_host_port_id(shost);
+ }
+ fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
+ break;
+
+ case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
+
+ zfcp_fsf_link_down_info_eval(adapter,
+ &qtcb->header.fsf_status_qual.link_down_info);
break;
default:
- debug_text_event(fsf_req->adapter->erp_dbf, 0, "xchg-port-ng");
- debug_event(fsf_req->adapter->erp_dbf, 0,
+ debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng");
+ debug_event(adapter->erp_dbf, 0,
&fsf_req->qtcb->header.fsf_status, sizeof(u32));
}
}
@@ -2312,7 +2338,7 @@ zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
erp_action->fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id;
atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
- erp_action->fsf_req->data.open_port.port = erp_action->port;
+ erp_action->fsf_req->data = (unsigned long) erp_action->port;
erp_action->fsf_req->erp_action = erp_action;
/* start QDIO request for this FSF request */
@@ -2353,7 +2379,7 @@ zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req)
struct fsf_qtcb_header *header;
u16 subtable, rule, counter;
- port = fsf_req->data.open_port.port;
+ port = (struct zfcp_port *) fsf_req->data;
header = &fsf_req->qtcb->header;
if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
@@ -2566,7 +2592,7 @@ zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
- erp_action->fsf_req->data.close_port.port = erp_action->port;
+ erp_action->fsf_req->data = (unsigned long) erp_action->port;
erp_action->fsf_req->erp_action = erp_action;
erp_action->fsf_req->qtcb->header.port_handle =
erp_action->port->handle;
@@ -2606,7 +2632,7 @@ zfcp_fsf_close_port_handler(struct zfcp_fsf_req *fsf_req)
int retval = -EINVAL;
struct zfcp_port *port;
- port = fsf_req->data.close_port.port;
+ port = (struct zfcp_port *) fsf_req->data;
if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
/* don't change port status in our bookkeeping */
@@ -2703,8 +2729,8 @@ zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
&erp_action->port->status);
/* save a pointer to this port */
- erp_action->fsf_req->data.close_physical_port.port = erp_action->port;
- /* port to be closeed */
+ erp_action->fsf_req->data = (unsigned long) erp_action->port;
+ /* port to be closed */
erp_action->fsf_req->qtcb->header.port_handle =
erp_action->port->handle;
erp_action->fsf_req->erp_action = erp_action;
@@ -2747,7 +2773,7 @@ zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req)
struct fsf_qtcb_header *header;
u16 subtable, rule, counter;
- port = fsf_req->data.close_physical_port.port;
+ port = (struct zfcp_port *) fsf_req->data;
header = &fsf_req->qtcb->header;
if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
@@ -2908,10 +2934,11 @@ zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
erp_action->port->handle;
erp_action->fsf_req->qtcb->bottom.support.fcp_lun =
erp_action->unit->fcp_lun;
+ if (!(erp_action->adapter->connection_features & FSF_FEATURE_NPIV_MODE))
erp_action->fsf_req->qtcb->bottom.support.option =
FSF_OPEN_LUN_SUPPRESS_BOXING;
atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
- erp_action->fsf_req->data.open_unit.unit = erp_action->unit;
+ erp_action->fsf_req->data = (unsigned long) erp_action->unit;
erp_action->fsf_req->erp_action = erp_action;
/* start QDIO request for this FSF request */
@@ -2955,9 +2982,9 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
struct fsf_qtcb_bottom_support *bottom;
struct fsf_queue_designator *queue_designator;
u16 subtable, rule, counter;
- u32 allowed, exclusive, readwrite;
+ int exclusive, readwrite;
- unit = fsf_req->data.open_unit.unit;
+ unit = (struct zfcp_unit *) fsf_req->data;
if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
/* don't change unit status in our bookkeeping */
@@ -2969,10 +2996,6 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
bottom = &fsf_req->qtcb->bottom.support;
queue_designator = &header->fsf_status_qual.fsf_queue_designator;
- allowed = bottom->lun_access_info & FSF_UNIT_ACCESS_OPEN_LUN_ALLOWED;
- exclusive = bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE;
- readwrite = bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER;
-
atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_UNIT_SHARED |
ZFCP_STATUS_UNIT_READONLY,
@@ -3146,10 +3169,15 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
unit->handle);
/* mark unit as open */
atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
- atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
- ZFCP_STATUS_COMMON_ACCESS_BOXED,
- &unit->status);
- if (adapter->supported_features & FSF_FEATURE_LUN_SHARING){
+
+ if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
+ (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
+ (adapter->ccw_device->id.dev_model != ZFCP_DEVICE_MODEL_PRIV)) {
+ exclusive = (bottom->lun_access_info &
+ FSF_UNIT_ACCESS_EXCLUSIVE);
+ readwrite = (bottom->lun_access_info &
+ FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
+
if (!exclusive)
atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
&unit->status);
@@ -3242,7 +3270,7 @@ zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
erp_action->port->handle;
erp_action->fsf_req->qtcb->header.lun_handle = erp_action->unit->handle;
atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
- erp_action->fsf_req->data.close_unit.unit = erp_action->unit;
+ erp_action->fsf_req->data = (unsigned long) erp_action->unit;
erp_action->fsf_req->erp_action = erp_action;
/* start QDIO request for this FSF request */
@@ -3281,7 +3309,7 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
int retval = -EINVAL;
struct zfcp_unit *unit;
- unit = fsf_req->data.close_unit.unit; /* restore unit */
+ unit = (struct zfcp_unit *) fsf_req->data;
if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
/* don't change unit status in our bookkeeping */
@@ -3305,9 +3333,6 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
debug_text_event(fsf_req->adapter->erp_dbf, 1,
"fsf_s_phand_nv");
zfcp_erp_adapter_reopen(unit->port->adapter, 0);
- zfcp_cmd_dbf_event_fsf("porthinv", fsf_req,
- &fsf_req->qtcb->header.fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -3326,9 +3351,6 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
debug_text_event(fsf_req->adapter->erp_dbf, 1,
"fsf_s_lhand_nv");
zfcp_erp_port_reopen(unit->port, 0);
- zfcp_cmd_dbf_event_fsf("lunhinv", fsf_req,
- &fsf_req->qtcb->header.fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -3436,21 +3458,14 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
goto failed_req_create;
}
- /*
- * associate FSF request with SCSI request
- * (need this for look up on abort)
- */
- fsf_req->data.send_fcp_command_task.fsf_req = fsf_req;
- scsi_cmnd->host_scribble = (char *) &(fsf_req->data);
+ zfcp_unit_get(unit);
+ fsf_req->unit = unit;
- /*
- * associate SCSI command with FSF request
- * (need this for look up on normal command completion)
- */
- fsf_req->data.send_fcp_command_task.scsi_cmnd = scsi_cmnd;
- fsf_req->data.send_fcp_command_task.start_jiffies = jiffies;
- fsf_req->data.send_fcp_command_task.unit = unit;
- ZFCP_LOG_DEBUG("unit=%p, fcp_lun=0x%016Lx\n", unit, unit->fcp_lun);
+ /* associate FSF request with SCSI request (for look up on abort) */
+ scsi_cmnd->host_scribble = (char *) fsf_req;
+
+ /* associate SCSI command with FSF request */
+ fsf_req->data = (unsigned long) scsi_cmnd;
/* set handles of unit and its parent port in QTCB */
fsf_req->qtcb->header.lun_handle = unit->handle;
@@ -3584,6 +3599,7 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
send_failed:
no_fit:
failed_scsi_cmnd:
+ zfcp_unit_put(unit);
zfcp_fsf_req_free(fsf_req);
fsf_req = NULL;
scsi_cmnd->host_scribble = NULL;
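
zfcp_fsf_send_fcp_command_task() now takes a reference on the unit (zfcp_unit_get) before the command is queued, drops it on the error path shown above, and drops it again in the completion handler further down. A minimal standalone sketch of that get/put discipline; the types are hypothetical and a plain counter stands in for the driver's atomic refcounting.

#include <stdio.h>

/* Hypothetical unit with a plain reference counter (the driver uses atomics). */
struct model_unit { int refcount; };

static void model_unit_get(struct model_unit *u) { u->refcount++; }
static void model_unit_put(struct model_unit *u) { u->refcount--; }

/* Returns 0 on success; on failure the reference is dropped immediately. */
static int model_send_command(struct model_unit *u, int fail)
{
	model_unit_get(u);		/* pin the unit for the request's lifetime */
	if (fail) {
		model_unit_put(u);	/* error path: request never went out */
		return -1;
	}
	return 0;
}

static void model_command_completed(struct model_unit *u)
{
	model_unit_put(u);		/* completion handler drops the reference */
}

int main(void)
{
	struct model_unit unit = { .refcount = 1 };

	if (model_send_command(&unit, 0) == 0)
		model_command_completed(&unit);
	model_send_command(&unit, 1);
	printf("refcount=%d\n", unit.refcount);	/* back to 1 */
	return 0;
}
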
@@ -3640,7 +3656,7 @@ zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
* hold a pointer to the unit being target of this
* task management request
*/
- fsf_req->data.send_fcp_command_task_management.unit = unit;
+ fsf_req->data = (unsigned long) unit;
/* set FSF related fields in QTCB */
fsf_req->qtcb->header.lun_handle = unit->handle;
@@ -3706,9 +3722,9 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
header = &fsf_req->qtcb->header;
if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
- unit = fsf_req->data.send_fcp_command_task_management.unit;
+ unit = (struct zfcp_unit *) fsf_req->data;
else
- unit = fsf_req->data.send_fcp_command_task.unit;
+ unit = fsf_req->unit;
if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
/* go directly to calls of special handlers */
@@ -3765,10 +3781,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
debug_text_event(fsf_req->adapter->erp_dbf, 1,
"fsf_s_hand_mis");
zfcp_erp_adapter_reopen(unit->port->adapter, 0);
- zfcp_cmd_dbf_event_fsf("handmism",
- fsf_req,
- &header->fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -3789,10 +3801,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
debug_text_exception(fsf_req->adapter->erp_dbf, 0,
"fsf_s_class_nsup");
zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
- zfcp_cmd_dbf_event_fsf("unsclass",
- fsf_req,
- &header->fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -3811,10 +3819,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
debug_text_event(fsf_req->adapter->erp_dbf, 1,
"fsf_s_fcp_lun_nv");
zfcp_erp_port_reopen(unit->port, 0);
- zfcp_cmd_dbf_event_fsf("fluninv",
- fsf_req,
- &header->fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -3853,10 +3857,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
debug_text_event(fsf_req->adapter->erp_dbf, 0,
"fsf_s_dir_ind_nv");
zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
- zfcp_cmd_dbf_event_fsf("dirinv",
- fsf_req,
- &header->fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -3872,10 +3872,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
debug_text_event(fsf_req->adapter->erp_dbf, 0,
"fsf_s_cmd_len_nv");
zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
- zfcp_cmd_dbf_event_fsf("cleninv",
- fsf_req,
- &header->fsf_status_qual,
- sizeof (union fsf_status_qual));
fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -3947,6 +3943,8 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
zfcp_fsf_send_fcp_command_task_management_handler(fsf_req);
} else {
retval = zfcp_fsf_send_fcp_command_task_handler(fsf_req);
+ fsf_req->unit = NULL;
+ zfcp_unit_put(unit);
}
return retval;
}
@@ -3970,10 +3968,10 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
u32 sns_len;
char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
unsigned long flags;
- struct zfcp_unit *unit = fsf_req->data.send_fcp_command_task.unit;
+ struct zfcp_unit *unit = fsf_req->unit;
read_lock_irqsave(&fsf_req->adapter->abort_lock, flags);
- scpnt = fsf_req->data.send_fcp_command_task.scsi_cmnd;
+ scpnt = (struct scsi_cmnd *) fsf_req->data;
if (unlikely(!scpnt)) {
ZFCP_LOG_DEBUG
("Command with fsf_req %p is not associated to "
@@ -4043,7 +4041,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
(char *) &fsf_req->qtcb->
bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
- zfcp_cmd_dbf_event_fsf("clenmis", fsf_req, NULL, 0);
set_host_byte(&scpnt->result, DID_ERROR);
goto skip_fsfstatus;
case RSP_CODE_FIELD_INVALID:
@@ -4062,7 +4059,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
(char *) &fsf_req->qtcb->
bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
set_host_byte(&scpnt->result, DID_ERROR);
- zfcp_cmd_dbf_event_fsf("codeinv", fsf_req, NULL, 0);
goto skip_fsfstatus;
case RSP_CODE_RO_MISMATCH:
/* hardware bug */
@@ -4079,7 +4075,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
(char *) &fsf_req->qtcb->
bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
- zfcp_cmd_dbf_event_fsf("codemism", fsf_req, NULL, 0);
set_host_byte(&scpnt->result, DID_ERROR);
goto skip_fsfstatus;
default:
@@ -4096,7 +4091,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
(char *) &fsf_req->qtcb->
bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
- zfcp_cmd_dbf_event_fsf("undeffcp", fsf_req, NULL, 0);
set_host_byte(&scpnt->result, DID_ERROR);
goto skip_fsfstatus;
}
@@ -4158,19 +4152,17 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
skip_fsfstatus:
ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result);
- zfcp_cmd_dbf_event_scsi("response", scpnt);
+ if (scpnt->result != 0)
+ zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt);
+ else if (scpnt->retries > 0)
+ zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt);
+ else
+ zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt);
/* cleanup pointer (need this especially for abort) */
scpnt->host_scribble = NULL;
- /*
- * NOTE:
- * according to the outcome of a discussion on linux-scsi we
- * don't need to grab the io_request_lock here since we use
- * the new eh
- */
/* always call back */
-
(scpnt->scsi_done) (scpnt);
/*
@@ -4198,8 +4190,7 @@ zfcp_fsf_send_fcp_command_task_management_handler(struct zfcp_fsf_req *fsf_req)
struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
&(fsf_req->qtcb->bottom.io.fcp_rsp);
char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
- struct zfcp_unit *unit =
- fsf_req->data.send_fcp_command_task_management.unit;
+ struct zfcp_unit *unit = (struct zfcp_unit *) fsf_req->data;
del_timer(&fsf_req->adapter->scsi_er_timer);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
@@ -4276,7 +4267,7 @@ zfcp_fsf_control_file(struct zfcp_adapter *adapter,
int direction;
int retval = 0;
- if (!(adapter->supported_features & FSF_FEATURE_CFDC)) {
+ if (!(adapter->adapter_features & FSF_FEATURE_CFDC)) {
ZFCP_LOG_INFO("cfdc not supported (adapter %s)\n",
zfcp_get_busid_by_adapter(adapter));
retval = -EOPNOTSUPP;
@@ -4549,52 +4540,6 @@ skip_fsfstatus:
return retval;
}
-
-/*
- * function: zfcp_fsf_req_wait_and_cleanup
- *
- * purpose:
- *
- * FIXME(design): signal seems to be <0 !!!
- * returns: 0 - request completed (*status is valid), cleanup succ.
- * <0 - request completed (*status is valid), cleanup failed
- * >0 - signal which interrupted waiting (*status invalid),
- * request not completed, no cleanup
- *
- * *status is a copy of status of completed fsf_req
- */
-int
-zfcp_fsf_req_wait_and_cleanup(struct zfcp_fsf_req *fsf_req,
- int interruptible, u32 * status)
-{
- int retval = 0;
- int signal = 0;
-
- if (interruptible) {
- __wait_event_interruptible(fsf_req->completion_wq,
- fsf_req->status &
- ZFCP_STATUS_FSFREQ_COMPLETED,
- signal);
- if (signal) {
- ZFCP_LOG_DEBUG("Caught signal %i while waiting for the "
- "completion of the request at %p\n",
- signal, fsf_req);
- retval = signal;
- goto out;
- }
- } else {
- __wait_event(fsf_req->completion_wq,
- fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
- }
-
- *status = fsf_req->status;
-
- /* cleanup request */
- zfcp_fsf_req_free(fsf_req);
- out:
- return retval;
-}
-
static inline int
zfcp_fsf_req_sbal_check(unsigned long *flags,
struct zfcp_qdio_queue *queue, int needed)
@@ -4610,15 +4555,16 @@ zfcp_fsf_req_sbal_check(unsigned long *flags,
* set qtcb pointer in fsf_req and initialize QTCB
*/
static inline void
-zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req, u32 fsf_cmd)
+zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
{
if (likely(fsf_req->qtcb != NULL)) {
+ fsf_req->qtcb->prefix.req_seq_no = fsf_req->adapter->fsf_req_seq_no;
fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req;
fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION;
- fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
+ fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_req->fsf_command];
fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION;
fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req;
- fsf_req->qtcb->header.fsf_command = fsf_cmd;
+ fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command;
}
}
@@ -4686,7 +4632,10 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
goto failed_fsf_req;
}
- zfcp_fsf_req_qtcb_init(fsf_req, fsf_cmd);
+ fsf_req->adapter = adapter;
+ fsf_req->fsf_command = fsf_cmd;
+
+ zfcp_fsf_req_qtcb_init(fsf_req);
/* initialize waitqueue which may be used to wait on
this request completion */
@@ -4708,8 +4657,10 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
goto failed_sbals;
}
- fsf_req->adapter = adapter; /* pointer to "parent" adapter */
- fsf_req->fsf_command = fsf_cmd;
+ if (fsf_req->qtcb) {
+ fsf_req->seq_no = adapter->fsf_req_seq_no;
+ fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
+ }
fsf_req->sbal_number = 1;
fsf_req->sbal_first = req_queue->free_index;
fsf_req->sbal_curr = req_queue->free_index;
@@ -4760,9 +4711,9 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
struct zfcp_adapter *adapter;
struct zfcp_qdio_queue *req_queue;
volatile struct qdio_buffer_element *sbale;
+ int inc_seq_no;
int new_distance_from_int;
unsigned long flags;
- int inc_seq_no = 1;
int retval = 0;
adapter = fsf_req->adapter;
@@ -4776,23 +4727,13 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr,
sbale[1].length);
- /* set sequence counter in QTCB */
- if (likely(fsf_req->qtcb)) {
- fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
- fsf_req->seq_no = adapter->fsf_req_seq_no;
- ZFCP_LOG_TRACE("FSF request %p of adapter %s gets "
- "FSF sequence counter value of %i\n",
- fsf_req,
- zfcp_get_busid_by_adapter(adapter),
- fsf_req->qtcb->prefix.req_seq_no);
- } else
- inc_seq_no = 0;
-
/* put allocated FSF request at list tail */
spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head);
spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
+ inc_seq_no = (fsf_req->qtcb != NULL);
+
/* figure out expiration time of timeout and start timeout */
if (unlikely(timer)) {
timer->expires += jiffies;
@@ -4822,6 +4763,8 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap if needed */
new_distance_from_int = zfcp_qdio_determine_pci(req_queue, fsf_req);
+ fsf_req->issued = get_clock();
+
retval = do_QDIO(adapter->ccw_device,
QDIO_FLAG_SYNC_OUTPUT,
0, fsf_req->sbal_first, fsf_req->sbal_number, NULL);
@@ -4860,15 +4803,11 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
* routines resulting in missing sequence counter values
* otherwise,
*/
+
/* Don't increase for unsolicited status */
- if (likely(inc_seq_no)) {
+ if (inc_seq_no)
adapter->fsf_req_seq_no++;
- ZFCP_LOG_TRACE
- ("FSF sequence counter value of adapter %s "
- "increased to %i\n",
- zfcp_get_busid_by_adapter(adapter),
- adapter->fsf_req_seq_no);
- }
+
/* count FSF requests pending */
atomic_inc(&adapter->fsf_reqs_active);
}
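
With the hunks above, the sequence number is stamped into the QTCB at request creation time and zfcp_fsf_req_send() only advances the adapter counter when the request actually carries a QTCB (unsolicited status reads do not). A standalone sketch of that rule; the names are hypothetical and a plain integer replaces the adapter's counter.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical request: has_qtcb mirrors fsf_req->qtcb != NULL. */
struct model_req { bool has_qtcb; unsigned int seq_no; };

static void model_send(struct model_req *req, unsigned int *adapter_seq_no)
{
	if (req->has_qtcb)
		req->seq_no = *adapter_seq_no;	/* stamped for tracked requests */

	/* ...queue the request to the hardware here... */

	if (req->has_qtcb)
		(*adapter_seq_no)++;	/* unsolicited status reads don't count */
}

int main(void)
{
	unsigned int adapter_seq_no = 0;
	struct model_req cmd = { .has_qtcb = true };
	struct model_req status_read = { .has_qtcb = false };

	model_send(&cmd, &adapter_seq_no);
	model_send(&status_read, &adapter_seq_no);
	model_send(&cmd, &adapter_seq_no);
	printf("adapter_seq_no=%u cmd.seq_no=%u\n", adapter_seq_no, cmd.seq_no);
	/* prints "adapter_seq_no=2 cmd.seq_no=1" */
	return 0;
}
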
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 07140dfda2a..48719f05595 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -116,6 +116,7 @@
#define FSF_INVALID_COMMAND_OPTION 0x000000E5
/* #define FSF_ERROR 0x000000FF */
+#define FSF_PROT_STATUS_QUAL_SIZE 16
#define FSF_STATUS_QUALIFIER_SIZE 16
/* FSF status qualifier, recommendations */
@@ -139,9 +140,18 @@
#define FSF_SQ_CFDC_SUBTABLE_LUN 0x0004
/* FSF status qualifier (most significant 4 bytes), local link down */
-#define FSF_PSQ_LINK_NOLIGHT 0x00000004
-#define FSF_PSQ_LINK_WRAPPLUG 0x00000008
-#define FSF_PSQ_LINK_NOFCP 0x00000010
+#define FSF_PSQ_LINK_NO_LIGHT 0x00000004
+#define FSF_PSQ_LINK_WRAP_PLUG 0x00000008
+#define FSF_PSQ_LINK_NO_FCP 0x00000010
+#define FSF_PSQ_LINK_FIRMWARE_UPDATE 0x00000020
+#define FSF_PSQ_LINK_INVALID_WWPN 0x00000100
+#define FSF_PSQ_LINK_NO_NPIV_SUPPORT 0x00000200
+#define FSF_PSQ_LINK_NO_FCP_RESOURCES 0x00000400
+#define FSF_PSQ_LINK_NO_FABRIC_RESOURCES 0x00000800
+#define FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE 0x00001000
+#define FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED 0x00002000
+#define FSF_PSQ_LINK_MODE_TABLE_CURRUPTED 0x00004000
+#define FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT 0x00008000
/* payload size in status read buffer */
#define FSF_STATUS_READ_PAYLOAD_SIZE 4032
@@ -154,15 +164,21 @@
#define FSF_STATUS_READ_INCOMING_ELS 0x00000002
#define FSF_STATUS_READ_SENSE_DATA_AVAIL 0x00000003
#define FSF_STATUS_READ_BIT_ERROR_THRESHOLD 0x00000004
-#define FSF_STATUS_READ_LINK_DOWN 0x00000005 /* FIXME: really? */
+#define FSF_STATUS_READ_LINK_DOWN 0x00000005
#define FSF_STATUS_READ_LINK_UP 0x00000006
#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A
#define FSF_STATUS_READ_CFDC_HARDENED 0x0000000B
+#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C
/* status subtypes in status read buffer */
#define FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT 0x00000001
#define FSF_STATUS_READ_SUB_ERROR_PORT 0x00000002
+/* status subtypes for link down */
+#define FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK 0x00000000
+#define FSF_STATUS_READ_SUB_FDISC_FAILED 0x00000001
+#define FSF_STATUS_READ_SUB_FIRMWARE_UPDATE 0x00000002
+
/* status subtypes for CFDC */
#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE 0x00000002
#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2 0x0000000F
@@ -193,11 +209,15 @@
#define FSF_QTCB_LOG_SIZE 1024
/* channel features */
-#define FSF_FEATURE_QTCB_SUPPRESSION 0x00000001
#define FSF_FEATURE_CFDC 0x00000002
#define FSF_FEATURE_LUN_SHARING 0x00000004
#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
+#define FSF_FEATURE_UPDATE_ALERT 0x00000100
+
+/* host connection features */
+#define FSF_FEATURE_NPIV_MODE 0x00000001
+#define FSF_FEATURE_VM_ASSIGNED_WWPN 0x00000002
/* option */
#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001
@@ -305,15 +325,23 @@ struct fsf_qual_sequence_error {
u32 res1[3];
} __attribute__ ((packed));
-struct fsf_qual_locallink_error {
- u32 code;
- u32 res1[3];
+struct fsf_link_down_info {
+ u32 error_code;
+ u32 res1;
+ u8 res2[2];
+ u8 primary_status;
+ u8 ioerr_code;
+ u8 action_code;
+ u8 reason_code;
+ u8 explanation_code;
+ u8 vendor_specific_code;
} __attribute__ ((packed));
union fsf_prot_status_qual {
+ u64 doubleword[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u64)];
struct fsf_qual_version_error version_error;
struct fsf_qual_sequence_error sequence_error;
- struct fsf_qual_locallink_error locallink_error;
+ struct fsf_link_down_info link_down_info;
} __attribute__ ((packed));
struct fsf_qtcb_prefix {
@@ -331,7 +359,9 @@ union fsf_status_qual {
u8 byte[FSF_STATUS_QUALIFIER_SIZE];
u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)];
u32 word[FSF_STATUS_QUALIFIER_SIZE / sizeof (u32)];
+ u64 doubleword[FSF_STATUS_QUALIFIER_SIZE / sizeof(u64)];
struct fsf_queue_designator fsf_queue_designator;
+ struct fsf_link_down_info link_down_info;
} __attribute__ ((packed));
struct fsf_qtcb_header {
@@ -406,8 +436,8 @@ struct fsf_qtcb_bottom_config {
u32 low_qtcb_version;
u32 max_qtcb_size;
u32 max_data_transfer_size;
- u32 supported_features;
- u8 res1[4];
+ u32 adapter_features;
+ u32 connection_features;
u32 fc_topology;
u32 fc_link_speed;
u32 adapter_type;
@@ -425,7 +455,7 @@ struct fsf_qtcb_bottom_config {
} __attribute__ ((packed));
struct fsf_qtcb_bottom_port {
- u8 res1[8];
+ u64 wwpn;
u32 fc_port_id;
u32 port_type;
u32 port_state;
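
The new struct fsf_link_down_info is overlaid on the 16-byte status qualifiers through the unions above, so its packed layout must stay exactly 16 bytes. A standalone check of that assumption: the struct is copied from the hunk, while the union is a reduced model with only the two views used here, not the full kernel definition.

#include <stdint.h>
#include <stdio.h>

#define FSF_PROT_STATUS_QUAL_SIZE 16

struct fsf_link_down_info {
	uint32_t error_code;
	uint32_t res1;
	uint8_t  res2[2];
	uint8_t  primary_status;
	uint8_t  ioerr_code;
	uint8_t  action_code;
	uint8_t  reason_code;
	uint8_t  explanation_code;
	uint8_t  vendor_specific_code;
} __attribute__ ((packed));

/* Reduced model of union fsf_prot_status_qual: just the two views used here. */
union model_prot_status_qual {
	uint64_t doubleword[FSF_PROT_STATUS_QUAL_SIZE / sizeof(uint64_t)];
	struct fsf_link_down_info link_down_info;
} __attribute__ ((packed));

_Static_assert(sizeof(struct fsf_link_down_info) == 16,
	       "link down info must fill the 16-byte qualifier");
_Static_assert(sizeof(union model_prot_status_qual) == 16,
	       "qualifier union must stay 16 bytes");

int main(void)
{
	printf("%zu %zu\n", sizeof(struct fsf_link_down_info),
	       sizeof(union model_prot_status_qual));	/* prints "16 16" */
	return 0;
}
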
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 24e16ec331d..d719f66a29a 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -54,8 +54,7 @@ static inline int zfcp_qdio_sbals_from_buffer
static qdio_handler_t zfcp_qdio_request_handler;
static qdio_handler_t zfcp_qdio_response_handler;
static int zfcp_qdio_handler_error_check(struct zfcp_adapter *,
- unsigned int,
- unsigned int, unsigned int);
+ unsigned int, unsigned int, unsigned int, int, int);
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
@@ -214,22 +213,12 @@ zfcp_qdio_allocate(struct zfcp_adapter *adapter)
*
*/
static inline int
-zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter,
- unsigned int status,
- unsigned int qdio_error, unsigned int siga_error)
+zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status,
+ unsigned int qdio_error, unsigned int siga_error,
+ int first_element, int elements_processed)
{
int retval = 0;
- if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_TRACE)) {
- if (status & QDIO_STATUS_INBOUND_INT) {
- ZFCP_LOG_TRACE("status is"
- " QDIO_STATUS_INBOUND_INT \n");
- }
- if (status & QDIO_STATUS_OUTBOUND_INT) {
- ZFCP_LOG_TRACE("status is"
- " QDIO_STATUS_OUTBOUND_INT \n");
- }
- }
if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
retval = -EIO;
@@ -237,9 +226,10 @@ zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter,
"qdio_error=0x%x, siga_error=0x%x)\n",
status, qdio_error, siga_error);
- /* Restarting IO on the failed adapter from scratch */
- debug_text_event(adapter->erp_dbf, 1, "qdio_err");
+ zfcp_hba_dbf_event_qdio(adapter, status, qdio_error, siga_error,
+ first_element, elements_processed);
/*
+ * Restarting IO on the failed adapter from scratch.
* Since we have been using this adapter, it is safe to assume
* that it is not failed but recoverable. The card seems to
* report link-up events by self-initiated queue shutdown.
@@ -282,7 +272,8 @@ zfcp_qdio_request_handler(struct ccw_device *ccw_device,
first_element, elements_processed);
if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
- siga_error)))
+ siga_error, first_element,
+ elements_processed)))
goto out;
/*
* we stored address of struct zfcp_adapter data structure
@@ -334,7 +325,8 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device,
queue = &adapter->response_queue;
if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
- siga_error)))
+ siga_error, first_element,
+ elements_processed)))
goto out;
/*
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 31a76065cf2..3dcd1bfba3b 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -44,7 +44,8 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *);
static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *);
static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *);
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *);
-static int zfcp_task_management_function(struct zfcp_unit *, u8);
+static int zfcp_task_management_function(struct zfcp_unit *, u8,
+ struct scsi_cmnd *);
static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int, scsi_id_t,
scsi_lun_t);
@@ -242,7 +243,10 @@ static void
zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
{
set_host_byte(&scpnt->result, result);
- zfcp_cmd_dbf_event_scsi("failing", scpnt);
+ if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
+ zfcp_scsi_dbf_event_result("fail", 4,
+ (struct zfcp_adapter*) scpnt->device->host->hostdata[0],
+ scpnt);
/* return directly */
scpnt->scsi_done(scpnt);
}
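
In the rewritten abort handler below, scpnt->host_scribble holds the owning fsf_req pointer directly; under the abort lock it is either still set (the abort raced the command) or already NULL (late completion won and there is nothing left to abort). A standalone sketch of that look-up-on-abort pattern; the types are hypothetical and a plain field stands in for the adapter's abort rwlock and the real Abort FCP Cmnd request.

#include <stdio.h>

/* Hypothetical command/request pair; host_scribble mirrors scpnt->host_scribble. */
struct model_req { int aborting; };
struct model_cmd { struct model_req *host_scribble; };

/* Returns 1 ("SUCCESS") when nothing is left to abort or the request was
 * marked; 0 ("FAILED") would correspond to a failed abort initiation. */
static int model_eh_abort(struct model_cmd *cmd)
{
	struct model_req *old_req = cmd->host_scribble;

	if (!old_req)
		return 1;	/* late completion overtook the abort */

	old_req->aborting = 1;	/* mark request; real code issues Abort FCP Cmnd */
	return 1;
}

static void model_command_done(struct model_cmd *cmd)
{
	cmd->host_scribble = NULL;	/* completion clears the back pointer */
}

int main(void)
{
	struct model_req req = { 0 };
	struct model_cmd cmd = { .host_scribble = &req };

	printf("%d", model_eh_abort(&cmd));	/* 1: request marked as aborting */
	model_command_done(&cmd);
	printf(" %d\n", model_eh_abort(&cmd));	/* 1: nothing left to abort */
	return 0;
}
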
@@ -414,67 +418,38 @@ zfcp_port_lookup(struct zfcp_adapter *adapter, int channel, scsi_id_t id)
return (struct zfcp_port *) NULL;
}
-/*
- * function: zfcp_scsi_eh_abort_handler
- *
- * purpose: tries to abort the specified (timed out) SCSI command
- *
- * note: We do not need to care for a SCSI command which completes
- * normally but late during this abort routine runs.
- * We are allowed to return late commands to the SCSI stack.
- * It tracks the state of commands and will handle late commands.
- * (Usually, the normal completion of late commands is ignored with
- * respect to the running abort operation. Grep for 'done_late'
- * in the SCSI stacks sources.)
+/**
+ * zfcp_scsi_eh_abort_handler - abort the specified SCSI command
+ * @scpnt: pointer to scsi_cmnd to be aborted
+ * Return: SUCCESS - command has been aborted and cleaned up in internal
+ * bookkeeping, SCSI stack won't be called for aborted command
+ * FAILED - otherwise
*
- * returns: SUCCESS - command has been aborted and cleaned up in internal
- * bookkeeping,
- * SCSI stack won't be called for aborted command
- * FAILED - otherwise
+ * We do not need to care for a SCSI command which completes normally
+ * but late during this abort routine runs. We are allowed to return
+ * late commands to the SCSI stack. It tracks the state of commands and
+ * will handle late commands. (Usually, the normal completion of late
+ * commands is ignored with respect to the running abort operation.)
*/
int
-__zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
+zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
{
+ struct Scsi_Host *scsi_host;
+ struct zfcp_adapter *adapter;
+ struct zfcp_unit *unit;
int retval = SUCCESS;
- struct zfcp_fsf_req *new_fsf_req, *old_fsf_req;
- struct zfcp_adapter *adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
- struct zfcp_unit *unit = (struct zfcp_unit *) scpnt->device->hostdata;
- struct zfcp_port *port = unit->port;
- struct Scsi_Host *scsi_host = scpnt->device->host;
- union zfcp_req_data *req_data = NULL;
+ struct zfcp_fsf_req *new_fsf_req = NULL;
+ struct zfcp_fsf_req *old_fsf_req;
unsigned long flags;
- u32 status = 0;
-
- /* the components of a abort_dbf record (fixed size record) */
- u64 dbf_scsi_cmnd = (unsigned long) scpnt;
- char dbf_opcode[ZFCP_ABORT_DBF_LENGTH];
- wwn_t dbf_wwn = port->wwpn;
- fcp_lun_t dbf_fcp_lun = unit->fcp_lun;
- u64 dbf_retries = scpnt->retries;
- u64 dbf_allowed = scpnt->allowed;
- u64 dbf_timeout = 0;
- u64 dbf_fsf_req = 0;
- u64 dbf_fsf_status = 0;
- u64 dbf_fsf_qual[2] = { 0, 0 };
- char dbf_result[ZFCP_ABORT_DBF_LENGTH] = "##undef";
-
- memset(dbf_opcode, 0, ZFCP_ABORT_DBF_LENGTH);
- memcpy(dbf_opcode,
- scpnt->cmnd,
- min(scpnt->cmd_len, (unsigned char) ZFCP_ABORT_DBF_LENGTH));
+
+ scsi_host = scpnt->device->host;
+ adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
+ unit = (struct zfcp_unit *) scpnt->device->hostdata;
ZFCP_LOG_INFO("aborting scsi_cmnd=%p on adapter %s\n",
scpnt, zfcp_get_busid_by_adapter(adapter));
- spin_unlock_irq(scsi_host->host_lock);
-
- /*
- * Race condition between normal (late) completion and abort has
- * to be avoided.
- * The entirity of all accesses to scsi_req have to be atomic.
- * scsi_req is usually part of the fsf_req and thus we block the
- * release of fsf_req as long as we need to access scsi_req.
- */
+ /* avoid race condition between late normal completion and abort */
write_lock_irqsave(&adapter->abort_lock, flags);
/*
@@ -484,144 +459,47 @@ __zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
* this routine returns. (scpnt is parameter passed to this routine
* and must not disappear during abort even on late completion.)
*/
- req_data = (union zfcp_req_data *) scpnt->host_scribble;
- /* DEBUG */
- ZFCP_LOG_DEBUG("req_data=%p\n", req_data);
- if (!req_data) {
- ZFCP_LOG_DEBUG("late command completion overtook abort\n");
- /*
- * That's it.
- * Do not initiate abort but return SUCCESS.
- */
- write_unlock_irqrestore(&adapter->abort_lock, flags);
- retval = SUCCESS;
- strncpy(dbf_result, "##late1", ZFCP_ABORT_DBF_LENGTH);
- goto out;
- }
-
- /* Figure out which fsf_req needs to be aborted. */
- old_fsf_req = req_data->send_fcp_command_task.fsf_req;
-
- dbf_fsf_req = (unsigned long) old_fsf_req;
- dbf_timeout =
- (jiffies - req_data->send_fcp_command_task.start_jiffies) / HZ;
-
- ZFCP_LOG_DEBUG("old_fsf_req=%p\n", old_fsf_req);
+ old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble;
if (!old_fsf_req) {
write_unlock_irqrestore(&adapter->abort_lock, flags);
- ZFCP_LOG_NORMAL("bug: no old fsf request found\n");
- ZFCP_LOG_NORMAL("req_data:\n");
- ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
- (char *) req_data, sizeof (union zfcp_req_data));
- ZFCP_LOG_NORMAL("scsi_cmnd:\n");
- ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
- (char *) scpnt, sizeof (struct scsi_cmnd));
- retval = FAILED;
- strncpy(dbf_result, "##bug:r", ZFCP_ABORT_DBF_LENGTH);
+ zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, new_fsf_req);
+ retval = SUCCESS;
goto out;
}
- old_fsf_req->data.send_fcp_command_task.scsi_cmnd = NULL;
- /* mark old request as being aborted */
+ old_fsf_req->data = 0;
old_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
- /*
- * We have to collect all information (e.g. unit) needed by
- * zfcp_fsf_abort_fcp_command before calling that routine
- * since that routine is not allowed to access
- * fsf_req which it is going to abort.
- * This is because of we need to release fsf_req_list_lock
- * before calling zfcp_fsf_abort_fcp_command.
- * Since this lock will not be held, fsf_req may complete
- * late and may be released meanwhile.
- */
- ZFCP_LOG_DEBUG("unit 0x%016Lx (%p)\n", unit->fcp_lun, unit);
- /*
- * We block (call schedule)
- * That's why we must release the lock and enable the
- * interrupts before.
- * On the other hand we do not need the lock anymore since
- * all critical accesses to scsi_req are done.
- */
+ /* don't access old_fsf_req after releasing the abort_lock */
write_unlock_irqrestore(&adapter->abort_lock, flags);
/* call FSF routine which does the abort */
new_fsf_req = zfcp_fsf_abort_fcp_command((unsigned long) old_fsf_req,
adapter, unit, 0);
- ZFCP_LOG_DEBUG("new_fsf_req=%p\n", new_fsf_req);
if (!new_fsf_req) {
+ ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n");
retval = FAILED;
- ZFCP_LOG_NORMAL("error: initiation of Abort FCP Cmnd "
- "failed\n");
- strncpy(dbf_result, "##nores", ZFCP_ABORT_DBF_LENGTH);
goto out;
}
/* wait for completion of abort */
- ZFCP_LOG_DEBUG("waiting for cleanup...\n");
-#if 1
- /*
- * FIXME:
- * copying zfcp_fsf_req_wait_and_cleanup code is not really nice
- */
__wait_event(new_fsf_req->completion_wq,
new_fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
- status = new_fsf_req->status;
- dbf_fsf_status = new_fsf_req->qtcb->header.fsf_status;
- /*
- * Ralphs special debug load provides timestamps in the FSF
- * status qualifier. This might be specified later if being
- * useful for debugging aborts.
- */
- dbf_fsf_qual[0] =
- *(u64 *) & new_fsf_req->qtcb->header.fsf_status_qual.word[0];
- dbf_fsf_qual[1] =
- *(u64 *) & new_fsf_req->qtcb->header.fsf_status_qual.word[2];
- zfcp_fsf_req_free(new_fsf_req);
-#else
- retval = zfcp_fsf_req_wait_and_cleanup(new_fsf_req,
- ZFCP_UNINTERRUPTIBLE, &status);
-#endif
- ZFCP_LOG_DEBUG("Waiting for cleanup complete, status=0x%x\n", status);
+
/* status should be valid since signals were not permitted */
- if (status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
+ if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
+ zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req);
retval = SUCCESS;
- strncpy(dbf_result, "##succ", ZFCP_ABORT_DBF_LENGTH);
- } else if (status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
+ } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
+ zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req);
retval = SUCCESS;
- strncpy(dbf_result, "##late2", ZFCP_ABORT_DBF_LENGTH);
} else {
+ zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req);
retval = FAILED;
- strncpy(dbf_result, "##fail", ZFCP_ABORT_DBF_LENGTH);
}
-
+ zfcp_fsf_req_free(new_fsf_req);
out:
- debug_event(adapter->abort_dbf, 1, &dbf_scsi_cmnd, sizeof (u64));
- debug_event(adapter->abort_dbf, 1, &dbf_opcode, ZFCP_ABORT_DBF_LENGTH);
- debug_event(adapter->abort_dbf, 1, &dbf_wwn, sizeof (wwn_t));
- debug_event(adapter->abort_dbf, 1, &dbf_fcp_lun, sizeof (fcp_lun_t));
- debug_event(adapter->abort_dbf, 1, &dbf_retries, sizeof (u64));
- debug_event(adapter->abort_dbf, 1, &dbf_allowed, sizeof (u64));
- debug_event(adapter->abort_dbf, 1, &dbf_timeout, sizeof (u64));
- debug_event(adapter->abort_dbf, 1, &dbf_fsf_req, sizeof (u64));
- debug_event(adapter->abort_dbf, 1, &dbf_fsf_status, sizeof (u64));
- debug_event(adapter->abort_dbf, 1, &dbf_fsf_qual[0], sizeof (u64));
- debug_event(adapter->abort_dbf, 1, &dbf_fsf_qual[1], sizeof (u64));
- debug_text_event(adapter->abort_dbf, 1, dbf_result);
-
- spin_lock_irq(scsi_host->host_lock);
return retval;
}
-int
-zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
-{
- int rc;
- struct Scsi_Host *scsi_host = scpnt->device->host;
- spin_lock_irq(scsi_host->host_lock);
- rc = __zfcp_scsi_eh_abort_handler(scpnt);
- spin_unlock_irq(scsi_host->host_lock);
- return rc;
-}
-
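
The "lte1"/late-completion path above only works because the abort handler and the normal completion path both serialize on adapter->abort_lock before touching scpnt->host_scribble: whichever side takes the write lock first either clears the request pointer (late completion) or marks the request as being aborted, and the other side sees a consistent state. The following standalone sketch models that handshake with plain pthreads; the names and helper functions are illustrative only and are not zfcp's actual completion code.

/* Standalone model of the abort vs. late-completion handshake used
 * above: both paths take the same writer lock before touching the
 * command->request link.  Names are illustrative, not zfcp's. */
#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct request { int aborting; };
struct command {
	struct request *req;		/* plays the role of host_scribble */
};

static pthread_rwlock_t abort_lock = PTHREAD_RWLOCK_INITIALIZER;

/* normal (possibly late) completion */
static void complete_command(struct command *cmd)
{
	pthread_rwlock_wrlock(&abort_lock);
	cmd->req = NULL;		/* abort handler will see "late" */
	pthread_rwlock_unlock(&abort_lock);
}

/* abort handler: returns 0 ("SUCCESS") if nothing is left to abort */
static int abort_command(struct command *cmd)
{
	struct request *req;

	pthread_rwlock_wrlock(&abort_lock);
	req = cmd->req;
	if (!req) {			/* completion overtook the abort */
		pthread_rwlock_unlock(&abort_lock);
		return 0;
	}
	req->aborting = 1;		/* mark before dropping the lock */
	pthread_rwlock_unlock(&abort_lock);
	/* ... issue the real abort for "req" here ... */
	return 0;
}

int main(void)
{
	struct request r = { 0 };
	struct command c = { &r };
	int ret;

	complete_command(&c);		/* completion wins the race */
	ret = abort_command(&c);
	printf("abort result: %d, aborting flag: %d\n", ret, r.aborting);
	return 0;
}
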
/*
* function: zfcp_scsi_eh_device_reset_handler
*
@@ -651,8 +529,9 @@ zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
*/
if (!atomic_test_mask(ZFCP_STATUS_UNIT_NOTSUPPUNITRESET,
&unit->status)) {
- retval =
- zfcp_task_management_function(unit, FCP_LOGICAL_UNIT_RESET);
+ retval = zfcp_task_management_function(unit,
+ FCP_LOGICAL_UNIT_RESET,
+ scpnt);
if (retval) {
ZFCP_LOG_DEBUG("unit reset failed (unit=%p)\n", unit);
if (retval == -ENOTSUPP)
@@ -668,7 +547,7 @@ zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
goto out;
}
}
- retval = zfcp_task_management_function(unit, FCP_TARGET_RESET);
+ retval = zfcp_task_management_function(unit, FCP_TARGET_RESET, scpnt);
if (retval) {
ZFCP_LOG_DEBUG("target reset failed (unit=%p)\n", unit);
retval = FAILED;
@@ -681,12 +560,12 @@ zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
}
static int
-zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags)
+zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
+ struct scsi_cmnd *scpnt)
{
struct zfcp_adapter *adapter = unit->port->adapter;
- int retval;
- int status;
struct zfcp_fsf_req *fsf_req;
+ int retval = 0;
/* issue task management function */
fsf_req = zfcp_fsf_send_fcp_command_task_management
@@ -696,70 +575,63 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags)
"failed for unit 0x%016Lx on port 0x%016Lx on "
"adapter %s\n", unit->fcp_lun, unit->port->wwpn,
zfcp_get_busid_by_adapter(adapter));
+ zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit, scpnt);
retval = -ENOMEM;
goto out;
}
- retval = zfcp_fsf_req_wait_and_cleanup(fsf_req,
- ZFCP_UNINTERRUPTIBLE, &status);
+ __wait_event(fsf_req->completion_wq,
+ fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+
/*
* check completion status of task management function
- * (status should always be valid since no signals permitted)
*/
- if (status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED)
+ if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
+ zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt);
retval = -EIO;
- else if (status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP)
+ } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) {
+ zfcp_scsi_dbf_event_devreset("nsup", tm_flags, unit, scpnt);
retval = -ENOTSUPP;
- else
- retval = 0;
+ } else
+ zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt);
+
+ zfcp_fsf_req_free(fsf_req);
out:
return retval;
}
-/*
- * function: zfcp_scsi_eh_bus_reset_handler
- *
- * purpose:
- *
- * returns:
+/**
+ * zfcp_scsi_eh_bus_reset_handler - reset bus (reopen adapter)
*/
int
zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt)
{
- int retval = 0;
- struct zfcp_unit *unit;
+ struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata;
+ struct zfcp_adapter *adapter = unit->port->adapter;
- unit = (struct zfcp_unit *) scpnt->device->hostdata;
ZFCP_LOG_NORMAL("bus reset because of problems with "
"unit 0x%016Lx\n", unit->fcp_lun);
- zfcp_erp_adapter_reopen(unit->port->adapter, 0);
- zfcp_erp_wait(unit->port->adapter);
- retval = SUCCESS;
+ zfcp_erp_adapter_reopen(adapter, 0);
+ zfcp_erp_wait(adapter);
- return retval;
+ return SUCCESS;
}
-/*
- * function: zfcp_scsi_eh_host_reset_handler
- *
- * purpose:
- *
- * returns:
+/**
+ * zfcp_scsi_eh_host_reset_handler - reset host (reopen adapter)
*/
int
zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
- int retval = 0;
- struct zfcp_unit *unit;
+ struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata;
+ struct zfcp_adapter *adapter = unit->port->adapter;
- unit = (struct zfcp_unit *) scpnt->device->hostdata;
ZFCP_LOG_NORMAL("host reset because of problems with "
"unit 0x%016Lx\n", unit->fcp_lun);
- zfcp_erp_adapter_reopen(unit->port->adapter, 0);
- zfcp_erp_wait(unit->port->adapter);
- retval = SUCCESS;
+ zfcp_erp_adapter_reopen(adapter, 0);
+ zfcp_erp_wait(adapter);
- return retval;
+ return SUCCESS;
}
/*
@@ -826,10 +698,16 @@ void
zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
{
struct Scsi_Host *shost;
+ struct zfcp_port *port;
shost = adapter->scsi_host;
if (!shost)
return;
+ read_lock_irq(&zfcp_data.config_lock);
+ list_for_each_entry(port, &adapter->port_list_head, list)
+ if (port->rport)
+ port->rport = NULL;
+ read_unlock_irq(&zfcp_data.config_lock);
fc_remove_host(shost);
scsi_remove_host(shost);
scsi_host_put(shost);
@@ -904,18 +782,6 @@ zfcp_get_node_name(struct scsi_target *starget)
read_unlock_irqrestore(&zfcp_data.config_lock, flags);
}
-void
-zfcp_set_fc_host_attrs(struct zfcp_adapter *adapter)
-{
- struct Scsi_Host *shost = adapter->scsi_host;
-
- fc_host_node_name(shost) = adapter->wwnn;
- fc_host_port_name(shost) = adapter->wwpn;
- strncpy(fc_host_serial_number(shost), adapter->serial_number,
- min(FC_SERIAL_NUMBER_SIZE, 32));
- fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
-}
-
struct fc_function_template zfcp_transport_functions = {
.get_starget_port_id = zfcp_get_port_id,
.get_starget_port_name = zfcp_get_port_name,
@@ -927,7 +793,10 @@ struct fc_function_template zfcp_transport_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
+ .show_host_maxframe_size = 1,
.show_host_serial_number = 1,
+ .show_host_speed = 1,
+ .show_host_port_id = 1,
};
/**
diff --git a/drivers/s390/scsi/zfcp_sysfs_adapter.c b/drivers/s390/scsi/zfcp_sysfs_adapter.c
index e7345a74800..0cd435280e7 100644
--- a/drivers/s390/scsi/zfcp_sysfs_adapter.c
+++ b/drivers/s390/scsi/zfcp_sysfs_adapter.c
@@ -62,21 +62,18 @@ static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, struct devi
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
-ZFCP_DEFINE_ADAPTER_ATTR(wwnn, "0x%016llx\n", adapter->wwnn);
-ZFCP_DEFINE_ADAPTER_ATTR(wwpn, "0x%016llx\n", adapter->wwpn);
-ZFCP_DEFINE_ADAPTER_ATTR(s_id, "0x%06x\n", adapter->s_id);
ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn);
ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn);
ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
+ZFCP_DEFINE_ADAPTER_ATTR(physical_wwpn, "0x%016llx\n", adapter->physical_wwpn);
+ZFCP_DEFINE_ADAPTER_ATTR(physical_s_id, "0x%06x\n", adapter->physical_s_id);
ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
-ZFCP_DEFINE_ADAPTER_ATTR(fc_link_speed, "%d Gb/s\n", adapter->fc_link_speed);
ZFCP_DEFINE_ADAPTER_ATTR(fc_service_class, "%d\n", adapter->fc_service_class);
ZFCP_DEFINE_ADAPTER_ATTR(fc_topology, "%s\n",
fc_topologies[adapter->fc_topology]);
ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n",
adapter->hardware_version);
-ZFCP_DEFINE_ADAPTER_ATTR(serial_number, "%17s\n", adapter->serial_number);
ZFCP_DEFINE_ADAPTER_ATTR(scsi_host_no, "0x%x\n", adapter->scsi_host_no);
ZFCP_DEFINE_ADAPTER_ATTR(in_recovery, "%d\n", atomic_test_mask
(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status));
@@ -255,21 +252,18 @@ static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_in_recovery.attr,
&dev_attr_port_remove.attr,
&dev_attr_port_add.attr,
- &dev_attr_wwnn.attr,
- &dev_attr_wwpn.attr,
- &dev_attr_s_id.attr,
&dev_attr_peer_wwnn.attr,
&dev_attr_peer_wwpn.attr,
&dev_attr_peer_d_id.attr,
+ &dev_attr_physical_wwpn.attr,
+ &dev_attr_physical_s_id.attr,
&dev_attr_card_version.attr,
&dev_attr_lic_version.attr,
- &dev_attr_fc_link_speed.attr,
&dev_attr_fc_service_class.attr,
&dev_attr_fc_topology.attr,
&dev_attr_scsi_host_no.attr,
&dev_attr_status.attr,
&dev_attr_hardware_version.attr,
- &dev_attr_serial_number.attr,
NULL
};
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index dbad7f35eb0..24ed5893b4f 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -14,7 +14,7 @@
#include <linux/major.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
-#include <linux/ioport.h> /* request_region, check_region */
+#include <linux/ioport.h> /* request_region */
#include <asm/atomic.h>
#include <asm/ebus.h> /* EBus device */
#include <asm/oplib.h> /* OpenProm Library */
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index a6ac61611f3..a748fbfb669 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -60,6 +60,7 @@
Remove un-needed eh_abort handler.
Add support for embedded firmware error strings.
2.26.02.003 - Correctly handle single sgl's with use_sg=1.
+ 2.26.02.004 - Add support for 9550SX controllers.
*/
#include <linux/module.h>
@@ -82,7 +83,7 @@
#include "3w-9xxx.h"
/* Globals */
-#define TW_DRIVER_VERSION "2.26.02.003"
+#define TW_DRIVER_VERSION "2.26.02.004"
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
@@ -892,11 +893,6 @@ static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
}
- if (status_reg_value & TW_STATUS_SBUF_WRITE_ERROR) {
- TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "SBUF Write Error: clearing");
- writel(TW_CONTROL_CLEAR_SBUF_WRITE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
- }
-
if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
if (tw_dev->reset_print == 0) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
@@ -930,6 +926,36 @@ out:
return retval;
} /* End twa_empty_response_queue() */
+/* This function will clear the pchip/response queue on 9550SX */
+static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
+{
+ u32 status_reg_value, response_que_value;
+ int count = 0, retval = 1;
+
+ if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9550SX) {
+ status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
+
+ while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
+ response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
+ if ((response_que_value & TW_9550SX_DRAIN_COMPLETED) == TW_9550SX_DRAIN_COMPLETED) {
+ /* P-chip settle time */
+ msleep(500);
+ retval = 0;
+ goto out;
+ }
+ status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
+ count++;
+ }
+ if (count == TW_MAX_RESPONSE_DRAIN)
+ goto out;
+
+ retval = 0;
+ } else
+ retval = 0;
+out:
+ return retval;
+} /* End twa_empty_response_queue_large() */
+
/* This function passes sense keys from firmware to scsi layer */
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
{
@@ -1613,8 +1639,16 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
while (tries < TW_MAX_RESET_TRIES) {
- if (do_soft_reset)
+ if (do_soft_reset) {
TW_SOFT_RESET(tw_dev);
+ /* Clear pchip/response queue on 9550SX */
+ if (twa_empty_response_queue_large(tw_dev)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
+ do_soft_reset = 1;
+ tries++;
+ continue;
+ }
+ }
/* Make sure controller is in a good state */
if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
@@ -2034,7 +2068,10 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
goto out_free_device_extension;
}
- mem_addr = pci_resource_start(pdev, 1);
+ if (pdev->device == PCI_DEVICE_ID_3WARE_9000)
+ mem_addr = pci_resource_start(pdev, 1);
+ else
+ mem_addr = pci_resource_start(pdev, 2);
/* Save base address */
tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE);
@@ -2148,6 +2185,8 @@ static void twa_remove(struct pci_dev *pdev)
static struct pci_device_id twa_pci_tbl[] __devinitdata = {
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
};
MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 8c8ecbed3b5..46f22cdc829 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -267,7 +267,6 @@ static twa_message_type twa_error_table[] = {
#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000
#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000
#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000
-#define TW_CONTROL_CLEAR_SBUF_WRITE_ERROR 0x00000008
/* Status register bit definitions */
#define TW_STATUS_MAJOR_VERSION_MASK 0xF0000000
@@ -285,9 +284,8 @@ static twa_message_type twa_error_table[] = {
#define TW_STATUS_MICROCONTROLLER_READY 0x00002000
#define TW_STATUS_COMMAND_QUEUE_EMPTY 0x00001000
#define TW_STATUS_EXPECTED_BITS 0x00002000
-#define TW_STATUS_UNEXPECTED_BITS 0x00F00008
-#define TW_STATUS_SBUF_WRITE_ERROR 0x00000008
-#define TW_STATUS_VALID_INTERRUPT 0x00DF0008
+#define TW_STATUS_UNEXPECTED_BITS 0x00F00000
+#define TW_STATUS_VALID_INTERRUPT 0x00DF0000
/* RESPONSE QUEUE BIT DEFINITIONS */
#define TW_RESPONSE_ID_MASK 0x00000FF0
@@ -324,9 +322,9 @@ static twa_message_type twa_error_table[] = {
/* Compatibility defines */
#define TW_9000_ARCH_ID 0x5
-#define TW_CURRENT_DRIVER_SRL 28
-#define TW_CURRENT_DRIVER_BUILD 9
-#define TW_CURRENT_DRIVER_BRANCH 4
+#define TW_CURRENT_DRIVER_SRL 30
+#define TW_CURRENT_DRIVER_BUILD 80
+#define TW_CURRENT_DRIVER_BRANCH 0
/* Phase defines */
#define TW_PHASE_INITIAL 0
@@ -334,6 +332,7 @@ static twa_message_type twa_error_table[] = {
#define TW_PHASE_SGLIST 2
/* Misc defines */
+#define TW_9550SX_DRAIN_COMPLETED 0xFFFF
#define TW_SECTOR_SIZE 512
#define TW_ALIGNMENT_9000 4 /* 4 bytes */
#define TW_ALIGNMENT_9000_SGL 0x3
@@ -417,6 +416,9 @@ static twa_message_type twa_error_table[] = {
#ifndef PCI_DEVICE_ID_3WARE_9000
#define PCI_DEVICE_ID_3WARE_9000 0x1002
#endif
+#ifndef PCI_DEVICE_ID_3WARE_9550SX
+#define PCI_DEVICE_ID_3WARE_9550SX 0x1003
+#endif
/* Bitmask macros to eliminate bitfields */
@@ -443,6 +445,7 @@ static twa_message_type twa_error_table[] = {
#define TW_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0x4)
#define TW_COMMAND_QUEUE_REG_ADDR(x) (sizeof(dma_addr_t) > 4 ? ((unsigned char __iomem *)x->base_addr + 0x20) : ((unsigned char __iomem *)x->base_addr + 0x8))
#define TW_RESPONSE_QUEUE_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0xC)
+#define TW_RESPONSE_QUEUE_REG_ADDR_LARGE(x) ((unsigned char __iomem *)x->base_addr + 0x30)
#define TW_CLEAR_ALL_INTERRUPTS(x) (writel(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
#define TW_CLEAR_ATTENTION_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
#define TW_CLEAR_HOST_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1e491ae9a7c..e2e3d867193 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_SCSI_DC395x) += dc395x.o
obj-$(CONFIG_SCSI_DC390T) += tmscsim.o
obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
+obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp.o
obj-$(CONFIG_SCSI_GDTH) += gdth.o
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a8e3dfcd0dc..93416f760e5 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -313,18 +313,37 @@ int aac_get_containers(struct aac_dev *dev)
}
dresp = (struct aac_mount *)fib_data(fibptr);
+ if ((le32_to_cpu(dresp->status) == ST_OK) &&
+ (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
+ dinfo->command = cpu_to_le32(VM_NameServe64);
+ dinfo->count = cpu_to_le32(index);
+ dinfo->type = cpu_to_le32(FT_FILESYS);
+
+ if (fib_send(ContainerCommand,
+ fibptr,
+ sizeof(struct aac_query_mount),
+ FsaNormal,
+ 1, 1,
+ NULL, NULL) < 0)
+ continue;
+ } else
+ dresp->mnt[0].capacityhigh = 0;
+
dprintk ((KERN_DEBUG
- "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%u\n",
+ "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%llu\n",
(int)index, (int)le32_to_cpu(dresp->status),
(int)le32_to_cpu(dresp->mnt[0].vol),
(int)le32_to_cpu(dresp->mnt[0].state),
- (unsigned)le32_to_cpu(dresp->mnt[0].capacity)));
+ ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
+ (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32)));
if ((le32_to_cpu(dresp->status) == ST_OK) &&
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
(le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
fsa_dev_ptr[index].valid = 1;
fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol);
- fsa_dev_ptr[index].size = le32_to_cpu(dresp->mnt[0].capacity);
+ fsa_dev_ptr[index].size
+ = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
+ (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
fsa_dev_ptr[index].ro = 1;
}
@@ -460,7 +479,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
* is updated in the struct fsa_dev_info structure rather than returned.
*/
-static int probe_container(struct aac_dev *dev, int cid)
+int probe_container(struct aac_dev *dev, int cid)
{
struct fsa_dev_info *fsa_dev_ptr;
int status;
@@ -497,11 +516,29 @@ static int probe_container(struct aac_dev *dev, int cid)
dresp = (struct aac_mount *) fib_data(fibptr);
if ((le32_to_cpu(dresp->status) == ST_OK) &&
+ (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
+ dinfo->command = cpu_to_le32(VM_NameServe64);
+ dinfo->count = cpu_to_le32(cid);
+ dinfo->type = cpu_to_le32(FT_FILESYS);
+
+ if (fib_send(ContainerCommand,
+ fibptr,
+ sizeof(struct aac_query_mount),
+ FsaNormal,
+ 1, 1,
+ NULL, NULL) < 0)
+ goto error;
+ } else
+ dresp->mnt[0].capacityhigh = 0;
+
+ if ((le32_to_cpu(dresp->status) == ST_OK) &&
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
(le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
fsa_dev_ptr[cid].valid = 1;
fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol);
- fsa_dev_ptr[cid].size = le32_to_cpu(dresp->mnt[0].capacity);
+ fsa_dev_ptr[cid].size
+ = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
+ (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
fsa_dev_ptr[cid].ro = 1;
}
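
The new capacityhigh field is what lets containers grow past 2^32 sectors; the driver reassembles the 64-bit size from the two little-endian 32-bit words exactly as in the expression above. A tiny standalone illustration of that arithmetic (plain C, the sample values are invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* sample values already converted from little endian (as with
	 * le32_to_cpu() in the patch); chosen only for illustration */
	uint32_t capacity     = 0x00001000;	/* low 32 bits of sector count */
	uint32_t capacityhigh = 0x00000002;	/* high 32 bits */

	uint64_t size = (uint64_t)capacity + ((uint64_t)capacityhigh << 32);

	printf("container size = %llu sectors\n", (unsigned long long)size);
	return 0;
}
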
@@ -655,7 +692,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
fibptr,
sizeof(*info),
FsaNormal,
- 1, 1,
+ -1, 1, /* First `interrupt' command uses special wait */
NULL,
NULL);
@@ -806,8 +843,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
if (!(dev->raw_io_interface)) {
dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
sizeof(struct aac_fibhdr) -
- sizeof(struct aac_write) + sizeof(struct sgmap)) /
- sizeof(struct sgmap);
+ sizeof(struct aac_write) + sizeof(struct sgentry)) /
+ sizeof(struct sgentry);
if (dev->dac_support) {
/*
* 38 scatter gather elements
@@ -816,8 +853,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
(dev->max_fib_size -
sizeof(struct aac_fibhdr) -
sizeof(struct aac_write64) +
- sizeof(struct sgmap64)) /
- sizeof(struct sgmap64);
+ sizeof(struct sgentry64)) /
+ sizeof(struct sgentry64);
}
dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
@@ -854,7 +891,40 @@ static void io_callback(void *context, struct fib * fibptr)
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
- dprintk((KERN_DEBUG "io_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3], jiffies));
+ if (nblank(dprintk(x))) {
+ u64 lba;
+ switch (scsicmd->cmnd[0]) {
+ case WRITE_6:
+ case READ_6:
+ lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
+ (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+ break;
+ case WRITE_16:
+ case READ_16:
+ lba = ((u64)scsicmd->cmnd[2] << 56) |
+ ((u64)scsicmd->cmnd[3] << 48) |
+ ((u64)scsicmd->cmnd[4] << 40) |
+ ((u64)scsicmd->cmnd[5] << 32) |
+ ((u64)scsicmd->cmnd[6] << 24) |
+ (scsicmd->cmnd[7] << 16) |
+ (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ break;
+ case WRITE_12:
+ case READ_12:
+ lba = ((u64)scsicmd->cmnd[2] << 24) |
+ (scsicmd->cmnd[3] << 16) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ break;
+ default:
+ lba = ((u64)scsicmd->cmnd[2] << 24) |
+ (scsicmd->cmnd[3] << 16) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ break;
+ }
+ printk(KERN_DEBUG
+ "io_callback[cpu %d]: lba = %llu, t = %ld.\n",
+ smp_processor_id(), (unsigned long long)lba, jiffies);
+ }
if (fibptr == NULL)
BUG();
@@ -895,7 +965,7 @@ static void io_callback(void *context, struct fib * fibptr)
static int aac_read(struct scsi_cmnd * scsicmd, int cid)
{
- u32 lba;
+ u64 lba;
u32 count;
int status;
@@ -907,23 +977,69 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
/*
* Get block address and transfer length
*/
- if (scsicmd->cmnd[0] == READ_6) /* 6 byte command */
- {
+ switch (scsicmd->cmnd[0]) {
+ case READ_6:
dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid));
- lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+ lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
+ (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
count = scsicmd->cmnd[4];
if (count == 0)
count = 256;
- } else {
+ break;
+ case READ_16:
+ dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", cid));
+
+ lba = ((u64)scsicmd->cmnd[2] << 56) |
+ ((u64)scsicmd->cmnd[3] << 48) |
+ ((u64)scsicmd->cmnd[4] << 40) |
+ ((u64)scsicmd->cmnd[5] << 32) |
+ ((u64)scsicmd->cmnd[6] << 24) |
+ (scsicmd->cmnd[7] << 16) |
+ (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ count = (scsicmd->cmnd[10] << 24) |
+ (scsicmd->cmnd[11] << 16) |
+ (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
+ break;
+ case READ_12:
+ dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", cid));
+
+ lba = ((u64)scsicmd->cmnd[2] << 24) |
+ (scsicmd->cmnd[3] << 16) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ count = (scsicmd->cmnd[6] << 24) |
+ (scsicmd->cmnd[7] << 16) |
+ (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ break;
+ default:
dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid));
- lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ lba = ((u64)scsicmd->cmnd[2] << 24) |
+ (scsicmd->cmnd[3] << 16) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
+ break;
}
- dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n",
+ dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
smp_processor_id(), (unsigned long long)lba, jiffies));
+ if ((!(dev->raw_io_interface) || !(dev->raw_io_64)) &&
+ (lba & 0xffffffff00000000LL)) {
+ dprintk((KERN_DEBUG "aac_read: Illegal lba\n"));
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR,
+ SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
+ 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
+ ? sizeof(scsicmd->sense_buffer)
+ : sizeof(dev->fsa_dev[cid].sense_data));
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
/*
* Allocate and initialize a Fib
*/
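
The switch above pulls the starting LBA and the transfer length straight out of the big-endian CDB bytes; the same shifts are easy to check in isolation. A standalone sketch of the READ(16) decode, with an invented CDB, plus the >32-bit LBA check that the non-raw_io_64 path needs:

#include <stdint.h>
#include <stdio.h>

/* Decode the LBA and transfer length of a READ(16) CDB, mirroring the
 * shifts used in aac_read() above. */
static void decode_read16(const uint8_t *cdb, uint64_t *lba, uint32_t *count)
{
	*lba = ((uint64_t)cdb[2] << 56) | ((uint64_t)cdb[3] << 48) |
	       ((uint64_t)cdb[4] << 40) | ((uint64_t)cdb[5] << 32) |
	       ((uint64_t)cdb[6] << 24) | ((uint64_t)cdb[7] << 16) |
	       ((uint64_t)cdb[8] <<  8) |  (uint64_t)cdb[9];
	*count = ((uint32_t)cdb[10] << 24) | ((uint32_t)cdb[11] << 16) |
		 ((uint32_t)cdb[12] <<  8) |  (uint32_t)cdb[13];
}

int main(void)
{
	/* READ(16) for LBA 4294967296 (the first block past 2 TiB with
	 * 512-byte blocks), 8 blocks -- values invented for the example */
	uint8_t cdb[16] = { 0x88, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0 };
	uint64_t lba;
	uint32_t count;

	decode_read16(cdb, &lba, &count);
	printf("lba=%llu count=%u\n", (unsigned long long)lba, count);

	/* a 32-bit-only path (no raw_io_64) must reject lba >= 2^32 */
	if (lba & 0xffffffff00000000ULL)
		printf("would fail with CHECK CONDITION on a 32-bit path\n");
	return 0;
}
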
@@ -936,8 +1052,8 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
if (dev->raw_io_interface) {
struct aac_raw_io *readcmd;
readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
- readcmd->block[0] = cpu_to_le32(lba);
- readcmd->block[1] = 0;
+ readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
+ readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
readcmd->count = cpu_to_le32(count<<9);
readcmd->cid = cpu_to_le16(cid);
readcmd->flags = cpu_to_le16(1);
@@ -964,7 +1080,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
readcmd->command = cpu_to_le32(VM_CtHostRead64);
readcmd->cid = cpu_to_le16(cid);
readcmd->sector_count = cpu_to_le16(count);
- readcmd->block = cpu_to_le32(lba);
+ readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->pad = 0;
readcmd->flags = 0;
@@ -989,7 +1105,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
readcmd->command = cpu_to_le32(VM_CtBlockRead);
readcmd->cid = cpu_to_le32(cid);
- readcmd->block = cpu_to_le32(lba);
+ readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->count = cpu_to_le32(count * 512);
aac_build_sg(scsicmd, &readcmd->sg);
@@ -1031,7 +1147,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
static int aac_write(struct scsi_cmnd * scsicmd, int cid)
{
- u32 lba;
+ u64 lba;
u32 count;
int status;
u16 fibsize;
@@ -1048,13 +1164,48 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
count = scsicmd->cmnd[4];
if (count == 0)
count = 256;
+ } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
+ dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", cid));
+
+ lba = ((u64)scsicmd->cmnd[2] << 56) |
+ ((u64)scsicmd->cmnd[3] << 48) |
+ ((u64)scsicmd->cmnd[4] << 40) |
+ ((u64)scsicmd->cmnd[5] << 32) |
+ ((u64)scsicmd->cmnd[6] << 24) |
+ (scsicmd->cmnd[7] << 16) |
+ (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
+ (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
+ } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
+ dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", cid));
+
+ lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
+ | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
+ | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
} else {
dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid));
- lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
}
- dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n",
+ dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
smp_processor_id(), (unsigned long long)lba, jiffies));
+ if ((!(dev->raw_io_interface) || !(dev->raw_io_64))
+ && (lba & 0xffffffff00000000LL)) {
+ dprintk((KERN_DEBUG "aac_write: Illegal lba\n"));
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+ set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR,
+ SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
+ 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
+ ? sizeof(scsicmd->sense_buffer)
+ : sizeof(dev->fsa_dev[cid].sense_data));
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
/*
* Allocate and initialize a Fib then setup a BlockWrite command
*/
@@ -1068,8 +1219,8 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
if (dev->raw_io_interface) {
struct aac_raw_io *writecmd;
writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
- writecmd->block[0] = cpu_to_le32(lba);
- writecmd->block[1] = 0;
+ writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
+ writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
writecmd->count = cpu_to_le32(count<<9);
writecmd->cid = cpu_to_le16(cid);
writecmd->flags = 0;
@@ -1096,7 +1247,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
writecmd->command = cpu_to_le32(VM_CtHostWrite64);
writecmd->cid = cpu_to_le16(cid);
writecmd->sector_count = cpu_to_le16(count);
- writecmd->block = cpu_to_le32(lba);
+ writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
writecmd->pad = 0;
writecmd->flags = 0;
@@ -1121,7 +1272,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
writecmd->command = cpu_to_le32(VM_CtBlockWrite);
writecmd->cid = cpu_to_le32(cid);
- writecmd->block = cpu_to_le32(lba);
+ writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
writecmd->count = cpu_to_le32(count * 512);
writecmd->sg.count = cpu_to_le32(1);
/* ->stable is not used - it did mean which type of write */
@@ -1310,11 +1461,18 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
*/
if ((fsa_dev_ptr[cid].valid & 1) == 0) {
switch (scsicmd->cmnd[0]) {
+ case SERVICE_ACTION_IN:
+ if (!(dev->raw_io_interface) ||
+ !(dev->raw_io_64) ||
+ ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
+ break;
case INQUIRY:
case READ_CAPACITY:
case TEST_UNIT_READY:
spin_unlock_irq(host->host_lock);
probe_container(dev, cid);
+ if ((fsa_dev_ptr[cid].valid & 1) == 0)
+ fsa_dev_ptr[cid].valid = 0;
spin_lock_irq(host->host_lock);
if (fsa_dev_ptr[cid].valid == 0) {
scsicmd->result = DID_NO_CONNECT << 16;
@@ -1375,7 +1533,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
memset(&inq_data, 0, sizeof (struct inquiry_data));
inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
- inq_data.inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */
inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
inq_data.inqd_len = 31;
/*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
@@ -1397,13 +1554,55 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
return aac_get_container_name(scsicmd, cid);
}
+ case SERVICE_ACTION_IN:
+ if (!(dev->raw_io_interface) ||
+ !(dev->raw_io_64) ||
+ ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
+ break;
+ {
+ u64 capacity;
+ char cp[12];
+ unsigned int offset = 0;
+
+ dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
+ capacity = fsa_dev_ptr[cid].size - 1;
+ if (scsicmd->cmnd[13] > 12) {
+ offset = scsicmd->cmnd[13] - 12;
+ if (offset > sizeof(cp))
+ break;
+ memset(cp, 0, offset);
+ aac_internal_transfer(scsicmd, cp, 0, offset);
+ }
+ cp[0] = (capacity >> 56) & 0xff;
+ cp[1] = (capacity >> 48) & 0xff;
+ cp[2] = (capacity >> 40) & 0xff;
+ cp[3] = (capacity >> 32) & 0xff;
+ cp[4] = (capacity >> 24) & 0xff;
+ cp[5] = (capacity >> 16) & 0xff;
+ cp[6] = (capacity >> 8) & 0xff;
+ cp[7] = (capacity >> 0) & 0xff;
+ cp[8] = 0;
+ cp[9] = 0;
+ cp[10] = 2;
+ cp[11] = 0;
+ aac_internal_transfer(scsicmd, cp, offset, sizeof(cp));
+
+ /* Do not cache partition table for arrays */
+ scsicmd->device->removable = 1;
+
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->scsi_done(scsicmd);
+
+ return 0;
+ }
+
case READ_CAPACITY:
{
u32 capacity;
char cp[8];
dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
- if (fsa_dev_ptr[cid].size <= 0x100000000LL)
+ if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
capacity = fsa_dev_ptr[cid].size - 1;
else
capacity = (u32)-1;
@@ -1417,6 +1616,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
cp[6] = 2;
cp[7] = 0;
aac_internal_transfer(scsicmd, cp, 0, sizeof(cp));
+ /* Do not cache partition table for arrays */
+ scsicmd->device->removable = 1;
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
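
Both capacity reports use the same layout idea: the last LBA in big-endian order followed by the block length (512 bytes, hence the 0x02 byte), with READ CAPACITY(10) capping the LBA field at 0xffffffff for arrays of 2^32 sectors or more. A standalone sketch of the two encodings (plain C, the example capacity is made up):

#include <stdint.h>
#include <stdio.h>

/* Fill READ CAPACITY(16) parameter data: 8-byte last LBA + 4-byte
 * block length, both big endian (mirrors the cp[] code above). */
static void fill_readcap16(uint8_t *cp, uint64_t last_lba, uint32_t blksz)
{
	int i;

	for (i = 0; i < 8; i++)
		cp[i] = (uint8_t)(last_lba >> (56 - 8 * i));
	for (i = 0; i < 4; i++)
		cp[8 + i] = (uint8_t)(blksz >> (24 - 8 * i));
}

/* READ CAPACITY(10): the returned LBA field is only 32 bits, so
 * anything over 2^32 sectors is reported as 0xffffffff. */
static uint32_t readcap10_lba(uint64_t nr_sectors)
{
	return (nr_sectors <= 0x100000000ULL) ? (uint32_t)(nr_sectors - 1)
					      : 0xffffffffU;
}

int main(void)
{
	uint8_t cp[12];
	uint64_t nr_sectors = 0x200000000ULL;	/* 4 TiB of 512-byte sectors */
	int i;

	fill_readcap16(cp, nr_sectors - 1, 512);
	printf("READ CAPACITY(16) data:");
	for (i = 0; i < 12; i++)
		printf(" %02x", cp[i]);
	printf("\nREAD CAPACITY(10) last LBA: 0x%08x\n",
	       readcap10_lba(nr_sectors));
	return 0;
}
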
@@ -1497,6 +1698,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
{
case READ_6:
case READ_10:
+ case READ_12:
+ case READ_16:
/*
* Hack to keep track of ordinal number of the device that
* corresponds to a container. Needed to convert
@@ -1504,17 +1707,19 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
*/
spin_unlock_irq(host->host_lock);
- if (scsicmd->request->rq_disk)
- memcpy(fsa_dev_ptr[cid].devname,
- scsicmd->request->rq_disk->disk_name,
- 8);
-
+ if (scsicmd->request->rq_disk)
+ strlcpy(fsa_dev_ptr[cid].devname,
+ scsicmd->request->rq_disk->disk_name,
+ min(sizeof(fsa_dev_ptr[cid].devname),
+ sizeof(scsicmd->request->rq_disk->disk_name) + 1));
ret = aac_read(scsicmd, cid);
spin_lock_irq(host->host_lock);
return ret;
case WRITE_6:
case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
spin_unlock_irq(host->host_lock);
ret = aac_write(scsicmd, cid);
spin_lock_irq(host->host_lock);
@@ -1745,6 +1950,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
case WRITE_10:
case READ_12:
case WRITE_12:
+ case READ_16:
+ case WRITE_16:
if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
} else {
@@ -1850,8 +2057,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
sizeof(scsicmd->sense_buffer) :
le32_to_cpu(srbreply->sense_data_size);
#ifdef AAC_DETAILED_STATUS_INFO
- dprintk((KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
- le32_to_cpu(srbreply->status), len));
+ printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
+ le32_to_cpu(srbreply->status), len);
#endif
memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index e40528185d4..4a99d2f000f 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1,6 +1,10 @@
#if (!defined(dprintk))
# define dprintk(x)
#endif
+/* eg: if (nblank(dprintk(x))) */
+#define _nblank(x) #x
+#define nblank(x) _nblank(x)[0]
+
/*------------------------------------------------------------------------------
* D E F I N E S
@@ -302,7 +306,6 @@ enum aac_queue_types {
*/
#define FsaNormal 1
-#define FsaHigh 2
/*
* Define the FIB. The FIB is the where all the requested data and
@@ -546,8 +549,6 @@ struct aac_queue {
/* This is only valid for adapter to host command queues. */
spinlock_t *lock; /* Spinlock for this queue must take this lock before accessing the lock */
spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */
- unsigned long SavedIrql; /* Previous IRQL when the spin lock is taken */
- u32 padding; /* Padding - FIXME - can remove I believe */
struct list_head cmdq; /* A queue of FIBs which need to be processed by the FS thread. This is */
/* only valid for command queues which receive entries from the adapter. */
struct list_head pendingq; /* A queue of outstanding fib's to the adapter. */
@@ -776,7 +777,9 @@ struct fsa_dev_info {
u64 last;
u64 size;
u32 type;
+ u32 config_waiting_on;
u16 queue_depth;
+ u8 config_needed;
u8 valid;
u8 ro;
u8 locked;
@@ -1012,6 +1015,7 @@ struct aac_dev
/* macro side-effects BEWARE */
# define raw_io_interface \
init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
+ u8 raw_io_64;
u8 printf_enabled;
};
@@ -1362,8 +1366,10 @@ struct aac_srb_reply
#define VM_CtBlockVerify64 18
#define VM_CtHostRead64 19
#define VM_CtHostWrite64 20
+#define VM_DrvErrTblLog 21
+#define VM_NameServe64 22
-#define MAX_VMCOMMAND_NUM 21 /* used for sizing stats array - leave last */
+#define MAX_VMCOMMAND_NUM 23 /* used for sizing stats array - leave last */
/*
* Descriptive information (eg, vital stats)
@@ -1472,6 +1478,7 @@ struct aac_mntent {
manager (eg, filesystem) */
__le32 altoid; /* != oid <==> snapshot or
broken mirror exists */
+ __le32 capacityhigh;
};
#define FSCS_NOTCLEAN 0x0001 /* fsck is necessary before mounting */
@@ -1707,6 +1714,7 @@ extern struct aac_common aac_config;
#define AifCmdJobProgress 2 /* Progress report */
#define AifJobCtrZero 101 /* Array Zero progress */
#define AifJobStsSuccess 1 /* Job completes */
+#define AifJobStsRunning 102 /* Job running */
#define AifCmdAPIReport 3 /* Report from other user of API */
#define AifCmdDriverNotify 4 /* Notify host driver of event */
#define AifDenMorphComplete 200 /* A morph operation completed */
@@ -1777,6 +1785,7 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size);
struct aac_driver_ident* aac_get_driver_ident(int devtype);
int aac_get_adapter_info(struct aac_dev* dev);
int aac_send_shutdown(struct aac_dev *dev);
+int probe_container(struct aac_dev *dev, int cid);
extern int numacb;
extern int acbsize;
extern char aac_driver_version[];
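
The nblank() helper added at the top of this header stringifies its argument and tests the first character, so nblank(dprintk(x)) is a compile-time constant: zero when dprintk() expands to nothing and non-zero when debugging is enabled. That is what gates the per-opcode LBA decoding block in io_callback() without an extra #ifdef. A standalone demonstration with userspace stand-ins for the kernel macros:

#include <stdio.h>

/* userspace stand-ins for the kernel macros: define DEBUG_BUILD to get
 * the "dprintk expands to a real print" behaviour */
#ifdef DEBUG_BUILD
# define dprintk(x) printf x
#else
# define dprintk(x)
#endif

/* same trick as in aacraid.h: stringify the expansion and test whether
 * it is empty */
#define _nblank(x) #x
#define nblank(x) _nblank(x)[0]

int main(void)
{
	if (nblank(dprintk(x))) {
		/* only reached when dprintk is live */
		printf("debug build: extra diagnostics enabled\n");
	} else {
		printf("non-debug build: diagnostic block skipped\n");
	}
	return 0;
}
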
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 75abd045328..59a341b2aed 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -195,7 +195,7 @@ int aac_send_shutdown(struct aac_dev * dev)
fibctx,
sizeof(struct aac_close),
FsaNormal,
- 1, 1,
+ -2 /* Timeout silently */, 1,
NULL, NULL);
if (status == 0)
@@ -313,8 +313,15 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
dev->max_fib_size = sizeof(struct hw_fib);
dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
- sizeof(struct aac_fibhdr)
- - sizeof(struct aac_write) + sizeof(struct sgmap))
- / sizeof(struct sgmap);
+ - sizeof(struct aac_write) + sizeof(struct sgentry))
+ / sizeof(struct sgentry);
+ dev->raw_io_64 = 0;
+ if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
+ 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
+ (status[0] == 0x00000001)) {
+ if (status[1] & AAC_OPT_NEW_COMM_64)
+ dev->raw_io_64 = 1;
+ }
if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
0, 0, 0, 0, 0, 0,
status+0, status+1, status+2, status+3, status+4))
@@ -342,8 +349,8 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
dev->max_fib_size = 512;
dev->sg_tablesize = host->sg_tablesize
= (512 - sizeof(struct aac_fibhdr)
- - sizeof(struct aac_write) + sizeof(struct sgmap))
- / sizeof(struct sgmap);
+ - sizeof(struct aac_write) + sizeof(struct sgentry))
+ / sizeof(struct sgentry);
host->can_queue = AAC_NUM_IO_FIB;
} else if (acbsize == 2048) {
host->max_sectors = 512;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index a1d303f0348..e4d543a474a 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -39,7 +39,9 @@
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
#include <asm/semaphore.h>
+#include <asm/delay.h>
#include "aacraid.h"
@@ -269,40 +271,22 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
/* Interrupt Moderation, only interrupt for first two entries */
if (idx != le32_to_cpu(*(q->headers.consumer))) {
if (--idx == 0) {
- if (qid == AdapHighCmdQueue)
- idx = ADAP_HIGH_CMD_ENTRIES;
- else if (qid == AdapNormCmdQueue)
+ if (qid == AdapNormCmdQueue)
idx = ADAP_NORM_CMD_ENTRIES;
- else if (qid == AdapHighRespQueue)
- idx = ADAP_HIGH_RESP_ENTRIES;
- else if (qid == AdapNormRespQueue)
+ else
idx = ADAP_NORM_RESP_ENTRIES;
}
if (idx != le32_to_cpu(*(q->headers.consumer)))
*nonotify = 1;
}
- if (qid == AdapHighCmdQueue) {
- if (*index >= ADAP_HIGH_CMD_ENTRIES)
- *index = 0;
- } else if (qid == AdapNormCmdQueue) {
+ if (qid == AdapNormCmdQueue) {
if (*index >= ADAP_NORM_CMD_ENTRIES)
*index = 0; /* Wrap to front of the Producer Queue. */
- }
- else if (qid == AdapHighRespQueue)
- {
- if (*index >= ADAP_HIGH_RESP_ENTRIES)
- *index = 0;
- }
- else if (qid == AdapNormRespQueue)
- {
+ } else {
if (*index >= ADAP_NORM_RESP_ENTRIES)
*index = 0; /* Wrap to front of the Producer Queue. */
}
- else {
- printk("aacraid: invalid qid\n");
- BUG();
- }
if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
@@ -334,12 +318,8 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
{
struct aac_entry * entry = NULL;
int map = 0;
- struct aac_queue * q = &dev->queues->queue[qid];
-
- spin_lock_irqsave(q->lock, q->SavedIrql);
- if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue)
- {
+ if (qid == AdapNormCmdQueue) {
/* if no entries wait for some if caller wants to */
while (!aac_get_entry(dev, qid, &entry, index, nonotify))
{
@@ -350,9 +330,7 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
*/
entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
map = 1;
- }
- else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
- {
+ } else {
while(!aac_get_entry(dev, qid, &entry, index, nonotify))
{
/* if no entries wait for some if caller wants to */
@@ -375,42 +353,6 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
return 0;
}
-
-/**
- * aac_insert_entry - insert a queue entry
- * @dev: Adapter
- * @index: Index of entry to insert
- * @qid: Queue number
- * @nonotify: Suppress adapter notification
- *
- * Gets the next free QE off the requested priorty adapter command
- * queue and associates the Fib with the QE. The QE represented by
- * index is ready to insert on the queue when this routine returns
- * success.
- */
-
-static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify)
-{
- struct aac_queue * q = &dev->queues->queue[qid];
-
- if(q == NULL)
- BUG();
- *(q->headers.producer) = cpu_to_le32(index + 1);
- spin_unlock_irqrestore(q->lock, q->SavedIrql);
-
- if (qid == AdapHighCmdQueue ||
- qid == AdapNormCmdQueue ||
- qid == AdapHighRespQueue ||
- qid == AdapNormRespQueue)
- {
- if (!nonotify)
- aac_adapter_notify(dev, qid);
- }
- else
- printk("Suprise insert!\n");
- return 0;
-}
-
/*
* Define the highest level of host to adapter communication routines.
* These routines will support host to adapter FS communication. These
@@ -439,12 +381,13 @@ static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned l
int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
{
u32 index;
- u32 qid;
struct aac_dev * dev = fibptr->dev;
unsigned long nointr = 0;
struct hw_fib * hw_fib = fibptr->hw_fib;
struct aac_queue * q;
unsigned long flags = 0;
+ unsigned long qflags;
+
if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
return -EBUSY;
/*
@@ -497,26 +440,8 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
* Get a queue entry connect the FIB to it and send an notify
* the adapter a command is ready.
*/
- if (priority == FsaHigh) {
- hw_fib->header.XferState |= cpu_to_le32(HighPriority);
- qid = AdapHighCmdQueue;
- } else {
- hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
- qid = AdapNormCmdQueue;
- }
- q = &dev->queues->queue[qid];
+ hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
- if(wait)
- spin_lock_irqsave(&fibptr->event_lock, flags);
- if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
- return -EWOULDBLOCK;
- dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
- dprintk((KERN_DEBUG "Fib contents:.\n"));
- dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
- dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
- dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
- dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
- dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
/*
* Fill in the Callback and CallbackContext if we are not
* going to wait.
@@ -525,22 +450,67 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
fibptr->callback = callback;
fibptr->callback_data = callback_data;
}
- FIB_COUNTER_INCREMENT(aac_config.FibsSent);
- list_add_tail(&fibptr->queue, &q->pendingq);
- q->numpending++;
fibptr->done = 0;
fibptr->flags = 0;
- if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
- return -EWOULDBLOCK;
+ FIB_COUNTER_INCREMENT(aac_config.FibsSent);
+
+ dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
+ dprintk((KERN_DEBUG "Fib contents:.\n"));
+ dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
+ dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
+ dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
+ dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
+ dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
+
+ q = &dev->queues->queue[AdapNormCmdQueue];
+
+ if(wait)
+ spin_lock_irqsave(&fibptr->event_lock, flags);
+ spin_lock_irqsave(q->lock, qflags);
+ aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);
+
+ list_add_tail(&fibptr->queue, &q->pendingq);
+ q->numpending++;
+ *(q->headers.producer) = cpu_to_le32(index + 1);
+ spin_unlock_irqrestore(q->lock, qflags);
+ if (!(nointr & aac_config.irq_mod))
+ aac_adapter_notify(dev, AdapNormCmdQueue);
/*
* If the caller wanted us to wait for response wait now.
*/
if (wait) {
spin_unlock_irqrestore(&fibptr->event_lock, flags);
- down(&fibptr->event_wait);
+ /* Only set for first known interruptible command */
+ if (wait < 0) {
+ /*
+ * It is *VERY* dangerous to time out a command; the
+ * assumption is that we have no hope of functioning
+ * because an interrupt routing or other hardware
+ * failure has occurred.
+ */
+ unsigned long count = 36000000L; /* 3 minutes */
+ unsigned long qflags;
+ while (down_trylock(&fibptr->event_wait)) {
+ if (--count == 0) {
+ spin_lock_irqsave(q->lock, qflags);
+ q->numpending--;
+ list_del(&fibptr->queue);
+ spin_unlock_irqrestore(q->lock, qflags);
+ if (wait == -1) {
+ printk(KERN_ERR "aacraid: fib_send: first asynchronous command timed out.\n"
+ "Usually a result of a PCI interrupt routing problem;\n"
+ "update mother board BIOS or consider utilizing one of\n"
+ "the SAFE mode kernel options (acpi, apic etc)\n");
+ }
+ return -ETIMEDOUT;
+ }
+ udelay(5);
+ }
+ } else
+ down(&fibptr->event_wait);
if(fibptr->done == 0)
BUG();
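
For the first interrupt-dependent command (wait < 0) the code above deliberately avoids a blocking down(): it polls the completion semaphore with down_trylock(), delaying 5 microseconds per attempt, and converts roughly three minutes of silence into -ETIMEDOUT plus a diagnostic instead of an unkillable hang. A standalone model of that polling pattern using POSIX semaphores (the attempt counts are shrunk so the demo finishes quickly):

#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>

/* Poll a semaphore the way fib_send() does for wait < 0: try-acquire
 * in a loop with a short delay, give up after a bounded number of
 * attempts.  The counts here are tiny so the demo finishes quickly. */
static int wait_for_completion_polled(sem_t *done, unsigned long attempts)
{
	while (sem_trywait(done) != 0) {
		if (--attempts == 0)
			return -ETIMEDOUT;	/* nobody ever posted */
		usleep(5);			/* stand-in for udelay(5) */
	}
	return 0;
}

int main(void)
{
	sem_t done;

	sem_init(&done, 0, 0);
	printf("no completion: %d\n", wait_for_completion_polled(&done, 1000));

	sem_post(&done);	/* simulate the interrupt handler completing */
	printf("completed:    %d\n", wait_for_completion_polled(&done, 1000));

	sem_destroy(&done);
	return 0;
}
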
@@ -622,15 +592,9 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
case HostNormCmdQueue:
notify = HostNormCmdNotFull;
break;
- case HostHighCmdQueue:
- notify = HostHighCmdNotFull;
- break;
case HostNormRespQueue:
notify = HostNormRespNotFull;
break;
- case HostHighRespQueue:
- notify = HostHighRespNotFull;
- break;
default:
BUG();
return;
@@ -652,9 +616,13 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
{
struct hw_fib * hw_fib = fibptr->hw_fib;
struct aac_dev * dev = fibptr->dev;
+ struct aac_queue * q;
unsigned long nointr = 0;
- if (hw_fib->header.XferState == 0)
+ unsigned long qflags;
+
+ if (hw_fib->header.XferState == 0) {
return 0;
+ }
/*
* If we plan to do anything check the structure type first.
*/
@@ -669,37 +637,21 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
* send the completed cdb to the adapter.
*/
if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
+ u32 index;
hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
- if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
- u32 index;
- if (size)
- {
- size += sizeof(struct aac_fibhdr);
- if (size > le16_to_cpu(hw_fib->header.SenderSize))
- return -EMSGSIZE;
- hw_fib->header.Size = cpu_to_le16(size);
- }
- if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
- return -EWOULDBLOCK;
- }
- if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) {
- }
- } else if (hw_fib->header.XferState &
- cpu_to_le32(NormalPriority)) {
- u32 index;
-
- if (size) {
- size += sizeof(struct aac_fibhdr);
- if (size > le16_to_cpu(hw_fib->header.SenderSize))
- return -EMSGSIZE;
- hw_fib->header.Size = cpu_to_le16(size);
- }
- if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
- return -EWOULDBLOCK;
- if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0)
- {
- }
+ if (size) {
+ size += sizeof(struct aac_fibhdr);
+ if (size > le16_to_cpu(hw_fib->header.SenderSize))
+ return -EMSGSIZE;
+ hw_fib->header.Size = cpu_to_le16(size);
}
+ q = &dev->queues->queue[AdapNormRespQueue];
+ spin_lock_irqsave(q->lock, qflags);
+ aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
+ *(q->headers.producer) = cpu_to_le32(index + 1);
+ spin_unlock_irqrestore(q->lock, qflags);
+ if (!(nointr & (int)aac_config.irq_mod))
+ aac_adapter_notify(dev, AdapNormRespQueue);
}
else
{
@@ -791,6 +743,268 @@ void aac_printf(struct aac_dev *dev, u32 val)
memset(cp, 0, 256);
}
+
+/**
+ * aac_handle_aif - Handle a message from the firmware
+ * @dev: Which adapter this fib is from
+ * @fibptr: Pointer to fibptr from adapter
+ *
+ * This routine handles a driver notify fib from the adapter and
+ * dispatches it to the appropriate routine for handling.
+ */
+
+static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
+{
+ struct hw_fib * hw_fib = fibptr->hw_fib;
+ struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
+ int busy;
+ u32 container;
+ struct scsi_device *device;
+ enum {
+ NOTHING,
+ DELETE,
+ ADD,
+ CHANGE
+ } device_config_needed;
+
+ /* Sniff for container changes */
+
+ if (!dev)
+ return;
+ container = (u32)-1;
+
+ /*
+ * We have set this up to try and minimize the number of
+ * re-configures that take place. As a result, when certain
+ * AIFs come in we set a flag and wait for another type of
+ * AIF before setting the re-config flag.
+ */
+ switch (le32_to_cpu(aifcmd->command)) {
+ case AifCmdDriverNotify:
+ switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
+ /*
+ * Morph or Expand complete
+ */
+ case AifDenMorphComplete:
+ case AifDenVolumeExtendComplete:
+ container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+ if (container >= dev->maximum_num_containers)
+ break;
+
+ /*
+ * Find the Scsi_Device associated with the SCSI
+ * address. Make sure we have the right array, and if
+ * so set the flag to initiate a new re-config once we
+ * see an AifEnConfigChange AIF come through.
+ */
+
+ if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
+ device = scsi_device_lookup(dev->scsi_host_ptr,
+ CONTAINER_TO_CHANNEL(container),
+ CONTAINER_TO_ID(container),
+ CONTAINER_TO_LUN(container));
+ if (device) {
+ dev->fsa_dev[container].config_needed = CHANGE;
+ dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
+ scsi_device_put(device);
+ }
+ }
+ }
+
+ /*
+ * If we are waiting on something and this happens to be
+ * that thing then set the re-configure flag.
+ */
+ if (container != (u32)-1) {
+ if (container >= dev->maximum_num_containers)
+ break;
+ if (dev->fsa_dev[container].config_waiting_on ==
+ le32_to_cpu(*(u32 *)aifcmd->data))
+ dev->fsa_dev[container].config_waiting_on = 0;
+ } else for (container = 0;
+ container < dev->maximum_num_containers; ++container) {
+ if (dev->fsa_dev[container].config_waiting_on ==
+ le32_to_cpu(*(u32 *)aifcmd->data))
+ dev->fsa_dev[container].config_waiting_on = 0;
+ }
+ break;
+
+ case AifCmdEventNotify:
+ switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
+ /*
+ * Add an Array.
+ */
+ case AifEnAddContainer:
+ container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+ if (container >= dev->maximum_num_containers)
+ break;
+ dev->fsa_dev[container].config_needed = ADD;
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnConfigChange;
+ break;
+
+ /*
+ * Delete an Array.
+ */
+ case AifEnDeleteContainer:
+ container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+ if (container >= dev->maximum_num_containers)
+ break;
+ dev->fsa_dev[container].config_needed = DELETE;
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnConfigChange;
+ break;
+
+ /*
+ * Container change detected. If we currently are not
+ * waiting on something else, setup to wait on a Config Change.
+ */
+ case AifEnContainerChange:
+ container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+ if (container >= dev->maximum_num_containers)
+ break;
+ if (dev->fsa_dev[container].config_waiting_on)
+ break;
+ dev->fsa_dev[container].config_needed = CHANGE;
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnConfigChange;
+ break;
+
+ case AifEnConfigChange:
+ break;
+
+ }
+
+ /*
+ * If we are waiting on something and this happens to be
+ * that thing then set the re-configure flag.
+ */
+ if (container != (u32)-1) {
+ if (container >= dev->maximum_num_containers)
+ break;
+ if (dev->fsa_dev[container].config_waiting_on ==
+ le32_to_cpu(*(u32 *)aifcmd->data))
+ dev->fsa_dev[container].config_waiting_on = 0;
+ } else for (container = 0;
+ container < dev->maximum_num_containers; ++container) {
+ if (dev->fsa_dev[container].config_waiting_on ==
+ le32_to_cpu(*(u32 *)aifcmd->data))
+ dev->fsa_dev[container].config_waiting_on = 0;
+ }
+ break;
+
+ case AifCmdJobProgress:
+ /*
+ * These are job progress AIFs. When a Clear is being done on a
+ * container it is initially created and then hidden from the OS.
+ * When the clear completes we don't get a config change, so we
+ * monitor for job status completion on a clear and then wait for
+ * a container change.
+ */
+
+ if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
+ && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
+ || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
+ for (container = 0;
+ container < dev->maximum_num_containers;
+ ++container) {
+ /*
+ * Stomp on all config sequencing for all
+ * containers?
+ */
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnContainerChange;
+ dev->fsa_dev[container].config_needed = ADD;
+ }
+ }
+ if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
+ && (((u32 *)aifcmd->data)[6] == 0)
+ && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
+ for (container = 0;
+ container < dev->maximum_num_containers;
+ ++container) {
+ /*
+ * Stomp on all config sequencing for all
+ * containers?
+ */
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnContainerChange;
+ dev->fsa_dev[container].config_needed = DELETE;
+ }
+ }
+ break;
+ }
+
+ device_config_needed = NOTHING;
+ for (container = 0; container < dev->maximum_num_containers;
+ ++container) {
+ if ((dev->fsa_dev[container].config_waiting_on == 0)
+ && (dev->fsa_dev[container].config_needed != NOTHING)) {
+ device_config_needed =
+ dev->fsa_dev[container].config_needed;
+ dev->fsa_dev[container].config_needed = NOTHING;
+ break;
+ }
+ }
+ if (device_config_needed == NOTHING)
+ return;
+
+ /*
+ * If we decided that a re-configuration needs to be done,
+ * schedule it here on the way out the door, please close the door
+ * behind you.
+ */
+
+ busy = 0;
+
+
+ /*
+ * Find the Scsi_Device associated with the SCSI address,
+ * and mark it as changed, invalidating the cache. This deals
+ * with changes to existing device IDs.
+ */
+
+ if (!dev || !dev->scsi_host_ptr)
+ return;
+ /*
+ * force reload of disk info via probe_container
+ */
+ if ((device_config_needed == CHANGE)
+ && (dev->fsa_dev[container].valid == 1))
+ dev->fsa_dev[container].valid = 2;
+ if ((device_config_needed == CHANGE) ||
+ (device_config_needed == ADD))
+ probe_container(dev, container);
+ device = scsi_device_lookup(dev->scsi_host_ptr,
+ CONTAINER_TO_CHANNEL(container),
+ CONTAINER_TO_ID(container),
+ CONTAINER_TO_LUN(container));
+ if (device) {
+ switch (device_config_needed) {
+ case DELETE:
+ scsi_remove_device(device);
+ break;
+ case CHANGE:
+ if (!dev->fsa_dev[container].valid) {
+ scsi_remove_device(device);
+ break;
+ }
+ scsi_rescan_device(&device->sdev_gendev);
+
+ default:
+ break;
+ }
+ scsi_device_put(device);
+ }
+ if (device_config_needed == ADD) {
+ scsi_add_device(dev->scsi_host_ptr,
+ CONTAINER_TO_CHANNEL(container),
+ CONTAINER_TO_ID(container),
+ CONTAINER_TO_LUN(container));
+ }
+
+}
+
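A condensed sketch of the config_waiting_on / config_needed handshake implemented above, for a single container (field and event names are taken from the function; this is a reading aid, not additional driver code):

	/* phase 1: a notify/event AIF arms the container and records
	 * which AIF must arrive before acting on it */
	dev->fsa_dev[container].config_needed     = ADD;   /* or DELETE / CHANGE */
	dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;

	/* phase 2: when the awaited AIF arrives, the wait is cleared */
	if (dev->fsa_dev[container].config_waiting_on ==
	    le32_to_cpu(*(u32 *)aifcmd->data))
		dev->fsa_dev[container].config_waiting_on = 0;

	/* epilogue: the first container that is armed and no longer waiting
	 * drives scsi_add_device() / scsi_remove_device() / scsi_rescan_device() */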
/**
* aac_command_thread - command processing thread
* @dev: Adapter to monitor
@@ -805,7 +1019,6 @@ int aac_command_thread(struct aac_dev * dev)
{
struct hw_fib *hw_fib, *hw_newfib;
struct fib *fib, *newfib;
- struct aac_queue_block *queues = dev->queues;
struct aac_fib_context *fibctx;
unsigned long flags;
DECLARE_WAITQUEUE(wait, current);
@@ -825,21 +1038,22 @@ int aac_command_thread(struct aac_dev * dev)
* Let the DPC know it has a place to send the AIF's to.
*/
dev->aif_thread = 1;
- add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+ add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
set_current_state(TASK_INTERRUPTIBLE);
+ dprintk ((KERN_INFO "aac_command_thread start\n"));
while(1)
{
- spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
- while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
+ spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
+ while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
struct list_head *entry;
struct aac_aifcmd * aifcmd;
set_current_state(TASK_RUNNING);
-
- entry = queues->queue[HostNormCmdQueue].cmdq.next;
+
+ entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
list_del(entry);
-
- spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
+
+ spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
fib = list_entry(entry, struct fib, fiblink);
/*
* We will process the FIB here or pass it to a
@@ -860,6 +1074,7 @@ int aac_command_thread(struct aac_dev * dev)
aifcmd = (struct aac_aifcmd *) hw_fib->data;
if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
/* Handle Driver Notify Events */
+ aac_handle_aif(dev, fib);
*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
fib_adapter_complete(fib, (u16)sizeof(u32));
} else {
@@ -869,9 +1084,62 @@ int aac_command_thread(struct aac_dev * dev)
u32 time_now, time_last;
unsigned long flagv;
-
+ unsigned num;
+ struct hw_fib ** hw_fib_pool, ** hw_fib_p;
+ struct fib ** fib_pool, ** fib_p;
+
+ /* Sniff events */
+ if ((aifcmd->command ==
+ cpu_to_le32(AifCmdEventNotify)) ||
+ (aifcmd->command ==
+ cpu_to_le32(AifCmdJobProgress))) {
+ aac_handle_aif(dev, fib);
+ }
+
time_now = jiffies/HZ;
+ /*
+ * Warning: no sleep allowed while
+ * holding spinlock. We take the estimate
+ * and pre-allocate a set of fibs outside the
+ * lock.
+ */
+ num = le32_to_cpu(dev->init->AdapterFibsSize)
+ / sizeof(struct hw_fib); /* some extra */
+ spin_lock_irqsave(&dev->fib_lock, flagv);
+ entry = dev->fib_list.next;
+ while (entry != &dev->fib_list) {
+ entry = entry->next;
+ ++num;
+ }
+ spin_unlock_irqrestore(&dev->fib_lock, flagv);
+ hw_fib_pool = NULL;
+ fib_pool = NULL;
+ if (num
+ && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
+ && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
+ hw_fib_p = hw_fib_pool;
+ fib_p = fib_pool;
+ while (hw_fib_p < &hw_fib_pool[num]) {
+ if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
+ --hw_fib_p;
+ break;
+ }
+ if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
+ kfree(*(--hw_fib_p));
+ break;
+ }
+ }
+ if ((num = hw_fib_p - hw_fib_pool) == 0) {
+ kfree(fib_pool);
+ fib_pool = NULL;
+ kfree(hw_fib_pool);
+ hw_fib_pool = NULL;
+ }
+ } else if (hw_fib_pool) {
+ kfree(hw_fib_pool);
+ hw_fib_pool = NULL;
+ }
spin_lock_irqsave(&dev->fib_lock, flagv);
entry = dev->fib_list.next;
/*
@@ -880,6 +1148,8 @@ int aac_command_thread(struct aac_dev * dev)
* fib, and then set the event to wake up the
* thread that is waiting for it.
*/
+ hw_fib_p = hw_fib_pool;
+ fib_p = fib_pool;
while (entry != &dev->fib_list) {
/*
* Extract the fibctx
@@ -912,9 +1182,11 @@ int aac_command_thread(struct aac_dev * dev)
* Warning: no sleep allowed while
* holding spinlock
*/
- hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
- newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
- if (newfib && hw_newfib) {
+ if (hw_fib_p < &hw_fib_pool[num]) {
+ hw_newfib = *hw_fib_p;
+ *(hw_fib_p++) = NULL;
+ newfib = *fib_p;
+ *(fib_p++) = NULL;
/*
* Make the copy of the FIB
*/
@@ -929,15 +1201,11 @@ int aac_command_thread(struct aac_dev * dev)
fibctx->count++;
/*
* Set the event to wake up the
- * thread that will waiting.
+ * thread that is waiting.
*/
up(&fibctx->wait_sem);
} else {
printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
- if(newfib)
- kfree(newfib);
- if(hw_newfib)
- kfree(hw_newfib);
}
entry = entry->next;
}
@@ -947,21 +1215,38 @@ int aac_command_thread(struct aac_dev * dev)
*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
fib_adapter_complete(fib, sizeof(u32));
spin_unlock_irqrestore(&dev->fib_lock, flagv);
+ /* Free up the remaining resources */
+ hw_fib_p = hw_fib_pool;
+ fib_p = fib_pool;
+ while (hw_fib_p < &hw_fib_pool[num]) {
+ if (*hw_fib_p)
+ kfree(*hw_fib_p);
+ if (*fib_p)
+ kfree(*fib_p);
+ ++fib_p;
+ ++hw_fib_p;
+ }
+ if (hw_fib_pool)
+ kfree(hw_fib_pool);
+ if (fib_pool)
+ kfree(fib_pool);
}
- spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
kfree(fib);
+ spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
}
/*
* There are no more AIF's
*/
- spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
+ spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
schedule();
if(signal_pending(current))
break;
set_current_state(TASK_INTERRUPTIBLE);
}
- remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+ if (dev->queues)
+ remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
dev->aif_thread = 0;
complete_and_exit(&dev->aif_completion, 0);
+ return 0;
}
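The aac_command_thread() hunks above replace per-entry GFP_ATOMIC allocations with a pool built before dev->fib_lock is taken; a condensed sketch of that pattern (names from the code, error handling trimmed):

	/* outside the lock: sleeping GFP_KERNEL allocations are allowed */
	hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL);
	fib_pool    = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL);
	/* ... fill both pools entry by entry, shrinking num on failure ... */

	spin_lock_irqsave(&dev->fib_lock, flagv);
	/* under the lock: only consume, never allocate */
	if (hw_fib_p < &hw_fib_pool[num]) {
		hw_newfib = *hw_fib_p; *(hw_fib_p++) = NULL;
		newfib    = *fib_p;    *(fib_p++)    = NULL;
	}
	spin_unlock_irqrestore(&dev->fib_lock, flagv);

	/* after unlocking: kfree() every pool entry that was not handed out */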
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4ff29d7f582..de8490a9283 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -748,7 +748,8 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
unique_id++;
}
- if (pci_enable_device(pdev))
+ error = pci_enable_device(pdev);
+ if (error)
goto out;
if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL) ||
@@ -772,6 +773,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
shost->irq = pdev->irq;
shost->base = pci_resource_start(pdev, 0);
shost->unique_id = unique_id;
+ shost->max_cmd_len = 16;
aac = (struct aac_dev *)shost->hostdata;
aac->scsi_host_ptr = shost;
@@ -799,7 +801,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
goto out_free_fibs;
aac->maximum_num_channels = aac_drivers[index].channels;
- aac_get_adapter_info(aac);
+ error = aac_get_adapter_info(aac);
+ if (error < 0)
+ goto out_deinit;
/*
* Lets override negotiations and drop the maximum SG limit to 34
@@ -927,8 +931,8 @@ static int __init aac_init(void)
printk(KERN_INFO "Adaptec %s driver (%s)\n",
AAC_DRIVERNAME, aac_driver_version);
- error = pci_module_init(&aac_pci_driver);
- if (error)
+ error = pci_register_driver(&aac_pci_driver);
+ if (error < 0)
return error;
aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops);
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c
index 70c5fb59c9e..d754b326786 100644
--- a/drivers/scsi/aic7xxx/aic7770_osm.c
+++ b/drivers/scsi/aic7xxx/aic7770_osm.c
@@ -112,6 +112,9 @@ aic7770_remove(struct device *dev)
struct ahc_softc *ahc = dev_get_drvdata(dev);
u_long s;
+ if (ahc->platform_data && ahc->platform_data->host)
+ scsi_remove_host(ahc->platform_data->host);
+
ahc_lock(ahc, &s);
ahc_intr_enable(ahc, FALSE);
ahc_unlock(ahc, &s);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 6b6d4e28779..95c285cc83e 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -1192,11 +1192,6 @@ ahd_platform_free(struct ahd_softc *ahd)
int i, j;
if (ahd->platform_data != NULL) {
- if (ahd->platform_data->host != NULL) {
- scsi_remove_host(ahd->platform_data->host);
- scsi_host_put(ahd->platform_data->host);
- }
-
/* destroy all of the device and target objects */
for (i = 0; i < AHD_NUM_TARGETS; i++) {
starget = ahd->platform_data->starget[i];
@@ -1226,6 +1221,9 @@ ahd_platform_free(struct ahd_softc *ahd)
release_mem_region(ahd->platform_data->mem_busaddr,
0x1000);
}
+ if (ahd->platform_data->host)
+ scsi_host_put(ahd->platform_data->host);
+
free(ahd->platform_data, M_DEVBUF);
}
}
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 390b53852d4..bf360ae021a 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -95,6 +95,9 @@ ahd_linux_pci_dev_remove(struct pci_dev *pdev)
struct ahd_softc *ahd = pci_get_drvdata(pdev);
u_long s;
+ if (ahd->platform_data && ahd->platform_data->host)
+ scsi_remove_host(ahd->platform_data->host);
+
ahd_lock(ahd, &s);
ahd_intr_enable(ahd, FALSE);
ahd_unlock(ahd, &s);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index c932b3b9449..6ee1435d37f 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1109,15 +1109,6 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa
return (0);
}
-uint64_t
-ahc_linux_get_memsize(void)
-{
- struct sysinfo si;
-
- si_meminfo(&si);
- return ((uint64_t)si.totalram << PAGE_SHIFT);
-}
-
/*
* Place the SCSI bus into a known state by either resetting it,
* or forcing transfer negotiations on the next command to any
@@ -1218,11 +1209,6 @@ ahc_platform_free(struct ahc_softc *ahc)
int i, j;
if (ahc->platform_data != NULL) {
- if (ahc->platform_data->host != NULL) {
- scsi_remove_host(ahc->platform_data->host);
- scsi_host_put(ahc->platform_data->host);
- }
-
/* destroy all of the device and target objects */
for (i = 0; i < AHC_NUM_TARGETS; i++) {
starget = ahc->platform_data->starget[i];
@@ -1251,6 +1237,9 @@ ahc_platform_free(struct ahc_softc *ahc)
0x1000);
}
+ if (ahc->platform_data->host)
+ scsi_host_put(ahc->platform_data->host);
+
free(ahc->platform_data, M_DEVBUF);
}
}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index c5299626924..be9edbe26db 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -494,8 +494,6 @@ ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
int ahc_linux_register_host(struct ahc_softc *,
struct scsi_host_template *);
-uint64_t ahc_linux_get_memsize(void);
-
/*************************** Pretty Printing **********************************/
struct info_str {
char *buffer;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 0d44a6907dd..cb30d9c1153 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -143,6 +143,9 @@ ahc_linux_pci_dev_remove(struct pci_dev *pdev)
struct ahc_softc *ahc = pci_get_drvdata(pdev);
u_long s;
+ if (ahc->platform_data && ahc->platform_data->host)
+ scsi_remove_host(ahc->platform_data->host);
+
ahc_lock(ahc, &s);
ahc_intr_enable(ahc, FALSE);
ahc_unlock(ahc, &s);
@@ -180,6 +183,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ahc_pci_identity *entry;
char *name;
int error;
+ struct device *dev = &pdev->dev;
pci = pdev;
entry = ahc_find_pci_device(pci);
@@ -209,11 +213,12 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
if (sizeof(dma_addr_t) > 4
- && ahc_linux_get_memsize() > 0x80000000
- && pci_set_dma_mask(pdev, mask_39bit) == 0) {
+ && ahc->features & AHC_LARGE_SCBS
+ && dma_set_mask(dev, mask_39bit) == 0
+ && dma_get_required_mask(dev) > DMA_32BIT_MASK) {
ahc->flags |= AHC_39BIT_ADDRESSING;
} else {
- if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
+ if (dma_set_mask(dev, DMA_32BIT_MASK)) {
printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
return (-ENODEV);
}
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 87e0c36f155..d71cef767ce 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -442,7 +442,6 @@ static void piix_sata_phy_reset(struct ata_port *ap)
* piix_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: um
- * @pio: PIO mode, 0 - 4
*
* Set PIO mode for device, in host controller PCI config space.
*
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index e6153fe5842..a8cfbef304b 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -996,6 +996,7 @@ oktosend:
#ifdef ED_DBGP
printk("send_s870: prdaddr_2 0x%8x tmpcip %x target_id %d\n", dev->id[c][target_id].prdaddr,tmpcip,target_id);
#endif
+ dev->id[c][target_id].prdaddr = dev->id[c][target_id].prd_bus;
outl(dev->id[c][target_id].prdaddr, tmpcip);
tmpcip = tmpcip - 2;
outb(0x06, tmpcip);
@@ -2572,7 +2573,7 @@ static void atp870u_free_tables(struct Scsi_Host *host)
for (k = 0; k < 16; k++) {
if (!atp_dev->id[j][k].prd_table)
continue;
- pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prdaddr);
+ pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
atp_dev->id[j][k].prd_table = NULL;
}
}
@@ -2584,12 +2585,13 @@ static int atp870u_init_tables(struct Scsi_Host *host)
int c,k;
for(c=0;c < 2;c++) {
for(k=0;k<16;k++) {
- atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prdaddr));
+ atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prd_bus));
if (!atp_dev->id[c][k].prd_table) {
printk("atp870u_init_tables fail\n");
atp870u_free_tables(host);
return -ENOMEM;
}
+ atp_dev->id[c][k].prdaddr = atp_dev->id[c][k].prd_bus;
atp_dev->id[c][k].devsp=0x20;
atp_dev->id[c][k].devtype = 0x7f;
atp_dev->id[c][k].curr_req = NULL;
diff --git a/drivers/scsi/atp870u.h b/drivers/scsi/atp870u.h
index 89f43af39cf..62bae64a01c 100644
--- a/drivers/scsi/atp870u.h
+++ b/drivers/scsi/atp870u.h
@@ -54,8 +54,9 @@ struct atp_unit
unsigned long tran_len;
unsigned long last_len;
unsigned char *prd_pos;
- unsigned char *prd_table;
- dma_addr_t prdaddr;
+ unsigned char *prd_table; /* Kernel address of PRD table */
+ dma_addr_t prd_bus; /* Bus address of PRD */
+ dma_addr_t prdaddr; /* Dynamically updated in driver */
struct scsi_cmnd *curr_req;
} id[2][16];
struct Scsi_Host *host;
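The atp870u hunks above split the single prdaddr field into a fixed prd_bus and a working prdaddr; a minimal sketch of the usage implied by those hunks (the `d` pointer is shorthand for one &atp_dev->id[c][k] entry):

	/* init: allocate once, remember the fixed bus address */
	d->prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &d->prd_bus);
	d->prdaddr   = d->prd_bus;       /* working copy, advanced by the driver */

	/* before (re)starting a transfer: rewind the working copy */
	d->prdaddr = d->prd_bus;
	outl(d->prdaddr, tmpcip);

	/* teardown: free with the original bus address, never the advanced one */
	pci_free_consistent(atp_dev->pdev, 1024, d->prd_table, d->prd_bus);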
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c
index fa652f8aa64..d59d449a9e4 100644
--- a/drivers/scsi/fd_mcs.c
+++ b/drivers/scsi/fd_mcs.c
@@ -1360,3 +1360,5 @@ static Scsi_Host_Template driver_template = {
.use_clustering = DISABLE_CLUSTERING,
};
#include "scsi_module.c"
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 85503fad789..02fe371b0ab 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -98,6 +98,7 @@ int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
switch (oldstate) {
case SHOST_CREATED:
case SHOST_RUNNING:
+ case SHOST_CANCEL_RECOVERY:
break;
default:
goto illegal;
@@ -107,12 +108,31 @@ int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
case SHOST_DEL:
switch (oldstate) {
case SHOST_CANCEL:
+ case SHOST_DEL_RECOVERY:
break;
default:
goto illegal;
}
break;
+ case SHOST_CANCEL_RECOVERY:
+ switch (oldstate) {
+ case SHOST_CANCEL:
+ case SHOST_RECOVERY:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+
+ case SHOST_DEL_RECOVERY:
+ switch (oldstate) {
+ case SHOST_CANCEL_RECOVERY:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
}
shost->shost_state = state;
return 0;
@@ -134,17 +154,29 @@ EXPORT_SYMBOL(scsi_host_set_state);
**/
void scsi_remove_host(struct Scsi_Host *shost)
{
+ unsigned long flags;
down(&shost->scan_mutex);
- scsi_host_set_state(shost, SHOST_CANCEL);
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_set_state(shost, SHOST_CANCEL))
+ if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ up(&shost->scan_mutex);
+ return;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
up(&shost->scan_mutex);
scsi_forget_host(shost);
scsi_proc_host_rm(shost);
- scsi_host_set_state(shost, SHOST_DEL);
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_set_state(shost, SHOST_DEL))
+ BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
+ spin_unlock_irqrestore(shost->host_lock, flags);
transport_unregister_device(&shost->shost_gendev);
class_device_unregister(&shost->shost_classdev);
device_del(&shost->shost_gendev);
+ scsi_proc_hostdir_rm(shost->hostt);
}
EXPORT_SYMBOL(scsi_remove_host);
@@ -231,7 +263,6 @@ static void scsi_host_dev_release(struct device *dev)
if (shost->work_q)
destroy_workqueue(shost->work_q);
- scsi_proc_hostdir_rm(shost->hostt);
scsi_destroy_command_freelist(shost);
kfree(shost->shost_data);
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 6e54c7d9b33..19392f65127 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -460,6 +460,8 @@ MODULE_PARM(adisplay, "1i");
MODULE_PARM(normal, "1i");
MODULE_PARM(ansi, "1i");
#endif
+
+MODULE_LICENSE("GPL");
#endif
/*counter of concurrent disk read/writes, to turn on/off disk led */
static int disk_rw_in_progress = 0;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 5b14934ba86..ff25210b00b 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -727,6 +727,16 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
if (hostdata->madapter_info.port_max_txu[0])
hostdata->host->max_sectors =
hostdata->madapter_info.port_max_txu[0] >> 9;
+
+ if (hostdata->madapter_info.os_type == 3 &&
+ strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
+ printk("ibmvscsi: host (Ver. %s) doesn't support large"
+ "transfers\n",
+ hostdata->madapter_info.srp_version);
+ printk("ibmvscsi: limiting scatterlists to %d\n",
+ MAX_INDIRECT_BUFS);
+ hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
+ }
}
}
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 5cc53cd9323..e5b01997117 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -2465,9 +2465,12 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
*
* LOCKING:
* None. (executing in kernel thread context)
+ *
+ * RETURNS:
+ * Non-zero if qc completed, zero otherwise.
*/
-static void ata_pio_complete (struct ata_port *ap)
+static int ata_pio_complete (struct ata_port *ap)
{
struct ata_queued_cmd *qc;
u8 drv_stat;
@@ -2486,14 +2489,14 @@ static void ata_pio_complete (struct ata_port *ap)
if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
ap->pio_task_state = PIO_ST_LAST_POLL;
ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
- return;
+ return 0;
}
}
drv_stat = ata_wait_idle(ap);
if (!ata_ok(drv_stat)) {
ap->pio_task_state = PIO_ST_ERR;
- return;
+ return 0;
}
qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -2502,6 +2505,10 @@ static void ata_pio_complete (struct ata_port *ap)
ap->pio_task_state = PIO_ST_IDLE;
ata_poll_qc_complete(qc, drv_stat);
+
+ /* another command may start at this point */
+
+ return 1;
}
@@ -2709,7 +2716,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
next_sg:
if (unlikely(qc->cursg >= qc->n_elem)) {
- /*
+ /*
* The end of qc->sg is reached and the device expects
* more data to transfer. In order not to overrun qc->sg
* and fulfill length specified in the byte count register,
@@ -2721,7 +2728,7 @@ next_sg:
unsigned int i;
if (words) /* warning if bytes > 1 */
- printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
+ printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
ap->id, bytes);
for (i = 0; i < words; i++)
@@ -2849,9 +2856,7 @@ static void ata_pio_block(struct ata_port *ap)
if (is_atapi_taskfile(&qc->tf)) {
/* no more data to transfer or unsupported ATAPI command */
if ((status & ATA_DRQ) == 0) {
- ap->pio_task_state = PIO_ST_IDLE;
-
- ata_poll_qc_complete(qc, status);
+ ap->pio_task_state = PIO_ST_LAST;
return;
}
@@ -2887,7 +2892,12 @@ static void ata_pio_error(struct ata_port *ap)
static void ata_pio_task(void *_data)
{
struct ata_port *ap = _data;
- unsigned long timeout = 0;
+ unsigned long timeout;
+ int qc_completed;
+
+fsm_start:
+ timeout = 0;
+ qc_completed = 0;
switch (ap->pio_task_state) {
case PIO_ST_IDLE:
@@ -2898,7 +2908,7 @@ static void ata_pio_task(void *_data)
break;
case PIO_ST_LAST:
- ata_pio_complete(ap);
+ qc_completed = ata_pio_complete(ap);
break;
case PIO_ST_POLL:
@@ -2913,10 +2923,9 @@ static void ata_pio_task(void *_data)
}
if (timeout)
- queue_delayed_work(ata_wq, &ap->pio_task,
- timeout);
- else
- queue_work(ata_wq, &ap->pio_task);
+ queue_delayed_work(ata_wq, &ap->pio_task, timeout);
+ else if (!qc_completed)
+ goto fsm_start;
}
static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
@@ -4123,6 +4132,53 @@ err_out:
}
/**
+ * ata_host_set_remove - unregister and free an ATA host set
+ * @host_set: ATA host set to be removed
+ *
+ * Unregister all objects associated with this host set. Free those
+ * objects.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ */
+
+
+void ata_host_set_remove(struct ata_host_set *host_set)
+{
+ struct ata_port *ap;
+ unsigned int i;
+
+ for (i = 0; i < host_set->n_ports; i++) {
+ ap = host_set->ports[i];
+ scsi_remove_host(ap->host);
+ }
+
+ free_irq(host_set->irq, host_set);
+
+ for (i = 0; i < host_set->n_ports; i++) {
+ ap = host_set->ports[i];
+
+ ata_scsi_release(ap->host);
+
+ if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ if (ioaddr->cmd_addr == 0x1f0)
+ release_region(0x1f0, 8);
+ else if (ioaddr->cmd_addr == 0x170)
+ release_region(0x170, 8);
+ }
+
+ scsi_host_put(ap->host);
+ }
+
+ if (host_set->ops->host_stop)
+ host_set->ops->host_stop(host_set);
+
+ kfree(host_set);
+}
+
+/**
* ata_scsi_release - SCSI layer callback hook for host unload
* @host: libata host to be unloaded
*
@@ -4462,39 +4518,8 @@ void ata_pci_remove_one (struct pci_dev *pdev)
{
struct device *dev = pci_dev_to_dev(pdev);
struct ata_host_set *host_set = dev_get_drvdata(dev);
- struct ata_port *ap;
- unsigned int i;
-
- for (i = 0; i < host_set->n_ports; i++) {
- ap = host_set->ports[i];
-
- scsi_remove_host(ap->host);
- }
-
- free_irq(host_set->irq, host_set);
-
- for (i = 0; i < host_set->n_ports; i++) {
- ap = host_set->ports[i];
-
- ata_scsi_release(ap->host);
-
- if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
- struct ata_ioports *ioaddr = &ap->ioaddr;
-
- if (ioaddr->cmd_addr == 0x1f0)
- release_region(0x1f0, 8);
- else if (ioaddr->cmd_addr == 0x170)
- release_region(0x170, 8);
- }
-
- scsi_host_put(ap->host);
- }
-
- if (host_set->ops->host_stop)
- host_set->ops->host_stop(host_set);
-
- kfree(host_set);
+ ata_host_set_remove(host_set);
pci_release_regions(pdev);
pci_disable_device(pdev);
dev_set_drvdata(dev, NULL);
@@ -4564,6 +4589,7 @@ module_exit(ata_exit);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
+EXPORT_SYMBOL_GPL(ata_host_set_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_qc_complete);
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 86eaf6d408d..acae7c48ef7 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -973,10 +973,10 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
if ((phba->fc_flag & FC_FABRIC) ||
((phba->fc_topology == TOPOLOGY_LOOP) &&
(phba->fc_flag & FC_PUBLIC_LOOP)))
- node_name = wwn_to_u64(phba->fc_fabparam.nodeName.wwn);
+ node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
else
/* fabric is local port if there is no F/FL_Port */
- node_name = wwn_to_u64(phba->fc_nodename.wwn);
+ node_name = wwn_to_u64(phba->fc_nodename.u.wwn);
spin_unlock_irq(shost->host_lock);
@@ -1110,7 +1110,7 @@ lpfc_get_starget_node_name(struct scsi_target *starget)
/* Search the mapped list for this target ID */
list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
if (starget->id == ndlp->nlp_sid) {
- node_name = wwn_to_u64(ndlp->nlp_nodename.wwn);
+ node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
break;
}
}
@@ -1131,7 +1131,7 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
/* Search the mapped list for this target ID */
list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
if (starget->id == ndlp->nlp_sid) {
- port_name = wwn_to_u64(ndlp->nlp_portname.wwn);
+ port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
break;
}
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 4fb8eb0c84c..56052f4510c 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1019,8 +1019,8 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
struct fc_rport_identifiers rport_ids;
/* Remote port has reappeared. Re-register w/ FC transport */
- rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.wwn);
- rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.wwn);
+ rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+ rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rport_ids.port_id = ndlp->nlp_DID;
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
if (ndlp->nlp_type & NLP_FCP_TARGET)
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 047a87c26cc..86c41981188 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -280,9 +280,9 @@ struct lpfc_name {
#define NAME_CCITT_GR_TYPE 0xE
uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */
uint8_t IEEE[6]; /* FC IEEE address */
- };
+ } s;
uint8_t wwn[8];
- };
+ } u;
};
struct csp {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 454058f655d..0856ff7d3b3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -285,7 +285,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
if (phba->SerialNumber[0] == 0) {
uint8_t *outptr;
- outptr = (uint8_t *) & phba->fc_nodename.IEEE[0];
+ outptr = &phba->fc_nodename.u.s.IEEE[0];
for (i = 0; i < 12; i++) {
status = *outptr++;
j = ((status & 0xf0) >> 4);
@@ -1523,8 +1523,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
* Must done after lpfc_sli_hba_setup()
*/
- fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.wwn);
- fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.wwn);
+ fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.u.wwn);
+ fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.u.wwn);
fc_host_supported_classes(host) = FC_COS_CLASS3;
memset(fc_host_supported_fc4s(host), 0,
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 6f308ebe3e7..61a6fd810bb 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -621,8 +621,6 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
if(islogical) {
switch (cmd->cmnd[0]) {
case TEST_UNIT_READY:
- memset(cmd->request_buffer, 0, cmd->request_bufflen);
-
#if MEGA_HAVE_CLUSTERING
/*
* Do we support clustering and is the support enabled
@@ -652,11 +650,28 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
return NULL;
#endif
- case MODE_SENSE:
+ case MODE_SENSE: {
+ char *buf;
+
+ if (cmd->use_sg) {
+ struct scatterlist *sg;
+
+ sg = (struct scatterlist *)cmd->request_buffer;
+ buf = kmap_atomic(sg->page, KM_IRQ0) +
+ sg->offset;
+ } else
+ buf = cmd->request_buffer;
memset(cmd->request_buffer, 0, cmd->cmnd[4]);
+ if (cmd->use_sg) {
+ struct scatterlist *sg;
+
+ sg = (struct scatterlist *)cmd->request_buffer;
+ kunmap_atomic(buf - sg->offset, KM_IRQ0);
+ }
cmd->result = (DID_OK << 16);
cmd->scsi_done(cmd);
return NULL;
+ }
case READ_CAPACITY:
case INQUIRY:
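The MODE_SENSE hunk above maps the first scatterlist page around the existing memset; a minimal sketch of the symmetric kmap_atomic/kunmap_atomic pairing it relies on (same calls as the hunk; the mapped buffer is what a memset should target when a scatterlist is in use):

	struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
	char *buf;

	buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;  /* map page, add offset */
	memset(buf, 0, cmd->cmnd[4]);                        /* write via the mapping */
	kunmap_atomic(buf - sg->offset, KM_IRQ0);            /* unmap the page base  */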
@@ -1685,14 +1700,23 @@ mega_rundoneq (adapter_t *adapter)
static void
mega_free_scb(adapter_t *adapter, scb_t *scb)
{
+ unsigned long length;
+
switch( scb->dma_type ) {
case MEGA_DMA_TYPE_NONE:
break;
case MEGA_BULK_DATA:
+ if (scb->cmd->use_sg == 0)
+ length = scb->cmd->request_bufflen;
+ else {
+ struct scatterlist *sgl =
+ (struct scatterlist *)scb->cmd->request_buffer;
+ length = sgl->length;
+ }
pci_unmap_page(adapter->dev, scb->dma_h_bulkdata,
- scb->cmd->request_bufflen, scb->dma_direction);
+ length, scb->dma_direction);
break;
case MEGA_SGLIST:
@@ -1741,6 +1765,7 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
struct scatterlist *sgl;
struct page *page;
unsigned long offset;
+ unsigned int length;
Scsi_Cmnd *cmd;
int sgcnt;
int idx;
@@ -1748,14 +1773,23 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
cmd = scb->cmd;
/* Scatter-gather not used */
- if( !cmd->use_sg ) {
-
- page = virt_to_page(cmd->request_buffer);
- offset = offset_in_page(cmd->request_buffer);
+ if( cmd->use_sg == 0 || (cmd->use_sg == 1 &&
+ !adapter->has_64bit_addr)) {
+
+ if (cmd->use_sg == 0) {
+ page = virt_to_page(cmd->request_buffer);
+ offset = offset_in_page(cmd->request_buffer);
+ length = cmd->request_bufflen;
+ } else {
+ sgl = (struct scatterlist *)cmd->request_buffer;
+ page = sgl->page;
+ offset = sgl->offset;
+ length = sgl->length;
+ }
scb->dma_h_bulkdata = pci_map_page(adapter->dev,
page, offset,
- cmd->request_bufflen,
+ length,
scb->dma_direction);
scb->dma_type = MEGA_BULK_DATA;
@@ -1765,14 +1799,14 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
*/
if( adapter->has_64bit_addr ) {
scb->sgl64[0].address = scb->dma_h_bulkdata;
- scb->sgl64[0].length = cmd->request_bufflen;
+ scb->sgl64[0].length = length;
*buf = (u32)scb->sgl_dma_addr;
- *len = (u32)cmd->request_bufflen;
+ *len = (u32)length;
return 1;
}
else {
*buf = (u32)scb->dma_h_bulkdata;
- *len = (u32)cmd->request_bufflen;
+ *len = (u32)length;
}
return 0;
}
@@ -1791,27 +1825,23 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
if( sgcnt > adapter->sglen ) BUG();
+ *len = 0;
+
for( idx = 0; idx < sgcnt; idx++, sgl++ ) {
if( adapter->has_64bit_addr ) {
scb->sgl64[idx].address = sg_dma_address(sgl);
- scb->sgl64[idx].length = sg_dma_len(sgl);
+ *len += scb->sgl64[idx].length = sg_dma_len(sgl);
}
else {
scb->sgl[idx].address = sg_dma_address(sgl);
- scb->sgl[idx].length = sg_dma_len(sgl);
+ *len += scb->sgl[idx].length = sg_dma_len(sgl);
}
}
/* Reset pointer and length fields */
*buf = scb->sgl_dma_addr;
- /*
- * For passthru command, dataxferlen must be set, even for commands
- * with a sg list
- */
- *len = (u32)cmd->request_bufflen;
-
/* Return count of SG requests */
return sgcnt;
}
diff --git a/drivers/scsi/megaraid/Kconfig.megaraid b/drivers/scsi/megaraid/Kconfig.megaraid
index 917d591d90b..7363e12663a 100644
--- a/drivers/scsi/megaraid/Kconfig.megaraid
+++ b/drivers/scsi/megaraid/Kconfig.megaraid
@@ -76,3 +76,12 @@ config MEGARAID_LEGACY
To compile this driver as a module, choose M here: the
module will be called megaraid
endif
+
+config MEGARAID_SAS
+ tristate "LSI Logic MegaRAID SAS RAID Module"
+ depends on PCI && SCSI
+ help
+ Module for LSI Logic's SAS based RAID controllers.
+ To compile this driver as a module, choose M here: the
+ module will be called megaraid_sas.
+
diff --git a/drivers/scsi/megaraid/Makefile b/drivers/scsi/megaraid/Makefile
index 6dd99f27572..f469915b97c 100644
--- a/drivers/scsi/megaraid/Makefile
+++ b/drivers/scsi/megaraid/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o
obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o
+obj-$(CONFIG_MEGARAID_SAS) += megaraid_sas.o
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
new file mode 100644
index 00000000000..1b3148e842a
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -0,0 +1,2805 @@
+/*
+ *
+ * Linux MegaRAID driver for SAS based RAID controllers
+ *
+ * Copyright (c) 2003-2005 LSI Logic Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * FILE : megaraid_sas.c
+ * Version : v00.00.02.00-rc4
+ *
+ * Authors:
+ * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com>
+ * Sumant Patro <Sumant.Patro@lsil.com>
+ *
+ * List of supported controllers
+ *
+ * OEM Product Name VID DID SSVID SSID
+ * --- ------------ --- --- ---- ----
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/version.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/uio.h>
+#include <asm/uaccess.h>
+#include <linux/compat.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include "megaraid_sas.h"
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MEGASAS_VERSION);
+MODULE_AUTHOR("sreenivas.bagalkote@lsil.com");
+MODULE_DESCRIPTION("LSI Logic MegaRAID SAS Driver");
+
+/*
+ * PCI ID table for all supported controllers
+ */
+static struct pci_device_id megasas_pci_table[] = {
+
+ {
+ PCI_VENDOR_ID_LSI_LOGIC,
+ PCI_DEVICE_ID_LSI_SAS1064R,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ },
+ {
+ PCI_VENDOR_ID_DELL,
+ PCI_DEVICE_ID_DELL_PERC5,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ },
+ {0} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(pci, megasas_pci_table);
+
+static int megasas_mgmt_majorno;
+static struct megasas_mgmt_info megasas_mgmt_info;
+static struct fasync_struct *megasas_async_queue;
+static DECLARE_MUTEX(megasas_async_queue_mutex);
+
+/**
+ * megasas_get_cmd - Get a command from the free pool
+ * @instance: Adapter soft state
+ *
+ * Returns a free command from the pool
+ */
+static inline struct megasas_cmd *megasas_get_cmd(struct megasas_instance
+ *instance)
+{
+ unsigned long flags;
+ struct megasas_cmd *cmd = NULL;
+
+ spin_lock_irqsave(&instance->cmd_pool_lock, flags);
+
+ if (!list_empty(&instance->cmd_pool)) {
+ cmd = list_entry((&instance->cmd_pool)->next,
+ struct megasas_cmd, list);
+ list_del_init(&cmd->list);
+ } else {
+ printk(KERN_ERR "megasas: Command pool empty!\n");
+ }
+
+ spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
+ return cmd;
+}
+
+/**
+ * megasas_return_cmd - Return a cmd to free command pool
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be returned to free command pool
+ */
+static inline void
+megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&instance->cmd_pool_lock, flags);
+
+ cmd->scmd = NULL;
+ list_add_tail(&cmd->list, &instance->cmd_pool);
+
+ spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
+}
+
+/**
+ * megasas_enable_intr - Enables interrupts
+ * @regs: MFI register set
+ */
+static inline void
+megasas_enable_intr(struct megasas_register_set __iomem * regs)
+{
+ writel(1, &(regs)->outbound_intr_mask);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr - Disables interrupts
+ * @regs: MFI register set
+ */
+static inline void
+megasas_disable_intr(struct megasas_register_set __iomem * regs)
+{
+ u32 mask = readl(&regs->outbound_intr_mask) & (~0x00000001);
+ writel(mask, &regs->outbound_intr_mask);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_issue_polled - Issues a polling command
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be issued
+ *
+ * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
+ */
+static int
+megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ int i;
+ u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000;
+
+ struct megasas_header *frame_hdr = &cmd->frame->hdr;
+
+ frame_hdr->cmd_status = 0xFF;
+ frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+
+ /*
+ * Issue the frame using inbound queue port
+ */
+ writel(cmd->frame_phys_addr >> 3,
+ &instance->reg_set->inbound_queue_port);
+
+ /*
+ * Wait for cmd_status to change
+ */
+ for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i++) {
+ rmb();
+ msleep(1);
+ }
+
+ if (frame_hdr->cmd_status == 0xff)
+ return -ETIME;
+
+ return 0;
+}
+
+/**
+ * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
+ * @instance: Adapter soft state
+ * @cmd: Command to be issued
+ *
+ * This function waits on an event for the command to be returned from ISR.
+ * Used to issue ioctl commands.
+ */
+static int
+megasas_issue_blocked_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd)
+{
+ cmd->cmd_status = ENODATA;
+
+ writel(cmd->frame_phys_addr >> 3,
+ &instance->reg_set->inbound_queue_port);
+
+ wait_event(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA));
+
+ return 0;
+}
+
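A condensed view of the blocked-command handshake: the issue path above pairs with the completion path in megasas_complete_int_cmd() further down in this file (reading aid only, lines gathered from both functions):

	/* issuer (process context) */
	cmd->cmd_status = ENODATA;                     /* sentinel: not completed */
	writel(cmd->frame_phys_addr >> 3,
	       &instance->reg_set->inbound_queue_port);
	wait_event(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA));

	/* completer (ISR path, megasas_complete_int_cmd) */
	cmd->cmd_status = cmd->frame->io.cmd_status;   /* FW status replaces sentinel */
	wake_up(&instance->int_cmd_wait_q);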
+/**
+ * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
+ * @instance: Adapter soft state
+ * @cmd_to_abort: Previously issued cmd to be aborted
+ *
+ * MFI firmware can abort a previously issued AEN command (automatic event
+ * notification). The megasas_issue_blocked_abort_cmd() function issues such
+ * an abort cmd and blocks until it is completed.
+ */
+static int
+megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd_to_abort)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_abort_frame *abort_fr;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd)
+ return -1;
+
+ abort_fr = &cmd->frame->abort;
+
+ /*
+ * Prepare and issue the abort frame
+ */
+ abort_fr->cmd = MFI_CMD_ABORT;
+ abort_fr->cmd_status = 0xFF;
+ abort_fr->flags = 0;
+ abort_fr->abort_context = cmd_to_abort->index;
+ abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
+ abort_fr->abort_mfi_phys_addr_hi = 0;
+
+ cmd->sync_cmd = 1;
+ cmd->cmd_status = 0xFF;
+
+ writel(cmd->frame_phys_addr >> 3,
+ &instance->reg_set->inbound_queue_port);
+
+ /*
+ * Wait for this cmd to complete
+ */
+ wait_event(instance->abort_cmd_wait_q, (cmd->cmd_status != 0xFF));
+
+ megasas_return_cmd(instance, cmd);
+ return 0;
+}
+
+/**
+ * megasas_make_sgl32 - Prepares 32-bit SGL
+ * @instance: Adapter soft state
+ * @scp: SCSI command from the mid-layer
+ * @mfi_sgl: SGL to be filled in
+ *
+ * If successful, this function returns the number of SG elements. Otherwise,
+ * it returns -1.
+ */
+static inline int
+megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
+ union megasas_sgl *mfi_sgl)
+{
+ int i;
+ int sge_count;
+ struct scatterlist *os_sgl;
+
+ /*
+ * Return 0 if there is no data transfer
+ */
+ if (!scp->request_buffer || !scp->request_bufflen)
+ return 0;
+
+ if (!scp->use_sg) {
+ mfi_sgl->sge32[0].phys_addr = pci_map_single(instance->pdev,
+ scp->
+ request_buffer,
+ scp->
+ request_bufflen,
+ scp->
+ sc_data_direction);
+ mfi_sgl->sge32[0].length = scp->request_bufflen;
+
+ return 1;
+ }
+
+ os_sgl = (struct scatterlist *)scp->request_buffer;
+ sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg,
+ scp->sc_data_direction);
+
+ for (i = 0; i < sge_count; i++, os_sgl++) {
+ mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
+ mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
+ }
+
+ return sge_count;
+}
+
+/**
+ * megasas_make_sgl64 - Prepares 64-bit SGL
+ * @instance: Adapter soft state
+ * @scp: SCSI command from the mid-layer
+ * @mfi_sgl: SGL to be filled in
+ *
+ * If successful, this function returns the number of SG elements. Otherwise,
+ * it returns -1.
+ */
+static inline int
+megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
+ union megasas_sgl *mfi_sgl)
+{
+ int i;
+ int sge_count;
+ struct scatterlist *os_sgl;
+
+ /*
+ * Return 0 if there is no data transfer
+ */
+ if (!scp->request_buffer || !scp->request_bufflen)
+ return 0;
+
+ if (!scp->use_sg) {
+ mfi_sgl->sge64[0].phys_addr = pci_map_single(instance->pdev,
+ scp->
+ request_buffer,
+ scp->
+ request_bufflen,
+ scp->
+ sc_data_direction);
+
+ mfi_sgl->sge64[0].length = scp->request_bufflen;
+
+ return 1;
+ }
+
+ os_sgl = (struct scatterlist *)scp->request_buffer;
+ sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg,
+ scp->sc_data_direction);
+
+ for (i = 0; i < sge_count; i++, os_sgl++) {
+ mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
+ mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
+ }
+
+ return sge_count;
+}
+
+/**
+ * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
+ * @instance: Adapter soft state
+ * @scp: SCSI command
+ * @cmd: Command to be prepared in
+ *
+ * This function prepares CDB commands. These are typically pass-through
+ * commands to the devices.
+ */
+static inline int
+megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
+ struct megasas_cmd *cmd)
+{
+ u32 sge_sz;
+ int sge_bytes;
+ u32 is_logical;
+ u32 device_id;
+ u16 flags = 0;
+ struct megasas_pthru_frame *pthru;
+
+ is_logical = MEGASAS_IS_LOGICAL(scp);
+ device_id = MEGASAS_DEV_INDEX(instance, scp);
+ pthru = (struct megasas_pthru_frame *)cmd->frame;
+
+ if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ flags = MFI_FRAME_DIR_WRITE;
+ else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ flags = MFI_FRAME_DIR_READ;
+ else if (scp->sc_data_direction == PCI_DMA_NONE)
+ flags = MFI_FRAME_DIR_NONE;
+
+ /*
+ * Prepare the DCDB frame
+ */
+ pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
+ pthru->cmd_status = 0x0;
+ pthru->scsi_status = 0x0;
+ pthru->target_id = device_id;
+ pthru->lun = scp->device->lun;
+ pthru->cdb_len = scp->cmd_len;
+ pthru->timeout = 0;
+ pthru->flags = flags;
+ pthru->data_xfer_len = scp->request_bufflen;
+
+ memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
+
+ /*
+ * Construct SGL
+ */
+ sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
+ sizeof(struct megasas_sge32);
+
+ if (IS_DMA64) {
+ pthru->flags |= MFI_FRAME_SGL64;
+ pthru->sge_count = megasas_make_sgl64(instance, scp,
+ &pthru->sgl);
+ } else
+ pthru->sge_count = megasas_make_sgl32(instance, scp,
+ &pthru->sgl);
+
+ /*
+ * Sense info specific
+ */
+ pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
+ pthru->sense_buf_phys_addr_hi = 0;
+ pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
+
+ sge_bytes = sge_sz * pthru->sge_count;
+
+ /*
+ * Compute the total number of frames this command consumes. FW uses
+ * this number to pull sufficient number of frames from host memory.
+ */
+ cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
+ ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1;
+
+ if (cmd->frame_count > 7)
+ cmd->frame_count = 8;
+
+ return cmd->frame_count;
+}
+
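A worked example of the frame-count arithmetic above, assuming MEGAMFI_FRAME_SIZE is 64 bytes and a 32-bit SGE is 8 bytes (both defined in megaraid_sas.h, which is not part of this patch excerpt):

	/*
	 * 10 SG entries with 32-bit SGEs:
	 *   sge_sz      = sizeof(struct megasas_sge32) = 8
	 *   sge_bytes   = 8 * 10 = 80
	 *   frame_count = 80 / 64 + ((80 % 64) ? 1 : 0) + 1
	 *               = 1 + 1 + 1 = 3
	 * i.e. the command frame itself plus two extra frames for the SGL,
	 * clamped to 8 by the "if (cmd->frame_count > 7)" check.
	 */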
+/**
+ * megasas_build_ldio - Prepares IOs to logical devices
+ * @instance: Adapter soft state
+ * @scp: SCSI command
+ * @cmd: Command to be prepared
+ *
+ * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
+ */
+static inline int
+megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
+ struct megasas_cmd *cmd)
+{
+ u32 sge_sz;
+ int sge_bytes;
+ u32 device_id;
+ u8 sc = scp->cmnd[0];
+ u16 flags = 0;
+ struct megasas_io_frame *ldio;
+
+ device_id = MEGASAS_DEV_INDEX(instance, scp);
+ ldio = (struct megasas_io_frame *)cmd->frame;
+
+ if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ flags = MFI_FRAME_DIR_WRITE;
+ else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ flags = MFI_FRAME_DIR_READ;
+
+ /*
+ * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
+ */
+ ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
+ ldio->cmd_status = 0x0;
+ ldio->scsi_status = 0x0;
+ ldio->target_id = device_id;
+ ldio->timeout = 0;
+ ldio->reserved_0 = 0;
+ ldio->pad_0 = 0;
+ ldio->flags = flags;
+ ldio->start_lba_hi = 0;
+ ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
+
+ /*
+ * 6-byte READ(0x08) or WRITE(0x0A) cdb
+ */
+ if (scp->cmd_len == 6) {
+ ldio->lba_count = (u32) scp->cmnd[4];
+ ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) |
+ ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
+
+ ldio->start_lba_lo &= 0x1FFFFF;
+ }
+
+ /*
+ * 10-byte READ(0x28) or WRITE(0x2A) cdb
+ */
+ else if (scp->cmd_len == 10) {
+ ldio->lba_count = (u32) scp->cmnd[8] |
+ ((u32) scp->cmnd[7] << 8);
+ ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+ }
+
+ /*
+ * 12-byte READ(0xA8) or WRITE(0xAA) cdb
+ */
+ else if (scp->cmd_len == 12) {
+ ldio->lba_count = ((u32) scp->cmnd[6] << 24) |
+ ((u32) scp->cmnd[7] << 16) |
+ ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+
+ ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+ }
+
+ /*
+ * 16-byte READ(0x88) or WRITE(0x8A) cdb
+ */
+ else if (scp->cmd_len == 16) {
+ ldio->lba_count = ((u32) scp->cmnd[10] << 24) |
+ ((u32) scp->cmnd[11] << 16) |
+ ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
+
+ ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) |
+ ((u32) scp->cmnd[7] << 16) |
+ ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+
+ ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+
+ }
+
+ /*
+ * Construct SGL
+ */
+ sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
+ sizeof(struct megasas_sge32);
+
+ if (IS_DMA64) {
+ ldio->flags |= MFI_FRAME_SGL64;
+ ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
+ } else
+ ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
+
+ /*
+ * Sense info specific
+ */
+ ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
+ ldio->sense_buf_phys_addr_hi = 0;
+ ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
+
+ sge_bytes = sge_sz * ldio->sge_count;
+
+ cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
+ ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1;
+
+ if (cmd->frame_count > 7)
+ cmd->frame_count = 8;
+
+ return cmd->frame_count;
+}
+
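A worked example for the 10-byte CDB branch in megasas_build_ldio() above, using an illustrative READ(10):

	/*
	 * scp->cmnd = { 0x28, 0x00, 0x00, 0x12, 0x34, 0x56, 0x00, 0x00, 0x40, 0x00 }
	 *
	 *   sc & 0x02 == 0                                   -> MFI_CMD_LD_READ
	 *   start_lba_lo = 0x00<<24 | 0x12<<16 | 0x34<<8 | 0x56 = 0x00123456
	 *   lba_count    = 0x40 | (0x00 << 8)                   = 64 blocks
	 *
	 * For the 6-byte form the LBA is additionally masked with 0x1FFFFF,
	 * i.e. limited to the 21 bits a READ(6)/WRITE(6) CDB can carry.
	 */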
+/**
+ * megasas_build_cmd - Prepares a command packet
+ * @instance: Adapter soft state
+ * @scp: SCSI command
+ * @frame_count: [OUT] Number of frames used to prepare this command
+ */
+static inline struct megasas_cmd *megasas_build_cmd(struct megasas_instance
+ *instance,
+ struct scsi_cmnd *scp,
+ int *frame_count)
+{
+ u32 logical_cmd;
+ struct megasas_cmd *cmd;
+
+ /*
+ * Find out if this is logical or physical drive command.
+ */
+ logical_cmd = MEGASAS_IS_LOGICAL(scp);
+
+ /*
+ * Logical drive command
+ */
+ if (logical_cmd) {
+
+ if (scp->device->id >= MEGASAS_MAX_LD) {
+ scp->result = DID_BAD_TARGET << 16;
+ return NULL;
+ }
+
+ switch (scp->cmnd[0]) {
+
+ case READ_10:
+ case WRITE_10:
+ case READ_12:
+ case WRITE_12:
+ case READ_6:
+ case WRITE_6:
+ case READ_16:
+ case WRITE_16:
+ /*
+ * Fail for LUN > 0
+ */
+ if (scp->device->lun) {
+ scp->result = DID_BAD_TARGET << 16;
+ return NULL;
+ }
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ scp->result = DID_IMM_RETRY << 16;
+ return NULL;
+ }
+
+ *frame_count = megasas_build_ldio(instance, scp, cmd);
+
+ if (!(*frame_count)) {
+ megasas_return_cmd(instance, cmd);
+ return NULL;
+ }
+
+ return cmd;
+
+ default:
+ /*
+ * Fail for LUN > 0
+ */
+ if (scp->device->lun) {
+ scp->result = DID_BAD_TARGET << 16;
+ return NULL;
+ }
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ scp->result = DID_IMM_RETRY << 16;
+ return NULL;
+ }
+
+ *frame_count = megasas_build_dcdb(instance, scp, cmd);
+
+ if (!(*frame_count)) {
+ megasas_return_cmd(instance, cmd);
+ return NULL;
+ }
+
+ return cmd;
+ }
+ } else {
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ scp->result = DID_IMM_RETRY << 16;
+ return NULL;
+ }
+
+ *frame_count = megasas_build_dcdb(instance, scp, cmd);
+
+ if (!(*frame_count)) {
+ megasas_return_cmd(instance, cmd);
+ return NULL;
+ }
+
+ return cmd;
+ }
+
+ return NULL;
+}
+
+/**
+ * megasas_queue_command - Queue entry point
+ * @scmd: SCSI command to be queued
+ * @done: Callback entry point
+ */
+static int
+megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
+{
+ u32 frame_count;
+ unsigned long flags;
+ struct megasas_cmd *cmd;
+ struct megasas_instance *instance;
+
+ instance = (struct megasas_instance *)
+ scmd->device->host->hostdata;
+ scmd->scsi_done = done;
+ scmd->result = 0;
+
+ cmd = megasas_build_cmd(instance, scmd, &frame_count);
+
+ if (!cmd) {
+ done(scmd);
+ return 0;
+ }
+
+ cmd->scmd = scmd;
+ scmd->SCp.ptr = (char *)cmd;
+ scmd->SCp.sent_command = jiffies;
+
+ /*
+ * Issue the command to the FW
+ */
+ spin_lock_irqsave(&instance->instance_lock, flags);
+ instance->fw_outstanding++;
+ spin_unlock_irqrestore(&instance->instance_lock, flags);
+
+ writel(((cmd->frame_phys_addr >> 3) | (cmd->frame_count - 1)),
+ &instance->reg_set->inbound_queue_port);
+
+ return 0;
+}
+
+/**
+ * megasas_wait_for_outstanding - Wait for all outstanding cmds
+ * @instance: Adapter soft state
+ *
+ * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for the FW
+ * to complete all its outstanding commands. It returns an error if one or
+ * more IOs are still pending after this time period, and also marks the
+ * controller dead.
+ */
+static int megasas_wait_for_outstanding(struct megasas_instance *instance)
+{
+ int i;
+ u32 wait_time = MEGASAS_RESET_WAIT_TIME;
+
+ for (i = 0; i < wait_time; i++) {
+
+ if (!instance->fw_outstanding)
+ break;
+
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+ printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
+ "commands to complete\n", i,
+ instance->fw_outstanding);
+ }
+
+ msleep(1000);
+ }
+
+ if (instance->fw_outstanding) {
+ instance->hw_crit_error = 1;
+ return FAILED;
+ }
+
+ return SUCCESS;
+}
+
+/**
+ * megasas_generic_reset - Generic reset routine
+ * @scmd: Mid-layer SCSI command
+ *
+ * This routine implements a generic reset handler for device, bus and host
+ * reset requests. Device, bus and host specific reset handlers can use this
+ * function after they do their specific tasks.
+ */
+static int megasas_generic_reset(struct scsi_cmnd *scmd)
+{
+ int ret_val;
+ struct megasas_instance *instance;
+
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+ printk(KERN_NOTICE "megasas: RESET -%ld cmd=%x <c=%d t=%d l=%d>\n",
+ scmd->serial_number, scmd->cmnd[0], scmd->device->channel,
+ scmd->device->id, scmd->device->lun);
+
+ if (instance->hw_crit_error) {
+ printk(KERN_ERR "megasas: cannot recover from previous reset "
+ "failures\n");
+ return FAILED;
+ }
+
+ spin_unlock(scmd->device->host->host_lock);
+
+ ret_val = megasas_wait_for_outstanding(instance);
+
+ if (ret_val == SUCCESS)
+ printk(KERN_NOTICE "megasas: reset successful \n");
+ else
+ printk(KERN_ERR "megasas: failed to do reset\n");
+
+ spin_lock(scmd->device->host->host_lock);
+
+ return ret_val;
+}
+
+static enum scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
+{
+ unsigned long seconds;
+
+ if (scmd->SCp.ptr) {
+ seconds = (jiffies - scmd->SCp.sent_command) / HZ;
+
+ if (seconds < 90) {
+ return EH_RESET_TIMER;
+ } else {
+ return EH_NOT_HANDLED;
+ }
+ }
+
+ return EH_HANDLED;
+}
+
+/**
+ * megasas_reset_device - Device reset handler entry point
+ */
+static int megasas_reset_device(struct scsi_cmnd *scmd)
+{
+ int ret;
+
+ /*
+ * First wait for all commands to complete
+ */
+ ret = megasas_generic_reset(scmd);
+
+ return ret;
+}
+
+/**
+ * megasas_reset_bus_host - Bus & host reset handler entry point
+ */
+static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
+{
+ int ret;
+
+ /*
+ * First wait for all commands to complete
+ */
+ ret = megasas_generic_reset(scmd);
+
+ return ret;
+}
+
+/**
+ * megasas_service_aen - Processes an event notification
+ * @instance: Adapter soft state
+ * @cmd: AEN command completed by the ISR
+ *
+ * For AEN, the driver sends a command down to the FW that the FW holds
+ * until an event occurs. When an event of interest occurs, the FW completes
+ * the command it was previously holding.
+ *
+ * This routine sends a SIGIO signal to processes that have registered with
+ * the driver for AEN.
+ */
+static void
+megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ /*
+ * Don't signal the app if this is just an abort of a previously registered AEN
+ */
+ if (!cmd->abort_aen)
+ kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
+ else
+ cmd->abort_aen = 0;
+
+ instance->aen_cmd = NULL;
+ megasas_return_cmd(instance, cmd);
+}
+
+/*
+ * Scsi host template for megaraid_sas driver
+ */
+static struct scsi_host_template megasas_template = {
+
+ .module = THIS_MODULE,
+ .name = "LSI Logic SAS based MegaRAID driver",
+ .proc_name = "megaraid_sas",
+ .queuecommand = megasas_queue_command,
+ .eh_device_reset_handler = megasas_reset_device,
+ .eh_bus_reset_handler = megasas_reset_bus_host,
+ .eh_host_reset_handler = megasas_reset_bus_host,
+ .eh_timed_out = megasas_reset_timer,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+/**
+ * megasas_complete_int_cmd - Completes an internal command
+ * @instance: Adapter soft state
+ * @cmd: Command to be completed
+ *
+ * The megasas_issue_blocked_cmd() function waits for a command to complete
+ * after it issues a command. This function wakes up that waiting routine by
+ * calling wake_up() on the wait queue.
+ */
+static void
+megasas_complete_int_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd)
+{
+ cmd->cmd_status = cmd->frame->io.cmd_status;
+
+ if (cmd->cmd_status == ENODATA) {
+ cmd->cmd_status = 0;
+ }
+ wake_up(&instance->int_cmd_wait_q);
+}
+
+/**
+ * megasas_complete_abort - Completes aborting a command
+ * @instance: Adapter soft state
+ * @cmd: Cmd that was issued to abort another cmd
+ *
+ * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
+ * after it issues an abort on a previously issued command. This function
+ * wakes up all functions waiting on the same wait queue.
+ */
+static void
+megasas_complete_abort(struct megasas_instance *instance,
+ struct megasas_cmd *cmd)
+{
+ if (cmd->sync_cmd) {
+ cmd->sync_cmd = 0;
+ cmd->cmd_status = 0;
+ wake_up(&instance->abort_cmd_wait_q);
+ }
+
+ return;
+}
+
+/**
+ * megasas_unmap_sgbuf - Unmap SG buffers
+ * @instance: Adapter soft state
+ * @cmd: Completed command
+ */
+static inline void
+megasas_unmap_sgbuf(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ dma_addr_t buf_h;
+ u8 opcode;
+
+ if (cmd->scmd->use_sg) {
+ pci_unmap_sg(instance->pdev, cmd->scmd->request_buffer,
+ cmd->scmd->use_sg, cmd->scmd->sc_data_direction);
+ return;
+ }
+
+ if (!cmd->scmd->request_bufflen)
+ return;
+
+ opcode = cmd->frame->hdr.cmd;
+
+ if ((opcode == MFI_CMD_LD_READ) || (opcode == MFI_CMD_LD_WRITE)) {
+ if (IS_DMA64)
+ buf_h = cmd->frame->io.sgl.sge64[0].phys_addr;
+ else
+ buf_h = cmd->frame->io.sgl.sge32[0].phys_addr;
+ } else {
+ if (IS_DMA64)
+ buf_h = cmd->frame->pthru.sgl.sge64[0].phys_addr;
+ else
+ buf_h = cmd->frame->pthru.sgl.sge32[0].phys_addr;
+ }
+
+ pci_unmap_single(instance->pdev, buf_h, cmd->scmd->request_bufflen,
+ cmd->scmd->sc_data_direction);
+ return;
+}
+
+/**
+ * megasas_complete_cmd - Completes a command
+ * @instance: Adapter soft state
+ * @cmd: Command to be completed
+ * @alt_status: If non-zero, use this value as status to
+ * SCSI mid-layer instead of the value returned
+ * by the FW. This should be used if caller wants
+ * an alternate status (as in the case of aborted
+ * commands)
+ */
+static inline void
+megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ u8 alt_status)
+{
+ int exception = 0;
+ struct megasas_header *hdr = &cmd->frame->hdr;
+ unsigned long flags;
+
+ if (cmd->scmd) {
+ cmd->scmd->SCp.ptr = (char *)0;
+ }
+
+ switch (hdr->cmd) {
+
+ case MFI_CMD_PD_SCSI_IO:
+ case MFI_CMD_LD_SCSI_IO:
+
+ /*
+ * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
+ * issued either through an IO path or an IOCTL path. If it
+ * was via IOCTL, we will send it to internal completion.
+ */
+ if (cmd->sync_cmd) {
+ cmd->sync_cmd = 0;
+ megasas_complete_int_cmd(instance, cmd);
+ break;
+ }
+
+ /*
+ * Don't export physical disk devices to mid-layer.
+ */
+ if (!MEGASAS_IS_LOGICAL(cmd->scmd) &&
+ (hdr->cmd_status == MFI_STAT_OK) &&
+ (cmd->scmd->cmnd[0] == INQUIRY)) {
+
+ if (((*(u8 *) cmd->scmd->request_buffer) & 0x1F) ==
+ TYPE_DISK) {
+ cmd->scmd->result = DID_BAD_TARGET << 16;
+ exception = 1;
+ }
+ }
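+
+ /*
+ * Note: no break here. SCSI IO completions intentionally fall through
+ * to the common status handling shared with the LD read/write commands
+ * below.
+ */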
+
+ case MFI_CMD_LD_READ:
+ case MFI_CMD_LD_WRITE:
+
+ if (alt_status) {
+ cmd->scmd->result = alt_status << 16;
+ exception = 1;
+ }
+
+ if (exception) {
+
+ spin_lock_irqsave(&instance->instance_lock, flags);
+ instance->fw_outstanding--;
+ spin_unlock_irqrestore(&instance->instance_lock, flags);
+
+ megasas_unmap_sgbuf(instance, cmd);
+ cmd->scmd->scsi_done(cmd->scmd);
+ megasas_return_cmd(instance, cmd);
+
+ break;
+ }
+
+ switch (hdr->cmd_status) {
+
+ case MFI_STAT_OK:
+ cmd->scmd->result = DID_OK << 16;
+ break;
+
+ case MFI_STAT_SCSI_IO_FAILED:
+ case MFI_STAT_LD_INIT_IN_PROGRESS:
+ cmd->scmd->result =
+ (DID_ERROR << 16) | hdr->scsi_status;
+ break;
+
+ case MFI_STAT_SCSI_DONE_WITH_ERROR:
+
+ cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
+
+ if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
+ memset(cmd->scmd->sense_buffer, 0,
+ SCSI_SENSE_BUFFERSIZE);
+ memcpy(cmd->scmd->sense_buffer, cmd->sense,
+ hdr->sense_len);
+
+ cmd->scmd->result |= DRIVER_SENSE << 24;
+ }
+
+ break;
+
+ case MFI_STAT_LD_OFFLINE:
+ case MFI_STAT_DEVICE_NOT_FOUND:
+ cmd->scmd->result = DID_BAD_TARGET << 16;
+ break;
+
+ default:
+ printk(KERN_DEBUG "megasas: MFI FW status %#x\n",
+ hdr->cmd_status);
+ cmd->scmd->result = DID_ERROR << 16;
+ break;
+ }
+
+ spin_lock_irqsave(&instance->instance_lock, flags);
+ instance->fw_outstanding--;
+ spin_unlock_irqrestore(&instance->instance_lock, flags);
+
+ megasas_unmap_sgbuf(instance, cmd);
+ cmd->scmd->scsi_done(cmd->scmd);
+ megasas_return_cmd(instance, cmd);
+
+ break;
+
+ case MFI_CMD_SMP:
+ case MFI_CMD_STP:
+ case MFI_CMD_DCMD:
+
+ /*
+ * See if we got an event notification
+ */
+ if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
+ megasas_service_aen(instance, cmd);
+ else
+ megasas_complete_int_cmd(instance, cmd);
+
+ break;
+
+ case MFI_CMD_ABORT:
+ /*
+ * Cmd issued to abort another cmd returned
+ */
+ megasas_complete_abort(instance, cmd);
+ break;
+
+ default:
+ printk("megasas: Unknown command completed! [0x%X]\n",
+ hdr->cmd);
+ break;
+ }
+}
+
+/**
+ * megasas_deplete_reply_queue - Processes all completed commands
+ * @instance: Adapter soft state
+ * @alt_status: Alternate status to be returned to
+ * SCSI mid-layer instead of the status
+ * returned by the FW
+ */
+static inline int
+megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status)
+{
+ u32 status;
+ u32 producer;
+ u32 consumer;
+ u32 context;
+ struct megasas_cmd *cmd;
+
+ /*
+ * Check if it is our interrupt
+ */
+ status = readl(&instance->reg_set->outbound_intr_status);
+
+ if (!(status & MFI_OB_INTR_STATUS_MASK)) {
+ return IRQ_NONE;
+ }
+
+ /*
+ * Clear the interrupt by writing back the same value
+ */
+ writel(status, &instance->reg_set->outbound_intr_status);
+
+ producer = *instance->producer;
+ consumer = *instance->consumer;
+
+ while (consumer != producer) {
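+ /*
+ * Each entry posted by the FW is the context of a completed
+ * command, which in this driver is its index into cmd_list.
+ */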
+ context = instance->reply_queue[consumer];
+
+ cmd = instance->cmd_list[context];
+
+ megasas_complete_cmd(instance, cmd, alt_status);
+
+ consumer++;
+ if (consumer == (instance->max_fw_cmds + 1)) {
+ consumer = 0;
+ }
+ }
+
+ *instance->consumer = producer;
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * megasas_isr - isr entry point
+ */
+static irqreturn_t megasas_isr(int irq, void *devp, struct pt_regs *regs)
+{
+ return megasas_deplete_reply_queue((struct megasas_instance *)devp,
+ DID_OK);
+}
+
+/**
+ * megasas_transition_to_ready - Move the FW to READY state
+ * @reg_set: MFI register set
+ *
+ * During initialization, the FW can potentially be in any one of several
+ * possible states. If the FW is in the operational or waiting-for-handshake
+ * state, the driver must take steps to bring it to the ready state. Otherwise,
+ * it simply has to wait for the FW to reach the ready state.
+ */
+static int
+megasas_transition_to_ready(struct megasas_register_set __iomem * reg_set)
+{
+ int i;
+ u8 max_wait;
+ u32 fw_state;
+ u32 cur_state;
+
+ fw_state = readl(&reg_set->outbound_msg_0) & MFI_STATE_MASK;
+
+ while (fw_state != MFI_STATE_READY) {
+
+ printk(KERN_INFO "megasas: Waiting for FW to come to ready"
+ " state\n");
+ switch (fw_state) {
+
+ case MFI_STATE_FAULT:
+
+ printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
+ return -ENODEV;
+
+ case MFI_STATE_WAIT_HANDSHAKE:
+ /*
+ * Set the CLR bit in inbound doorbell
+ */
+ writel(MFI_INIT_CLEAR_HANDSHAKE,
+ &reg_set->inbound_doorbell);
+
+ max_wait = 2;
+ cur_state = MFI_STATE_WAIT_HANDSHAKE;
+ break;
+
+ case MFI_STATE_OPERATIONAL:
+ /*
+ * Bring it to READY state; assuming max wait 10 secs
+ */
+ megasas_disable_intr(reg_set);
+ writel(MFI_INIT_READY, &reg_set->inbound_doorbell);
+
+ max_wait = 10;
+ cur_state = MFI_STATE_OPERATIONAL;
+ break;
+
+ case MFI_STATE_UNDEFINED:
+ /*
+ * This state should not last for more than 2 seconds
+ */
+ max_wait = 2;
+ cur_state = MFI_STATE_UNDEFINED;
+ break;
+
+ case MFI_STATE_BB_INIT:
+ max_wait = 2;
+ cur_state = MFI_STATE_BB_INIT;
+ break;
+
+ case MFI_STATE_FW_INIT:
+ max_wait = 20;
+ cur_state = MFI_STATE_FW_INIT;
+ break;
+
+ case MFI_STATE_FW_INIT_2:
+ max_wait = 20;
+ cur_state = MFI_STATE_FW_INIT_2;
+ break;
+
+ case MFI_STATE_DEVICE_SCAN:
+ max_wait = 20;
+ cur_state = MFI_STATE_DEVICE_SCAN;
+ break;
+
+ case MFI_STATE_FLUSH_CACHE:
+ max_wait = 20;
+ cur_state = MFI_STATE_FLUSH_CACHE;
+ break;
+
+ default:
+ printk(KERN_DEBUG "megasas: Unknown state 0x%x\n",
+ fw_state);
+ return -ENODEV;
+ }
+
+ /*
+ * The cur_state should not last for more than max_wait secs
+ */
+ for (i = 0; i < (max_wait * 1000); i++) {
+ fw_state = MFI_STATE_MASK &
+ readl(&reg_set->outbound_msg_0);
+
+ if (fw_state == cur_state) {
+ msleep(1);
+ } else
+ break;
+ }
+
+ /*
+ * Return error if fw_state hasn't changed after max_wait
+ */
+ if (fw_state == cur_state) {
+ printk(KERN_DEBUG "FW state [%d] hasn't changed "
+ "in %d secs\n", fw_state, max_wait);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
+ * @instance: Adapter soft state
+ */
+static void megasas_teardown_frame_pool(struct megasas_instance *instance)
+{
+ int i;
+ u32 max_cmd = instance->max_fw_cmds;
+ struct megasas_cmd *cmd;
+
+ if (!instance->frame_dma_pool)
+ return;
+
+ /*
+ * Return all frames to pool
+ */
+ for (i = 0; i < max_cmd; i++) {
+
+ cmd = instance->cmd_list[i];
+
+ if (cmd->frame)
+ pci_pool_free(instance->frame_dma_pool, cmd->frame,
+ cmd->frame_phys_addr);
+
+ if (cmd->sense)
+ pci_pool_free(instance->sense_dma_pool, cmd->sense,
+ cmd->sense_phys_addr);
+ }
+
+ /*
+ * Now destroy the pool itself
+ */
+ pci_pool_destroy(instance->frame_dma_pool);
+ pci_pool_destroy(instance->sense_dma_pool);
+
+ instance->frame_dma_pool = NULL;
+ instance->sense_dma_pool = NULL;
+}
+
+/**
+ * megasas_create_frame_pool - Creates DMA pool for cmd frames
+ * @instance: Adapter soft state
+ *
+ * Each command packet has an embedded DMA memory buffer that is used for
+ * filling MFI frame and the SG list that immediately follows the frame. This
+ * function creates those DMA memory buffers for each command packet by using
+ * PCI pool facility.
+ */
+static int megasas_create_frame_pool(struct megasas_instance *instance)
+{
+ int i;
+ u32 max_cmd;
+ u32 sge_sz;
+ u32 sgl_sz;
+ u32 total_sz;
+ u32 frame_count;
+ struct megasas_cmd *cmd;
+
+ max_cmd = instance->max_fw_cmds;
+
+ /*
+ * Size of our frame is 64 bytes for the MFI frame, followed by space for
+ * the max number of SG elements. Sense buffers are allocated separately
+ * from a dedicated pool below.
+ */
+ sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
+ sizeof(struct megasas_sge32);
+
+ /*
+ * Calculate the number of 64-byte frames required for the SGL
+ */
+ sgl_sz = sge_sz * instance->max_num_sge;
+ frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;
+
+ /*
+ * We need one extra frame for the MFI command
+ */
+ frame_count++;
+
+ total_sz = MEGAMFI_FRAME_SIZE * frame_count;
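+
+ /*
+ * For example, if sge_sz were 16 bytes and the FW reported
+ * max_num_sge == 8, sgl_sz would be 128, frame_count would be
+ * 2 + 1 = 3 and total_sz would be 3 * 64 = 192 bytes per command.
+ * (Illustrative numbers only; the real values come from the FW.)
+ */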
+ /*
+ * Use DMA pool facility provided by PCI layer
+ */
+ instance->frame_dma_pool = pci_pool_create("megasas frame pool",
+ instance->pdev, total_sz, 64,
+ 0);
+
+ if (!instance->frame_dma_pool) {
+ printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
+ return -ENOMEM;
+ }
+
+ instance->sense_dma_pool = pci_pool_create("megasas sense pool",
+ instance->pdev, 128, 4, 0);
+
+ if (!instance->sense_dma_pool) {
+ printk(KERN_DEBUG "megasas: failed to setup sense pool\n");
+
+ pci_pool_destroy(instance->frame_dma_pool);
+ instance->frame_dma_pool = NULL;
+
+ return -ENOMEM;
+ }
+
+ /*
+ * Allocate and attach a frame to each of the commands in cmd_list.
+ * By using cmd->index as the context instead of &cmd, we can always
+ * use a 32-bit context regardless of the architecture
+ */
+ for (i = 0; i < max_cmd; i++) {
+
+ cmd = instance->cmd_list[i];
+
+ cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
+ GFP_KERNEL, &cmd->frame_phys_addr);
+
+ cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
+ GFP_KERNEL, &cmd->sense_phys_addr);
+
+ /*
+ * megasas_teardown_frame_pool() takes care of freeing
+ * whatever has been allocated
+ */
+ if (!cmd->frame || !cmd->sense) {
+ printk(KERN_DEBUG "megasas: pci_pool_alloc failed \n");
+ megasas_teardown_frame_pool(instance);
+ return -ENOMEM;
+ }
+
+ cmd->frame->io.context = cmd->index;
+ }
+
+ return 0;
+}
+
+/**
+ * megasas_free_cmds - Free all the cmds in the free cmd pool
+ * @instance: Adapter soft state
+ */
+static void megasas_free_cmds(struct megasas_instance *instance)
+{
+ int i;
+ /* First free the MFI frame pool */
+ megasas_teardown_frame_pool(instance);
+
+ /* Free all the commands in the cmd_list */
+ for (i = 0; i < instance->max_fw_cmds; i++)
+ kfree(instance->cmd_list[i]);
+
+ /* Free the cmd_list buffer itself */
+ kfree(instance->cmd_list);
+ instance->cmd_list = NULL;
+
+ INIT_LIST_HEAD(&instance->cmd_pool);
+}
+
+/**
+ * megasas_alloc_cmds - Allocates the command packets
+ * @instance: Adapter soft state
+ *
+ * Each command that is issued to the FW, whether an IO command from the OS or
+ * an internal command like an IOCTL, is wrapped in a local data structure
+ * called megasas_cmd. The frame embedded in this megasas_cmd is what is
+ * actually issued to the FW.
+ *
+ * Each frame has a 32-bit field called context (tag). This context is used
+ * to get back the megasas_cmd from the frame when a frame gets completed in
+ * the ISR. Typically the address of the megasas_cmd itself would be used as
+ * the context. But we wanted to keep the differences between 32 and 64 bit
+ * systems to the minimum. We always use 32 bit integers for the context. In
+ * this driver, the 32 bit values are the indices into an array cmd_list.
+ * This array is used only to look up the megasas_cmd given the context. The
+ * free commands themselves are maintained in a linked list called cmd_pool.
+ */
+static int megasas_alloc_cmds(struct megasas_instance *instance)
+{
+ int i;
+ int j;
+ u32 max_cmd;
+ struct megasas_cmd *cmd;
+
+ max_cmd = instance->max_fw_cmds;
+
+ /*
+ * instance->cmd_list is an array of struct megasas_cmd pointers.
+ * Allocate the dynamic array first and then allocate individual
+ * commands.
+ */
+ instance->cmd_list = kmalloc(sizeof(struct megasas_cmd *) * max_cmd,
+ GFP_KERNEL);
+
+ if (!instance->cmd_list) {
+ printk(KERN_DEBUG "megasas: out of memory\n");
+ return -ENOMEM;
+ }
+
+ memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) * max_cmd);
+
+ for (i = 0; i < max_cmd; i++) {
+ instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
+ GFP_KERNEL);
+
+ if (!instance->cmd_list[i]) {
+
+ for (j = 0; j < i; j++)
+ kfree(instance->cmd_list[j]);
+
+ kfree(instance->cmd_list);
+ instance->cmd_list = NULL;
+
+ return -ENOMEM;
+ }
+ }
+
+ /*
+ * Add all the commands to command pool (instance->cmd_pool)
+ */
+ for (i = 0; i < max_cmd; i++) {
+ cmd = instance->cmd_list[i];
+ memset(cmd, 0, sizeof(struct megasas_cmd));
+ cmd->index = i;
+ cmd->instance = instance;
+
+ list_add_tail(&cmd->list, &instance->cmd_pool);
+ }
+
+ /*
+ * Create a frame pool and assign one frame to each cmd
+ */
+ if (megasas_create_frame_pool(instance)) {
+ printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
+ megasas_free_cmds(instance);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * megasas_get_ctrl_info -	Returns FW's controller structure
+ * @instance: Adapter soft state
+ * @ctrl_info: Controller information structure
+ *
+ * Issues an internal command (DCMD) to get the FW's controller structure.
+ * This information is mainly used to find out the maximum IO transfer per
+ * command supported by the FW.
+ */
+static int
+megasas_get_ctrl_info(struct megasas_instance *instance,
+ struct megasas_ctrl_info *ctrl_info)
+{
+ int ret = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct megasas_ctrl_info *ci;
+ dma_addr_t ci_h = 0;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: Failed to get a free cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ ci = pci_alloc_consistent(instance->pdev,
+ sizeof(struct megasas_ctrl_info), &ci_h);
+
+ if (!ci) {
+ printk(KERN_DEBUG "Failed to alloc mem for ctrl info\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
+ dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
+ dcmd->sgl.sge32[0].phys_addr = ci_h;
+ dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info);
+
+ if (!megasas_issue_polled(instance, cmd)) {
+ ret = 0;
+ memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
+ } else {
+ ret = -1;
+ }
+
+ pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
+ ci, ci_h);
+
+ megasas_return_cmd(instance, cmd);
+ return ret;
+}
+
+/**
+ * megasas_init_mfi - Initializes the FW
+ * @instance: Adapter soft state
+ *
+ * This is the main function for initializing MFI firmware.
+ */
+static int megasas_init_mfi(struct megasas_instance *instance)
+{
+ u32 context_sz;
+ u32 reply_q_sz;
+ u32 max_sectors_1;
+ u32 max_sectors_2;
+ struct megasas_register_set __iomem *reg_set;
+
+ struct megasas_cmd *cmd;
+ struct megasas_ctrl_info *ctrl_info;
+
+ struct megasas_init_frame *init_frame;
+ struct megasas_init_queue_info *initq_info;
+ dma_addr_t init_frame_h;
+ dma_addr_t initq_info_h;
+
+ /*
+ * Map the message registers
+ */
+ instance->base_addr = pci_resource_start(instance->pdev, 0);
+
+ if (pci_request_regions(instance->pdev, "megasas: LSI Logic")) {
+ printk(KERN_DEBUG "megasas: IO memory region busy!\n");
+ return -EBUSY;
+ }
+
+ instance->reg_set = ioremap_nocache(instance->base_addr, 8192);
+
+ if (!instance->reg_set) {
+ printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
+ goto fail_ioremap;
+ }
+
+ reg_set = instance->reg_set;
+
+ /*
+ * We expect the FW state to be READY
+ */
+ if (megasas_transition_to_ready(instance->reg_set))
+ goto fail_ready_state;
+
+ /*
+ * Get various operational parameters from status register
+ */
+ instance->max_fw_cmds = readl(&reg_set->outbound_msg_0) & 0x00FFFF;
+ instance->max_num_sge = (readl(&reg_set->outbound_msg_0) & 0xFF0000) >>
+ 0x10;
+ /*
+ * Create a pool of commands
+ */
+ if (megasas_alloc_cmds(instance))
+ goto fail_alloc_cmds;
+
+ /*
+ * Allocate memory for reply queue. Length of reply queue should
+ * be _one_ more than the maximum commands handled by the firmware.
+ *
+ * Note: When FW completes commands, it places corresponding context
+ * values in this circular reply queue. This circular queue is a fairly
+ * typical producer-consumer queue. FW is the producer (of completed
+ * commands) and the driver is the consumer.
+ */
+ context_sz = sizeof(u32);
+ reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
+
+ instance->reply_queue = pci_alloc_consistent(instance->pdev,
+ reply_q_sz,
+ &instance->reply_queue_h);
+
+ if (!instance->reply_queue) {
+ printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n");
+ goto fail_reply_queue;
+ }
+
+ /*
+ * Prepare an init frame. Note that the init frame points to the queue info
+ * structure. Each frame has its SGL allocated after the first 64 bytes. For
+ * this frame - since we don't need any SGL - we use the SGL's space as the
+ * queue info structure
+ *
+ * We will not get a NULL command below. We just created the pool.
+ */
+ cmd = megasas_get_cmd(instance);
+
+ init_frame = (struct megasas_init_frame *)cmd->frame;
+ initq_info = (struct megasas_init_queue_info *)
+ ((unsigned long)init_frame + 64);
+
+ init_frame_h = cmd->frame_phys_addr;
+ initq_info_h = init_frame_h + 64;
+
+ memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
+ memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
+
+ initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
+ initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;
+
+ initq_info->producer_index_phys_addr_lo = instance->producer_h;
+ initq_info->consumer_index_phys_addr_lo = instance->consumer_h;
+
+ init_frame->cmd = MFI_CMD_INIT;
+ init_frame->cmd_status = 0xFF;
+ init_frame->queue_info_new_phys_addr_lo = initq_info_h;
+
+ init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
+
+ /*
+ * Issue the init frame in polled mode
+ */
+ if (megasas_issue_polled(instance, cmd)) {
+ printk(KERN_DEBUG "megasas: Failed to init firmware\n");
+ goto fail_fw_init;
+ }
+
+ megasas_return_cmd(instance, cmd);
+
+ ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
+
+ /*
+ * Compute the max allowed sectors per IO: The controller info has two
+ * limits on max sectors. Driver should use the minimum of these two.
+ *
+ * 1 << stripe_sz_ops.min = max sectors per strip
+ *
+ * Note that older firmwares ( < FW ver 30) didn't report information
+ * to calculate max_sectors_1, so the number always ended up as zero.
+ */
+ if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
+
+ max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
+ ctrl_info->max_strips_per_io;
+ max_sectors_2 = ctrl_info->max_request_size;
+
+ instance->max_sectors_per_req = (max_sectors_1 < max_sectors_2)
+ ? max_sectors_1 : max_sectors_2;
+ } else
+ instance->max_sectors_per_req = instance->max_num_sge *
+ PAGE_SIZE / 512;
+
+ kfree(ctrl_info);
+
+ return 0;
+
+ fail_fw_init:
+ megasas_return_cmd(instance, cmd);
+
+ pci_free_consistent(instance->pdev, reply_q_sz,
+ instance->reply_queue, instance->reply_queue_h);
+ fail_reply_queue:
+ megasas_free_cmds(instance);
+
+ fail_alloc_cmds:
+ fail_ready_state:
+ iounmap(instance->reg_set);
+
+ fail_ioremap:
+ pci_release_regions(instance->pdev);
+
+ return -EINVAL;
+}
+
+/**
+ * megasas_release_mfi - Reverses the FW initialization
+ * @instance:	Adapter soft state
+ */
+static void megasas_release_mfi(struct megasas_instance *instance)
+{
+ u32 reply_q_sz = sizeof(u32) * (instance->max_fw_cmds + 1);
+
+ pci_free_consistent(instance->pdev, reply_q_sz,
+ instance->reply_queue, instance->reply_queue_h);
+
+ megasas_free_cmds(instance);
+
+ iounmap(instance->reg_set);
+
+ pci_release_regions(instance->pdev);
+}
+
+/**
+ * megasas_get_seq_num - Gets latest event sequence numbers
+ * @instance: Adapter soft state
+ * @eli: FW event log sequence numbers information
+ *
+ * The FW maintains a log of all events in a non-volatile area. Upper layers
+ * usually first find out the latest sequence number of the events, the seq
+ * number at boot etc. They then "read" all the events below the latest seq
+ * number by issuing a direct FW cmd (DCMD). For future events (beyond the
+ * latest seq number), they subscribe to AEN (asynchronous event notification)
+ * and wait for those events to happen.
+ */
+static int
+megasas_get_seq_num(struct megasas_instance *instance,
+ struct megasas_evt_log_info *eli)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct megasas_evt_log_info *el_info;
+ dma_addr_t el_info_h = 0;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+ el_info = pci_alloc_consistent(instance->pdev,
+ sizeof(struct megasas_evt_log_info),
+ &el_info_h);
+
+ if (!el_info) {
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(el_info, 0, sizeof(*el_info));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
+ dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
+ dcmd->sgl.sge32[0].phys_addr = el_info_h;
+ dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info);
+
+ megasas_issue_blocked_cmd(instance, cmd);
+
+ /*
+ * Copy the data back into the caller's buffer
+ */
+ memcpy(eli, el_info, sizeof(struct megasas_evt_log_info));
+
+ pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
+ el_info, el_info_h);
+
+ megasas_return_cmd(instance, cmd);
+
+ return 0;
+}
+
+/**
+ * megasas_register_aen - Registers for asynchronous event notification
+ * @instance: Adapter soft state
+ * @seq_num: The starting sequence number
+ * @class_locale_word:	Class/locale of the events to subscribe to
+ *
+ * This function subscribes for AEN for events beyond the @seq_num. It requests
+ * to be notified if and only if the event is of type @class_locale_word
+ */
+static int
+megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
+ u32 class_locale_word)
+{
+ int ret_val;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ union megasas_evt_class_locale curr_aen;
+ union megasas_evt_class_locale prev_aen;
+
+ /*
+ * If there is an AEN pending already (aen_cmd), check if the
+ * class_locale of that pending AEN is inclusive of the new
+ * AEN request we currently have. If it is, then we don't have
+ * to do anything. In other words, whichever events the current
+ * AEN request is subscribing to have already been subscribed
+ * to.
+ *
+ * If the old cmd is _not_ inclusive, then we have to abort
+ * that command, form a class_locale that is a superset of both
+ * the old and the current one, and re-issue it to the FW
+ */
+
+ curr_aen.word = class_locale_word;
+
+ if (instance->aen_cmd) {
+
+ prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
+
+ /*
+ * A class whose enum value is smaller is inclusive of all
+ * higher values. If a PROGRESS (= -1) was previously
+ * registered, then new registration requests for higher
+ * classes need not be sent to the FW. They are automatically
+ * included.
+ *
+ * Locale numbers don't have such hierarchy. They are bitmap
+ * values
+ */
+ if ((prev_aen.members.class <= curr_aen.members.class) &&
+ !((prev_aen.members.locale & curr_aen.members.locale) ^
+ curr_aen.members.locale)) {
+ /*
+ * Previously issued event registration includes
+ * current request. Nothing to do.
+ */
+ return 0;
+ } else {
+ curr_aen.members.locale |= prev_aen.members.locale;
+
+ if (prev_aen.members.class < curr_aen.members.class)
+ curr_aen.members.class = prev_aen.members.class;
+
+ instance->aen_cmd->abort_aen = 1;
+ ret_val = megasas_issue_blocked_abort_cmd(instance,
+ instance->
+ aen_cmd);
+
+ if (ret_val) {
+ printk(KERN_DEBUG "megasas: Failed to abort "
+ "previous AEN command\n");
+ return ret_val;
+ }
+ }
+ }
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd)
+ return -ENOMEM;
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
+
+ /*
+ * Prepare DCMD for aen registration
+ */
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
+ dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
+ dcmd->mbox.w[0] = seq_num;
+ dcmd->mbox.w[1] = curr_aen.word;
+ dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h;
+ dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail);
+
+ /*
+ * Store reference to the cmd used to register for AEN. When an
+ * application wants us to register for AEN, we have to abort this
+ * cmd and re-register with a new EVENT LOCALE supplied by that app
+ */
+ instance->aen_cmd = cmd;
+
+ /*
+ * Issue the aen registration frame
+ */
+ writel(cmd->frame_phys_addr >> 3,
+ &instance->reg_set->inbound_queue_port);
+
+ return 0;
+}
+
+/**
+ * megasas_start_aen - Subscribes to AEN during driver load time
+ * @instance: Adapter soft state
+ */
+static int megasas_start_aen(struct megasas_instance *instance)
+{
+ struct megasas_evt_log_info eli;
+ union megasas_evt_class_locale class_locale;
+
+ /*
+ * Get the latest sequence number from FW
+ */
+ memset(&eli, 0, sizeof(eli));
+
+ if (megasas_get_seq_num(instance, &eli))
+ return -1;
+
+ /*
+ * Register AEN with FW for latest sequence number plus 1
+ */
+ class_locale.members.reserved = 0;
+ class_locale.members.locale = MR_EVT_LOCALE_ALL;
+ class_locale.members.class = MR_EVT_CLASS_DEBUG;
+
+ return megasas_register_aen(instance, eli.newest_seq_num + 1,
+ class_locale.word);
+}
+
+/**
+ * megasas_io_attach - Attaches this driver to SCSI mid-layer
+ * @instance: Adapter soft state
+ */
+static int megasas_io_attach(struct megasas_instance *instance)
+{
+ struct Scsi_Host *host = instance->host;
+
+ /*
+ * Export parameters required by SCSI mid-layer
+ */
+ host->irq = instance->pdev->irq;
+ host->unique_id = instance->unique_id;
+ host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS;
+ host->this_id = instance->init_id;
+ host->sg_tablesize = instance->max_num_sge;
+ host->max_sectors = instance->max_sectors_per_req;
+ host->cmd_per_lun = 128;
+ host->max_channel = MEGASAS_MAX_CHANNELS - 1;
+ host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
+ host->max_lun = MEGASAS_MAX_LUN;
+
+ /*
+ * Notify the mid-layer about the new controller
+ */
+ if (scsi_add_host(host, &instance->pdev->dev)) {
+ printk(KERN_DEBUG "megasas: scsi_add_host failed\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Trigger SCSI to scan our drives
+ */
+ scsi_scan_host(host);
+ return 0;
+}
+
+/**
+ * megasas_probe_one - PCI hotplug entry point
+ * @pdev: PCI device structure
+ * @id: PCI ids of supported hotplugged adapter
+ */
+static int __devinit
+megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int rval;
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+
+ /*
+ * Announce PCI information
+ */
+ printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
+ pdev->vendor, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+
+ printk("bus %d:slot %d:func %d\n",
+ pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+
+ /*
+ * PCI prepping: enable device, set bus mastering and DMA mask
+ */
+ rval = pci_enable_device(pdev);
+
+ if (rval) {
+ return rval;
+ }
+
+ pci_set_master(pdev);
+
+ /*
+ * All our controllers are capable of performing 64-bit DMA
+ */
+ if (IS_DMA64) {
+ if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
+
+ if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
+ goto fail_set_dma_mask;
+ }
+ } else {
+ if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
+ goto fail_set_dma_mask;
+ }
+
+ host = scsi_host_alloc(&megasas_template,
+ sizeof(struct megasas_instance));
+
+ if (!host) {
+ printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n");
+ goto fail_alloc_instance;
+ }
+
+ instance = (struct megasas_instance *)host->hostdata;
+ memset(instance, 0, sizeof(*instance));
+
+ instance->producer = pci_alloc_consistent(pdev, sizeof(u32),
+ &instance->producer_h);
+ instance->consumer = pci_alloc_consistent(pdev, sizeof(u32),
+ &instance->consumer_h);
+
+ if (!instance->producer || !instance->consumer) {
+ printk(KERN_DEBUG "megasas: Failed to allocate memory for "
+ "producer, consumer\n");
+ goto fail_alloc_dma_buf;
+ }
+
+ *instance->producer = 0;
+ *instance->consumer = 0;
+
+ instance->evt_detail = pci_alloc_consistent(pdev,
+ sizeof(struct
+ megasas_evt_detail),
+ &instance->evt_detail_h);
+
+ if (!instance->evt_detail) {
+ printk(KERN_DEBUG "megasas: Failed to allocate memory for "
+ "event detail structure\n");
+ goto fail_alloc_dma_buf;
+ }
+
+ /*
+ * Initialize locks and queues
+ */
+ INIT_LIST_HEAD(&instance->cmd_pool);
+
+ init_waitqueue_head(&instance->int_cmd_wait_q);
+ init_waitqueue_head(&instance->abort_cmd_wait_q);
+
+ spin_lock_init(&instance->cmd_pool_lock);
+ spin_lock_init(&instance->instance_lock);
+
+ sema_init(&instance->aen_mutex, 1);
+ sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
+
+ /*
+ * Initialize PCI related and misc parameters
+ */
+ instance->pdev = pdev;
+ instance->host = host;
+ instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ instance->init_id = MEGASAS_DEFAULT_INIT_ID;
+
+ /*
+ * Initialize MFI Firmware
+ */
+ if (megasas_init_mfi(instance))
+ goto fail_init_mfi;
+
+ /*
+ * Register IRQ
+ */
+ if (request_irq(pdev->irq, megasas_isr, SA_SHIRQ, "megasas", instance)) {
+ printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
+ goto fail_irq;
+ }
+
+ megasas_enable_intr(instance->reg_set);
+
+ /*
+ * Store instance in PCI softstate
+ */
+ pci_set_drvdata(pdev, instance);
+
+ /*
+ * Add this controller to megasas_mgmt_info structure so that it
+ * can be exported to management applications
+ */
+ megasas_mgmt_info.count++;
+ megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
+ megasas_mgmt_info.max_index++;
+
+ /*
+ * Initiate AEN (Asynchronous Event Notification)
+ */
+ if (megasas_start_aen(instance)) {
+ printk(KERN_DEBUG "megasas: start aen failed\n");
+ goto fail_start_aen;
+ }
+
+ /*
+ * Register with SCSI mid-layer
+ */
+ if (megasas_io_attach(instance))
+ goto fail_io_attach;
+
+ return 0;
+
+ fail_start_aen:
+ fail_io_attach:
+ megasas_mgmt_info.count--;
+ megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
+ megasas_mgmt_info.max_index--;
+
+ pci_set_drvdata(pdev, NULL);
+ megasas_disable_intr(instance->reg_set);
+ free_irq(instance->pdev->irq, instance);
+
+ megasas_release_mfi(instance);
+
+ fail_irq:
+ fail_init_mfi:
+ fail_alloc_dma_buf:
+ if (instance->evt_detail)
+ pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ instance->evt_detail,
+ instance->evt_detail_h);
+
+ if (instance->producer)
+ pci_free_consistent(pdev, sizeof(u32), instance->producer,
+ instance->producer_h);
+ if (instance->consumer)
+ pci_free_consistent(pdev, sizeof(u32), instance->consumer,
+ instance->consumer_h);
+ scsi_host_put(host);
+
+ fail_alloc_instance:
+ fail_set_dma_mask:
+ pci_disable_device(pdev);
+
+ return -ENODEV;
+}
+
+/**
+ * megasas_flush_cache - Requests FW to flush all its caches
+ * @instance: Adapter soft state
+ */
+static void megasas_flush_cache(struct megasas_instance *instance)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd)
+ return;
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 0;
+ dcmd->flags = MFI_FRAME_DIR_NONE;
+ dcmd->timeout = 0;
+ dcmd->data_xfer_len = 0;
+ dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
+ dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
+
+ megasas_issue_blocked_cmd(instance, cmd);
+
+ megasas_return_cmd(instance, cmd);
+
+ return;
+}
+
+/**
+ * megasas_shutdown_controller - Instructs FW to shutdown the controller
+ * @instance: Adapter soft state
+ */
+static void megasas_shutdown_controller(struct megasas_instance *instance)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd)
+ return;
+
+ if (instance->aen_cmd)
+ megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd);
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 0;
+ dcmd->flags = MFI_FRAME_DIR_NONE;
+ dcmd->timeout = 0;
+ dcmd->data_xfer_len = 0;
+ dcmd->opcode = MR_DCMD_CTRL_SHUTDOWN;
+
+ megasas_issue_blocked_cmd(instance, cmd);
+
+ megasas_return_cmd(instance, cmd);
+
+ return;
+}
+
+/**
+ * megasas_detach_one - PCI hot"un"plug entry point
+ * @pdev: PCI device structure
+ */
+static void megasas_detach_one(struct pci_dev *pdev)
+{
+ int i;
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+
+ instance = pci_get_drvdata(pdev);
+ host = instance->host;
+
+ scsi_remove_host(instance->host);
+ megasas_flush_cache(instance);
+ megasas_shutdown_controller(instance);
+
+ /*
+ * Take the instance off the instance array. Note that we will not
+ * decrement the max_index. We let this array be a sparse array
+ */
+ for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+ if (megasas_mgmt_info.instance[i] == instance) {
+ megasas_mgmt_info.count--;
+ megasas_mgmt_info.instance[i] = NULL;
+
+ break;
+ }
+ }
+
+ pci_set_drvdata(instance->pdev, NULL);
+
+ megasas_disable_intr(instance->reg_set);
+
+ free_irq(instance->pdev->irq, instance);
+
+ megasas_release_mfi(instance);
+
+ pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ instance->evt_detail, instance->evt_detail_h);
+
+ pci_free_consistent(pdev, sizeof(u32), instance->producer,
+ instance->producer_h);
+
+ pci_free_consistent(pdev, sizeof(u32), instance->consumer,
+ instance->consumer_h);
+
+ scsi_host_put(host);
+
+ pci_set_drvdata(pdev, NULL);
+
+ pci_disable_device(pdev);
+
+ return;
+}
+
+/**
+ * megasas_shutdown - Shutdown entry point
+ * @pdev:	PCI device structure
+ */
+static void megasas_shutdown(struct pci_dev *pdev)
+{
+ struct megasas_instance *instance = pci_get_drvdata(pdev);
+ megasas_flush_cache(instance);
+}
+
+/**
+ * megasas_mgmt_open - char node "open" entry point
+ */
+static int megasas_mgmt_open(struct inode *inode, struct file *filep)
+{
+ /*
+ * Allow only those users with admin rights
+ */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ return 0;
+}
+
+/**
+ * megasas_mgmt_release - char node "release" entry point
+ */
+static int megasas_mgmt_release(struct inode *inode, struct file *filep)
+{
+ filep->private_data = NULL;
+ fasync_helper(-1, filep, 0, &megasas_async_queue);
+
+ return 0;
+}
+
+/**
+ * megasas_mgmt_fasync - Async notifier registration from applications
+ *
+ * This function adds the calling process to a driver global queue. When an
+ * event occurs, SIGIO will be sent to all processes in this queue.
+ */
+static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
+{
+ int rc;
+
+ down(&megasas_async_queue_mutex);
+
+ rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
+
+ up(&megasas_async_queue_mutex);
+
+ if (rc >= 0) {
+ /* For sanity check when we get ioctl */
+ filep->private_data = filep;
+ return 0;
+ }
+
+ printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
+
+ return rc;
+}
+
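+/*
+ * Illustrative sketch only: a management application would typically drive
+ * this interface roughly as follows (the device node path and error handling
+ * are assumptions, not something this driver creates or mandates):
+ *
+ *	fd = open("/dev/megaraid_sas_ioctl", O_RDONLY);
+ *	fcntl(fd, F_SETOWN, getpid());
+ *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
+ *	ioctl(fd, MEGASAS_IOC_GET_AEN, &aen);
+ *	// on SIGIO: read the event details, then re-arm with
+ *	// MEGASAS_IOC_GET_AEN and/or issue MEGASAS_IOC_FIRMWARE packets
+ */
+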
+/**
+ * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
+ * @instance:	Adapter soft state
+ * @user_ioc:	User's ioctl packet, as passed in from user space
+ * @ioc:	Kernel copy of the user's ioctl packet
+ */
+static int
+megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
+ struct megasas_iocpacket __user * user_ioc,
+ struct megasas_iocpacket *ioc)
+{
+ struct megasas_sge32 *kern_sge32;
+ struct megasas_cmd *cmd;
+ void *kbuff_arr[MAX_IOCTL_SGE];
+ dma_addr_t buf_handle = 0;
+ int error = 0, i;
+ void *sense = NULL;
+ dma_addr_t sense_handle;
+ u32 *sense_ptr;
+
+ memset(kbuff_arr, 0, sizeof(kbuff_arr));
+
+ if (ioc->sge_count > MAX_IOCTL_SGE) {
+ printk(KERN_DEBUG "megasas: SGE count [%d] > max limit [%d]\n",
+ ioc->sge_count, MAX_IOCTL_SGE);
+ return -EINVAL;
+ }
+
+ cmd = megasas_get_cmd(instance);
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * User's IOCTL packet has 2 frames (maximum). Copy those two
+ * frames into our cmd's frames. cmd->frame's context will get
+ * overwritten when we copy from user's frames. So set that value
+ * alone separately
+ */
+ memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
+ cmd->frame->hdr.context = cmd->index;
+
+ /*
+ * The management interface between applications and the fw uses
+ * MFI frames. E.g., RAID configuration changes, LD property changes
+ * etc. are accomplished through different kinds of MFI frames. The
+ * driver needs to care only about substituting user buffers with
+ * kernel buffers in SGLs. The location of SGL is embedded in the
+ * struct iocpacket itself.
+ */
+ kern_sge32 = (struct megasas_sge32 *)
+ ((unsigned long)cmd->frame + ioc->sgl_off);
+
+ /*
+ * For each user buffer, create a mirror buffer and copy in
+ */
+ for (i = 0; i < ioc->sge_count; i++) {
+ kbuff_arr[i] = pci_alloc_consistent(instance->pdev,
+ ioc->sgl[i].iov_len,
+ &buf_handle);
+ if (!kbuff_arr[i]) {
+ printk(KERN_DEBUG "megasas: Failed to alloc "
+ "kernel SGL buffer for IOCTL \n");
+ error = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * We don't change the dma_coherent_mask, so
+ * pci_alloc_consistent only returns 32bit addresses
+ */
+ kern_sge32[i].phys_addr = (u32) buf_handle;
+ kern_sge32[i].length = ioc->sgl[i].iov_len;
+
+ /*
+ * We created a kernel buffer corresponding to the
+ * user buffer. Now copy in from the user buffer
+ */
+ if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
+ (u32) (ioc->sgl[i].iov_len))) {
+ error = -EFAULT;
+ goto out;
+ }
+ }
+
+ if (ioc->sense_len) {
+ sense = pci_alloc_consistent(instance->pdev, ioc->sense_len,
+ &sense_handle);
+ if (!sense) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ sense_ptr =
+ (u32 *) ((unsigned long)cmd->frame + ioc->sense_off);
+ *sense_ptr = sense_handle;
+ }
+
+ /*
+ * Set the sync_cmd flag so that the ISR knows not to complete this
+ * cmd to the SCSI mid-layer
+ */
+ cmd->sync_cmd = 1;
+ megasas_issue_blocked_cmd(instance, cmd);
+ cmd->sync_cmd = 0;
+
+ /*
+ * copy out the kernel buffers to user buffers
+ */
+ for (i = 0; i < ioc->sge_count; i++) {
+ if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
+ ioc->sgl[i].iov_len)) {
+ error = -EFAULT;
+ goto out;
+ }
+ }
+
+ /*
+ * copy out the sense
+ */
+ if (ioc->sense_len) {
+ /*
+ * sense_ptr points to the location that has the user
+ * sense buffer address
+ */
+ sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw +
+ ioc->sense_off);
+
+ if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
+ sense, ioc->sense_len)) {
+ error = -EFAULT;
+ goto out;
+ }
+ }
+
+ /*
+ * copy the status codes returned by the fw
+ */
+ if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
+ &cmd->frame->hdr.cmd_status, sizeof(u8))) {
+ printk(KERN_DEBUG "megasas: Error copying out cmd_status\n");
+ error = -EFAULT;
+ }
+
+ out:
+ if (sense) {
+ pci_free_consistent(instance->pdev, ioc->sense_len,
+ sense, sense_handle);
+ }
+
+ for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) {
+ pci_free_consistent(instance->pdev,
+ kern_sge32[i].length,
+ kbuff_arr[i], kern_sge32[i].phys_addr);
+ }
+
+ megasas_return_cmd(instance, cmd);
+ return error;
+}
+
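+/**
+ * megasas_lookup_instance -	Find an adapter by SCSI host number
+ * @host_no:			Host number assigned by the SCSI mid-layer
+ *
+ * Walks the global megasas_mgmt_info array and returns the instance whose
+ * Scsi_Host carries @host_no, or NULL if no such adapter is registered.
+ */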
+static struct megasas_instance *megasas_lookup_instance(u16 host_no)
+{
+ int i;
+
+ for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+
+ if ((megasas_mgmt_info.instance[i]) &&
+ (megasas_mgmt_info.instance[i]->host->host_no == host_no))
+ return megasas_mgmt_info.instance[i];
+ }
+
+ return NULL;
+}
+
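+/**
+ * megasas_mgmt_ioctl_fw -	Handler for the MEGASAS_IOC_FIRMWARE ioctl
+ * @file:			File structure of the management node
+ * @arg:			User pointer to a struct megasas_iocpacket
+ *
+ * Copies the ioctl packet in from user space, looks up the addressed
+ * adapter and hands the packet to megasas_mgmt_fw_ioctl(), limiting the
+ * number of parallel ioctl cmds with ioctl_sem.
+ */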
+static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
+{
+ struct megasas_iocpacket __user *user_ioc =
+ (struct megasas_iocpacket __user *)arg;
+ struct megasas_iocpacket *ioc;
+ struct megasas_instance *instance;
+ int error;
+
+ ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
+ if (!ioc)
+ return -ENOMEM;
+
+ if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) {
+ error = -EFAULT;
+ goto out_kfree_ioc;
+ }
+
+ instance = megasas_lookup_instance(ioc->host_no);
+ if (!instance) {
+ error = -ENODEV;
+ goto out_kfree_ioc;
+ }
+
+ /*
+ * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds
+ */
+ if (down_interruptible(&instance->ioctl_sem)) {
+ error = -ERESTARTSYS;
+ goto out_kfree_ioc;
+ }
+ error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
+ up(&instance->ioctl_sem);
+
+ out_kfree_ioc:
+ kfree(ioc);
+ return error;
+}
+
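+/**
+ * megasas_mgmt_ioctl_aen -	Handler for the MEGASAS_IOC_GET_AEN ioctl
+ * @file:			File structure of the management node
+ * @arg:			User pointer to a struct megasas_aen
+ *
+ * Re-registers the AEN command with the FW using the sequence number and
+ * class/locale supplied by the application. The caller must have registered
+ * via fasync first, so that it can actually receive the SIGIO notification.
+ */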
+static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
+{
+ struct megasas_instance *instance;
+ struct megasas_aen aen;
+ int error;
+
+ if (file->private_data != file) {
+ printk(KERN_DEBUG "megasas: fasync_helper was not "
+ "called first\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
+ return -EFAULT;
+
+ instance = megasas_lookup_instance(aen.host_no);
+
+ if (!instance)
+ return -ENODEV;
+
+ down(&instance->aen_mutex);
+ error = megasas_register_aen(instance, aen.seq_num,
+ aen.class_locale_word);
+ up(&instance->aen_mutex);
+ return error;
+}
+
+/**
+ * megasas_mgmt_ioctl - char node ioctl entry point
+ */
+static long
+megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case MEGASAS_IOC_FIRMWARE:
+ return megasas_mgmt_ioctl_fw(file, arg);
+
+ case MEGASAS_IOC_GET_AEN:
+ return megasas_mgmt_ioctl_aen(file, arg);
+ }
+
+ return -ENOTTY;
+}
+
+#ifdef CONFIG_COMPAT
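+/*
+ * 32-bit compatibility path: rebuild a native struct megasas_iocpacket in
+ * compat-allocated user space from the 32-bit layout, fix up the SGL
+ * iov_base pointers and then reuse megasas_mgmt_ioctl_fw().
+ */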
+static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
+{
+ struct compat_megasas_iocpacket __user *cioc =
+ (struct compat_megasas_iocpacket __user *)arg;
+ struct megasas_iocpacket __user *ioc =
+ compat_alloc_user_space(sizeof(struct megasas_iocpacket));
+ int i;
+ int error = 0;
+
+ clear_user(ioc, sizeof(*ioc));
+
+ if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
+ copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
+ copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
+ copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
+ copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
+ copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
+ return -EFAULT;
+
+ for (i = 0; i < MAX_IOCTL_SGE; i++) {
+ compat_uptr_t ptr;
+
+ if (get_user(ptr, &cioc->sgl[i].iov_base) ||
+ put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
+ copy_in_user(&ioc->sgl[i].iov_len,
+ &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
+ return -EFAULT;
+ }
+
+ error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
+
+ if (copy_in_user(&cioc->frame.hdr.cmd_status,
+ &ioc->frame.hdr.cmd_status, sizeof(u8))) {
+ printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
+ return -EFAULT;
+ }
+ return error;
+}
+
+static long
+megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case MEGASAS_IOC_FIRMWARE:{
+ return megasas_mgmt_compat_ioctl_fw(file, arg);
+ }
+ case MEGASAS_IOC_GET_AEN:
+ return megasas_mgmt_ioctl_aen(file, arg);
+ }
+
+ return -ENOTTY;
+}
+#endif
+
+/*
+ * File operations structure for management interface
+ */
+static struct file_operations megasas_mgmt_fops = {
+ .owner = THIS_MODULE,
+ .open = megasas_mgmt_open,
+ .release = megasas_mgmt_release,
+ .fasync = megasas_mgmt_fasync,
+ .unlocked_ioctl = megasas_mgmt_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = megasas_mgmt_compat_ioctl,
+#endif
+};
+
+/*
+ * PCI hotplug support registration structure
+ */
+static struct pci_driver megasas_pci_driver = {
+
+ .name = "megaraid_sas",
+ .id_table = megasas_pci_table,
+ .probe = megasas_probe_one,
+ .remove = __devexit_p(megasas_detach_one),
+ .shutdown = megasas_shutdown,
+};
+
+/*
+ * Sysfs driver attributes
+ */
+static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
+{
+ return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
+ MEGASAS_VERSION);
+}
+
+static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
+
+static ssize_t
+megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
+{
+ return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
+ MEGASAS_RELDATE);
+}
+
+static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
+ NULL);
+
+/**
+ * megasas_init - Driver load entry point
+ */
+static int __init megasas_init(void)
+{
+ int rval;
+
+ /*
+ * Announce driver version and other information
+ */
+ printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
+ MEGASAS_EXT_VERSION);
+
+ memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
+
+ /*
+ * Register character device node
+ */
+ rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
+
+ if (rval < 0) {
+ printk(KERN_DEBUG "megasas: failed to open device node\n");
+ return rval;
+ }
+
+ megasas_mgmt_majorno = rval;
+
+ /*
+ * Register ourselves as PCI hotplug module
+ */
+ rval = pci_module_init(&megasas_pci_driver);
+
+ if (rval) {
+ printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
+ unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
+ return rval;
+ }
+
+ driver_create_file(&megasas_pci_driver.driver, &driver_attr_version);
+ driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_release_date);
+
+ return rval;
+}
+
+/**
+ * megasas_exit - Driver unload entry point
+ */
+static void __exit megasas_exit(void)
+{
+ driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_release_date);
+
+ pci_unregister_driver(&megasas_pci_driver);
+ unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
+}
+
+module_init(megasas_init);
+module_exit(megasas_exit);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
new file mode 100644
index 00000000000..eaec9d53142
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -0,0 +1,1142 @@
+/*
+ *
+ * Linux MegaRAID driver for SAS based RAID controllers
+ *
+ * Copyright (c) 2003-2005 LSI Logic Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * FILE : megaraid_sas.h
+ */
+
+#ifndef LSI_MEGARAID_SAS_H
+#define LSI_MEGARAID_SAS_H
+
+/**
+ * MegaRAID SAS Driver meta data
+ */
+#define MEGASAS_VERSION "00.00.02.00-rc4"
+#define MEGASAS_RELDATE "Sep 16, 2005"
+#define MEGASAS_EXT_VERSION "Fri Sep 16 12:37:08 EDT 2005"
+
+/*
+ * =====================================
+ * MegaRAID SAS MFI firmware definitions
+ * =====================================
+ */
+
+/*
+ * MFI stands for MegaRAID SAS FW Interface. This is just a moniker for
+ * protocol between the software and firmware. Commands are issued using
+ * "message frames"
+ */
+
+/**
+ * FW posts its state in upper 4 bits of outbound_msg_0 register
+ */
+#define MFI_STATE_MASK 0xF0000000
+#define MFI_STATE_UNDEFINED 0x00000000
+#define MFI_STATE_BB_INIT 0x10000000
+#define MFI_STATE_FW_INIT 0x40000000
+#define MFI_STATE_WAIT_HANDSHAKE 0x60000000
+#define MFI_STATE_FW_INIT_2 0x70000000
+#define MFI_STATE_DEVICE_SCAN 0x80000000
+#define MFI_STATE_FLUSH_CACHE 0xA0000000
+#define MFI_STATE_READY 0xB0000000
+#define MFI_STATE_OPERATIONAL 0xC0000000
+#define MFI_STATE_FAULT 0xF0000000
+
+#define MEGAMFI_FRAME_SIZE 64
+
+/**
+ * During FW init, clear pending cmds & reset state using inbound_msg_0
+ *
+ * ABORT : Abort all pending cmds
+ * READY : Move from OPERATIONAL to READY state; discard queue info
+ * MFIMODE : Discard (possible) low MFA posted in 64-bit mode (??)
+ * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver
+ */
+#define MFI_INIT_ABORT 0x00000000
+#define MFI_INIT_READY 0x00000002
+#define MFI_INIT_MFIMODE 0x00000004
+#define MFI_INIT_CLEAR_HANDSHAKE 0x00000008
+#define MFI_RESET_FLAGS	(MFI_INIT_READY|MFI_INIT_MFIMODE)
+
+/**
+ * MFI frame flags
+ */
+#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000
+#define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE 0x0001
+#define MFI_FRAME_SGL32 0x0000
+#define MFI_FRAME_SGL64 0x0002
+#define MFI_FRAME_SENSE32 0x0000
+#define MFI_FRAME_SENSE64 0x0004
+#define MFI_FRAME_DIR_NONE 0x0000
+#define MFI_FRAME_DIR_WRITE 0x0008
+#define MFI_FRAME_DIR_READ 0x0010
+#define MFI_FRAME_DIR_BOTH 0x0018
+
+/**
+ * Definition for cmd_status
+ */
+#define MFI_CMD_STATUS_POLL_MODE 0xFF
+
+/**
+ * MFI command opcodes
+ */
+#define MFI_CMD_INIT 0x00
+#define MFI_CMD_LD_READ 0x01
+#define MFI_CMD_LD_WRITE 0x02
+#define MFI_CMD_LD_SCSI_IO 0x03
+#define MFI_CMD_PD_SCSI_IO 0x04
+#define MFI_CMD_DCMD 0x05
+#define MFI_CMD_ABORT 0x06
+#define MFI_CMD_SMP 0x07
+#define MFI_CMD_STP 0x08
+
+#define MR_DCMD_CTRL_GET_INFO 0x01010000
+
+#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
+#define MR_FLUSH_CTRL_CACHE 0x01
+#define MR_FLUSH_DISK_CACHE 0x02
+
+#define MR_DCMD_CTRL_SHUTDOWN 0x01050000
+#define MR_ENABLE_DRIVE_SPINDOWN 0x01
+
+#define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100
+#define MR_DCMD_CTRL_EVENT_GET 0x01040300
+#define MR_DCMD_CTRL_EVENT_WAIT 0x01040500
+#define MR_DCMD_LD_GET_PROPERTIES 0x03030000
+
+#define MR_DCMD_CLUSTER 0x08000000
+#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100
+#define MR_DCMD_CLUSTER_RESET_LD 0x08010200
+
+/**
+ * MFI command completion codes
+ */
+enum MFI_STAT {
+ MFI_STAT_OK = 0x00,
+ MFI_STAT_INVALID_CMD = 0x01,
+ MFI_STAT_INVALID_DCMD = 0x02,
+ MFI_STAT_INVALID_PARAMETER = 0x03,
+ MFI_STAT_INVALID_SEQUENCE_NUMBER = 0x04,
+ MFI_STAT_ABORT_NOT_POSSIBLE = 0x05,
+ MFI_STAT_APP_HOST_CODE_NOT_FOUND = 0x06,
+ MFI_STAT_APP_IN_USE = 0x07,
+ MFI_STAT_APP_NOT_INITIALIZED = 0x08,
+ MFI_STAT_ARRAY_INDEX_INVALID = 0x09,
+ MFI_STAT_ARRAY_ROW_NOT_EMPTY = 0x0a,
+ MFI_STAT_CONFIG_RESOURCE_CONFLICT = 0x0b,
+ MFI_STAT_DEVICE_NOT_FOUND = 0x0c,
+ MFI_STAT_DRIVE_TOO_SMALL = 0x0d,
+ MFI_STAT_FLASH_ALLOC_FAIL = 0x0e,
+ MFI_STAT_FLASH_BUSY = 0x0f,
+ MFI_STAT_FLASH_ERROR = 0x10,
+ MFI_STAT_FLASH_IMAGE_BAD = 0x11,
+ MFI_STAT_FLASH_IMAGE_INCOMPLETE = 0x12,
+ MFI_STAT_FLASH_NOT_OPEN = 0x13,
+ MFI_STAT_FLASH_NOT_STARTED = 0x14,
+ MFI_STAT_FLUSH_FAILED = 0x15,
+ MFI_STAT_HOST_CODE_NOT_FOUNT = 0x16,
+ MFI_STAT_LD_CC_IN_PROGRESS = 0x17,
+ MFI_STAT_LD_INIT_IN_PROGRESS = 0x18,
+ MFI_STAT_LD_LBA_OUT_OF_RANGE = 0x19,
+ MFI_STAT_LD_MAX_CONFIGURED = 0x1a,
+ MFI_STAT_LD_NOT_OPTIMAL = 0x1b,
+ MFI_STAT_LD_RBLD_IN_PROGRESS = 0x1c,
+ MFI_STAT_LD_RECON_IN_PROGRESS = 0x1d,
+ MFI_STAT_LD_WRONG_RAID_LEVEL = 0x1e,
+ MFI_STAT_MAX_SPARES_EXCEEDED = 0x1f,
+ MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20,
+ MFI_STAT_MFC_HW_ERROR = 0x21,
+ MFI_STAT_NO_HW_PRESENT = 0x22,
+ MFI_STAT_NOT_FOUND = 0x23,
+ MFI_STAT_NOT_IN_ENCL = 0x24,
+ MFI_STAT_PD_CLEAR_IN_PROGRESS = 0x25,
+ MFI_STAT_PD_TYPE_WRONG = 0x26,
+ MFI_STAT_PR_DISABLED = 0x27,
+ MFI_STAT_ROW_INDEX_INVALID = 0x28,
+ MFI_STAT_SAS_CONFIG_INVALID_ACTION = 0x29,
+ MFI_STAT_SAS_CONFIG_INVALID_DATA = 0x2a,
+ MFI_STAT_SAS_CONFIG_INVALID_PAGE = 0x2b,
+ MFI_STAT_SAS_CONFIG_INVALID_TYPE = 0x2c,
+ MFI_STAT_SCSI_DONE_WITH_ERROR = 0x2d,
+ MFI_STAT_SCSI_IO_FAILED = 0x2e,
+ MFI_STAT_SCSI_RESERVATION_CONFLICT = 0x2f,
+ MFI_STAT_SHUTDOWN_FAILED = 0x30,
+ MFI_STAT_TIME_NOT_SET = 0x31,
+ MFI_STAT_WRONG_STATE = 0x32,
+ MFI_STAT_LD_OFFLINE = 0x33,
+ MFI_STAT_PEER_NOTIFICATION_REJECTED = 0x34,
+ MFI_STAT_PEER_NOTIFICATION_FAILED = 0x35,
+ MFI_STAT_RESERVATION_IN_PROGRESS = 0x36,
+ MFI_STAT_I2C_ERRORS_DETECTED = 0x37,
+ MFI_STAT_PCI_ERRORS_DETECTED = 0x38,
+
+ MFI_STAT_INVALID_STATUS = 0xFF
+};
+
+/*
+ * Number of mailbox bytes in DCMD message frame
+ */
+#define MFI_MBOX_SIZE 12
+
+enum MR_EVT_CLASS {
+
+ MR_EVT_CLASS_DEBUG = -2,
+ MR_EVT_CLASS_PROGRESS = -1,
+ MR_EVT_CLASS_INFO = 0,
+ MR_EVT_CLASS_WARNING = 1,
+ MR_EVT_CLASS_CRITICAL = 2,
+ MR_EVT_CLASS_FATAL = 3,
+ MR_EVT_CLASS_DEAD = 4,
+
+};
+
+enum MR_EVT_LOCALE {
+
+ MR_EVT_LOCALE_LD = 0x0001,
+ MR_EVT_LOCALE_PD = 0x0002,
+ MR_EVT_LOCALE_ENCL = 0x0004,
+ MR_EVT_LOCALE_BBU = 0x0008,
+ MR_EVT_LOCALE_SAS = 0x0010,
+ MR_EVT_LOCALE_CTRL = 0x0020,
+ MR_EVT_LOCALE_CONFIG = 0x0040,
+ MR_EVT_LOCALE_CLUSTER = 0x0080,
+ MR_EVT_LOCALE_ALL = 0xffff,
+
+};
+
+enum MR_EVT_ARGS {
+
+ MR_EVT_ARGS_NONE,
+ MR_EVT_ARGS_CDB_SENSE,
+ MR_EVT_ARGS_LD,
+ MR_EVT_ARGS_LD_COUNT,
+ MR_EVT_ARGS_LD_LBA,
+ MR_EVT_ARGS_LD_OWNER,
+ MR_EVT_ARGS_LD_LBA_PD_LBA,
+ MR_EVT_ARGS_LD_PROG,
+ MR_EVT_ARGS_LD_STATE,
+ MR_EVT_ARGS_LD_STRIP,
+ MR_EVT_ARGS_PD,
+ MR_EVT_ARGS_PD_ERR,
+ MR_EVT_ARGS_PD_LBA,
+ MR_EVT_ARGS_PD_LBA_LD,
+ MR_EVT_ARGS_PD_PROG,
+ MR_EVT_ARGS_PD_STATE,
+ MR_EVT_ARGS_PCI,
+ MR_EVT_ARGS_RATE,
+ MR_EVT_ARGS_STR,
+ MR_EVT_ARGS_TIME,
+ MR_EVT_ARGS_ECC,
+
+};
+
+/*
+ * SAS controller properties
+ */
+struct megasas_ctrl_prop {
+
+ u16 seq_num;
+ u16 pred_fail_poll_interval;
+ u16 intr_throttle_count;
+ u16 intr_throttle_timeouts;
+ u8 rebuild_rate;
+ u8 patrol_read_rate;
+ u8 bgi_rate;
+ u8 cc_rate;
+ u8 recon_rate;
+ u8 cache_flush_interval;
+ u8 spinup_drv_count;
+ u8 spinup_delay;
+ u8 cluster_enable;
+ u8 coercion_mode;
+ u8 alarm_enable;
+ u8 disable_auto_rebuild;
+ u8 disable_battery_warn;
+ u8 ecc_bucket_size;
+ u16 ecc_bucket_leak_rate;
+ u8 restore_hotspare_on_insertion;
+ u8 expose_encl_devices;
+ u8 reserved[38];
+
+} __attribute__ ((packed));
+
+/*
+ * SAS controller information
+ */
+struct megasas_ctrl_info {
+
+ /*
+ * PCI device information
+ */
+ struct {
+
+ u16 vendor_id;
+ u16 device_id;
+ u16 sub_vendor_id;
+ u16 sub_device_id;
+ u8 reserved[24];
+
+ } __attribute__ ((packed)) pci;
+
+ /*
+ * Host interface information
+ */
+ struct {
+
+ u8 PCIX:1;
+ u8 PCIE:1;
+ u8 iSCSI:1;
+ u8 SAS_3G:1;
+ u8 reserved_0:4;
+ u8 reserved_1[6];
+ u8 port_count;
+ u64 port_addr[8];
+
+ } __attribute__ ((packed)) host_interface;
+
+ /*
+ * Device (backend) interface information
+ */
+ struct {
+
+ u8 SPI:1;
+ u8 SAS_3G:1;
+ u8 SATA_1_5G:1;
+ u8 SATA_3G:1;
+ u8 reserved_0:4;
+ u8 reserved_1[6];
+ u8 port_count;
+ u64 port_addr[8];
+
+ } __attribute__ ((packed)) device_interface;
+
+ /*
+ * List of components residing in flash. All strings are null terminated
+ */
+ u32 image_check_word;
+ u32 image_component_count;
+
+ struct {
+
+ char name[8];
+ char version[32];
+ char build_date[16];
+ char built_time[16];
+
+ } __attribute__ ((packed)) image_component[8];
+
+ /*
+ * List of flash components that have been flashed on the card, but
+ * are not in use, pending reset of the adapter. This list will be
+ * empty if a flash operation has not occurred. All strings are
+ * null-terminated.
+ */
+ u32 pending_image_component_count;
+
+ struct {
+
+ char name[8];
+ char version[32];
+ char build_date[16];
+ char build_time[16];
+
+ } __attribute__ ((packed)) pending_image_component[8];
+
+ u8 max_arms;
+ u8 max_spans;
+ u8 max_arrays;
+ u8 max_lds;
+
+ char product_name[80];
+ char serial_no[32];
+
+ /*
+ * Other physical/controller/operation information. Indicates the
+ * presence of the hardware
+ */
+ struct {
+
+ u32 bbu:1;
+ u32 alarm:1;
+ u32 nvram:1;
+ u32 uart:1;
+ u32 reserved:28;
+
+ } __attribute__ ((packed)) hw_present;
+
+ u32 current_fw_time;
+
+ /*
+ * Maximum data transfer sizes
+ */
+ u16 max_concurrent_cmds;
+ u16 max_sge_count;
+ u32 max_request_size;
+
+ /*
+ * Logical and physical device counts
+ */
+ u16 ld_present_count;
+ u16 ld_degraded_count;
+ u16 ld_offline_count;
+
+ u16 pd_present_count;
+ u16 pd_disk_present_count;
+ u16 pd_disk_pred_failure_count;
+ u16 pd_disk_failed_count;
+
+ /*
+ * Memory size information
+ */
+ u16 nvram_size;
+ u16 memory_size;
+ u16 flash_size;
+
+ /*
+ * Error counters
+ */
+ u16 mem_correctable_error_count;
+ u16 mem_uncorrectable_error_count;
+
+ /*
+ * Cluster information
+ */
+ u8 cluster_permitted;
+ u8 cluster_active;
+
+ /*
+ * Additional max data transfer sizes
+ */
+ u16 max_strips_per_io;
+
+ /*
+ * Controller capabilities structures
+ */
+ struct {
+
+ u32 raid_level_0:1;
+ u32 raid_level_1:1;
+ u32 raid_level_5:1;
+ u32 raid_level_1E:1;
+ u32 raid_level_6:1;
+ u32 reserved:27;
+
+ } __attribute__ ((packed)) raid_levels;
+
+ struct {
+
+ u32 rbld_rate:1;
+ u32 cc_rate:1;
+ u32 bgi_rate:1;
+ u32 recon_rate:1;
+ u32 patrol_rate:1;
+ u32 alarm_control:1;
+ u32 cluster_supported:1;
+ u32 bbu:1;
+ u32 spanning_allowed:1;
+ u32 dedicated_hotspares:1;
+ u32 revertible_hotspares:1;
+ u32 foreign_config_import:1;
+ u32 self_diagnostic:1;
+ u32 mixed_redundancy_arr:1;
+ u32 global_hot_spares:1;
+ u32 reserved:17;
+
+ } __attribute__ ((packed)) adapter_operations;
+
+ struct {
+
+ u32 read_policy:1;
+ u32 write_policy:1;
+ u32 io_policy:1;
+ u32 access_policy:1;
+ u32 disk_cache_policy:1;
+ u32 reserved:27;
+
+ } __attribute__ ((packed)) ld_operations;
+
+ struct {
+
+ u8 min;
+ u8 max;
+ u8 reserved[2];
+
+ } __attribute__ ((packed)) stripe_sz_ops;
+
+ struct {
+
+ u32 force_online:1;
+ u32 force_offline:1;
+ u32 force_rebuild:1;
+ u32 reserved:29;
+
+ } __attribute__ ((packed)) pd_operations;
+
+ struct {
+
+ u32 ctrl_supports_sas:1;
+ u32 ctrl_supports_sata:1;
+ u32 allow_mix_in_encl:1;
+ u32 allow_mix_in_ld:1;
+ u32 allow_sata_in_cluster:1;
+ u32 reserved:27;
+
+ } __attribute__ ((packed)) pd_mix_support;
+
+ /*
+ * Define ECC single-bit-error bucket information
+ */
+ u8 ecc_bucket_count;
+ u8 reserved_2[11];
+
+ /*
+ * Include the controller properties (changeable items)
+ */
+ struct megasas_ctrl_prop properties;
+
+ /*
+ * FW package version (set in environment variables on an OEM basis)
+ */
+ char package_version[0x60];
+
+ u8 pad[0x800 - 0x6a0];
+
+} __attribute__ ((packed));
+
+/*
+ * ===============================
+ * MegaRAID SAS driver definitions
+ * ===============================
+ */
+#define MEGASAS_MAX_PD_CHANNELS 2
+#define MEGASAS_MAX_LD_CHANNELS 2
+#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \
+ MEGASAS_MAX_LD_CHANNELS)
+#define MEGASAS_MAX_DEV_PER_CHANNEL 128
+#define MEGASAS_DEFAULT_INIT_ID -1
+#define MEGASAS_MAX_LUN 8
+#define MEGASAS_MAX_LD 64
+
+/*
+ * When the SCSI mid-layer calls the driver's reset routine, the driver waits
+ * for MEGASAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note
+ * that the driver cannot _actually_ abort or reset pending commands. While it
+ * is waiting for the commands to complete, it prints a diagnostic message
+ * every MEGASAS_RESET_NOTICE_INTERVAL seconds.
+ */
+#define MEGASAS_RESET_WAIT_TIME 180
+#define MEGASAS_RESET_NOTICE_INTERVAL 5
+
+#define MEGASAS_IOCTL_CMD 0
+
+/*
+ * FW reports the maximum number of commands that it can accept (i.e. the
+ * maximum number of commands that can be outstanding) at any time. The driver
+ * must report a lower number to the mid-layer because it issues a few internal
+ * commands itself (e.g. AEN, abort cmd, ioctls). The number of commands it
+ * needs is defined below.
+ */
+#define MEGASAS_INT_CMDS 32
+
+/*
+ * FW can accept both 32- and 64-bit SGLs. We allocate 32- or 64-bit SGLs
+ * based on the size of dma_addr_t.
+ */
+#define IS_DMA64 (sizeof(dma_addr_t) == 8)
+
+#define MFI_OB_INTR_STATUS_MASK 0x00000002
+#define MFI_POLL_TIMEOUT_SECS 10
+
+struct megasas_register_set {
+
+ u32 reserved_0[4]; /*0000h */
+
+ u32 inbound_msg_0; /*0010h */
+ u32 inbound_msg_1; /*0014h */
+ u32 outbound_msg_0; /*0018h */
+ u32 outbound_msg_1; /*001Ch */
+
+ u32 inbound_doorbell; /*0020h */
+ u32 inbound_intr_status; /*0024h */
+ u32 inbound_intr_mask; /*0028h */
+
+ u32 outbound_doorbell; /*002Ch */
+ u32 outbound_intr_status; /*0030h */
+ u32 outbound_intr_mask; /*0034h */
+
+ u32 reserved_1[2]; /*0038h */
+
+ u32 inbound_queue_port; /*0040h */
+ u32 outbound_queue_port; /*0044h */
+
+ u32 reserved_2; /*004Ch */
+
+ u32 index_registers[1004]; /*0050h */
+
+} __attribute__ ((packed));
+
+struct megasas_sge32 {
+
+ u32 phys_addr;
+ u32 length;
+
+} __attribute__ ((packed));
+
+struct megasas_sge64 {
+
+ u64 phys_addr;
+ u32 length;
+
+} __attribute__ ((packed));
+
+union megasas_sgl {
+
+ struct megasas_sge32 sge32[1];
+ struct megasas_sge64 sge64[1];
+
+} __attribute__ ((packed));
+
+struct megasas_header {
+
+ u8 cmd; /*00h */
+ u8 sense_len; /*01h */
+ u8 cmd_status; /*02h */
+ u8 scsi_status; /*03h */
+
+ u8 target_id; /*04h */
+ u8 lun; /*05h */
+ u8 cdb_len; /*06h */
+ u8 sge_count; /*07h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 timeout; /*12h */
+ u32 data_xferlen; /*14h */
+
+} __attribute__ ((packed));
+
+union megasas_sgl_frame {
+
+ struct megasas_sge32 sge32[8];
+ struct megasas_sge64 sge64[5];
+
+} __attribute__ ((packed));
+
+struct megasas_init_frame {
+
+ u8 cmd; /*00h */
+ u8 reserved_0; /*01h */
+ u8 cmd_status; /*02h */
+
+ u8 reserved_1; /*03h */
+ u32 reserved_2; /*04h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 reserved_3; /*12h */
+ u32 data_xfer_len; /*14h */
+
+ u32 queue_info_new_phys_addr_lo; /*18h */
+ u32 queue_info_new_phys_addr_hi; /*1Ch */
+ u32 queue_info_old_phys_addr_lo; /*20h */
+ u32 queue_info_old_phys_addr_hi; /*24h */
+
+ u32 reserved_4[6]; /*28h */
+
+} __attribute__ ((packed));
+
+struct megasas_init_queue_info {
+
+ u32 init_flags; /*00h */
+ u32 reply_queue_entries; /*04h */
+
+ u32 reply_queue_start_phys_addr_lo; /*08h */
+ u32 reply_queue_start_phys_addr_hi; /*0Ch */
+ u32 producer_index_phys_addr_lo; /*10h */
+ u32 producer_index_phys_addr_hi; /*14h */
+ u32 consumer_index_phys_addr_lo; /*18h */
+ u32 consumer_index_phys_addr_hi; /*1Ch */
+
+} __attribute__ ((packed));
+
+struct megasas_io_frame {
+
+ u8 cmd; /*00h */
+ u8 sense_len; /*01h */
+ u8 cmd_status; /*02h */
+ u8 scsi_status; /*03h */
+
+ u8 target_id; /*04h */
+ u8 access_byte; /*05h */
+ u8 reserved_0; /*06h */
+ u8 sge_count; /*07h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 timeout; /*12h */
+ u32 lba_count; /*14h */
+
+ u32 sense_buf_phys_addr_lo; /*18h */
+ u32 sense_buf_phys_addr_hi; /*1Ch */
+
+ u32 start_lba_lo; /*20h */
+ u32 start_lba_hi; /*24h */
+
+ union megasas_sgl sgl; /*28h */
+
+} __attribute__ ((packed));
+
+struct megasas_pthru_frame {
+
+ u8 cmd; /*00h */
+ u8 sense_len; /*01h */
+ u8 cmd_status; /*02h */
+ u8 scsi_status; /*03h */
+
+ u8 target_id; /*04h */
+ u8 lun; /*05h */
+ u8 cdb_len; /*06h */
+ u8 sge_count; /*07h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 timeout; /*12h */
+ u32 data_xfer_len; /*14h */
+
+ u32 sense_buf_phys_addr_lo; /*18h */
+ u32 sense_buf_phys_addr_hi; /*1Ch */
+
+ u8 cdb[16]; /*20h */
+ union megasas_sgl sgl; /*30h */
+
+} __attribute__ ((packed));
+
+struct megasas_dcmd_frame {
+
+ u8 cmd; /*00h */
+ u8 reserved_0; /*01h */
+ u8 cmd_status; /*02h */
+ u8 reserved_1[4]; /*03h */
+ u8 sge_count; /*07h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 timeout; /*12h */
+
+ u32 data_xfer_len; /*14h */
+ u32 opcode; /*18h */
+
+ union { /*1Ch */
+ u8 b[12];
+ u16 s[6];
+ u32 w[3];
+ } mbox;
+
+ union megasas_sgl sgl; /*28h */
+
+} __attribute__ ((packed));
+
+struct megasas_abort_frame {
+
+ u8 cmd; /*00h */
+ u8 reserved_0; /*01h */
+ u8 cmd_status; /*02h */
+
+ u8 reserved_1; /*03h */
+ u32 reserved_2; /*04h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 reserved_3; /*12h */
+ u32 reserved_4; /*14h */
+
+ u32 abort_context; /*18h */
+ u32 pad_1; /*1Ch */
+
+ u32 abort_mfi_phys_addr_lo; /*20h */
+ u32 abort_mfi_phys_addr_hi; /*24h */
+
+ u32 reserved_5[6]; /*28h */
+
+} __attribute__ ((packed));
+
+struct megasas_smp_frame {
+
+ u8 cmd; /*00h */
+ u8 reserved_1; /*01h */
+ u8 cmd_status; /*02h */
+ u8 connection_status; /*03h */
+
+ u8 reserved_2[3]; /*04h */
+ u8 sge_count; /*07h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 timeout; /*12h */
+
+ u32 data_xfer_len; /*14h */
+ u64 sas_addr; /*18h */
+
+ union {
+ struct megasas_sge32 sge32[2]; /* [0]: resp [1]: req */
+ struct megasas_sge64 sge64[2]; /* [0]: resp [1]: req */
+ } sgl;
+
+} __attribute__ ((packed));
+
+struct megasas_stp_frame {
+
+ u8 cmd; /*00h */
+ u8 reserved_1; /*01h */
+ u8 cmd_status; /*02h */
+ u8 reserved_2; /*03h */
+
+ u8 target_id; /*04h */
+ u8 reserved_3[2]; /*05h */
+ u8 sge_count; /*07h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 timeout; /*12h */
+
+ u32 data_xfer_len; /*14h */
+
+ u16 fis[10]; /*18h */
+ u32 stp_flags;
+
+ union {
+ struct megasas_sge32 sge32[2]; /* [0]: resp [1]: data */
+ struct megasas_sge64 sge64[2]; /* [0]: resp [1]: data */
+ } sgl;
+
+} __attribute__ ((packed));
+
+union megasas_frame {
+
+ struct megasas_header hdr;
+ struct megasas_init_frame init;
+ struct megasas_io_frame io;
+ struct megasas_pthru_frame pthru;
+ struct megasas_dcmd_frame dcmd;
+ struct megasas_abort_frame abort;
+ struct megasas_smp_frame smp;
+ struct megasas_stp_frame stp;
+
+ u8 raw_bytes[64];
+};
+
+struct megasas_cmd;
+
+union megasas_evt_class_locale {
+
+ struct {
+ u16 locale;
+ u8 reserved;
+ s8 class;
+ } __attribute__ ((packed)) members;
+
+ u32 word;
+
+} __attribute__ ((packed));
+
+struct megasas_evt_log_info {
+ u32 newest_seq_num;
+ u32 oldest_seq_num;
+ u32 clear_seq_num;
+ u32 shutdown_seq_num;
+ u32 boot_seq_num;
+
+} __attribute__ ((packed));
+
+struct megasas_progress {
+
+ u16 progress;
+ u16 elapsed_seconds;
+
+} __attribute__ ((packed));
+
+struct megasas_evtarg_ld {
+
+ u16 target_id;
+ u8 ld_index;
+ u8 reserved;
+
+} __attribute__ ((packed));
+
+struct megasas_evtarg_pd {
+ u16 device_id;
+ u8 encl_index;
+ u8 slot_number;
+
+} __attribute__ ((packed));
+
+struct megasas_evt_detail {
+
+ u32 seq_num;
+ u32 time_stamp;
+ u32 code;
+ union megasas_evt_class_locale cl;
+ u8 arg_type;
+ u8 reserved1[15];
+
+ union {
+ struct {
+ struct megasas_evtarg_pd pd;
+ u8 cdb_length;
+ u8 sense_length;
+ u8 reserved[2];
+ u8 cdb[16];
+ u8 sense[64];
+ } __attribute__ ((packed)) cdbSense;
+
+ struct megasas_evtarg_ld ld;
+
+ struct {
+ struct megasas_evtarg_ld ld;
+ u64 count;
+ } __attribute__ ((packed)) ld_count;
+
+ struct {
+ u64 lba;
+ struct megasas_evtarg_ld ld;
+ } __attribute__ ((packed)) ld_lba;
+
+ struct {
+ struct megasas_evtarg_ld ld;
+ u32 prevOwner;
+ u32 newOwner;
+ } __attribute__ ((packed)) ld_owner;
+
+ struct {
+ u64 ld_lba;
+ u64 pd_lba;
+ struct megasas_evtarg_ld ld;
+ struct megasas_evtarg_pd pd;
+ } __attribute__ ((packed)) ld_lba_pd_lba;
+
+ struct {
+ struct megasas_evtarg_ld ld;
+ struct megasas_progress prog;
+ } __attribute__ ((packed)) ld_prog;
+
+ struct {
+ struct megasas_evtarg_ld ld;
+ u32 prev_state;
+ u32 new_state;
+ } __attribute__ ((packed)) ld_state;
+
+ struct {
+ u64 strip;
+ struct megasas_evtarg_ld ld;
+ } __attribute__ ((packed)) ld_strip;
+
+ struct megasas_evtarg_pd pd;
+
+ struct {
+ struct megasas_evtarg_pd pd;
+ u32 err;
+ } __attribute__ ((packed)) pd_err;
+
+ struct {
+ u64 lba;
+ struct megasas_evtarg_pd pd;
+ } __attribute__ ((packed)) pd_lba;
+
+ struct {
+ u64 lba;
+ struct megasas_evtarg_pd pd;
+ struct megasas_evtarg_ld ld;
+ } __attribute__ ((packed)) pd_lba_ld;
+
+ struct {
+ struct megasas_evtarg_pd pd;
+ struct megasas_progress prog;
+ } __attribute__ ((packed)) pd_prog;
+
+ struct {
+ struct megasas_evtarg_pd pd;
+ u32 prevState;
+ u32 newState;
+ } __attribute__ ((packed)) pd_state;
+
+ struct {
+ u16 vendorId;
+ u16 deviceId;
+ u16 subVendorId;
+ u16 subDeviceId;
+ } __attribute__ ((packed)) pci;
+
+ u32 rate;
+ char str[96];
+
+ struct {
+ u32 rtc;
+ u32 elapsedSeconds;
+ } __attribute__ ((packed)) time;
+
+ struct {
+ u32 ecar;
+ u32 elog;
+ char str[64];
+ } __attribute__ ((packed)) ecc;
+
+ u8 b[96];
+ u16 s[48];
+ u32 w[24];
+ u64 d[12];
+ } args;
+
+ char description[128];
+
+} __attribute__ ((packed));
+
+struct megasas_instance {
+
+ u32 *producer;
+ dma_addr_t producer_h;
+ u32 *consumer;
+ dma_addr_t consumer_h;
+
+ u32 *reply_queue;
+ dma_addr_t reply_queue_h;
+
+ unsigned long base_addr;
+ struct megasas_register_set __iomem *reg_set;
+
+ s8 init_id;
+ u8 reserved[3];
+
+ u16 max_num_sge;
+ u16 max_fw_cmds;
+ u32 max_sectors_per_req;
+
+ struct megasas_cmd **cmd_list;
+ struct list_head cmd_pool;
+ spinlock_t cmd_pool_lock;
+ struct dma_pool *frame_dma_pool;
+ struct dma_pool *sense_dma_pool;
+
+ struct megasas_evt_detail *evt_detail;
+ dma_addr_t evt_detail_h;
+ struct megasas_cmd *aen_cmd;
+ struct semaphore aen_mutex;
+ struct semaphore ioctl_sem;
+
+ struct Scsi_Host *host;
+
+ wait_queue_head_t int_cmd_wait_q;
+ wait_queue_head_t abort_cmd_wait_q;
+
+ struct pci_dev *pdev;
+ u32 unique_id;
+
+ u32 fw_outstanding;
+ u32 hw_crit_error;
+ spinlock_t instance_lock;
+};
+
+#define MEGASAS_IS_LOGICAL(scp) \
+ (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
+
+#define MEGASAS_DEV_INDEX(inst, scp) \
+ ((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
+ scp->device->id
+
+struct megasas_cmd {
+
+ union megasas_frame *frame;
+ dma_addr_t frame_phys_addr;
+ u8 *sense;
+ dma_addr_t sense_phys_addr;
+
+ u32 index;
+ u8 sync_cmd;
+ u8 cmd_status;
+ u16 abort_aen;
+
+ struct list_head list;
+ struct scsi_cmnd *scmd;
+ struct megasas_instance *instance;
+ u32 frame_count;
+};
+
+#define MAX_MGMT_ADAPTERS 1024
+#define MAX_IOCTL_SGE 16
+
+struct megasas_iocpacket {
+
+ u16 host_no;
+ u16 __pad1;
+ u32 sgl_off;
+ u32 sge_count;
+ u32 sense_off;
+ u32 sense_len;
+ union {
+ u8 raw[128];
+ struct megasas_header hdr;
+ } frame;
+
+ struct iovec sgl[MAX_IOCTL_SGE];
+
+} __attribute__ ((packed));
+
+struct megasas_aen {
+ u16 host_no;
+ u16 __pad1;
+ u32 seq_num;
+ u32 class_locale_word;
+} __attribute__ ((packed));
+
+#ifdef CONFIG_COMPAT
+struct compat_megasas_iocpacket {
+ u16 host_no;
+ u16 __pad1;
+ u32 sgl_off;
+ u32 sge_count;
+ u32 sense_off;
+ u32 sense_len;
+ union {
+ u8 raw[128];
+ struct megasas_header hdr;
+ } frame;
+ struct compat_iovec sgl[MAX_IOCTL_SGE];
+} __attribute__ ((packed));
+
+#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct compat_megasas_iocpacket)
+#else
+#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct megasas_iocpacket)
+#endif
+
+#define MEGASAS_IOC_GET_AEN _IOW('M', 3, struct megasas_aen)
+
+struct megasas_mgmt_info {
+
+ u16 count;
+ struct megasas_instance *instance[MAX_MGMT_ADAPTERS];
+ int max_index;
+};
+
+#endif /*LSI_MEGARAID_SAS_H */
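As a rough illustration of how the IS_DMA64 test and the sge32/sge64 unions defined in this header fit together, a caller might fill a frame's scatter-gather list as sketched below. This is an editorial sketch only; the helper and its arguments are hypothetical and not part of the patch, and it relies on the driver's convention of allocating the frame with room for 'count' SGEs past the declared union size.

	/* Sketch: fill a frame's SGL with 32- or 64-bit SGEs depending on the
	 * width of dma_addr_t, mirroring the IS_DMA64 definition above.
	 */
	static void sketch_fill_sgl(union megasas_sgl *sgl, int count,
				    const dma_addr_t *addr, const u32 *len)
	{
		int i;

		for (i = 0; i < count; i++) {
			if (IS_DMA64) {
				sgl->sge64[i].phys_addr = addr[i];
				sgl->sge64[i].length = len[i];
			} else {
				sgl->sge32[i].phys_addr = (u32)addr[i];
				sgl->sge32[i].length = len[i];
			}
		}
	}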
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index a4857db4f9b..b235556b7b6 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1959,22 +1959,35 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
/* Set it up */
mesh_init(ms);
- /* XXX FIXME: error should be fatal */
- if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms))
+ /* Request interrupt */
+ if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) {
printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr);
+ goto out_shutdown;
+ }
- /* XXX FIXME: handle failure */
- scsi_add_host(mesh_host, &mdev->ofdev.dev);
+ /* Add scsi host & scan */
+ if (scsi_add_host(mesh_host, &mdev->ofdev.dev))
+ goto out_release_irq;
scsi_scan_host(mesh_host);
return 0;
-out_unmap:
+ out_release_irq:
+ free_irq(ms->meshintr, ms);
+ out_shutdown:
+ /* shutdown & reset bus in case of error; otherwise MacOS can be confused
+ * at reboot if the bus was already set to synchronous mode
+ */
+ mesh_shutdown(mdev);
+ set_mesh_power(ms, 0);
+ pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
+ ms->dma_cmd_space, ms->dma_cmd_bus);
+ out_unmap:
iounmap(ms->dma);
iounmap(ms->mesh);
-out_free:
+ out_free:
scsi_host_put(mesh_host);
-out_release:
+ out_release:
macio_release_resources(mdev);
return -ENODEV;
@@ -2001,7 +2014,7 @@ static int mesh_remove(struct macio_dev *mdev)
/* Free DMA commands memory */
pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
- ms->dma_cmd_space, ms->dma_cmd_bus);
+ ms->dma_cmd_space, ms->dma_cmd_bus);
/* Release memory resources */
macio_release_resources(mdev);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 3e9b6413787..23d095d3817 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -201,6 +201,7 @@ int
qla2100_pci_config(scsi_qla_host_t *ha)
{
uint16_t w, mwi;
+ uint32_t d;
unsigned long flags;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -215,9 +216,9 @@ qla2100_pci_config(scsi_qla_host_t *ha)
pci_write_config_word(ha->pdev, PCI_COMMAND, w);
/* Reset expansion ROM address decode enable */
- pci_read_config_word(ha->pdev, PCI_ROM_ADDRESS, &w);
- w &= ~PCI_ROM_ADDRESS_ENABLE;
- pci_write_config_word(ha->pdev, PCI_ROM_ADDRESS, w);
+ pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
+ d &= ~PCI_ROM_ADDRESS_ENABLE;
+ pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -237,6 +238,7 @@ int
qla2300_pci_config(scsi_qla_host_t *ha)
{
uint16_t w, mwi;
+ uint32_t d;
unsigned long flags = 0;
uint32_t cnt;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -302,9 +304,9 @@ qla2300_pci_config(scsi_qla_host_t *ha)
pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
/* Reset expansion ROM address decode enable */
- pci_read_config_word(ha->pdev, PCI_ROM_ADDRESS, &w);
- w &= ~PCI_ROM_ADDRESS_ENABLE;
- pci_write_config_word(ha->pdev, PCI_ROM_ADDRESS, w);
+ pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
+ d &= ~PCI_ROM_ADDRESS_ENABLE;
+ pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -324,6 +326,7 @@ int
qla24xx_pci_config(scsi_qla_host_t *ha)
{
uint16_t w, mwi;
+ uint32_t d;
unsigned long flags = 0;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
int pcix_cmd_reg, pcie_dctl_reg;
@@ -366,9 +369,9 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
}
/* Reset expansion ROM address decode enable */
- pci_read_config_word(ha->pdev, PCI_ROM_ADDRESS, &w);
- w &= ~PCI_ROM_ADDRESS_ENABLE;
- pci_write_config_word(ha->pdev, PCI_ROM_ADDRESS, w);
+ pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
+ d &= ~PCI_ROM_ADDRESS_ENABLE;
+ pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_rscn.c b/drivers/scsi/qla2xxx/qla_rscn.c
index bdc3bc74bbe..1eba9882863 100644
--- a/drivers/scsi/qla2xxx/qla_rscn.c
+++ b/drivers/scsi/qla2xxx/qla_rscn.c
@@ -330,6 +330,8 @@ qla2x00_update_login_fcport(scsi_qla_host_t *ha, struct mbx_entry *mbxstat,
fcport->flags &= ~FCF_FAILOVER_NEEDED;
fcport->iodesc_idx_sent = IODESC_INVALID_INDEX;
atomic_set(&fcport->state, FCS_ONLINE);
+ if (fcport->rport)
+ fc_remote_port_unblock(fcport->rport);
}
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index a1d62dee3be..c05653c7779 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -158,6 +158,8 @@ static struct pci_device_id nv_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index a63f93186e4..b227e51d12f 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -161,7 +161,7 @@ static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device);
- u32 val, val2;
+ u32 val, val2 = 0;
u8 pmr;
if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
@@ -289,7 +289,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (ent->device != 0x182) {
if ((pmr & SIS_PMR_COMBINED) == 0) {
printk(KERN_INFO "sata_sis: Detected SiS 180/181 chipset in SATA mode\n");
- port2_start=0x64;
+ port2_start = 64;
}
else {
printk(KERN_INFO "sata_sis: Detected SiS 180/181 chipset in combined mode\n");
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index a780546eda9..1f0ebabf6d4 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1265,9 +1265,8 @@ int scsi_device_cancel(struct scsi_device *sdev, int recovery)
list_for_each_safe(lh, lh_sf, &active_list) {
scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
list_del_init(lh);
- if (recovery) {
- scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD);
- } else {
+ if (recovery &&
+ !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
scmd->result = (DID_ABORT << 16);
scsi_finish_command(scmd);
}
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 07b554affcf..64fc9e21f35 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -110,6 +110,7 @@ static struct {
{"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */
{"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */
{"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN},
+ {"transtec", "T5008", "0001", BLIST_NOREPORTLUN },
{"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */
{"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */
{"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN}, /* locks up */
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 895c9452be4..ad534216507 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -50,7 +50,7 @@
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
if (shost->host_busy == shost->host_failed) {
- up(shost->eh_wait);
+ wake_up_process(shost->ehandler);
SCSI_LOG_ERROR_RECOVERY(5,
printk("Waking error handler thread\n"));
}
@@ -68,19 +68,24 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
{
struct Scsi_Host *shost = scmd->device->host;
unsigned long flags;
+ int ret = 0;
- if (shost->eh_wait == NULL)
+ if (!shost->ehandler)
return 0;
spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_set_state(shost, SHOST_RECOVERY))
+ if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
+ goto out_unlock;
+ ret = 1;
scmd->eh_eflags |= eh_flag;
list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
- scsi_host_set_state(shost, SHOST_RECOVERY);
shost->host_failed++;
scsi_eh_wakeup(shost);
+ out_unlock:
spin_unlock_irqrestore(shost->host_lock, flags);
- return 1;
+ return ret;
}
/**
@@ -176,8 +181,8 @@ void scsi_times_out(struct scsi_cmnd *scmd)
}
if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
- panic("Error handler thread not present at %p %p %s %d",
- scmd, scmd->device->host, __FILE__, __LINE__);
+ scmd->result |= DID_TIME_OUT << 16;
+ __scsi_done(scmd);
}
}
@@ -196,8 +201,7 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev)
{
int online;
- wait_event(sdev->host->host_wait, (sdev->host->shost_state !=
- SHOST_RECOVERY));
+ wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));
online = scsi_device_online(sdev);
@@ -1441,6 +1445,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
static void scsi_restart_operations(struct Scsi_Host *shost)
{
struct scsi_device *sdev;
+ unsigned long flags;
/*
* If the door was locked, we need to insert a door lock request
@@ -1460,7 +1465,11 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
__FUNCTION__));
- scsi_host_set_state(shost, SHOST_RUNNING);
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_set_state(shost, SHOST_RUNNING))
+ if (scsi_host_set_state(shost, SHOST_CANCEL))
+ BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
+ spin_unlock_irqrestore(shost->host_lock, flags);
wake_up(&shost->host_wait);
@@ -1582,40 +1591,31 @@ int scsi_error_handler(void *data)
{
struct Scsi_Host *shost = (struct Scsi_Host *) data;
int rtn;
- DECLARE_MUTEX_LOCKED(sem);
current->flags |= PF_NOFREEZE;
- shost->eh_wait = &sem;
+
/*
- * Wake up the thread that created us.
+ * Note - we always use TASK_INTERRUPTIBLE even if the module
+ * was loaded as part of the kernel. The reason is that
+ * UNINTERRUPTIBLE would cause this thread to be counted in
+ * the load average as a running process, and an interruptible
+ * wait doesn't.
*/
- SCSI_LOG_ERROR_RECOVERY(3, printk("Wake up parent of"
- " scsi_eh_%d\n",shost->host_no));
-
- while (1) {
- /*
- * If we get a signal, it means we are supposed to go
- * away and die. This typically happens if the user is
- * trying to unload a module.
- */
- SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
- " scsi_eh_%d"
- " sleeping\n",shost->host_no));
-
- /*
- * Note - we always use down_interruptible with the semaphore
- * even if the module was loaded as part of the kernel. The
- * reason is that down() will cause this thread to be counted
- * in the load average as a running process, and down
- * interruptible doesn't. Given that we need to allow this
- * thread to die if the driver was loaded as a module, using
- * semaphores isn't unreasonable.
- */
- down_interruptible(&sem);
- if (kthread_should_stop())
- break;
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ if (shost->host_failed == 0 ||
+ shost->host_failed != shost->host_busy) {
+ SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
+ " scsi_eh_%d"
+ " sleeping\n",
+ shost->host_no));
+ schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
+ continue;
+ }
+ __set_current_state(TASK_RUNNING);
SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
" scsi_eh_%d waking"
" up\n",shost->host_no));
@@ -1642,7 +1642,7 @@ int scsi_error_handler(void *data)
* which are still online.
*/
scsi_restart_operations(shost);
-
+ set_current_state(TASK_INTERRUPTIBLE);
}
SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler scsi_eh_%d"
@@ -1651,7 +1651,7 @@ int scsi_error_handler(void *data)
/*
* Make sure that nobody tries to wake us up again.
*/
- shost->eh_wait = NULL;
+ shost->ehandler = NULL;
return 0;
}
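The scsi_error.c hunk above replaces the semaphore-based handler with the usual interruptible kthread idle loop. As a hedged, generic sketch of that pattern (the thread body and the two helpers below are hypothetical, not the SCSI code itself):

	#include <linux/kthread.h>
	#include <linux/sched.h>

	static int example_have_work(void *data);	/* hypothetical */
	static void example_do_work(void *data);	/* hypothetical */

	/* Sleep in TASK_INTERRUPTIBLE so the thread is not counted in the load
	 * average, let wake_up_process() wake it, and exit when kthread_stop()
	 * is called.
	 */
	static int example_thread(void *data)
	{
		set_current_state(TASK_INTERRUPTIBLE);
		while (!kthread_should_stop()) {
			if (!example_have_work(data)) {
				schedule();
				set_current_state(TASK_INTERRUPTIBLE);
				continue;
			}
			__set_current_state(TASK_RUNNING);
			example_do_work(data);
			set_current_state(TASK_INTERRUPTIBLE);
		}
		__set_current_state(TASK_RUNNING);
		return 0;
	}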
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index b7fddac8134..de7f98cc38f 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -458,7 +458,7 @@ int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
* error processing, as long as the device was opened
* non-blocking */
if (filp && filp->f_flags & O_NONBLOCK) {
- if (sdev->host->shost_state == SHOST_RECOVERY)
+ if (scsi_host_in_recovery(sdev->host))
return -ENODEV;
} else if (!scsi_block_when_processing_errors(sdev))
return -ENODEV;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 863bb6495da..dc9c772bc87 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -118,7 +118,6 @@ static void scsi_unprep_request(struct request *req)
req->flags &= ~REQ_DONTPREP;
req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;
- scsi_release_buffers(cmd);
scsi_put_command(cmd);
}
@@ -140,14 +139,12 @@ static void scsi_unprep_request(struct request *req)
* commands.
* Notes: This could be called either from an interrupt context or a
* normal process context.
- * Notes: Upon return, cmd is a stale pointer.
*/
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_device *device = cmd->device;
struct request_queue *q = device->request_queue;
- struct request *req = cmd->request;
unsigned long flags;
SCSI_LOG_MLQUEUE(1,
@@ -188,9 +185,8 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
* function. The SCSI request function detects the blocked condition
* and plugs the queue appropriately.
*/
- scsi_unprep_request(req);
spin_lock_irqsave(q->queue_lock, flags);
- blk_requeue_request(q, req);
+ blk_requeue_request(q, cmd->request);
spin_unlock_irqrestore(q->queue_lock, flags);
scsi_run_queue(q);
@@ -451,7 +447,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
spin_lock_irqsave(shost->host_lock, flags);
shost->host_busy--;
- if (unlikely((shost->shost_state == SHOST_RECOVERY) &&
+ if (unlikely(scsi_host_in_recovery(shost) &&
shost->host_failed))
scsi_eh_wakeup(shost);
spin_unlock(shost->host_lock);
@@ -1268,6 +1264,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
}
} else {
memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
+ cmd->cmd_len = req->cmd_len;
if (rq_data_dir(req) == WRITE)
cmd->sc_data_direction = DMA_TO_DEVICE;
else if (req->data_len)
@@ -1342,7 +1339,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
struct Scsi_Host *shost,
struct scsi_device *sdev)
{
- if (shost->shost_state == SHOST_RECOVERY)
+ if (scsi_host_in_recovery(shost))
return 0;
if (shost->host_busy == 0 && shost->host_blocked) {
/*
@@ -1514,7 +1511,6 @@ static void scsi_request_fn(struct request_queue *q)
* cases (host limits or settings) should run the queue at some
* later time.
*/
- scsi_unprep_request(req);
spin_lock_irq(q->queue_lock);
blk_requeue_request(q, req);
sdev->device_busy--;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index b86f170fa8e..327c5d7e5bd 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -587,6 +587,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result,
if (sdev->scsi_level >= 2 ||
(sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
sdev->scsi_level++;
+ sdev->sdev_target->scsi_level = sdev->scsi_level;
return 0;
}
@@ -771,6 +772,15 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
return SCSI_SCAN_LUN_PRESENT;
}
+static inline void scsi_destroy_sdev(struct scsi_device *sdev)
+{
+ if (sdev->host->hostt->slave_destroy)
+ sdev->host->hostt->slave_destroy(sdev);
+ transport_destroy_device(&sdev->sdev_gendev);
+ put_device(&sdev->sdev_gendev);
+}
+
+
/**
* scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
* @starget: pointer to target device structure
@@ -803,9 +813,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
* The rescan flag is used as an optimization, the first scan of a
* host adapter calls into here with rescan == 0.
*/
- if (rescan) {
- sdev = scsi_device_lookup_by_target(starget, lun);
- if (sdev) {
+ sdev = scsi_device_lookup_by_target(starget, lun);
+ if (sdev) {
+ if (rescan || sdev->sdev_state != SDEV_CREATED) {
SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
"scsi scan: device exists on %s\n",
sdev->sdev_gendev.bus_id));
@@ -820,9 +830,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
sdev->model);
return SCSI_SCAN_LUN_PRESENT;
}
- }
-
- sdev = scsi_alloc_sdev(starget, lun, hostdata);
+ scsi_device_put(sdev);
+ } else
+ sdev = scsi_alloc_sdev(starget, lun, hostdata);
if (!sdev)
goto out;
@@ -877,12 +887,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
res = SCSI_SCAN_NO_RESPONSE;
}
}
- } else {
- if (sdev->host->hostt->slave_destroy)
- sdev->host->hostt->slave_destroy(sdev);
- transport_destroy_device(&sdev->sdev_gendev);
- put_device(&sdev->sdev_gendev);
- }
+ } else
+ scsi_destroy_sdev(sdev);
out:
return res;
}
@@ -1054,7 +1060,7 @@ EXPORT_SYMBOL(int_to_scsilun);
* 0: scan completed (or no memory, so further scanning is futile)
* 1: no report lun scan, or not configured
**/
-static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
+static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
int rescan)
{
char devname[64];
@@ -1067,7 +1073,8 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
struct scsi_lun *lunp, *lun_data;
u8 *data;
struct scsi_sense_hdr sshdr;
- struct scsi_target *starget = scsi_target(sdev);
+ struct scsi_device *sdev;
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
/*
* Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
@@ -1075,15 +1082,23 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
* support more than 8 LUNs.
*/
if ((bflags & BLIST_NOREPORTLUN) ||
- sdev->scsi_level < SCSI_2 ||
- (sdev->scsi_level < SCSI_3 &&
- (!(bflags & BLIST_REPORTLUN2) || sdev->host->max_lun <= 8)) )
+ starget->scsi_level < SCSI_2 ||
+ (starget->scsi_level < SCSI_3 &&
+ (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8)) )
return 1;
if (bflags & BLIST_NOLUN)
return 0;
+ if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
+ sdev = scsi_alloc_sdev(starget, 0, NULL);
+ if (!sdev)
+ return 0;
+ if (scsi_device_get(sdev))
+ return 0;
+ }
+
sprintf(devname, "host %d channel %d id %d",
- sdev->host->host_no, sdev->channel, sdev->id);
+ shost->host_no, sdev->channel, sdev->id);
/*
* Allocate enough to hold the header (the same size as one scsi_lun)
@@ -1098,8 +1113,10 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
length = (max_scsi_report_luns + 1) * sizeof(struct scsi_lun);
lun_data = kmalloc(length, GFP_ATOMIC |
(sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
- if (!lun_data)
+ if (!lun_data) {
+ printk(ALLOC_FAILURE_MSG, __FUNCTION__);
goto out;
+ }
scsi_cmd[0] = REPORT_LUNS;
@@ -1201,10 +1218,6 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
for (i = 0; i < sizeof(struct scsi_lun); i++)
printk("%02x", data[i]);
printk(" has a LUN larger than currently supported.\n");
- } else if (lun == 0) {
- /*
- * LUN 0 has already been scanned.
- */
} else if (lun > sdev->host->max_lun) {
printk(KERN_WARNING "scsi: %s lun%d has a LUN larger"
" than allowed by the host adapter\n",
@@ -1227,13 +1240,13 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
}
kfree(lun_data);
- return 0;
-
out:
- /*
- * We are out of memory, don't try scanning any further.
- */
- printk(ALLOC_FAILURE_MSG, __FUNCTION__);
+ scsi_device_put(sdev);
+ if (sdev->sdev_state == SDEV_CREATED)
+ /*
+ * the sdev we used didn't appear in the report luns scan
+ */
+ scsi_destroy_sdev(sdev);
return 0;
}
@@ -1299,7 +1312,6 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
struct Scsi_Host *shost = dev_to_shost(parent);
int bflags = 0;
int res;
- struct scsi_device *sdev = NULL;
struct scsi_target *starget;
if (shost->this_id == id)
@@ -1325,27 +1337,16 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
* Scan LUN 0, if there is some response, scan further. Ideally, we
* would not configure LUN 0 until all LUNs are scanned.
*/
- res = scsi_probe_and_add_lun(starget, 0, &bflags, &sdev, rescan, NULL);
- if (res == SCSI_SCAN_LUN_PRESENT) {
- if (scsi_report_lun_scan(sdev, bflags, rescan) != 0)
+ res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
+ if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
+ if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
/*
* The REPORT LUN did not scan the target,
* do a sequential scan.
*/
scsi_sequential_lun_scan(starget, bflags,
- res, sdev->scsi_level, rescan);
- } else if (res == SCSI_SCAN_TARGET_PRESENT) {
- /*
- * There's a target here, but lun 0 is offline so we
- * can't use the report_lun scan. Fall back to a
- * sequential lun scan with a bflags of SPARSELUN and
- * a default scsi level of SCSI_2
- */
- scsi_sequential_lun_scan(starget, BLIST_SPARSELUN,
- SCSI_SCAN_TARGET_PRESENT, SCSI_2, rescan);
+ res, starget->scsi_level, rescan);
}
- if (sdev)
- scsi_device_put(sdev);
out_reap:
/* now determine if the target has any children at all
@@ -1466,23 +1467,17 @@ EXPORT_SYMBOL(scsi_scan_single_target);
void scsi_forget_host(struct Scsi_Host *shost)
{
- struct scsi_target *starget, *tmp;
+ struct scsi_device *sdev;
unsigned long flags;
- /*
- * Ok, this look a bit strange. We always look for the first device
- * on the list as scsi_remove_device removes them from it - thus we
- * also have to release the lock.
- * We don't need to get another reference to the device before
- * releasing the lock as we already own the reference from
- * scsi_register_device that's release in scsi_remove_device. And
- * after that we don't look at sdev anymore.
- */
+ restart:
spin_lock_irqsave(shost->host_lock, flags);
- list_for_each_entry_safe(starget, tmp, &shost->__targets, siblings) {
+ list_for_each_entry(sdev, &shost->__devices, siblings) {
+ if (sdev->sdev_state == SDEV_DEL)
+ continue;
spin_unlock_irqrestore(shost->host_lock, flags);
- scsi_remove_target(&starget->dev);
- spin_lock_irqsave(shost->host_lock, flags);
+ __scsi_remove_device(sdev);
+ goto restart;
}
spin_unlock_irqrestore(shost->host_lock, flags);
}
@@ -1548,10 +1543,7 @@ void scsi_free_host_dev(struct scsi_device *sdev)
{
BUG_ON(sdev->id != sdev->host->this_id);
- if (sdev->host->hostt->slave_destroy)
- sdev->host->hostt->slave_destroy(sdev);
- transport_destroy_device(&sdev->sdev_gendev);
- put_device(&sdev->sdev_gendev);
+ scsi_destroy_sdev(sdev);
}
EXPORT_SYMBOL(scsi_free_host_dev);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index b8052d5206c..72a6550a056 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -57,6 +57,8 @@ static struct {
{ SHOST_CANCEL, "cancel" },
{ SHOST_DEL, "deleted" },
{ SHOST_RECOVERY, "recovery" },
+ { SHOST_CANCEL_RECOVERY, "cancel/recovery" },
+ { SHOST_DEL_RECOVERY, "deleted/recovery", },
};
const char *scsi_host_state_name(enum scsi_host_state state)
{
@@ -707,9 +709,11 @@ void __scsi_remove_device(struct scsi_device *sdev)
**/
void scsi_remove_device(struct scsi_device *sdev)
{
- down(&sdev->host->scan_mutex);
+ struct Scsi_Host *shost = sdev->host;
+
+ down(&shost->scan_mutex);
__scsi_remove_device(sdev);
- up(&sdev->host->scan_mutex);
+ up(&shost->scan_mutex);
}
EXPORT_SYMBOL(scsi_remove_device);
@@ -717,17 +721,20 @@ void __scsi_remove_target(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
unsigned long flags;
- struct scsi_device *sdev, *tmp;
+ struct scsi_device *sdev;
spin_lock_irqsave(shost->host_lock, flags);
starget->reap_ref++;
- list_for_each_entry_safe(sdev, tmp, &shost->__devices, siblings) {
+ restart:
+ list_for_each_entry(sdev, &shost->__devices, siblings) {
if (sdev->channel != starget->channel ||
- sdev->id != starget->id)
+ sdev->id != starget->id ||
+ sdev->sdev_state == SDEV_DEL)
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_remove_device(sdev);
spin_lock_irqsave(shost->host_lock, flags);
+ goto restart;
}
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_target_reap(starget);
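This hunk and the scsi_forget_host() change earlier use the same idiom: because the host lock must be dropped around the removal call, even a _safe list walk could step onto entries freed while unlocked, so the walk is restarted from the top after every removal. A generic sketch of the idiom, with all names hypothetical:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct ex_dev {
		struct list_head node;
		int deleted;
	};

	/* hypothetical; may sleep, frees or marks the entry as deleted */
	static void ex_remove_one(struct ex_dev *dev);

	static void ex_remove_all(struct list_head *head, spinlock_t *lock)
	{
		struct ex_dev *dev;
		unsigned long flags;

	 restart:
		spin_lock_irqsave(lock, flags);
		list_for_each_entry(dev, head, node) {
			if (dev->deleted)
				continue;
			spin_unlock_irqrestore(lock, flags);
			/* removal may sleep and may free other entries,
			 * hence the restart of the whole walk
			 */
			ex_remove_one(dev);
			goto restart;
		}
		spin_unlock_irqrestore(lock, flags);
	}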
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index ff724bbe661..1d145d2f9a3 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -628,17 +628,16 @@ sas_rphy_delete(struct sas_rphy *rphy)
struct Scsi_Host *shost = dev_to_shost(parent->dev.parent);
struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
- transport_destroy_device(&rphy->dev);
+ scsi_remove_target(dev);
- scsi_remove_target(&rphy->dev);
+ transport_remove_device(dev);
+ device_del(dev);
+ transport_destroy_device(dev);
spin_lock(&sas_host->lock);
list_del(&rphy->list);
spin_unlock(&sas_host->lock);
- transport_remove_device(dev);
- device_del(dev);
- transport_destroy_device(dev);
put_device(&parent->dev);
}
EXPORT_SYMBOL(sas_rphy_delete);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index de564b38605..9a1dc0cea03 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -235,6 +235,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
return 0;
memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
+ SCpnt->cmd_len = rq->cmd_len;
if (rq_data_dir(rq) == WRITE)
SCpnt->sc_data_direction = DMA_TO_DEVICE;
else if (rq->data_len)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 9ea4765d1d1..ad94367df43 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1027,7 +1027,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
if (sdp->detached)
return -ENODEV;
if (filp->f_flags & O_NONBLOCK) {
- if (sdp->device->host->shost_state == SHOST_RECOVERY)
+ if (scsi_host_in_recovery(sdp->device->host))
return -EBUSY;
} else if (!scsi_block_when_processing_errors(sdp->device))
return -EBUSY;
@@ -2849,8 +2849,7 @@ sg_proc_init(void)
struct proc_dir_entry *pdep;
struct sg_proc_leaf * leaf;
- sg_proc_sgp = create_proc_entry(sg_proc_sg_dirname,
- S_IFDIR | S_IRUGO | S_IXUGO, NULL);
+ sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
if (!sg_proc_sgp)
return 1;
for (k = 0; k < num_leaves; ++k) {
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index ce63fc8312d..561901b1cf1 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -326,6 +326,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
return 0;
memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
+ SCpnt->cmd_len = rq->cmd_len;
if (!rq->data_len)
SCpnt->sc_data_direction = DMA_NONE;
else if (rq_data_dir(rq) == WRITE)
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index a93308ae973..d001c046551 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4206,6 +4206,7 @@ static int st_init_command(struct scsi_cmnd *SCpnt)
return 0;
memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
+ SCpnt->cmd_len = rq->cmd_len;
if (rq_data_dir(rq) == WRITE)
SCpnt->sc_data_direction = DMA_TO_DEVICE;
diff --git a/drivers/serial/21285.c b/drivers/serial/21285.c
index aec39fb261c..b5cf39468d1 100644
--- a/drivers/serial/21285.c
+++ b/drivers/serial/21285.c
@@ -463,7 +463,7 @@ static int __init serial21285_console_setup(struct console *co, char *options)
return uart_set_options(port, co, baud, parity, bits, flow);
}
-extern struct uart_driver serial21285_reg;
+static struct uart_driver serial21285_reg;
static struct console serial21285_console =
{
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index 978e12437e6..679e678c7e6 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -689,7 +689,7 @@ static int __init pl010_console_setup(struct console *co, char *options)
return uart_set_options(port, co, baud, parity, bits, flow);
}
-extern struct uart_driver amba_reg;
+static struct uart_driver amba_reg;
static struct console amba_console = {
.name = "ttyAM",
.write = pl010_console_write,
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index 56071309744..1ff629c7475 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -701,7 +701,7 @@ static int __init pl011_console_setup(struct console *co, char *options)
return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}
-extern struct uart_driver amba_reg;
+static struct uart_driver amba_reg;
static struct console amba_console = {
.name = "ttyAMA",
.write = pl011_console_write,
diff --git a/drivers/serial/clps711x.c b/drivers/serial/clps711x.c
index d822896b488..87ef368384f 100644
--- a/drivers/serial/clps711x.c
+++ b/drivers/serial/clps711x.c
@@ -98,7 +98,7 @@ static irqreturn_t clps711xuart_int_rx(int irq, void *dev_id, struct pt_regs *re
{
struct uart_port *port = dev_id;
struct tty_struct *tty = port->info->tty;
- unsigned int status, ch, flg, ignored = 0;
+ unsigned int status, ch, flg;
status = clps_readl(SYSFLG(port));
while (!(status & SYSFLG_URXFE)) {
@@ -525,7 +525,7 @@ static int __init clps711xuart_console_setup(struct console *co, char *options)
return uart_set_options(port, co, baud, parity, bits, flow);
}
-extern struct uart_driver clps711x_reg;
+static struct uart_driver clps711x_reg;
static struct console clps711x_console = {
.name = "ttyCL",
.write = clps711xuart_console_write,
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 4c985e6b378..4e1e80adaf1 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -860,7 +860,7 @@ imx_console_setup(struct console *co, char *options)
return uart_set_options(&sport->port, co, baud, parity, bits, flow);
}
-extern struct uart_driver imx_reg;
+static struct uart_driver imx_reg;
static struct console imx_console = {
.name = "ttySMX",
.write = imx_console_write,
diff --git a/drivers/serial/ioc4_serial.c b/drivers/serial/ioc4_serial.c
index 0c5c96a582b..f88fdd48068 100644
--- a/drivers/serial/ioc4_serial.c
+++ b/drivers/serial/ioc4_serial.c
@@ -973,18 +973,6 @@ static irqreturn_t ioc4_intr(int irq, void *arg, struct pt_regs *regs)
this_ir &= ~this_mir;
}
}
- if (this_ir) {
- printk(KERN_ERR
- "unknown IOC4 %s interrupt 0x%x, sio_ir = 0x%x,"
- " sio_ies = 0x%x, other_ir = 0x%x :"
- "other_ies = 0x%x\n",
- (intr_type == IOC4_SIO_INTR_TYPE) ? "sio" :
- "other", this_ir,
- readl(&soft->is_ioc4_misc_addr->sio_ir.raw),
- readl(&soft->is_ioc4_misc_addr->sio_ies.raw),
- readl(&soft->is_ioc4_misc_addr->other_ir.raw),
- readl(&soft->is_ioc4_misc_addr->other_ies.raw));
- }
}
#ifdef DEBUG_INTERRUPTS
{
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index a3cd0ee8486..0585ab27ffd 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -781,7 +781,7 @@ mpc52xx_uart_remove(struct device *dev)
#ifdef CONFIG_PM
static int
-mpc52xx_uart_suspend(struct device *dev, u32 state, u32 level)
+mpc52xx_uart_suspend(struct device *dev, pm_message_t state, u32 level)
{
struct uart_port *port = (struct uart_port *) dev_get_drvdata(dev);
diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c
index eaa0af83529..672b359b07c 100644
--- a/drivers/serial/pxa.c
+++ b/drivers/serial/pxa.c
@@ -589,8 +589,8 @@ serial_pxa_type(struct uart_port *port)
#ifdef CONFIG_SERIAL_PXA_CONSOLE
-extern struct uart_pxa_port serial_pxa_ports[];
-extern struct uart_driver serial_pxa_reg;
+static struct uart_pxa_port serial_pxa_ports[];
+static struct uart_driver serial_pxa_reg;
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c
index c361c6fb080..50d7870d92b 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/serial/s3c2410.c
@@ -82,8 +82,6 @@
#include <asm/arch/regs-serial.h>
#include <asm/arch/regs-gpio.h>
-#include <asm/mach-types.h>
-
/* structures */
struct s3c24xx_uart_info {
@@ -753,8 +751,8 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
{
struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
struct s3c24xx_uart_port *ourport = to_ourport(port);
- struct s3c24xx_uart_clksrc *clksrc;
- struct clk *clk;
+ struct s3c24xx_uart_clksrc *clksrc = NULL;
+ struct clk *clk = NULL;
unsigned long flags;
unsigned int baud, quot;
unsigned int ulcon;
diff --git a/drivers/serial/sa1100.c b/drivers/serial/sa1100.c
index 1225b14f6e9..dd8aed24235 100644
--- a/drivers/serial/sa1100.c
+++ b/drivers/serial/sa1100.c
@@ -799,7 +799,7 @@ sa1100_console_setup(struct console *co, char *options)
return uart_set_options(&sport->port, co, baud, parity, bits, flow);
}
-extern struct uart_driver sa1100_reg;
+static struct uart_driver sa1100_reg;
static struct console sa1100_console = {
.name = "ttySA",
.write = sa1100_console_write,
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 1ae0b381c16..2c7d3ef76e8 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -859,6 +859,7 @@ static struct pcmcia_device_id serial_ids[] = {
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "3CXEM556.cis"),
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "3CXEM556.cis"),
+ PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0x0710, "SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */
PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "MT5634ZLX.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "COMpad4.cis"),
PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"),
diff --git a/drivers/serial/serial_lh7a40x.c b/drivers/serial/serial_lh7a40x.c
index 8302376800c..d01dbe5da3b 100644
--- a/drivers/serial/serial_lh7a40x.c
+++ b/drivers/serial/serial_lh7a40x.c
@@ -632,7 +632,7 @@ static int __init lh7a40xuart_console_setup (struct console* co, char* options)
return uart_set_options (port, co, baud, parity, bits, flow);
}
-extern struct uart_driver lh7a40x_reg;
+static struct uart_driver lh7a40x_reg;
static struct console lh7a40x_console = {
.name = "ttyAM",
.write = lh7a40xuart_console_write,
diff --git a/drivers/tc/zs.c b/drivers/tc/zs.c
index 4382ee60b6a..6bed8713897 100644
--- a/drivers/tc/zs.c
+++ b/drivers/tc/zs.c
@@ -1683,7 +1683,7 @@ static void __init probe_sccs(void)
#ifndef CONFIG_SERIAL_DEC_CONSOLE
/*
* We're called early and memory management isn't up, yet.
- * Thus check_region would fail.
+ * Thus request_region would fail.
*/
if (!request_region((unsigned long)
zs_channels[n_channels].control,
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index cbb451d227d..6385d1a99b6 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -242,7 +242,6 @@ int usb_hcd_pci_suspend (struct pci_dev *dev, pm_message_t message)
case HC_STATE_SUSPENDED:
/* no DMA or IRQs except when HC is active */
if (dev->current_state == PCI_D0) {
- free_irq (hcd->irq, hcd);
pci_save_state (dev);
pci_disable_device (dev);
}
@@ -374,14 +373,6 @@ int usb_hcd_pci_resume (struct pci_dev *dev)
hcd->state = HC_STATE_RESUMING;
hcd->saw_irq = 0;
- retval = request_irq (dev->irq, usb_hcd_irq, SA_SHIRQ,
- hcd->irq_descr, hcd);
- if (retval < 0) {
- dev_err (hcd->self.controller,
- "can't restore IRQ after resume!\n");
- usb_hc_died (hcd);
- return retval;
- }
retval = hcd->driver->resume (hcd);
if (!HC_IS_RUNNING (hcd->state)) {
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index c47c8052b48..f1fb67fe22a 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -987,7 +987,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
/* remove this interface if it has been registered */
interface = dev->actconfig->interface[i];
- if (!klist_node_attached(&interface->dev.knode_bus))
+ if (!device_is_registered(&interface->dev))
continue;
dev_dbg (&dev->dev, "unregistering interface %s\n",
interface->dev.bus_id);
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 087af73a59d..7d131509e41 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -303,7 +303,7 @@ int usb_driver_claim_interface(struct usb_driver *driver,
/* if interface was already added, bind now; else let
* the future device_add() bind it, bypassing probe()
*/
- if (klist_node_attached(&dev->knode_bus))
+ if (device_is_registered(dev))
device_bind_driver(dev);
return 0;
@@ -336,8 +336,8 @@ void usb_driver_release_interface(struct usb_driver *driver,
if (iface->condition != USB_INTERFACE_BOUND)
return;
- /* release only after device_add() */
- if (klist_node_attached(&dev->knode_bus)) {
+ /* don't release if the interface hasn't been added yet */
+ if (device_is_registered(dev)) {
iface->condition = USB_INTERFACE_UNBINDING;
device_release_driver(dev);
}
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c
index 1507738337c..73f8c940415 100644
--- a/drivers/usb/gadget/pxa2xx_udc.c
+++ b/drivers/usb/gadget/pxa2xx_udc.c
@@ -422,7 +422,7 @@ static inline void ep0_idle (struct pxa2xx_udc *dev)
}
static int
-write_packet(volatile u32 *uddr, struct pxa2xx_request *req, unsigned max)
+write_packet(volatile unsigned long *uddr, struct pxa2xx_request *req, unsigned max)
{
u8 *buf;
unsigned length, count;
@@ -2602,7 +2602,7 @@ static int __exit pxa2xx_udc_remove(struct device *_dev)
* VBUS IRQs should probably be ignored so that the PXA device just acts
* "dead" to USB hosts until system resume.
*/
-static int pxa2xx_udc_suspend(struct device *dev, u32 state, u32 level)
+static int pxa2xx_udc_suspend(struct device *dev, pm_message_t state, u32 level)
{
struct pxa2xx_udc *udc = dev_get_drvdata(dev);
diff --git a/drivers/usb/gadget/pxa2xx_udc.h b/drivers/usb/gadget/pxa2xx_udc.h
index d0bc396a85d..a58f3e6e71f 100644
--- a/drivers/usb/gadget/pxa2xx_udc.h
+++ b/drivers/usb/gadget/pxa2xx_udc.h
@@ -69,11 +69,11 @@ struct pxa2xx_ep {
* UDDR = UDC Endpoint Data Register (the fifo)
* DRCM = DMA Request Channel Map
*/
- volatile u32 *reg_udccs;
- volatile u32 *reg_ubcr;
- volatile u32 *reg_uddr;
+ volatile unsigned long *reg_udccs;
+ volatile unsigned long *reg_ubcr;
+ volatile unsigned long *reg_uddr;
#ifdef USE_DMA
- volatile u32 *reg_drcmr;
+ volatile unsigned long *reg_drcmr;
#define drcmr(n) .reg_drcmr = & DRCMR ## n ,
#else
#define drcmr(n)
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
index 817620d7384..859aca7be75 100644
--- a/drivers/usb/host/ohci-lh7a404.c
+++ b/drivers/usb/host/ohci-lh7a404.c
@@ -17,8 +17,6 @@
*/
#include <asm/hardware.h>
-#include <asm/mach-types.h>
-#include <asm/arch/hardware.h>
extern int usb_disabled(void);
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 5cde76faab9..d8f3ba7ad52 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -18,7 +18,6 @@
#include <asm/io.h>
#include <asm/mach-types.h>
-#include <asm/arch/hardware.h>
#include <asm/arch/mux.h>
#include <asm/arch/irqs.h>
#include <asm/arch/gpio.h>
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 3d9bcf78a9a..da7d5478f74 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -20,7 +20,6 @@
*/
#include <asm/hardware.h>
-#include <asm/mach-types.h>
#include <asm/hardware/clock.h>
#include <asm/arch/usb-control.h>
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index d2a1fd40dfc..d42a15d10a4 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -782,6 +782,9 @@ retry:
/* usb 1.1 says max 90% of a frame is available for periodic transfers.
* this driver doesn't promise that much since it's got to handle an
* IRQ per packet; irq handling latencies also use up that time.
+ *
+ * NOTE: the periodic schedule is a sparse tree, with the load for
+ * each branch minimized. see fig 3.5 in the OHCI spec for example.
*/
#define MAX_PERIODIC_LOAD 500 /* out of 1000 usec */
@@ -843,6 +846,7 @@ static int sl811h_urb_enqueue(
if (!(sl811->port1 & (1 << USB_PORT_FEAT_ENABLE))
|| !HC_IS_RUNNING(hcd->state)) {
retval = -ENODEV;
+ kfree(ep);
goto fail;
}
@@ -911,8 +915,16 @@ static int sl811h_urb_enqueue(
case PIPE_ISOCHRONOUS:
case PIPE_INTERRUPT:
urb->interval = ep->period;
- if (ep->branch < PERIODIC_SIZE)
+ if (ep->branch < PERIODIC_SIZE) {
+ /* NOTE: the phase is correct here, but the value
+ * needs offsetting by the transfer queue depth.
+ * All current drivers ignore start_frame, so this
+ * is unlikely to ever matter...
+ */
+ urb->start_frame = (sl811->frame & (PERIODIC_SIZE - 1))
+ + ep->branch;
break;
+ }
retval = balance(sl811, ep->period, ep->load);
if (retval < 0)
@@ -1122,7 +1134,7 @@ sl811h_hub_descriptor (
desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
/* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
- desc->bitmap[0] = 1 << 1;
+ desc->bitmap[0] = 0 << 1;
desc->bitmap[1] = ~0;
}
diff --git a/drivers/usb/media/vicam.c b/drivers/usb/media/vicam.c
index 4a5857c53f1..0bc0b1247a6 100644
--- a/drivers/usb/media/vicam.c
+++ b/drivers/usb/media/vicam.c
@@ -1148,7 +1148,7 @@ vicam_write_proc_gain(struct file *file, const char *buffer,
static void
vicam_create_proc_root(void)
{
- vicam_proc_root = create_proc_entry("video/vicam", S_IFDIR, 0);
+ vicam_proc_root = proc_mkdir("video/vicam", NULL);
if (vicam_proc_root)
vicam_proc_root->owner = THIS_MODULE;
@@ -1181,7 +1181,7 @@ vicam_create_proc_entry(struct vicam_camera *cam)
sprintf(name, "video%d", cam->vdev.minor);
- cam->proc_dir = create_proc_entry(name, S_IFDIR, vicam_proc_root);
+ cam->proc_dir = proc_mkdir(name, vicam_proc_root);
if ( !cam->proc_dir )
return; // FIXME: We should probably return an error here
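
The vicam hunks replace create_proc_entry(name, S_IFDIR, parent) with proc_mkdir(), the dedicated helper for creating procfs directories. A minimal usage sketch of that 2.6-era API; the directory name and module hook below are made up for illustration.

#include <linux/proc_fs.h>
#include <linux/module.h>

static struct proc_dir_entry *example_proc_root;

static int __init example_proc_init(void)
{
	/* NULL parent means the directory is created directly under /proc */
	example_proc_root = proc_mkdir("video/example", NULL);
	if (!example_proc_root)
		return -ENOMEM;
	example_proc_root->owner = THIS_MODULE;	/* same bookkeeping as vicam */
	return 0;
}
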
diff --git a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c
index 7484d34780f..6a4ffe6c397 100644
--- a/drivers/usb/net/pegasus.c
+++ b/drivers/usb/net/pegasus.c
@@ -648,6 +648,13 @@ static void read_bulk_callback(struct urb *urb, struct pt_regs *regs)
}
/*
+ * If the packet is unreasonably long, quietly drop it rather than
+ * kernel panicking by calling skb_put.
+ */
+ if (pkt_len > PEGASUS_MTU)
+ goto goon;
+
+ /*
* at this point we are sure pegasus->rx_skb != NULL
* so we go ahead and pass up the packet.
*/
@@ -886,15 +893,17 @@ static inline void get_interrupt_interval(pegasus_t * pegasus)
__u8 data[2];
read_eprom_word(pegasus, 4, (__u16 *) data);
- if (data[1] < 0x80) {
- if (netif_msg_timer(pegasus))
- dev_info(&pegasus->intf->dev,
- "intr interval changed from %ums to %ums\n",
- data[1], 0x80);
- data[1] = 0x80;
-#ifdef PEGASUS_WRITE_EEPROM
- write_eprom_word(pegasus, 4, *(__u16 *) data);
+ if (pegasus->usb->speed != USB_SPEED_HIGH) {
+ if (data[1] < 0x80) {
+ if (netif_msg_timer(pegasus))
+ dev_info(&pegasus->intf->dev, "intr interval "
+ "changed from %ums to %ums\n",
+ data[1], 0x80);
+ data[1] = 0x80;
+#ifdef PEGASUS_WRITE_EEPROM
+ write_eprom_word(pegasus, 4, *(__u16 *) data);
#endif
+ }
}
pegasus->intr_interval = data[1];
}
@@ -904,8 +913,9 @@ static void set_carrier(struct net_device *net)
pegasus_t *pegasus = netdev_priv(net);
u16 tmp;
- if (read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp))
+ if (!read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp))
return;
+
if (tmp & BMSR_LSTATUS)
netif_carrier_on(net);
else
@@ -1355,6 +1365,7 @@ static void pegasus_disconnect(struct usb_interface *intf)
cancel_delayed_work(&pegasus->carrier_check);
unregister_netdev(pegasus->net);
usb_put_dev(interface_to_usbdev(intf));
+ unlink_all_urbs(pegasus);
free_all_urbs(pegasus);
free_skb_pool(pegasus);
if (pegasus->rx_skb)
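
The pegasus change above is a general defensive pattern for USB network receive paths: validate the length reported by the hardware against the buffer that was allocated before calling skb_put(), because skb_put() BUGs out (skb_over_panic) if the data would overflow the skb. A simplified sketch, with mtu standing in for PEGASUS_MTU; unlike the real driver, which jumps to its "goon" label and reuses the buffer, this sketch simply frees the skb.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void rx_complete_sketch(struct net_device *net, struct sk_buff *skb,
			       unsigned int pkt_len, unsigned int mtu)
{
	if (pkt_len > mtu) {
		dev_kfree_skb_any(skb);		/* drop quietly, never skb_put() */
		return;
	}
	skb_put(skb, pkt_len);			/* safe: the data fits the buffer */
	skb->protocol = eth_type_trans(skb, net);
	netif_rx(skb);
}
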
diff --git a/drivers/usb/serial/airprime.c b/drivers/usb/serial/airprime.c
index a4ce0008d69..926d4c2c160 100644
--- a/drivers/usb/serial/airprime.c
+++ b/drivers/usb/serial/airprime.c
@@ -16,7 +16,8 @@
#include "usb-serial.h"
static struct usb_device_id id_table [] = {
- { USB_DEVICE(0xf3d, 0x0112) },
+ { USB_DEVICE(0xf3d, 0x0112) }, /* AirPrime CDMA Wireless PC Card */
+ { USB_DEVICE(0x1410, 0x1110) }, /* Novatel Wireless Merlin CDMA */
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 4e434cb10bb..5a8631c8a4a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1846,10 +1846,12 @@ static void ftdi_set_termios (struct usb_serial_port *port, struct termios *old_
} else {
/* set the baudrate determined before */
if (change_speed(port)) {
- err("%s urb failed to set baurdrate", __FUNCTION__);
+ err("%s urb failed to set baudrate", __FUNCTION__);
+ }
+ /* Ensure RTS and DTR are raised when baudrate changed from 0 */
+ if ((old_termios->c_cflag & CBAUD) == B0) {
+ set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
}
- /* Ensure RTS and DTR are raised */
- set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
}
/* Set flow control */
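
The ftdi_sio change narrows when the modem lines are forced high: DTR/RTS are reasserted only when the previous termios had a baud of B0, i.e. when the port is coming back from a hang-up, rather than on every speed change. A sketch of that check; set_mctrl() is the driver's own helper (visible in the hunk above), and the surrounding types come from "usb-serial.h" and <linux/termios.h>.

static void restore_lines_after_b0(struct usb_serial_port *port,
				   struct termios *old_termios,
				   struct termios *new_termios)
{
	/* leaving B0 means the line was hung up, so raise DTR and RTS again */
	if ((old_termios->c_cflag & CBAUD) == B0 &&
	    (new_termios->c_cflag & CBAUD) != B0)
		set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
}
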
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 92d0f925d05..4989e5740d1 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -25,6 +25,9 @@
2005-06-20 v0.4.1 add missing braces :-/
killed end-of-line whitespace
2005-07-15 v0.4.2 rename WLAN product to FUSION, add FUSION2
+ 2005-09-10 v0.4.3 added HUAWEI E600 card and Audiovox AirCard
+ 2005-09-20 v0.4.4 increased recv buffer size: the card sometimes
+ wants to send >2000 bytes.
Work sponsored by: Sigos GmbH, Germany <info@sigos.de>
@@ -71,15 +74,21 @@ static int option_send_setup(struct usb_serial_port *port);
/* Vendor and product IDs */
#define OPTION_VENDOR_ID 0x0AF0
+#define HUAWEI_VENDOR_ID 0x12D1
+#define AUDIOVOX_VENDOR_ID 0x0F3D
#define OPTION_PRODUCT_OLD 0x5000
#define OPTION_PRODUCT_FUSION 0x6000
#define OPTION_PRODUCT_FUSION2 0x6300
+#define HUAWEI_PRODUCT_E600 0x1001
+#define AUDIOVOX_PRODUCT_AIRCARD 0x0112
static struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_OLD) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUSION) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUSION2) },
+ { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600) },
+ { USB_DEVICE(AUDIOVOX_VENDOR_ID, AUDIOVOX_PRODUCT_AIRCARD) },
{ } /* Terminating entry */
};
@@ -132,7 +141,7 @@ static int debug;
#define N_IN_URB 4
#define N_OUT_URB 1
-#define IN_BUFLEN 1024
+#define IN_BUFLEN 4096
#define OUT_BUFLEN 128
struct option_port_private {
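
The airprime and option additions follow the standard recipe for teaching a USB serial driver about new hardware: one USB_DEVICE(vendor, product) line in the id table that MODULE_DEVICE_TABLE() exports for hotplug/module autoloading, plus, here, a larger receive buffer because the card can push more than 2000 bytes at a time. A generic sketch of the table idiom; the IDs below are placeholders, not real devices.

#include <linux/usb.h>

#define EXAMPLE_VENDOR_ID	0x1234	/* placeholder */
#define EXAMPLE_PRODUCT_ID	0x5678	/* placeholder */

static struct usb_device_id example_ids[] = {
	{ USB_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_PRODUCT_ID) },
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_ids);
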
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 615874e03ce..1cd942abb58 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -650,6 +650,7 @@ config FB_NVIDIA
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select FB_SOFT_CURSOR
help
This driver supports graphics boards with the nVidia chips, TNT
and newer. For very old chipsets, such as the RIVA128, then use
@@ -753,7 +754,8 @@ config FB_I810_GTF
config FB_I810_I2C
bool "Enable DDC Support"
- depends on FB_I810 && I2C && FB_I810_GTF
+ depends on FB_I810 && FB_I810_GTF
+ select I2C
select I2C_ALGOBIT
help
@@ -766,6 +768,7 @@ config FB_INTEL
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select FB_SOFT_CURSOR
help
This driver supports the on-board graphics built in to the Intel
830M/845G/852GM/855GM/865G chipsets.
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 046b4786026..8a24a66d9ba 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -475,7 +475,7 @@ static int __devinit radeon_probe_pll_params(struct radeonfb_info *rinfo)
*/
/* Flush PCI buffers ? */
- tmp = INREG(DEVICE_ID);
+ tmp = INREG16(DEVICE_ID);
local_irq_disable();
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index 59a1b6f8506..097d668c4fe 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -62,9 +62,9 @@ static void radeon_pm_disable_dynamic_mode(struct radeonfb_info *rinfo)
OUTPLL(pllSCLK_CNTL, tmp);
return;
}
- /* RV350 (M10) */
+ /* RV350 (M10/M11) */
if (rinfo->family == CHIP_FAMILY_RV350) {
- /* for RV350/M10, no delays are required. */
+ /* for RV350/M10/M11, no delays are required. */
tmp = INPLL(pllSCLK_CNTL2);
tmp |= (SCLK_CNTL2__R300_FORCE_TCL |
SCLK_CNTL2__R300_FORCE_GA |
@@ -248,7 +248,7 @@ static void radeon_pm_enable_dynamic_mode(struct radeonfb_info *rinfo)
return;
}
- /* M10 */
+ /* M10/M11 */
if (rinfo->family == CHIP_FAMILY_RV350) {
tmp = INPLL(pllSCLK_CNTL2);
tmp &= ~(SCLK_CNTL2__R300_FORCE_TCL |
@@ -1155,7 +1155,7 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo)
OUTREG( CRTC_GEN_CNTL, (crtcGenCntl | CRTC_GEN_CNTL__CRTC_DISP_REQ_EN_B) );
OUTREG( CRTC2_GEN_CNTL, (crtcGenCntl2 | CRTC2_GEN_CNTL__CRTC2_DISP_REQ_EN_B) );
- /* This is the code for the Aluminium PowerBooks M10 */
+ /* This is the code for the Aluminium PowerBooks M10 / iBooks M11 */
if (rinfo->family == CHIP_FAMILY_RV350) {
u32 sdram_mode_reg = rinfo->save_regs[35];
static u32 default_mrtable[] =
@@ -2741,9 +2741,11 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk)
rinfo->pm_mode |= radeon_pm_d2;
/* We can restart Jasper (M10 chip in albooks), BlueStone (7500 chip
- * in some desktop G4s), and Via (M9+ chip on iBook G4)
+ * in some desktop G4s), Via (M9+ chip on iBook G4) and
+ * Snowy (M11 chip on iBook G4 manufactured after July 2005)
*/
- if (!strcmp(rinfo->of_node->name, "ATY,JasperParent")) {
+ if (!strcmp(rinfo->of_node->name, "ATY,JasperParent") ||
+ !strcmp(rinfo->of_node->name, "ATY,SnowyParent")) {
rinfo->reinit_func = radeon_reinitialize_M10;
rinfo->pm_mode |= radeon_pm_off;
}
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index 659bc9f6224..01b8b2f7851 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -395,6 +395,8 @@ static inline void _radeon_msleep(struct radeonfb_info *rinfo, unsigned long ms)
#define INREG8(addr) readb((rinfo->mmio_base)+addr)
#define OUTREG8(addr,val) writeb(val, (rinfo->mmio_base)+addr)
+#define INREG16(addr) readw((rinfo->mmio_base)+addr)
+#define OUTREG16(addr,val) writew(val, (rinfo->mmio_base)+addr)
#define INREG(addr) readl((rinfo->mmio_base)+addr)
#define OUTREG(addr,val) writel(val, (rinfo->mmio_base)+addr)
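
INREG16/OUTREG16 wrap the kernel's 16-bit MMIO accessors, and the radeon_base hunk then reads the 16-bit DEVICE_ID register with a width-matched access instead of a 32-bit one. A minimal sketch of the accessor pattern over an ioremap()ed region; the register offsets below are illustrative.

#include <linux/types.h>
#include <asm/io.h>

static u16 read_device_id(void __iomem *mmio_base)
{
	return readw(mmio_base + 0x0c);		/* 16-bit read, like INREG16 */
}

static void write_some_reg(void __iomem *mmio_base, u16 val)
{
	writew(val, mmio_base + 0x10);		/* 16-bit write, like OUTREG16 */
}
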
diff --git a/drivers/video/aty/xlinit.c b/drivers/video/aty/xlinit.c
index 92643af1258..a085cbf74ec 100644
--- a/drivers/video/aty/xlinit.c
+++ b/drivers/video/aty/xlinit.c
@@ -174,7 +174,7 @@ int atyfb_xl_init(struct fb_info *info)
const struct xl_card_cfg_t * card = &card_cfg[xl_card];
struct atyfb_par *par = (struct atyfb_par *) info->par;
union aty_pll pll;
- int i, err;
+ int err;
u32 temp;
aty_st_8(CONFIG_STAT0, 0x85, par);
@@ -252,9 +252,14 @@ int atyfb_xl_init(struct fb_info *info)
aty_st_le32(0xEC, 0x00000000, par);
aty_st_le32(0xFC, 0x00000000, par);
- for (i=0; i<sizeof(lcd_tbl)/sizeof(lcd_tbl_t); i++) {
- aty_st_lcd(lcd_tbl[i].lcd_reg, lcd_tbl[i].val, par);
+#if defined (CONFIG_FB_ATY_GENERIC_LCD)
+ {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lcd_tbl); i++)
+ aty_st_lcd(lcd_tbl[i].lcd_reg, lcd_tbl[i].val, par);
}
+#endif
aty_st_le16(CONFIG_STAT0, 0x00A4, par);
mdelay(10);
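
The xlinit hunk is the usual idiom swap: ARRAY_SIZE(tbl) instead of the open-coded sizeof(tbl)/sizeof(element), with the LCD table walk additionally compiled only when generic LCD support is configured. ARRAY_SIZE expands to sizeof(arr)/sizeof((arr)[0]), so it stays correct even if the element type changes. A tiny sketch with an invented table:

#include <linux/kernel.h>	/* ARRAY_SIZE() */

struct reg_val { unsigned reg; unsigned val; };

static const struct reg_val init_tbl[] = {
	{ 0x01, 0x00 },		/* illustrative register/value pairs */
	{ 0x02, 0xa4 },
};

static void write_table_sketch(void)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(init_tbl); i++)
		/* write init_tbl[i].val to register init_tbl[i].reg here */
		;
}
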
diff --git a/drivers/video/backlight/corgi_bl.c b/drivers/video/backlight/corgi_bl.c
index 353cb3f73cf..3c72c627e65 100644
--- a/drivers/video/backlight/corgi_bl.c
+++ b/drivers/video/backlight/corgi_bl.c
@@ -19,17 +19,17 @@
#include <linux/fb.h>
#include <linux/backlight.h>
-#include <asm/arch-pxa/corgi.h>
-#include <asm/hardware/scoop.h>
+#include <asm/arch/sharpsl.h>
-#define CORGI_MAX_INTENSITY 0x3e
#define CORGI_DEFAULT_INTENSITY 0x1f
-#define CORGI_LIMIT_MASK 0x0b
+#define CORGI_LIMIT_MASK 0x0b
static int corgibl_powermode = FB_BLANK_UNBLANK;
static int current_intensity = 0;
static int corgibl_limit = 0;
+static void (*corgibl_mach_set_intensity)(int intensity);
static spinlock_t bl_lock = SPIN_LOCK_UNLOCKED;
+static struct backlight_properties corgibl_data;
static void corgibl_send_intensity(int intensity)
{
@@ -43,18 +43,10 @@ static void corgibl_send_intensity(int intensity)
intensity &= CORGI_LIMIT_MASK;
}
- /* Skip 0x20 as it will blank the display */
- if (intensity >= 0x20)
- intensity++;
-
spin_lock_irqsave(&bl_lock, flags);
- /* Bits 0-4 are accessed via the SSP interface */
- corgi_ssp_blduty_set(intensity & 0x1f);
- /* Bit 5 is via SCOOP */
- if (intensity & 0x0020)
- set_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_BACKLIGHT_CONT);
- else
- reset_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_BACKLIGHT_CONT);
+
+ corgibl_mach_set_intensity(intensity);
+
spin_unlock_irqrestore(&bl_lock, flags);
}
@@ -113,8 +105,8 @@ static int corgibl_get_power(struct backlight_device *bd)
static int corgibl_set_intensity(struct backlight_device *bd, int intensity)
{
- if (intensity > CORGI_MAX_INTENSITY)
- intensity = CORGI_MAX_INTENSITY;
+ if (intensity > corgibl_data.max_brightness)
+ intensity = corgibl_data.max_brightness;
corgibl_send_intensity(intensity);
current_intensity=intensity;
return 0;
@@ -141,7 +133,6 @@ static struct backlight_properties corgibl_data = {
.owner = THIS_MODULE,
.get_power = corgibl_get_power,
.set_power = corgibl_set_power,
- .max_brightness = CORGI_MAX_INTENSITY,
.get_brightness = corgibl_get_intensity,
.set_brightness = corgibl_set_intensity,
};
@@ -150,12 +141,18 @@ static struct backlight_device *corgi_backlight_device;
static int __init corgibl_probe(struct device *dev)
{
+ struct corgibl_machinfo *machinfo = dev->platform_data;
+
+ corgibl_data.max_brightness = machinfo->max_intensity;
+ corgibl_mach_set_intensity = machinfo->set_bl_intensity;
+
corgi_backlight_device = backlight_device_register ("corgi-bl",
NULL, &corgibl_data);
if (IS_ERR (corgi_backlight_device))
return PTR_ERR (corgi_backlight_device);
corgibl_set_intensity(NULL, CORGI_DEFAULT_INTENSITY);
+ corgibl_limit_intensity(0);
printk("Corgi Backlight Driver Initialized.\n");
return 0;
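
The corgi_bl rework moves the machine-specific intensity programming (SSP plus SCOOP GPIO on Corgi) out of the driver and behind a callback handed in through dev->platform_data, so other Sharp SL machines can reuse the same backlight driver. A hedged sketch of that contract; only max_intensity and set_bl_intensity are implied by the hunks above, and the board hook below is hypothetical.

/* Shape of the platform data the new probe() dereferences. */
struct corgibl_machinfo {
	int max_intensity;
	void (*set_bl_intensity)(int intensity);
};

static void example_set_bl_intensity(int intensity)	/* hypothetical board hook */
{
	/* program the SSP duty cycle / GPIO appropriate for this machine */
}

static struct corgibl_machinfo example_bl_machinfo = {
	.max_intensity    = 0x2f,			/* illustrative value */
	.set_bl_intensity = example_set_bl_intensity,
};
/* Board code attaches &example_bl_machinfo as platform_data on the
 * backlight platform device before it is registered. */
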
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 2e93224d2d5..0fc8bb499c3 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -767,7 +767,7 @@ static const char *fbcon_startup(void)
const char *display_desc = "frame buffer device";
struct display *p = &fb_display[fg_console];
struct vc_data *vc = vc_cons[fg_console].d;
- struct font_desc *font = NULL;
+ const struct font_desc *font = NULL;
struct module *owner;
struct fb_info *info = NULL;
struct fbcon_ops *ops;
@@ -841,7 +841,7 @@ static const char *fbcon_startup(void)
info->var.yres);
vc->vc_font.width = font->width;
vc->vc_font.height = font->height;
- vc->vc_font.data = p->fontdata = font->data;
+ vc->vc_font.data = (void *)(p->fontdata = font->data);
vc->vc_font.charcount = 256; /* FIXME Need to support more fonts */
}
@@ -941,7 +941,7 @@ static void fbcon_init(struct vc_data *vc, int init)
fb, copy the font from that console */
t = &fb_display[svc->vc_num];
if (!vc->vc_font.data) {
- vc->vc_font.data = p->fontdata = t->fontdata;
+ vc->vc_font.data = (void *)(p->fontdata = t->fontdata);
vc->vc_font.width = (*default_mode)->vc_font.width;
vc->vc_font.height = (*default_mode)->vc_font.height;
p->userfont = t->userfont;
@@ -1188,7 +1188,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
return;
t = &fb_display[svc->vc_num];
if (!vc->vc_font.data) {
- vc->vc_font.data = p->fontdata = t->fontdata;
+ vc->vc_font.data = (void *)(p->fontdata = t->fontdata);
vc->vc_font.width = (*default_mode)->vc_font.width;
vc->vc_font.height = (*default_mode)->vc_font.height;
p->userfont = t->userfont;
@@ -1687,6 +1687,8 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
case SM_DOWN:
if (count > vc->vc_rows) /* Maximum realistic size */
count = vc->vc_rows;
+ if (logo_shown >= 0)
+ goto redraw_down;
switch (p->scrollmode) {
case SCROLL_MOVE:
ops->bmove(vc, info, t, 0, t + count, 0,
@@ -2148,7 +2150,7 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
}
static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
- u8 * data, int userfont)
+ const u8 * data, int userfont)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct display *p = &fb_display[vc->vc_num];
@@ -2166,7 +2168,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
cnt = FNTCHARCNT(data);
else
cnt = 256;
- vc->vc_font.data = p->fontdata = data;
+ vc->vc_font.data = (void *)(p->fontdata = data);
if ((p->userfont = userfont))
REFCOUNT(data)++;
vc->vc_font.width = w;
@@ -2323,7 +2325,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, unsigne
tmp->vc_font.width == w &&
!memcmp(fb_display[i].fontdata, new_data, size)) {
kfree(new_data - FONT_EXTRA_WORDS * sizeof(int));
- new_data = fb_display[i].fontdata;
+ new_data = (u8 *)fb_display[i].fontdata;
break;
}
}
@@ -2333,7 +2335,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, unsigne
static int fbcon_set_def_font(struct vc_data *vc, struct console_font *font, char *name)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct font_desc *f;
+ const struct font_desc *f;
if (!name)
f = get_default_font(info->var.xres, info->var.yres);
diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h
index 08befafe11d..0738cd62def 100644
--- a/drivers/video/console/fbcon.h
+++ b/drivers/video/console/fbcon.h
@@ -30,7 +30,7 @@ struct display {
/* Filled in by the frame buffer device */
u_short inverse; /* != 0 text black on white as default */
/* Filled in by the low-level console driver */
- u_char *fontdata;
+ const u_char *fontdata;
int userfont; /* != 0 if fontdata kmalloc()ed */
u_short scrollmode; /* Scroll Method */
short yscroll; /* Hardware scrolling */
diff --git a/drivers/video/console/font_10x18.c b/drivers/video/console/font_10x18.c
index ff0af96e4df..e6aa0eab5bb 100644
--- a/drivers/video/console/font_10x18.c
+++ b/drivers/video/console/font_10x18.c
@@ -7,7 +7,7 @@
#define FONTDATAMAX 9216
-static unsigned char fontdata_10x18[FONTDATAMAX] = {
+static const unsigned char fontdata_10x18[FONTDATAMAX] = {
/* 0 0x00 '^@' */
0x00, 0x00, /* 0000000000 */
@@ -5132,7 +5132,7 @@ static unsigned char fontdata_10x18[FONTDATAMAX] = {
};
-struct font_desc font_10x18 = {
+const struct font_desc font_10x18 = {
FONT10x18_IDX,
"10x18",
10,
diff --git a/drivers/video/console/font_6x11.c b/drivers/video/console/font_6x11.c
index c52f1294044..89976cd9749 100644
--- a/drivers/video/console/font_6x11.c
+++ b/drivers/video/console/font_6x11.c
@@ -8,7 +8,7 @@
#define FONTDATAMAX (11*256)
-static unsigned char fontdata_6x11[FONTDATAMAX] = {
+static const unsigned char fontdata_6x11[FONTDATAMAX] = {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
@@ -3341,7 +3341,7 @@ static unsigned char fontdata_6x11[FONTDATAMAX] = {
};
-struct font_desc font_vga_6x11 = {
+const struct font_desc font_vga_6x11 = {
VGA6x11_IDX,
"ProFont6x11",
6,
diff --git a/drivers/video/console/font_7x14.c b/drivers/video/console/font_7x14.c
index 1fa7fcf2ff7..bbf11664739 100644
--- a/drivers/video/console/font_7x14.c
+++ b/drivers/video/console/font_7x14.c
@@ -7,7 +7,7 @@
#define FONTDATAMAX 3584
-static unsigned char fontdata_7x14[FONTDATAMAX] = {
+static const unsigned char fontdata_7x14[FONTDATAMAX] = {
/* 0 0x00 '^@' */
0x00, /* 0000000 */
@@ -4108,7 +4108,7 @@ static unsigned char fontdata_7x14[FONTDATAMAX] = {
};
-struct font_desc font_7x14 = {
+const struct font_desc font_7x14 = {
FONT7x14_IDX,
"7x14",
7,
diff --git a/drivers/video/console/font_8x16.c b/drivers/video/console/font_8x16.c
index e6f8dbaa122..74fe86f28ff 100644
--- a/drivers/video/console/font_8x16.c
+++ b/drivers/video/console/font_8x16.c
@@ -8,7 +8,7 @@
#define FONTDATAMAX 4096
-static unsigned char fontdata_8x16[FONTDATAMAX] = {
+static const unsigned char fontdata_8x16[FONTDATAMAX] = {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
@@ -4621,7 +4621,7 @@ static unsigned char fontdata_8x16[FONTDATAMAX] = {
};
-struct font_desc font_vga_8x16 = {
+const struct font_desc font_vga_8x16 = {
VGA8x16_IDX,
"VGA8x16",
8,
diff --git a/drivers/video/console/font_8x8.c b/drivers/video/console/font_8x8.c
index 57fbe266a6b..26199f8ee90 100644
--- a/drivers/video/console/font_8x8.c
+++ b/drivers/video/console/font_8x8.c
@@ -8,7 +8,7 @@
#define FONTDATAMAX 2048
-static unsigned char fontdata_8x8[FONTDATAMAX] = {
+static const unsigned char fontdata_8x8[FONTDATAMAX] = {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
@@ -2573,7 +2573,7 @@ static unsigned char fontdata_8x8[FONTDATAMAX] = {
};
-struct font_desc font_vga_8x8 = {
+const struct font_desc font_vga_8x8 = {
VGA8x8_IDX,
"VGA8x8",
8,
diff --git a/drivers/video/console/font_acorn_8x8.c b/drivers/video/console/font_acorn_8x8.c
index d565505e306..2d2e39632e2 100644
--- a/drivers/video/console/font_acorn_8x8.c
+++ b/drivers/video/console/font_acorn_8x8.c
@@ -3,7 +3,7 @@
#include <linux/config.h>
#include <linux/font.h>
-static unsigned char acorndata_8x8[] = {
+static const unsigned char acorndata_8x8[] = {
/* 00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */
/* 01 */ 0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */
/* 02 */ 0x7e, 0xff, 0xbd, 0xff, 0xc3, 0xe7, 0xff, 0x7e, /* ^B */
@@ -262,7 +262,7 @@ static unsigned char acorndata_8x8[] = {
/* FF */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
-struct font_desc font_acorn_8x8 = {
+const struct font_desc font_acorn_8x8 = {
ACORN8x8_IDX,
"Acorn8x8",
8,
diff --git a/drivers/video/console/font_mini_4x6.c b/drivers/video/console/font_mini_4x6.c
index 593b95500a0..d818234fdf1 100644
--- a/drivers/video/console/font_mini_4x6.c
+++ b/drivers/video/console/font_mini_4x6.c
@@ -43,7 +43,7 @@ __END__;
#define FONTDATAMAX 1536
-static unsigned char fontdata_mini_4x6[FONTDATAMAX] = {
+static const unsigned char fontdata_mini_4x6[FONTDATAMAX] = {
/*{*/
/* Char 0: ' ' */
@@ -2147,7 +2147,7 @@ static unsigned char fontdata_mini_4x6[FONTDATAMAX] = {
/*}*/
};
-struct font_desc font_mini_4x6 = {
+const struct font_desc font_mini_4x6 = {
MINI4x6_IDX,
"MINI4x6",
4,
diff --git a/drivers/video/console/font_pearl_8x8.c b/drivers/video/console/font_pearl_8x8.c
index 5fa95f11881..e646c88f55c 100644
--- a/drivers/video/console/font_pearl_8x8.c
+++ b/drivers/video/console/font_pearl_8x8.c
@@ -13,7 +13,7 @@
#define FONTDATAMAX 2048
-static unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
+static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
@@ -2577,7 +2577,7 @@ static unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
};
-struct font_desc font_pearl_8x8 = {
+const struct font_desc font_pearl_8x8 = {
PEARL8x8_IDX,
"PEARL8x8",
8,
diff --git a/drivers/video/console/font_sun12x22.c b/drivers/video/console/font_sun12x22.c
index c7bd967ea10..ab5eb93407b 100644
--- a/drivers/video/console/font_sun12x22.c
+++ b/drivers/video/console/font_sun12x22.c
@@ -2,7 +2,7 @@
#define FONTDATAMAX 11264
-static unsigned char fontdata_sun12x22[FONTDATAMAX] = {
+static const unsigned char fontdata_sun12x22[FONTDATAMAX] = {
/* 0 0x00 '^@' */
0x00, 0x00, /* 000000000000 */
@@ -6151,7 +6151,7 @@ static unsigned char fontdata_sun12x22[FONTDATAMAX] = {
};
-struct font_desc font_sun_12x22 = {
+const struct font_desc font_sun_12x22 = {
SUN12x22_IDX,
"SUN12x22",
12,
diff --git a/drivers/video/console/font_sun8x16.c b/drivers/video/console/font_sun8x16.c
index 2af3ab35465..41f910f5529 100644
--- a/drivers/video/console/font_sun8x16.c
+++ b/drivers/video/console/font_sun8x16.c
@@ -2,7 +2,7 @@
#define FONTDATAMAX 4096
-static unsigned char fontdata_sun8x16[FONTDATAMAX] = {
+static const unsigned char fontdata_sun8x16[FONTDATAMAX] = {
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x7e,0xff,0xdb,0xff,0xff,0xc3,0xe7,0xff,0xff,0x7e,0x00,0x00,0x00,0x00,
@@ -261,7 +261,7 @@ static unsigned char fontdata_sun8x16[FONTDATAMAX] = {
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
};
-struct font_desc font_sun_8x16 = {
+const struct font_desc font_sun_8x16 = {
SUN8x16_IDX,
"SUN8x16",
8,
diff --git a/drivers/video/console/fonts.c b/drivers/video/console/fonts.c
index e79b2970264..4fd07d9eca0 100644
--- a/drivers/video/console/fonts.c
+++ b/drivers/video/console/fonts.c
@@ -23,7 +23,7 @@
#define NO_FONTS
-static struct font_desc *fonts[] = {
+static const struct font_desc *fonts[] = {
#ifdef CONFIG_FONT_8x8
#undef NO_FONTS
&font_vga_8x8,
@@ -84,7 +84,7 @@ static struct font_desc *fonts[] = {
*
*/
-struct font_desc *find_font(char *name)
+const struct font_desc *find_font(const char *name)
{
unsigned int i;
@@ -108,10 +108,10 @@ struct font_desc *find_font(char *name)
*
*/
-struct font_desc *get_default_font(int xres, int yres)
+const struct font_desc *get_default_font(int xres, int yres)
{
int i, c, cc;
- struct font_desc *f, *g;
+ const struct font_desc *f, *g;
g = NULL;
cc = -10000;
@@ -138,7 +138,6 @@ struct font_desc *get_default_font(int xres, int yres)
return g;
}
-EXPORT_SYMBOL(fonts);
EXPORT_SYMBOL(find_font);
EXPORT_SYMBOL(get_default_font);
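
With the font tables and lookup helpers const-ified, callers such as fbcon hold const struct font_desc pointers and cast only where the console API still expects a mutable void *. A short usage sketch of the two exported helpers as they are declared after this patch; the font name matches the VGA8x16 table above.

#include <linux/font.h>

static void pick_console_font(unsigned int xres, unsigned int yres)
{
	const struct font_desc *f;

	f = find_font("VGA8x16");		  /* by name; NULL if not built in */
	if (!f)
		f = get_default_font(xres, yres); /* best fit for the resolution */

	/* f->width, f->height and f->data describe the chosen font */
}
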
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 0705cd74141..6ef6f7760e4 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1020,7 +1020,9 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
static int vgacon_resize(struct vc_data *c, unsigned int width,
unsigned int height)
{
- if (width % 2 || width > ORIG_VIDEO_COLS || height > ORIG_VIDEO_LINES)
+ if (width % 2 || width > ORIG_VIDEO_COLS ||
+ height > (ORIG_VIDEO_LINES * vga_default_font_height)/
+ c->vc_font.height)
return -EINVAL;
if (CON_IS_VISIBLE(c) && !vga_is_gfx) /* who knows */
diff --git a/drivers/video/cyblafb.c b/drivers/video/cyblafb.c
index ae2762cb560..6992100a508 100644
--- a/drivers/video/cyblafb.c
+++ b/drivers/video/cyblafb.c
@@ -410,20 +410,21 @@ static void cyblafb_imageblit(struct fb_info *info,
out32(GE0C,point(image->dx+image->width-1,image->dy+image->height-1));
while(index < index_end) {
+ const char *p = image->data + index;
for(i=0;i<width_dds;i++) {
- out32(GE9C,*((u32*) ((u32)image->data + index)));
+ out32(GE9C,*(u32*)p);
+ p+=4;
index+=4;
}
switch(width_dbs) {
case 0: break;
- case 8: out32(GE9C,*((u8*)((u32)image->data+index)));
+ case 8: out32(GE9C,*(u8*)p);
index+=1;
break;
- case 16: out32(GE9C,*((u16*)((u32)image->data+index)));
+ case 16: out32(GE9C,*(u16*)p);
index+=2;
break;
- case 24: out32(GE9C,(u32)(*((u16*)((u32)image->data+index))) |
- (u32)(*((u8*)((u32)image->data+index+2)))<<16);
+ case 24: out32(GE9C,*(u16*)p | *(u8*)(p+2)<<16);
index+=3;
break;
}
diff --git a/drivers/video/fbcvt.c b/drivers/video/fbcvt.c
index cfa61b512de..0b6af00d197 100644
--- a/drivers/video/fbcvt.c
+++ b/drivers/video/fbcvt.c
@@ -272,11 +272,11 @@ static void fb_cvt_convert_to_mode(struct fb_cvt_data *cvt,
{
mode->refresh = cvt->f_refresh;
mode->pixclock = KHZ2PICOS(cvt->pixclock/1000);
- mode->left_margin = cvt->h_front_porch;
- mode->right_margin = cvt->h_back_porch;
+ mode->left_margin = cvt->h_back_porch;
+ mode->right_margin = cvt->h_front_porch;
mode->hsync_len = cvt->hsync;
- mode->upper_margin = cvt->v_front_porch;
- mode->lower_margin = cvt->v_back_porch;
+ mode->upper_margin = cvt->v_back_porch;
+ mode->lower_margin = cvt->v_front_porch;
mode->vsync_len = cvt->vsync;
mode->sync &= ~(FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT);
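
The fbcvt fix is easy to misread without the fbdev naming convention in mind: in fb_var_screeninfo (and fb_videomode), left_margin/upper_margin are the back porches, the blanking that precedes active video, while right_margin/lower_margin are the front porches, so the CVT results have to be crossed over as above. A sketch of where crossed-over timings land; the pixel and line counts are illustrative, not computed CVT values.

#include <linux/fb.h>

static const struct fb_var_screeninfo cvt_mapping_sketch = {
	.xres         = 1024,
	.right_margin = 48,	/* horizontal front porch */
	.hsync_len    = 104,
	.left_margin  = 152,	/* horizontal back porch  */
	.yres         = 768,
	.lower_margin = 3,	/* vertical front porch   */
	.vsync_len    = 4,
	.upper_margin = 23,	/* vertical back porch    */
};
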
diff --git a/drivers/video/i810/i810-i2c.c b/drivers/video/i810/i810-i2c.c
index fda53aac1fc..689d2586366 100644
--- a/drivers/video/i810/i810-i2c.c
+++ b/drivers/video/i810/i810-i2c.c
@@ -44,7 +44,7 @@ static void i810i2c_setscl(void *data, int state)
{
struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
struct i810fb_par *par = chan->par;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
i810_writel(mmio, GPIOB, (state ? SCL_VAL_OUT : 0) | SCL_DIR |
SCL_DIR_MASK | SCL_VAL_MASK);
@@ -55,7 +55,7 @@ static void i810i2c_setsda(void *data, int state)
{
struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
struct i810fb_par *par = chan->par;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
i810_writel(mmio, GPIOB, (state ? SDA_VAL_OUT : 0) | SDA_DIR |
SDA_DIR_MASK | SDA_VAL_MASK);
@@ -66,7 +66,7 @@ static int i810i2c_getscl(void *data)
{
struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
struct i810fb_par *par = chan->par;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
i810_writel(mmio, GPIOB, SCL_DIR_MASK);
i810_writel(mmio, GPIOB, 0);
@@ -77,7 +77,7 @@ static int i810i2c_getsda(void *data)
{
struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
struct i810fb_par *par = chan->par;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
i810_writel(mmio, GPIOB, SDA_DIR_MASK);
i810_writel(mmio, GPIOB, 0);
@@ -88,7 +88,7 @@ static void i810ddc_setscl(void *data, int state)
{
struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
struct i810fb_par *par = chan->par;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
i810_writel(mmio, GPIOA, (state ? SCL_VAL_OUT : 0) | SCL_DIR |
SCL_DIR_MASK | SCL_VAL_MASK);
@@ -99,7 +99,7 @@ static void i810ddc_setsda(void *data, int state)
{
struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
struct i810fb_par *par = chan->par;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
i810_writel(mmio, GPIOA, (state ? SDA_VAL_OUT : 0) | SDA_DIR |
SDA_DIR_MASK | SDA_VAL_MASK);
@@ -110,7 +110,7 @@ static int i810ddc_getscl(void *data)
{
struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
struct i810fb_par *par = chan->par;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
i810_writel(mmio, GPIOA, SCL_DIR_MASK);
i810_writel(mmio, GPIOA, 0);
@@ -121,7 +121,7 @@ static int i810ddc_getsda(void *data)
{
struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
struct i810fb_par *par = chan->par;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
i810_writel(mmio, GPIOA, SDA_DIR_MASK);
i810_writel(mmio, GPIOA, 0);
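
The repeated one-line i810 change annotates the MMIO pointer with __iomem, which lets sparse (make C=1) check that I/O memory is only touched through accessors such as readl()/writel() rather than plain dereferences. A minimal sketch of the convention; the register offset parameter is illustrative.

#include <linux/types.h>
#include <asm/io.h>

static void toggle_bit_sketch(u8 __iomem *mmio, unsigned long reg)
{
	u32 v = readl(mmio + reg);	/* __iomem pointers only via accessors */

	writel(v | 0x1, mmio + reg);
}
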
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index cabd53cec99..1d54d3d6960 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -36,7 +36,6 @@
#include <asm/hardware.h>
#include <asm/io.h>
-#include <asm/mach-types.h>
#include <asm/uaccess.h>
#include <asm/arch/imxfb.h>
@@ -425,7 +424,7 @@ static void imxfb_setup_gpio(struct imxfb_info *fbi)
* Power management hooks. Note that we won't be called from IRQ context,
* unlike the blank functions above, so we may sleep.
*/
-static int imxfb_suspend(struct device *dev, u32 state, u32 level)
+static int imxfb_suspend(struct device *dev, pm_message_t state, u32 level)
{
struct imxfb_info *fbi = dev_get_drvdata(dev);
pr_debug("%s\n",__FUNCTION__);
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index bf62e6ed038..80a09344f1a 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -226,7 +226,7 @@ MODULE_DEVICE_TABLE(pci, intelfb_pci_table);
static int accel = 1;
static int vram = 4;
-static int hwcursor = 1;
+static int hwcursor = 0;
static int mtrr = 1;
static int fixed = 0;
static int noinit = 0;
@@ -609,15 +609,9 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
dinfo->accel = 0;
}
- if (MB(voffset) < stolen_size)
- offset = (stolen_size >> 12);
- else
- offset = ROUND_UP_TO_PAGE(MB(voffset))/GTT_PAGE_SIZE;
-
/* Framebuffer parameters - Use all the stolen memory if >= vram */
- if (ROUND_UP_TO_PAGE(stolen_size) >= ((offset << 12) + MB(vram))) {
+ if (ROUND_UP_TO_PAGE(stolen_size) >= MB(vram)) {
dinfo->fb.size = ROUND_UP_TO_PAGE(stolen_size);
- dinfo->fb.offset = 0;
dinfo->fbmem_gart = 0;
} else {
dinfo->fb.size = MB(vram);
@@ -648,6 +642,11 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
+ if (MB(voffset) < stolen_size)
+ offset = (stolen_size >> 12);
+ else
+ offset = ROUND_UP_TO_PAGE(MB(voffset))/GTT_PAGE_SIZE;
+
/* set the mem offsets - set them after the already used pages */
if (dinfo->accel) {
dinfo->ring.offset = offset + gtt_info.current_memory;
@@ -662,10 +661,11 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
+ (dinfo->cursor.size >> 12);
}
+ /* Allocate memories (which aren't stolen) */
/* Map the fb and MMIO regions */
/* ioremap only up to the end of used aperture */
dinfo->aperture.virtual = (u8 __iomem *)ioremap_nocache
- (dinfo->aperture.physical, (dinfo->fb.offset << 12)
+ (dinfo->aperture.physical, ((offset + dinfo->fb.offset) << 12)
+ dinfo->fb.size);
if (!dinfo->aperture.virtual) {
ERR_MSG("Cannot remap FB region.\n");
@@ -682,7 +682,6 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
- /* Allocate memories (which aren't stolen) */
if (dinfo->accel) {
if (!(dinfo->gtt_ring_mem =
agp_allocate_memory(bridge, dinfo->ring.size >> 12,
@@ -1484,7 +1483,7 @@ intelfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
#endif
if (!dinfo->hwcursor)
- return -ENXIO;
+ return soft_cursor(info, cursor);
intelfbhw_cursor_hide(dinfo);
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 98e00d8601e..e02da41f1b2 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -1285,7 +1285,7 @@ static int matroxfb_getmemory(WPMINFO unsigned int maxSize, unsigned int *realSi
vaddr_t vm;
unsigned int offs;
unsigned int offs2;
- unsigned char store;
+ unsigned char store, orig;
unsigned char bytes[32];
unsigned char* tmp;
@@ -1298,7 +1298,8 @@ static int matroxfb_getmemory(WPMINFO unsigned int maxSize, unsigned int *realSi
if (maxSize > 0x2000000) maxSize = 0x2000000;
mga_outb(M_EXTVGA_INDEX, 0x03);
- mga_outb(M_EXTVGA_DATA, mga_inb(M_EXTVGA_DATA) | 0x80);
+ orig = mga_inb(M_EXTVGA_DATA);
+ mga_outb(M_EXTVGA_DATA, orig | 0x80);
store = mga_readb(vm, 0x1234);
tmp = bytes;
@@ -1323,7 +1324,7 @@ static int matroxfb_getmemory(WPMINFO unsigned int maxSize, unsigned int *realSi
mga_writeb(vm, 0x1234, store);
mga_outb(M_EXTVGA_INDEX, 0x03);
- mga_outb(M_EXTVGA_DATA, mga_inb(M_EXTVGA_DATA) & ~0x80);
+ mga_outb(M_EXTVGA_DATA, orig);
*realSize = offs - 0x100000;
#ifdef CONFIG_FB_MATROX_MILLENIUM
@@ -1858,6 +1859,8 @@ static int initMatrox2(WPMINFO struct board* b){
to yres_virtual * xres_virtual < 2^32 */
}
matroxfb_init_fix(PMINFO2);
+ ACCESS_FBINFO(fbcon.screen_base) = vaddr_va(ACCESS_FBINFO(video.vbase));
+ matroxfb_update_fix(PMINFO2);
/* Normalize values (namely yres_virtual) */
matroxfb_check_var(&vesafb_defined, &ACCESS_FBINFO(fbcon));
/* And put it into "current" var. Do NOT program hardware yet, or we'll not take over
@@ -2010,11 +2013,11 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
}
/* not match... */
if (!b->vendor)
- return -1;
+ return -ENODEV;
if (dev > 0) {
/* not requested one... */
dev--;
- return -1;
+ return -ENODEV;
}
pci_read_config_dword(pdev, PCI_COMMAND, &cmd);
if (pci_enable_device(pdev)) {
diff --git a/drivers/video/nvidia/nv_i2c.c b/drivers/video/nvidia/nv_i2c.c
index ace484fa61c..12f2884d3f0 100644
--- a/drivers/video/nvidia/nv_i2c.c
+++ b/drivers/video/nvidia/nv_i2c.c
@@ -209,10 +209,13 @@ int nvidia_probe_i2c_connector(struct fb_info *info, int conn, u8 **out_edid)
if (!edid && conn == 1) {
/* try to get from firmware */
- edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
- if (edid)
- memcpy(edid, fb_firmware_edid(info->device),
- EDID_LENGTH);
+ const u8 *e = fb_firmware_edid(info->device);
+
+ if (e != NULL) {
+ edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (edid)
+ memcpy(edid, e, EDID_LENGTH);
+ }
}
if (out_edid)
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 3620de0f252..a7f020ada63 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -893,7 +893,7 @@ static int nvidiafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
int i, set = cursor->set;
u16 fg, bg;
- if (!hwcur || cursor->image.width > MAX_CURS || cursor->image.height > MAX_CURS)
+ if (cursor->image.width > MAX_CURS || cursor->image.height > MAX_CURS)
return -ENXIO;
NVShowHideCursor(par, 0);
@@ -1356,6 +1356,9 @@ static int __devinit nvidia_set_fbinfo(struct fb_info *info)
info->pixmap.size = 8 * 1024;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ if (!hwcur)
+ info->fbops->fb_cursor = soft_cursor;
+
info->var.accel_flags = (!noaccel);
switch (par->Architecture) {
diff --git a/drivers/video/pm3fb.c b/drivers/video/pm3fb.c
index e0dad948467..2e11b601c48 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/pm3fb.c
@@ -67,6 +67,7 @@
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/ioport.h>
+#include <linux/ctype.h>
#include <video/fbcon.h>
#include <video/fbcon-mfb.h>
@@ -2594,7 +2595,7 @@ static char *pm3fb_boardnum_setup(char *options, unsigned long *bn)
{
char *next;
- if (!(CHAR_IS_NUM(options[0]))) {
+ if (!(isdigit(options[0]))) {
(*bn) = 0;
return (options);
}
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 34d4dcc0320..194eed0a238 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -260,9 +260,9 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
}
#ifdef CONFIG_CPU_FREQ
- DPRINTK("dma period = %d ps, clock = %d kHz\n",
- pxafb_display_dma_period(var),
- get_clk_frequency_khz(0));
+ pr_debug("pxafb: dma period = %d ps, clock = %d kHz\n",
+ pxafb_display_dma_period(var),
+ get_clk_frequency_khz(0));
#endif
return 0;
@@ -270,7 +270,7 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
static inline void pxafb_set_truecolor(u_int is_true_color)
{
- DPRINTK("true_color = %d\n", is_true_color);
+ pr_debug("pxafb: true_color = %d\n", is_true_color);
// do your machine-specific setup if needed
}
@@ -284,7 +284,7 @@ static int pxafb_set_par(struct fb_info *info)
struct fb_var_screeninfo *var = &info->var;
unsigned long palette_mem_size;
- DPRINTK("set_par\n");
+ pr_debug("pxafb: set_par\n");
if (var->bits_per_pixel == 16)
fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR;
@@ -308,7 +308,7 @@ static int pxafb_set_par(struct fb_info *info)
palette_mem_size = fbi->palette_size * sizeof(u16);
- DPRINTK("palette_mem_size = 0x%08lx\n", (u_long) palette_mem_size);
+ pr_debug("pxafb: palette_mem_size = 0x%08lx\n", palette_mem_size);
fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size);
fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size;
@@ -369,7 +369,7 @@ static int pxafb_blank(int blank, struct fb_info *info)
struct pxafb_info *fbi = (struct pxafb_info *)info;
int i;
- DPRINTK("pxafb_blank: blank=%d\n", blank);
+ pr_debug("pxafb: blank=%d\n", blank);
switch (blank) {
case FB_BLANK_POWERDOWN:
@@ -508,15 +508,15 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *
u_long flags;
u_int lines_per_panel, pcd = get_pcd(var->pixclock);
- DPRINTK("Configuring PXA LCD\n");
+ pr_debug("pxafb: Configuring PXA LCD\n");
- DPRINTK("var: xres=%d hslen=%d lm=%d rm=%d\n",
- var->xres, var->hsync_len,
- var->left_margin, var->right_margin);
- DPRINTK("var: yres=%d vslen=%d um=%d bm=%d\n",
- var->yres, var->vsync_len,
- var->upper_margin, var->lower_margin);
- DPRINTK("var: pixclock=%d pcd=%d\n", var->pixclock, pcd);
+ pr_debug("var: xres=%d hslen=%d lm=%d rm=%d\n",
+ var->xres, var->hsync_len,
+ var->left_margin, var->right_margin);
+ pr_debug("var: yres=%d vslen=%d um=%d bm=%d\n",
+ var->yres, var->vsync_len,
+ var->upper_margin, var->lower_margin);
+ pr_debug("var: pixclock=%d pcd=%d\n", var->pixclock, pcd);
#if DEBUG_VAR
if (var->xres < 16 || var->xres > 1024)
@@ -589,10 +589,10 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *
if (pcd)
new_regs.lccr3 |= LCCR3_PixClkDiv(pcd);
- DPRINTK("nlccr0 = 0x%08x\n", new_regs.lccr0);
- DPRINTK("nlccr1 = 0x%08x\n", new_regs.lccr1);
- DPRINTK("nlccr2 = 0x%08x\n", new_regs.lccr2);
- DPRINTK("nlccr3 = 0x%08x\n", new_regs.lccr3);
+ pr_debug("nlccr0 = 0x%08x\n", new_regs.lccr0);
+ pr_debug("nlccr1 = 0x%08x\n", new_regs.lccr1);
+ pr_debug("nlccr2 = 0x%08x\n", new_regs.lccr2);
+ pr_debug("nlccr3 = 0x%08x\n", new_regs.lccr3);
/* Update shadow copy atomically */
local_irq_save(flags);
@@ -637,24 +637,24 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *
}
#if 0
- DPRINTK("fbi->dmadesc_fblow_cpu = 0x%p\n", fbi->dmadesc_fblow_cpu);
- DPRINTK("fbi->dmadesc_fbhigh_cpu = 0x%p\n", fbi->dmadesc_fbhigh_cpu);
- DPRINTK("fbi->dmadesc_palette_cpu = 0x%p\n", fbi->dmadesc_palette_cpu);
- DPRINTK("fbi->dmadesc_fblow_dma = 0x%x\n", fbi->dmadesc_fblow_dma);
- DPRINTK("fbi->dmadesc_fbhigh_dma = 0x%x\n", fbi->dmadesc_fbhigh_dma);
- DPRINTK("fbi->dmadesc_palette_dma = 0x%x\n", fbi->dmadesc_palette_dma);
-
- DPRINTK("fbi->dmadesc_fblow_cpu->fdadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fdadr);
- DPRINTK("fbi->dmadesc_fbhigh_cpu->fdadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fdadr);
- DPRINTK("fbi->dmadesc_palette_cpu->fdadr = 0x%x\n", fbi->dmadesc_palette_cpu->fdadr);
-
- DPRINTK("fbi->dmadesc_fblow_cpu->fsadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fsadr);
- DPRINTK("fbi->dmadesc_fbhigh_cpu->fsadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fsadr);
- DPRINTK("fbi->dmadesc_palette_cpu->fsadr = 0x%x\n", fbi->dmadesc_palette_cpu->fsadr);
-
- DPRINTK("fbi->dmadesc_fblow_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fblow_cpu->ldcmd);
- DPRINTK("fbi->dmadesc_fbhigh_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fbhigh_cpu->ldcmd);
- DPRINTK("fbi->dmadesc_palette_cpu->ldcmd = 0x%x\n", fbi->dmadesc_palette_cpu->ldcmd);
+ pr_debug("fbi->dmadesc_fblow_cpu = 0x%p\n", fbi->dmadesc_fblow_cpu);
+ pr_debug("fbi->dmadesc_fbhigh_cpu = 0x%p\n", fbi->dmadesc_fbhigh_cpu);
+ pr_debug("fbi->dmadesc_palette_cpu = 0x%p\n", fbi->dmadesc_palette_cpu);
+ pr_debug("fbi->dmadesc_fblow_dma = 0x%x\n", fbi->dmadesc_fblow_dma);
+ pr_debug("fbi->dmadesc_fbhigh_dma = 0x%x\n", fbi->dmadesc_fbhigh_dma);
+ pr_debug("fbi->dmadesc_palette_dma = 0x%x\n", fbi->dmadesc_palette_dma);
+
+ pr_debug("fbi->dmadesc_fblow_cpu->fdadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fdadr);
+ pr_debug("fbi->dmadesc_fbhigh_cpu->fdadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fdadr);
+ pr_debug("fbi->dmadesc_palette_cpu->fdadr = 0x%x\n", fbi->dmadesc_palette_cpu->fdadr);
+
+ pr_debug("fbi->dmadesc_fblow_cpu->fsadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fsadr);
+ pr_debug("fbi->dmadesc_fbhigh_cpu->fsadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fsadr);
+ pr_debug("fbi->dmadesc_palette_cpu->fsadr = 0x%x\n", fbi->dmadesc_palette_cpu->fsadr);
+
+ pr_debug("fbi->dmadesc_fblow_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fblow_cpu->ldcmd);
+ pr_debug("fbi->dmadesc_fbhigh_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fbhigh_cpu->ldcmd);
+ pr_debug("fbi->dmadesc_palette_cpu->ldcmd = 0x%x\n", fbi->dmadesc_palette_cpu->ldcmd);
#endif
fbi->reg_lccr0 = new_regs.lccr0;
@@ -684,7 +684,7 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *
*/
static inline void __pxafb_backlight_power(struct pxafb_info *fbi, int on)
{
- DPRINTK("backlight o%s\n", on ? "n" : "ff");
+ pr_debug("pxafb: backlight o%s\n", on ? "n" : "ff");
if (pxafb_backlight_power)
pxafb_backlight_power(on);
@@ -692,7 +692,7 @@ static inline void __pxafb_backlight_power(struct pxafb_info *fbi, int on)
static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on)
{
- DPRINTK("LCD power o%s\n", on ? "n" : "ff");
+ pr_debug("pxafb: LCD power o%s\n", on ? "n" : "ff");
if (pxafb_lcd_power)
pxafb_lcd_power(on);
@@ -740,13 +740,13 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi)
static void pxafb_enable_controller(struct pxafb_info *fbi)
{
- DPRINTK("Enabling LCD controller\n");
- DPRINTK("fdadr0 0x%08x\n", (unsigned int) fbi->fdadr0);
- DPRINTK("fdadr1 0x%08x\n", (unsigned int) fbi->fdadr1);
- DPRINTK("reg_lccr0 0x%08x\n", (unsigned int) fbi->reg_lccr0);
- DPRINTK("reg_lccr1 0x%08x\n", (unsigned int) fbi->reg_lccr1);
- DPRINTK("reg_lccr2 0x%08x\n", (unsigned int) fbi->reg_lccr2);
- DPRINTK("reg_lccr3 0x%08x\n", (unsigned int) fbi->reg_lccr3);
+ pr_debug("pxafb: Enabling LCD controller\n");
+ pr_debug("fdadr0 0x%08x\n", (unsigned int) fbi->fdadr0);
+ pr_debug("fdadr1 0x%08x\n", (unsigned int) fbi->fdadr1);
+ pr_debug("reg_lccr0 0x%08x\n", (unsigned int) fbi->reg_lccr0);
+ pr_debug("reg_lccr1 0x%08x\n", (unsigned int) fbi->reg_lccr1);
+ pr_debug("reg_lccr2 0x%08x\n", (unsigned int) fbi->reg_lccr2);
+ pr_debug("reg_lccr3 0x%08x\n", (unsigned int) fbi->reg_lccr3);
/* enable LCD controller clock */
pxa_set_cken(CKEN16_LCD, 1);
@@ -761,19 +761,19 @@ static void pxafb_enable_controller(struct pxafb_info *fbi)
FDADR1 = fbi->fdadr1;
LCCR0 |= LCCR0_ENB;
- DPRINTK("FDADR0 0x%08x\n", (unsigned int) FDADR0);
- DPRINTK("FDADR1 0x%08x\n", (unsigned int) FDADR1);
- DPRINTK("LCCR0 0x%08x\n", (unsigned int) LCCR0);
- DPRINTK("LCCR1 0x%08x\n", (unsigned int) LCCR1);
- DPRINTK("LCCR2 0x%08x\n", (unsigned int) LCCR2);
- DPRINTK("LCCR3 0x%08x\n", (unsigned int) LCCR3);
+ pr_debug("FDADR0 0x%08x\n", (unsigned int) FDADR0);
+ pr_debug("FDADR1 0x%08x\n", (unsigned int) FDADR1);
+ pr_debug("LCCR0 0x%08x\n", (unsigned int) LCCR0);
+ pr_debug("LCCR1 0x%08x\n", (unsigned int) LCCR1);
+ pr_debug("LCCR2 0x%08x\n", (unsigned int) LCCR2);
+ pr_debug("LCCR3 0x%08x\n", (unsigned int) LCCR3);
}
static void pxafb_disable_controller(struct pxafb_info *fbi)
{
DECLARE_WAITQUEUE(wait, current);
- DPRINTK("Disabling LCD controller\n");
+ pr_debug("pxafb: disabling LCD controller\n");
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&fbi->ctrlr_wait, &wait);
@@ -1039,7 +1039,7 @@ static int __init pxafb_map_video_memory(struct pxafb_info *fbi)
fbi->palette_size = fbi->fb.var.bits_per_pixel == 8 ? 256 : 16;
palette_mem_size = fbi->palette_size * sizeof(u16);
- DPRINTK("palette_mem_size = 0x%08lx\n", (u_long) palette_mem_size);
+ pr_debug("pxafb: palette_mem_size = 0x%08lx\n", palette_mem_size);
fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size);
fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size;
diff --git a/drivers/video/pxafb.h b/drivers/video/pxafb.h
index 22c00be786a..47f41f70db7 100644
--- a/drivers/video/pxafb.h
+++ b/drivers/video/pxafb.h
@@ -114,15 +114,6 @@ struct pxafb_info {
#define PXA_NAME "PXA"
/*
- * Debug macros
- */
-#if DEBUG
-# define DPRINTK(fmt, args...) printk("%s: " fmt, __FUNCTION__ , ## args)
-#else
-# define DPRINTK(fmt, args...)
-#endif
-
-/*
* Minimum X and Y resolutions
*/
#define MIN_XRES 64
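
The pxafb conversion drops the driver-private DPRINTK macro in favour of the generic pr_debug(), which compiles to nothing unless DEBUG is defined for the file (for example via a #define DEBUG before the includes, or -DDEBUG in the Makefile), so the verbose register dumps cost nothing in normal builds. A minimal sketch:

#define DEBUG			/* enable pr_debug() for this file only */
#include <linux/kernel.h>

static void show_regs_sketch(unsigned int lccr0, unsigned int lccr1)
{
	/* with DEBUG left undefined, these calls compile away entirely */
	pr_debug("pxafb: lccr0 = 0x%08x\n", lccr0);
	pr_debug("pxafb: lccr1 = 0x%08x\n", lccr1);
}
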
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 00c0223a352..5ab79afb53b 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -228,8 +228,8 @@ static int s3c2410fb_check_var(struct fb_var_screeninfo *var,
* information
*/
-static int s3c2410fb_activate_var(struct s3c2410fb_info *fbi,
- struct fb_var_screeninfo *var)
+static void s3c2410fb_activate_var(struct s3c2410fb_info *fbi,
+ struct fb_var_screeninfo *var)
{
fbi->regs.lcdcon1 &= ~S3C2410_LCDCON1_MODEMASK;
diff --git a/drivers/video/savage/savagefb-i2c.c b/drivers/video/savage/savagefb-i2c.c
index 959404ad68f..3c98457783c 100644
--- a/drivers/video/savage/savagefb-i2c.c
+++ b/drivers/video/savage/savagefb-i2c.c
@@ -274,10 +274,13 @@ int savagefb_probe_i2c_connector(struct fb_info *info, u8 **out_edid)
if (!edid) {
/* try to get from firmware */
- edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
- if (edid)
- memcpy(edid, fb_firmware_edid(info->device),
- EDID_LENGTH);
+ const u8 *e = fb_firmware_edid(info->device);
+
+ if (e) {
+ edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (edid)
+ memcpy(edid, e, EDID_LENGTH);
+ }
}
if (out_edid)
diff --git a/drivers/video/savage/savagefb.h b/drivers/video/savage/savagefb.h
index d6f94742c9f..ea17f7e0482 100644
--- a/drivers/video/savage/savagefb.h
+++ b/drivers/video/savage/savagefb.h
@@ -60,8 +60,6 @@
#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000))
-#define S3_MOBILE_TWISTER_SERIES(chip) ((chip==S3_TWISTER) || (chip == S3_PROSAVAGEDDR))
-
/* Chip tags. These are used to group the adapters into
* related families.
*/
@@ -74,8 +72,6 @@ typedef enum {
S3_PROSAVAGE,
S3_SUPERSAVAGE,
S3_SAVAGE2000,
- S3_PROSAVAGEDDR,
- S3_TWISTER,
S3_LAST
} savage_chipset;
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index b5ca3ef8271..7c285455c92 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -1773,8 +1773,7 @@ static int __devinit savage_init_hw (struct savagefb_par *par)
}
}
- if (S3_SAVAGE_MOBILE_SERIES(par->chip) ||
- (S3_MOBILE_TWISTER_SERIES(par->chip) && !par->crtonly))
+ if (S3_SAVAGE_MOBILE_SERIES(par->chip) && !par->crtonly)
par->display_type = DISP_LCD;
else if (dvi || (par->chip == S3_SAVAGE4 && par->dvi))
par->display_type = DISP_DFP;
@@ -1783,7 +1782,7 @@ static int __devinit savage_init_hw (struct savagefb_par *par)
/* Check LCD panel information */
- if (par->chip == S3_SAVAGE_MX) {
+ if (par->display_type == DISP_LCD) {
unsigned char cr6b = VGArCR( 0x6b );
int panelX = (VGArSEQ (0x61) +
@@ -1922,15 +1921,15 @@ static int __devinit savage_init_fb_info (struct fb_info *info,
snprintf (info->fix.id, 16, "ProSavageKM");
break;
case FB_ACCEL_S3TWISTER_P:
- par->chip = S3_TWISTER;
+ par->chip = S3_PROSAVAGE;
snprintf (info->fix.id, 16, "TwisterP");
break;
case FB_ACCEL_S3TWISTER_K:
- par->chip = S3_TWISTER;
+ par->chip = S3_PROSAVAGE;
snprintf (info->fix.id, 16, "TwisterK");
break;
case FB_ACCEL_PROSAVAGE_DDR:
- par->chip = S3_PROSAVAGEDDR;
+ par->chip = S3_PROSAVAGE;
snprintf (info->fix.id, 16, "ProSavageDDR");
break;
case FB_ACCEL_PROSAVAGE_DDRK: